query (string, length 9 – 9.05k) | document (string, length 10 – 222k) | negatives (list, length 19 – 20) | metadata (dict)
---|---|---|---|
Accepts the server challenge and sends the signed nonce. It also sends the protocols the client is using, so the server can verify they were not downgraded
|
def accept_challenge(self, nonce2):
    logger.info("Sending POST to accept Challenge")
    if self.state == 'START_CHALLENGE':
        snonce2 = self.sign_message(nonce2)
        self.challenge_nonce2 = snonce2
        key, salt = self.derive_key(self.shared_key)
        if self.session_id is not None:
            headers = {
                'Content-Type': 'application/json',
                'session_id': str(self.session_id)
            }
        message = json.dumps({
            'method': 'ACCEPT_CHALLENGE',
            'snonce2': snonce2.decode('latin'),
            'protocols': json.dumps({'cipher': self.ciphers, 'mode': self.ciphermodes, 'digest': self.digests})
        }).encode('latin')
        data, iv = self.encrypt_message(message, key)
        logger.info("Successfully encrypted challenge, certificate and communication protocols.")
        message = {
            'data': base64.b64encode(data),
            'iv': base64.b64encode(iv),
            'salt': base64.b64encode(salt),
            'hmac': base64.b64encode(self.add_hmac(data, key))
        }
        logger.info("Sending POST Challenge")
        request = requests.post(f'{SERVER_URL}/api', json=message, headers=headers)
        response = json.loads(request.text)
        message, key, iv, salt, hmac = self.receive_message(response)
        if not self.verify_hmac(hmac, message, key):
            exit(0)
        else:
            logger.info("HMAC OK")
            message = self.decrypt_message(message, iv, key)
            message = json.loads(message)
            if message['method'] == 'ACK':
                self.state = 'ACCEPT_CHALLENGE'
            else:
                logger.error(message['content'])
                return False
    else:
        return False
|
[
"def start_challenge(self):\r\n\t\tif self.state=='KEY_EXCHANGE':\r\n\r\n\t\t\tlogger.info(\"Starting Challenge\")\r\n\t\t\tnonce = os.urandom(16)\r\n\t\t\tself.challenge_nonce = nonce\r\n\t\t\tkey, salt = self.derive_key(self.shared_key)\r\n\t\t\tif self.session_id != None:\r\n\t\t\t\theaders = {\r\n\t\t\t\t\t'Content-Type': 'application/json',\r\n\t\t\t\t\t'session_id' : str(self.session_id)\r\n\t\t\t\t\t}\t\r\n\t\t\tmessage = json.dumps({\r\n\t\t\t\t'method': 'START_CHALLENGE',\r\n\t\t\t\t'nonce': nonce.decode('latin'), \r\n\t\t\t\t'cert': self.certificate.public_bytes(serialization.Encoding.PEM).decode('latin'),\r\n\t\t\t}).encode('latin')\t\t\r\n\t\t\tdata,iv = self.encrypt_message(message,key)\r\n\t\t\t\r\n\t\t\tlogger.info(\"Sucessfuly encrypted challenge and certificate\")\r\n\t\t\t\r\n\t\t\tmessage = {\r\n\t\t\t\t'data': base64.b64encode(data),\r\n\t\t\t\t'iv': base64.b64encode(iv),\r\n\t\t\t\t'hmac': base64.b64encode(self.add_hmac(data,key)),\r\n\t\t\t\t'salt': base64.b64encode(salt)\r\n\t\t\t}\r\n\t\t\t\r\n\t\t\tlogger.info(\"Sending POST Challenge and Client Certificate\")\r\n\t\t\trequest = requests.post(f'{SERVER_URL}/api',json=message, headers=headers)\r\n\t\t\t\r\n\t\t\tresponse = json.loads(request.text)\r\n\t\t\tmessage, key, iv, salt, hmac = self.receive_message(response)\r\n\t\t\t#iv = base64.b64decode(response['iv'])\r\n\t\t\t#hmac = base64.b64decode(response['hmac'])\r\n\t\t\t#salt = base64.b64decode(response['salt'])\r\n\t\t\t#msg = base64.b64decode(response['message'])\r\n\t\t\t\r\n\t\t\t#key, _ = self.derive_key(self.shared_key,salt)\r\n\t\t\tif not self.verify_hmac(hmac,message,key):\r\n\t\t\t\texit(0)\r\n\t\t\telse:\r\n\t\t\t\tlogger.info(\"HMAC OK\")\r\n\t\t\t\tmessage = self.decrypt_message(message,iv,key)\r\n\t\t\t\tmessage = json.loads(message)\r\n\t\t\t\tnonce = message['snonce'].encode('latin')\r\n\t\t\t\tnonce2 = message['nonce2'].encode('latin')\r\n\t\t\t\tself.state='START_CHALLENGE'\r\n\t\t\t\tif self.verify_challenge(nonce):\r\n\t\t\t\t\tself.accept_challenge(nonce2)\r\n\t\t\t\telse:\r\n\t\t\t\t\treturn False\r\n\r\n\t\telse:\r\n\t\t\treturn False",
"def complete_hybi00(headers, challenge):\n\n key1 = headers[\"Sec-WebSocket-Key1\"]\n key2 = headers[\"Sec-WebSocket-Key2\"]\n\n first = int(\"\".join(i for i in key1 if i in digits)) / key1.count(\" \")\n second = int(\"\".join(i for i in key2 if i in digits)) / key2.count(\" \")\n\n nonce = pack(\">II8s\", first, second, challenge)\n\n return md5(nonce).digest()",
"def complete_hybi00(headers, challenge):\r\n\r\n key1 = headers[\"Sec-WebSocket-Key1\"]\r\n key2 = headers[\"Sec-WebSocket-Key2\"]\r\n\r\n first = int(\"\".join(i for i in key1 if i in digits)) / key1.count(\" \")\r\n second = int(\"\".join(i for i in key2 if i in digits)) / key2.count(\" \")\r\n\r\n nonce = pack(\">II8s\", first, second, challenge)\r\n\r\n return md5(nonce).digest()",
"def challenge_response(\n serial: Optional[str],\n host: str,\n user: str,\n prompt: str,\n credential_id: str,\n challenge: str,\n udp: bool,\n) -> None:\n\n nkfido2.find().simple_secret(\n credential_id,\n challenge,\n host=host,\n user_id=user,\n serial=serial,\n prompt=prompt,\n output=True,\n udp=udp,\n )",
"def generate_nt_response_mschap2(authenticator_challenge,peer_challenge,username,password):\n challenge=challenge_hash(peer_challenge,authenticator_challenge,username)\n password_hash=nt_password_hash(password)\n return challenge_response(challenge,password_hash)",
"def negotiate_algs(self):\r\n\r\n\t\tdata = {\r\n\t\t\t'method': \"NEGOTIATE_ALG\",\r\n\t\t\t'ciphers': self.ciphers,\r\n\t\t\t'digests': self.digests,\r\n\t\t\t'ciphermodes': self.ciphermodes\r\n\t\t}\r\n\t\trequest = requests.post(f'{SERVER_URL}/api/protocols',json=data, headers={'Content-Type': 'application/json'})\r\n\r\n\r\n\r\n\t\tresponse = json.loads(request.text)\r\n\t\t\r\n\t\tif response['method'] == 'NACK':\r\n\t\t\tlogger.info('ERROR NEGOTIATING ALGORITHMS')\r\n\t\telse:\r\n\t\t\tlogger.info('NEGOTIATED ALGORITHMS WITH SUCCESS')\r\n\t\t\tself.session_id = response['id']\r\n\t\t\tself.cipher, self.digest, self.ciphermode = response['cipher'], response['digest'], response['mode']\r\n\t\t\t\r\n\t\t\tcert = base64.b64decode(response['cert'])\r\n\t\t\tcert = x509.load_pem_x509_certificate(cert)\r\n\t\t\tself.build_cert_chain(cert)\r\n\t\t\tif self.validate_cert_chain() and self.validate_server_purpose(cert):\r\n\t\t\t\tlogger.info(\"Server Certificate OK\")\r\n\t\t\t\tself.server_cert = cert\r\n\t\t\t\tself.state = 'NEGOTIATE_ALG'\r\n\t\t\telse:\r\n\t\t\t\tlogger.info(\"Certificate is not valid\")\r\n\t\t\t\texit(1)\t\t# TODO: ver\r\n\r\n\t\t\t#self.server_cert=cert\r",
"def send_response(box_url: str, username: str, challenge_response: str) -> str:\n # Build response params\n post_data_dict = {\"username\": username, \"response\": challenge_response}\n post_data = urllib.parse.urlencode(post_data_dict).encode()\n headers = {\"Content-Type\": \"application/x-www-form-urlencoded\"}\n url = box_url + LOGIN_SID_ROUTE\n # Send response\n http_request = urllib.request.Request(url, post_data, headers)\n http_response = urllib.request.urlopen(http_request)\n # Parse SID from resulting XML.\n xml = ET.fromstring(http_response.read())\n return xml.find(\"SID\").text",
"def test_handshake(self):\n cli, svr, p = connectedServerAndClient(\n ServerClass=SecurableProto,\n ClientClass=SecurableProto)\n\n okc = OKCert()\n svr.certFactory = lambda : okc\n\n cli.callRemote(\n amp.StartTLS, tls_localCertificate=okc,\n tls_verifyAuthorities=[PretendRemoteCertificateAuthority()])\n\n # let's buffer something to be delivered securely\n L = []\n cli.callRemote(SecuredPing).addCallback(L.append)\n p.flush()\n # once for client once for server\n self.assertEqual(okc.verifyCount, 2)\n L = []\n cli.callRemote(SecuredPing).addCallback(L.append)\n p.flush()\n self.assertEqual(L[0], {'pinged': True})",
"def generate_nt_response_mschap(challenge,password):\n password_hash=nt_password_hash(password)\n return challenge_response(challenge,password_hash)",
"def generate_authenticator_response(nt_response,peer_challenge,authenticator_challenge,username,password=False,password_hash=False):\n Magic1=\"\\x4D\\x61\\x67\\x69\\x63\\x20\\x73\\x65\\x72\\x76\\x65\\x72\\x20\\x74\\x6F\\x20\\x63\\x6C\\x69\\x65\\x6E\\x74\\x20\\x73\\x69\\x67\\x6E\\x69\\x6E\\x67\\x20\\x63\\x6F\\x6E\\x73\\x74\\x61\\x6E\\x74\"\n Magic2=\"\\x50\\x61\\x64\\x20\\x74\\x6F\\x20\\x6D\\x61\\x6B\\x65\\x20\\x69\\x74\\x20\\x64\\x6F\\x20\\x6D\\x6F\\x72\\x65\\x20\\x74\\x68\\x61\\x6E\\x20\\x6F\\x6E\\x65\\x20\\x69\\x74\\x65\\x72\\x61\\x74\\x69\\x6F\\x6E\"\n\n # the2nd: modifed for OTPme to allow verification without the need to have a clear-text password\n # if we got a password we have to generate its hash and hash_hash\n if password:\n password_hash=nt_password_hash(password,False)\n password_hash_hash=hash_nt_password_hash(password_hash)\n elif password_hash:\n # if we got the password_hash we only have to generate the hash_hash\n password_hash_hash=hash_nt_password_hash(password_hash)\n\n sha_hash=sha.new()\n sha_hash.update(password_hash_hash)\n sha_hash.update(nt_response)\n sha_hash.update(Magic1)\n digest=sha_hash.digest()\n\n challenge=challenge_hash(peer_challenge,authenticator_challenge,username)\n\n sha_hash=sha.new()\n sha_hash.update(digest)\n sha_hash.update(challenge)\n sha_hash.update(Magic2)\n digest=sha_hash.digest()\n\n return \"S=\"+convert_to_hex_string(digest)",
"def test_client_protocol():\n vectors = get_test_vectors()\n application_key = bytes(binascii.a2b_base64(vectors['test1']['application_key']))\n\n\n client_ephemeral_key = Curve25519KeyPair(\n PrivateKey(binascii.a2b_base64(vectors['test1']['client_ephemeral_priv_key'])),\n PublicKey(binascii.a2b_base64(vectors['test1']['client_ephemeral_pub_key']))\n )\n client_signing_private_key = SigningKey(binascii.a2b_base64(vectors['test1']['client_signing_priv_key']))\n client_signing_key = Ed25519KeyPair(\n client_signing_private_key,\n client_signing_private_key.verify_key\n )\n\n server_ephemeral_key = Curve25519KeyPair(\n PrivateKey(binascii.a2b_base64(vectors['test1']['server_ephemeral_priv_key'])),\n PublicKey(binascii.a2b_base64(vectors['test1']['server_ephemeral_pub_key']))\n )\n server_signing_private_key = SigningKey(binascii.a2b_base64(vectors['test1']['server_signing_priv_key']))\n server_signing_key = Ed25519KeyPair(\n server_signing_private_key,\n server_signing_private_key.verify_key\n )\n\n client_factory = SecretHandshakeClientFactory(\n application_key,\n client_ephemeral_key,\n client_signing_key,\n server_signing_key.public_key\n )\n client_protocol = client_factory.buildProtocol(None)\n client_transport = proto_helpers.StringTransport()\n\n server_factory = SecretHandshakeServerFactory(\n application_key,\n server_ephemeral_key,\n server_signing_key,\n )\n server_protocol = server_factory.buildProtocol(None)\n server_transport = proto_helpers.StringTransport()\n \n client_protocol.makeConnection(client_transport)\n server_protocol.makeConnection(server_transport)\n assert len(client_transport.value()) == 68\n\n server_protocol.dataReceived(client_transport.value())\n client_transport.clear()\n client_protocol.dataReceived(server_transport.value())\n server_transport.clear()\n server_protocol.dataReceived(client_transport.value())\n client_transport.clear()\n client_protocol.dataReceived(server_transport.value())\n server_transport.clear()\n\n yield client_protocol.when_connected()\n yield server_protocol.when_connected()\n\n server_transport.clear()\n client_transport.clear()\n\n client_protocol.messageSend(\"Alice was not a bit hurt, and she \\\njumped up on to her feet in a moment: she looked up, but it was all \\\ndark overhead; before her was another long passage, and the White \\\nRabbit was still in sight, hurrying down it. There was not a moment \\\nto be lost: away went Alice like the wind, and was just in time to \\\nhear it say, as it turned a corner, 'Oh my ears and whiskers, how late \\\nit's getting!' She was close behind it when she turned the corner, but \\\nthe Rabbit was no longer to be seen: she found herself in a long, low \\\nhall, which was lit up by a row of lamps hanging from the roof.\")\n server_protocol.dataReceived(client_transport.value())\n client_transport.clear()\n\n server_protocol.messageSend(\"There were doors all round the hall, \\\nbut they were all locked; and when Alice had been all the way down \\\none side and up the other, trying every door, she walked sadly down \\\nthe middle, wondering how she was ever to get out again.\")\n client_protocol.dataReceived(server_transport.value())\n server_transport.clear()\n\n client_protocol.messageSend(\"Suddenly she came upon a little \\\nthree-legged table, all made of solid glass; there was nothing on it \\\nexcept a tiny golden key, and Alice's first thought was that it might \\\nbelong to one of the doors of the hall; but, alas! 
either the locks \\\nwere too large, or the key was too small, but at any rate it would not \\\nopen any of them. However, on the second time round, she came upon a \\\nlow curtain she had not noticed before, and behind it was a little \\\ndoor about fifteen inches high: she tried the little golden key in the \\\nlock, and to her great delight it fitted!\")\n server_protocol.dataReceived(client_transport.value())\n client_transport.clear()\n\n server_protocol.messageSend(\"Alice opened the door and found that \\\nit led into a small passage, not much larger than a rat-hole: she \\\nknelt down and looked along the passage into the loveliest garden you \\\never saw. How she longed to get out of that dark hall, and wander \\\nabout among those beds of bright flowers and those cool fountains, but \\\nshe could not even get her head through the doorway; 'and even if my \\\nhead would go through,' thought poor Alice, 'it would be of very \\\nlittle use without my shoulders. Oh, how I wish I could shut up like a \\\ntelescope! I think I could, if I only knew how to begin.' For, you \\\nsee, so many out-of-the-way things had happened lately, that Alice had \\\nbegun to think that very few things indeed were really impossible.\")\n client_protocol.dataReceived(server_transport.value())\n server_transport.clear()",
"def srp_protocol(self) -> bool:\n a = random.randint(0, self.N)\n A = pow(self.g, a, self.N)\n\n salt, B = self.server.srp_protocol_one(\n A\n )\n\n u = int.from_bytes(\n matasano.hash.SHA256(\n matasano.util.bytes_for_int(A) +\n matasano.util.bytes_for_int(B)\n ),\n byteorder='little'\n )\n\n x = int.from_bytes(\n matasano.hash.SHA256(\n salt + self._password\n ), byteorder=\"little\"\n )\n\n s = pow(\n B - self.k * pow(self.g, x, self.N),\n a + u * x,\n self.N\n )\n\n self.key = matasano.hash.SHA256(\n matasano.util.bytes_for_int(s)\n )\n\n return self.server.srp_protocol_two(\n matasano.mac.hmac_sha256(\n self.key,\n salt\n )\n )",
"def test_NPNAndALPNNoOverlap(self):\n clientProtocols = [b'h2', b'http/1.1']\n serverProtocols = [b'spdy/3']\n negotiatedProtocol, lostReason = negotiateProtocol(\n serverProtocols=clientProtocols,\n clientProtocols=serverProtocols,\n )\n self.assertIsNone(negotiatedProtocol)\n self.assertEqual(lostReason.type, SSL.Error)",
"def _safecookie_authchallenge(self, reply):\n if self._cookie_data is None:\n raise RuntimeError(\"Cookie data not read.\")\n kw = parse_keywords(reply.replace(' ', '\\n'))\n\n server_hash = base64.b16decode(kw['SERVERHASH'])\n server_nonce = base64.b16decode(kw['SERVERNONCE'])\n # FIXME put string in global. or something.\n expected_server_hash = hmac_sha256(\n b\"Tor safe cookie authentication server-to-controller hash\",\n self._cookie_data + self.client_nonce + server_nonce,\n )\n\n if not compare_via_hash(expected_server_hash, server_hash):\n raise RuntimeError(\n 'Server hash not expected; wanted \"%s\" and got \"%s\".' %\n (base64.b16encode(expected_server_hash),\n base64.b16encode(server_hash))\n )\n\n client_hash = hmac_sha256(\n b\"Tor safe cookie authentication controller-to-server hash\",\n self._cookie_data + self.client_nonce + server_nonce\n )\n client_hash_hex = base64.b16encode(client_hash)\n return self.queue_command(b'AUTHENTICATE ' + client_hash_hex)",
"def test_empty_sni_default(self):\n self.init()\n vhs = TlsHandshake()\n vhs.sni = \"\"\n vhs.host = \"vhost1.net\"\n vhs.send_data = []\n res = vhs.do_12()\n self.assertFalse(res, \"Handshake successfull with empty sni: %s\" % res)\n\n vhs = TlsHandshake()\n vhs.sni = \"vhost2.net\"\n vhs.host = \"vhost2.net\"\n res = vhs.do_12()\n self.assertTrue(res, \"Bad handshake: %s\" % res)\n resp = vhs.hs.server_data[0].data.decode(\"utf-8\")\n self.assertTrue(resp.endswith(\"be2\"), \"Bad response from vhost2: [%s]\" % resp)\n self.assertTrue(\n x509_check_cn(vhs.hs.server_cert[0], \"vhost2.net\"),\n \"Wrong certificate received for vhost1\",\n )",
"def verify_challenge(self,crypt):\r\n\t\ttry:\r\n\t\t\tself.server_cert.public_key().verify(\r\n\t\t\t\tcrypt,\r\n\t\t\t\tself.challenge_nonce,\r\n\t\t\t\tpd.PSS(\r\n\t\t\t\tmgf=pd.MGF1(hashes.SHA256()),\r\n\t\t\t\tsalt_length=pd.PSS.MAX_LENGTH),\r\n\t\t\t\thashes.SHA256()\r\n\t\t\t)\r\n\t\t\tlogger.info(\"Challenge OK\")\r\n\t\t\treturn True\r\n\t\texcept:\r\n\t\t\tlogger.error(\"Challenge wrong. Comms Compromised\")\r\n\t\t\treturn False",
"def challenge_hash(peer_challenge,authenticator_challenge,username):\n sha_hash=sha.new()\n sha_hash.update(peer_challenge)\n sha_hash.update(authenticator_challenge)\n sha_hash.update(username)\n return sha_hash.digest()[:8]",
"def test_NPNAndALPNSuccess(self):\n protocols = [b'h2', b'http/1.1']\n negotiatedProtocol, lostReason = negotiateProtocol(\n clientProtocols=protocols,\n serverProtocols=protocols,\n )\n self.assertEqual(negotiatedProtocol, b'h2')\n self.assertIsNone(lostReason)",
"def _second_message(self):\n if not self._auth_data:\n raise errors.InterfaceError(\"Missing authentication data (seed)\")\n\n passw = self._normalize(self._password)\n salted_password = self._hi(passw,\n b64decode(self.server_salt),\n self.iterations)\n\n _LOGGER.debug(\"salted_password: %s\",\n b64encode(salted_password).decode())\n\n client_key = self._hmac(salted_password, b\"Client Key\")\n _LOGGER.debug(\"client_key: %s\", b64encode(client_key).decode())\n\n stored_key = self.def_digest_mode(client_key).digest()\n _LOGGER.debug(\"stored_key: %s\", b64encode(stored_key).decode())\n\n server_key = self._hmac(salted_password, b\"Server Key\")\n _LOGGER.debug(\"server_key: %s\", b64encode(server_key).decode())\n\n client_first_no_header = \",\".join([\n \"n={}\".format(self._normalize(self._username)),\n \"r={}\".format(self.client_nonce)])\n _LOGGER.debug(\"client_first_no_header: %s\", client_first_no_header)\n auth_msg = ','.join([\n client_first_no_header,\n self.servers_first,\n \"c={}\".format(b64encode(\"n,a={},\".format(\n self._normalize(self._username)).encode()).decode()),\n \"r={}\".format(self.server_nonce)])\n _LOGGER.debug(\"auth_msg: %s\", auth_msg)\n\n client_signature = self._hmac(stored_key, auth_msg.encode())\n _LOGGER.debug(\"client_signature: %s\",\n b64encode(client_signature).decode())\n\n client_proof = self._xor(client_key, client_signature)\n _LOGGER.debug(\"client_proof: %s\", b64encode(client_proof).decode())\n\n self.server_auth_var = b64encode(\n self._hmac(server_key, auth_msg.encode())).decode()\n _LOGGER.debug(\"server_auth_var: %s\", self.server_auth_var)\n\n client_header = b64encode(\n \"n,a={},\".format(self._normalize(self._username)).encode()).decode()\n msg = \",\".join([\"c={}\".format(client_header),\n \"r={}\".format(self.server_nonce),\n \"p={}\".format(b64encode(client_proof).decode())])\n _LOGGER.debug(\"second_message: %s\", msg)\n return msg.encode()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
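The row above only shows the client side of the challenge; sign_message and the server-side check are not in this dump. Below is a minimal sketch of how the signed nonce could be produced and verified with RSA-PSS over SHA-256, mirroring the verify_challenge negative shown above; the names client_private_key and client_cert are hypothetical.

# Sketch only (not part of the dataset row): RSA-PSS signing/verification of the nonce,
# assuming the same PSS/SHA-256 parameters used by verify_challenge above.
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding

def sign_nonce(client_private_key, nonce: bytes) -> bytes:
    # Client side: sign the server-supplied nonce (what sign_message presumably does).
    return client_private_key.sign(
        nonce,
        padding.PSS(mgf=padding.MGF1(hashes.SHA256()),
                    salt_length=padding.PSS.MAX_LENGTH),
        hashes.SHA256(),
    )

def verify_signed_nonce(client_cert, nonce: bytes, signature: bytes) -> bool:
    # Server side: verify the signature with the public key from the client certificate.
    try:
        client_cert.public_key().verify(
            signature,
            nonce,
            padding.PSS(mgf=padding.MGF1(hashes.SHA256()),
                        salt_length=padding.PSS.MAX_LENGTH),
            hashes.SHA256(),
        )
        return True
    except Exception:
        return False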
Derives a key from the given data
|
def derive_key(self, data, salt=None):
    digest = None
    if salt is None:
        salt = os.urandom(16)
    if self.digest == 'SHA-512':
        digest = hashes.SHA512()
    elif self.digest == 'SHA-256':
        digest = hashes.SHA256()
    key_size = 32
    if self.cipher == '3DES':
        key_size = 16
    # derive
    kdf = PBKDF2HMAC(
        algorithm=digest,
        length=key_size * 2,
        salt=salt,
        iterations=10000,
    )
    key = kdf.derive(data)
    return key, salt
|
[
"def get_key_from_data(data):\n if 'key' in data:\n item_key = data['key']\n return item_key\n return None",
"def _create_key(self):\n return uuid.uuid4().hex",
"def key( self, digram ):\n\t\ta,b = digram.refdigram()\n\t\treturn str( a ) + self.keyseparator + str( b )",
"def dkim_id(data: bytes, lid: Optional[bytes] = None) -> str:\n hashable: bytes\n lid, hashable = rfc6376_rascal(data, lid)\n digest_256: bytes = hmac.digest(lid, hashable, \"sha256\")\n truncated_bits: int = 160\n return pibble32(digest_256[: truncated_bits // 8])",
"def sourcekey_to_key(self, sourcekey: str) -> str:\n ...",
"def __generate_inv_key(self):\n \n self.__inv_key = self.__key\n return self.__inv_key",
"def normalize_key(key: Any):",
"def dgKey(pre, dig):\n if hasattr(pre, \"encode\"):\n pre = pre.encode(\"utf-8\") # convert str to bytes\n if hasattr(dig, \"encode\"):\n dig = dig.encode(\"utf-8\") # convert str to bytes\n\n return (b'%s.%s' % (pre, dig))",
"def createFieldKey(record, key_fileds):\n key = tuple ( [ record[field] for field in key_fields ] )\n return key",
"def key_to_sourcekey(self, key: str) -> str:\n ...",
"def arg_to_key(arg):\n return arg.lstrip('-').replace('-', '_')",
"def derive_key(self, passphrase, salt):\n pass",
"def _get_key(self, entity_id):\n if entity_id:\n return self.client.key(self.kind, entity_id)\n return self.client.key(self.kind)",
"def build_key(cls, user_id):\n key = ndb.Key(cls, user_id)\n return key",
"def key_of_case(cls, test_case):\n if hasattr(test_case, 'items'):\n test_case = test_case.items()\n return _hash_from_fields(\n (k, v) for k, v in test_case\n if k in cls.CASE_PRIMARY_KEYS\n )",
"def get_hash_key(prefix, key_to_hash):\n key_to_hash = key_to_hash.encode('utf-8')\n key = prefix + \":\" + hashlib.md5(key_to_hash).hexdigest()\n return key",
"def generate_key(self):\n try:\n return self.proto.genuid()\n except ValueError:\n return uuid.uuid4()",
"def createFieldKey(record):\n key = tuple ( [ record[field] for field in KEY_FIELDS if field in record ] )\n return key",
"def key2(self):\n return test_keys.dsa_key"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
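derive_key returns a block of 2*key_size bytes; how that block is consumed is not shown in this dump. The standalone sketch below reproduces the same PBKDF2-HMAC derivation (assuming SHA-256, 10000 iterations and a 32-byte key) and splits the output into a cipher key and an HMAC key, which is an assumption about how the callers use it.

# Standalone PBKDF2 sketch; the enc_key / mac_key split is an assumption.
import os
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC

shared_secret = b"example shared secret"   # hypothetical stand-in for self.shared_key
salt = os.urandom(16)

kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=64, salt=salt, iterations=10000)
key_material = kdf.derive(shared_secret)
enc_key, mac_key = key_material[:32], key_material[32:]

# The peer re-derives with the transmitted salt; a fresh KDF instance is required
# because each PBKDF2HMAC object can only derive once.
kdf_check = PBKDF2HMAC(algorithm=hashes.SHA256(), length=64, salt=salt, iterations=10000)
assert kdf_check.derive(shared_secret) == key_material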
Converts a value from 0 to 0xffffff into an (r, g, b) tuple
|
def naivecolormap(value):
    # value2pixel(0.5) -> (0.5,0.5,0.5)
    red = (value & 0x00ff0000) >> 16
    green = (value & 0x0000ff00) >> 8
    blue = (value & 0x000000ff) >> 0
    return (int(red), int(green), int(blue))  # rgb
|
[
"def colorUpdate(self):\n if self.value ==0:\n self.color = [255,255,255]\n return\n k = 0\n V = self.value\n while V>0:\n k += 18\n V //= 2\n self.color = [k,255-k,0]",
"def hass_to_wilight_position(value: int) -> int:\n return min(255, round((value * 255) / 100))",
"def lower_nibble(value):\n return value & 0x0F",
"def pink():\n\n return color2float(Uint8Tensor([[254, 194, 194]]))",
"def just_check_rgb(value):\n # TODO\n return value",
"def getPackedValue(self) -> \"uint32_t\":\n return _coin.SbColor4f_getPackedValue(self)",
"def red():\n\n return color2float(Uint8Tensor([237, 28, 36]))",
"def get_color_code(self):\n if self.color == 'r':\n return (254, 0, 0)\n else:\n return (0, 0, 0)",
"def __getitem__(self, i: 'int') -> \"float\":\n return _coin.SbColor4f___getitem__(self, i)",
"def green():\n\n return color2float(Uint8Tensor([34, 177, 76]))",
"def value_normalization(v, v_min=500, v_max=800):\r\n if v < v_min:\r\n return 255\r\n elif v > v_max:\r\n return 255\r\n else:\r\n return int(255 * (v-v_min) / (v_max - v_min))",
"def __tuple__(self):\n return (self.color & 0xff0000, self.color & 0xff00, self.color & 0xff)",
"def color_map(val):\n # NOTE: This relies on remap_interval, which you must provide\n color_code = remap_interval(val, -1, 1, 0, 255)\n #remaps from -1 to 1 to 0 to 255 so it can be a color\n return int(color_code)",
"def __nq__(self, u: 'SbColor4f') -> \"int\":\n return _coin.SbColor4f___nq__(self, u)",
"def blue():\n\n return color2float(Uint8Tensor([0, 162, 232]))",
"def __getitem__(self, i: 'int') -> \"float\":\n return _coin.SbColor___getitem__(self, i)",
"def yellow():\n\n return color2float(Uint8Tensor([[255, 242, 0]]))",
"def brown():\n\n return color2float(Uint8Tensor([[149, 116, 83]]))",
"def wilight_to_hass_position(value: int) -> int:\n return min(100, round((value * 100) / 255))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
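A quick round-trip check for naivecolormap; the pack_rgb helper is illustrative and not part of the dataset row.

# Round-trip sketch: pack an (r, g, b) triple back into the 0..0xffffff integer.
def pack_rgb(red, green, blue):
    return (red << 16) | (green << 8) | blue

assert naivecolormap(0xffffff) == (255, 255, 255)
assert naivecolormap(0x123456) == (0x12, 0x34, 0x56)
assert pack_rgb(*naivecolormap(0xabcdef)) == 0xabcdef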
Prints out all residues (mod N.Z_p) of rotations of the form, along with the image mod N.Z_p of h. It then enumerates all lattice cosets of the type Nlattice + h' which are p-adically equivalent to Nlattice + h. Finally, it prints out the orbit of these Nlattice + h' cosets under the isometries of the (global) form. form must be a diagonal 3x3 matrix in a numpy array datatype
|
def enumerate_pAdic_class(form, h, N, p, opfile=None, rotations_only=True, debug=False):
    N = p**amf.order(N, p)
    if opfile is None:
        adjective = "proper " if rotations_only else ''
        opfile = 'output %d-adic %scls - form=(%d,%d,%d) h=(%d,%d,%d) N=%d.txt' % (p, adjective, form[0,0], form[1,1], form[2,2], h[0,0], h[1,0], h[2,0], N)
    c = 0
    c_true = 0
    images = set()
    # The matrices X over here are actually [\sigma] mod N, for some \sigma in SO(NL_p)
    # (here [\sigma] is the matrix of \sigma in the basis in which the lattice has the
    # diagonal splitting). It is usually simple enough to compute [\sigma] by solving a
    # couple of simultaneous diophantine equations.
    file_mode = 'w'
    for X in findAllpAdicMatricesMod(form, N, p, rotations_only, debug):
        c += 1
        h1 = X.dot(h) % N
        images.add(tuple(h1.flatten()))
        with open(opfile, file_mode) as f:
            f.write(str(c) + ') ' + str(h1.flatten()) + '\n' + str(X) + "\n\n")
            # for i in xrange(3):
            #     f.write(str(X[i]) + '\n')
            # f.write('\n')
        if ((h1 - h) % N == 0).all():
            c_true += 1
        file_mode = 'a'
        if debug:
            print c, '/', c_true
            print
    with open(opfile, 'a') as f:
        f.write("Group Index = %d / %d = %d\n\n" % (c, c_true, c/c_true))  # c / c_true is the group index [SO(NL_p) : SO(NL_p + h)]
    O_L = [X for X in iterAllLatticeRotations(form)]
    images = list(images)
    orbits = {}
    while len(images) > 0:
        h1 = np.array([[images[0][0]], [images[0][1]], [images[0][2]]])
        key = images[0]
        orbits[key] = set()
        for X in O_L:
            Y = tuple((X.dot(h1) % N).flatten())
            if Y in images:
                images.remove(Y)
                orbits[key].add(Y)
    # print
    with open(opfile, 'a') as f:
        for key in orbits.keys():
            f.write(str(key) + ' :')
            for t in orbits[key]:
                f.write(' ' + str(t))
            f.write('\n')
|
[
"def impmat(structure, freq):\n center = Matrix(structure.center)\n center_ = Matrix(structure.center_)\n edge_length = structure.edge_length\n\n edges_total = structure.edges_total\n triangles_total = structure.triangles_total\n speed_of_light = structure.speed_of_light\n\n wn = 2*PI*freq/speed_of_light\n Z_matrix = np.zeros(shape = (edges_total, edges_total), dtype = np.complex_)\n\n\n for tri in range(triangles_total):\n ## find the edge no for each tri, and group into plus or minus\n plus = []\n minus = []\n count = 0\n for index in range(edges_total):\n if count <= 3:\n if structure.triangle_plus[index] == tri:\n plus.append(index)\n count += 1\n elif structure.triangle_minus[index] == tri:\n minus.append(index)\n count += 1\n\n G = center_ - fill(center(tri), triangles_total, 9) ## dim = no of ttl triangles x 9 sub's\n abs(G)\n fphase = lambda R: (cos(wn*R) - 1j*sin(wn*R))/R\n G.element_wise(fphase)\n ZF=[]\n for k in range(edges_total):\n Fi = sum(G.row(structure.triangle_plus[k])) - sum(G.row(structure.triangle_minus[k]))\n ZF.append(structure.FactorFi[k] / freq * Fi / 9)\n\n Z = None\n Zi = None\n ### --- loop thru each source edge (S); for each edge, fill in the entire column\n for n in plus + minus:\n if n in plus:\n source = structure.rho__plus\n func = lambda x, y: x+y\n else:\n source = structure.rho__minus\n func = lambda x, y: x-y\n src = source[n]\n rho_p = [[each.dot(structure.rho_plus[row]) for each in src] for row in range(edges_total)]\n rho_m = [[each.dot(structure.rho_minus[row])for each in src] for row in range(edges_total)]\n # dim of rho_p and rho_m: no of triangles x 9 sub's\n\n area = []\n ### --- loop through each obverver edge (O)\n for m in range(edges_total):\n g_p = G.row(structure.triangle_plus[m])\n g_m = G.row(structure.triangle_minus[m])\n r_p = rho_p[m]\n r_m = rho_m[m]\n area_p = sum([each*g_p[index] for index, each in enumerate(r_p)])\n area_m = sum([each*g_m[index] for index, each in enumerate(r_m)])\n area.append(area_p + area_m)\n Z1 = [ each*area[index]*freq/9.0 for index, each in enumerate(structure.FactorA)]\n edge = edge_length[n]\n Zi = [ each*edge for each in map(func, Z1, ZF)]\n Z_matrix[:,n] += Zi\n\n return Z_matrix",
"def load_fluctuations_3D_all(self):\n #similar to the 2D case, we first read one file to determine the total toroidal plane number in the simulation\n flucf = self.xgc_path + 'xgc.3d.'+str(self.time_steps[0]).zfill(5)+'.h5'\n fluc_mesh = h5.File(flucf,'r')\n\n self.planes = np.unique(np.array([np.unique(self.prevplane),np.unique(self.nextplane)]))\n self.planeID = {self.planes[i]:i for i in range(len(self.planes))} #the dictionary contains the positions of each chosen plane, useful when we want to get the data on a given plane known only its plane number in xgc file.\n if(self.HaveElectron):\n self.nane = np.zeros( (self.n_cross_section,len(self.time_steps),len(self.planes),len(self.mesh['R'])) )\n self.nane_bar = np.zeros((len(self.time_steps)))\n\n if(self.load_ions):\n self.dni = np.zeros( (self.n_cross_section,len(self.time_steps),len(self.planes),len(self.mesh['R'])) )\n self.dni_bar = np.zeros((len(self.time_steps)))\n\n self.phi = np.zeros( (self.n_cross_section,len(self.time_steps),len(self.planes),len(self.mesh['R'])) )\n self.phi_bar = np.zeros((len(self.time_steps)))\n for i in range(len(self.time_steps)):\n flucf = self.xgc_path + 'xgc.3d.'+str(self.time_steps[i]).zfill(5)+'.h5'\n fluc_mesh = h5.File(flucf,'r')\n\n if(i==0):\n #self.n_plane = fluc_mesh['dpot'].shape[1]\n dn = int(self.n_plane/self.n_cross_section)\n self.center_planes = np.arange(self.n_cross_section)*dn\n\n self.phi_bar[i] = np.mean(fluc_mesh['dpot'][...])\n if (self.HaveElectron):\n self.nane_bar[i] = np.mean(fluc_mesh['eden'][...])\n if (self.load_ions):\n self.dni_bar[i] = np.mean(fluc_mesh['iden'][...])\n\n for j in range(self.n_cross_section):\n self.phi[j,i] += np.swapaxes(fluc_mesh['dpot'][...][:,(self.center_planes[j] + self.planes)%self.n_plane],0,1)\n self.phi[j,i] -= self.phi_bar[i]\n if(self.HaveElectron):\n self.nane[j,i] += np.swapaxes(fluc_mesh['eden'][...][:,(self.center_planes[j] + self.planes)%self.n_plane],0,1)\n self.nane[j,i] -= self.nane_bar[i]\n if(self.load_ions):\n self.dni[j,i] += np.swapaxes(fluc_mesh['iden'][...][:,(self.center_planes[j] + self.planes)%self.n_plane],0,1)\n self.dni[j,i] -= self.dni_bar[i]\n fluc_mesh.close()\n\n return 0",
"def zplane(system, show:bool=True, figsize:Tuple[int, int]=(8, 8)):\r\n b = system[0]\r\n a = system[1]\r\n\r\n \r\n \"\"\"\r\n # The coefficients are less than 1, normalize the coeficients\r\n if np.max(np.abs(b)) > 1:\r\n kn = np.max(np.abs(b))\r\n b = np.abs(b)/float(kn)\r\n else:\r\n kn = 1\r\n\r\n if np.max(np.abs(a)) > 1:\r\n kd = np.max(np.abs(a))\r\n a = np.abs(a)/float(kd)\r\n else:\r\n kd = 1\r\n \r\n # Get the poles and zeros\r\n p = np.roots(a)\r\n z = np.roots(b)\r\n k = kn/float(kd)\r\n \"\"\"\r\n # Get the poles, zeros and gain\r\n z, p, k = signal.tf2zpk(b, a)\r\n \r\n if show == True:\r\n plt.figure(figsize=figsize)\r\n ax = plt.subplot(111)\r\n uc = patches.Circle((0, 0), radius=1, fill=False,\r\n color='black', ls='dashed')\r\n ax.add_patch(uc)\r\n plt.plot(z.real, z.imag, 'go', ms=10)\r\n plt.plot(p.real, p.imag, 'rx', ms=10)\r\n ax.spines['left'].set_position('center')\r\n ax.spines['bottom'].set_position('center')\r\n ax.spines['right'].set_visible(False)\r\n ax.spines['top'].set_visible(False)\r\n #r = 1.5\r\n plt.axis('scaled')\r\n #plt.axis([-r, r, -r, r])\r\n ticks = [-1, -.5, .5, 1]\r\n plt.xticks(ticks)\r\n plt.yticks(ticks)\r\n\r\n return z, p, k",
"def visualise_ICASAR_inversion(interferograms, sources, time_courses, mask, n_data = 10):\n import numpy as np\n \n def plot_ifg(ifg, ax, mask, vmin, vmax):\n \"\"\"\n \"\"\"\n w = ax.matshow(col_to_ma(ifg, mask), interpolation ='none', aspect = 'equal', vmin = vmin, vmax = vmax) # \n axin = ax.inset_axes([0, -0.06, 1, 0.05])\n fig.colorbar(w, cax=axin, orientation='horizontal')\n ax.set_yticks([])\n ax.set_xticks([])\n \n import matplotlib.pyplot as plt\n \n interferograms_mc = interferograms - np.mean(interferograms, axis = 1)[:, np.newaxis]\n interferograms_ICASAR = time_courses @ sources\n residual = interferograms_mc - interferograms_ICASAR\n \n if n_data > interferograms.shape[0]:\n n_data = interferograms.shape[0]\n\n \n fig, axes = plt.subplots(3, n_data, figsize = (15,7)) \n if n_data == 1: \n axes = np.atleast_2d(axes).T # make 2d, and a column (not a row)\n \n row_labels = ['Data', 'Model', 'Resid.' ]\n for ax, label in zip(axes[:,0], row_labels):\n ax.set_ylabel(label)\n\n for data_n in range(n_data):\n vmin = np.min(np.stack((interferograms_mc[data_n,], interferograms_ICASAR[data_n,], residual[data_n])))\n vmax = np.max(np.stack((interferograms_mc[data_n,], interferograms_ICASAR[data_n,], residual[data_n])))\n plot_ifg(interferograms_mc[data_n,], axes[0,data_n], mask, vmin, vmax)\n plot_ifg(interferograms_ICASAR[data_n,], axes[1,data_n], mask, vmin, vmax)\n plot_ifg(residual[data_n,], axes[2,data_n], mask, vmin, vmax)",
"def generate_zigzag_lattice(NH, theta, eta=0.):\n LV = np.array([[np.cos(theta), np.sin(theta)], [np.cos(theta), -np.sin(theta)]])\n\n xy = np.array([np.ceil(i*0.5)*LV[0] + np.ceil((i-1)*0.5)*LV[1] for i in range(NH)])\n xy -= np.array([np.mean(xy[:, 0]), np.mean(xy[:, 1])])\n print 'xy = ', xy\n\n # Connectivity\n BL = np.zeros((NH-1, 2), dtype=int)\n for i in range(0, NH-1):\n BL[i, 0] = i\n BL[i, 1] = i + 1\n\n print 'BL = ', BL\n\n TRI = le.BL2TRI(BL, xy)\n\n # scale lattice down to size\n if eta == 0:\n xypts = xy\n else:\n print 'Randomizing lattice by eta=', eta\n jitter = eta*np.random.rand(np.shape(xy)[0], np.shape(xy)[1])\n xypts = np.dstack((xy[:, 0] + jitter[:, 0], xy[:, 1] + jitter[:, 1]))[0]\n\n # Naming\n etastr = '{0:.3f}'.format(eta).replace('.', 'p')\n thetastr = '{0:.3f}'.format(theta/np.pi).replace('.', 'p')\n exten = '_line_theta' + thetastr + 'pi_eta'+etastr\n\n # BL = latticevec_filter(BL,xy, C, CBL)\n NL, KL = le.BL2NLandKL(BL, NP=NH, NN=2)\n lattice_exten = 'linear' + exten\n print 'lattice_exten = ', lattice_exten\n return xypts, NL, KL, BL, LV, lattice_exten",
"def calc_knotplanes(self):\n # Calculate the set of knot planes at the origin\n if self.s_ == 1:\n H = list(set(self.Xi_))\n else:\n tmp = set([ncross_product(nt) for nt in combinations(set(self.Xi_), self.s_ - 1)])\n H = [x for x in tmp if len([y for y in x if y != 0]) > 0]\n H = [vector(v).normalized() for v in H]\n #\n Hprime = []\n Hshell = []\n\n for plane in H:\n d_list = [0]\n\n for v in self.Xi_:\n dlp = set(d_list[:])\n d = vector(v).dot_product(vector(plane))\n for dp in dlp:\n d_list.append(d + dp)\n\n d_list = list(set(d_list))\n d_list.sort()\n\n min_d, max_d = d_list.pop(0), d_list.pop()\n Hshell += [(min_d, plane), (max_d, plane)]\n\n for d in d_list:\n Hprime.append((d, plane))\n\n Hprime = [(d- vector(self.c_xi)*vector(n), n) for (d, n) in Hprime]\n Hshell = [(d- vector(self.c_xi)*vector(n), n) for (d, n) in Hshell]\n\n return Hprime, Hshell",
"def print_cube(self):\n for f in range(3): #right-next-faces\n for r in range(3): # rows\n print(self._row_str(2*f,r) + ' ' + self._row_str(2*f+1,r))\n print('')",
"def printTerm3(self):\n vprint=[]\n counter=0\n n_swaps=0\n n_x=0; n_y=0\n maxi=0\n mini=self.l\n for x in self.pl:\n if self.pl[x] != ['Y']:\n n_y += 1\n if self.pl[x] != ['X']:\n n_x += 1\n if self.pl[x] != ['I']:\n counter=counter+1\n vprint += self.pl[x]\n if x>maxi:\n maxi=x\n if x<mini:\n mini=x\n vprint += str(x)\n elif self.pl[x] == ['I']:\n vprint += [' ']\n for n in range(mini,maxi+1):\n if self.pl[n] == ['I']:\n n_swaps += 2\n vprint=''.join(vprint)\n n_cnots=2*(maxi-mini)-n_swaps\n sqcount=n_x*2+n_y*2+2*n_cnots+6*n_swaps\n czcount=1*(n_cnots)+3*n_swaps\n return self.c,vprint,8-counter,n,sqcount,czcount,n_swaps,n_cnots",
"def __generateCarrots(self):\n\t\tfor i in range(self.carrotNumber):\n\t\t\tc = Carrot(self, 0, 0)\n\t\t\tc.reposition(self, Panda.pandaList, Spike.spikeNormalList)",
"def printComposition(self):\r\n self.findComposition()\r\n for kmer in self.kmerComp:\r\n print(kmer)",
"def display_pointings(jones, obsinfo=None, do_3D=False,\n do_parallactic_rot=None):\n def plotsphgrid():\n # Plot grid for the LCL spherical crd sys\n nr_theta_ticks = 10 # 0..90 => 10 deg\n nr_phi_ticks = 4*4+1\n theta_ticks = np.linspace(0., np.pi/2, nr_theta_ticks)\n phi_ticks = np.linspace(0., 2*np.pi, nr_phi_ticks, endpoint=True)\n sg_linewidth = 0.5\n stn_tick_clrmrk = 'y:'\n itrf_tick_clrmrk = 'g:'\n # Compute iso-phi tick lines:\n for phi_tick in phi_ticks:\n xth = np.sin(theta_ticks)*np.sin(phi_tick)\n yth = np.sin(theta_ticks)*np.cos(phi_tick)\n zth = np.cos(theta_ticks)*np.ones((nr_theta_ticks,))\n xyzth_itrf = np.matmul(jones.stnRot.T, [xth, yth, zth])\n (xth_itrf, yth_itrf, zth_itrf) = (xyzth_itrf[0], xyzth_itrf[1],\n xyzth_itrf[2])\n if hidebelowhrz:\n abovehrz = zth_itrf > 0\n xth_itrf = xth_itrf[abovehrz]\n yth_itrf = yth_itrf[abovehrz]\n zth_itrf = zth_itrf[abovehrz]\n # plot iso-phi lines\n if do_3D:\n ax.plot(xth, yth, zth, stn_tick_clrmrk, linewidth=sg_linewidth)\n ax.plot(xth_itrf, yth_itrf, zth_itrf, itrf_tick_clrmrk,\n linewidth=sg_linewidth)\n else:\n ax.plot(xth, yth, stn_tick_clrmrk, linewidth=sg_linewidth)\n ax.plot(xth_itrf, yth_itrf, itrf_tick_clrmrk,\n linewidth=sg_linewidth)\n\n # Compute iso-theta tick lines:\n # N.B. the curvaure of iso-theta lines requires more phi points\n phi_xtrticks = np.linspace(0., 2*np.pi, 361)\n nr_phi_xtrticks = len(phi_xtrticks)\n for theta_tick in theta_ticks:\n xph = np.sin(theta_tick)*np.sin(phi_xtrticks)\n yph = np.sin(theta_tick)*np.cos(phi_xtrticks)\n zph = np.cos(theta_tick)*np.ones((nr_phi_xtrticks,))\n xyzph_itrf = np.matmul(jones.stnRot.T, [xph, yph, zph])\n (xph_itrf, yph_itrf, zph_itrf) = (xyzph_itrf[0], xyzph_itrf[1],\n xyzph_itrf[2])\n # plot iso-theta lines\n if do_3D:\n ax.plot(xph, yph, zph, stn_tick_clrmrk,\n linewidth=sg_linewidth)\n ax.plot(xph_itrf, yph_itrf, zph_itrf, itrf_tick_clrmrk,\n linewidth=sg_linewidth)\n else:\n ax.plot(xph, yph, stn_tick_clrmrk, linewidth=sg_linewidth)\n ax.plot(xph_itrf, yph_itrf, itrf_tick_clrmrk,\n linewidth=sg_linewidth)\n\n jn = jones.getValue()\n jnf = jn[256, :, :, :].squeeze() # Midpoint freq.\n\n # NCP is z-base-vec of ITRF in stn crdsys\n itrf_z_stn = np.matmul(jones.stnRot.T, [[0], [0], [1]])\n\n # Pointings in Cartesian station crds\n xp = jones.jonesbasis[:, 0, 0]\n yp = jones.jonesbasis[:, 1, 0]\n zp = jones.jonesbasis[:, 2, 0]\n\n # Cartesian resp. of antenna basis in Ludwig3 w.r.t stn crdsys\n jbant = jones.get_basis()\n\n # N.B: imag part of ant response not used:\n jbresp = np.real(np.matmul(jbant[:, :, 1:], jnf))\n\n # Find starting point (Save in case below horizon)\n xp0, yp0, zp0 = xp[0], yp[0], zp[0]\n\n nrsamps = jones.jonesbasis.shape[0]\n\n # Optionally remove data below stn horizon\n hidebelowhrz = True\n if hidebelowhrz:\n abovehrz = zp > 0\n xp = xp[abovehrz]\n yp = yp[abovehrz]\n zp = zp[abovehrz]\n jbant = jbant[abovehrz]\n jbresp = jbresp[abovehrz]\n nrsamps = len(zp)\n\n # Plot using 3d or 2d. 
(2d uses orthographic projection)\n fig = plt.figure()\n mplprojection = '3d' if do_3D else None\n ax = fig.add_subplot(111, projection=mplprojection)\n plotsphgrid()\n\n # Display pointings in horizontal coordinates\n if do_3D:\n ax.scatter(xp, yp, zp, c='c', marker='.')\n ax.plot(xp, yp, 'c.', label='Pointing')\n\n # Mark out start point\n label_start = 'Start' if zp0 > 0 else 'Start (below horizon)'\n ax.plot([xp0], [yp0], 'rP', label=label_start)\n\n # Plot antenna dipole basis\n s = 0.1\n for j in range(1, 3):\n lw = 2 if j == 1 else 1\n ant = 'X' if j == 1 else 'Y'\n for i in range(nrsamps):\n # Label only first samp so that legend only has it once\n label = 'antdip_'+ant if i == 0 else None\n if do_3D:\n ax.plot([xp[i], xp[i]+s*jbant[i, 0, j]],\n [yp[i], yp[i]+s*jbant[i, 1, j]],\n [zp[i], zp[i]+s*jbant[i, 2, j]],\n 'm', linewidth=lw, label=label)\n # ax.quiver()\n else:\n ax.plot([xp[i], xp[i]+s*jbant[i, 0, j]],\n [yp[i], yp[i]+s*jbant[i, 1, j]],\n 'm', linewidth=lw, label=label)\n\n # Plot Jones antenna X & Y-channels\n s = 0.2\n for j in range(2):\n lw = 2 if j == 0 else 1\n respiaucmp = 'x' if j == 0 else 'y'\n for i in range(nrsamps):\n # Label only first samp so that legend only has it once\n label = 'respSKY_'+respiaucmp if i == 0 else None\n if do_3D:\n ax.plot([xp[i], xp[i]+s*jbresp[i, 0, j]],\n [yp[i], yp[i]+s*jbresp[i, 1, j]],\n [zp[i], zp[i]+s*jbresp[i, 2, j]],\n 'b', linewidth=lw, label=label)\n else:\n ax.plot([xp[i], xp[i]+s*jbresp[i, 0, j]],\n [yp[i], yp[i]+s*jbresp[i, 1, j]],\n 'b', linewidth=lw, label=label)\n\n # Plot NCP (ITRF z-base in STN crdsys)\n if do_3D:\n ax.plot(itrf_z_stn[0], itrf_z_stn[1], itrf_z_stn[2], 'y*', label='NCP')\n else:\n ax.plot(itrf_z_stn[0], itrf_z_stn[1], 'y*', label='NCP')\n\n # Fix plot settings\n title = \"Pointing map\"\n if obsinfo:\n title += \"\"\" [{} STN crdsys], Band: {}, Freq: {:.2f} MHz\nStart @ {}, Model: {}, Pararot: {}\"\"\"\\\n .format(obsinfo['stnid'], obsinfo['band'],\n obsinfo['freq']/1e6,\n obsinfo['starttime'].isoformat()+' UT',\n obsinfo['antmodel'],\n do_parallactic_rot)\n # Plot origin\n ax.plot([0.], [0.], 'k.', label='Origin/Zenith')\n if do_3D:\n ax.set_xlim3d(left=-1.0, right=1.0)\n ax.set_ylim3d(bottom=-1.0, top=1.0)\n ax.set_zlim3d(bottom=0.0, top=1.0)\n ax.text(0, 1, 0, 'N (stn)')\n ax.text(1, 0, 0, 'E (stn)')\n ax.set_zticks([])\n else:\n ax.text(0, 1, 'N (stn)')\n ax.text(1, 0, 'E (stn)')\n ax.axis('equal')\n ax.grid(False)\n ax.set_xticks([])\n ax.set_yticks([])\n plt.title(title)\n ax.legend(numpoints=1, loc='lower left')\n plt.draw()",
"def show(self):\n\n #finds every element and stores it in order\n elements = [[0 for i in range(self.n)] for j in range(self.n)]\n for i in range(self.n * self.n):\n elements[self.array[0,i]][self.array[1,i]] = self.array[2,i]\n\n #prints the table\n for i in range(self.n):\n line = \"\"\n for j in range(self.n):\n line += str(elements[i][j])\n if j != self.n - 1:\n line += \"|\"\n print(line)\n print()",
"def OrbitCom(galaxy, start, end, n):\n \n # compose the filename for output\n fileout = \"Orbit_\" + galaxy + \".txt\"\n # set tolerance and VolDec for calculating COM_P in CenterOfMass\n d = 0.1\n VD = 4\n # for M33 that is stripped more, use different values for VolDec (4)\n print galaxy\n \n # generate the snapshot id sequence \n # it is always a good idea to also check if the input is eligible (not required)\n \n snap_ids = np.arange(start,end,n)\n # initialize the array for orbital info: t, x, y, z, vx, vy, vz of COM\n orbit = np.zeros((len(snap_ids),7))\n \n # a for loop \n for i, snap_id in enumerate(snap_ids): # loop over files\n \n # compose the data filename (be careful about the folder)\n ilbl = '000'+ str(snap_id)\n ilbl = ilbl[-3:]\n filename = \"./VLowRes/%s_\"%(galaxy)+ilbl+\".txt\"\n #print filename #troubleshooting\n #initialize an instance of CenterOfMass class, using disk particles\n COM = CenterOfMass(filename,2)\n # Store the COM pos and vel. Remember that now COM_P required VolDec\n COMP = COM.COM_P(d,VD)\n COMV = COM.COM_V(COMP[0],COMP[1],COMP[2])\n \n # store the time, pos, vel in ith element of the orbit array, without units (.value)\n #print COMV[0] #troubleshooting\n orbit[i][0] = COM.time.value/1000\n orbit[i][1] = COMP[0].value\n orbit[i][2] = COMP[1].value\n orbit[i][3] = COMP[2].value\n orbit[i][4] = COMV[0].value\n orbit[i][5] = COMV[1].value\n orbit[i][6] = COMV[2].value\n \n # note that you can store \n # a[i] = var1, *tuple(array1)\n\n \n # print snap_id to see the progress\n print(snap_id)\n \n # write the data to a file\n # we do this because we don't want to have to repeat this process \n # this code should only have to be called once per galaxy.\n np.savetxt(fileout, orbit, fmt = \"%11.3f\"*7, comments='#',\n header=\"{:>10s}{:>11s}{:>11s}{:>11s}{:>11s}{:>11s}{:>11s}\"\\\n .format('t', 'x', 'y', 'z', 'vx', 'vy', 'vz'))",
"def load_fluctuations_2D_all(self):\n if(self.HaveElectron):\n self.nane = np.zeros( (self.n_cross_section,len(self.time_steps),len(self.mesh['R'])) )\n self.nane_bar = np.zeros((len(self.time_steps)))\n if(self.load_ions):\n self.dni = np.zeros( (self.n_cross_section,len(self.time_steps),len(self.mesh['R'])) )\n self.dni_bar = np.zeros((len(self.time_steps)))\n\n self.phi = np.zeros((self.n_cross_section,len(self.time_steps),len(self.mesh['R'])))\n self.phi_bar = np.zeros((len(self.time_steps)))\n for i in range(len(self.time_steps)):\n flucf = self.xgc_path + 'xgc.3d.'+str(self.time_steps[i]).zfill(5)+'.h5'\n fluc_mesh = h5.File(flucf,'r')\n if (i == 0):\n self.n_plane = fluc_mesh['dpot'].shape[1]\n dn = int(self.n_plane/self.n_cross_section)\n self.planes = np.arange(self.n_cross_section) * dn\n\n self.phi_bar[i] = np.mean(fluc_mesh['dpot'][...])\n if(self.HaveElectron):\n self.nane_bar[i] = np.mean(fluc_mesh['eden'][...])\n if(self.load_ions):\n self.dni_bar[i] = np.mean(fluc_mesh['iden'][...])\n for j in range(self.n_cross_section):\n self.phi[j,i] += np.swapaxes(fluc_mesh['dpot'][...][:,self.planes[j]],0,1)\n self.phi[j,i] -= self.phi_bar[i]\n\n if(self.HaveElectron):\n self.nane[j,i] += np.swapaxes(fluc_mesh['eden'][...][:,self.planes[j]],0,1)\n self.nane[j,i] -= self.nane_bar[i]\n if(self.load_ions):\n self.dni[j,i] += np.swapaxes(fluc_mesh['iden'][...][:,self.planes[j]],0,1)\n self.dni[j,i] -= self.dni_bar[i]\n fluc_mesh.close()\n\n\n\n\n return 0",
"def supercontinuumgeneration():\n\n betas = [0,0,-11.830e-3*1e-24, 8.1038e-5*1e-36, -9.5205e-8*1e-48, 2.0737e-10*1e-60,\n -5.3943e-13*1e-72, 1.3486e-15*1e-84, -2.5495e-18*1e-96, 3.0524e-21*1e-108,\n -1.7140e-24*1e-120];\n gamma = 0.1\n flength = 0.15\n simparams = prepare_sim_params(0.0, \n betas ,\n 835e-9,\n gamma,\n flength,\n 13, # Npoints\n 1.0, #tempspread\n zpoints=200, \n integratortype='dop853', \n reltol=1e-3, \n abstol=1e-6 ,\n shock=True,\n raman = True,\n ramantype = 'blowwood',#'hollenbeck', #or 'blowwood', 'linagrawal'\n fr=0.18 )\n t0 = 28.4e-15\n p = 10e3\n inifield = np.sqrt(p) * 1./np.cosh(simparams['tvec']/t0) \n tf,ff,zv = perform_simulation( simparams, inifield)\n saveoutput('scg.demo', tf, ff, zv, simparams)\n #\n # output plot\n #\n d = loadoutput('scg.demo')\n inoutplot(d,zparams={\"fignr\":3, \"clim\":(-360,-220),'fylim':(-360,-220)})\n plt.show()",
"def initialize_reflections(self):\n if self.range_automatic:\n self.automatic_hkl_range()\n\n #Clear existing stuff\n refls_dict = {}\n\n #Lists of h,k and l\n h_list = range(int(self.range_h[0]), int(self.range_h[1]+1))\n k_list = range(int(self.range_k[0]), int(self.range_k[1]+1))\n l_list = range(int(self.range_l[0]), int(self.range_l[1]+1))\n\n #Overall number of reflections\n num_h, num_k, num_l = (len(h_list), len(k_list), len(l_list))\n n = num_h * num_k * num_l\n\n #--- Make the hkl 3xN array ---\n reflections_hkl = np.zeros( (3, n) )\n #First axis (h) varies slowest. Each element repeats num_k * num_l times\n reflections_hkl[0, :] = np.tile(h_list, (num_k * num_l, 1)).ravel('F')\n #Second axis,( each element repeats num_l times) repeats num_h times\n will_repeat = np.tile(k_list, (num_l, 1)).ravel('F')\n reflections_hkl[1, :] = np.tile( will_repeat, num_h)\n #Last axis (l) varies fastest. Repeat sequence over and over\n reflections_hkl[2, :] = np.tile(l_list, num_h*num_k)\n \n \n # Get a list of all the reflection conditions contained.\n rc_list = self.crystal.get_reflection_conditions()\n # Start with all true\n visible = h_list==h_list\n for rc in rc_list:\n if not rc is None:\n visible = visible & rc.reflection_visible_matrix(reflections_hkl)\n \n # Take off anything not visible\n reflections_hkl = reflections_hkl[:,visible]\n \n #Calculate all the q vectors at once\n all_q_vectors = np.dot(self.crystal.reciprocal_lattice, reflections_hkl)\n self.reflections_q_norm = np.sqrt(np.sum(all_q_vectors**2, axis=0))\n\n if self.range_limit_to_sphere:\n #Limit to a sphere of radius 2pi/dmin\n inside_sphere = (self.reflections_q_norm < self.inst.qlim)\n reflections_hkl = reflections_hkl[:, inside_sphere]\n all_q_vectors = all_q_vectors[:, inside_sphere]\n self.reflections_q_norm = self.reflections_q_norm[inside_sphere]\n\n #Create each object, and add to the list\n refls = list()\n for i in xrange(reflections_hkl.shape[1]):\n hkl = tuple(reflections_hkl[:, i])\n new_refl = Reflection( hkl, all_q_vectors[:, i] )\n refls.append( new_refl )\n refls_dict[hkl] = new_refl\n\n #Save them in the object\n self.reflections_hkl = reflections_hkl\n self.reflections_dict = refls_dict\n self.reflections = refls\n self.reflections_q_vector = all_q_vectors\n\n #Now we find the primary reflections using crystal symmetry.\n self.find_primary_reflections()\n\n #Clear the list of reflection times measured\n self.get_reflections_times_measured(clear_list=True)\n\n #And if you had any peaks files loaded, reload them.\n self.reload_peaks_files()\n\n #At this point, the reflections mask needs to be updated since the reflections changed.\n self.calculate_reflections_mask()",
"def verif_orbite2(n):\n orb = Orbite(n, randint(0, 90)*DEG_RAD, randint(0, 359)*DEG_RAD)\n interv = PERIODE/20\n fig = plt.figure()\n ax = ax = fig.add_subplot(111, projection='3d')\n for _ in range(20):\n X, Y, Z = [], [], []\n rotation(orb.compo, interv)\n for sat in orb.compo:\n X.append(sat.x) ; Y.append(sat.y) ; Z.append(sat.z)\n ax.scatter(X, Y, Z, color='r')\n plt.show()",
"def make_icosahedron_map(N,nRmax,extrinsic_rotation=None):\n log_debug(logger, \"Building icosahedral geometry\")\n log_debug(logger, \"Grid: %i x %i x %i (%i voxels)\" % (N,N,N,N**3))\n t0 = time.time()\n if extrinsic_rotation is not None:\n q = extrinsic_rotation.get_as_quaternion()\n icomap = icosahedron.icosahedron(N,nRmax,q)\n else:\n icomap = icosahedron.icosahedron(N,nRmax)\n t1 = time.time()\n log_debug(logger, \"Built map within %f seconds.\" % (t1-t0))\n return icomap",
"def cdf_output_2D(self,output_path,filehead='fluctuation'):\n file_start = output_path + filehead\n for i in range(self.n_cross_section):\n for j in range(len(self.time_steps)):\n\n fname = file_start + str(self.time_steps[j])+'_'+str(i) + '.cdf'\n f = nc.netcdf_file(fname,'w')\n f.createDimension('z_dim',self.grid.NZ)\n f.createDimension('r_dim',self.grid.NR)\n\n rr = f.createVariable('rr','d',('r_dim',))\n rr[:] = self.grid.R1D[:]\n zz = f.createVariable('zz','d',('z_dim',))\n zz[:] = self.grid.Z1D[:]\n rr.units = zz.units = 'Meter'\n\n bb = f.createVariable('bb','d',('z_dim','r_dim'))\n bb[:,:] = self.B_on_grid[:,:]\n bb.units = 'Tesla'\n\n dne = f.createVariable('dne','d',('z_dim','r_dim'))\n dne[:,:] = self.dne_ad_on_grid[i,j,:,:] + self.nane_on_grid[i,j,:,:]\n dne.units = 'per cubic meter'\n\n ne = f.createVariable('ne','d',('z_dim','r_dim'))\n ne[:,:] = self.ne0_on_grid[:,:] + dne[:,:]\n ne.units = 'per cubic meter'\n\n te = f.createVariable('te','d',('z_dim','r_dim'))\n te[:,:] = self.te_on_grid[:,:]/1000\n te.units = 'keV'\n\n ti = f.createVariable('ti','d',('z_dim','r_dim'))\n ti[:,:] = self.ti_on_grid[:,:]/1000\n ti.units = 'keV'\n\n f.close()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
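The final orbit computation in enumerate_pAdic_class is the least obvious step. The sketch below isolates that grouping logic with toy matrices in place of the real isometries returned by iterAllLatticeRotations; it is an illustration of the idea, not the original routine.

# Isolated sketch of the orbit-grouping step: partition a set of residue vectors mod N
# into orbits under a finite list of integer matrices. The matrices below are toy
# stand-ins, not actual isometries of a form.
import numpy as np

def group_into_orbits(images, matrices, N):
    remaining = set(images)   # tuples of length 3
    orbits = {}
    while remaining:
        seed = next(iter(remaining))
        orbit = set()
        for X in matrices:
            v = np.array(seed).reshape(3, 1)
            y = tuple((X.dot(v) % N).flatten())
            if y in remaining:
                orbit.add(y)
        remaining -= orbit
        orbits[seed] = orbit
    return orbits

# Toy usage: N = 4; include the identity so every seed lands in its own orbit.
I = np.eye(3, dtype=int)
swap_xy = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
print(group_into_orbits({(1, 2, 0), (2, 1, 0)}, [I, swap_xy], 4))
# -> one orbit containing both vectors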
Display a pass message to the user, for a given amount of time. timeout: Time to display the message, in seconds
|
def DisplayPass(self, message='PASS', timeout=0):
    self.DisplayMessage(message, fgcolor=colorama.Fore.GREEN)
    time.sleep(timeout)
|
[
"def DisplayFail(self, message = 'FAIL', timeout=0):\n self.DisplayMessage(message, fgcolor=colorama.Fore.RED)\n time.sleep(timeout)",
"def DisplayError(self, message = 'ERROR', timeout=0):\n self.DisplayMessage(message, fgcolor=colorama.Fore.RED)",
"def display_timeout(self, timeout=600):\n\n while True:\n elapsed = time.time() - self.display_start_time\n if elapsed >= timeout:\n self.clear_display()\n print('display has timed out, backlight is off')\n self.timed_out = True\n else:\n print('LCD timer, on time is: ', round(elapsed), ' seconds')\n time.sleep(15)\n return",
"def monitor_timeout(self):\n delta = time.time() - self.login_time\n if delta > self.timeout:\n self.logout()\n self.lcd_timeout.display(int(round(self.timeout - delta, 0)))",
"def time_out():",
"def _timeout_(cmd, timeout):\n if timeout:\n return \"%s %d\"%(cmd,timeout)\n return cmd",
"def on_timeout(self):\n if(self.unlocked):\n self.lock()\n self.status(\"Locked due to inactivity\")\n else:\n self.stored_password = None\n self.textbuffer.set_text(\"\")\n self.status(\"Deleted due to inactivity\")",
"def time_out():\n\n return \"<script>window.voila_heartbeat()</script>\\n\"",
"def handler(signum, frame):\r\n msg = \"SCRIPT TIMED OUT!!!\\n More than \" + str(timeout) + \" seconds have elapsed.\"\r\n raise Exception(msg)",
"def timed_input(prompt='', timer=10):\n\n try:\n answer = __input_with_timeout(prompt, timer)\n except TimeoutExpired:\n return ''\n else:\n return answer",
"def timeout(self, user, time):\n timeout = \"/timeout {} {}\".format(user, time)\n if self.irc is not None:\n self.irc.write(self.channel, timeout)\n else:\n logging.warning(\"The bot {} in channel {} wanted to timout {}, but irc isn't set.\".format(self.nickname, self.channel, user))",
"def _format_timeout(timeout: float):\n return '{:.3f}s'.format(timeout)",
"def user32_MessageBoxTimeout(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"hWnd\", \"lpText\", \"lpCaption\", \"uType\", \"wLanguageId\", \"dwMilliseconds\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def check_timeout(self):\n if self.hotsos_logout.instate(['selected']):\n while self.app.window_activate(window='Auto Logout'):\n self.app.type_info('enter')\n self.add_log('**Timeout Reset**')\n self.after(20000, self.check_timeout)",
"def timeoutconversion(self):\n\n self.controller.logger.info(\"timeoutconversion is called.\")\n\n if len(self.Global.pdf) == 0:\n tkMessageBox.showinfo(\"ERROR\", \"Il n'y a pas de conversion en cours.\")\n\n else:\n self.Global.timeout = True\n self.Global.addLogMsg(\"DEMANDE DE PAUSE\")\n tkMessageBox.showinfo(\"INFO\", \"La conversion sera mise en pause apres la conversion du pdf en cours.\")",
"def set_timeout(self, timeout):\r\n self.timeout = float(timeout)/1000.",
"def do_exit(timeout):\n timeout.remove()\n simple_error(GPS.Console(\"Messages\").get_text())\n GPS.exit(force=1)",
"def _gui_notify_expired(self, message):\n alert_name = message.data.get(\"name\")\n alert_kind = message.data.get(\"kind\")\n if alert_kind == \"timer\":\n self.gui.show_text(\"Time's Up!\", alert_name)\n else:\n self.gui.show_text(alert_name, alert_kind)\n if self.neon_core:\n self.clear_gui_timeout()",
"def display_message(text):\n\n clear_shell()\n print figlet.renderText(text)\n sleep(.75)\n clear_shell()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
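DisplayPass, DisplayFail and DisplayError (this row and the two that follow) all delegate to DisplayMessage, which is not included in this dump. A plausible minimal sketch with colorama is shown below; the class name and the exact formatting are assumptions.

# Minimal sketch of the DisplayMessage helper these wrappers rely on (assumed, not original).
import time
import colorama

colorama.init()  # needed for ANSI colours on Windows consoles

class ConsoleDisplay:
    def DisplayMessage(self, message, fgcolor=colorama.Fore.WHITE):
        # Print the message in the requested foreground colour, then reset styling.
        print(fgcolor + message + colorama.Style.RESET_ALL)

    def DisplayPass(self, message='PASS', timeout=0):
        self.DisplayMessage(message, fgcolor=colorama.Fore.GREEN)
        time.sleep(timeout)

    def DisplayFail(self, message='FAIL', timeout=0):
        self.DisplayMessage(message, fgcolor=colorama.Fore.RED)
        time.sleep(timeout)

# Usage: ConsoleDisplay().DisplayPass('All tests passed', timeout=1)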
Display a failure message to the user, for a given amount of time. timeout: Time to display the message, in seconds
|
def DisplayError(self, message='ERROR', timeout=0):
    self.DisplayMessage(message, fgcolor=colorama.Fore.RED)
|
[
"def DisplayFail(self, message = 'FAIL', timeout=0):\n self.DisplayMessage(message, fgcolor=colorama.Fore.RED)\n time.sleep(timeout)",
"def time_out():",
"def DisplayPass(self, message = 'PASS', timeout=0):\n self.DisplayMessage(message, fgcolor=colorama.Fore.GREEN )\n time.sleep(timeout)",
"def handler(signum, frame):\r\n msg = \"SCRIPT TIMED OUT!!!\\n More than \" + str(timeout) + \" seconds have elapsed.\"\r\n raise Exception(msg)",
"def display_timeout(self, timeout=600):\n\n while True:\n elapsed = time.time() - self.display_start_time\n if elapsed >= timeout:\n self.clear_display()\n print('display has timed out, backlight is off')\n self.timed_out = True\n else:\n print('LCD timer, on time is: ', round(elapsed), ' seconds')\n time.sleep(15)\n return",
"def etimeout():\n return pexc.JobRequestTimedOut(operation_name='foo', seconds=1800)",
"def monitor_timeout(self):\n delta = time.time() - self.login_time\n if delta > self.timeout:\n self.logout()\n self.lcd_timeout.display(int(round(self.timeout - delta, 0)))",
"def _timeout_(cmd, timeout):\n if timeout:\n return \"%s %d\"%(cmd,timeout)\n return cmd",
"def time_out():\n\n return \"<script>window.voila_heartbeat()</script>\\n\"",
"def timed_out(self):\n\t\traise NotImplementedError",
"def timed_out(self):\n\t\ttry:\n\t\t\tself.rval = self.cmd(*self.args)\n\t\texcept Exception,e:\n\t\t\tself.exception = e\n\t\tfinally:\n\t\t\tself.complete = True",
"def _test_failed(self, tc, ctx, fail):\n if self.config.tm and hasattr(tc, \"elapsed\"):\n tm = '\\t[{:06.3F} s]'.format(tc.elapsed)\n else:\n tm = ''\n self.msg.print('{}: {}FAILED{}\\t({}/{}) {}'\n .format(tc, futils.Color.RED,\n futils.Color.END, tc.test_type, ctx, tm))\n self.msg.print(fail)\n\n if not self.config.keep_going:\n sys.exit(1)",
"def test_invalid_timeout_format(self):\n test_response = self.client.get(reverse('test-list'))\n self.assertEqual(test_response.status_code,\n status.HTTP_400_BAD_REQUEST)\n self.assertEqual(test_response.json()['error'],\n 'Set the timeout as a string representing number of seconds')",
"def handle_timeout(self):\n\t\tpass",
"def _format_timeout(timeout: float):\n return '{:.3f}s'.format(timeout)",
"def do_exit(timeout):\n timeout.remove()\n simple_error(GPS.Console(\"Messages\").get_text())\n GPS.exit(force=1)",
"def display_error(title, error_message):\n messagebox.showerror(title=title, message=error_message)",
"def timeout(self):\n raise NotImplementedError",
"def check_timeout(self):\n if self.hotsos_logout.instate(['selected']):\n while self.app.window_activate(window='Auto Logout'):\n self.app.type_info('enter')\n self.add_log('**Timeout Reset**')\n self.after(20000, self.check_timeout)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Display a failure message to the user, for a given amount of time. timeout: Time to display the message, in seconds
|
def DisplayFail(self, message = 'FAIL', timeout=0):
self.DisplayMessage(message, fgcolor=colorama.Fore.RED)
time.sleep(timeout)
|
[
"def DisplayError(self, message = 'ERROR', timeout=0):\n self.DisplayMessage(message, fgcolor=colorama.Fore.RED)",
"def time_out():",
"def DisplayPass(self, message = 'PASS', timeout=0):\n self.DisplayMessage(message, fgcolor=colorama.Fore.GREEN )\n time.sleep(timeout)",
"def handler(signum, frame):\r\n msg = \"SCRIPT TIMED OUT!!!\\n More than \" + str(timeout) + \" seconds have elapsed.\"\r\n raise Exception(msg)",
"def display_timeout(self, timeout=600):\n\n while True:\n elapsed = time.time() - self.display_start_time\n if elapsed >= timeout:\n self.clear_display()\n print('display has timed out, backlight is off')\n self.timed_out = True\n else:\n print('LCD timer, on time is: ', round(elapsed), ' seconds')\n time.sleep(15)\n return",
"def etimeout():\n return pexc.JobRequestTimedOut(operation_name='foo', seconds=1800)",
"def monitor_timeout(self):\n delta = time.time() - self.login_time\n if delta > self.timeout:\n self.logout()\n self.lcd_timeout.display(int(round(self.timeout - delta, 0)))",
"def _timeout_(cmd, timeout):\n if timeout:\n return \"%s %d\"%(cmd,timeout)\n return cmd",
"def time_out():\n\n return \"<script>window.voila_heartbeat()</script>\\n\"",
"def timed_out(self):\n\t\traise NotImplementedError",
"def timed_out(self):\n\t\ttry:\n\t\t\tself.rval = self.cmd(*self.args)\n\t\texcept Exception,e:\n\t\t\tself.exception = e\n\t\tfinally:\n\t\t\tself.complete = True",
"def _test_failed(self, tc, ctx, fail):\n if self.config.tm and hasattr(tc, \"elapsed\"):\n tm = '\\t[{:06.3F} s]'.format(tc.elapsed)\n else:\n tm = ''\n self.msg.print('{}: {}FAILED{}\\t({}/{}) {}'\n .format(tc, futils.Color.RED,\n futils.Color.END, tc.test_type, ctx, tm))\n self.msg.print(fail)\n\n if not self.config.keep_going:\n sys.exit(1)",
"def test_invalid_timeout_format(self):\n test_response = self.client.get(reverse('test-list'))\n self.assertEqual(test_response.status_code,\n status.HTTP_400_BAD_REQUEST)\n self.assertEqual(test_response.json()['error'],\n 'Set the timeout as a string representing number of seconds')",
"def handle_timeout(self):\n\t\tpass",
"def _format_timeout(timeout: float):\n return '{:.3f}s'.format(timeout)",
"def do_exit(timeout):\n timeout.remove()\n simple_error(GPS.Console(\"Messages\").get_text())\n GPS.exit(force=1)",
"def display_error(title, error_message):\n messagebox.showerror(title=title, message=error_message)",
"def timeout(self):\n raise NotImplementedError",
"def check_timeout(self):\n if self.hotsos_logout.instate(['selected']):\n while self.app.window_activate(window='Auto Logout'):\n self.app.type_info('enter')\n self.add_log('**Timeout Reset**')\n self.after(20000, self.check_timeout)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the number of target qubits
|
def num_target_qubits(self):
return self._num_target_qubits
|
[
"def num_targets(self):",
"def nqudits(self) -> int:\n return self._nqudits",
"def target_sizes(self):\n return Counter(self.targets.values())",
"def num_evaluation_qubits(self) -> int:\n return self._num_evaluation_qubits",
"def num_targets(self) -> int:\n return len(self.targets)",
"def n_labels(self):\n return len(self.y[_TARGET_NAME].unique())",
"def testcases_length(self):\n total = self.S(len(self.nodes), self.number_of_partitions)\n total *= len(self.target_nodes)\n total **= self.number_of_rounds\n return total",
"def num_elements(obj: SynapseIdModel) -> int:\n return obj.nplast * obj.nstate**2 + obj.nstate",
"def numSuits(self):\n return self.suits",
"def getNumberOfHeuristics(self) -> None:",
"def answers_count(self):\n return len(self.skill_increments)",
"def quantity_size():",
"def dim_target(self) -> int:\n return 1",
"def test_size(self):\n return self.__test_batches * len(self.__sources)",
"def count(self) -> int:\n return self.__solution_count",
"def get_count(self):\n return self.hand.compute_bj_count()",
"def num_qoperations(self, mode: str) -> int:\n if mode == \"state\":\n return len(self.states)\n elif mode == \"povm\":\n return len(self.povms)\n elif mode == \"gate\":\n return len(self.gates)\n elif mode == \"mprocess\":\n return len(self.mprocesses)\n else:\n raise ValueError(f\"An unsupported mode is specified. mode={mode}\")",
"def get_number_of_operands(self) -> int:\n return self._number_of_operands",
"def get_number_of_workers():"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Adds inverse of corresponding subcircuit to given circuit
|
def build_inverse(self, qc, q, q_ancillas=None, params=None):
qc_ = QuantumCircuit(*qc.qregs)
self.build(qc_, q, q_ancillas, params)
try:
qc_.data = [gate.inverse() for gate in reversed(qc_.data)]
except Exception as exc:
raise AquaError('Irreversible circuit! Gate does not support inverse method.') from exc
qc.extend(qc_)
|
[
"def build_controlled_inverse(self, qc, q, q_control, q_ancillas=None, params=None):\n qc_ = QuantumCircuit(*qc.qregs)\n\n self.build_controlled(qc_, q, q_control, q_ancillas, params)\n try:\n qc_.data = [gate.inverse() for gate in reversed(qc_.data)]\n except AquaError:\n print('Irreversible circuit! Does not support inverse method.')\n qc.extend(qc_)",
"def _add_inverse(self):\n o = rdflib.URIRef(self.namespace.get_iri() + \"INVERSE_OF_\" + self.name)\n x = (self.iri, rdflib.OWL.inverseOf, o)\n y = (o, rdflib.RDF.type, rdflib.OWL.ObjectProperty)\n z = (o, rdflib.RDFS.label, rdflib.Literal(\"INVERSE_OF_\" + self.name,\n lang=\"en\"))\n\n self.namespace._graph.add(x)\n self.namespace._graph.add(y)\n self.namespace._graph.add(z)\n for superclass in self.direct_superclasses:\n self.namespace._graph.add((\n o, rdflib.RDFS.subPropertyOf, superclass.inverse.iri\n ))\n return self.namespace._namespace_registry.from_iri(o)",
"def qnode_for_inverse(self, mock_device):\n\n def circuit(x):\n qml.RZ(x, wires=[1]).inv()\n qml.RZ(x, wires=[1]).inv().inv()\n return qml.expval(qml.PauliX(0)), qml.expval(qml.PauliZ(1))\n\n node = qml.QNode(circuit, mock_device)\n node.construct([1.0], {})\n\n return node",
"def test_inv(self):\n\n operation = CirqOperation(\n lambda a, b, c: [cirq.X, cirq.Ry(a), cirq.Rx(b), cirq.Z, cirq.Rz(c)]\n )\n\n assert not operation.is_inverse\n\n operation.inv()\n\n assert operation.is_inverse\n\n operation.inv()\n\n assert not operation.is_inverse",
"def inverse(self): \n if self._inverse is None:\n if self._name is None:\n inv_name = None\n else:\n inv_name = self._name + '^(-1)'\n if self._latex_name is None:\n inv_latex_name = None\n else:\n inv_latex_name = self._latex_name + r'^{-1}'\n self._inverse = AutomorphismField(self._vmodule, name=inv_name, \n latex_name=inv_latex_name)\n for dom, rst in self._restrictions.iteritems():\n self._inverse._restrictions[dom] = rst.inverse()\n return self._inverse",
"def addInverse(self, addInverse=False):\n self.addInverse_ = addInverse",
"def test_inv_queuing(self):\n\n class DummyOp(qml.operation.Operation):\n r\"\"\"Dummy custom Operation\"\"\"\n num_wires = 1\n\n with qml.tape.QuantumTape() as tape:\n op = DummyOp(wires=[0]).inv()\n assert op.inverse is True\n\n assert op.inverse is True",
"def transform(self, circuit: cirq.Circuit) -> cirq.Circuit:\n return None",
"def simplify(circuit):\n circuit=removeZeroRotations(circuit)\n circuit,wires=removeDoubleCZ(circuit)\n circuit=combineRotations(circuit,wires)\n return circuit",
"def transform(self, circuit: cirq.Circuit) -> cirq.Circuit:\n initial_mapping = imu.calculate_initial_mapping(self.device, circuit)\n updater = su.SwapUpdater(circuit, self.device.qubit_set(), initial_mapping)\n return cirq.Circuit(updater.add_swaps())",
"def test_operation_inverse_defined(self, qnode_for_inverse):\n assert qnode_for_inverse.qtape.operations[0].name == \"RZ.inv\"\n assert qnode_for_inverse.qtape.operations[0].inverse\n assert issubclass(qnode_for_inverse.qtape.operations[0].__class__, qml.operation.Operation)\n assert qnode_for_inverse.qtape.operations[1].name == \"RZ\"\n assert not qnode_for_inverse.qtape.operations[1].inverse\n assert issubclass(qnode_for_inverse.qtape.operations[1].__class__, qml.operation.Operation)",
"def inverse(self):\n ret = copy.deepcopy(self)\n for l in xrange(0, self.lmax + 1):\n ret.clmat[l, :, :] = np.linalg.pinv(self.clmat[l])\n return ret",
"def invert_in_place(self) -> \"vnl_diag_matrixSI &\":\n return _vnl_diag_matrixPython.vnl_diag_matrixSI_invert_in_place(self)",
"def inverse_differential_power(cls, power, tangent_vec, base_point):\n (\n eigvectors,\n transconj_eigvectors,\n numerator,\n denominator,\n temp_result,\n ) = cls._aux_differential_power(power, tangent_vec, base_point)\n power_operator = denominator / numerator\n result = power_operator * temp_result\n result = Matrices.mul(eigvectors, result, transconj_eigvectors)\n return result",
"def reapply(self, circuit):\n self._modifiers(circuit.y(self.qargs[0]))",
"def inverse(self):\n\n # We use the Extended GCD algorithm\n # http://en.wikipedia.org/wiki/Polynomial_greatest_common_divisor\n\n if self._value == 0:\n raise ValueError(\"Inversion of zero\")\n\n r0, r1 = self._value, self.irr_poly\n s0, s1 = 1, 0\n while r1 > 0:\n q = _div_gf2(r0, r1)[0]\n r0, r1 = r1, r0 ^ _mult_gf2(q, r1)\n s0, s1 = s1, s0 ^ _mult_gf2(q, s1)\n return _Element(s0)",
"def inverse(self):\n if not self.is_square():\n raise(ValueError, \"Non-square Matrix does not have an inverse.\")\n if self.h > 2:\n raise(NotImplementedError, \"inversion not implemented for matrices larger than 2x2.\") \n \n matrix = self.g\n inverse = []\n\n if (len(matrix) == 1):\n inverse.append([1/matrix[0][0]])\n else: \n #The following code was suggested from this project's code review:\n #find identity matrix\n I = identity(2)\n #find trace\n tr = self.trace()\n #find the determinant with the previously computed function.\n det = self.determinant()\n #perform inverse\n return 1.0 / det * (tr * I - self)",
"def invert(self, solution, inverse_data):\n status = self.STATUS_MAP[solution[\"info\"][\"status\"]]\n\n attr = {}\n attr[s.SOLVE_TIME] = solution[\"info\"][\"solveTime\"]\n attr[s.SETUP_TIME] = solution[\"info\"][\"setupTime\"]\n attr[s.NUM_ITERS] = solution[\"info\"][\"iter\"]\n\n if status in s.SOLUTION_PRESENT:\n primal_val = solution[\"info\"][\"pobj\"]\n opt_val = primal_val + inverse_data[s.OFFSET]\n primal_vars = {\n inverse_data[SCS.VAR_ID]:\n intf.DEFAULT_INTF.const_to_matrix(solution[\"x\"])\n }\n eq_dual_vars = utilities.get_dual_values(\n intf.DEFAULT_INTF.const_to_matrix(\n solution[\"y\"][:inverse_data[ConicSolver.DIMS].zero]),\n self.extract_dual_value,\n inverse_data[SCS.EQ_CONSTR])\n ineq_dual_vars = utilities.get_dual_values(\n intf.DEFAULT_INTF.const_to_matrix(\n solution[\"y\"][inverse_data[ConicSolver.DIMS].zero:]),\n self.extract_dual_value,\n inverse_data[SCS.NEQ_CONSTR])\n dual_vars = {}\n dual_vars.update(eq_dual_vars)\n dual_vars.update(ineq_dual_vars)\n return Solution(status, opt_val, primal_vars, dual_vars, attr)\n else:\n return failure_solution(status)",
"def test_custom_inverse():\n\n p = models.Polynomial1D(1, c0=-2, c1=3)\n # A trivial inverse for a trivial polynomial\n inv = models.Polynomial1D(1, c0=(2./3.), c1=(1./3.))\n\n with pytest.raises(NotImplementedError):\n p.inverse\n\n p.inverse = inv\n\n x = np.arange(100)\n\n assert_allclose(x, p(p.inverse(x)))\n assert_allclose(x, p.inverse(p(x)))\n\n p.inverse = None\n\n with pytest.raises(NotImplementedError):\n p.inverse"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Adds controlled inverse of corresponding subcircuit to given circuit
|
def build_controlled_inverse(self, qc, q, q_control, q_ancillas=None, params=None):
qc_ = QuantumCircuit(*qc.qregs)
self.build_controlled(qc_, q, q_control, q_ancillas, params)
try:
qc_.data = [gate.inverse() for gate in reversed(qc_.data)]
except AquaError:
print('Irreversible circuit! Does not support inverse method.')
qc.extend(qc_)
|
[
"def build_inverse(self, qc, q, q_ancillas=None, params=None):\n qc_ = QuantumCircuit(*qc.qregs)\n\n self.build(qc_, q, q_ancillas, params)\n try:\n qc_.data = [gate.inverse() for gate in reversed(qc_.data)]\n except Exception as exc:\n raise AquaError('Irreversible circuit! Gate does not support inverse method.') from exc\n qc.extend(qc_)",
"def qnode_for_inverse(self, mock_device):\n\n def circuit(x):\n qml.RZ(x, wires=[1]).inv()\n qml.RZ(x, wires=[1]).inv().inv()\n return qml.expval(qml.PauliX(0)), qml.expval(qml.PauliZ(1))\n\n node = qml.QNode(circuit, mock_device)\n node.construct([1.0], {})\n\n return node",
"def _add_inverse(self):\n o = rdflib.URIRef(self.namespace.get_iri() + \"INVERSE_OF_\" + self.name)\n x = (self.iri, rdflib.OWL.inverseOf, o)\n y = (o, rdflib.RDF.type, rdflib.OWL.ObjectProperty)\n z = (o, rdflib.RDFS.label, rdflib.Literal(\"INVERSE_OF_\" + self.name,\n lang=\"en\"))\n\n self.namespace._graph.add(x)\n self.namespace._graph.add(y)\n self.namespace._graph.add(z)\n for superclass in self.direct_superclasses:\n self.namespace._graph.add((\n o, rdflib.RDFS.subPropertyOf, superclass.inverse.iri\n ))\n return self.namespace._namespace_registry.from_iri(o)",
"def reapply(self, circuit):\n self._modifiers(circuit.y(self.qargs[0]))",
"def transform(self, circuit: cirq.Circuit) -> cirq.Circuit:\n return None",
"def test_inv(self):\n\n operation = CirqOperation(\n lambda a, b, c: [cirq.X, cirq.Ry(a), cirq.Rx(b), cirq.Z, cirq.Rz(c)]\n )\n\n assert not operation.is_inverse\n\n operation.inv()\n\n assert operation.is_inverse\n\n operation.inv()\n\n assert not operation.is_inverse",
"def circuit(self):\n raise NotImplementedError",
"def transform(self, circuit: cirq.Circuit) -> cirq.Circuit:\n initial_mapping = imu.calculate_initial_mapping(self.device, circuit)\n updater = su.SwapUpdater(circuit, self.device.qubit_set(), initial_mapping)\n return cirq.Circuit(updater.add_swaps())",
"def inverse(self): \n if self._inverse is None:\n if self._name is None:\n inv_name = None\n else:\n inv_name = self._name + '^(-1)'\n if self._latex_name is None:\n inv_latex_name = None\n else:\n inv_latex_name = self._latex_name + r'^{-1}'\n self._inverse = AutomorphismField(self._vmodule, name=inv_name, \n latex_name=inv_latex_name)\n for dom, rst in self._restrictions.iteritems():\n self._inverse._restrictions[dom] = rst.inverse()\n return self._inverse",
"def add_to_circuit_inputs(self, expr: Expression) -> HybridArgumentIdf:\n privacy = expr.annotated_type.privacy_annotation.privacy_annotation_label() if expr.annotated_type.is_private() else Expression.all_expr()\n is_public = privacy == Expression.all_expr()\n\n expr_text = expr.code()\n input_expr = self._expr_trafo.visit(expr)\n t = input_expr.annotated_type.type_name\n locally_decrypted_idf = None\n\n # If expression has literal type -> evaluate it inside the circuit (constant folding will be used)\n # rather than introducing an unnecessary public circuit input (expensive)\n if isinstance(t, BooleanLiteralType):\n return self._evaluate_private_expression(input_expr, str(t.value))\n elif isinstance(t, NumberLiteralType):\n return self._evaluate_private_expression(input_expr, str(t.value))\n\n t_suffix = ''\n if isinstance(expr, IdentifierExpr):\n # Look in cache before doing expensive move-in\n if self._remapper.is_remapped(expr.target.idf):\n remapped_idf = self._remapper.get_current(expr.target.idf)\n return remapped_idf\n\n t_suffix = f'_{expr.idf.name}'\n\n # Generate circuit inputs\n if is_public:\n tname = f'{self._in_name_factory.get_new_name(expr.annotated_type.type_name)}{t_suffix}'\n return_idf = input_idf = self._in_name_factory.add_idf(tname, expr.annotated_type.type_name)\n self._phi.append(CircComment(f'{input_idf.name} = {expr_text}'))\n else:\n # Encrypted inputs need to be decrypted inside the circuit (i.e. add plain as private input and prove encryption)\n tname = f'{self._secret_input_name_factory.get_new_name(expr.annotated_type.type_name)}{t_suffix}'\n return_idf = locally_decrypted_idf = self._secret_input_name_factory.add_idf(tname, expr.annotated_type.type_name)\n cipher_t = TypeName.cipher_type(input_expr.annotated_type, expr.annotated_type.homomorphism)\n tname = f'{self._in_name_factory.get_new_name(cipher_t)}{t_suffix}'\n input_idf = self._in_name_factory.add_idf(tname, cipher_t, IdentifierExpr(locally_decrypted_idf))\n\n # Add a CircuitInputStatement to the solidity code, which looks like a normal assignment statement,\n # but also signals the offchain simulator to perform decryption if necessary\n expr.statement.pre_statements.append(CircuitInputStatement(input_idf.get_loc_expr(), input_expr))\n\n if not is_public:\n # Check if the secret plain input corresponds to the decrypted cipher value\n crypto_params = cfg.get_crypto_params(expr.annotated_type.homomorphism)\n self._phi.append(CircComment(f'{locally_decrypted_idf} = dec({expr_text}) [{input_idf.name}]'))\n self._ensure_encryption(expr.statement, locally_decrypted_idf, Expression.me_expr(),\n crypto_params, input_idf, False, True)\n\n # Cache circuit input for later reuse if possible\n if cfg.opt_cache_circuit_inputs and isinstance(expr, IdentifierExpr):\n # TODO: What if a homomorphic variable gets used as both a plain variable and as a ciphertext?\n # This works for now because we never perform homomorphic operations on variables we can decrypt.\n self._remapper.remap(expr.target.idf, return_idf)\n\n return return_idf",
"def simplify(circuit):\n circuit=removeZeroRotations(circuit)\n circuit,wires=removeDoubleCZ(circuit)\n circuit=combineRotations(circuit,wires)\n return circuit",
"def addInverse(self, addInverse=False):\n self.addInverse_ = addInverse",
"def _circuit_handler(event):\n if not event.build_flags or 'IS_INTERNAL' not in event.build_flags:\n if event.id == self._cid:\n probe.circs.append(event)\n if self._circuit_built.is_set():\n if event.status in ('FAILED', 'CLOSED'):\n self._circuit_finished.set()\n if not self._circuit_built.is_set():\n if event.status in ('FAILED', 'BUILT'):\n self._circuit_built.set()\n elif event.status == 'LAUNCHED' and not self._cid:\n self._cid = event.id\n probe.circs.append(event)\n self._manager.circ_launched.release()",
"def updateCircuit(circuit,\n verbose = False):\n if verbose:\n Warning(\"Currently only replaces to h,s,x,y,z gates\")\n possible_gates = list('hsxyz')\n \n # Convert circuit to qasm string so we can use string processing to switch\n qasm = circuit.qasm().split(';')\n \n \n # Make sure the gate you choose is not a cx gate\n gate_to_switch = np.random.randint(3,len(qasm)-1)\n while qasm[gate_to_switch][1:3] == 'cx' or qasm[gate_to_switch][1:3] == 'ba':\n gate_to_switch = np.random.randint(3,len(qasm)-1)\n \n # Get a new gate and make sure it's different form the current gate\n this_gate = qasm[gate_to_switch][1]\n new_gate = np.random.choice(possible_gates)\n while new_gate == this_gate:\n new_gate = np.random.choice(possible_gates)\n \n qasm[gate_to_switch] = '\\n' + new_gate + ' ' + qasm[gate_to_switch].split(' ')[1]\n \n qasm = ';'.join(qasm) \n circuit = qk.QuantumCircuit.from_qasm_str(qasm)\n \n if verbose:\n print(circuit)\n \n return circuit",
"def test_inv_queuing(self):\n\n class DummyOp(qml.operation.Operation):\n r\"\"\"Dummy custom Operation\"\"\"\n num_wires = 1\n\n with qml.tape.QuantumTape() as tape:\n op = DummyOp(wires=[0]).inv()\n assert op.inverse is True\n\n assert op.inverse is True",
"def test_operation_inverse_defined(self, qnode_for_inverse):\n assert qnode_for_inverse.qtape.operations[0].name == \"RZ.inv\"\n assert qnode_for_inverse.qtape.operations[0].inverse\n assert issubclass(qnode_for_inverse.qtape.operations[0].__class__, qml.operation.Operation)\n assert qnode_for_inverse.qtape.operations[1].name == \"RZ\"\n assert not qnode_for_inverse.qtape.operations[1].inverse\n assert issubclass(qnode_for_inverse.qtape.operations[1].__class__, qml.operation.Operation)",
"def invert(self, solution, inverse_data):\n status = self.STATUS_MAP[solution[\"info\"][\"status\"]]\n\n attr = {}\n attr[s.SOLVE_TIME] = solution[\"info\"][\"solveTime\"]\n attr[s.SETUP_TIME] = solution[\"info\"][\"setupTime\"]\n attr[s.NUM_ITERS] = solution[\"info\"][\"iter\"]\n\n if status in s.SOLUTION_PRESENT:\n primal_val = solution[\"info\"][\"pobj\"]\n opt_val = primal_val + inverse_data[s.OFFSET]\n primal_vars = {\n inverse_data[SCS.VAR_ID]:\n intf.DEFAULT_INTF.const_to_matrix(solution[\"x\"])\n }\n eq_dual_vars = utilities.get_dual_values(\n intf.DEFAULT_INTF.const_to_matrix(\n solution[\"y\"][:inverse_data[ConicSolver.DIMS].zero]),\n self.extract_dual_value,\n inverse_data[SCS.EQ_CONSTR])\n ineq_dual_vars = utilities.get_dual_values(\n intf.DEFAULT_INTF.const_to_matrix(\n solution[\"y\"][inverse_data[ConicSolver.DIMS].zero:]),\n self.extract_dual_value,\n inverse_data[SCS.NEQ_CONSTR])\n dual_vars = {}\n dual_vars.update(eq_dual_vars)\n dual_vars.update(ineq_dual_vars)\n return Solution(status, opt_val, primal_vars, dual_vars, attr)\n else:\n return failure_solution(status)",
"def dry_exner_inverse(\n self,\n zz: FlowFieldVal,\n additional_states: Optional[FlowFieldMap] = None,\n ) -> FlowFieldVal:\n p_ref = self.p_ref(zz, additional_states)\n return tf.nest.map_structure(\n lambda p: tf.pow(p / self._p_thermal, -_R_D / self.cp_d), p_ref)",
"def square_inv(self, use_full=False, rcond=1e-15):\n # Throw error if tensor is not loaded\n if not self.in_mem: raise ValueError('GEN_TEN not in memory for doing square inverse')\n\n # Copy tensor temporarily\n newten = self._as_new_tensor(self.ten)\n\n # Do dense tensor inverse\n if newten.sym is None:\n init_shape = self.ten.shape\n nleg = len(init_shape)\n assert(nleg%2==0)\n left_size, right_size = np.prod(init_shape[:int(nleg/2)]), np.prod(init_shape[int(nleg/2):])\n mat = self.backend.reshape(self.ten,(left_size,right_size))\n inv = self.backend.pinv(mat)\n newten.ten = self.backend.reshape(inv,init_shape)\n\n # Do sparse tensor inverse\n else:\n # Ensure the tensor is square\n nleg = len(self.legs)\n nleg_2 = int(nleg/2)\n assert(nleg%2 == 0)\n\n if use_full:\n # Convert to a sparse tensor, then use pinv\n # function to find inverse\n\n # Convert symtensor into full tensor\n mat = self.ten.make_sparse()\n\n # Convert into a matrix\n tenshape = mat.shape\n order = []\n matshape = [1,1]\n for i in range(nleg):\n order += [i,i+nleg]\n if i < int(nleg/2):\n matshape[0] *= tenshape[i]*tenshape[i+nleg]\n else:\n matshape[1] *= tenshape[i]*tenshape[i+nleg]\n\n mat = mat.transpose(order)\n tenshape = mat.shape\n mat = mat.reshape(matshape)\n\n # Take Inverse\n inv = self.backend.pinv(mat)\n\n # Convert back into a tensor\n inv = inv.reshape(tenshape)\n order = []\n for i in range(nleg):\n order += [2*i]\n for i in range(nleg):\n order += [2*i+1]\n inv = inv.transpose(order)\n\n # Convert back into a symtensor\n delta = self.ten.get_irrep_map()\n einstr = LETTERS[:nleg].upper()+LETTERS[:nleg].lower() + ',' + \\\n LETTERS[:nleg].upper() + '->' + \\\n LETTERS[:nleg-1].upper()+LETTERS[:nleg].lower()\n inv = self.backend.einsum(einstr, inv, delta)\n newten.ten.array = inv\n\n else:\n # Do a self-implemented pinv function to find\n # pseudo inverse without using dense tensors\n\n # Take the conjugate (in case complex)\n a = self.conj()\n\n # Do the SVD of the tensor\n U,S,V = a.svd(nleg_2,\n truncate_mbd=None,\n return_ent=False,\n return_wgt=False)\n \n # Determine the cutoff value\n cutoff = rcond * S.max()\n\n # Invert S\n if S.is_ctf:\n tmpS = S.ten.array.copy().to_nparray()\n large = tmpS > cutoff\n tmpS = np.divide(1., tmpS, where=large, out=tmpS)\n tmpS[~large] = 0\n if S.is_ctf:\n tmpS = ctf.from_nparray(tmpS)\n S.ten.array = tmpS\n\n # Contract to get the inverse\n einstr = LETTERS[:nleg_2+1] + ',' + \\\n LETTERS[nleg_2:nleg_2+2] + '->' + \\\n LETTERS[:nleg_2]+LETTERS[nleg_2+1]\n inv = einsum(einstr,U,S)\n einstr = LETTERS[:nleg_2+1] + ',' + \\\n LETTERS[nleg_2:nleg+1] + '->' + \\\n LETTERS[:nleg_2]+LETTERS[nleg_2+1:nleg+1]\n inv = einsum(einstr,inv,V)\n newten.ten.array = inv.ten.array\n\n # Return result\n return newten"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Adds inverse power of corresponding circuit. May be overridden if a more efficient implementation is possible
|
def build_inverse_power(self, qc, q, power, q_ancillas=None, params=None):
for _ in range(power):
self.build_inverse(qc, q, q_ancillas, params)
|
[
"def inverse_differential_power(cls, power, tangent_vec, base_point):\n (\n eigvectors,\n transconj_eigvectors,\n numerator,\n denominator,\n temp_result,\n ) = cls._aux_differential_power(power, tangent_vec, base_point)\n power_operator = denominator / numerator\n result = power_operator * temp_result\n result = Matrices.mul(eigvectors, result, transconj_eigvectors)\n return result",
"def dry_exner_inverse(\n self,\n zz: FlowFieldVal,\n additional_states: Optional[FlowFieldMap] = None,\n ) -> FlowFieldVal:\n p_ref = self.p_ref(zz, additional_states)\n return tf.nest.map_structure(\n lambda p: tf.pow(p / self._p_thermal, -_R_D / self.cp_d), p_ref)",
"def build_inverse(self, qc, q, q_ancillas=None, params=None):\n qc_ = QuantumCircuit(*qc.qregs)\n\n self.build(qc_, q, q_ancillas, params)\n try:\n qc_.data = [gate.inverse() for gate in reversed(qc_.data)]\n except Exception as exc:\n raise AquaError('Irreversible circuit! Gate does not support inverse method.') from exc\n qc.extend(qc_)",
"def build_controlled_inverse(self, qc, q, q_control, q_ancillas=None, params=None):\n qc_ = QuantumCircuit(*qc.qregs)\n\n self.build_controlled(qc_, q, q_control, q_ancillas, params)\n try:\n qc_.data = [gate.inverse() for gate in reversed(qc_.data)]\n except AquaError:\n print('Irreversible circuit! Does not support inverse method.')\n qc.extend(qc_)",
"def __pow__(self,int_power):\r\n if not self.target==self.source:\r\n raise Exception(\"Morphism should be an endomorphism\")\r\n U = self.copy()\r\n U.set_to_identity()\r\n for i in range(int_power):\r\n U = self*U\r\n U.set_name(self.name+\"^\"+str(int_power))\r\n\r\n return U",
"def qnode_for_inverse(self, mock_device):\n\n def circuit(x):\n qml.RZ(x, wires=[1]).inv()\n qml.RZ(x, wires=[1]).inv().inv()\n return qml.expval(qml.PauliX(0)), qml.expval(qml.PauliZ(1))\n\n node = qml.QNode(circuit, mock_device)\n node.construct([1.0], {})\n\n return node",
"def power_on(self):",
"def power(self, exponent: float):\n return CPhaseGate(-0.5 * numpy.pi * exponent)",
"def test_inverse_differential_power(self):\n base_point = gs.array([[1., 0., 0.],\n [0., 2.5, 1.5],\n [0., 1.5, 2.5]])\n tangent_vec = gs.array([[1., 1 / 3, 1 / 3],\n [1 / 3, .125, .125],\n [1 / 3, .125, .125]])\n power = .5\n result = self.space.inverse_differential_power(\n power=power,\n tangent_vec=tangent_vec,\n base_point=base_point)\n expected = gs.array([[2., 1., 1.],\n [1., .5, .5],\n [1., .5, .5]])\n self.assertAllClose(result, expected)",
"def power(self):\n return self.curr * self.emf",
"def power(self,n):\r\n\t\t\r\n\t\t# make sure n is an integer\r\n\t\tn = int(n)\r\n\t\t\r\n\t\t# take top and bottom to power\r\n\t\tt = self.top().power(n)\r\n\t\tb = self.bottom().power(n)\r\n\t\t\r\n\t\t# if power is negative, invert expression\r\n\t\tif n < 0:\r\n\t\t\tt,b = b,t\r\n\t\t\t\r\n\t\t# keep name\r\n\t\tn = self.name\r\n\t\t\r\n\t\t# new expression\r\n\t\tw = Ex(t,b,n)\r\n\t\t\t\r\n\t\t# reinstate equation status\r\n\t\tw.__class__ = self.__class__\r\n\t\t\t\r\n\t\treturn w",
"def __invert__(self) -> IntegerValue:\n try:\n node = ops.BitwiseNot(self)\n except (IbisTypeError, NotImplementedError):\n return NotImplemented\n else:\n return node.to_expr()",
"def circuit(self):\n raise NotImplementedError",
"def inverse(self):\n\n # We use the Extended GCD algorithm\n # http://en.wikipedia.org/wiki/Polynomial_greatest_common_divisor\n\n if self._value == 0:\n raise ValueError(\"Inversion of zero\")\n\n r0, r1 = self._value, self.irr_poly\n s0, s1 = 1, 0\n while r1 > 0:\n q = _div_gf2(r0, r1)[0]\n r0, r1 = r1, r0 ^ _mult_gf2(q, r1)\n s0, s1 = s1, s0 ^ _mult_gf2(q, s1)\n return _Element(s0)",
"def _add_inverse(self):\n o = rdflib.URIRef(self.namespace.get_iri() + \"INVERSE_OF_\" + self.name)\n x = (self.iri, rdflib.OWL.inverseOf, o)\n y = (o, rdflib.RDF.type, rdflib.OWL.ObjectProperty)\n z = (o, rdflib.RDFS.label, rdflib.Literal(\"INVERSE_OF_\" + self.name,\n lang=\"en\"))\n\n self.namespace._graph.add(x)\n self.namespace._graph.add(y)\n self.namespace._graph.add(z)\n for superclass in self.direct_superclasses:\n self.namespace._graph.add((\n o, rdflib.RDFS.subPropertyOf, superclass.inverse.iri\n ))\n return self.namespace._namespace_registry.from_iri(o)",
"def add_powder(self):",
"def opposite(self):\n return KenzoChainComplexMorphism(__opps__(self._kenzo))",
"def __mul__(self, integer):\n \n result = self\n \n for i in range(integer - 1):\n result += self\n \n return result\n \n \n # def invert(self):\n \"\"\"\n I don't think we need an invert method for scales.\n \"\"\"",
"def inverse(self):\n ret = copy.deepcopy(self)\n for l in xrange(0, self.lmax + 1):\n ret.clmat[l, :, :] = np.linalg.pinv(self.clmat[l])\n return ret"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
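The three Aqua-style documents above (build_inverse, build_controlled_inverse, build_inverse_power) all rest on the same trick: replay the subcircuit's gates in reverse order with each gate replaced by its inverse. The following is only a hedged, standalone sketch of that idea using the current Qiskit API (QuantumCircuit.inverse() and compose() rather than Aqua's qc.extend); the circuit contents are invented for the example and do not come from the source.

from qiskit import QuantumCircuit

# Build a small subcircuit.
sub = QuantumCircuit(2)
sub.h(0)
sub.cx(0, 1)
sub.rz(0.3, 1)

# QuantumCircuit.inverse() performs the same reverse-and-invert walk as the
# documents above: gates are emitted in reverse order, each replaced by its
# inverse; non-invertible instructions (e.g. measurements) raise an error.
inv = sub.inverse()

# Appending the subcircuit followed by its inverse composes to the identity.
qc = QuantumCircuit(2)
qc.compose(sub, inplace=True)
qc.compose(inv, inplace=True)
print(qc)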
Add a file to be uploaded.
|
def add_file(self, fieldname, filename, content, mimetype=None):
if mimetype is None:
mimetype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
self.files.append((fieldname, filename, mimetype, content))
|
[
"def add_file(self, fpath):\n if not os.path.isfile(fpath):\n print(\"cloudtalker: cannot find file\", fpath)\n return None\n #try to parse filename\n parsed = self.parse_filename(fpath)\n print(\"after parsing:\", parsed)\n if parsed is not None:\n fdata = {\n \"path\": fpath,\n \"type\": parsed[0],\n \"ts\": parsed[1],\n \"segno\": parsed[2],\n }\n self.inq.put(fdata)\n print(\"upload module accepted file\", fpath)",
"def add_file(self, field_name, file_name, file_handle,\n mimetype=None):\n body = file_handle.read()\n if mimetype is None:\n mimetype = (\n mimetypes.guess_type(file_name)[0] or\n \"application/octet-stream\"\n )\n self.files.append((field_name, file_name, mimetype, body))",
"def _add_file(file_path):\n _db_content[\"files\"].append(file_path)",
"def addFile(self, file_path, type_, description):\n if not os.path.exists(file_path):\n raise ValueError, _(\"Incorrect path\")\n _name = os.path.basename(file_path)\n _isin = False\n for _ofile in self.__files:\n if _ofile.name == _name:\n _isin = True\n if not _isin:\n _file = File(_name, type_, description)\n self.__files.append(_file)",
"def add_file_to_instance(self, *, agent_name: str, instance_name: str, file_id: str, file_path: str) -> None:",
"def add(self, filename):\n self.index.add_new_file(filename)",
"def add_file(self, file_path):\n self._repo.index.add([str(file_path)])",
"def add_upload(self, srcpath, target):\n # TODO: make sure to support all types\n assert 'attachments://' in target or 'files://' in target, \"Malformed targetpath,\"\\\n \" try something like 'files://' \"\n p = Path(srcpath)\n item = {'source': str(p.absolute()), 'target': target}\n self.upload['upload'].append(item)",
"def add_attachment(self, fname):\n self._attachments.append(fname)",
"def add_file_to_project(self, file, project):\n if project is not None and file is not None:\n LOGGER.debug(\"Adding item '%s' to project '%s'\" % (file.name,\n project.name))\n project.files.append(file)\n self.__SESSION.commit()\n LOGGER.debug(\"File has been added to project successfully!\")\n else:\n raise ValueError(\"Value of parameter 'file' and 'project'\\\n can't be None\")",
"def add_file(self, filename):\r\n file = open(filename, 'r', encoding='utf8', errors='ignore')\r\n text = file.read() # read it all in at once!\r\n file.close()\r\n self.add_string(text)",
"def add_file(self, filename):\n self.filenames.insert(0, filename)\n del self.filenames[self.max_no_files:]\n self.filenames = list(dict.fromkeys(self.filenames))\n self.settings.setValue('recent_files_list', [self.filenames])\n self.update_actions()",
"def add_file(self, filename, UUID):\n self.files[UUID] = Data(filename=filename)",
"def add_file(self, root, filename):\n if filename in self.ignored_filenames:\n return\n\n item = File(os.path.join(root, filename, configuration=self.configuration))\n if root in self.__directory_index__:\n item.parent = self.__directory_index__[root]\n self.files.append(item)",
"def add(self, file, fsize):\r\n if (self.size + fsize) > self.max_size:\r\n raise FilesTooBigError()\r\n\r\n self.files[file] = fsize\r\n self.size += fsize",
"def publish_add_file(self, pth, header=None, trans_id=None):\n\n try:\n self._frepo.add_file(trans_id, pth)\n except svr_repo.RepositoryError as e:\n raise tx.TransportOperationError(str(e))",
"def add_uaudiofile():\n\n file_id = request.form.get(\"file_id\")\n\n audiofile = AudioFile.query.get(file_id)\n\n user_id = session.get(\"user_id\")\n\n add_audiofile_to_library(user_id, file_id)\n\n print \"{} file added to your library.\".format(audiofile.file_type)\n\n message = \"{} file added to your library.\".format(audiofile.file_type)\n\n result = {\"message\": message, \"in_db\": True}\n\n return jsonify(result)",
"def add_file(self, file_list, name, path, executable=False):\n\n if not os.path.exists(path):\n raise Exception(\"{} does not exist.\".format(path))\n\n if isinstance(file_list, basestring):\n file_list = file_list.split()\n\n f = File(name, path, False, executable)\n\n for fl in file_list:\n self.file_lists[fl].append(f)",
"def upload_file(self, file_id, name, folder='.', overwrite=True):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return a string representing the form data, including attached files.
|
def __str__(self):
# Build a list of lists, each containing "lines" of the
# request. Each part is separated by a boundary string.
# Once the list is built, return a string where each
# line is separated by '\r\n'.
parts = []
part_boundary = '--' + self.boundary
# Add the form fields. Make sure the value fields have been
# converted to str properly. This includes convert list/tuple
# and numbers. Unicode will be utf-8 encoded str.
parts.extend([
part_boundary,
'Content-Disposition: form-data; name="%s"' % \
self._safe_str(name),
'',
self._safe_str(value),
] for name, value in self.form_fields)
# Add the files to upload
parts.extend([
part_boundary,
'Content-Disposition: form-data; name="%s"; filename="%s"' % \
(self._safe_str(field_name), self._safe_str(filename)),
'Content-Type: %s' % (self._safe_str(content_type)),
'',
body,
] for field_name, filename, content_type, body in self.files)
# Flatten the list and add closing boundary marker,
# then return CR+LF separated data
flattened = list(itertools.chain(*parts))
flattened.append('--' + self.boundary + '--')
flattened.append('')
return '\r\n'.join(flattened)
|
[
"def document_form_data():\n\n return {\"document\": SimpleUploadedFile(\"myimage.png\", b\"file_content\")}",
"def encode_multipart_formdata(\n self,\n ) -> Tuple[Optional[str], Optional[bytes]]:\n if not (self._fields or self._files):\n return None, None\n\n NEWLINE = b'\\r\\n'\n BOUNDARY = self._make_mime_boundary()\n content = BytesIO()\n\n for key, value in self._fields.items():\n content.write(b'--%s%s' % (BOUNDARY, NEWLINE))\n content.write(b'Content-Disposition: form-data; name=\"%s\"%s'\n % (key, NEWLINE))\n content.write(NEWLINE)\n content.write(value)\n content.write(NEWLINE)\n\n for key, file_info in self._files.items():\n content.write(b'--%s%s' % (BOUNDARY, NEWLINE))\n content.write(b'Content-Disposition: form-data; name=\"%s\"; ' % key)\n content.write(b'filename=\"%s\"%s' % (file_info['filename'],\n NEWLINE))\n content.write(b'Content-Type: %s%s' % (file_info['mimetype'],\n NEWLINE))\n content.write(NEWLINE)\n content.write(file_info['content'])\n content.write(NEWLINE)\n\n content.write(b'--%s--%s%s' % (BOUNDARY, NEWLINE, NEWLINE))\n content_type = ('multipart/form-data; boundary=%s'\n % BOUNDARY.decode('utf-8'))\n\n return content_type, content.getvalue()",
"def value(self):\n if self.has_value():\n return f\"[Attachment: {self.filename}]\"\n else:\n return None",
"def multipart_data(self) -> Optional[UploadFileDict]:\n if not self.input_files:\n return None\n return {\n (input_file.attach_name or self.name): input_file.field_tuple\n for input_file in self.input_files\n }",
"def __encodeMultipartFormdata(self, formDataList):\n boundary = str(time.time()).replace(\".\", \"_\").rjust(32, \"-\")\n \n lines = []\n for formName, data in formDataList:\n lines.append(\"--\" + boundary)\n if type(data) is types.StringType:\n cd = \"Content-Disposition: form-data; name=\\\"%s\\\"\" % formName\n lines.append(cd)\n else:\n dataType = type(data)\n if dataType is types.TupleType:\n filename, data = data\n elif dataType is types.FileType:\n filename = data.name\n data = data.read()\n else:\n print \"Ignoring unsupported data type: %s\" % dataType\n continue\n cd = \"Content-Disposition: form-data; name=\\\"%s\\\"; filename=\\\"%s\\\"\" % (formName, filename)\n lines.append(cd)\n lines.append(\"Content-Type: %s\" % self.__getFileContentType(filename))\n lines.append(\"\")\n lines.append(data)\n lines.append(\"--\" + boundary + \"--\")\n lines.append(\"\")\n data = string.join(lines, \"\\r\\n\")\n contentType = \"multipart/form-data; boundary=%s\" % boundary\n return contentType, data",
"def encode_multipart_formdata(self, fields, files):\n\t\tBOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'\n\t\tCRLF = '\\r\\n'\n\t\tL = []\n\t\tfor (key, value) in fields:\n\t\t\tL.append('--' + BOUNDARY)\n\t\t\tL.append('Content-Disposition: form-data; name=\"%s\"' % key)\n\t\t\tL.append('')\n\t\t\tL.append(value)\n\t\tfor (key, filename, value) in files:\n\t\t\tL.append('--' + BOUNDARY)\n\t\t\tL.append('Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' % (key, filename))\n\t\t\tL.append('Content-Type: %s' % self.get_content_type(filename))\n\t\t\tL.append('')\n\t\t\tL.append(value)\n\t\tL.append('--' + BOUNDARY + '--')\n\t\tL.append('')\n\t\tbody = CRLF.join(L)\n\t\tcontent_type = 'multipart/form-data; boundary=%s' % BOUNDARY\n\t\treturn content_type, body",
"def __str__(self) -> str:\n return self.mime.as_string()",
"def encode_multipart_formdata(self, fields, files):\n BOUNDARY = '----------%s' % hex(int(time.time() * 1000))\n CRLF = '\\r\\n'\n L = []\n for (key, value) in fields:\n L.append('--' + BOUNDARY)\n L.append('Content-Disposition: form-data; name=\"%s\"' % str(key))\n L.append('')\n if isinstance(value, unicode):\n L.append(value.encode('utf-8'))\n else:\n L.append(value)\n for (key, filename, value) in files:\n L.append('--' + BOUNDARY)\n L.append('Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' % (str(key), str(filename)))\n L.append('Content-Type: %s' % str(self.get_content_type(filename)))\n L.append('Content-Length: %d' % len(value))\n L.append('')\n L.append(value)\n L.append('--' + BOUNDARY + '--')\n L.append('')\n\n body = CRLF.join(L)\n content_type = 'multipart/form-data; boundary=%s' % BOUNDARY\n return content_type, body",
"def get_attachment(self) -> Dict[str, Any]:\n # TODO: Refactor into another file to preserve purity\n text_pairs = [\n ('Slack ID', self.slack_id),\n ('Name', self.name),\n ('Email', self.email),\n ('Github Username', self.github_username),\n ('Github ID', self.github_id),\n ('Major', self.major),\n ('Position', self.position),\n ('Biography', self.biography),\n ('Image URL', self.image_url),\n ('Permissions Level', str(self.permissions_level)),\n ('Karma', self.karma)\n ]\n\n fields = [{'title': t, 'value': v if v else 'n/a', 'short': True}\n for t, v in text_pairs]\n fallback = str('\\n'.join(map(str, text_pairs)))\n\n return {'fallback': fallback, 'fields': fields}",
"def _get_attachment_skeleton(cls):\n return {\n # String\n # Plaintext summary of the attachment; renders in non-markdown compliant clients,\n # such as push notifications.\n 'fallback': '',\n\n # String, hex color\n # Colors the vertical bar to the left of the text.\n 'color': '#36a64f',\n\n # String\n # Text that appears above the vertical bar to the left of the attachment.\n # Supports markdown if it's included in \"mrkdwn_in\"\n 'pretext': '',\n\n # String\n # The attachment's author name.\n # If this field is omitted, then the entire author row is omitted.\n 'author_name': '',\n\n # String, URL\n # Provide a URL; Adds a clickable link to the author name\n 'author_link': '',\n\n # String, URL of an image\n # The icon appears to the left of the author name\n 'author_icon': '',\n\n # String\n # Appears as bold text above the attachment itself.\n # If this field is omitted, the entire title row is omitted.\n 'title': '',\n\n # String, URL\n # Adds a clickable link to the title\n 'title_link': '',\n\n # String\n # Raw text that appears in the attachment, below the title but above the fields\n # Supports markdown if it's included in \"mrkdwn_in\".\n # Use \\n for newline characters.\n # This field has a field limit of cls.MAX_MESSAGE_SIZE\n 'text': '',\n\n # Array of dicts; Each dict should have keys \"title\", \"value\", \"short\"\n # An array of fields that appears below the text. These fields are clearly delineated\n # with title and value.\n 'fields': [\n # Sample field:\n # {\n # \"title\": \"Priority\",\n # \"value\": \"High\",\n # \"short\": False\n # }\n ],\n\n # String, URL of an image\n # Large image that appears as an attachment\n 'image_url': '',\n\n # String, URL of an image\n # When image_url is omitted, this one renders a smaller image to the right\n 'thumb_url': '',\n\n # String\n # Appears at the very bottom\n # If this field is omitted, also omits the footer icon\n 'footer': '',\n\n # String, URL\n # This icon appears to the left of the footer\n 'footer_icon': '',\n\n # Integer, Unix timestamp\n # This will show up next to the footer at the bottom.\n # This timestamp does not change the time the message is actually sent.\n 'ts': '',\n\n # List of strings\n # Defines which of the above fields will support Slack's simple markdown (with special\n # characters like *, ~, _, `, or ```... etc)\n # By default, we respect markdown in \"text\" and \"pretext\"\n \"mrkdwn_in\": [\n 'text',\n 'pretext',\n ],\n }",
"def spoken_form_as_string(self):\n return string.join(self.spoken_forms())",
"def get_from_form_data(self, data, files, name):\n return data.get(name, None)",
"def getData(self):\n return (\n self.classnameEdit.text(),\n os.path.join(self.pathnamePicker.text(),\n self.filenameEdit.text())\n )",
"def filename(self):\n if self.type == 'literal':\n return self._message.filename\n return ''",
"def get_from_form_data(self, data, files, name):\n return self.field.widget.value_from_datadict(data, files, name)",
"def get_extdata(self) -> str:\n return '\\n'.join(self._extdata)",
"def index(self):\n\n return \"\"\"\n <html>\n <body>\n <form action=\"upload\" method=\"post\" enctype=\"multipart/form-data\">\n File: <input type=\"file\" name=\"theFile\"/>\n <br/>\n <input type=\"submit\"/>\n </form>\n </body>\n </html>\n \"\"\"",
"def extract_file_fields(form):\n result = []\n for fieldname in form:\n field = form[fieldname]\n if isinstance(field, list):\n for field_entry in field:\n if field_entry.filename:\n result.append(field_entry)\n\n elif field.filename:\n result.append(field)\n\n return result",
"def get_details_and_mimetype(self, message):\n payload = message.get_payload()\n if not message.is_multipart():\n mimetype = message.get_content_type()\n charset = message.get_content_charset()\n logger.info(\"Charset: %r\", charset)\n if charset and charset != 'utf-8':\n # We only want to store unicode or ascii or utf-8 in\n # Plone.\n # Decode to unicode:\n payload = payload.decode(charset, 'replace')\n # Encode to utf-8:\n payload = payload.encode('utf-8', 'replace')\n return payload, mimetype\n for part in payload:\n if part.is_multipart():\n text, mimetype = self.get_details_and_mimetype(part)\n else:\n text, mimetype = self.part_to_text_and_mimetype(part)\n text = text.strip()\n # Might be empty?\n if text:\n return text, mimetype\n return '', 'text/plain'"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
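The add_file and __str__ documents above belong to a multipart/form-data builder whose remaining members (boundary, form_fields, _safe_str) are not shown in this dump. The sketch below reimplements the same encoding under those assumptions so it can run on its own; the class name MultiPartForm, the add_field helper, and the sample values are illustrative, not taken from the source.

import mimetypes
import uuid

class MultiPartForm:
    """Minimal stand-in for the form builder the documents above belong to."""

    def __init__(self):
        self.form_fields = []   # list of (name, value)
        self.files = []         # list of (fieldname, filename, mimetype, content)
        self.boundary = uuid.uuid4().hex

    def add_field(self, name, value):
        self.form_fields.append((name, value))

    def add_file(self, fieldname, filename, content, mimetype=None):
        # Same behaviour as the document: guess the MIME type when not given.
        if mimetype is None:
            mimetype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
        self.files.append((fieldname, filename, mimetype, content))

    def __str__(self):
        # Each part is separated by the boundary string; files carry an
        # extra filename and Content-Type header, and the body ends with
        # the closing boundary marker.
        boundary = '--' + self.boundary
        parts = []
        for name, value in self.form_fields:
            parts.extend([boundary,
                          'Content-Disposition: form-data; name="%s"' % name,
                          '', str(value)])
        for fieldname, filename, mimetype, content in self.files:
            parts.extend([boundary,
                          'Content-Disposition: form-data; name="%s"; filename="%s"'
                          % (fieldname, filename),
                          'Content-Type: %s' % mimetype,
                          '', content])
        parts.extend(['--' + self.boundary + '--', ''])
        return '\r\n'.join(parts)

form = MultiPartForm()
form.add_field('user', 'alice')
form.add_file('report', 'report.txt', 'hello world', mimetype='text/plain')
print(str(form))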
Find the (mode, count) of a set of data, including a tolerance window of +/- window if > 0
|
def find_mode(data, window=0):
vals = np.unique(data)
counts = [len([x for x in data if abs(x-val) <= window]) for val in vals]
bestix = np.argmax(counts)
return (vals[bestix], counts[bestix])
|
[
"def mode_detrend(data, window=500, bins=None, threshold=3.0):\n d1 = data.view(np.ndarray)\n ends = [d1[:window], d1[-window:]]\n y = [float_mode(w, bins=bins) for w in ends]\n \n x0 = window / 2.0\n x1 = len(data) - x0\n m = (y[1] - y[0]) / (x1 - x0)\n b0 = y[1] - m * x1\n b1 = b0 + m * len(data)\n \n base = np.linspace(b0, b1, len(data))\n return d1 - base",
"def estimate_mode_width(distribution):\n mode = distribution.argmax()\n halfmax = float(distribution[mode]) / 2\n whm = (distribution > halfmax).astype(np.int).sum()\n return mode, whm",
"def get_mode(signal):\n\n signal = np.array(signal)\n hist = np.histogram(signal)\n freqs = hist[0]\n bins = hist[1]\n max_freq = max(freqs)\n index_arr = np.where(freqs == max_freq)\n index = index_arr[0][0]\n mode = (bins[index] + bins[index + 1]) / 2\n return mode",
"def get_windowing(data):\n dicom_fields = [data[('0028','1050')].value, #window center\n data[('0028','1051')].value, #window width\n data[('0028','1052')].value, #intercept\n data[('0028','1053')].value] #slope\n return [get_first_of_dicom_field_as_int(x) for x in dicom_fields]",
"def mode(data_set):\n counter = Counter(data_set)\n max_count = max(counter.values())\n return [x for x, count in counter.items() if count == max_count]",
"def zmode(list) -> float:\n # mode = 0\n # mode_count = 0\n for i in list:\n mode_count = 0\n mode = 0\n # index = 0\n for i in list:\n if list.count(i) > mode_count:\n mode_count = list.count(i)\n mode = i\n return mode",
"def n_windows(self):\n return len(self.data) - self.n_past - self.n_future + 1",
"def score_windows(window_generator, model, total_count):\n start_time = datetime.datetime.now()\n count_so_far = 0 \n \n results = [] \n while True:\n windows = list(it.islice(window_generator, 4096))\n if not windows:\n break\n \n result = score_window_list(windows, model)\n results.append(result)\n \n count_so_far = count_so_far + len(result)\n \n if total_count:\n logging.info(progress_report(count_so_far, total_count, start_time))\n else:\n logging.info('Processed {0} window_centers'.format(count_so_far))\n\n \n results = sp.concatenate(results)\n \n return results",
"def find_best_window_size(x, f, fs, window=DEFAULT_WINDOW, t=DEFAULT_THRESHOLD, err=DEFAULT_FREQ_ERROR, verbose=False):\n for k in xrange(1, 1000):\n M = 100 * k + 1\n (mX, pX, ploc, iploc, ipmag, ipphase, fEst, N) = run_one_estimate(\n x, fs, M, window=window, t=t)\n if verbose:\n print \"f={0} M={1} N={2} fEst={3} error={4}\".format(f, M, N, fEst, f-fEst)\n if iploc.size > 0 and all(abs(fEst - f) < err):\n return (fEst, M, N)",
"def windowedStats(arr, nseg=16):\n #splits array into nseg segments and creates empty arrays for each value, each array has nseg elements\n segSize = int(arr.shape[0] / nseg) #how many elements in each segment\n minVals = np.zeros(nseg)\n maxVals = np.zeros(nseg)\n meanVals = np.zeros(nseg)\n stdVals = np.zeros(nseg)\n snrVals = np.zeros(nseg)\n\n #takes sidth segment and assigns value for that segment to sidth element of value array\n #put KS testing in here too?\n for sid in np.arange(nseg):\n sid = int(sid)\n minVals[sid] = arr[segSize*sid:segSize*(sid+1)].min()\n maxVals[sid] = arr[segSize*sid:segSize*(sid+1)].max()\n meanVals[sid] = arr[segSize*sid:segSize*(sid+1)].mean()\n stdVals[sid] = np.std(arr[segSize*sid:segSize*(sid+1)])\n if np.isclose(stdVals[sid], 0): snrVals[sid] = 0.\n else: snrVals[sid] = maxVals[sid] / stdVals[sid]\n \n return { 'min': minVals, 'max': maxVals, 'mean': meanVals, 'std': stdVals, 'snr': snrVals }",
"def mode(data):\n if len(np.unique(data)) == 1:\n return data[0]\n else:\n kde = smnp.KDEUnivariate(data.astype('double'))\n kde.fit(cut=0)\n grid, y = kde.support, kde.density\n return grid[y == y.max()][0]",
"def _win_view_stat(x, win_size=5, stat=\"nanmean\"):\n #if x.shape == (1, 1):\n #return x\n\n\n measure = getattr(np, stat)\n\n pad = int(win_size//2)\n data = np.pad(x, (pad, pad), mode=\"constant\", constant_values=(np.nan))\n\n #sh = np.asarray(x).shape\n #mask = np.zeros_like(x)\n #mask[pad:sh[0]-pad, pad:sh[1]-pad] = 1\n\n #data = np.where(mask==1, x, np.nan)\n\n #get windowed view of array\n windowed = ski.util.view_as_windows(data, (win_size, win_size))\n\n #calculate measure over last to axis\n res = measure(windowed, axis=(2, 3))\n\n return res",
"def test_dfs_mode2():\n pdfs = np.array([0, 1, 12, 3, 5])\n x = np.arange(5)\n mds = pval.dfs_mode(pdfs, x)\n np.testing.assert_equal(mds, 2)",
"def countTof(evt, record, signalThreshold=1, minWindow=0, maxWindow=-1, hitscoreThreshold=2, outkey=\"tof: \"):\n hitscore = record.data[minWindow:maxWindow] > signalThreshold\n hit = hitscore > hitscoreThreshold\n v = evt[\"analysis\"]\n add_record(v, \"analysis\", outkey + \"isHit\", hit) \n add_record(v, \"analysis\", outkey + \"hitscore\", hitscore)",
"def get_important_neurons(patterns, mode='raw', n=10):\n if mode == 'percentile':\n n = (100-n) * patterns.shape[1]\n\n inds = []\n for pattern in patterns:\n inds.append(np.argpartition(pattern, -n)[-n:])\n\n return inds",
"def _determine_window(self, event, onset_data, tt, fraction_tt):\n\n arrival_idx = util.time2sample(event.otime + tt - onset_data.starttime,\n onset_data.sampling_rate)\n\n # Add length of marginal window to this and convert to index\n samples = util.time2sample(tt * fraction_tt + event.marginal_window,\n onset_data.sampling_rate)\n\n return [arrival_idx - samples, arrival_idx, arrival_idx + samples]",
"def mode(self, mode_occurrence=False):\r\n\t\treturn find_mode(self.dataset, mode_occurrence)",
"def get_dist_counts(data, distance):\n #print \"in dist_counts\"\n distances = spatial.distance.squareform(spatial.distance.pdist(data, distance))\n closest_5 = np.argpartition(distances, kth=5, axis=1)[:,:5]\n # no argpartition in np 1.7 on hulk\n counts = np.zeros(len(distances))\n #print closest_5\n \n for i in range(len(closest_5)):\n for j in range(5):\n if j != i:\n counts[closest_5[i][j]] += 1\n \n\n \n return counts",
"def data2count(data, cols, reference, greater_than=[], less_than=[], overlaps=[], equals=[]):\n truth = np.ones(len(data), dtype=bool)\n\n for col, thr in greater_than:\n truth *= data[:,cols.index(col)] > thr\n\n for col, thr in less_than:\n truth *= data[:,cols.index(col)] < thr\n\n for start, stop, low, high in overlaps:\n truth *= (data[:,cols.index(start)] <= high) * (low <= data[:,cols.index(stop)])\n\n for col, val in equals:\n truth *= np.isclose(data[:,cols.index(col)], val) # use default rtol and atol\n\n return len(np.unique(data[:,cols.index(reference)][truth]))",
"def get_indexes(r_peak_times, window):\n\n indexes = []\n multiplier = 1\n for i in range(0, len(r_peak_times)):\n if r_peak_times[i] >= multiplier*window:\n indexes.append(i)\n multiplier += 1\n return indexes"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
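To make the tolerance-window behaviour of the find_mode document above concrete, here is a hedged usage sketch: the function body is repeated verbatim so the example runs on its own, and the sample data is invented.

import numpy as np

def find_mode(data, window=0):
    # Same logic as the document above: for each unique value, count how many
    # samples fall within +/- window of it, then return the best value/count.
    vals = np.unique(data)
    counts = [len([x for x in data if abs(x - val) <= window]) for val in vals]
    bestix = np.argmax(counts)
    return (vals[bestix], counts[bestix])

data = [10, 11, 11, 12, 20, 30, 30]
print(find_mode(data))            # exact mode: (11, 2)
print(find_mode(data, window=1))  # values 10..12 all fall in the window: (11, 4)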
Match audio against fingerprint hash table. Return top N matches as (id, filteredmatches, timoffs, rawmatches). If hashesfor is specified, also return the actual matching hashes for that hit (0 = top hit).
|
def match_hashes(ht, hashes, hashesfor=None, window=1):
# find the implicated id, time pairs from hash table
hits = ht.get_hits(hashes)
# Sorted list of all the track ids that got hits
idlist = np.r_[-1, sorted([id for id, time, hash, otime in hits]), -1]
# Counts of unique entries in the sorted list - diff of locations of changes
counts = np.diff(np.nonzero(idlist[:-1] != idlist[1:]))[0]
# ids corresponding to each count - just read after the changes in the list
ids = idlist[np.cumsum(counts)]
# Find all the actual hits for a the most popular ids
bestcountsids = sorted(zip(counts, ids), reverse=True)
# Try the top 100 results
results = []
for rawcount, tid in bestcountsids[:100]:
(mode, filtcount) = find_mode([time for (id, time, hash, otime) in hits
if id == tid],
window=window)
matchhashes = [((otime), hash) for (id, time, hash, otime) in hits
if id == tid and abs(time - mode) <= window]
# matchhashes may include repeats because multiple
# ref hashes may match a single query hash under window. Uniqify:
matchhashes = sorted(list(set(matchhashes)))
filtcount = len(matchhashes)
results.append( (tid, filtcount, mode, rawcount, matchhashes) )
results = sorted(results, key=lambda x:x[1], reverse=True)
shortresults = [(tid, filtcount, mode, rawcount)
for (tid, filtcount, mode, rawcount, matchhashes) in results]
if hashesfor is not None:
return shortresults, results[hashesfor][4]
else:
return shortresults
|
[
"def return_matches(self, hashes: List[Tuple[str, int]],\n batch_size: int = 1000) -> Tuple[List[Tuple[int, int]], Dict[int, int]]:\n # Create a dictionary of hash => offset pairs for later lookups\n mapper = {}\n for hsh, offset in hashes:\n if hsh.upper() in mapper.keys():\n mapper[hsh.upper()].append(offset)\n else:\n mapper[hsh.upper()] = [offset]\n\n values = list(mapper.keys())\n\n # in order to count each hash only once per db offset we use the dic below\n dedup_hashes = {}\n\n results = []\n with self.cursor() as cur:\n for index in range(0, len(values), batch_size):\n # Create our IN part of the query\n query = self.SELECT_MULTIPLE % ', '.join([self.IN_MATCH] * len(values[index: index + batch_size]))\n\n cur.execute(query, values[index: index + batch_size])\n\n for hsh, sid, offset in cur:\n if sid not in dedup_hashes.keys():\n dedup_hashes[sid] = 1\n else:\n dedup_hashes[sid] += 1\n # we now evaluate all offset for each hash matched\n for audio_sampled_offset in mapper[hsh]:\n results.append((sid, offset - audio_sampled_offset))\n\n return results, dedup_hashes",
"def match_file(ht, filename, density=None, sr=11025, n_fft=512, n_hop=256, window=1, shifts=4, verbose=False):\n hq = audfprint.wavfile2hashes(filename, sr=sr, density=density, \n n_fft=n_fft, n_hop=n_hop, shifts=shifts)\n # Fake durations as largest hash time\n if len(hq) == 0:\n durd = 0.0\n else:\n durd = float(n_hop * hq[-1][0])/sr\n if verbose:\n print \"Analyzed\",filename,\"of\",('%.3f'%durd),\"s to\",len(hq),\"hashes\"\n # Run query\n return match_hashes(ht, hq, window=window), durd, len(hq)",
"def align_matches(self, matches):\n\t\t# align by diffs\n\t\tdiff_counter = {}\n\t\tlargest = 0\n\t\tlargest_count = 0\n\t\tsong_id = -1\n\t\tfor tup in matches:\n\t\t\tsid, diff = tup\n\t\t\tif diff not in diff_counter:\n\t\t\t\tdiff_counter[diff] = {}\n\t\t\tif sid not in diff_counter[diff]:\n\t\t\t\tdiff_counter[diff][sid] = 0\n\t\t\tdiff_counter[diff][sid] += 1\n\n\t\t\tif diff_counter[diff][sid] > largest_count:\n\t\t\t\tlargest = diff\n\t\t\t\tlargest_count = diff_counter[diff][sid]\n\t\t\t\tsong_id = sid\n\n\t\tsong = self.db.get_song_by_id(song_id)\n\t\t#print largest_count\n\t\t# return match info\n\t\tnseconds = round(float(largest) / self.config.get('fingerprint').get('samplerate') *\n\t\t\t\t\t\t self.config.get('fingerprint').get('window_size') *\n\t\t\t\t\t\t self.config.get('fingerprint').get('overlap_ratio'), 5)\n\t\tif song is None:\n\t\t\treturn None\n\t\t# self.log_event()\n\t\tsong = {\n\t\t\tDejavu.SONG_ID: song_id,\n\t\t\tDejavu.SONG_NAME: songname,\n\t\t\tDejavu.CONFIDENCE: largest_count,\n\t\t\tDejavu.OFFSET: int(largest),\n\t\t\tDejavu.OFFSET_SECS: nseconds,\n\t\t\tDatabase.FIELD_FILE_SHA1: song.get(Database.FIELD_FILE_SHA1, None), }\n\t\tself.log_match(song_id, largest_count, int(largest), nseconds)\n\t\treturn song",
"def top_match(self):\n\n # If no matches return empty list\n if len([x for x in self.matches().keys()]) == 0:\n return []\n\n # get and sort the list of matches previously used\n mtch_lst = [(k, v) for k, v in self.matches().items()]\n srtd = sorted(mtch_lst, reverse=True, key=lambda x: x[1])\n\n # check if there are any ties\n top_score = srtd[0][1]\n return [x[0] for x in srtd if x[1] == top_score]",
"def search_match_1(snip_fgp1):\n conn = psycopg2.connect(host=\"sculptor.stat.cmu.edu\", database=c.DB_USER,\n user=c.DB_USER, password=c.DB_PASSWORD)\n cur = conn.cursor()\n cur.execute(\"SELECT song_id FROM songs\")\n uniq_id = cur.fetchall()\n uniq_id = reduce(np.append, uniq_id)\n \n tolerance = 10**(-3) # this is the default tolerance level, tuned\n \n matching_cnt = []\n window_num = []\n \n for song_id in uniq_id:\n distance = retriv_fgp1(int(song_id), snip_fgp1)\n matching_cnt.append(np.sum(distance<=tolerance))\n window_num.append(len(distance))\n \n # This is the new criterion: must have more than 10% similarity of a song\n # in the database - considered different lengths of songs\n similarity_lst = list(map(lambda i,j: i/j > 0.1, matching_cnt, window_num))\n matched_idx = [i for i,val in enumerate(similarity_lst) if val==True]\n matched_sid = [uniq_id[i] for i in matched_idx]\n \n if matched_sid == []:\n sm_logger.info('Oops, we try hard but find nothing...')\n return None\n else:\n possible_lst = []\n for i in matched_sid:\n possible_lst.append(retriv_name(int(i)))\n sm_logger.info('Found some songs matched the snippet!')\n return possible_lst",
"def library_match(spectra_list,lib_mgf,precursor_tol=1.0,cosine=0.7,n_peaks=3):\n\n \n library=load_from_mgf(lib_mgf)\n\n # Apply filters to clean and enhance each spectrum\n library_spectra = []\n for spectrum in library:\n # spectrum = default_filters(spectrum)\n # Scale peak intensities to maximum of 1\n spectrum = normalize_intensities(spectrum)\n library_spectra.append(spectrum)\n\n\n scores = calculate_scores(references=library_spectra,\n queries=spectra_list,\n similarity_function=CosineHungarian())\n\n scores_list=[]\n for score in scores:\n print(score)\n scores_list.append(score)\n \n scores_list.sort(reverse=True,key=lambda tuple:tuple[2])\n\n\n \n \n\n\n\n # if reference != query and n_matching >= 20:\n\n # for test_spectra in spectra_list:\n # pos=bisect.bisect(library_sort,test_spectra)\n # matches=[]\n # for lib in library_sort[pos-2:pos+2]:\n # score,peaks=cosine_score_max(test_spectra,lib,modified=False,precursor_tolerance=precursor_tol)\n # if score>=cosine and peaks>=n_peaks:\n # matches.append((score,peaks,lib))\n \n # if len(matches)>0:\n # #sort possible library matches by cosine score\n # matches.sort(reverse=True,key=lambda tuple: tuple[0])\n # #use parameters of spectrum match with highest cosine score\n # test_spectra.library_parameters=matches[0][2].parameters",
"def count_matched_audios_by_md5(self, md5: str) -> int:\n with self.cursor(dictionary=True) as cur:\n cur.execute(self.COUNT_MATCHED_AUDIOS, (md5,))\n count = cur.fetchone()[0] if cur.rowcount != 0 else 0\n\n return count",
"def hash_match(idx, query_run, **kwargs):\n logger_debug('match_hash: start....')\n matches = []\n query_hash = tokens_hash(query_run.tokens)\n rid = idx.rid_by_hash.get(query_hash)\n if rid is not None:\n rule = idx.rules_by_rid[rid]\n itokens = idx.tids_by_rid[rid]\n len_legalese = idx.len_legalese\n logger_debug('match_hash: Match:', rule.identifier)\n qspan = Span(range(query_run.start, query_run.end + 1))\n ispan = Span(range(0, rule.length))\n hispan = Span(p for p in ispan if itokens[p] < len_legalese)\n match = LicenseMatch(rule, qspan, ispan, hispan, query_run.start, matcher=MATCH_HASH, query=query_run.query)\n matches.append(match)\n return matches",
"def match(\n self, sources, threshold, class_ids=..., quantized_images=..., masks=...\n ) -> Tuple[matches, quantized_images]:\n ...",
"def samples(max = 10000):\n list_results = []\n with open(\"data/key.json\", \"r\") as file: # Requires web api key to be saved in a file named \"key\" with quotes around the key \n key = json.load(file)\n i = 0\n while(len(list_results) != max):\n try:\n results = dota2api.Initialise(key).get_match_history_by_seq_num(start_at_match_seq_num=(2416543502-(100*i)))\n for j in range(len(results[\"matches\"])):\n if(results[\"matches\"][j][\"duration\"] > 900 and results[\"matches\"][j][\"duration\"] < 3600 and results[\"matches\"][j][\"game_mode\"] == 22):\n if(len(list_results) == max):\n print(\"Match threshold acquired, saving file...\")\n break\n else:\n list_results.append(results[\"matches\"][j])\n i += 1\n print(\"Analyzed %d matches; using %d.\" % (i*100, len(list_results)))\n except:\n pass\n\n with open(\"data/matchdata.json\", \"w\") as file:\n json.dump(list_results, file)\n file.close()",
"def _matches_by_path_and_hash(session, file_identifiers):\n query_file = aliased(Files)\n match_file = aliased(Files)\n\n tuple_filter = tuple_(query_file.file_path, query_file.sha256, match_file.file_path, match_file.sha256).in_(\n file_identifiers\n )\n\n return (\n session.query(Matches)\n .join(query_file, Matches.query_video_file)\n .join(match_file, Matches.match_video_file)\n .filter(tuple_filter)\n )",
"def match(self, dbh):\n attempt_id = self.load_match_attempt(dbh)\n # match_query_text = \"\"\"SELECT {}.time_index, {}.signature, {}.song_id, totals.total_signatures FROM {}\n # INNER JOIN {} ON {}.signature = {}.signature\n # INNER JOIN (SELECT count(time_index) as total_signatures, song_id FROM {}\n # GROUP BY song_id) totals\n # ON totals.song_id={}.song_id\n # WHERE {}.match_id = ?\n # ORDER BY {}.signature;\n # \"\"\".format(\n # TABLE_NAMES.SIGNATURE, TABLE_NAMES.SIGNATURE,\n # TABLE_NAMES.SIGNATURE, TABLE_NAMES.SIGNATURE_MATCH,\n # TABLE_NAMES.SIGNATURE, TABLE_NAMES.SIGNATURE_MATCH,\n # TABLE_NAMES.SIGNATURE, TABLE_NAMES.SIGNATURE,\n # TABLE_NAMES.SIGNATURE, TABLE_NAMES.SIGNATURE_MATCH,\n # TABLE_NAMES.SIGNATURE)\n # df_matches = pd.read_sql(match_query_text, dbh.con, params=(attempt_id,))\n\n match_query_text = \"\"\"SELECT {}.match_id, {}.time_index, {}.signature, {}.song_id, totals.total_signatures FROM {} \n INNER JOIN {} ON {}.signature = {}.signature\n INNER JOIN (SELECT count(time_index) as total_signatures, song_id FROM {} \n GROUP BY song_id) totals\n ON totals.song_id={}.song_id \n \n ORDER BY {}.signature;\n \"\"\".format(TABLE_NAMES.SIGNATURE_MATCH,\n TABLE_NAMES.SIGNATURE, TABLE_NAMES.SIGNATURE,\n TABLE_NAMES.SIGNATURE, TABLE_NAMES.SIGNATURE_MATCH,\n TABLE_NAMES.SIGNATURE, TABLE_NAMES.SIGNATURE_MATCH,\n TABLE_NAMES.SIGNATURE, TABLE_NAMES.SIGNATURE,\n TABLE_NAMES.SIGNATURE,\n TABLE_NAMES.SIGNATURE)\n\n df_matches = pd.read_sql(match_query_text, dbh.con)\n df_matches = df_matches.loc[df_matches.match_id == attempt_id,]\n if df_matches.empty:\n logger.info('No Matching signatures found')\n return -1, 0, attempt_id\n\n f, t, sxx = self.spectrogram\n df_matches['neighbors'] = df_matches.apply(lambda x: df_matches.loc[(df_matches['time_index'] < x['time_index'] + len(t)) &\n (df_matches['time_index'] >= x['time_index']) &\n (df_matches['song_id'] == x['song_id'])].shape[0], axis=1)\n df_song_matches = df_matches.groupby('song_id').agg({'neighbors': 'sum', 'total_signatures': 'first'})\n df_song_matches['weighting'] = df_song_matches['neighbors'] / df_song_matches['total_signatures']\n df_song_matches['P'] = df_song_matches['weighting'] / df_song_matches['weighting'].sum()\n probability = df_song_matches['P'].max()\n\n if (probability < self.MIN_PROBABILITY_FOR_MATCH) or pd.isnull(probability)\\\n or (df_matches['neighbors'].max() < self.MIN_NEIGHBORS_FOR_MATCH):\n logger.info('No adequate matches found')\n return -1, probability, attempt_id\n\n logger.info('Match selected')\n\n song_id = int(df_song_matches['P'].idxmax())\n song_info = dbh.get_formatted_song_info(song_id)\n\n update_prediction = '''UPDATE {} SET predicted_song_id=? WHERE id=?;'''.format(TABLE_NAMES.MATCH_ATTEMPT)\n dbh.cur.execute(update_prediction, (song_id, attempt_id))\n return song_info, probability, attempt_id",
"def lookup_fp(fingerprint, duration):\n import acoustid\n mb_json = acoustid.lookup(mb_apikey, fingerprint, duration)\n # # lookup(apikey, fingerprint, duration): Make a request to the\n # Acoustid API to look up the fingerprint returned by the\n # previous function. An API key is required, as is the length,\n # in seconds, of the source audio. Returns a parsed JSON\n # response.\n result = acoustid.parse_lookup_result(mb_json)\n # # parse_lookup_result(data): Given a parsed JSON response, return\n # an iterator over tuples containing the match score (a float\n # between 0 and 1), the MusicBrainz recording ID, title, and\n # artist name for each match\n return result",
"def query(self, query_hash: str) -> t.List[IndexMatch[IndexT]]:\n features = prepare_vpdq_feature(query_hash, self.quality_threshold)\n if not features:\n return []\n results = self.index.search_with_distance_in_result(\n features, VPDQ_DISTANCE_THRESHOLD\n )\n query_matched: t.Dict[int, t.Set[str]] = {}\n index_matched: t.Dict[int, t.Set[int]] = {}\n matches: t.List[IndexMatch[IndexT]] = []\n for hash in results:\n for match in results[hash]:\n # query_str => (matched_idx, distance)\n vpdq_match, entry_list = self._index_idx_to_vpdqHex_and_entry[match[0]]\n for entry_id in entry_list:\n if entry_id not in query_matched:\n query_matched[entry_id] = set()\n query_matched[entry_id].add(hash)\n\n if entry_id not in index_matched:\n index_matched[entry_id] = set()\n index_matched[entry_id].add(vpdq_match)\n for entry_id in query_matched.keys():\n query_matched_percent = len(query_matched[entry_id]) * 100 / len(features)\n index_matched_percent = (\n len(index_matched[entry_id])\n * 100\n / len(self._entry_idx_to_features_and_entries[entry_id][0])\n )\n if (\n query_matched_percent >= self.query_match_threshold_pct\n and index_matched_percent >= self.index_match_threshold_pct\n ):\n matches.append(\n IndexMatch(\n VPDQSimilarityInfo(\n query_matched_percent, index_matched_percent\n ),\n self._entry_idx_to_features_and_entries[entry_id][1],\n )\n )\n return matches",
"def fuzzy_matcher(mapper, favorite_movie, verbose=True):\n match_tuple = []\n # geting our match\n for title, index in mapper.items():\n ratio = fuzz.ratio(title.lower(), favorite_movie.lower())\n if ratio >= 60:\n match_tuple.append((title, index, ratio))\n # sorting\n match_tuple = sorted(match_tuple, key=lambda x: x[2])[::-1]\n if not match_tuple:\n print('Uh-Oh! Something went wrong on our end, please resubmit entry')\n return\n if verbose:\n print('Top ten similar matches: {0}\\n'.format(\n [x[0] for x in match_tuple]))\n return match_tuple[0][1]",
"def _avg_matches(self, test_target_matches_counts, num):\n avg_total = []\n avg_sdgs = {}\n for i in range(1, 6):\n avg_sdgs[i] = []\n for i in range(num):\n adder, counter = 0, 0\n adder_sdgs = [0, 0, 0, 0, 0]\n counter_sdgs = [0, 0, 0, 0, 0]\n for key in self._matches_by_sent:\n try:\n adder += (self._matches_by_sent[key][i] * test_target_matches_counts[key])\n counter += test_target_matches_counts[key]\n adder_sdgs[int(key[0])-1] += (self._matches_by_sent[key][i] * test_target_matches_counts[key])\n counter_sdgs[int(key[0])-1] += test_target_matches_counts[key]\n except:\n adder += (self._matches_by_sent[key][-1] * test_target_matches_counts[key])\n counter += test_target_matches_counts[key]\n adder_sdgs[int(key[0])-1] += (self._matches_by_sent[key][-1] * test_target_matches_counts[key])\n counter_sdgs[int(key[0])-1] += test_target_matches_counts[key]\n avg_total.append(adder / counter)\n for j in range(1, 6):\n avg_sdgs[j].append(adder_sdgs[j-1]/counter_sdgs[j-1])\n return avg_total, avg_sdgs",
"def findLockerBestMatch(song, library, index):\n\n matches = defaultdict(int)\n length, title = song\n for word in set(title.lower().split()):\n if len(word) < FLAGS.shortword:\n continue\n if word in index:\n for songid, val in index[word].iteritems():\n matches[songid] += val\n\n def _l(sid):\n return int(library[sid]['durationMillis'])/1000\n\n matches = sorted(matches.iteritems(), key=lambda a: a[1])[-5:]\n matches2 = []\n for songid, _ in matches:\n # check song length\n lenscore = (_l(songid) - length)**2\n\n # check title length\n titlescore = abs(\n len(title) -\n len(library[songid]['artist'] + library[songid]['title']))\n\n if lenscore + titlescore > FLAGS.local_match:\n continue\n\n matches2.append((lenscore+titlescore, songid))\n\n matches2 = sorted(matches2)\n\n if FLAGS.debug_locker:\n print 'Matches for %s (%d):' % (title, length)\n\n def _t(sid):\n return u'{artist} / {album} / {title}'.format(**library[sid])\n\n for score, sid in matches2:\n print ' %s (%d) = %d' % (_t(sid), _l(sid), score)\n\n print ''\n\n if matches2:\n return library[matches2[0][1]]\n else:\n return None",
"def find_matches(samples, geno_db, unique):\n mapper = {}\n matches = {}\n for hash, offset in samples:\n mapper[hash] = offset\n for h in mapper.keys():\n for g in geno_db:\n if h in geno_db[g]:\n offset = geno_db[g][h]\n if g not in matches:\n matches[g] = [] \n matches[g].append((offset - mapper[h], offset, mapper[h])) \n diff_counter = {}\n largest = 0\n largest_count = 0\n geno_id = []\n for gid in matches:\n for tup in matches[gid]:\n diff_exact, offset, fan_time = tup\n diff = round(diff_exact/200) #round after exact matching to reference but before attempting to find consistent offsets on both strands\n if diff not in diff_counter:\n diff_counter[diff] = {}\n if gid not in diff_counter[diff]:\n diff_counter[diff][gid] = 0\n diff_counter[diff][gid] += 1\n if diff_counter[diff][gid] > largest_count:\n largest = diff\n largest_count = diff_counter[diff][gid]\n geno_id = [gid]\n elif diff_counter[diff][gid] == largest_count:\n geno_id.append(gid)\n if unique and len(geno_id) >1: \n return ([], -1, {})\n return (geno_id, largest_count, diff_counter)",
"def get_best_n_matches(self, query_interps, num_matches, metrics=None):\n\n self._debug_print('Finding top %d matches from the %d templates...' % (num_matches, len(self._templates)))\n\n # start timer\n start_time = time.time()\n\n results = []\n for interp, template in itertools.product(query_interps, self._templates):\n match_score, phrase_match_mapping = self._calculate_match_score(\n template.template_tokenized, interp.interpreted_query_tokens)\n results.append(TemplateMatchResult(template, interp, match_score, phrase_match_mapping))\n\n top_matches = heapq.nlargest(num_matches, results, key=lambda x: x.match_score)\n\n if self._debug:\n for match in top_matches:\n print match\n\n # calculate time spent\n if metrics is not None:\n elapse = time.time() - start_time\n metrics[self.__class__.__name__] = elapse\n\n return top_matches"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Read in an audio file, calculate its landmarks, and query them against the hash table. Return the top N matches as (id, filteredmatchcount, timeoffs, rawmatchcount), plus the length of the input file in seconds and the count of raw query hashes extracted
|
def match_file(ht, filename, density=None, sr=11025, n_fft=512, n_hop=256, window=1, shifts=4, verbose=False):
hq = audfprint.wavfile2hashes(filename, sr=sr, density=density,
n_fft=n_fft, n_hop=n_hop, shifts=shifts)
# Fake durations as largest hash time
if len(hq) == 0:
durd = 0.0
else:
durd = float(n_hop * hq[-1][0])/sr
if verbose:
print "Analyzed",filename,"of",('%.3f'%durd),"s to",len(hq),"hashes"
# Run query
return match_hashes(ht, hq, window=window), durd, len(hq)
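
A hypothetical usage sketch of match_file; the hash table ht is assumed to have been built and populated elsewhere with audfprint, and the driver function name is illustrative.

def print_best_match(ht, query_path):
    # results is the shortresults list of (id, filteredcount, timeoffset, rawcount)
    results, dur, nhash = match_file(ht, query_path, window=1)
    if results:
        tid, filtcount, timeoffs, rawcount = results[0]
        print("Best match: id=%d (%d of %d raw hash matches, offset %d)"
              % (tid, filtcount, rawcount, timeoffs))
    else:
        print("No match for %s (%.1f s, %d hashes)" % (query_path, dur, nhash))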
|
[
"def analyze_file(filename):\n bps = 0\n seqs = 0\n input_iter = khmer.ReadParser(filename)\n unique = {}\n for k in (21, 31, 51):\n unique[k] = khmer.HLLCounter(ksize=k)\n for record in input_iter:\n bps += len(record.sequence)\n seqs += 1\n# for hll in unique.values():\n# hll.consume_string(record.sequence)\n input_iter.close()\n for hll in unique.values():\n hll.consume_seqfile(filename)\n return bps, seqs, unique",
"def return_matches(self, hashes: List[Tuple[str, int]],\n batch_size: int = 1000) -> Tuple[List[Tuple[int, int]], Dict[int, int]]:\n # Create a dictionary of hash => offset pairs for later lookups\n mapper = {}\n for hsh, offset in hashes:\n if hsh.upper() in mapper.keys():\n mapper[hsh.upper()].append(offset)\n else:\n mapper[hsh.upper()] = [offset]\n\n values = list(mapper.keys())\n\n # in order to count each hash only once per db offset we use the dic below\n dedup_hashes = {}\n\n results = []\n with self.cursor() as cur:\n for index in range(0, len(values), batch_size):\n # Create our IN part of the query\n query = self.SELECT_MULTIPLE % ', '.join([self.IN_MATCH] * len(values[index: index + batch_size]))\n\n cur.execute(query, values[index: index + batch_size])\n\n for hsh, sid, offset in cur:\n if sid not in dedup_hashes.keys():\n dedup_hashes[sid] = 1\n else:\n dedup_hashes[sid] += 1\n # we now evaluate all offset for each hash matched\n for audio_sampled_offset in mapper[hsh]:\n results.append((sid, offset - audio_sampled_offset))\n\n return results, dedup_hashes",
"def search_match_1(snip_fgp1):\n conn = psycopg2.connect(host=\"sculptor.stat.cmu.edu\", database=c.DB_USER,\n user=c.DB_USER, password=c.DB_PASSWORD)\n cur = conn.cursor()\n cur.execute(\"SELECT song_id FROM songs\")\n uniq_id = cur.fetchall()\n uniq_id = reduce(np.append, uniq_id)\n \n tolerance = 10**(-3) # this is the default tolerance level, tuned\n \n matching_cnt = []\n window_num = []\n \n for song_id in uniq_id:\n distance = retriv_fgp1(int(song_id), snip_fgp1)\n matching_cnt.append(np.sum(distance<=tolerance))\n window_num.append(len(distance))\n \n # This is the new criterion: must have more than 10% similarity of a song\n # in the database - considered different lengths of songs\n similarity_lst = list(map(lambda i,j: i/j > 0.1, matching_cnt, window_num))\n matched_idx = [i for i,val in enumerate(similarity_lst) if val==True]\n matched_sid = [uniq_id[i] for i in matched_idx]\n \n if matched_sid == []:\n sm_logger.info('Oops, we try hard but find nothing...')\n return None\n else:\n possible_lst = []\n for i in matched_sid:\n possible_lst.append(retriv_name(int(i)))\n sm_logger.info('Found some songs matched the snippet!')\n return possible_lst",
"def samples(max = 10000):\n list_results = []\n with open(\"data/key.json\", \"r\") as file: # Requires web api key to be saved in a file named \"key\" with quotes around the key \n key = json.load(file)\n i = 0\n while(len(list_results) != max):\n try:\n results = dota2api.Initialise(key).get_match_history_by_seq_num(start_at_match_seq_num=(2416543502-(100*i)))\n for j in range(len(results[\"matches\"])):\n if(results[\"matches\"][j][\"duration\"] > 900 and results[\"matches\"][j][\"duration\"] < 3600 and results[\"matches\"][j][\"game_mode\"] == 22):\n if(len(list_results) == max):\n print(\"Match threshold acquired, saving file...\")\n break\n else:\n list_results.append(results[\"matches\"][j])\n i += 1\n print(\"Analyzed %d matches; using %d.\" % (i*100, len(list_results)))\n except:\n pass\n\n with open(\"data/matchdata.json\", \"w\") as file:\n json.dump(list_results, file)\n file.close()",
"def align_matches(self, matches):\n\t\t# align by diffs\n\t\tdiff_counter = {}\n\t\tlargest = 0\n\t\tlargest_count = 0\n\t\tsong_id = -1\n\t\tfor tup in matches:\n\t\t\tsid, diff = tup\n\t\t\tif diff not in diff_counter:\n\t\t\t\tdiff_counter[diff] = {}\n\t\t\tif sid not in diff_counter[diff]:\n\t\t\t\tdiff_counter[diff][sid] = 0\n\t\t\tdiff_counter[diff][sid] += 1\n\n\t\t\tif diff_counter[diff][sid] > largest_count:\n\t\t\t\tlargest = diff\n\t\t\t\tlargest_count = diff_counter[diff][sid]\n\t\t\t\tsong_id = sid\n\n\t\tsong = self.db.get_song_by_id(song_id)\n\t\t#print largest_count\n\t\t# return match info\n\t\tnseconds = round(float(largest) / self.config.get('fingerprint').get('samplerate') *\n\t\t\t\t\t\t self.config.get('fingerprint').get('window_size') *\n\t\t\t\t\t\t self.config.get('fingerprint').get('overlap_ratio'), 5)\n\t\tif song is None:\n\t\t\treturn None\n\t\t# self.log_event()\n\t\tsong = {\n\t\t\tDejavu.SONG_ID: song_id,\n\t\t\tDejavu.SONG_NAME: songname,\n\t\t\tDejavu.CONFIDENCE: largest_count,\n\t\t\tDejavu.OFFSET: int(largest),\n\t\t\tDejavu.OFFSET_SECS: nseconds,\n\t\t\tDatabase.FIELD_FILE_SHA1: song.get(Database.FIELD_FILE_SHA1, None), }\n\t\tself.log_match(song_id, largest_count, int(largest), nseconds)\n\t\treturn song",
"def find_most_similar(query,n_results, start = None, end = None, artist = None, relevance_feedback=True):\n #Define used global variables\n global vectorizer, tf_idf, annotation_to_text, annotation_to_song, annotation_to_fragment,song_to_name\n\n #vectorize query\n query_vector = vectorizer.transform([query])\n\n #find cosine similarities and the indices of related docs\n cosine_similarities = linear_kernel(query_vector, tf_idf).flatten()\n related_docs_indices = cosine_similarities.argsort()[-n_results:]\n\n if relevance_feedback:\n #psueodo-rel feedback take top 4 centroid\n top4_doc_ids = related_docs_indices[:4]\n for doc_id in top4_doc_ids:\n query_vector += tf_idf[doc_id] / len(top4_doc_ids)\n # do search again with transformed query\n cosine_similarities = linear_kernel(query_vector, tf_idf).flatten()\n related_docs_indices = cosine_similarities.argsort()[-n_results:]\n\n\n #find highest similarity scores\n sim_scores = cosine_similarities[related_docs_indices]\n\n #find ids of most similar annotations\n annotation_ids = [index_to_id[index] for index in related_docs_indices] #can later be used to find lyric fragment maybe\n\n # group them by songs\n song_id_to_annotations = {}\n max_sim_sum = 0\n max_song_page_views = 0\n for annotation_id, sim_score in zip(annotation_ids, sim_scores):\n song_id = annotation_to_song[annotation_id]\n if sim_score < 0.1 or should_filter(start, end, artist, song_id):\n continue\n if song_id not in song_id_to_annotations:\n song_id_to_annotations[song_id] = []\n song_id_to_annotations[song_id].append((annotation_id, sim_score))\n song_id_to_annotations[song_id].sort(key=lambda x: x[1], reverse=True)\n max_sim_sum = max(\n max_sim_sum,\n reduce(\n lambda acc, x: acc + x[1],\n song_id_to_annotations[song_id],\n 0,\n )\n )\n max_song_page_views = max(max_song_page_views,\n all_songs[song_id]['page_views'])\n\n print(\"max_song_page_views\", max_song_page_views)\n print(\"max_sim_sum\", max_sim_sum)\n\n result = []\n for song_id in song_id_to_annotations:\n song = {}\n song['id'] = song_id\n song[\"song\"] = all_songs[song_id][\"title\"]\n song[\"artist\"] = all_songs[song_id][\"artists_names\"]\n song[\"image\"] = all_songs[song_id][\"header_image_url\"]\n if not all_songs[song_id][\"album\"] == None:\n song[\"album\"] = all_songs[song_id][\"album\"][\"full_title\"]\n else:\n song[\"album\"] = \"No album found\"\n song['release_date'] = all_songs[song_id]['release_date']\n\n\n song[\"annotations\"] = [\n {'text':annotation_to_text[aid],\n 'similarity': score,\n 'lyric': annotation_to_fragment[aid]\n }\n for aid, score in song_id_to_annotations[song_id]\n ]\n\n # TODO take into page_views (need to normalize though before weighting)\n song['page_views'] = max(all_songs[song_id]['page_views'], 0)\n\n # score calculation\n similarity_sum_normalized = reduce(\n lambda acc, x: acc + x[1],\n song_id_to_annotations[song_id],\n 0,\n )/max_sim_sum\n page_views_normalized = song['page_views'] / max_song_page_views\n\n song['score'] = round(.8 * similarity_sum_normalized + .2 * page_views_normalized, 2)\n\n result.append(song)\n\n result.sort(key = lambda x : x['score'], reverse = True)\n return result",
"def find_matches(filename, e):\n best_matches = {}\n with open(filename) as f:\n try:\n for record in NCBIXML.parse(f):\n best = {}\n if record.alignments:\n for alignment in record.alignments:\n genome = extract_id(alignment.hit_def)\n locus = extract_locus(alignment.hit_def)\n \n best_value = e\n for hsp in alignment.hsps:\n if hsp.expect < best_value:\n best_value = hsp.expect\n \n if genome not in best:\n best[genome] = []\n \n best[genome].add((locus, best_value))\n\n best_matches[extract_full_id(record.query)] = best\n\n except ValueError as e:\n return None\n\n return best_matches",
"def parse_hitfile(self):\n\n self.hits_df = []\n with open(self.fname, \"r\") as fhandle:\n for line in fhandle:\n if line[0] != \"#\":\n line = line[20:].split()\n self.hits_df.append([line[-6], line[-5], line[-4], line[-3], line[-2], line[-1]])\n self.hits_df = pd.DataFrame(self.hits_df)\n self.hits_df.columns = [\"qscore\", \"rmsd\", \"seq_id\", \"n_align\", \"n_res\", \"fname\"]",
"def top_files(query, files, idfs, n):\n # Initialize empty files ranks dictionary\n files_ranks = dict()\n \n # Iterate over files\n for filename in files:\n # Initialize file rank value to 0\n file_rank = 0\n \n # Iterate over words in query\n for word in query:\n # Count it's appearences in file\n appearences = files[filename].count(word)\n \n # If word appeared at least once\n if appearences:\n # Add it's TF-IDF value to file rank value\n file_rank += appearences * idfs[word]\n \n # Store file rank value in files ranks dictionary\n files_ranks[filename] = file_rank\n \n # Sort filenames from files ranks dictionary by their values from bigger to smaller\n filenames = sorted(files_ranks, reverse=True, key=lambda f: files_ranks[f])\n \n # Return first 'n' filenames\n return filenames[:n]",
"def top_files(query, files, idfs, n):\n file_freq = dict()\n for a in files:\n frequencies = dict()\n for word in files[a]:\n if word not in frequencies:\n frequencies[word] = 1\n else:\n frequencies[word] += 1\n file_freq[a] = frequencies\n tfidfs = dict()\n for filename in files:\n tfidfs[filename] = []\n for word in files[filename]:\n tf = file_freq[filename][word]\n tfidfs[filename].append((word, tf * idfs[word]))\n op = dict()\n for filename in tfidfs:\n op[filename] = []\n for j in tfidfs[filename]:\n if j[0] in query and j not in op[filename]:\n op[filename].append(j)\n\n sum_tfidf = dict()\n\n for f in op:\n sum_tfidf[f] = sum([i[1] for i in op[f]])\n # temp = Counter(sum_tfidf)\n # print('most_common', temp.most_common(n))\n res = nlargest(n, sum_tfidf, key=sum_tfidf.get)\n return res",
"def library_match(spectra_list,lib_mgf,precursor_tol=1.0,cosine=0.7,n_peaks=3):\n\n \n library=load_from_mgf(lib_mgf)\n\n # Apply filters to clean and enhance each spectrum\n library_spectra = []\n for spectrum in library:\n # spectrum = default_filters(spectrum)\n # Scale peak intensities to maximum of 1\n spectrum = normalize_intensities(spectrum)\n library_spectra.append(spectrum)\n\n\n scores = calculate_scores(references=library_spectra,\n queries=spectra_list,\n similarity_function=CosineHungarian())\n\n scores_list=[]\n for score in scores:\n print(score)\n scores_list.append(score)\n \n scores_list.sort(reverse=True,key=lambda tuple:tuple[2])\n\n\n \n \n\n\n\n # if reference != query and n_matching >= 20:\n\n # for test_spectra in spectra_list:\n # pos=bisect.bisect(library_sort,test_spectra)\n # matches=[]\n # for lib in library_sort[pos-2:pos+2]:\n # score,peaks=cosine_score_max(test_spectra,lib,modified=False,precursor_tolerance=precursor_tol)\n # if score>=cosine and peaks>=n_peaks:\n # matches.append((score,peaks,lib))\n \n # if len(matches)>0:\n # #sort possible library matches by cosine score\n # matches.sort(reverse=True,key=lambda tuple: tuple[0])\n # #use parameters of spectrum match with highest cosine score\n # test_spectra.library_parameters=matches[0][2].parameters",
"def ranking(query_embedds, target_embedds, img_ids, file_name):\n print(query_embedds.shape)\n print(target_embedds.shape)\n\n\n cos_sim = cosine_similarity(query_embedds, target_embedds)\n print(cos_sim.shape)\n\n top20 = np.argsort(cos_sim, axis=1)[:, ::-1][:, :20]\n # top20 = idx.cpu().numpy()\n\n print(top20.shape)\n img_ids = np.array(img_ids)\n count = 0\n with open(file_name, 'w') as f:\n f.write(\"Descritpion_ID,Top_20_Image_IDs\\n\")\n for i, img_id in enumerate(img_ids):\n top_imgs = img_ids[top20[i]]\n top_imgs_str = \" \".join(list(top_imgs))\n text_id = img_id.split(\".\")[0]+\".txt\"\n f.write(text_id+\",\"+top_imgs_str+\"\\n\")\n if img_id in list(top_imgs):\n count+=1\n print(\"count\", count)",
"def wavfile2hashes(self, filename):\n ext = os.path.splitext(filename)[1]\n if ext == PRECOMPEXT:\n # short-circuit - precomputed fingerprint file\n hashes = hashes_load(filename)\n dur = np.max(hashes, axis=0)[0] * self.n_hop / self.target_sr\n # instrumentation to track total amount of sound processed\n self.soundfiledur = dur\n self.soundfiletotaldur += dur\n self.soundfilecount += 1\n else:\n peaks = self.wavfile2peaks(filename, self.shifts)\n if len(peaks) == 0:\n return []\n # Did we get returned a list of lists of peaks due to shift?\n if isinstance(peaks[0], list):\n peaklists = peaks\n query_hashes = []\n for peaklist in peaklists:\n query_hashes.append(landmarks2hashes(\n self.peaks2landmarks(peaklist)))\n query_hashes = np.concatenate(query_hashes)\n else:\n query_hashes = landmarks2hashes(self.peaks2landmarks(peaks))\n\n # Remove duplicates by merging each row into a single value.\n hashes_hashes = (((query_hashes[:, 0].astype(np.uint64)) << 32)\n + query_hashes[:, 1].astype(np.uint64))\n unique_hash_hash = np.sort(np.unique(hashes_hashes))\n unique_hashes = np.hstack([\n (unique_hash_hash >> 32)[:, np.newaxis],\n (unique_hash_hash & ((1 << 32) - 1))[:, np.newaxis]\n ]).astype(np.int32)\n hashes = unique_hashes\n # Or simply np.unique(query_hashes, axis=0) for numpy >= 1.13\n\n # print(\"wavfile2hashes: read\", len(hashes), \"hashes from\", filename)\n return hashes",
"def find_matches(samples, geno_db, unique):\n mapper = {}\n matches = {}\n for hash, offset in samples:\n mapper[hash] = offset\n for h in mapper.keys():\n for g in geno_db:\n if h in geno_db[g]:\n offset = geno_db[g][h]\n if g not in matches:\n matches[g] = [] \n matches[g].append((offset - mapper[h], offset, mapper[h])) \n diff_counter = {}\n largest = 0\n largest_count = 0\n geno_id = []\n for gid in matches:\n for tup in matches[gid]:\n diff_exact, offset, fan_time = tup\n diff = round(diff_exact/200) #round after exact matching to reference but before attempting to find consistent offsets on both strands\n if diff not in diff_counter:\n diff_counter[diff] = {}\n if gid not in diff_counter[diff]:\n diff_counter[diff][gid] = 0\n diff_counter[diff][gid] += 1\n if diff_counter[diff][gid] > largest_count:\n largest = diff\n largest_count = diff_counter[diff][gid]\n geno_id = [gid]\n elif diff_counter[diff][gid] == largest_count:\n geno_id.append(gid)\n if unique and len(geno_id) >1: \n return ([], -1, {})\n return (geno_id, largest_count, diff_counter)",
"def filter_aol_queries(aol_data_dir, filtered_queries_dir):\n nav_query_substr = ['http', 'www.', '.com', '.net', '.org', '.edu']\n\n unique_queries = set()\n filtered_nav_queries = set()\n count_total_lines = 0\n for filename in os.listdir(aol_data_dir):\n print(filename)\n uniq_q_file = set()\n filtered_nav_q_file = set()\n with gzip.open(aol_data_dir + filename, 'rb') as f:\n count = 0\n for line in f:\n count = count + 1\n if count == 1:\n continue\n\n line = line.strip().split(\"\\t\")\n query = line[1]\n unique_queries.add(query)\n uniq_q_file.add(query)\n if not any(substr in query for substr in nav_query_substr):\n filtered_nav_queries.add(query)\n filtered_nav_q_file.add(query)\n print('No. of lines read %d' % count)\n count_total_lines = count_total_lines + count\n percentage = len(filtered_nav_q_file) * 100 / len(uniq_q_file)\n print('No. of unique queries in file({}) : {}/{} {:.2f}'.format(filename, len(filtered_nav_q_file),\n len(uniq_q_file), percentage))\n\n print('Total no. of lines read %d' % count_total_lines)\n total_percentage = len(filtered_nav_queries) * 100 / len(unique_queries)\n print('Percentage of filtered queries in total: {}/{} {:.2f}'.format(len(filtered_nav_queries), len(unique_queries),\n total_percentage))\n with gzip.open(filtered_queries_dir + 'queries.txt.gz', 'w') as fout:\n for query in filtered_nav_queries:\n print(query, file=fout)",
"def read_matches_files(data_dir, dataset_name):\n matches = []\n with open(os.path.join(data_dir, dataset_name + '_match.txt'), 'r') as f:\n for line in f:\n l = line.split()\n matches.append([int(l[0]), int(l[2]), int(l[1] == l[3])])\n return torch.LongTensor(matches)",
"def scan(folder, amp_threshold, plot):\r\n files = get_files(Path(folder))\r\n length_s = 0\r\n length_s_above_threshold = 0\r\n\r\n print(f'{len(files)} have been found and will be analyzed.')\r\n\r\n for wavfile in files:\r\n analysis = analyze_wav(wavfile, amp_threshold, plot)\r\n length_s += analysis['length_s']\r\n length_s_above_threshold += analysis['length_s_above_threshold']\r\n\r\n print(f'{len(files)} files have been analyzed')\r\n print(f'Overall Length: {round(length_s)} s / {round(length_s/60)} m / {round(length_s/60/60)} h')\r\n print(f'Overall Length (above Treshold (amp > {amp_threshold})): {round(length_s_above_threshold)} s / {round(length_s_above_threshold/60)} m')",
"def load_BLAST_results( blast_result_file, match_sim_cutoff, match_len_cutoff, unique_score_ratio ):\n\t\n\t# --- load raw hits --- #\n\tblast_hits = {}\n\twith open( blast_result_file, \"r\" ) as f:\n\t\tline = f.readline()\n\t\twhile line:\n\t\t\tparts = line.strip().split('\\t')\n\t\t\tif int( parts[3] ) >= match_len_cutoff:\n\t\t\t\tif float( parts[2] ) >= match_sim_cutoff:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tblast_hits[ parts[0] ].append( { 'LG': parts[0].split('_%_')[0], 'cm': float( parts[0].split('_%_')[1] ), 'chr': parts[1], 'pos': ( int( parts[8] ) + int( parts[9] ) ) / 2.0, 'score': float( parts[-1] ) } )\n\t\t\t\t\texcept KeyError:\n\t\t\t\t\t\tblast_hits.update( { parts[0]: [ { 'LG': parts[0].split('_%_')[0], 'cm': float( parts[0].split('_%_')[1] ), 'chr': parts[1], 'pos': ( int( parts[8] ) + int( parts[9] ) ) / 2.0, 'score': float( parts[-1] ) } ] } )\n\t\t\tline = f.readline()\n\t\n\t# --- screen and clean --- #\n\tfinal_hits = []\n\tfor hits in blast_hits.values():\n\t\tif len( hits ) == 1:\n\t\t\tfinal_hits.append( hits[0] )\n\t\telse:\n\t\t\tsorted_hits = sorted( hits, key=itemgetter( 'score' ) )\n\t\t\tif sorted_hits[-2]['score'] / sorted_hits[-1]['score'] <= unique_score_ratio:\n\t\t\t\tfinal_hits.append( sorted_hits[-1] )\n\treturn final_hits",
"def get_lm_matched_docs(query, searcher, qparser, topk=2000):\n #did_dict = {}\n dids = []\n scores = []\n query = qparser.parse(query)\n # searcher.setSimilarity(LMDirichletSimilarity())\n scoreDocs = searcher.search(query, topk).scoreDocs\n # print(\"Found %d document(s) that matched query '%s':\" % (len(scoreDocs), query))\n\n for scoreDoc in scoreDocs:\n if len(dids) > 1000:\n break\n\n doc = searcher.doc(scoreDoc.doc)\n did = doc.get(\"id\")\n\n if check_if_spam(did):\n continue\n #text = doc.get(\"raw\")\n #did_dict[did] = {}\n #did_dict[did]['text'] = text\n #did_dict[did]['score'] = scoreDoc.score\n dids.append(did)\n scores.append(scoreDoc.score)\n\n return dids, scores"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Show the query fingerprints and the matching ones plotted over a spectrogram
|
def illustrate_match(ht, filename, density=None, sr=11025, n_fft=512, n_hop=256, window=1, shifts=4):
# Make the spectrogram
d, sr = librosa.load(filename, sr=sr)
    S = np.abs(librosa.stft(d, n_fft=n_fft, hop_length=n_hop,
                            window=np.hanning(n_fft+2)[1:-1]))
S = 20.0*np.log10(np.maximum(S, np.max(S)/1e6))
S = S - np.max(S)
librosa.display.specshow(S, sr=sr,
y_axis='linear', x_axis='time',
cmap='gray_r', vmin=-80.0, vmax=0)
# Do the match
hq = audfprint.wavfile2hashes(filename, sr=sr, density=density,
n_fft=n_fft, n_hop=n_hop, shifts=shifts)
# Run query, get back the hashes for match zero
results, matchhashes = match_hashes(ht, hq, hashesfor=0, window=window)
# Convert the hashes to landmarks
lms = audfprint.hashes2landmarks(hq)
mlms = audfprint.hashes2landmarks(matchhashes)
# Overplot on the spectrogram
plt.plot(np.array([[x[0], x[0]+x[3]] for x in lms]).T,
np.array([[x[1],x[2]] for x in lms]).T,
'.-g')
plt.plot(np.array([[x[0], x[0]+x[3]] for x in mlms]).T,
np.array([[x[1],x[2]] for x in mlms]).T,
'.-r')
# Add title
plt.title(filename + " : Matched as " + ht.names[results[0][0]]
+ (" with %d of %d hashes" % (len(matchhashes), len(hq))))
# Display
plt.show()
# Return
return results
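
A hypothetical driver for the plot above; ht is assumed to be a populated audfprint hash table exposing a names list, and query_path any audio file readable by librosa.

def show_match(ht, query_path):
    # Plots the spectrogram with query (green) and matched (red) landmarks overlaid
    results = illustrate_match(ht, query_path, window=1)
    if results:
        tid, filtcount = results[0][0], results[0][1]
        print("Matched", ht.names[tid], "with", filtcount, "filtered hash matches")
    return results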
|
[
"def plotSpectrogram(self):\n\n\t\t#max freq represetnedf (nyquist constrained)\n\t\timgHeight = self.sampleRate/2\n\n\t\tself.p.y_range.end = imgHeight * self.numChannels\n\n\t\timgWidth = self.signalDuration\n\t\tself.p.x_range.end = imgWidth\n\n\t\tfor channelNum in self.activeChannels:\n\t\t\tchannelSignal = self.signal[:,channelNum]\n\n\t\t\tfreqs,times,data = self.log_specgram(channelSignal,self.sampleRate)\n\n\t\t\tself.p.image(image=[data], x=0, y=imgHeight*channelNum, dw=imgWidth, dh=imgHeight, palette=\"Spectral11\")",
"def get_spectrogram_data(frame_rate, np_frames):\n # Set format details for plot.\n #fig = plt.figure(num=None, figsize=(12, 7.5), dpi=300)\n #ax = fig.add_subplot(111)\n #ax.xaxis.set_major_locator(ticker.MultipleLocator(1))\n #ax.xaxis.set_minor_locator(ticker.MultipleLocator(0.1))\n #ax.yaxis.set_major_locator(ticker.MultipleLocator(2000))\n #ax.yaxis.set_minor_locator(ticker.MultipleLocator(500))\n #ax.tick_params(axis='both', direction='inout')\n #plt.title(f\"Spectrogram of:\\n{input_file}\")\n plt.title(f\"Spectrogram\")\n plt.xlabel('Time (seconds)')\n plt.ylabel('Frequency (Hz)')\n\n # If NFFT is too high, then there the horizontal (frequency) resolution is\n # too fine, and there are multiple bands for each formant. However, if\n # NFFT is too low, then the whole image is rather blurry and even the\n # formants are not well differentiated (i.e. at the default vaules for NFFT\n # and noverlap). noverlap that is half of NFFT seems to minimize background\n # noise, as well.\n noverlap = 128 # default: 128; other: 256\n NFFT = 256 # default: 256; other: 512\n\n # Create the plot.\n spectrum, frequencies, times, img = plt.specgram(\n np_frames,\n Fs=frame_rate,\n cmap='gnuplot',\n noverlap=noverlap,\n NFFT=NFFT,\n )\n return spectrum, frequencies, times, img",
"def plot_spectrogram(self):\n f, t, sxx = self.compute_spectrogram()\n plt.pcolormesh(t, f, sxx)\n plt.ylabel('Frequency [Hz]')\n plt.xlabel('Time (sec)')\n plt.show()",
"def spectrogram(samples):\n S, freqs, times = mlab.specgram(samples, NFFT=4096, Fs=44100,\n window=mlab.window_hanning,\n noverlap=(4096 // 2))\n return S, freqs, times",
"def plot_all_spectra(self):\n self.plot_gamma_spectra()\n self.plot_TAC_spectra()",
"def plot_shard_vs_xcorr_tel(db, shift, shard):\n\n spectrum = next(iter(shard.spectra.values())) #only one spectrum in shard\n\n db_spectrum = np.ones(len(spectrum.log_y))\n for record in db:\n px = record[dbi.PX_IND] + shift\n if record[dbi.ORD_IND] == shard.order and shard.lo_px <= px and px < shard.hi_px:\n db_spectrum[px - shard.lo_px] = np.exp(record[dbi.INT_IND])\n\n fig = plt.figure(facecolor = 'white')\n plt.plot(spectrum.lin_x, np.exp(spectrum.log_y), color='purple', label='CHIRON Spectrum')\n plt.plot(spectrum.lin_x, db_spectrum, label='Telluric Spectrum')\n plt.title(\"Order {} px {}-{}, spectrum and xcorr, unscaled telluric model\".format(shard.order, \n shard.lo_px,\n shard.hi_px))\n plt.xlabel(\"Wavelength (Angstroms)\")\n plt.ylabel(\"Signal strength\")\n plt.tight_layout()\n plt.legend()\n plt.show()",
"def displaySpectra(spectra):\n \n colList = ['r', 'g', 'b', 'm', 'c', 'y', 'k']\n for idx, spectrum in enumerate(spectra):\n #assign color\n c = colList[idx % len(colList)]\n plt.plot(spectrum[:,0], spectrum[:,1], c)\n \n plt.show()",
"def display_query(query, index): # pragma: no cover\n QueryPlot(query, index)",
"def spectrogram_plot(self, spectrogram_image, audio_index=5) -> None:\n # Now plot for a test\n\n plt.figure(figsize=(4, 12))\n for points in range(self._temporal_point):\n plt.subplot(self._temporal_point, 1, points + 1)\n im = plt.imshow(spectrogram_image[audio_index][points, :, :, 0], cmap=plt.get_cmap('jet'))\n plt.axis('off')\n plt.colorbar(im)\n plt.tight_layout()\n plt.subplots_adjust(wspace=0, hspace=0.5)\n plt.savefig('Spectrogram1.png')\n plt.close()",
"def plot_spectra(self, **kwargs):\n import matplotlib.pyplot as plt\n\n fig, ax = plt.subplots()\n ax.set(xlabel=\"Wavelength ($\\\\AA$)\", ylabel=\"Intensity (a.u.)\")\n\n for spinch in [self.dd, self.du, self.ud, self.uu]:\n if spinch is None:\n continue\n x = spinch.processed_spectrum[\"m_lambda\"][0]\n y = spinch.processed_spectrum[\"m_spec\"][0]\n yerr = spinch.processed_spectrum[\"m_spec_sd\"][0]\n ax.errorbar(x, y, yerr, label=spinch.cat.sample_name)\n return fig, ax",
"def plot_similarity_matrix(self, filename=None):\n matplotlib.rcParams['text.usetex'] = True\n matrix, queries, all_sounds = self.similarity_matrix()\n fig = plt.figure(figsize=(10, 4))\n ax = fig.add_subplot(111)\n cax = ax.matshow(matrix, cmap='YlOrRd', vmin=0, vmax=5,\n extent=[0, 150, 0, 10], aspect=4)\n cbar = fig.colorbar(cax, boundaries=[0, 1, 2, 3, 4, 5, 6],\n shrink=0.5, aspect=30, pad=0.01)\n cbar.set_label(label=r'\\\"Ahnlichkeitsgrad', size=8.4)\n cbar.set_ticks([x for x in range(6)])\n cbar.ax.set_yticklabels([0, 1, 2, 3, 4, 5], va='bottom', size=9)\n plt.rc('text', usetex=True)\n plt.rc('font', family='serif')\n\n ax.set_xticks([x for x in range(10, 150, 14)])\n ax.set_xticks([x for x in range(150)], minor=True)\n ax.set_yticks([x for x in range(10)])\n\n labels = [r'Katze', r'Hydrant', r'T\\\"ur', r'Spielzeug', r'Uhr',\n r'Lachen', r'Regen', r'Grollen', r'Feuerwerk', r'Schrei']\n\n # ax.set_xticklabels(queries, rotation=0, ha='left')\n ax.set_xticklabels(['Cluster %d\\n%s' % (i + 1, x)\n for (i, x) in enumerate(labels)],\n ha='left', rotation=10)\n ax.set_xticklabels(['Bspkl.\\n1-10'], minor=True, rotation=10,\n ha='left')\n ax.set_yticklabels(['%s - %d' % (x, 10 - i)\n for (i, x) in enumerate(reversed(labels))],\n va='bottom')\n # major ticks\n ax.tick_params(axis='both', which='major', labelsize=8.4,\n direction='out', length=3, pad=1, bottom='off',\n right='off')\n ax.tick_params(axis='x', which='major', pad=-3, length=18)\n # minor ticks\n ax.tick_params(axis='x', which='minor', labelsize=8.4,\n direction='out', length=5, bottom='off', right='off')\n ax.grid(which='minor', alpha=0.2, linestyle='-')\n ax.grid(which='major', alpha=1.0, linestyle='-')\n if not filename:\n plt.show()\n else:\n plt.savefig(filename, dpi=800, bbox_inches='tight')",
"def plot_quality(self):\n compare_channel_replicates(\n self.phospho_norm, group=False, cross=True,\n title='Tyr/raw_quality/Tyr_phospho_norm', col_groups=self.groups)",
"def analyze_spectra_fits(self):\n if self.fit_scan_file is None:\n self.open_fit_scan_file()\n \n #result_no = int(self.ui.result_spinBox.value())\n #self.matplotlibwidget = MatplotlibWidget(size=(12,8), dpi=300)\n #self.fit_scan_file['result_'+str(0)].plot(fig=self.matplotlibwidget.getFigure().add_subplot(111))\n #self.matplotlibwidget.draw()\n #self.matplotlibwidget.show()\n analyze_window = Analyze(scan_fit_file=self.fit_scan_file)\n analyze_window.run()",
"def create_spectogram1(track_id):\n filename = get_path(track_id)\n y, sr = librosa.load(filename)\n spectrogram = librosa.feature.melspectrogram(y = y, sr = sr, n_fft = 2048, hop_length = 1024)\n spectrogram = librosa.power_to_db(spectrogram, ref = np.max)\n return spectrogram[:, 473:601]",
"def spectrogram_summary(audio, audio_gen, step, name=''):\n specgram = lambda a: ddsp.spectral_ops.compute_logmag(tf_float32(a), size=768)\n\n # Batch spectrogram operations\n spectrograms = specgram(audio)\n spectrograms_gen = specgram(audio_gen)\n\n batch_size = int(audio.shape[0])\n for i in range(batch_size):\n # Manually specify exact size of fig for tensorboard\n fig, axs = plt.subplots(2, 1, figsize=(8, 8))\n\n ax = axs[0]\n spec = np.rot90(spectrograms[i])\n ax.matshow(spec, vmin=-5, vmax=1, aspect='auto', cmap=plt.cm.magma)\n ax.set_title('original')\n ax.set_xticks([])\n ax.set_yticks([])\n\n ax = axs[1]\n spec = np.rot90(spectrograms_gen[i])\n ax.matshow(spec, vmin=-5, vmax=1, aspect='auto', cmap=plt.cm.magma)\n ax.set_title('synthesized')\n ax.set_xticks([])\n ax.set_yticks([])\n\n # Format and save plot to image\n name = name + '_' if name else ''\n tag = 'spectrogram/{}{}'.format(name, i + 1)\n fig_summary(tag, fig, step)",
"def cubescanplot(options):\n fd=open(options.cubescanfile)\n import pickle\n data=pickle.load(fd)\n fd.close()\n #\n cubegrid=data[0]\n scanresults=data[1]\n #\n # Instantiate scoring class\n #\n SC=score_class(options)\n #\n # Score the ghosts from each cubescan\n #\n scores=[]\n for cube in sorted(scanresults.keys()):\n calc_ghosts=scanresults[cube]\n xs,ys,experrors,satisfied=SC.score_ghosts(calc_ghosts)\n rmsd=RMSD(xs,ys,experrors)\n scores.append([rmsd,cube])\n #import pylab\n #pylab.errorbar(xs,ys,xerr=experrors,fmt='ro')\n #pylab.plot(xs,xs,'g-')\n #pylab.xlabel('Experimental dCS')\n #pylab.ylabel('Calculated dCS')\n #pylab.title('Cubescan of cube %4d, atom: %s, RMSD: %5.3f' %(cube,options.atom,rmsd))\n #pylab.savefig('Cubescan_%d.png' %(cube))\n #pylab.clf()\n rmsds=[]\n scores.sort()\n import Protool\n P=Protool.structureIO()\n P.readpdb('2LZT_H.pdb')\n count=0\n for rmsd,cube in scores[:25]:\n print '%4d, rmsd: %5.2f' %(cube,rmsd)\n center=cubegrid[cube]['coord']\n P.add_atom('X:%4d:CS' %(count+1000),\n atomnumber=0,atomname='CS',\n chainid='X',residuename='CSS',residuenumber='999',\n xcoord=center[0],ycoord=center[1],zcoord=center[2],update=1,BFACTOR=None,OCCUPANCY=None,CHARGE=None,RADIUS=None,tag=None,accept_duplicate=False)\n count=count+1\n rmsds.append(rmsd)\n P.writepdb('cubescan.pdb')\n import pylab\n pylab.hist(rmsds)\n pylab.savefig('Cubescanhist.png')\n return",
"def plot_f_peak(sims, snap):\n for sss in sims:\n hspec = get_hspec(sss, snap)\n hspec.plot_f_peak(\"Si\", 2, color=colors[sss], ls=lss[sss])\n hspec = get_hspec(5, snap, box=10)\n hspec.label=labels[\"S\"]\n hspec.plot_f_peak(\"Si\", 2, color=colors[\"S\"], ls=\"--\")\n hspec.plot_f_peak_errors(\"Si\", 2, samples=100,cumulative=False, color=colors2[\"S\"])\n plt.legend(loc=1,ncol=3)\n vel_data.plot_extra_stat_hist(True)\n plt.ylim(-0.03,3.1)\n save_figure(path.join(outdir,\"cosmo_peak_z\"+str(snap)))\n plt.clf()",
"def spectrogram(intensity, taxis, faxis):\r\n\r\n fig, ax = plt.subplots()\r\n plt.xlabel('Time (s)')\r\n plt.ylabel('Frequency (Hz)')\r\n k = ax.pcolormesh(taxis, faxis, intensity)\r\n c = plt.colorbar(k)\r\n c.set_label('Intensity (dB rel.)')\r\n plt.show()",
"def plot_quality(self):\n compare_channel_replicates(\n self.phospho_norm, group=False, cross=True,\n title='Sep/raw_quality/Sep_phospho_norm', col_groups=self.groups)",
"def plot_AR_EPSP_amp(rec_file_list, description_list=\"\", title=None):\n if not type(rec_file_list) == list:\n rec_file_list = [rec_file_list]\n if not type(description_list) == list:\n description_list = [description_list]\n default_sec_types = ['basal', 'trunk', 'apical', 'tuft']\n with h5py.File(data_dir+rec_file_list[0]+'.hdf5', 'r') as f:\n temp_sec_types = []\n for sim in [sim for sim in f.values() if sim.attrs['stim_loc'] == 'spine']:\n rec = sim['rec']['0'] if sim['rec']['0'].attrs['description'] == 'branch' else sim['rec']['1']\n sec_type = rec.attrs['type']\n if not sec_type in temp_sec_types:\n temp_sec_types.append(sec_type)\n # enforce the default order of input and recording locations for plotting, but allow for adding or subtracting\n # sec_types\n sec_types = [sec_type for sec_type in default_sec_types if sec_type in temp_sec_types]+\\\n [sec_type for sec_type in temp_sec_types if not sec_type in default_sec_types]\n distances = {}\n spine_amp = {'spine': {}, 'branch': {}}\n branch_amp = {'spine': {}, 'branch': {}}\n fig, axes = plt.subplots(max(2, len(sec_types)), 4)\n colors = ['k', 'r', 'c', 'y', 'm', 'g', 'b']\n for index, rec_filename in enumerate(rec_file_list):\n index_dict = {}\n for sec_type in sec_types:\n distances[sec_type] = []\n for stim_loc in ['spine', 'branch']:\n spine_amp[stim_loc][sec_type] = []\n branch_amp[stim_loc][sec_type] = []\n with h5py.File(data_dir+rec_filename+'.hdf5', 'r') as f:\n amp = f['0'].attrs['amp']\n equilibrate = f['0'].attrs['equilibrate']\n duration = f['0'].attrs['duration']\n # following parallel execution and combine_rec_files, the order of simulation records is shuffled\n # here the indices of paired records from spine_stim and branch_stim are collected\n for simiter in f:\n sim = f[simiter]\n stim_loc = sim.attrs['stim_loc']\n spine_rec = sim['rec']['0'] if sim['rec']['0'].attrs['description'] == 'spine' else sim['rec']['1']\n spine_index = spine_rec.attrs['index']\n if not spine_index in index_dict:\n index_dict[spine_index] = {}\n index_dict[spine_index][stim_loc] = simiter\n for indices in index_dict.values():\n spine_stim = f[indices['spine']]['rec']\n for rec in spine_stim.values():\n if rec.attrs['description'] == 'branch':\n branch_rec = rec\n sec_type = rec.attrs['type']\n distances[sec_type].append(branch_rec.attrs['branch_distance'])\n for stim_loc, stim, tvec in [(stim_loc, f[indices[stim_loc]]['rec'], f[indices[stim_loc]]['time'])\n for stim_loc in ['spine', 'branch']]:\n for rec in stim.values():\n if rec.attrs['description'] == 'branch':\n branch_rec = rec\n else:\n spine_rec = rec\n interp_t = np.arange(0., duration, 0.001)\n interp_branch_vm = np.interp(interp_t, tvec[:], branch_rec[:])\n interp_spine_vm = np.interp(interp_t, tvec[:], spine_rec[:])\n left, right = time2index(interp_t, equilibrate-3.0, equilibrate-1.0)\n baseline_branch = np.average(interp_branch_vm[left:right])\n baseline_spine = np.average(interp_spine_vm[left:right])\n left, right = time2index(interp_t, equilibrate, duration)\n peak_branch = np.max(interp_branch_vm[left:right]) - baseline_branch\n peak_spine = np.max(interp_spine_vm[left:right]) - baseline_spine\n spine_amp[stim_loc][sec_type].append(peak_spine)\n branch_amp[stim_loc][sec_type].append(peak_branch)\n for i, sec_type in enumerate(sec_types):\n axes[i][0].scatter(distances[sec_type], branch_amp['branch'][sec_type], label=description_list[index],\n color=colors[index])\n axes[i][1].scatter(distances[sec_type], spine_amp['branch'][sec_type], label=description_list[index],\n 
color=colors[index])\n axes[i][2].scatter(distances[sec_type], branch_amp['spine'][sec_type], label=description_list[index],\n color=colors[index])\n axes[i][3].scatter(distances[sec_type], spine_amp['spine'][sec_type], label=description_list[index],\n color=colors[index])\n for i, sec_type in enumerate(sec_types):\n for j, label in enumerate(['Stim Branch - Record Branch', 'Stim Branch - Record Spine',\n 'Stim Spine - Record Branch', 'Stim Spine - Record Spine']):\n axes[i][j].set_xlabel('Distance from Dendrite Origin (um)')\n axes[i][j].set_ylabel('Input Loc: '+sec_type+'\\nEPSP Amplitude (mV)')\n axes[i][j].set_title(label)\n if not description_list == [\"\"]:\n axes[0][0].legend(loc='best', scatterpoints=1, frameon=False, framealpha=0.5)\n fig.subplots_adjust(hspace=0.5, wspace=0.3, left=0.05, right=0.98, top=0.95, bottom=0.05)\n if not title is None:\n fig.set_size_inches(19.2, 12)\n fig.savefig(data_dir+title+' - spine AR - EPSP amp.svg', format='svg')\n plt.show()\n plt.close()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Interpret a byte as an unsigned int
|
def as_unsigned_char(byte):
return unsigned_char.unpack(byte)[0]
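
The function above depends on a module-level unsigned_char struct that is not shown in this snippet; a minimal sketch of the assumed definition, with a quick check:

import struct

# Assumed elsewhere in this module: a single unsigned byte, struct format "B"
unsigned_char = struct.Struct('B')

# Example: a one-byte bytes object maps to its integer value (0-255)
assert as_unsigned_char(b'\xff') == 255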
|
[
"def __convert_to_unsigned_num(self, bytestring:str):\n return BitArray(bin=bytestring).uint",
"def bytes_to_uint(raw_bytes):\n if not builtins.is_bytes(raw_bytes):\n raise TypeError(\"argument must be raw bytes: got %r\" %\n type(raw_bytes).__name__)\n # binascii.b2a_hex is written in C as is int.\n return int(binascii.b2a_hex(raw_bytes), 16)",
"def _getOneByteUnsigned( self, r ):\r\n return r",
"def getUInt(self):\n b = int(self.getNext()) & 0xFF\n bl = int(b)\n if b >= 255:\n b = int(self.getNext()) & 0xFF\n bb = int(self.getNext()) & 0xFF\n bl = b + (0x100 * bb)\n return bl",
"def _decodeUnsigned(packet):\n # # Make sure data types match\n # if packet[0] != encodeTag('Unsigned32'):\n # raise SnmplibTypeMismatch, \"Attempted decoding of non-Unsigned32 as Unsigned32 (tag=%02x).\" % ord(packet[0])\n # Unpack the length\n (length, size) = decodeLength(packet[1:])\n\n # Setup an index on the data area\n index = size + 1\n\n # Get the first octet\n result = ord(packet[index])\n\n result = long(result)\n\n # Concatinate the rest\n while index < length + size:\n index = index + 1\n result = result * 256\n result = result + ord(packet[index])\n\n # Return result\n return result",
"def twosComplementInt1byte( byte ):\r\n # take everything except the top bit\r\n topbit = bitOfByte( 7, byte )\r\n lowerbits = byte & 127\r\n if topbit == 1:\r\n return lowerbits - (1 << 7)\r\n else:\r\n return lowerbits",
"def byte_to_int(data):\n # type: (bytes) -> int\n if isinstance(data, int):\n return data\n\n if isinstance(data, str):\n return ord(data[0])\n\n raise ValueError(\n \"Expected byte or int as input, got: {0}\".format(\n type(data).__name__\n )\n )",
"def twosComplementInt2bytes( highByte, lowByte ):\r\n # take everything except the top bit\r\n topbit = bitOfByte( 7, highByte )\r\n lowerbits = highByte & 127\r\n unsignedInt = lowerbits << 8 | (lowByte & 0xFF)\r\n if topbit == 1:\r\n # with sufficient thought, I've convinced\r\n # myself of this... we'll see, I suppose.\r\n return unsignedInt - (1 << 15)\r\n else:\r\n return unsignedInt",
"def byte_to_int(data):\n # type: (bytes) -> int\n if isinstance(data, int):\n return data\n\n if isinstance(data, bytes):\n return data[0]\n\n raise ValueError(\n \"Expected byte or int as input, got: {0}\".format(\n type(data).__name__\n )\n )",
"def read_unsigned_integer(stream, size):\r\n\r\n value = 0\r\n for i in range(0, size):\r\n byte = ord(stream.read(1))\r\n value = (value << 8) | byte\r\n return value",
"def unpack_uint8(data: bytes) -> Tuple[int, int]:\n value = unpack(DecodeUtils.UINT8_BYTE_FORMAT, data[:1])[0]\n return value, 1",
"def uint(addr):\n return readtype(pwndbg.typeinfo.uint, addr)",
"def get_upper_nibble(byte: int) -> int:\n return (byte & UPPER_NIBBLE_MASK) >> 4",
"def get_uint(self):\n return pn_data_get_uint(self._data)",
"def get_signed8(uint: int) -> int:\n if uint > 127:\n return uint - 256\n return uint",
"def unpack_integer(value):\n return struct.unpack('<I', value)[0]",
"def toUInt(self, p_str): # real signature unknown; restored from __doc__\n pass",
"def receive_byte_signed(self):\n return unpack('b', self.read(1))[0]",
"def read_2_byte_int(self):\n\n return struct.unpack('>h', self.read_bytes(2))[0]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Read a (monochrome) PNG image and convert it to 1bpp raw data. This should work with any 8-bit PNG. To ensure compatibility, the image can be processed with ImageMagick first using the monochrome flag.
|
def read_png(path):
buf = bytearray()
# State for bit packing
bit_cursor = 8
byte = 0
# Read the PNG image
reader = png.Reader(filename=path)
width, height, rows, metadata = reader.asRGB()
# Loop over image and pack into 1bpp buffer
for row in rows:
for pixel in range(0, len(row), 3):
bit_cursor -= 1
if row[pixel] == 0:
byte |= (1 << bit_cursor)
if bit_cursor == 0:
buf.append(unsigned_char.pack(byte)[0])
byte = 0
bit_cursor = 8
return buf
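
A hypothetical end-to-end use of read_png; it assumes the pypng package (the png module) and the same module-level unsigned_char struct are imported elsewhere in this file, and the output path is illustrative.

def png_to_raw(png_path, raw_path):
    # read_png packs black pixels (R value 0) as set bits, MSB first;
    # any trailing partial byte is dropped if width*height is not a multiple of 8.
    data = read_png(png_path)
    with open(raw_path, "wb") as f:
        f.write(bytes(data))
    return len(data)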
|
[
"def read_raw(filename, height=979, width=1312, bayer = False):\r\n\r\n raw_file = open(filename,'rb')\r\n image = (np.fromfile(raw_file, count = height*width, dtype='uint16'))/256\r\n image = np.reshape(image, (height,width), 'C')\r\n\r\n if bayer == True:\r\n image = cv2.cvtColor(image, cv2.COLOR_BAYER_BG2BGR)\r\n\r\n return image.astype('uint8')",
"def decode_image(data: bytes) -> np.ndarray:\n height = int.from_bytes(data[0:2], 'little')\n width = int.from_bytes(data[2:4], 'little')\n image_format = ImageFormat(int.from_bytes(data[4:5], 'little', signed=False))\n if image_format == ImageFormat.RAW_BGR:\n return np.frombuffer(data[5:], np.uint8).reshape((height, width, 3))\n elif image_format == ImageFormat.JPG_RGB:\n return cv2.imdecode(np.frombuffer(data[5:], np.uint8), cv2.IMREAD_COLOR)",
"def make_binary_image(im):",
"def to_monochrome(im):\n return np.array(Image.fromarray(im).convert('L'))",
"def open_image():\n rawData = open(\"rose.raw\", 'rb').read()\n imgSize = (256, 256) \n img = Image.frombytes('L', imgSize, rawData)\n original_image = np.asarray(img)\n\n return original_image",
"def growl_raw_image(image):\n b = Buffer()\n image.save(b, 'PNG')\n return b.getvalue()",
"def fancyConvert(image):",
"def read_pgm_img_data(filename, byteorder='>'):\n with open(filename, 'rb') as f:\n buffer = f.read()\n try:\n header, width, height, maxval = re.search(\n b\"(^P5\\s(?:\\s*#.*[\\r\\n])*\"\n b\"(\\d+)\\s(?:\\s*#.*[\\r\\n])*\"\n b\"(\\d+)\\s(?:\\s*#.*[\\r\\n])*\"\n b\"(\\d+)\\s(?:\\s*#.*[\\r\\n]\\s)*)\", buffer).groups()\n except AttributeError:\n raise ValueError(\"Not a raw PGM file: '%s'\" % filename)\n \n return numpy.frombuffer(buffer,\n dtype='u1' if int(maxval) < 256 else byteorder+'u2',\n count=int(width)*int(height),\n offset=len(header)\n )",
"def read_image(img_bytes: bytes) -> Image.Image:\n image = Image.open(BytesIO(img_bytes))\n if image.mode != 'RGB':\n image = image.convert(mode='RGB')\n return image",
"def pil2opencv(image: PIL.BmpImagePlugin.BmpImageFile) -> np.ndarray:\n\n image = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)\n return image",
"def __decode_image(image_bytes) -> np.ndarray:\n\n bytes_buffer = io.BytesIO()\n bytes_buffer.write(image_bytes)\n pil_img = Image.open(bytes_buffer)\n return np.asarray(pil_img, dtype=np.float32) / 255",
"def convert_image_to_greyscale_bytes(img, quality=75):\n img = img.convert('L')\n buf = BytesIO()\n img.save(buf, format='JPEG', quality=quality)\n return buf.getvalue()",
"def to_uint8(img):\n return img.astype(numpy.uint8)",
"def __decode_img(img):\n img = img.astype(float)\n return transform.resize(img, [80, 80])",
"def imread(path, is_grayscale=True):\n if is_grayscale:\n # flatten=True 以灰度图的形式读�?\n return scipy.misc.imread(path, flatten=True, mode='YCbCr').astype(np.float)\n else:\n return scipy.misc.imread(path, mode='YCbCr').astype(np.float)",
"def prepare_image_to_recognize(self, img):\n if img.mode == 'P':\n prep = np.array(img.convert('RGBA'))\n else:\n prep = np.array(img)\n prep = cv2.cvtColor(prep, cv2.COLOR_RGB2GRAY)\n prep -= prep.min()\n max = prep.max()\n for i in range(prep.shape[0]):\n for j in range(prep.shape[1]):\n prep[i][j] = np.uint8(prep[i][j]*255.0/max)\n #ret, thresh = cv2.threshold(prep, prep.min() + (prep.max() - prep.min())/2, 255, cv2.THRESH_TRUNC)\n return Image.fromarray(prep).convert('RGB')",
"def decode_image(img):\r\n width, height = img.size\r\n msg = \"\"\r\n index = 0\r\n for row in range(height):\r\n for col in range(width):\r\n try:\r\n r, g, b = img.getpixel((col, row))\r\n except ValueError:\r\n # need to add transparency a for some .png files\r\n r, g, b, a = img.getpixel((col, row))\r\n # first pixel r value is length of message\r\n if row == 0 and col == 0:\r\n length = r\r\n elif index <= length:\r\n msg += chr(r)\r\n index += 1\r\n return msg",
"def decode_png(input):\n # type: (Tensor) -> Tensor\n if not isinstance(input, torch.Tensor) or input.numel() == 0 or input.ndim != 1:\n raise ValueError(\"Expected a non empty 1-dimensional tensor.\")\n\n if not input.dtype == torch.uint8:\n raise ValueError(\"Expected a torch.uint8 tensor.\")\n output = torch.ops.image.decode_png(input)\n return output",
"def convert_from_uint8(img):\n return img.astype(np.float32) / 255.0"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Function that takes in the domain x, y (2D meshgrids) and a list of 2D arrays eta_list and creates an animation of all eta images. To get an updating title one also needs to specify the time step dt between each frame in the simulation, the number of time steps between each eta in eta_list and, finally, a filename for the video.
|
def eta_animation(X, Y, eta_list, frame_interval, filename):
fig, ax = plt.subplots(1, 1)
#plt.title("Velocity field $\mathbf{u}(x,y)$ after 0.0 days", fontname = "serif", fontsize = 17)
plt.xlabel("x [m]", fontname = "serif", fontsize = 12)
plt.ylabel("y [m]", fontname = "serif", fontsize = 12)
pmesh = plt.pcolormesh(X, Y, eta_list[0], vmin = -0.7*np.abs(eta_list[int(len(eta_list)/2)]).max(),
vmax = np.abs(eta_list[int(len(eta_list)/2)]).max(), cmap = plt.cm.RdBu_r)
plt.colorbar(pmesh, orientation = "vertical")
# Update function for quiver animation.
def update_eta(num):
ax.set_title("Surface elevation $\eta$ after t = {:.2f} hours".format(
num*frame_interval/3600), fontname = "serif", fontsize = 16)
pmesh.set_array(eta_list[num][:-1, :-1].flatten())
return pmesh,
anim = animation.FuncAnimation(fig, update_eta,
frames = len(eta_list), interval = 10, blit = False)
mpeg_writer = animation.FFMpegWriter(fps = 24, bitrate = 10000,
codec = "libx264", extra_args = ["-pix_fmt", "yuv420p"])
anim.save("{}.mp4".format(filename), writer = mpeg_writer)
return anim # Need to return anim object to see the animation
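# --- Usage sketch (not part of the original function) ---
# Hedged example: assumes numpy, matplotlib and FFmpeg are available and
# imported as np, plt and animation (as the function above expects). The
# Gaussian test surface, grid and time step are made-up illustration values.
# Note that passing X, Y and eta arrays of equal shape relies on matplotlib's
# pre-3.5 "flat"-shading auto-trim, which the set_array(...[:-1, :-1]) call
# above also assumes.
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation

x = np.linspace(0, 1e6, 100)      # 1000 km domain, 100 grid points
y = np.linspace(0, 1e6, 100)
X, Y = np.meshgrid(x, y)
# Oscillating Gaussian bump standing in for stored simulation output.
eta_list = [np.cos(0.1*n)*np.exp(-((X - 5e5)**2 + (Y - 5e5)**2)/(2*(1e5)**2))
            for n in range(60)]

dt = 10.0               # simulation time step [s] (assumed value)
steps_per_frame = 100   # time steps between stored eta fields (assumed value)
eta_animation(X, Y, eta_list, dt*steps_per_frame, "eta_demo")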
|
[
"def create_animation(env, images):\n\n # We keep the borders, but remove top padding\n og_width = env.width\n og_height = env.height-4\n\n width = og_width * IMAGE_RESCALE\n height = og_height * IMAGE_RESCALE\n\n file_name = './videos/video_exp' + str(EXP) + '_seed' + str(SEED) + '.avi'\n codec = cv2.VideoWriter_fourcc(*'mp4v')\n writer = cv2.VideoWriter(file_name, codec, float(FPS), (width, height))\n\n for raw_image in images:\n\n image = np.ones((height, width, 3), dtype='uint8')\n\n for r in range(og_height):\n for c in range(og_width):\n\n rgb = COLORS[raw_image[r, c]]\n\n r0 = r*IMAGE_RESCALE\n r1 = (r+1)*IMAGE_RESCALE\n c0 = c*IMAGE_RESCALE\n c1 = (c+1)*IMAGE_RESCALE\n\n image[r0:r1, c0:c1, :] = np.array(rgb, dtype='uint8')\n writer.write(image)\n\n writer.release()\n writer=None\n print('Video saved to ' + file_name)",
"def create_animation(images):\n\n plt.ioff()\n fig, ax = plt.subplots()\n dpi = 100\n size_inches = 1000 / dpi\n fig.set_size_inches([size_inches, size_inches])\n plt.ion()\n\n def animate_func(i):\n ax.imshow(images[i])\n ax.set_xticks([])\n ax.set_yticks([])\n ax.grid('off')\n\n anim = animation.FuncAnimation(\n fig, animate_func, frames=len(images), interval=100)\n plt.close(fig)\n return anim",
"def fill_animation(self, movie_filename, *args, **kwargs):\n dpi = 100\n fig = plt.figure(figsize=(1920/dpi, 1080/dpi), dpi=dpi)\n fig.patch.set_facecolor('black')\n axes = {\n 'xy': fig.add_subplot(1, 3, 1),\n 'xz': fig.add_subplot(1, 3, 2),\n 'zy': fig.add_subplot(1, 3, 3),\n }\n\n planes = {'xy': 0, 'xz': 1, 'zy': 2}\n\n def get_plane(arr, vox, plane):\n return {\n 'xy': lambda a, v: a[v[0], :, :],\n 'xz': lambda a, v: a[:, v[1], :],\n 'zy': lambda a, v: np.transpose(a[:, :, v[2]]),\n }[plane](arr, np.round(vox).astype(np.int64))\n\n def get_hv(vox, plane):\n # rel = np.divide(vox, self.bounds)\n rel = vox\n # rel = self.bounds - vox\n return {\n 'xy': {'h': rel[1], 'v': rel[2]},\n 'xz': {'h': rel[0], 'v': rel[2]},\n 'zy': {'h': rel[1], 'v': rel[0]},\n }[plane]\n\n def get_aspect(plane):\n return {\n 'xy': CONFIG.volume.resolution[1] / CONFIG.volume.resolution[2],\n 'xz': CONFIG.volume.resolution[0] / CONFIG.volume.resolution[2],\n 'zy': CONFIG.volume.resolution[1] / CONFIG.volume.resolution[0],\n }[plane]\n\n images = {\n 'last': None,\n 'image': {},\n 'mask': {},\n }\n lines = {\n 'v': {},\n 'h': {},\n 'bl': {},\n 'bt': {},\n }\n current_vox = self.pos_to_vox(self.seed_pos)\n margin = CONFIG.model.input_fov_shape // 2\n for plane, ax in six.iteritems(axes):\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\n image_data = get_plane(self.image, current_vox, plane)\n im = ax.imshow(image_data, cmap='gray')\n im.set_clim([0, 1])\n images['image'][plane] = im\n\n mask_data = get_plane(self.mask, current_vox, plane)\n im = ax.imshow(mask_data, cmap='jet', alpha=0.8)\n im.set_clim([0, 1])\n images['mask'][plane] = im\n\n aspect = get_aspect(plane)\n lines['h'][plane] = ax.axhline(y=get_hv(current_vox - margin, plane)['h'], color='w')\n lines['v'][plane] = ax.axvline(x=get_hv(current_vox + margin, plane)['v'], color='w')\n lines['bl'][plane] = ax.axvline(x=get_hv(current_vox - margin, plane)['v'], color='w')\n lines['bt'][plane] = ax.axhline(y=get_hv(current_vox + margin, plane)['h'], color='w')\n\n ax.set_aspect(aspect)\n\n images['last'] = np.round(current_vox).astype(np.int64)\n\n plt.tight_layout()\n\n fill_generator = self.fill(*args, generator=True, **kwargs)\n\n def update_fn(vox):\n mask_changed = False\n if np.array_equal(np.round(vox).astype(np.int64), update_fn.next_pos_vox):\n try:\n batch_block_data, output = six.next(fill_generator)\n block_data = batch_block_data[0]\n mask_changed = True\n except (StopIteration, Region.EarlyFillTermination):\n block_data = None\n\n if block_data is not None:\n update_fn.next_pos_vox = self.pos_to_vox(block_data['position'])\n if not np.array_equal(np.round(vox).astype(np.int64), update_fn.next_pos_vox):\n p = update_fn.next_pos_vox - vox\n steps = np.linspace(0, 1, 16)\n interp_vox = vox + np.outer(steps, p)\n for row in interp_vox:\n update_fn.vox_queue.put(row)\n else:\n update_fn.vox_queue.put(vox)\n\n vox_round = np.round(vox).astype(np.int64)\n changed_images = []\n for plane, im in six.iteritems(images['image']):\n if vox_round[planes[plane]] != images['last'][planes[plane]]:\n image_data = get_plane(self.image, vox, plane)\n im.set_data(image_data)\n changed_images.append(im)\n\n for plane, im in six.iteritems(images['mask']):\n if mask_changed or vox_round[planes[plane]] != images['last'][planes[plane]]:\n image_data = get_plane(self.mask, vox, plane)\n masked_data = np.ma.masked_where(image_data < 0.5, image_data)\n im.set_data(masked_data)\n changed_images.append(im)\n images['last'] = vox_round\n\n for plane 
in axes.iterkeys():\n lines['h'][plane].set_ydata(get_hv(vox - margin, plane)['h'])\n lines['v'][plane].set_xdata(get_hv(vox + margin, plane)['v'])\n lines['bl'][plane].set_xdata(get_hv(vox - margin, plane)['v'])\n lines['bt'][plane].set_ydata(get_hv(vox + margin, plane)['h'])\n\n return changed_images + \\\n lines['h'].values() + lines['v'].values() + \\\n lines['bl'].values() + lines['bt'].values()\n\n update_fn.moves = 0\n update_fn.next_pos_vox = current_vox\n update_fn.vox_queue = queue.Queue()\n update_fn.vox_queue.put(current_vox)\n\n def vox_gen():\n last_vox = None\n while 1:\n if update_fn.vox_queue.empty():\n return\n else:\n last_vox = update_fn.vox_queue.get()\n yield last_vox\n\n ani = animation.FuncAnimation(fig, update_fn, frames=vox_gen(), interval=16, repeat=False, save_count=60*60)\n writer = animation.writers['ffmpeg'](fps=60)\n\n ani.save(movie_filename, writer=writer, dpi=dpi, savefig_kwargs={'facecolor': 'black'})\n\n return ani",
"def make_gif(name=''):\n\n json_files = sorted(glob.glob(f'./data/{name}_*.json'))\n data_files = sorted(glob.glob(f'./data/{name}_*.dat'))\n img_list = []\n c = 0\n for json_file, data_file in zip(json_files, data_files):\n with open(json_file, 'r') as fp:\n obj = json.load(fp)\n\n index = json_file.split('_')[1].split('.')[0]\n print(f'Working on step {index}...')\n\n array = np.fromfile(data_file, dtype=obj['datatype'])\n array = array.reshape(obj['shape'], order='C')\n\n fig, ax = plt.subplots(1, 2)\n\n ax[0].imshow(array[..., 1], vmin=0, vmax=1)\n ax[1].imshow(array[..., 0], vmin=0, vmax=1)\n\n # ax.set_colorbar()\n ax[0].set_title(f\"Temperature - Time: {obj['time']:6.4f}\")\n ax[1].set_title(f\"Field - Time: {obj['time']:6.4f}\")\n\n fig.tight_layout()\n\n fig.canvas.draw() # draw the canvas, cache the renderer\n image = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8')\n img_list.append(image.reshape(fig.canvas.get_width_height()[::-1] + (3,)))\n plt.close()\n\n # c +=1\n # if c == 3:\n # break\n\n # imageio.mimsave('./test.gif', img_list, fps=8, subrectangles=True)\n imageio.mimsave('./test.mp4', img_list, fps=8)",
"def animate_zstacks(img_list, frames=None, titles=None, vmin=None, vmax=None, cmaps=None, interval=200, gif_name=None, bgcolor=None, **kwargs):\n def update_frame(f):\n for i, img in enumerate(img_list):\n imxy[i].set_data(img[:,:,f])\n return imxy\n\n if not all([img.shape[2] == img_list[0].shape[2] for img in img_list]):\n print('Error: all images must have same length in z-dimension.')\n\n if not frames:\n frames = list(range(img_list[0].shape[2]))\n if not vmin:\n vmin = [np.amin(img) for img in img_list]\n if not vmax:\n vmax = [np.amax(img) for img in img_list]\n if not cmaps:\n cmaps = ['binary_r'] * len(img_list)\n\n fig, ax = plt.subplots(1, len(img_list), figsize=(3.*len(img_list), 3.))\n imxy = []\n for i, img in enumerate(img_list):\n if bgcolor:\n ax[i].set_facecolor(bgcolor)\n imxy.append(ax[i].imshow(img[:,:,frames[0]], vmin=vmin[i], vmax=vmax[i], cmap=cmaps[i], **kwargs))\n if titles:\n for i, title in enumerate(titles):\n ax[i].set_title(title)\n anim = FuncAnimation(fig, update_frame, frames=frames, interval=interval, blit=False)\n if gif_name:\n # Writer = animation.writers['imagemagick']\n # writer = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800)\n anim.save(gif_name, writer='imagemagick', fps=8, dpi=300)\n # plt.show()\n plt.close()\n return True",
"def visualize(li, offset, T, dx, dt):\n fig = plt.figure()\n plts = []\n \n # This list will be populated with u, x, and color\n solve_list = []\n \n # Pre-compute u and x values to save processing power\n for i in range(len(li)):\n u, x, t = solver(li[i][0], li[i][1], li[i][2], li[i][3], li[i][4], dx, dt)\n color = li[i][7]\n solve_list.append([u, x, color])\n \n # Group the correct animations together\n # for each time step n\n for n in range(T):\n plts_tmp = []\n \n # for each 1D wave in the list\n for i in range(len(li)):\n u, x, color = solve_list[i][0], solve_list[i][1], solve_list[i][2]\n p = plt.plot(x, u[n][:] + offset * i, color)\n plts_tmp.append(*p)\n \n plts.append(plts_tmp)\n \n # If PillowWriter does not work, try:\n # wr = animation.FFMpegFileWriter()\n # or another writer instead\n wr = animation.PillowWriter()\n ani = animation.ArtistAnimation(fig, plts) \n\n # You must manually create an 'output/' directory, or change the filename to \"waves.gif\"\n ani.save(\"output/waves.gif\", writer=wr)\n \n plt.show()",
"def save_animation(self, output_folder=None, image_type=\"cleaned\", title=None):\n if hasattr(self, image_type):\n from matplotlib import animation\n\n animation.rcParams[\"animation.writer\"] = \"ffmpeg\"\n\n image = getattr(self, image_type)\n\n # If we have rgb, choose a channel\n\n if len(image.shape) == 4:\n channel = random.choice(range(image.shape[3]))\n bot.warning(\"Selecting channel %s for rendering\" % channel)\n image = image[:, :, :, channel]\n\n # Now we expect 3D, we can animate one dimension over time\n if len(image.shape) == 3:\n movie_file = self._get_clean_name(output_folder, \"mp4\")\n\n # First set up the figure, the axis, and the plot element we want to animate\n fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 6))\n plt.close()\n ax.xlim = (0, image.shape[1])\n ax.ylim = (0, image.shape[2])\n ax.set_xticks([])\n ax.set_yticks([])\n img = ax.imshow(image[0, :, :].T, cmap=\"gray\")\n img.set_interpolation(\"nearest\")\n\n # The animation function should take an index i\n def animate(i):\n img.set_data(image[i, :, :].T)\n sys.stdout.flush()\n return (img,)\n\n bot.info(\"Generating animation...\")\n anim = animation.FuncAnimation(\n fig, animate, frames=image.shape[0], interval=50, blit=True\n )\n anim.save(\n movie_file,\n writer=\"ffmpeg\",\n fps=10,\n dpi=100,\n metadata={\"title\": title or \"deid-animation\"},\n )\n return movie_file\n else:\n bot.warning(\n \"save_animation() is only for 4D data. Use save_png instead.\"\n )\n else:\n bot.warning(\"use detect() --> clean() before saving is possible.\")",
"def save_gif(images, n=0, fname=None):\n fig = plt.figure()\n plt.axis('off')\n ims = []\n for img in images:\n im = plt.imshow(img, animated=True, vmin=0., vmax=1.)\n ims.append([im])\n anim = ArtistAnimation(fig, ims, interval=100, repeat=False)\n fname = fname if fname is not None else 'figs/cav_transition/test{}.gif'.format(n)\n anim.save(fname)",
"def velocity_animation(X, Y, u_list, v_list, frame_interval, filename):\n fig, ax = plt.subplots(figsize = (8, 8), facecolor = \"white\")\n plt.title(\"Velocity field $\\mathbf{u}(x,y)$ after 0.0 days\", fontname = \"serif\", fontsize = 19)\n plt.xlabel(\"x [km]\", fontname = \"serif\", fontsize = 16)\n plt.ylabel(\"y [km]\", fontname = \"serif\", fontsize = 16)\n q_int = 3\n Q = ax.quiver(X[::q_int, ::q_int]/1000.0, Y[::q_int, ::q_int]/1000.0, u_list[0][::q_int,::q_int], v_list[0][::q_int,::q_int],\n scale=0.2, scale_units='inches')\n #qk = plt.quiverkey(Q, 0.9, 0.9, 0.001, \"0.1 m/s\", labelpos = \"E\", coordinates = \"figure\")\n\n # Update function for quiver animation.\n def update_quiver(num):\n u = u_list[num]\n v = v_list[num]\n ax.set_title(\"Velocity field $\\mathbf{{u}}(x,y,t)$ after t = {:.2f} hours\".format(\n num*frame_interval/3600), fontname = \"serif\", fontsize = 19)\n Q.set_UVC(u[::q_int, ::q_int], v[::q_int, ::q_int])\n return Q,\n\n anim = animation.FuncAnimation(fig, update_quiver,\n frames = len(u_list), interval = 10, blit = False)\n mpeg_writer = animation.FFMpegWriter(fps = 24, bitrate = 10000,\n codec = \"libx264\", extra_args = [\"-pix_fmt\", \"yuv420p\"])\n fig.tight_layout()\n anim.save(\"{}.mp4\".format(filename), writer = mpeg_writer)\n return anim # Need to return anim object to see the animation",
"def animate(\n *images, labels=None, interval=500,\n path=None, block_shape=None, annotations=None, fig_unit_size=1,\n text=None, text_loc=None, fontsize='x-small', text_color='black', normalize=None,\n **kwargs):\n n_image_sets = len(images)\n B, T = images[0].shape[:2]\n\n if block_shape is None:\n N = n_image_sets\n sqrt_N = int(np.ceil(np.sqrt(N)))\n m = int(np.ceil(N / sqrt_N))\n block_shape = (m, sqrt_N)\n\n images = [\n img[..., 0] if img.ndim == 5 and img.shape[-1] == 1 else img\n for img in images]\n\n assert np.prod(block_shape) >= n_image_sets\n\n fig, axes = square_subplots(B, block_shape=block_shape, fig_unit_size=fig_unit_size)\n time_text = fig.text(0.01, .99, 't=0', ha='left', va='top', transform=fig.transFigure, fontsize=12)\n\n plots = np.zeros_like(axes)\n text_elements = np.zeros_like(axes)\n\n if text is None:\n text = {}\n elif not isinstance(text, dict):\n text = {0: text}\n\n if text_loc is None:\n text_loc = (0.05, 0.95)\n\n if labels is not None:\n for j in range(n_image_sets):\n axes[0, j].set_title(str(labels[j]))\n\n for ax in axes.flatten():\n set_axis_off(ax)\n\n for i in range(B):\n for j in range(n_image_sets):\n ax = axes[i, j]\n\n _normalize = False\n if normalize is not None:\n _normalize = normalize[j]\n\n # A note on vmin/vmax: vmin and vmax are set permanently when imshow is called.\n # They are not modified when you call set_array.\n\n if _normalize:\n vmin = images[j][i].min()\n vmax = images[j][i].max()\n mean = images[j][i].mean()\n\n ax.set_ylabel('min={:.3f}, mean={:.3f}, max={:.3f}'.format(vmin, mean, vmax))\n else:\n vmin = 0.0\n vmax = 1.0\n\n plots[i, j] = ax.imshow(images[j][i, 0], vmin=vmin, vmax=vmax)\n\n text_elements[i, j] = ax.text(\n *text_loc, '', ha='left', va='top', transform=ax.transAxes, fontsize=fontsize, color=text_color)\n\n plt.subplots_adjust(top=0.95, bottom=0.02, left=0.02, right=.98, wspace=0.1, hspace=0.1)\n\n def func(t):\n time_text.set_text('t={}'.format(t))\n\n for i in range(B):\n for j in range(n_image_sets):\n plots[i, j].set_array(images[j][i, t])\n\n ax = axes[i, j]\n for obj in ax.findobj(match=plt.Rectangle):\n try:\n obj.remove()\n except NotImplementedError:\n pass\n\n if j in text:\n text_elements[i, j].set_text(text[j][i, t])\n\n if annotations is not None:\n ax = axes[i, 0]\n annotate_with_rectangles(ax, annotations[i][t])\n\n anim = animation.FuncAnimation(fig, func, frames=T, interval=interval)\n\n if path is not None:\n if not path.endswith('.mp4'):\n path = path + '.mp4'\n\n anim.save(path, writer='ffmpeg', codec='hevc', extra_args=['-preset', 'ultrafast'])\n\n return fig, axes, anim, path",
"def quick_image_animate(arr, cmap='jet', axis=0, save=None, annotate='frame'):\n\tif not len(arr.shape) == 3:\n print \"Invalid array shape\"\n return\n\n\t#Swap axes so that 0 is the time axis, if necessary.\n if not axis == 0:\n\t\tarr= np.swapaxes(arr,axis,0)\n\tfig = plt.figure()\n if isinstance(annotate, list):\n if not len(annotate) == arr.shape[0]: annotate='frame' #Default to frame if incorrect shape\n else: frame_labels = map(str, annotate)\n if isinstance(annotate, str):\n if annotate=='frame': frame_labels=map(str,range(arr.shape[0]))\n if annotate=='none' : frame_labels=None\n\n\tim = plt.imshow(arr[0,:,:], cmap=cmap, interpolation='nearest')\n if not annotate is None: text = plt.title(frame_labels[0], ha='center', va='center')\n\tdef update_image(n):\n\t\tim.set_data(arr[n,:,:])\n if not annotate is None: text.set_text(frame_labels[n])\n\n\tani = anim.FuncAnimation(fig, update_image, arr.shape[0], interval=1)\n if not save is None:\n\t\twriter = anim.writers['ffmpeg'](fps=20)\n\t\tani.save(save,writer=writer, dpi=200)\n\telse:\n\t\tplt.show()",
"def visualize_animations(aseprite_json, animation_name=None):\n\t\tpg.init()\n\n\t\tscreen = pg.display.set_mode((128, 128))\n\n\t\tanimated_sprite = AnimatedAndScalableSpriteTest()\n\t\tScalableSprite.set_display_scale_factor(16)\n\t\tanimated_sprite.load_aseprite_json(aseprite_json)\n\n\t\tif isinstance(animation_name, str):\n\t\t\tanimation_name = (animation_name, )\n\t\telif animation_name is None:\n\t\t\tanimation_name = list(animated_sprite.animations.keys())\n\n\t\ti_anim = 0\n\t\tanimated_sprite.set_current_animation(animation_name[i_anim])\n\t\tprint(\"set {} animation\".format(animation_name[i_anim]))\n\n\t\t# infinite loop, esc. to quit\n\t\t_done = False\n\t\twhile not _done:\n\t\t\tfor ev in pg.event.get():\n\t\t\t\tif ev.type == pg.KEYDOWN:\n\t\t\t\t\tif ev.key == pg.K_ESCAPE:\n\t\t\t\t\t\t_done = True\n\n\t\t\t\t\t# change current animation\n\t\t\t\t\telif ev.key == pg.K_SPACE:\n\t\t\t\t\t\ti_anim = (i_anim + 1) % len(animation_name)\n\t\t\t\t\t\tanimated_sprite.set_current_animation(animation_name[i_anim])\n\t\t\t\t\t\tprint(\"set {} animation\".format(animation_name[i_anim]))\n\n\t\t\t# clean screen and blit image\n\t\t\tscreen.fill((0, 0, 0))\n\t\t\tanimated_sprite.update()\n\t\t\tscreen.blit(animated_sprite.image, animated_sprite.image.get_clip())\n\t\t\tpg.display.flip()\n\t\tpg.quit()",
"def show_video_abi_glm_times(\n start_date, end_date, out_dir,\n img_out=\"{platform_name}-{sensor}-{name}-\"\n \"{start_time:%Y%m%d%H%M%S}-{end_time:%Y%m%d%H%M%S}.tif\",\n vid_out=\"{platform_name}-{name}-{area.area_id}-\"\n \"{start_time:%Y%m%d%H%M%S}-{end_time:%Y%m%d%H%M%S}.mp4\",\n sector=\"F\",\n area=None,\n enh_args=enh_args):\n ms = next(scutil.get_abi_glm_multiscenes(\n start_date,\n end_date,\n chans=[14],\n sector=sector,\n from_glm=[\"C14_yellow_lightning\"]))\n if area:\n ls = ms.resample(area)\n ls.scenes\n else:\n ls = ms\n ls.scenes[0].save_datasets(\n filename=str(out_dir / img_out),\n overlay=enh_args.get(\"overlay\", None))\n ls.save_animation(str(out_dir / vid_out), enh_args=enh_args)",
"def particle_animation(self, Nt, particle_location, branchID=1, verbose='surface'):\r\n import matplotlib.animation as animation\r\n \r\n Writer = animation.writers['ffmpeg']\r\n writer = Writer(fps=5, metadata=dict(artist='Me'), bitrate=1800)\r\n \r\n xx = np.arange(particle_location.shape[0]) + 1\r\n \r\n if branchID == 1:\r\n WB = W2_Bathymetry(self.Bthfile)\r\n pat = WB.VisBranch2(branchID)\r\n \r\n x_branch = WB.X\r\n \r\n elif branchID == 5:\r\n \r\n WB = W2_Bathymetry(self.Bthfile)\r\n pat = WB.VisBranch2(branchID)\r\n \r\n x_branch5 = WB.X #### segment x coordinates for branch 5\r\n \r\n #### read segment information for branch 1\r\n WB = W2_Bathymetry(self.Bthfile)\r\n pat = WB.VisBranch2(branchID=1)\r\n \r\n x_branch1 = WB.X\r\n \r\n #### combine the two branch cells\r\n x_branch = x_branch5.tolist()[0:] + \\\r\n (x_branch1[self.DHS5-1:] - x_branch1[self.DHS5-1] + x_branch5[-2]).tolist()\r\n x_branch = np.asarray(x_branch)\r\n \r\n \r\n \r\n plt.rcParams.update({'font.size': 18})\r\n fig = plt.figure(figsize=(8,12.5))\r\n ax = fig.add_subplot(111)\r\n \r\n def animate(ii):\r\n ax.clear()\r\n ### grid segments\r\n for yc in x_branch:\r\n ax.axhline(y=yc, color='gray', linestyle='-', linewidth=1)\r\n \r\n #### particle positions\r\n #for i in range(particle_location.shape[0]):\r\n cs = ax.plot(xx, particle_location[:,ii], 'ok', markersize=3.5) ## at 3rd time step\r\n \r\n ax.title.set_text('%s \\n Time step = %d'%(verbose, ii))\r\n ax.set_ylim([-1500, 28500])\r\n ax.set_ylim(ax.get_ylim()[::-1])\r\n ax.set_xlabel('Particle ID')\r\n ax.set_ylabel('Distance from upstream (m)')\r\n \r\n \r\n return cs\r\n \r\n anim = animation.FuncAnimation(fig, animate, frames=Nt, interval=600, blit=False)\r\n anim.save(r'videos\\particle\\%s.mp4'%verbose, writer=writer)\r\n \r\n #plt.show()\r",
"def plot_joint_score_scatter_animation_snapshots(\n asv_scores_list,\n cm_scores_list,\n asv_is_target_list,\n cm_is_target_list,\n titles_list,\n filename,\n num_snapshots=5,\n):\n fig, ax = pyplot.subplots(figsize=[6.4, 6.4])\n\n # Fix x-lim and y-lim for clarity\n max_asv = max([max(x) for x in asv_scores_list])\n min_asv = min([min(x) for x in asv_scores_list])\n max_cm = max([max(x) for x in cm_scores_list])\n min_cm = min([min(x) for x in cm_scores_list])\n\n # Select points from which we create plots\n num_scores = len(asv_scores_list)\n\n plot_points = [int(i * (num_scores - 1) / (num_snapshots - 1)) for i in range(num_snapshots)]\n\n for i, frame_idx in enumerate(plot_points):\n # Clear the current plot\n ax.clear()\n # Pick right data\n asv_scores = asv_scores_list[frame_idx]\n cm_scores = cm_scores_list[frame_idx]\n asv_is_target = asv_is_target_list[frame_idx]\n cm_is_target = cm_is_target_list[frame_idx]\n title = titles_list[frame_idx]\n\n # Spoof samples\n spoof_idxs = ~cm_is_target\n ax.scatter(asv_scores[spoof_idxs], cm_scores[spoof_idxs], c=\"g\", s=15,\n alpha=1.0, edgecolors=\"none\", linewidth=0)\n # Non-targets\n nontarget_idxs = (~asv_is_target) & cm_is_target\n ax.scatter(asv_scores[nontarget_idxs], cm_scores[nontarget_idxs], c=\"r\", s=15, \n alpha=1.0, edgecolors=\"none\", linewidth=0)\n # Target samples\n target_idxs = asv_is_target & cm_is_target\n ax.scatter(asv_scores[target_idxs], cm_scores[target_idxs], c=\"b\", s=15,\n alpha=1.0, edgecolors=\"none\", linewidth=0)\n\n # No labels for for paper\n ax.set_xlim((min_asv, max_asv))\n ax.set_ylim((min_cm, max_cm))\n ax.tick_params(axis='both', which='both', labelsize=27)\n\n # Plot legend only to first plot\n if i == 0:\n # Trick stolen from Stackoverflow #24706125\n # to increase size of ticks in legend\n lgnd = ax.legend((\"Spoof\", \"Nontarget\", \"Target\"), prop={\"size\": 29})\n lgnd.legendHandles[0]._sizes = [50]\n lgnd.legendHandles[1]._sizes = [50]\n lgnd.legendHandles[2]._sizes = [50]\n\n fig.tight_layout()\n fig.savefig(filename.replace(\".\", \"_%d.\" % frame_idx))",
"def animation_objects(self):\n\n blue = (0.0, 0.3, 1.0, 1.0)\n # Pendulum\n pendulum = self.pendulum_sys.pose()\n self.line, = self.ax.plot(\n pendulum[:, 0],\n pendulum[:, 1],\n color=blue,\n linewidth=5,\n animated=True\n )\n # Mass\n self.m, = self.ax.plot(\n self.pendulum_sys.origin[0], self.pendulum_sys.parameters.L,\n color=blue, marker='o', markersize=12.5, animated=True)\n # Base\n self.ax.plot([-0.5, 0.5], self.pendulum_sys.origin,\n c='g', linewidth=7.5)\n # Muscles\n musc = self.muscle_sys.position_from_angle(self.state[0, 0])\n\n muscles = [self.ax.plot(m[:, 0], m[:, 1], color='r', linewidth=3.5,\n animated=True)[0]\n for m in musc]\n\n # Time\n time = self.ax.text(-0.5, 0.05, \"Time: 0.0\",\n fontsize=14, animated=True)\n\n # Neurons\n if self.neural_sys is not None:\n neurons = [self.ax.scatter(\n self.neurons_pos[:, 0], self.neurons_pos[:, 1],\n s=np.ones(4) * 250, c='r', animated=True)]\n return [self.line, self.m] + muscles + [time] + neurons\n return [self.line, self.m] + muscles + [time]",
"def display_frames_as_gif(frames):\n plt.figure(figsize=(frames[0].shape[1]/72.0, frames[0].shape[0]/72.0),dpi=90)\n patch = plt.imshow(frames[0])\n plt.axis('off')\n\n def animate(i):\n patch.set_data(frames[i])\n\n anim = animation.FuncAnimation(plt.gcf(), animate, frames=len(frames),interval=20)\n\n anim.save('anim-right.gif', writer='imagemagick')\n #display(display_animation(anim, default_mode='loop'))",
"def __init__(self, time_vector, particlesList,speed = 20,forever = True,dim = 1,show_trajectory = True):\n self.forever = forever\n self.particlesList = particlesList\n self.time_vector = time_vector\n self.stream = self.data_stream()\n self.dim = dim\n self.show_trajectory = show_trajectory\n\n\n # Setup the figure and axes...\n self.fig, self.ax = plt.subplots()\n plt.axis('scaled')\n\n # Then setup FuncAnimation.\n self.ani = animation.FuncAnimation(self.fig, self.update, interval=speed,\n init_func=self.setup_plot, blit=True)",
"def plot_joint_score_scatter_animation(\n asv_scores_list,\n cm_scores_list,\n asv_is_target_list,\n cm_is_target_list,\n titles_list,\n filename,\n fps=5,\n):\n pyplot.rcParams['animation.ffmpeg_path'] = \"ffmpeg\"\n\n fig, ax = pyplot.subplots(figsize=[6.4*3, 4.8*3], dpi=200)\n\n writer = FFMpegWriter(fps=fps, bitrate=10000)\n num_frames = len(asv_scores_list)\n\n # Fix x-lim and y-lim for clarity\n max_asv = max([max(x) for x in asv_scores_list])\n min_asv = min([min(x) for x in asv_scores_list])\n max_cm = max([max(x) for x in cm_scores_list])\n min_cm = min([min(x) for x in cm_scores_list])\n\n with writer.saving(fig, filename, dpi=200):\n # Loop over frames and repeat drawing on all of them\n for frame_idx in tqdm(range(num_frames), desc=\"render\"):\n # Clear the current plot\n\n ax.clear()\n # Pick right data\n asv_scores = asv_scores_list[frame_idx]\n cm_scores = cm_scores_list[frame_idx]\n asv_is_target = asv_is_target_list[frame_idx]\n cm_is_target = cm_is_target_list[frame_idx]\n title = titles_list[frame_idx]\n\n nontarget_idxs = (~asv_is_target) & cm_is_target\n ax.scatter(asv_scores[nontarget_idxs], cm_scores[nontarget_idxs], c=\"r\", s=10,\n alpha=0.5, edgecolors=\"none\", linewidth=0)\n # Spoof samples\n spoof_idxs = ~cm_is_target\n ax.scatter(asv_scores[spoof_idxs], cm_scores[spoof_idxs], c=\"g\", s=10,\n alpha=0.5, edgecolors=\"none\", linewidth=0)\n # Target samples\n target_idxs = asv_is_target & cm_is_target\n ax.scatter(asv_scores[target_idxs], cm_scores[target_idxs], c=\"b\", s=10,\n alpha=0.5, edgecolors=\"none\", linewidth=0)\n\n ax.set_xlabel(\"ASV score\")\n ax.set_ylabel(\"CM score\")\n ax.set_xlim((min_asv, max_asv))\n ax.set_ylim((min_cm, max_cm))\n ax.legend((\"Nontarget\", \"Spoof\", \"Target\"))\n ax.set_title(title)\n\n writer.grab_frame()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Function that takes in the domain x, y (2D meshgrids) and lists of 2D arrays u_list, v_list and creates a quiver animation of the velocity field (u, v). To get an updating title one also needs to specify the time step dt between each frame in the simulation, the number of time steps between each velocity field in u_list/v_list and, finally, a filename for the video.
|
def velocity_animation(X, Y, u_list, v_list, frame_interval, filename):
fig, ax = plt.subplots(figsize = (8, 8), facecolor = "white")
plt.title("Velocity field $\mathbf{u}(x,y)$ after 0.0 days", fontname = "serif", fontsize = 19)
plt.xlabel("x [km]", fontname = "serif", fontsize = 16)
plt.ylabel("y [km]", fontname = "serif", fontsize = 16)
q_int = 3
Q = ax.quiver(X[::q_int, ::q_int]/1000.0, Y[::q_int, ::q_int]/1000.0, u_list[0][::q_int,::q_int], v_list[0][::q_int,::q_int],
scale=0.2, scale_units='inches')
#qk = plt.quiverkey(Q, 0.9, 0.9, 0.001, "0.1 m/s", labelpos = "E", coordinates = "figure")
# Update function for quiver animation.
def update_quiver(num):
u = u_list[num]
v = v_list[num]
ax.set_title("Velocity field $\mathbf{{u}}(x,y,t)$ after t = {:.2f} hours".format(
num*frame_interval/3600), fontname = "serif", fontsize = 19)
Q.set_UVC(u[::q_int, ::q_int], v[::q_int, ::q_int])
return Q,
anim = animation.FuncAnimation(fig, update_quiver,
frames = len(u_list), interval = 10, blit = False)
mpeg_writer = animation.FFMpegWriter(fps = 24, bitrate = 10000,
codec = "libx264", extra_args = ["-pix_fmt", "yuv420p"])
fig.tight_layout()
anim.save("{}.mp4".format(filename), writer = mpeg_writer)
return anim # Need to return anim object to see the animation
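# --- Usage sketch (not part of the original function) ---
# Hedged example: same numpy/matplotlib/FFmpeg assumptions as the eta sketch
# above; the slowly spun-up rotational test flow is a made-up illustration.
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation

x = np.linspace(0, 1e6, 100)
y = np.linspace(0, 1e6, 100)
X, Y = np.meshgrid(x, y)
# Solid-body rotation about the domain centre, ramped up frame by frame.
u_list = [-0.001*(n/60)*(Y - 5e5)/5e5 for n in range(60)]
v_list = [ 0.001*(n/60)*(X - 5e5)/5e5 for n in range(60)]

dt = 10.0               # simulation time step [s] (assumed value)
steps_per_frame = 100   # time steps between stored velocity fields (assumed value)
velocity_animation(X, Y, u_list, v_list, dt*steps_per_frame, "velocity_demo")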
|
[
"def eta_animation(X, Y, eta_list, frame_interval, filename):\n fig, ax = plt.subplots(1, 1)\n #plt.title(\"Velocity field $\\mathbf{u}(x,y)$ after 0.0 days\", fontname = \"serif\", fontsize = 17)\n plt.xlabel(\"x [m]\", fontname = \"serif\", fontsize = 12)\n plt.ylabel(\"y [m]\", fontname = \"serif\", fontsize = 12)\n pmesh = plt.pcolormesh(X, Y, eta_list[0], vmin = -0.7*np.abs(eta_list[int(len(eta_list)/2)]).max(),\n vmax = np.abs(eta_list[int(len(eta_list)/2)]).max(), cmap = plt.cm.RdBu_r)\n plt.colorbar(pmesh, orientation = \"vertical\")\n\n # Update function for quiver animation.\n def update_eta(num):\n ax.set_title(\"Surface elevation $\\eta$ after t = {:.2f} hours\".format(\n num*frame_interval/3600), fontname = \"serif\", fontsize = 16)\n pmesh.set_array(eta_list[num][:-1, :-1].flatten())\n return pmesh,\n\n anim = animation.FuncAnimation(fig, update_eta,\n frames = len(eta_list), interval = 10, blit = False)\n mpeg_writer = animation.FFMpegWriter(fps = 24, bitrate = 10000,\n codec = \"libx264\", extra_args = [\"-pix_fmt\", \"yuv420p\"])\n anim.save(\"{}.mp4\".format(filename), writer = mpeg_writer)\n return anim # Need to return anim object to see the animation",
"def main() -> None:\n save, file, n, l, t, r, v, nu, kappa = parse_args()\n print(f\"\"\"Hyperparameters:-\n Save to File: {save}\n Save File Name: {file}\n Number of Particles: {n}\n Periodic Spatial Domain: {l}\n Simulation Length (in Seconds): {t}\n Interaction Radius: {r}\n Initial Particle velocity: {v}\n Jump Rate: {nu}\n Concentration Parameter: {kappa}\"\"\")\n\n start = torch.cuda.Event(enable_timing=True)\n end = torch.cuda.Event(enable_timing=True)\n\n start.record(None)\n fig, ax = plt.subplots(dpi=300, subplot_kw=dict(projection=\"3d\"))\n\n writer = writers['ffmpeg'](fps=15, metadata=dict(artist=\"Jawad\"), bitrate=1800)\n ani = FuncAnimation(fig, update_quiver_frame, frames=process_particles(n, l, t, r, v, nu, kappa),\n fargs=(ax, l, r, v, nu, kappa), interval=30, save_count=int(100 * t * nu) + 1, repeat=False)\n\n if save:\n ani.save(file, writer=writer)\n end.record(None)\n torch.cuda.synchronize()\n print(\"[100% Complete] Time taken:\", start.elapsed_time(end) // 1000, \"seconds\")\n else:\n mng = plt.get_current_fig_manager()\n mng.window.state(\"zoomed\")\n plt.show()",
"def save_quiver(ohw_dataset, savepath, singleframe = False, skipquivers = 1, t_cut = 0, *args, **kwargs):\r\n \r\n absMotions, unitMVs = ohw_dataset.absMotions, ohw_dataset.unitMVs \r\n timeindex = ohw_dataset.timeindex\r\n analysisImageStack = ohw_dataset.analysisImageStack\r\n mean_absMotions = ohw_dataset.mean_absMotions\r\n videometa = ohw_dataset.videometa\r\n\r\n scale_max = helpfunctions.get_scale_maxMotion2(absMotions) \r\n MV_zerofiltered = Filters.zeromotion_to_nan(unitMVs, copy=True)\r\n MV_cutoff = Filters.cutoffMVs(MV_zerofiltered, max_length = scale_max, copy=True)\r\n # is done twice here... just refer to QuiverMotionX from ohw?\r\n \r\n MotionX = MV_cutoff[:,0,:,:]\r\n MotionY = MV_cutoff[:,1,:,:]\r\n\r\n blockwidth = ohw_dataset.analysis_meta[\"MV_parameters\"][\"blockwidth\"]\r\n MotionCoordinatesX, MotionCoordinatesY = np.meshgrid(\r\n np.arange(blockwidth/2, analysisImageStack.shape[2], blockwidth), \r\n np.arange(blockwidth/2, analysisImageStack.shape[1], blockwidth)) \r\n \r\n #prepare figure\r\n fig_quivers, ax_quivers = plt.subplots(1,1, figsize=(14,10), dpi = 150)\r\n ax_quivers.axis('off') \r\n \r\n qslice=(slice(None,None,skipquivers),slice(None,None,skipquivers))\r\n distance_between_arrows = blockwidth * skipquivers\r\n arrowscale = 1 / (distance_between_arrows / scale_max)\r\n\r\n imshow_quivers = ax_quivers.imshow(\r\n analysisImageStack[0], vmin = videometa[\"Blackval\"], vmax = videometa[\"Whiteval\"], cmap = \"gray\")\r\n\r\n # adjust desired quiver plotstyles here!\r\n quiver_quivers = ax_quivers.quiver(\r\n MotionCoordinatesX[qslice], MotionCoordinatesY[qslice], MotionX[0][qslice], MotionY[0][qslice], \r\n pivot='mid', color='r', units =\"xy\", scale_units = \"xy\", angles = \"xy\", scale = arrowscale, \r\n width = 4, headwidth = 3, headlength = 5, headaxislength = 5, minshaft =1.5) #width = 4, headwidth = 2, headlength = 3\r\n\r\n #ax_quivers.set_title('Motion [µm/s]', fontsize = 16, fontweight = 'bold')\r\n\r\n savepath.mkdir(parents = True, exist_ok = True) #create folder for results\r\n \r\n if singleframe != False:\r\n # save only specified frame\r\n\r\n imshow_quivers.set_data(analysisImageStack[singleframe])\r\n quiver_quivers.set_UVC(MotionX[singleframe][qslice], MotionY[singleframe][qslice])\r\n \r\n quivers_filename = str(savepath / ('quiver_frame' + str(singleframe) + '.png'))\r\n fig_quivers.savefig(quivers_filename, bbox_inches =\"tight\", pad_inches = 0, dpi = 200)\r\n \r\n else: \r\n # save video\r\n def make_frame_mpl(t):\r\n\r\n frame = int(round(t*videometa[\"fps\"]))\r\n imshow_quivers.set_data(analysisImageStack[frame])\r\n quiver_quivers.set_UVC(MotionX[frame][qslice], MotionY[frame][qslice])\r\n \r\n return mplfig_to_npimage(fig_quivers) # RGB image of the figure\r\n \r\n quivers_filename = str(savepath / 'quivervideo.mp4')\r\n duration = 1/videometa[\"fps\"] * (MotionX.shape[0] - 1)\r\n animation = mpy.VideoClip(make_frame_mpl, duration=duration)\r\n \r\n #cut clip if desired by user\r\n #animation_to_save = self.cut_clip(clip_full=animation, t_cut=t_cut)\r\n #animation_to_save.write_videofile(quivers_filename, fps=self.videometa[\"fps\"])\r\n animation.write_videofile(quivers_filename, fps=videometa[\"fps\"])",
"def vortex(axisY=float, magnitude=float, axisX=float, position=\"string\", perVertex=bool, maxDistance=\"string\", attenuation=float, axisZ=float, name=\"string\"):\n pass",
"def quiver_plot(X, Y, U, V, plot_title):\n plt.figure()\n plt.title(plot_title, fontname = \"serif\", fontsize = 17)\n plt.xlabel(\"x [m]\", fontname = \"serif\", fontsize = 12)\n plt.ylabel(\"y [m]\", fontname = \"serif\", fontsize = 12)\n Q = plt.quiver(X[::4, ::4], Y[::4, ::4], U[::4, ::4], V[::4, ::4],\n units = \"xy\", scale = 0.002, scale_units = \"inches\")\n qk = plt.quiverkey(Q, 0.9, 0.9, 0.001, \"0.1 m/s\",\n labelpos = \"E\", coordinates = \"figure\")",
"def visualize_1d(a):\n true_v=utils.read_flow_field(os.path.join('data/velocity', str(a)))\n ff=utils.read_mag_field(os.path.join('data/magnitude', str(a)))\n ff=np.array(ff)\n true_v=np.array(true_v)\n size=ff.shape[0]\n x,y = np.meshgrid(np.array([i for i in range(25)]), np.array([i for i in range(24,-1,-1)]))\n z = ff \n plt.subplot(1,2,1)\n plt.contourf(x,y,z,10, alpha=.75, cmap='jet')\n plt.colorbar()\n plt.title(\"Magnitude Map\")\n\n x, y = np.meshgrid(np.arange(25),np.arange(24,-1,-1))\n u = true_v[:,:,0]\n v = true_v[:,:,1]\n plt.subplot(1,2,2)\n plt.quiver(x,y,u,v, scale=300)\n plt.title(\"Velocity Map\")\n plt.show()",
"def animshow(video, framerate=2., as_html5=True, repeat=False,\n vrange='indep1', zoom=1, title='', col_wrap=None, ax=None,\n cmap=None, plot_complex='rectangular', **kwargs):\n\n video = _convert_signal_to_list(video)\n video_n_frames = np.array([v.shape[0] for v in video])\n if (video_n_frames != video_n_frames[0]).any():\n raise Exception(\"All videos must have the same number of frames! But you \"\n \"passed videos with {} frames\".format(video_n_frames))\n title, vert_pct = _convert_title_to_list(title, video)\n video, title, contains_rgb = _process_signal(video, title, plot_complex, video=True)\n zooms, max_shape = _check_zooms(video, zoom, contains_rgb, video=True)\n fig, axes = _setup_figure(ax, col_wrap, video, zoom, max_shape, vert_pct)\n vrange_list, cmap = colormap_range(image=video, vrange=vrange, cmap=cmap)\n\n first_image = [v[0] for v in video]\n for im, a, r, t, z in zip(first_image, axes, vrange_list, title, zooms):\n _showIm(im, a, r, z, t, cmap, **kwargs)\n\n artists = [fig.axes[i].images[0] for i in range(len(fig.axes))]\n\n for i, a in enumerate(artists):\n a.set_clim(vrange_list[i])\n\n def animate_video(t):\n for i, a in enumerate(artists):\n frame = video[i][t].astype(float)\n a.set_data(frame)\n return artists\n\n # Produce the animation\n anim = animation.FuncAnimation(fig, frames=len(video[0]),\n interval=1000/framerate, blit=True,\n func=animate_video, repeat=repeat,\n repeat_delay=500)\n\n plt.close(fig)\n\n if as_html5:\n # to_html5_video will call savefig with a dpi kwarg, so our custom figure class will raise\n # a warning. we don't want to worry people, so we go ahead and suppress it\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=UserWarning)\n return HTML(anim.to_html5_video())\n return anim",
"def init_evelocity_diag13():\n s1.mtv = int((nloop - 1)/in1.ntv) + 1; s1.itv = 0\n# fv = global electron velocity distribution functions\n s1.fv = numpy.empty((2*in1.nmv+2,in1.ndim),float_type,'F')\n# sfv = electron velocity distribution functions in tile\n s1.sfv = numpy.empty((2*in1.nmv+2,in1.ndim,mx1+1),float_type,'F')\n# fvm = electron vdrift, vth, entropy for global distribution\n s1.fvm = numpy.empty((in1.ndim,3),float_type,'F')\n# fvtm = time history of electron vdrift, vth, and entropy\n s1.fvtm = numpy.zeros((s1.mtv,in1.ndim,3),float_type,'F')\n ws[0] = 2.0*max(4.0*in1.vtx+abs(in1.vx0),4.0*in1.vtdx+abs(in1.vdx))\n ws[0] = max(ws[0],2.0*max(4.0*in1.vty+abs(in1.vy0),\n 4.0*in1.vtdy+abs(in1.vdy)))\n ws[0] = max(ws[0],2.0*max(4.0*in1.vtz+abs(in1.vz0),\n 4.0*in1.vtdz+abs(in1.vdz)))\n s1.sfv[0,0,:] = ws[0]\n s1.sfv[0,1,:] = ws[0]\n s1.sfv[0,2,:] = ws[0]",
"def __init__(self, time_vector, particlesList,speed = 20,forever = True,dim = 1,show_trajectory = True):\n self.forever = forever\n self.particlesList = particlesList\n self.time_vector = time_vector\n self.stream = self.data_stream()\n self.dim = dim\n self.show_trajectory = show_trajectory\n\n\n # Setup the figure and axes...\n self.fig, self.ax = plt.subplots()\n plt.axis('scaled')\n\n # Then setup FuncAnimation.\n self.ani = animation.FuncAnimation(self.fig, self.update, interval=speed,\n init_func=self.setup_plot, blit=True)",
"def save_quiver3(ohw_dataset, savepath, singleframe = False, skipquivers = 1, t_cut = 0, *args, **kwargs):\r\n \r\n absMotions, unitMVs = ohw_dataset.absMotions, ohw_dataset.unitMVs \r\n timeindex = ohw_dataset.timeindex\r\n analysisImageStack = ohw_dataset.analysisImageStack\r\n mean_absMotions = ohw_dataset.mean_absMotions\r\n videometa = ohw_dataset.videometa\r\n \r\n scale_max = helpfunctions.get_scale_maxMotion2(absMotions) \r\n MV_zerofiltered = Filters.zeromotion_to_nan(unitMVs, copy=True)\r\n MV_cutoff = Filters.cutoffMVs(MV_zerofiltered, max_length = scale_max, copy=True)\r\n \r\n MotionX = MV_cutoff[:,0,:,:]\r\n MotionY = MV_cutoff[:,1,:,:]\r\n\r\n blockwidth = ohw_dataset.analysis_meta[\"MV_parameters\"][\"blockwidth\"]\r\n MotionCoordinatesX, MotionCoordinatesY = np.meshgrid(\r\n np.arange(blockwidth/2, analysisImageStack.shape[2], blockwidth), \r\n np.arange(blockwidth/2, analysisImageStack.shape[1], blockwidth)) \r\n \r\n #prepare figure\r\n outputfigure = plt.figure(figsize=(14,10), dpi = 150)#figsize=(6.5,12)\r\n\r\n gs = gridspec.GridSpec(3,2, figure=outputfigure)\r\n gs.tight_layout(outputfigure)\r\n \r\n saveax_video = outputfigure.add_subplot(gs[0:2, 0])\r\n saveax_video.axis('off') \r\n \r\n saveax_quivers = outputfigure.add_subplot(gs[0:2, 1])\r\n saveax_quivers.axis('off')\r\n\r\n saveax_trace = outputfigure.add_subplot(gs[2,:])\r\n saveax_trace.plot(timeindex, mean_absMotions, '-', linewidth = 2)\r\n \r\n saveax_trace.set_xlim(left = 0, right = timeindex[-1])\r\n saveax_trace.set_ylim(bottom = 0)\r\n saveax_trace.set_xlabel('t [s]', fontsize = 22)\r\n saveax_trace.set_ylabel(u'$\\mathrm{\\overline {v}}$ [\\xb5m/s]', fontsize = 22)\r\n saveax_trace.tick_params(labelsize = 20)\r\n\r\n for side in ['top','right','bottom','left']:\r\n saveax_trace.spines[side].set_linewidth(2) \r\n \r\n marker, = saveax_trace.plot(timeindex[0],mean_absMotions[0],'ro')\r\n\r\n ###### prepare video axis\r\n imshow_video = saveax_video.imshow(\r\n analysisImageStack[0], vmin = videometa[\"Blackval\"], vmax = videometa[\"Whiteval\"], cmap = \"gray\")\r\n \r\n qslice=(slice(None,None,skipquivers),slice(None,None,skipquivers))\r\n distance_between_arrows = blockwidth * skipquivers\r\n arrowscale = 1 / (distance_between_arrows / scale_max)\r\n \r\n imshow_quivers = saveax_quivers.imshow(analysisImageStack[0], vmin = videometa[\"Blackval\"], vmax = videometa[\"Whiteval\"], cmap = \"gray\")\r\n # adjust desired quiver plotstyles here!\r\n quiver_quivers = saveax_quivers.quiver(\r\n MotionCoordinatesX[qslice], MotionCoordinatesY[qslice], MotionX[0][qslice], MotionY[0][qslice], \r\n pivot='mid', color='r', units =\"xy\", scale_units = \"xy\", angles = \"xy\", scale = arrowscale, \r\n width = 4, headwidth = 3, headlength = 5, headaxislength = 5, minshaft =1.5) #width = 4, headwidth = 2, headlength = 3\r\n \r\n #saveax_quivers.set_title('Motion [µm/s]', fontsize = 16, fontweight = 'bold')\r\n\r\n savepath.mkdir(parents = True, exist_ok = True) #create folder for results\r\n\r\n # parameters for cropping white border in output video\r\n sizex, sizey = outputfigure.get_size_inches()*outputfigure.dpi\r\n bbox = outputfigure.get_tightbbox(outputfigure.canvas.get_renderer())\r\n bbox_bounds_px = np.round(np.asarray(bbox.extents*outputfigure.dpi)).astype(int)\r\n\r\n # to do: introduce min/max to be on the safe side!\r\n # reverse for np indexing\r\n bbox_bounds_px[3] = sizey - bbox_bounds_px[1]#y1\r\n bbox_bounds_px[1] = sizey - bbox_bounds_px[3]#y0\r\n\r\n bbox_bounds_px[2] = sizex - 
bbox_bounds_px[0]#x1\r\n bbox_bounds_px[0] = sizex - bbox_bounds_px[2]#x0\r\n\r\n # save only specified frame \r\n #if not isinstance(singleframe, bool):\r\n if singleframe != False:\r\n print(\"export single frame\")\r\n imshow_quivers.set_data(analysisImageStack[singleframe])\r\n imshow_video.set_data(analysisImageStack[singleframe])\r\n quiver_quivers.set_UVC(MotionX[singleframe][qslice], MotionY[singleframe][qslice])\r\n \r\n marker.remove()\r\n marker, = saveax_trace.plot(timeindex[singleframe],mean_absMotions[singleframe],'ro')\r\n marker.set_clip_on(False)\r\n \r\n outputfigure.savefig(str(savepath / ('quiver3_frame' + str(singleframe) + '.png')), bbox_inches = \"tight\")\r\n \r\n else:\r\n # save video\r\n def make_frame_mpl(t):\r\n #calculate the current frame number:\r\n frame = int(round(t*videometa[\"fps\"]))\r\n \r\n imshow_quivers.set_data(analysisImageStack[frame])\r\n imshow_video.set_data(analysisImageStack[frame])\r\n \r\n quiver_quivers.set_UVC(MotionX[frame][qslice], MotionY[frame][qslice])\r\n\r\n #marker.remove() # does not work, only if used as global variable...\r\n saveax_trace.lines[1].remove()\r\n marker, = saveax_trace.plot(timeindex[frame],mean_absMotions[frame],'ro')\r\n marker.set_clip_on(False)\r\n \r\n return mplfig_to_npimage(outputfigure)[bbox_bounds_px[1]:bbox_bounds_px[3],bbox_bounds_px[0]:bbox_bounds_px[2]] # RGB image of the figure #150:1450,100:1950\r\n \r\n # slicing here really hacky! find better solution!\r\n # find equivalent to bbox_inches='tight' in savefig\r\n # mplfig_to_npimage just uses barer canvas.tostring_rgb()\r\n # -> check how bbox_inches works under the hood\r\n # -> in print_figure:\r\n # if bbox_inches:\r\n # call adjust_bbox to save only the given area\r\n \r\n quivers_filename = str(savepath / 'quivervideo3.mp4')\r\n duration = 1/videometa[\"fps\"] * (MotionX.shape[0] - 1)\r\n animation = mpy.VideoClip(make_frame_mpl, duration=duration)\r\n \r\n animation.write_videofile(quivers_filename, fps=videometa[\"fps\"])\r\n #cut clip if desired by user in future\r\n #animation_to_save = cut_clip(clip_full=animation, t_cut=t_cut)",
"def plot_uv_track(bu, bv, outname=None, show=True):\n _, ax = plt.subplots(1, 1, figsize=(8, 8))\n for i in range(bu.shape[0]):\n ax.plot(bu[i, :], bv[i, :])\n ax.set_xlim(-1500, 1500)\n ax.set_ylim(-1500, 1500)\n ax.text(-1200, 1200, \"UV Coverage\")\n ax.set_xlabel(\"$u$ (m)\")\n ax.set_ylabel(\"$v$ (m)\")\n if outname is not None:\n plt.savefig(f\"{outname}_uv.png\")\n if not show:\n plt.close()",
"def plot_velocity(self, x, uu, figname):\r\n \r\n #pdb.set_trace()\r\n ## 120 days\r\n uu = uu[:self.period]\r\n \r\n umin = -0.04\r\n umax = 0.04\r\n #unew[unew<umin] = umin\r\n #unew[unew>umax] = umax\r\n \r\n ## this step is only needed for visualizing the extremly large positive and negative velocities\r\n for i in range(len(uu)):\r\n for j in range(len(uu[i])):\r\n if uu[i][j] > umax:\r\n uu[i][j] = umax\r\n elif uu[i][j] < umin:\r\n uu[i][j] = umin\r\n \r\n \r\n tt = np.arange(len(uu)) + 1\r\n \r\n lx = max(map(len, x))\r\n for i in range(len(x)):\r\n if len(x[i]) == lx:\r\n y = x[i]\r\n exit\r\n \r\n #y = np.array([[None]*(lx-len(xi)) + xi for xi in x])\r\n unew = np.array([[None]*(lx-len(xi)) + xi for xi in uu])\r\n \r\n plt.rcParams.update({'font.size': 18})\r\n fig = plt.figure(figsize=(9.5,8))\r\n ax = fig.add_subplot(111)\r\n \r\n \r\n \r\n levels = np.linspace(umin, umax, 100)\r\n cmap = plt.set_cmap('bwr')\r\n CS = ax.contourf(tt, y, unew.T, cmap=cmap, levels=levels)\r\n ax.set_ylim(ax.get_ylim()[::-1])\r\n ax.set_xlabel('Time (day)')\r\n ax.set_ylabel('Distance from upstream (m)')\r\n \r\n cb = fig.colorbar(CS, orientation='vertical')\r\n cb.set_label('Velocity (m/s)', fontsize=16)\r\n #plt.show()\r\n plt.savefig(figname)\r\n plt.close()",
"def fill_animation(self, movie_filename, *args, **kwargs):\n dpi = 100\n fig = plt.figure(figsize=(1920/dpi, 1080/dpi), dpi=dpi)\n fig.patch.set_facecolor('black')\n axes = {\n 'xy': fig.add_subplot(1, 3, 1),\n 'xz': fig.add_subplot(1, 3, 2),\n 'zy': fig.add_subplot(1, 3, 3),\n }\n\n planes = {'xy': 0, 'xz': 1, 'zy': 2}\n\n def get_plane(arr, vox, plane):\n return {\n 'xy': lambda a, v: a[v[0], :, :],\n 'xz': lambda a, v: a[:, v[1], :],\n 'zy': lambda a, v: np.transpose(a[:, :, v[2]]),\n }[plane](arr, np.round(vox).astype(np.int64))\n\n def get_hv(vox, plane):\n # rel = np.divide(vox, self.bounds)\n rel = vox\n # rel = self.bounds - vox\n return {\n 'xy': {'h': rel[1], 'v': rel[2]},\n 'xz': {'h': rel[0], 'v': rel[2]},\n 'zy': {'h': rel[1], 'v': rel[0]},\n }[plane]\n\n def get_aspect(plane):\n return {\n 'xy': CONFIG.volume.resolution[1] / CONFIG.volume.resolution[2],\n 'xz': CONFIG.volume.resolution[0] / CONFIG.volume.resolution[2],\n 'zy': CONFIG.volume.resolution[1] / CONFIG.volume.resolution[0],\n }[plane]\n\n images = {\n 'last': None,\n 'image': {},\n 'mask': {},\n }\n lines = {\n 'v': {},\n 'h': {},\n 'bl': {},\n 'bt': {},\n }\n current_vox = self.pos_to_vox(self.seed_pos)\n margin = CONFIG.model.input_fov_shape // 2\n for plane, ax in six.iteritems(axes):\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\n image_data = get_plane(self.image, current_vox, plane)\n im = ax.imshow(image_data, cmap='gray')\n im.set_clim([0, 1])\n images['image'][plane] = im\n\n mask_data = get_plane(self.mask, current_vox, plane)\n im = ax.imshow(mask_data, cmap='jet', alpha=0.8)\n im.set_clim([0, 1])\n images['mask'][plane] = im\n\n aspect = get_aspect(plane)\n lines['h'][plane] = ax.axhline(y=get_hv(current_vox - margin, plane)['h'], color='w')\n lines['v'][plane] = ax.axvline(x=get_hv(current_vox + margin, plane)['v'], color='w')\n lines['bl'][plane] = ax.axvline(x=get_hv(current_vox - margin, plane)['v'], color='w')\n lines['bt'][plane] = ax.axhline(y=get_hv(current_vox + margin, plane)['h'], color='w')\n\n ax.set_aspect(aspect)\n\n images['last'] = np.round(current_vox).astype(np.int64)\n\n plt.tight_layout()\n\n fill_generator = self.fill(*args, generator=True, **kwargs)\n\n def update_fn(vox):\n mask_changed = False\n if np.array_equal(np.round(vox).astype(np.int64), update_fn.next_pos_vox):\n try:\n batch_block_data, output = six.next(fill_generator)\n block_data = batch_block_data[0]\n mask_changed = True\n except (StopIteration, Region.EarlyFillTermination):\n block_data = None\n\n if block_data is not None:\n update_fn.next_pos_vox = self.pos_to_vox(block_data['position'])\n if not np.array_equal(np.round(vox).astype(np.int64), update_fn.next_pos_vox):\n p = update_fn.next_pos_vox - vox\n steps = np.linspace(0, 1, 16)\n interp_vox = vox + np.outer(steps, p)\n for row in interp_vox:\n update_fn.vox_queue.put(row)\n else:\n update_fn.vox_queue.put(vox)\n\n vox_round = np.round(vox).astype(np.int64)\n changed_images = []\n for plane, im in six.iteritems(images['image']):\n if vox_round[planes[plane]] != images['last'][planes[plane]]:\n image_data = get_plane(self.image, vox, plane)\n im.set_data(image_data)\n changed_images.append(im)\n\n for plane, im in six.iteritems(images['mask']):\n if mask_changed or vox_round[planes[plane]] != images['last'][planes[plane]]:\n image_data = get_plane(self.mask, vox, plane)\n masked_data = np.ma.masked_where(image_data < 0.5, image_data)\n im.set_data(masked_data)\n changed_images.append(im)\n images['last'] = vox_round\n\n for plane 
in axes.iterkeys():\n lines['h'][plane].set_ydata(get_hv(vox - margin, plane)['h'])\n lines['v'][plane].set_xdata(get_hv(vox + margin, plane)['v'])\n lines['bl'][plane].set_xdata(get_hv(vox - margin, plane)['v'])\n lines['bt'][plane].set_ydata(get_hv(vox + margin, plane)['h'])\n\n return changed_images + \\\n lines['h'].values() + lines['v'].values() + \\\n lines['bl'].values() + lines['bt'].values()\n\n update_fn.moves = 0\n update_fn.next_pos_vox = current_vox\n update_fn.vox_queue = queue.Queue()\n update_fn.vox_queue.put(current_vox)\n\n def vox_gen():\n last_vox = None\n while 1:\n if update_fn.vox_queue.empty():\n return\n else:\n last_vox = update_fn.vox_queue.get()\n yield last_vox\n\n ani = animation.FuncAnimation(fig, update_fn, frames=vox_gen(), interval=16, repeat=False, save_count=60*60)\n writer = animation.writers['ffmpeg'](fps=60)\n\n ani.save(movie_filename, writer=writer, dpi=dpi, savefig_kwargs={'facecolor': 'black'})\n\n return ani",
"def quiver(x, v, ax=None, **kwargs):\n plt = _import_pyplot()\n # multiple axes ?\n try:\n fields = [quiver(x, v, i, **kwargs) for i in ax]\n return fields\n except:\n pass\n\n if not ax:\n ax = plt.gca()\n\n dim = dimension(x)\n\n if dim < 2:\n raise Exception('ndim < 2')\n elif dim < 3:\n h = ax.quiver(x[0, :], x[1, :],\n v[0, :], v[1, :], **kwargs)\n else:\n raise NotImplementedError\n\n from mayavi.mlab import quiver3d\n\n if ax:\n print('axes arg ignored, mayavi used')\n\n h = quiver3d(x[0, :], x[1, :], x[2, :],\n v[0, :], v[1, :], v[2, :], **kwargs)\n\n if dim > 3:\n warn('quiver:ndim #dimensions > 3,' +\n 'plotting only 3D component.')\n\n return h",
"def f(u):\n \n #h = u[0] # Not used anywhere\n v = u[1]\n \n return numpy.array([v,-g + mDot_p*v_e/(m_s+m_p) - 0.5*rho*v*abs(v)*A*C_D/(m_s+m_p) ]) # ohh abs(v) is sooo much important, for downward velocity, the drag must be up!",
"def genvis(dist_uvw, l, m, flux):\n # For background, see section 4.2 of\n # https://github.com/griffinfoster/fundamentals_of_interferometry/\n\n # Component of source vector along n-axis / w-axis\n # (Doubles as flux attenuation factor due to projection effects)\n src_n = np.sqrt(1 - l ** 2 - m ** 2)\n\n # src vec = [l,m, src_w]\n # phase centre vec = [0,0,1]\n # src - centre:\n src_offset = -np.array([l, m, src_n - 1])\n\n return flux * src_n * np.exp(-2j * np.pi * np.dot(dist_uvw, src_offset))",
"def create_animation(env, images):\n\n # We keep the borders, but remove top padding\n og_width = env.width\n og_height = env.height-4\n\n width = og_width * IMAGE_RESCALE\n height = og_height * IMAGE_RESCALE\n\n file_name = './videos/video_exp' + str(EXP) + '_seed' + str(SEED) + '.avi'\n codec = cv2.VideoWriter_fourcc(*'mp4v')\n writer = cv2.VideoWriter(file_name, codec, float(FPS), (width, height))\n\n for raw_image in images:\n\n image = np.ones((height, width, 3), dtype='uint8')\n\n for r in range(og_height):\n for c in range(og_width):\n\n rgb = COLORS[raw_image[r, c]]\n\n r0 = r*IMAGE_RESCALE\n r1 = (r+1)*IMAGE_RESCALE\n c0 = c*IMAGE_RESCALE\n c1 = (c+1)*IMAGE_RESCALE\n\n image[r0:r1, c0:c1, :] = np.array(rgb, dtype='uint8')\n writer.write(image)\n\n writer.release()\n writer=None\n print('Video saved to ' + file_name)",
"def WriteVideoVision(self, outputPath, fps, segmentSize, widthVideo,\n heightVideo, widthEqui, heightEqui,\n horizontalFoVAngle, verticalFoVAngle):\n with FFmpeg.VideoWrite(outputPath,\n width=widthVideo,\n height=heightVideo,\n fps=fps) as vo:\n posMatList = list()\n vmax = 0\n for timestamp in np.arange(self.minStartTime,\n self.maxEndTime,#-segmentSize,\n 1/fps):\n startTime = timestamp\n endTime = timestamp + segmentSize\n posMat = np.zeros((heightEqui, widthEqui))\n posMatList.append((startTime, endTime, posMat))\n\n for result in self.processedResultList:\n for (startTime, endTime, posMat) in posMatList:\n h, w = posMat.shape\n d = dict((t, q) for (t, q) in result.filteredQuaternions.items()\n if t >= startTime and t < endTime)\n ans = Q.ComputeVision(d, w, h,\n horizontalFoVAngle, verticalFoVAngle)\n for i in range(len(ans)):\n for j in range(len(ans[i])):\n posMat[j, i] += ans[i][j]\n for (startTime, endTime, posMat) in posMatList:\n sumPos = posMat.sum()\n if sumPos > 0:\n posMat /= sumPos\n vmax = max(vmax, posMat.max())\n\n for (startTime, endTime, posMat) in posMatList:\n plt.matshow(posMat, cmap='hot', vmax=vmax, vmin=0)\n buffer_ = io.BytesIO()\n plt.axis('off')\n plt.title('From {:6.2f} s to {:6.2f} s'.format(startTime,\n endTime))\n plt.colorbar()\n plt.savefig(buffer_, format = \"png\",\n bbox_inches='tight',\n pad_inches = 0)\n buffer_.seek(0)\n image = PIL.Image.open(buffer_)\n image.load()\n buffer_.close()\n plt.close()\n vo.AddPicture(image)\n plt.close()",
"def uz_fps(self, compute_ux = False, compute_uy = False):\n # Loop the receivers\n if compute_ux and compute_uy:\n message = 'Processing particle velocity (x,y,z dir at field point)'\n elif compute_ux:\n message = 'Processing particle velocity (x,z dir at field point)'\n elif compute_uy:\n message = 'Processing particle velocity (y,z dir at field point)'\n else:\n message = 'Processing particle velocity (z dir at field point)'\n\n for js, s_coord in enumerate(self.sources.coord):\n hs = s_coord[2] # source height\n uz_rec = np.zeros((self.receivers.coord.shape[0], len(self.controls.freq)), dtype = np.csingle)\n if compute_ux:\n ux_rec = np.zeros((self.receivers.coord.shape[0], len(self.controls.freq)), dtype = np.csingle)\n if compute_uy:\n uy_rec = np.zeros((self.receivers.coord.shape[0], len(self.controls.freq)), dtype = np.csingle)\n for jrec, r_coord in enumerate(self.receivers.coord):\n r = ((s_coord[0] - r_coord[0])**2.0 + (s_coord[1] - r_coord[1])**2.0)**0.5 # horizontal distance source-receiver\n zr = r_coord[2] # receiver height\n r1 = (r ** 2 + (hs - zr) ** 2) ** 0.5\n r2 = (r ** 2 + (hs + zr) ** 2) ** 0.5\n print('Calculate particle vel. (z-dir) for source {} and receiver {}'.format(js+1, jrec+1))\n # bar = ChargingBar('Processing particle velocity z-dir',\n # max=len(self.controls.k0), suffix='%(percent)d%%')\n bar = tqdm(total = len(self.controls.k0),\n desc = message)\n for jf, k0 in enumerate(self.controls.k0):\n uz_scat = insitu_cpp._bemflush_uzscat(r_coord, self.node_x, self.node_y,\n self.Nzeta, self.Nweights.T, k0, self.beta[jf], self.p_surface[:,jf])\n # print(uz_scat)\n # print('p_scat for freq {} Hz is: {}'.format(self.controls.freq[jf], p_scat))\n uz_rec[jrec, jf] = (np.exp(-1j * k0 * r1) / r1)*\\\n (1 + (1 / (1j * k0 * r1)))* ((hs - zr)/r1)-\\\n (np.exp(-1j * k0 * r2) / r2) *\\\n (1 + (1 / (1j * k0 * r2))) * ((hs + zr)/r2) - uz_scat\n if compute_ux:\n ux_scat = insitu_cpp._bemflush_uxscat(r_coord, self.node_x, self.node_y,\n self.Nzeta, self.Nweights.T, k0, self.beta[jf], self.p_surface[:,jf])\n ux_rec[jrec, jf] = (np.exp(-1j * k0 * r1) / r1)*\\\n (1 + (1 / (1j * k0 * r1)))* (-r_coord[0]/r1)-\\\n (np.exp(-1j * k0 * r2) / r2) *\\\n (1 + (1 / (1j * k0 * r2))) * (-r_coord[0]/r2) - ux_scat\n if compute_uy:\n uy_scat = insitu_cpp._bemflush_uyscat(r_coord, self.node_x, self.node_y,\n self.Nzeta, self.Nweights.T, k0, self.beta[jf], self.p_surface[:,jf])\n uy_rec[jrec, jf] = (np.exp(-1j * k0 * r1) / r1)*\\\n (1 + (1 / (1j * k0 * r1)))* (-r_coord[1]/r1)-\\\n (np.exp(-1j * k0 * r2) / r2) *\\\n (1 + (1 / (1j * k0 * r2))) * (-r_coord[1]/r2) - uy_scat\n # Progress bar stuff\n bar.update(1)\n bar.close()\n self.uz_s.append(uz_rec)\n if compute_ux:\n self.ux_s.append(ux_rec)\n if compute_uy:\n self.uy_s.append(uy_rec)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Function that makes a quiver plot of (U, V) at points (X, Y).
|
def quiver_plot(X, Y, U, V, plot_title):
plt.figure()
plt.title(plot_title, fontname = "serif", fontsize = 17)
plt.xlabel("x [m]", fontname = "serif", fontsize = 12)
plt.ylabel("y [m]", fontname = "serif", fontsize = 12)
    # Subsample every 4th grid point so the arrows remain legible.
    Q = plt.quiver(X[::4, ::4], Y[::4, ::4], U[::4, ::4], V[::4, ::4],
        units = "xy", scale = 0.002, scale_units = "inches")
    # Reference arrow drawn in the upper-right corner of the figure.
    qk = plt.quiverkey(Q, 0.9, 0.9, 0.001, "0.1 m/s",
        labelpos = "E", coordinates = "figure")
|
[
"def quiver(\n ds: Dataset,\n x: Hashable,\n y: Hashable,\n ax: Axes,\n u: Hashable,\n v: Hashable,\n **kwargs: Any,\n) -> Quiver:\n import matplotlib as mpl\n\n if x is None or y is None or u is None or v is None:\n raise ValueError(\"Must specify x, y, u, v for quiver plots.\")\n\n dx, dy, du, dv = broadcast(ds[x], ds[y], ds[u], ds[v])\n\n args = [dx.values, dy.values, du.values, dv.values]\n hue = kwargs.pop(\"hue\")\n cmap_params = kwargs.pop(\"cmap_params\")\n\n if hue:\n args.append(ds[hue].values)\n\n # TODO: Fix this by always returning a norm with vmin, vmax in cmap_params\n if not cmap_params[\"norm\"]:\n cmap_params[\"norm\"] = mpl.colors.Normalize(\n cmap_params.pop(\"vmin\"), cmap_params.pop(\"vmax\")\n )\n\n kwargs.pop(\"hue_style\")\n kwargs.setdefault(\"pivot\", \"middle\")\n hdl = ax.quiver(*args, **kwargs, **cmap_params)\n return hdl",
"def quiver(x, v, ax=None, **kwargs):\n plt = _import_pyplot()\n # multiple axes ?\n try:\n fields = [quiver(x, v, i, **kwargs) for i in ax]\n return fields\n except:\n pass\n\n if not ax:\n ax = plt.gca()\n\n dim = dimension(x)\n\n if dim < 2:\n raise Exception('ndim < 2')\n elif dim < 3:\n h = ax.quiver(x[0, :], x[1, :],\n v[0, :], v[1, :], **kwargs)\n else:\n raise NotImplementedError\n\n from mayavi.mlab import quiver3d\n\n if ax:\n print('axes arg ignored, mayavi used')\n\n h = quiver3d(x[0, :], x[1, :], x[2, :],\n v[0, :], v[1, :], v[2, :], **kwargs)\n\n if dim > 3:\n warn('quiver:ndim #dimensions > 3,' +\n 'plotting only 3D component.')\n\n return h",
"def visualize_vector_transverse(vectors, points, axes=None, O=GCS):\n assert all(isinstance(v, vec.Vector) for v in vectors), 'ks must be a list of {} instances.'.format(vec.Vector)\n assert all(isinstance(p, vec.Point) for p in points), 'ks must be a list of {} instances.'.format(vec.Point)\n assert len(vectors) == len(points), 'len(ks) must equal to len(points)'\n assert axes is None or isinstance(axes, plt.Axes), 'axes must be None or an instance of {}.'.format(plt.Axes)\n assert isinstance(O, vec.CoordSys3D), 'O must be an instance of {}'.format(vec.CoordSys3D)\n\n vecs_np = vectors_to_ndarray(vectors, O)\n points_np = points_to_ndarray(points, O)\n\n if axes is None:\n fig, axes = plt.subplots()\n plt.sca(axes)\n\n x = points_np[:, 0]\n y = points_np[:,1]\n u = vecs_np[:,0]\n v = vecs_np[:,1]\n\n axes.set_xlim(np.min(x)-1, np.max(x)+1)\n axes.set_ylim(np.min(y)-1, np.max(y)+1)\n\n Q = plt.quiver(x, y, u, v)\n\n plt.show()\n\n return Q, axes",
"def svdVisualize(self):\n \n (U,S,V) = numpy.linalg.svd(numpy.dot(self.X_mapped.T,self.X_mapped)/self.m)\n Z = numpy.zeros((self.m,2))\n Z[:,0] = numpy.dot(self.X_mapped,U[:,0])\n Z[:,1] = numpy.dot(self.X_mapped,U[:,1])\n # plot projected data for visualization\n colors = map(lambda x: 'r' if x else 'b', self.y)\n plt.scatter(Z[:,0],Z[:,1],20,colors)\n plt.show()",
"def plot_meancontquiv(self, save=False, show=False, savedir=\"Figures\",\n savetype=\".pdf\", cb_orientation=\"vertical\",\n newfig=True):\n if newfig:\n plt.figure(figsize=(7.5, 2.625))\n # Add contours of mean velocity\n cs = plt.contourf(self.y_R, self.z_H, self.df.mean_u/self.U_infty,\n np.arange(0.15, 1.25, 0.05), cmap=plt.cm.coolwarm)\n if cb_orientation == \"horizontal\":\n cb = plt.colorbar(cs, shrink=1, extend=\"both\",\n orientation=\"horizontal\", pad=0.14)\n elif cb_orientation == \"vertical\":\n cb = plt.colorbar(cs, shrink=0.83, extend=\"both\",\n orientation=\"vertical\", pad=0.02)\n cb.set_label(r\"$U/U_{\\infty}$\")\n plt.hold(True)\n # Make quiver plot of v and w velocities\n Q = plt.quiver(self.y_R, self.z_H, self.df.mean_v/self.U_infty,\n self.df.mean_w/self.U_infty, width=0.0022, scale=3,\n edgecolor=\"none\")\n plt.xlabel(r\"$y/R$\")\n plt.ylabel(r\"$z/H$\")\n plt.ylim(-0.2, 0.78)\n plt.xlim(-3.2, 3.2)\n if cb_orientation == \"horizontal\":\n plt.quiverkey(Q, 0.65, 0.26, 0.1, r\"$0.1 U_\\infty$\",\n labelpos=\"E\", coordinates=\"figure\")\n elif cb_orientation == \"vertical\":\n plt.quiverkey(Q, 0.65, 0.088, 0.1, r\"$0.1 U_\\infty$\",\n labelpos=\"E\", coordinates=\"figure\",\n fontproperties={\"size\": \"small\"})\n self.turb_lines()\n ax = plt.axes()\n ax.set_aspect(2)\n plt.yticks([0, 0.13, 0.25, 0.38, 0.5, 0.63])\n plt.tight_layout()\n if save:\n label = str(self.U_infty).replace(\".\", \"\")\n plt.savefig(savedir+\"/meancontquiv_{}{}\".format(label, savetype))\n if show:\n self.show()",
"def make_vplot(args):\n if not args.out:\n args.out = '.'.join(os.path.basename(args.bed).split('.')[0:-1])\n chunks = ChunkList.read(args.bed, strand_col = args.strand)\n sets = chunks.split(items = min(args.cores*20,len(chunks)))\n params = _VplotParams(flank = args.flank, lower = args.lower, upper = args.upper, bam = args.bam,\n atac = args.atac, scale = args.scale)\n pool = Pool(processes = args.cores)\n tmp = pool.map(_vplotHelper, zip(sets,itertools.repeat(params)))\n pool.close()\n pool.join()\n result = sum(tmp)\n ##Turn matrix into VMat object\n vmat=V.VMat(result,args.lower,args.upper)\n if not args.no_plot:\n vmat.plot(filename=args.out+\".Vplot.eps\")\n if args.plot_extra:\n ##get insertion profile represented by vplot\n vmat.converto1d()\n vmat.plot_1d(filename=args.out+'.InsertionProfile.eps')\n #get insert size dstribution represented by vplot\n vmat.plot_insertsize(filename= args.out + \".InsertSizes.eps\")\n ##save\n vmat.save(args.out+\".VMat\")",
"def vector(f: Callable[[float, float], Tuple[float, float]], x_min: float,\r\n x_max: float, y_min: float=None, y_max: float=None, grid=True,\r\n title: str=None, show=True, equal_aspect=False, stream=False,\r\n resolution: int=17, style: str=DEFAULT_STYLE) -> None:\r\n if not y_min:\r\n y_min = x_min\r\n if not y_max:\r\n y_max = x_max\r\n\r\n x, y, (i, j) = _two_in_one_out_helper(f, x_min, x_max, y_min, y_max, resolution)\r\n vec_len = (i**2 + j**2)**.5 # For color coding\r\n\r\n # Style seems to require a reset, or some properties from previous styles stick.\r\n plt.style.use('classic')\r\n plt.style.use(style) # style must be set before setting fix, ax.\r\n fig, ax = plt.subplots()\r\n\r\n if stream:\r\n ax.streamplot(x, y, i, j, color=vec_len, cmap=COLORMAP_PRIORITY[0])\r\n else:\r\n ax.quiver(x, y, i, j, vec_len, width=.003, minshaft=3, cmap=COLORMAP_PRIORITY[0])\r\n\r\n _set_misc(fig, ax, title, grid, equal_aspect)\r\n return _show_or_return(ax, show)",
"def vectorfield(xs,ys,fs,**kw):\n length = kw.pop('length') if 'length' in kw else 1\n x, y = np.meshgrid(xs, ys)\n # calculate vector field\n vx,vy=fs(x,y)\n # plot vecor field\n norm = length \n plt.quiver(x, y, vx * norm, vy * norm, angles='xy',**kw)",
"def ShowEigenVectors(v):\n plt.figure(1)\n plt.clf()\n for i in xrange(v.shape[1]):\n plt.subplot(1, v.shape[1], i+1)\n plt.imshow(v[:, v.shape[1] - i - 1].reshape(16, 16).T,\n cmap=plt.cm.gray)\n plt.draw()\n raw_input('Press Enter.')",
"def _plt_vector(self, origin, vector, color='b', label=None) -> None:\n assert len(origin) == 3, 'origin must contain 3 coordinates'\n assert len(vector) == 3, 'vector must contain 3 coordinates'\n assert all(isinstance(elem, (float, int)) for elem in origin), 'origin elements must be numbers'\n assert all(isinstance(elem, (float, int)) for elem in vector), 'vector elements must be numbers'\n\n self._ax.quiver(*origin, *vector, color=color, label=label)",
"def plotHV_IV():\n global current_directory, folder_name\n plt.clf()\n # plt.plot(data['index'], data['iv_from_dataset'], label = 'iv_data', color = 'orange')\n plt.plot(data['index'], data['implied_volatility'], label = 'impl_volatility', color = 'orange')\n plt.plot(data['index'], data['historical_volatility'], label = 'hist_volatility', color = 'blue')\n plt.legend(loc = 'best')\n plt.xlabel('index')\n plt.ylabel('volatility in decimal')\n plt.savefig(current_directory + '/output/{}/graphs/iv_vs_hv.svg'.format(folder_name), format = 'svg', dpi = 1200)\n # plt.show()",
"def XyToVtu(x, y):\n \n ugrid = vtk.vtkUnstructuredGrid()\n \n # Add the points\n points = vtk.vtkPoints()\n points.SetDataTypeToDouble()\n xyToNode = [[] for i in range(len(x))]\n index = 0\n for i, xCoord in enumerate(x):\n for yCoord in y:\n points.InsertNextPoint(xCoord, yCoord, 0.0)\n xyToNode[i].append(index)\n index += 1\n ugrid.SetPoints(points)\n \n # Add the volume elements\n for i, xCoord in enumerate(x[:-1]):\n for j, yCoord in enumerate(y[:-1]):\n idList = vtk.vtkIdList()\n idList.InsertNextId(xyToNode[i][j])\n idList.InsertNextId(xyToNode[i + 1][j])\n idList.InsertNextId(xyToNode[i + 1][j + 1])\n idList.InsertNextId(xyToNode[i][j + 1])\n ugrid.InsertNextCell(VTK_QUAD, idList)\n \n # Surface elements are not currently added\n \n # Construct the vtu\n result = vtu()\n result.ugrid = ugrid\n \n return result",
"def stick_plot(time, u, v, **kw):\n\n # Read keyword arguments\n width = kw.pop('width', 0.002)\n headwidth = kw.pop('headwidth', 0)\n headlength = kw.pop('headlength', 0)\n headaxislength = kw.pop('headaxislength', 0)\n angles = kw.pop('angles', 'uv')\n ax = kw.pop('ax', None)\n ref = kw.pop('ref',1)\n units = kw.pop('units',r\"$m s^{-1}$\")\n \n if angles != 'uv':\n raise AssertionError(\"Stickplot angles must be 'uv' so that\"\n \"if *U*==*V* the angle of the arrow on\"\n \"the plot is 45 degrees CCW from the *x*-axis.\")\n\n time, u, v = map(_np.asanyarray, (time, u, v))\n if not ax:\n fig, ax = _plt.subplots()\n \n q = ax.quiver(_date2num(time), [[0]*len(time)], u, v,\n angles='uv', width=width, headwidth=headwidth,\n headlength=headlength, headaxislength=headaxislength,\n **kw)\n\n ax.axes.get_yaxis().set_visible(False)\n ax.xaxis_date()\n\n qk = ax.quiverkey(q, 0.1, 0.85, ref,\n _np.str(ref) + ' ' + units,\n labelpos='N', coordinates='axes') \n \n return q,qk,ax",
"def add_quiverset(\n self, x: np.ndarray, v: np.ndarray, color=None, alpha=1.0, label=None\n ):\n if label is None:\n label = \"arrows %d\" % (self.num_quiversets() + 1)\n # TODO: allow creation by passing only x and a function that computes v from x\n qs = QuiverData(x, v, color, alpha, label)\n assert_num_dims(qs.num_dims, self.num_dims)\n self.quiver_sets += [qs]",
"def visualize(X, Y):\n plt.plot(X, Y, \"bx\")\n plt.show()",
"def plot_uv_track(bu, bv, outname=None, show=True):\n _, ax = plt.subplots(1, 1, figsize=(8, 8))\n for i in range(bu.shape[0]):\n ax.plot(bu[i, :], bv[i, :])\n ax.set_xlim(-1500, 1500)\n ax.set_ylim(-1500, 1500)\n ax.text(-1200, 1200, \"UV Coverage\")\n ax.set_xlabel(\"$u$ (m)\")\n ax.set_ylabel(\"$v$ (m)\")\n if outname is not None:\n plt.savefig(f\"{outname}_uv.png\")\n if not show:\n plt.close()",
"def fplot_vector(v, figsize=(10,10)):\n fig = Figure(figsize=figsize)\n ax1 = fig.add_subplot(1, 1, 1)\n ax1.plot(v)\n return fig",
"def plot_stress_vector(self, sigxs, sigys, title, cmap, normalize, **kwargs):\n\n # create plot and setup the plot\n with post.plotting_context(title=title, **kwargs) as (fig, ax):\n # set up the colormap\n cmap = cm.get_cmap(name=cmap)\n\n # initialise quiver plot list max scale\n quiv_list = []\n max_scale = 0\n\n norm = None\n if normalize:\n norm = CenteredNorm()\n\n # plot the vectors\n for (i, sigx) in enumerate(sigxs):\n sigy = sigys[i]\n\n # scale the colour with respect to the magnitude of the vector\n c = np.hypot(sigx, sigy)\n\n quiv = ax.quiver(\n self.section.mesh_nodes[:, 0],\n self.section.mesh_nodes[:, 1],\n sigx,\n sigy,\n c,\n cmap=cmap,\n norm=norm,\n )\n\n # get the scale and store the max value\n quiv._init()\n max_scale = max(max_scale, quiv.scale)\n quiv_list.append(quiv)\n\n # update the colormap values\n if i == 0:\n c_min = min(c)\n c_max = max(c)\n else:\n c_min = min(c_min, min(c))\n c_max = max(c_max, max(c))\n\n # apply the scale\n for quiv_plot in quiv_list:\n quiv_plot.scale = max_scale\n\n # apply the colourbar\n v1 = np.linspace(c_min, c_max, 15, endpoint=True)\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.1)\n\n fig.colorbar(quiv, label=\"Stress\", format=\"%.4e\", ticks=v1, cax=cax)\n\n # plot the finite element mesh\n self.section.plot_mesh(materials=False, **dict(kwargs, ax=ax))\n\n return ax",
"def quiver_plot_of_predictions(\n self, dim_reducer=None, dimensions_to_keep=(0, 1),\n aggregator='mean',\n n_points_each_dim=30,\n color_values='speed', colorbar_label='speed',\n ax=None, save_fig=None,\n mask_velocity_to_convex_hull_of_data=True,\n axis_labels_dict=None,\n **subplots_kws):\n mask_to_convex_hull = mask_velocity_to_convex_hull_of_data\n grid_points, velocities = self.compute_predictions_on_grid(\n n_points_each_dim=n_points_each_dim,\n dim_reducer=dim_reducer,\n mask_velocity_to_convex_hull_of_data=mask_to_convex_hull)\n grid_points, velocities = (\n aggregate_dimensions_of_grid_points_and_velocities(\n grid_points, velocities, dimensions_to_keep,\n aggregator=aggregator))\n return vis_model.quiver_plot(\n *grid_points, *[v.T for v in velocities],\n **make_axis_labels(axis_labels_dict, dimensions_to_keep),\n color_values=color_values,\n colorbar_label=colorbar_label, ax=ax,\n save_fig=save_fig,\n **subplots_kws)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Function that generates a Hovmöller diagram of eta as a function of x and t at a chosen y-coordinate
|
def hovmuller_plot(x, t, eta):
X, T = np.meshgrid(x, np.array(t))
X = np.transpose(X) # Transpose for plotting
T = np.transpose(T) # Transpose for plotting
eta_hm = np.transpose(np.array(eta)) # Transpose for plotting
plt.figure(figsize = (5, 8))
    # Hovmöller diagram: shade eta over x (horizontal axis) and t (vertical axis).
    plt.pcolormesh(X, T, eta_hm, vmin = eta_hm.min(), vmax = eta_hm.max(), cmap = plt.cm.PiYG)
plt.colorbar(orientation = "vertical")
plt.title("x-t plot for middle of domain", fontname = "serif", fontsize = 17)
plt.xlabel("x [m]", fontname = "serif", fontsize = 12)
plt.ylabel("t [s]", fontname = "serif", fontsize = 12)
|
[
"def h( self , x , u , t ):\n \n #y = np.zeros(self.p) # Output vector\n \n y = self.sys.h( x , self.sys.ubar , t )\n \n return y",
"def y_exact(t):\n return -epsilon*cos(omega*t)",
"def HenonHeiles_Hamiltonian(t, u):\n points_positions = u.T[:2]\n points_momenta = u.T[2:4]\n x, y = points_positions\n p_x, p_y = points_momenta\n # Potential energy function\n H = 0.5*(x**2 + y**2) + (y * x**2) - (1/3)*y**3 + 0.5*(p_x**2 + p_y**2)\n return H",
"def f(self,t,y):\n return -self.lambd*y + 2*scipy.ones_like(y)*scipy.exp(-t)*scipy.cos(2*t)",
"def calculate_height(v0, t, a=-g, y0 = 0):\n return y0 + v0*t + 1/2*a*t**2",
"def euler_step(t, y, h, f):\n tp = t + h\n yp = y + h * f(t, y)\n evals = 1\n return tp, yp, evals",
"def euler_method(t, f_y_t, y0, vin):\n \n y = np.zeros((len(y0), len(t)+1))\n dt = t[1]-t[0]\n print(y.shape)\n y[:,0] = y0\n \n\n \n for index, tn in enumerate(t):\n \n y[:,index+1] = dt * (f_y_t(tn, y[:,index], dt)) + y[:,index]\n \n return y[:,:len(t)]",
"def energy_level_plot(qbt):\n pl=Plotter(fig_width=9.0, fig_height=6.0)\n EjdivEc=linspace(0.1, 300, 3000)\n Ej=EjdivEc*qbt.Ec\n E0, E1, E2=qbt._get_transmon_energy_levels(Ej=Ej, n_energy=3)\n line(EjdivEc, (E0+Ej)/h/1e9, plotter=pl, linestyle=\"dashed\", linewidth=1.0, color=\"blue\")\n line(EjdivEc, (E1+Ej)/h/1e9, plotter=pl, linestyle=\"dashed\", linewidth=1.0, color=\"red\")\n line(EjdivEc, (E2+Ej)/h/1e9, plotter=pl, linestyle=\"dashed\", linewidth=1.0, color=\"green\")\n\n Ec=qbt.Ec\n E0 = sqrt(8.0*Ej*Ec)*0.5 - Ec/4.0\n E1 = sqrt(8.0*Ej*Ec)*1.5 - (Ec/12.0)*(6.0+6.0+3.0)\n E2 = sqrt(8.0*Ej*Ec)*2.5 - (Ec/12.0)*(6.0*2**2+6.0*2+3.0)\n\n line(EjdivEc, E0/h/1e9, plotter=pl, linewidth=0.5, color=\"blue\")\n line(EjdivEc, E1/h/1e9, plotter=pl, linewidth=0.5, color=\"red\")\n line(EjdivEc, E2/h/1e9, plotter=pl, linewidth=0.5, color=\"green\")\n\n pl.xlabel=\"$E_j/E_c$\"\n pl.ylabel=\"Frequency (GHz)\"\n return pl",
"def calculate_hypotenuse(base, height):\n pass",
"def trapezoid(h,a,b):\n A = h * ((a+b)/2)\n return A",
"def contour_trajectory_plot():\n X = np.linspace(-2, 2, 100)\n Y = np.linspace(-1, 3, 100)\n [x, y] = np.meshgrid(X, Y)\n z = h(x, y)\n etas = [0.0001, 0.001, 0.005, 0.01]\n levels = np.linspace(z.min(), z.max(), 100)\n\n for eta in etas:\n x_list, y_list, h_list = gradient_descent(start_x=0, start_y=0, eta=eta, max_iter=1000000)\n plt.plot(x_list, y_list, c='r', label=str(eta), linewidth=2.0)\n plt.contourf(x,y,z, cmap=plt.cm.viridis, levels=levels)\n plt.title('$\\eta = {}$ n_steps = {}'.format(eta, len(h_list)))\n plt.xlabel('$x$')\n plt.ylabel('$y$')\n plt.colorbar(label='$h$')\n plt.savefig('eta_{}.png'.format(str(eta).replace('.', '_')))\n plt.show()",
"def plot_analytical(self, x_label = \"x / m\", y_label = \"Psi\", title = \"Analytical and Numerical solution to QHO\"):\r\n #find corresponding quantum number\r\n n = int(round((self.energy-1)/2))\r\n\r\n #solutions\r\n ys_analytical = H(n, self.times)*np.exp(-0.5* self.times**2)\r\n ys_numerical = np.transpose(self.y)[0]\r\n\r\n #normalize analytical solution\r\n if abs(ys_numerical[0]) > 1e-2:\r\n ys_analytical = ys_analytical*ys_numerical[0]/ys_analytical[0]\r\n elif abs(ys_numerical[100]) > 1e-2:\r\n ys_analytical = ys_analytical*ys_numerical[100]/ys_analytical[100]\r\n else:\r\n print(\"Everything is too close to zero to normalize properly\")\r\n\r\n #plot everything\r\n plt.plot(self.times, ys_analytical)\r\n plt.plot(self.times, ys_numerical)\r\n plt.xlabel(x_label)\r\n plt.ylabel(y_label)\r\n plt.title(title)\r\n plt.show()",
"def trapezoid_area(a,b,h):\n return 0.5*(a+b)*h",
"def HamSN1D_Hamiltonian(t, u):\n x, y = u.T\n return 0.5*y*y + x**3/3 + 0.5*x*x",
"def compute_xyz_from_tth_eta(tth, eta, omega,\n t_x=0.0, t_y=0.0, t_z=0.0,\n # == phi at chi=90\n wedge=0.0, # Wedge == theta on 4circ\n chi=0.0, # == chi - 90\n **kwds): # last line is for laziness -\n # xyz = unit vectors along the scattered vectors\n xyz = np.zeros((3, tth.shape[0]), np.float)\n rtth = np.radians(tth)\n reta = np.radians(eta)\n xyz[0, :] = np.cos(rtth)\n # eta = np.degrees(np.arctan2(-s1[1, :], s1[2, :]))\n xyz[1, :] = -np.sin(rtth) * np.sin(reta)\n xyz[2, :] = np.sin(rtth) * np.cos(reta)\n\n # Find vectors in the fast, slow directions in the detector plane\n pks = np.array([(1, 0),\n (0, 1),\n (0, 0) ], np.float).T\n dxyzl = compute_xyz_lab(pks, **kwds)\n # == [xpos, ypos, zpos] shape (3,n)\n #\n # This was based on the recipe from Thomas in Acta Cryst ...\n # ... Modern Equations of ...\n\n ds = dxyzl[:,0] - dxyzl[:,2] # 1,0 in plane is (1,0)-(0,0)\n df = dxyzl[:,1] - dxyzl[:,2] # 0,1 in plane\n dO = dxyzl[:,2] # origin pixel\n\n # Cross products to get the detector normal\n # Thomas uses an inverse matrix, but then divides out the determinant anyway\n det_norm = np.cross( ds, df )\n\n # Scattered rays on detector normal\n norm = np.dot( det_norm, xyz )\n # Check for divide by zero\n msk = (norm == 0)\n needmask = False\n if msk.sum()>0:\n norm += msk\n needmask = True\n\n # Intersect ray on detector plane\n sc = np.dot( np.cross( df, dO ), xyz ) / norm\n fc = np.dot( np.cross( dO, ds ), xyz ) / norm\n\n if (t_x != 0) or (t_y != 0) or (t_z != 0):\n go = compute_grain_origins(omega,\n wedge=wedge, chi=chi,\n t_x=t_x, t_y=t_y, t_z=t_z)\n # project these onto the detector face to give shifts\n sct = ( xyz * np.cross( df, go.T ).T ).sum(axis=0) / norm\n fct = ( xyz * np.cross( go.T, ds ).T ).sum(axis=0) / norm\n sc -= sct\n fc -= fct\n\n if needmask:\n fc = np.where( msk, 0, fc )\n sc = np.where( msk, 0, sc )\n\n return fc, sc",
"def gen_NACAfoil(n,m,p,t):\n t = t/100.\n m = m/100.\n p = p/10.\n x = np.linspace(0,1,n)\n a0, a1, a2, a3, a4 = 0.2969, -0.126, -0.3516, 0.2843, -0.1036\n #thickness distribution\n yt = (t/0.2)*(a0*x**0.5 + a1*x + a2*x**2 + a3*x**3 + a4*x**4)\n yc = np.zeros((n,1))\n theta = np.zeros((n,1))\n #camber distribution\n for i,ix in enumerate(x):\n if 0 <= ix < p:\n yc[i] = (m/p**2)*(2*p*ix - ix**2)\n dyc = (2*m/p**2)*(p-ix)\n theta[i] = np.arctan(dyc)\n elif p <= ix <= 1:\n yc[i] = (m/(1-p)**2)*(1 - 2*p + 2*p*ix - ix**2)\n dyc = (2*m/(1-p)**2)*(p - ix)\n theta[i] = np.arctan(dyc)\n pos = np.zeros((n*2,2))\n j = n\n for i in range(n):\n pos[i, 0] = x[i] - yt[i] * np.sin(theta[i])\n pos[i, 1] = yc[i] + yt[i] * np.cos(theta[i])\n pos[j, 0] = x[i] + yt[i] * np.sin(theta[i])\n pos[j, 1] = yc[i] - yt[i] * np.cos(theta[i])\n j += 1\n pos = np.delete(pos, n, 0)\n pos[:n, :] = pos[n - 1::-1, :]\n return pos, yc",
"def get_phi(x, y, xa, ya):\n return np.exp(-alpha*(x - xa)**2 - alpha*(y - ya)**2)",
"def traj2(t,y):\r\n\r\n\r\n x=y[0]\r\n ay=y[1]\r\n V=y[2]\r\n gamma=y[3]\r\n m=y[4]\r\n\r\n lr=y[8]\r\n lg=y[9]\r\n lv=y[10]\r\n\r\n\r\n sa=-lg/(V*sqrt((lg/V)**2+lv**2))\r\n ca=-lv/sqrt((lg/V)**2+lv**2)\r\n\r\n\r\n g=Ue/(ay+Re)**2\r\n TM=T/m\r\n r=ay+Re\r\n \r\n dx=V*cos(gamma)\r\n dy=V*sin(gamma)\r\n dV=TM*ca-g*sin(gamma)\r\n dgamma=TM*sa/V+(V/r-g/V)*cos(gamma)\r\n dm=-T/ISP/g0\r\n dvg=g*sin(gamma)\r\n dD=0\r\n dva=TM-TM*ca\r\n\r\n dlr=V*lg*cos(gamma)/r**2-(2*Ue*lv*sin(gamma)+2*Ue*lg*cos(gamma)/V)/r**3\r\n dlg=-lr*cos(gamma)*V+Ue*lv*cos(gamma)/r**2+lg*sin(gamma)*(V/r-Ue/(r**2*V))\r\n dlv=-lr*sin(gamma)-lg*(cos(gamma)*(1/r+Ue/((r**2)*(V**2)))-TM/V**2*sa)\r\n\r\n #print(dlr,dlv,dlg)\r\n \r\n return [dx,dy,dV,dgamma,dm,dvg,dD,dva,dlr,dlg,dlv]",
"def y(self, t, n):\n s = self.s\n if n == 0:\n # eq. A.3\n y = np.tanh(2*(2*t - 1) / ((4*t*(1 - t))**s))\n elif n == 1:\n # eq. A.5\n y = self.a(t, 2)*(1 - self.y(t, 0)**2)\n else:\n # eq. A.7\n y = sum(sp.special.binom(n - 1, k)*self.a(t, k + 2)*self.z(t, n - 1 - k) for k in range(0, n))\n return y"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Function that takes a signal and its corresponding time array, then plots the time series together with its Fourier transform.
|
def plot_time_series_and_ft(t, signal):
t = np.array(t)
signal = np.array(signal)
# Plotting the time series.
plt.figure(figsize = (8, 7))
plt.subplot(2, 1, 1)
plt.plot(t, signal, linewidth = 2)
plt.title("Time series of $\eta$ at center of domain", fontname = "serif", fontsize = 17)
plt.xlabel("t [s]", fontname = "serif", fontsize = 12)
plt.ylabel("$\eta$ [m]", fontname = "serif", fontsize = 12)
# Plotting the Fourier transform of the time series (calling homemade ft).
freq, spectrum = ft.fourier_transform(signal, len(signal), len(signal)*np.diff(t)[1])
plt.subplot(2, 1, 2)
plt.plot(freq, spectrum, linewidth = 2)
plt.title("Fourier transformed signal", fontname = "serif", fontsize = 17)
plt.xlabel("Frequency [Hz]", fontname = "serif", fontsize = 12)
plt.ylabel("Amplitude", fontname = "serif", fontsize = 12)
plt.tight_layout()
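A minimal usage sketch, assuming the homemade ft module referenced in the comment above is importable and that t is uniformly sampled; the sine signal is purely illustrative.

import numpy as np
import matplotlib.pyplot as plt

# Hypothetical signal: a 0.05 Hz surface-elevation oscillation sampled every 2 s for 400 s.
t = np.arange(0.0, 400.0, 2.0)
signal = 0.1 * np.sin(2 * np.pi * 0.05 * t)

plot_time_series_and_ft(t, signal)
plt.show()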
|
[
"def plot_inverse_fourier_transform(fs, wave, time, title):\n plt.figure(num=title+\" - \"+filename[:-4], figsize=(8, 5))\n plt.plot(time, wave, color=\"blue\", label=\"ifft(t)\")\n plt.legend(loc=1)\n plt.xlim(time[0], time[-1])\n plt.xlabel('Time (s)')\n plt.ylabel('ifft(t)')\n plt.title(title)",
"def plot_audio_signal(fs, wave, time, title):\n plt.figure(num=title+\" - \"+filename[:-4], figsize=(8, 5))\n plt.plot(time, wave, color=\"blue\", label=\"f(t)\")\n plt.legend(loc=1)\n plt.xlim(time[0], time[-1])\n plt.xlabel('Time (s)')\n plt.ylabel('f(t)')\n plt.title(title)",
"def signal_plot(t, y, **kwargs):\n\n\n fun = kwargs['vin']\n\n plt.figure(figsize=kwargs['figsize'])\n (plt.plot(t, fun(t), 'r', linewidth = 2, label = 'Input'),\n plt.plot(t, y[1].T, 'b', linewidth = 2, label = \"Out \"),\n plt.plot(t, y[0].T*0.2, 'orange', linewidth = 2, label = 'Change in S (Scaled 1 to 0.2)'),\n plt.xlabel('Time [s]'), plt.ylabel('Out [Adm]'),\n plt.title('Dynamic System Evolution'),\n plt.grid(), plt.legend(), plt.axis([0,np.max(t)*1.10, np.min(y*0.2)*1.1, np.max(y*0.2)*1.1]),\n plt.show())",
"def time_plot(speaker_signal, mic_signal):\n fig, axis = plt.subplots(2, 1)\n fig.set_figheight(10)\n fig.set_figwidth(10)\n plt.suptitle(\"Time domain visualization\")\n axis[0].plot(speaker_signal[0], speaker_signal[1])\n axis[0].set(title='Emitted signal', xlabel=\"Time (s)\", ylabel=\"Intensity\")\n axis[1].plot(mic_signal[0], mic_signal[1])\n axis[1].set(title='Acquired signal', xlabel=\"Time (s)\", ylabel=\"Intensity\")\n plt.tight_layout()\n plt.show()",
"def fplot_signal_vs_time_mus(signal,\n t_min = 0,\n t_max = 1200,\n signal_min = 0,\n signal_max = 200,\n figsize=(10,10)):\n fig = Figure(figsize=figsize)\n tstep = 25 # in ns\n PMTWL = signal.shape[0]\n signal_t = np.arange(0., PMTWL * tstep, tstep)/units.mus\n ax1 = fig.add_subplot(1, 1, 1)\n ax1.set_xlim([t_min, t_max])\n ax1.set_ylim([signal_min, signal_max])\n set_plot_labels(xlabel = \"t (mus)\",\n ylabel = \"signal (pes/adc)\")\n ax1.plot(signal_t, signal)\n return fig",
"def plotFFT(func,t_range=(0,2*pi),points=128,tol=1e-5,\n func_name=None,unwrap_=True,wlim=(-10,10),scatter_size=40,\n iff=False, plot=True, window=False):\n \n # default name for function\n if func_name == None:\n func_name = func.__name__\n \n # time points to sample\n t = linspace(*t_range,points+1)[:-1]\n T = t_range[1]-t_range[0]\n samplingfreq = points/T\n \n y = func(t)\n if iff:\n y[0]=0\n y = fftshift(y)\n \n # corresponding frequencies of the sampled signal\n w = linspace(-pi,pi,points+1)[:-1]\n w = w*samplingfreq\n \n # find fft\n if window:\n wnd = fftshift(hamming(arange(points)))\n y = y*wnd\n Y = fftshift( fft(y))/points\n \n if not plot:return w,Y\n # get phase\n ph = angle(Y)\n if unwrap_:\n ph = unwrap(ph)\n \n # get mag\n mag = abs(Y)\n \n # clean up phase where mag is sufficiently close to 0\n ph[where(mag<tol)]=0\n \n # plot \n fig,axes = subplots(1,2)\n ax1,ax2 = axes\n \n # magnitude\n ax1.set_title(\"Magnitude of DFT of {}\".format(func_name))\n ax1.set_xlabel(\"Frequency in rad/s\")\n ax1.set_ylabel(\"Magnitude\")\n ax1.plot(w,mag,color='red')\n ax1.scatter(w,mag,color='red',s=scatter_size)\n ax1.set_xlim(wlim)\n ax1.grid()\n \n # phase\n ax2.set_title(\"Phase of DFT of {}\".format(func_name))\n ax2.set_xlabel(\"Frequency in rad/s\")\n ax2.set_ylabel(\"Phase in rad\")\n ax2.plot(w,ph,color='green')\n ax2.scatter(w,ph,color='green',s=scatter_size)\n ax2.set_xlim(wlim)\n ax2.grid()\n \n show()\n return w,Y",
"def y_plot(traj, time_array, dim=2):\n y_traj = traj[:,1]\n\n fig, ax = plt.subplots()\n\n ax.plot(time_array[:len(traj)], y_traj, 'r', label='y')\n\n legend = ax.legend(shadow=True, fontsize='x-large')\n legend.get_frame().set_facecolor('#FFFFFF')\n\n plt.title(\"y vs. time\")\n plt.xlabel(\"time (sec)\")\n plt.show()",
"def PlotFftCompare(Sinal, Sinal_Filtered, dt):\r\n pylab.figure();\r\n pylab.subplot(3, 1, 1) # all togueter 3 in the same graph\r\n pylab.plot(Sinal_Filtered);\r\n pylab.plot(Sinal);\r\n pylab.xlabel('Time')\r\n pylab.subplot(3, 1, 2)\r\n PlotFftAbs(Sinal_Filtered, dt);\r\n PlotFftAbs(Sinal, dt);\r\n # I dont understand this phase here anyway..\r\n pylab.subplot(3, 1, 3)\r\n PlotFftPhase(Sinal_Filtered, dt);\r\n PlotFftPhase(Sinal, dt);",
"def grafFourier(Sas , x , nfr , Nfig):\n#\n plt.figure(Nfig)\n plt.plot(x,Sas)\n plt.grid()\n plt.xlabel('Frecuencia (Hz)')\n plt.ylabel('Amplitud')\n#\n return",
"def plot_formant(signal, width, step, fs, nb=4):\n formant = formants(signal, width, step, fs, nb)\n axis = get_timeAxis(1/(step*1e-3), formant[:, 0])\n plt.figure(figsize=(12, 7))\n plt.title('Formants')\n for i in range(formant.shape[1]):\n plt.plot(axis, formant[:, i], label=f'f{i+1}')\n plt.legend()\n plt.xlabel('Time (s)')\n plt.ylabel('Frequency (Hz)')\n plt.grid()\n plt.margins(x=0)\n plt.show()",
"def plot_FFT_spectrum(self, ts = None, log = True, vlines = np.arange(1,11), fname = None):\n\n import matplotlib.pyplot as plt\n\n delta = self.time[1] - self.time[0]\n if delta == 1:\n # daily time series\n fs = 1./86400 # Hz\n elif abs(delta - 30) < 3.0:\n # monthly time series\n fs = 1./2.628e+6\n elif abs(delta - 365) < 2.0:\n # yearly time series\n fs = 1./3.154e+7\n\n plt.figure(figsize = (15,7))\n plt.gca().spines['top'].set_visible(False)\n plt.gca().spines['right'].set_visible(False)\n plt.gca().spines['left'].set_visible(False)\n ts = ts if ts is not None else self.data.copy()\n if isinstance(ts, list):\n ts = np.array(ts).T\n if ts.ndim > 2:\n ts = ts.reshape([ts.shape[0], np.prod(ts.shape[1:])])\n fft = np.abs(np.fft.rfft(ts, axis = 0))\n freqs = np.fft.rfftfreq(ts.shape[0], d = 1./fs)\n freqs *= 3.154e+7\n if log:\n plt.semilogx(freqs, 20*np.log10(fft), linewidth = 0.8) # in dB hopefully...\n plt.xlabel('FREQUENCY [log 1/year]', size = 25)\n else:\n plt.plot(freqs, 20*np.log10(fft), linewidth = 0.8)\n plt.xlabel('FREQUENCY [1/year]', size = 25)\n for vline in vlines:\n plt.axvline(1./vline, 0, 1, linestyle = ':',linewidth = 0.6, color = \"#333333\")\n plt.xlim([freqs[0], freqs[-1]])\n plt.ylabel('FFT SPECTRUM [dB]', size = 25)\n if fname is None:\n plt.show()\n else:\n plt.savefig(fname, bbox_inches = 'tight')",
"def alpha_plot(traj, time_array, dim=2):\n alpha_index = len(traj[0])/2 - 1\n alpha_traj = traj[:,alpha_index]\n\n fig, ax = plt.subplots()\n\n ax.plot(time_array[:len(traj)], alpha_traj, 'b--', label='alpha')\n\n legend = ax.legend(shadow=True, fontsize='x-large')\n legend.get_frame().set_facecolor('#FFFFFF')\n\n plt.title(\"alpha vs. time\")\n plt.xlabel(\"time (sec)\")\n plt.show()",
"def showft(sig_xt, ff_lim):\n # ***** Prepare x(t), swap pos/neg parts of time axis *****\n N = len(sig_xt) # blocklength of DFT/FFT\n Fs = sig_xt.get_Fs() # sampling rate\n tt = sig_xt.timeAxis() # get time axis for x(t)\n ixp = np.where(tt>=0)[0] # indexes for t>=0\n ixn = np.where(tt<0)[0] # indexes for t<0\n xt = sig_xt.signal() # get x(t)\n xt = np.hstack((xt[ixp], xt[ixn]))\n # swap pos/neg time axis parts\n # ***** Compute X(f), make frequency axis *****\n Xf = np.fft.fft(xt)/float(Fs) # DFT/FFT of x(t),\n # scaled for X(f) approximation\n Df = Fs/float(N) # frequency resolution\n ff = Df*np.arange(N) # frequency axis [0...Fs)\n\n f1, f2, llim = ff_lim\n nff = ff.copy()\n nxf = Xf.copy()\n if f1 < 0:\n # swap freqs\n Clower = -N // 2 + (1 if N % 2 == 1 else 0)\n Cupper = N // 2 + (1 if N % 2 == 1 else 0)\n nff = (Fs / N) * np.arange(Clower, Cupper)\n\n assert(nxf.size == N)\n nxf = nxf[[((i + (N // 2 + (1 if N % 2 == 1 else 0))) % N) for i in range(N)]]\n idxs = np.where((nff <= f2) & (nff >= f1))[0]\n \n nff = nff[idxs]\n Xf = nxf\n\n # ***** Compute |X(f)|, arg[X(f)] *****\n absXf = np.abs(Xf)[idxs] # magnitude |X(f)|\n argXf = np.angle(Xf)[idxs] # phase arg[X(f)]\n\n if llim > 0:\n argXf[np.where(absXf < llim)] = 0\n elif llim < 0:\n mx = np.max(absXf)\n normalized = absXf / mx\n # mask apenas para não ter problemas com log10 de 0\n delete_mask = normalized < 10 ** ((llim - 1) / 20)\n normalized[delete_mask] = 1\n absXf = 20 * np.log10(normalized)\n argXf[np.where(absXf < llim)] = 0\n argXf[delete_mask] = 0\n absXf[delete_mask] = llim - 1\n absXf[np.where(absXf < llim)] = llim - 1\n\n # ***** Plot magnitude/phase *****\n f1 = plt.figure()\n af11 = f1.add_subplot(211)\n # af11.stem(nff, absXf, use_line_collection = True) # plot magnitude (linear)\n af11.plot(nff, absXf, '-b') # plot magnitude (linear)\n if llim < 0:\n af11.set_ylim([llim, 0])\n strt = 'FT Approximation, $F_s$={:.2f} Hz'.format(Fs)\n strt = strt + ', $N$={}'.format(N)\n strt = strt + ', $\\Delta_f$={:3.2f} Hz'.format(Df)\n af11.set_title(strt)\n if llim < 0:\n af11.set_ylabel('20$log_{10}(|X(f)|)$ [dB]')\n else:\n af11.set_ylabel('$|X(f)|$')\n af11.grid()\n af12 = f1.add_subplot(212)\n af12.plot(nff, (180/np.pi*argXf), '-b') # plot phase in degrees\n af12.set_yticks([-180, -100, 0, 100, 180])\n af12.set_ylabel('$\\\\angle X(f)$ [deg]')\n af12.set_xlabel('$f$ [Hz]')\n af12.grid()\n plt.tight_layout()\n plt.show()\n ## retornando valores do plot (para abs(Xf))\n return nff, absXf, argXf",
"def fplot_pmt_signals_vs_time_mus(pmt_signals,\n pmt_active,\n t_min = 0,\n t_max = 1200,\n signal_min = 0,\n signal_max = 200,\n figsize=(10,10)):\n\n tstep = 25\n PMTWL = pmt_signals[0].shape[0]\n signal_t = np.arange(0., PMTWL * tstep, tstep)/units.mus\n fig = Figure(figsize=figsize)\n\n for j, i in enumerate(pmt_active):\n ax1 = fig.add_subplot(3, 4, j+1)\n ax1.set_xlim([t_min, t_max])\n ax1.set_ylim([signal_min, signal_max])\n set_plot_labels(xlabel = \"t (mus)\",\n ylabel = \"signal (pes/adc)\")\n\n ax1.plot(signal_t, pmt_signals[i])\n\n return fig",
"def plot_fft(frame: np.ndarray, sample_rate: int):\n frame_len = len(frame)\n y_f = scipy.fftpack.fft(frame)\n x_f = np.linspace(0.0, (0.5*sample_rate), int(frame_len / 2))\n fig = px.line(\n x=x_f,\n y=2.0/frame_len * np.abs(y_f[:frame_len//2]),\n # xaxis_label=\"Frequência\",\n # yaxis_label=\"Magnitude\",\n title=\"Transformada de Fourier\",\n )\n fig.show()",
"def plot_fourier_transform(fft_freq, fft_wave, fs, title):\n plt.figure(num=title+\" - \"+filename[:-4], figsize=(8, 5))\n plt.plot(fft_freq, abs(fft_wave), color=\"blue\", label=\"|F(w)|\")\n plt.legend(loc=1)\n plt.xlabel('Frequency (Hz)')\n plt.ylabel('|F(w)|')\n plt.xlim(-fs/2, fs/2)\n plt.title(title)",
"def plot_sigs(start, end, signals):\n time_range = np.arange(start, end)\n\n fig, axs = plt.subplots(len(signals), sharex=True)\n axs = [axs] if len(signals) == 1 else axs\n\n for i, ax in enumerate(axs):\n ax.plot(time_range, signals[i][start:end], 'r-')\n\n plt.show()",
"def _day_plot(self, time_idx, x, title='', step=1, color='k',\n savefig=False):\n plt.figure()\n plt.rc('text', usetex=True)\n for ri, row in enumerate(x):\n current_step = ri * step\n y = x[ri, :]\n scaled = self._scale1(y)\n shifted = scaled + current_step\n if (x.shape[1] == len(time_idx)):\n plt.plot(time_idx, shifted, color)\n else:\n plt.plot(shifted, color)\n plt.ylim([-1, x.shape[0] * step])\n plt.ylabel('Channels')\n plt.xlabel('Time (s)')\n plt.title(title)\n plt.show()\n if savefig:\n plt.savefig('%s.pdf' % title)\n plt.close()",
"def plot_time_step(self):\n timestep = self.timestep\n fig = plt.figure(1)\n ax = plt.subplot(1, 1, 1)\n ax.plot(zip(*timestep)[0], zip(*timestep)[1], '-b.')\n ax.set_xlabel('Simulation Hour')\n ax.set_ylabel('Average time step in hour (minutes)')\n plt.show()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Read all ATL08 CSVs of a given year (output of extract_filter_atl08.py), optionally write them to a pickle file by year, and return a GeoDataFrame.
|
def atl08_io(
self,
atl08_csv_output_dir,
year_search, do_pickle=True,
filename_regex='ATL08*.csv'
):
dir_pickle = atl08_csv_output_dir
filename_regex = os.path.join(
atl08_csv_output_dir, year_search, filename_regex)
all_atl08_csvs = glob.glob(filename_regex, recursive=True)
if len(all_atl08_csvs) < 1:
logging.info(f"No ATL08 CSVs were found under {filename_regex}")
return
logging.info(f"Processing ATL08 CSV: {filename_regex}")
atl08_gdf = pd.concat(
(pd.read_csv(f) for f in all_atl08_csvs),
sort=False, ignore_index=True) # <--generator is (), list is []
atl08_gdf = gpd.GeoDataFrame(
atl08_gdf, geometry=gpd.points_from_xy(
atl08_gdf.lon, atl08_gdf.lat), crs='epsg:4326')
if do_pickle:
# Pickle the file
if year_search == "**":
year_search = 'allyears'
cur_time = time.strftime("%Y%m%d%H%M")
out_pickle_fn = os.path.join(
dir_pickle, f"atl08_{year_search}_filt_gdf_{cur_time}.pkl")
atl08_gdf.to_pickle(out_pickle_fn)
return atl08_gdf
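A minimal call sketch on a hypothetical instance of the owning class (here called proc); the directory path is a placeholder, and it assumes the CSVs contain the lon/lat columns used above.

# Read all 2020 ATL08 CSVs, pickle the merged result, and inspect the GeoDataFrame.
atl08_gdf = proc.atl08_io(
    atl08_csv_output_dir='/path/to/atl08_csvs',   # hypothetical directory
    year_search='2020',                           # or '**' for all years
    do_pickle=True,
)
if atl08_gdf is not None:
    print(atl08_gdf.crs, len(atl08_gdf))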
|
[
"def load_year(observatory=None, year=None, path=None):\n dates_in_year = pd.date_range(\n start=f'{year}-01-01', end=f'{year}-12-31', freq='D'\n )\n df = pd.DataFrame()\n for date in dates_in_year:\n ymd = date.strftime('%Y%m%d')\n file_name = f'{observatory}{ymd}dmin.min'\n file_path = os.path.join(path, file_name)\n df = df.append(IAGA2002_Data_Reader(file_path))\n return df",
"def extract_all_years(self):\n headers = {}\n for sheet, _ in SHEET_NAMES_TO_CSV_FILENAMES.items():\n headers[sheet] = {}\n for current_year in self.years:\n print(f'Extracting data for {current_year}')\n self.current_year = current_year\n self._extract_data(headers)\n for sheet, csv_name in SHEET_NAMES_TO_CSV_FILENAMES.items():\n headers_df = pd.DataFrame.from_dict(headers[sheet], orient='index')\n headers_df.transpose().to_csv(os.path.join(self.save_path,\n f'cols_{csv_name}'),\n index=None)\n return self.files",
"def flagStats_allYears(self, csvName):\n start = time.time()\n print 'dfStats_allYears ncpath:', self.ncpath\n filesArr = os.listdir(self.ncpath)\n filesArr.sort()\n dict = {}\n for fn in filesArr:\n regex = re.search(re.compile('^'+self.prefix+'(\\d{4})\\.nc'), fn)\n if regex:\n yr = regex.group(1)\n print yr, fn\n dict[yr] = self.flagStats_single(os.path.join(self.ncpath, fn))\n pd.DataFrame(dict).to_csv(csvName)\n print \"Done!\", time.asctime(),\"Runtime:\", time.time()-start",
"def get_features(year):",
"def yearly_weather_csv_to_shp(in_csv, year, columns, out_dir, csv_name,\r\n\t\t\t\t\t\t\t shp_name):\r\n\r\n\tdata = pd.read_csv(in_csv, skiprows=10)\r\n\tfields = ['Name', 'Date', 'Latitude', 'Longitude']\r\n\tfor i in columns:\r\n\t\tfields.append(i)\r\n\r\n\tdata_tmp = data[fields]\r\n\tquery = 'Date == %d' % year\r\n\tdata_1980 = data_tmp.query(query)\r\n\r\n\tif not os.path.exists(out_dir):\r\n\t\tos.mkdir(out_dir)\r\n\tyear_csv = '%s_%d%s' % (csv_name, year, '.csv')\r\n\tout_csv = '%s/%s' % (out_dir, year_csv)\r\n\tif os.path.exists(out_csv):\r\n\t\tprint('Output CSV file exists already.')\r\n\tif not os.path.exists(out_csv):\r\n\t\tdata_1980.to_csv(out_csv, index=None, header=True)\r\n\tyear_shp = '%s_%d%s' % (shp_name, year, '.shp')\r\n\tout_shp = '%s/%s' % (out_dir, year_shp)\r\n\tif os.path.exists(out_shp):\r\n\t\tprint('Output shapefile exists already')\r\n\tif not os.path.exists(out_shp):\r\n\t\tarcpy.management.XYTableToPoint(out_csv, out_shp, 'Longitude',\r\n\t\t\t\t\t\t\t\t\t\t'Latitude')\r\n\t\tdelete_fields = ['Latitude', 'Longitude']\r\n\t\tarcpy.DeleteField_management(out_shp, delete_fields)\r\n\r\n\tprint('Complete.')",
"def _get_data_pre2007(date): \r\n \r\n # build the url based on year\r\n url = '{}/Environmental_Data_{}.txt'.format(BASE_URL, date.year)\r\n print('Fetching online data for {} (full year)'.format(date.year))\r\n \r\n try:\r\n year_data = request.urlopen(url).read().decode(encoding='utf_8').split('\\n') \r\n except:\r\n raise ValueError(date) # error accessing website\r\n else:\r\n year_data.pop(0) # remove first item which contain column header info\r\n \r\n for line in year_data:\r\n \r\n elements = line.split()\r\n yield dict(Date = elements[0],\r\n Time = elements[1],\r\n Status = 'COMPLETE', # all data from pre2007 will be complete\r\n Air_Temp = elements[5],\r\n Barometric_Press = elements[7],\r\n Wind_Speed = elements[2])",
"def read_data_extracted(year):\n#\n#--- sometime there are out of spot data in the file; so remove them\n#\n xbot = str(year) + ':001:00:00:00'\n xbot = out = Chandra.Time.DateTime(xbot).secs\n\n infile = data_dir + 'data_extracted_' + str(year)\n data = mcf.read_data_file(infile)\n \n time = []\n dy = []\n dz = []\n dtheta = []\n inst = []\n for ent in data:\n atemp = re.split('\\s+', ent)\n try:\n if float(atemp[0]) < xbot:\n continue\n tval = mcf.chandratime_to_fraq_year(atemp[0])\n yval = float(atemp[3])\n zval = float(atemp[4])\n hval = float(atemp[5]) * 3600.0\n\n time.append(tval)\n dy.append(yval)\n dz.append(zval)\n dtheta.append(hval)\n inst.append(atemp[2])\n except:\n continue\n\n return [time, dy, dz, dtheta, inst]",
"def createFileByYear(year, destinationFolder='Formatted Files'):\n\trows = []\n\tallRecords = 0\n\t\n\t# Setup database connection\n\tcur = db.cursor()\n\t\n\t# Create a header row\n\tindicatorList = ['Country']\n\tindicatorIDs = []\n\n\tcur.execute(\"SELECT * FROM indicator_dim ORDER BY indicator_key ASC;\")\t\n\tfor row in cur.fetchall():\n\t\tindicator = str(row[0]).zfill(4) + ': ' + row[1]\n\t\tif row[2] != None:\n\t\t\tindicator += ' - ' + row[2]\n\t\tif row[3] != None:\n\t\t\tindicator += ' (' + row[3] + ')'\n\t\tindicator += ' - ' + row[6]\n\t\tindicator = indicator.replace(',', ';')\n\t\t# print indicator\n\t\tindicatorList.append(indicator)\n\t\tindicatorIDs.append(row[0])\n\t\n\t# if year == 1919:\n\t# \tonlyIndicators = indicatorList[1:]\n\t# \tfor i in range(len(onlyIndicators)):\n\t# \t\tprint onlyIndicators[i] + '==>' + str(i)\n\n\trows.append(indicatorList)\n\n\t# Create a row for each country\n\tcountryList = []\n\tcur.execute(\"SELECT * FROM country_dim ORDER BY country_key ASC;\")\t\n\tfor row in cur.fetchall():\n\t\tcountryList.append((row[0],row[1],row[2]))\n\t# countryList = [(3,'AFG','Afghanistan')]\n\tfor (cid, abbr, cname) in countryList:\n\t\tcur.execute(\"SELECT * FROM record_fact WHERE country_key = %d AND year = %d ORDER BY indicator_key ASC;\" % (cid,year))\t\n\t\tfacts = []\n\t\tfor row in cur.fetchall():\n\t\t\t# print (row[0],row[1],row[2],row[4])\n\t\t\tfacts.append((row[0],row[1],row[2],row[4]))\n\t\tallRecords += len(facts)\n\t\tarow = [str(cid).zfill(3) + ': ' + cname + ' (' + abbr + ')']\n\t\tfor i in range(len(indicatorIDs)):\n\t\t\tif len(facts) == 0:\n\t\t\t\tarow.append('')\n\t\t\telif facts[0][1] == indicatorIDs[i]:\n\t\t\t\tarow.append(facts[0][3])\n\t\t\t\tfacts.pop(0)\n\t\t\telse:\n\t\t\t\tarow.append('')\n\t\t# print arow\n\t\trows.append(arow)\n\t\t# print (cid, abbr, cname)\n\n\tdb.close()\n\n\t# CWrite to file\n\tfilename = './'+destinationFolder+'/'+str(year)+'_'+time.strftime(\"%Y-%m-%d-%H-%M-%S\")+'.csv'\n\twith open(filename,'wb') as w:\n\t\ta = csv.writer(w, delimiter = ',')\n\t\ta.writerows(rows)\n\tw.close()\n\n\treturn (filename, 1.0*allRecords/(len(countryList)*len(indicatorIDs)))",
"def createFileByYearIgnoreMissingColumn(year, destinationFolder='Formatted Files Without Missing'):\n\trows = []\n\tallRecords = 0\n\t\n\t# Setup database connection\n\tcur = db.cursor()\n\n\t\n\t# Create a header row\n\tindicatorList = ['Country']\n\tindicatorIDs = []\n\n\tcur.execute(\"SELECT DISTINCT indicator_key FROM record_fact WHERE year = %d ORDER BY indicator_key ASC;\" % (year))\t\n\tfor row in cur.fetchall():\n\t\tindicatorIDs.append(row[0])\n\t\n\ti = 0\t\n\tfor indicator_key in indicatorIDs:\n\t\tcur.execute(\"SELECT * FROM indicator_dim WHERE indicator_key = %d;\" % (indicator_key))\n\t\trow = cur.fetchone()\n\t\tindicator = str(row[0]).zfill(4) + ': ' + row[1]\n\t\tif row[2] != None:\n\t\t\tindicator += ' - ' + row[2]\n\t\tif row[3] != None:\n\t\t\tindicator += ' (' + row[3] + ')'\n\t\tindicator += ' - ' + row[6]\n\t\tindicator += ' - ' + str(i)\n\t\tindicator = indicator.replace(',', ';')\n\t\t# print indicator\n\t\tindicatorList.append(indicator)\n\t\t# indicatorIDs.append(row[0])\n\t\ti += 1\n\t\n\t# if year == 1919:\n\t# \tonlyIndicators = indicatorList[1:]\n\t# \tfor i in range(len(onlyIndicators)):\n\t# \t\tprint onlyIndicators[i] + '==>' + str(i)\n\n\trows.append(indicatorList)\n\n\t# Create a row for each country\n\tcountryList = []\n\tcur.execute(\"SELECT * FROM country_dim ORDER BY country_key ASC;\")\t\n\tfor row in cur.fetchall():\n\t\tcountryList.append((row[0],row[1],row[2]))\n\t# countryList = [(3,'AFG','Afghanistan')]\n\tfor (cid, abbr, cname) in countryList:\n\t\tcur.execute(\"SELECT * FROM record_fact WHERE country_key = %d AND year = %d ORDER BY indicator_key ASC;\" % (cid,year))\t\n\t\tfacts = []\n\t\tfor row in cur.fetchall():\n\t\t\t# print (row[0],row[1],row[2],row[4])\n\t\t\tfacts.append((row[0],row[1],row[2],row[4]))\n\t\tallRecords += len(facts)\n\t\tarow = [str(cid).zfill(3) + ': ' + cname + ' (' + abbr + ')']\n\t\tfor i in range(len(indicatorIDs)):\n\t\t\tif len(facts) == 0:\n\t\t\t\tarow.append('')\n\t\t\telif facts[0][1] == indicatorIDs[i]:\n\t\t\t\tarow.append(facts[0][3])\n\t\t\t\tfacts.pop(0)\n\t\t\telse:\n\t\t\t\tarow.append('')\n\t\t# print arow\n\t\trows.append(arow)\n\t\t# print (cid, abbr, cname)\n\n\tdb.close()\n\n\t# CWrite to file\n\tfilename = './'+destinationFolder+'/'+str(year)+'_'+time.strftime(\"%Y-%m-%d-%H-%M-%S\")+'.csv'\n\twith open(filename,'wb') as w:\n\t\ta = csv.writer(w, delimiter = ',')\n\t\ta.writerows(rows)\n\tw.close()\n\n\treturn (filename, 1.0*allRecords/(len(countryList)*len(indicatorIDs)))",
"def main():\n\tcong = pd.read_csv(sys.argv[1], parse_dates = [\"date\"])\n\tcong = cong[cong[\"date\"].dt.year != 2001] # Removes about 1400 instances\n\t\n\tcong.to_csv(\"congressionalRecords.csv\")",
"def earthquakes_in_year(year='2015'):\n data = VARS['data']\n filtered_data = data[data['year'] == year]\n return filtered_data.to_csv(None, index=False)",
"def _load_flags(self, station: str, year: Union[int, None] = None) -> None:\n\n # File name\n file = generate_endpoint_path(self.granularity, station, year, True)\n\n # Get local file path\n path = get_local_file_path(self.cache_dir, self.cache_subdir, file)\n\n # Check if file in cache\n if self.max_age > 0 and file_in_cache(path, self.max_age):\n\n # Read cached data\n df = pd.read_pickle(path)\n\n else:\n\n # Get data from Meteostat\n df = load_handler(\n self.endpoint,\n file,\n self._columns,\n {key: \"string\" for key in self._columns[self._first_met_col :]},\n self._parse_dates,\n )\n\n # Validate Series\n df = validate_series(df, station)\n\n # Save as Pickle\n if self.max_age > 0:\n df.to_pickle(path)\n\n # Localize time column\n if (\n self.granularity == Granularity.HOURLY\n and self._timezone is not None\n and len(df.index) > 0\n ):\n df = localize(df, self._timezone)\n\n # Filter time period and append to DataFrame\n if self._start and self._end:\n df = filter_time(df, self._start, self._end)\n\n return df",
"def load_herd_area_data():\n\n # AreaData.query.delete() # deletes rows before adding so that data is not duplicated\n\n #loops through all csv files and imports them\n for year in range(2005, 2017):\n csvfile = open(\"csvs/\"+str(year)+\".csv\")\n data = csv.reader(csvfile)\n next(data, None) #skip the header row\n\n\n for row in data:\n try:\n\n row = [element if len(element) > 0 else None for element in row]\n if row[15] is not None:\n row[15] = datetime.strptime(row[15], '%B %Y')\n if row[14] is not None:\n row[14] = datetime.strptime(row[14], '%B %Y')\n if row[16] is not None:\n row[16] = datetime.strptime(row[16], '%Y')\n herd_info = HAData(herd_id=row[1],\n year=year,\n ha_blm_acres=row[2],\n ha_other_acres=row[3],\n horse_population=row[8],\n burro_population=row[12],\n last_gather=row[15])\n hma_info = HMAData(herd_id=row[1],\n year=year,\n hma_blm_acres=row[4],\n hma_other_acres=row[5],\n horse_aml_low=row[6],\n horse_aml_high=row[7],\n burro_aml_low=row[10],\n burro_aml_high=row[11],\n recent_count=row[14],\n most_recent_aml=row[16]\n )\n db.session.add(herd_info)\n db.session.add(hma_info)\n except Exception as detail:\n print \"failed to insert\" + str(row) + str(detail)\n db.session.commit()",
"def open_CERA(year_index, number):\n data_path = (\n \"/cluster/work/apatt/wojan/renewable_generation/wind_n_solar/data/CERA20C/\"\n )\n filelist = sorted(glob.glob(data_path + \"*.nc\"))\n data = xr.open_dataset(filelist[year_index], drop_variables=[\"s100\"],)\n data = data.rename(\n {\n \"latitude\": \"lat\",\n \"longitude\": \"lon\",\n \"ssrd\": \"global_horizontal\",\n \"fdir\": \"direct_radiation\",\n \"t2m\": \"temperature\",\n }\n )\n data = data.sel({\"number\": number}, drop=True)\n data = convert_units(data)\n data = provide_GSEE_fields(data)\n return data",
"def load_WADE_data(year,datadir='/ocean/eolson/MEOPAR/obs/WADE/ptools_data/ecology'):\n dfSta=pickle.load(open(os.path.join(datadir,'sta_df.p'),'rb'))\n dfBot=pickle.load(open(os.path.join(datadir,f'Bottles_{str(year)}.p'),'rb'))\n df=pd.merge(left=dfSta,right=dfBot,how='right',\n left_on='Station',right_on='Station')\n try:\n len(df.loc[pd.isnull(df['Latitude'])]) == 0\n except:\n pass\n print('Warning!, Stations found without Latitude or Longitude value!')\n try:\n len(df) == len(dfBot)\n except:\n pass\n print(f'Warning!, Merge completed incorrectly. length of bottle data = {len(dfBot)} length of merged data = {len(df)}')\n # where no time is provided, set time to midday Pacific time = ~ 20:00 UTC\n df['UTCDateTime']=[iiD+dt.timedelta(hours=20) if pd.isnull(iiU) \\\n else iiU for iiU,iiD in \\\n zip(df['UTCDateTime'],df['Date'])]\n df.rename(columns={'UTCDateTime':'dtUTC','Latitude':'Lat','Longitude':'Lon'},inplace=True)\n df['Z']=-1*df['Z']\n df.head()\n df['NO23']=df['NO3(uM)D']+df['NO2(uM)D'] # the model does not distinguish between NO2 and NO3\n df['Amm']=df['NH4(uM)D']\n df['Si']=df['SiOH4(uM)D']\n df['Year']=[ii.year for ii in df['dtUTC']]\n df['YD']=et.datetimeToYD(df['dtUTC'])\n return(df)",
"def get_other_airport_data(num_past_hours,past_numerical_features_other_airport,\n airport_code):\n data_file = \"..\\data\\processed\\\\\" + airport_code +\"_for_regressor.csv\"\n \n past_extended_column_names_other = get_extended_past_columns(\n past_numerical_features_other_airport, num_past_hours\n )\n \n other_df = pd.read_csv(data_file)\n other_df[\"DATETIME\"]=pd.to_datetime(other_df['DATETIME']) \n other_df.set_index(\"DATETIME\",inplace=True)\n other_df.sort_index()\n other_df.drop(other_df.index[:48], inplace=True)\n other_df.drop(other_df.tail(48).index,inplace=True) # drop last n rows\n \n other_df.drop(other_df.columns.difference(past_extended_column_names_other),\n 1,inplace=True)\n other_df = other_df[past_extended_column_names_other]\n \n return past_extended_column_names_other,other_df",
"def xlsx_to_csv(years):\n \n start_time = time.time()\n \n s3_resource = boto3.resource('s3')\n s3 = boto3.client('s3')\n for year in years:\n file = 'all_data_M_{}'.format(year)\n obj = s3.get_object(Bucket='dataeng-capstone-1', Key='all_data_M_{}.xlsx'.format(year))\n df = pd.read_excel(obj['Body'].read())\n df.insert(0, 'FY_YEAR', int('{}'.format(year))) \n csv_buffer = StringIO()\n df.to_csv(csv_buffer, sep=\"|\",index=False,quoting=csv.QUOTE_NONNUMERIC)\n s3_resource.Object('dataeng-capstone-1', 'clean/all_data_M_{}.dat'.format(year)).put(Body=csv_buffer.getvalue())\n print('converted ',file,' from .xlsx to .dat')\n \n \n file = '2017_NAICS_Descriptions' \n obj = s3.get_object(Bucket='dataeng-capstone-1', Key='2017_NAICS_Descriptions.xlsx')\n df = pd.read_excel(obj['Body'].read())\n df = df[['Code','Title']] \n\n csv_buffer = StringIO()\n df.to_csv(csv_buffer, sep=\"|\",index=False)\n s3_resource.Object('dataeng-capstone-1', 'clean/all_naics_codes.dat').put(Body=csv_buffer.getvalue())\n print('converted ',file,' from .xlsx to .dat')\n\n end_time = time.time()\n \n runtime = end_time - start_time\n \n print('\\n')\n print('runtime: ',runtime)\n print('\\n')\n dataend_bucket = s3_resource.Bucket('dataeng-capstone-1')\n\n print('List files in clean bucket: ')\n for objct in dataend_bucket.objects.filter(Delimiter='/',Prefix='clean/all'):\n print(objct.key)\n \n print('\\n')",
"def load_location_data_and_clean(states = True, modernized=True, save=False): \n if states:\n df = pd.read_csv('../../data/original_data/state_data.csv')\n save_local = '../../data/state_data_cleaned_final.csv'\n else: \n if modernized:\n df = pd.read_csv('../../data/original_data/foreign_data_modernized.csv')\n save_local = '../../data/foreign_data_modernized_cleaned_final.csv'\n\n else:\n df = pd.read_csv('../../data/original_data/foreign_data.csv')\n save_local = '../../data/foreign_data_cleaned_final.csv'\n\n\n\n df = df.rename(columns={'Unnamed: 0':'location'})\n year_df = df.iloc[:, 1:]\n yrs = list(year_df.columns)\n\n output_idx = list(range(len(yrs) * len(df['location'].unique())))\n result = pd.DataFrame(columns=['Location', 'Year', 'Prisoners'], index= output_idx) # stupid way to index and add, do something better \n\n\n index = 0\n for idx, row in df.iterrows():\n location = row['location'].strip()\n for yr in yrs:\n total = row[yr]\n if total == None:\n total = 0\n result.iloc[index] = [location, yr, total]\n index += 1\n result = result.fillna(0)\n result['Prisoners'] = result['Prisoners'].astype(int)\n result['Year'] = result['Year'].astype(int)\n result['Region'] = result['Location'].apply(lambda x: regional_apply(x))\n\n if save:\n result.to_csv(save_local)\n else:\n return result",
"def organize_br_reporting_files_by_year(tables, year):\n year = int(year)\n for table in tables:\n if 'BR_REPORTING' in table:\n log.info(f'organizing data for {table} from {str(year)}...')\n linewidthsdf = pd.read_csv(RCRA_DATA_PATH\n .joinpath('RCRA_FlatFile_LineComponents.csv'))\n fields = linewidthsdf['Data Element Name'].tolist()\n files = sorted([file for file in OUTPUT_PATH\n .glob(f'{table}*{str(year)}*.csv')])\n df_full = pd.DataFrame()\n for filepath in files:\n log.info(f'extracting {filepath}')\n df = pd.read_csv(filepath, header=0,\n usecols=list(range(0, len(fields))),\n names=fields,\n low_memory=False,\n encoding='utf-8')\n df = df[df['Report Cycle'].apply(\n lambda x: str(x).replace('.0', '').isdigit())]\n if df['Location Street Number'].dtype != 'str':\n df['Location Street Number'] = df['Location Street Number'].astype(str)\n df['Location Street Number'] = df['Location Street Number'].apply(\n lambda x: str(x).replace('.0', ''))\n df['Report Cycle'] = df['Report Cycle'].astype(int)\n df = df[df['Report Cycle'] == year]\n df_full = pd.concat([df_full, df])\n DIR_RCRA_BY_YEAR.mkdir(exist_ok=True)\n filepath = DIR_RCRA_BY_YEAR.joinpath(f'br_reporting_{str(year)}.csv')\n log.info(f'saving to {filepath}...')\n df_full.to_csv(filepath, index=False)\n generate_metadata(year, files, datatype='source')\n else:\n log.info(f'skipping {table}')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Filter a list of raster filenames, keeping only those whose embedded date falls within a given range of years.
|
def filter_raster_filenames_by_year(
self, filenames: list,
start_year: int,
end_year: int
):
new_list = []
years = [str(year) for year in range(start_year, end_year+1)]
for f in filenames:
date_match = re.search(
r'(?P<year>\d{4})(?P<month>\d{2})(?P<day>\d{2})', f)
            if date_match and date_match['year'] in years:
new_list.append(f)
return sorted(new_list)
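
A minimal standalone sketch of the same regex-based year filtering may help readers of this record; the filenames and the year range below are invented placeholders, not values from the dataset.

import re

filenames = [
    "ndvi_20180412_tile07.tif",   # 2018, inside the 2017-2019 range
    "ndvi_20200101_tile07.tif",   # 2020, outside the range
    "ndvi_20170930_tile07.tif",   # 2017, inside the range
]

years = [str(year) for year in range(2017, 2019 + 1)]
kept = []
for f in filenames:
    m = re.search(r'(?P<year>\d{4})(?P<month>\d{2})(?P<day>\d{2})', f)
    if m and m['year'] in years:
        kept.append(f)
print(sorted(kept))  # ['ndvi_20170930_tile07.tif', 'ndvi_20180412_tile07.tif']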
|
[
"def filter_years():\n years = sys.argv[1:]\n for year in years:\n infile = os.path.join(BASE_DIR, CLASSIFICATION_EXP % year, FILENAME1)\n outfile1 = os.path.join(BASE_DIR, CLASSIFICATION_EXP % year, FILENAME2)\n outfile2 = os.path.join(BASE_DIR, CLASSIFICATION_EXP % year, FILENAME3)\n print year\n filter_terms(infile, outfile1, outfile2)\n print",
"def find_roster_files(year, directory=os.getcwd()):\n return list(fnmatch.filter(\n os.listdir(directory), '*{} rosters.csv'.format(year)))",
"def filter_netcdf_paths_by_date_range(start_date, end_date, netcdf_data_paths):\n filtered_by_date = []\n\n for file_name in netcdf_data_paths:\n start_year = file_name.split(\"_\")[5]\n end_year = file_name.split(\"_\")[6]\n\n file_year_range = range(int(start_year), int(end_year)+1)\n total_date_range = range(int(start_date[0:4]), int(end_date[0:4])+1)\n #print total_date_range, file_year_range\n\n for year in total_date_range:\n if year in file_year_range:\n filtered_by_date.append(file_name)\n\n # Return a sorted list of netcdf file names\n return sorted([x for x in set(filtered_by_date)])",
"def get_films_by_year(year: str, df=create_df()):\n df1 = df.loc[df.air_year.str.contains(year, regex=False)]\n if df1.shape[0] < 10:\n return df\n return df1",
"def years(self, yearfolder, clear=None):\n if clear:\n self.listyear = []\n self.index = 0\n\n folders = os.listdir(yearfolder)\n for folder in folders:\n if self.yearpref in folder:\n year = folder.lstrip(self.yearpref)\n self.listyear.append(year)",
"def filter_by_year(df_data, year):\n df_data_year = df_data.xs(year, level='date').copy()\n return df_data_year",
"def get_gsod_filenames(self, year=None, with_host=False):\n return get_gsod_filenames(self.usaf_id, year, with_host=with_host)",
"def get_sea_surface_file_codes_for_year(year):\n url = sea_surface_temp_main.format(year)\n soup = get_soup(url)\n\n p_code = re.compile(\"\\('([0-9]\\w+)'\")\n p_date = re.compile(\"'([0-9]+-[0-9]+-[0-9]+)'\")\n\n codes = dict()\n\n month_selects = soup.findAll('div', class_='slider-elem month')\n for ms in month_selects:\n t_js = ms.find('a').attrs['onclick']\n\n t_code = p_code.findall(t_js)\n t_date = p_date.findall(t_js)\n\n if len(t_code) == 1 and len(t_date) == 1:\n codes[datetime.strptime(t_date[0], '%Y-%m-%d').date()] = t_code[0]\n\n return codes",
"def merra2_filelist(varname, dbeg='19900101', dend='20190228'):\n\n dtbeg = dt.datetime.strptime(dbeg, '%Y%m%d')\n dtend = dt.datetime.strptime(dend, '%Y%m%d')\n \n globpath = os.path.join(merra2_diri, varname, '????', '??',\n f'MERRA2_???.tavg1_2d_slv_Nx.{varname}.????????.nc4')\n files = sorted(glob.glob(globpath))\n return [f for f in files if (time_from_filename(f) >= dtbeg) & (time_from_filename(f) <= dtend)]",
"def flagStats_allYears(self, csvName):\n start = time.time()\n print 'dfStats_allYears ncpath:', self.ncpath\n filesArr = os.listdir(self.ncpath)\n filesArr.sort()\n dict = {}\n for fn in filesArr:\n regex = re.search(re.compile('^'+self.prefix+'(\\d{4})\\.nc'), fn)\n if regex:\n yr = regex.group(1)\n print yr, fn\n dict[yr] = self.flagStats_single(os.path.join(self.ncpath, fn))\n pd.DataFrame(dict).to_csv(csvName)\n print \"Done!\", time.asctime(),\"Runtime:\", time.time()-start",
"def get_file(infolder, year=None):\n if not year is None:\n return glob.glob(f\"{infolder}{os.sep}*{year}.tif\")\n else:\n return glob.glob(f\"{infolder}{os.sep}*.tif\")",
"def get_filename_year(filename):\n new_filename = filename\n filename_year = None\n matches = re.findall(\"\\s\\(\\d+\\)\", new_filename)\n if not matches:\n matches = re.findall(\"\\s\\d+\", new_filename)\n if matches: \n match = matches[-1] # last match\n now = datetime.datetime.now() \n year_string = str(match)\n year = int(year_string.replace(\"(\", \"\").replace(\")\", \"\"))\n if new_filename.endswith(year_string):\n if year > 1945 and year <= now.year: \n filename_year = str(year)\n new_filename = filename.replace(year_string, \"\") \n return new_filename, filename_year",
"def group_by_year(self, year):\r\n self.if_list_empty(Library.books)\r\n self.validate_data_int(year)\r\n for book in Library.books:\r\n if year == book.year:\r\n print(book)",
"def extract_all_years(self):\n headers = {}\n for sheet, _ in SHEET_NAMES_TO_CSV_FILENAMES.items():\n headers[sheet] = {}\n for current_year in self.years:\n print(f'Extracting data for {current_year}')\n self.current_year = current_year\n self._extract_data(headers)\n for sheet, csv_name in SHEET_NAMES_TO_CSV_FILENAMES.items():\n headers_df = pd.DataFrame.from_dict(headers[sheet], orient='index')\n headers_df.transpose().to_csv(os.path.join(self.save_path,\n f'cols_{csv_name}'),\n index=None)\n return self.files",
"def get_movies_by_year(self, year):\r\n raise NotImplementedError",
"def sort_by_year(sort_list):\n sort_list.sort(key=lambda song: song.year)",
"def select_year(data: pd.DataFrame, year: str, date_column='date'):\n data = data[data[date_column] >= str(year)]\n data = data[data[date_column] < str(int(year)+1)]\n data['date'] = data[date_column]\n return data",
"def get_files():\n old_files = []\n new_files = []\n\n for file in os.listdir():\n if file.startswith(('2013', '2014', '2015', '2016')):\n old_files.append(file)\n elif file.startswith(('2017', '2018', '2019', '2020', '2021')):\n new_files.append(file)\n return old_files, new_files",
"def createFileByYear(year, destinationFolder='Formatted Files'):\n\trows = []\n\tallRecords = 0\n\t\n\t# Setup database connection\n\tcur = db.cursor()\n\t\n\t# Create a header row\n\tindicatorList = ['Country']\n\tindicatorIDs = []\n\n\tcur.execute(\"SELECT * FROM indicator_dim ORDER BY indicator_key ASC;\")\t\n\tfor row in cur.fetchall():\n\t\tindicator = str(row[0]).zfill(4) + ': ' + row[1]\n\t\tif row[2] != None:\n\t\t\tindicator += ' - ' + row[2]\n\t\tif row[3] != None:\n\t\t\tindicator += ' (' + row[3] + ')'\n\t\tindicator += ' - ' + row[6]\n\t\tindicator = indicator.replace(',', ';')\n\t\t# print indicator\n\t\tindicatorList.append(indicator)\n\t\tindicatorIDs.append(row[0])\n\t\n\t# if year == 1919:\n\t# \tonlyIndicators = indicatorList[1:]\n\t# \tfor i in range(len(onlyIndicators)):\n\t# \t\tprint onlyIndicators[i] + '==>' + str(i)\n\n\trows.append(indicatorList)\n\n\t# Create a row for each country\n\tcountryList = []\n\tcur.execute(\"SELECT * FROM country_dim ORDER BY country_key ASC;\")\t\n\tfor row in cur.fetchall():\n\t\tcountryList.append((row[0],row[1],row[2]))\n\t# countryList = [(3,'AFG','Afghanistan')]\n\tfor (cid, abbr, cname) in countryList:\n\t\tcur.execute(\"SELECT * FROM record_fact WHERE country_key = %d AND year = %d ORDER BY indicator_key ASC;\" % (cid,year))\t\n\t\tfacts = []\n\t\tfor row in cur.fetchall():\n\t\t\t# print (row[0],row[1],row[2],row[4])\n\t\t\tfacts.append((row[0],row[1],row[2],row[4]))\n\t\tallRecords += len(facts)\n\t\tarow = [str(cid).zfill(3) + ': ' + cname + ' (' + abbr + ')']\n\t\tfor i in range(len(indicatorIDs)):\n\t\t\tif len(facts) == 0:\n\t\t\t\tarow.append('')\n\t\t\telif facts[0][1] == indicatorIDs[i]:\n\t\t\t\tarow.append(facts[0][3])\n\t\t\t\tfacts.pop(0)\n\t\t\telse:\n\t\t\t\tarow.append('')\n\t\t# print arow\n\t\trows.append(arow)\n\t\t# print (cid, abbr, cname)\n\n\tdb.close()\n\n\t# CWrite to file\n\tfilename = './'+destinationFolder+'/'+str(year)+'_'+time.strftime(\"%Y-%m-%d-%H-%M-%S\")+'.csv'\n\twith open(filename,'wb') as w:\n\t\ta = csv.writer(w, delimiter = ',')\n\t\ta.writerows(rows)\n\tw.close()\n\n\treturn (filename, 1.0*allRecords/(len(countryList)*len(indicatorIDs)))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Add new field to config
|
def add_field(self, field):
config = configparser.ConfigParser()
config.read(self.path)
config.set(self.section, field, '')
with open(self.path, 'w') as config_file:
config.write(config_file)
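
A hedged sketch of the configparser calls this method wraps; the ini path, the 'general' section, and the 'api_key' field are placeholder names used only for illustration.

import configparser
import tempfile

# Write a throwaway ini file with one section so the calls have something to read.
with tempfile.NamedTemporaryFile('w', suffix='.ini', delete=False) as tmp:
    tmp.write('[general]\n')
    path = tmp.name

config = configparser.ConfigParser()
config.read(path)
config.set('general', 'api_key', '')   # add the new field with an empty value
with open(path, 'w') as config_file:
    config.write(config_file)

print(open(path).read())   # the [general] section now contains "api_key = "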
|
[
"def set_value(self, config_field, include_doc=False):\n raise NotImplementedError",
"def add_field(self, field):\n # lots of stuff left, needs to be done here\n if not field.get('name'):\n field['name'] = reduce_to_alphanumeric(unicode(field.get('label')).lower())\n \n if self.validate_field(field):\n self.fields.append(field)\n \n return True",
"def add_field(self, **field_kwargs: Any) -> None:\n self._post_field(\"add-field\", **field_kwargs)",
"def _registerField(self,name,data):\n self._registeredFields.append(name)\n self.__setattr__(name, data)",
"def add_custom_field(self, name=None, value=None):\n self.custom_fields.append(CustomField(name=name, value=value))",
"async def add_field(self, name, value, inline=False):\n self.embed.add_field(name=name, value=value, inline=inline)",
"def set_config(self, field, text):\n if field in self.all_configs['ai']:\n if self.all_configs['ai'][field].endswith('FLOAT'):\n tp = float\n else:\n tp = int\n elif not field.startswith(\"car\"):\n if self.all_configs['geral'][field].endswith('FLOAT'):\n tp = float\n else:\n tp = int\n\n if field in self.config['ai'] or field == 'max_frames':\n try:\n self.config['ai'][field] = tp(text)\n except:\n print(text, \"is a invalid input in\", field)\n elif field == 'car_visions':\n try:\n self.config['car']['number_of_visions'] = int(text)\n except:\n print(text, \"is a invalid input in\", field)\n elif field == 'car_vision_len':\n try:\n self.config['car']['vision_length'] = int(text)\n except:\n print(text, \"is a invalid input in\", field)\n else:\n try:\n self.config[field] = tp(text)\n except:\n print(text, \"is a invalid input in\", field)",
"def register_field(self, field, *args):\n self.spec.register_field(field, *args)",
"def addField(self, field):\n field = aq_base(field)\n self._validateOnAdd(field)\n name = field.getName()\n if name not in self._names:\n self._names.append(name)\n self._fields[name] = field",
"def addField(self,field=\"\"):\r\n self._NMEAFieldList.append(field)",
"def addProjectField(self, fieldName, value):\n if fieldName in self.data.keys() or fieldName in self.fieldNameList:\n raise Exception('Field (%s) already exists in theis entity!' % fieldName)\n else:\n self.data[fieldName] = value\n self.fieldNameList.append(fieldName)",
"def addField(self, base: 'SoFieldContainer', name: 'char const *', field: 'SoField') -> \"void\":\n return _coin.SoFieldData_addField(self, base, name, field)",
"def register(self, field_name, func, fake=...):\r\n ...",
"def update_custom_field(self, env, customfield, create=False):\r\n # Name, Type and Label is required\r\n if not (customfield.has_key('name') and customfield.has_key('type') \\\r\n and customfield.has_key('label')):\r\n raise TracError(\"Custom field needs at least a name, type and label.\")\r\n # Use lowercase custom fieldnames only\r\n customfield['name'] = str(customfield['name']).lower()\r\n # Only alphanumeric characters (and [-_]) allowed for custom fieldname\r\n # Note: This is not pretty, but it works... Anyone have an eaier way of checking ???\r\n matchlen = re.search(\"[a-z0-9-_]+\", customfield['name']).span()\r\n namelen = len(customfield['name'])\r\n if (matchlen[1]-matchlen[0] != namelen):\r\n raise TracError(\"Only alphanumeric characters allowed for custom field name (a-z or 0-9 or -_).\")\r\n # If Create, check that field does not already exist\r\n if create and env.config.get('ticket-custom', customfield['name']):\r\n raise TracError(\"Can not create as field already exists.\")\r\n # Check that it is a valid field type\r\n if not customfield['type'] in ['text', 'checkbox', 'select', 'radio', 'textarea']:\r\n raise TracError(\"%s is not a valid field type\" % customfield['type'])\r\n # Create/update the field name and type\r\n env.config.set('ticket-custom', customfield['name'], customfield['type'])\r\n # Set the field label\r\n env.config.set('ticket-custom', customfield['name'] + '.label', customfield['label'])\r\n # Set default value if it exist in dictionay with value, else remove it if it exists in config\r\n if customfield.has_key('value') and customfield['value']:\r\n env.config.set('ticket-custom', customfield['name'] + '.value', customfield['value'])\r\n elif env.config.get('ticket-custom', customfield['name'] + '.value'):\r\n env.config.remove('ticket-custom', customfield['name'] + '.value')\r\n # If select or radio set options, or remove if it exists and field no longer need options\r\n if customfield['type'] in ['select', 'radio']:\r\n if not customfield.has_key('options') or customfield['options'] == []:\r\n raise TracError(\"No options specified for %s field\" % customfield['type'])\r\n env.config.set('ticket-custom', customfield['name'] + '.options', '|'.join(customfield['options']))\r\n elif env.config.get('ticket-custom', customfield['name'] + '.options'):\r\n env.config.remove('ticket-custom', customfield['name'] + '.options')\r\n # Set defaults for textarea if none is specified, remove settings if no longer used\r\n if customfield['type'] == 'textarea':\r\n if (not customfield.has_key('cols')) or (not str(customfield['cols']).isdigit()):\r\n customfield['cols'] = \"60\"\r\n if (not customfield.has_key('rows')) or (not str(customfield['rows']).isdigit()):\r\n customfield['rows'] = \"5\"\r\n env.config.set('ticket-custom', customfield['name'] + '.cols', customfield['cols'])\r\n env.config.set('ticket-custom', customfield['name'] + '.rows', customfield['rows'])\r\n elif env.config.get('ticket-custom', customfield['name'] + '.cols'):\r\n env.config.remove('ticket-custom', customfield['name'] + '.cols')\r\n # Set sort setting if it is in customfield dict, remove if no longer present\r\n if create:\r\n last = len(self.get_custom_fields(env))\r\n env.config.set('ticket-custom', customfield['name'] + '.order',\r\n customfield.get('order',0) or last)\r\n elif customfield.has_key('order') and customfield['order']:\r\n # Exists and have value - note: will not update order conflicting with other fields\r\n if str(customfield['order']).isdigit():\r\n 
env.config.set('ticket-custom', customfield['name'] + '.order', customfield['order'])\r\n elif env.config.get('ticket-custom', customfield['name'] + '.order'):\r\n env.config.remove('ticket-custom', customfield['name'] + '.order')\r\n # Save settings\r\n env.config.save()",
"def added(self, configuration):",
"def add_value_for_field(self, val, doc_id, field):\n guid = self.get_guid(doc_id)\n\n if guid not in self.m_file[indexfields.FIELDS]:\n self.m_file[indexfields.FIELDS][guid] = {}\n\n self.m_file[indexfields.FIELDS][guid][field] = val",
"def add_field(self: E, *, name: str, value: str, inline: bool = True) -> E:\n self._fields.append(EmbedField(name=str(name), value=str(value), inline=inline))\n\n return self",
"def add_field(self, key, value):\n value = str(value).replace(\"&\", \"&\").replace(\"<\", \"<\").replace(\">\", \">\")\n value = value.replace(\"'\", \"'\").replace(\"\\\"\", \""\")\n self.fields[key] = value",
"def init_custom_fields(self):\n mapping = {\n 'application': self.config['sde_application'],\n 'project': self.config['sde_project'],\n 'context': self.config['alm_context']\n }\n\n config_custom_fields = ['alm_custom_fields']\n if self.feature_custom_lookup:\n config_custom_fields.append('alm_custom_lookup_fields')\n\n for config_option in config_custom_fields:\n self.transform_config_value(config_option, mapping)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Recursively retrieves the current path, given a (potentially) old path.
|
def retrieve_current_path(self, old_path):
path = old_path
detect_endless_loop = 0
while path is not None and path not in self.repo_files_path and detect_endless_loop < 50:
if path in self.old_to_new_path:
path = self.old_to_new_path[path]
else:
path = None
detect_endless_loop += 1
return path
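
A self-contained usage sketch; _RepoStub, its file set, and the rename chain are invented stand-ins for the class attributes the method reads (repo_files_path and old_to_new_path).

class _RepoStub:
    # Hypothetical state: one file currently in the repo, plus a recorded rename chain.
    repo_files_path = {'src/c.py'}
    old_to_new_path = {'src/a.py': 'src/b.py', 'src/b.py': 'src/c.py'}

    def retrieve_current_path(self, old_path):
        path = old_path
        detect_endless_loop = 0
        while path is not None and path not in self.repo_files_path and detect_endless_loop < 50:
            if path in self.old_to_new_path:
                path = self.old_to_new_path[path]
            else:
                path = None
            detect_endless_loop += 1
        return path

print(_RepoStub().retrieve_current_path('src/a.py'))  # src/c.py, following two renames
print(_RepoStub().retrieve_current_path('gone.py'))   # None, no rename trail reaches a live file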
|
[
"def parseNewPath(fs, directory, path):\n parentDir = directory\n if path.startswith(\"/\"):\n path = path[1:]\n parentDir = fs.rootDir\n if parentDir.absolutePath == directory.absolutePath:\n parentDir = directory\n if \"/\" in path:\n name = path[path.rindex(\"/\")+1:]\n parentDir = getFileObject(fs, directory, path[:path.rindex(\"/\")], True)\n else:\n name = path\n return (parentDir, name)",
"def back_dir(cwd):\n\n prev = path.dirname(cwd)\n\n return get_root_path() if prev == cwd else prev",
"def rebase_path(\n path: pathlib.Path, root: pathlib.Path, new_root: pathlib.Path\n) -> pathlib.Path:\n return new_root / path.relative_to(root)",
"def _search_new_path(self, path):\n if not path.startswith(os.sep):\n return None\n path = path.split(os.sep)[1:]\n # First get name and remove it from path\n name = None\n for i in range(len(path)-1, -1, -1):\n if path[i] != \"\":\n name = path[i]\n path = path[:i]\n break\n if name is None:\n return None\n\n # Walk the directory hierarchy\n cur_dir = self.root_dir\n for node in path:\n if node == \"\":\n continue\n if not isinstance(cur_dir, Dir):\n # A file - doesn't have children\n return None\n try:\n cur_dir = cur_dir.files[node]\n except KeyError:\n return None\n return cur_dir, name",
"def changedir(currentpath, relpath):\n new = get_new_dir(currentpath, relpath)\n if (not new):\n print(relpath + \" does not exist\")\n return currentpath\n return new",
"def append_path(path_current: List[State], newPath: List[State]) -> List[State]:\n path = path_current[:]\n path.extend(newPath)\n return path",
"def get_path(self, path):\n return path[len(self.base)+2:]",
"def relative_path(self):\n if self.parent is not None:\n root = self.parent\n while True:\n if root.parent:\n root = root.parent\n else:\n break\n return self.path[len(root.path) + 1:]\n else:\n return ''",
"def get_path(root, path):\n\n return join(dirname(root), path)",
"def get_current_path(self):\n path = self.tree_view.fileInfo(\n self.tree_view.currentIndex()).filePath()\n # https://github.com/pyQode/pyQode/issues/6\n if not path:\n path = self.tree_view.root_path\n return path",
"def simplify_path(self, old_path):\n path = re.sub(r\"//+\", \"/\", old_path)\n path = re.sub(r\"/\\./+\", \"/\", path)\n new_path = re.sub(r\"/[^/]+/\\.\\./\", \"/\", path)\n while (new_path != path):\n path = new_path\n new_path = re.sub(r\"/[^/]+/\\.\\./\", \"/\", path)\n if (new_path != old_path):\n log.debug(\"simplified path from \" + old_path + \n \" to \" + new_path,'simplify_path')\n return path",
"def get_current_path():\n return os.path.join(\".deploy\", \"current\")",
"def getRelPathToRootUrlFrom(currentFilePath):\n depthOfFile = getDepth(currentFilePath)\n # Have one \"../\" less than there are subdirs because we want to go\n # from \"./out/alpha/beta/\" to \"./out/\" and not to \"./\"\n return \"../\"*(depthOfFile -1)",
"def current_path(self):\n # print(self.position)\n return os.sep.join([i.replace(os.sep, \"\") for i in self.position])",
"def get_vcs_root(path):\n previous_path = path\n while get_vcs_infos(path) is None:\n path = abspardir(path)\n if path == previous_path:\n return\n else:\n previous_path = path\n return osp.abspath(path)",
"def get_parent_ref(self, path):\n matches = [r for r in self.refs if path.startswith(r + '/')]\n if len(matches) != 1:\n raise FuseOSError(errno.ENOENT)\n return matches[0]",
"def get_local_directory_old(self):\n \n # Gives Local Direcory path equivalent to URL Path in server\n rval = os.path.join(self.rootdir, self.domainold)\n\n for diry in self.dirpathold:\n if not diry: continue\n rval = os.path.abspath( os.path.join(rval, self.make_valid_filename(diry)))\n\n return os.path.normpath(rval)",
"def _get_relative_path(storage_location, path):\n prefix_len = len(storage_location)\n return path[prefix_len:]",
"def short_relative_path_from_here(self):\n return self.__class__(os.getcwd()).short_relative_path_to(self)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Checks whether repo is a local or remote path to a repo.
|
def _is_remote_repository(repo: str) -> bool:
return repo.startswith("git@") or repo.startswith("https://")
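
A few illustrative inputs, assuming the function is available at module level as written above; the URLs and the local path are examples only.

print(_is_remote_repository("git@github.com:user/project.git"))   # True
print(_is_remote_repository("https://github.com/user/project"))   # True
print(_is_remote_repository("/home/user/projects/project"))       # False, treated as local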
|
[
"def is_repo(repo_dir: Path): \n try:\n r = git.Repo(repo_dir)\n except git.InvalidGitRepositoryError:\n return False \n return True",
"def has_local_repo(self):\n return all((self.local_path, self.local_file, self.local_repo))",
"def test_missing_git_and_slash_url(self, _, __):\n self.assertTrue(detect_repo.check_for_repo_name('fake-path', 'syzkaller'))",
"def is_git_repository(path):\n \n try:\n git = Git(path)\n return True\n except:\n return False",
"def is_git_repository():\n return os.path.isdir(os.path.join(_get_enso_directory(), \".git\"))",
"def _user_requested_repo(local_repo_path):\n # check that the user wants to update this repo\n for user_arg_prefix in repos:\n if local_repo_path.startswith(user_arg_prefix):\n return True\n return False",
"def is_remote_proto(repo):\n # frozenset never changes\n rps_set = frozenset([\"http\", \"https\", \"ftp\", \"smb\", \"nfs\"])\n if repo[0:repo.find(\"://\")] in rps_set:\n return True\n return False",
"def check(self):\n slab_logger.log(15, 'Checking for repo %s' % self.get_reponame())\n if os.path.exists(\"./{}\".format(self.get_reponame())):\n slab_logger.log(25, \"repo for %s exist as %s\"\n % (self.name, self.get_reponame()))\n return True\n return False",
"def is_vcs_repository(path):\n return get_vcs_root(path) is not None",
"def is_git_repo(path):\n if not path:\n return False\n args = ['git', '-C', path, 'status']\n return sp.call(args, stderr=sp.STDOUT, stdout=open(os.devnull, 'w')) == 0",
"def is_git_repository(self) -> bool:\n try:\n get_repository_root(self.root_dir)\n return True\n except NotAGitRepository:\n return False",
"def test_normal_style_repo_url(self, _, __):\n self.assertTrue(detect_repo.check_for_repo_name('fake-path', 'syzkaller'))",
"def test_no_exception_raised_if_repository_is_valid_git_repository(tmp_path):\n Repo.init(tmp_path)\n git_instance = zenml.core.git_wrapper.GitWrapper(tmp_path)\n assert git_instance.repo_path == tmp_path\n assert git_instance.repo_path.exists()\n assert git_instance.repo_path.is_dir()\n assert git_instance.git_root_path == str(\n tmp_path / zenml.core.git_wrapper.GIT_FOLDER_NAME\n )\n assert isinstance(git_instance.git_repo, Repo)",
"def _is_system_repo(ruri):\n\n return isinstance(ruri, SystemRepositoryURI)",
"def _link_is_valid_repo(self, link: str) -> bool:\n return link in self.github_info['repos']",
"def is_ssh_repo_url(repo_url: str):\n return repo_url.startswith(SSH_PREFIX) or repo_url.startswith(GIT_SSH_USER_PREFIX)",
"def repo_exists(self, repo_name, owner):\n resp = requests.get(API_URL.format(repo_name))\n return resp.ok",
"def is_gitrepo(repodir):\n notify.print_info(\"Checking if directory '%s' contains a Git repo...\" % repodir, 2)\n try:\n cmd = [\"git\", \"rev-parse\"]\n stdout, stderr = utils.execute(cmd, execdir=repodir, \\\n stderr=open(os.devnull))\n except errors.SystemCallError:\n # Exit code is non-zero\n return False\n else:\n # Success error code (i.e. dir is in a git repo)\n return True",
"def is_git_repo(dest):\n if not os.path.isdir(dest):\n return False\n\n try:\n proc = subprocess.Popen([\"git\", \"rev-parse\", \"--git-dir\"], cwd=dest, stdout=subprocess.PIPE)\n if proc.wait() != 0:\n return False\n output = proc.stdout.read().strip()\n git_dir = os.path.normpath(os.path.join(dest, output))\n retval = (git_dir == dest or git_dir == os.path.join(dest, \".git\"))\n return retval\n except subprocess.CalledProcessError:\n return False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Clones the remote repo to path_to_folder.
|
def _clone_remote_repository(self, path_to_folder: str, repo: str) -> str:
repo_folder = os.path.join(path_to_folder, self._get_repo_name_from_url(repo))
git.Repo.clone_from(url=repo, to_path=repo_folder)
return repo_folder
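
A hedged sketch of the underlying GitPython call; the URL and target folder are placeholders, and running it performs a real network clone.

import os
import git  # GitPython

repo_url = "https://github.com/user/project.git"     # assumed example URL
target = os.path.join("/tmp/checkouts", "project")   # assumed destination folder
cloned = git.Repo.clone_from(url=repo_url, to_path=target)
print(cloned.working_tree_dir)                       # path of the fresh working copy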
|
[
"def clone(dirpath, git_url):\n purge_folder(dirpath)\n Repo.clone_from(git_url, dirpath, progress=Progress())",
"def clone_to_folder(destination, endpoint):\n click.echo('... cloning ' + endpoint + ' to ' + destination)\n execute('git clone -q ' + endpoint)",
"def sync(self):\n if not os.path.exists(self.repo_path):\n os.makedirs(self.repo_path)\n\n logger.info('Cloning repository %s to %s',\n self.clone_path, self.repo_path)\n execute(['git', 'clone', '--bare', self.clone_path,\n self.repo_path])\n else:\n logger.info('Fetching into existing repository %s',\n self.repo_path)\n execute(['git', '--git-dir=%s' % self.repo_path, 'fetch',\n 'origin', '+refs/heads/*:refs/heads/*'])",
"def clone(self, container, path = \"root\"):\n repo = clone_repo_in_folder(self.service, container, path)\n if repo:\n if not repo.is_corrupt():\n repo.pull()\n print(f\"{repo.get_name()} cloned.\")\n else:\n print(\"Error: Unable to clone a repository where one already exists\")",
"def clone(self, branch):\n targetdir = self.basedir + \"/\" + branch + \"/\" + self.postfix\n g = git.cmd.Git()\n g.clone(self.url, targetdir, branch=branch, depth=1)",
"def action_clone(self):\n self.change(\n EnsureDirectory(\n self.resource.name, self.resource.user, self.resource.group, 0o755\n )\n )\n\n try:\n self.action(\"init\", self.resource.name)\n except error.SystemError:\n raise error.CheckoutError(\"Cannot initialise local repository.\")\n\n self.action_set_remote()",
"def clone(self, url):\n cmdline = shlex.split(\"git clone\") + [url, self.directory]\n\n Program.runprogram(cmdline, use_sudo=self.use_sudo, user=self.sudo_user)",
"def git_clone(self, url, target):\n pass",
"def git_clone(url):\n execute(\"git clone {}\".format(url))",
"async def get_directory(\n self, from_path: Optional[str] = None, local_path: Optional[str] = None\n ) -> None:\n # CONSTRUCT COMMAND\n cmd = [\"git\", \"clone\", self._create_repo_url()]\n if self.reference:\n cmd += [\"-b\", self.reference]\n\n # Limit git history\n cmd += [\"--depth\", \"1\"]\n\n # Clone to a temporary directory and move the subdirectory over\n with TemporaryDirectory(suffix=\"prefect\") as tmp_dir:\n cmd.append(tmp_dir)\n\n err_stream = io.StringIO()\n out_stream = io.StringIO()\n process = await run_process(cmd, stream_output=(out_stream, err_stream))\n if process.returncode != 0:\n err_stream.seek(0)\n raise OSError(f\"Failed to pull from remote:\\n {err_stream.read()}\")\n\n content_source, content_destination = self._get_paths(\n dst_dir=local_path, src_dir=tmp_dir, sub_directory=from_path\n )\n\n ignore_func = None\n if not self.include_git_objects:\n ignore_func = ignore_patterns(\".git\")\n\n copytree(\n src=content_source,\n dst=content_destination,\n dirs_exist_ok=True,\n ignore=ignore_func,\n )",
"def git_clone_ref(url, ref, directory):\n execute(\"git clone -b {} {} --depth=1 {}\".format(ref, url, directory))",
"def _clone_project(self, github_url):\n temp_dir = tempfile.mkdtemp(prefix='github')\n project = github_url[(github_url.rfind('/') + 1):]\n project_path = os.path.join(temp_dir, project)\n repo = git.Repo.clone_from(github_url, project_path)\n self.output_remote_update(\"Clone project {} to {}\".format(github_url, project_path))\n return repo, project_path",
"def clone(repo, src, dest, shallow, rev):\n if dest is None:\n dest = posixpath.split(src)[-1] or '.'\n click.echo('Cloning repo %s to %s' % (src, os.path.abspath(dest)))\n repo.home = dest\n if shallow:\n click.echo('Making shallow checkout')\n click.echo('Checking out revision %s' % rev)",
"def _clone_hg_repo(self, name, dst_dir, branch='default'):\n conf = self.configuration\n try:\n repo = Repository(conf, name)\n clone_from = conf.get('master', 'clone_from')\n repo.clone_locally(dst_dir, branch, clone_from)\n except RepositoryError as error:\n log.error(error)\n raise MasterError(error)",
"def shallow_clone(self, remote_location, branch):\n\n parsed = urllib.parse.urlparse(remote_location)\n\n pool_manager = PoolManager(ca_certs=certifi.where())\n pool_manager.headers['Cookie'] = self.auth_cookie\n # Suppress ResourceWarning\n pool_manager.headers['Connection'] = 'close'\n\n client = HttpGitClient.from_parsedurl(\n parsed, config=self.get_config_stack(), pool_manager=pool_manager)\n fetch_result = client.fetch(\n parsed.path, self, determine_wants=lambda mapping:\n [mapping[REF_HEADS_PREFIX + _B(branch)]], depth=1)\n stripped_refs = strip_peeled_refs(fetch_result.refs)\n branches = {\n n[len(REF_HEADS_PREFIX):]: v\n for (n, v) in stripped_refs.items()\n if n.startswith(REF_HEADS_PREFIX)\n }\n self.refs.import_refs(REF_REMOTES_PREFIX + DEFAULT_REMOTE_NAME, branches)\n self[HEAD] = self[REF_REMOTES_PREFIX + DEFAULT_REMOTE_NAME + b'/' +\n _B(branch)]",
"def workspace_clone(ctx, clobber_mets, download, mets_url, workspace_dir):\n LOG = getLogger('ocrd.cli.workspace.clone')\n if workspace_dir:\n LOG.warning(DeprecationWarning(\"Use 'ocrd workspace --directory DIR clone' instead of argument 'WORKSPACE_DIR' ('%s')\" % workspace_dir))\n ctx.directory = workspace_dir\n\n workspace = ctx.resolver.workspace_from_url(\n mets_url,\n dst_dir=ctx.directory,\n mets_basename=ctx.mets_basename,\n clobber_mets=clobber_mets,\n download=download,\n )\n workspace.save_mets()\n print(workspace.directory)",
"def clone_repo(self):\n\n repo_url = \"{0}/{1}/{2}.git\".format(self.git_url, self.namespace, self.repo)\n try:\n shutil.rmtree(self.repo)\n except FileNotFoundError:\n pass\n self.logger.info(\"Clonning {}\".format(repo_url))\n\n max_retry = 5\n attempt = 1\n while True:\n try:\n git_repo = Repo.clone_from(repo_url, \"./{}\".format(self.repo))\n except Exception as exception:\n attempt += 1\n if attempt > max_retry:\n self.logger.error(exception)\n raise Exception(\"Couldn't clone {}\".format(repo_url))\n time.sleep(10)\n self.logger.info(\"Retrying to clone {}. Attempt {}/{}\".format(repo_url, attempt, max_retry))\n continue\n break\n\n self.logger.info(\"Checkout {}\".format(self.branch))\n git_repo.git.checkout(self.branch)\n\n if not self.pr:\n return\n\n current_dir = os.getcwd()\n os.chdir(self.repo)\n\n self.logger.info(\"Fetching PR {}\".format(self.pr))\n cmd = \"git fetch -fu origin refs/pull/{}/head:pr\".format(self.pr)\n self.logger.debug(\"Running {}\".format(cmd))\n attempt = 1\n while True:\n try:\n subprocess.run(cmd.split(), universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True)\n except subprocess.CalledProcessError as exception:\n attempt += 1\n if attempt > max_retry:\n self.logger.error(str(exception))\n if exception.stderr:\n self.logger.debug(exception.stderr)\n if exception.stdout:\n self.logger.debug(exception.stdout)\n raise Exception(\"Couldn't fetch PR {}\".format(self.pr)) from None\n self.logger.info(\"Retrying to fetch PR {}. Attempt {}/{}\".format(self.pr, attempt, max_retry))\n continue\n break\n\n self.logger.info(\"Merging PR {} to {}\".format(self.pr, self.branch))\n cmd = [\"git\", \"-c\", \"user.name=Fedora CI\", \"-c\", \"user.email=ci@lists.fedoraproject.org\",\n \"merge\", \"pr\", \"-m\", \"Fedora CI pipeline\"]\n self.logger.debug(\"Running {}\".format(\" \".join(cmd)))\n try:\n subprocess.run(cmd, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True)\n except subprocess.CalledProcessError as exception:\n self.logger.error(str(exception))\n if exception.stderr:\n self.logger.error(exception.stderr)\n if exception.stdout:\n self.logger.error(exception.stdout)\n raise Exception(\"Couldn't merge {}\".format(self.pr)) from None\n\n os.chdir(current_dir)",
"def test_clone_repo(self):\n repo = 'git@github.com:user/repository'\n unbox.main([repo])\n subprocess.check_call.assert_called_with(['git', 'clone', repo,\n 'repository'])",
"def test_clone_repo(tmpdir, settings):\n settings.REPO_ROOT = str(tmpdir)\n tasks.clone_repo('bower-cache', 'git://github.com/Tinche/bower-cache')\n assert len(tmpdir.listdir()) == 1\n assert tmpdir.listdir()[0].basename == 'bower-cache'"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Parses a repo URL to get its name.
|
def _get_repo_name_from_url(url: str) -> str:
last_slash_index = url.rfind("/")
last_suffix_index = url.rfind(".git")
if last_suffix_index < 0:
last_suffix_index = len(url)
if last_slash_index < 0 or last_suffix_index <= last_slash_index:
raise Exception("Badly formatted url {}".format(url))
return url[last_slash_index + 1:last_suffix_index]
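
A short illustration with example URLs (not taken from the dataset), assuming the function is called as written above; each call strips everything up to the last slash and any trailing ".git".

print(_get_repo_name_from_url("https://github.com/user/my-project.git"))  # my-project
print(_get_repo_name_from_url("git@github.com:user/my-project.git"))      # my-project
print(_get_repo_name_from_url("https://bitbucket.org/team/my-project"))   # my-project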
|
[
"def _repo_name_from_url(url_decode: str):\n github_project_name = os.path.split(url_decode.path)[-1]\n return github_project_name.replace('.git', '')",
"def repo_name(repo):\n\n if \"name\" not in repo or repo[\"name\"] is None:\n repo[\"name\"] = os.path.basename(repo[\"upstream\"])\n if repo[\"name\"].endswith(\".git\"):\n repo[\"name\"] = repo[\"name\"][:-4]\n\n return repo[\"name\"]",
"def get_host_repo_for_link(cls, repo):\n hostname = None\n # return repo modified to result of extraction\n if repo.startswith(('https://', 'http://')):\n # parse hostname for passing to whatever holder selected\n url_parts = repo.split('/')\n hostname = url_parts[2]\n offset = 3 + cls.REPO_URL_PROJECT_OFFSET\n repo = \"/\".join(url_parts[offset:offset + cls.REPO_URL_PROJECT_COMPONENTS])\n return hostname, repo",
"def url_to_repo(self, url):\n tokens = re.split(r'://|/', url)\n owner, repo_name = tokens[3], tokens[4]\n return self.repo_root_url.format(owner=owner, repo_name=repo_name)",
"def parse_pr_url(url: str) -> Tuple[str, int]:\n arr = url.split('/')\n full_repo_name = '{}/{}'.format(arr[-4], arr[-3])\n pr_num = int(arr[-1])\n\n return full_repo_name, pr_num",
"def parse_github_url(url):\n \n if url is None or len(url.strip()) == 0:\n return None, None, None, None\n \n url = url.strip()\n parsed_url = urlparse(url)\n path_list = parsed_url.path.split('/')\n \n hostname = parsed_url.netloc\n org = path_list[1]\n repo = path_list[2]\n \n if len(path_list) == 5:\n pr_or_issue_number = path_list[4]\n\n return hostname, org, repo, pr_or_issue_number",
"def get_reponame(self):\n assert type(self).__name__ != \"Repo\", \"no repo name available \"",
"def __format_repo(repo):\n data = repo.split(\"/\")\n organization = data[0]\n repo_name = data[1]\n return organization, repo_name",
"def student_name_from(repo_name: str) -> str:\n m = re.search(github_prefix + \"-(.*)$\", repo_name)\n if not m:\n return \"\" # something funny in the name, so therefore not matching\n else:\n return m.group(1)",
"def extract_github_name(remote):\n\n result = re.match(_SSH_REGEX, remote)\n if result is not None:\n return result.group('user'), result.group('repo')\n\n result = re.match(_HTTPS_REGEX, remote)\n if result is not None:\n return result.group('user'), result.group('repo')\n\n return None",
"def infer_repo_details(self):\n remote_url = self.check_output_in_repo_dir(['git', 'config', '--get', 'remote.origin.url'])\n remote_matcher = re.search(r':([^\\/]+)/([^\\.]+)\\.git$', remote_url)\n if not remote_matcher:\n raise InvalidRemote(remote_url)\n\n # Returns the owner first, then the repo name\n return remote_matcher.group(1), remote_matcher.group(2)",
"def get_github_username(token):\n headers = {'Authorization': 'Bearer ' + token['access_token']}\n response = requests.get(USER_URL, headers=headers).json()\n username = response['login']\n repos_url = response['repos_url']\n return username, repos_url, headers",
"def package_repo_url(self, package_name):\n s = self._repos[package_name]\n if isinstance(s, basestring):\n return s\n else:\n # For packages that have sub-documents, rather than the value\n # as the URL. See repos.yaml for format documentation.\n return s['url']",
"def repo_url_from_metadata(filename, metadata):\n repo_url = matching_text(metadata, SOURCE_URL_REGEXES)\n if repo_url is None:\n print(f\"No repo URL in {filename}\")\n return None\n if repo_url == \"UNKNOWN\":\n print(f\"Repo URL is UNKNOWN in {filename}\")\n return None\n return repo_url",
"def get_matching_hostname(cls, repo):\n if not repo.startswith(('http://', 'https://')):\n return None\n if not cls.DEFAULT_HOSTNAME and not cls.SUBDOMAIN_INDICATOR:\n return None\n url_parts = repo.split('/')\n domain = url_parts[2]\n if cls.DEFAULT_HOSTNAME == domain:\n return domain\n if cls.SUBDOMAIN_INDICATOR and domain.startswith(cls.SUBDOMAIN_INDICATOR + \".\"):\n return domain\n return None",
"def repository_name(self) -> typing.Optional[str]:\n return self._values.get('repository_name')",
"def get_repo_url():\n default_repo = 's3://gluonnlp-numpy-data'\n repo_url = os.environ.get('GLUONNLP_REPO_URL', default_repo)\n if repo_url[-1] != '/':\n repo_url = repo_url + '/'\n return repo_url",
"def _get_name(ref):\n return ref.remote_head if isinstance(ref, git.RemoteReference) else ref.name",
"def get_github_repo_url():\n return 'git://github.com/%s/%s.git' % (MOZILLA_GITHUB_ACCOUNT, DEEPSPEECH_GITHUB_PROJ)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Loads a commit graph stored in the pickle format.
|
def load_commit_graph(self, path):
self.commit_graph = nx.readwrite.gpickle.read_gpickle(path)
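
A hedged round-trip sketch of the gpickle helpers the method relies on; it assumes networkx older than 3.0 (where nx.readwrite.gpickle still exists), and 'commit_graph.gpickle' plus the edge attribute values are placeholders.

import networkx as nx

g = nx.Graph()
g.add_edge('src/module_a.py', 'src/module_b.py', number_modifications_same_commit=3)
nx.readwrite.gpickle.write_gpickle(g, 'commit_graph.gpickle')        # save the graph
loaded = nx.readwrite.gpickle.read_gpickle('commit_graph.gpickle')   # load it back, as the method does
print(loaded.edges(data=True))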
|
[
"def load_commit_graph_lines(self, path):\n\n self.commit_graph_lines = nx.readwrite.gpickle.read_gpickle(path)",
"def pickle_to_graph(pickle):\n\treturn pickle.loads(pickle)",
"def loadGraph(filename):\n with open(filename,\"rb\") as fp:\n return pickle.load(fp)",
"def _load_graph(filename):\r\n # Commad for loading the graph\r\n with open(filename, 'rb') as file:\r\n return pickle.load(file)",
"def unmarshal(cls, path):\n with open(path, 'rb') as loadfile:\n data = marshal.load(loadfile)\n return Graph(data)",
"def load_data(self):\n try:\n with open('_blockchain.txt', mode='r') as f:\n #file_content = pickle.loads(f.read())\n file_content = f.readlines()\n print(file_content)\n #blockchain = file_content['chain']\n #open_transactions = file_content['ot']\n blockchain = json.loads(file_content[0][:-1])\n updated_blockchain = []\n for block in blockchain:\n converted_tx = [Transaction(tx['sender'], tx['recipient'], tx['amount']) for tx in block['transactions']]\n updated_block = Block(block['index'], block['previous_hash'], converted_tx, block['proof'], block['timestamp'])\n updated_blockchain.append(updated_block)\n self.chain = updated_blockchain\n open_transactions = json.loads(file_content[1])\n updated_transactions = []\n for tx in open_transactions:\n updated_transaction = Transaction(tx['sender'], tx['recipient'], tx['amount'])\n updated_transactions.append(updated_transaction)\n self.__open_transactions = updated_transactions\n except (IOError, IndexError):\n pass\n finally:\n print('Success!')",
"def load_graph(graph: Union[str, pathlib.Path]) -> BGraph:\n try:\n bgraph: BGraph = pickle.load(open(graph, \"rb\"))\n except (pickle.PickleError, FileNotFoundError):\n raise bgraph.exc.BGraphLoadingException()\n\n return bgraph",
"def loadTree(filepath):\r\n return pickle.load(open(filepath, \"rb\"))",
"def graph_to_pickle(g):\n\treturn pickle.dumps(g, pickle.HIGHEST_PROTOCOL)",
"def load_graph(filename):\n with open(filename, 'r') as f:\n data = f.read()\n\n split_filename = filename.split('.')\n num_players = int(split_filename[0])\n num_seeds = int(split_filename[1])\n unique_id = int(split_filename[2])\n\n graph_dict = json.loads(data)\n G = nx.Graph(graph_dict)\n\n # Get rid of isolated nodes.\n G.remove_nodes_from(list(nx.isolates(G)))\n\n return G, num_players, num_seeds, unique_id",
"def load_network(self, fname):\n with self.graph.as_default():\n saver = tf.train.Saver()\n saver.restore(self.sess, self.cp_path + \"model/\" + fname)",
"def load(load_input: PycographLoadInput) -> Graph:\n project = PythonProject(root_dir_path=load_input.project_dir_path) # type: ignore\n project_parse_result = project.parse()\n return populate_graph(load_input.graph_name, project_parse_result) # type: ignore",
"def load_data(filename):\n\tud_graph = grew.graph(filename)\n\treturn ud_graph",
"def load_pb(path):\n with tf.compat.v1.gfile.GFile(path, \"rb\") as f:\n graph_def = tf.compat.v1.GraphDef()\n graph_def.ParseFromString(f.read())\n with tf.Graph().as_default() as graph:\n tf.import_graph_def(graph_def, name='')\n return graph",
"def load_graph(file_path):\n \n assert ('txt' in file_path), 'Please choose a graph file of type txt'\n\n G = nx.read_edgelist(file_path,create_using=nx.Graph(), nodetype = int)\n return G",
"def load(self, serialized):\n self.m = pickle.load(serialized)",
"def load_old_tree(self, path):\n tree = []\n try:\n ftree = open(path, 'r')\n tree = ftree.read()\n\n except IOError:\n logging.exception('Database tree file \"' + e.filename + '\" not found.')\n finally:\n ftree.close()\n\n self._load_tree(tree)",
"def load_ontology_pickle():\n check_ontology()\n fcso = pickle.load(open(CSO_PICKLE_PATH, \"rb\"))\n return fcso",
"def load_graph(fname):\n g = nx.Graph()\n with open(fname) as fl:\n for line in fl:\n u, v = line.split(\" \")\n g.add_edge(int(u), int(v))\n print(\"Loaded graph with {} nodes\".format(len(g.nodes)))\n return g"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Loads a commit graph for lines stored in the pickle format.
|
def load_commit_graph_lines(self, path):
self.commit_graph_lines = nx.readwrite.gpickle.read_gpickle(path)
|
[
"def __grab_patch__(self):\n\n patch = []\n line = self.buffer or self.fd.readline()\n\n while line:\n m = patterns['commit'].match(line)\n if m:\n patch = [line]\n break\n line = self.fd.readline()\n\n if not line:\n return None\n\n line = self.fd.readline()\n while line:\n # If this line starts a new commit, drop out.\n m = patterns['commit'].match(line)\n if m:\n self.buffer = line\n break\n\n patch.append(line)\n self.buffer = None\n line = self.fd.readline()\n\n return patch",
"def load_commit_graph(self, path):\n\n self.commit_graph = nx.readwrite.gpickle.read_gpickle(path)",
"def process_branch_commit(self, svn_commit):\n\n raise NotImplementedError()",
"def extract_git_blame_lines(file_name, susp_file_path, git_blame_output):\n file_path = find_file_path(file_name, susp_file_path)\n os.system(f\"git blame {file_path} > {git_blame_output}\")\n git_blame_data = csv.reader(open(git_blame_output, encoding='ISO-8859-1'), delimiter='\\n')\n git_blame_list = list(git_blame_data)\n git_blame_lines = {(i+1):git_blame_list[i] for i in range(len(git_blame_list))}\n\n return git_blame_lines",
"def analyze_correlation_commit_lines_graph_concurent(self, single_line=None):\n\n cwd = os.getcwd()\n os.chdir(self.repo_folder)\n\n commit_to_lines = {}\n\n # Print analyzing all the lines of the repo\n print('Print analyzing all the lines of the repo')\n file_lines = []\n \n\n if single_line:\n\n already_seen_files = set()\n modified_in_commits = self.get_commits_that_modified_line(single_line[1], single_line[1], single_line[0])\n modified_in_commits = [commit[1:-1] for commit in modified_in_commits]\n for commit in pydriller.Repository(self.repo_folder, only_commits=modified_in_commits).traverse_commits():\n\n for modification in commit.modified_files:\n\n path = single_line[0].replace(\"/\", \"\\\\\")\n if modification.new_path in self.repo_files_path:\n current_path = modification.new_path\n else:\n current_path = self.retrieve_current_path(modification.new_path)\n\n if current_path not in already_seen_files:\n if current_path is not None and modification.new_path[-4:] not in self.forbidden_file_extensions:\n\n # Get path to file to count number of lines\n filepath = self.repo_folder + '\\\\' + current_path\n linenumber = self.get_file_number_of_lines(filepath)\n already_seen_files.add(current_path)\n\n for i in range(1, linenumber):\n file_lines.append((current_path, i))\n\n else:\n\n for file_path in tqdm.tqdm(self.repo_files_path):\n\n # Get path to file and count number of lines\n complete_file_path = self.repo_folder + '\\\\' + file_path\n linenumber = self.get_file_number_of_lines(complete_file_path)\n\n for i in range(1, linenumber):\n file_lines.append((file_path, i))\n\n line_to_commits = {}\n with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor:\n future_to_line = {executor.submit(self.analyze_line, file_line): file_line for file_line in file_lines}\n\n pbar = tqdm.tqdm(total=len(file_lines))\n for future in concurrent.futures.as_completed(future_to_line):\n file_line = future_to_line[future]\n try:\n modified_in_commits = future.result()\n line_to_commits[file_line] = modified_in_commits\n except Exception as exc:\n print(f'Error during execution : {exc}')\n pbar.update(1)\n pbar.close()\n\n for file_line, modified_in_commits in line_to_commits.items():\n\n file_path, line = file_line\n self.commit_graph_lines.add_node(f'{file_path}:{line}', number_modifications=len(modified_in_commits))\n\n for commit in modified_in_commits:\n\n if commit in commit_to_lines:\n commit_to_lines[commit].append(f'{file_path}:{line}')\n else:\n commit_to_lines[commit] = [f'{file_path}:{line}']\n\n\n # Building the graph\n print('\\n\\nBuilding the graph')\n for (_, list_lines) in tqdm.tqdm(commit_to_lines.items()):\n\n pairs_of_modified_lines = []\n for i in range(len(list_lines)):\n for j in range(i+1, len(list_lines)):\n pairs_of_modified_lines.append((list_lines[i], list_lines[j]))\n\n for edge in pairs_of_modified_lines:\n\n if edge[0] in self.commit_graph_lines.nodes and edge[1] in self.commit_graph_lines.nodes:\n if self.commit_graph_lines.has_edge(edge[0], edge[1]):\n self.commit_graph_lines.edges[edge[0], edge[1]]['number_modifications_same_commit'] += 1\n else:\n self.commit_graph_lines.add_edge(edge[0], edge[1], number_modifications_same_commit=1)\n\n os.chdir(cwd)",
"def checkout(self, commit_id):\n raise NotImplementedError",
"def process_primary_commit(self, svn_commit):\n\n raise NotImplementedError()",
"def get_commits_that_modified_line(self, start_line, end_line, path):\n\n # history = self.git_repo2.git.log('-L', f'{start_line},{end_line}:{path}').split('\\n')\n history = subprocess.run(['git', 'log', '-L', f'{start_line},{end_line}:{path}', '--format=\\\"%H\\\"', '-s'], capture_output=True, encoding='utf_8').stdout.split('\\n')\n modified_in_commits = [line for line in history if len(line) > 0]\n \n '''\n for line in history:\n if line[0:6] == 'commit':\n modified_in_commits.append(line[7:])\n '''\n \n return modified_in_commits",
"def gedcom_lines(self, offset):\n\n self._file.seek(offset)\n\n prev_gline = None\n while True:\n\n offset = self._file.tell()\n line = self._file.readline() # stops at \\n\n if not line:\n break\n line = line.lstrip().rstrip(b\"\\r\\n\")\n\n match = _re_gedcom_line.match(line)\n if not match:\n self._file.seek(offset)\n lineno = guess_lineno(self._file)\n line = line.decode(self._encoding, \"ignore\")\n raise ParserError(\"Invalid syntax at line \"\n \"{0}: `{1}'\".format(lineno, line))\n\n level = int(match.group('level'))\n xref_id = match.group('xref')\n if xref_id:\n xref_id = xref_id.decode(self._encoding, self._errors)\n tag = match.group('tag').decode(self._encoding, self._errors)\n\n # simple structural integrity check\n if prev_gline is not None:\n if level - prev_gline.level > 1:\n # nested levels should be incremental (+1)\n self._file.seek(offset)\n lineno = guess_lineno(self._file)\n line = line.decode(self._encoding, \"ignore\")\n raise IntegrityError(\"Structural integrity - \"\n \"illegal level nesting at line \"\n \"{0}: `{1}'\".format(lineno, line))\n if tag in (\"CONT\", \"CONC\"):\n # CONT/CONC level must be +1 from preceding non-CONT/CONC\n # record or the same as preceding CONT/CONC record\n if ((prev_gline.tag in (\"CONT\", \"CONC\") and\n level != prev_gline.level) or\n (prev_gline.tag not in (\"CONT\", \"CONC\") and\n level - prev_gline.level != 1)):\n self._file.seek(offset)\n lineno = guess_lineno(self._file)\n line = line.decode(self._encoding, \"ignore\")\n raise IntegrityError(\"Structural integrity - illegal \"\n \"CONC/CONT nesting at line \"\n \"{0}: `{1}'\".format(lineno, line))\n\n gline = gedcom_line(level=level,\n xref_id=xref_id,\n tag=tag,\n value=match.group('value'),\n offset=offset)\n yield gline\n\n prev_gline = gline",
"def synthesize_cvs_commit_ids(self):\n\n rows = self.db.query(self.db.rewrite_sql(\"SELECT count(*) FROM checkins WHERE commitid IS NULL\"), []);\n count = rows[0][0]\n if (count == 0):\n return\n\n print(\"Updating \" + str(count) + \" legacy CVS entries\")\n select = self.db.rewrite_sql(\"SELECT id, ci_when, whoid, repositoryid, branchid, descid FROM checkins WHERE commitid IS NULL ORDER BY repositoryid, branchid, whoid, ci_when LIMIT 100000\")\n rows = self.db.query(select, [])\n\n i = 0\n commitid = 0\n last_row = [0, 0, 0, 0, 0, 0]\n while len(rows) > 0:\n cursor = self.db.conn.cursor()\n for row in rows:\n if not self.are_rows_in_same_commit(row, last_row):\n cursor.execute(\"INSERT INTO commitids (hash, co_when, authorid, committerid) VALUES (%s, %s, %s, %s)\", [\"s\" + str(time.time()) + str(i), row[1], row[2], row[2]])\n commitid = cursor.lastrowid\n cursor.execute(self.db.rewrite_sql(\"UPDATE checkins SET commitid=%s WHERE id=%s\"), [commitid, row[0]])\n i = i + 1\n last_row = row\n\n cursor.close()\n self.db.conn.commit()\n self.db.conn.begin()\n print(\" Updated \" + str(i) + \" / \" + str(count))\n rows = self.db.query(select, []);\n cursor.close()\n self.db.conn.commit()\n print(\"OK: Converted CVS legacy entries\")",
"def commit_mutilation(self):\n #keep track of the current chunk being created\n chunkNum = 0\n with open(self.victim) as fileHandle:\n while True:\n #use readlines so we get a list of lines that can be sorted.\n chunk = fileHandle.readlines(self.chunkSize)\n\n #if the chunk is empty we are @ EOF; so break\n if not chunk:\n break\n\n #sort and write chunk to chunkFiles\n self._hide_corpse(chunk, chunkNum)\n\n #increment for next chunk file so they are uniquely named\n chunkNum += 1",
"def get_commit_record(repo_path,branches_names): \n commits = []\n commit_record = [] \n \n if 'master' in branches_names:\n del branches_names[branches_names.index('master')]\n branches_names.insert(0,'master') \n \n for b in branches_names: \n \n s = subprocess.check_output(\"cd %s; git checkout %s; git log \" % (repo_path,b), shell=True)\n r = re.compile(\"commit (.*?)\\n.*?Author: (.*?)\\n.*?Date:(.*?)\\n\\n(.*?)\\n\", re.M+re.S+re.U+re.I)\n matches = r.findall(s)\n for m in matches:\n\t \n\t l = m[2][3:len(m[2])-6] # m[2] contains date and time of commit log\n\t \n\t time = datetime.datetime.strptime(l, '%a %b %d %H:%M:%S %Y') # parses datetime string ('l' here) according to format\n\t \n\t commits.append(dict(commit_hash=m[0].strip(), author=m[1].strip(), datetime=time, message=m[3].strip(),branches = b.strip()))\n \n if not commit_record:\n commit_record = commits + commit_record #concatenate bcz comit_record is empty\n \n else: \n\t for t in commits: # comapare commit hash to avoid repitition of same commit log\n\t for j in commit_record:\n\t\t if (t ['commit_hash'] != j['commit_hash']):\n\t\t if j == commit_record[-1]: \n\t\t\t commit_record.append(t)\n\t else:\n\t\t break\n commit_record = sorted(commit_record, key=operator.itemgetter('datetime'), reverse = True) # sort commit record according to date and time\t\n \n \n return commit_record",
"def test_with_multi_commit_diff(self):\n reader = DiffXReader(io.BytesIO(\n b'#diffx: encoding=utf-8, version=1.0\\n'\n b'#.change:\\n'\n b'#..preamble: indent=4, length=49, mimetype=text/markdown\\n'\n b' Summary of the _first_ commit in the series.\\n'\n b'#..meta: format=json, length=244\\n'\n b'{\\n'\n b' \"author\": \"Test User <test@example.com>\",\\n'\n b' \"committer\": \"Test User <test@example.com>\",\\n'\n b' \"committer date\": \"2021-06-02T13:12:06-07:00\",\\n'\n b' \"date\": \"2021-06-01T19:26:31-07:00\",\\n'\n b' \"id\": \"a25e7b28af5e3184946068f432122c68c1a30b23\"\\n'\n b'}\\n'\n b'#..file:\\n'\n b'#...meta: format=json, length=166\\n'\n b'{\\n'\n b' \"path\": \"file1\",\\n'\n b' \"revision\": {\\n'\n b' \"new\": \"eed8df7f1400a95cdf5a87ddb947e7d9c5a19cef\",\\n'\n b' \"old\": \"c8839177d1a5605aa60abe69db95c84183f0eebe\"\\n'\n b' }\\n'\n b'}\\n'\n b'#...diff: length=60\\n'\n b'--- /file1\\n'\n b'+++ /file1\\n'\n b'@@ -498,7 +498,7 @@\\n'\n b' ... diff content\\n'\n b'#.change:\\n'\n b'#..preamble: indent=4, length=52\\n'\n b' Summary of commit #2\\n'\n b'\\n'\n b' Here\\'s a description.\\n'\n b'#..meta: format=json, length=244\\n'\n b'{\\n'\n b' \"author\": \"Test User <test@example.com>\",\\n'\n b' \"committer\": \"Test User <test@example.com>\",\\n'\n b' \"committer date\": \"2021-06-02T19:46:25-07:00\",\\n'\n b' \"date\": \"2021-06-01T19:46:22-07:00\",\\n'\n b' \"id\": \"91127b687f583184144161f432222748c1a30b23\"\\n'\n b'}\\n'\n b'#..file:\\n'\n b'#...meta: format=json, length=166\\n'\n b'{\\n'\n b' \"path\": \"file2\",\\n'\n b' \"revision\": {\\n'\n b' \"new\": \"a2ccb0cb48383472345d41a32afde39a7e6a72dd\",\\n'\n b' \"old\": \"1b7af7f97076effed5db722afe31c993e6adbc78\"\\n'\n b' }\\n'\n b'}\\n'\n b'#...diff: length=80\\n'\n b'--- a/file2\\n'\n b'+++ b/file2\\n'\n b'@@ -66,7 +66,8 @@\\n'\n b' ... diff content for commit 2, file2\\n'\n b'#..file:\\n'\n b'#...meta: format=json, length=166\\n'\n b'{\\n'\n b' \"path\": \"file3\",\\n'\n b' \"revision\": {\\n'\n b' \"new\": \"0d4a0fb8d62b762a26e13591d06d93d79d61102f\",\\n'\n b' \"old\": \"be089b7197974703c83682088a068bef3422c6c2\"\\n'\n b' }\\n'\n b'}\\n'\n b'#...diff: length=82\\n'\n b'--- a/file3\\n'\n b'+++ b/file3\\n'\n b'@@ -258,7 +258,8 @@\\n'\n b' ... 
diff content for commit 2, file3\\n'\n ))\n\n self.assertEqual(list(reader), [\n {\n 'level': 0,\n 'line': 0,\n 'options': {\n 'encoding': 'utf-8',\n 'version': '1.0',\n },\n 'section': Section.MAIN,\n 'type': 'diffx',\n },\n {\n 'level': 1,\n 'line': 1,\n 'options': {},\n 'section': Section.CHANGE,\n 'type': 'change',\n },\n {\n 'level': 2,\n 'line': 2,\n 'options': {\n 'indent': 4,\n 'length': 49,\n 'mimetype': 'text/markdown',\n },\n 'section': Section.CHANGE_PREAMBLE,\n 'text': 'Summary of the _first_ commit in the series.\\n',\n 'type': 'preamble',\n },\n {\n 'level': 2,\n 'line': 4,\n 'metadata': {\n 'author': 'Test User <test@example.com>',\n 'committer': 'Test User <test@example.com>',\n 'committer date': '2021-06-02T13:12:06-07:00',\n 'date': '2021-06-01T19:26:31-07:00',\n 'id': 'a25e7b28af5e3184946068f432122c68c1a30b23',\n },\n 'options': {\n 'format': 'json',\n 'length': 244,\n },\n 'section': Section.CHANGE_META,\n 'type': 'meta',\n },\n {\n 'level': 2,\n 'line': 12,\n 'options': {},\n 'section': Section.FILE,\n 'type': 'file',\n },\n {\n 'level': 3,\n 'line': 13,\n 'metadata': {\n 'path': 'file1',\n 'revision': {\n 'new': 'eed8df7f1400a95cdf5a87ddb947e7d9c5a19cef',\n 'old': 'c8839177d1a5605aa60abe69db95c84183f0eebe',\n },\n },\n 'options': {\n 'format': 'json',\n 'length': 166,\n },\n 'section': Section.FILE_META,\n 'type': 'meta',\n },\n {\n 'level': 3,\n 'line': 21,\n 'options': {\n 'length': 60,\n },\n 'section': Section.FILE_DIFF,\n 'diff': (\n b'--- /file1\\n'\n b'+++ /file1\\n'\n b'@@ -498,7 +498,7 @@\\n'\n b' ... diff content\\n'\n ),\n 'type': 'diff',\n },\n {\n 'level': 1,\n 'line': 26,\n 'options': {},\n 'section': Section.CHANGE,\n 'type': 'change',\n },\n {\n 'level': 2,\n 'line': 27,\n 'options': {\n 'indent': 4,\n 'length': 52,\n },\n 'section': Section.CHANGE_PREAMBLE,\n 'text': (\n \"Summary of commit #2\\n\"\n \"\\n\"\n \"Here's a description.\\n\"\n ),\n 'type': 'preamble',\n },\n {\n 'level': 2,\n 'line': 31,\n 'metadata': {\n 'author': 'Test User <test@example.com>',\n 'committer': 'Test User <test@example.com>',\n 'committer date': '2021-06-02T19:46:25-07:00',\n 'date': '2021-06-01T19:46:22-07:00',\n 'id': '91127b687f583184144161f432222748c1a30b23',\n },\n 'options': {\n 'format': 'json',\n 'length': 244,\n },\n 'section': Section.CHANGE_META,\n 'type': 'meta',\n },\n {\n 'level': 2,\n 'line': 39,\n 'options': {},\n 'section': Section.FILE,\n 'type': 'file',\n },\n {\n 'level': 3,\n 'line': 40,\n 'metadata': {\n 'path': 'file2',\n 'revision': {\n 'new': 'a2ccb0cb48383472345d41a32afde39a7e6a72dd',\n 'old': '1b7af7f97076effed5db722afe31c993e6adbc78',\n },\n },\n 'options': {\n 'format': 'json',\n 'length': 166,\n },\n 'section': Section.FILE_META,\n 'type': 'meta',\n },\n {\n 'level': 3,\n 'line': 48,\n 'options': {\n 'length': 80,\n },\n 'section': Section.FILE_DIFF,\n 'diff': (\n b'--- a/file2\\n'\n b'+++ b/file2\\n'\n b'@@ -66,7 +66,8 @@\\n'\n b' ... 
diff content for commit 2, file2\\n'\n ),\n 'type': 'diff',\n },\n {\n 'level': 2,\n 'line': 53,\n 'options': {},\n 'section': Section.FILE,\n 'type': 'file',\n },\n {\n 'level': 3,\n 'line': 54,\n 'metadata': {\n 'path': 'file3',\n 'revision': {\n 'new': '0d4a0fb8d62b762a26e13591d06d93d79d61102f',\n 'old': 'be089b7197974703c83682088a068bef3422c6c2',\n },\n },\n 'options': {\n 'format': 'json',\n 'length': 166,\n },\n 'section': Section.FILE_META,\n 'type': 'meta',\n },\n {\n 'level': 3,\n 'line': 62,\n 'options': {\n 'length': 82,\n },\n 'section': Section.FILE_DIFF,\n 'diff': (\n b'--- a/file3\\n'\n b'+++ b/file3\\n'\n b'@@ -258,7 +258,8 @@\\n'\n b' ... diff content for commit 2, file3\\n'\n ),\n 'type': 'diff',\n },\n ])",
"def test_change_line(self):\n tree = self.make_example_branch()\n self.build_tree_contents([\n ('goodbye', 'baz2\\n')])\n output = self.run_bzr('diff --stat-dir', retcode=1)[0]\n self.assertEqualDiff(output, '''\\\n . | 2 +-\n 1 directory changed, 1 insertion(+), 1 deletion(-)\n''')\n self.check_output_rules(output)",
"def get_first_commit_hash_before_given(self, commit_hash):\r\n return self._execute_command(get_first_commit_before_patch.format(since=commit_hash)).strip()",
"def process_commit_details(cid, repo, repo_name, filter_config):\n\n c = repo.commit(cid)\n pool = Pool()\n\n # blame = [assign_blame(d.b_blob.path, d.diff, p.hexsha,\n # repo_name, cid)\n output = []\n if len(c.parents) > 0:\n p = c.parents[0]\n output = [pool.apply_async(assign_blame,\n args=(d.b_blob.path,\n d.diff, p.hexsha,\n repo_name, cid))\n # for p in c.parents # iterate through each parent\n for d in c.diff(p, create_patch=True).iter_change_type('M')\n if (d.a_blob and d.b_blob\n and filter_file(d.b_blob.path, filter_config)\n and str(d.a_blob) != git.objects.blob.Blob.NULL_HEX_SHA\n and str(d.b_blob) != git.objects.blob.Blob.NULL_HEX_SHA\n and d.a_blob.size != 0\n and d.b_blob.size != 0)\n ]\n else:\n output = []\n\n blame = [parent.get() for parent in output]\n pool.close()\n pool.join()\n return dict(blame)",
"def blame(filename, lines=None):\n flags = {}\n if lines is not None:\n flags['L'] = ','.join(map(str, lines))\n\n output = shell('git', 'blame', filename, line_porcelain=True, **flags)\n\n # Output consists of sections of rows, where each section\n # corresponds to single line in the source file (``filename``).\n #\n # Section starts with commit hash, ends with source line itself (indented).\n # In between, there are fields with values, separated by whitespace, e.g.::\n #\n # author-mail coder@example.com\n # author-tz +0200\n\n result = []\n line_info = {}\n for row in output.splitlines():\n if row.startswith('\\t' ):\n line_info['line'] = row.lstrip('\\t')\n result.append(line_info)\n line_info = {}\n continue\n\n head, tail = row.split(None, 1)\n if head in BLAME_FIELDS:\n field, value = head, tail\n if field == 'previous':\n value = value.split()[0] # format: <hash> <filename>\n elif field.endswith('-mail'):\n value = value[1:-1] # strip angle brackets around email\n line_info[field] = value\n else:\n line_info['hash'] = head\n\n return result",
"def test_remove_line(self):\n tree = self.make_example_branch()\n self.build_tree_contents([\n ('goodbye', '')])\n output = self.run_bzr('diff --stat', retcode=1)[0]\n self.assertEqualDiff(output, '''\\\n goodbye | 1 -\n 1 file changed, 0 insertions(+), 1 deletion(-)\n''')\n self.check_output_rules(output)",
"def test_repo_get_single_commit_by_ref(self):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Find the lines related to a given function and print them.
|
def find_lines_related_to_function(self, function_name, path):
modified_in_commits = self.get_commits_that_modified_function(function_name, path)
self.find_related_lines(path, modified_in_commits)
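
A minimal standalone sketch of the same flow, without the class wrapper or the unshown helpers: list the commits that touched a function via `git log -L :<func>:<file>`, then do something with them (here, just print). It assumes `git` is on PATH and the current directory is inside the repository; the function and file names in the usage part are made up.

import subprocess

def commits_touching_function(function_name, path):
    # `git log -L :<funcname>:<file>` follows the history of a single function.
    out = subprocess.run(
        ['git', 'log', '-L', f':{function_name}:{path}', '--format=%H', '-s'],
        capture_output=True, encoding='utf-8',
    ).stdout
    return [line for line in out.split('\n') if line]

if __name__ == '__main__':
    for sha in commits_touching_function('main', 'src/app.py'):  # placeholder names
        print(sha)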
|
[
"def find_next_function(lines, line_number):\n assert False, \"Unimplemented!\"",
"def show_device_functions(source):\n # type: (str) -> str\n for match in FUNCTION_PATTERN.finditer(source):\n print(match.group('qualifiers').replace('\\n', r'\\n'),\n match.group('function'), '(')\n return source",
"def print_func(func_node, indent=False):\n if indent:\n stub = '# ' + func_stub(func_node)\n else:\n stub = '## ' + func_stub(func_node)\n print(stub)\n print(format_docstring(ast.get_docstring(func_node)))\n print()",
"def find_funcs_and_calls(tu):\n filename = tu.cursor.spelling\n\n calls = []\n funcs = []\n defs = []\n for c in tu.cursor.walk_preorder():\n p = False\n if c.location.file is None:\n pass\n elif c.location.file.name != filename:\n pass\n elif c.kind == CursorKind.CALL_EXPR:\n calls.append(c)\n p = True\n elif c.kind == CursorKind.FUNCTION_DECL:\n if c.is_definition():\n defs.append(c)\n else:\n funcs.append(c)\n p = True\n #if p:\n # print(c.kind,c.displayname, c.is_definition())\n # print()\n return funcs, calls, defs",
"def print_detail(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n info = calling_description(func, *args, **kwargs)\n print(f\"calling {info}\")\n return func(*args, **kwargs)\n return wrapper",
"def _report_summary_lines(self):\n func_order = self.all_functions()\n func_width = max(len(f) for f in func_order if f in self.summary_lines)\n lines = []\n fmt = self._format_summary(func_width)\n header = fmt.format(\n function = \"function\"\n , ncalls_a = \"ncalls_a\"\n , ncalls_b = \"ncalls_b\"\n , cumtime_a = \"cumtime_a\"\n , cumtime_b = \"cumtime_b\"\n )\n lines.append(header)\n\n for func in func_order:\n ab = self.summary_lines.get(func)\n if not ab:\n continue\n line = fmt.format(\n function = func\n , ncalls_a = _cell(ab, Side.A, \"ncalls\")\n , ncalls_b = _cell(ab, Side.B, \"ncalls\")\n , cumtime_a = _cell(ab, Side.A, \"cumtime\")\n , cumtime_b = _cell(ab, Side.B, \"cumtime\")\n )\n lines.append(line)\n return lines",
"def do_list_funcs(self, arg):\n # Check if file exists as .py\n if not (os.path.isfile(arg)\n and arg[-3:] == \".py\"\n and arg in os.listdir()):\n print(\"list_funcs: %s: Not a .py file\" % arg)\n return False\n # Search file contents for top-level function declarations\n file_contents = open(arg, mode=\"r\").read()\n for match in re.finditer(self.fun_pat, file_contents):\n # Don't return private methods\n if match.group(1)[:2] != \"__\":\n print(\"\\t\" + match.group(1))",
"def list_all_js_function_names(pfile):\n with open(pfile) as dataFile:\n data = dataFile.readlines()\n\n # searched = re.search(\"function\", data)\n\n for line in data:\n if 'function' in line:\n print ('found')\n\n else:\n print ('did not find')",
"def _report_callee_lines(self):\n func_order = self.all_functions()\n func_width = max(len(f) for f in func_order)\n lines = []\n fmt = self._format_callee(func_width + 3)\n\n\n for caller_func in func_order:\n mcd = self.callee.get(caller_func)\n if not mcd:\n LOG.debug(\"report_callee: SKIP {}\".format(caller_func))\n continue\n LOG.debug(\"report_callee: {}\".format(caller_func))\n\n header = fmt.format(\n function = \"-- \" + caller_func\n , ncalls_a = \"ncalls_a\"\n , ncalls_b = \"ncalls_b\"\n , cumtime_a = \"cumtime_a\"\n , cumtime_b = \"cumtime_b\"\n )\n lines.append(header)\n\n\n for callee_func in func_order: # Ayep, O(n^2)\n ab = mcd.get(callee_func)\n if not ab:\n continue\n line = fmt.format(\n function = \" \" + callee_func\n , ncalls_a = _cell(ab, Side.A, \"ncalls\")\n , ncalls_b = _cell(ab, Side.B, \"ncalls\")\n , cumtime_a = _cell(ab, Side.A, \"cumtime\")\n , cumtime_b = _cell(ab, Side.B, \"cumtime\")\n )\n lines.append(line)\n return lines",
"def test_find_functions(self):\n self.filename = \"parser_tests/ruby_functions.txt\"\n expected_functions = ['multiply', 'method_name']\n self.run_parser()\n self.assertListEqual(expected_functions, self.p.scanner.functions_calls)",
"def function_find_all(self, _funcea, _criteria):\n\t\tfound_ins = []\n\t\tif (_funcea != BADADDR):\n\t\t\tif (not type(_criteria) in [list, tuple]):\n\t\t\t\t_criteria = [_criteria]\n\t\t\t\t\n\t\t\tfdisasm = self.get_disasm_function_line(_funcea)\n\t\t\tif (len(fdisasm) > 0):\n\t\t\t\tfor ins in fdisasm:\n\t\t\t\t\tfor crit in _criteria:\n\t\t\t\t\t\tif (re.search(crit, ins)):\n\t\t\t\t\t\t\tfound_ins.append(ins)\n\t\treturn found_ins",
"def get_functions(text, startswith='def '):\n return get_definition(text, startswith)",
"def print_results():\n last_title = \"\"\n # `regex` isn't used here\n # pylint: disable=W0612\n for title, regex, positions in deprecated_features:\n if title != last_title:\n print\n print title, \"...\"\n print\n last_title = title\n if not positions:\n print \" no deprecated code found\"\n continue\n file_names = positions.keys()\n file_names.sort()\n for file_name in file_names:\n print file_name\n for line_number, line in positions[file_name]:\n print \"%5d: %s\" % (line_number, line)\n print\n print \"If possible, check your code also by other means.\"",
"def do_help_function(self, line):\n line = line.replace(';','')\n line = line.replace('()','')\n if line.split() == []:\n return self.do_help('help_function')\n function = line.split()[0]\n if function.lower() == 'all':\n print(\"\\nThese are the loaded functions for easyaccess:\\n\")\n for k in fun_utils.ea_func_dictionary.keys():\n print(' '+k)\n return\n if not function in fun_utils.ea_func_dictionary.keys():\n print(colored(\"\\nFunction {0} is not loaded, please import module (check help import for more info)\\n\".format(function),\"red\"))\n return\n else:\n print(\"\\nHelp for {0}:\\n\".format(function))\n func = fun_utils.ea_func_dictionary[function]\n print(function+func.__doc1__)\n print(func.__doc__)",
"def function_prints(function_name: str = None, report=MAIN_REPORT) -> bool:\n ast = parse_program(report=report)\n defs = ast.find_all(\"FunctionDef\")\n for a_def in defs:\n if function_name is not None and a_def.name != function_name:\n continue\n all_calls = a_def.find_all(\"Call\")\n for a_call in all_calls:\n if a_call.func.ast_name == \"Name\":\n if a_call.func.id == \"print\":\n return True\n return False",
"def print_yielded(func):\n print_all = functools.partial(map, print)\n print_results = compose(more_itertools.consume, print_all, func)\n return functools.wraps(func)(print_results)",
"def function_help():\r\n cprint('INTERACTIVE FUNCTIONS:')\r\n for f in i_functions:\r\n exec('print('+f+'.__doc__)')\r\n cprint('INTERACTIVE VARIABLES:')\r\n tab_list_print(i_variables)\r\n cprint('\\nINTERACTIVE LABELS:')\r\n tab_list_print(i_labels)",
"def print_result(method_name, equation, error):\n print(f'{method_name}:')\n print(f'Fitting line: {equation}')\n print(f'Total error: {error}')",
"def print_details(data, features, statistic_functions):\n for feat in features:\n # print the name of the feature\n print(f\"{feat}: \", end='')\n # print results of the statistical functions applied on the 'data[feat]' records\n print(*[func(data[feat]) for func in statistic_functions], sep=', ')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Find lines in other files that are related to a line in a given file, based on commit history.
|
def find_lines_related_to_lines(self, start_line, end_line, path, concurrent=False):
cwd = os.getcwd()
os.chdir(self.repo_folder)
modified_in_commits = self.get_commits_that_modified_line(start_line, end_line, path)
modified_in_commits = [commit[1:-1] for commit in modified_in_commits]
if concurrent:
self.find_related_lines_concurrent(path, modified_in_commits)
else:
self.find_related_lines(path, modified_in_commits)
os.chdir(cwd)
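
A hedged sketch of the same sequence without the class: change into the repository, collect the commits that touched the line range, strip the literal quotes produced by --format="%H", and restore the working directory even if git fails. `repo_folder`, the line numbers and the path are placeholders.

import os
import subprocess

def commits_for_line_range(repo_folder, start_line, end_line, path):
    cwd = os.getcwd()
    os.chdir(repo_folder)
    try:
        out = subprocess.run(
            ['git', 'log', '-L', f'{start_line},{end_line}:{path}',
             '--format="%H"', '-s'],
            capture_output=True, encoding='utf-8',
        ).stdout
        # --format="%H" wraps every hash in literal quotes, hence the strip,
        # which mirrors the commit[1:-1] cleanup in the method above.
        return [line.strip('"') for line in out.split('\n') if line]
    finally:
        os.chdir(cwd)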
|
[
"def find_lines_related_to_function(self, function_name, path):\n\n modified_in_commits = self.get_commits_that_modified_function(function_name, path)\n self.find_related_lines(path, modified_in_commits)",
"def get_commits_that_modified_line(self, start_line, end_line, path):\n\n # history = self.git_repo2.git.log('-L', f'{start_line},{end_line}:{path}').split('\\n')\n history = subprocess.run(['git', 'log', '-L', f'{start_line},{end_line}:{path}', '--format=\\\"%H\\\"', '-s'], capture_output=True, encoding='utf_8').stdout.split('\\n')\n modified_in_commits = [line for line in history if len(line) > 0]\n \n '''\n for line in history:\n if line[0:6] == 'commit':\n modified_in_commits.append(line[7:])\n '''\n \n return modified_in_commits",
"def analyze_correlation_commit_lines_graph_concurent(self, single_line=None):\n\n cwd = os.getcwd()\n os.chdir(self.repo_folder)\n\n commit_to_lines = {}\n\n # Print analyzing all the lines of the repo\n print('Print analyzing all the lines of the repo')\n file_lines = []\n \n\n if single_line:\n\n already_seen_files = set()\n modified_in_commits = self.get_commits_that_modified_line(single_line[1], single_line[1], single_line[0])\n modified_in_commits = [commit[1:-1] for commit in modified_in_commits]\n for commit in pydriller.Repository(self.repo_folder, only_commits=modified_in_commits).traverse_commits():\n\n for modification in commit.modified_files:\n\n path = single_line[0].replace(\"/\", \"\\\\\")\n if modification.new_path in self.repo_files_path:\n current_path = modification.new_path\n else:\n current_path = self.retrieve_current_path(modification.new_path)\n\n if current_path not in already_seen_files:\n if current_path is not None and modification.new_path[-4:] not in self.forbidden_file_extensions:\n\n # Get path to file to count number of lines\n filepath = self.repo_folder + '\\\\' + current_path\n linenumber = self.get_file_number_of_lines(filepath)\n already_seen_files.add(current_path)\n\n for i in range(1, linenumber):\n file_lines.append((current_path, i))\n\n else:\n\n for file_path in tqdm.tqdm(self.repo_files_path):\n\n # Get path to file and count number of lines\n complete_file_path = self.repo_folder + '\\\\' + file_path\n linenumber = self.get_file_number_of_lines(complete_file_path)\n\n for i in range(1, linenumber):\n file_lines.append((file_path, i))\n\n line_to_commits = {}\n with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor:\n future_to_line = {executor.submit(self.analyze_line, file_line): file_line for file_line in file_lines}\n\n pbar = tqdm.tqdm(total=len(file_lines))\n for future in concurrent.futures.as_completed(future_to_line):\n file_line = future_to_line[future]\n try:\n modified_in_commits = future.result()\n line_to_commits[file_line] = modified_in_commits\n except Exception as exc:\n print(f'Error during execution : {exc}')\n pbar.update(1)\n pbar.close()\n\n for file_line, modified_in_commits in line_to_commits.items():\n\n file_path, line = file_line\n self.commit_graph_lines.add_node(f'{file_path}:{line}', number_modifications=len(modified_in_commits))\n\n for commit in modified_in_commits:\n\n if commit in commit_to_lines:\n commit_to_lines[commit].append(f'{file_path}:{line}')\n else:\n commit_to_lines[commit] = [f'{file_path}:{line}']\n\n\n # Building the graph\n print('\\n\\nBuilding the graph')\n for (_, list_lines) in tqdm.tqdm(commit_to_lines.items()):\n\n pairs_of_modified_lines = []\n for i in range(len(list_lines)):\n for j in range(i+1, len(list_lines)):\n pairs_of_modified_lines.append((list_lines[i], list_lines[j]))\n\n for edge in pairs_of_modified_lines:\n\n if edge[0] in self.commit_graph_lines.nodes and edge[1] in self.commit_graph_lines.nodes:\n if self.commit_graph_lines.has_edge(edge[0], edge[1]):\n self.commit_graph_lines.edges[edge[0], edge[1]]['number_modifications_same_commit'] += 1\n else:\n self.commit_graph_lines.add_edge(edge[0], edge[1], number_modifications_same_commit=1)\n\n os.chdir(cwd)",
"def get_changed_files_in_commit(self, commit_hash):\r\n output = self._execute_command(get_changed_files_in_commit.format(commit_id=commit_hash))\r\n return re.match(r\"(?P<content>.*)\\ncommit {}\".format(commit_hash), output, re.DOTALL).group('content').splitlines()",
"def get_changeset_lines(repo_dir):\n cmds = ['cd %s' % repo_dir, 'git log --reverse --format=\"%H|%ct|%s\"']\n return execute(' && '.join(cmds)).splitlines()",
"def find_matching_lines(self, table_name, matches):\n pass",
"def _get_remote_refs(self):\n return frozenset([line[2:].strip() for line in self._do(['branch', '-r'], as_lines=True)])",
"def getVersionHistory(self, text):\n extractor =r'.*\\+node\\S+?\\<\\< %s \\>\\>.*?\\#\\@\\+at(.*)\\#\\@\\-at.*\\-node.*?\\<\\< %s \\>\\>.*'\n #\n # This Re is very slow on large files so we truncate since we are really pretty\n # sure that version history will be within the first 150 lines\n lines = \"\\n\".join(text.split(\"\\n\")[:150])\n for name in (\"version history\", \"change log\"):\n searcher = re.compile(extractor % (name, name), re.DOTALL+re.M)\n match = searcher.match(lines)\n if match:\n version_text = match.groups()[0]\n self.versions = version_text.replace(\"#\", \"\")\n return",
"def files_in_commit(commit: Optional[str] = None) -> Iterable[str]:\n cmd = ['git', 'diff', '--name-status']\n if commit is not None:\n cmd.append(f'{commit}..{commit}~1')\n\n output = subprocess.check_output(cmd)\n\n for line in output.decode('utf-8').split('\\n'):\n if line == '':\n break\n\n path = line.split('\\t')[-1]\n\n if not path.endswith('.py'):\n continue\n\n abs_path = os.path.abspath(path)\n\n yield abs_path",
"def extract_git_blame_lines(file_name, susp_file_path, git_blame_output):\n file_path = find_file_path(file_name, susp_file_path)\n os.system(f\"git blame {file_path} > {git_blame_output}\")\n git_blame_data = csv.reader(open(git_blame_output, encoding='ISO-8859-1'), delimiter='\\n')\n git_blame_list = list(git_blame_data)\n git_blame_lines = {(i+1):git_blame_list[i] for i in range(len(git_blame_list))}\n\n return git_blame_lines",
"def load_downloaded_changes(\n *, file_path: typing.Union[str, os.PathLike]\n) -> typing.Set[FileChangeResultKey]:\n try:\n with jsonlines.open(file_path, mode=\"r\") as lines:\n return {FileChangeResultKey.from_dict(raw_entry) for raw_entry in lines}\n except FileNotFoundError:\n return set()",
"def extract_changes_from_commit(commit_hash: str) -> Tuple[str, List[str], dict, str]:\n commit_github_url = os.path.join(GITHUB_BASE_URL, commit_hash)\n # Getting Commit Author ('%an') from last (-1) git log entry. Last git log entry is the current commit.\n output_stream = os.popen(\"git log -1 --pretty=format:'%an'\")\n commit_author = output_stream.read()\n\n # Getting changed file names between last commit on branch (HEAD^) and current commit.\n output_stream = os.popen(\n f\"git diff-tree --no-commit-id --name-only -r HEAD^ {commit_hash}\"\n )\n changed_files = output_stream.read().split(\"\\n\")\n logger.info(f\"all Changed files: {changed_files}\")\n\n change_diffs = {}\n for changed_file in changed_files:\n if changed_file.startswith(PREFIX) and changed_file.endswith(SUFFIX):\n # Getting diff of specific changed file from last commit on branch (HEAD^).\n # Filtering only lines indicating changes: starting with + or -\n output_stream = os.popen(\n f\"git diff HEAD^ -- {changed_file} | grep '^[+|-][^+|-]'\"\n )\n change_diffs[changed_file] = output_stream.read()\n\n relevant_changed_files = list(change_diffs.keys())\n return commit_github_url, relevant_changed_files, change_diffs, commit_author",
"def get_comit_difference(repo_path,c_hash):\n \n cdiff = []\n s = subprocess.check_output(\"cd %s; git log --stat -2 %s \" % (repo_path,c_hash), shell=True)\n \n r = re.compile(\"commit (.*?)\\n.*?Author: (.*?)\\n.*?Date:(.*?)\\n\\n(.*?)\\n\\n(.*?)\\n(.*?)\\n\", re.M+re.S+re.U+re.I)\n matches = r.findall(s)\n for m in matches:\n cdiff.append(dict(commit_hash=m[0].strip(), author=m[1].strip(), datetime=m[2].strip(), message=m[3].strip(), file_name=m[4].strip(), changes=m[5].strip()))\n \n\n\n return cdiff",
"def files_re_match_multiline(file1, file2, attributes=None):\n local_file = open( file1, 'U' ).read() # regex file\n if attributes is None:\n attributes = {}\n if attributes.get( 'sort', False ):\n history_data = open( file2, 'U' ).readlines()\n history_data.sort()\n history_data = ''.join( history_data )\n else:\n history_data = open( file2, 'U' ).read()\n # lines_diff not applicable to multiline matching\n assert re.match( local_file, history_data, re.MULTILINE ), \"Multiline Regular expression did not match data file\"",
"def grep(self, fileregex, lineregex):\n import glob, re, os\n for filename in glob.glob(fileregex):\n if os.path.isfile(filename):\n f = open(filename, 'r')\n for line in f.xreadlines():\n if re.match(lineregex, line):\n print \"%s: %s\" % (filename, line)",
"def parse_changes_file(file_path, versions=None):\r\n # Maps contributor name to a list of JIRA tickets\r\n contributors_map = defaultdict(set)\r\n\r\n in_entry = False\r\n active_version = None\r\n active_tickets = []\r\n\r\n with open(file_path, 'r') as fp:\r\n for line in fp:\r\n line = line.strip()\r\n\r\n match = re.search(r'Changes with Apache Libcloud '\r\n '(\\d+\\.\\d+\\.\\d+(-\\w+)?).*?$', line)\r\n\r\n if match:\r\n active_version = match.groups()[0]\r\n\r\n if versions and active_version not in versions:\r\n continue\r\n\r\n if line.startswith('-') or line.startswith('*)'):\r\n in_entry = True\r\n active_tickets = []\r\n\r\n if in_entry and line == '':\r\n in_entry = False\r\n\r\n if in_entry:\r\n match = re.search(r'\\((.+?)\\)$', line)\r\n\r\n if match:\r\n active_tickets = match.groups()[0]\r\n active_tickets = active_tickets.split(', ')\r\n active_tickets = [ticket for ticket in active_tickets if\r\n ticket.startswith('LIBCLOUD-') or\r\n ticket.startswith('GITHUB-')]\r\n\r\n match = re.search(r'^\\[(.+?)\\]$', line)\r\n\r\n if match:\r\n contributors = match.groups()[0]\r\n contributors = contributors.split(',')\r\n contributors = [name.strip() for name in contributors]\r\n\r\n for name in contributors:\r\n name = name.title()\r\n contributors_map[name].update(set(active_tickets))\r\n\r\n return contributors_map",
"def get_commits_that_modified_function(self, function_name, path):\n\n\n history = subprocess.run(['git', 'log', '-L', f':{function_name}:{path}', '--format=\\\"%H\\\"', '-s'], capture_output=True, encoding='utf_8').stdout.split('\\n')\n modified_in_commits = [line for line in history if len(line) > 0]\n \n return modified_in_commits",
"def test_number_of_executions_corresponds_to_filelines(\n self,\n test_file,\n ):\n test_filepath = test_file.as_posix()\n tweet1_content = \"This is a\\n\\nmultiline\\n\\ntweet.\"\n tweet2_content = \"This is another tweet.\"\n\n from logtweet.history import add_tweet_to_history\n add_tweet_to_history(tweet1_content, history_filepath=test_filepath)\n add_tweet_to_history(tweet2_content, history_filepath=test_filepath)\n\n with open(test_filepath, \"r\") as test_fileobj:\n lines = test_fileobj.readlines()\n assert len(lines) == 2",
"def find_references(bytes, projectname, fn):\n def find_refs(pattern):\n compiled = re.compile(pattern)\n refs = re.findall(compiled, bytes)\n return refs\n\n svn = svnpattern(projectname)\n wiki = wikipattern(projectname)\n\n return find_refs(svn) + find_refs(wiki)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get a list of commits in which the given lines of a given file were modified.
|
def get_commits_that_modified_line(self, start_line, end_line, path):
# history = self.git_repo2.git.log('-L', f'{start_line},{end_line}:{path}').split('\n')
history = subprocess.run(['git', 'log', '-L', f'{start_line},{end_line}:{path}', '--format=\"%H\"', '-s'], capture_output=True, encoding='utf_8').stdout.split('\n')
modified_in_commits = [line for line in history if len(line) > 0]
'''
for line in history:
if line[0:6] == 'commit':
modified_in_commits.append(line[7:])
'''
return modified_in_commits
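
The commented-out first line hints at an equivalent route through GitPython. A sketch of that variant, assuming GitPython is installed and `repo_path` points at a checkout (both names below are placeholders). Passing --format=%H without the escaped quotes also removes the need for the commit[1:-1] cleanup seen in the caller.

import git  # GitPython

def modified_in(repo_path, start_line, end_line, path):
    repo = git.Repo(repo_path)
    # repo.git.log forwards its arguments to `git log` and returns the stdout text.
    out = repo.git.log('-L', f'{start_line},{end_line}:{path}',
                       '--format=%H', '-s')
    return [line for line in out.split('\n') if line]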
|
[
"def get_commits_that_modified_function(self, function_name, path):\n\n\n history = subprocess.run(['git', 'log', '-L', f':{function_name}:{path}', '--format=\\\"%H\\\"', '-s'], capture_output=True, encoding='utf_8').stdout.split('\\n')\n modified_in_commits = [line for line in history if len(line) > 0]\n \n return modified_in_commits",
"def get_changed_files_in_commit(self, commit_hash):\r\n output = self._execute_command(get_changed_files_in_commit.format(commit_id=commit_hash))\r\n return re.match(r\"(?P<content>.*)\\ncommit {}\".format(commit_hash), output, re.DOTALL).group('content').splitlines()",
"def git_commits(filepath, since):\n cmd = ('git', 'log', '--since=\"'+since+'\"', '--pretty=format:%H',\n '--', filepath)\n stdout, stderr = execute(cmd)\n\n commits = []\n if stdout:\n commits = [c for c in stdout.split('\\n') if c]\n\n return commits",
"def getChangedFilesForCommits(self):\n\t\t\"\"\"Returns [{'time':time, 'files':[filenames,]}]\"\"\"\n\t\trequestString = \"https://api.github.com/repos/{}/{}/compare\"\n\t\trequestString = requestString.format(self.user, self.repo)\n\t\tcommits = self.getCommits()\n\t\tchanges = []\n\t\tfor commitIndex in range(len(commits)):\n\t\t\tif commitIndex == 0:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tcurrent = commits[commitIndex]['sha']\n\t\t\t\tprevious = commits[commitIndex - 1]['sha']\n\t\t\t\tcommitTime = parseGitTimeString(commits[commitIndex]['commit']['committer']['date'])\n\t\t\t\tcompareString = \"/{}...{}\"\n\t\t\t\tcompareString = compareString.format(previous, current)\n\t\t\t\ttempRequestString = requestString + compareString\n\t\t\t\tresponse = urllib.request.urlopen(tempRequestString)\n\t\t\t\tdata = response.read().decode('utf-8')\n\t\t\t\tdata = json.loads(data)\n\t\t\t\tfiles = data['files']\n\t\t\t\t#this right here is wrong... should be commitsha:{time:124523523,files:changed}\n\t\t\t\tfilesChanged = {'time': commitTime, 'files': [file['filename'] for file in files if file['status'] == 'modified']}\n\t\t\t\tchanges.append(filesChanged)\n\t\treturn changes",
"def _get_list_of_committed_files():\n files = []\n # pylint: disable=E1103\n diff_index_cmd = 'git diff-index --cached %s' % _current_commit()\n output = subprocess.check_output(\n diff_index_cmd.split()\n )\n for result in output.split('\\n'):\n if result != '':\n result = result.split()\n if result[4] in ['A', 'M']:\n files.append(result[5])\n\n return files",
"def get_modified_files(commit, file_extension):\n current_modified_files = [mod_file for mod_file in commit.modifications\n if mod_file.filename.endswith(file_extension)]\n return current_modified_files",
"def get_commits():\n repo = git.Repo(\".\")\n commits = list(repo.iter_commits())\n return commits",
"def get_changeset_lines(repo_dir):\n cmds = ['cd %s' % repo_dir, 'git log --reverse --format=\"%H|%ct|%s\"']\n return execute(' && '.join(cmds)).splitlines()",
"def all_commits(change_id, curr_project, curr_ref):\n commits = []\n manifest = ET.ElementTree(file='.repo/manifest.xml')\n commits.append((curr_project, project_path(manifest, curr_project), curr_ref))\n\n url = (GERRIT_ROOT + 'changes/?o=CURRENT_REVISION&q=status:open+' +\n change_id)\n changes = request.urlopen(url)\n for change in parse_response(changes):\n project = change['project']\n fetch = change['revisions'][change['current_revision']]['fetch']\n # The `ref` is the same for every download scheme, hence we can use\n # the first one that is there\n ref = fetch.values()[0]['ref']\n path = project_path(manifest, project, change['branch'])\n if path and project != curr_project:\n commits.append((project, path, ref))\n\n return commits",
"def get_commits(self):\n return get_commits(self.old, self.new, self.ref)",
"def files_in_commit(commit: Optional[str] = None) -> Iterable[str]:\n cmd = ['git', 'diff', '--name-status']\n if commit is not None:\n cmd.append(f'{commit}..{commit}~1')\n\n output = subprocess.check_output(cmd)\n\n for line in output.decode('utf-8').split('\\n'):\n if line == '':\n break\n\n path = line.split('\\t')[-1]\n\n if not path.endswith('.py'):\n continue\n\n abs_path = os.path.abspath(path)\n\n yield abs_path",
"def changed_files(revset, filter_re=None):\n require('code_dir')\n\n with cd(env.code_dir):\n result = run(\"hg status --rev '%s'\" % revset, quiet=True).splitlines()\n\n if filter_re:\n regex = re.compile(filter_re)\n result = filter(lambda filename: regex.search(filename), result)\n\n return result",
"def getFixCommits(self):\r\n # use regular expression to match the content.\r\n commit = re.compile('^commit [0-9a-z]{40}$', re.IGNORECASE)\r\n fixes = re.compile('^\\W+Fixes: [a-f0-9]{8,40} \\(.*\\)$', re.IGNORECASE)\r\n nr_fixes = 0\r\n fix_commit = []\r\n cmd = [\"git\", \"log\", \"-p\", \"--no-merges\", self.kernelRange]\r\n p = Popen(cmd, cwd=self.repo, stdout=PIPE)\r\n data, res = p.communicate()\r\n data = unicodedata.normalize(u'NFKD', data.decode(encoding=\"utf-8\", errors=\"ignore\"))\r\n for line in data.split(\"\\n\"):\r\n if(commit.match(line)): # match the commit\r\n cur_commit = line\r\n if(fixes.match(line)): # match the fixes\r\n nr_fixes += 1\r\n fix_commit.append(cur_commit[7:19])\r\n #print(\"total found fixes:\",nr_fixes)\r\n return fix_commit",
"def _get_changed_files(base_branch):\n # Get file changes between branch and merge-base of specified branch\n base_commit = check_output([\"git\", \"merge-base\", base_branch, \"HEAD\"]).rstrip()\n return check_output([\"git\", \"diff\", base_commit, \"--name-only\"]).splitlines()",
"def list_modified_files(self):\n return gitinfo.list_staged_files(gitinfo.current_commit())",
"def parse_changes_file(file_path, versions=None):\r\n # Maps contributor name to a list of JIRA tickets\r\n contributors_map = defaultdict(set)\r\n\r\n in_entry = False\r\n active_version = None\r\n active_tickets = []\r\n\r\n with open(file_path, 'r') as fp:\r\n for line in fp:\r\n line = line.strip()\r\n\r\n match = re.search(r'Changes with Apache Libcloud '\r\n '(\\d+\\.\\d+\\.\\d+(-\\w+)?).*?$', line)\r\n\r\n if match:\r\n active_version = match.groups()[0]\r\n\r\n if versions and active_version not in versions:\r\n continue\r\n\r\n if line.startswith('-') or line.startswith('*)'):\r\n in_entry = True\r\n active_tickets = []\r\n\r\n if in_entry and line == '':\r\n in_entry = False\r\n\r\n if in_entry:\r\n match = re.search(r'\\((.+?)\\)$', line)\r\n\r\n if match:\r\n active_tickets = match.groups()[0]\r\n active_tickets = active_tickets.split(', ')\r\n active_tickets = [ticket for ticket in active_tickets if\r\n ticket.startswith('LIBCLOUD-') or\r\n ticket.startswith('GITHUB-')]\r\n\r\n match = re.search(r'^\\[(.+?)\\]$', line)\r\n\r\n if match:\r\n contributors = match.groups()[0]\r\n contributors = contributors.split(',')\r\n contributors = [name.strip() for name in contributors]\r\n\r\n for name in contributors:\r\n name = name.title()\r\n contributors_map[name].update(set(active_tickets))\r\n\r\n return contributors_map",
"def load_downloaded_changes(\n *, file_path: typing.Union[str, os.PathLike]\n) -> typing.Set[FileChangeResultKey]:\n try:\n with jsonlines.open(file_path, mode=\"r\") as lines:\n return {FileChangeResultKey.from_dict(raw_entry) for raw_entry in lines}\n except FileNotFoundError:\n return set()",
"def get_commit_difference(repo_path):\n \n diff = []\n s = subprocess.check_output(\"cd %s; git log --stat \" % repo_path, shell=True)\n r = re.compile(\"commit (.*?)\\n.*?Author: (.*?)\\n.*?Date:(.*?)\\n\\n(.*?)\\n\\n(.*?)\\n(.*?)\\n\", re.M+re.S+re.U+re.I)\n matches = r.findall(s)\n for m in matches:\n diff.append(dict(commit_hash=m[0].strip(), author=m[1].strip(), datetime=m[2].strip(), message=m[3].strip(), file_name=m[4].strip(), changes=m[5].strip()))\n #diff.append(dict(commit_diff=m[0].strip()))\n\n\n return diff",
"def get_comit_difference(repo_path,c_hash):\n \n cdiff = []\n s = subprocess.check_output(\"cd %s; git log --stat -2 %s \" % (repo_path,c_hash), shell=True)\n \n r = re.compile(\"commit (.*?)\\n.*?Author: (.*?)\\n.*?Date:(.*?)\\n\\n(.*?)\\n\\n(.*?)\\n(.*?)\\n\", re.M+re.S+re.U+re.I)\n matches = r.findall(s)\n for m in matches:\n cdiff.append(dict(commit_hash=m[0].strip(), author=m[1].strip(), datetime=m[2].strip(), message=m[3].strip(), file_name=m[4].strip(), changes=m[5].strip()))\n \n\n\n return cdiff"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get a list of commits in which a function was modified.
|
def get_commits_that_modified_function(self, function_name, path):
history = subprocess.run(['git', 'log', '-L', f':{function_name}:{path}', '--format=\"%H\"', '-s'], capture_output=True, encoding='utf_8').stdout.split('\n')
modified_in_commits = [line for line in history if len(line) > 0]
return modified_in_commits
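
One way the returned hashes can be consumed, mirroring the pydriller pattern that appears in the related snippets: strip the quotes added by --format="%H", then replay only those commits. This assumes pydriller is installed; `analyzer` stands for whatever object carries the method above, and `repo_folder` plus the function/file names are placeholders.

import pydriller

def files_touched_alongside_function(analyzer, repo_folder, function_name, path):
    hashes = analyzer.get_commits_that_modified_function(function_name, path)
    hashes = [h.strip('"') for h in hashes]   # remove the literal quotes around each hash
    touched = set()
    for commit in pydriller.Repository(repo_folder, only_commits=hashes).traverse_commits():
        for modification in commit.modified_files:
            touched.add(modification.new_path)
    return touched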
|
[
"def get_commits():\n repo = git.Repo(\".\")\n commits = list(repo.iter_commits())\n return commits",
"def get_commits(self):\n return get_commits(self.old, self.new, self.ref)",
"def get_commits_that_modified_line(self, start_line, end_line, path):\n\n # history = self.git_repo2.git.log('-L', f'{start_line},{end_line}:{path}').split('\\n')\n history = subprocess.run(['git', 'log', '-L', f'{start_line},{end_line}:{path}', '--format=\\\"%H\\\"', '-s'], capture_output=True, encoding='utf_8').stdout.split('\\n')\n modified_in_commits = [line for line in history if len(line) > 0]\n \n '''\n for line in history:\n if line[0:6] == 'commit':\n modified_in_commits.append(line[7:])\n '''\n \n return modified_in_commits",
"def get_commit_messages(self):\n return get_commit_messages(self.old, self.new, self.ref)",
"def all_commits(repo: Optional[str] = None) -> List[str]:\n with Repo.open(repo) as _repo:\n return _repo.scm.list_all_commits()",
"def _get_list_of_committed_files():\n files = []\n # pylint: disable=E1103\n diff_index_cmd = 'git diff-index --cached %s' % _current_commit()\n output = subprocess.check_output(\n diff_index_cmd.split()\n )\n for result in output.split('\\n'):\n if result != '':\n result = result.split()\n if result[4] in ['A', 'M']:\n files.append(result[5])\n\n return files",
"def git_commits(filepath, since):\n cmd = ('git', 'log', '--since=\"'+since+'\"', '--pretty=format:%H',\n '--', filepath)\n stdout, stderr = execute(cmd)\n\n commits = []\n if stdout:\n commits = [c for c in stdout.split('\\n') if c]\n\n return commits",
"def getChangedFilesForCommits(self):\n\t\t\"\"\"Returns [{'time':time, 'files':[filenames,]}]\"\"\"\n\t\trequestString = \"https://api.github.com/repos/{}/{}/compare\"\n\t\trequestString = requestString.format(self.user, self.repo)\n\t\tcommits = self.getCommits()\n\t\tchanges = []\n\t\tfor commitIndex in range(len(commits)):\n\t\t\tif commitIndex == 0:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tcurrent = commits[commitIndex]['sha']\n\t\t\t\tprevious = commits[commitIndex - 1]['sha']\n\t\t\t\tcommitTime = parseGitTimeString(commits[commitIndex]['commit']['committer']['date'])\n\t\t\t\tcompareString = \"/{}...{}\"\n\t\t\t\tcompareString = compareString.format(previous, current)\n\t\t\t\ttempRequestString = requestString + compareString\n\t\t\t\tresponse = urllib.request.urlopen(tempRequestString)\n\t\t\t\tdata = response.read().decode('utf-8')\n\t\t\t\tdata = json.loads(data)\n\t\t\t\tfiles = data['files']\n\t\t\t\t#this right here is wrong... should be commitsha:{time:124523523,files:changed}\n\t\t\t\tfilesChanged = {'time': commitTime, 'files': [file['filename'] for file in files if file['status'] == 'modified']}\n\t\t\t\tchanges.append(filesChanged)\n\t\treturn changes",
"def get_changed_files_in_commit(self, commit_hash):\r\n output = self._execute_command(get_changed_files_in_commit.format(commit_id=commit_hash))\r\n return re.match(r\"(?P<content>.*)\\ncommit {}\".format(commit_hash), output, re.DOTALL).group('content').splitlines()",
"def list_modified_files(self):\n return gitinfo.list_staged_files(gitinfo.current_commit())",
"def all_commits(change_id, curr_project, curr_ref):\n commits = []\n manifest = ET.ElementTree(file='.repo/manifest.xml')\n commits.append((curr_project, project_path(manifest, curr_project), curr_ref))\n\n url = (GERRIT_ROOT + 'changes/?o=CURRENT_REVISION&q=status:open+' +\n change_id)\n changes = request.urlopen(url)\n for change in parse_response(changes):\n project = change['project']\n fetch = change['revisions'][change['current_revision']]['fetch']\n # The `ref` is the same for every download scheme, hence we can use\n # the first one that is there\n ref = fetch.values()[0]['ref']\n path = project_path(manifest, project, change['branch'])\n if path and project != curr_project:\n commits.append((project, path, ref))\n\n return commits",
"def test_repo_get_all_commits(self):\n pass",
"def list_commit_set(self):\n return self._req(\n Service.PFS,\n \"ListCommitSet\",\n )",
"def get_modified_files(commit, file_extension):\n current_modified_files = [mod_file for mod_file in commit.modifications\n if mod_file.filename.endswith(file_extension)]\n return current_modified_files",
"def getFixCommits(self):\r\n # use regular expression to match the content.\r\n commit = re.compile('^commit [0-9a-z]{40}$', re.IGNORECASE)\r\n fixes = re.compile('^\\W+Fixes: [a-f0-9]{8,40} \\(.*\\)$', re.IGNORECASE)\r\n nr_fixes = 0\r\n fix_commit = []\r\n cmd = [\"git\", \"log\", \"-p\", \"--no-merges\", self.kernelRange]\r\n p = Popen(cmd, cwd=self.repo, stdout=PIPE)\r\n data, res = p.communicate()\r\n data = unicodedata.normalize(u'NFKD', data.decode(encoding=\"utf-8\", errors=\"ignore\"))\r\n for line in data.split(\"\\n\"):\r\n if(commit.match(line)): # match the commit\r\n cur_commit = line\r\n if(fixes.match(line)): # match the fixes\r\n nr_fixes += 1\r\n fix_commit.append(cur_commit[7:19])\r\n #print(\"total found fixes:\",nr_fixes)\r\n return fix_commit",
"def _commits(self, head='HEAD'):\n pending_commits = [head]\n history = []\n while pending_commits != []:\n head = pending_commits.pop(0)\n try:\n commit = self[head]\n except KeyError:\n raise KeyError(head)\n if type(commit) != Commit:\n raise TypeError(commit)\n if commit in history:\n continue\n i = 0\n for known_commit in history:\n if known_commit.commit_time > commit.commit_time:\n break\n i += 1\n history.insert(i, commit)\n pending_commits += commit.parents\n return history",
"def commits_log(self, obj1, obj2):\n return self._repo.iter_commits(rev='%(obj1)s..%(obj2)s' % {'obj1': obj1, 'obj2': obj2})",
"def get_changeset_lines(repo_dir):\n cmds = ['cd %s' % repo_dir, 'git log --reverse --format=\"%H|%ct|%s\"']\n return execute(' && '.join(cmds)).splitlines()",
"def __list__(self):\n return self.changes"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Checks if an interval is contained in a list of intervals.
|
def interval_contained_in_list(list_intervals, interval):
for (a, b) in list_intervals:
if a <= interval[0] and interval[1] <= b:
return True
return False
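
A quick behavioural check, assuming the function above is importable as-is; the interval values are illustrative.

intervals = [(1, 5), (10, 20)]

assert interval_contained_in_list(intervals, (2, 4))        # inside (1, 5)
assert interval_contained_in_list(intervals, (10, 20))      # exactly an existing interval
assert not interval_contained_in_list(intervals, (4, 12))   # straddles the gap between them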
|
[
"def __contains__(self, interval):\n return interval in self._driver",
"def _intersects_with(cls, intervals: CommonIntervals, ci: CommonInterval) -> IntervalList:\n return [other for other in intervals\n if CommonInterval.intersect(ci, other) and ci.first_end <= other.first_end]",
"def __contains__(self, x):\n\n if isinstance(x, Interval):\n return self.lo <= x.lo and x.hi <= self.hi\n\n return self.lo <= x <= self.hi",
"def _in(value, interval):\n lower, upper = map(lambda v: v and float(v), interval)\n if lower and value < lower:\n return False\n if upper and upper < value:\n return False\n return True",
"def overlapping_ranges(lst):\n\n\tfirst_range = []\n\tsecond_range = []\n\tchecker = lst[-1]\n\n\tfor i in range (lst[0], lst[1] + 1):\n\t\tfirst_range.append(i)\n\t\n\n\tfor i in range(lst[2],lst[3] + 1):\n\t\tsecond_range.append(i)\n\t\n\n\tcount = 0 \n\tfor num in second_range:\n\t\tif num in first_range:\n\t\t\tcount += 1\n\n\tif checker <= count:\n\t\treturn True\n\n\treturn False",
"def is_interval(self, qubits: Sequence[int]) -> bool:\n return qubits == self.interval(min(qubits), max(qubits))",
"def interval_check(self, lower, upper):\n return self.function(lower) * self.function(upper) < 0",
"def _intervalConflictAlreadyDetected(self, interval, conflicts):\n for conflict in conflicts:\n for ival in conflict:\n if ival == interval:\n return True\n return False",
"def is_ins_in_list(instance, ins_list, end):\n for i in range(len(ins_list)):\n if i == end:\n break\n if instance.is_equal(ins_list[i]):\n return True\n return False",
"def _in_matched_range(start_idx, end_idx, matched_ranges):\n for range_start_idx, range_end_idx in matched_ranges:\n if not (end_idx <= range_start_idx or start_idx >= range_end_idx):\n return True\n return False",
"def _do_intervals_overlap(intervals_a, intervals_b):\n\n def contained(points, intervals):\n return np.logical_and(\n np.less_equal(intervals[:, 0], points),\n np.less_equal(points, intervals[:, 1]))\n\n return np.logical_or(\n np.logical_or(\n contained(intervals_a[:, 0], intervals_b),\n contained(intervals_a[:, 1], intervals_b)),\n np.logical_or(\n contained(intervals_b[:, 0], intervals_a),\n contained(intervals_b[:, 1], intervals_a)))",
"def _check_substr_in_list(self, s, l):\n\t\tlogic_vec = [el in s for el in l]\n\t\treturn any(logic_vec)",
"def __any_contained_in_list(self, what_list: List[str], in_list: List[str], lower: bool = True):\r\n if lower:\r\n what_list = [elem.lower() for elem in what_list]\r\n in_list = [elem.lower() for elem in in_list]\r\n\r\n return any([True if elem in in_list else False for elem in what_list])",
"def __contains__(self, item):\r\n if self == item:\r\n return True\r\n if isinstance(item, RangeSet):\r\n return all(rng in self for rng in item.ranges())\r\n else:\r\n try:\r\n return self._above_start(item) and self._below_end(item)\r\n except TypeError:\r\n try:\r\n rng_item = Range(item)\r\n return rng_item.start in self and rng_item.end in self\r\n except ValueError:\r\n pass\r\n raise TypeError(f\"'{item}' is not comparable with this Range's start and end\")",
"def overlaps(self, ranges):\n if isinstance(ranges, SourceRange):\n ranges = [ranges]\n\n for range in ranges:\n for self_range in self.affected_code:\n if range.overlaps(self_range):\n return True\n\n return False",
"def lists_overlap(self, a, b):\n\n sb = set(b)\n return any(el in sb for el in a)",
"def __contains__(self, item):\r\n if self == item:\r\n return True\r\n try:\r\n if _is_iterable_non_string(item):\r\n try:\r\n return all(\r\n any(subitem in rng for rng in self._ranges)\r\n for subitem in RangeSet._to_rangeset(item)\r\n )\r\n except ValueError:\r\n pass\r\n except TypeError:\r\n pass\r\n return any(item in rng for rng in self._ranges)",
"def test_shuffled_interval_overlap(intervals):\n print \"testing\"\n print intervals\n results = {}\n for interval in intervals.values()[0]:\n try:\n chromosome = interval[0]\n if chromosome not in results:\n results[chromosome] = {}\n results[chromosome][interval[1]] = interval[2]\n except:\n pass #Do not interrupt due to any exception. Continue to the next interval\n for chromosome in results:\n\tintervals = results[chromosome]\n ordered_intervals = collections.OrderedDict(sorted(intervals.items()))\n starts=[]\n ends=[]\n\t#print \"od\", ordered_intervals\n\t[(starts.append(start_), ends.append(end_)) for start_, end_ in ordered_intervals.items()]\n\n for x in range(0, len(starts)-1):\n if int(starts[x+1])<int(ends[x]):\n print \"reject\", starts, ends\n return False\n print \"accept\", starts, ends\n print intervals\n return True",
"def containsAllInRange(rangeStart, afterRangeEnd):\n if rangeStart >= afterRangeEnd:\n return True\n idx = self._idxOfRangeContaining(intVal)\n if idx == None:\n return False\n ranges = self.ranges\n lenRanges = len(ranges)\n while True:\n endCurrentRange = ranges[idx][1]\n if endCurrentRange >= afterRangeEnd:\n return True\n # endCurrentRange < afterRangeEnd\n idx += 1\n if idx >= lenRanges:\n # If there is no next range, the receiver\n # cannot possibly contain rangeEnd\n return False\n # There is a next range, but is there a gap between\n # it and the previous range?\n if endCurrentRange != ranges[idx][0]:\n return False\n assert(False)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Inserts an interval in a list of intervals.
|
def insert_interval_in_list(list_intervals, interval):
merge_left, merge_right = False, False
for (a, b) in list_intervals:
if b == interval[0] - 1:
merge_left = True
merge_left_pair = (a, b)
if a == interval[1] + 1:
merge_right = True
merge_right_pair = (a, b)
if merge_left and merge_right:
list_intervals.remove(merge_left_pair)
list_intervals.remove(merge_right_pair)
list_intervals.append((merge_left_pair[0], merge_right_pair[1]))
elif merge_left:
list_intervals.remove(merge_left_pair)
list_intervals.append((merge_left_pair[0], interval[1]))
elif merge_right:
list_intervals.remove(merge_right_pair)
list_intervals.append((interval[0], merge_right_pair[1]))
else:
list_intervals.append(interval)
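
A behavioural sketch of the merge cases, assuming the function above is importable; the list is mutated in place and the values are illustrative. Note that merging only happens on exact adjacency (b == start - 1 or a == end + 1); overlapping intervals are simply appended.

intervals = [(1, 4), (8, 10)]

insert_interval_in_list(intervals, (5, 7))    # adjacent on both sides: bridges the gap
print(intervals)                              # [(1, 10)]

insert_interval_in_list(intervals, (20, 25))  # no adjacent neighbour: appended as-is
print(intervals)                              # [(1, 10), (20, 25)]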
|
[
"def insert_interval(intervals, new_interval):\n length = len(intervals)\n if length < 1:\n return [new_interval]\n\n i, start, end, merged = 0, new_interval.start, new_interval.end, []\n\n while i < length and intervals[i].end < start:\n merged.append(intervals[i])\n i += 1\n\n while i < length and new_interval.start < intervals[i].end:\n new_interval.start = min(new_interval.start, intervals[i].start)\n new_interval.end = max(new_interval.end, intervals[i].end)\n i += 1\n\n merged.append(new_interval)\n\n while i < length:\n merged.append(intervals[i])\n i += 1\n\n return merged",
"def insert_points(subdiv, p_list):\n for i in p_list:\n subdiv.insert(tuple(i))",
"def add(self, interval: Interval) -> None:\n if interval is None:\n return\n if not isinstance(interval, Interval):\n raise TypeError(\"Attempt to insert non-Interval into IntervalList\")\n self.intervals.append(interval)\n self._tidy()",
"def add_interval(self, interval):\n if not isinstance(interval, GenomeInterval):\n raise GenestackException(\n 'Interval is not of type GenomeInterval: %s' % type(interval)\n )\n self.requested_area.setdefault('intervals', []).append(interval)",
"def insert(self, insert_list):\n frac, elements = zip(*insert_list)\n lg = [0.0 if el is None else el.Length for el in elements]\n fr = numpy.asarray(frac, dtype=float)\n lg = 0.5 * numpy.asarray(lg, dtype=float) / self.Length\n drfrac = numpy.hstack((fr - lg, 1.0)) - numpy.hstack((0.0, fr + lg))\n long_elems = (drfrac != 0.0)\n drifts = numpy.ndarray((len(drfrac),), dtype='O')\n drifts[long_elems] = self.divide(drfrac[long_elems])\n line = [None] * (len(drifts) + len(elements))\n line[::2] = drifts\n line[1::2] = elements\n return [el for el in line if el is not None]",
"def gap_merge_intervals(intervals, gap):\n new_intervals = []\n for interval in sorted(intervals):\n if not new_intervals:\n new_intervals.append(ChromosomeInterval(interval.chromosome, interval.start, interval.stop,\n interval.strand, interval.data))\n elif interval.separation(new_intervals[-1]) <= gap:\n new_intervals[-1] = new_intervals[-1].hull(interval)\n else:\n new_intervals.append(ChromosomeInterval(interval.chromosome, interval.start, interval.stop,\n interval.strand, interval.data))\n return new_intervals",
"def list_insert(lst, value):\n if len(lst)>1:\n if value <= lst[0]:\n lst=[value]+lst\n \n elif value>lst[-1]:\n lst.append(value)\n else:\n for i in range(len(lst)-1):\n if value>=lst[i] and value<=lst[i+1]:\n lst.insert(i+1,value)\n break\n elif len(lst)==0:\n lst.append(value)\n elif len(lst)==1:\n if value>=lst[0]:\n lst.append(value)\n else:\n lst=[value,lst[0]]\n return lst",
"def insert_new_lines(input, interval):\n return '\\n'.join(input[i:i+interval] for i in range(0, len(input), interval))",
"def merge_intervals(i_list):\n\n sorted_i_list = sorted(i_list, key=lambda i_tup: i_tup[0])\n merged_i_list = []\n\n for idx in range(len(sorted_i_list)):\n\n # In the very begining, just add the left-most interval\n # to merged_i_list\n if idx == 0:\n merged_i_list.append(sorted_i_list[0])\n\n else:\n # Pop-out the right-most element from merged_i_list\n # [pop-outing is necessary because the interval\n # will be changed (extended) if merging is performed]\n left_i_tup = merged_i_list.pop()\n\n # Check for overlap with the next interval to the right from\n # sorted_i_list\n right_i_tup = sorted_i_list[idx]\n\n if left_i_tup[1] >= right_i_tup[0]:\n # The two intervals overlap:\n # merge them and append the extended interval to merged_i_list\n new_tup = tuple(\n [\n left_i_tup[0],\n max(left_i_tup[1], right_i_tup[1])\n ]\n )\n merged_i_list.append(new_tup)\n\n else:\n # The two intervals do not overlap:\n # return left_i_tup to its' original place in the merged_i_list and\n # append right_i_tup - the new right-most interval to be checked for\n # overlap with subsequent intervals from sorted_i_list\n merged_i_list.append(left_i_tup)\n merged_i_list.append(right_i_tup)\n\n return merged_i_list",
"def add(self, begin, end, item=None):\n self._copy_on_write()\n i = Interval(begin, end, item)\n self.tree.add(i)\n self.lookup[i] = i",
"def merge(intervalList):\n resultList = [intervalList.pop(0)]\n while len(intervalList) > 0:\n inInterval = intervalList.pop(0)\n for resInterval in resultList:\n intersection = range(max(resInterval.min(), inInterval.min()), min(resInterval.max(), inInterval.max()))\n if intersection.start <= intersection.stop: # intersection\n resInterval[0] = min(resInterval.min(), inInterval.min())\n resInterval[1] = max(resInterval.max(), inInterval.max())\n break\n else:\n resultList.append(inInterval)\n return resultList",
"def insert(self, i, x):",
"def dangerous_insert(x, i, my_list):\r\n return",
"def add_range(self, a, z):\n # our implementation assumes that codepoint is used in\n # comparisons\n a = force_text(a)\n z = force_text(z)\n if z < a:\n x = z\n z = a\n a = x\n if self.ranges:\n match_a, index_a = self._bisection_search(a, 0,\n len(self.ranges) - 1)\n match_z, index_z = self._bisection_search(z, 0,\n len(self.ranges) - 1)\n if match_a:\n if match_z:\n # Both ends of the new range are already matched\n if index_a == index_z:\n # Nothing to do\n return\n else:\n # We need to join the ranges from index_a to and\n # including index_z\n self.ranges[index_a:index_z + 1] = [\n [self.ranges[index_a][0], self.ranges[index_z][1]]]\n else:\n # Note that at this point, index_z must be > index_a\n # We need to join the ranges from index_a up to but\n # *not* including index_z extending the last range to\n # include z\n self.ranges[\n index_a:index_z] = [[self.ranges[index_a][0], z]]\n elif match_z:\n # We need to join the ranges from index_a up to and\n # including index_z extending the first range to include\n # a (works even if index_a==index_z)\n self.ranges[\n index_a:index_z + 1] = [[a, self.ranges[index_z][1]]]\n else:\n # We need to join the ranges from index_a to index_z-1,\n # extending them to include a and z respectively. Note\n # that if index_a==index_z then no ranges are joined and\n # the slice assignment simply inserts a new range.\n self.ranges[index_a:index_z] = [[a, z]]\n self._merge(index_a)\n else:\n self.ranges = [[a, z]]\n self._clear_cache()",
"def insert(self, index, elements):\n i = index\n for element in elements:\n self.list.insert(i, element)\n i += 1",
"def insert_patterns(self):\n for pattern in self.patterns:\n # Get the start positions for the pattern to be inserted.\n starts = self.generate_start_positions()\n\n # Insert the pattern at start positions.\n num_neurons_in_pattern = self.num_neurons * self.inv_ratio\n for left in starts:\n right = left + self.pattern_duration\n self.spike_trains[:num_neurons_in_pattern, left: right] = pattern\n\n # Save start positions for this pattern.\n self.start_positions.append(starts)",
"def load_intervals(intervals):\n\n if intervals == 'all':\n intervals = INTERVALS\n else:\n # NOTE: add try/except KeyError for non-supported intervals\n intervals = {INTERVALS[i] for i in intervals}\n\n return intervals",
"def hull_of_intervals(intervals):\n new_intervals = []\n for interval in sorted(intervals):\n if not new_intervals:\n new_intervals.append(interval)\n continue\n u = new_intervals[-1].hull(interval)\n if u is not None:\n new_intervals[-1] = u\n else:\n new_intervals.append(interval)\n return new_intervals",
"def add_intersection_with_interval(self, typ, branch_or_cusp, interval,\n with_sign=1):\n x = self.get_intersections_with_interval(interval)\n idx = self._path_idx(typ, branch_or_cusp)\n x[idx] += with_sign"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Find files/folders that are modified together (i.e., in the same commit). Create an edge between them, and update its value.
|
def analyze_correlation_treecommit_graph(self, pairs_of_modified_files):
for (node1, node2) in pairs_of_modified_files:
if node1 in self.repo_files_path and node2 in self.repo_files_path:
# Find common prefix
path_prefix = os.path.commonpath([node1, node2])
if len(path_prefix) > 0:
path_prefix_split = path_prefix.split('\\')
tree_commit_node_name1 = node1[len(path_prefix)+1:].split('\\')[0]
tree_commit_node_name2 = node2[len(path_prefix)+1:].split('\\')[0]
else:
path_prefix_split = []
tree_commit_node_name1 = node1[len(path_prefix):].split('\\')[0]
tree_commit_node_name2 = node2[len(path_prefix):].split('\\')[0]
# Create or update edge in TreeCommit graph
self.commit_tree_graph.add_edge(path_prefix_split, tree_commit_node_name1, tree_commit_node_name2)
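
The prefix arithmetic above assumes Windows separators (on Windows, os.path is ntpath). A small illustration that calls ntpath directly so it behaves the same on any platform; the two paths are made up.

import ntpath

node1 = r'src\core\graph.py'
node2 = r'src\utils\paths.py'

path_prefix = ntpath.commonpath([node1, node2])          # 'src'
child1 = node1[len(path_prefix) + 1:].split('\\')[0]     # 'core'
child2 = node2[len(path_prefix) + 1:].split('\\')[0]     # 'utils'
print(path_prefix, child1, child2)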
|
[
"def test_change_two_files(self):\n tree = self.make_example_branch()\n self.build_tree_contents([\n ('goodbye', 'baz2\\n'),\n ('hello', 'foo2\\n\\n')])\n output = self.run_bzr('diff --stat-dir', retcode=1)[0]\n self.assertEqualDiff(output, '''\\\n . | 5 +++--\n 1 directory changed, 3 insertions(+), 2 deletions(-)\n''')\n self.check_output_rules(output)",
"def files_touched(self, commit):\n if commit.parents:\n par_list = commit.parents\n else:\n par_list = [empty_tree_oid()]\n new_oid_set = set()\n for p in par_list:\n diff = self._repo.diff(p, commit)\n for dd in diff.deltas:\n new_oid_set.add((dd.new_file.path, dd.new_file.id))\n return new_oid_set",
"def compute_files_that_should_be_in_commit(self, commit_hash):\n\n similar_commits = {}\n potential_nodes = set()\n\n # Get list of files modified in commit\n modified_files = []\n modified_files_dict = {}\n for commit in pydriller.Repository(self.repo_folder, single=commit_hash).traverse_commits():\n for modification in commit.modified_files:\n modified_files.append(modification.new_path)\n modified_files_dict[modification.new_path] = 1\n\n # Compute each commit similarity score\n print('Computing similarity score')\n for commit in tqdm.tqdm(pydriller.Repository(self.repo_folder).traverse_commits()):\n if commit.hash != commit_hash:\n modified_files_other_commit = []\n new_nodes = []\n similar_nodes = 0\n for modification in commit.modified_files:\n\n if modification.new_path in self.repo_files_path:\n current_path = modification.new_path\n else:\n current_path = self.retrieve_current_path(modification.new_path)\n\n if current_path is not None and current_path in modified_files_dict:\n similar_nodes += 1\n else:\n new_nodes.append(current_path)\n modified_files_other_commit.append(current_path)\n similarity = similar_nodes / max(len(modified_files), len(modified_files_other_commit))\n if similarity > 0.3:\n similar_commits[commit.hash] = (similarity, new_nodes)\n for node in new_nodes:\n if node not in potential_nodes:\n potential_nodes.add(node)\n\n # Compute score of new potential nodes\n print('Compute node scores')\n for node in tqdm.tqdm(potential_nodes):\n node_score = 0\n for _, (similarity, nodes) in similar_commits.items():\n if node in nodes:\n node_score += similarity\n node_score /= len(similar_commits)\n modified_files_dict[node] = node_score\n\n for node in self.repo_files_path:\n if node not in modified_files_dict:\n modified_files_dict[node] = 0\n\n return modified_files_dict",
"def addWeightedEdges(G):\n for nodeA in G.nodes:\n for nodeB in G.nodes:\n shared = 2\n if nodeA != nodeB:\n for i in nodeA:\n if i in nodeB:\n shared *= shared \n else:#\n \"\"\"this avoids adding weight when the nodes do not \n share the same root/higher level set\"\"\"\n break\n if True:\n G.add_edge(nodeA,nodeB,weight=shared**2)",
"def svn_fs_paths_changed2(*args) -> \"apr_hash_t **\":\n return _fs.svn_fs_paths_changed2(*args)",
"def get_changed_files(self):",
"def test_diff_viewer_affected_paths(repo_with_diffs: Tuple[Repo, Commit, Commit]):\n repo, previous_head, new_head = repo_with_diffs\n with DiffViewer(previous_head, new_head) as viewer:\n paths = viewer.affected_paths()\n # we touched 4 files, 1 is a rename so it has two paths (old and new)\n assert len(paths) == 5\n assert Path(\"other/gbac.rego\") in paths\n assert Path(\"mylist.txt\") in paths\n assert Path(\"other/data.json\") in paths\n assert Path(\"ignored.json\") in paths\n assert Path(\"ignored2.json\") in paths",
"def test_get_git_changed_files(self, repo):\n repo.return_value.merge_base.return_value[0].diff.return_value = [\n Change(\"/foo\", \"/foo\", False, False),\n Change(None, \"/bar\", True, False),\n Change(\"/baz\", None, False, True),\n ]\n actual = get_git_changed_files(os.getcwd())\n\n self.assertEqual(actual, {\"/bar\", \"/foo\", \"/baz\"})",
"def update_node_integrity(node):\n\n # Find suspect file copies in the database\n fcopy_query = (\n di.ArchiveFileCopy.select()\n .where(di.ArchiveFileCopy.node == node, di.ArchiveFileCopy.has_file == \"M\")\n .limit(25)\n )\n\n # Loop over these file copies and check their md5sum\n for fcopy in fcopy_query:\n fullpath = \"%s/%s/%s\" % (node.root, fcopy.file.acq.name, fcopy.file.name)\n log.info('Checking file \"%s\" on node \"%s\".' % (fullpath, node.name))\n\n # If the file exists calculate its md5sum and check against the DB\n if os.path.exists(fullpath):\n if di.util.md5sum_file(fullpath) == fcopy.file.md5sum:\n log.info(\"File is A-OK!\")\n fcopy.has_file = \"Y\"\n else:\n log.error(\"File is corrupted!\")\n fcopy.has_file = \"X\"\n else:\n log.error(\"File does not exist!\")\n fcopy.has_file = \"N\"\n\n # Update the copy status\n log.info(\"Updating file copy status [id=%i].\" % fcopy.id)\n fcopy.save()",
"def transfer_paths_to_node(H,node1,node2,node_id):\n\tif 'paths' in H.node[node1] and 'paths' not in H.node[node2]: \n\t\t# copy the dict \n\t\t# since we want to iterate on a copy of it and modify 2 other copies\n\t\tnode1_paths = H.node[node1]['paths']\n\t\t#H.node[node_id]['paths'] = copy.deepcopy(node_paths)\n\t\tedge_paths = H[node1][node2]['paths']\n\t\tfor text_id in edge_paths.keys():\n\t\t\tfor idx in edge_paths[text_id]['word_positions']:\n\t\t\t\tidx_n_e = find_previous_idx_in_node(node1_paths,text_id,idx)\n\t\t\t\tif idx_n_e>=0: # if there is a path corresponding to the node path in the edge paths,\n\t\t\t\t\tremove_path(H,node1,text_id,idx_n_e)\n\t\t\t\t\tadd_path(H,node_id,text_id,idx_n_e)\n\n\telif 'paths' in H.node[node2] and not 'paths' in H.node[node1]:\n\t\tnode2_paths = H.node[node2]['paths']\n\t\tedge_paths = H[node1][node2]['paths']\n\t\tfor text_id in edge_paths.keys():\n\t\t\tfor idx in edge_paths[text_id]['word_positions']:\n\t\t\t\tidx_e_n = find_next_idx_in_node(node2_paths,text_id,idx)\n\t\t\t\tif idx_e_n>=0: # if there is a path corresponding to the edge path in the node2 paths,\n\t\t\t\t\tremove_path(H,node2,text_id,idx_e_n)\n\t\t\t\t\tadd_path(H,node_id,text_id,idx)\n\n\telif 'paths' in H.node[node1] and 'paths' in H.node[node2]:\n\t\tnode1_paths = H.node[node1]['paths']\n\t\tnode2_paths = H.node[node2]['paths']\n\t\tedge_paths = H[node1][node2]['paths']\n\t\tfor text_id in edge_paths.keys():\n\t\t\tfor idx in edge_paths[text_id]['word_positions']:\n\t\t\t\t\tidx_n_e = find_previous_idx_in_node(node1_paths,text_id,idx)\n\t\t\t\t\tidx_e_n = find_next_idx_in_node(node2_paths,text_id,idx)\n\t\t\t\t\tif idx_e_n>=0 and idx_n_e>=0: # if there is a path corresponding to the edge path in the node1 and node2 paths,\n\t\t\t\t\t\tremove_path(H,node1,text_id,idx_n_e)\n\t\t\t\t\t\tremove_path(H,node2,text_id,idx_e_n)\n\t\t\t\t\t\tadd_path(H,node_id,text_id,idx_n_e)\n\t\t\t\t\telif idx_e_n>=0 and (not idx_n_e>=0): # if there is a path corresponding to the edge path in the node2 paths,\n\t\t\t\t\t\tremove_path(H,node2,text_id,idx_e_n)\n\t\t\t\t\t\tadd_path(H,node_id,text_id,idx)\n\t\t\t\t\telif (not idx_e_n>=0) and idx_n_e>=0: # if there is a path corresponding to the edge path in the node1 paths,\n\t\t\t\t\t\tremove_path(H,node1,text_id,idx_n_e)\n\t\t\t\t\t\tadd_path(H,node_id,text_id,idx_n_e)\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint('Warning: orphant path found between nodes: {} and {}'.format(node1,node2))\n\telse:\n\t\tnode1_paths = copy.deepcopy(H[node1][node2]['paths'])\n\t\tH.node[node_id]['paths']= node1_paths\n\t# cleaning\n\tif 'paths' in H.node[node1] and len(H.node[node1]['paths'])==0:\n\t\tdel H.node[node1]['paths']\n\tif 'paths' in H.node[node2] and len(H.node[node2]['paths'])==0:\n\t\tdel H.node[node2]['paths']",
"def relink_all(cls, old_file, new_file):\n assert old_file.checksum == new_file.checksum\n assert old_file.id\n assert new_file.id\n\n with db.session.begin_nested():\n ObjectVersion.query.filter_by(file_id=str(old_file.id)).update(\n {ObjectVersion.file_id: str(new_file.id)}\n )",
"def patch_obstructing_symlink_traversal(sbox):\n\n sbox.build()\n wc_dir = sbox.wc_dir\n alpha_contents = \"This is the file 'alpha'.\\n\"\n sbox.simple_append('A/B/F/alpha', alpha_contents)\n sbox.simple_add('A/B/F/alpha')\n sbox.simple_commit()\n sbox.simple_update()\n\n # Unversioned symlink A/B/E -> F obstructing versioned A/B/E so\n # versioned A/B/E/alpha is A/B/F/alpha\n svntest.main.safe_rmtree(sbox.ospath('A/B/E'))\n os.symlink('F', sbox.ospath('A/B/E'))\n\n unidiff_patch = (\n \"Index: A/B/E/alpha\\n\"\n \"===================================================================\\n\"\n \"--- A/B/E/alpha\\t(revision 2)\\n\"\n \"+++ A/B/E/alpha\\t(working copy)\\n\"\n \"@@ -1 +1,2 @@\\n\"\n \" This is the file 'alpha'.\\n\"\n \"+xx\\n\"\n )\n patch_file_path = sbox.get_tempname('my.patch')\n svntest.main.file_write(patch_file_path, unidiff_patch)\n\n ### Patch applies through the unversioned symlink\n expected_output = [\n 'U %s\\n' % sbox.ospath('A/B/E/alpha'),\n ]\n expected_disk = svntest.main.greek_state.copy()\n expected_disk.remove('A/B/E/alpha', 'A/B/E/beta')\n expected_disk.add({'A/B/F/alpha' : Item(contents=alpha_contents+\"xx\\n\")})\n expected_status = svntest.actions.get_virginal_state(wc_dir, 2)\n expected_status.add({'A/B/F/alpha' : Item(status=' ', wc_rev=2)})\n expected_status.tweak('A/B/E', status='~ ')\n expected_status.tweak('A/B/E/alpha', 'A/B/F/alpha', status='M ')\n expected_status.tweak('A/B/E/beta', status='! ')\n expected_skip = wc.State('', { })\n svntest.actions.run_and_verify_patch(wc_dir, patch_file_path,\n expected_output, expected_disk,\n expected_status, expected_skip)",
"def svn_fs_paths_changed(*args) -> \"apr_hash_t **\":\n return _fs.svn_fs_paths_changed(*args)",
"def _dedup_edges(self, edges):\n seen = {}\n for edge in edges:\n edge_id = edge['id']\n if edge_id in seen:\n seen_modified = parser.parse(seen[edge_id]['modified'])\n current_modified = parser.parse(edge['modified'])\n if seen_modified > current_modified:\n continue\n seen[edge_id] = edge\n return list(seen.values())",
"def markmodified(self, opath, path=None, modpaths=None):\r\n modpaths = set() if modpaths is None else modpaths\r\n path = path if path else opath\r\n if not path:\r\n return\r\n modpaths.update(self.ctree[path] if path in self.ctree else set())\r\n self.paths[path].modified = True\r\n for npath in [\r\n unmodpath\r\n for unmodpath in modpaths\r\n if unmodpath in self.paths and not self.paths[unmodpath].modified\r\n ]:\r\n self.markmodified(opath, path=npath, modpaths=modpaths)\r\n return modpaths",
"def files_from_delta(delta, tree, revid):\n ret = set()\n for change in delta.added + delta.removed + delta.modified:\n (path, id, kind) = change[:3]\n if kind not in ('file', 'symlink'):\n continue\n if not tree.has_id(id) or tree.get_file_revision(id) == revid:\n ret.add(path)\n for (path, id, old_kind, new_kind) in delta.kind_changed:\n if old_kind in ('file', 'symlink') or new_kind in ('file', 'symlink'):\n ret.add(path)\n for (oldpath, newpath, id, kind, text_modified, meta_modified) in delta.renamed:\n if kind in ('file', 'symlink'):\n ret.update([oldpath, newpath])\n return sorted([p.encode(\"utf-8\") for p in ret])",
"def checkmodified(self, opath, path=None, modpaths=None):\r\n # return [paths for paths in self.ctree[path] if self.paths[paths].modified]\r\n modpaths = set() if modpaths is None else modpaths\r\n path = path if path else opath\r\n newpaths = set()\r\n if not path:\r\n return\r\n if path in self.paths and self.paths[path].modified:\r\n newpaths = (\r\n set(\r\n [\r\n conn\r\n for conn in self.ctree[path]\r\n if conn in self.paths and self.paths[path].modified\r\n ]\r\n )\r\n - modpaths\r\n )\r\n modpaths.update(newpaths | set([path]))\r\n for npath in [unmodpath for unmodpath in newpaths]:\r\n self.checkmodified(opath, path=npath, modpaths=modpaths)\r\n return modpaths",
"def link(self, oldPath, newPath):\n conn = sqlhub.getConnection()\n trans = conn.transaction()\n now = time.time()\n i_num = self.__get_inode(oldPath)\n parent_i_num = self.__get_parent_inode(newPath)\n parent_i = Inode.selectBy(inode_num=parent_i_num).orderBy(\"-rev_id\")[0]\n dl = Dentry.selectBy(parent=parent_i)\n new_i = Inode(inode_num=parent_i.inode_num,\n rev_id=parent_i.rev_id+1,\n uid=parent_i.uid, gid=parent_i.gid,\n atime=now, mtime=parent_i.mtime,\n ctime=parent_i.ctime, size=parent_i.size,\n mode=parent_i.mode, connection=trans)\n for de in dl:\n Dentry(parent=new_i, filename=de.filename,\n inode_num=de.inode_num, connection=trans)\n Dentry(parent=new_i, filename=split_path(newPath)[-1],\n inode_num=i_num, connection=trans)\n trans.commit()",
"def test_tree_between_consecutive_revisions(self):\n tree = self.make_example_branch()\n output = self.run_bzr('diff -r 1..2 --stat-dir', retcode=1)[0]\n self.assertEqualDiff(output, '''\\\n . | 1 +\n 1 directory changed, 1 insertion(+), 0 deletions(-)\n''')\n self.check_output_rules(output)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Same as analyze_correlation_commit_lines_graph() but performs the computations concurrently.
|
def analyze_correlation_commit_lines_graph_concurent(self, single_line=None):
cwd = os.getcwd()
os.chdir(self.repo_folder)
commit_to_lines = {}
        # Analyze all the lines of the repo
        print('Analyzing all the lines of the repo')
file_lines = []
if single_line:
already_seen_files = set()
modified_in_commits = self.get_commits_that_modified_line(single_line[1], single_line[1], single_line[0])
modified_in_commits = [commit[1:-1] for commit in modified_in_commits]
for commit in pydriller.Repository(self.repo_folder, only_commits=modified_in_commits).traverse_commits():
for modification in commit.modified_files:
path = single_line[0].replace("/", "\\")
if modification.new_path in self.repo_files_path:
current_path = modification.new_path
else:
current_path = self.retrieve_current_path(modification.new_path)
if current_path not in already_seen_files:
if current_path is not None and modification.new_path[-4:] not in self.forbidden_file_extensions:
# Get path to file to count number of lines
filepath = self.repo_folder + '\\' + current_path
linenumber = self.get_file_number_of_lines(filepath)
already_seen_files.add(current_path)
for i in range(1, linenumber):
file_lines.append((current_path, i))
else:
for file_path in tqdm.tqdm(self.repo_files_path):
# Get path to file and count number of lines
complete_file_path = self.repo_folder + '\\' + file_path
linenumber = self.get_file_number_of_lines(complete_file_path)
for i in range(1, linenumber):
file_lines.append((file_path, i))
line_to_commits = {}
with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor:
future_to_line = {executor.submit(self.analyze_line, file_line): file_line for file_line in file_lines}
pbar = tqdm.tqdm(total=len(file_lines))
for future in concurrent.futures.as_completed(future_to_line):
file_line = future_to_line[future]
try:
modified_in_commits = future.result()
line_to_commits[file_line] = modified_in_commits
except Exception as exc:
print(f'Error during execution : {exc}')
pbar.update(1)
pbar.close()
for file_line, modified_in_commits in line_to_commits.items():
file_path, line = file_line
self.commit_graph_lines.add_node(f'{file_path}:{line}', number_modifications=len(modified_in_commits))
for commit in modified_in_commits:
if commit in commit_to_lines:
commit_to_lines[commit].append(f'{file_path}:{line}')
else:
commit_to_lines[commit] = [f'{file_path}:{line}']
# Building the graph
print('\n\nBuilding the graph')
for (_, list_lines) in tqdm.tqdm(commit_to_lines.items()):
pairs_of_modified_lines = []
for i in range(len(list_lines)):
for j in range(i+1, len(list_lines)):
pairs_of_modified_lines.append((list_lines[i], list_lines[j]))
for edge in pairs_of_modified_lines:
if edge[0] in self.commit_graph_lines.nodes and edge[1] in self.commit_graph_lines.nodes:
if self.commit_graph_lines.has_edge(edge[0], edge[1]):
self.commit_graph_lines.edges[edge[0], edge[1]]['number_modifications_same_commit'] += 1
else:
self.commit_graph_lines.add_edge(edge[0], edge[1], number_modifications_same_commit=1)
os.chdir(cwd)
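
A minimal usage sketch of the concurrent line-level analysis above, assuming the surrounding class (referred to here as RepoAnalyzer) has already been instantiated with its repo_folder and repo_files_path set up; the class name, constructor arguments and file paths below are illustrative assumptions, not taken from the source.

analyzer = RepoAnalyzer(repo_folder='C:\\projects\\my_repo')  # hypothetical constructor
# Build the line-level commit graph for the whole repository
analyzer.analyze_correlation_commit_lines_graph_concurent()
# Or restrict the analysis to the commits that touched a single line, given as (path, line number)
analyzer.analyze_correlation_commit_lines_graph_concurent(single_line=('src\\main.py', 42))
# The result is stored in analyzer.commit_graph_lines, a networkx graph whose nodes are 'file:line' strings
print(analyzer.commit_graph_lines.number_of_nodes(), analyzer.commit_graph_lines.number_of_edges())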
|
[
"def analyze_correlation_treecommit_graph(self, pairs_of_modified_files):\n\n for (node1, node2) in pairs_of_modified_files:\n \n if node1 in self.repo_files_path and node2 in self.repo_files_path:\n\n # Find common prefix\n path_prefix = os.path.commonpath([node1, node2])\n \n if len(path_prefix) > 0:\n path_prefix_split = path_prefix.split('\\\\')\n tree_commit_node_name1 = node1[len(path_prefix)+1:].split('\\\\')[0]\n tree_commit_node_name2 = node2[len(path_prefix)+1:].split('\\\\')[0]\n else:\n path_prefix_split = []\n tree_commit_node_name1 = node1[len(path_prefix):].split('\\\\')[0]\n tree_commit_node_name2 = node2[len(path_prefix):].split('\\\\')[0]\n\n # Create or update edge in TreeCommit graph\n self.commit_tree_graph.add_edge(path_prefix_split, tree_commit_node_name1, tree_commit_node_name2)",
"def compute_correlation(self, node_name, commit_graph, method='basic', alpha=0.5):\n\n number_modifications = commit_graph.nodes[node_name][\"number_modifications\"]\n neighbors_correlation = []\n\n for neighbor in commit_graph.neighbors(node_name):\n\n number_modifications_same_commit = commit_graph.edges[node_name, neighbor][\"number_modifications_same_commit\"]\n number_modifications_neighbor = commit_graph.nodes[neighbor][\"number_modifications\"]\n\n if method == 'basic':\n correlation = Correlation.Correlation.basic_correlation(number_modifications_same_commit, number_modifications)\n\n elif method == 'addition':\n\n correlation = Correlation.Correlation.addition_correlation(number_modifications_same_commit, number_modifications, number_modifications_neighbor, alpha)\n \n elif method == 'multiplication':\n\n correlation = Correlation.Correlation.multiplication_correlation(number_modifications_same_commit, number_modifications, number_modifications_neighbor, alpha)\n\n neighbors_correlation.append((neighbor, correlation, number_modifications_same_commit))\n \n\n neighbors_correlation = self.parse_neighbors_correlation(neighbors_correlation)\n\n print(f'Correlation of {node_name} (modified in {number_modifications} commits) with :')\n for i, neighbor in enumerate(neighbors_correlation):\n if i < 200:\n print(f'{neighbor[0]}:{neighbor[1]} : {neighbor[2]}% (modified {neighbor[3]} times)')\n else:\n break",
"def compute_same_level_correlation(self, node_path):\n\n def compute_same_level_correlation_iteration(tree_graph, splitted_path):\n\n if len(splitted_path) == 1 and splitted_path[0] in tree_graph.kids:\n self.compute_correlation(splitted_path[0], tree_graph.graph)\n elif len(splitted_path) > 1 and splitted_path[0] in tree_graph.kids:\n compute_same_level_correlation_iteration(tree_graph.kids[splitted_path[0]], splitted_path[1:])\n\n\n tree_graph = self.commit_tree_graph\n\n splitted_path = node_path.split('\\\\')\n print(splitted_path)\n\n compute_same_level_correlation_iteration(tree_graph, splitted_path)",
"def __conserve_circulation(self,xBlobInsideList,yBlobInsideList,gBlobInsideList):\n \n #----------------------------------------------------------------------\n # Determine parameters\n\n # convert the hardware flag into an int to use in _base_convection\n if self.lagrangian.blobs.velocityComputationParams['hardware'] == 'gpu': \n blobs_hardware = blobOptions.GPU_HARDWARE\n else: \n blobs_hardware = blobOptions.CPU_HARDWARE\n\n # convert the method flag into an int to use in _base_convection\n if self.lagrangian.blobs.velocityComputationParams['method'] == 'fmm': \n blobs_method = blobOptions.FMM_METHOD\n else: \n blobs_method = blobOptions.DIRECT_METHOD\n \n #----------------------------------------------------------------------\n\n #----------------------------------------------------------------------\n # Make references to all the blobs\n\n # Make references to vortex-blobs\n xBlobOutside, yBlobOutside, gBlobOutside = self.lagrangian.blobs.x, self.lagrangian.blobs.y, self.lagrangian.blobs.g \n \n # Concatenate all the blobs inside\n xBlobInside = _numpy.concatenate(xBlobInsideList)\n yBlobInside = _numpy.concatenate(yBlobInsideList)\n gBlobInside = _numpy.concatenate(gBlobInsideList)\n \n # Full set of blobs\n xBlobAll = _numpy.concatenate((xBlobOutside,xBlobInside)).copy()\n yBlobAll = _numpy.concatenate((yBlobOutside,yBlobInside)).copy()\n gBlobAll = _numpy.concatenate((gBlobOutside,gBlobInside)).copy()\n \n # Determine the total circulations\n gBlobAllTotal = gBlobAll.sum()\n \n # Determine the total circulation of globs inside each eulerian domain\n gBlobInsideTotalList = _numpy.array([listItem.sum() for listItem in gBlobInsideList])\n\n # Make references to panel collocation points (where no-slip b.c. is enforced.)\n xCP, yCP = self.lagrangian.panels.xyCPGlobalCat\n \n # Determine total eulerian circulation\n gTotalEulerianList = self.multiEulerian.gTotalInside() # of N eulerian bodies\n \n # Determine the total disregarded circulation from the eulerian domain\n gTotalDisregardedList = gTotalEulerianList - gBlobInsideTotalList\n \n # Testing: print info\n # print 'gTotalEulerianList : %s' % str(gTotalEulerianList)\n # print 'gBlobInsideTotalList : %s' % str(gBlobInsideTotalList)\n # print 'gBlobOutside : %g' % gBlobOutside.sum()\n # print 'gTotalDisregardedList : %s' % str(gTotalDisregardedList)\n #----------------------------------------------------------------------\n \n #----------------------------------------------------------------------\n # Solve for panel strenths\n \n # Determine the slip velocity on panel collocation points\n vxSlip, vySlip = _blobs_velocity(xBlobAll,yBlobAll,gBlobAll,self.lagrangian.blobs.sigma,\n xEval=xCP,yEval=yCP,hardware=blobs_hardware, \n method=blobs_method) \\\n + self.lagrangian.vInf.reshape(2,-1)\n \n # Solve for no-slip panel strengths, gPanelTotal should be negative of gTotalIgnored\n self.lagrangian.panels.solve(vxSlip, vySlip, gTotal=gTotalDisregardedList)\n \n #----------------------------------------------------------------------\n\n #----------------------------------------------------------------------\n # Conserve circulation\n\n # Determine total panel circulation (of all bodies)\n gPanelTotal = _numpy.sum(self.lagrangian.panels.gTotal)\n \n # Determine the total lagrangian circulation\n gLagrangianTotal = gBlobAllTotal + gPanelTotal\n \n if _numpy.abs(gLagrangianTotal) > self.lagrangian.blobs.gThresholdGlobal:\n # Standard-uniform correction\n # Circulation to be given to particles inside.\n gExtraPerBlob = gLagrangianTotal / 
xBlobInside.shape[0]\n \n # Add circulation to each blobs\n gBlobInsideCorrected = gBlobInside - gExtraPerBlob \n \n # Testing: print info\n # print 'gExtraPerBlob: %g' % gExtraPerBlob\n else:\n # If the error is less that gThresholdGlobal, no need for correction.\n gBlobInsideCorrected = gBlobInside\n \n # Testing: print info\n # print 'gPanelTotal: %g' % gPanelTotal\n # print 'gLagrangianTotal: %g' % gLagrangianTotal\n # print 'final total lagrangian circulation : %g' % (gBlobInsideCorrected.sum()+gBlobOutside.sum()+gPanelTotal)\n #---------------------------------------------------------------------- \n\n # return the new blob circulation \n return xBlobInside, yBlobInside, gBlobInsideCorrected",
"def load_commit_graph_lines(self, path):\n\n self.commit_graph_lines = nx.readwrite.gpickle.read_gpickle(path)",
"def annotate_commit_loc(commits, project, clear_cache=False):\n print 'Annotating lines of code changed'\n cache = {}\n if not clear_cache:\n try:\n cache = jload(project_to_fname(project, loc=True))\n # Hack to remove artifacts left by jdump,\n # also remove any empty entries\n \"\"\"\n for k, entry in cache.items():\n if entry:\n if 'json_key' in entry:\n del cache[k]['json_key']\n else:\n del cache[k]\n \"\"\"\n print ' Loaded Lines of Code Changed cache'\n\n except Exception:\n print ' Failed to load Lines of Code Changed cache'\n cache = {}\n pass\n\n cache_initial_size = len(cache)\n print ' Initial Lines of Code Changed cache size:', cache_initial_size\n\n repo_name = get_repo_name(project)\n filter_config = get_filter_config(project)\n repo = git.Repo(repo_name)\n total_operations = 0\n for k, commit in commits.items():\n if commit['reachable'] and 'loc_add' not in commit:\n if k not in cache:\n # print commit['cid']\n c = repo.commit(commit['cid'])\n loc_add = 0\n loc_change = 0\n detail = {}\n if len(c.parents) > 0:\n p = c.parents[0]\n\n files = process_commit_files_unfiltered(c)\n subset_files = [f for f in files\n if filter_file(f, filter_config)]\n for path in subset_files:\n # print 'Getting diff object for path:', path\n d = c.diff(p, create_patch=True, paths=path)\n diff_text = d[0].diff\n # print diff_text\n\n adds = sum([1 for txt in diff_text.splitlines()\n if txt.startswith('+')]) - 1\n removes = sum([1 for txt in diff_text.splitlines()\n if txt.startswith('-')]) - 1\n changes = max(adds, removes)\n detail[path] = {'add': adds, 'changes': changes}\n loc_add += adds\n loc_change += changes\n\n cache[k] = {'loc_add': loc_add,\n 'loc_change': loc_change,\n 'loc_detail': detail}\n else:\n cache[k] = {'loc_add': 0,\n 'loc_change': 0,\n 'loc_detail': {}}\n\n commit['loc_add'] = cache[k]['loc_add']\n commit['loc_change'] = cache[k]['loc_change']\n commit['loc_detail'] = cache[k]['loc_detail']\n\n total_operations += 1\n if total_operations % 100 == 0:\n print '.',\n if total_operations % 1000 == 0:\n print total_operations,\n print\n\n if len(cache) > cache_initial_size:\n print\n print ' Saving updated Lines of Code Changed Cache'\n jdump(cache, project_to_fname(project, loc=True))\n \"\"\"\n # Hack to remove artifacts left by jdump\n for k in blame_cache.keys(): # remove key artifact from jload\n if 'json_key' in blame_cache[k]:\n del blame_cache[k]['json_key']\n \"\"\"",
"def correlate(self):\n self.logger.debug('correlate()')\n integration_time = self.server._integration_time\n self.logger.info(\"correlating for %0.2f seconds\" %integration_time)\n self.bee2.write_int('hb_cntto', integration_time+1)\n for baseline in self._include_baselines:\n raw = self.bee2.read('corr_out%d' %(int(baseline[1])-1), 128)\n self._correlations[baseline] = array(CORR_OUT.unpack(raw))\n self.logger.info('baseline %s, mean %d' %(baseline, self._correlations[baseline].mean()))\n self.bee2.write_int('corr_record', 0)\n self.bee2.write_int('corr_en', 0)\n self.bee2.write_int('corr_rst', 1)\n self.bee2.write_int('corr_rst', 0)\n self.bee2.write_int('corr_en', 1)\n sleep(integration_time+1)\n self.bee2.write_int('corr_record', 1)",
"def calculate(self):\n corr=0.\n n=0\n cr_sum = 0.\n \n for i in range((self.data.shape[1]-1)):\n if self.red_glyphs.mlab_source.scalars[i] != self.hi_color:\n print(i)\n cr_mean = self.cr[:,2*i+1].mean() \n n+=1\n cr_sum+=cr_mean\n corr+=(self.data[:,1 +i]+1.)*cr_mean**2.\n corr=corr*n/cr_sum**2.-1.\n self.correlation = np.empty(shape = (self.data.shape[0], 2), dtype = 'float')\n self.correlation[:,1] = corr\n self.correlation[:,0] = self.data[:,0]\n self.corr_fig.ax.cla()\n self.corr_fig.ax.semilogx(self.data[:,0],corr)\n self.corr_fig.update = True\n return corr",
"def create_commits_dataframe_lines(self):\n\n columns = []\n\n pbar = tqdm.tqdm(total=self.total_commits)\n for commit in self.repository_mining.traverse_commits():\n\n columns.append(commit.hash)\n\n pbar.update(1)\n pbar.close()\n\n\n dataframe_list = []\n index = []\n\n\n cwd = os.getcwd()\n os.chdir(self.repo_folder)\n\n\n # Print analyzing all the lines of the repo\n print('Print analyzing all the lines of the repo')\n file_lines = []\n \n\n for file_path in tqdm.tqdm(self.repo_files_path):\n\n # Get path to file and count number of lines\n complete_file_path = self.repo_folder + '\\\\' + file_path\n linenumber = self.get_file_number_of_lines(complete_file_path)\n\n for i in range(1, linenumber):\n file_lines.append((file_path, i))\n\n line_to_commits = {}\n with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor:\n future_to_line = {executor.submit(self.analyze_line, file_line): file_line for file_line in file_lines}\n\n pbar = tqdm.tqdm(total=len(file_lines))\n for future in concurrent.futures.as_completed(future_to_line):\n file_line = future_to_line[future]\n try:\n \n modified_in_commits = future.result()\n modified_in_commits = [commit[1:-1] for commit in modified_in_commits]\n index.append(f'{file_line[0]}:{file_line[1]}')\n file_line_commits = []\n for commit in columns:\n if commit in modified_in_commits:\n file_line_commits.append(1)\n else:\n file_line_commits.append(0)\n dataframe_list.append(file_line_commits)\n except Exception as exc:\n print(f'Error during execution : {exc}')\n pbar.update(1)\n pbar.close()\n\n\n os.chdir(cwd)\n\n return pd.DataFrame(dataframe_list, index=index, columns=columns)",
"def compute_files_that_should_be_in_commit(self, commit_hash):\n\n similar_commits = {}\n potential_nodes = set()\n\n # Get list of files modified in commit\n modified_files = []\n modified_files_dict = {}\n for commit in pydriller.Repository(self.repo_folder, single=commit_hash).traverse_commits():\n for modification in commit.modified_files:\n modified_files.append(modification.new_path)\n modified_files_dict[modification.new_path] = 1\n\n # Compute each commit similarity score\n print('Computing similarity score')\n for commit in tqdm.tqdm(pydriller.Repository(self.repo_folder).traverse_commits()):\n if commit.hash != commit_hash:\n modified_files_other_commit = []\n new_nodes = []\n similar_nodes = 0\n for modification in commit.modified_files:\n\n if modification.new_path in self.repo_files_path:\n current_path = modification.new_path\n else:\n current_path = self.retrieve_current_path(modification.new_path)\n\n if current_path is not None and current_path in modified_files_dict:\n similar_nodes += 1\n else:\n new_nodes.append(current_path)\n modified_files_other_commit.append(current_path)\n similarity = similar_nodes / max(len(modified_files), len(modified_files_other_commit))\n if similarity > 0.3:\n similar_commits[commit.hash] = (similarity, new_nodes)\n for node in new_nodes:\n if node not in potential_nodes:\n potential_nodes.add(node)\n\n # Compute score of new potential nodes\n print('Compute node scores')\n for node in tqdm.tqdm(potential_nodes):\n node_score = 0\n for _, (similarity, nodes) in similar_commits.items():\n if node in nodes:\n node_score += similarity\n node_score /= len(similar_commits)\n modified_files_dict[node] = node_score\n\n for node in self.repo_files_path:\n if node not in modified_files_dict:\n modified_files_dict[node] = 0\n\n return modified_files_dict",
"def calculate(self):\n corr=0.\n n=0\n cr_sum = 0.\n \n for i in range((self.data.shape[1]-1)):\n if str(i) in self.usable_data:\n cr_mean = self.cr[:,2*i+1].mean() \n n+=1\n cr_sum+=cr_mean\n corr+=(self.data[:,1 +i]+1.)*cr_mean**2.\n self.correlation = np.empty(shape = (self.data.shape[0], 2), dtype = 'float')\n corr=corr*n/cr_sum**2.-1.\n self.correlation[:,1] = corr\n self.correlation[:,0] = self.data[:,0]\n self.corr_fig.ax.cla()\n self.corr_fig.ax.semilogx(self.data[:,0],corr)\n self.corr_fig.update = True\n return corr",
"def process(self):\n \n if (self.__ccore is True):\n self.__clusters = wrapper.kmeans(self.__pointer_data, self.__centers, self.__tolerance);\n self.__centers = self.__update_centers();\n else: \n changes = float('inf');\n \n stop_condition = self.__tolerance * self.__tolerance; # Fast solution\n #stop_condition = self.__tolerance; # Slow solution\n \n # Check for dimension\n if (len(self.__pointer_data[0]) != len(self.__centers[0])):\n raise NameError('Dimension of the input data and dimension of the initial cluster centers must be equal.');\n \n while (changes > stop_condition):\n self.__clusters = self.__update_clusters();\n updated_centers = self.__update_centers(); # changes should be calculated before asignment\n \n #changes = max([euclidean_distance(self.__centers[index], updated_centers[index]) for index in range(len(self.__centers))]); # Slow solution\n changes = max([euclidean_distance_sqrt(self.__centers[index], updated_centers[index]) for index in range(len(updated_centers))]); # Fast solution\n \n self.__centers = updated_centers;",
"def recompute(self):\n if not self.samples:\n print('Error: Data has not been loaded yet!')\n else:\n for sample in self.samples:\n ret, normed_time_series = cyclic_analysis(sample['TimeSeries'], p=1, normalize=self.norm,\n trend_removal=self.trend_removal)\n lm, phases, perm, sorted_lm, eigenvalues = ret\n cm = np.corrcoef(normed_time_series)\n (_, n) = lm.shape\n sample['SLM'] = sorted_lm\n sample['ULM'] = lm\n sample['Eigenvalues'] = eigenvalues\n sample['Phases'] = phases\n sample['Permutation'] = perm\n sample['CM'] = cm\n sample['NormedTS'] = normed_time_series\n sample['FlatULM'] = lm[np.triu_indices(n, 1)]\n sample['FlatSLM'] = sorted_lm[np.triu_indices(n, 1)]\n sample['FlatCM'] = cm[np.triu_indices(n, 1)]\n\n self.reset()",
"def _collect_git_commits(self):\n self.git_commits = {}\n for step in PROCESSING_STEPS:\n commit = self._find_git_commit_from_processing_history(step)\n self.git_commits[step] = commit",
"def test_collect_from_back_as_expected(self, do_commutative_analysis):\n\n # original circuit\n circuit = QuantumCircuit(3)\n circuit.cx(1, 2)\n circuit.cx(1, 0)\n circuit.h(2)\n circuit.cx(1, 2)\n\n # If we collect from the back, we expect the cx(1, 0) to be part of the second block.\n circuit1 = PassManager(\n CollectLinearFunctions(\n split_blocks=False,\n min_block_size=1,\n do_commutative_analysis=do_commutative_analysis,\n collect_from_back=True,\n )\n ).run(circuit)\n\n # We expect to see 3 gates (linear, h, linear)\n self.assertEqual(len(circuit1.data), 3)\n inst1 = circuit1.data[0]\n inst2 = circuit1.data[2]\n self.assertIsInstance(inst1.operation, LinearFunction)\n self.assertIsInstance(inst2.operation, LinearFunction)\n\n resulting_subcircuit1 = QuantumCircuit(3)\n resulting_subcircuit1.append(inst1)\n resulting_subcircuit2 = QuantumCircuit(3)\n resulting_subcircuit2.append(inst2)\n\n expected_subcircuit1 = QuantumCircuit(3)\n expected_subcircuit1.cx(1, 2)\n\n expected_subcircuit2 = QuantumCircuit(3)\n expected_subcircuit2.cx(1, 0)\n expected_subcircuit2.cx(1, 2)\n\n self.assertEqual(Operator(resulting_subcircuit1), Operator(expected_subcircuit1))\n self.assertEqual(Operator(resulting_subcircuit2), Operator(expected_subcircuit2))",
"def run(self):\n\n codelines = defaultdict(lambda: 0)\n non_matches = 0\n\n # rewind log file in case other sections are walking the lines\n self.mloginfo.logfileOpen.seek(0, 0)\n\n # get log file information\n lfinfo = LogFile(self.mloginfo.logfileOpen)\n if lfinfo.start and lfinfo.end:\n progress_start = self.mloginfo._datetime_to_epoch(lfinfo.start)\n progress_total = self.mloginfo._datetime_to_epoch(lfinfo.end) - progress_start\n else:\n self.progress_bar_enabled = False\n\n for i, line in enumerate(self.mloginfo.logfileOpen):\n cl = self.log2code(line)\n\n # update progress bar every 1000 lines\n if self.progress_bar_enabled and (i % 1000 == 0):\n ll = LogLine(line)\n if ll.datetime:\n progress_curr = self.mloginfo._datetime_to_epoch(ll.datetime)\n self.mloginfo.update_progress(float(progress_curr-progress_start) / progress_total)\n\n if cl:\n codelines[cl.pattern] += 1\n else:\n ll = LogLine(line)\n if ll.operation:\n # skip operations (command, insert, update, delete, query, getmore)\n continue\n if not ll.thread:\n # skip the lines that don't have a thread name (usually map/reduce or assertions)\n continue\n if len(ll.split_tokens) - ll._thread_offset <= 1:\n # skip empty log messages (after thread name)\n continue\n if \"warning: log line attempted\" in ll.line_str and \"over max size\" in ll.line_str:\n # skip lines that are too long\n continue\n\n # everything else is a real non-match\n non_matches += 1\n if self.mloginfo.args['verbose']:\n print \"couldn't match:\", line,\n\n # clear progress bar again\n self.mloginfo.update_progress(1.0)\n\n if self.mloginfo.args['verbose']: \n print\n\n for cl in sorted(codelines, key=lambda x: codelines[x], reverse=True):\n print \"%8i\"%codelines[cl], \" \", \" ... \".join(cl)\n\n print\n if non_matches > 0:\n print \"distinct couldn't match %i lines\"%non_matches\n if not self.mloginfo.args['verbose']:\n print \"to show non-matched lines, run with --verbose.\"",
"def process_arc_run(self):\n unconverged_spc_keys, converged_spc_keys = list(), list()\n unconverged_rxn_keys, converged_rxn_keys = list(), list()\n if os.path.isfile(self.paths['ARC info']):\n content = read_yaml_file(path=self.paths['ARC info'])\n for species in content['species']:\n key = self.get_species_key(label=species['label'])\n if key is not None:\n if species['success']:\n converged_spc_keys.append(key)\n self.species[key]['converged'] = True\n else:\n unconverged_spc_keys.append(key)\n self.species[key]['converged'] = False\n for reaction in content['reactions']:\n key = self.get_reaction_key(label=reaction['label'])\n if key is not None:\n if reaction['success']:\n converged_rxn_keys.append(key)\n self.reactions[key]['converged'] = True\n else:\n unconverged_rxn_keys.append(key)\n self.reactions[key]['converged'] = False\n else:\n raise ValueError(f'ARC did not save a project_info.yml file (expected to find it in {self.paths[\"ARC info\"]}, '\n f'something must be wrong.')\n self.logger.log_unconverged_species_and_reactions(\n species_keys=unconverged_spc_keys,\n species_dict=self.species,\n reaction_keys=unconverged_rxn_keys,\n reaction_dict=self.reactions,\n )\n if len(converged_spc_keys) or len(converged_rxn_keys):\n # we calculated something, add to thermo/kinetic library\n self.add_to_rmg_libraries()\n # clear the calculated objects from self.qm:\n self.qm['species'], self.qm['reactions'] = list(), list()\n self.dump_species_and_reactions()",
"def broadcast(graph, faulty_nodes=set(), trusted_nodes=set()):\n\n # Round 0: initialize and the source commits\n curr_round = 0\n # every non-faulty node (except the src) appears in non_faulty_commit_queue only once,\n # but all faulty nodes will not be in this queue\n non_faulty_commit_queue = deque()\n non_faulty_commit_queue.append(0)\n\n # to record whether a node has committed the value before (due to cyclic graph),\n # we put the node to the set if it is the source, or\n # it receives a commit from the source/trusted node, or\n # it receives (MAX_FAULT_NODES + 1) commits from incoming nodes (faulty nodes don't commit)\n non_faulty_has_committed = {0}\n\n # to record the number of proposes a node receives if it's not directly linked to the source\n propose_received = defaultdict(lambda: 0)\n\n # Round >= 1: all non-faulty nodes commits\n while len(non_faulty_commit_queue): # while not all nodes have committed\n curr_round += 1\n for curr_node in range(len(non_faulty_commit_queue)): # for all nodes in the current round of commits\n curr_node = non_faulty_commit_queue.popleft()\n curr_node_neis = [edge[1] for edge in graph.edges(curr_node)] # all outgoing neighbours of the current node\n\n if curr_node in trusted_nodes: # if this commit comes from the source or trusted nodes\n for nei in curr_node_neis:\n # If this node has committed before (due to cyclic graph) OR if this node is faulty, ignore it;\n if nei in non_faulty_has_committed or nei in faulty_nodes:\n continue\n\n non_faulty_commit_queue.append(nei)\n non_faulty_has_committed.add(nei)\n else:\n for nei in curr_node_neis:\n # If this node has committed before (due to cyclic graph) OR if this node is faulty, ignore it;\n if nei in non_faulty_has_committed or nei in faulty_nodes:\n continue\n\n # If this node is non-faulty, it commits iff it has heard (MAX_FAULT_NODES + 1) non-faulty proposes.\n # note: faulty nodes don't propose values\n # TODO: does MAX_FAULTY_NODES logic goes well with giant graph? yes?\n propose_received[nei] += 1\n if propose_received[nei] >= MAX_FAULTY_NODES + 1:\n non_faulty_commit_queue.append(nei)\n non_faulty_has_committed.add(nei)\n return len(non_faulty_has_committed), curr_round",
"def listener_side_gig(self, synchro):\n # This does unsafe things, so it can only be called when the worker is\n # in a tight loop that respects the data lock.\n\n verbose = self.PHoptions[\"verbose\"]\n # See if we have enough xbars to proceed (need not be perfect)\n xbarin = 0 # count ranks (close enough to be a proxy for scenarios)\n self.synchronizer._unsafe_get_global_data(\"FirstReduce\",\n self.node_concats)\n self.synchronizer._unsafe_get_global_data(\"SecondReduce\",\n self.node_concats)\n # last_phi_tau_update_time\n lptut = np.max(self.node_concats[\"SecondReduce\"][\"ROOT\"][6:])\n logging.debug('enter side gig, last phi update={}'.format(lptut))\n for cr in range(self.n_proc):\n backdist = self.n_proc - cr\n logging.debug('*side_gig* cr {} on rank {} time {}'.\\\n format(cr, self.rank,\n self.node_concats[\"FirstReduce\"][\"ROOT\"][-backdist]))\n if self.node_concats[\"FirstReduce\"][\"ROOT\"][-backdist] \\\n >= lptut:\n xbarin += 1\n if xbarin/self.n_proc < self.PHoptions[\"async_frac_needed\"]:\n logging.debug(' not enough on rank {}'.format(self.rank))\n # We have not really \"done\" the side gig.\n return\n\n # If we are still here, we have enough to do the calculations\n logging.debug(' good to go on rank {}'.format(self.rank))\n if verbose and self.rank == self.rank0:\n print (\"(%d)\" % xbarin)\n \n # set the xbar, xsqbar, and ybar in all the scenarios\n for k,s in self.local_scenarios.items():\n nlens = s._PySP_nlens \n for (ndn,i) in s._nonant_indexes:\n s._xbars[(ndn,i)]._value \\\n = self.node_concats[\"FirstReduce\"][ndn][i]\n s._xsqbars[(ndn,i)]._value \\\n = self.node_concats[\"FirstReduce\"][ndn][nlens[ndn]+i]\n s._ybars[(ndn,i)]._value \\\n = self.node_concats[\"FirstReduce\"][ndn][2*nlens[ndn]+i]\n\n if verbose and self.rank == self.rank0:\n print (\"rank, scen, node, var, xbar:\",\n self.rank,k,ndn,s._nonant_indexes[ndn,i].name,\n pyo.value(s._xbars[(ndn,i)]))\n\n # There is one tau_summand for the rank; global_tau is out of date when\n # we get here because we could not compute it until the averages were.\n # vk is just going to be ybar directly\n if not hasattr(self, \"uk\"):\n self.uk = {} # indexed by sname and nonant index [sname][(ndn,i)]\n self.local_punorm = 0 # local summand for probability weighted norm\n self.local_pvnorm = 0\n new_tau_summand = 0 # for this rank\n for sname,s in self.local_scenarios.items():\n scen_unorm = 0.0\n scen_vnorm = 0.0\n if sname not in self.uk:\n self.uk[sname] = {}\n nlens = s._PySP_nlens \n for (ndn,i), xvar in s._nonant_indexes.items():\n self.uk[sname][(ndn,i)] = xvar._value \\\n - pyo.value(s._xbars[(ndn,i)])\n # compute the unorm and vnorm\n scen_unorm += self.uk[sname][(ndn,i)] \\\n * self.uk[sname][(ndn,i)]\n scen_vnorm += pyo.value(s._ybars[(ndn,i)]) \\\n * pyo.value(s._ybars[(ndn,i)])\n self.local_punorm += pyo.value(s.PySP_prob) * scen_unorm\n self.local_pvnorm += pyo.value(s.PySP_prob) * scen_vnorm\n new_tau_summand += pyo.value(s.PySP_prob) \\\n * (scen_unorm + scen_vnorm/self.APHgamma)\n \n\n \n # tauk is the expecation of the sum sum of squares; update for this calc\n logging.debug(' in side-gig, old global_tau={}'.format(self.global_tau))\n logging.debug(' in side-gig, old summand={}'.format(self.tau_summand))\n logging.debug(' in side-gig, new summand={}'.format(new_tau_summand))\n self.global_tau = self.global_tau - self.tau_summand + new_tau_summand\n self.tau_summand = new_tau_summand # make available for the next reduce\n logging.debug(' in side-gig, new global_tau={}'.format(self.global_tau))\n\n 
# now we can get the local contribution to the phi_sum \n if self.global_tau <= 0:\n logging.debug(' *** Negative tau={} on rank {}'\\\n .format(self.global_tau, self.rank))\n self.phi_summand = self.compute_phis_summand()\n\n # prepare for the reduction that will take place after this side-gig\n self.local_concats[\"SecondReduce\"][\"ROOT\"][0] = self.tau_summand\n self.local_concats[\"SecondReduce\"][\"ROOT\"][1] = self.phi_summand\n self.local_concats[\"SecondReduce\"][\"ROOT\"][2] = self.local_punorm\n self.local_concats[\"SecondReduce\"][\"ROOT\"][3] = self.local_pvnorm\n self.local_concats[\"SecondReduce\"][\"ROOT\"][4] = self.local_pwnorm\n self.local_concats[\"SecondReduce\"][\"ROOT\"][5] = self.local_pznorm\n # we have updated our summands and the listener will do a reduction\n secs_so_far = (dt.datetime.now() - self.startdt).total_seconds()\n # Put in a time only for this rank, so the \"sum\" is really a report\n self.local_concats[\"SecondReduce\"][\"ROOT\"][6+self.rank] = secs_so_far\n # This is run by the listener, so don't tell the worker you have done\n # it until you are sure you have.\n self.synchronizer._unsafe_put_local_data(\"SecondReduce\",\n self.local_concats)\n self.synchronizer.enable_side_gig = False # we did it\n logging.debug(' exit side_gid on rank {}'.format(self.rank))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Compute the correlation between a file and another one in the commit graph, based on the value of the edge between them. Correlation = value of edge / max value of edge for this node
|
def compute_correlation(self, node_name, commit_graph, method='basic', alpha=0.5):
number_modifications = commit_graph.nodes[node_name]["number_modifications"]
neighbors_correlation = []
for neighbor in commit_graph.neighbors(node_name):
number_modifications_same_commit = commit_graph.edges[node_name, neighbor]["number_modifications_same_commit"]
number_modifications_neighbor = commit_graph.nodes[neighbor]["number_modifications"]
if method == 'basic':
correlation = Correlation.Correlation.basic_correlation(number_modifications_same_commit, number_modifications)
elif method == 'addition':
correlation = Correlation.Correlation.addition_correlation(number_modifications_same_commit, number_modifications, number_modifications_neighbor, alpha)
elif method == 'multiplication':
correlation = Correlation.Correlation.multiplication_correlation(number_modifications_same_commit, number_modifications, number_modifications_neighbor, alpha)
neighbors_correlation.append((neighbor, correlation, number_modifications_same_commit))
neighbors_correlation = self.parse_neighbors_correlation(neighbors_correlation)
print(f'Correlation of {node_name} (modified in {number_modifications} commits) with :')
for i, neighbor in enumerate(neighbors_correlation):
if i < 200:
print(f'{neighbor[0]}:{neighbor[1]} : {neighbor[2]}% (modified {neighbor[3]} times)')
else:
break
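
A hedged sketch of how compute_correlation might be invoked once the line-level graph has been built; the analyzer object and the 'file:line' node label are assumptions for illustration, following the node format produced by the analysis above.

# Assumes analyzer.commit_graph_lines was populated beforehand (e.g. by the concurrent analysis above)
analyzer.compute_correlation('src\\main.py:42', analyzer.commit_graph_lines, method='basic')
# 'addition' and 'multiplication' also weight the neighbour's own modification count, controlled by alpha
analyzer.compute_correlation('src\\main.py:42', analyzer.commit_graph_lines, method='addition', alpha=0.5)
# The method prints the merged line intervals most often co-modified with the given node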
|
[
"def analyze_correlation_treecommit_graph(self, pairs_of_modified_files):\n\n for (node1, node2) in pairs_of_modified_files:\n \n if node1 in self.repo_files_path and node2 in self.repo_files_path:\n\n # Find common prefix\n path_prefix = os.path.commonpath([node1, node2])\n \n if len(path_prefix) > 0:\n path_prefix_split = path_prefix.split('\\\\')\n tree_commit_node_name1 = node1[len(path_prefix)+1:].split('\\\\')[0]\n tree_commit_node_name2 = node2[len(path_prefix)+1:].split('\\\\')[0]\n else:\n path_prefix_split = []\n tree_commit_node_name1 = node1[len(path_prefix):].split('\\\\')[0]\n tree_commit_node_name2 = node2[len(path_prefix):].split('\\\\')[0]\n\n # Create or update edge in TreeCommit graph\n self.commit_tree_graph.add_edge(path_prefix_split, tree_commit_node_name1, tree_commit_node_name2)",
"def compute_same_level_correlation(self, node_path):\n\n def compute_same_level_correlation_iteration(tree_graph, splitted_path):\n\n if len(splitted_path) == 1 and splitted_path[0] in tree_graph.kids:\n self.compute_correlation(splitted_path[0], tree_graph.graph)\n elif len(splitted_path) > 1 and splitted_path[0] in tree_graph.kids:\n compute_same_level_correlation_iteration(tree_graph.kids[splitted_path[0]], splitted_path[1:])\n\n\n tree_graph = self.commit_tree_graph\n\n splitted_path = node_path.split('\\\\')\n print(splitted_path)\n\n compute_same_level_correlation_iteration(tree_graph, splitted_path)",
"def calc_frict_ratio(cone_maxes, donut_maxes, range_maxes, filename):\n # Areas to calculate the friction ratios.\n A_cone = config_file[\"areas\"][\"cone\"]\n A_donut = config_file[\"areas\"][\"donut\"]\n \n # Convert voltages to pressure (V -> psi)\n cone_max_f = []\n for cone_max in cone_maxes:\n lb = config_file['calibValues']['cone_m']*cone_max + \\\n config_file['calibValues']['cone_b']\n psi = lb/A_cone\n cone_max_f.append(psi)\n \n donut_max_f = []\n for donut_max in donut_maxes:\n lb = config_file['calibValues']['donut_m']*donut_max + \\\n config_file['calibValues']['donut_b']\n psi = lb/A_donut\n donut_max_f.append(psi)\n \n # Calculate friction ratio.\n friction_ratios = []\n for i in range(0, len(cone_max_f)):\n Fr = (donut_max_f[i]/cone_max_f[i])*100\n friction_ratios.append(Fr)\n\n # Convert cone max f to bar\n cone_max_bar = []\n for elem in cone_max_f:\n cone_max_bar.append(elem*0.0689476)\n\n with open(_makefilename(filename), 'w') as f:\n f.write('depth (in), fri (%), qc (bar) \\n')\n for i in range(0,len(friction_ratios)):\n f.write(str(range_maxes[i]) + ',' + str(friction_ratios[i]) + ',' + str(cone_max_bar[i]) + '\\n')\n\n return (friction_ratios, cone_max_bar)",
"def max_correlation(a, b):\n\n # compute the correlation\n # approximately equal to the following, with an error on the order of 1E18\n # signal.correlate2d(a/a.sum(), b/b.sum(), mode=\"full\", boundary=\"fill\", fillvalue=0)\n return np.max(signal.fftconvolve(a/a.sum(), b[::-1,::-1]/b.sum(), mode=\"full\"))",
"def string_correlation(node, end, s1, s2):\n if node == end:\n # necessary because we assume end node with indexes -1, -1\n return 0\n ref = _scorr(s1, s2)\n hs1 = s1[node.i:]\n hs2 = s2[node.j:]\n cur = _scorr(hs1, hs2)\n #print node, hs1, hs2, cur, ref\n h = cur/float(ref)\n return h",
"def correlation(self):\n pass",
"def _calculate_relations(files, relation_threshold):\n get_relations.get_relations(files[PMI_FILE_PATH], relation_threshold, \n files[RELATION_FILE_PATH])",
"def calculate(self):\n corr=0.\n n=0\n cr_sum = 0.\n \n for i in range((self.data.shape[1]-1)):\n if self.red_glyphs.mlab_source.scalars[i] != self.hi_color:\n print(i)\n cr_mean = self.cr[:,2*i+1].mean() \n n+=1\n cr_sum+=cr_mean\n corr+=(self.data[:,1 +i]+1.)*cr_mean**2.\n corr=corr*n/cr_sum**2.-1.\n self.correlation = np.empty(shape = (self.data.shape[0], 2), dtype = 'float')\n self.correlation[:,1] = corr\n self.correlation[:,0] = self.data[:,0]\n self.corr_fig.ax.cla()\n self.corr_fig.ax.semilogx(self.data[:,0],corr)\n self.corr_fig.update = True\n return corr",
"def corr(x, y):\n\treturn abs(np.corrcoef(x, y)[0][1])",
"def compute_relationship(\n v1: np.ndarray,\n v2: np.ndarray,\n v1_label: Text = 'v1',\n v2_label: Text = 'v2',\n maxlag: int = 4,\n fname: Text = '',\n verbose: bool = True) -> dict:\n # Correlation test.\n rval, pval = pearsonr(v1, v2)\n\n if verbose:\n significant = ''\n if pval < 0.05:\n significant = 'yay!!!!'\n print('r-val: {}\\np-val: {} \\t{}'.format(rval, pval, significant))\n\n # Scatter plot.\n f = plt.figure()\n sns.scatterplot(v2, v1)\n # plt.plot((min(v1), max(v2)), (max(v1), min(v2)), 'r')\n plt.plot(np.linspace(min(v2), max(v2)), np.linspace(min(v1), max(v1)), 'r')\n plt.xlabel(v2_label)\n plt.ylabel(v1_label)\n plt.show()\n if fname:\n f.savefig('{}.png'.format(fname), bbox_inches='tight')\n f.savefig('{}.pdf'.format(fname), bbox_inches='tight')\n\n # Causality test.\n causality_res = grangercausalitytests(\n np.column_stack((v1, v2)),\n maxlag=maxlag,\n verbose=verbose)\n return {'rval': rval, 'pval': pval, 'causality': causality_res}",
"def pearson_r(self, file1, file2):\n \n mdist = self._file_to_array(file1)\n mreward = self._file_to_array(file2, type=np.float32)\n\n return np.corrcoef(mdist, mreward)[0, 1]",
"def correlation(self, column_a, column_b):\n\n return self._scala.correlation(column_a, column_b)",
"def correlation(f1, f2, patch, max_displacement, stride1=1, stride2=1):\n channel = f1.shape[-1]\n norm = np.prod(to_list(patch, 2) + [channel])\n v1 = _make_vector(f1, patch, stride1)\n v1 = tf.expand_dims(v1, -2)\n v2 = _make_displacement(f2, patch, max_displacement, stride1, stride2)\n corr = tf.matmul(v1, v2) / tf.to_float(norm)\n return tf.squeeze(corr, axis=-2)",
"def calculate_correlation(self):\n self.network.index_nodes()\n self._calculate_dist()\n pearson_correlation, pearson_pvalue = scipy.stats.pearsonr(self.dist[:,0], self.dist[:,1])\n spearman_correlation, spearman_pvalue = scipy.stats.spearmanr(self.dist[:,0], self.dist[:,1])\n return pearson_correlation, pearson_pvalue, spearman_correlation, spearman_pvalue",
"def calculate(self):\n corr=0.\n n=0\n cr_sum = 0.\n \n for i in range((self.data.shape[1]-1)):\n if str(i) in self.usable_data:\n cr_mean = self.cr[:,2*i+1].mean() \n n+=1\n cr_sum+=cr_mean\n corr+=(self.data[:,1 +i]+1.)*cr_mean**2.\n self.correlation = np.empty(shape = (self.data.shape[0], 2), dtype = 'float')\n corr=corr*n/cr_sum**2.-1.\n self.correlation[:,1] = corr\n self.correlation[:,0] = self.data[:,0]\n self.corr_fig.ax.cla()\n self.corr_fig.ax.semilogx(self.data[:,0],corr)\n self.corr_fig.update = True\n return corr",
"def edge_magnitude(edge_x, edge_y):\n # TODO: implement this function.\n # raise NotImplementedError\n\n edge_mag_1 = (edge_x)\n for i in range(0, len(edge_x)):\n for j in range(0,len(edge_x[i])):\n edge_mag_1[i][j] = ((edge_x[i][j]**2) + (edge_y[i][j]**2))**0.5\n\n \n \n # edge_max = np.max(edge_mag_1)\n # print(edge_max)\n # edge_mag = edge_mag_1\n \n # for i in range(0, len(edge_mag_1)):\n # for j in range(0,len(edge_mag_1[i])):\n # edge_mag[i][j] = edge_mag_1[i][j] / edge_max\n\n\n return edge_mag_1",
"def correlation(self, a, c, sample, bucket1, bucket2):\n sample = list(set(sample))\n numerator = 0\n denominator1 = 0\n denominator2 = 0\n avgv = (bucket1['v'][1] + bucket2['v'][1]) / 2\n avgf = (bucket2['v'][2] + bucket2['v'][2]) / 2\n c = Counter(sample)\n for i in range(0, len(sample)):\n if sample[i] >= a and sample[i] < c:\n if sample[i] < bucket1['high']:\n numerator += (sample[i] - avgv) * (c[sample[i]] * avgf)\n denominator1 += np.power(sample[i] - avgv, 2)\n denominator2 += np.power(c[sample[i]] - avgf, 2)\n else:\n numerator += (sample[i] - avgv) * (c[sample[i]] * avgf)\n denominator1 += np.power(sample[i] - avgv, 2)\n denominator2 += np.power(c[sample[i]] - avgf, 2)\n return numerator / (np.power(denominator1, 0.5) * np.power(denominator2, 0.5))",
"def max_correlation1d(a, b):\n\n # compute the correlation\n return max(np.correlate(a/a.sum(), b/b.sum(), mode=\"full\"))",
"def compute_confidence(self):\n pseudo_distance = self.connectivities_coarse.copy()\n pseudo_distance.data = 1./pseudo_distance.data\n connectivities_coarse_tree = minimum_spanning_tree(pseudo_distance)\n connectivities_coarse_tree.data = 1./connectivities_coarse_tree.data\n connectivities_coarse_tree_indices = [\n connectivities_coarse_tree[i].nonzero()[1]\n for i in range(connectivities_coarse_tree.shape[0])]\n # inter- and intra-cluster based confidence\n if not self._tree_based_confidence:\n total_n = self.n_neighbors * np.array(self.vc.sizes())\n logg.msg('{:>2} {:>2} {:>4} {:>4} {:>4} '\n '{:>7} {:>7} {:>7} {:>7}'\n .format('i', 'j', 'conn', 'n[i]', 'n[j]',\n 'avg', 'thresh', 'var', 'conf'), v=5)\n maximum = self.connectivities_coarse.max()\n confidence = self.connectivities_coarse.copy() # initializing\n for i in range(self.connectivities_coarse.shape[0]):\n for j in range(i+1, self.connectivities_coarse.shape[1]):\n if self.connectivities_coarse[i, j] > 0:\n minimum = min(total_n[i], total_n[j])\n average = self.connectivities_coarse[i, j] / minimum\n geom_mean = np.sqrt(total_n[i] * total_n[j])\n confidence[i, j] = self.connectivities_coarse[i, j] / geom_mean\n # confidence[i, j] = self.connectivities_coarse[i, j] / maximum\n variance = 0.0\n # variance = self.threshold * (1-self.threshold)\n # if average > self.threshold:\n # confidence[i, j] = 1\n # else:\n # confidence[i, j] = norm.cdf(average,\n # self.threshold, variance)\n logg.msg(\n '{:2} {:2} {:4} {:4} {:4} '\n '{:7.2} {:7.2} {:7.2} {:7.2}'\n .format(i, j, int(self.connectivities_coarse[i, j]),\n total_n[i], total_n[j],\n average, self.threshold, variance, confidence[i, j]), v=5)\n confidence[j, i] = confidence[i, j]\n # tree-based confidence\n else:\n median_connectivities_coarse_tree = np.median(connectivities_coarse_tree.data)\n confidence = self.connectivities_coarse.copy()\n confidence.data[self.connectivities_coarse.data >= median_connectivities_coarse_tree] = 1\n connectivities_coarse_adjusted = self.connectivities_coarse.copy()\n connectivities_coarse_adjusted.data -= median_connectivities_coarse_tree\n connectivities_coarse_adjusted.data = np.exp(connectivities_coarse_adjusted.data)\n index = self.connectivities_coarse.data < median_connectivities_coarse_tree\n confidence.data[index] = connectivities_coarse_adjusted.data[index]\n confidence_tree = self.compute_confidence_tree(\n confidence, connectivities_coarse_tree_indices)\n self.confidence = confidence\n self.confidence_tree = confidence_tree"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Parses the neighbors_correlation object created in compute_correlation() to merge adjacent line intervals and remove redundant entries.
|
def parse_neighbors_correlation(self, neighbors_correlation):
correlation_intervals = {}
for neighbor, correlation, num_mod in neighbors_correlation:
filepath, line = neighbor.split(':')
line = int(line)
if filepath not in correlation_intervals:
correlation_intervals[filepath] = {(line, line):(correlation, num_mod)}
else:
merge_left, merge_right = False, False
for (a, b) in correlation_intervals[filepath].keys():
if b == line - 1 and correlation_intervals[filepath][(a,b)][0] == correlation:
merge_left = True
merge_left_pair = (a, b)
if a == line + 1 and correlation_intervals[filepath][(a,b)][0] == correlation:
merge_right = True
merge_right_pair = (a, b)
if merge_left and merge_right:
correlation_intervals[filepath].pop(merge_left_pair)
correlation_intervals[filepath].pop(merge_right_pair)
correlation_intervals[filepath][(merge_left_pair[0], merge_right_pair[1])] = (correlation, num_mod)
elif merge_left:
correlation_intervals[filepath].pop(merge_left_pair)
correlation_intervals[filepath][(merge_left_pair[0], line)] = (correlation, num_mod)
elif merge_right:
correlation_intervals[filepath].pop(merge_right_pair)
correlation_intervals[filepath][(line, merge_right_pair[1])] = (correlation, num_mod)
else:
correlation_intervals[filepath][(line, line)] = (correlation, num_mod)
neighbors_correlation_packed = []
for filepath, linedict in correlation_intervals.items():
for line_interval, data in linedict.items():
neighbors_correlation_packed.append((filepath, line_interval, data[0], data[1]))
neighbors_correlation_packed.sort(key=lambda x: (-x[2], x[0], x[1][0]), reverse=False)
return neighbors_correlation_packed
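
An illustrative run of the interval merging, with made-up neighbour tuples in the ('file:line', correlation, number_modifications_same_commit) format produced by compute_correlation; the paths and values are invented for the example, and the analyzer object is the same hypothetical instance as above.

neighbors = [
    ('src\\main.py:10', 80.0, 4),
    ('src\\main.py:11', 80.0, 4),
    ('src\\main.py:12', 80.0, 4),
    ('src\\utils.py:3', 50.0, 2),
]
packed = analyzer.parse_neighbors_correlation(neighbors)
# Consecutive lines sharing the same correlation collapse into one interval, sorted by decreasing correlation:
# [('src\\main.py', (10, 12), 80.0, 4), ('src\\utils.py', (3, 3), 50.0, 2)]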
|
[
"def remove_redundancies(self):\n start = timeit.default_timer()\n nrows_before = len(self.all_geometries.index)\n df = self.all_geometries.copy()\n df = df.round(10)\n og_cols = df.columns.tolist()\n # sort interatomic distance columns according to alphabetized bond types\n # e.g. OH HH CH --> CH HH OH\n alpha_bond_cols = [og_cols[i] for i in self.mol.alpha_bond_types_indices]\n alpha_bond_cols.append('cartesians')\n alpha_bond_cols.append('internals')\n df = df[alpha_bond_cols]\n df_cols = df.columns.tolist()\n # sort values of each 'bondtype' subpartition of interatomic distance columns\n # subpartitions are defined by the index of the first occurance of each \n # bond_type label. CH CH CH HH HH OH would be [0,3,5]. These define partition bounds.\n ind = self.mol.alpha_bond_types_first_occur_indices\n K = len(ind)\n # sort each subpartition\n for i in range(K):\n if i < (K - 1):\n cut = slice(ind[i], ind[i+1])\n mask = df_cols[cut]\n df.loc[:,mask] = np.sort(df.loc[:,mask].values, axis=1)\n else:\n mask = df_cols[i:self.n_interatomics]\n df.loc[:,mask] = np.sort(df.loc[:,mask].values, axis=1)\n\n # Remove duplicates\n # take opposite of duplicate boolean Series (which marks duplicates as True)\n mask = -df.duplicated(subset=self.bond_columns)\n self.unique_geometries = self.all_geometries.loc[mask] \n self.n_disps = len(self.unique_geometries.index)\n print(\"Redundancy removal took {} seconds\".format(round((timeit.default_timer() - start),2)))\n print(\"Removed {} redundant geometries from a set of {} geometries\".format(nrows_before-self.n_disps, nrows_before))",
"def __get_spatial_neighbors(self):\n row = self.gdf[self.gdf['shapeID'] == self.target_id].squeeze()\n target_neighbors = self.gdf[~self.gdf.geometry.disjoint(row.geometry)].shapeID.tolist()\n neighbors = target_neighbors\n\n all_neighbors = {}\n self.degree_dict[0] = [self.target_id]\n self.degree_dict[1] = [i for i in target_neighbors if i != self.target_id]\n \n # Get neighbors\n for i in range(self.degrees):\n new_n = []\n for n in neighbors:\n cur_row = self.gdf[self.gdf['shapeID'] == n].squeeze()\n cur_neighbors = self.gdf[~self.gdf.geometry.disjoint(cur_row.geometry)].shapeID.tolist()\n if n not in all_neighbors.keys():\n all_neighbors[n] = cur_neighbors\n new_n.append(n)\n if i != 0:\n self.degree_dict[i + 1] = new_n\n\n k = [v for k,v in all_neighbors.items()]\n k = list(set([item for sublist in k for item in sublist]))\n k = [i for i in k if i not in all_neighbors.keys()]\n neighbors = k\n\n if len(neighbors) == 0:\n break\n\n # Cleanup: remove all ofthe neighbors of neighbors that are more than one degree fromt he target node\n # i.i. remove all of the muiciaplites in the values that are not in the keys\n u_vals = list(set([item for sublist in all_neighbors.values() for item in sublist]))\n remove_vals = [i for i in u_vals if i not in all_neighbors.keys()]\n for k,v in all_neighbors.items():\n to_remove = [j for j in v if j in remove_vals]\n for tr in to_remove:\n all_neighbors[k] = [i for i in all_neighbors[k] if i not in tr]\n\n return all_neighbors",
"def dropcors(data, thresh=0.8):\n def halfcors(dat):\n \"\"\"Finds reverse duplicates of correlation pairs and drops them.\n \"\"\"\n halved = []\n\n for i in range(len(dat)):\n revpair = (dat.iloc[i,1], dat.iloc[i,0])\n\n if revpair in halved:\n pass\n\n else:\n halved.append((dat.iloc[i,0], dat.iloc[i,1]))\n\n return halved\n\n\n def listpairs(pairslist):\n \"\"\"Lists all the elements in the correlations pairs\"\"\"\n countatt = []\n\n for pair in pairslist:\n countatt.append(pair[0])\n countatt.append(pair[1])\n\n return countatt\n\n\n def dropdup(pars, dups):\n \"\"\"Dropping selected pairs from the list of correlated pairs\"\"\"\n for dup in dups:\n ind = pars[pars == dup].index\n pars.drop(ind)\n\n return pars\n\n #print(\"\\n\\nCurrent columns in data at the beginning:\\n\\n{}\".format(data.columns))\n\n corr_preproc = data.corr()\n cri_hi_prep = abs(corr_preproc < 1) & abs(corr_preproc >= thresh)\n\n atts_corr = corr_preproc[cri_hi_prep].stack().reset_index()\n atts_corr.columns=['first', 'second', 'corr']\n print(len(atts_corr))\n print(\"\\nCorrelation pairs:\\n\\n{}\".format(atts_corr))\n\n halfpars = halfcors(atts_corr)\n #print(len(halfpars))\n #print(\"\\n\\nhafpars:\\n\\n{}\".format(halfpars))\n\n count_att = listpairs(halfpars)\n #print(len(count_att))\n #print(\"\\n\\ncount_att:\\n\\n{}\".format(count_att))\n\n coratrank = pd.Series(count_att).value_counts()\n #print(len(coratrank))\n #print(\"\\n\\ncoratrank:\\n\\n{}\".format(coratrank))\n\n # Recording attributes which correlate with more than one another attribute.\n drpat = []\n\n #for at in coratrank[coratrank > 1].index:\n # drpat.append(at)\n\n #print(len(drpat))\n #print(\"\\n\\ndrpat (first):\\n\\n{}\".format(drpat))\n\n countattS = pd.Series(count_att)\n sings = sorted((dropdup(countattS, drpat).str.lower()), key=lambda x: (len(x), x))\n #print(len(sings))\n #print(\"\\n\\nsings (first):\\n\\n{}\".format(sings))\n\n for sing in sings:\n for i in halfpars:\n\n if i[0] == sing:\n drpat.append(sing)\n if i[1] in sings:\n sings.remove(i[1])\n\n if i[1] == sing:\n drpat.append(sing)\n if i[0] in sings:\n sings.remove(i[0])\n\n print(len(drpat))\n print(\"\\nRemove the following {} columns:\\n\\n{}\".format(len(drpat), drpat))\n\n wocorrs = data.drop(columns=drpat)\n\n print(\"\\nRemaining columns:\\n{}\\n{}\".format(len(wocorrs.columns), wocorrs.columns))\n\n return wocorrs",
"def remove_segments_with_no_points(rn, data, distance_threshold=5):\n\n\t# 1. build a kd tree of all data:\n\tkd_index = cKDTree(list(set(data)))\n\t# Transform distance radius from meters to radius\n\tRADIUS_DEGREE = distance_threshold * 10e-6\n\n\tfor s, t in rn.edges():\n\t\tmiddle_p = ((s[0] + t[0]) / 2, (s[1] + t[1]) / 2)\n\t\tneighbors = kd_index.query_ball_point(x=middle_p, r=RADIUS_DEGREE, p=2)\n\t\tif len(neighbors) == 0:\n\t\t\trn.remove_edge(s, t)\n\treturn rn",
"def removeRegions(dataFrame, er):\n midpoints = [] # to store midpoints\n\n # finding midpoints\n for index, row in dataFrame.iterrows():\n midpoints.append((row['start'] + row['end']) // 2)\n\n # adding another column called midpoints\n dataFrame['midpoint'] = midpoints\n\n # sort based on chromosome and then by midpoints\n sData = dataFrame.sort_values(\n by=['midpoint', 'p-value'], kind='mergesort')\n excludeList = [] # list of indexes that needs to be removed\n # print sData\n # print \"\\n\"\n\n # removing the motifs that are called on the different strand\n sortedData = removeRepeats(sData)\n\n # choosing motifs based on p-values per chromosome\n for i in sortedData['#chr'].unique():\n chrData = sortedData.loc[sortedData['#chr'] == i]\n # print chrData[0:3]\n mdp = chrData['midpoint'].tolist()\n # pprint.pprint(mdp)\n print(\"processing chromosome : {} \".format(i))\n\n # iterating through each motif to pick the least p-value\n for j in range(0, len(mdp)):\n # print mdp[j]\n\n # retrieve the index that will later be removed or retained\n index1 = chrData.index[chrData['midpoint'] == mdp[j]].tolist()[\n 0]\n # print index1\n\n excludeZone = range(0, mdp[j] + er) # create a boundary region\n pv1 = chrData.loc[index1]['p-value'] # retrieve the pvalue\n\n # if the index is not excluded from any previous comparisons\n if index1 not in excludeList:\n for k in range(j + 1, len(mdp)):\n # print \"\\t {}\".format(mdp[k])\n # retrieve the next line index\n index2 = chrData.index[chrData['midpoint'] == mdp[k]].tolist()[\n 0]\n # print \"\\t {}\".format(index2)\n\n # checking if the next line is in the exclusionZone\n if mdp[k] in excludeZone:\n # retrieve the p-value for comparison\n pv2 = chrData.loc[index2]['p-value']\n\n # excluding the greater p-value after comparison\n if pv1 <= pv2:\n # pprint.pprint(\" IF pv1: {}, pv2 :{}\".format(pv1,pv2))\n excludeList.append(index2)\n # pprint.pprint(list(chrData.loc[index2]))\n else:\n # pprint.pprint(\"ELSE pv1: {}, pv2 :{}\".format(pv1,pv2))\n excludeList.append(index1)\n # pprint.pprint(list(chrData.loc[index1]))\n # else:\n # print \"IN EXCLUDE LIST : {}\".format(index1)\n print(\"\\nRemoved : {} ,\\nindex : {}\\n\".format(\n len(excludeList), excludeList))\n sData = sortedData.drop(excludeList)\n sortedData = sData.sort_values(by=['#chr', 'rank'], kind='mergesort')\n # print sortedData\n sortedData = sortedData.drop(['midpoint'], axis=1)\n print(\"Final Shape of the data : {}\".format(sortedData.shape))\n sortedData.to_csv('dedupFimo.bed', sep='\\t', header=False, index=False)\n # return excludeList",
"def find_corridors(self, list_of_walls):\n same_sided_walls = []\n single_used_walls = []\n for wall in list_of_walls.wall_list:\n single_used_walls.append(wall)\n #single_used_walls = deepcopy(list_of_walls)\n opposite_sided_walls = []\n for first_wall, second_wall in itertools.combinations(list_of_walls.wall_list, 2):\n if -3 < self.angle_between_lines(first_wall,second_wall) < 3 :\n # TODO this can be made more efficient by not checking all 4 distance possibilities, but by having a formula that finds the shortest path between two lines\n minimum_distance = self.minimum_distance_between_lines(first_wall, second_wall)\n\n\n\n if minimum_distance < 2: # for starters requiring two walls that belong to the same side of a hallway to be within 2 meters of each other. Might need adjusting\n\n # first we need to check, if either of the parallel walls is already a part of a corridor side\n create_new_entry = True\n for i, extended_walls in enumerate(same_sided_walls):\n if first_wall in extended_walls:\n create_new_entry = False\n # appending the second wall to the end of the list, not sure if this is always correct\n same_sided_walls[i].append(second_wall)\n if second_wall in single_used_walls:\n single_used_walls.remove(second_wall)\n break\n if second_wall in extended_walls:\n create_new_entry = False\n same_sided_walls[i].append(first_wall)\n if second_wall in single_used_walls:\n single_used_walls.remove(first_wall)\n break\n if create_new_entry:\n same_sided_walls.append([first_wall, second_wall])\n single_used_walls.remove(first_wall)\n single_used_walls.remove(second_wall)\n\n single_used_walls = [[x] for x in single_used_walls]\n for first_side, second_side in itertools.combinations(itertools.chain(same_sided_walls, single_used_walls), 2):\n # first we check if the two sides are at a 180 degrees angle to each other\n\n if 177 < self.angle_between_lines(first_side[0],second_side[0]) < 183:\n # if that is the case we check if any wall combination is within 3 meters:\n\n within_distance = False\n #wall_segments_within_distance = [(x, y) for x in first_side for y in second_side if self.minimum_distance_between_lines(x,y) < 4]\n for x, y in itertools.product(first_side, second_side):\n if self.minimum_distance_between_lines(x,y) < 2.3:\n within_distance = True\n break\n # wall_segments_within_distance is currently just 2 walls, x and y. not first_side, second_side. this needs to be changed\n\n if within_distance:\n for x, y in itertools.product(first_side, second_side):\n self.show_line_in_rviz(x.wall_start, y.wall_start, line_color=ColorRGBA(1, 1, 0, 0.5))\n self.show_line_in_rviz(x.wall_start, y.wall_end, line_color=ColorRGBA(1, 1, 0, 0.5))\n self.show_line_in_rviz(x.wall_end, y.wall_start, line_color=ColorRGBA(1, 1, 0, 0.5))\n self.show_line_in_rviz(x.wall_end, y.wall_end, line_color=ColorRGBA(1, 1, 0, 0.5))",
"def clean_up_zero_length_edges(self):\n\n resulting_edges = []\n for edge in self.edges:\n start = edge.get_origin()\n end = edge.twin.get_origin()\n if start.xd == end.xd and start.yd == end.yd:\n\n # Combine the vertices\n v1: Vertex = edge.origin\n v2: Vertex = edge.twin.origin\n\n # Move connected edges from v1 to v2\n for connected in v1.connected_edges:\n connected.origin = v2\n v1.connected_edges.remove(connected)\n v2.connected_edges.append(connected)\n\n # Remove vertex v1\n self.vertices.remove(v1)\n\n # Delete the edge\n edge.delete()\n edge.twin.delete()\n\n else:\n resulting_edges.append(edge)\n self.edges = resulting_edges",
"def find_neighbours(self):\t\n\t\tself.total_bonds = []\n\n\t\t# find all possible bonds\n\t\tfor i in range(self.length):\n\t\t\tfor j in range(i, self.length):\n\t\t\t\tdx = abs(self.seq[i].location[0] - self.seq[j].location[0])\n\t\t\t\tdy = abs(self.seq[i].location[1] - self.seq[j].location[1])\n\t\t\t\t#dz = abs(self.seq[i].location[2] - self.seq[j].location[2])\n\t\t\n\t\t\t\tif ((dx + dy) == 1):\n\t\t\t\t\tself.total_bonds.append(Bond(self.seq[i], self.seq[j]))\n\n\t\t# subtract covalent bonds\n\t\tself.other_bonds = [x for x in self.total_bonds if x not in self.covalent_bonds]",
"def consolidate_connections(connections_list):\n\n\t# Sort list (optional)\n\tconnections_list.sort(key=(lambda x: (x['from'], x['to']) ))\n\n\t# Remove self loops\n\tfor i in reversed(range(0,len(connections_list))):\n\t\tif (connections_list[i]['from'] == connections_list[i]['to']):\n\t\t\tdel(connections_list[i])\n\n\t# Split list to groups that have the same from and to stops\n\tsame_connection_groups = groupby(connections_list, key=lambda x: x['from'] + \"_\" + x['to'])\n\n\t# Merge these groups together by concating the routes for each connection using \"|\"\n\tconnections_list = [reduce(merge_connections, group) for _,group in same_connection_groups]\n\n\treturn connections_list",
"def drop_hanging_nodes(network, tolerance = 0.005): \n if 'degree' not in network.nodes.columns:\n deg = calculate_degree(network)\n else: deg = network.nodes['degree'].to_numpy()\n #hangNodes : An array of the indices of nodes with degree 1\n hangNodes = np.where(deg==1)\n ed = network.edges.copy()\n to_ids = ed['to_id'].to_numpy()\n from_ids = ed['from_id'].to_numpy()\n hangTo = np.isin(to_ids,hangNodes)\n hangFrom = np.isin(from_ids,hangNodes)\n #eInd : An array containing the indices of edges that connect\n #the degree 1 nodes\n eInd = np.hstack((np.nonzero(hangTo),np.nonzero(hangFrom)))\n degEd = ed.iloc[np.sort(eInd[0])]\n edge_id_drop = []\n for d in degEd.itertuples():\n dist = shapely.measurement.length(d.geometry)\n #If the edge is shorter than the tolerance\n #add the ID to the drop list and update involved node degrees\n if dist < tolerance:\n edge_id_drop.append(d.id)\n deg[d.from_id] -= 1\n deg[d.to_id] -= 1\n # drops disconnected edges, some may still persist since we have not merged yet\n if deg[d.from_id] == 1 and deg[d.to_id] == 1: \n edge_id_drop.append(d.id)\n deg[d.from_id] -= 1\n deg[d.to_id] -= 1\n \n edg = ed.loc[~(ed.id.isin(edge_id_drop))].reset_index(drop=True)\n aa = ed.loc[ed.id.isin(edge_id_drop)]\n edg.drop(labels=['id'],axis=1,inplace=True)\n edg['id'] = range(len(edg))\n n = network.nodes.copy()\n n['degree'] = deg\n #Degree 0 Nodes are cleaned in the merge_2 method\n #x = n.loc[n.degree==0]\n #nod = n.loc[n.degree > 0].reset_index(drop=True)\n return Network(nodes = n,edges=edg)",
"def removeAllCorrelations(self):\n\t\tfor tb in self.bins:\n\t\t\ttb.removeAllCorrelations()",
"def remove_non_edges(neighbor_data, print_img=False):\n\n new_array = np.empty((50, 10))\n new_array[:] = np.NAN\n new_array[0] = neighbor_data[0] # first row\n new_array[-1] = neighbor_data[-1] # last row\n new_array[:, 0] = neighbor_data[:, 0] # first column\n new_array[:, -1] = neighbor_data[:, -1] # last column\n\n if print_img:\n plt.matshow(new_array, cmap=plt.cm.winter)\n plt.show()\n\n return new_array",
"def _read_cluster_correlations(\n self, corr_in_file, detect_linear_dependencies=True):\n\n A = np.loadtxt(corr_in_file, skiprows=3)\n if detect_linear_dependencies:\n n_redundant = 0\n for i in range(A.shape[1]-1, 0, -1):\n x = np.linalg.lstsq(A[:, :i], A[:, i])\n error = A[:, :i].dot(x[0]) - A[:, i]\n if np.linalg.norm(error) < 1.0e-10:\n n_redundant += 1\n A[:, i] = 0.0\n if n_redundant > 0:\n corr_in_file_red = corr_in_file + \"-red\"\n print(\" {} redundant clusters removed.\".format(n_redundant))\n header = \"{} # number of clusters\\n\".format(A.shape[1])\n header += \"{} # number of configurations\\n\".format(\n A.shape[0])\n header += \"clusters\"\n np.savetxt(corr_in_file_red, A, fmt='%9.7f',\n delimiter=' ', header=header, comments='')\n\n return A",
"def __removeBlobs(self):\n \n # Total circulation removed\n self.__totalCirculationRemoved = []\n \n # Iterate through all the sub domains (each mesh) of the navier-stokes\n for subDomainID in self.multiEulerian.subDomainKeys:\n \n # Make reference to the boundary polygon\n # Format: closed loop polygon\n # Shape : (:,2) [x in column 1, y in column2]\n # Reasoning : necessary of _points_in_poly function\n # Remove all the blobs inside the outer boundary of the interpolation\n # region, including inside the surface (or very near surface blobs).\n xyPolygon = self.interpolationRegions[subDomainID]['boundaryPolygon']\n \n # Determine the bounding box of the interpolation region\n xyMin_boundingBox = xyPolygon.min(axis=0)\n xyMax_boundingBox = xyPolygon.max(axis=0)\n \n # Make reference to old uncorrected blobs\n xBlob = self.lagrangian.blobs.x\n yBlob = self.lagrangian.blobs.y\n \n # Find the blobs that are inside the bounding box of the \n # interpolation region\n iBoundingBox = _numpy.where((xBlob > xyMin_boundingBox[0]) & \n (xBlob < xyMax_boundingBox[0]) & \n (yBlob > xyMin_boundingBox[1]) & \n (yBlob < xyMax_boundingBox[1]))[0]\n \n # Determine blobs (which are inside bounding box)\n # that are also inside the interpolation region. \n iInside = _points_inside_poly(_numpy.array([xBlob[iBoundingBox], yBlob[iBoundingBox]]).T,\n xyPolygon)\n\n #TODO: for conservation of circulation\n # Determine the circulation that is removed\n self.__totalCirculationRemoved.append(_numpy.sum(self.lagrangian.blobs.g[iBoundingBox[iInside]]))\n \n if iBoundingBox[iInside].shape[0] != 0:\n # Remove old blobs inside polygon\n self.lagrangian.blobs.removeBlobs(iBoundingBox[iInside])",
"def merge_nearby_stops(stops_list, connections_list, radius):\n\n\t# Turn list of connections into dictionary for direct access\n\tconnections_dict = {connection['from'] + \"_\" + connection['to'] : connection for connection in connections_list}\n\n\t# Counters\n\tstops_merged = 0\n\tinitial_length = len(stops_list)\n\n\t# Iterate over every stop with every other in a triangle (in reverse because we are changing it)\n\tfor i in reversed(range(0, initial_length)):\n\t\tnew_length = len(stops_list)\n\t\tfor j in reversed(range(i+1, new_length)):\n\n\t\t\t# Calculate distance between any two stops\n\t\t\tstop_1 = stops_list[i]\n\t\t\tstop_2 = stops_list[j]\n\n\t\t\tdistance = calculate_straight_distance(stop_1['lat'], stop_1['lon'], stop_2['lat'], stop_2['lon'], radius)\n\n\t\t\t# If the two stops are within 50m\n\t\t\tif distance < walking_distance:\n\n\t\t\t\t# If there is no actual transit route connecting the two, merge 2nd to 1st\n\t\t\t\tif (stops_list[i]['tag'] + \"_\" + stops_list[j]['tag'] not in connections_dict and\n\t\t\t\t\tstops_list[j]['tag'] + \"_\" + stops_list[i]['tag'] not in connections_dict):\n\n\t\t\t\t\t# Set 1st stop position to average of two\n\t\t\t\t\tstops_list[i]['lat'] = (float(stops_list[i]['lat']) + float(stops_list[j]['lat'])) /2\n\t\t\t\t\tstops_list[i]['lon'] = (float(stops_list[i]['lon']) + float(stops_list[j]['lon'])) /2\n\n\t\t\t\t\t# Add stop to merged stops\n\t\t\t\t\tstops_list[i]['merged'] = list(set(stops_list[i]['merged'] + stops_list[j]['merged']))\n\n\t\t\t\t\t# Change connections to tag of 1st stop\n\t\t\t\t\tfor connection in connections_list:\n\t\t\t\t\t\tif connection['from'] == stops_list[j]['tag']:\n\t\t\t\t\t\t\tconnection['from'] = stops_list[i]['tag']\n\n\t\t\t\t\tfor connection in connections_list:\n\t\t\t\t\t\tif connection['to'] == stops_list[j]['tag']:\n\t\t\t\t\t\t\tconnection['to'] = stops_list[i]['tag']\n\n\t\t\t\t\t# Delete the second stop\n\t\t\t\t\tdel stops_list[j]\n\n\t\t\t\t\tstops_merged = stops_merged + 1\n\n\t\tprint(\"Calculated distances for \" + str( initial_length - i + 1 ) + \"/\" + str(initial_length) + \" stops\", end=\"\\r\")\n\n\tprint(\"\\nComparison done! Merged: \" + str(stops_merged) + \" pairs of nearby stops.\")\n\t\n\treturn stops_list, connections_list",
"def find_corridor_entrances(self, list_of_corners, list_of_walls):\n list_of_corridors = CorridorList()\n list_of_lines_perpendicular = []\n for corner in list_of_corners.corner_list:\n if self.distance(corner.first_wall.wall_end, Point(0,0,self.Z_OFFSET)) < 3:\n self.create_perpendicular_walls(list_of_lines_perpendicular, corner)\n\n for line in list_of_lines_perpendicular:\n for wall in list_of_walls.wall_list:\n if wall == line[1] or wall == line[2]:\n continue\n intersect_x, intersect_y = self.line_intersection(line[0], wall)\n if intersect_x is not None:\n intersect_pt = Point(intersect_x, intersect_y, self.Z_OFFSET)\n dist_to_origin = self.distance(intersect_pt, Point(0,0,self.Z_OFFSET))\n dist_to_corner = self.distance(intersect_pt, line[0].wall_start)\n if dist_to_origin < 3:\n tempx = (line[0].wall_start.x+intersect_pt.x)/2\n tempy = (line[0].wall_start.y+intersect_pt.y)/2\n intersect_pt = Point(tempx, tempy, self.Z_OFFSET)\n dist = self.distance_line_to_point(wall.wall_start, wall.wall_end, intersect_pt)#line_intersection(intersect_pt, wall)\n dist_to_origin = self.distance(intersect_pt, Point(0,0,self.Z_OFFSET))\n if dist > 0.3 and dist_to_origin > 0.5:\n list_of_corridors.corridor_list.append(intersect_pt)\n return list_of_corridors",
"def _identify_ridge_lines(matr, max_distances, gap_thresh):\n if(len(max_distances) < matr.shape[0]):\n raise ValueError('Max_distances must have at least as many rows as matr')\n \n all_max_cols = PeakFind._boolrelextrema(matr, numpy.greater, axis=1, order=1)\n #Highest row for which there are any relative maxima\n has_relmax = numpy.where(all_max_cols.any(axis=1))[0]\n if(len(has_relmax) == 0):\n return []\n start_row = has_relmax[-1]\n #Each ridge line is a 3-tuple:\n #rows, cols,Gap number\n ridge_lines = [[[start_row],\n [col],\n 0] for col in numpy.where(all_max_cols[start_row])[0]]\n final_lines = []\n rows = numpy.arange(start_row - 1, -1, -1)\n cols = numpy.arange(0, matr.shape[1])\n for row in rows:\n this_max_cols = cols[all_max_cols[row]]\n \n #Increment gap number of each line,\n #set it to zero later if appropriate\n for line in ridge_lines:\n line[2] += 1\n \n #XXX These should always be all_max_cols[row]\n #But the order might be different. Might be an efficiency gain\n #to make sure the order is the same and avoid this iteration\n prev_ridge_cols = numpy.array([line[1][-1] for line in ridge_lines])\n #Look through every relative maximum found at current row\n #Attempt to connect them with existing ridge lines.\n for ind, col in enumerate(this_max_cols):\n \"\"\"\n If there is a previous ridge line within\n the max_distance to connect to, do so.\n Otherwise start a new one.\n \"\"\"\n line = None\n if(len(prev_ridge_cols) > 0):\n diffs = numpy.abs(col - prev_ridge_cols)\n closest = numpy.argmin(diffs)\n if diffs[closest] <= max_distances[row]:\n line = ridge_lines[closest]\n if(line is not None):\n #Found a point close enough, extend current ridge line\n line[1].append(col)\n line[0].append(row)\n line[2] = 0\n else:\n new_line = [[row],\n [col],\n 0]\n ridge_lines.append(new_line)\n \n #Remove the ridge lines with gap_number too high\n #XXX Modifying a list while iterating over it.\n #Should be safe, since we iterate backwards, but\n #still tacky.\n for ind in range(len(ridge_lines) - 1, -1, -1):\n line = ridge_lines[ind]\n if line[2] > gap_thresh:\n final_lines.append(line)\n del ridge_lines[ind]\n \n out_lines = []\n for line in (final_lines + ridge_lines):\n sortargs = numpy.array(numpy.argsort(line[0]))\n rows, cols = numpy.zeros_like(sortargs), numpy.zeros_like(sortargs)\n rows[sortargs] = line[0]\n cols[sortargs] = line[1]\n out_lines.append([rows, cols])\n \n return out_lines",
"def remove_missing_neighbours(neighbours, data):\n data_neighbours = {}\n for key in neighbours.keys():\n loc = neighbours[key]\n if data[loc[0], loc[1]] != MISSING:\n data_neighbours[key] = neighbours[key]\n return data_neighbours",
"def find_continents(self):\n \n print(\"find_continents\")\n \n continents = []\n \n for index, t in enumerate(self.land):\n self.find_neighbors(index, t, continents)\n\n continents.sort(key=lambda c:len(c), reverse = True)\n merged_continent = False\n merged = []\n\n for i, c in enumerate(continents):\n sub_continent = continents[i+1:]\n for j, d in enumerate(sub_continent):\n merged_continent = False\n for l in d:\n if ((l in continents[i]) or self.is_neighbor(l ,continents[i])):\n continents[i] = self.merge(continents[i], d)\n #print(i, continents[i])\n continents[j+1] = []\n merged_continent = True\n if (merged_continent == True):\n break\n \n self.final_continents = [c for c in continents if len(c) > 0]\n print(\"The number of continents = {}\".format(len(self.final_continents)))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Compute the correlation between a file/folder and another one in the commit TreeGraph, based on the edge value. Correlation = value of edge / max value of edge for this node
|
def compute_same_level_correlation(self, node_path):

    def compute_same_level_correlation_iteration(tree_graph, splitted_path):
        # Last path component: compute the correlation of that node within the current sub-graph
        if len(splitted_path) == 1 and splitted_path[0] in tree_graph.kids:
            self.compute_correlation(splitted_path[0], tree_graph.graph)
        # Otherwise descend one level of the TreeGraph and recurse on the remaining path components
        elif len(splitted_path) > 1 and splitted_path[0] in tree_graph.kids:
            compute_same_level_correlation_iteration(tree_graph.kids[splitted_path[0]], splitted_path[1:])

    tree_graph = self.commit_tree_graph
    splitted_path = node_path.split('\\')
    print(splitted_path)
    compute_same_level_correlation_iteration(tree_graph, splitted_path)
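
A minimal sketch of the edge-based correlation the query describes (value of an edge divided by the maximum edge value incident to the node). The networkx-style graph access and the 'number_modifications_same_commit' attribute are assumptions borrowed from the neighbouring examples, not the project's confirmed API.

import networkx as nx

def edge_based_correlation(graph: nx.Graph, node: str) -> dict:
    # Collect the weight of every edge incident to `node` (assumed attribute name).
    edge_values = {
        neighbor: graph.edges[node, neighbor].get('number_modifications_same_commit', 0)
        for neighbor in graph.neighbors(node)
    }
    max_value = max(edge_values.values(), default=0)
    if max_value == 0:
        return {}
    # Correlation = value of edge / max value of edge for this node.
    return {neighbor: value / max_value for neighbor, value in edge_values.items()}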
|
[
"def analyze_correlation_treecommit_graph(self, pairs_of_modified_files):\n\n for (node1, node2) in pairs_of_modified_files:\n \n if node1 in self.repo_files_path and node2 in self.repo_files_path:\n\n # Find common prefix\n path_prefix = os.path.commonpath([node1, node2])\n \n if len(path_prefix) > 0:\n path_prefix_split = path_prefix.split('\\\\')\n tree_commit_node_name1 = node1[len(path_prefix)+1:].split('\\\\')[0]\n tree_commit_node_name2 = node2[len(path_prefix)+1:].split('\\\\')[0]\n else:\n path_prefix_split = []\n tree_commit_node_name1 = node1[len(path_prefix):].split('\\\\')[0]\n tree_commit_node_name2 = node2[len(path_prefix):].split('\\\\')[0]\n\n # Create or update edge in TreeCommit graph\n self.commit_tree_graph.add_edge(path_prefix_split, tree_commit_node_name1, tree_commit_node_name2)",
"def compute_correlation(self, node_name, commit_graph, method='basic', alpha=0.5):\n\n number_modifications = commit_graph.nodes[node_name][\"number_modifications\"]\n neighbors_correlation = []\n\n for neighbor in commit_graph.neighbors(node_name):\n\n number_modifications_same_commit = commit_graph.edges[node_name, neighbor][\"number_modifications_same_commit\"]\n number_modifications_neighbor = commit_graph.nodes[neighbor][\"number_modifications\"]\n\n if method == 'basic':\n correlation = Correlation.Correlation.basic_correlation(number_modifications_same_commit, number_modifications)\n\n elif method == 'addition':\n\n correlation = Correlation.Correlation.addition_correlation(number_modifications_same_commit, number_modifications, number_modifications_neighbor, alpha)\n \n elif method == 'multiplication':\n\n correlation = Correlation.Correlation.multiplication_correlation(number_modifications_same_commit, number_modifications, number_modifications_neighbor, alpha)\n\n neighbors_correlation.append((neighbor, correlation, number_modifications_same_commit))\n \n\n neighbors_correlation = self.parse_neighbors_correlation(neighbors_correlation)\n\n print(f'Correlation of {node_name} (modified in {number_modifications} commits) with :')\n for i, neighbor in enumerate(neighbors_correlation):\n if i < 200:\n print(f'{neighbor[0]}:{neighbor[1]} : {neighbor[2]}% (modified {neighbor[3]} times)')\n else:\n break",
"def calc_frict_ratio(cone_maxes, donut_maxes, range_maxes, filename):\n # Areas to calculate the friction ratios.\n A_cone = config_file[\"areas\"][\"cone\"]\n A_donut = config_file[\"areas\"][\"donut\"]\n \n # Convert voltages to pressure (V -> psi)\n cone_max_f = []\n for cone_max in cone_maxes:\n lb = config_file['calibValues']['cone_m']*cone_max + \\\n config_file['calibValues']['cone_b']\n psi = lb/A_cone\n cone_max_f.append(psi)\n \n donut_max_f = []\n for donut_max in donut_maxes:\n lb = config_file['calibValues']['donut_m']*donut_max + \\\n config_file['calibValues']['donut_b']\n psi = lb/A_donut\n donut_max_f.append(psi)\n \n # Calculate friction ratio.\n friction_ratios = []\n for i in range(0, len(cone_max_f)):\n Fr = (donut_max_f[i]/cone_max_f[i])*100\n friction_ratios.append(Fr)\n\n # Convert cone max f to bar\n cone_max_bar = []\n for elem in cone_max_f:\n cone_max_bar.append(elem*0.0689476)\n\n with open(_makefilename(filename), 'w') as f:\n f.write('depth (in), fri (%), qc (bar) \\n')\n for i in range(0,len(friction_ratios)):\n f.write(str(range_maxes[i]) + ',' + str(friction_ratios[i]) + ',' + str(cone_max_bar[i]) + '\\n')\n\n return (friction_ratios, cone_max_bar)",
"def _calculate_relations(files, relation_threshold):\n get_relations.get_relations(files[PMI_FILE_PATH], relation_threshold, \n files[RELATION_FILE_PATH])",
"def max_correlation(a, b):\n\n # compute the correlation\n # approximately equal to the following, with an error on the order of 1E18\n # signal.correlate2d(a/a.sum(), b/b.sum(), mode=\"full\", boundary=\"fill\", fillvalue=0)\n return np.max(signal.fftconvolve(a/a.sum(), b[::-1,::-1]/b.sum(), mode=\"full\"))",
"def calculate_correlation(self):\n self.network.index_nodes()\n self._calculate_dist()\n pearson_correlation, pearson_pvalue = scipy.stats.pearsonr(self.dist[:,0], self.dist[:,1])\n spearman_correlation, spearman_pvalue = scipy.stats.spearmanr(self.dist[:,0], self.dist[:,1])\n return pearson_correlation, pearson_pvalue, spearman_correlation, spearman_pvalue",
"def string_correlation(node, end, s1, s2):\n if node == end:\n # necessary because we assume end node with indexes -1, -1\n return 0\n ref = _scorr(s1, s2)\n hs1 = s1[node.i:]\n hs2 = s2[node.j:]\n cur = _scorr(hs1, hs2)\n #print node, hs1, hs2, cur, ref\n h = cur/float(ref)\n return h",
"def edge_magnitude(edge_x, edge_y):\n # TODO: implement this function.\n # raise NotImplementedError\n\n edge_mag_1 = (edge_x)\n for i in range(0, len(edge_x)):\n for j in range(0,len(edge_x[i])):\n edge_mag_1[i][j] = ((edge_x[i][j]**2) + (edge_y[i][j]**2))**0.5\n\n \n \n # edge_max = np.max(edge_mag_1)\n # print(edge_max)\n # edge_mag = edge_mag_1\n \n # for i in range(0, len(edge_mag_1)):\n # for j in range(0,len(edge_mag_1[i])):\n # edge_mag[i][j] = edge_mag_1[i][j] / edge_max\n\n\n return edge_mag_1",
"def compute_confidence(self):\n pseudo_distance = self.connectivities_coarse.copy()\n pseudo_distance.data = 1./pseudo_distance.data\n connectivities_coarse_tree = minimum_spanning_tree(pseudo_distance)\n connectivities_coarse_tree.data = 1./connectivities_coarse_tree.data\n connectivities_coarse_tree_indices = [\n connectivities_coarse_tree[i].nonzero()[1]\n for i in range(connectivities_coarse_tree.shape[0])]\n # inter- and intra-cluster based confidence\n if not self._tree_based_confidence:\n total_n = self.n_neighbors * np.array(self.vc.sizes())\n logg.msg('{:>2} {:>2} {:>4} {:>4} {:>4} '\n '{:>7} {:>7} {:>7} {:>7}'\n .format('i', 'j', 'conn', 'n[i]', 'n[j]',\n 'avg', 'thresh', 'var', 'conf'), v=5)\n maximum = self.connectivities_coarse.max()\n confidence = self.connectivities_coarse.copy() # initializing\n for i in range(self.connectivities_coarse.shape[0]):\n for j in range(i+1, self.connectivities_coarse.shape[1]):\n if self.connectivities_coarse[i, j] > 0:\n minimum = min(total_n[i], total_n[j])\n average = self.connectivities_coarse[i, j] / minimum\n geom_mean = np.sqrt(total_n[i] * total_n[j])\n confidence[i, j] = self.connectivities_coarse[i, j] / geom_mean\n # confidence[i, j] = self.connectivities_coarse[i, j] / maximum\n variance = 0.0\n # variance = self.threshold * (1-self.threshold)\n # if average > self.threshold:\n # confidence[i, j] = 1\n # else:\n # confidence[i, j] = norm.cdf(average,\n # self.threshold, variance)\n logg.msg(\n '{:2} {:2} {:4} {:4} {:4} '\n '{:7.2} {:7.2} {:7.2} {:7.2}'\n .format(i, j, int(self.connectivities_coarse[i, j]),\n total_n[i], total_n[j],\n average, self.threshold, variance, confidence[i, j]), v=5)\n confidence[j, i] = confidence[i, j]\n # tree-based confidence\n else:\n median_connectivities_coarse_tree = np.median(connectivities_coarse_tree.data)\n confidence = self.connectivities_coarse.copy()\n confidence.data[self.connectivities_coarse.data >= median_connectivities_coarse_tree] = 1\n connectivities_coarse_adjusted = self.connectivities_coarse.copy()\n connectivities_coarse_adjusted.data -= median_connectivities_coarse_tree\n connectivities_coarse_adjusted.data = np.exp(connectivities_coarse_adjusted.data)\n index = self.connectivities_coarse.data < median_connectivities_coarse_tree\n confidence.data[index] = connectivities_coarse_adjusted.data[index]\n confidence_tree = self.compute_confidence_tree(\n confidence, connectivities_coarse_tree_indices)\n self.confidence = confidence\n self.confidence_tree = confidence_tree",
"def correlation(self):\n pass",
"def compute_relationship(\n v1: np.ndarray,\n v2: np.ndarray,\n v1_label: Text = 'v1',\n v2_label: Text = 'v2',\n maxlag: int = 4,\n fname: Text = '',\n verbose: bool = True) -> dict:\n # Correlation test.\n rval, pval = pearsonr(v1, v2)\n\n if verbose:\n significant = ''\n if pval < 0.05:\n significant = 'yay!!!!'\n print('r-val: {}\\np-val: {} \\t{}'.format(rval, pval, significant))\n\n # Scatter plot.\n f = plt.figure()\n sns.scatterplot(v2, v1)\n # plt.plot((min(v1), max(v2)), (max(v1), min(v2)), 'r')\n plt.plot(np.linspace(min(v2), max(v2)), np.linspace(min(v1), max(v1)), 'r')\n plt.xlabel(v2_label)\n plt.ylabel(v1_label)\n plt.show()\n if fname:\n f.savefig('{}.png'.format(fname), bbox_inches='tight')\n f.savefig('{}.pdf'.format(fname), bbox_inches='tight')\n\n # Causality test.\n causality_res = grangercausalitytests(\n np.column_stack((v1, v2)),\n maxlag=maxlag,\n verbose=verbose)\n return {'rval': rval, 'pval': pval, 'causality': causality_res}",
"def linkage_precomputed_corr():\n\n # optimal_ordering : bool, optional\n # If True, the linkage matrix will be reordered so that the distance\n # between successive leaves is minimal. This results in a more intuitive\n # tree structure when the data are visualized. defaults to False, because\n # this algorithm can be slow, particularly on large datasets [2]_. See\n # also the `optimal_leaf_ordering` function.\n corr: pd.DataFrame = load_corr_matrix('pearson', *TEST_KEY)\n condensed = squareform(corr, checks=False, force='tovector')\n condensed = 1 - condensed\n Z = linkage(condensed, method='average', optimal_ordering=True)\n dendrogram(Z, orientation='left', leaf_label_func=lambda x: corr.columns[x])\n plt.show()",
"def compare_trees(tree_file1, tree_file2, method):\n coeff, p_value, n = compare_tip_to_tip_distances(tree_file1,\n tree_file2,\n method)\n click.echo(\"Correlation coefficient: %f\" % coeff)\n click.echo(\"p-value: %f\" % p_value)\n click.echo(\"Number of overlapping tips: %d\" % n)",
"def calculate(self):\n corr=0.\n n=0\n cr_sum = 0.\n \n for i in range((self.data.shape[1]-1)):\n if self.red_glyphs.mlab_source.scalars[i] != self.hi_color:\n print(i)\n cr_mean = self.cr[:,2*i+1].mean() \n n+=1\n cr_sum+=cr_mean\n corr+=(self.data[:,1 +i]+1.)*cr_mean**2.\n corr=corr*n/cr_sum**2.-1.\n self.correlation = np.empty(shape = (self.data.shape[0], 2), dtype = 'float')\n self.correlation[:,1] = corr\n self.correlation[:,0] = self.data[:,0]\n self.corr_fig.ax.cla()\n self.corr_fig.ax.semilogx(self.data[:,0],corr)\n self.corr_fig.update = True\n return corr",
"def compute_files_that_should_be_in_commit(self, commit_hash):\n\n similar_commits = {}\n potential_nodes = set()\n\n # Get list of files modified in commit\n modified_files = []\n modified_files_dict = {}\n for commit in pydriller.Repository(self.repo_folder, single=commit_hash).traverse_commits():\n for modification in commit.modified_files:\n modified_files.append(modification.new_path)\n modified_files_dict[modification.new_path] = 1\n\n # Compute each commit similarity score\n print('Computing similarity score')\n for commit in tqdm.tqdm(pydriller.Repository(self.repo_folder).traverse_commits()):\n if commit.hash != commit_hash:\n modified_files_other_commit = []\n new_nodes = []\n similar_nodes = 0\n for modification in commit.modified_files:\n\n if modification.new_path in self.repo_files_path:\n current_path = modification.new_path\n else:\n current_path = self.retrieve_current_path(modification.new_path)\n\n if current_path is not None and current_path in modified_files_dict:\n similar_nodes += 1\n else:\n new_nodes.append(current_path)\n modified_files_other_commit.append(current_path)\n similarity = similar_nodes / max(len(modified_files), len(modified_files_other_commit))\n if similarity > 0.3:\n similar_commits[commit.hash] = (similarity, new_nodes)\n for node in new_nodes:\n if node not in potential_nodes:\n potential_nodes.add(node)\n\n # Compute score of new potential nodes\n print('Compute node scores')\n for node in tqdm.tqdm(potential_nodes):\n node_score = 0\n for _, (similarity, nodes) in similar_commits.items():\n if node in nodes:\n node_score += similarity\n node_score /= len(similar_commits)\n modified_files_dict[node] = node_score\n\n for node in self.repo_files_path:\n if node not in modified_files_dict:\n modified_files_dict[node] = 0\n\n return modified_files_dict",
"def get_rel_coverage_to_compare_region_r(r):\n\n if r[\"fraction_chromosome_covered\"]>=0.9: return 1.0\n else: return find_nearest([r[\"relative_coverage_3\"], r[\"relative_coverage_5\"]], r[\"relative_coverage_target\"])",
"def correlation(self, column_a, column_b):\n\n return self._scala.correlation(column_a, column_b)",
"def corr(x, y):\n\treturn abs(np.corrcoef(x, y)[0][1])",
"def load_target_and_cg_rdf(target_filename, cg_filename, averaging='yes'):\n\n print \"loading rdfs: \", target_filename, cg_filename\n \n d1 = np.loadtxt(target_filename)\n d2 = np.loadtxt(cg_filename)\n \n if (len(d2) < len(d1)):\n d3 = d2[:,1] - d1[:len(d2)-len(d1),2]\n d4 = np.power(d2[:,1] - d1[:len(d2)-len(d1),2], 2)\n elif (len(d2) > len(d1)): \n d3 = d2[:len(d1)-len(d2),1] - d1[:,2]\n d4 = np.power(d2[:len(d1)-len(d2),1] - d1[:,2], 2)\n else: \n d3 = d2[:,1] - d1[:,2] \n d4 = np.power(d2[:,1] - d1[:,2], 2)\n \n if averaging == 'yes': \n d3 = np.average(np.absolute(d3))\n d4 = np.average(d4)\n \n return d1, d2, d3, d4"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns a dictionary containing, for each file, a score indicating whether it should be included in a given commit.
|
def compute_files_that_should_be_in_commit(self, commit_hash):
similar_commits = {}
potential_nodes = set()
# Get list of files modified in commit
modified_files = []
modified_files_dict = {}
for commit in pydriller.Repository(self.repo_folder, single=commit_hash).traverse_commits():
for modification in commit.modified_files:
modified_files.append(modification.new_path)
modified_files_dict[modification.new_path] = 1
# Compute each commit similarity score
print('Computing similarity score')
for commit in tqdm.tqdm(pydriller.Repository(self.repo_folder).traverse_commits()):
if commit.hash != commit_hash:
modified_files_other_commit = []
new_nodes = []
similar_nodes = 0
for modification in commit.modified_files:
if modification.new_path in self.repo_files_path:
current_path = modification.new_path
else:
current_path = self.retrieve_current_path(modification.new_path)
if current_path is not None and current_path in modified_files_dict:
similar_nodes += 1
else:
new_nodes.append(current_path)
modified_files_other_commit.append(current_path)
similarity = similar_nodes / max(len(modified_files), len(modified_files_other_commit))
if similarity > 0.3:
similar_commits[commit.hash] = (similarity, new_nodes)
for node in new_nodes:
if node not in potential_nodes:
potential_nodes.add(node)
# Compute score of new potential nodes
print('Compute node scores')
for node in tqdm.tqdm(potential_nodes):
node_score = 0
for _, (similarity, nodes) in similar_commits.items():
if node in nodes:
node_score += similarity
node_score /= len(similar_commits)
modified_files_dict[node] = node_score
for node in self.repo_files_path:
if node not in modified_files_dict:
modified_files_dict[node] = 0
return modified_files_dict
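
The similarity threshold used above (0.3) relies on a simple overlap ratio between two commits; the sketch below restates that ratio on plain Python sets. It is illustrative only: the real method works on pydriller modification objects and renamed-path lookups.

def commit_similarity(files_a, files_b):
    # |shared files| / size of the larger commit, mirroring
    # similar_nodes / max(len(modified_files), len(modified_files_other_commit)).
    if not files_a or not files_b:
        return 0.0
    return len(set(files_a) & set(files_b)) / max(len(files_a), len(files_b))

commit_similarity({'a.py', 'b.py'}, {'b.py', 'c.py'})  # 0.5, above the 0.3 cut-off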
|
[
"def git_annotate_file_order(commits):\n file_commits = collections.defaultdict(list)\n\n for k, c in commits.items():\n if 'order' in c:\n for fname in c['files']:\n file_commits[fname].append((c['order'], k))\n c['file_order'] = {} # Use this as oppty to track on new field\n\n for fname, val in file_commits.items():\n for i, (order, c) in enumerate(sorted(val, key=lambda x: x[0])):\n commits[c]['file_order'][fname] = i + 1",
"def compute_metrics(self, commit_no):\n\n commit = self.commits[commit_no]\n subprocess.call([\"git\", \"-C\", self.repo, \"checkout\", commit[0]],\n stdout = subprocess.DEVNULL, stderr = subprocess.DEVNULL)\n dcmp = filecmp.dircmp(self.repo, self.dir)\n m = compare_dirs(dcmp)\n logging.debug (\"Commit %s. Files: %d, %d, %d, lines: %d, %d, %d, %d)\"\n % (commit[0], m[\"left_files\"], m[\"right_files\"], m[\"diff_files\"],\n m[\"left_lines\"], m[\"right_lines\"],\n m[\"added_lines\"], m[\"removed_lines\"]))\n m[\"total_files\"] = m[\"left_files\"] + m[\"right_files\"] + m[\"diff_files\"]\n m[\"total_lines\"] = m[\"left_lines\"] + m[\"right_lines\"] \\\n + m[\"added_lines\"] + m[\"removed_lines\"]\n m[\"commit_seq\"] = commit_no\n m[\"commit\"] = commit[0]\n m[\"date\"] = commit[1]\n return m",
"def git_annotate_file_order_by_author(commits, git_actor_dedupe_table):\n file_commits_by_author = collections.defaultdict(\n lambda: collections.defaultdict(list))\n\n for k, c in commits.items():\n if 'order' in c:\n for fname in c['files']:\n author = git_actor_dedupe_table[c['author']]['standard_actor']\n file_commits_by_author[fname][author].append((c['order'], k))\n # Use this as opportunity to tack on new field\n c['file_order_for_author'] = {}\n\n for fname, entry in file_commits_by_author.items():\n for author, val in entry.items():\n for i, (order, c) in enumerate(sorted(val, key=lambda x: x[0])):\n commits[c]['file_order_for_author'][fname] = i + 1",
"def generate_pylint_dict():\n\n # Create pylint output for all .py files\n generate_pylint_files()\n\n # Dict for returning pylint data\n pylint = {}\n\n # List to store pylint code scores for each .py file\n lint_scores = []\n\n # Find all text files storing pylint output\n file_list = glob.glob(\"pkg-source/pylint/*.txt\")\n for file in file_list:\n # Read in file contents\n with open(file, \"r\") as f:\n try:\n # Extract line that contains score (quirk of pylint)\n score_line = f.readlines()[-4]\n # Check that score line exists at all\n if score_line:\n # Find first occurrence of score\n score_match = re.search(r\"[\\d]+.\\d\\d?\", score_line)\n # Check that score number exists\n if score_match:\n score = score_match.group(0)\n lint_scores.append(float(score))\n # TODO: Make more robust. Why do index errors occur?\n except IndexError:\n continue\n\n # Take average of lint scores\n # TODO: Why is no files found sometimes?\n if len(lint_scores) == 0:\n pylint[\"average_lint_score\"] = \"No files found\"\n else:\n average_lint_score = sum(lint_scores) / len(lint_scores)\n pylint[\"average_lint_score\"] = round(average_lint_score, 2)\n\n return pylint",
"def get_statistics_dict() -> Dict[str, Tuple[int, int]]:\n\n scoreboard = defaultdict(lambda: (0, 0))\n\n with open(CSV_FILE, 'r') as college_quotes:\n\n for quote in csv.reader(college_quotes):\n for index in range(1, len(quote), 2):\n for author in quote[index].split(' & '):\n quotes, memes = scoreboard[author]\n scoreboard[author] = quotes + 1, memes\n\n for author in Path(MEMES_PATH).iterdir():\n quotes, memes = scoreboard[author.stem.title()]\n scoreboard[author.stem.title()] = quotes, len(list(author.iterdir()))\n\n return scoreboard",
"def diff_files(self):\n files = dict()\n # get the git diff stat without the last summary line\n lines = self.repo.git.diff('--stat', '--no-color').splitlines()[:-1]\n for line in lines:\n match = re.match(r' (.*\\S) *\\| *([0-9]+) ', line)\n fname = match[1]\n #changes = int(match[2])\n diff = self.get_diff_raw(fname)\n\n files[fname] = Patch(\n files=[fname],\n size=len(diff),\n hunks=diff.count('@@')/2,\n test_size=False)\n\n return files",
"def calculate_total_contribution(self):\n all_users = []\n\n for file_id in self.file_revs:\n for user in self.list_revisions_user(file_id):\n all_users.append(user[0])\n\n all_users_contribution = {}\n for user in all_users:\n if user not in all_users_contribution:\n all_users_contribution[user] = 1\n else:\n all_users_contribution[user] += 1\n\n return all_users_contribution",
"def parse_file(filepath):\n contributions = defaultdict(int)\n\n with open(filepath) as f:\n for line in f:\n line_output = _parse_line(line)\n if line_output is not None:\n date, count = line_output\n contributions[date] += count\n return contributions",
"def get_map_score(name: str, files_by_challenge: list, stats_directory: str) -> list:\n scenario_scores = []\n for scenario in files_by_challenge[name]:\n try:\n scenario_directory = os.path.join(stats_directory, scenario)\n summary_score = SessionStat.from_file(scenario_directory).summary.score\n scenario_scores.append(summary_score)\n except:\n continue\n\n return scenario_scores",
"def calculate_file_contribution(self, file_id):\n all_users = []\n\n for file_revision in self.file_revs[file_id]:\n\n all_users.append(file_revision['lastModifyingUser']['displayName'])\n\n all_users_contribution = {}\n for user in all_users:\n if user not in all_users_contribution:\n all_users_contribution[user] = 1\n else:\n all_users_contribution[user] += 1\n\n return all_users_contribution",
"def __classify_files(self, files):\n\n ret = {}\n try:\n for lang, conf in self.configuration_dict['langs'].items():\n ret[lang] = set()\n for pattern in conf['include']:\n ret[lang].update(\n [f for f in files if fnmatch.fnmatch(f, pattern)])\n if not ret[lang]:\n del ret[lang]\n\n except KeyError as e:\n raise AutoLintConfError((\"Configuration file, key %s not found\"\n % e.args[0]))\n\n return ret",
"def files_in_commit(commit: Optional[str] = None) -> Iterable[str]:\n cmd = ['git', 'diff', '--name-status']\n if commit is not None:\n cmd.append(f'{commit}..{commit}~1')\n\n output = subprocess.check_output(cmd)\n\n for line in output.decode('utf-8').split('\\n'):\n if line == '':\n break\n\n path = line.split('\\t')[-1]\n\n if not path.endswith('.py'):\n continue\n\n abs_path = os.path.abspath(path)\n\n yield abs_path",
"def get_changed_files_in_commit(self, commit_hash):\r\n output = self._execute_command(get_changed_files_in_commit.format(commit_id=commit_hash))\r\n return re.match(r\"(?P<content>.*)\\ncommit {}\".format(commit_hash), output, re.DOTALL).group('content').splitlines()",
"def getChangedFilesForCommits(self):\n\t\t\"\"\"Returns [{'time':time, 'files':[filenames,]}]\"\"\"\n\t\trequestString = \"https://api.github.com/repos/{}/{}/compare\"\n\t\trequestString = requestString.format(self.user, self.repo)\n\t\tcommits = self.getCommits()\n\t\tchanges = []\n\t\tfor commitIndex in range(len(commits)):\n\t\t\tif commitIndex == 0:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tcurrent = commits[commitIndex]['sha']\n\t\t\t\tprevious = commits[commitIndex - 1]['sha']\n\t\t\t\tcommitTime = parseGitTimeString(commits[commitIndex]['commit']['committer']['date'])\n\t\t\t\tcompareString = \"/{}...{}\"\n\t\t\t\tcompareString = compareString.format(previous, current)\n\t\t\t\ttempRequestString = requestString + compareString\n\t\t\t\tresponse = urllib.request.urlopen(tempRequestString)\n\t\t\t\tdata = response.read().decode('utf-8')\n\t\t\t\tdata = json.loads(data)\n\t\t\t\tfiles = data['files']\n\t\t\t\t#this right here is wrong... should be commitsha:{time:124523523,files:changed}\n\t\t\t\tfilesChanged = {'time': commitTime, 'files': [file['filename'] for file in files if file['status'] == 'modified']}\n\t\t\t\tchanges.append(filesChanged)\n\t\treturn changes",
"def get_comments(rc_file, submissions):\n comments = {}\n with bz2.open(rc_file, 'rt', encoding=\"utf-8\") as f:\n for line in f:\n try:\n comment = json.loads(line)\n sid = get_linked_submission_id(comment)\n if sid in submissions.keys():\n comments[get_comment_id(comment)] = comment\n except Exception:\n traceback.print_exc()\n pass\n return comments",
"def _ria_matching(self, test_documents_paths, original_test_documents_texts, sents, targ_vecs, targs):\n score_dict = {}\n for file in test_documents_paths:\n with open(file, 'r', encoding='utf-8') as test_doc_file:\n line_cnt = 0\n for line in test_doc_file:\n line_cnt += 1\n line_parts = line.strip().split('\\t')\n for i in range(0, len(line_parts)):\n part = DataLoader.filter_text(line_parts[i])\n if part != '':\n top_matches = self._w2v.get_most_similar(sents, targ_vecs, part,\n RIA.NUMBER_OF_TARGETS - 1, 0.4)\n for match in top_matches:\n key = targs[match[1]]\n filename = file[file.rfind('/')+1:] # For excel output\n original_line_parts = original_test_documents_texts[file][line_cnt-1].split('\\t')\n original_part = original_line_parts[i]\n original_context = original_test_documents_texts[file][line_cnt-1]\n if line_cnt > 1:\n original_context = original_test_documents_texts[file][line_cnt-2] + '\\n' +\\\n original_context\n if line_cnt < len(original_test_documents_texts[file]):\n original_context += ('\\n' + original_test_documents_texts[file][line_cnt])\n if key in score_dict:\n score_dict[key].add((match[0], (part, filename, line_cnt, original_part,\n original_context)))\n else:\n score_dict[key] = {(match[0], (part, filename, line_cnt, original_part,\n original_context))}\n return score_dict",
"def load_runs(fn):\n runs = defaultdict(dict)\n with open(fn, \"r\", encoding=\"utf-8\") as f:\n for line in f:\n qid, _, docid, _, score, _ = line.strip().split()\n runs[qid][docid] = float(score)\n return runs",
"def get_chain_scores(wd, projections_list):\n print(f\"Reading chain scores\")\n proj_to_chain_score = {}\n chain_scores_file = os.path.join(wd, ORTH_SCORES)\n f = open(chain_scores_file, \"r\")\n f.__next__()\n for line in f:\n line_data = line.rstrip().split(\"\\t\")\n trans = line_data[0]\n chain = line_data[1]\n score = line_data[2]\n proj_ = f\"{trans}.{chain}\"\n if proj_ not in projections_list:\n continue\n proj_to_chain_score[proj_] = score\n f.close()\n return proj_to_chain_score",
"def CheckAllFiles(files, aggregate):\n fileModules = {} if not aggregate else set([])\n for file in files:\n modules = ModuleFinder.GetModules(file)\n if not aggregate:\n fileModules[file] = modules\n else:\n fileModules.update(modules)\n return fileModules"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create a dataframe with files as rows and commits as columns. The value in a cell is 0 if the file was not modified in the commit, and 1 otherwise.
|
def create_commits_dataframe(self):
    files_commits = {}
    current_length = 0
    columns = []
    pbar = tqdm.tqdm(total=self.total_commits)
    for commit in self.repository_mining.traverse_commits():
        # One column per commit, in traversal order
        current_length += 1
        columns.append(commit.hash)
        for modification in commit.modified_files:
            # Map the modified file to its current path (it may have been renamed since)
            if modification.new_path in self.repo_files_path:
                current_path = modification.new_path
            else:
                current_path = self.retrieve_current_path(modification.new_path)
            if current_path is not None:
                if current_path in files_commits:
                    # Pad with 0 for the commits that did not touch this file, then mark this one
                    while len(files_commits[current_path]) < current_length - 1:
                        files_commits[current_path].append(0)
                    files_commits[current_path].append(1)
                else:
                    # First time the file is seen: zeros for all previous commits, 1 for this one
                    files_commits[current_path] = [0 for _ in range(current_length-1)]
                    files_commits[current_path].append(1)
        pbar.update(1)
    pbar.close()
    dataframe_list = []
    index = []
    for key, value in files_commits.items():
        # Pad trailing zeros so every row has exactly one entry per commit
        if len(value) < current_length:
            while len(files_commits[key]) < current_length:
                files_commits[key].append(0)
        index.append(key)
        dataframe_list.append(value)
    return pd.DataFrame(dataframe_list, index=index, columns=columns)
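
As a quick illustration of the output shape, the hand-built frame below has one row per file and one column per commit hash, with 1 marking that the file was modified in that commit. File paths and hashes are hypothetical placeholders, not values from the source.

import pandas as pd

example = pd.DataFrame(
    [[1, 0, 1],
     [0, 1, 0]],
    index=['src/app.py', 'README.md'],       # files (hypothetical paths)
    columns=['c1f2e3', '9a8b7c', 'd4e5f6'],  # commit hashes (hypothetical)
)
print(example.loc['src/app.py'])             # commits that touched src/app.py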
|
[
"def create_commits_dataframe_lines(self):\n\n columns = []\n\n pbar = tqdm.tqdm(total=self.total_commits)\n for commit in self.repository_mining.traverse_commits():\n\n columns.append(commit.hash)\n\n pbar.update(1)\n pbar.close()\n\n\n dataframe_list = []\n index = []\n\n\n cwd = os.getcwd()\n os.chdir(self.repo_folder)\n\n\n # Print analyzing all the lines of the repo\n print('Print analyzing all the lines of the repo')\n file_lines = []\n \n\n for file_path in tqdm.tqdm(self.repo_files_path):\n\n # Get path to file and count number of lines\n complete_file_path = self.repo_folder + '\\\\' + file_path\n linenumber = self.get_file_number_of_lines(complete_file_path)\n\n for i in range(1, linenumber):\n file_lines.append((file_path, i))\n\n line_to_commits = {}\n with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor:\n future_to_line = {executor.submit(self.analyze_line, file_line): file_line for file_line in file_lines}\n\n pbar = tqdm.tqdm(total=len(file_lines))\n for future in concurrent.futures.as_completed(future_to_line):\n file_line = future_to_line[future]\n try:\n \n modified_in_commits = future.result()\n modified_in_commits = [commit[1:-1] for commit in modified_in_commits]\n index.append(f'{file_line[0]}:{file_line[1]}')\n file_line_commits = []\n for commit in columns:\n if commit in modified_in_commits:\n file_line_commits.append(1)\n else:\n file_line_commits.append(0)\n dataframe_list.append(file_line_commits)\n except Exception as exc:\n print(f'Error during execution : {exc}')\n pbar.update(1)\n pbar.close()\n\n\n os.chdir(cwd)\n\n return pd.DataFrame(dataframe_list, index=index, columns=columns)",
"def create_commits_dataframe_functions(self):\n\n columns = []\n\n pbar = tqdm.tqdm(total=self.total_commits)\n for commit in self.repository_mining.traverse_commits():\n\n columns.append(commit.hash)\n\n pbar.update(1)\n pbar.close()\n\n\n dataframe_list = []\n index = []\n\n\n cwd = os.getcwd()\n os.chdir(self.repo_folder)\n\n with open('./gitattributes', 'a') as f:\n f.write('*.py diff=python\\n')\n\n print(os.listdir('./'))\n \n\n # Print analyzing all the lines of the repo\n print('Print analyzing all the lines of the repo')\n file_methods = []\n \n\n for file_path in tqdm.tqdm(self.repo_files_path):\n\n if file_path[-3:] == '.py':\n\n print(file_path)\n # Get path to file and count number of lines\n complete_file_path = self.repo_folder + '\\\\' + file_path\n methods = self.find_methods_in_python_file(complete_file_path)\n\n for method in methods:\n file_methods.append((file_path, method))\n\n with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor:\n future_to_method = {executor.submit(self.analyze_method, file_method): file_method for file_method in file_methods}\n\n pbar = tqdm.tqdm(total=len(file_methods))\n for future in concurrent.futures.as_completed(future_to_method):\n file_method = future_to_method[future]\n try:\n \n modified_in_commits = future.result()\n modified_in_commits = [commit[1:-1] for commit in modified_in_commits]\n row_name = f'{file_method[0]}:{file_method[1]}'\n if row_name not in index:\n index.append(f'{file_method[0]}:{file_method[1]}')\n file_method_commits = []\n for commit in columns:\n if commit in modified_in_commits:\n file_method_commits.append(1)\n else:\n file_method_commits.append(0)\n dataframe_list.append(file_method_commits)\n except Exception as exc:\n print(f'Error during execution : {exc}')\n pbar.update(1)\n pbar.close()\n\n\n os.chdir(cwd)\n\n return pd.DataFrame(dataframe_list, index=index, columns=columns)",
"def file_detail(self, rev='HEAD', committer=True, ignore_globs=None, include_globs=None):\n\n df = None\n\n for repo in self.repos:\n try:\n if df is None:\n df = repo.file_detail(ignore_globs=ignore_globs, include_globs=include_globs, committer=committer, rev=rev)\n df['repository'] = repo.repo_name\n else:\n chunk = repo.file_detail(ignore_globs=ignore_globs, include_globs=include_globs, committer=committer, rev=rev)\n chunk['repository'] = repo.repo_name\n df = df.append(chunk)\n except GitCommandError:\n print('Warning! Repo: %s couldnt be inspected' % (repo, ))\n\n df = df.reset_index(level=-1)\n df = df.set_index(['file', 'repository'])\n return df",
"def df_from_files(self):\n print('Creating dataframe...')\n num = len([name for name in os.listdir(self.raw) if not name[0] == '.'])\n files = os.path.join(self.raw, '~.info.json') # This is a weird hack\n files = files.replace('~', '{:05d}') # It allows path joining to work on Windows\n data = [json.load(open(files.format(i))) for i in range(1, num + 1)]\n\n columns = ['formats', 'tags', 'categories', 'thumbnails']\n lists = [[], [], [], []]\n deletes = {k: v for k, v in zip(columns, lists)}\n for dt in data:\n for col, ls in deletes.items():\n ls.append(dt[col])\n del dt[col]\n\n self.df = pd.DataFrame(data)\n self.df['upload_date'] = pd.to_datetime(self.df['upload_date'], format='%Y%m%d')\n self.df.to_csv(os.path.join(self.ran, 'df.csv'))\n\n self.tags = deletes['tags']\n pickle.dump(self.tags, open(os.path.join(self.ran, 'tags.txt'), 'wb'))",
"def coverage(self):\n\n df = pd.DataFrame(columns=['filename', 'lines_covered', 'total_lines', 'coverage', 'repository'])\n\n for repo in self.repos:\n try:\n cov = repo.coverage()\n cov['repository'] = repo.repo_name\n df = df.append(cov)\n except GitCommandError:\n print('Warning! Repo: %s seems to not have coverage' % (repo, ))\n\n df.reset_index()\n\n return df",
"def is_bare(self):\n\n ds = [[x.repo_name, x.is_bare()] for x in self.repos]\n df = pd.DataFrame(ds, columns=['repository', 'is_bare'])\n return df",
"def file_change_history(self, branch='master', limit=None, days=None, ignore_globs=None, include_globs=None):\n\n if limit is not None:\n limit = int(limit / len(self.repo_dirs))\n\n df = pd.DataFrame(columns=['repository', 'date', 'author', 'committer', 'message', 'rev', 'filename', 'insertions', 'deletions'])\n\n for repo in self.repos:\n try:\n ch = repo.file_change_history(\n branch,\n limit=limit,\n days=days,\n ignore_globs=ignore_globs,\n include_globs=include_globs\n )\n ch['repository'] = repo.repo_name\n df = df.append(ch)\n except GitCommandError:\n print('Warning! Repo: %s seems to not have the branch: %s' % (repo, branch))\n\n df.reset_index()\n\n return df",
"def get_local_state(self):\n try:\n diff = self.repo.index.diff\n unstaged_diffs = [d.change_type for d in diff(None)]\n staged_diffs = [d.change_type for d in diff('HEAD')]\n except BadName:\n # Git repo has been initialised but has no commits yet.\n self.has_commits = False\n return\n self.has_commits = True\n self.has_untracked_files = bool(self.repo.untracked_files)\n self.has_new_files = 'D' in staged_diffs\n self.has_unstaged_modifications = 'M' in unstaged_diffs\n self.has_staged_modifications = 'M' in staged_diffs\n self.has_renamed_files = 'R100' in staged_diffs",
"def synthesize_cvs_commit_ids(self):\n\n rows = self.db.query(self.db.rewrite_sql(\"SELECT count(*) FROM checkins WHERE commitid IS NULL\"), []);\n count = rows[0][0]\n if (count == 0):\n return\n\n print(\"Updating \" + str(count) + \" legacy CVS entries\")\n select = self.db.rewrite_sql(\"SELECT id, ci_when, whoid, repositoryid, branchid, descid FROM checkins WHERE commitid IS NULL ORDER BY repositoryid, branchid, whoid, ci_when LIMIT 100000\")\n rows = self.db.query(select, [])\n\n i = 0\n commitid = 0\n last_row = [0, 0, 0, 0, 0, 0]\n while len(rows) > 0:\n cursor = self.db.conn.cursor()\n for row in rows:\n if not self.are_rows_in_same_commit(row, last_row):\n cursor.execute(\"INSERT INTO commitids (hash, co_when, authorid, committerid) VALUES (%s, %s, %s, %s)\", [\"s\" + str(time.time()) + str(i), row[1], row[2], row[2]])\n commitid = cursor.lastrowid\n cursor.execute(self.db.rewrite_sql(\"UPDATE checkins SET commitid=%s WHERE id=%s\"), [commitid, row[0]])\n i = i + 1\n last_row = row\n\n cursor.close()\n self.db.conn.commit()\n self.db.conn.begin()\n print(\" Updated \" + str(i) + \" / \" + str(count))\n rows = self.db.query(select, []);\n cursor.close()\n self.db.conn.commit()\n print(\"OK: Converted CVS legacy entries\")",
"def commit_history(self, branch, limit=None, days=None, ignore_globs=None, include_globs=None):\n\n if limit is not None:\n limit = int(limit / len(self.repo_dirs))\n\n df = pd.DataFrame(columns=['author', 'committer', 'message', 'lines', 'insertions', 'deletions', 'net'])\n\n for repo in self.repos:\n try:\n ch = repo.commit_history(branch, limit=limit, days=days, ignore_globs=ignore_globs, include_globs=include_globs)\n ch['repository'] = repo.repo_name\n df = df.append(ch)\n except GitCommandError:\n print('Warning! Repo: %s seems to not have the branch: %s' % (repo, branch))\n\n df.reset_index()\n\n return df",
"def files_in_commit(commit: Optional[str] = None) -> Iterable[str]:\n cmd = ['git', 'diff', '--name-status']\n if commit is not None:\n cmd.append(f'{commit}..{commit}~1')\n\n output = subprocess.check_output(cmd)\n\n for line in output.decode('utf-8').split('\\n'):\n if line == '':\n break\n\n path = line.split('\\t')[-1]\n\n if not path.endswith('.py'):\n continue\n\n abs_path = os.path.abspath(path)\n\n yield abs_path",
"def compute_files_that_should_be_in_commit(self, commit_hash):\n\n similar_commits = {}\n potential_nodes = set()\n\n # Get list of files modified in commit\n modified_files = []\n modified_files_dict = {}\n for commit in pydriller.Repository(self.repo_folder, single=commit_hash).traverse_commits():\n for modification in commit.modified_files:\n modified_files.append(modification.new_path)\n modified_files_dict[modification.new_path] = 1\n\n # Compute each commit similarity score\n print('Computing similarity score')\n for commit in tqdm.tqdm(pydriller.Repository(self.repo_folder).traverse_commits()):\n if commit.hash != commit_hash:\n modified_files_other_commit = []\n new_nodes = []\n similar_nodes = 0\n for modification in commit.modified_files:\n\n if modification.new_path in self.repo_files_path:\n current_path = modification.new_path\n else:\n current_path = self.retrieve_current_path(modification.new_path)\n\n if current_path is not None and current_path in modified_files_dict:\n similar_nodes += 1\n else:\n new_nodes.append(current_path)\n modified_files_other_commit.append(current_path)\n similarity = similar_nodes / max(len(modified_files), len(modified_files_other_commit))\n if similarity > 0.3:\n similar_commits[commit.hash] = (similarity, new_nodes)\n for node in new_nodes:\n if node not in potential_nodes:\n potential_nodes.add(node)\n\n # Compute score of new potential nodes\n print('Compute node scores')\n for node in tqdm.tqdm(potential_nodes):\n node_score = 0\n for _, (similarity, nodes) in similar_commits.items():\n if node in nodes:\n node_score += similarity\n node_score /= len(similar_commits)\n modified_files_dict[node] = node_score\n\n for node in self.repo_files_path:\n if node not in modified_files_dict:\n modified_files_dict[node] = 0\n\n return modified_files_dict",
"def repo_information(self):\n\n data = [[repo.git_dir,\n repo.repo.branches,\n repo.repo.bare,\n repo.repo.remotes,\n repo.repo.description,\n repo.repo.references,\n repo.repo.heads,\n repo.repo.submodules,\n repo.repo.tags,\n repo.repo.active_branch] for repo in self.repos]\n\n df = pd.DataFrame(data, columns=[\n 'local_directory',\n 'branches',\n 'bare',\n 'remotes',\n 'description',\n 'references',\n 'heads',\n 'submodules',\n 'tags',\n 'active_branch'\n ])\n\n return df",
"def has_coverage(self):\n\n ds = [[x.repo_name, x.has_coverage()] for x in self.repos]\n df = pd.DataFrame(ds, columns=['repository', 'has_coverage'])\n return df",
"def commit_to_csv(commit, csv_filename):\n repo.git_dir\n data = (commit.tree / csv_filename).data_stream.read()\n dialect = csv.Sniffer().sniff(StringIO(unicode(data)).read(1024))\n data = data.splitlines()\n for n, row in enumerate(data):\n if n == 0:\n data[n] = \"ID\" + dialect.delimiter + row\n else: \n data[n] = str(n) + dialect.delimiter + row\n data = \"\\n\".join(data)\n csv_out = csv.DictReader(StringIO(unicode(data), newline=None), dialect=dialect)\n return csv_out",
"def get_commit_record(repo_path,branches_names): \n commits = []\n commit_record = [] \n \n if 'master' in branches_names:\n del branches_names[branches_names.index('master')]\n branches_names.insert(0,'master') \n \n for b in branches_names: \n \n s = subprocess.check_output(\"cd %s; git checkout %s; git log \" % (repo_path,b), shell=True)\n r = re.compile(\"commit (.*?)\\n.*?Author: (.*?)\\n.*?Date:(.*?)\\n\\n(.*?)\\n\", re.M+re.S+re.U+re.I)\n matches = r.findall(s)\n for m in matches:\n\t \n\t l = m[2][3:len(m[2])-6] # m[2] contains date and time of commit log\n\t \n\t time = datetime.datetime.strptime(l, '%a %b %d %H:%M:%S %Y') # parses datetime string ('l' here) according to format\n\t \n\t commits.append(dict(commit_hash=m[0].strip(), author=m[1].strip(), datetime=time, message=m[3].strip(),branches = b.strip()))\n \n if not commit_record:\n commit_record = commits + commit_record #concatenate bcz comit_record is empty\n \n else: \n\t for t in commits: # comapare commit hash to avoid repitition of same commit log\n\t for j in commit_record:\n\t\t if (t ['commit_hash'] != j['commit_hash']):\n\t\t if j == commit_record[-1]: \n\t\t\t commit_record.append(t)\n\t else:\n\t\t break\n commit_record = sorted(commit_record, key=operator.itemgetter('datetime'), reverse = True) # sort commit record according to date and time\t\n \n \n return commit_record",
"def main(git_log):\n df = pd.read_csv(git_log, sep = '|', names = ['commit', 'message', 'author', 'email'])\n df['area'] = df['message'].apply(define_area)\n df['message'] = df['message'].apply(delete_prefix)\n\n # Split commits by areas\n core = df[df['area']==Area.core.value]\n tests = df[df['area']==Area.tests.value]\n build = df[df['area']==Area.build.value]\n apps = df[df['area']==Area.apps.value]\n docs = df[df['area']==Area.docs.value]\n other = df[df['area'].isna()]\n\n # Define individual contributors\n contributors = df.groupby(['author', 'email'])\n contributors = list(contributors.groups.keys())\n\n with open('release-notes.md', 'w') as f:\n f.write('# Release Notes\\n')\n\n f.write('\\n## API / ABI / Integration Changes\\n')\n f.write('\\n**API/ABI version: 1.x.**\\n')\n\n f.write('\\n## New Features and Improvements\\n')\n f.write('\\n## Important Bug Fixes\\n')\n f.write('\\n## Build\\n')\n f.write('\\n## Documentation\\n')\n\n f.write('\\n## Contributors\\n')\n for name, email in contributors:\n f.write(f'\\n{name} <{email}>')\n f.write('\\n')\n\n f.write('\\n## Changelog\\n')\n f.write('\\n<details><summary>Click to expand/collapse</summary>')\n f.write('\\n<p>')\n f.write('\\n')\n\n if not core.empty:\n f.write('\\n### Core Functionality')\n write_into_changelog(core, f)\n\n if not tests.empty:\n f.write('\\n### Unit Tests')\n write_into_changelog(tests, f)\n\n if not build.empty:\n f.write('\\n### Build Scripts (CMake, etc.)')\n write_into_changelog(build, f)\n\n if not apps.empty:\n f.write('\\n### Sample Applications')\n write_into_changelog(apps, f)\n\n if not docs.empty:\n f.write('\\n### Documentation')\n write_into_changelog(docs, f)\n\n if not other.empty:\n f.write('\\n### Other')\n write_into_changelog(other, f)\n\n f.write('\\n</p>')\n f.write('\\n</details>')",
"def blame(self, committer=True, by='repository', ignore_globs=None, include_globs=None):\n\n df = None\n\n for repo in self.repos:\n try:\n if df is None:\n df = repo.blame(committer=committer, by=by, ignore_globs=ignore_globs, include_globs=include_globs)\n else:\n df = df.append(repo.blame(committer=committer, by=by, ignore_globs=ignore_globs, include_globs=include_globs))\n except GitCommandError as err:\n print('Warning! Repo: %s couldnt be blamed' % (repo, ))\n pass\n\n df = df.reset_index(level=1)\n df = df.reset_index(level=1)\n if committer:\n if by == 'repository':\n df = df.groupby('committer').agg({'loc': np.sum})\n elif by == 'file':\n df = df.groupby(['committer', 'file']).agg({'loc': np.sum})\n else:\n if by == 'repository':\n df = df.groupby('author').agg({'loc': np.sum})\n elif by == 'file':\n df = df.groupby(['author', 'file']).agg({'loc': np.sum})\n\n df = df.sort_values(by=['loc'], ascending=False)\n\n return df",
"def process_commit_files_unfiltered(c, verbose=False):\n\n files = []\n # for p in c.parents: # iterate through each parent\n if len(c.parents) > 0:\n p = c.parents[0]\n for d in c.diff(p, create_patch=False):\n if False: # verbose:\n print\n print 'A:', d.a_blob,\n if isValidBlob(d.a_blob):\n print d.a_blob.path\n print 'B:', d.b_blob,\n if isValidBlob(d.b_blob):\n print d.b_blob.path\n sys.stdout.flush()\n if not isValidBlob(d.a_blob):\n if verbose:\n print 'Delete'\n continue\n elif not isValidBlob(d.b_blob):\n if verbose:\n print 'Add A'\n files.append(d.a_blob.path)\n elif (isValidBlob(d.a_blob) and isValidBlob(d.b_blob)\n and d.b_blob.path.endswith('.py')):\n files.append(d.b_blob.path)\n elif len(c.parents) == 0:\n # inaugural commit, so can't use diff\n files = [b for b in get_all_blob_paths(c.tree)]\n return files"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Same as create_commits_dataframe() but with lines as rows instead of files.
|
def create_commits_dataframe_lines(self):
    # Collect every commit hash of the repository; these become the dataframe columns.
    columns = []
    pbar = tqdm.tqdm(total=self.total_commits)
    for commit in self.repository_mining.traverse_commits():
        columns.append(commit.hash)
        pbar.update(1)
    pbar.close()

    dataframe_list = []
    index = []

    cwd = os.getcwd()
    os.chdir(self.repo_folder)

    print('Analyzing all the lines of the repo')
    file_lines = []
    for file_path in tqdm.tqdm(self.repo_files_path):
        # Get the path to the file and count its number of lines.
        complete_file_path = os.path.join(self.repo_folder, file_path)
        linenumber = self.get_file_number_of_lines(complete_file_path)
        # Lines are 1-indexed, so include the last line of the file as well.
        for i in range(1, linenumber + 1):
            file_lines.append((file_path, i))

    with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor:
        future_to_line = {executor.submit(self.analyze_line, file_line): file_line for file_line in file_lines}
        pbar = tqdm.tqdm(total=len(file_lines))
        for future in concurrent.futures.as_completed(future_to_line):
            file_line = future_to_line[future]
            try:
                # analyze_line returns the hashes of the commits that touched this line,
                # each wrapped in quotes, hence the [1:-1] slice.
                modified_in_commits = future.result()
                modified_in_commits = [commit[1:-1] for commit in modified_in_commits]
                index.append(f'{file_line[0]}:{file_line[1]}')
                file_line_commits = [1 if commit in modified_in_commits else 0 for commit in columns]
                dataframe_list.append(file_line_commits)
            except Exception as exc:
                print(f'Error during execution: {exc}')
            pbar.update(1)
        pbar.close()

    os.chdir(cwd)
    return pd.DataFrame(dataframe_list, index=index, columns=columns)
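
The per-line history lookup is delegated to self.analyze_line, which is not included in this record. A minimal sketch of what such a helper might look like, assuming it shells out to `git log -L` and returns quote-wrapped hashes (which would explain the [1:-1] slice above); the method name and command are assumptions, not the original implementation:

import re
import subprocess

def analyze_line(self, file_line):
    """Return the hashes of the commits that modified a given (file, line) pair.

    Hashes come back wrapped in double quotes, matching the [1:-1] stripping done
    by the caller. Assumes the current working directory is the repository root.
    """
    file_path, line_number = file_line
    # `git log -L start,end:file` follows the history of a line range.
    result = subprocess.run(
        ['git', 'log', '-L', f'{line_number},{line_number}:{file_path}', '--pretty=format:"%H"'],
        capture_output=True, text=True
    )
    # Keep only the quoted 40-character hashes and ignore the patch text.
    return re.findall(r'"[0-9a-f]{40}"', result.stdout)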
|
[
"def create_commits_dataframe(self):\n\n files_commits = {}\n current_length = 0\n columns = []\n\n pbar = tqdm.tqdm(total=self.total_commits)\n for commit in self.repository_mining.traverse_commits():\n\n current_length += 1\n columns.append(commit.hash)\n\n for modification in commit.modified_files:\n\n if modification.new_path in self.repo_files_path:\n current_path = modification.new_path\n else:\n current_path = self.retrieve_current_path(modification.new_path)\n \n if current_path is not None:\n\n if current_path in files_commits:\n\n while len(files_commits[current_path]) < current_length - 1:\n files_commits[current_path].append(0)\n files_commits[current_path].append(1)\n \n else:\n files_commits[current_path] = [0 for _ in range(current_length-1)]\n files_commits[current_path].append(1)\n\n pbar.update(1)\n pbar.close()\n\n dataframe_list = []\n index = []\n for key, value in files_commits.items():\n\n if len(value) < current_length:\n\n while len(files_commits[key]) < current_length:\n files_commits[key].append(0)\n\n index.append(key)\n dataframe_list.append(value)\n\n return pd.DataFrame(dataframe_list, index=index, columns=columns)",
"def create_commits_dataframe_functions(self):\n\n columns = []\n\n pbar = tqdm.tqdm(total=self.total_commits)\n for commit in self.repository_mining.traverse_commits():\n\n columns.append(commit.hash)\n\n pbar.update(1)\n pbar.close()\n\n\n dataframe_list = []\n index = []\n\n\n cwd = os.getcwd()\n os.chdir(self.repo_folder)\n\n with open('./gitattributes', 'a') as f:\n f.write('*.py diff=python\\n')\n\n print(os.listdir('./'))\n \n\n # Print analyzing all the lines of the repo\n print('Print analyzing all the lines of the repo')\n file_methods = []\n \n\n for file_path in tqdm.tqdm(self.repo_files_path):\n\n if file_path[-3:] == '.py':\n\n print(file_path)\n # Get path to file and count number of lines\n complete_file_path = self.repo_folder + '\\\\' + file_path\n methods = self.find_methods_in_python_file(complete_file_path)\n\n for method in methods:\n file_methods.append((file_path, method))\n\n with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor:\n future_to_method = {executor.submit(self.analyze_method, file_method): file_method for file_method in file_methods}\n\n pbar = tqdm.tqdm(total=len(file_methods))\n for future in concurrent.futures.as_completed(future_to_method):\n file_method = future_to_method[future]\n try:\n \n modified_in_commits = future.result()\n modified_in_commits = [commit[1:-1] for commit in modified_in_commits]\n row_name = f'{file_method[0]}:{file_method[1]}'\n if row_name not in index:\n index.append(f'{file_method[0]}:{file_method[1]}')\n file_method_commits = []\n for commit in columns:\n if commit in modified_in_commits:\n file_method_commits.append(1)\n else:\n file_method_commits.append(0)\n dataframe_list.append(file_method_commits)\n except Exception as exc:\n print(f'Error during execution : {exc}')\n pbar.update(1)\n pbar.close()\n\n\n os.chdir(cwd)\n\n return pd.DataFrame(dataframe_list, index=index, columns=columns)",
"def commit_history(self, branch, limit=None, days=None, ignore_globs=None, include_globs=None):\n\n if limit is not None:\n limit = int(limit / len(self.repo_dirs))\n\n df = pd.DataFrame(columns=['author', 'committer', 'message', 'lines', 'insertions', 'deletions', 'net'])\n\n for repo in self.repos:\n try:\n ch = repo.commit_history(branch, limit=limit, days=days, ignore_globs=ignore_globs, include_globs=include_globs)\n ch['repository'] = repo.repo_name\n df = df.append(ch)\n except GitCommandError:\n print('Warning! Repo: %s seems to not have the branch: %s' % (repo, branch))\n\n df.reset_index()\n\n return df",
"def file_change_history(self, branch='master', limit=None, days=None, ignore_globs=None, include_globs=None):\n\n if limit is not None:\n limit = int(limit / len(self.repo_dirs))\n\n df = pd.DataFrame(columns=['repository', 'date', 'author', 'committer', 'message', 'rev', 'filename', 'insertions', 'deletions'])\n\n for repo in self.repos:\n try:\n ch = repo.file_change_history(\n branch,\n limit=limit,\n days=days,\n ignore_globs=ignore_globs,\n include_globs=include_globs\n )\n ch['repository'] = repo.repo_name\n df = df.append(ch)\n except GitCommandError:\n print('Warning! Repo: %s seems to not have the branch: %s' % (repo, branch))\n\n df.reset_index()\n\n return df",
"def get_line_table(lines: List[Line]) -> pd.DataFrame:\n lines_df = pd.DataFrame.from_records([line.as_dict() for line in lines], index=\"id\")\n lines_df.index.name = \"line_id\"\n return lines_df",
"def df_from_files(self):\n print('Creating dataframe...')\n num = len([name for name in os.listdir(self.raw) if not name[0] == '.'])\n files = os.path.join(self.raw, '~.info.json') # This is a weird hack\n files = files.replace('~', '{:05d}') # It allows path joining to work on Windows\n data = [json.load(open(files.format(i))) for i in range(1, num + 1)]\n\n columns = ['formats', 'tags', 'categories', 'thumbnails']\n lists = [[], [], [], []]\n deletes = {k: v for k, v in zip(columns, lists)}\n for dt in data:\n for col, ls in deletes.items():\n ls.append(dt[col])\n del dt[col]\n\n self.df = pd.DataFrame(data)\n self.df['upload_date'] = pd.to_datetime(self.df['upload_date'], format='%Y%m%d')\n self.df.to_csv(os.path.join(self.ran, 'df.csv'))\n\n self.tags = deletes['tags']\n pickle.dump(self.tags, open(os.path.join(self.ran, 'tags.txt'), 'wb'))",
"def jobs_dataframe(execute_lines, pbs_kwargs, parser_kwargs=None):\n if type(execute_lines) == str:\n if 'name' not in pbs_kwargs.keys():\n names = [unique_name()]\n pbs_kwargs['name'] = names\n job_id, status = [''], ['']\n execute_lines = [execute_lines]\n pbs_kwargs = [pbs_kwargs]\n parser_kwargs = [parser_kwargs]\n if parser_kwargs is None:\n parser_kwargs = [dict()]\n else:\n names = [pbs_kwargs['name']]\n if type(execute_lines) == list:\n names = []\n for i in range(len(execute_lines)):\n if 'name' not in pbs_kwargs[i].keys():\n names.append(unique_name())\n pbs_kwargs[i]['name'] = names[-1]\n else:\n names.append(pbs_kwargs[i]['name'])\n if parser_kwargs is None:\n parser_kwargs = [dict() for _ in range(len(execute_lines))]\n job_id = [''] * len(execute_lines)\n status = [''] * len(execute_lines)\n reset = [0] * len(execute_lines)\n\n return pd.DataFrame(dict(\n name=names,\n execute_lines=execute_lines,\n pbs_kwargs=pbs_kwargs,\n parser_kwargs=parser_kwargs,\n job_id=job_id,\n status=status))",
"def main(git_log):\n df = pd.read_csv(git_log, sep = '|', names = ['commit', 'message', 'author', 'email'])\n df['area'] = df['message'].apply(define_area)\n df['message'] = df['message'].apply(delete_prefix)\n\n # Split commits by areas\n core = df[df['area']==Area.core.value]\n tests = df[df['area']==Area.tests.value]\n build = df[df['area']==Area.build.value]\n apps = df[df['area']==Area.apps.value]\n docs = df[df['area']==Area.docs.value]\n other = df[df['area'].isna()]\n\n # Define individual contributors\n contributors = df.groupby(['author', 'email'])\n contributors = list(contributors.groups.keys())\n\n with open('release-notes.md', 'w') as f:\n f.write('# Release Notes\\n')\n\n f.write('\\n## API / ABI / Integration Changes\\n')\n f.write('\\n**API/ABI version: 1.x.**\\n')\n\n f.write('\\n## New Features and Improvements\\n')\n f.write('\\n## Important Bug Fixes\\n')\n f.write('\\n## Build\\n')\n f.write('\\n## Documentation\\n')\n\n f.write('\\n## Contributors\\n')\n for name, email in contributors:\n f.write(f'\\n{name} <{email}>')\n f.write('\\n')\n\n f.write('\\n## Changelog\\n')\n f.write('\\n<details><summary>Click to expand/collapse</summary>')\n f.write('\\n<p>')\n f.write('\\n')\n\n if not core.empty:\n f.write('\\n### Core Functionality')\n write_into_changelog(core, f)\n\n if not tests.empty:\n f.write('\\n### Unit Tests')\n write_into_changelog(tests, f)\n\n if not build.empty:\n f.write('\\n### Build Scripts (CMake, etc.)')\n write_into_changelog(build, f)\n\n if not apps.empty:\n f.write('\\n### Sample Applications')\n write_into_changelog(apps, f)\n\n if not docs.empty:\n f.write('\\n### Documentation')\n write_into_changelog(docs, f)\n\n if not other.empty:\n f.write('\\n### Other')\n write_into_changelog(other, f)\n\n f.write('\\n</p>')\n f.write('\\n</details>')",
"def test_with_multi_commit_diff(self):\n reader = DiffXReader(io.BytesIO(\n b'#diffx: encoding=utf-8, version=1.0\\n'\n b'#.change:\\n'\n b'#..preamble: indent=4, length=49, mimetype=text/markdown\\n'\n b' Summary of the _first_ commit in the series.\\n'\n b'#..meta: format=json, length=244\\n'\n b'{\\n'\n b' \"author\": \"Test User <test@example.com>\",\\n'\n b' \"committer\": \"Test User <test@example.com>\",\\n'\n b' \"committer date\": \"2021-06-02T13:12:06-07:00\",\\n'\n b' \"date\": \"2021-06-01T19:26:31-07:00\",\\n'\n b' \"id\": \"a25e7b28af5e3184946068f432122c68c1a30b23\"\\n'\n b'}\\n'\n b'#..file:\\n'\n b'#...meta: format=json, length=166\\n'\n b'{\\n'\n b' \"path\": \"file1\",\\n'\n b' \"revision\": {\\n'\n b' \"new\": \"eed8df7f1400a95cdf5a87ddb947e7d9c5a19cef\",\\n'\n b' \"old\": \"c8839177d1a5605aa60abe69db95c84183f0eebe\"\\n'\n b' }\\n'\n b'}\\n'\n b'#...diff: length=60\\n'\n b'--- /file1\\n'\n b'+++ /file1\\n'\n b'@@ -498,7 +498,7 @@\\n'\n b' ... diff content\\n'\n b'#.change:\\n'\n b'#..preamble: indent=4, length=52\\n'\n b' Summary of commit #2\\n'\n b'\\n'\n b' Here\\'s a description.\\n'\n b'#..meta: format=json, length=244\\n'\n b'{\\n'\n b' \"author\": \"Test User <test@example.com>\",\\n'\n b' \"committer\": \"Test User <test@example.com>\",\\n'\n b' \"committer date\": \"2021-06-02T19:46:25-07:00\",\\n'\n b' \"date\": \"2021-06-01T19:46:22-07:00\",\\n'\n b' \"id\": \"91127b687f583184144161f432222748c1a30b23\"\\n'\n b'}\\n'\n b'#..file:\\n'\n b'#...meta: format=json, length=166\\n'\n b'{\\n'\n b' \"path\": \"file2\",\\n'\n b' \"revision\": {\\n'\n b' \"new\": \"a2ccb0cb48383472345d41a32afde39a7e6a72dd\",\\n'\n b' \"old\": \"1b7af7f97076effed5db722afe31c993e6adbc78\"\\n'\n b' }\\n'\n b'}\\n'\n b'#...diff: length=80\\n'\n b'--- a/file2\\n'\n b'+++ b/file2\\n'\n b'@@ -66,7 +66,8 @@\\n'\n b' ... diff content for commit 2, file2\\n'\n b'#..file:\\n'\n b'#...meta: format=json, length=166\\n'\n b'{\\n'\n b' \"path\": \"file3\",\\n'\n b' \"revision\": {\\n'\n b' \"new\": \"0d4a0fb8d62b762a26e13591d06d93d79d61102f\",\\n'\n b' \"old\": \"be089b7197974703c83682088a068bef3422c6c2\"\\n'\n b' }\\n'\n b'}\\n'\n b'#...diff: length=82\\n'\n b'--- a/file3\\n'\n b'+++ b/file3\\n'\n b'@@ -258,7 +258,8 @@\\n'\n b' ... 
diff content for commit 2, file3\\n'\n ))\n\n self.assertEqual(list(reader), [\n {\n 'level': 0,\n 'line': 0,\n 'options': {\n 'encoding': 'utf-8',\n 'version': '1.0',\n },\n 'section': Section.MAIN,\n 'type': 'diffx',\n },\n {\n 'level': 1,\n 'line': 1,\n 'options': {},\n 'section': Section.CHANGE,\n 'type': 'change',\n },\n {\n 'level': 2,\n 'line': 2,\n 'options': {\n 'indent': 4,\n 'length': 49,\n 'mimetype': 'text/markdown',\n },\n 'section': Section.CHANGE_PREAMBLE,\n 'text': 'Summary of the _first_ commit in the series.\\n',\n 'type': 'preamble',\n },\n {\n 'level': 2,\n 'line': 4,\n 'metadata': {\n 'author': 'Test User <test@example.com>',\n 'committer': 'Test User <test@example.com>',\n 'committer date': '2021-06-02T13:12:06-07:00',\n 'date': '2021-06-01T19:26:31-07:00',\n 'id': 'a25e7b28af5e3184946068f432122c68c1a30b23',\n },\n 'options': {\n 'format': 'json',\n 'length': 244,\n },\n 'section': Section.CHANGE_META,\n 'type': 'meta',\n },\n {\n 'level': 2,\n 'line': 12,\n 'options': {},\n 'section': Section.FILE,\n 'type': 'file',\n },\n {\n 'level': 3,\n 'line': 13,\n 'metadata': {\n 'path': 'file1',\n 'revision': {\n 'new': 'eed8df7f1400a95cdf5a87ddb947e7d9c5a19cef',\n 'old': 'c8839177d1a5605aa60abe69db95c84183f0eebe',\n },\n },\n 'options': {\n 'format': 'json',\n 'length': 166,\n },\n 'section': Section.FILE_META,\n 'type': 'meta',\n },\n {\n 'level': 3,\n 'line': 21,\n 'options': {\n 'length': 60,\n },\n 'section': Section.FILE_DIFF,\n 'diff': (\n b'--- /file1\\n'\n b'+++ /file1\\n'\n b'@@ -498,7 +498,7 @@\\n'\n b' ... diff content\\n'\n ),\n 'type': 'diff',\n },\n {\n 'level': 1,\n 'line': 26,\n 'options': {},\n 'section': Section.CHANGE,\n 'type': 'change',\n },\n {\n 'level': 2,\n 'line': 27,\n 'options': {\n 'indent': 4,\n 'length': 52,\n },\n 'section': Section.CHANGE_PREAMBLE,\n 'text': (\n \"Summary of commit #2\\n\"\n \"\\n\"\n \"Here's a description.\\n\"\n ),\n 'type': 'preamble',\n },\n {\n 'level': 2,\n 'line': 31,\n 'metadata': {\n 'author': 'Test User <test@example.com>',\n 'committer': 'Test User <test@example.com>',\n 'committer date': '2021-06-02T19:46:25-07:00',\n 'date': '2021-06-01T19:46:22-07:00',\n 'id': '91127b687f583184144161f432222748c1a30b23',\n },\n 'options': {\n 'format': 'json',\n 'length': 244,\n },\n 'section': Section.CHANGE_META,\n 'type': 'meta',\n },\n {\n 'level': 2,\n 'line': 39,\n 'options': {},\n 'section': Section.FILE,\n 'type': 'file',\n },\n {\n 'level': 3,\n 'line': 40,\n 'metadata': {\n 'path': 'file2',\n 'revision': {\n 'new': 'a2ccb0cb48383472345d41a32afde39a7e6a72dd',\n 'old': '1b7af7f97076effed5db722afe31c993e6adbc78',\n },\n },\n 'options': {\n 'format': 'json',\n 'length': 166,\n },\n 'section': Section.FILE_META,\n 'type': 'meta',\n },\n {\n 'level': 3,\n 'line': 48,\n 'options': {\n 'length': 80,\n },\n 'section': Section.FILE_DIFF,\n 'diff': (\n b'--- a/file2\\n'\n b'+++ b/file2\\n'\n b'@@ -66,7 +66,8 @@\\n'\n b' ... 
diff content for commit 2, file2\\n'\n ),\n 'type': 'diff',\n },\n {\n 'level': 2,\n 'line': 53,\n 'options': {},\n 'section': Section.FILE,\n 'type': 'file',\n },\n {\n 'level': 3,\n 'line': 54,\n 'metadata': {\n 'path': 'file3',\n 'revision': {\n 'new': '0d4a0fb8d62b762a26e13591d06d93d79d61102f',\n 'old': 'be089b7197974703c83682088a068bef3422c6c2',\n },\n },\n 'options': {\n 'format': 'json',\n 'length': 166,\n },\n 'section': Section.FILE_META,\n 'type': 'meta',\n },\n {\n 'level': 3,\n 'line': 62,\n 'options': {\n 'length': 82,\n },\n 'section': Section.FILE_DIFF,\n 'diff': (\n b'--- a/file3\\n'\n b'+++ b/file3\\n'\n b'@@ -258,7 +258,8 @@\\n'\n b' ... diff content for commit 2, file3\\n'\n ),\n 'type': 'diff',\n },\n ])",
"def log_to_dataframe(self, log_file, regex, headers, logformat):\r\n log_messages = []\r\n linecount = 0\r\n with open(log_file, 'r') as fin:\r\n for line in fin.readlines():\r\n try:\r\n\r\n match = regex.search(line.strip())\r\n message = [match.group(header) for header in headers]\r\n log_messages.append(message)\r\n linecount += 1\r\n except Exception as e:\r\n pass\r\n logdf = pd.DataFrame(log_messages, columns=headers)\r\n # print(logdf.head())\r\n logdf.insert(0, 'LineId', None)\r\n logdf['LineId'] = [i + 1 for i in range(linecount)]\r\n\r\n return logdf",
"def log_to_dataframe(self, log_file, regex, headers, logformat):\n log_messages = []\n linecount = 0\n with open(log_file, 'r') as fin:\n for line in fin.readlines():\n try:\n match = regex.search(line.strip())\n message = [match.group(header) for header in headers]\n log_messages.append(message)\n linecount += 1\n except Exception as e:\n pass\n logdf = pd.DataFrame(log_messages, columns=headers)\n logdf.insert(0, 'LineId', None)\n logdf['LineId'] = [i + 1 for i in range(linecount)]\n return logdf",
"def create_from_data(self, repository, diff_file_name, diff_file_contents, parent_diff_file_name, parent_diff_file_contents, diffset, commit_id, parent_id, commit_message, author_name, author_email, author_date, validation_info=None, request=None, committer_name=None, committer_email=None, committer_date=None, base_commit_id=None, check_existence=True, validate_only=False):\n\t\tdiffcommit = self.model(filename=diff_file_name, diffset=diffset, commit_id=commit_id, parent_id=parent_id, author_name=author_name, author_email=author_email, author_date=author_date, commit_message=commit_message, committer_name=committer_name, committer_email=committer_email, committer_date=committer_date)\n\t\tif not validate_only:\n\t\t\tdiffcommit.save()\n\t\tget_file_exists = partial(get_file_exists_in_history, validation_info or {}, repository, parent_id)\n\t\tcreate_filediffs(get_file_exists=get_file_exists, diff_file_contents=diff_file_contents, parent_diff_file_contents=parent_diff_file_contents, repository=repository, request=request, basedir=\"\", base_commit_id=base_commit_id, diffset=diffset, diffcommit=diffcommit, validate_only=validate_only, check_existence=check_existence)\n\t\tif validate_only:\n\t\t\treturn None\n\t\treturn diffcommit",
"def file_detail(self, rev='HEAD', committer=True, ignore_globs=None, include_globs=None):\n\n df = None\n\n for repo in self.repos:\n try:\n if df is None:\n df = repo.file_detail(ignore_globs=ignore_globs, include_globs=include_globs, committer=committer, rev=rev)\n df['repository'] = repo.repo_name\n else:\n chunk = repo.file_detail(ignore_globs=ignore_globs, include_globs=include_globs, committer=committer, rev=rev)\n chunk['repository'] = repo.repo_name\n df = df.append(chunk)\n except GitCommandError:\n print('Warning! Repo: %s couldnt be inspected' % (repo, ))\n\n df = df.reset_index(level=-1)\n df = df.set_index(['file', 'repository'])\n return df",
"def as_data_frame(self, **kwargs):\n try:\n import pandas as pd\n except ImportError:\n raise ImportError(\"What are you doing trying to export a Layout \"\n \"as a pandas DataFrame when you don't have \"\n \"pandas installed? Eh? Eh?\")\n if kwargs:\n files = self.get(return_type='file', **kwargs)\n else:\n files = self.files.values()\n data = pd.DataFrame.from_records([f.entities for f in files])\n data.insert(0, 'path', [f.path for f in files])\n return data",
"def commit_to_csv(commit, csv_filename):\n repo.git_dir\n data = (commit.tree / csv_filename).data_stream.read()\n dialect = csv.Sniffer().sniff(StringIO(unicode(data)).read(1024))\n data = data.splitlines()\n for n, row in enumerate(data):\n if n == 0:\n data[n] = \"ID\" + dialect.delimiter + row\n else: \n data[n] = str(n) + dialect.delimiter + row\n data = \"\\n\".join(data)\n csv_out = csv.DictReader(StringIO(unicode(data), newline=None), dialect=dialect)\n return csv_out",
"def create_articles_df(zip_file):\n articles_df = pd.DataFrame(columns=['text'])\n article_relative_filepaths = [fp for fp in zip_file.namelist() if '.txt' in fp]\n\n for filepath in tqdm(article_relative_filepaths, desc='Creating articles df'):\n article_id = re.findall(r'\\d+', filepath)[0]\n content = read_article_content(zip_file, filepath)\n \n articles_df.loc[article_id, 'text'] = content\n\n return articles_df",
"def dbstore_commit_data(fromdate=None, recreate=False, quieter=False):\n dbdefine.create_tables(subset={\"commits_stats\", \"commits_log\"}, recreate=recreate)\n commits = list(db.do(\"\"\"SELECT id, time FROM commits_stats\"\"\"))\n donerevs = set(x[0] for x in commits)\n if not commits:\n fromdate = None\n if fromdate == \"<latest>\":\n fromdate = max(x[1] for x in commits)\n basepath = GIT_THEMING_PATH_HIST\n notespath = os.path.join(basepath, \"notes\")\n os.chdir(basepath)\n entries = list_commits(basepath)\n bydate = defaultdict(list)\n latestcommits = set()\n logrows = [(commit, date, author, committype, msg) for commit, author, date, committype, msg in entries]\n db.do(\"\"\"REPLACE INTO commits_log VALUES(%s, %s, %s, %s, %s)\"\"\", values=logrows)\n\n for commit, _, date, _, _ in entries:\n bydate[date.date()].append((date, commit))\n for datelist in bydate.values():\n date, commit = max(datelist)\n latestcommits.add(commit)\n\n for idx, (commit, author, date, _, _) in enumerate(entries):\n if fromdate and date <= fromdate:\n if not quieter:\n print(\"EARLIER:\", (commit, author, date), \"...SKIPPING\")\n elif commit in donerevs:\n if not quieter:\n print(\"EXISTS:\", (commit, author, date), \"...SKIPPING\")\n elif commit not in latestcommits:\n if not quieter:\n print(\"SKIPPING EARLIER COMMIT:\", (commit, author, date))\n else:\n try:\n res = subprocess.check_output(['git', 'checkout', '-f', commit]).decode(\"utf-8\")\n except Exception as e:\n print(\"GIT ERROR\", repr(e))\n continue\n try:\n datapoint = get_datapoint(notespath)\n except AssertionError as e:\n print(\"PARSE ERROR\", repr(e))\n continue\n except Exception as e:\n print(\"UNKNOWN ERROR\", repr(e))\n continue\n data = json.dumps(datapoint)\n row = (commit, date.strftime('%Y-%m-%d %H:%M:%S'), author, data)\n db.do(\"\"\"REPLACE INTO commits_stats VALUES(%s, %s, %s, %s)\"\"\", values=[row])\n if not quieter:\n print(\"INSERTED: \", str(row)[:120], \"...\")\n print(dict(datapoint))",
"def create_df(path_or_buffer, v='2'):\r\n column_names = load_column_names(v=v)\r\n return pd.read_csv(\r\n path_or_buffer, sep=\"\\t\", header=None, usecols=range(len(column_names)),\r\n names=column_names, index_col=0, dtype={'EventCode': 'object'}, encoding='utf-8'\r\n )",
"def to_row(repo, author, pr):\n pr_data = PullRequest(pr)\n\n latest_commit_at = pr_data.latest_commit.datetime.date()\n oldest_commit_at = pr_data.oldest_commit.datetime.date()\n days_between_commits = (latest_commit_at - oldest_commit_at + ONE_DAY).days\n\n latest_commit_author = lib.display(pr_data.latest_commit.author)\n oldest_commit_author = lib.display(pr_data.oldest_commit.author)\n\n out_row = {\n \"Repo Owner\": lib.display(repo.owner),\n \"Repo Name\": repo.name,\n \"Repo URL\": repo.html_url,\n \"Author\": lib.display(author),\n \"PR ID\": f\"#{pr_data.number}\",\n \"PR Title\": pr_data.title,\n \"PR From Branch\": pr_data.from_branch_name,\n \"PR To Branch\": pr_data.to_branch_name,\n \"PR URL\": pr_data.url,\n \"Jira Ticket\": pr_data.jira_ticket,\n \"PR Updated At\": pr_data.updated_at,\n \"PR Created At\": pr_data.created_at,\n \"Latest Commit At\": latest_commit_at,\n \"Latest Commit Author\": latest_commit_author,\n \"Oldest Commit At\": oldest_commit_at,\n \"Oldest Commit Author\": oldest_commit_author,\n \"Days Between Commits\": days_between_commits,\n \"Status\": pr_data.status,\n \"Merged/Closed WOY\": pr_data.status_changed_week_of_year(),\n \"Merged/Closed Date\": pr_data.status_changed_at(),\n \"Merged By\": pr_data.merged_by_name(),\n \"Reviewers\": \", \".join(pr_data.reviewer_names()),\n \"Comments\": pr_data.comment_count,\n \"Commits\": pr_data.commit_count,\n \"Changed Files\": pr_data.changed_files,\n \"Added Lines\": pr_data.additions,\n \"Deleted Lines\": pr_data.deletions,\n \"Changed Lines\": pr_data.additions + pr.deletions,\n }\n\n review_states = Counter([r.state for r in pr_data.reviews])\n [review_states.setdefault(s, 0) for s in Review.get_states()]\n out_row.update(**dict(review_states))\n\n return out_row"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns a list of the names of all the methods defined in a Python file.
|
def find_methods_in_python_file(self, file_path):
    # Parse the file with the ast module and collect every function definition name.
    methods = []
    with open(file_path, 'r', encoding='utf-8') as f:
        tree = ast.parse(f.read())
    for node in ast.walk(tree):
        if isinstance(node, ast.FunctionDef):
            methods.append(node.name)
    return methods
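
Worth noting: ast.FunctionDef does not match `async def` definitions, and ast.walk also descends into classes, so methods are reported alongside module-level functions. A small standalone sketch (not part of the original class) that also picks up async functions:

import ast

def find_callables_in_source(source):
    """Return the names of all sync and async function definitions in a source string."""
    tree = ast.parse(source)
    return [node.name for node in ast.walk(tree)
            if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef))]

# Example: async and regular definitions are both picked up.
print(find_callables_in_source('async def fetch():\n    pass\ndef main():\n    pass'))
# ['fetch', 'main']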
|
[
"def listMethods(self):\n methodNames = self.funcs.keys()\n methodNames.sort()\n return methodNames",
"def _listOfMethods(self, lookinside):\n\t\ttry:\n\t\t\tif lookinside:\n\t\t\t\treturn dir(__import__(lookinside, globals={}, locals={}, fromlist=[], level=-1))\n\t\texcept ImportError:\n\t\t\treturn []",
"def do_list_funcs(self, arg):\n # Check if file exists as .py\n if not (os.path.isfile(arg)\n and arg[-3:] == \".py\"\n and arg in os.listdir()):\n print(\"list_funcs: %s: Not a .py file\" % arg)\n return False\n # Search file contents for top-level function declarations\n file_contents = open(arg, mode=\"r\").read()\n for match in re.finditer(self.fun_pat, file_contents):\n # Don't return private methods\n if match.group(1)[:2] != \"__\":\n print(\"\\t\" + match.group(1))",
"def get_method_names(self):\n method_names = []\n with open(self.stat_method, 'r') as method_file:\n for line in method_file.readlines():\n method_names.append(line.split(',')[0])\n return method_names",
"def extensions_for(self, m):\r\n if m not in self.methods:\r\n return []\r\n return self._string_to_list(self.methods[m]['file-extensions'])",
"def list_py(path = None):\n if(path == None):\n path =os.getcwd()\n return [fname for fname in os.listdir(path)\n if os.path.isfile(fname)\n if fname.endswith('.py')]",
"def _find_all_symbols(module):\n return [f.name for f in module.functions]",
"def test_find_all_func_def(self):\n self.filename = \"parser_tests/ruby_function_def.txt\"\n expected_func_def = [\"method_name\", \"test\"]\n self.run_parser()\n self.assertListEqual(expected_func_def, self.p.scanner.functions)",
"def test_find_functions(self):\n self.filename = \"parser_tests/ruby_functions.txt\"\n expected_functions = ['multiply', 'method_name']\n self.run_parser()\n self.assertListEqual(expected_functions, self.p.scanner.functions_calls)",
"def test_py_annot_method_names(self):\n line_data = list([line.rstrip() for line in open(os.path.join(self.src_files, \"python_annot_file.py\"),\n encoding='utf-8', errors='ignore')])\n self.assertEqual(str(get_py_annot_method_names(line_data, \"@staticmethod\", 0)), \"['validate_return']\")\n file_dir = os.path.join(self.file_path, \"test_resource\", \"test_repo\", \"test\")\n for file in os.listdir(file_dir):\n if file.startswith(\"ExtractedFunc_\"):\n os.remove(os.path.join(file_dir, file))",
"def get_py_files(path: str) -> List[str]:\n yield from iglob(os.path.join(path, '*.py'))",
"def system_listMethods(self):\r\n\r\n methods = self.funcs.keys()\r\n if self.instance is not None:\r\n # Instance can implement _listMethod to return a list of\r\n # methods\r\n if hasattr(self.instance, '_listMethods'):\r\n methods = remove_duplicates(\r\n methods + self.instance._listMethods()\r\n )\r\n # if the instance has a _dispatch method then we\r\n # don't have enough information to provide a list\r\n # of methods\r\n elif not hasattr(self.instance, '_dispatch'):\r\n methods = remove_duplicates(\r\n methods + list_public_methods(self.instance)\r\n )\r\n methods.sort()\r\n return methods",
"def get_worker_methods(found_module):\n logger = logging.getLogger(__name__)\n\n try:\n # Getting the functions\n methods_in_module = inspect.getmembers(found_module,\n lambda member: inspect.isfunction(member))\n # methods_in_module is a list of tuples (function_name:str, function_obj:function)\n except Exception as error:\n msg = \"Error occurred when looking for a worker methods. Following exception occurred: %s\" % (error)\n logger.error(msg)\n raise Exception(msg)\n\n return methods_in_module",
"def do_list_vars(self, arg):\n # Check if file exists as .py\n if not (os.path.isfile(arg)\n and arg[-3:] == \".py\"\n and arg in os.listdir()):\n print(\"list_funcs: %s: Not a .py file\" % arg)\n return False\n # Search file contents for top-level function declarations\n file_contents = open(arg, mode=\"r\").read()\n for match in re.finditer(self.var_pat, file_contents):\n # Don't return private variables\n if match.group(1)[:2] != \"__\":\n print(\"\\t\" + match.group(1))",
"def inspect_module_names(self) -> Set[str]:\n modules = []\n pattern_1 = r\"import\\s+(?P<module>\\w+)\"\n pattern_2 = r\"from\\s+(?P<module>\\w+)\"\n if not self._is_package:\n with open(str(self.root_filename), \"r\") as file:\n for line in file.readlines():\n m = re.match(pattern_1, line)\n if m:\n module = m.group(\"module\")\n modules.append(module)\n pass\n m = re.match(pattern_2, line)\n if m:\n module = m.group(\"module\")\n modules.append(module)\n pass\n pass\n pass\n pass\n else:\n # pattern = r\"import\\s+(?P<module>\\w+)\"\n for path, _, filenames in walk(str(self.root)):\n dir_path = self.root.joinpath(path)\n for filename in filenames:\n abs_path = dir_path.joinpath(filename)\n\n if not str(abs_path).endswith(\".py\"):\n continue\n pass\n modules.append(filename)\n pass\n pass\n return set(modules)",
"def _get_lua_funcs(self):\n with open(\"bitwise.lua\", \"r\") as f:\n for func in f.read().strip().split(\"local function \"):\n if func:\n bits = func.split(\"\\n\", 1)\n name = bits[0].split(\"(\")[0].strip()\n snippet = bits[1].rsplit(\"end\", 1)[0].strip()\n yield name, snippet",
"def get_chapter_methods(chapter_name) -> List[str]:\n\n chapters_path = os.path.join(os.getcwd(), \"methods\")\n chapters = os.listdir(chapters_path)\n\n for chapter in chapters: \n l_ocorrences: list = re.findall(chapter_name, chapter)\n if len(l_ocorrences) > 0:\n chapter_methods: Dict = {\"methods\": os.listdir(os.path.join(chapters_path, chapter))}\n print('ocorrences: ', l_ocorrences)\n print('methods: ', chapter_methods)\n return chapter_methods",
"def get_tokens_from_dir(self) -> List[List[NameBodyTokens]]:\n return [methods_token for file in self.data_files for methods_token in self.load_data_file(file)]",
"def _get_python_sources():\n\n return list(_SWIFT_PATH.rglob(\"*.py\")) + _KNOWN_SCRIPT_PATHS"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Same as create_commits_dataframe() but with functions instead of files as rows.
|
def create_commits_dataframe_functions(self):
    # Collect every commit hash of the repository; these become the dataframe columns.
    columns = []
    pbar = tqdm.tqdm(total=self.total_commits)
    for commit in self.repository_mining.traverse_commits():
        columns.append(commit.hash)
        pbar.update(1)
    pbar.close()

    dataframe_list = []
    index = []

    cwd = os.getcwd()
    os.chdir(self.repo_folder)

    # Register the python diff driver so that `git log -L :<funcname>:<file>` can
    # track individual functions (git reads the attributes from '.gitattributes').
    with open('.gitattributes', 'a') as f:
        f.write('*.py diff=python\n')

    print('Analyzing all the functions of the repo')
    file_methods = []
    for file_path in tqdm.tqdm(self.repo_files_path):
        if file_path.endswith('.py'):
            # Collect every (file, method) pair defined in this Python file.
            complete_file_path = os.path.join(self.repo_folder, file_path)
            methods = self.find_methods_in_python_file(complete_file_path)
            for method in methods:
                file_methods.append((file_path, method))

    with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor:
        future_to_method = {executor.submit(self.analyze_method, file_method): file_method for file_method in file_methods}
        pbar = tqdm.tqdm(total=len(file_methods))
        for future in concurrent.futures.as_completed(future_to_method):
            file_method = future_to_method[future]
            try:
                # analyze_method returns the hashes of the commits that touched this
                # method, each wrapped in quotes, hence the [1:-1] slice.
                modified_in_commits = future.result()
                modified_in_commits = [commit[1:-1] for commit in modified_in_commits]
                row_name = f'{file_method[0]}:{file_method[1]}'
                if row_name not in index:
                    index.append(row_name)
                    file_method_commits = [1 if commit in modified_in_commits else 0 for commit in columns]
                    dataframe_list.append(file_method_commits)
            except Exception as exc:
                print(f'Error during execution: {exc}')
            pbar.update(1)
        pbar.close()

    os.chdir(cwd)
    return pd.DataFrame(dataframe_list, index=index, columns=columns)
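
As with analyze_line, self.analyze_method is not shown in this record. A minimal sketch of what it might look like, assuming it relies on git's function-tracking form of -L (which is precisely what the `*.py diff=python` attribute written above enables); the command and parsing are assumptions:

import re
import subprocess

def analyze_method(self, file_method):
    """Return the hashes of the commits that modified a given (file, method) pair.

    Uses `git log -L :<funcname>:<file>`, which needs the python diff driver
    (the `*.py diff=python` attribute) to locate function boundaries.
    """
    file_path, method_name = file_method
    result = subprocess.run(
        ['git', 'log', '-L', f':{method_name}:{file_path}', '--pretty=format:"%H"'],
        capture_output=True, text=True
    )
    # Keep only the quoted 40-character hashes and ignore the patch text.
    return re.findall(r'"[0-9a-f]{40}"', result.stdout)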
|
[
"def create_commits_dataframe(self):\n\n files_commits = {}\n current_length = 0\n columns = []\n\n pbar = tqdm.tqdm(total=self.total_commits)\n for commit in self.repository_mining.traverse_commits():\n\n current_length += 1\n columns.append(commit.hash)\n\n for modification in commit.modified_files:\n\n if modification.new_path in self.repo_files_path:\n current_path = modification.new_path\n else:\n current_path = self.retrieve_current_path(modification.new_path)\n \n if current_path is not None:\n\n if current_path in files_commits:\n\n while len(files_commits[current_path]) < current_length - 1:\n files_commits[current_path].append(0)\n files_commits[current_path].append(1)\n \n else:\n files_commits[current_path] = [0 for _ in range(current_length-1)]\n files_commits[current_path].append(1)\n\n pbar.update(1)\n pbar.close()\n\n dataframe_list = []\n index = []\n for key, value in files_commits.items():\n\n if len(value) < current_length:\n\n while len(files_commits[key]) < current_length:\n files_commits[key].append(0)\n\n index.append(key)\n dataframe_list.append(value)\n\n return pd.DataFrame(dataframe_list, index=index, columns=columns)",
"def create_commits_dataframe_lines(self):\n\n columns = []\n\n pbar = tqdm.tqdm(total=self.total_commits)\n for commit in self.repository_mining.traverse_commits():\n\n columns.append(commit.hash)\n\n pbar.update(1)\n pbar.close()\n\n\n dataframe_list = []\n index = []\n\n\n cwd = os.getcwd()\n os.chdir(self.repo_folder)\n\n\n # Print analyzing all the lines of the repo\n print('Print analyzing all the lines of the repo')\n file_lines = []\n \n\n for file_path in tqdm.tqdm(self.repo_files_path):\n\n # Get path to file and count number of lines\n complete_file_path = self.repo_folder + '\\\\' + file_path\n linenumber = self.get_file_number_of_lines(complete_file_path)\n\n for i in range(1, linenumber):\n file_lines.append((file_path, i))\n\n line_to_commits = {}\n with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor:\n future_to_line = {executor.submit(self.analyze_line, file_line): file_line for file_line in file_lines}\n\n pbar = tqdm.tqdm(total=len(file_lines))\n for future in concurrent.futures.as_completed(future_to_line):\n file_line = future_to_line[future]\n try:\n \n modified_in_commits = future.result()\n modified_in_commits = [commit[1:-1] for commit in modified_in_commits]\n index.append(f'{file_line[0]}:{file_line[1]}')\n file_line_commits = []\n for commit in columns:\n if commit in modified_in_commits:\n file_line_commits.append(1)\n else:\n file_line_commits.append(0)\n dataframe_list.append(file_line_commits)\n except Exception as exc:\n print(f'Error during execution : {exc}')\n pbar.update(1)\n pbar.close()\n\n\n os.chdir(cwd)\n\n return pd.DataFrame(dataframe_list, index=index, columns=columns)",
"def df_from_files(self):\n print('Creating dataframe...')\n num = len([name for name in os.listdir(self.raw) if not name[0] == '.'])\n files = os.path.join(self.raw, '~.info.json') # This is a weird hack\n files = files.replace('~', '{:05d}') # It allows path joining to work on Windows\n data = [json.load(open(files.format(i))) for i in range(1, num + 1)]\n\n columns = ['formats', 'tags', 'categories', 'thumbnails']\n lists = [[], [], [], []]\n deletes = {k: v for k, v in zip(columns, lists)}\n for dt in data:\n for col, ls in deletes.items():\n ls.append(dt[col])\n del dt[col]\n\n self.df = pd.DataFrame(data)\n self.df['upload_date'] = pd.to_datetime(self.df['upload_date'], format='%Y%m%d')\n self.df.to_csv(os.path.join(self.ran, 'df.csv'))\n\n self.tags = deletes['tags']\n pickle.dump(self.tags, open(os.path.join(self.ran, 'tags.txt'), 'wb'))",
"def file_change_history(self, branch='master', limit=None, days=None, ignore_globs=None, include_globs=None):\n\n if limit is not None:\n limit = int(limit / len(self.repo_dirs))\n\n df = pd.DataFrame(columns=['repository', 'date', 'author', 'committer', 'message', 'rev', 'filename', 'insertions', 'deletions'])\n\n for repo in self.repos:\n try:\n ch = repo.file_change_history(\n branch,\n limit=limit,\n days=days,\n ignore_globs=ignore_globs,\n include_globs=include_globs\n )\n ch['repository'] = repo.repo_name\n df = df.append(ch)\n except GitCommandError:\n print('Warning! Repo: %s seems to not have the branch: %s' % (repo, branch))\n\n df.reset_index()\n\n return df",
"def commit_history(self, branch, limit=None, days=None, ignore_globs=None, include_globs=None):\n\n if limit is not None:\n limit = int(limit / len(self.repo_dirs))\n\n df = pd.DataFrame(columns=['author', 'committer', 'message', 'lines', 'insertions', 'deletions', 'net'])\n\n for repo in self.repos:\n try:\n ch = repo.commit_history(branch, limit=limit, days=days, ignore_globs=ignore_globs, include_globs=include_globs)\n ch['repository'] = repo.repo_name\n df = df.append(ch)\n except GitCommandError:\n print('Warning! Repo: %s seems to not have the branch: %s' % (repo, branch))\n\n df.reset_index()\n\n return df",
"def workspace_to_dataframe(workspace):\n files = FileModel.objects.filter(workspace=workspace).all()\n\n df = pd.DataFrame()\n for f in files:\n # aggiungo ogni file (ogni campo nel file) al dataframe\n df = df.join(file_to_dataframe(f), how='outer')\n\n # aggiungo ogni evento al dataframe\n events = PunctualAnnotationEvent.objects.filter(workspace=workspace).all()\n for e in events:\n df = add_event_to_dataframe(df, e)\n\n events = IntervalAnnotationEvent.objects.filter(workspace=workspace).all()\n for e in events:\n df = add_event_to_dataframe(df, e)\n\n return df",
"def as_data_frame(self, **kwargs):\n try:\n import pandas as pd\n except ImportError:\n raise ImportError(\"What are you doing trying to export a Layout \"\n \"as a pandas DataFrame when you don't have \"\n \"pandas installed? Eh? Eh?\")\n if kwargs:\n files = self.get(return_type='file', **kwargs)\n else:\n files = self.files.values()\n data = pd.DataFrame.from_records([f.entities for f in files])\n data.insert(0, 'path', [f.path for f in files])\n return data",
"def file_detail(self, rev='HEAD', committer=True, ignore_globs=None, include_globs=None):\n\n df = None\n\n for repo in self.repos:\n try:\n if df is None:\n df = repo.file_detail(ignore_globs=ignore_globs, include_globs=include_globs, committer=committer, rev=rev)\n df['repository'] = repo.repo_name\n else:\n chunk = repo.file_detail(ignore_globs=ignore_globs, include_globs=include_globs, committer=committer, rev=rev)\n chunk['repository'] = repo.repo_name\n df = df.append(chunk)\n except GitCommandError:\n print('Warning! Repo: %s couldnt be inspected' % (repo, ))\n\n df = df.reset_index(level=-1)\n df = df.set_index(['file', 'repository'])\n return df",
"def create_from_data(self, repository, diff_file_name, diff_file_contents, parent_diff_file_name, parent_diff_file_contents, diffset, commit_id, parent_id, commit_message, author_name, author_email, author_date, validation_info=None, request=None, committer_name=None, committer_email=None, committer_date=None, base_commit_id=None, check_existence=True, validate_only=False):\n\t\tdiffcommit = self.model(filename=diff_file_name, diffset=diffset, commit_id=commit_id, parent_id=parent_id, author_name=author_name, author_email=author_email, author_date=author_date, commit_message=commit_message, committer_name=committer_name, committer_email=committer_email, committer_date=committer_date)\n\t\tif not validate_only:\n\t\t\tdiffcommit.save()\n\t\tget_file_exists = partial(get_file_exists_in_history, validation_info or {}, repository, parent_id)\n\t\tcreate_filediffs(get_file_exists=get_file_exists, diff_file_contents=diff_file_contents, parent_diff_file_contents=parent_diff_file_contents, repository=repository, request=request, basedir=\"\", base_commit_id=base_commit_id, diffset=diffset, diffcommit=diffcommit, validate_only=validate_only, check_existence=check_existence)\n\t\tif validate_only:\n\t\t\treturn None\n\t\treturn diffcommit",
"def create_df(file, df_type):\n try:\n date_id = file.split(\"/\")[-1].split(\".\")[0]\n report_timestamp = datetime.strptime(date_id, \"%m-%d-%y\").strftime(\"%Y-%m-%dT%H:%M:%S\")\n\n df = pd.read_csv(file)\n columns = df.columns.tolist()\n\n df[\"reportTimestamp\"] = df.apply(lambda row: report_timestamp, axis=1)\n df[\"dateId\"] = df.apply(lambda row: date_id, axis=1)\n\n if df_type == \"confirmed\":\n df[\"confirmedCases\"] = df.apply(lambda row: row[columns[-1]], axis=1)\n else:\n df[\"deaths\"] = df.apply(lambda row: row[columns[-1]], axis=1)\n\n df.drop(columns[-1], axis=1, inplace=True)\n\n return df\n\n except Exception as exception:\n logger.error(\"Received Exception in create_df function \"\n \"in covid_cases_usa.py - {}\".format(exception))\n raise exception",
"def callables_signatures_df(\n callables, callable_filt=DFLT_CALLABLE_FILT, null_fill=''\n) -> pd.DataFrame:\n callable_filt = get_callable_filt(callable_filt)\n d = arg_default_dict_of_callables(callables, callable_filt=callable_filt)\n return _df_of_callable_arg_default_dict(d, null_fill=null_fill)",
"def create_articles_df(zip_file):\n articles_df = pd.DataFrame(columns=['text'])\n article_relative_filepaths = [fp for fp in zip_file.namelist() if '.txt' in fp]\n\n for filepath in tqdm(article_relative_filepaths, desc='Creating articles df'):\n article_id = re.findall(r'\\d+', filepath)[0]\n content = read_article_content(zip_file, filepath)\n \n articles_df.loc[article_id, 'text'] = content\n\n return articles_df",
"def file_change_rates(self, branch='master', limit=None, coverage=False, days=None, ignore_globs=None, include_globs=None):\n\n columns = ['unique_committers', 'abs_rate_of_change', 'net_rate_of_change', 'net_change', 'abs_change', 'edit_rate', 'repository']\n if coverage:\n columns += ['lines_covered', 'total_lines', 'coverage']\n df = pd.DataFrame(columns=columns)\n\n for repo in self.repos:\n try:\n fcr = repo.file_change_rates(\n branch=branch,\n limit=limit,\n coverage=coverage,\n days=days,\n ignore_globs=ignore_globs,\n include_globs=include_globs\n )\n fcr['repository'] = repo.repo_name\n df = df.append(fcr)\n except GitCommandError:\n print('Warning! Repo: %s seems to not have the branch: %s' % (repo, branch))\n\n df.reset_index()\n\n return df",
"def get_commit_data(commits, commit_dict, preset='all', matrix_type='adjacency', spectrum_type='eigenvalue'):\n subgraphs = metrics.unique_subgraphs(commit_dict, preset)\n commit_times = metrics.get_dates(commits)\n\n x = []\n y = []\n\n for graph, sha1_list in subgraphs:\n\n # Graph energy for testing\n mat = matrix.graph_to_matrix(graph, matrix=matrix_type)\n eig_vals = matrix.analyze_matrix(mat, type=spectrum_type)[0]\n energy = 0\n for val in eig_vals:\n energy += abs(val)\n\n # create data points\n for sha1 in sha1_list:\n try:\n date = commit_times[sha1]\n x.append(date)\n y.append(energy)\n except KeyError:\n pass\n\n return (x, y)",
"def save_git_commits(commits, project):\n jdump(commits, project_to_fname(project))",
"def _process_xlsx_files(self):\n logging.info(f\"Processing {self.transaction} as XLSX files.\")\n\n # Getting the function\n function_for_file = files_definitions[self.transaction][\"function\"]\n\n dataframe = pd.DataFrame()\n\n for file in self.files:\n blob_content = self.blob_container_client.get_blob_client(blob=file).download_blob().content_as_bytes()\n df = pd.read_excel(blob_content, dtype=str)\n df = self._simplify_columns(df)\n df = function_for_file(df)\n\n dataframe = pd.concat([dataframe, df])\n \n return dataframe",
"def coverage(self):\n\n df = pd.DataFrame(columns=['filename', 'lines_covered', 'total_lines', 'coverage', 'repository'])\n\n for repo in self.repos:\n try:\n cov = repo.coverage()\n cov['repository'] = repo.repo_name\n df = df.append(cov)\n except GitCommandError:\n print('Warning! Repo: %s seems to not have coverage' % (repo, ))\n\n df.reset_index()\n\n return df",
"def create_file_object(df):\n file_object = StringIO()\n df.to_csv(file_object, index=False)\n file_object.seek(0)\n return file_object",
"def jobs_dataframe(execute_lines, pbs_kwargs, parser_kwargs=None):\n if type(execute_lines) == str:\n if 'name' not in pbs_kwargs.keys():\n names = [unique_name()]\n pbs_kwargs['name'] = names\n job_id, status = [''], ['']\n execute_lines = [execute_lines]\n pbs_kwargs = [pbs_kwargs]\n parser_kwargs = [parser_kwargs]\n if parser_kwargs is None:\n parser_kwargs = [dict()]\n else:\n names = [pbs_kwargs['name']]\n if type(execute_lines) == list:\n names = []\n for i in range(len(execute_lines)):\n if 'name' not in pbs_kwargs[i].keys():\n names.append(unique_name())\n pbs_kwargs[i]['name'] = names[-1]\n else:\n names.append(pbs_kwargs[i]['name'])\n if parser_kwargs is None:\n parser_kwargs = [dict() for _ in range(len(execute_lines))]\n job_id = [''] * len(execute_lines)\n status = [''] * len(execute_lines)\n reset = [0] * len(execute_lines)\n\n return pd.DataFrame(dict(\n name=names,\n execute_lines=execute_lines,\n pbs_kwargs=pbs_kwargs,\n parser_kwargs=parser_kwargs,\n job_id=job_id,\n status=status))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Performs dimensionality reduction on a given dataframe using the given method.
|
def dimensionality_reduction(self, df, method='tSNE'):
    if method == 'tSNE':
        # t-SNE on a precomputed distance matrix.
        tsne = sklearn.manifold.TSNE(n_components=2, perplexity=5, metric='precomputed')
        embedded_data = tsne.fit_transform(df)
    elif method == 'MCA':
        # MCA works on categorical data, so recode the binary values as strings
        # (on a copy, to avoid mutating the caller's dataframe).
        df = df.replace({0: "False", 1: "True"})
        mca = prince.MCA(n_components=2)
        embedded_data = mca.fit_transform(df)
    elif method == 'NMDS':
        # Non-metric MDS on a precomputed dissimilarity matrix.
        nmds = sklearn.manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12,
                                    dissimilarity="precomputed",
                                    n_init=1)
        embedded_data = nmds.fit_transform(df)
    else:
        raise ValueError(f'Unknown dimensionality reduction method: {method}')
    df_embedded = pd.DataFrame(embedded_data, index=df.index)
    return df_embedded
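
For the 'tSNE' and 'NMDS' branches the input must be a square dissimilarity matrix rather than the raw binary table. A usage sketch, assuming `commits_df` is one of the entity-by-commit dataframes built above and `analyzer` is an instance of this class (both names hypothetical):

import pandas as pd
from scipy.spatial.distance import pdist, squareform

# commits_df: rows are files (or lines/functions), columns are commit hashes, values 0/1.
# Jaccard distance between rows gives an entity-to-entity dissimilarity matrix.
distances = squareform(pdist(commits_df.values.astype(bool), metric='jaccard'))
distance_df = pd.DataFrame(distances, index=commits_df.index, columns=commits_df.index)

embedding = analyzer.dimensionality_reduction(distance_df, method='tSNE')
print(embedding.head())  # two embedding coordinates per row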
|
[
"def applyier(df, *args, **kwargs):\n # pandas default implementation doesn't know how to handle `dtypes` keyword argument\n kwargs.pop(\"dtypes\", None)\n df = cls.frame_wrapper(df)\n result = fn(df, *args, **kwargs)\n\n if (\n not isinstance(result, pandas.Series)\n and not isinstance(result, pandas.DataFrame)\n and func not in (\"to_numpy\", pandas.DataFrame.to_numpy)\n and func not in (\"align\", pandas.DataFrame.align)\n and func not in (\"divmod\", pandas.Series.divmod)\n and func not in (\"rdivmod\", pandas.Series.rdivmod)\n and func not in (\"to_list\", pandas.Series.to_list)\n and func not in (\"to_dict\", pandas.Series.to_dict)\n and func not in (\"mean\", pandas.DataFrame.mean)\n and func not in (\"median\", pandas.DataFrame.median)\n and func not in (\"skew\", pandas.DataFrame.skew)\n and func not in (\"kurt\", pandas.DataFrame.kurt)\n ):\n # When applying a DatetimeProperties or TimedeltaProperties function,\n # if we don't specify the dtype for the DataFrame, the frame might\n # get the wrong dtype, e.g. for to_pydatetime in\n # https://github.com/modin-project/modin/issues/4436\n astype_kwargs = {}\n dtype = getattr(result, \"dtype\", None)\n if dtype and isinstance(\n df,\n (\n pandas.core.indexes.accessors.DatetimeProperties,\n pandas.core.indexes.accessors.TimedeltaProperties,\n ),\n ):\n astype_kwargs[\"dtype\"] = dtype\n result = (\n pandas.DataFrame(result, **astype_kwargs)\n if is_list_like(result)\n else pandas.DataFrame([result], **astype_kwargs)\n )\n if isinstance(result, pandas.Series):\n if result.name is None:\n result.name = MODIN_UNNAMED_SERIES_LABEL\n result = result.to_frame()\n\n inplace_method = kwargs.get(\"inplace\", False)\n if inplace is not None:\n inplace_method = inplace\n return result if not inplace_method else df",
"def determine_cutoff_dimension(self, method='gavish_donoho_2014', **kwargs):\n # https://jaxenter.com/implement-switch-case-statement-python-138315.html\n # switcher = {\n # 'gavish_donoho_2014': self._paper_cutoff,\n # 'sparse_svd_like': self._sparse_cutoff,\n # 'close_hankel_reconstruction': self._hankel_reconstruction_cutoff,\n # }\n # # Get the function from switcher dictionary\n # func_pointer = switcher.get(method, lambda: print(\"Invalid method\"))\n\n # for key in kwargs.keys():\n # if key == 'method':\n # kwargs[key] = ''.join([ '_', kwargs.pop(kwargs[key], 'gavish_donoho_2014') ])\n \n cls_method_to_use = ''.join([ '_', method ])\n\n func_pointer = getattr(self, cls_method_to_use, lambda: print(\"Invalid method\", cls_method_to_use, kwargs))\n # methods need be named like strings, prefixed with _\n\n # print(func_pointer)\n # print(kwargs)\n\n # Execute the function and return the result\n self.num_rel_dim = func_pointer(**kwargs)\n return self.num_rel_dim",
"def run(self, df):\n raise NotImplementedError",
"def method_delta(df):\n\n # create MultiIndex\n df = df.copy().sort_index(axis=1)\n df.columns = pd.MultiIndex.from_frame(\n df.columns.str.extract(fr\"^(t[01])_({'|'.join(methods.keys())})?_?(.*)$\"),\n names=[\"available\", \"method\", \"feature\"],\n )\n # select only methods dim and scores + get delta (t1 - t0)\n df = df.loc[\n :, [(m == m) & (f not in [\"profile\", \"predicted\"]) for t, m, f in df.columns]\n ]\n df_delta = df[\"t1\"] - df[\"t0\"]\n\n df_delta.columns = [\"delta_\" + \"_\".join(col) for col in df_delta.columns]\n return df_delta",
"def agg_da(da, agg_method, agg_dim=None, **kwargs):\n if agg_dim == \"timesteps\" and \"timestep_resolution\" in kwargs.keys() and agg_method != \"sum\":\n da = da / kwargs[\"timestep_resolution\"]\n agg_kwargs = {\"keep_attrs\": True}\n if agg_method == \"sum\":\n agg_kwargs.update({\"min_count\": 1})\n return getattr(da, agg_method)(agg_dim, **agg_kwargs)",
"def nwise_apply(df, method, n=2, comm=False, as_df=False, ds_names=True,\n must_include=None, **method_kwargs):\n\n numeric_df = df._get_numeric_data()\n cols = numeric_df.columns.values\n mat = numeric_df.values\n mat = mat.T\n applyf = method\n\n mask = np.isfinite(mat)\n\n # create the possible combinations of lines\n counter = list(range(mat.shape[0])) # get the number of lines?\n # ALL possible combinations of lines?\n perm = True if not comm else False\n combs = n_combinations(counter, n, must_include=must_include, permutations=perm)\n\n # find out how many variables the applyf returns\n result = []\n # apply the method using the first data set to find out the shape of c,\n c = applyf(*array_dropna(*[mat[i] for i in range(n)]))\n for index, value in enumerate(np.atleast_1d(c)):\n result.append(OrderedDict([(c, np.nan) for c in combs]))\n result = np.array(result) # array of OrderedDicts\n # each return value result is a dict that gets filled with dicts that have\n # the cols and keys and the results as values\n\n lut_comb_cols = dict()\n\n for comb in combs:\n valid = np.logical_and(*[mask[i] for i in comb]) # where all are True\n\n lut_comb_cols.update(dict(zip(comb, tuple(np.take(cols, comb)))))\n\n if not valid.any():\n continue\n if not valid.all():\n c = applyf(*[mat[i,:][valid] for i in comb], **method_kwargs)\n else:\n c = applyf(*[mat[i,:] for i in comb], **method_kwargs)\n\n for index, value in enumerate(np.atleast_1d(c)):\n result[index][comb] = value\n\n if as_df:\n if n != 2:\n raise ValueError('Array structure only available for n=2')\n else:\n if not ds_names:\n lut_comb_cols = None\n result = [_to_df(r, comm=comm, lut_names=lut_comb_cols) for r in result]\n else:\n if ds_names:\n formatted_results = []\n for r in result:\n formatted = OrderedDict()\n for k, v in r.items():\n formatted[tuple([lut_comb_cols[i] for i in k])] = v\n formatted_results.append(formatted)\n result = formatted_results\n\n if len(result) == 1:\n result = result[0]\n else:\n result = tuple(result)\n\n return result",
"def dimensionality_reduction(samples_data, config):\n\n drs = {\n 'pca': dr_pca,\n 'tsne': dr_tsne,\n 'rfc': dr_rfc,\n 'irfc': dr_irfc\n }\n\n uuids = samples_data.index[samples_data['selected'] == 1].tolist()\n x_train = samples_data.index[samples_data['train'] == 1].tolist()\n x_dev = samples_data.index[samples_data['dev'] == 1].tolist()\n x_test = samples_data.index[samples_data['test'] == 1].tolist()\n\n # Prompts the user to select an action\n dr = interaction.ask_action(constants.msg_dr, set(drs.keys()))\n if dr == 's':\n return None, None\n\n components = interaction.ask_number(constants.msg_components)\n to_cla = interaction.ask_yes_no(constants.msg_cla_clu)\n\n if to_cla:\n data, model = drs[dr].reduce(config, components, None, x_train, x_dev, x_test)\n\n else:\n data, model = drs[dr].reduce(config, components, uuids, None, None, None)\n\n return data, model",
"def transform(self, df):\n # CATEGORICAL FEATURES\n if self.categorical_columns:\n df.fillna({col: 'other' for col in self.categorical_columns}, inplace=True)\n df.replace('', {col: 'other' for col in self.categorical_columns}, inplace=True)\n print(self.aggregation_strategy)\n agg_df = df.groupby(self.aggregation_keys).aggregate(self.aggregation_strategy).reset_index()\n if self.vectorizor_compatibility:\n for col in self.categorical_columns:\n agg_df[col] = agg_df[col].map(lambda v: my_instance(v))\n agg_df.rename(columns={col: CATEGORICAL_FEATURE.format(name=col) for col in self.categorical_columns},\n inplace=True)\n return agg_df",
"def apply_along_axis(func1d, axis, arr, *args, **kwargs):\n arr = dask.array.core.asarray(arr)\n\n # Validate and normalize axis.\n arr.shape[axis]\n axis = len(arr.shape[:axis])\n\n # Rechunk so that analyze is applied over the full axis.\n arr = arr.rechunk(arr.chunks[:axis] + (arr.shape[axis:axis + 1], ) +\n arr.chunks[axis + 1:])\n\n # Test out some data with the function.\n test_data = numpy.ones(args[0].shape[1], dtype=arr.dtype)\n test_result = numpy.array(func1d(test_data, *args, **kwargs))\n\n # Map analyze over the data to get the result\n # Adds other axes as needed.\n result = arr.map_blocks(\n _apply_along_axis,\n name=dask.utils.funcname(func1d) + '-along-axis',\n dtype=test_result.dtype,\n chunks=(arr.chunks[:axis] + test_result.shape + arr.chunks[axis + 1:]),\n drop_axis=axis,\n new_axis=list(range(axis, axis + test_result.ndim, 1)),\n func1d=func1d,\n func1d_axis=axis,\n func1d_args=args,\n func1d_kwargs=kwargs,\n )\n\n return result",
"def shape_first_input_to_cop_dim(method, instance: BaseCopula, args, kwargs):\n x = args[0]\n args = list(args)\n\n columns = getattr(instance, \"_columns\", None)\n if isinstance(x, pd.Series):\n if columns is not None and set(x.index) == set(columns):\n x = x[columns] # order by columns\n x = x.to_numpy()[None, :] # cast to matrix\n elif isinstance(x, pd.DataFrame):\n if columns is not None:\n x = x.loc[:, columns] # order by columns\n else:\n if not isinstance(x, np.ndarray):\n x = np.asarray(x, float)\n if x.ndim == 1:\n x = x[None, :]\n\n if x.ndim == 1:\n x = x[None, :] # convert series or 1D vector to 2D vector\n\n if x.ndim != 2:\n raise ValueError(\"Input array must have 1 or 2 dimensions\")\n elif x.shape[1] != instance.dim:\n raise ValueError(\"Input array must have same dimension as copula\")\n\n args[0] = x\n\n return method(*args, **kwargs)",
"def process_features_data(features_data_frame, columns_to_drop, method=\"normalize\"):\n features_data_frame = features_data_frame.drop(\n columns=columns_to_drop\n )\n if method == \"standardize\":\n features_data_frame = standardize_data_set(features_data_frame)\n elif method == \"normalize\":\n features_data_frame = normalize_data_set(features_data_frame)\n else:\n raise ValueError(\"Invalid method: \", method)\n\n return features_data_frame",
"def scale(df, method='linreg', reference_index=0):\n dicton = globals()\n try:\n scaling_func = dicton[method]\n except KeyError as e:\n print('scaling method not found')\n raise e\n\n reference = df[df.columns.values[reference_index]]\n df = df.drop([df.columns.values[reference_index]], axis=1)\n #new_df = pd.DataFrame\n for series in df:\n df[series] = pd.Series(\n scaling_func(df[series].values, reference.values),\n index=df.index)\n\n df.insert(reference_index, reference.name, reference)\n\n return df",
"def quantile_normalisation(df, method='mean'):\n t = df.stack().groupby(df.rank(method='first').stack().astype(int))\n if method == 'mean':\n rank = t.mean()\n elif method == 'median':\n rank = t.median()\n else:\n raise NotImplemented(\"Unrecognised method %s\" % method)\n\n return df.rank(method='min').stack().astype(int).map(rank).unstack()",
"def pairwise_apply(df, method, comm=False):\n warnings.warn(\"pairwise_apply() is deprecated, use nwise_apply(..., n=2) instead\",\n DeprecationWarning)\n numeric_df = df._get_numeric_data()\n cols = numeric_df.columns\n mat = numeric_df.values\n mat = mat.T\n applyf = method\n K = len(cols)\n result_empty = np.empty((K, K), dtype=float)\n result_empty.fill(np.nan)\n\n # find out how many variables the applyf returns\n c = applyf(mat[0], mat[0])\n result = []\n for index, value in enumerate(np.atleast_1d(c)):\n result.append(result_empty)\n result = np.array(result)\n mask = np.isfinite(mat)\n for i, ac in enumerate(mat):\n for j, bc in enumerate(mat):\n if i == j:\n continue\n if comm and np.isfinite(result[0][i, j]):\n continue\n valid = mask[i] & mask[j]\n if not valid.any():\n continue\n if not valid.all():\n c = applyf(ac[valid], bc[valid])\n else:\n c = applyf(ac, bc)\n\n for index, value in enumerate(np.atleast_1d(c)):\n result[index][i, j] = value\n if comm:\n result[index][j, i] = value\n return_list = []\n for data in result:\n return_list.append(df._constructor(data, index=cols, columns=cols))\n\n if len(return_list) == 1:\n return return_list[0]\n else:\n return tuple(return_list)",
"def test_all_dynamic_dimension(self):\n from nitrous.module import dump\n\n D = Slice(Long, shape=(Any, Any, Any))\n X, Y, Z = range(3)\n\n @function(Long, a=D)\n def f(a):\n return a[2, 1, 2]\n\n m = module([f])\n # Should have run-time multiplications during index flattening.\n self.assertRegexpMatches(dump(m), \"mul\")\n self.assertEqual(m.f(self.data), 14)",
"def apply_data(self,\n data: 'Data',\n method: Callable) -> 'Data':\n dfs = np.array_split(data.data, mp.cpu_count(), axis = 0)\n pool = Pool()\n data.data = np.vstack(pool.map(method, dfs))\n pool.close()\n pool.join()\n pool.clear()\n return data",
"def process(self, df):\n pass",
"def get_scalers(df: DataFrame, columns, **kwargs) -> Callable:\n if type(columns) is str: # allow either a single string or a list of strings\n columns = [columns]\n scalers = [df[col].get_scaler(**kwargs) for col in columns]\n return partial(reduce, lambda df, f: df.pipe(f), scalers)",
"def apply(dataframe, function, **kwargs):\n if \"n_threads\" in kwargs:\n n_threads = kwargs.pop(\"n_threads\")\n else:\n n_threads = 1\n if n_threads == 1:\n return dataframe.swifter.apply(function, **kwargs)\n\n pool = multiprocessing.Pool(processes=n_threads)\n result = pool.map(\n _apply_df,\n [(d, function, i, kwargs)\n for i, d in enumerate(np.array_split(dataframe, n_threads))],\n )\n pool.close()\n result = sorted(result, key=lambda x: x[0])\n return pd.concat([i[1] for i in result])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Computes a distance matrix using the Jaccard distance on the input dataframe.
|
def get_distance_matrix(self, df):
dist = sklearn.neighbors.DistanceMetric.get_metric('jaccard')
distance_matrix = dist.pairwise(df.iloc[:,:].to_numpy())
print(f'Distance matrix : {distance_matrix}')
print(f'{len(distance_matrix)}, {len(distance_matrix[0])}')
distance_df = pd.DataFrame(distance_matrix, index=df.index, columns=df.index)
return distance_df
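# A minimal standalone sketch of the same Jaccard computation on a toy boolean feature
# matrix; the values and the helper name _jaccard_sketch are illustrative only. Note that
# recent scikit-learn releases expose DistanceMetric under sklearn.metrics rather than
# sklearn.neighbors.
import pandas as pd
import sklearn.neighbors

def _jaccard_sketch():
    df = pd.DataFrame([[1, 0, 1],
                       [1, 1, 0],
                       [0, 0, 1]],
                      index=['a', 'b', 'c'], columns=['f1', 'f2', 'f3'])
    dist = sklearn.neighbors.DistanceMetric.get_metric('jaccard')
    matrix = dist.pairwise(df.to_numpy())
    # Square, symmetric matrix with zeros on the diagonal, indexed like the input rows.
    return pd.DataFrame(matrix, index=df.index, columns=df.index)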
|
[
"def calculate_distance_matrix(data_frame, columns):\n\n # Initialise empty distance matrix\n num_rows = len(data_frame)\n distance_matrix = np.zeros((num_rows, num_rows))\n\n x_inds, y_inds = np.triu_indices(len(data_frame))\n\n for c in columns:\n distance_matrix[x_inds, y_inds] += calculate_alignment_scores(data_frame[c].iloc[x_inds],\n data_frame[c].iloc[y_inds])\n distance_matrix[y_inds, x_inds] += calculate_alignment_scores(data_frame[c].iloc[x_inds],\n data_frame[c].iloc[y_inds])\n\n return distance_matrix",
"def distance_matrix_calculate(self):\n qtd = self.mapa.shape[0]\n distancias = np.zeros([qtd, qtd])\n\n _temp_max = 0\n\n for i in range(qtd):\n for j in range(i, qtd):\n if i != j:\n b = self.mapa[i, 0] - self.mapa[j, 0]\n c = self.mapa[i, 1] - self.mapa[j, 1]\n a = np.sqrt(np.square(b) + np.square(c))\n\n distancias[i, j] = a\n distancias[j, i] = a\n\n if _temp_max < a:\n _temp_max = a\n\n self.distancias = distancias",
"def jaccard_distance(left: pandas.Series, right: pandas.Series) -> float:\n\n\tx_or_y = X_or_Y(left, right)\n\tx_and_y = X_and_Y(left, right)\n\treturn jaccard_distance_numeric(x_or_y, x_and_y)",
"def _get_distance_matrix(self):\n\n # implement here",
"def get_adjacency(dataframe):\n \n # Number of nodes in the graph\n n_nodes = dataframe.shape[0]\n\n # Calculate distances. Due to the high dimensional data (> 1300 dimensions) the cosine distance is chosen\n distances = np.zeros((n_nodes, n_nodes))\n \n for i, a in dataframe.iterrows():\n for j, b in dataframe.iterrows():\n dot_product = np.dot(a,b)\n distances[i,j] = 1 - dot_product/(np.linalg.norm(a,2)*np.linalg.norm(b,2))\n\n # Weights (gaussian) are assigned to each link based on the distance \n kernel_width = distances.std()\n weights = np.exp(-distances**2 / (2*kernel_width**2))\n\n # Set main diagonal to zero (No self-loops)\n np.fill_diagonal(weights,0)\n adjacency = weights.copy()\n return adjacency",
"def dist_matrix(data_coords_atom):\n \treturn pd.DataFrame(distance_matrix(data_coords_atom.iloc[:,3:],\n \t\tdata_coords_atom.iloc[:,3:]), index = data_coords_atom.iloc[:, 3:].index,\n \tcolumns = data_coords_atom.iloc[:, 3:].index)",
"def distance_matrix(input_, distance_measure,\n adjacency_matrix =[]):\n if distance_measure == \"eucledian_dist\":\n Npts= input_.shape[0]\n distance_matrix=np.zeros((Npts,Npts))\n \n for xi in range(Npts):\n for xj in range(xi, Npts):\n distance_matrix[xi,xj] = eucledian_dist(\n input_[xi],input_[xj])\n distance_matrix[xj,xi] = distance_matrix[xi,xj]\n \n return(distance_matrix)\n \n if distance_measure == \"commute_time_distance\":\n Npts= len(input_)\n distance_matrix=np.zeros((Npts,Npts))\n eigenvectors_matrix = np.zeros((Npts-1, Npts))\n eigenvalues_symm_list = []\n #Unpack eigenvalues and eigenvectors in a list/matrix\n for i in range(1, Npts):\n eigenvectors_matrix[i-1] = input_[i][1]\n eigenvalues_symm_list.append(input_[i][0])\n #Compute distance matrix\n D = diagonal_matrix(adjacency_matrix)\n #Scaling factor:\n scale_factor = 1 / np.array(eigenvalues_symm_list)\n for i in range(Npts):\n for j in range(i, Npts):\n c_ij= commute_time_distance(i, j, scale_factor, \n eigenvectors_matrix, D)\n distance_matrix[i][j] = c_ij\n distance_matrix[j][i] = c_ij\n \n return(distance_matrix)",
"def runJac(data_path):\n csv_path = data_path.split('.')[0] + '.csv'\n if not os.path.exists(csv_path):\n convert_jac_data_to_csv(data_path, csv_path)\n df = pd.read_csv(csv_path)\n\n n_samples = df.shape[0]\n dist_matrix = [[0] * n_samples for _ in range(n_samples)]\n for i in range(n_samples):\n for j in range(i+1, n_samples):\n curr_dist = Jaccard(df.iloc[i], df.iloc[j])\n dist_matrix[i][j] = curr_dist\n dist_matrix[j][i] = curr_dist\n # Produces the distance matrix\n if i % 50 == 0:\n print(i, dist_matrix[i])\n\n # scores[i] = score for i+2 clusters\n scores = [hierarchical(dist_matrix, k) for k in range(2, 21)]\n for k, score in enumerate(scores):\n print('number of clusters: %d, score: %.6f' % (k+2, score))\n print('best number of clusters is: %d' % (np.argmax(scores) + 2))\n\n\n # Plot the data using matplotlib\n # make the \"silhouette_score vs number of cluster\" plot\n plt.scatter(list(range(2, len(scores) + 2)), scores)\n plt.xlabel('number of clusters')\n plt.ylabel('silhouette score')\n plt.xticks(list(range(2, 21, 2)))\n plt.title('Score vs Clusters')\n plt.grid(linestyle='--')\n plt.savefig('hierarchical.png')\n plt.show()",
"def pairwise_distance_matrix(X, metric='cosine'):\n if metric=='cosine':\n pairwise_dists = cosine_distances(X)\n elif metric=='jaccard':\n pairwise_dists = squareform(pdist(X, metric='jaccard')) # slower than cosine\n return pairwise_dists",
"def _distance_matrix_core(args):\n sqadj, d, assignment = args\n n = len(sqadj)\n for i in assignment:\n for j in range(i):\n max_k = None\n max_value = -_np.inf\n\n for k in range(n):\n if k == i or k == j:\n continue\n else:\n value = abs(sqadj[i, k] - sqadj[j, k])\n if value > max_value:\n max_value = value\n max_k = k\n\n d[i,j] = d[j,i] = max_value",
"def dist_mat(df, linkage):\n\n even_num = [i for i in range(2, len(df) + 1) if i % 2 == 0]\n D = pd.DataFrame()\n ind = list(df.index)\n k = 0\n for i in ind:\n for j in ind[k:]:\n if i != j:\n\n a = df.loc[i].values\n b = df.loc[j].values\n z1 = [i for i in even_num if i <= len(a)]\n z2 = [i for i in even_num if i <= len(b)]\n a = [a[: z1[0]]] + [\n a[z1[i]: z1[i + 1]] for i in range(len(z1) - 1)\n ]\n b = [b[: z2[0]]] + [\n b[z2[i]: z2[i + 1]] for i in range(len(z2) - 1)\n ]\n\n if linkage == \"single\":\n D.loc[i, j] = sl_dist(a, b)\n elif linkage == \"complete\":\n D.loc[i, j] = cl_dist(a, b)\n elif linkage == \"average\":\n D.loc[i, j] = avg_dist(a, b)\n else:\n\n D.loc[i, j] = np.inf\n\n k += 1\n\n D = D.fillna(np.inf)\n\n return D",
"def _distance_matrix(self):\n def dist(ii, jj):\n \"\"\"\n Calculates a distance between two points at indices ii and jj in\n the xy data matrix.\n ARGS:\n ii, jj (int): Indices\n \"\"\"\n return (sqrt((self.xy[0][ii] - self.xy[0][jj]) ** 2 + (self.xy[1][ii] - self.xy[1][jj]) ** 2))\n return np.array([np.array([dist(ii, jj) for jj in range(len(self.xy[0]))]) for ii in range(len(self.xy[0]))])",
"def test_003_adjacency_matrix_ok(self):\n\n print(\"Test Three... To show that distance.pdist function calculates correctly on a pdb.cif file\")\n\n with open('./extracted_test_data/1j5a.cif') as infile:\n target_list = infile.read().split('\\n')\n df_1 = pd.DataFrame(data=target_list, columns=[\"header\"]) # Put list in a dataframe m X 1 column\n df_1 = df_1[:-1] # Removes additional row that is included\n cif_to_df_2 = df_1.header.str.split(expand=True) # Put dataframe to m x 20 columns\n critical_info_to_df_3 = cif_to_df_2.drop(columns=[0, 1, 2, 3, 4, 6, 7, 8, 9, 13, 14, 15, 16, 17, 18, 19, 20], axis=1) # df containing aa & coordinate positions\n print(critical_info_to_df_3.head())\n convert_to_array = critical_info_to_df_3.drop(columns=[5], axis=1).to_numpy() # Removes aa flag & contains only coordinate info\n calculate_distances = distance.pdist(convert_to_array, 'euclidean')\n make_square = distance.squareform(calculate_distances)\n print(make_square)\n\n assert df_1.shape[0] == cif_to_df_2.shape[0]\n assert cif_to_df_2.shape[0] == critical_info_to_df_3.shape[0]",
"def distance_matrix(fasta_file):\n\n if os.path.isfile(fasta_file):\n data = [i[1] for i in fasta_read(fasta_file)]\n else:\n raise ValueError(print(distance_matrix.__doc__))\n\n P = [[0 for j in range(len(data))] for i in range(len(data))]\n\n for i in range(len(data)):\n s1 = data[i]\n for j in range(i):\n s2 = data[j]\n p_dist = sum((s1[c] != s2[c]) for c in range(len(s1)))\n p_dist = p_dist / len(s1)\n P[i][j], P[j][i] = p_dist, p_dist\n\n with open('output_{}'.format(fasta_file), 'w') as fout:\n for i in P:\n for j in i:\n fout.write('{} '.format(j))\n fout.write('\\n')\n return P",
"def build_dist_table(self):\n alphabet_size = self.alphabetSize\n #print 'alpha size:',alphabet_size\n dist_matrix = np.zeros((alphabet_size, alphabet_size))\n for i in xrange(alphabet_size):\n # the min_dist for adjacent symbols are 0, so we start with i+2\n for j in xrange(i+2, alphabet_size):\n # square the distance now for future use\n dist_matrix[i, j] = (self.beta[i] - self.beta[j-1])**2\n # the distance matrix is symmetric\n dist_matrix[j,i] = dist_matrix[i, j]\n return dist_matrix",
"def euclidean_distance_matrix(x):\n r = np.sum(x*x, 1)\n r = r.reshape(-1, 1)\n distance_mat = r - 2*np.dot(x, x.T) + r.T\n return distance_mat",
"def getDistanceMatrix(self):\n\t\tnatoms=self.atoms\n\t\tdist = [[0.0 for i in range(natoms)] for j in range(natoms)]\n\t\tfor i in range(0,natoms):\n\t\t #dist[i][i]=0.0 #diagonal elements are zero\n\t\t for j in range(i+1, natoms):\n\t\t\ticoord=self.coordDict[i+1]\n\t\t\tjcoord=self.coordDict[j+1]\n\t\t\txdiff=icoord[0]-jcoord[0]\n\t\t\tydiff=icoord[1]-jcoord[1]\n\t\t\tzdiff=icoord[2]-jcoord[2]\n\t\t\tdist[i][j]=math.sqrt(xdiff*xdiff+ydiff*ydiff+zdiff*zdiff)\n\t\t\tdist[j][i]=dist[i][j] #matrix is symmetric\n\n\t\treturn dist",
"def GetDistanceMatrix(data,metric,isSimilarity=1):\n nPts = len(data)\n num_pairs = int(nPts*(nPts-1)/2)\n res = np.zeros(num_pairs ,np.float)\n nSoFar=0\n for col in range(1,nPts):\n for row in range(col):\n fp1 = data[col][1]\n fp2 = data[row][1]\n if fp1.GetNumBits()>fp2.GetNumBits():\n fp1 = DataStructs.FoldFingerprint(fp1,fp1.GetNumBits()/fp2.GetNumBits())\n elif fp2.GetNumBits()>fp1.GetNumBits():\n fp2 = DataStructs.FoldFingerprint(fp2,fp2.GetNumBits()/fp1.GetNumBits())\n sim = metric(fp1,fp2)\n if isSimilarity:\n sim = 1.-sim\n res[nSoFar] = sim\n nSoFar += 1\n return res",
"def test_004_adjacency_matrix_ok(self):\n\n print(\"Test Four... To show that distance.pdist function calculates correctly on a pdb.cif file\")\n\n with open('./extracted_test_data/1j5a.cif') as infile:\n target_list = infile.read().split('\\n')\n df_1 = pd.DataFrame(data=target_list, columns=[\"header\"]) # Put list in a dataframe m X 1 column\n df_1 = df_1[:-1] # Removes additional row that is included\n cif_to_df_2 = df_1.header.str.split(expand=True) # Put dataframe to m x 20 columns\n critical_info_to_df_3 = cif_to_df_2.drop(columns=[0, 1, 2, 3, 4, 6, 7, 8, 9, 13, 14, 15, 16, 17, 18, 19, 20], axis=1) # df containing aa & coordinate positions\n convert_to_array = critical_info_to_df_3.drop(columns=[5], axis=1).to_numpy() # Removes aa flag & contains only coordinate info\n calculate_distances = distance.pdist(convert_to_array, 'euclidean')\n make_square = distance.squareform(calculate_distances)\n\n for i in range(0,make_square.shape[1]):\n print(make_square[i,i])\n self.assertEqual(make_square[i,i], 0)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Clusters a dataframe using a given method.
|
def cluster_dataframe(self, df, method='HDBSCAN', distance_matrix=True, min_size=2, max_eps=None, join_clusterless_samples=True):
if method == 'HDBSCAN':
clusterer = hdbscan.HDBSCAN(min_cluster_size=2, cluster_selection_epsilon=0.5)
clusterer.fit(df)
elif method == 'OPTICS':
if distance_matrix:
if max_eps is not None:
clusterer = sklearn.cluster.OPTICS(min_samples=min_size, metric='precomputed', n_jobs=4, max_eps=max_eps)
else:
clusterer = sklearn.cluster.OPTICS(min_samples=min_size, metric='precomputed', n_jobs=4)
else:
clusterer = sklearn.cluster.OPTICS(min_samples=min_size, n_jobs=4)
clusterer.fit(df)
elif method == 'AggClustering':
if distance_matrix:
clusterer = sklearn.cluster.AgglomerativeClustering(
n_clusters=None,
affinity='precomputed',
linkage='average',
distance_threshold=0.95)
else:
                clusterer = sklearn.cluster.AgglomerativeClustering(
n_clusters=None,
distance_threshold=1)
clusterer.fit(df)
        elif method == 'BIRCH':
            # sklearn's Birch does not accept an affinity or distance_threshold argument,
            # so the estimator is fit directly on the supplied dataframe in both cases.
            if distance_matrix:
                clusterer = sklearn.cluster.Birch(
                    n_clusters=None)
            else:
                clusterer = sklearn.cluster.Birch(
                    n_clusters=None)
            clusterer.fit(df)
filenames = df.index.tolist()
clusters = {}
cluster_labels = []
if not join_clusterless_samples:
backwards_index = -1
for (filename, cluster) in zip(filenames, clusterer.labels_):
filename = filename.replace("/", "\\")
if not join_clusterless_samples and cluster == -1:
cluster = backwards_index
backwards_index -= 1
cluster_labels.append(cluster)
if cluster in clusters:
clusters[cluster].append(filename)
else:
clusters[cluster] = [filename]
return clusters, cluster_labels
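# A minimal standalone sketch of the 'OPTICS' branch above on a toy precomputed distance
# matrix; the values and the helper name _optics_sketch are illustrative only.
import pandas as pd
import sklearn.cluster

def _optics_sketch():
    dists = [[0.00, 0.10, 0.90, 0.85],
             [0.10, 0.00, 0.80, 0.95],
             [0.90, 0.80, 0.00, 0.05],
             [0.85, 0.95, 0.05, 0.00]]
    names = ['f1', 'f2', 'f3', 'f4']
    df = pd.DataFrame(dists, index=names, columns=names)
    clusterer = sklearn.cluster.OPTICS(min_samples=2, metric='precomputed')
    clusterer.fit(df)
    # labels_ assigns a cluster id per row; -1 marks samples left unclustered.
    return dict(zip(df.index, clusterer.labels_))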
|
[
"def get_cluster(df, cluster_method):\r\n df_sub = df.iloc[0:,\r\n [df.columns.get_loc(\"Sharpe\"),\r\n df.columns.get_loc(cluster_method)]]",
"def dynamicTreeCut(distance_df, func='hybrid', method='average', **cluster_kws):\n stats = importr('stats')\n dynamicTreeCut = importr('dynamicTreeCut')\n dist = stats.as_dist(distance_df)\n link = stats.hclust(dist, method=method)\n if func == 'hybrid':\n dist = stats.as_dist(distance_df)\n clustering = dynamicTreeCut.cutreeHybrid(link, distance_df, **cluster_kws)\n return np.array(clustering[0])\n elif func == 'dynamic':\n clustering = dynamicTreeCut.cutreeDynamic(link, **cluster_kws)\n return np.array(clustering)",
"def check_clustering_method(method):\n list_methods = ['kmeans']\n if method in list_methods:\n return method\n else:\n raise ValueError(\"The method should be in \" + list_methods)",
"def visualization_clusters(self, cmethod):\r\n \r\n # colors used to label data points\r\n colors = [\"red\",\"blue\",\"yellow\"]\r\n \r\n legends = [None, None, None]\r\n \r\n fig = plt.figure(cmethod)\r\n ax_cluster = Axes3D(fig)\r\n \r\n if (cmethod == self._kmeans):\r\n labels = self.kmeans.labels_\r\n smethod = \"kmeans\"\r\n elif (cmethod == self._spectral):\r\n labels = self.spectral.labels_\r\n smethod = \"Spectral clustering\"\r\n else:\r\n labels = self.hac.labels_\r\n smethod = \"HAC\"\r\n \r\n for i,v in enumerate(self.val_hist):\r\n # select cluster color/label\r\n l = labels[i]\r\n \r\n if (l == 0):\r\n cls = 'class 1'\r\n elif (l == 1):\r\n cls = 'class 2'\r\n else:\r\n cls = 'class 3'\r\n \r\n if (legends[l] == None):\r\n ax_cluster.scatter(v[0], v[1], v[2], marker='o', color=colors[l], s=20, label=cls)\r\n legends[l] = l\r\n else:\r\n ax_cluster.scatter(v[0], v[1], v[2], marker='o', color=colors[l], s=20)\r\n \r\n ax_cluster.text(v[0], v[1], v[2], '{0}'.format(i), size=5, zorder=1, color='k') \r\n \r\n ax_cluster.set_title(\"{0}\".format(smethod)) \r\n ax_cluster.set_xlabel('X')\r\n ax_cluster.set_ylabel('Y')\r\n ax_cluster.set_zlabel('Z')\r\n ax_cluster.legend()",
"def run_cluster_analysis(data, labels=None, random_state={\"spectral\": 0, \"kmeans\": 0}):\n method_scores = {}\n \n for method in CLUSTERING_METHODS:\n aris = []\n silhouettes = []\n dbs = []\n \n # For each method, we try\n # k={2, 3, ..., 6} number of clusters\n for k in NUM_CLUSTERS:\n ari_score, silhouette, db = cluster_(data, labels, method, k, random_state)\n \n aris.append(ari_score)\n silhouettes.append(silhouette)\n dbs.append(db)\n \n if labels is None:\n # patients\n method_scores[method, \"silhouette\"] = silhouettes\n method_scores[method, \"db\"] = dbs\n else:\n # pdx\n method_scores[method, \"ari\"] = aris\n \n plot_analysis_results(method_scores)\n scores_df = scores_to_dataframe(method_scores)\n return scores_df",
"def hierachical_clustering(df, distanceCut = 2):\n\n # distance matrix\n # print (df.values[:2, 1:5])\n # Y = pdist(df.values[:, 1:], 'correlation')\n Y = pdist(df.values, 'correlation')\n print(df.shape, Y.shape)\n\n # linkage matrix\n Z = linkage(Y, method='ward')\n Clus = fcluster(Z, distanceCut, criterion='distance')\n\n print(Clus) # This is cluster number for each row in df\n\n number_features, number_clusters = len(Clus), len(set(list(Clus)))\n print(\"number of features: \", number_features)\n print(\"number of communities: \", number_clusters)\n\n # Compile clusters\n ClusDict = {}\n for ii in range(number_features):\n # if ClusDict.has_key(Clus[ii]):\n if Clus[ii] in ClusDict:\n ClusDict[ Clus[ii] ].append(ii)\n else:\n ClusDict[ Clus[ii] ] = [ii]\n\n #print(ClusDict.items()[:3]) # This organizes cluster, members\n return Clus, ClusDict",
"def run(self, df):\n raise NotImplementedError",
"def __init__(self, df, polygon_id_col, class_col=None, som_architecture=None, k_max=12):\n assert type(df)==pd.DataFrame, 'df needs to be of type `pd.DataFrame`.'\n assert type(polygon_id_col)==str and type(class_col) in [str, type(None)], 'Both polygon_id_col and class_col need to be of type `str`.'\n assert polygon_id_col in df.columns, f'{polygon_id_col} not in dataframe.'\n self.methods = ['som', 'bhattacharyya', 'kmeans', 'hierarchical']\n if not hasattr(self, '_previous_cluster_col'): self._previous_cluster_col = False\n self._polygon_id = polygon_id_col\n self.class_col = class_col\n self.df = df.sort_values(by=self._polygon_id)\n self.k = k_max\n if som_architecture:\n self.som_architectures = np.expand_dims(np.array(som_architecture), 0)\n else:\n self.som_architectures = get_2Dcoordinates_matrix((5,5)).reshape((2,-1))\n self.som_architectures = self.som_architectures[:,np.apply_along_axis(lambda x: (x!=0).all() and (x!=1).any(), 0, self.som_architectures)].T\n\n if self.df[self._polygon_id].dtype == np.dtype('O'):\n self.is_string_identifier = True\n self.label_encoder = LabelEncoder().fit(self.df['Label'])\n self.df[self._polygon_id] = self.label_encoder.transform(self.df[self._polygon_id])\n else:\n self.is_string_identifier = False\n\n if class_col: drop_cols = [self._polygon_id, self.class_col]\n else: drop_cols = [self._polygon_id]\n\n polygon_list = np.split(self.df.drop(columns=drop_cols), np.where(np.diff(self.df[self._polygon_id]))[0]+1)\n # drop polygons with too few pixels to be relevant for classification\n self._polygon_list = [x for x in polygon_list]# if len(x)>=10]",
"def clustering_step_local(scores_df, expected_chroms,\n dots_clustering_radius, verbose):\n\n # using different bin12_id_names since all\n # pixels are annotated at this point.\n pixel_clust_list = []\n for chrom in expected_chroms:\n # probably generate one big DataFrame with clustering\n # information only and then just merge it with the\n # existing 'scores_df'-DataFrame.\n # should we use groupby instead of 'scores_df['chrom12']==chrom' ?!\n # to be tested ...\n df = scores_df[((scores_df['chrom1'].astype(str)==str(chrom)) &\n (scores_df['chrom2'].astype(str)==str(chrom)))]\n if not len(df):\n continue\n\n pixel_clust = clust_2D_pixels(\n df,\n threshold_cluster=dots_clustering_radius,\n bin1_id_name='start1',\n bin2_id_name='start2',\n verbose=verbose)\n pixel_clust_list.append(pixel_clust)\n if verbose:\n print(\"Clustering is over!\")\n # concatenate clustering results ...\n # indexing information persists here ...\n pixel_clust_df = pd.concat(pixel_clust_list, ignore_index=False)\n\n # now merge pixel_clust_df and scores_df DataFrame ...\n # # and merge (index-wise) with the main DataFrame:\n df = pd.merge(\n scores_df,\n pixel_clust_df,\n how='left',\n left_index=True,\n right_index=True)\n\n # report only centroids with highest Observed:\n chrom_clust_group = df.groupby([\"chrom1\", \"chrom2\", \"c_label\"])\n centroids = df.loc[chrom_clust_group[\"obs.raw\"].idxmax()]\n return centroids",
"def generate_clusters(self,D):\n\n condensed = squareform(D.dist_frame)\n linkage = hcl.average(condensed)\n self.clusters = hcl.fcluster(linkage,self.factor,criterion=self.criterion)\n\n self.num_clusters = n_clusters = len(np.unique(self.clusters)) - (1 if -1 in clusters else 0)\n self.cluster_labels = pd.DataFrame({'sequences' : D.dist_frame.index, \n 'cluster' : self.clusters})",
"def findclusters(shopping_data):\r\n\r\n rows = {} #dictionary for saving each row from the data frame\r\n count= {} # to keep count of points in every cluster\r\n\r\n last_10_cluster =[]\r\n\r\n for i in range(1,101):\r\n rows[i] = shopping_data.ix[i].values\r\n count[i]=1\r\n\r\n clust = set()\r\n\r\n #while loop where all the clustering is performed\r\n while len(rows)>1:\r\n minimum = 99999\r\n arg1 = 0\r\n arg2 = 0\r\n for rowi in rows:\r\n for rowj in rows:\r\n if(rowi!=rowj):\r\n dist = Euclidean_Distance(rows[rowi],rows[rowj])\r\n if(minimum > dist):\r\n arg1 = rowi\r\n arg2 = rowj\r\n minimum = dist\r\n\r\n row1 = rows[arg1]\r\n row2 = rows[arg2]\r\n ct1 = count[arg1]\r\n ct2 = count[arg2]\r\n row_avg = Average(row1,row2,ct1,ct2)\r\n\r\n #once the cluster is formed the original data points are popped\r\n rows.pop(arg1)\r\n rows.pop(arg2)\r\n count.pop(arg1)\r\n count.pop(arg2)\r\n\r\n s = str(arg1)+\"-\"+str(arg2)\r\n clust.add(s)\r\n rows[s] = row_avg\r\n count[s] = ct1+ct2\r\n minval = min (ct1,ct2)\r\n\r\n if(len(rows)<11):\r\n last_10_cluster.append(minval)\r\n\r\n\r\n print(\"The last 10 minimum cluster sizes are\")\r\n for val in last_10_cluster:\r\n print(str(val))\r\n\r\n myDendrogram(shopping_data)",
"def process_cluster(self, cluster):\n raise NotImplementedError",
"def cluster_stocks(data: pd.DataFrame, n_clusters=5, verbose=False):\n\n if not isinstance(n_clusters, int):\n raise ValueError(\"Total number of clusters must be integer.\")\n elif n_clusters < 2:\n raise ValueError(f\"Total number of clusters({len(data.columns)}) must be > 2.\")\n elif len(data.columns) < 3:\n raise ValueError(\n f\"Total number of stocks in pf({len(data.columns)}) must be > 2.\"\n )\n elif n_clusters > len(data.columns):\n raise ValueError(\n f\"Total number of clusters({n_clusters}) \"\n f\"must be <= number of stocks({len(data.columns)}) in pf\"\n )\n\n if isinstance(data.columns, pd.MultiIndex):\n data = clean_data(data)\n\n pf_return_means = mean_returns(data, type=\"log\")\n pf_daily_returns = daily_log_returns(data)\n pf_volatility = volatility(data)\n # format the data as a numpy array to feed into the K-Means algorithm\n data_ret_vol = np.asarray(\n [np.asarray(pf_return_means), np.asarray(pf_volatility)]\n ).T\n\n distorsions = []\n max_n_clusters = min(20, len(data.columns))\n\n for k in range(2, max_n_clusters):\n k_means = KMeans(n_clusters=k)\n k_means.fit(X=data_ret_vol)\n distorsions.append(k_means.inertia_)\n\n plt.plot(\n range(2, max_n_clusters),\n distorsions,\n linestyle=\"-\",\n color=\"red\",\n lw=2,\n label=\"Elbow curve\",\n )\n plt.title(\"Elbow curve\")\n plt.xlabel(\"Number of clusters\")\n plt.ylabel(\"Distortion\")\n plt.grid(True)\n plt.legend()\n\n # Step size of the mesh. Decrease to increase the quality of the VQ.\n h = 0.002 # point in the mesh [x_min, x_max]x[y_min, y_max].\n\n x_min, x_max = data_ret_vol[:, 0].min() - 0.1, data_ret_vol[:, 0].max() + 0.1\n y_min, y_max = data_ret_vol[:, 1].min() - 0.1, data_ret_vol[:, 1].max() + 0.1\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n\n km = KMeans(n_clusters=n_clusters)\n km.fit(data_ret_vol)\n\n centroids = km.cluster_centers_\n\n # Obtain labels for each point in mesh. 
Use last trained model.\n Z = km.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)\n\n # some plotting using numpy's logical indexing\n plt.figure(figsize=(10, 6))\n plt.imshow(\n Z,\n interpolation=\"nearest\",\n extent=(xx.min(), xx.max(), yy.min(), yy.max()),\n cmap=plt.cm.Paired,\n aspect=\"auto\",\n origin=\"lower\",\n )\n\n # Plot the centroids as a white X\n plt.scatter(\n centroids[:, 0], centroids[:, 1], marker=\"*\", s=420, color=\"white\", zorder=10\n )\n # Plot stocks\n plt.plot(data_ret_vol[:, 0], data_ret_vol[:, 1], \"o\", markersize=12)\n\n plt.title(\"K-means clustering\\n\" \"Centroids are marked with white star\")\n plt.xlabel(\"Returns\")\n plt.ylabel(\"Volatility\")\n\n idx, _ = vq(data_ret_vol, centroids)\n clusters = {}\n\n for i in list(set(idx)):\n clusters[i] = []\n\n for name, cluster in zip(pf_return_means.index, idx):\n clusters[cluster].append(name)\n\n # Calculating avg comulative daily return for each cluster and store\n # in pf_daily_returns under special stock name - avg{Cluster index}\n for i in list(set(idx)):\n s = \"avg\" + str(i)\n pf_daily_returns[s] = pf_daily_returns[clusters[i]].mean(axis=1)\n\n for n in range(n_clusters):\n # plot clusters\n plt.figure(figsize=(10, 6))\n\n for stock in clusters[n]:\n # plot stocks as grey lines\n plt.plot(pf_daily_returns[stock].cumsum(), \"gray\", linewidth=1)\n\n plt.title(f\"Cluster #{n}\")\n plt.ylabel(\"Daily returns cumulative sum\")\n # plot average to see cluster dynamic\n s = \"avg\" + str(n)\n plt.plot(pf_daily_returns[s].cumsum(), \"red\", linewidth=3)\n plt.xticks(rotation=30)\n plt.grid(True)\n\n if verbose:\n print(f\"Cluster #{n}\")\n print(clusters[n])\n\n return clusters",
"def cluster(**kwargs):\n def cluster_use_metadata_adder(func):\n def extended_test(self, *args, **kwargs):\n self.test_context.before()\n test_result = func(self, *args, **kwargs)\n return self.test_context.after(test_result)\n\n extended_test.__dict__.update(**func.__dict__)\n extended_test.__name__ = func.__name__\n\n Mark.mark(extended_test, ParametrizableClusterMetadata(**kwargs))\n return extended_test\n\n return cluster_use_metadata_adder",
"def runCoClustering(self):\n return 0",
"def dimensionality_reduction(self, df, method='tSNE'):\n\n if method == 'tSNE':\n tsne = sklearn.manifold.TSNE(n_components=2, perplexity=5, metric='precomputed')\n embedded_data = tsne.fit_transform(df)\n\n elif method == 'MCA':\n \n df.replace({0: \"False\", 1: \"True\"}, inplace = True)\n mca = prince.MCA(n_components=2)\n embedded_data = mca.fit_transform(df)\n\n elif method == 'NMDS':\n\n nmds = sklearn.manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12,\n dissimilarity=\"precomputed\",\n n_init=1)\n embedded_data = nmds.fit_transform(df)\n\n df_embedded = pd.DataFrame(embedded_data, index=df.index)\n return df_embedded",
"def clustering_step(scores_file, expected_chroms, ktypes, fdr,\n dots_clustering_radius, verbose):\n res_df = pd.read_hdf(scores_file, 'results')\n\n # do Benjamin-Hochberg FDR multiple hypothesis tests\n # genome-wide:\n for k in ktypes:\n res_df[\"la_exp.\"+k+\".qval\"] = get_qvals( res_df[\"la_exp.\"+k+\".pval\"] )\n\n # combine results of all tests:\n res_df['comply_fdr'] = np.all(\n res_df[[\"la_exp.\"+k+\".qval\" for k in ktypes]] <= fdr,\n axis=1)\n\n # print a message for timing:\n if verbose:\n print(\"Genome-wide multiple hypothesis testing is done.\")\n\n # using different bin12_id_names since all\n # pixels are annotated at this point.\n pixel_clust_list = []\n for chrom in expected_chroms:\n # probably generate one big DataFrame with clustering\n # information only and then just merge it with the\n # existing 'res_df'-DataFrame.\n # should we use groupby instead of 'res_df['chrom12']==chrom' ?!\n # to be tested ...\n df = res_df[(res_df['comply_fdr'] &\n (res_df['chrom1']==chrom) &\n (res_df['chrom2']==chrom))]\n\n pixel_clust = clust_2D_pixels(\n df,\n threshold_cluster=dots_clustering_radius,\n bin1_id_name='start1',\n bin2_id_name='start2',\n verbose=verbose)\n pixel_clust_list.append(pixel_clust)\n if verbose:\n print(\"Clustering is over!\")\n # concatenate clustering results ...\n # indexing information persists here ...\n pixel_clust_df = pd.concat(pixel_clust_list, ignore_index=False)\n\n # now merge pixel_clust_df and res_df DataFrame ...\n # # and merge (index-wise) with the main DataFrame:\n df = pd.merge(\n res_df[res_df['comply_fdr']],\n pixel_clust_df,\n how='left',\n left_index=True,\n right_index=True)\n\n # report only centroids with highest Observed:\n chrom_clust_group = df.groupby([\"chrom1\", \"chrom2\", \"c_label\"])\n centroids = df.loc[chrom_clust_group[\"obs.raw\"].idxmax()]\n return centroids",
"def cluster(self, vectors, *args, **kwargs):\n\n pass",
"def main():\n df = pd.read_csv(\"HW_07_SHOPPING_CART_v137.csv\", header=0)\n df.index = df.ID\n del df['ID']\n global points\n points = {}\n for index, row in df.iterrows():\n # if(index <):\n points[index] = row.tolist()\n global all_clusters, clusters, cluster_number, total_number_of_features\n\n total_number_of_features = len(points[1])\n all_clusters = []\n for index, point in points.items():\n all_clusters.append(Cluster(index))\n all_clusters[index - 1].mean = point\n all_clusters[index - 1].guest_ids.append(index)\n\n cluster_number[len(all_clusters)] = all_clusters\n perform_clustering()\n smallest_cluster()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Counts the number of common commits between two clusters. Takes a dataframe containing the commits as columns and the files/lines/... as rows. Takes a dict containing the clusters.
|
def count_clusters_common_commits(self, df, clusters, lines=False):
clusters_extended = {}
for key, value in clusters.items():
number_common_commits = 0
for column in df:
number_common_files_commit = 0
for filename in value:
if df.loc[filename, column] == 1:
number_common_files_commit += 1
if number_common_files_commit == len(value):
number_common_commits += 1
if lines:
value = self.parse_fileline(value)
clusters_extended[key] = (number_common_commits, value)
# print(f'Cluster {key}, {number_common_commits} common commits : {value}\n')
return clusters_extended
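# A minimal standalone sketch of the counting logic above: with one cluster holding
# 'a.py' and 'b.py', only commit 'c1' touches both files, so the expected count is 1.
# The file and commit names are illustrative only.
import pandas as pd

def _common_commits_sketch():
    df = pd.DataFrame([[1, 0, 1],
                       [1, 1, 0],
                       [0, 1, 1]],
                      index=['a.py', 'b.py', 'c.py'], columns=['c1', 'c2', 'c3'])
    clusters = {0: ['a.py', 'b.py']}
    common = 0
    for column in df:
        if all(df.loc[name, column] == 1 for name in clusters[0]):
            common += 1
    return common  # -> 1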
|
[
"def count_clusters(dsetclusters, dsetreads):\n\n countdict = {\n 'All': sum(dsetclusters['morethan1']['All']['info_dict'].values()),\n 'PAS': sum(dsetclusters['morethan1']['wPAS']['info_dict'].values())}\n\n return countdict",
"def _count_cluster_reannotations(operons: List[genes.Operon],\n reannotated_operons: Dict[str, genes.Operon]) -> ReannotationCounts:\n cluster_reannotations = defaultdict(dict)\n\n for operon in operons:\n reannotated_operon = reannotated_operons.get(operon.contig)\n if reannotated_operon is None:\n # We don't have a reannotation for this particular operon\n # This is common if we only re-BLAST a subset of our data\n # for efficiency's sake\n continue\n\n # Count the reannotations for a single pair of operons and merge those counts into the \n # running total for the entire cluster\n update = _count_reannotations(operon, reannotated_operon)\n for feature_name, reannotation_data in update.items():\n for reannotated_feature_name, count in reannotation_data.items():\n current_count = cluster_reannotations[feature_name].get(reannotated_feature_name, 0)\n cluster_reannotations[feature_name][reannotated_feature_name] = count + current_count\n\n # If we get no reannotations for feature_name, we still need to add feature_name to the\n # cluster_reannotations dictionary, so that we can see when features are NEVER reannotated\n if not cluster_reannotations.get(feature_name):\n cluster_reannotations[feature_name] = defaultdict(int)\n return cluster_reannotations",
"def get_cluster_count(imp_centers, subs):\r\n center_tree = spatial.KDTree(imp_centers)\r\n cluster_count = {i: 0 for i in range(len(imp_centers))}\r\n for sub in subs:\r\n cluster_count[center_tree.query(sub)[1]] += 1\r\n cluster_count_freq = sorted(cluster_count.items(), key=operator.itemgetter(1), reverse=True)\r\n cluster_count = cluster_count.items()\r\n return cluster_count",
"def _count_cooccurrences(files, target_file, synonym_file, window):\n get_cooccurrences.get_cooccurrences(files[INDEX_FILE_PATH], target_file,\n synonym_file, window, files[WORD_COUNT_FILE_PATH],\n files[COOCCURRENCE_FILE_PATH])",
"def count_managed_clusters(filter: str = None,\r\n configuration: Configuration = None,\r\n secrets: Secrets = None) -> int:\r\n logger.debug(\r\n \"Start count_managed_clusters: configuration='{}', filter='{}'\".format(\r\n configuration, filter))\r\n\r\n managed_clusters = fetch_resources(filter, RES_TYPE_AKS, secrets, configuration)\r\n return len(managed_clusters)",
"def get_cluster_nodegroup_count(self, context, cluster_id):",
"def summarize_clusters(df, clusters, num_repft, repft_min_sup):\n print(\"[clustering] summarizing clusters...\")\n out = []\n phrase_cnts = df.phrases.explode().value_counts()\n phrase_cnts = phrase_cnts[phrase_cnts > 100]\n genre_cnts = df.genres.explode().value_counts()\n genre_cnts = genre_cnts[genre_cnts > 100]\n\n for c in np.unique(clusters):\n docs = df[clusters == c]\n\n pcnt = docs.phrases.explode().value_counts().sort_values()\n pkws = (-pcnt / phrase_cnts).dropna().sort_values().index\n\n gcnt = docs.genres.explode().value_counts().sort_values()\n gkws = (-gcnt / genre_cnts).dropna().sort_values().index\n\n out.append(\n {\n \"count\": len(docs),\n \"quality_phrases\": list(pkws[:10]),\n \"genres\": list(gkws[:10]),\n }\n )\n\n with open(\"data/out/cluster_summaries.json\", \"w\") as outf:\n json.dump(out, outf)",
"def contingency_table(c1,c2):\n\n # check that data clustering info is consistent\n num_data = validate_cluster_data(c1, c2)\n\n if num_data:\n c1_labels = list(set(c1))\n c1_ind = {}\n for l in c1_labels: c1_ind[l] = []\n c2_labels = list(set(c2))\n c2_ind = {}\n for l in c2_labels: c2_ind[l] = []\n for i in range(num_data):\n l1 = c1[i]; l2 = c2[i]\n c1_ind[l1].append(i)\n c2_ind[l2].append(i)\n\n # create contingency table\n cont = []\n for i in range(len(c1_labels)):\n l1 = c1_labels[i]\n cont.append([0]*len(c2_labels))\n for j in range(len(c2_labels)):\n l2 = c2_labels[j]\n cont[i][j] = len(set(c1_ind[l1]) & set(c2_ind[l2]))\n\n cont_row_sums = []\n for i in range(len(c1_labels)):\n cont_row_sums.append(sum(cont[i]))\n cont_col_sums = []\n for j in range(len(c2_labels)):\n cont_col_sums.append(sum([cont[i][j] for i in range(len(cont))]))\n\n sum_rowsums_squared = sum([n*n for n in cont_row_sums])\n sum_colsums_squared = sum([n*n for n in cont_col_sums])\n all_cont = sum(cont, [])\n sum_n = sum(all_cont)\n sum_n_squared = sum([n**2 for n in all_cont])\n\n # pair counts\n N = [[0,0],[0,0]]\n N[0][0] = 0.5*(num_data*num_data + sum_n_squared - (sum_rowsums_squared + sum_colsums_squared))\n N[0][1] = 0.5*(sum_rowsums_squared - sum_n_squared)\n N[1][0] = 0.5*(sum_colsums_squared - sum_n_squared)\n N[1][1] = 0.5*(sum_n_squared - sum_n)\n\n return N\n else:\n return None",
"def get_cluster_count_all(self, context, filters=None):",
"def _find_cluster_cohesion(self, clusterid, ignorelist=[]):\n classids = self._clusterid2classid[clusterid]\n\n if len(classids) == 0:\n print(\"No classes in cluster\", clusterid)\n return 0.\n\n cluster_cohesion = 0.\n\n for classid in classids:\n if classid in ignorelist:\n continue\n #get the row for this class\n apicalls = self._usage_matrix[classid]\n # if classid == 10:\n # print(\"for clusterid:\", clusterid, \"classid\", classid, apicalls)\n class_intercluster_prop = self._find_intercluster_prop(classid, apicalls, classids, ignorelist, weighted=WTS)\n self._class_intercluster_props[classid] = class_intercluster_prop\n cluster_cohesion += class_intercluster_prop\n\n if WTS is None:\n return cluster_cohesion\n else:\n return cluster_cohesion / (len(classids) - len(ignorelist))",
"def count_coocs(self):\n\n print(\"counting co-occurrences...\")\n starttime = default_timer()\n\n global coocs_raw_\n global chunks_\n global corpus_\n\n corpus_ = self.corpus\n # offloading\n\n with concurrent.futures.ProcessPoolExecutor() as executor:\n chunks_ = [list(lines) for lines in divide(self.jobs, range(len(self.corpus)))]\n ws = self.windowsize\n vs = self.vocabsize\n mt = self.modeltype\n\n futures = {executor.submit(coocs_worker, chunk_id, ws, mt, vs)\n for chunk_id in range(len(chunks_))}\n for future in concurrent.futures.as_completed(futures):\n coocs_chunk = future.result()\n # csr matrix\n self.coocs_raw += coocs_chunk\n # adding csr matrices to get total co-occurrences\n # currently this is done sequentially, parallel reduce would be great!\n\n corpus_ = ''\n # resetting\n delta = default_timer() - starttime\n delta = str(timedelta(seconds=delta)).split('.')[0]\n print(\"done ({})\".format(delta))",
"def annotate_cluster(annotation, clusters):\n annotated_clusters = {}\n total_conflicts = defaultdict(lambda: defaultdict(int))\n unresolved_conflicts = defaultdict(lambda: defaultdict(int))\n\n for cluster in clusters:\n protein_id_list = [id_tag.split(\"|\")[1] for id_tag in clusters[cluster].split(\",\")]\n\n summary_annotation = defaultdict(int)\n\n for protein_id in protein_id_list:\n if not protein_id in annotation:\n continue\n else:\n protein_info = annotation[protein_id]\n summary_annotation[protein_info] += 1\n\n if len(summary_annotation) == 0:\n continue\n\n elif len(summary_annotation) == 1:\n annotated_clusters[cluster] = summary_annotation.keys()[0]\n\n else:\n total_conflicts[cluster] = summary_annotation\n #Get the total number of values\n total_annotations = sum(summary_annotation.itervalues())\n top_hit = None\n\n for hit in summary_annotation:\n if summary_annotation[hit] / float(total_annotations) > float(0.5):\n top_hit = hit\n\n #Check where no decision was made\n if top_hit is None:\n unresolved_conflicts[cluster] = summary_annotation\n else:\n annotated_clusters[cluster] = top_hit\n\n return annotated_clusters, total_conflicts, unresolved_conflicts",
"def merge_nodes(self, node1, node2, initial_commit_graph, df):\n\n new_commit_graph = copy.deepcopy(initial_commit_graph)\n\n # Etapes pour merger les nodes\n # 1. Get list of out connections with a dict\n # eg. {node3 : 5, node4 : 6}\n # 2. Get list of in connections with a dict\n # 3. Merge nodes\n\n # 1 and 2\n\n connections = {}\n\n index = list(df.index)\n new_node_row = []\n\n for column in df.columns:\n if df.at[node1, column] == 1 or df.at[node2, column] == 1:\n new_node_row.append(1)\n for neighbor in index:\n if df.at[neighbor, column] == 1 and neighbor not in [node1, node2]:\n if neighbor not in connections:\n connections[neighbor] = 1\n else:\n connections[neighbor] += 1\n else:\n new_node_row.append(0)\n\n new_node_row = [new_node_row]\n\n\n '''\n for neighbor in initial_commit_graph.adj[node1]:\n if neighbor != node2:\n if neighbor not in connections:\n connections[neighbor] = initial_commit_graph.edges[node1, neighbor]['number_modifications_same_commit']\n else:\n connections[neighbor] += initial_commit_graph.edges[node1, neighbor]['number_modifications_same_commit']\n \n for neighbor in initial_commit_graph.adj[node2]:\n if neighbor != node1:\n if neighbor not in connections:\n connections[neighbor] = initial_commit_graph.edges[node2, neighbor]['number_modifications_same_commit']\n else:\n connections[neighbor] += initial_commit_graph.edges[node2, neighbor]['number_modifications_same_commit']\n '''\n\n\n new_commit_graph.remove_node(node1)\n new_commit_graph.remove_node(node2)\n\n new_node = f'{node1}:{node2}'\n new_commit_graph.add_node(new_node)\n\n new_row = pd.DataFrame(new_node_row, columns=list(df.columns), index=[new_node])\n new_df = df.drop(labels=[node1, node2])\n new_df = new_df.append(new_row)\n\n for neighbor, num_mod in connections.items():\n new_commit_graph.add_edge(new_node, neighbor)\n new_commit_graph.edges[new_node, neighbor]['number_modifications_same_commit'] = num_mod\n\n \n return new_commit_graph, new_df",
"def extract_chromosome_counts(ssm_or_cnsm_df):\n if \"icgc_mutation_id\" in ssm_or_cnsm_df.columns:\n ssm_or_cnsm_df = ssm_or_cnsm_df[[\"icgc_donor_id\", \"icgc_mutation_id\", \"chromosome\"]]\n ssm_or_cnsm_df = ssm_or_cnsm_df.drop_duplicates()\n else:\n ssm_or_cnsm_df = ssm_or_cnsm_df[[\"icgc_donor_id\", \"chromosome\"]].reset_index()\n ssm_or_cnsm_df = ssm_or_cnsm_df.drop_duplicates()\n ssm_or_cnsm_df = ssm_or_cnsm_df.groupby([\"icgc_donor_id\", \"chromosome\"]).count().reset_index()\n donors = ssm_or_cnsm_df[\"icgc_donor_id\"].unique()\n chromosomes = ssm_or_cnsm_df[\"chromosome\"].unique()\n if \"icgc_mutation_id\" in ssm_or_cnsm_df.columns:\n helper_list = [list(a) for a in zip(ssm_or_cnsm_df[\"icgc_donor_id\"], ssm_or_cnsm_df[\"chromosome\"],\n ssm_or_cnsm_df[\"icgc_mutation_id\"])]\n else:\n helper_list = [list(a) for a in zip(ssm_or_cnsm_df[\"icgc_donor_id\"], ssm_or_cnsm_df[\"chromosome\"],\n ssm_or_cnsm_df[\"index\"])]\n feature_df = pd.DataFrame(0, index=donors, columns=chromosomes, dtype=\"int16\")\n for cn in helper_list:\n feature_df.at[cn[0], cn[1]] = cn[2]\n\n return feature_df",
"def draw_clusters(clusters):\n bjp_pos = read_file(collect.BJP_POS_USER_FILE)['results']\n set_bjp_pos = set(bjp_pos)\n bjp_neg = read_file(collect.BJP_NEG_USER_FILE)['results']\n set_bjp_neg = set(bjp_neg)\n con_pos = read_file(collect.CON_POS_USER_FILE)['results']\n set_con_pos = set(con_pos)\n con_neg = read_file(collect.CON_NEG_USER_FILE)['results']\n set_con_neg = set(con_neg)\n count = 2\n for cluster in clusters:\n cluster_bjp_pos = set()\n cluster_bjp_neg = set()\n cluster_con_pos = set()\n cluster_con_neg = set()\n cluster_neutral = set()\n for n in cluster.nodes():\n if n in set_bjp_pos:\n cluster_bjp_pos.add(n)\n elif n in set_bjp_neg:\n cluster_bjp_neg.add(n)\n elif n in set_con_pos:\n cluster_con_pos.add(n)\n elif n in set_con_neg:\n cluster_con_neg.add(n)\n else:\n cluster_neutral.add(n)\n draw_graph(cluster, cluster_bjp_neg, cluster_bjp_pos, cluster_con_neg, cluster_con_pos, cluster_neutral, count,\n 'cluster_' + str(count - 1), 'community detection - cluster '+ str(count - 1) + '\\n Neutral Users - Purple | '\n 'Positive for BJP - Green | '\n 'Negative for BJP - Red | \\n '\n 'Positive for Congress - Blue | '\n 'Negative for Congress - Yellow ')\n count += 1",
"def get_intersection_matrix(pair_names, unions_names, cutoff, dset_dict):\n\n dset_nr = len(pair_names)+1 #pairs and union\n\n # Counter is 3-dimensional for keeping both abs number of intersection AND\n # percentages. \n\n counter = np.zeros([dset_nr, cutoff, 2]) # 0-based\n\n # Get the pairs \n for (indx1, (main_name, sub_name)) in enumerate(pair_names):\n # Get the pair-dsets\n main_dset = dset_dict[main_name]\n sub_dset = dset_dict[sub_name]\n\n # Iterate through all (polyA-cluster, read_count) points in the\n # datasets, and add the polyA-clusters to two temporary lists, indexed\n # by the read count from 0 to cutoff-1.\n main_cls = [[] for val in range(cutoff)]\n sub_cls = [[] for val in range(cutoff)]\n\n for (dset, dset_l) in [(main_dset, main_cls), (sub_dset, sub_cls)]:\n\n for (read_nr, clusters) in dset.iteritems():\n if read_nr <= 0:\n debug()\n if read_nr > cutoff-1:\n dset_l[cutoff-1].append(clusters) # add if > cutoff\n else:\n dset_l[read_nr-1] = clusters\n\n #if dset_l[-1] != []:\n #debug()\n\n # Flatten the last arrays\n main_cls[-1] = sum(main_cls[-1], [])\n sub_cls[-1] = sum(sub_cls[-1], [])\n\n # Get number of intersections \n isect_nrs = [len(set.intersection(set(main_cls[count]),\n set(sub_cls[count]))) for count in\n range(0, cutoff)]\n\n # Get percent of intersection relative to 'main' dataset (will be all or\n # annot)\n isect_pcnt = []\n for (indx, isect_nr) in enumerate(isect_nrs):\n\n # Only calculate percentage if more than 1 cluster with this read count\n if main_cls[indx] != 0:\n isect_pcnt.append(isect_nrs[indx]/len(main_cls[indx]))\n else:\n isect_pcnt.append(0)\n\n # Add the number and intersection to the array\n counter[indx1,:,0] = isect_nrs\n counter[indx1,:,1] = isect_pcnt\n\n # Now all the pairs have been added. Add the unions\n # Take the union of all dsetsxcept\n all_cls = [[] for val in range(cutoff)]\n\n # add all the clusters from the union datasets to all_cls\n for u_name in unions_names:\n for (read_nr, clusters) in dset_dict[u_name].iteritems():\n\n if read_nr > cutoff-1:\n all_cls[cutoff-1].append(clusters) # add if > cutoff\n else:\n all_cls[read_nr-1].append(clusters)\n\n # flatten all_cls (which has all the clusters in the union dsets)\n # and take union at the same tim\n all_cls = [sum(el, []) for el in all_cls]\n\n # Get number of intersections \n # (using main_cls from the previous for-loop -- dirty :S)\n all_I_nrs = [len(set.intersection(set(main_cls[count]),\n set(all_cls[count]))) for count in\n range(0, cutoff)]\n\n # Get percent of intersection relative to 'main' dataset (will be all or annot)\n all_I_pcnt = []\n for (indx, isect_nr) in enumerate(isect_nrs):\n\n # Only calculate percentage if more than 1 cluster with this read count\n if main_cls[indx] != 0:\n all_I_pcnt.append(all_I_nrs[indx]/len(main_cls[indx]))\n else:\n all_I_pcnt.append(0)\n\n # Add the number and intersection to the array\n counter[-1,:,0] = all_I_nrs\n counter[-1,:,1] = all_I_pcnt\n\n ### flip things around; put union row first. This is for better compliance\n # with downstream code\n\n newcount = np.zeros([dset_nr, cutoff, 2])\n newcount[0] = counter[-1]\n newcount[1:] = counter[0:-1]\n\n return newcount",
"def test_entity_relation_co_occurrence_dataframe(self):\n for labels, merge_sides, merge_subsets in itertools.product((False, True), repeat=3):\n _test_count_dataframe(\n dataset=self.dataset,\n df=dataset_analysis.get_entity_relation_co_occurrence_df(\n dataset=self.dataset,\n merge_sides=merge_sides,\n merge_subsets=merge_subsets,\n add_labels=labels,\n ),\n labels=labels,\n merge_subsets=merge_subsets,\n merge_sides=merge_sides,\n )",
"def merge(c1, c2):\n global number_of_effective_clusters, all_clusters\n number_of_effective_clusters += 1\n new_cluster = Cluster(number_of_effective_clusters)\n new_cluster.guest_ids = list(set(c1.guest_ids + c2.guest_ids))\n new_cluster.mean = get_mean(new_cluster.guest_ids)\n all_clusters.append(new_cluster)\n\n for i, o in enumerate(all_clusters):\n if o.id == c1.id:\n del all_clusters[i]\n\n for i, o in enumerate(all_clusters):\n if o.id == c2.id:\n del all_clusters[i]",
"def analyze_correlation_treecommit_graph(self, pairs_of_modified_files):\n\n for (node1, node2) in pairs_of_modified_files:\n \n if node1 in self.repo_files_path and node2 in self.repo_files_path:\n\n # Find common prefix\n path_prefix = os.path.commonpath([node1, node2])\n \n if len(path_prefix) > 0:\n path_prefix_split = path_prefix.split('\\\\')\n tree_commit_node_name1 = node1[len(path_prefix)+1:].split('\\\\')[0]\n tree_commit_node_name2 = node2[len(path_prefix)+1:].split('\\\\')[0]\n else:\n path_prefix_split = []\n tree_commit_node_name1 = node1[len(path_prefix):].split('\\\\')[0]\n tree_commit_node_name2 = node2[len(path_prefix):].split('\\\\')[0]\n\n # Create or update edge in TreeCommit graph\n self.commit_tree_graph.add_edge(path_prefix_split, tree_commit_node_name1, tree_commit_node_name2)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Print all the commits of a repo.
|
def print_commits(self):
for commit in self.repository_mining.traverse_commits():
print(f'Commit : {commit.hash}')
print(f'Parents : {commit.parents}')
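
# Illustrative addendum (not part of the original class): a minimal, self-contained
# sketch of the same traversal using pydriller 2.x directly. The repository path
# 'path/to/repo' is a hypothetical placeholder.
import pydriller

def print_commits_standalone(repo_path='path/to/repo'):
    for commit in pydriller.Repository(repo_path).traverse_commits():
        print(f'Commit : {commit.hash}')
        print(f'Parents : {commit.parents}')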
|
[
"def all_commits(repo: Optional[str] = None) -> List[str]:\n with Repo.open(repo) as _repo:\n return _repo.scm.list_all_commits()",
"def collect_commits(self, args):\n\t\t# call a get_repo function\n\t\trepo_list = self.get_repo(args)\n\t\tprint(\"\\n\\tRepositories:\\n \", repo_list)\n\t\ttry:\n\t\t\tfor repo_name in repo_list:\n\t\t\t\tprint(repo_name, \"Repository\")\n\t\t\t\trepo = self.organization.get_repo(repo_name)\n\t\t\t\tbranch_list = self.get_branch(repo_name, args)\n\t\t\t\tprint(\"branches: \", branch_list)\n\t\t\t\t\n\t\t\t\tfor branch in branch_list:\n\t\t\t\t\tgit_branch = self.organization.get_repo(repo_name).get_branch(branch)\n\t\t\t\t\tbranch_commit = git_branch.commit\n\t\n\t\t\t\t\ttotal_commits = repo.get_commits(sha=branch_commit.sha).totalCount\n\t\t\t\t\tprint(\"total number of commits in \",repo_name,\" of branch \", branch, \" is: \", total_commits)\n\n\t\t\t\t\t# since there are 100 commits in a single page we can easily get the total number of page by dividing the total commits with 100\n\t\t\t\t\ttotal_page = total_commits / args.per_page\n\t\t\t\t\tif total_page is not int:\n\t\t\t\t\t\ttotal_page = math.ceil(total_page)\n\t\t\t\t\tprint(\"The total number of page is: \" + str(total_page))\n\n\t\t\t\t\t#print(repo.get_commits().get_page(rel='last'))\n\t\t\t\t\tpage = 0\n\t\t\t\t\tnum_of_commits = 0\n\t\t\t\t\twhile page < total_page:#just for testing but actually its till last page\n\t\t\t\t\t\tcommit_list = []\n\t\t\t\t\t\tprint(\"\\n\\tpage: \", page)\n\t\t\t\t\t\t\n\t\t\t\t\t\tfor commit in repo.get_commits(sha=branch_commit.sha).get_page(page):\n\t\t\t\t\t\t\tcommit_dict = {}\n\t\t\t\t\t\t\tcommit_dict['author'] = commit.author\n\t\t\t\t\t\t\tcommit_dict['sha'] = commit.sha\n\t\t\t\t\t\t\tcommit_dict['files'] = commit.files\n\t\t\t\t\t\t\tcommit_dict['stats'] = commit.stats\n\t\t\t\t\t\t\tcommit_dict['commit'] = commit.commit\n\t\t\t\t\t\t\tcommit_dict['committer'] = commit.committer\n\t\t\t\t\t\t\tcommit_dict['comments_url'] = commit.comments_url\n\t\t\t\t\t\t\tcommit_dict['html_url'] = commit.html_url\n\t\t\t\t\t\t\tcommit_dict['parents'] = commit.parents\n\t\t\t\t\t\t\tcommit_dict['url'] = commit.url\n\t\t\t\t\t\t\tcommit_list.append(commit_dict)\n\n\t\t\t\t\t\t\tnum_of_commits += 1\n\t\t\t\t\t\t\tprint(num_of_commits)\n\n\t\t\t\t\t\twith open(args.org + \"/\" + repo_name+\"/\"+args.event_type+\"/\"+branch+\"_branch/\" + args.org + \"-\" +\n\t\t\t\t\t \t\trepo_name + \"-\"+branch+\"_branch-\" + args.event_type + \"-page-\" + str(page) + \".json\", 'w') as f:\n\t\t\t\t\t\t\tf.write(str(commit_list))\n\n\t\t\t\t\t\tprint(\"page \", page, \" added to file\")\n\t\t\t\t\t\tself.sleeper()\n\t\t\t\t\t\tpage += 1\n\n\t\t\tprint(\"commit data successfully collected\")\n\t\texcept Exception as e:\n\t\t\tprint(\"Problem Occured: \", e)",
"def test_repo_get_all_commits(self):\n pass",
"def rev_list(repo, commits, outstream=sys.stdout):\n with open_repo_closing(repo) as r:\n for entry in r.get_walker(include=[r[c].id for c in commits]):\n outstream.write(entry.commit.id + b\"\\n\")",
"def get_commits():\n repo = git.Repo(\".\")\n commits = list(repo.iter_commits())\n return commits",
"def traverse_commits(self) -> Generator[Commit, None, None]:\n logger.info('Git repository in {}'.format(self.git_repo.path))\n all_cs = self._apply_filters_on_commits(self.git_repo.get_list_commits())\n\n if not self.reversed_order:\n all_cs.reverse()\n\n for commit in all_cs:\n logger.info('Commit #{} in {} from {}'\n .format(commit.hash, commit.author_date, commit.author.name))\n\n if self._is_commit_filtered(commit):\n logger.info('Commit #{} filtered'.format(commit.hash))\n continue\n\n yield commit",
"def colored_output(repos):\n\n init() # initialize coloroma\n seperator = \"+==================================================================+\"\n print(Fore.WHITE, Style.BRIGHT, seperator, end=\"\\n\\n\")\n for repo in repos:\n print(\n Fore.LIGHTRED_EX,\n Style.BRIGHT,\n f\"{make_hyperlink(repo['name'], repo['html_url'])}\",\n )\n print(\n Fore.LIGHTYELLOW_EX,\n Style.NORMAL,\n \"\\n \".join(textwrap.wrap(f\"{repo['description']}\", len(seperator))),\n end=\"\\n\\n\",\n )\n print(Fore.LIGHTCYAN_EX, Style.BRIGHT, f\"{repo['language']}\", end=\"\\t\")\n print(\n Fore.LIGHTCYAN_EX,\n Style.BRIGHT,\n f\"{repo['stargazers_count']} Stars\",\n end=\"\\t\",\n )\n print(Fore.LIGHTCYAN_EX, Style.BRIGHT, f\"{repo['forks_count']} Forks\", end=\"\\t\")\n print(\n Fore.LIGHTCYAN_EX,\n Style.BRIGHT,\n f\"{repo['watchers_count']} Watchers\",\n end=\"\\n\\n\",\n )\n print(Fore.WHITE, Style.BRIGHT, seperator, end=\"\\n\\n\")",
"def get_comit_log(repo_path):\n \n commits = []\n #s = subprocess.check_output(\"cd %s; git checkout %s; git log\" % (repo_path,b_name), shell=True)\n s = subprocess.check_output(\"cd %s; git log \" % repo_path, shell=True)\n r = re.compile(\"commit (.*?)\\n.*?Author: (.*?)\\n.*?Date:(.*?)\\n\\n(.*?)\\n\", re.M+re.S+re.U+re.I)\n matches = r.findall(s)\n for m in matches:\n commits.append(dict(commit_hash=m[0].strip(), author=m[1].strip(), datetime=m[2].strip(), message=m[3].strip()))\n\n return commits",
"def get_commits(repo):\n\n repo_id = repo.key.id().split('_')[1]\n github = Github(repo.owner.get().github_token)\n gh_repo = github.get_repo(int(repo_id))\n\n return gh_repo.get_commits()",
"def getCommits(username, repo_name):\n\n # fetch user's access token\n conn = create_connection('test.db')\n query = f\"SELECT token from Token WHERE g_username='{username}';\"\n result = execute_read_query(conn, query)\n token = (result[0])[0]\n \n # GET request for fetching commits\n # endpoint - '/repos/:username/:repo_name/commits'\n headers = {\n 'Authorization': f\"token {token}\",\n 'author': username,\n }\n url = f\"https://api.github.com/repos/{username}/{repo_name}/commits\"\n res = requests.get(url=url, headers=headers)\n res = res.json()\n\n # Store all commits in a list\n lst = []\n for i in res:\n commit = i['commit']\n \n # Custom object for details required\n # details required at frontend \n obj = {}\n obj['message'] = commit['message']\n obj['url'] = commit['url']\n lst.append(obj)\n\n response = {}\n response['data'] = lst\n return response",
"def get_commit_log(repo_path,b_name = None): \n \n commits = []\n s = subprocess.check_output(\"cd %s; git checkout %s; git log \" % (repo_path,b_name), shell=True)\n \n r = re.compile(\"commit (.*?)\\n.*?Author: (.*?)\\n.*?Date:(.*?)\\n\\n(.*?)\\n\", re.M+re.S+re.U+re.I)\n matches = r.findall(s)\n for m in matches:\n commits.append(dict(commit_hash=m[0].strip(), author=m[1].strip(), datetime=m[2].strip(), message=m[3].strip()))\n \n return commits",
"def get_commits(repo):\n\n # Get the last commit from the last release\n res = requests.get('https://api.github.com/repos/open-eats/%s/tags' % repo)\n commit = res.json()[0].get('commit').get('sha')\n\n # Get the date of the last commit from the last release\n res = requests.get('https://api.github.com/repos/open-eats/%s/commits/%s' % (repo, commit))\n date = res.json().get('commit').get('author').get('date')\n\n # Convert the date to a datetime and add 1 second to it,\n # So we don't get the last commit of the previous release.\n date = datetime.strptime(date, \"%Y-%m-%dT%H:%M:%SZ\") + timedelta(seconds=1)\n\n # Get all the commits messages since the last release\n res = requests.get('https://api.github.com/repos/open-eats/%s/commits?since=%s' % (repo, date))\n return [re.get('commit').get('message') for re in res.json()]",
"def get_changeset_lines(repo_dir):\n cmds = ['cd %s' % repo_dir, 'git log --reverse --format=\"%H|%ct|%s\"']\n return execute(' && '.join(cmds)).splitlines()",
"def commits_log(self, obj1, obj2):\n return self._repo.iter_commits(rev='%(obj1)s..%(obj2)s' % {'obj1': obj1, 'obj2': obj2})",
"def get_repo_commits(self, repos, page_size=100, timeout=20):\n params = {'pagelen': page_size}\n total_commits = 0\n for repo in repos:\n endpoint = 'repositories/{resource}/{repo_name}/commits'.format(\n resource=self.resource,\n repo_name=repo\n )\n total_commits += self.retrieve_page_object_count(\n endpoint,\n timeout=timeout,\n params=params,\n page_size=page_size\n )\n return total_commits",
"def all_commits(change_id, curr_project, curr_ref):\n commits = []\n manifest = ET.ElementTree(file='.repo/manifest.xml')\n commits.append((curr_project, project_path(manifest, curr_project), curr_ref))\n\n url = (GERRIT_ROOT + 'changes/?o=CURRENT_REVISION&q=status:open+' +\n change_id)\n changes = request.urlopen(url)\n for change in parse_response(changes):\n project = change['project']\n fetch = change['revisions'][change['current_revision']]['fetch']\n # The `ref` is the same for every download scheme, hence we can use\n # the first one that is there\n ref = fetch.values()[0]['ref']\n path = project_path(manifest, project, change['branch'])\n if path and project != curr_project:\n commits.append((project, path, ref))\n\n return commits",
"def test_repo_list_all_git_refs(self):\n pass",
"def print_commit(commit, decode, outstream=sys.stdout):\n outstream.write(\"-\" * 50 + \"\\n\")\n outstream.write(\"commit: \" + commit.id.decode(\"ascii\") + \"\\n\")\n if len(commit.parents) > 1:\n outstream.write(\n \"merge: \"\n + \"...\".join([c.decode(\"ascii\") for c in commit.parents[1:]])\n + \"\\n\"\n )\n outstream.write(\"Author: \" + decode(commit.author) + \"\\n\")\n if commit.author != commit.committer:\n outstream.write(\"Committer: \" + decode(commit.committer) + \"\\n\")\n\n time_tuple = time.gmtime(commit.author_time + commit.author_timezone)\n time_str = time.strftime(\"%a %b %d %Y %H:%M:%S\", time_tuple)\n timezone_str = format_timezone(commit.author_timezone).decode(\"ascii\")\n outstream.write(\"Date: \" + time_str + \" \" + timezone_str + \"\\n\")\n outstream.write(\"\\n\")\n outstream.write(decode(commit.message) + \"\\n\")\n outstream.write(\"\\n\")",
"def list_repos(self, conn):\n\t\trepos = self.get_repos()\n\t\tpacket = self.set_packet(6, repos)\n\t\tconn.sendall(packet)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Compute the entropy of a commit graph.
|
def compute_entropy(self, commit_graph):
# Entropy computation is not perfect
# * New size won't be the sum of old sizes exactly
# * We have to take into account the times when node1 and node2 were modified
        #   together with one of their neighbors
entropy = 0
for node in commit_graph.nodes:
# Compute number of lines
if node in self.repo_files_path:
lines = self.compute_file_lines(node)
else:
files = node.split(':')
lines = 0
for file in files:
lines += self.compute_file_lines(file)
# Compute coupling with other nodes
coupling = 0
for neighbor in commit_graph.neighbors(node):
coupling += commit_graph.edges[node, neighbor]['number_modifications_same_commit']
entropy += lines * coupling
return entropy
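
# Illustrative addendum (not part of the original class): the same metric -- lines
# multiplied by coupling, summed over nodes -- on a tiny hand-built commit graph.
# The line counts below are hypothetical stand-ins for compute_file_lines().
import networkx as nx

def entropy_of(graph, lines_per_node):
    entropy = 0
    for node in graph.nodes:
        # Merged nodes are named 'file1:file2', so summing over the split covers both cases
        lines = sum(lines_per_node[f] for f in node.split(':'))
        coupling = sum(graph.edges[node, nb]['number_modifications_same_commit']
                       for nb in graph.neighbors(node))
        entropy += lines * coupling
    return entropy

example_graph = nx.Graph()
example_graph.add_edge('a.py', 'b.py', number_modifications_same_commit=3)
example_graph.add_edge('a.py', 'c.py', number_modifications_same_commit=1)
# a.py: 100 * (3 + 1), b.py: 40 * 3, c.py: 10 * 1 -> 530
print(entropy_of(example_graph, {'a.py': 100, 'b.py': 40, 'c.py': 10}))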
|
[
"def compute_entropy(node):\r\n total = len(node)\r\n appearance = sum(node)\r\n not_appearance = len(node) - sum(node)\r\n entropy = 0\r\n if appearance > 0:\r\n entropy -= (appearance / total) * math.log(appearance / total, 2)\r\n if not_appearance > 0:\r\n entropy -= (not_appearance / total) * math.log(not_appearance / total, 2)\r\n return entropy",
"def entropy(self):\n sum = 0.0\n base = len(self.alpha)\n for sym in self.alpha:\n p = self.__getitem__(sym)\n if p == 0:\n p = 0.000001\n sum += p * math.log(p, base)\n return -sum",
"def entropy(x):\n\treturn stats.entropy(x)",
"def entropy(probs):\n return sum(-v*log(v,2) for v in probs.values())",
"def entropy(probs):\n e = np.sum([single_ent(p) for p in probs])\n return -1 * e",
"def entropy_H(self, data):\n\n if not data:\n return 0.0\n\n occurences = Counter(bytearray(data))\n\n entropy = 0\n for x in occurences.values():\n p_x = float(x) / len(data)\n entropy -= p_x*math.log(p_x, 2)\n\n return entropy",
"def entropy(data_set):\n log2 = lambda x: log(x)/log(2)\n results = unique_counts(data_set)\n ent = 0.0\n for count in results.keys():\n ratio = float(results[count])/len(data_set)\n ent = ent-ratio*log2(ratio)\n\n return ent",
"def entropy(iterable):\n if iterable[0] is None:\n return None\n v = np.array([p for p in iterable if p > 0])\n v = v / v.sum()\n return (-(v * np.log2(v)).sum())",
"def get_entropy(self):\n\n return self.entropy_H( self.get_data() )",
"def compute_entropy(belief, config):\r\n entropy = np.zeros((config.dimension, config.dimension))\r\n for key in belief.keys():\r\n entropy[key[0], key[1]] = ss.entropy(belief[key])\r\n\r\n return entropy",
"def entropy(self, nbins):\n if np.log2(nbins)!=int(np.log2(nbins)):\n raise Exception('Nbins must be a power of 2')\n if self.data is not None and self.trans:\n npixels = self.data.shape[0] * self.data.shape[1]\n data = self.data/nbins\n imgR = data[:,:,0].ravel()\n imgG = data[:,:,1].ravel()\n imgB = data[:,:,2].ravel()\n counts = np.zeros((256/nbins, 256/nbins, 256/nbins), dtype=float)+0.00000000001 # avoid log(0)\n\n for i in range(imgR.shape[0]):\n counts[imgR[i], imgG[i], imgB[i]] += 1\n counts /= npixels\n lcounts = np.log2(counts)\n ent = - lcounts * counts\n return np.sum(ent)\n else:\n raise Exception('Image not yet transformed')",
"def entropy (distr):\n return np.sum(np.array(distr.values())*np.log2(distr.values()))",
"def entropy(mp: MetaPath) -> float:\n frequencies = np.array(list(Counter(mp.as_list()).values())) / len(mp)\n return probablistic_entropy(frequencies)",
"def entropy(self):\n if self.collapsed:\n # Maybe this doesn't make sense, should be checked\n return float('inf')\n return np.log(self.sumOfWeights) - self.sumOfWeightsLogs / self.sumOfWeights + self.noise",
"def calc_entropy(data):\r\n\r\n col = data[:,-1]\r\n _, counts = np.unique(col, return_counts=True)\r\n entropy = (counts / len(col)) * np.log2(counts / len(col))\r\n entropy = -np.sum(entropy)\r\n ###########################################################################\r\n # END OF YOUR CODE #\r\n ###########################################################################\r\n return entropy",
"def entropy (distr):\n return -sum([p * np.log(p) for p in distr.values()])",
"def entropy(s):\n b = bytearray.fromhex(s)\n freqs = [c / len(b) for c in Counter(b).values()]\n return -sum(f * math.log2(f) for f in freqs)",
"def entropy(x,symbols=None):\n ps = prob(x)\n tmp = -(ps*N.log2(ps))\n return np.ma.masked_invalid(tmp).sum()",
"def entropy2(counts):\n\n # Calculate the total\n total = 0\n for row in counts:\n total += row[0] + row[1]\n\n # Calculate the entropy for the two attributes\n entropy2 = 0\n for row in counts:\n p = row[0]\n n = row[1]\n occurrences = p + n\n entropy2 += occurrences / total * entropy(p, n)\n return entropy2"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Merge nodes of a commit graph.
|
def merge_nodes(self, node1, node2, initial_commit_graph, df):
new_commit_graph = copy.deepcopy(initial_commit_graph)
        # Steps to merge the nodes
# 1. Get list of out connections with a dict
# eg. {node3 : 5, node4 : 6}
# 2. Get list of in connections with a dict
# 3. Merge nodes
# 1 and 2
connections = {}
index = list(df.index)
new_node_row = []
for column in df.columns:
if df.at[node1, column] == 1 or df.at[node2, column] == 1:
new_node_row.append(1)
for neighbor in index:
if df.at[neighbor, column] == 1 and neighbor not in [node1, node2]:
if neighbor not in connections:
connections[neighbor] = 1
else:
connections[neighbor] += 1
else:
new_node_row.append(0)
new_node_row = [new_node_row]
'''
for neighbor in initial_commit_graph.adj[node1]:
if neighbor != node2:
if neighbor not in connections:
connections[neighbor] = initial_commit_graph.edges[node1, neighbor]['number_modifications_same_commit']
else:
connections[neighbor] += initial_commit_graph.edges[node1, neighbor]['number_modifications_same_commit']
for neighbor in initial_commit_graph.adj[node2]:
if neighbor != node1:
if neighbor not in connections:
connections[neighbor] = initial_commit_graph.edges[node2, neighbor]['number_modifications_same_commit']
else:
connections[neighbor] += initial_commit_graph.edges[node2, neighbor]['number_modifications_same_commit']
'''
new_commit_graph.remove_node(node1)
new_commit_graph.remove_node(node2)
new_node = f'{node1}:{node2}'
new_commit_graph.add_node(new_node)
new_row = pd.DataFrame(new_node_row, columns=list(df.columns), index=[new_node])
new_df = df.drop(labels=[node1, node2])
new_df = new_df.append(new_row)
for neighbor, num_mod in connections.items():
new_commit_graph.add_edge(new_node, neighbor)
new_commit_graph.edges[new_node, neighbor]['number_modifications_same_commit'] = num_mod
return new_commit_graph, new_df
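
# Illustrative addendum (not part of the original class): the connection-counting and
# new-row bookkeeping above, run on a hypothetical file/commit incidence DataFrame
# where rows are files, columns are commits, and a 1 means the file was touched in
# that commit.
import pandas as pd

df_example = pd.DataFrame([[1, 0, 1],
                           [1, 1, 0],
                           [0, 1, 1]],
                          index=['a.py', 'b.py', 'c.py'],
                          columns=['c1', 'c2', 'c3'])
merge_a, merge_b = 'a.py', 'b.py'
connections_example = {}
merged_row = []
for column in df_example.columns:
    if df_example.at[merge_a, column] == 1 or df_example.at[merge_b, column] == 1:
        merged_row.append(1)
        for neighbor in df_example.index:
            if df_example.at[neighbor, column] == 1 and neighbor not in (merge_a, merge_b):
                connections_example[neighbor] = connections_example.get(neighbor, 0) + 1
    else:
        merged_row.append(0)
print(merged_row)             # [1, 1, 1] -> the merged node is touched in every commit
print(connections_example)    # {'c.py': 2} -> c.py co-modified with the pair in c2 and c3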
|
[
"def _merge_cfgnodes(self, cfgnode_0, cfgnode_1):\n\n assert cfgnode_0.addr + cfgnode_0.size == cfgnode_1.addr\n new_node = cfgnode_0.merge(cfgnode_1)\n\n # Update the graph and the nodes dict accordingly\n self._model.remove_node(cfgnode_1.block_id, cfgnode_1)\n self._model.remove_node(cfgnode_0.block_id, cfgnode_0)\n\n in_edges = list(self.graph.in_edges(cfgnode_0, data=True))\n out_edges = list(self.graph.out_edges(cfgnode_1, data=True))\n\n self.graph.remove_node(cfgnode_0)\n self.graph.remove_node(cfgnode_1)\n\n self.graph.add_node(new_node)\n for src, _, data in in_edges:\n self.graph.add_edge(src, new_node, **data)\n for _, dst, data in out_edges:\n self.graph.add_edge(new_node, dst, **data)\n\n # Put the new node into node dicts\n self._model.add_node(new_node.block_id, new_node)",
"def merge(self, tree):\n pass",
"def merge_nodes(node1, node2):\n merged = _GraphMerger.merge_any(node1, node2, node_def_pb2.NodeDef)\n merged_inputs = node1.input[:]\n merged_inputs.extend([i for i in node2.input[:] if i not in merged_inputs])\n merged.input[:] = merged_inputs\n return merged",
"def merge_nodes(G, nodes, new_node, **attr):\n G.add_node(new_node, **attr) # Add node corresponding to the merged nodes\n edge_iterator = list(G.edges(data=True))\n for n1,n2,data in edge_iterator:\n if n1 in nodes:\n if G.has_edge(new_node,n2):\n w = data['weight']\n G[new_node][n2]['weight'] += w\n else:\n G.add_edge(new_node,n2,**data)\n elif n2 in nodes:\n G.add_edge(n1,new_node,**data)\n if G.has_edge(n1,new_node):\n w = data['weight']\n G[n1][new_node]['weight'] += w\n else:\n G.add_edge(n1,new_node,**data)\n \n for n in nodes: # remove the merged nodes\n if n in G.nodes():\n G.remove_node(n)",
"def merge_graphs(self, graphs: List[Graph]):\n if not graphs:\n logger.warning(\"Can not find any graph when merge graphs.\")\n return\n\n for graph in graphs:\n self._rank_ids.append(graph.rank_id)\n\n # Graph has the same operator on the same stage, so we can use the node ids with any graphs.\n node_ids = graphs[0].list_op_node_id()\n for node_id in node_ids:\n node = graphs[0].get_op_node(node_id)\n new_node = copy.deepcopy(node)\n for graph in graphs[1:]:\n new_node.parallel_group.extend(graph.get_op_node(node_id).parallel_group)\n new_node.parallel_group = list(sorted(set(new_node.parallel_group)))\n\n self._op_nodes[new_node.node_id] = new_node\n\n self._const_nodes = graphs[0].const_nodes\n self._parameter_nodes = graphs[0].parameter_nodes",
"def merge_nodes(G, nodes, new_node, attr_dict=None, **attr):\n\n G.add_node(new_node, attr_dict, **attr) # Add the 'merged' node\n\n for n1, n2, data in G.edges(data=True):\n # For all edges related to one of the nodes to merge,\n # make an edge going to or coming from the `new gene`.\n if n1 in nodes:\n G.add_edge(new_node, n2, data)\n elif n2 in nodes:\n G.add_edge(n1, new_node, data)",
"def merge_text_nodes(self):\n ...",
"def merge_subgraph(self, subgraph: collections.Iterable = {},\n source: int = None):\n # first, turn node collection into graph (ie with corresponding edges)\n subgraph = self.graph.subgraph(subgraph)\n if len(subgraph) == 0:\n # do nothing given empty subgraph\n return\n for connected_subgraph in nx.connected_component_subgraphs(subgraph):\n ordered_nodes = nx.dfs_preorder_nodes(connected_subgraph, source)\n current_node = next(ordered_nodes)\n for next_node in ordered_nodes:\n current_node = self.tree.merge(current_node, next_node)",
"def merge_nodes(G, node1, node2, data=False):\n\t#H = G.copy()\n\tH = G\n\tif node1 == node2: # if self-edge\n\t\tH.remove_edge(node1,node2)\n\telse:\n\t\t# create the new node\n\t\tnode_id = node1+'_'+node2\n\t\tif data == False:\n\t\t\tH.add_node(node_id)\n\t\telif data == True:\n\t\t\tdegree1 = len(G[node1])\n\t\t\tdegree2 = len(G[node2])\n\t\t\tif degree1 > degree2:\n\t\t\t\tH.add_node(node_id, H.node[node1])\n\t\t\telse:\n\t\t\t\tH.add_node(node_id, H.node[node2])\n\t\telse:\n\t\t\traise ValueError(\"data only accept True or False\")\n\t\t# connect it to the rest\n\t\tfor n, n_data in H[node1].items():\n\t\t\tif not (n == node2 or n == node1):\n\t\t\t\t#props = H[node1][n]\n\t\t\t\tH.add_edge(node_id, n, n_data)\n\t\tfor n, n_data in H[node2].items():\n\t\t\tif not (n == node1 or n == node2):\n\t\t\t\t#props = H[node2][n]\n\t\t\t\tH.add_edge(node_id, n, n_data)\n\t\t# remove the initial nodes and edges\n\t\tH.remove_node(node1)\n\t\tH.remove_node(node2)\n\t# compute new nodes properties\n\t# TODO: recompute only for the neighbors of the merged nodes\n\t#H = normalize_weights(H,weight='weight')\n\treturn H",
"def copy_merge_nodes(G, G_copy, nodes, new_node, attr_dict=None, **attr):\n\n # G_copy.add_node(new_node, attr_dict, **attr) # Add the 'merged' node\n\n for n1, n2 in G.edges():\n # For all edges related to one of the nodes to merge,\n # make an edge going to or coming from the `new gene`.\n if n1 in nodes:\n G_copy.add_edge(new_node, n2)\n elif n2 in nodes:\n G_copy.add_edge(n1, new_node)",
"def merge_nodes_respect_wiring(G, node1, node2, data=False):\n\tif not (node1 in G):\n\t\tprint(\"Warning: node '{}' not in the graph\".format(node1))\n\t\tH = G\n\telif not (node2 in G):\n\t\tprint(\"Warning: node '{}' not in the graph\".format(node2))\n\t\tH = G\n\telse:\n\t\tif not G.has_edge(node1,node2):\n\t\t\tprint(\"Warning: no edge found between nodes {} and {}\".format(node1,node2))\n\t\t\tH = G\n\t\telse:\n\t\t\t#H = G.copy()\n\t\t\tH = G\n\t\t\t# create the new node\n\t\t\tnode_id = node1+'_'+node2\n\t\t\t# Check if node name already exist, if not create it\n\t\t\tif node_id not in H:\n\t\t\t\tH.add_node(node_id)\n\t\t\telse:\n\t\t\t\tH.node[node_id]['merge_from'] = [node_id] # Record the existence of node_id before the merge\n\t\t\t# Handle new node data (except 'paths')\n\t\t\tif data == False:\n\t\t\t\tnode_data = {}\n\t\t\telif data == node1:\n\t\t\t\tnode_data = H.node[node1]\n\t\t\telif data == node2:\n\t\t\t\tnode_data = H.node[node2]\n\t\t\telif data =='auto': # data from node with the largest degree\n\t\t\t\tdegree1 = len(H[node1])\n\t\t\t\tdegree2 = len(H[node2])\n\t\t\t\tif degree1 > degree2 and H.node[node1]:\n\t\t\t\t\tnode_data = H.node[node1]\n\t\t\t\telse:\n\t\t\t\t\tnode_data = H.node[node2]\n\t\t\telse:\n\t\t\t\traise ValueError(\"data only accept False, auto or the id of one of the nodes\")\n\t\t\t# copy node data\n\t\t\tfor key in node_data.keys():\n\t\t\t\tif not (key=='paths' or key=='merge_from' or key in H.node[node_id]): # 'paths' and 'merge_from' need special handling (see later on)\n\t\t\t\t\tH.node[node_id][key]=node_data[key]\n\n\t\t\t# Record the merge history on node_id\n\t\t\trecord_merge_from(H,node1,node2,node_id)\n\n\t\t\t# next step: connect node_id to the rest of the graph\n\t\t\t# Transfer in-connection of node1 and out-connections of node 2 to node_id\n\n\t\t\t# Handle outgoing connections of node2\n\t\t\t# Copy the outgoing connections related to the merging, to the new node\n\t\t\tedges_to_remove = copy_links(H,node1,node2,node_id,direction='out')\n\t\t\t# disconnect these outgoing connections from node2\n\t\t\tfor (node,text_id,word_pos) in edges_to_remove:\n\t\t\t\t#print(node2,node,text_id,word_pos)\t\t\n\t\t\t\tdisconnect_node(H,node2,node,text_id,word_pos)\n\n\t\t\t#handle the ingoing connections of node1\n\t\t\tedges_to_remove = copy_links(H,node1,node2,node_id,direction='in')\n\t\t\t# disconnect them from node1\n\t\t\tfor (node,text_id,word_pos) in edges_to_remove:\n\t\t\t\t#print(node,node1)\n\t\t\t\tdisconnect_node(H,node,node1,text_id,word_pos)\n\n\t\t\t# Next step: record the edge paths in the node_id data\n\t\t\ttransfer_paths_to_node(H,node1,node2,node_id)\n\n\n\t\t\t# remove edge between node1 and node2\n\t\t\tH.remove_edge(node1,node2)\n\t\t\t# remove nodes if they are disconnected\n\t\t\tif not H.degree(node1): \n\t\t\t\tif not 'paths' in H.node[node1].keys(): \n\t\t\t\t\tH.remove_node(node1)\n\t\t\tif node2 in H and not H.degree(node2): # first check for the case node1==node2, then the degree\n\t\t\t\tif not 'paths' in H.node[node2].keys(): \t\t\n\t\t\t\t\tH.remove_node(node2)\n\treturn H",
"def _merge_equivalent_nodes(G, max_history):\n\n changed = True\n # every node merge changes the graph and can trigger previously impossible node merges - we need to repeat until\n # the graph doesn't change anymore\n while changed:\n changed = False\n remaining_node_ids = [n for n in G.nodes() if n > 0]\n for idx, i in enumerate(remaining_node_ids):\n if G.has_node(i):\n for j in remaining_node_ids[idx + 1:]: # assumes node equivalence is cumulative\n if G.has_node(j) and _nodes_are_equivalent(G, i, j, max_history):\n changed = True\n # moves all outgoing edges to the other node\n for _, succ_node, k, d in G.out_edges(j, keys=True, data=True):\n _add_edge(G, i, succ_node, k, d.get(\"label\"))\n G.remove_edge(j, succ_node)\n # moves all incoming edges to the other node\n for prev_node, _, k, d in G.in_edges(j, keys=True, data=True):\n _add_edge(G, prev_node, i, k, d.get(\"label\"))\n G.remove_edge(prev_node, j)\n G.remove_node(j)",
"def _merge_node(self, x, i):\n y = x.children[i]\n z = x.children.pop(i + 1)\n y.keys.append(x.keys.pop(i))\n y.keys.extend(z.keys)\n y.children.extend(z.children)",
"def analyze_correlation_treecommit_graph(self, pairs_of_modified_files):\n\n for (node1, node2) in pairs_of_modified_files:\n \n if node1 in self.repo_files_path and node2 in self.repo_files_path:\n\n # Find common prefix\n path_prefix = os.path.commonpath([node1, node2])\n \n if len(path_prefix) > 0:\n path_prefix_split = path_prefix.split('\\\\')\n tree_commit_node_name1 = node1[len(path_prefix)+1:].split('\\\\')[0]\n tree_commit_node_name2 = node2[len(path_prefix)+1:].split('\\\\')[0]\n else:\n path_prefix_split = []\n tree_commit_node_name1 = node1[len(path_prefix):].split('\\\\')[0]\n tree_commit_node_name2 = node2[len(path_prefix):].split('\\\\')[0]\n\n # Create or update edge in TreeCommit graph\n self.commit_tree_graph.add_edge(path_prefix_split, tree_commit_node_name1, tree_commit_node_name2)",
"def merge_communities2nodes(communities, nodes):\n communities_df = (\n pd.DataFrame([communities])\n .T.reset_index()\n .rename(\n columns={\"index\": column_names.node_id, 0: column_names.nodes_community}\n )\n )\n nodes = nodes.merge(communities_df, how=\"left\", on=column_names.node_id)\n return nodes",
"def merge(node, parents):\n nodes = [parent for parent in parents]\n nodes.append(node)\n return nodes",
"def condense_nodes(self, node, *other_nodes, **kwargs):\n life_recalc = kwargs.pop('life_recalc', True)\n enforce_connectivity = kwargs.pop('enforce_connectivity', True)\n if kwargs:\n raise TypeError('Unexpected keyword argument(s): {}'\n .format(', '.join(kwargs)))\n\n nd = self.node[node]\n L.debug('Node {} incorporating {}'.format(\n node, ', '.join(str(x) for x in other_nodes)))\n\n all_nodes = (node,) + other_nodes\n subg = self.subgraph(all_nodes)\n if (enforce_connectivity\n and nx.number_connected_components(subg.to_undirected()) != 1):\n raise ValueError('Attempting to merge unconnected nodes.')\n\n # not sure which function is trying to merge a node with itself...\n # but it needs to be stopped. until then catch it here.\n if node in other_nodes:\n other_nodes = list(other_nodes)\n other_nodes.pop(other_nodes.index(node))\n\n if not other_nodes:\n return\n\n edges_out = set(self.out_edges_iter(all_nodes))\n edges_in = set(self.in_edges_iter(all_nodes))\n #edges_internal = edges_out & edges_in # (unused, for now)\n edges_external = edges_out ^ edges_in\n\n nd.setdefault(kc.COMPONENTS, set([node]))\n\n # copy/update node data (NO TOPOLOGY CHANGES)\n for other_node in other_nodes:\n other_data = self.node[other_node]\n\n # abscond with born/died\n if life_recalc:\n nd[kc.FRAME_BORN] = min(nd[kc.FRAME_BORN], other_data.pop(kc.FRAME_BORN))\n nd[kc.FRAME_DIED] = max(nd[kc.FRAME_DIED], other_data.pop(kc.FRAME_DIED))\n\n nd[kc.TIME_BORN] = min(nd[kc.TIME_BORN], other_data.pop(kc.TIME_BORN))\n nd[kc.TIME_DIED] = max(nd[kc.TIME_DIED], other_data.pop(kc.TIME_DIED))\n else:\n for key in (kc.FRAME_BORN, kc.FRAME_DIED, kc.TIME_BORN, kc.TIME_DIED):\n del other_data[key]\n\n # combine set/mapping data\n nd[kc.COMPONENTS].update(other_data.pop(kc.COMPONENTS, set([other_node])))\n\n #TODO Nick: uncomment and test it\n merge_mappings(nd, other_data)\n\n #TODO Nick: uncomment and test it\n # propogate original edge data\n for a, b in edges_external:\n # should be in one and only one (xor)\n assert (a in all_nodes) ^ (b in all_nodes)\n\n if a in all_nodes:\n # \"leaving\" edge\n u = node\n v = b\n else:\n # \"incoming\" edge\n u = a\n v = node\n\n edge_data = self.get_edge_data(a, b)\n if self.has_edge(u, v):\n existing_edge_data = self.get_edge_data(u, v)\n merge_mappings(existing_edge_data, edge_data)\n else:\n self.add_edge(u, v, **edge_data)\n\n # cleanup\n for other_node in other_nodes:\n # remove nodes (edges come off with)\n self.remove_node(other_node)\n\n # update what's where\n for component in nd[kc.COMPONENTS]:\n self._whereis_data[component] = node",
"def merge_node(fsa, sent_id1, wd_id1, sent_id2, wd_id2):\n # Find the node of (sent_id1, wd_id1)\n # print(\"Sent 1: \", sent_id1)\n # print(\"Word 1: \", wd_id1)\n node1 = find_node(fsa, sent_id1, wd_id1)\n if node1 is None:\n return fsa\n \n # Find the node of (sent_id2, wd_id2, wd)\n # print(\"Sent 2: \", sent_id2)\n # print(\"Word 2: \", wd_id2)\n node2 = find_node(fsa, sent_id2, wd_id2)\n if node2 is None:\n return fsa\n\n if node1 == node2:\n return fsa\n\n # Create the new label\n node1_content = idx_to_node.get(node1)\n if node1_content is None:\n print(\"Node 1 not found during merging!\")\n find_nodes_of_sent(node1)\n return fsa\n node_label1 = str(node1_content)\n node_dic1 = ast.literal_eval(node_label1)\n\n node2_content = idx_to_node.get(node2)\n if node2_content is None:\n print(\"Node 2 not found during merging!\")\n find_nodes_of_sent(sent_id2)\n return fsa\n node_label2 = str(node2_content)\n node_dic2 = ast.literal_eval(node_label2)\n \n merge_label = str(merge_dics(node_dic1, node_dic2))\n # if (sent_id1 == 3 and wd_id1 == 0) or \\\n # (sent_id2 == 3 and wd_id2 == 0):\n # print(\"The new label:{}\".format(merge_label)) \n\n\n # Relabel node1 and remove node2\n idx_to_node[node1] = merge_label\n del idx_to_node[node2]\n #idx_to_node[node2] = \"\"\n \n # Merge two nodes\n #print(\"Merge node {} and node {}\\n\".format(node1, node2))\n new_fsa = nx.contracted_nodes(fsa, node1, node2)\n\n # if (sent_id1 == 3 and wd_id1 == 0) or \\\n # (sent_id2 == 3 and wd_id2 == 0):\n # print_node_contents()\n return new_fsa",
"def _merge(self, provider):\n self._merge_names(provider)\n self._merge_nodes(provider)\n self._merge_graph(provider)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Prints the clusters contained in the file 'name'.
|
def display_interesting_clusters_extended(self, name):
with open(name, "rb") as fp:
clusters_extended = pickle.load(fp)
interesting_clusters = 0
for cluster, value in clusters_extended.items():
modified_files = []
for function in value[1]:
file_name, _ = function.split(':')
if file_name not in modified_files:
modified_files.append(file_name)
if len(modified_files) > 1 and value[0] > 2:
interesting_clusters += 1
print(f'Cluster {cluster} ({value[0]} common commits) : {value[1]}')
        print(f'{interesting_clusters} interesting clusters out of {len(clusters_extended)}')
# print(clusters_extended)
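
# Illustrative addendum (not part of the original class): a hypothetical example of the
# pickled structure the method above expects -- a cluster id mapped to
# (number of common commits, list of 'file:function' identifiers).
import pickle

clusters_extended_example = {
    0: (3, ['a.py:parse', 'b.py:render']),  # two distinct files and > 2 commits -> interesting
    1: (5, ['a.py:parse', 'a.py:dump']),    # a single file -> not counted as interesting
}
with open('clusters_extended_example.pkl', 'wb') as fp:
    pickle.dump(clusters_extended_example, fp)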
|
[
"def handle_cluster(file_path):\n # create the output directory if it doesn't exist\n try:\n os.mkdir(\"output\")\n except OSError as e:\n print \"INFO: directory 'output' already exists, will probably overwrite the old data\"\n\n # the paths to the created lonely cluster files gonna be stored\n output_files = []\n\n with open(file_path, \"r\") as cluster_file:\n # put one cluster per file\n lines_to_print = []\n cluster_num = 1\n\n # for line the every cluster file\n for line in cluster_file:\n # if the line is empty then we are at a clusters end\n if not line.strip():\n # convert first row to 2 columns\n first_row = lines_to_print[0].split(\"\\t\")\n first_row[0] = first_row[0].split(\" \")\n first_row[1] = first_row[1].split(\" \")\n first_row[1][2] = first_row[1][2].strip()\n\n # add the new columns to the headline\n lines_to_print[1] = lines_to_print[1].replace(\"\\r\\n\", \"\\tAnnotation Cluster\\tEnrichment Score\\r\\n\")\n\n # add the data to every line\n for i in range(2, len(lines_to_print)):\n lines_to_print[i] = lines_to_print[i].replace(\"\\r\\n\", \"\\t\" + first_row[0][2] + \"\\t\" + first_row[1][\n 2] + \"\\r\\n\")\n\n # print the file and story the path to it\n output_path = \"output/cluster_\" + str(cluster_num) + \".txt\"\n output_files.append(output_path)\n with open(output_path, \"w\") as output_file:\n for i in range(1, len(lines_to_print)):\n output_file.write(lines_to_print[i])\n\n # increase the cluster num and empty the lines to print array\n cluster_num += 1\n lines_to_print = []\n else:\n lines_to_print.append(line)\n\n # do the split chart algorithm for every newly created lonely cluster file\n for former_output_file in output_files:\n split_cluster_charts(former_output_file)",
"def visualize_clusters(file_path, output_dir=\".\"):\n input_fname = file_path.split(\"/\")[-1][:-4]\n\n labels_tweets = pd.read_csv(file_path)\n labels_text = merge_tweets(labels_tweets)\n make_tag_cloud(labels_text, input_fname, output_dir)\n\n return labels_text # for test",
"def cluster_names(self):\n # TODO",
"def print_cluster_header(cluster):\n print \"Cluster Name: %s\" % cluster.displayName\n print '*' * (len(cluster.displayName) + 14)\n print \"\"",
"def print_clustering(clustering):\n print('|'.join(\"\".join(map(str, loe)) for loe\n in clustering.clu2elm_dict.values()))",
"def allclust(all,names,clusters=['M67','N7789','N6819','N6791','M3','M15'],out='clust') :\n allinds=[]\n for cluster in clusters :\n inds=plotlogg(all,names,cluster=cluster,hard='plots/')\n allinds.append(inds)\n\n grid=[]\n yt=[]\n for param in ['hr','rms','chi2','M','Cpar','Npar','alpha','Cpar_Npar','C_N'] :\n row=[]\n for clust in clusters :\n fig=clust+'_'+param+'.png'\n row.append(fig)\n grid.append(row)\n yt.append(param)\n\n for el in aspcap.elems()[0] :\n row=[]\n for clust in clusters :\n fig=clust+'_'+el+'.png'\n row.append(fig)\n grid.append(row)\n yt.append(el)\n\n xtit=[]\n for c in clusters :\n xtit.append('<A HREF={:s}.html> {:s} </A>'.format(c,c))\n html.htmltab(grid,file='plots/'+out+'.html',xtitle=xtit,ytitle=yt)\n return allinds",
"def write_clusters(self, fname, headers=[], mapping=None, raw=False,\n write_names='separate'):\n f = fname\n if not hasattr(f, 'write'):\n f = open(f, 'w')\n # This writes out a separate file containing the true names\n # for the comunities.\n if write_names == 'separate':\n # Write the community names to disk:\n f_names = open(fname+'.names', 'w')\n print >> f_names, '# Community names for file %s'%fname\n print >> f_names, '#', repr(self)\n if isinstance(headers, str):\n print >> f_names, '#', headers\n else:\n for line in headers:\n print >> f_names, '#', line\n # Write the label for self, if it exists:\n if hasattr(self, 'label'):\n print >> f_names, '# label:', self.label\n print >> f_names, '#', time.ctime()\n # Write headers if we are not in raw mode\n if not raw:\n # Write representation of self: includes q and N.\n print >> f, '# Comunities, one community per line'\n print >> f, '#', repr(self)\n # Write any user-supplied headers.\n if isinstance(headers, str):\n print >> f, '#', headers\n else:\n for line in headers:\n print >> f, '#', line\n # Write the label for self, if it exists:\n if hasattr(self, 'label'):\n print >> f, '# label:', self.label\n # Write the community names inline, if requested:\n if write_names == 'inline':\n print >> f, '# community names:', ' '.join(str(cname) \\\n for cname in self.iterkeys())\n # Write the current time.\n print >> f, '#', time.ctime()\n\n # if mapping='int', then force an integer mapping\n if (mapping == 'int' or mapping == int):\n raise ValueError(\"Automatically mapping to int doesn't make sense \\\n - it isn't reproduciable.\")\n\n # Write all the communities.\n for cname, cnodes in self.iteritems():\n # Both the name and nodes must be nonempty\n if len(cnodes) == 0:\n raise ValueError(\"Can not write empty community %s\"%cname)\n if isinstance(cname, str) and len(cname) == 0:\n raise ValueError(\"Can not write empty community label '%s'\"%\n cname)\n # Write separate community names\n if write_names == 'separate':\n print >> f_names, cname\n # Write actual communities.\n if mapping:\n print >> f, ' '.join(str(mapping[n]) for n in cnodes)\n else:\n print >> f, ' '.join(str(x) for x in cnodes)\n\n if write_names == 'separate':\n f_names.close()",
"def do_kube_cluster_show(cc, args):\n try:\n name = cc.kube_cluster.get(args.name)\n _print_kube_cluster_show(name)\n except exc.HTTPNotFound:\n raise exc.CommandError('kubernetes cluster not found: %s' % args.name)",
"def format_cluster(cluster_name):\n return format_node(cluster_name, '*')",
"def plot_clusters(self, clustered_data, file_path='visualization/clusters.png'):\n number_of_clusters = nx.number_connected_components(self.network)\n plt.clf()\n plt.title('Cluster affectation')\n color = ['r', 'b', 'g', 'k', 'm', 'r', 'b', 'g', 'k', 'm']\n for i in range(number_of_clusters):\n observations = []\n for observation, s in clustered_data:\n if s.any() == i:\n observations.append(observation)\n #observations = [observation for observation, s in clustered_data if s == i]\n if len(observations) > 0:\n observations = np.array(observations)\n plt.scatter(observations[:, 0], observations[:, 1], color=color[i], label='cluster #'+str(i))\n plt.legend()\n plt.savefig(file_path)",
"def show_cluster(self, **kwargs):\n\n cluster = self.get_cluster(**kwargs)\n cluster_uniques = cluster.groupby('query').sum().to_dict()['num_searches']\n cluster_uniques = dict(sorted(cluster_uniques.items(), key=operator.itemgetter(1), reverse=True))\n name, _ = self.infer_cluster_name(cluster)\n name = '_'.join(name)\n\n print(f'Cluster number:\\t{cluster.iloc[0][\"cluster_label\"]}')\n print(f'Cluster name:\\t{name}')\n print('Unique queries:\\t', end='')\n pprint(cluster_uniques)\n print('--------------------------------------------------')",
"def pickle_clusters(self, filename='cfp.pkl'):\n import cPickle as pickle\n pickle.dump(self._cluster_info, open(filename, 'w'))",
"def get_cluster(ctx, name, region, verbosity):\n cp = ControlPlane(name, region=region)\n ci = cp.query()\n headers = ['NAME', 'ENDPOINT', 'VPC', 'SUBNETS']\n print(tabulate([[ci.name, ci.endpoint, ci.vpc, ','.join(ci.subnets)]], headers, tablefmt='plain'))",
"def plot_documents(self, svd, names, doc_clusters, no_clusters):\n u, vt = svd\n pts = vt\n # each cluster gets a different colour\n colormap = plt.get_cmap(\"hsv\")\n norm = matplotlib.colors.Normalize(vmin=0, vmax=no_clusters)\n scalarMap = matplotlib.cm.ScalarMappable(cmap=colormap, norm=norm)\n self.plot(pts[1], pts[2], names, colours=[scalarMap.to_rgba(i) for i in doc_clusters])",
"def read_cluster_network_with_http_info(self, name, **kwargs):\n\n all_params = ['name', 'pretty', 'exact', 'export']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method read_cluster_network\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `read_cluster_network`\")\n\n\n collection_formats = {}\n\n resource_path = '/oapi/v1/clusternetworks/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'exact' in params:\n query_params['exact'] = params['exact']\n if 'export' in params:\n query_params['export'] = params['export']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = ['BearerToken']\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1ClusterNetwork',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)",
"def show_names (filename):\n\ttry: \n\t\twith open(filename, encoding='utf-8') as file_object:\n\t\t\tcontents = file_object.read()\n\texcept FileNotFoundError:\n\t\tprint(f\"Sorry, the file {filename} is missing.\")\n\telse:\n\t\tprint(contents.rstrip())",
"def display(self):\n print(\"Grapheme cluster: \\n{}\".format(self.graph_clust))\n print(\"Grapheme cluster id: \\n{}\".format(self.graph_clust_id))\n print(\"Grapheme cluster vector: \\n{}\".format(self.graph_clust_vec))\n print(\"Generalized vector: \\n{}\".format(self.generalized_vec))",
"def draw_clusters(clusters):\n bjp_pos = read_file(collect.BJP_POS_USER_FILE)['results']\n set_bjp_pos = set(bjp_pos)\n bjp_neg = read_file(collect.BJP_NEG_USER_FILE)['results']\n set_bjp_neg = set(bjp_neg)\n con_pos = read_file(collect.CON_POS_USER_FILE)['results']\n set_con_pos = set(con_pos)\n con_neg = read_file(collect.CON_NEG_USER_FILE)['results']\n set_con_neg = set(con_neg)\n count = 2\n for cluster in clusters:\n cluster_bjp_pos = set()\n cluster_bjp_neg = set()\n cluster_con_pos = set()\n cluster_con_neg = set()\n cluster_neutral = set()\n for n in cluster.nodes():\n if n in set_bjp_pos:\n cluster_bjp_pos.add(n)\n elif n in set_bjp_neg:\n cluster_bjp_neg.add(n)\n elif n in set_con_pos:\n cluster_con_pos.add(n)\n elif n in set_con_neg:\n cluster_con_neg.add(n)\n else:\n cluster_neutral.add(n)\n draw_graph(cluster, cluster_bjp_neg, cluster_bjp_pos, cluster_con_neg, cluster_con_pos, cluster_neutral, count,\n 'cluster_' + str(count - 1), 'community detection - cluster '+ str(count - 1) + '\\n Neutral Users - Purple | '\n 'Positive for BJP - Green | '\n 'Negative for BJP - Red | \\n '\n 'Positive for Congress - Blue | '\n 'Negative for Congress - Yellow ')\n count += 1",
"def clustering(output_name, setmap):\n # Sort the platform list to ensure that the ordering of platforms in the\n # distance matrix and dendrogram do not change from run to run\n platforms = sorted(extract_platforms(setmap))\n\n if len(platforms) == 1:\n log.error(\"Error: clustering is not supported for a single platform.\")\n return None\n\n if not util.ensure_png(output_name):\n log.error(\"Error: clustering output file name is not a png; skipping creation.\")\n return None\n\n # Import additional modules required by clustering report\n # Force Agg backend to matplotlib to avoid DISPLAY errors\n import matplotlib\n matplotlib.use(\"Agg\")\n from matplotlib import pyplot as plt\n\n # Remove misleading axes\n for axis in [\"left\", \"right\", \"top\"]:\n matplotlib.rcParams[\"axes.spines.\" + axis] = False\n\n from scipy.cluster import hierarchy\n from scipy.spatial.distance import squareform\n\n # Compute distance matrix between platforms\n matrix = [[distance(setmap, p1, p2) for p2 in platforms] for p1 in platforms]\n\n # Print distance matrix as a table\n lines = []\n lines += [\"\", \"Distance Matrix\"]\n labelled_matrix = [[name] + [(\"%.2f\" % column) for column in matrix[row]]\n for (row, name) in enumerate(platforms)]\n lines += [table([\"\"] + platforms, labelled_matrix)]\n\n # Hierarchical clustering using average inter-cluster distance\n clusters = hierarchy.linkage(squareform(matrix), method='average')\n\n # Plot dendrogram of hierarchical clustering\n fig, ax = plt.subplots()\n hierarchy.dendrogram(clusters, labels=platforms, orientation=\"right\")\n ax.set_xlim(xmin=0, xmax=1)\n ax.axvline(x=divergence(setmap), linestyle='--', label=\"Average\")\n plt.text(divergence(setmap), ax.get_ylim()[1], \"Average\", ha=\"center\", va=\"bottom\")\n plt.xlabel(\"Code Divergence\")\n with util.safe_open_write_binary(output_name) as fp:\n fig.savefig(fp)\n\n return \"\\n\".join(lines)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Compute the tf values for each file in the repo.
|
def compute_tf(voc_to_index, file_to_identifiers):
tf = {}
for file_path in file_to_identifiers.keys():
tf[file_path] = [0 for _ in range(len(voc_to_index))]
for word in file_to_identifiers[file_path]:
tf[file_path][voc_to_index[word]] += 1
num_identifiers = len(file_to_identifiers[file_path])
if num_identifiers > 0:
tf[file_path] = [value / num_identifiers for value in tf[file_path]]
return tf
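
# Hypothetical usage of compute_tf with a tiny vocabulary and two files: identifiers
# are counted per file and normalised by that file's total identifier count.
voc_to_index = {'foo': 0, 'bar': 1}
file_to_identifiers = {'a.py': ['foo', 'foo', 'bar'], 'b.py': []}
print(compute_tf(voc_to_index, file_to_identifiers))
# {'a.py': [0.666..., 0.333...], 'b.py': [0, 0]}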
|
[
"def _compute_tf(self, filename):\n total_terms = len(self._hash_map.keys())\n\n tf_dict = {filename: {}}\n\n for key, value in self._hash_map.iteritems():\n tf = value / float(total_terms)\n tf_dict[filename][key] = float(\"{0:.6f}\".format(tf))\n\n # save tf_dict on database\n self._mongo_session = MongoDB()\n self._mongo_session.connect(host=\"localhost\", port=27017, database=\"crawler\", collection=\"tf_dict\")\n self._mongo_session.insert_document(tf_dict)\n self._mongo_session.disconnect()",
"def compute_stats(self):\n\n def parse_proto(example_protos):\n features = {\n 'genome': tf.FixedLenFeature([1], tf.int64),\n tfrecord_util.TFR_INPUT: tf.FixedLenFeature([], tf.string),\n tfrecord_util.TFR_OUTPUT: tf.FixedLenFeature([], tf.string)\n }\n parsed_features = tf.parse_example(example_protos, features=features)\n genome = parsed_features['genome']\n seq = tf.decode_raw(parsed_features[tfrecord_util.TFR_INPUT], tf.uint8)\n targets = tf.decode_raw(parsed_features[tfrecord_util.TFR_OUTPUT], tf.float16)\n return {'genome': genome, 'sequence': seq, 'target': targets}\n\n # read TF Records\n dataset = tf.data.Dataset.list_files(self.tfr_pattern)\n\n def file_to_records(filename):\n return tf.data.TFRecordDataset(filename, compression_type='ZLIB')\n dataset = dataset.flat_map(file_to_records)\n\n dataset = dataset.batch(1)\n dataset = dataset.map(parse_proto)\n\n iterator = dataset.make_one_shot_iterator()\n try:\n next_op = iterator.get_next()\n except tf.errors.OutOfRangeError:\n print('TFRecord pattern %s is empty' % self.tfr_pattern, file=sys.stderr)\n exit(1)\n\n self.num_seqs = 0\n\n with tf.Session() as sess:\n try:\n next_datum = sess.run(next_op)\n except tf.errors.OutOfRangeError:\n next_datum = False\n\n while next_datum:\n # infer seq_depth\n seq_1hot = next_datum['sequence'].reshape((self.seq_length,-1))\n if self.seq_depth is None:\n self.seq_depth = seq_1hot.shape[-1]\n else:\n assert(self.seq_depth == seq_1hot.shape[-1])\n\n # infer num_targets\n targets1 = next_datum['target'].reshape(self.target_length,-1)\n if self.num_targets is None:\n self.num_targets = targets1.shape[-1]\n targets_nonzero = (targets1.sum(axis=0, dtype='float32') > 0)\n else:\n assert(self.num_targets == targets1.shape[-1])\n targets_nonzero = np.logical_or(targets_nonzero, targets1.sum(axis=0, dtype='float32') > 0)\n\n # count sequences\n self.num_seqs += 1\n\n try:\n next_datum = sess.run(next_op)\n except tf.errors.OutOfRangeError:\n next_datum = False\n\n if self.num_seqs > 0:\n self.num_targets_nonzero = (targets_nonzero > 0).sum()\n print('%s has %d sequences with %d/%d targets' % (self.tfr_pattern, self.num_seqs, self.num_targets_nonzero, self.num_targets), flush=True)\n else:\n self.num_targets_nonzero = None\n print('%s has %d sequences with 0 targets' % (self.tfr_pattern, self.num_seqs), flush=True)",
"def create_tfidf_vectors():\n vector_list = {}\n vector_magnitude = {}\n for file,tokens in tf.items():\n \n \"\"\"calculates raw tf-idf\n For a given dict of tokens we extract keys using tokens.keys()\n Using Lambda we calculate tf-idf for each token in the tokens dict\n and then return a key:value pair dict\n where key -> token name , value -> un normalized tf-idf and store in vector_list\"\"\"\n vector_list[file] = dict(map(lambda token : (token,(1+log10(tokens[token]))*getidf(token)) ,tokens.keys()))\n \n \"\"\"calculates file magnitude\n Form the calculated vector_list using vector_list[file].values() \n Using Lambda we calculate magnitude of the each document\n and then return a key:value pair dict\n where key -> file name , value -> magnitude of the file\"\"\"\n vector_magnitude[file] = (sqrt(sum(map(lambda value : value * value ,vector_list[file].values()))))\n \n tfidf_vectors[file] = Counter()\n \n #normalization of each token with respect document in which they are present\n for token in vector_list[file]:\n tfidf_vectors[file][token] = vector_list[file][token] / vector_magnitude[file]",
"def _apply_tf(wfs, fci, tf, amplitudes):",
"def _tf(self):\n self.TF_count = np.zeros(shape=(self.idf_N, self.word_N))\n for i_index, x in enumerate(self.document_list):\n # import nltk\n # words = nltk.tokenize.word_tokenize(x)\n if self._type == 'str':\n words = x.split()\n elif self._type == 'list':\n words = x\n word_counter_obj = collections.Counter(words)\n # Get the mapping from the unique words of this document to the full document vocabulary indices\n position_value = list(map(self.vocab_dict.get, word_counter_obj.keys()))\n # And map the values of the counts (using position_vlaue) to the TF_count object.\n self.TF_count[i_index, position_value] = list(word_counter_obj.values())\n\n return self.TF_count",
"def compute_vgg16_feature():\n\n data_dir = os.path.join(parent_path, 'flower_photos')\n contents = os.listdir(data_dir)\n classes = [each for each in contents if os.path.isdir(os.path.join(data_dir, each))]\n\n batch_size = 10\n labels = []\n batch = []\n features = None\n # load VGG16 model\n vgg = vgg16.Vgg16()\n input = tf.placeholder(tf.float32, [None, 224, 224, 3])\n with tf.name_scope(\"content_vgg\"):\n vgg.build(input)\n time_begin = datetime.datetime.now()\n with tf.Session() as sess:\n # compute feature for each type of flowers\n print('image processed starting')\n for each in classes:\n print('starting %s images processed...' % each)\n class_path = os.path.join(data_dir, each)\n files = os.listdir(class_path)\n files_len = len(files)\n for ii, file in enumerate(files, 1):\n # load image to batch list\n img = utils.load_image(os.path.join(class_path, file))\n batch.append(img.reshape(1, 224, 224, 3))\n labels.append(each)\n\n if ii % batch_size == 0 or ii == len(files):\n images = np.concatenate(batch)\n feed_dict = {input: images}\n features_batch = sess.run(vgg.relu6, feed_dict=feed_dict)\n\n # store computed features at codes list\n if features is None:\n features = features_batch\n else:\n features = np.concatenate((features, features_batch))\n # clear batch for next batch to compute feature\n batch = []\n sys.stdout.write('\\r>> %d/%d %s images processed' % (ii, files_len, each))\n sys.stdout.flush()\n print('\\n%s image process done!' % each)\n time_end = datetime.datetime.now()\n print('image processed finished: %.0fs' % (time_end - time_begin).total_seconds())\n\n return features, labels",
"def calculate_feature_vector(path):\n\ttf_image = preprocess_image(path)\n\treturn module(tf_image)",
"def compute_files_that_should_be_in_commit(self, commit_hash):\n\n similar_commits = {}\n potential_nodes = set()\n\n # Get list of files modified in commit\n modified_files = []\n modified_files_dict = {}\n for commit in pydriller.Repository(self.repo_folder, single=commit_hash).traverse_commits():\n for modification in commit.modified_files:\n modified_files.append(modification.new_path)\n modified_files_dict[modification.new_path] = 1\n\n # Compute each commit similarity score\n print('Computing similarity score')\n for commit in tqdm.tqdm(pydriller.Repository(self.repo_folder).traverse_commits()):\n if commit.hash != commit_hash:\n modified_files_other_commit = []\n new_nodes = []\n similar_nodes = 0\n for modification in commit.modified_files:\n\n if modification.new_path in self.repo_files_path:\n current_path = modification.new_path\n else:\n current_path = self.retrieve_current_path(modification.new_path)\n\n if current_path is not None and current_path in modified_files_dict:\n similar_nodes += 1\n else:\n new_nodes.append(current_path)\n modified_files_other_commit.append(current_path)\n similarity = similar_nodes / max(len(modified_files), len(modified_files_other_commit))\n if similarity > 0.3:\n similar_commits[commit.hash] = (similarity, new_nodes)\n for node in new_nodes:\n if node not in potential_nodes:\n potential_nodes.add(node)\n\n # Compute score of new potential nodes\n print('Compute node scores')\n for node in tqdm.tqdm(potential_nodes):\n node_score = 0\n for _, (similarity, nodes) in similar_commits.items():\n if node in nodes:\n node_score += similarity\n node_score /= len(similar_commits)\n modified_files_dict[node] = node_score\n\n for node in self.repo_files_path:\n if node not in modified_files_dict:\n modified_files_dict[node] = 0\n\n return modified_files_dict",
"def compute_files(user1, user2, file_list, dir_pre, start_num):\n match_total = 0\n test_total = 0\n gold_total = 0\n for fi in file_list:\n file1 = dir_pre + user1 + \"/\" + fi + \".txt\"\n file2 = dir_pre + user2 + \"/\" + fi + \".txt\"\n if not os.path.exists(file1):\n print(\"*********Error: \", file1, \"does not exist*********\", file=ERROR_LOG)\n return -1.00\n if not os.path.exists(file2):\n print(\"*********Error: \", file2, \"does not exist*********\", file=ERROR_LOG)\n return -1.00\n try:\n file1_h = open(file1, \"r\")\n file2_h = open(file2, \"r\")\n except IOError:\n print(\"Cannot open the files\", file1, file2, file=ERROR_LOG)\n break\n cur_amr1 = amr.AMR.get_amr_line(file1_h)\n cur_amr2 = amr.AMR.get_amr_line(file2_h)\n if cur_amr1 == \"\":\n print(\"AMR 1 is empty\", file=ERROR_LOG)\n continue\n if cur_amr2 == \"\":\n print(\"AMR 2 is empty\", file=ERROR_LOG)\n continue\n amr1 = amr.AMR.parse_AMR_line(cur_amr1)\n amr2 = amr.AMR.parse_AMR_line(cur_amr2)\n test_label = \"a\"\n gold_label = \"b\"\n amr1.rename_node(test_label)\n amr2.rename_node(gold_label)\n (test_inst, test_rel1, test_rel2) = amr1.get_triples()\n (gold_inst, gold_rel1, gold_rel2) = amr2.get_triples()\n if verbose:\n print(\"Instance triples of file 1:\", len(test_inst), file=DEBUG_LOG)\n print(test_inst, file=DEBUG_LOG)\n print(\"Attribute triples of file 1:\", len(test_rel1), file=DEBUG_LOG)\n print(test_rel1, file=DEBUG_LOG)\n print(\"Relation triples of file 1:\", len(test_rel2), file=DEBUG_LOG)\n print(test_rel2, file=DEBUG_LOG)\n print(\"Instance triples of file 2:\", len(gold_inst), file=DEBUG_LOG)\n print(gold_inst, file=DEBUG_LOG)\n print(\"Attribute triples of file 2:\", len(gold_rel1), file=DEBUG_LOG)\n print(gold_rel1, file=DEBUG_LOG)\n print(\"Relation triples of file 2:\", len(gold_rel2), file=DEBUG_LOG)\n print(gold_rel2, file=DEBUG_LOG)\n (best_match, best_match_num) = smatch.get_best_match(test_inst, test_rel1, test_rel2,\n gold_inst, gold_rel1, gold_rel2,\n test_label, gold_label)\n if verbose:\n print(\"best match number\", best_match_num, file=DEBUG_LOG)\n print(\"Best Match:\", smatch.print_alignment(best_match, test_inst, gold_inst), file=DEBUG_LOG)\n match_total += best_match_num\n test_total += (len(test_inst) + len(test_rel1) + len(test_rel2))\n gold_total += (len(gold_inst) + len(gold_rel1) + len(gold_rel2))\n smatch.match_triple_dict.clear()\n (precision, recall, f_score) = smatch.compute_f(match_total, test_total, gold_total)\n return \"%.2f\" % f_score",
"def aggregate_tb_files_per_state(tb_files):\n ret = {}\n for f in tb_files:\n with open(f, 'r', encoding='utf-8') as fp:\n try:\n data = json.load(fp)\n except Exception:\n logger.warning('Failed to parse translation block JSON file %s', f)\n continue\n\n if not data:\n logger.warning('Translation block JSON file %s is empty', f)\n continue\n\n state_id = get_tb_state(f)\n\n for module_path, coverage in data.items():\n states = {}\n if module_path not in ret:\n ret[module_path] = states\n else:\n states = ret[module_path]\n\n tbs = set()\n if state_id not in states:\n states[state_id] = tbs\n else:\n tbs = states[state_id]\n\n for tb in coverage:\n tbs.add(_tb_to_uint64(tb))\n return ret",
"def count_tfidf(self):\n\t\tif 'all_docs_num' not in dir(self):\n\t\t\tprint 'just set all_docs_num to class attr firstly'\n\t\t\treturn\n\t\tdocsvector={}\n\t\tsave_idf={}\n\t\tfor doc_id in self.docs:\n\t\t\tdocsvector[doc_id]=[]\n\t\t\tfor word in sorted(self.feature):\n\t\t\t\ttf=0 \n\t\t\t\tif (doc_id in self.words[word].rf) and (self.words_count[doc_id]): \n\t\t\t\t\ttf=self.words[word].rf[doc_id]\n\t\t\t\t\ttf/=self.words_count[doc_id]\n\t\t\t\tdf=self.words[word].docnum_in_cate+self.words[word].docnum_in_others\n\t\t\t\tidf=math.log(self.all_docs_num/df,10.0)\n\t\t\t\tsave_idf[word]=idf\n\t\t\t\ttfidf=tf*idf\n\t\t\t\tdocsvector[doc_id].append(tfidf)\n\t\treturn docsvector,save_idf",
"def update_files(\n self,\n files: List[DynamicProxy],\n dry_run: bool,\n delete: bool,\n context: Dict[str, Any],\n ref: Optional[str] = None,\n **kwargs,\n ) -> List[\"DatasetUpdateMetadata\"]:\n from renku.core.dataset.providers.models import DatasetUpdateAction, DatasetUpdateMetadata\n\n if \"visited_repos\" not in context:\n context[\"visited_repos\"] = {}\n\n progress_text = \"Checking git files for updates\"\n\n results: List[DatasetUpdateMetadata] = []\n\n try:\n communication.start_progress(progress_text, len(files))\n for file in files:\n communication.update_progress(progress_text, 1)\n if not file.based_on:\n continue\n\n based_on = file.based_on\n url = based_on.url\n if url in context[\"visited_repos\"]:\n remote_repository = context[\"visited_repos\"][url]\n else:\n communication.echo(msg=\"Cloning remote repository...\")\n path = get_cache_directory_for_repository(url=url)\n remote_repository = clone_repository(url=url, path=path, checkout_revision=ref)\n context[\"visited_repos\"][url] = remote_repository\n\n checksum = remote_repository.get_object_hash(path=based_on.path, revision=\"HEAD\")\n found = checksum is not None\n changed = found and based_on.checksum != checksum\n\n src = remote_repository.path / based_on.path\n dst = project_context.metadata_path.parent / file.entity.path\n\n if not found:\n if not dry_run and delete:\n delete_dataset_file(dst, follow_symlinks=True)\n project_context.repository.add(dst, force=True)\n results.append(DatasetUpdateMetadata(entity=file, action=DatasetUpdateAction.DELETE))\n elif changed:\n if not dry_run:\n # Fetch file if it is tracked by Git LFS\n pull_paths_from_storage(remote_repository, remote_repository.path / based_on.path)\n if is_linked_file(path=src, project_path=remote_repository.path):\n delete_dataset_file(dst, follow_symlinks=True)\n create_external_file(target=src.resolve(), path=dst)\n else:\n shutil.copy(src, dst)\n file.based_on = RemoteEntity(\n checksum=checksum, path=based_on.path, url=based_on.url # type: ignore\n )\n results.append(DatasetUpdateMetadata(entity=file, action=DatasetUpdateAction.UPDATE))\n finally:\n communication.finalize_progress(progress_text)\n\n return results",
"def compute_elem_diff_for_user_doc(doc_ftrs, user_ftrs, num_doc_fields, num_user_fields, num_deep):\n\n def compute_diff(user_field_ftrs, doc_ftrs):\n \"\"\" Computes Elementwise diff between user_field_ftrs and doc_ftrs\n\n :param doc_ftrs Document features. Shape=[batch_size, group_size, num_doc_fields, num_deep]\n :param user_field_ftrs Shape=[batch_size, num_deep]\n \"\"\"\n user_field_ftrs = tf.expand_dims(tf.expand_dims(user_field_ftrs, axis=1), axis=1) # [batch_size, 1, 1, num_deep]\n elementwise_diff = user_field_ftrs - doc_ftrs # [batch_size, group_size, num_doc_fields, num_deep]\n return elementwise_diff # [batch_size, group_size, num_doc_fields, num_deep]\n\n batch_size = tf.shape(input=doc_ftrs)[0]\n max_group_size = tf.shape(input=doc_ftrs)[1]\n\n # Shape=[num_user_fields, batch_size, group_size, num_doc_fields, num_deep]\n sim_ftrs = tf.map_fn(partial(compute_diff, doc_ftrs=doc_ftrs), tf.transpose(a=user_ftrs, perm=[1, 0, 2]))\n # Shape=[batch_size, group_size, num_doc_fields, num_user_fields, num_deep]\n sim_ftrs = tf.transpose(a=sim_ftrs, perm=[1, 2, 3, 4, 0])\n\n num_sim_ftrs = NAME_TO_NUM_SIM['diff'](num_doc_fields, num_user_fields, num_deep)\n # Shape=[batch_size, group_size, num_sim_ftrs]\n sim_ftrs = tf.reshape(sim_ftrs, [batch_size, max_group_size, num_sim_ftrs])\n return sim_ftrs",
"def compute(self, fname, **kw):\n from array import array\n results = array('f')\n self.times['Images Attempted'] += 1\n try:\n # open image\n t0 = time.time()\n im = Image.open(fname).convert('RGB')\n t1 = time.time()\n self.times['Image Open'] += t1-t0\n # align image for each aligner\n aligned = dict((t, a.align(im, **kw)[0]) for t, a in self.aligners.items())\n t2 = time.time()\n self.times['Image Align'] += t2-t1\n for mfunc, fset in zip(self.mfuncs, self.fsets):\n im = aligned[mfunc.align]\n c1 = time.time()\n mask = mfunc.compute(im, **kw)\n c2 = time.time()\n #mask.save('mask.png')\n cur = fset.compute(im, mask, **kw)\n c3 = time.time()\n results.extend(cur)\n c4 = time.time()\n self.times['Compute Mask'] += c2-c1\n self.times['Compute Fset'] += c3-c2\n self.times['Concat Feats'] += c4-c3\n t3 = time.time()\n self.times['Compute Feats'] += t3-t2\n self.times['Total Feature Computation'] += t3-t0\n except (IOError, IndexError): return None\n self.times['Images Done'] += 1\n return results",
"def show_samples():\n files = os.listdir(FLAGS.directory)\n for file in files:\n image, label = read_and_decode(tf.train.string_input_producer([os.path.join(FLAGS.directory, file)]),\n (256, 256, 3))\n sess = tf.Session()\n init = tf.initialize_all_variables()\n sess.run(init)\n tf.train.start_queue_runners(sess=sess)\n\n label_val_1, image_val_1 = sess.run([label, image])\n\n cv2.imshow('s', (image_val_1 + 0.5))\n print(label_val_1)\n cv2.waitKey(1000)",
"def generate_hashes(ctx: Context, files: list[pathlib.Path]):\n for fpath in files:\n ctx.info(f\"* Processing {fpath} ...\")\n hashes = {}\n for hash_name in (\"blake2b\", \"sha512\", \"sha3_512\"):\n ctx.info(f\" * Calculating {hash_name} ...\")\n with fpath.open(\"rb\") as rfh:\n try:\n digest = hashlib.file_digest(rfh, hash_name) # type: ignore[attr-defined]\n except AttributeError:\n # Python < 3.11\n buf = bytearray(2**18) # Reusable buffer to reduce allocations.\n view = memoryview(buf)\n digest = getattr(hashlib, hash_name)()\n while True:\n size = rfh.readinto(buf)\n if size == 0:\n break # EOF\n digest.update(view[:size])\n digest_file_path = fpath.parent / f\"{fpath.name}.{hash_name.upper()}\"\n hexdigest = digest.hexdigest()\n ctx.info(f\" * Writing {digest_file_path} ...\")\n digest_file_path.write_text(digest.hexdigest())\n hashes[hash_name] = hexdigest\n hashes_json_path = fpath.parent / f\"{fpath.name}.json\"\n ctx.info(f\" * Writing {hashes_json_path} ...\")\n hashes_json_path.write_text(json.dumps(hashes))\n ctx.info(\"Done\")",
"def ComputeTFGanFIDScore(fake_images, real_images, inception_graph):\n assert fake_images.shape[3] == 3\n assert real_images.shape[3] == 3\n bs_real = real_images.shape[0]\n bs_fake = fake_images.shape[0]\n assert bs_real % INCEPTION_BATCH == 0\n assert bs_fake % INCEPTION_BATCH == 0\n assert bs_real >= bs_fake and bs_real % bs_fake == 0\n ratio = bs_real // bs_fake\n logging.info(\"Ratio of real/fake images is: %d\", ratio)\n with tf.Graph().as_default():\n fake_images_batch = tf.train.batch(\n [tf.convert_to_tensor(fake_images, dtype=tf.float32)],\n enqueue_many=True,\n batch_size=INCEPTION_BATCH)\n real_images_batch = tf.train.batch(\n [tf.convert_to_tensor(real_images, dtype=tf.float32)],\n enqueue_many=True,\n batch_size=INCEPTION_BATCH * ratio)\n eval_fn = fid_score_lib.get_fid_function(\n gen_image_tensor=fake_images_batch,\n real_image_tensor=real_images_batch,\n num_gen_images=fake_images.shape[0],\n num_eval_images=real_images.shape[0],\n image_range=\"0_255\",\n inception_graph=inception_graph)\n with tf.train.MonitoredTrainingSession() as sess:\n fid_score = eval_fn(sess)\n return fid_score",
"def parse_all_asts_and_save(repo_file_path: str, count = -1):\n dir_path = os.path.dirname(os.path.realpath(repo_file_path)) + \"/\"\n print(f\"Parsing ASTs from data directory: {dir_path}\")\n with open(repo_file_path) as repos:\n repo_lines = [i.strip() for i in repos]\n\n if count >= 0:\n repo_lines = repo_lines[:count]\n\n batch_size = 3000\n\n for i in range(0, len(repo_lines), batch_size):\n start = i\n\n if (i + batch_size) > len(repo_lines):\n end = len(repo_lines)\n else:\n end = i + batch_size\n\n print(f\"Current batch: {start}:{end} out of {len(repo_lines)}.\")\n\n subprocess.call(['python3', 'preprocessing_batched.py', str(repo_file_path), str(start), str(end)])",
"def evaluate_using_files(prediction_file, gold_file):\n with open(prediction_file) as f:\n prediction = json.load(f)\n with open(gold_file) as f:\n gold = json.load(f)\n return evaluate(prediction, gold)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Split a snake or camel case string into its component words.
|
def split_sentence(word):
    # Split on snake_case first, then split each chunk on camelCase boundaries.
    splitted_snake_sentence = word.split('_')
    splitted_sentence = []
    for snake_word in splitted_snake_sentence:
        camel_words = re.findall(r'.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', snake_word)
        for camel_word in camel_words:
            splitted_sentence.append(camel_word)
    return splitted_sentence
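
A minimal usage sketch (hypothetical identifier; the snippet assumes `re` is imported at module level):

    >>> split_sentence("parseHTTPResponse_headerFields")
    ['parse', 'HTTP', 'Response', 'header', 'Fields']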
|
[
"def wordSplit(cls, s, lower=False):\n words = []\n for word in s.split(' '):\n if lower:\n word = word.lower()\n words.append(word.strip())\n return words",
"def string_split_2():\n s = 'dog lion snake elephant cow donkey goat duck'\n return s.split('o')",
"def split_uppercase_joined_words(string):\n string_parts = string.split()\n string_split = []\n for word in string_parts:\n temp_word = \"\"\n for letter in word:\n if letter.isupper() and temp_word != \"\": # When finding an uppercase letter split save word and start new word.\n if not temp_word[-1].isalpha():\n if len(\n temp_word) > 1: # Only add previous word to list if it is not an empty string, ie has more than just the special character\n string_split.append(temp_word[:-1])\n temp_word = temp_word[-1]\n else:\n string_split.append(temp_word)\n temp_word = \"\"\n\n # print(temp_word)\n temp_word += letter\n string_split.append(temp_word)\n\n # print(name_split) # debug\n return \" \".join(string_split)",
"def split_str(S):\n L = []\n word = ''\n for ch in S:\n if ch in string.punctuation:\n word = word + ''\n elif ch in string.whitespace:\n L.append(word)\n word = ''\n else:\n word += ch\n else:\n L.append(word)\n \n return L",
"def split_words(comments):\n return [re.split(r'\\W+', comment) for comment in comments]",
"def split_camel_case_text(text):\n matches = re.finditer(\n '.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', text)\n flag_matches = [match.group(0) for match in matches]\n count = len(flag_matches)\n print(flag_matches)\n flag = False # whether it has camel case words or not\n if count > 0:\n flag = True # whether it has camel case words or not\n words = re.sub('([A-Z][a-z]+)', r' \\1', re.sub('([A-Z]+)', r' \\1',\n text)).split()\n _tmp = \" \".join([word.strip() for word in words])\n _tmp = TextPreprocessing.remove_multiple_spaces(_tmp)\n return _tmp, flag",
"def split_words(clean_article):\n\tsplit_article= clean_article.split()\n\treturn split_article",
"def split(word):\n return [char for char in word]",
"def name_split(name: str):\n\n s = name.lower()\n tokens = ['river', 'lake', 'basin', 'ocean', 'sea', 'mount', 'mountain']\n for token in tokens:\n s = s.replace(token, \"\")\n return s.strip()",
"def split_strings(original_string: str, delimiter: str = \"__\"):\n return original_string.split(delimiter)",
"def guess_splitwords():\n\n if t_word[:2] == 'un' and (t_pos == 'ADJD' or t_pos == 'ADJA'):\n create_splitword_tags(t_word[:2], t_word[2:])\n create_negation_frame()\n create_splitword_target(t_word[:2])\n create_splitword_focus(t_word[2:])\n create_splitword_negated(t_word[2:])\n create_splitword_scope(t_word[2:])",
"def _split_word(self, word):\n (opening_puncts, core_token, closing_puncts) = self._split_punctuation(word)\n if closing_puncts and closing_puncts[0][2] == '.':\n (core_token, closing_puncts) = \\\n self._restore_abbreviation(core_token, closing_puncts)\n return opening_puncts, core_token, closing_puncts",
"def make_words(president_file_lines):\n split_words = []\n for string in president_file_lines:\n split = string.split()\n split_words.extend(split)\n return split_words",
"def transform_camel_to_snake(word):\n split_word = re.findall('[A-Z][^A-Z]*', word)\n return '_'.join(_.lower() for _ in split_word)",
"def test_split(self):\r\n target = ANSIString(\"{gThis is {nA split string{g\")\r\n first = (u'\\x1b[1m\\x1b[32mThis is \\x1b[0m', u'This is ')\r\n second = (u'\\x1b[1m\\x1b[32m\\x1b[0m split string\\x1b[1m\\x1b[32m',\r\n u' split string')\r\n re_split = re.split('A', target)\r\n normal_split = target.split('A')\r\n self.assertEqual(re_split, normal_split)\r\n self.assertEqual(len(normal_split), 2)\r\n self.checker(normal_split[0], *first)\r\n self.checker(normal_split[1], *second)",
"def get_words(message):\n\n # *** START CODE HERE ***\n return [word.lower() for word in message.split(\" \")]\n # *** END CODE HERE ***",
"def SplitOnWs(self, *args):\n return _snap.TStr_SplitOnWs(self, *args)",
"def get_words(line):\n line = re.sub(r'\\W+', ' ', line)\n line = re.sub(r'[_0-9]+', ' ', line)\n return line.split()",
"def split_name(name):\n parsed = re.split(' |-', name)\n\n if len(parsed) > 2:\n split_name = {'first': parsed[0], 'second': parsed[-2], 'last': parsed[-1]}\n else:\n split_name = {'first': parsed[0], 'second': '', 'last': parsed[-1]}\n\n return split_name"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Runs a semantic analysis on a repo to get a distance matrix containing the cosine distance between each pair of files.
|
def semantic_analysis(self):
    file_to_identifiers = self.get_corpus()
    self.preprocess_words(file_to_identifiers)
    voc_size, voc_to_index = self.compute_voc(file_to_identifiers)
    tf = self.compute_tf(voc_to_index, file_to_identifiers)
    idf = self.compute_idf(voc_to_index, file_to_identifiers)
    tf_idf = self.compute_tf_idf(voc_to_index, tf, idf)
    tf_idf_df = pd.DataFrame.from_dict(tf_idf, orient='index')
    distance_matrix = cosine_similarity(tf_idf_df)
    for i in range(len(distance_matrix)):
        distance_matrix[i][i] = 1
    distance_df = pd.DataFrame(distance_matrix, index=tf_idf_df.index, columns=tf_idf_df.index)
    correlated_files = set()
    for file_path in file_to_identifiers.keys():
        for file_path2 in file_to_identifiers.keys():
            if file_path != file_path2:
                correlation = distance_df.loc[file_path, file_path2]
                if 0 < correlation < 1:
                    files = sorted([file_path, file_path2])
                    correlated_files.add((files[0], files[1], correlation))
    correlated_files = sorted(correlated_files, key=lambda x: x[2], reverse=True)
    # Report the most strongly correlated file pairs (at most 50) to avoid an IndexError
    # when fewer than 50 pairs exist.
    for pair in correlated_files[:50]:
        print(pair)
    return distance_df
|
[
"def compute_similarity():\n movie_data = pd.read_csv(\"movie_recsys/datasets/movie_data.csv\")\n\n # Compute TF-IDF representation.\n tfidf = TfidfVectorizer(stop_words=\"english\")\n tfidf_matrix = tfidf.fit_transform(movie_data[\"story\"])\n\n # Compute Cosine Similarity.\n cosine_sim_scores = linear_kernel(tfidf_matrix, tfidf_matrix)\n\n # Saving.\n file_path = Path.cwd() / \"movie_recsys/datasets/cosine_sim_scores.csv\"\n savetxt(file_path, cosine_sim_scores)\n return",
"def calculate_distances(self):\n\n # Initialize container.\n distances = np.zeros((len(self.data.stem_ids), 2))\n\n # For each report-abstract pairs\n for i in tqdm(range(len(self.data.stem_ids))):\n\n # Get report, abstract and random other abstract\n report = self.model.doc_vecs.loc['%s_report' % self.data.stem_ids[i]]\n summary = self.model.doc_vecs.loc['%s_abstract' % self.data.stem_ids[i]]\n other = self.model.doc_vecs.loc[self.data.abstract_ids[random.randint(0, len(self.data.abstract_ids)-1)]]\n\n # self.distance_measure is always cosine. Calculate distance.\n if self.distance_measure == 'cosine':\n distances[i][0] = cosine(report, summary)\n distances[i][1] = cosine(report, other)\n\n # Make pandas dataframe, save and return.\n distances = pd.DataFrame(distances, index=self.data.stem_ids, columns=['own', 'other'])\n distances.to_csv(self.model.path / str('distances_%s_%s.csv' % (self.data.name, self.distance_measure)))\n\n return distances",
"def calculate_cosine_similarity(self):\n\n data = []\n #prepare input for the sklearn cosine similarity function\n for k in sorted(self.node_dict.keys()):\n data.append(\" \".join(self.cleaned_data[self.node_dict[k]]))\n\n vec = TfidfVectorizer()\n x = vec.fit_transform(\n data)\n\n # Calculate the pairwise cosine similarities (depending on the amount of data that you are going to have this\n # could take a while)\n matrix_similarity = cosine_similarity(x)\n # Remove duplicates + diagonal: cosine similarity returns a symmetric matrix, where the diagonal and the\n # lower or upper triangular is irrelevant\n tril_ind = np.tril_indices(matrix_similarity.shape[0])\n mat_sim_upper = matrix_similarity.copy()\n mat_sim_upper[tril_ind] = -1\n\n return mat_sim_upper",
"def run(self):\n\n file1 = raw_input(\"Enter absolute path of the first document: \")\n file2 = raw_input(\"Enter absolute path of the second document: \")\n text1 = self.__readFile(file1)\n text2 = self.__readFile(file2)\n similarityScore = self.__semanticRelatednessCalculator.getSemanticRelatednessScore(text1, text2)\n print \"The similarity score (0-1) is:\", similarityScore",
"def cosine_score(self):\n for i in self.all_results: \n length = 0\n for j in self.all_results[i]:\n\n length += self.all_results[i][j] ** 2\n length = math.sqrt(length)\n \n for j in self.all_results[i]:\n self.all_results[i][j] = self.all_results[i][j]/length\n \n for doc in self.all_results:\n score = 0\n for query_word in self.query_score:\n if query_word in self.all_results[doc]:\n score += self.all_results[doc][query_word] * self.query_score[query_word]\n self.doc_score[doc] = score",
"def cosine_sim(query, trail):\n num = np.dot(query, trail)\n denom = (np.sqrt(np.sum(np.linalg.norm(query))) * np.sqrt(np.sum(np.linalg.norm(trail))))\n \n return num / denom",
"def runCalculation(self): \n \n # Calculate the sequence entropy of each column in a fasta file\n f = open(self.fasta_file,'r')\n self.data = wl.LogoData.from_seqs(wl.read_seq_data(f)) \n f.close()",
"def optimized(embeddings, args):\n similarities = np.zeros(shape=(len(embeddings), len(embeddings)))\n\n if args.similarity_algo == \"cosine\":\n for i in tqdm(range(len(embeddings))):\n for j in range(len(embeddings)):\n similarities[i][j] = np.dot(embeddings[i], np.transpose(embeddings[j]))\n\n else:\n raise ValueError(\"Invalid similarity algorithm\")\n\n return similarities",
"def calculate_cosine_similarity(query_vector, data_vectors):\n distances = np.array(\n cosine_similarity(query_vector, data_vectors)[0]) # result is [[ data ]], so get idx 0 to have [ data ]\n\n # argsort will return a sorted list of indices of the original data (+1 because documents are indexed from 1)\n # for cosine similarity, higher is better, so invert the list by [::-1]\n distances_sorted = distances.argsort()[::-1] + 1\n return distances_sorted",
"def _compute_distances(self, spacy_en_dir=\"en\"):\n nlp = spacy.load(spacy_en_dir)\n df = self._base.get_all_text()\n print(\"tokenizing\")\n tqdm.pandas()\n df[\"noun_tokens\"] = df.sentence.progress_apply(lambda text: ReviewApp._graph_tokenize(text, nlp))\n print(\"building distances\")\n distances = ReviewApp._word_neighbors(df, 1).assign(weight=2).append(\n ReviewApp._word_neighbors(df, 1).assign(weight=1))\n distances = distances.groupby(['w0', 'w1']).weight.sum().reset_index()\n return distances",
"def getSimilarityScore(self,query_embedding, quote_embedding):\n score = util.pytorch_cos_sim(query_embedding,quote_embedding) \n return score",
"def __compute_dist(self, medoids):\n dists = np.zeros((self.n, self.k))\n \n for i in range(self.n):\n dists[i,:] = np.linalg.norm(self.X[i,:] - medoids, axis=1)**2\n \n return dists",
"def cosine_sim_matrix(tokens_1: List[str], tokens_2: List[str], embed_name: str = \"fasttext\"):\n\n # Taking most of the time around 8 seconds, instead consider it as a instance variable.\n embed = AssignEmbedding(embed_name)\n\n embeddings_1 = np.asarray(embed.assign(tokens_1))\n embeddings_2 = np.asarray(embed.assign(tokens_2))\n\n sim_matrix = cosine_similarity(embeddings_1, embeddings_2)\n\n return np.round(sim_matrix, 3)",
"def cosine_search(input_data, db_data):\n from uncurl_analysis import bulk_data\n dist = bulk_data.cosine(db_data, input_data)[0][0]\n return dist",
"def cosine_similarity(self, query, indices=None):\n\n pass",
"def get_cos_sims(self, arg):\n\n if len(self.graph.vs[\"name\"]) < 1:\n return [0]\n\n self.graph.add_vertex(name=arg)\n vectorizer = TfidfVectorizer()\n matrix = vectorizer.fit_transform(self.graph.vs[\"name\"])\n arg_sim = cosine_similarity(matrix[-1], matrix)[0][:-1]\n self.graph.delete_vertices(arg)\n\n return list(arg_sim)",
"def calculate_distances(self):\n\n # Matrices with reports vectors and abstracts vectors\n reports = self.model.doc_vecs.loc[self.data.report_ids]\n abstracts = self.model.doc_vecs.loc[self.data.abstract_ids]\n\n # Calculates the distance between each pairs of the matrices\n distances = cdist(reports, abstracts, self.distance_measure)\n distances = np.nan_to_num(distances, nan=np.inf)\n\n distances = pd.DataFrame(distances, index=self.data.report_ids, columns=self.data.abstract_ids)\n\n return distances",
"def document_distance(file1: str, file2: str):\n file1_text = process_file(file1)\n file2_text = process_file(file2)\n file1_words = get_words_from_text_list(file1_text)\n file2_words = get_words_from_text_list(file2_text)\n file1_word_freq = get_freq_count_from_words_dict(file1_words)\n file2_word_freq = get_freq_count_from_words_dict(file2_words)\n distance = vector_angle(file1_word_freq, file2_word_freq)\n print(distance)",
"def calculate_cosine_similarity(grid, dfs, reference_day='1101'):\n smsIn = {}\n smsOut = {}\n callIn = {}\n callOut = {}\n internet = {}\n\n reference = join_cdr_grid(dfs[reference_day], grid)\n reference.fillna(0, inplace=True)\n\n for key, value in dfs.items():\n if key != '1101':\n joined = join_cdr_grid(value, grid)\n joined.fillna(0, inplace=True)\n try:\n smsIn[key] = 1 - cosine(reference[\"smsIn\"], joined[\"smsIn\"])\n except:\n print (key)\n continue\n try:\n smsOut[key] = 1 - cosine(reference[\"smsOut\"], joined[\"smsOut\"])\n except:\n print (key)\n continue\n try:\n callIn[key] = 1 - cosine(reference[\"callIn\"], joined[\"callIn\"])\n except:\n print (key)\n continue\n try:\n callOut[key] = 1 - cosine(reference[\"callOut\"], joined[\"callOut\"])\n except:\n print (key)\n continue\n try:\n internet[key] = 1 - cosine(reference[\"internet\"], joined[\"internet\"])\n except:\n print (key)\n continue\n print(\"processed\", key)\n\n #sorting\n smsIn = sorted(smsIn.items(), key=lambda s: s[0])\n smsOut = sorted(smsOut.items(), key=lambda s: s[0])\n callIn = sorted(callIn.items(), key=lambda s: s[0])\n callOut = sorted(callOut.items(), key=lambda s: s[0])\n internet = sorted(internet.items(), key=lambda s: s[0])\n\n return smsIn, smsOut, callIn, callOut, internet"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return a list of all docs in the database.
|
def find_all(self):
return self.documents
|
[
"def get_all_docs(self):\n docs = []\n cursor = self.db.scores.find({})\n for document in cursor:\n docs.append(document)\n # print(docs)\n return(docs)",
"def getAllDocuments():\n return [Document(d) for d in HopperLowLevel.allDocuments()]",
"def get_docs():\n fields = ['Title', 'Description', 'OperatingSystem']\n all_docs = {}\n\n connection = Connection()\n db = connection.linux_laptops\n docs = db.docs\n\n for f in fields:\n all_docs.update(_get_docs(docs, f))\n return all_docs",
"def get_documents():\n\n DB_USER = app.config.get('DB_USER', 'postgres')\n DB_PASSWORD = app.config.get('DB_PASSWORD', 'dbpass')\n DB_NAME = app.config.get('DB_NAME', 'envirolens')\n\n DB.connect(\n database=DB_NAME,\n user=DB_USER,\n password=DB_PASSWORD\n )\n\n if DB.cursor is None:\n return jsonify({'Error' : 'The connection could not be established'})\n\n document_ids = request.json.get('document_ids', None)\n\n # If the \"document_ids\" parameter was not set:\n if document_ids is None:\n return jsonify(\n {'Message' : 'You need to provide json with \"document_ids\" : [list of documents ids] value'}\n )\n\n statement = \"\"\"SELECT * FROM documents WHERE document_id IN %s;\"\"\"\n DB.cursor.execute(statement, (tuple(document_ids), )) \n\n # Enumerating the fields\n num_fields = len(DB.cursor.description)\n field_names = [i[0] for i in DB.cursor.description]\n documents = [{ field_names[i]: row[i] for i in range(num_fields) } for row in DB.cursor.fetchall()]\n \n # Cleaning the ouput:\n # - removing fulltext field\n # - slicing down the fulltext_cleaned field to 500 chars\n # - we return only the first 10 results\n for i in range(len(documents)):\n if documents[i]['fulltext_cleaned'] is not None:\n documents[i]['fulltext_cleaned'] = documents[i]['fulltext_cleaned'][:500]\n documents[i].pop('fulltext')\n\n DB.disconnect()\n\n return jsonify(documents[:10])",
"def get_list_menu_docs(catering: str) -> List[Dict]:\n collection_name: str = collection_manager.get_menu_collection(catering)\n return [document for document in db.find_all(collection_name)]",
"def get_dataset_docs(catering: str) -> List[Dict]:\n collection_name: str = collection_manager.get_dataset_collection(catering)\n return [document for document in db.find_all(collection_name)]",
"def documentos(self):\n return self.dados.get(\"docs\", [])",
"def get_all_appd():\n return list(appd_coll.find())",
"async def get_all_foto(self, db: Any) -> List:\n foto = []\n cursor = db.foto_collection.find()\n for document in await cursor.to_list(length=2000):\n foto.append(document)\n logging.debug(document)\n return foto",
"def listDB(self):\n # Responses: list of db names\n return self.get(\"/_all_dbs\", descr='listDB').addCallback(\n self.parseResult)",
"def fetch_docs(self, table):\r\n url = self.db_url + \"/Help?\" + urllib.urlencode({'page': \"databases/\"+\"Eagle\"+\"/\"+table})\r\n urllib2.install_opener(urllib2.build_opener(self.auth_handler, self.cookie_handler))\r\n response = urllib2.urlopen(url)\r\n cookie_jar.save(ignore_discard=True)\r\n return response.readlines()",
"def get_all(self):\n entities = []\n c = self._collection.find()\n for e in c:\n entities.append(e)\n return entities",
"def get_all(collection):\n return DB.DATABASE[collection].find({})",
"def getAllBooks(self):\n self.cursor.execute(\n \"select * from Book\")\n res = self.cursor.fetchall()\n return res",
"def srcdocs(self, i=1):\n res = []\n db = self.srcdb(i=i)\n for did in db:\n res += [dict(db[did])]\n return res",
"def _yield_docs(db, prefix=None, batchsize=500):\n for r in db.iterview(\"_all_docs\", batch=batchsize, include_docs=True):\n _id = str(r.id)\n if prefix and not _id.startswith(prefix):\n continue\n yield dict(r.doc)",
"def get_list_register_docs(catering: str) -> List[Dict]:\n collection_name: str = collection_manager.get_register_collection(catering)\n return [document for document in db.find_all(collection_name)]",
"def list_doc(\n self,\n doctype,\n fields=\"*\",\n filters=None,\n limit_start=None,\n limit_page_length=None,\n order_by=None,\n ):\n params = {\n \"fields\": json.dumps(fields),\n }\n if filters:\n params[\"filters\"] = json.dumps(filters)\n if limit_start:\n params[\"limit_start\"] = limit_start\n if limit_page_length:\n params[\"limit_page_length\"] = limit_page_length\n if order_by:\n params[\"order_by\"] = order_by\n\n return self.session.get(self.__build_url__(f\"{doctype}\"), params=params).json()",
"def fetch_all_docs_from_api():\n dgidb_docs = []\n # number of docs returned per API call\n doc_per_query = 500\n # get the total number of docs in DGIdb\n total_count = count_total_docs()\n template_url = 'http://www.dgidb.org/api/v2/interactions?count=' + str(doc_per_query) + '&page={page}'\n # use pagination to fetch all docs\n for i in range(1, math.ceil(total_count/500) + 1):\n query_url = template_url.replace('{page}', str(i))\n doc = requests.get(query_url).json()\n dgidb_docs += doc.get('records')\n # make sure all docs are fetched\n assert len(dgidb_docs) == total_count\n return dgidb_docs"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return the first matching doc. If none is found, return None.
|
def find_one(self, where_dict):
for document in self.documents:
if self.check_document(document, where_dict):
return document
|
[
"def one(self, *args, **kwargs):\n bson_obj = self.find(*args, **kwargs)\n count = bson_obj.count()\n if count > 1:\n raise MultipleResultsFound(\"%s results found\" % count)\n elif count == 1:\n try:\n doc = next(bson_obj)\n except StopIteration:\n doc = None\n return doc",
"def find_one(self, filter=None):\n for doc in self.find(filter, limit=1):\n return doc",
"def find_one(self, where_dict):\n result = self.find(where_dict)\n return result[0] if result else None",
"def one(self):\n try:\n return self.results[0]\n except IndexError:\n return None",
"def find_one(self, **kwargs):\n q = self.compile_query(**kwargs)\n for f in six.itervalues(self.facts):\n if q(f):\n return f\n return None",
"def find_document(self, **kwargs):\n\n if len(kwargs) == 0:\n raise SearchError(\"No information pertinent to item given\")\n\n self._maybe_clear_cache()\n matches = list(itertools.islice(self.backend.find(kwargs), 1))\n if not matches:\n raise SearchError(\n \"No item information found that matches the search criteria\"\n )\n return copy.deepcopy(matches[0])",
"def get_doc_from_shorturl(self, shortURL):\n doc = self.collection.find_one({'_id': shortURL})\n return doc",
"def find_one(cls, *args, **kw):\n\t\t\n\t\tif len(args) == 1 and not isinstance(args[0], Filter):\n\t\t\targs = (getattr(cls, cls.__pk__) == args[0], )\n\t\t\n\t\tDoc, collection, query, options = cls._prepare_find(*args, **kw)\n\t\tresult = Doc.from_mongo(collection.find_one(query, **options))\n\t\t\n\t\treturn result",
"def get_doc_by_id(cls, doc_id):\n return cls.get_index().get(doc_id=doc_id)",
"def findfirst(fn, collection, default=None):\n return next(iter(filter(fn, collection)), default)",
"def getDoc(self, key):\n return self._docs.get('doc_' + key)",
"def get_one(cur, query):\n\tnummatches = cur.execute(query)\n\treturn cur.fetchone()",
"def find_one(collection, query):\n return DB.DATABASE[collection].find_one(query)",
"def fetch_one(self, *args, **kwargs):\n bson_obj = self.fetch(*args, **kwargs)\n count = bson_obj.count()\n if count > 1:\n raise MultipleResultsFound(\"%s results found\" % count)\n elif count == 1:\n return next(bson_obj)",
"def get_doc_by_id(doc_id, cursor):\n return cursor.execute(f\"select * from documents where id='{doc_id}'\").fetchall()",
"def _get_cached_doc_only(doc_id):\n doc = rcache().get(key_doc_id(doc_id), None)\n if doc and CACHE_DOCS:\n return simplejson.loads(doc)\n else:\n return None",
"def find_doc(self, doc_type, property_name, property_value):\n try:\n self.client.connect()\n db = self.client[self.db_name]\n selector = {\n '_id': {'$gt': 0},\n 'type': doc_type,\n property_name: property_value\n }\n query = Query(db, selector=selector)\n for doc in query()['docs']:\n return doc\n return None\n except Exception:\n LOG.exception(\"Cloudant DB exception:\")\n finally:\n self.client.disconnect()",
"def first(self):\n try:\n row = self.cursor_strategy.fetchone()\n except BaseException as e:\n self.connection._handle_dbapi_exception(\n e, None, None, self.cursor, self.context\n )\n\n try:\n if row is not None:\n return self.process_rows([row])[0]\n else:\n return None\n finally:\n self.close()",
"def get_doc(self, type_, name):\n if type_ == \"doxygen\":\n return self.doxydocs.get(name)\n if type_ == \"sphinx\":\n return self.sphinxdocs.get(name)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return a list of matching doc(s).
|
def find(self, where_dict):
    matching_list = []
    for document in self.documents:
        if self.check_document(document, where_dict):
            matching_list.append(document)
    return matching_list
|
[
"def matches(self):\n return (SuperfastDocmatch(self, row_dct=row)\n for row in self.documents['rows'])",
"def find_all(self):\n return self.documents",
"def _get_documents(tx: Transaction, **parameters) -> List[Node]:\n params_strings = []\n if len(parameters) == 0:\n raise TypeError(\"Must provide at least one parameter\")\n for key in parameters.keys():\n params_strings.append(f\"{key}:${key}\")\n query_string = f\"\"\"\n MATCH (d:Document {{ {','.join(params_strings)} }})\n RETURN d\n \"\"\"\n return [p[0] for p in tx.run(query_string, **parameters)]",
"def get_documents_containing_term(self, term):\n try:\n doc_list = list(self.inverted_index[term].keys())\n except KeyError:\n return []\n return doc_list",
"def run(self, docs):\n\n return list(self.return_one_prodigy_doc(doc) for doc in docs)",
"def getAllDocuments(authorlist):\n documentlist = []\n for authors in authorlist:\n [documentlist.append(doc) for doc in authors.docs]\n return documentlist",
"def get_found_documents(self, document_names_list):\n documents = CouchDocument.view(\n 'dmscouch/all',\n keys=document_names_list,\n include_docs=True)\n # Converting documents to omit couchdb ViewResults iteration bug\n results = []\n for doc in documents:\n results.append(doc)\n return results",
"def get_lm_matched_docs(query, searcher, qparser, topk=2000):\n #did_dict = {}\n dids = []\n scores = []\n query = qparser.parse(query)\n # searcher.setSimilarity(LMDirichletSimilarity())\n scoreDocs = searcher.search(query, topk).scoreDocs\n # print(\"Found %d document(s) that matched query '%s':\" % (len(scoreDocs), query))\n\n for scoreDoc in scoreDocs:\n if len(dids) > 1000:\n break\n\n doc = searcher.doc(scoreDoc.doc)\n did = doc.get(\"id\")\n\n if check_if_spam(did):\n continue\n #text = doc.get(\"raw\")\n #did_dict[did] = {}\n #did_dict[did]['text'] = text\n #did_dict[did]['score'] = scoreDoc.score\n dids.append(did)\n scores.append(scoreDoc.score)\n\n return dids, scores",
"def word_search(doc_list, keyword):\n tmp = []\n tmpindex = []\n for h,i in zip(range(len(doc_list)),doc_list):\n tmp = [j.rstrip('.,').lower() for j in i.split()]\n if keyword in tmp:\n tmpindex.append(h)\n\n return tmpindex",
"def search(self, filter):\n\t\tmatch_list = [note for note in self.notes if note.match(filter)]\n\t\treturn match_list",
"def _get_all_docs_above_threshold(self, doc):\n current_length = 1\n docs = self.model.docvecs.most_similar(doc, topn=1)\n while docs[-1][1] >= self.threshold:\n current_length += 1\n docs = self.model.docvecs.most_similar(doc, topn=current_length)\n\n return [item[0] for item in docs[0:-1]]",
"def search_terms(self, terms):\n\n docs_indices = []\n\n for term_index, term in enumerate(terms):\n\n term = eng_stemmer.stem(term)\n\n # keep only docs that contains all terms\n\n if term not in self.term_index:\n\n #docs_indices = []\n \n continue\n\n #break\n\n # compute intersection between results\n \n # there is room for improvements in this part of the code\n \n else:\n \n docs_with_term = self.term_index[term]\n \n if term_index == 0:\n \n docs_indices = docs_with_term\n \n else:\n \n docs_indices = set(docs_indices) | set(docs_with_term)\n \n return list(set(docs_indices))",
"def srcdocs(self, i=1):\n res = []\n db = self.srcdb(i=i)\n for did in db:\n res += [dict(db[did])]\n return res",
"def get_all_docs(self):\n docs = []\n cursor = self.db.scores.find({})\n for document in cursor:\n docs.append(document)\n # print(docs)\n return(docs)",
"def simple_find(self, doc_ls):\n spl_text_ls = []\n\n for doc in doc_ls:\n is_simple = False\n nsubj_tok = [tok for tok in doc if tok.dep_ == \"nsubj\" or tok.dep_ == \"nsubjpass\"]\n mark_tok = [tok for tok in doc if tok.dep_ == \"mark\"]; \n\n if len(nsubj_tok) == 1 and len(mark_tok) == 0:\n is_simple = True\n\n if is_simple == True:\n spl_text_ls.append(doc.string.strip())\n\n return spl_text_ls",
"def index_search(files, index, terms):\n res_file =[]\n count = 0\n if len(terms) == 0:\n print('empty terms')\n return\n for term in terms:\n term = term.lower()\n count += 1\n if count == 1:\n try:\n s = index[term]\n except:\n s = set()\n else:\n s = s.intersection(index[term])\n for id in s:\n res_file.append(files[id])\n return res_file",
"def query(self, query_str: str)->list:\n url_dict = {} #stores data of end urls \n urls_tf_idf_total = {}#used to keep track of tf.idf for the queries\n result_list = [] #used to store the results\n json_data = json.load(open(BOOKKEPING_LOC))\n split_query = query_str.split()\n counter = 0\n for query in split_query: #iterate through query by splitting with space\n result = self._collection.find({\"_id\": query})\n try:\n token_value = result.next()\n docs_dict = token_value[\"Doc_info\"]\n results_count = 0 #potentially have to take out if want all queries for selecting\n for doc_id, attributes in sorted(docs_dict.items(), key=get_tfidf, reverse=True):\n #keeping track of updates. those with more updates = matched more queries = higher priority\n #even if lower tf.idf\n if(json_data[doc_id] in urls_tf_idf_total):\n urls_tf_idf_total[json_data[doc_id]][0] += 1\n urls_tf_idf_total[json_data[doc_id]][1] += docs_dict[doc_id][\"tf-idf\"]\n else:\n urls_tf_idf_total[json_data[doc_id]] = [1,docs_dict[doc_id][\"tf-idf\"]]\n results_count += 1\n if (results_count == 10):\n break\n except StopIteration:#could not find query\n pass\n #search for urls that match the most words and continues until 10 queries are reached\n #or if there are no more urls to retrieve\n counter = len(split_query)\n while(1):\n if(len(url_dict) >= 10 or counter == 0): \n break\n for url,tf_idf in list(urls_tf_idf_total.items()):#list part necessary in python3\n if( tf_idf[0] == counter): #iterates through ALL the words matching. Stopping prematurely\n #will result in queries being missed before moving to the next best match.\n url_dict[url] = tf_idf\n counter -= 1 #used to keep track of how many queries are matching.\n #higher priority towards queries with more words matching\n #return urls sorted by tf_idf\n sorted_values = sorted(url_dict.items(), key=lambda x: (x[1][0],x[1][1]), reverse = True)\n #return 10 top urls from sorted_values\n for url,tf_idf in sorted_values:\n if(len(result_list) < 10):\n result_list.append((url,tf_idf))\n else:\n break\n return result_list",
"def get_documents(corpus, list_doc_ids):\n # XML parse code adapted from\n # https://stackabuse.com/reading-and-writing-xml-files-in-python/\n corpus_filename = config.CORPUS[corpus]['corpusxml']\n if not os.path.isfile(corpus_filename):\n print(corpus_filename + ' does not exist')\n return []\n tree = xml.parse(corpus_filename)\n root = tree.getroot()\n doc_list = []\n #list_doc_ids is a list of (doc_id, score) pairs\n for doc in list_doc_ids:\n doc_id = doc[0]\n # print(doc_id)\n # print(doc[1])\n # print(root[doc_id][0].text)\n if root[doc_id][1].text == None:\n root[doc_id][\n 1].text = ' // There is no title information available. Reuters did not supply any title information for this article. //'\n if root[doc_id][2].text == None:\n root[doc_id][\n 2].text = '// There is no text body information available. Reuters did not supply any body text for this article. //'\n # print(root[doc_id][1].text)\n # print(root[doc_id][2].text)\n if corpus==config.UOTTAWA:\n doc_to_add = Document(doc_id, doc[1],\n root[doc_id][0].text + ' ' + root[doc_id][1].text,\n root[doc_id][2].text, [])\n doc_list.append(doc_to_add)\n elif corpus ==config.REUTERS:\n if root[doc_id][3].text == None:\n root[doc_id][\n 3].text = '// There is no topic information available. Reuters did not supply any body text for this article. //'\n\n doc_to_add = Document(doc_id, doc[1],\n root[doc_id][0].text + ' ' + root[doc_id][1].text,\n root[doc_id][2].text,root[doc_id][3].text)\n doc_list.append(doc_to_add)\n\n\n\n return doc_list",
"def _yield_subquery_document_results(\n self, subquery: List[QueryItem]\n ) -> Generator[Set[int], None, None]:\n for search_item in subquery:\n if search_item.exact:\n yield self.index.find_documents_with_phrase(search_item.words)\n else:\n yield self.index.find_documents_with_words(search_item.words)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return the number of matching docs.
|
def count(self, where_dict):
# return len(self.find(where_dict))
count = 0
for document in self.documents:
if self.check_document(document, where_dict):
count += 1
return count
|
[
"def docids_count():",
"def document_count(self):\n #return len(self.fake_index_storage.keys())\n raise NotImplementedError()",
"def __len__(self):\n with self._index.reader() as reader:\n return reader.doc_count()",
"def doc_count(es_client, search_app):\n response = es_client.count(index=search_app.es_model.get_read_alias())\n return response['count']",
"def document_count(self, word: str) -> int:\n return self.index.document_count(word)",
"def _get_count(self) -> \"size_t\" :\n return _core.DocumentReferences__get_count(self)",
"def count(self, where_dict={}):\n return len(self.find(where_dict))",
"def count_total_docs():\n query_url = 'http://www.dgidb.org/api/v2/interactions?count=1&page=1'\n return requests.get(query_url).json()['_meta']['total_count']",
"def topic_match_count( query_topics_dict, document_topics_dict ):\r\n counter = 0\r\n\r\n if query_topics_dict is not None and document_topics_dict is not None:\r\n query_topics = list( query_topics_dict.keys() )\r\n document_topics = list( document_topics_dict.keys() )\r\n for topic in query_topics:\r\n if topic in document_topics:\r\n counter += 1\r\n\r\n return counter",
"def get_word_counts(docs):\n pass",
"def number_of_matches(self):\n return len(self.matches)",
"def test_2_document_counts(self):\n # Get index\n r = self.client.get('/')\n if r.status_code==302:\n r = self.client.get(r.headers['Location'])\n code = r.status_code\n data = str(r.data)\n\n # should find 2 google docs\n self.assertIn('id=\"gdoc-count\">2',data)",
"def count(self, **request_params):\n es_query = self._generate_es_query(count_query=True)\n return self.search_model_class.count(es_query, **request_params)",
"def test_2_document_counts(self):\n # Get index\n r = self.client.get('/')\n if r.status_code==302:\n r = self.client.get(r.headers['Location'])\n code = r.status_code\n data = str(r.data)\n\n # should find 1 issue + 1 pull request\n self.assertIn('id=\"issue-count\">2',data)\n\n # should find 2 files, 1 markdown\n self.assertIn('id=\"ghfile-count\">2',data)\n self.assertIn('id=\"markdown-count\">1',data)",
"def no_of_documents_containing_a_word(self, query_word):\n if PROXIMITY.useCache:\n if query_word in BM25.no_of_docs_dict:\n return float(BM25.no_of_docs_dict[query_word])\n else:\n return 0\n else:\n if query_word in self.cache:\n return float(self.cache[query_word])\n else:\n no_of_documents_having_the_word = 0\n for para_id, ranked_word_dict in self.documents.items():\n if query_word in ranked_word_dict:\n no_of_documents_having_the_word += 1\n self.cache[query_word] = no_of_documents_having_the_word\n return float(no_of_documents_having_the_word)",
"def count(self, value: str, *, exact_match: bool = False) -> int:\n return len(list(self.search(value, exact_match=exact_match)))",
"def raw_counts(self, query_term, doc):",
"def __document_frequency(term):\n\treturn len(inverted_index[term])",
"def count(self, queryExpression, defaultField=None):\n searcher = self.fbt.getIndexSearcher()\n analyzer = self.fbt.getIndexAnalyzer()\n defaultField = defaultField or self.fbt.getConfig(\"LUCENE_DEFAULT_FIELD\")\n query = JavaLuceneQueryParser(defaultField, analyzer).parse(queryExpression)\n results = searcher.search(query, 1)\n return results.totalHits"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Delete matching doc(s) from the collection.
|
def delete(self, where_dict):
remove_list = self.find(where_dict)
for document in remove_list:
self.documents.remove(document)
|
[
"def delete_document(self, collection, query, multiple=False):\n if multiple:\n return self.connection[collection].delete_many(query)\n else:\n return self.connection[collection].delete_one(query)",
"def delete(self, where_dict):\n def checker(document):\n return not self.check_document(document, where_dict)\n self.collection = list(filter(checker, self.collection))",
"def delete_many(self, record=[]):\n query = []\n for doc_id in record:\n sub_query = {\"delete\": {\"_index\": self.index, \"_type\": self.doc_type, \"_id\": doc_id[\"_id\"]}}\n query.append(sub_query)\n\n try:\n response = self.client.elastic.bulk(query)\n return True\n except Exception as e:\n return False",
"def delete_all(self, collection):\n self.__db[collection].delete_many({})",
"def clear_collection(collection):\n for doc in collection.stream():\n doc.reference.delete()",
"def delete_collection(collection):\r\n collection.delete_many({})",
"def delete(self, **kwargs):\n if self.doc_id:\n doc = self._connection(self.server, self.database)[self.doc_id]\n self._connection(self.server, self.database).delete(doc)",
"def delete_doc(doc):\n doc._collection_obj.remove(ObjectId(doc.ID))",
"def clear_index(cls):\n index = cls.get_index()\n try:\n while True:\n doc_ids = [\n document.doc_id for document in index.get_range(ids_only=True)]\n if not doc_ids:\n break\n index.delete(doc_ids)\n except search.DeleteError:\n logging.exception('Error removing documents: ')",
"def deleteMatches():\n\n push_to_db(\"delete from matches\")",
"def deleteDocument(self, document):\n return",
"def delete_documents(self, metadocs, override_role_separation=False):\n def _get_delete_action(doc, id_suffix=''):\n action = {'_op_type': 'delete', '_id': doc['_id'] + id_suffix}\n\n if doc.get('_version'):\n action['_version'] = doc['_version']\n action['_version_type'] = 'external'\n\n parent_entity_id = doc.get('_parent')\n if parent_entity_id:\n if (not override_role_separation and\n self.plugin.parent_plugin.requires_role_separation):\n # Default to _USER; defaulting to _ADMIN causes a\n # security issue because of potential fishing queries\n parent_entity_id += (id_suffix or USER_ID_SUFFIX)\n action['_parent'] = parent_entity_id\n return action\n\n actions = []\n for metadoc in metadocs:\n if (not override_role_separation and\n self.plugin.requires_role_separation):\n actions.extend([\n _get_delete_action(metadoc, ADMIN_ID_SUFFIX),\n _get_delete_action(metadoc, USER_ID_SUFFIX)])\n else:\n actions.append(_get_delete_action(metadoc))\n\n try:\n helpers.bulk(\n client=self.plugin.engine,\n index=self.index_name,\n doc_type=self.document_type,\n actions=actions\n )\n except helpers.BulkIndexError as exc:\n exc_payload = exc[1]\n doc_ids = ', '.join(e['delete']['_id'] for e in exc_payload)\n\n if all(e['delete']['status'] == 404 for e in exc_payload):\n LOG.warning(\n _LW(\"Error deleting %(doc_type)s %(ids)s; \"\n \"already deleted\") %\n {\"doc_type\": self.plugin.document_type, \"ids\": doc_ids})\n\n elif all(e['delete']['status'] == 409 for e in exc_payload):\n # This *should* never happen. If it does, something has gone\n # wrong but leaving this here for now\n LOG.warning(\n _LW(\"Error deleting %(doc_type)s %(ids)s; newer versions \"\n \"of some documents have been indexed\") %\n {\"doc_type\": self.plugin.document_type, \"ids\": doc_ids})\n else:\n raise",
"def test_delete_document_using_delete(self):\n pass",
"def test_bulk_delete(self):\n\n se = SearchEngineFactory().create()\n # se.create_index(index='test')\n\n for i in range(10):\n x = {\n 'id': i,\n 'type': 'prefLabel',\n 'value': 'test pref label',\n }\n se.index_data(index='test', doc_type='test', body=x, idfield='id', refresh=True)\n y = {\n 'id': i + 100,\n 'type': 'altLabel',\n 'value': 'test alt label',\n }\n se.index_data(index='test', doc_type='test', body=y, idfield='id', refresh=True)\n\n\n query = Query(se, start=0, limit=100)\n match = Match(field='type', query='altLabel')\n query.add_query(match)\n\n query.delete(index='test', refresh=True)\n\n self.assertEqual(se.es.count(index='test', doc_type='test')['count'], 10)",
"def delete_docs(q=None):\n data = '{ \"delete\": { \"query\": \"%s\" }, \"commit\": {} }' % (q is not None and q or '*:*')\n hdrs = {'Content-Type': 'application/json'}\n\n url = '%s/update/json' % (URL,)\n req = urllib2.Request(url, data, hdrs)\n\n o = urllib2.urlopen(req)",
"def deleteMatches():\n with get_cursor() as cursor:\n cursor.execute(\"delete from match_results;\")",
"def bulk_delete(self, layer_name, doc_type, root_doc_id):\n raise NotImplementedError",
"def delete_documents(self, content_source_key, ids):\n endpoint = \"sources/{}/documents/bulk_destroy\".format(content_source_key)\n return self.session.request(\"post\", endpoint, json=ids)",
"def deleteMatches():\n db, cur = connect()\n # to delete the matches, simply remove all data from the \"matches\" table\n # using a \"TRUNCATE\" command\n query = \"TRUNCATE matches;\"\n cur.execute(query)\n db.commit()\n db.close()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Applies a map function to each document, collating the results, then applies a reduce function to the collated results and returns the outcome.
|
def map_reduce(self, map_function, reduce_function):
map_results = []
for document in self.documents:
map_results.append(map_function(document))
return reduce_function(map_results)
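
A minimal usage sketch (hypothetical collection instance and document shape), counting the total number of words across all documents:

    # Assuming `db` is an instance holding the documents iterated above,
    # and each document is a dict with a 'text' field.
    total_words = db.map_reduce(
        map_function=lambda doc: len(doc.get('text', '').split()),
        reduce_function=sum,
    )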
|
[
"def map_reduce(self, map_function, reduce_function):\n return reduce_function(list(map(map_function, self.collection)))",
"def reduce_func(self, reduce_function):\n return reduce_function(self.collection)",
"def reduce_function(word_maps):\n # Reduce all the data by combining all the parts that are received\n result = {}\n for i in word_maps:\n for k, v in i.items():\n try:\n # result exists, add the value\n result[k] += v\n except KeyError:\n # new result, set the value\n result[k] = v\n return result",
"def mapmany(self, function):\r\n return Iterable(itertools.chain.from_iterable(map(function, self.__iterable)))",
"def map_reduce(sequence, map_func, reduce_func):\n sequence_len = len(sequence)\n groups = defaultdict(list)\n\n for item in sequence:\n groups[map_func(item)].append(item)\n\n res = list()\n for key, values in groups.items():\n res.append((key, reduce_func(key, values, sequence_len)))\n\n return tuple(res)",
"def map(self, function):\r\n return Iterable(map(function, self.__iterable))",
"def reduceByKey(self, func):\n buckets = col.defaultdict(list)\n for d in self.data:\n buckets[d[0]].append(d[1])\n\n reduced_buckets = dict()\n for key in buckets:\n reduced_buckets[key] = ft.reduce(func, buckets[key])\n\n return ParallelData(reduced_buckets.items())",
"def collect(sequence, function):\n for seq in __builtin__.map(function, sequence):\n for x in seq:\n yield x",
"def flatMap(self, func):\n result = map(func, self.data)\n\n return ParallelData(list(it.chain.from_iterable(result)))",
"def map_results(results):\n return map(\n lambda w: {\n 'word': w,\n 'len': len(w),\n 'val': calculate_value(w)\n }, results\n )",
"def map_collection(collection, map_fn):\n if collection is None:\n return None\n if isinstance(collection, (tuple, list)):\n return type(collection)(map_fn(x) for x in collection)\n if isinstance(collection, dict):\n return {k: map_fn(v) for k, v in collection.items()}\n return map_fn(collection)",
"def pipeline_each(data, fns):\n\tfrom functools import reduce\n\treturn reduce(lambda a, x: list(map(x, a)), fns, data)",
"def MapCol(iterable, columns, func):\n colset = as_set(columns)\n for es in iterable:\n yield tuple(func(e) if i in colset else e for i, e in enumerate(es))",
"def apply(self, func):\n if not callable(func):\n raise ValueError(\"Expected func to be a callable function\")\n mapped = [func(date) for date in self]\n if all([isinstance(m, datetime.date) for m in mapped]):\n return Calendar(mapped)\n return mapped",
"def parse_to_documents(self, models):\n return map(self.parse_to_document, models)",
"def map(self, func, pds):\n \n raise NotImplemented",
"def get_documents(self):\n # Result container for collating all possible dictionary file terms\n set_of_documents = []\n documents = self.process_file()\n print(\"Done processing file\")\n\n # Then we handle the fields: content, title and date_posted and court (to generate position index)\n count = 0\n for document in documents:\n document['content_positional_indexes'] = self.generate_positional_indexes(document['content']) # Part 1: Content\n document['title_positional_indexes'] = self.generate_positional_indexes(document['title']) # Part 2: Title\n document['court_positional_indexes'] = self.generate_positional_indexes(document['court']) # Part 3: Court\n document['date_posted_positional_indexes'] = self.generate_positional_indexes(document['date_posted'].split()[0]) # Part 4: Date_posted\n\n # To obtain the top K terms for the current document\n accumulate_counts = {}\n self.include_count_contribution_from_pos_ind(accumulate_counts, document['content_positional_indexes'])\n self.include_count_contribution_from_pos_ind(accumulate_counts, document['title_positional_indexes'])\n self.include_count_contribution_from_pos_ind(accumulate_counts, document['court_positional_indexes'])\n self.include_count_contribution_from_pos_ind(accumulate_counts, document['date_posted_positional_indexes'])\n document['top_K'] = Counter(accumulate_counts).most_common(K)\n for i in range(K):\n # i must always be smaller than actual_size by 1\n # accumulate_counts has a possibility of going below K\n # to avoid null pointer exception, we use < len(accumulate_counts)\n if (i < len(accumulate_counts)):\n document['top_K'][i] = document['top_K'][i][0]\n else:\n break;\n\n # Now, document['top_K'] will be a list of the top K terms for the document\n set_of_documents.append(document)\n\n print(count,\" Generated positional indexes\")\n count += 1\n\n print(\"Done getting documents\")\n return set_of_documents",
"def parse_pages(self, pages_collection):\n\n # for every page, for each unique term add 1 \n for page in tqdm(pages_collection):\n term_freqs_dict = self.create_term_freqs_dict(page) # create & update page.term_freqs_dict\n \n for term in term_freqs_dict.keys(): # all terms are unique\n self.doc_term_freqs[term] = self.doc_term_freqs.get(term, 0) + 1 # update doc_term_freqs {term: doc_freqs}",
"def _all_reduce(self, input, output, mode=\"sum\"):\n input_list = [i for i in input]\n ans = self.gloo.all_reduce(input_list, mode)\n for i in range(len(ans)):\n output[i] = 1"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return a list of the sorted names of the collections in the database.
|
def get_names_of_collections(self):
return sorted(self.collections.keys())
|
[
"def get_collections(db):\n res = None\n if db:\n res = db.list_collection_names()\n return res",
"def __list_collection__(dbname):\n coll_str = run(\"\"\"mongo %s --eval \"printjson(db.getCollectionNames())\" --quiet\"\"\" % dbname)\n if coll_str:\n collections = json.loads(coll_str)\n # remove system.* collections\n for name in collections:\n match = re.search(\"system.*\", name)\n if match:\n collections.remove(name)\n return collections\n return None",
"def get_all_collections(self):\n return self.client.get(\"/collections\")",
"def list_collections():\n\n try:\n collections = facade.list_collections(kind='document')\n return collections, 200\n except gmap_exc.DatabaseNotExist as err:\n return err.message, 400\n except Exception as err:\n return str(err), 500",
"def _list_sub_collections(self, prefix=None, strip=True):\n\n prefix = self._join_prefix(self._mongo_prefix, prefix)\n prefix = '.'.join(prefix)\n if prefix:\n prefix += '.'\n\n striplen = len(prefix) if strip else 0\n\n for name in self._database.collection_names():\n if name.startswith('system.'):\n continue # Ignore system collections\n\n if (not prefix) or name.startswith(prefix):\n yield name[striplen:]",
"def collection_name(self):",
"def cli_cosmosdb_collection_list(client, database_id):\n return list(client.ReadContainers(_get_database_link(database_id)))",
"def list_database(db=None):\n if db is None:\n return CONNECTION.get_connection().database_names()\n return CONNECTION.get_connection()[db].collection_names()",
"def getCollections(self) -> None:\n if not self._checkUserNameConfigured():\n return\n self._executeQuery(\"users/{}/collections\".format(self._user_name))",
"def get_collections(self):\n if self.collections:\n return self.collections\n else:\n self._load_collections(self.collection_names)\n return self.collections",
"def names(self):\n\t\treturn self.store().names()",
"def names(self, pubs=EmptyI):\n\n base = self.get_part(self.__BASE_PART, must_exist=True)\n if base is None:\n # Catalog contains nothing.\n return set()\n return base.names(pubs=pubs)",
"def get_glue_database_names(self):\n try:\n self.response = self.glue_client.get_databases()\n database_names = []\n for idx, i in enumerate(self.response['DatabaseList']):\n database_names.append(self.response['DatabaseList'][idx]['Name'])\n return database_names\n except Exception as e:\n print(e)",
"def GetProjectNames():\n return [p.name for p in db.Query(models.Project).order('name')]",
"def keys(self) -> List[str]:\n return list(self._collection.keys())",
"def databases(self):\n _log.debug('get database list')\n result = self._requestJSON('dbs', '')\n return self._getKey(result, 'name')",
"def get_list_menu_docs(catering: str) -> List[Dict]:\n collection_name: str = collection_manager.get_menu_collection(catering)\n return [document for document in db.find_all(collection_name)]",
"def list_databases(self):\n\n _conn = self.get_mongo_client()\n return [i for i in _conn.list_databases()]",
"def puppy_names():\n\tfor puppy in session.query(Puppy).order_by(Puppy.name.asc()).all():\n\t\tprint puppy.name"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Convert a vote object into a minimal CSV line.
|
def _vote_csv_row(vote):
rep = rep_log10(vote['reputation'])
return "%s,%s,%s,%s" % (vote['voter'], vote['rshares'], vote['percent'], rep)
|
[
"def to_csv(self):\n pass",
"def to_csv(self) -> str:\n return \"Movie\\n\\\"{}\\\",{},{},{},{},{}\".format(\n self.get_name(), self.get_runtime(),\n self.get_provider(), self.get_person(),\n self.is_started(), self.is_finished()\n )",
"def as_csv(self):\n\n import csv\n import cStringIO\n\n out = cStringIO.StringIO()\n writer = csv.writer(out)\n\n writer.writerow((_(\"subverbify\"),\n _(\"uniques\"),\n _(\"pageviews\")))\n for (name, url), (uniques, pageviews) in self.report:\n writer.writerow((name, uniques, pageviews))\n\n return out.getvalue()",
"def as_csv(cls, thing):\n\n import csv\n import cStringIO\n\n start, end = promote.get_traffic_dates(thing)\n history = cls.get_hourly_traffic(thing, start, end)\n\n out = cStringIO.StringIO()\n writer = csv.writer(out)\n\n writer.writerow((_(\"date and time (UTC)\"),\n _(\"impressions\"),\n _(\"clicks\"),\n _(\"click-through rate (%)\")))\n for date, datestr, values in history:\n # flatten (date, datestr, value-tuple) to (date, value1, value2...)\n writer.writerow((date,) + values)\n\n return out.getvalue()",
"def toLineCostCsvStrings(self) -> [str]:\n return [str(self.getId()),\n CsvWriter.shortenDecimalValueForOutput(self.getLength()),\n CsvWriter.shortenDecimalValueForOutput(self.getCost())\n ]",
"def to_csv(self) -> str:\n show_csv = \"\\\"{}\\\",{},{},{},{}\".format(\n self.get_name(), self.get_provider(),\n self.get_person(),\n self.is_started(), self.is_finished()\n )\n episodes_csv = \"\\n\".join(episode.to_csv() for episode in self.get_episodes())\n return f\"LimitedSeries\\n{show_csv}\\n{episodes_csv}\"",
"def csv(self):\n output = io.StringIO()\n writer = csv.writer(output)\n labels = sorted(self.records.keys())\n\n # x labels.\n writer.writerow([''] + labels)\n\n # y labels and data.\n for y, y_label in enumerate(labels):\n row = [labels[y]]\n for x_label in labels:\n row.append(self.record_similarity(y_label, x_label))\n writer.writerow(row)\n\n return output.getvalue()",
"def print_line(self, line, header = False):\n\t\t\n\t\t\n\t\t#~ pprint([str(i).replace(\",\", \".\") if is_number(i) else i for i in line])\n\t\t#~ pprint([is_number(i) for i in line])\n\t\twith open(self.filename_csv, 'wb' if header else 'ab') as csvfile:\n\t\t\tcsvwriter = csv.writer(csvfile, delimiter='\\t',quotechar='\"', quoting=csv.QUOTE_NONNUMERIC)\n\t\t\tcsvwriter.writerow(line)",
"def csv(self):\n output = io.StringIO()\n writer = csv.writer(output)\n\n # x labels.\n writer.writerow([''] + self.x_labels)\n\n # y labels and data.\n for y, row in enumerate(self.data.tolist()):\n writer.writerow([self.y_labels[y]] + row)\n\n return output.getvalue()",
"def csv(self, outfile=None):\n assert self.load().isloaded()\n csv = [(self.filename(), # video filename\n k, # frame number (zero indexed)\n d.category(), d.shortlabel(), # track category and shortlabel (displayed in caption)\n ';'.join([self.activities(id=aid).category() for aid in tolist(d.attributes['activityid'])] if 'activityid' in d.attributes else ''), # semicolon separated activity category associated with track\n d.xmin(), d.ymin(), d.width(), d.height(), # bounding box\n d.attributes['trackid'], # globally unique track ID\n ';'.join([aid for aid in tolist(d.attributes['activityid'])] if 'activityid' in d.attributes else '')) # semicolon separated activity ID associated with track\n for (k,im) in enumerate(self) for d in im.objects()]\n csv = [('# video_filename', 'frame_number', 'object_category', 'object_shortlabel', 'activity categories(;)', 'xmin', 'ymin', 'width', 'height', 'track_id', 'activity_ids(;)')] + csv\n return writecsv(csv, outfile) if outfile is not None else csv",
"def to_csv(self,fn='tableone.csv'):\n with open(fn, 'w') as f:\n writer = csv.writer(f)\n writer.writerows(self.tableone)",
"def write_csv_row(self, csv_writer):\n csv_writer.writerow([\n self.object_id,\n self.email,\n self.forenames,\n self.surname,\n self.phone,\n self.note,\n self.role,\n self.college.name,\n self.affiliation.name,\n self.battels.battels_id if self.battels is not None else 'N/A',\n ])",
"def format_row(self, row_obj):\n return row_obj",
"def format_votes(dddb, vote_list):\n for vote in vote_list:\n for vote_detail in vote.vote_details:\n vote_detail.vote = vote.vote_id\n\n if vote_detail.person is not None:\n if vote_detail.person['alt_id'] is None:\n voter_names = vote_detail.person['name'].split(',')\n if len(voter_names) <= 2:\n voter_names = vote_detail.person['name'].split(';')\n\n for voter_name in voter_names:\n pid = get_pid_name(dddb, voter_name)\n vote_result = vote_detail.result\n state = vote_detail.state\n\n vote.add_vote_detail(state=state, vote_result=vote_result, pid=pid)\n else:\n vote_detail.pid = get_pid(dddb, vote_detail.person)",
"def convert_context_to_csv(self, context):\r\n raise NotImplemented('You must implement this in the subclass')",
"def __str__(self):\n return 'VerseTag, id: ' + \\\n str(self.id) + \\\n ', data_type: ' + \\\n str(self.data_type) + \\\n ', count: ' + \\\n str(self.count) + \\\n ', custom_type: ' + \\\n str(self.custom_type) + \\\n ', values: ' + \\\n str(self.value)",
"def _saveCSV( self ):",
"def asteroids_csv(self, payload):\n csv_file=open(f\"/tmp/asteroids_{self.today}.csv\",'w', newline='\\n')\n fields=list(payload[0].keys())\n writer=csv.DictWriter(csv_file, fieldnames=fields)\n writer.writeheader()\n writer.writerows(payload)\n csv_file.close()",
"def __str__(self):\n return '\\t'.join((self.uid, self.account, self.proto, self.fpr,\n 'verified' if self.verified else ''))",
"def make_participant_dump_csv(self):\n columns = collections.OrderedDict([\n ('id','study_id'),\n ('created',lambda c: c.created.date()),\n ('facility','facility'),\n ('group','study_group'),\n ('shared',lambda c: 1 if c.phone_shared else 0) ,\n ('validation',lambda c: 1 if c.is_validated else 0),\n ('age','age'),\n ])\n p_all = cont.Contact.objects.all()\n file_path = os.path.join(self.options['dir'],'participant_dump.csv')\n make_csv(columns,p_all,file_path)\n return file_path"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Output a message from the backtesting engine.
|
def output(self, msg):
print(f"{datetime.now()}\t{msg}")
|
[
"def output(msg):\r\n sys.stdout.write(msg+\"\\n\")",
"def output(msg):\n print(f\"{datetime.now()}\\t{msg}\")",
"def output(message):\n sys.stdout.write(message + \"\\n\")\n sys.stdout.flush()",
"def output(self,code,msg):\n self.sendLine(\"%s %s\" % (code, msg))",
"def print_output(self):\r\n output = \"\"\r\n if self.status == self.STATUS_OK:\r\n output = \"OK\"\r\n elif self.status == self.STATUS_WARNING:\r\n output = \"Warning\"\r\n elif self.status == self.STATUS_CRITICAL:\r\n output = \"Critical\"\r\n elif self.status == self.STATUS_UNKNOWN:\r\n output = \"Unknown\"\r\n\r\n if self.messages:\r\n if len(output):\r\n output += \" - \"\r\n # Join messages like sentences. Correct those messages which already ended with a period or a newline.\r\n output += \". \".join(self.messages).replace(\".. \", \".\").replace(\"\\n. \", \"\\n\")\r\n\r\n if self.perfdata:\r\n if len(output):\r\n output += \" | \"\r\n output += \" \".join(self.perfdata)\r\n\r\n print(output)",
"def get_message():\n return \"Hello AbbVie world\"",
"def shout(self):\n\t\tprint \"meow meow\"",
"def print_message(message):\r\n print(message)",
"def display_message():\n m = get_message()\n print(m)\n # return None",
"def test_message(self):\n self.logger.message('Testing.')\n self.assertTrue(self.called, 'BaseLogger did not call given output method')",
"def SendTestOutput(self, output_file, testcase, message):\n self._WriteTestToFile(output_file, testcase, message)",
"def do_outputs(self):\n print(f\"Your score is: {self.score}\")",
"def job(self, msg, *args, **kwargs):\n self.print(50, msg, *args, **kwargs)",
"def log_script_result(self, message):\n\n testlog.wtl_log(\"!*** %s\" %(message), force=True)",
"def event_message() -> str:\n\n return \"Automation for the people!\"",
"def print_goodbye():\r\n \r\n message = \"Goodbye\"\r\n print message",
"def display_message(text):\n\n clear_shell()\n print figlet.renderText(text)\n sleep(.75)\n clear_shell()",
"def __post_execution_message(self):\r\n self.output.textCursor().insertText('\\n\\n')\r\n format_ = QTextCharFormat()\r\n format_.setAnchor(True)\r\n format_.setForeground(Qt.green)\r\n self.output.textCursor().insertText(\r\n self.tr(\"Post Execution Script Successfully executed.\"), format_)",
"def display_message(message):\n\n print '%s %s' % (timestamp(), message)",
"def print_backtest_results(self, stdout=None):\n previous_stdout = sys.stdout\n if stdout is not None: # Temporarily redirects output to stdout provided.\n sys.stdout = stdout\n\n print(\"\\nBacktest results:\")\n print(f'\\tSymbol: {\"Unknown/Imported Data\" if self.symbol is None else self.symbol}')\n print(f'\\tElapsed: {round(self.endTime - self.startTime, 2)} seconds')\n print(f'\\tStart Period: {self.data[self.startDateIndex][\"date_utc\"]}')\n print(f\"\\tEnd Period: {self.currentPeriod['date_utc']}\")\n print(f'\\tStarting balance: ${round(self.startingBalance, self.precision)}')\n print(f'\\tNet: ${round(self.get_net(), self.precision)}')\n print(f'\\tCommissions paid: ${round(self.commissionsPaid, self.precision)}')\n print(f'\\tTrades made: {len(self.trades)}')\n net = self.get_net()\n difference = round(net - self.startingBalance, self.precision)\n if difference > 0:\n print(f'\\tProfit: ${difference}')\n print(f'\\tProfit Percentage: {round(net / self.startingBalance * 100 - 100, 2)}%')\n elif difference < 0:\n print(f'\\tLoss: ${-difference}')\n print(f'\\tLoss Percentage: {round(100 - net / self.startingBalance * 100, 2)}%')\n else:\n print(\"\\tNo profit or loss incurred.\")\n # print(f'Balance: ${round(self.balance, 2)}')\n # print(f'Coin owed: {round(self.coinOwed, 2)}')\n # print(f'Coin owned: {round(self.coin, 2)}')\n # print(f'Trend: {self.trend}')\n\n sys.stdout = previous_stdout # revert stdout back to normal"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Clear all data from the last backtesting run.
|
def clear_data(self):
self.strategy = None
self.tick = None
self.bar = None
self.datetime = None
self.algo_count = 0
self.algos.clear()
self.active_algos.clear()
self.trade_count = 0
self.trades.clear()
self.logs.clear()
self.daily_results.clear()
|
[
"def clear_current_data(self):\n self.current_acc_data.clear()\n self.current_bvp_data.clear()\n self.current_eda_data.clear()\n self.current_hr_data.clear()\n self.current_ibi_data.clear()\n self.current_temp_data.clear()\n self.current_eye_tracking_data.clear()\n self.current_skeleton_data.clear()",
"def reset(self):\n\n for test in self._tests:\n test.reset()",
"def clear(self):\n self.data = []\n self.updateData()",
"def reset(self):\n\n self.history = []\n self.output.value[:] = self.initialHistory",
"def data_reset(self):\n # ic()\n self.arches.clear()\n self.arch_ids.clear()\n self.data_1d.clear()\n self.data_2d.clear()\n self.new_scan = True",
"def reset_all(self):\n self.reset_memory()\n self.reset_traces()\n self.reset_tags()\n\n self.prev_obs = np.zeros(self.nx_inst)\n self.prev_qa = 0\n self.prev_max = 0.",
"def clear_temp_data():\n\n # Clears table of active_clients\n logger.info(\"Clearing all temp data\")\n DbQuery.clear_table(\"active_clients\")",
"def clear(self) -> \"Dump\":\n ...",
"def reset(self):\n\n self.timestep = 0\n self.historyLayer.reset()",
"def clear(self):\n\t\tself.kcp.ai.MemoryInfo['n'] = 0\n\t\tself.changed = True",
"def clear_test_result(self, test):",
"def flush(self):\n self.Gr_list = None\n self.Iq_list = None\n self.Rw_list = None\n self._data_df = None\n self._recipe = None",
"def reset_testdata():\n reset_database()\n return 'data successfully resetted'",
"def clear(self):\n for phase in self.phases:\n getattr(self, phase).clear()\n self.test_runs = 0\n self.train_runs = 0",
"def clean_data(self):\r\n self.all_data.drop(len(self.all_data) - 1, inplace = True)",
"def clearAll(self):\n\t\tself.faceSnapShot = None #This is the state of the HappyFace to which all the expressions are compared\n\t\tself.expressionLibrary = []",
"def clear_and_restart(self):\n\n # Clear current data and assign a copy of default values\n self.data = copy.deepcopy(self.defaults)\n\n # Set pysat parameters without a default working value to []\n for key in self.non_defaults:\n self.data[key] = []\n\n # Trigger a file write\n self.store()\n\n return",
"def clearTestDatabase():\n classes = [\n AerialPosition,\n AccessLog,\n FlyZone,\n GpsPosition,\n MissionConfig,\n MovingObstacle,\n ObstacleAccessLog,\n ServerInfo,\n ServerInfoAccessLog,\n StationaryObstacle,\n TakeoffOrLandingEvent,\n UasTelemetry,\n Waypoint,\n ]\n for cur_class in classes:\n cur_class.objects.all().delete()\n cache.clear()",
"def reset(self):\n # reserve only those that has been around this time\n new_stats_data={}\n for c in self.stats_diff.keys():\n # but carry over all the users... should not change that often\n new_stats_data[c]=self.current_stats_data[c]\n\n self.old_stats_data=new_stats_data\n self.current_stats_data={}\n\n # and flush out the differences\n self.stats_diff={}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Cancel an order by its vt_orderid.
|
def cancel_order(self, strategy: SpreadStrategyTemplate, vt_orderid: str):
pass
|
[
"def cancel_order(self, order_id: int) -> None:\n raise NotImplementedError(\"Should implement cancel_order()\")",
"def cancel_order(self, **params):\n return self._delete('order', True, data=params)",
"async def cancel_order(self, **params):\r\n return await self.client_helper(\"cancel_order\", **params)",
"def cancel_order(self, **kwargs):\n return self.client.execute(\"order/cancel_order\", \"POST\", kwargs)",
"def cancel_option_order(order_id):\n url = urls.option_cancel(order_id)\n data = helper.request_post(url)\n\n if data:\n print('Order ' + order_id + ' cancelled')\n return data",
"async def cancel_order(ctx, symbol, order_id, orig_client_order_id, new_client_order_id, recv_window):\n if order_id is None and orig_client_order_id is None:\n ctx.log('Either --order_id (-oid) or --orig_client_order_id (-ocoid) must be sent.')\n return\n\n payload = {\n 'symbol': symbol,\n 'recvWindow': recv_window,\n 'timestamp': get_timestamp()\n }\n\n builder = CancelOrderBuilder(endpoint='api/v3/order', payload=payload, method='DELETE') \\\n .add_optional_params_to_payload(order_id=order_id,\n orig_client_order_id=orig_client_order_id,\n new_client_order_id=new_client_order_id) \\\n .set_security()\n\n await builder.send_http_req()\n\n builder.handle_response().generate_output()",
"async def futures_cancel_order(self, **params):\r\n return await self.client_helper(\"futures_cancel_order\", **params)",
"def cancel_all_open_order(self):",
"async def futures_cancel_orders(self, **params):\r\n return await self.client_helper(\"futures_cancel_orders\", **params)",
"def params_cancel_order(self, order_id = None):\n \n params = {}\n if order_id:\n params = {\"id\": order_id}\n \n return params",
"def cancel(self, order_id):\n\n response = self.request(E.cancelSslCertRequest(\n E.id(order_id)\n ))\n\n return int(response.data.id)",
"def cancel_order_cid(self, order_cid, order_date):\n data = [\n 0,\n abbreviations.get_notification_code('order cancel'),\n None,\n {\n # docs: http://bit.ly/2BVqwW6\n 'cid': order_cid,\n 'cid_date': order_date\n }\n ]\n payload = json.dumps(data, ensure_ascii=False).encode('utf8')\n self.factories[\"auth\"].protocol_instance.sendMessage(payload, isBinary=False)",
"def cancel(self, uid):\n order = self._orders[uid]\n if not order.active:\n return\n if order.is_buy:\n pricelevel = self._bids.pricelevel(order.price)\n pricelevel.remove(order)\n if pricelevel.is_empty():\n self._bids.remove_pricelevel(order.price)\n else:\n pricelevel = self._asks.pricelevel(order.price)\n pricelevel.remove(order)\n if pricelevel.is_empty():\n self._asks.remove_pricelevel(order.price)\n \n if uid < 0:\n self.my_cumvol_sent -= order.leavesqty\n order._cumqty = order.qty - order.leavesqty\n order.leavesqty = 0\n order.active = False",
"def cancel_order(clientId, listingId):\n try:\n order = conn.cursor()\n order.execute(\n \"UPDATE public.\\\"Order\\\" SET \\\"Status\\\" = 'Canceled' WHERE \\\"ClientID\\\" = \" + str(clientId) +\n \" AND \\\"ListingID\\\" = \" + str(listingId) + \" AND \\\"Status\\\" = \\'Pending\\'\")\n conn.commit()\n\n order.close()\n except:\n rollback = conn.cursor()\n rollback.execute(\"ROLLBACK\")\n rollback.commit()",
"def disable_cancel_order(request):\n try:\n pk = request.GET[\"order_id\"]\n order = Order.objects.get(pk=pk)\n order.is_cancellable = False\n order.save()\n return HttpResponse('Success!')\n except Order.DoesNotExist:\n return HttpResponseNotFound('Order not found')",
"def order_cancel_request(message, futures):\n order_id = message[2][0] # uses id, if no cid given\n order_cid = message[2][2]\n future_id = f\"oc_{order_id}\"\n future_id_cid = f\"oc_{order_cid}\"\n # print(\"Cancel requst started!\")\n if future_id in futures.keys():\n future = futures[future_id]\n elif future_id_cid in futures.keys():\n future = futures[future_id_cid]\n # print(\"requst future\", future)\n future.set_result({\n \"status\": message[6], # Error/Sucess\n \"id\": message[4][0],\n \"cid\": message[4][2],\n \"response\": message[4],\n \"comment\": message[7]\n })\n if future_id in futures:\n del futures[future_id]\n elif future_id_cid in futures:\n del futures[future_id_cid]",
"def remove_cancelled_order(self, id):\r\n print(\"Cancelling order '{}'\".format(id))\r\n self.orders_panel.remove_order_widget(id)",
"def save(self):\n order = self.context['order']\n order.cancel_order()",
"def futures_cancel_order(self, symbol):\n try:\n # quantity = self.futures_get_position_quantity(symbol)\n # if quantity == 0.0:\n # self.print_log(f\"{symbol} order is not currently open\")\n # return\n\n is_order_open = False\n open_orders = self.futures_get_all_open_orders()\n\n for dictionary in open_orders:\n if dictionary['symbol'] == symbol:\n is_order_open = True\n break\n \n if is_order_open:\n self.client.futures_cancel_all_open_orders(symbol=symbol, recvWindow=RECV_WINDOW)\n self.print_log(f\"Cancelled {symbol} order\")\n except Exception as e:\n self.handle_exception(e, f\"Could not close {symbol} order\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Put an event to update the strategy status.
|
def put_strategy_event(self, strategy: SpreadStrategyTemplate):
pass
|
[
"def update(self, event):\n raise NotImplementedError('update event is not implemented')",
"def process_status_update(self, event: StrategyStatusChangeEvent):\n self._callnext = (event.status != StrategyStatus.PAUSED.value)\n if event.status == StrategyStatus.PAUSED.value:\n self.on_pause()",
"def update(self, event: MispEvent) -> None:\n event.timestamp = datetime.datetime.now()\n event.published=0\n raw_evt = event.to_xml()\n self.server.POST('/events/%d' % event.id, raw_evt)",
"def on_entity_update(self, event):\n self.entity.cubolt_entity.on_entity_update(event)",
"def task_updated(event: Event):\n data = {\"status\": event.get(\"state\", \"RUNNING\")}\n\n # Rather than send all data, only pass on known fields\n for field in [\"log\", \"urls\"]:\n value = event.get(field)\n if value:\n data.update({field: value})\n\n update_job(event[\"task_id\"], data)",
"def SetStatus(self, status):\n self.status = status\n self.put()",
"def _handle_EditConfigHookEvent (self, event):\n log.debug(\"Received %s event...\" % event.__class__.__name__)\n request_id = event.callback.request_id\n deploy_status = self.status_mgr.get_status(id=request_id)\n if event.was_error():\n log.debug(\"Update failed status for service request: %s...\" %\n request_id)\n deploy_status.set_domain_failed(domain=event.domain)\n else:\n log.debug(\"Update success status for service request: %s...\" % request_id)\n deploy_status.set_domain_ok(domain=event.domain)\n if isinstance(event.callback.data, NFFG):\n log.log(VERBOSE, \"Changed topology:\\n%s\" % event.callback.data.dump())\n domain_mgr = self.domains.get_component_by_domain(event.domain)\n if domain_mgr is None:\n log.error(\"DomainManager for domain: %s is not found!\" % event.domain)\n return\n if isinstance(domain_mgr, UnifyDomainManager) and domain_mgr.polling:\n log.debug(\"Polling in domain: %s is enabled! Skip explicit update...\"\n % event.domain)\n domain_mgr.update_topology_cache()\n if CONFIG.one_step_update():\n log.debug(\"One-step-update is enabled. Skip explicit domain update!\")\n else:\n self.DoVManager.update_domain(domain=event.domain,\n nffg=event.callback.data)\n log.debug(\"Installation status: %s\" % deploy_status)\n if not deploy_status.still_pending:\n if deploy_status.success:\n log.info(\"All installation process has been finished for request: %s! \"\n \"Result: %s\" % (deploy_status.id, deploy_status.status))\n if CONFIG.one_step_update():\n log.info(\"One-step-update is enabled. Update DoV now...\")\n self.DoVManager.set_global_view(nffg=deploy_status.data)\n elif deploy_status.failed:\n log.error(\"All installation process has been finished for request: %s! \"\n \"Result: %s\" % (deploy_status.id, deploy_status.status))\n if CONFIG.one_step_update():\n log.warning(\"One-step-update is enabled. \"\n \"Skip update due to failed request...\")\n if CONFIG.rollback_on_failure():\n self.__do_rollback(status=deploy_status,\n previous_state=self.DoVManager.get_backup_state())\n result = InstallationFinishedEvent.get_result_from_status(deploy_status)\n log.info(\"Overall installation result: %s\" % result)\n # Rollback set back the domains to WAITING status\n if not deploy_status.still_pending:\n is_fail = InstallationFinishedEvent.is_error(result)\n self._layer_API._process_mapping_result(nffg_id=request_id,\n fail=is_fail)\n self._layer_API.raiseEventNoErrors(InstallationFinishedEvent,\n id=request_id,\n result=result)\n else:\n log.debug(\"Installation process is still pending! Waiting for results...\")",
"def _push_status(self):\n\n self.data['status'] = self._status\n event_manager.device_changed(self)",
"def update_status(self, status, context=None):\n if status not in ACCEPTABLE_STATUS:\n raise ValueError('Invalid status value {}'.format(status))\n try:\n jsonapi.dumps(context)\n except TypeError:\n raise ValueError('Context must be JSON serializable.')\n\n status_changed = status != self._status\n self._status = status\n self._context = context\n self._last_updated = format_timestamp(get_aware_utc_now())\n\n if status_changed and self._status_changed_callback:\n print(self._status_changed_callback())",
"async def request_hvac_update(self) -> None:\n\n state = mapper.map_hvac_sync_state(await self._call_api(urls.hvac))\n\n if state and not state.is_pending:\n await self._call_api(urls.hvac_update, 'put')",
"async def employee_status(self, event):\n await self.send_json(event)",
"async def event_checkpointed(self, event: Event) -> None:\n (event_key, _) = event\n await self.handle.report_checkpointed.remote(\n workflow_context.get_current_workflow_id(), event_key, True\n )",
"def input(self, _event):\n if _event.type == GAME_EVENT and _event.reason == GameEventType.HUD_UPDATE:\n for entity in self.observing:\n entity.artifacts[SpriteArtifact.NAME].sprite.updatehud(_event.caps, _event.lifes, _event.points)\n self.dirty = True",
"def sample_updater(event_id):\n event = Event.objects.get(id=event_id)\n event.modified = timezone.now()\n event.save()",
"def notify_event(self, event):\n # Check for game paused event \n if event == [\"GAME_PAUSE\"]:\n if self.noStart:\n # Clear the initial load screen\n self.noStart = False\n else:\n if self.pause:\n self.pause = False\n self.scoreManager.startTime += (timer()-self.pauseTime)\n else:\n self.pause = True\n self.pauseTime = timer()\n \n # Pause for a second to prevent duplicate events being \n # transmitted by the controller\n pygame.time.delay(50)\n # Now clear the Event Manager of this event so that it can move on\n self.eventManager.event_clear(event)\n # Check for game over event\n if event == self.model.gameOverEvent:\n self.gameOver = True\n # Now clear the Event Manager of this event so that it can move on\n self.eventManager.event_clear(event)",
"def task_status_changed(sg, logger, event, args):\n\n # Return if we don't have all the field values we need.\n if (\n not event.get(\"entity\", {}).get(\"id\")\n or not event.get(\"meta\", {}).get(\"entity_id\")\n or not event.get(\"id\")\n ):\n return\n\n # Make some vars for convenience.\n entity_id = event[\"entity\"][\"id\"]\n entity_name = event[\"entity\"][\"name\"]\n status_mapping_field = args[\"status_mapping_field\"]\n\n # Re-query for the Task Status value to make sure we have an up-to-date\n # new status value. The input value from the event may be inaccurate if the\n # triggers are ever running behind.\n sg_task = sg.find_one(\"Task\", [[\"id\", \"is\", entity_id]], [\"sg_status_list\"])\n\n # Return if we can't find our Task.\n if not sg_task:\n logger.info(\n \"Unable to retrieve Task (%d) %s from SG for event %d, skipping.\"\n % (entity_id, entity_name, event[\"id\"])\n )\n return\n\n # Grab the Shotgun Status entity the Task was set to.\n new_task_status = sg.find_one(\n \"Status\",\n [[\"code\", \"is\", sg_task[\"sg_status_list\"]]],\n [status_mapping_field],\n )\n\n # Return if we can't find our Status entity (would be pretty weird).\n if not new_task_status:\n logger.info(\n \"No Status found with code %s, skipping.\" % sg_task[\"sg_status_list\"]\n )\n return\n\n # Return if the Status entity's sg_version_status_mapping value is empty.\n if new_task_status[status_mapping_field] is None:\n logger.debug(\n \"No sg_version_status_mapping found for Status with id %s, skipping.\"\n % new_task_status[\"id\"]\n )\n return\n\n # Get the latest Version attached to our Task.\n sg_version = sg.find_one(\n \"Version\",\n [[\"sg_task\", \"is\", sg_task]],\n [],\n order=[{\"field_name\": \"created_at\", \"direction\": \"desc\"}],\n )\n\n # Return if we can't find a Version attached to the Task.\n if not sg_version:\n logger.debug(\"No Version linked to Task with id %s, skipping.\" % entity_id)\n return\n\n # Update the Version's sg_status_field with the Status entity's\n # sg_version_status_mapping value.\n try:\n result = sg.update(\n \"Version\",\n sg_version[\"id\"],\n {\"sg_status_list\": new_task_status[status_mapping_field]},\n )\n logger.debug(\"Result is: %s\" % result)\n except Exception as e:\n logger.warning(\n \"Could not update Version with id %s to Status %s: %s\"\n % (sg_version[\"id\"], new_task_status[status_mapping_field], str(e))\n )",
"def dispatch_event(self, event):\n self.redis_client.store_event(event)",
"def store_event(self, event: \"EventLogEntry\") -> None:",
"def update(self, events, game):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Compute the correlation of the model with human similarity judgments. `pairs` is the filename of a dataset where each line is a 3-tuple consisting of a word pair and a similarity value, separated by `delimiter`. An example dataset is included in Gensim (test/test_data/wordsim353.tsv). More datasets can be found at
|
def evaluate_word_sims(model, name, pairs, delimiter='\t', restrict_vocab=300000,
case_insensitive=True, dummy4unknown=False):
ok_vocab = list(model.vocabulary.items()) #restrict_vocab
ok_vocab = {w.lower(): v for w, v in ok_vocab} if case_insensitive else dict(ok_vocab)
similarity_gold = []
similarity_model = []
oov = 0
# original_vocab = model.vocabulary
# model.vocabulary = ok_vocab
for line_no, line in enumerate(gensim.utils.smart_open(pairs)):
line = gensim.utils.to_unicode(line)
if line.startswith('#'):
# May be a comment
continue
else:
try:
if case_insensitive:
a, b, sim = [word.lower() for word in line.split(delimiter)]
else:
a, b, sim = [word for word in line.split(delimiter)]
sim = float(sim)
except (ValueError, TypeError):
logger.debug('Skipping invalid line #%d in %s', line_no, pairs)
continue
if a not in ok_vocab or b not in ok_vocab:
oov += 1
if dummy4unknown:
logger.debug('Zero similarity for line #%d with OOV words: %s', line_no, line.strip())
similarity_model.append(0.0)
similarity_gold.append(sim)
continue
else:
logger.debug('Skipping line #%d with OOV words: %s', line_no, line.strip())
continue
similarity_gold.append(sim) # Similarity from the dataset
similarity_model.append(model.get_similarity(a, b)) # Similarity from the model
# self.vocab = original_vocab
spearman = stats.spearmanr(similarity_gold, similarity_model)
pearson = stats.pearsonr(similarity_gold, similarity_model)
if dummy4unknown:
oov_ratio = float(oov) / len(similarity_gold) * 100
else:
oov_ratio = float(oov) / (len(similarity_gold) + oov) * 100
logger.debug('Pearson correlation coefficient against %s: %f with p-value %f', pairs, pearson[0], pearson[1])
logger.debug(
'Spearman rank-order correlation coefficient against %s: %f with p-value %f',
pairs, spearman[0], spearman[1]
)
logger.debug('Pairs with unknown words: %d', oov)
# log_evaluate_word_sims(pearson, spearman, oov_ratio, name, pairs)
return pearson, spearman, oov_ratio
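
A self-contained sketch of the two statistics this evaluator returns, on hand-made gold vs. model similarity lists (all numbers below are illustrative); it unpacks the scipy results the same way as the function above:

from scipy import stats

similarity_gold = [9.0, 7.5, 6.0, 3.0, 1.0]        # judgments from a (toy) human dataset
similarity_model = [0.81, 0.74, 0.55, 0.30, 0.12]  # similarities a model might produce

pearson = stats.pearsonr(similarity_gold, similarity_model)
spearman = stats.spearmanr(similarity_gold, similarity_model)

print("Pearson r=%.3f (p=%.3f)" % (pearson[0], pearson[1]))
print("Spearman rho=%.3f (p=%.3f)" % (spearman[0], spearman[1]))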
|
[
"def pearson_corr(pairs: Sequence[Pair]) -> float:\n X = tuple(p.x for p in pairs)\n Y = tuple(p.y for p in pairs)\n return Chapter04.ch04_ex4.corr(X, Y)",
"def match_pair(pair, sim_method):\r\n doc1, doc2 = get_texts(pair)\r\n ents1 = extract_ents(nlp(doc1))\r\n ents2 = extract_ents(nlp(doc2))\r\n # cluster the corefer entities for each document\r\n c1 = cluster_doc(ents1)\r\n c2 = cluster_doc(ents2)\r\n similarity = sim_method(c1, c2)\r\n return similarity, [c1, c2]",
"def matrix_from_pairs(pairs):\n\n labels = set()\n labels.update([x[0] for x in pairs]+[x[1] for x in pairs])\n labels = list(labels)\n labels.sort()\n\n dimension = len(labels)\n distance_matrix = np.zeros( (dimension, dimension), 'f')\n\n for pair in pairs:\n i = labels.index(pair[0])\n j = labels.index(pair[1])\n distance_matrix[i][j] = -1*float(pair[2]) ## negating for affinity\n\n return distance_matrix, labels",
"def matrix_from_pairs(pairs):\n\n labels = set()\n labels.update([x[0] for x in pairs]+[x[1] for x in pairs])\n labels = list(labels)\n labels.sort()\n\n dimension = len(labels)\n distance_matrix = zeros( (dimension, dimension), 'f')\n\n for pair in pairs:\n i = labels.index(pair[0])\n j = labels.index(pair[1])\n distance_matrix[i][j] = float(pair[2])\n distance_matrix[j][i] = float(pair[2])\n \n return map(list, distance_matrix), labels",
"def compute_mirna_expression_correlation(mirna_expressions_file_path: str):\n with open('/Users/royjudes/Desktop/miRNA embedding project/kidney_miRNA_pairs_pvalues_cossim_jaccard.csv', 'r') as pairs_file:\n first_line = True\n with open('/Users/royjudes/Desktop/miRNA embedding project/kidney_miRNA_pairs_pvalues_cossim_jaccard_correlation.csv', 'a') as pairs_file_with_correlation:\n writer = csv.writer(pairs_file_with_correlation)\n writer.writerow(['miRNA_a', 'avg_rpm_a', 'miRNA_b', 'avg_rpm_b', 'cos_sim', 'pvalue', 'jaccard', 'correlation'])\n df = pd.read_csv(mirna_expressions_file_path)\n\n for record in pairs_file:\n if first_line:\n first_line = False\n continue\n\n try:\n record = record.split(\",\")\n miRNA_a = record[0]\n miRNA_b = record[2]\n\n record[6] = record[6].replace(\"\\n\", \"\")\n\n correlation, _ = pearsonr(df[miRNA_a], df[miRNA_b])\n record.append(correlation)\n writer.writerow(record)\n\n except:\n continue",
"def pearson_correlation(data, person1, person2):\n # list of shared items\n shared_items = {}\n for item in data[person1]:\n if item in data[person2]:\n shared_items[item] = 1\n\n # number of elements\n n = len(shared_items)\n\n # no ratings in common\n if n == 0:\n return 0\n\n n = float(n)\n\n # adding up all preferences\n sum1 = sum([data[person1][item] for item in shared_items])\n sum2 = sum([data[person2][item] for item in shared_items])\n\n # summing up squares\n sum_sq1 = sum([pow(data[person1][item], 2) for item in shared_items])\n sum_sq2 = sum([pow(data[person2][item], 2) for item in shared_items])\n\n # summing products\n p_sum = sum([data[person1][item] * data[person2][item] for item in shared_items])\n\n # Pearson\n num = p_sum - (sum1 * sum2 / n)\n den = sqrt((sum_sq1 - pow(sum1, 2) / n) * (sum_sq2 - pow(sum2, 2) / n))\n\n # avoid dividing by 0\n if den == 0:\n return 0\n\n ret_val = num/den\n\n return ret_val",
"def correlate_targets() -> None:\n sc = StandardScaler()\n data = []\n\n for file_name in [x for x in os.listdir(Parameters.meta_dataset_dir) if x.endswith('.csv')]:\n d = pd.read_csv(Parameters.meta_dataset_dir + file_name)\n df = pd.DataFrame(data=sc.fit_transform(d), columns=d.columns)\n data.append(df)\n\n frame = pd.concat(data)\n\n lofo = [x for x in frame.columns if \"LOFO\" in x]\n shap = [x for x in frame.columns if \"SHAP\" in x]\n pimp = [x for x in frame.columns if \"PIMP\" in x]\n lime = [x for x in frame.columns if \"LIME\" in x]\n lm = [x for x in frame.columns if not x.startswith(\"target_\")]\n\n matrix = frame.corr(\"spearman\")\n matrix = matrix.drop(lofo + shap + pimp + lime + lm, axis=0)\n matrix = matrix.drop([x for x in list(frame.columns) if x not in lofo + shap + pimp + lime], axis=1)\n\n def __f(targets):\n return np.round(np.mean([np.mean([abs(x) for x in matrix[target].values if abs(x) < 1])\n for target in targets]), 2)\n\n def __f_2(targets):\n return np.round(np.max([np.mean([abs(x) for x in matrix[target].values if abs(x) < 1])\n for target in targets]), 2)\n\n d = {'lofo': [__f(lofo), __f_2(lofo)], 'shap': [__f(shap), __f_2(shap)],\n 'lime': [__f(lime), __f_2(lime)], 'pimp': [__f(pimp), __f_2(pimp)]}\n pd.DataFrame(data=d, index=[\"mean\", \"max\"], columns=[\"lofo\", \"shap\", \"lime\", \"pimp\"]).to_csv(\n Parameters.output_dir + \"meta_prediction_performance/target_corr.csv\")",
"def evaluate_similarities(lang, vecs_fname):\r\n similarities_path = os.path.join(path, 'datasets', 'similarities')\r\n if not os.path.exists('results'):\r\n os.mkdir('results')\r\n results_path = os.path.join('results', 'similarities')\r\n if not os.path.exists(results_path):\r\n os.mkdir(results_path)\r\n logging.info(f'evaluating semantic similarities with {vecs_fname}')\r\n vectors = Vectors(vecs_fname, normalize=True, n=1e6, d=300)\r\n scores = []\r\n for similarities_fname in os.listdir(similarities_path):\r\n if similarities_fname.startswith(lang):\r\n logging.info(f'correlating similarities from {similarities_fname}')\r\n similarities = pd.read_csv(os.path.join(similarities_path, similarities_fname), sep='\\t', comment='#')\r\n score = compare_similarities(vectors, similarities)['scores']\r\n score['source'] = similarities_fname\r\n scores.append(score)\r\n scores_fname = os.path.split(vecs_fname)[1].replace('.vec', '.tsv')\r\n if len(scores) > 0:\r\n scores = pd.concat(scores)\r\n scores.to_csv(os.path.join(results_path, scores_fname), sep='\\t', index=False)\r\n return scores",
"def generate_similarity(keywords_data: pd.DataFrame) -> pd.DataFrame:\n LOGGER.info(\"Generating the similarity between each pair of news.\")\n data = keywords_data.copy()\n data[\"keywords\"] = data[\"keywords\"].apply(set)\n news_keywords = data.set_index(\"news_id\")[\"keywords\"].to_dict().items()\n\n pair_data = []\n for (left_news, left_keywords), (right_news, right_keywords)\\\n in tqdm(itertools.combinations(news_keywords, 2),\n desc=\"Generate sims\", total=len(data) * (len(data) - 1) / 2):\n intersection = left_keywords & right_keywords\n if not intersection:\n continue\n sim = len(intersection) / (len(left_keywords) + len(right_keywords) - len(intersection))\n pair_data.append((left_news, right_news, sim))\n pair_data.append((right_news, left_news, sim))\n return pd.DataFrame(pair_data, columns=[\"news_id_left\", \"news_id_right\", \"sim\"])",
"def calc_pairing_matching_score(self,neuronA,neuronB,connectivity,neuron_names,list_of_pairs):\r\n\r\n #Vertex similarity based on Jarrell et al., 2012\r\n # f(x,y) = min(x,y) - C1 * max(x,y) * e^(-C2 * min(x,y))\r\n # x,y = edge weights to compare\r\n # vertex_similarity is the sum of f over all vertices\r\n # C1 determines how negatively a case where one edge is much stronger than another is punished\r\n # C2 determines the point where the similarity switches from negative to positive\r\n C1 = 0.5\r\n C2 = 1\r\n vertex_similarity = 0\r\n max_score = 0\r\n\r\n\r\n for pA,pB in list_of_pairs:\r\n try:\r\n a = connectivity[pA]['skids'][neuronA]\r\n except:\r\n a = 0\r\n try:\r\n b = connectivity[pB]['skids'][neuronB]\r\n except:\r\n b = 0\r\n\r\n max_score += max([a,b])\r\n vertex_similarity += (\r\n min([a,b]) - C1 * max([a,b]) * math.exp(- C2 * min([a,b]))\r\n )\r\n\r\n #Again but the other way around\r\n for pB,pA in list_of_pairs:\r\n try:\r\n a = connectivity[pA]['skids'][neuronA]\r\n except:\r\n a = 0\r\n try:\r\n b = connectivity[pB]['skids'][neuronB]\r\n except:\r\n b = 0\r\n\r\n max_score += max([a,b])\r\n vertex_similarity += (\r\n min([a,b]) - C1 * max([a,b]) * math.exp(- C2 * min([a,b]))\r\n )\r\n\r\n try:\r\n similarity_normalized = ( vertex_similarity + C1 * max_score ) / ( ( 1 + C1 ) * max_score) #Reason for (1+C1) is that by increasing vertex_similarity first by C1*max_score, we also increase the maximum reachable value\r\n #print(vertex_similarity,similarity_normalized,max_score)\r\n except:\r\n similarity_normalized = 0\r\n\r\n\r\n return similarity_normalized",
"def test_find_pairs_no_frame(self, dataframes, dataframe_pairs):\n cc = multicolor.Registrator(dataframes[0], dataframes[1])\n cc.find_pairs()\n res = cc.pairs.sort_values((\"channel1\", \"x\")).reset_index(drop=True)\n pd.testing.assert_frame_equal(res, dataframe_pairs)",
"def compare_all_pairs(sentences, w2vmodel):\n for s1, s2 in combinations(sentences, 2):\n # get similarity between s1 and s2\n prob = word_mover_distance_probspec(s1, s2, w2vmodel)\n print(s1)\n print(s2)\n print(pulp.value(prob.objective))",
"def text_profiles_similarity(self):\n\n # Text (TF-IDF)\n processor = TextProcessor(store_docs=True, \n clusters={'kmeans': lambda: KMeans(5)} )\n processor.run()\n \n # dictionary containing metrics for the profiles\n docs = []\n for username, cluster in processor.clusters[\"kmeans\"].items():\n # for each cluster, build up a new dataset, we will then use it to \n # compare the profiles\n for label in np.unique(cluster.labels_):\n # get only the documents with this label\n docs.append(\" \".join([processor.stored_docs[username][i] for i, val \n in enumerate(cluster.labels_ == label) if val]))\n\n features = processor.get_features(docs)\n self._processor = processor\n return euclidean_distances(features, features)",
"def compute_differences(X, pairs):\n return X[pairs[:, 0]] - X[pairs[:, 1]]",
"def _compute_word_pairs(self, words):\n # Sort the words first so the tuples are always ordered the same\n return combinations(sorted(words), r=2)",
"def similarity(self, words):\n single_token_words = {w: True for w in words if len(w.split(\" \")) == 1}\n known_word_vectors = [self.model[word] / np.linalg.norm(self.model[word]) for word \\\n in single_token_words.keys() if self.model.vocab.has_key(word)]\n multiple_token_words = [w for w in words if w not in single_token_words]\n word_vector_averages = list(itertools.chain(*[[self.model[t] for t in w.split(\" \") if \\\n self.model.vocab.has_key(t)] for w \\\n in multiple_token_words]))\n word_vector_averages = [w / np.linalg.norm(w) for w in word_vector_averages]\n known_word_vectors.extend(word_vector_averages)\n word_vectors = list(known_word_vectors)\n\n # base case: #(words) = 1, return 1\n if len(word_vectors) == 1: return 1\n # base case: #(words) = 2, return the cosine similarity of the two vectors\n if len(word_vectors) == 2:\n v1, v2 = word_vectors[0], word_vectors[1]\n sim = np.dot(v1, v2)\n # we are not interested in negative similarity, pinning it to 0\n return sim if sim > 0 else 0\n sims = []\n for w in word_vectors:\n # remove w from the set of word vectors to compute the \n # dot product of average of this set with the word vector w\n known_word_vectors.remove(w)\n unit_vec_candidate = np.average(known_word_vectors, axis=0)\n unit_vec_candidate /= np.linalg.norm(unit_vec_candidate)\n sims.append(np.dot(unit_vec_candidate, w))\n # add w back to the set of word vectors \n known_word_vectors.append(w)\n avg_sim = np.average(sims)\n return avg_sim if avg_sim > 0 else 0",
"def test_fiducial_pairs(fid_pairs, target_model, prep_fiducials, meas_fiducials, germs,\n test_lengths=(256, 2048), pre_povm_tuples=\"first\", tol=0.75,\n verbosity=0, mem_limit=None):\n printer = _baseobjs.VerbosityPrinter.create_printer(verbosity)\n\n if pre_povm_tuples == \"first\":\n firstRho = list(target_model.preps.keys())[0]\n firstPOVM = list(target_model.povms.keys())[0]\n pre_povm_tuples = [(firstRho, firstPOVM)]\n pre_povm_tuples = [(_circuits.Circuit((prepLbl,)), _circuits.Circuit((povmLbl,)))\n for prepLbl, povmLbl in pre_povm_tuples]\n\n def _get_derivs(length):\n \"\"\" Compute all derivative info: get derivative of each <E_i|germ^exp|rho_j>\n where i = composite EVec & fiducial index and j similar \"\"\"\n\n circuits = []\n for germ in germs:\n expGerm = _gsc.repeat_with_max_length(germ, length) # could pass exponent and set to germ**exp here\n pairList = fid_pairs[germ] if isinstance(fid_pairs, dict) else fid_pairs\n circuits += _gsc.create_circuits(\"pp[0]+p[0]+expGerm+p[1]+pp[1]\",\n p=[(prep_fiducials[i], meas_fiducials[j]) for i, j in pairList],\n pp=pre_povm_tuples, expGerm=expGerm, order=['p', 'pp'])\n circuits = _remove_duplicates(circuits)\n\n resource_alloc = _baseobjs.ResourceAllocation(comm=None, mem_limit=mem_limit)\n layout = target_model.sim.create_layout(circuits, None, resource_alloc, array_types=('ep',), verbosity=0)\n\n local_dP = layout.allocate_local_array('ep', 'd')\n target_model.sim.bulk_fill_dprobs(local_dP, layout, None)\n dP = local_dP.copy() # local == global (no layout.gather required) b/c we used comm=None above\n layout.free_local_array(local_dP) # not needed - local_dP isn't shared (comm=None)\n\n return dP\n\n def _get_number_amplified(m0, m1, len0, len1):\n \"\"\" Return the number of amplified parameters \"\"\"\n L_ratio = float(len1) / float(len0)\n try:\n s0 = _np.linalg.svd(m0, compute_uv=False)\n s1 = _np.linalg.svd(m1, compute_uv=False)\n except: # pragma: no cover\n printer.warning(\"SVD error!!\"); return 0 # pragma: no cover\n #SVD did not converge -> just say no amplified params...\n\n numAmplified = 0\n printer.log(\"Amplified parameter test: matrices are %s and %s.\" % (m0.shape, m1.shape), 4)\n printer.log(\"Index : SV(L=%d) SV(L=%d) AmpTest ( > %g ?)\" % (len0, len1, tol), 4)\n for i, (v0, v1) in enumerate(zip(sorted(s0, reverse=True), sorted(s1, reverse=True))):\n if abs(v0) > 0.1 and (v1 / v0) / L_ratio > tol:\n numAmplified += 1\n printer.log(\"%d: %g %g %g YES\" % (i, v0, v1, (v1 / v0) / L_ratio), 4)\n printer.log(\"%d: %g %g %g NO\" % (i, v0, v1, (v1 / v0) / L_ratio), 4)\n return numAmplified\n\n L0, L1 = test_lengths\n\n printer.log(\"---------- Testing Fiducial Pairs ----------\")\n printer.log(\"Getting jacobian at L=%d\" % L0, 2)\n dP0 = _get_derivs(L0)\n printer.log(\"Getting jacobian at L=%d\" % L1, 2)\n dP1 = _get_derivs(L1)\n printer.log(\"Computing number amplified\", 2)\n nAmplified = _get_number_amplified(dP0, dP1, L0, L1)\n printer.log(\"Number of amplified parameters = %s\" % nAmplified)\n\n return nAmplified",
"def compute_similarities(self,dataset,j):\r\n pass",
"def remove_mutual_connections(pair):\n user, common_followed_users = pair\n # iterate over all user ids and keep all user ids that are not started with \"-\".\n common_followed_users = [user for user in common_followed_users if \"-\" + user not in common_followed_users]\n # iterate over all user ids and remove all users who are followed directly with the User X.\n common_followed_users = [user for user in common_followed_users if not str(user).startswith(\"-\")]\n # keep all remaining users' ids in a dictionary whit the key of Fi and the number of occurrences of Fi in the list.\n common_followed_users = dict((i, common_followed_users.count(i)) for i in common_followed_users)\n result = []\n # iterate over the dictionary and create a list of recommendation for user X.\n for key, value in common_followed_users.items():\n recommendation = Recommendation(key, value)\n result.append(recommendation)\n # sort recommendation list in descending order with the __gt__ method which is implemented in Recommendation class.\n result.sort(reverse=True)\n # return a pair of user X with his or her recommendations.\n return user, result"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Calculate score by section, helper for
|
def log_evaluate_word_analogies(name, section):
    correct, incorrect = len(section['correct']), len(section['incorrect'])
    score = None
    if correct + incorrect > 0:
        score = correct / (correct + incorrect)
        logger.info("{} {}: {:.1f}% ({}/{})".format(name, section['section'], 100.0 * score, correct, correct + incorrect))
    return score
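
A hedged usage sketch for the helper above: the section layout (lists of correct/incorrect analogy tuples) and the section name are invented for illustration, and a basic logging config is added so the logger.info call is visible. The helper is repeated only to keep the snippet self-contained:

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

def log_evaluate_word_analogies(name, section):
    # Mirrors the helper above.
    correct, incorrect = len(section['correct']), len(section['incorrect'])
    score = None
    if correct + incorrect > 0:
        score = correct / (correct + incorrect)
        logger.info("{} {}: {:.1f}% ({}/{})".format(
            name, section['section'], 100.0 * score, correct, correct + incorrect))
    return score

section = {
    'section': 'capital-common-countries',
    'correct': [('athens', 'greece', 'oslo', 'norway')] * 3,
    'incorrect': [('paris', 'france', 'rome', 'spain')],
}
print(log_evaluate_word_analogies('toy-model', section))  # 0.75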
|
[
"def findSectionScore(soup):\n\n #wordCount and score are used for each section, totalScore and totalWords are for the entire article\n wordCount = 0\n sectionScores = {}\n currentSection = ''\n score = 0\n totalScore = 0\n totalWords = 0\n\n # we go through all of the text in the article\n for tag in soup.findAll(\"span\"):\n\n #figure out which sub section the words belong to\n newSection = tag.find_previous(text=re.compile(r'^=='))\n\n if newSection == None:\n continue\n\n #update sectionScores and totalScore if we come to the end of a section\n if newSection is not currentSection:\n if wordCount and score:\n score = score/wordCount\n sectionScores[currentSection] = score\n\n currentSection = newSection\n totalWords += wordCount\n totalScore += score *wordCount\n\n score = 0\n wordCount = 0\n\n score, wordCount = findTagScore(tag, wordCount,score)\n\n if totalWords:\n\n totalScore = totalScore/totalWords\n\n\n\n return sectionScores, totalScore",
"def compute_score(self, profile):\n score = 0\n # ====================================================================\n # age consideration\n # ====================================================================\n if profile.max_age:\n if self.age < profile.max_age:\n score += 1\n if profile.min_age:\n if self.age > profile.min_age:\n score += 1\n # ====================================================================\n # skills consideration\n # ====================================================================\n for skill in profile.skills.all():\n if skill in self.skills.all():\n if skill.level == 'Ex':\n score += 9\n elif skill.level == 'In':\n score += 6\n else:\n score += 3\n # ====================================================================\n # industry consideration\n # ====================================================================\n if profile.industry == self.industry:\n if profile.industry.endswith('H'):\n score += 3\n elif profile.industry.endswith('M'):\n score += 2\n else:\n score += 1\n # ====================================================================\n # location consideration\n # ====================================================================\n if profile.location == self.location:\n if profile.location.endswith('H'):\n score += 3\n elif profile.location.endswith('M'):\n score += 2\n else:\n score += 1\n # ====================================================================\n # gender consideration\n # ====================================================================\n if profile.gender == self.gender:\n if profile.gender.endswith('H'):\n score += 3\n elif profile.gender.endswith('M'):\n score += 2\n else:\n score += 1\n # ====================================================================\n # experience consideration\n # ====================================================================\n if (self.experience - profile.experience) >= 0:\n score += 3\n elif (self.experience - profile.experience) >= -2:\n score += 1\n\n # ====================================================================\n # education consideration\n # ====================================================================\n if profile.edu == 'DP':\n if self.edu in ('DP', 'BSc', 'Msc' , 'Phd'):\n score += 1\n elif profile.edu == 'Bsc':\n if self.edu in ('BSc', 'Msc' , 'Phd'):\n score += 2\n elif profile.edu == 'Msc':\n if self.edu in ('Msc' , 'Phd'):\n score += 4\n elif profile.edu == 'Phd':\n if self.edu == profile.edu:\n score += 8\n # =====================================================================\n # return total score\n # ====================================================================\n return score",
"def score(self, pairs):\n pass",
"def compute_score(scores):\n\tcurr_score = 50\n\tfor classification in scores: \n\t\tif classification == 1 or classification == 3: \n\t\t\tcurr_score += 0.08\n\t\tif classification == 2 or classification == 4: \n\t\t\tcurr_score -= 0.03\n\treturn curr_score",
"def slice_score(self, proc_ranges, slice):\r\n gap_start = slice.start\r\n gap_end = slice.stop\r\n gap_size = gap_end - gap_start\r\n width_score = (gap_size / 810)\r\n distance_score = proc_ranges[gap_start:gap_end].mean() / proc_ranges.mean()\r\n slice_score = (width_score*0.7) + (distance_score*0.3)\r\n return slice_score",
"def score(room):\n c_dict = room.chair_items()\n score_list = []\n room.update_mesh()\n if len(room.get_id_list()) == 0:\n sanitize_score = None\n else:\n sanitize_score = sanitize(room, True)\n rad_score = chair_radius(room)\n sq_score = chair_per_sq(room)\n num_score = num_chair(room)\n dist_score = chair_dist(room)\n if sanitize_score is not None:\n for c in c_dict.keys():\n c_id = c_dict[c]\n total_score = 0.1 * sanitize_score[c] + 0.2 * dist_score[c] + 0.4 * rad_score[c] + 0.2 * sq_score[c] + 0.1 * num_score[c]\n score_list.append({'chair_id': c_id.get_archi_id(),\n 'position': {'x': c_id.x_pos(),\n 'y': c_id.y_pos(),\n 'z': c_id.z_pos()},\n 'total_score': total_score,\n 'scores': {'santizer': sanitize_score[c],\n 'chair_distance': dist_score[c],\n 'chairs_in_radius': rad_score[c],\n 'square_footage': sq_score[c],\n 'number_of_chairs': num_score[c]\n }})\n else:\n for c in c_dict.keys():\n total_score = 0.2 * dist_score[c] + 0.4 * rad_score[c] + 0.2 * sq_score[c] + 0.1 * num_score[c]\n c_id = c_dict[c]\n score_list.append({'chair_id': c_id.get_archi_id(),\n 'position': {'x': c_id.x_pos(),\n 'y': c_id.y_pos(),\n 'z': c_id.z_pos()},\n 'total_score': total_score,\n 'scores': {'santizer': sanitize_score[c],\n 'chair_distance': dist_score[c],\n 'chairs_in_radius': rad_score[c],\n 'square_footage': sq_score[c],\n 'number_of_chairs': num_score[c]\n }})\n return {'chairs': score_list}",
"def computeOQScore(data):\n data[\"oqScore\"] = data[\"traitBased\"] + data[\"exceptionalBased\"]",
"def cal_doc_scores(self, sentences) :\n doc_pos_score =0\n doc_neg_score = 0\n for label, pos, neg in sentences:\n if label != 0 :\n doc_pos_score += pos\n doc_neg_score += neg\n return doc_pos_score, doc_neg_score",
"def score(prop, cluster):\r\n return len([other for other in cluster if other[1] == prop[1]]) / (1.0 * len(cluster))",
"def get_score(self, query_term, doc):",
"def calculate(self, strand):\r\n result = []\r\n\r\n for row in self.assessmentData[strand]:\r\n score = 0\r\n baseNum = 5\r\n # Consider only the last three entries of the row since we've appended our counting dictionaries\r\n # to the end of the object's assessment data\r\n for columnNum in range(-3, 0):\r\n # If the column only has one entry so far\r\n column = row[columnNum]\r\n \r\n # Developing and Understanding\r\n if columnNum != -1: \r\n if \"2\" in column:\r\n if column[\"2\"] > 1 or (column[\"2\"] == 1 and len(column) == 1):\r\n score = baseNum + columnNum\r\n elif \"1\" in column:\r\n if column[\"1\"] > 1:\r\n score = baseNum - 0.5 + columnNum\r\n elif \"1\" in column:\r\n if column[\"1\"] > 1 or (column[\"1\"] == 1 and len(column) == 1):\r\n score = baseNum -1 + columnNum\r\n else:\r\n score = baseNum - 1.5 + columnNum\r\n \r\n # Mastery Column\r\n else:\r\n # This creates a threshold so that if they haven't achieved the previous strands\r\n # They cannot achieve mastery\r\n if score < 3:\r\n pass\r\n\r\n elif \"2\" in column:\r\n masteryData = strand[-5]\r\n if len(masteryData) == 1:\r\n score = 4\r\n \r\n elif column[\"2\"] > 1:\r\n if masteryData[-2:] == \"22\":\r\n score = 4\r\n\r\n # at this point, we'll create a temporary scale and fix this later\r\n elif \"1\" in column:\r\n if column[\"1\"] > 1:\r\n score = 3.75\r\n else:\r\n score = 3.5 \r\n result.append(score) \r\n\r\n try:\r\n return round(sum(result)/len(result), 2)\r\n except ZeroDivisionError:\r\n return 0",
"def _event_game_score(event_row, solutions, books):\n game_score_awarded = 0.0\n if event_row[\"Event\"] == \"WorksheetSubmit\":\n game_score_awarded = _award_worksheet(event_row)\n elif event_row[\"Event\"] == \"BooksAndArticles\":\n game_score_awarded = _award_books(event_row, books)\n elif event_row[\"Event\"] == \"Scanner\":\n game_score_awarded = _award_scan(event_row, solutions)\n elif event_row[\"Event\"] == \"Conversation\":\n game_score_awarded = _award_conversation(event_row)\n return game_score_awarded",
"def get_scores_section_to_planet(self):\n table = {}\n\n row_length = (self.height // MyCommon.Constants.NUM_SECTIONS) + 1 ## +1 TO COUNT LAST ITEM IN RANGE\n col_length = (self.width // MyCommon.Constants.NUM_SECTIONS) + 1\n\n for r in range(row_length):\n for c in range(col_length):\n curr_section = (r, c)\n table[curr_section] = self.calculate_scores_to_planets(curr_section)\n\n return table",
"def calculateScore(board,gameState):\n pass",
"def calculate_score(self):\n\t\tlocation_score = 0\n\t\tfor location in self.locations:\n\t\t\tlocation_score += location.get_vps()\n\t\t\n\t\tlord_score = 0\n\t\tfor lord in self.lords:\n\t\t\tlord_score += lord.vps\n\t\t\t\n\t\taffiliate_score = 0\n\t\tfor affiliate_race in self.affiliates:\n\t\t\tif (affiliate_race):\n\t\t\t\taffiliate_score += min(affiliate_race)\n\t\t\t\n\t\tmonster_score = sum(self.monsters)\n\t\t\n\t\treturn location_score + lord_score + affiliate_score + monster_score",
"def calculateScore(self, annotation, u_score_0, u_score_1, s_score):\n if annotation == 1:\n a = s_score * u_score_1\n b = 1 - u_score_0\n c = 1 - s_score\n\n elif annotation == 0:\n a = s_score * (1 - u_score_1)\n b = 1 - s_score\n c = u_score_0\n # Preliminary catch of zero division error\n # TODO: Figure out how to handle it\n try:\n score = a / (a + b * c)\n # leave score unchanged\n except ZeroDivisionError as e:\n print(e)\n score = s_score\n\n return score",
"def calculate_score(assessments: List[float]) -> float:\n score = 0\n for i in range(0, len(assessments)):\n score += assessments[i]*weights[i]\n return score",
"def score_sp(self, alignment):\n return sum(\n [self.__score_column(alignment.column(col_idx))\n for col_idx in range(len(alignment))]\n )",
"def scscore(mol: SmallMolecule) -> float:\n return _sccore(mol)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
returns the number of bank accounts
|
def count_accounts():
return BankAccount.__no_of_accounts
|
[
"def get_account_count(self):\n self.hive.rpc.set_next_node_on_empty_reply(False)\n if self.hive.rpc.get_use_appbase():\n ret = self.hive.rpc.get_account_count(api=\"condenser\")\n else:\n ret = self.hive.rpc.get_account_count()\n return ret",
"def num_withdrawals(goal):\n transactions = GoalTransaction.objects.filter(goal=goal)\n\n if not transactions:\n return 0\n\n withdrawals = 0\n\n for t in transactions:\n if t.is_withdraw:\n withdrawals += 1\n\n return withdrawals",
"def get_total_bets():\n\n return TABLE_BETS['id'].count()",
"def get_account_balance(self):\n return self.execute_private_api(\"/api/accounts/balance\", \"GET\")",
"def unconfirmed_tx_count():\n data = make_request(\"https://blockchain.info/q/unconfirmedcount\")\n return int(data) if int(data) > 0 else -1",
"def getAccountAvailableTotal(self, account: CryptoAccount) -> int:\n\n # the account info\n accountAddress = account.getAddress()\n accountPublicKey = account.getPublicKey()\n accountPrivateKey = account.getPrivateKey()\n\n # get account total from the utxo set, for the specific recipient\n balance = 0\n for utxSetKey, utxoElement in self.__UTXOSet.items(): # for each unspent tx output in the utxo set\n\n # check if the tx output is spendable\n isSpendable = self.isTxOutputSpendable(utxSetKey, utxoElement, accountPrivateKey, accountPublicKey)\n\n # if the tx output is related to the specific recipient address and if it can be spent (script result true)\n if utxoElement.getRecipient() == accountAddress and isSpendable:\n balance += utxoElement.getValue()\n return balance",
"def _get_num_transactions(self, address):\n params = [f\"0x{address}\", self.DEFAULT_BLOCK_LATEST]\n nonce = self._json_call(\"eth_getTransactionCount\", params)\n return nonce",
"def account_number(self) -> int:\n if self._account_number == 0:\n self._account_number = self.info().account_number\n return self._account_number",
"def get_number_territories(self):\n territories_total = 0\n for data in self.country_data.values():\n if data[\"number_of_users\"] > 0:\n territories_total += 1\n\n return territories_total",
"def credit_limit(self):\n return self._balance / 2",
"def countCardsByNum(self, number):\n if self.verbose:\n print(self.name + \" counting all cards of number \" + str(number))\n if self.log is not None:\n self.log.write(self.name + \" counting all cards of number \" + str(number) + \"\\n\")\n return len(self.findCardsByNum(number))",
"def countPlayers():\n\n # establish db connection\n DB, cursor = connect()\n\n # fetch number of players registered\n cursor.execute(\"SELECT count(*) from player_registry\")\n player_count = cursor.fetchone()[0]\n DB.close()\n\n return player_count",
"def count(self):\n info = self.describe()\n return info['Table'].get('ItemCount', 0)",
"def get_dataset_total_count(self):\n return int(requests.get(f'{self.url}?$select=count(*)', headers=self.headers).json()[0]['count'])",
"def count(cls, client) :\n\t\ttry :\n\t\t\tobj = vpnalwaysonprofile()\n\t\t\toption_ = options()\n\t\t\toption_.count = True\n\t\t\tresponse = obj.get_resources(client, option_)\n\t\t\tif response :\n\t\t\t\treturn response[0].__dict__['___count']\n\t\t\treturn 0\n\t\texcept Exception as e :\n\t\t\traise e",
"def get_total_active_cases(self):\n parsed_data = self.__get_response()\n active_cases_section = parsed_data.find(\"li\", {\"class\": \"bg-blue\"}).find_all(\"strong\", {\"class\": \"mob-hide\"})[1]\n total_active_cases = str(active_cases_section.text).split()[0]\n return int(total_active_cases)",
"def get_number_of_rows(site,resource_id,API_key=None):\n ckan = ckanapi.RemoteCKAN(site, apikey=API_key)\n results_dict = ckan.action.datastore_info(id = resource_id)\n ic(results_dict)\n return results_dict['meta']['count']",
"def get_number_of_users(self):\n return len(self.get_users_id())",
"def get_checking_account_balance():\n try:\n browser = create_webdriver()\n browser.get(\"https://bankofamerica.com\")\n except Exception as err:\n print(\n \"[bank_of_america.__init__.get_checking_account_balance]: \"\n \"Error creating the webdriver: {}\".format(err)\n )\n exit()\n browser = login(browser, get_credentials())\n try:\n checking_account_balance = browser.find_element_by_xpath(\n '//*[@id=\"Traditional\"]/li[1]/div[1]/div[1]/span'\n ).text\n return checking_account_balance\n except Exception as err:\n print(\n \"[bank_of_america.__init__.get_checking_account_balance]: \"\n \"Error finding the actual balance. So close... sorry. \"\n \"Error: {}\".format(err)\n )\n exit()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Find a constant that scales F to E.
|
def findConst(F, E):
for k in range(2):
for l in range(2):
if E[k, l]!=0:
return F[k, l]/E[k, l]
|
[
"def as_constant(self, eps=1e-14):\n if self.is_scalar_field():\n maxval = self.f.vector().max() # global (!) maximum value\n minval = self.f.vector().min() # global (!) minimum value\n if (maxval - minval) < eps:\n return maxval\n else:\n raise RuntimeError(\"Field does not have a unique constant value.\")\n else:\n raise NotImplementedError()",
"def _get_constant_function(constant: float):\n\n def function(x):\n return constant\n\n return function",
"def fermi(beta, E):\n return 1./(1.+np.exp(E*beta))",
"def specific_energy_func(self):\n return self.P.val - self.inl[2].m.val_SI * self.e.val",
"def _bounds_eNu(eE):\n return bounds_eNu",
"def _get_factor(min_: float, max_: float, px_size: int):\n range_ = abs(max_ - min_)\n return px_size / range_ if range_ != 0 else 1 # if we only need to represent 1 pixel, we can use 1 as density",
"def computeF(u_max, rho, aval, bval):\n\treturn u_max*rho*(1 - aval*rho - bval*rho**2)",
"def SpreadFactor(self): \n return 4.5",
"def f_to_c(temp_f):\n return (temp_f - 32) * 5 / 9",
"def r(self, e, exp, state):\n return (1 + self.istar_value(state)) * exp / e - 1",
"def best_constant(self):\n if np.all(self.const_gen[0] == 0):\n return self.const_gen[0]\n\n ss = self.inds[0, 1:]\n index = ss >= (100 + self.x_num)\n ls = ss[index] - 100 - self.x_num\n sub_index = [i for i in range(self.const_gen.shape[1]) if i not in ls]\n const = self.const_gen[0]\n const[sub_index] = 0\n return const",
"def compute_epipole(F):\n \n # return null space of F (Fx=0)\n U,S,V = np.linalg.svd(F)\n e = V[-1]\n return e/e[2]",
"def __const_c(self):\n return gamma((self.eta+1)/2) \\\n / ((np.pi*(self.eta-2))**.5*gamma(self.eta/2))",
"def get_scale_factor(self, node):\n m, M = node.getTightBounds()\n model_R = (M - m)[0]/2\n\n return self.R / model_R",
"def _cie_rgb_EOCF(value):\n\n value = np.asarray(value)\n\n return value ** 2.2",
"def quality_factor(L):\r\n from numpy import sqrt\r\n L /= (keV/um)\r\n if L < 10:\r\n return 1\r\n elif L < 100:\r\n return 0.32*L-2.2\r\n else:\r\n return 300./sqrt(L)",
"def kelvinToFahrenheit(value):\n # F = K * (9/5) - 459.67\n return value * (9.0/5.0) - 459.67",
"def piecewise_constant_approximation(u):\n return lookup_table[array(n_intervals * u).astype(int)]",
"def chebint(x,f):\n N = len(x)\n wi = pi/N\n return sum(sqrt(1-x**2)*f*pi*wi)",
"def findEquilibrium(f):\n\ta, b = f\n\treturn b/(1-a)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Process alarm and return additional data to be added to the alarm data. Client-side configuration must be pushed as the "config" key
|
def get_data(self, alarm, config):
return {}
|
[
"def create_alarm(self, config_alarm_info):\n mon_plugin = MonPlugin()\n plugin_uuid = mon_plugin.configure_rest_plugin()\n alarm_uuid = mon_plugin.configure_alarm(config_alarm_info)\n return alarm_uuid",
"def _get_alarm_data(self):\n return self._replace_id(self.request.get_data(), 'alarm_id')",
"def add_alarm():\n global alarm_adapt\n\n # Parsing the hour argument, if not present or wrong return error message\n hour = request.args.get('hour')\n if hour is None:\n message = {'error': 'The \\'hour\\' argument is required to add alarm'}\n return jsonify(message)\n else:\n try:\n hour = int(hour)\n except ValueError:\n message = {'error': 'The \\'hour\\' argument must be an integer'}\n return jsonify(message)\n\n # Parsing the minute argument, if not present or wrong return error message\n minute = request.args.get('minute')\n if minute is None:\n message = {'error': 'The \\'minute\\' argument is required to add alarm'}\n return jsonify(message)\n else:\n try:\n minute = int(minute)\n except ValueError:\n message = {'error': 'The \\'minute\\' argument must be an integer'}\n return jsonify(message)\n\n def check_boolean(arg_str):\n \"\"\"\n Internal function to simplify boolean arguments parsing.\n Set to True by default.\n \"\"\"\n arg_value = request.args.get(arg_str)\n if arg_value is None:\n parse_success = True\n return_value = True\n elif arg_value.lower() in (\"true\", \"yes\", \"enabled\"):\n parse_success = True\n return_value = True\n elif arg_value.lower() in (\"false\", \"no\", \"disabled\"):\n parse_success = True\n return_value = False\n else:\n parse_success = False\n return_value = 'The \\'%s\\' argument has to be a bool' % arg_str\n return parse_success, return_value\n\n # Parsing the weekdays\n success, monday = check_boolean(\"monday\")\n if success is False:\n return jsonify({'error': monday})\n success, tuesday = check_boolean(\"tuesday\")\n if success is False:\n return jsonify({'error': tuesday})\n success, wednesday = check_boolean(\"wednesday\")\n if success is False:\n return jsonify({'error': wednesday})\n success, thursday = check_boolean(\"thursday\")\n if success is False:\n return jsonify({'error': thursday})\n success, friday = check_boolean(\"friday\")\n if success is False:\n return jsonify({'error': friday})\n success, saturday = check_boolean(\"saturday\")\n if success is False:\n return jsonify({'error': saturday})\n success, sunday = check_boolean(\"sunday\")\n if success is False:\n return jsonify({'error': sunday})\n\n # Parsing the enabled argument, if not present or wrong return error message\n success, enabled = check_boolean(\"enabled\")\n if success is False:\n return jsonify({'error': enabled})\n\n # Parsing the label argument\n label = request.args.get('label')\n if label is None:\n label = ''\n\n # Parsing the timestamp argument, if wrong data type return error\n timestamp = request.args.get('timestamp')\n if timestamp is not None:\n try:\n timestamp = long(timestamp)\n except ValueError:\n message = {'error': 'The \\'timestamp\\' argument must be an integer'}\n return jsonify(message)\n\n # At this point all arguments should be correct\n json_response = alarm_adapt.json_add_alarm(\n hour, minute, enabled=enabled, label=label, timestamp=timestamp,\n days=(monday, tuesday, wednesday, thursday, friday, saturday, sunday))\n return Response(json_response, mimetype='application/json')",
"def handle_create_alarm(self, message):\n if self.neon_in_request(message):\n content = self._extract_alert_params(message, AlertType.ALARM)\n content[\"kind\"] = int(AlertType.ALARM)\n LOG.info(content)\n self.confirm_alert(\"alarm\", content, message)",
"def parse_alert(context, alert):\n\n alert = oid_mapper.OidMapper.map_oids(alert)\n # Check for mandatory alert attributes\n for attr in AlertHandler._mandatory_alert_attributes:\n if not alert.get(attr):\n msg = \"Mandatory information %s missing in alert message. \" \\\n % attr\n raise exception.InvalidInput(msg)\n\n alert_model = {}\n\n # Fill alarm id and fill alarm_name with corresponding mapping names\n alert_model['alert_id'] = alert['emcAsyncEventCode']\n alert_model['alert_name'] = alert_mapper.alarm_id_name_mapping.get(\n alert_model['alert_id'], alert_model['alert_id'])\n\n alert_model['severity'] = AlertHandler.SEVERITY_MAP.get(\n alert['connUnitEventSeverity'],\n constants.Severity.INFORMATIONAL)\n\n alert_model['category'] = constants.Category.NOT_SPECIFIED\n alert_model['type'] = constants.EventType.EQUIPMENT_ALARM\n\n alert_model['sequence_number'] = alert['connUnitEventId']\n\n # trap info do not contain occur time, update with received time\n # Get date and time and convert to epoch format\n pattern = '%Y-%m-%d %H:%M:%S'\n curr_time = strftime(pattern, gmtime())\n\n alert_model['occur_time'] = int(time.mktime(time.strptime(curr_time,\n pattern)))\n alert_model['description'] = alert['connUnitEventDescr']\n alert_model['recovery_advice'] = 'None'\n alert_model['resource_type'] = alert['connUnitType']\n\n # Location is name-value pair having component type and component name\n component_type = alert_mapper.component_type_mapping.get(\n alert.get('emcAsyncEventComponentType'), \"\")\n alert_model['location'] = 'Array id=' \\\n + alert['connUnitName'] \\\n + ',Component type=' \\\n + component_type \\\n + ',Component name=' \\\n + alert['emcAsyncEventComponentName'] \\\n + ',Event source=' \\\n + alert['emcAsyncEventSource']\n if alert['connUnitName']:\n alert_model['serial_number'] = alert['connUnitName']\n return alert_model",
"def edit_alarm():\n global alarm_adapt\n\n # Parsing the id argument, if not present or wrong return error message\n id_ = request.args.get('id')\n if id_ is None:\n message = {'error': 'The \\'id\\' argument is required to edit alarm'}\n return jsonify(message)\n else:\n try:\n id_ = int(id_)\n except ValueError:\n message = {'error': 'The \\'id\\' argument must be an integer'}\n return jsonify(message)\n\n # Parsing the hour argument\n hour = request.args.get('hour')\n if hour is not None:\n try:\n hour = int(hour)\n except ValueError:\n message = {'error': 'The \\'hour\\' argument must be an integer'}\n return jsonify(message)\n\n # Parsing the minute argument\n minute = request.args.get('minute')\n if minute is not None:\n try:\n minute = int(minute)\n except ValueError:\n message = {'error': 'The \\'minute\\' argument must be an integer'}\n return jsonify(message)\n\n def check_boolean(arg_str):\n \"\"\" Internal function to simplify boolean arguments parsing. \"\"\"\n return_value = request.args.get(arg_str)\n parse_success = True\n if return_value is not None:\n if return_value.lower() in (\"true\", \"yes\", \"enabled\"):\n parse_success = True\n return_value = True\n elif return_value.lower() in (\"false\", \"no\", \"disabled\"):\n parse_success = True\n return_value = False\n else:\n parse_success = False\n return_value = 'The \\'%s\\' argument has to be a bool' % arg_str\n return parse_success, return_value\n\n # Parsing the weekdays\n success, monday = check_boolean(\"monday\")\n if success is False:\n return jsonify({'error': monday})\n success, tuesday = check_boolean(\"tuesday\")\n if success is False:\n return jsonify({'error': tuesday})\n success, wednesday = check_boolean(\"wednesday\")\n if success is False:\n return jsonify({'error': wednesday})\n success, thursday = check_boolean(\"thursday\")\n if success is False:\n return jsonify({'error': thursday})\n success, friday = check_boolean(\"friday\")\n if success is False:\n return jsonify({'error': friday})\n success, saturday = check_boolean(\"saturday\")\n if success is False:\n return jsonify({'error': saturday})\n success, sunday = check_boolean(\"sunday\")\n if success is False:\n return jsonify({'error': sunday})\n\n alarm_repeat = None\n if any([monday, tuesday, wednesday, thursday, friday, saturday, sunday]):\n alarm_repeat = list(alarm_adapt.get_alarm_repeat(id_))\n if monday is not None:\n alarm_repeat[0] = monday\n if tuesday is not None:\n alarm_repeat[1] = tuesday\n if wednesday is not None:\n alarm_repeat[2] = wednesday\n if thursday is not None:\n alarm_repeat[3] = thursday\n if friday is not None:\n alarm_repeat[4] = friday\n if saturday is not None:\n alarm_repeat[5] = saturday\n if sunday is not None:\n alarm_repeat[6] = sunday\n\n # Parsing the enabled argument, if wrong return error message\n success, enabled = check_boolean(\"enabled\")\n if success is False:\n return jsonify({'error': enabled})\n\n # Parsing the label argument\n label = request.args.get('label')\n\n # At this point all arguments should be correct\n json_response = alarm_adapt.json_edit_alarm(\n alarm_id=id_, hour=hour, minute=minute, enabled=enabled, label=label,\n days=alarm_repeat)\n return Response(json_response, mimetype='application/json')",
"def setup_alarm(self):\n try:\n self.next_event = Alarm.next_event_overall()\n except NoAlarms:\n self.logger.warn(\"no alarms !\")\n else:\n self._process = Process(target=event_process, args=[self.next_event, self.log_setup])\n self._process.daemon = True\n self._process.start()",
"def parse_alarm_payload(payload, zone):\n alarm_list = payload[\"CurrentAlarmList\"]\n tree = XML.fromstring(alarm_list.encode(\"utf-8\"))\n\n # An alarm list looks like this:\n # <Alarms>\n # <Alarm ID=\"14\" StartTime=\"07:00:00\"\n # Duration=\"02:00:00\" Recurrence=\"DAILY\" Enabled=\"1\"\n # RoomUUID=\"RINCON_000ZZZZZZ1400\"\n # ProgramURI=\"x-rincon-buzzer:0\" ProgramMetaData=\"\"\n # PlayMode=\"SHUFFLE_NOREPEAT\" Volume=\"25\"\n # IncludeLinkedZones=\"0\"/>\n # <Alarm ID=\"15\" StartTime=\"07:00:00\"\n # Duration=\"02:00:00\" Recurrence=\"DAILY\" Enabled=\"1\"\n # RoomUUID=\"RINCON_000ZZZZZZ01400\"\n # ProgramURI=\"x-rincon-buzzer:0\" ProgramMetaData=\"\"\n # PlayMode=\"SHUFFLE_NOREPEAT\" Volume=\"25\"\n # IncludeLinkedZones=\"0\"/>\n # </Alarms>\n\n alarms = tree.findall(\"Alarm\")\n alarm_args = {}\n for alarm in alarms:\n values = alarm.attrib\n alarm_id = values[\"ID\"]\n\n alarm_zone = next(\n (z for z in zone.all_zones if z.uid == values[\"RoomUUID\"]), None\n )\n if alarm_zone is None:\n # Some alarms are not associated with a zone, ignore these\n continue\n\n args = {\n \"zone\": alarm_zone,\n # StartTime not StartLocalTime which is used by CreateAlarm\n \"start_time\": datetime.strptime(values[\"StartTime\"], \"%H:%M:%S\").time(),\n \"duration\": (\n None\n if values[\"Duration\"] == \"\"\n else datetime.strptime(values[\"Duration\"], \"%H:%M:%S\").time()\n ),\n \"recurrence\": values[\"Recurrence\"],\n \"enabled\": values[\"Enabled\"] == \"1\",\n \"program_uri\": (\n None\n if values[\"ProgramURI\"] == \"x-rincon-buzzer:0\"\n else values[\"ProgramURI\"]\n ),\n \"program_metadata\": values[\"ProgramMetaData\"],\n \"play_mode\": values[\"PlayMode\"],\n \"volume\": values[\"Volume\"],\n \"include_linked_zones\": values[\"IncludeLinkedZones\"] == \"1\",\n }\n\n alarm_args[alarm_id] = args\n return alarm_args",
"def getAlarmInformation(self):\n command = self.COMMANDS[\"getAlarmInformation\"]\n logger.info(f\"Getting alarm information\")\n url = self.ip + \"/\" +command\n logger.info(f\"Accessing {url}\")\n r = requests.get(url)\n\n if r.status_code in RESPONSES:\n return RESPONSES[r.status_code](r).json()\n else:\n raise cRIOUnknownStatusCode(r.status_code)",
"def getFrontendData(self):\n if request.args.get('action') == 'editalarm':\n \n if request.args.get('alarmid', '0') == '0': # add new alarm\n alarm = classes.get('alarm')(datetime.datetime.now(), '', 2, 0)\n #flash(babel.gettext(u'alarms.alarmadded'), 'alarms.add')\n \n else: # edit alarm\n alarm = classes.get('alarm').getAlarms(id=int(request.args.get('alarmid')))\n return render_template('frontend.alarms_edit.html', alarm=alarm, cities=classes.get('city').getCities(), objects=classes.get('alarmobject').getAlarmObjects(), cars=classes.get('car').getCars(), frontendarea=request.args.get('frontendarea'))\n\n elif request.args.get('action') == 'alarmmonitor': # send alarm to monitor\n for monitor in classes.get('monitor').getMonitors():\n scheduler.deleteJobForEvent('changeLayout') # send update to monitors\n for l in classes.get('monitorlayout').getLayouts(mid=int(monitor.id)):\n if l.trigger == 'alarm_added':\n #monitorserver.sendMessage(str(monitor.id), 'load', ['layoutid=%s' % l.id, 'alarmid=%s' % request.args.get('alarmid')])\n monitorserver.sendMessage(str(monitor.id), 'load', layoutid='%s' % l.id, alarmid='%s' % request.args.get('alarmid')) # TODO check\n\n elif request.args.get('action') == 'printalarm':\n classes.get('printer').getPrinters(pid=int(request.args.get('printerdef'))).doPrint(object=classes.get('alarm').getAlarms(id=int(request.args.get('alarmid'))), id=request.args.get('alarmid'), copies=1)\n return \"\"\n\n elif request.args.get('action') == 'routeinfo':\n alarm = classes.get('alarm').getAlarms(request.args.get('alarmid'))\n data = alarm.getRouting()\n return render_template('frontend.alarms_routing.html', routing=data)\n\n elif request.args.get('action') == 'routecoords':\n alarm = classes.get('alarm').getAlarms(request.args.get('alarmid'))\n data = alarm.getRouting()\n return jsonify(data)\n\n elif request.args.get('action') == 'message':\n return render_template('frontend.alarms_message.html', alarm=classes.get('alarm').getAlarms(request.args.get('alarmid')), messagestates=classes.get('alarmhistory').historytypes, area=request.args.get('area'), reload=request.args.get('reload', 'true'))\n\n elif request.args.get('action') == 'addmessage': # add message\n if request.form.get('messagetext') != \"\":\n alarm = classes.get('alarm').getAlarms(request.form.get('alarmid'))\n alarm.addHistory(request.form.get('messagestate'), request.form.get('messagetext'))\n db.session.commit()\n return render_template('frontend.alarms_message.html', alarm=classes.get('alarm').getAlarms(request.form.get('alarmid')), messagestates=classes.get('alarmhistory').historytypes, area=request.args.get('area'))\n\n elif request.args.get('action') == 'deletemessage': # delete selected message\n #print \"delete message with timestamp\", request.args.get('datetime'), request.args.get('alarmid')\n alarm = classes.get('alarm').getAlarms(request.args.get('alarmid'))\n for msg in alarm.history:\n if str(msg.timestamp) == request.args.get('datetime'):\n db.session.delete(msg)\n db.session.commit()\n return render_template('frontend.alarms_message.html', alarm=classes.get('alarm').getAlarms(request.args.get('alarmid')), messagestates=classes.get('alarmhistory').historytypes, area=request.args.get('area'))\n\n elif request.args.get('action') == 'housecoordinates': # return a dict with coordinats of housenumber\n if request.args.get('alarmid') != \"None\":\n alarm = classes.get('alarm').getAlarms(id=int(request.args.get('alarmid')))\n if alarm and alarm.housenumber:\n return {'lat': map(lambda x: x[0], 
alarm.housenumber.points), 'lng': map(lambda x: x[1], alarm.housenumber.points)}\n return []\n\n elif request.args.get('action') == 'evalhouse': # try to eval housenumer\n street = classes.get('street').getStreet(request.args.get('streetid'))\n if street:\n points = dict(lat=[], lng=[])\n for hn in street.housenumbers:\n if str(hn.number) == request.args.get('housenumber').strip():\n points['lat'].extend(map(lambda x: x[0], hn.points))\n points['lng'].extend(map(lambda x: x[1], hn.points))\n return points\n return {}\n\n elif request.args.get('action') == 'alarmsforstate': # render alarms for given state\n if 'alarmfilter' not in session:\n session['alarmfilter'] = 7\n return render_template('frontend.alarms_alarm.html', alarms=classes.get('alarm').getAlarms(days=int(session['alarmfilter']), state=int(request.args.get('state', '-1'))), printdefs=classes.get('printer').getActivePrintersOfModule('alarms'))\n return \"\"",
"def onAlarm(self):",
"def _handle_alarm(self, timestamp: datetime, alarm: str, state: bool):\n _LOGGER.debug(\"Handle alarm: %s; State: %s\", alarm, state)\n\n self.last_activity = timestamp\n self.alarm_timestamp[alarm] = timestamp\n self.alarm_state[alarm] = state\n\n for handler in self._alarm_handlers:\n handler(self, timestamp, alarm, state)",
"def _get_alarm_dict(self, **kwargs):\n alarm_id = self._generate_random_name()\n alarm = {\"alarm_id\": alarm_id,\n \"name\": \"TestAlarm-%s\" % alarm_id,\n \"description\": \"Test Alarm\"}\n\n alarm.update(kwargs)\n return alarm",
"def _updateAlarm(self) -> None:\r\n pass",
"def _add_metric_alarm_config(alarm_info, current_alarms):\n # Some keys that come from the argparse options can be omitted\n omitted_keys = {'debug', 'alarm_name', 'command', 'clusters', 'function'}\n\n current_alarms[alarm_info['alarm_name']] = {\n key: value\n for key, value in alarm_info.iteritems() if key not in omitted_keys\n }\n\n return current_alarms",
"def get_device_alarms(self) -> Dict[str, Any]:\n\n logger.debug(\"Requesting device alarms\")\n\n alarms = []\n devices = self.get_devices()\n for device in devices:\n device_settings = self.get_device_settings(device[\"deviceId\"])\n alarms += device_settings[\"alarms\"]\n return alarms",
"def save(self):\n args = [\n (\"StartLocalTime\", self.start_time.strftime(TIME_FORMAT)),\n (\n \"Duration\",\n \"\" if self.duration is None else self.duration.strftime(TIME_FORMAT),\n ),\n (\"Recurrence\", self.recurrence),\n (\"Enabled\", \"1\" if self.enabled else \"0\"),\n (\"RoomUUID\", self.zone.uid),\n (\n \"ProgramURI\",\n \"x-rincon-buzzer:0\" if self.program_uri is None else self.program_uri,\n ),\n (\"ProgramMetaData\", self.program_metadata),\n (\"PlayMode\", self.play_mode),\n (\"Volume\", self.volume),\n (\"IncludeLinkedZones\", \"1\" if self.include_linked_zones else \"0\"),\n ]\n if self.alarm_id is None:\n response = self.zone.alarmClock.CreateAlarm(args)\n self._alarm_id = response[\"AssignedID\"]\n alarms = Alarms()\n if alarms.last_id == int(self.alarm_id) - 1:\n alarms.last_alarm_list_version = \"{}:{}\".format(\n alarms.last_uid, self.alarm_id\n )\n alarms.alarms[self.alarm_id] = self\n else:\n # The alarm has been saved before. Update it instead.\n args.insert(0, (\"ID\", self.alarm_id))\n self.zone.alarmClock.UpdateAlarm(args)\n return self.alarm_id",
"def _add_global_metric_alarm(self, alarm_info):\n function_name = alarm_info['function']\n\n func_config_name = '{}_config'.format(function_name)\n\n # Check if metrics are not enabled, and ask the user if they would like to enable them\n if func_config_name not in self.config['lambda']:\n self.config['lambda'][func_config_name] = {}\n\n function_config = self.config['lambda'][func_config_name]\n\n if function_name in CLUSTERED_FUNCTIONS:\n if not self._clusters_with_metrics_enabled(function_name):\n prompt = (\n 'Metrics are not currently enabled for the \\'{}\\' function '\n 'within any cluster. Creating an alarm will have no effect '\n 'until metrics are enabled for this function in at least one '\n 'cluster. Would you still like to continue?'.format(function_name)\n )\n if not continue_prompt(message=prompt):\n return False\n\n else:\n if not function_config.get('enable_custom_metrics'):\n prompt = (\n 'Metrics are not currently enabled for the \\'{}\\' function. '\n 'Would you like to enable metrics for this function?'\n ).format(function_name)\n\n if continue_prompt(message=prompt):\n self.toggle_metrics(function_name, enabled=True)\n\n elif not continue_prompt(message='Would you still like to add this alarm '\n 'even though metrics are disabled?'):\n return False\n\n metric_alarms = function_config.get('custom_metric_alarms', {})\n\n # Format the metric name for the aggregate metric\n alarm_settings = alarm_info.copy()\n alarm_settings['metric_name'] = '{}-{}'.format(\n metrics.FUNC_PREFIXES[function_name],\n alarm_settings['metric_name']\n )\n\n function_config['custom_metric_alarms'] = self._add_metric_alarm_config(\n alarm_settings,\n metric_alarms\n )\n LOGGER.info('Successfully added \\'%s\\' metric alarm to '\n '\\'conf/lambda.json\\'.', alarm_settings['alarm_name'])\n\n return True",
"def create_alarm() -> str:\r\n #List space for just one alarm\r\n schedule_section = []\r\n\r\n #Catches date and time user has entered\r\n user_date_time = request.args.get(\"date_time\")\r\n #Catches reason user has entered\r\n user_reason = request.args.get(\"reason\")\r\n\r\n #Seperates date and time\r\n analyse_list = list(user_date_time)\r\n alarm_date = \"\"\r\n alarm_time = \"\"\r\n for date_part in range(0, 10):\r\n alarm_date = alarm_date + analyse_list[date_part]\r\n for time_part in range(11, 16):\r\n alarm_time = alarm_time + analyse_list[time_part]\r\n\r\n #appends information to a list\r\n schedule_section.append(alarm_date)\r\n schedule_section.append(alarm_time)\r\n schedule_section.append(user_reason)\r\n\r\n #appends list to the overall alarm list\r\n alarm_schedule.append(schedule_section)\r\n\r\n #Outputs Alarm created by the user\r\n confirm_message = (\"Alarm created at\", alarm_time, \"on\",\r\n alarm_date, \"because of\", user_reason)\r\n\r\n #Accessing Template from JSON File\r\n config_file = config_handle()\r\n create_template = config_file[\"file_paths\"][\"create_output\"]\r\n return render_template(create_template,\r\n user_confirm=confirm_message)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Simple put/get on ranges of rows, hitting multiple sstables
|
def rangeputget_test(self):
cluster = self.cluster
cluster.populate(3).start()
[node1, node2, node3] = cluster.nodelist()
cursor = self.cql_connection(node1).cursor()
self.create_ks(cursor, 'ks', 2)
self.create_cf(cursor, 'cf')
tools.range_putget(cluster, cursor)
|
[
"def range(self, row, start, end):\n row_str = idkey_as_str(row)\n start_str = start.strftime(_TIME_FORMAT)\n end_str = end.strftime(_TIME_FORMAT)\n fmt = u\"Range(%s=%s,%s,%s)\"\n return PQLQuery(fmt % (self.name, row_str, start_str, end_str),\n self.index)",
"def set_range(self, val, row_range, col_range):\r\n\t\tfor i in row_range:\r\n\t\t\tfor j in col_range:\r\n\t\t\t\tself[(i,j)] = val",
"def test_query_routing(self):\n row_counts = [20, 30]\n for shard_index in [0, 1]:\n write_rows_to_shard(row_counts[shard_index], shard_index)\n vtgate_conn = get_connection()\n for shard_index in [0, 1]:\n # Fetch all rows in each shard\n cursor = vtgate_conn.cursor(\n tablet_type='master', keyspace=KEYSPACE_NAME,\n keyranges=[keyrange.KeyRange(SHARD_NAMES[shard_index])])\n rowcount = cursor.execute('select * from vt_insert_test', {})\n # Verify row count\n self.assertEqual(rowcount, row_counts[shard_index])\n # Verify keyspace id\n for result in cursor.results:\n kid = result[2]\n self.assertIn(kid, SHARD_KID_MAP[SHARD_NAMES[shard_index]])\n\n # Do a cross shard range query and assert all rows are fetched.\n # Use this test to also test the vtgate vars are correctly updated.\n v = utils.vtgate.get_vars()\n key0 = 'Execute.' + KEYSPACE_NAME + '.' + SHARD_NAMES[0] + '.master'\n key1 = 'Execute.' + KEYSPACE_NAME + '.' + SHARD_NAMES[1] + '.master'\n before0 = v['VttabletCall']['Histograms'][key0]['Count']\n before1 = v['VttabletCall']['Histograms'][key1]['Count']\n\n cursor = vtgate_conn.cursor(\n tablet_type='master', keyspace=KEYSPACE_NAME,\n keyranges=[keyrange.KeyRange('75-95')])\n rowcount = cursor.execute('select * from vt_insert_test', {})\n self.assertEqual(rowcount, row_counts[0] + row_counts[1])\n vtgate_conn.close()\n\n v = utils.vtgate.get_vars()\n after0 = v['VttabletCall']['Histograms'][key0]['Count']\n after1 = v['VttabletCall']['Histograms'][key1]['Count']\n self.assertEqual(after0 - before0, 1)\n self.assertEqual(after1 - before1, 1)",
"def get_value_cells(start_row, end_row):\n start_source = 'C' + `start_row`\n end_source = 'K' + `end_row`\n the_range = start_source, end_source\n \n return the_range",
"def fetch_bigtable_rows(big_table, keys, other_silly_variable=None):",
"def putcellslice (self, rownr, value, blc, trc, inc=[]):\n return self._table.putcellslice (self._column, rownr, value, blc, trc, inc);",
"def get_bounded_range_values(user, spreadsheet_id,\n start_row_index, start_col_index, end_row_index, end_col_index):\n if start_row_index < 0 or start_col_index < 0 or end_row_index < 0 or end_col_index < 0:\n raise ValueError('Negative index')\n service = _build_sheets_service(user)\n start_cell = _convert_to_a1(start_row_index, start_col_index)\n end_cell = _convert_to_a1(end_row_index, end_col_index)\n return _execute_request(\n service.spreadsheets().values().get(spreadsheet_id=spreadsheet_id, range=\"{}:{}\".format(start_cell, end_cell)))",
"def rows(table, start, stop):\n\n return table[start : stop + 1]",
"def rowRange(self, startrow, endrow): # real signature unknown; restored from __doc__\n pass",
"def put_many(self, rows):\n self.flush()\n return self._client.put_many(self._full_name, rows)",
"def _get_rows(self, item):\n if isinstance(item, tuple):\n key, item = item\n else:\n key = self.table.primary_key\n\n index = self.indices[key]\n if len(index.columns) > 1:\n raise ValueError(\"Cannot use .loc on multi-column indices\")\n\n if isinstance(item, slice):\n # None signifies no upper/lower bound\n start = MinValue() if item.start is None else item.start\n stop = MaxValue() if item.stop is None else item.stop\n rows = index.range((start,), (stop,))\n else:\n if not isinstance(item, (list, np.ndarray)): # single element\n item = [item]\n # item should be a list or ndarray of values\n rows = []\n for key in item:\n p = index.find((key,))\n if len(p) == 0:\n raise KeyError(f\"No matches found for key {key}\")\n else:\n rows.extend(p)\n return rows",
"def assignRowsToTable(self):\n st = self.currentStage\n mem = self.mems[self.currentMem]\n tableIndex = self.tableIndex\n self.startRowDict[mem][tableIndex,self.slRange[0]] = self.rowRange[0]\n for sl in self.slRange:\n self.tablesInBlock[mem][sl][self.table] = 1\n pass\n for r in self.rowRange:\n self.lastBlockOfRow[mem][st][r] = self.slRange[-1]\n self.numberOfRowsDict[mem][tableIndex,self.slRange[0]] += 1\n for sl in self.slRange:\n self.dirty[mem][r,sl] = 1\n pass\n self.numWordsLeft -= 1\n pass\n self.logger.debug(\"Assigned \" + str(len(self.rowRange)) + \" rows from \"\\\n + str(self.rowRange[0]) + \" to \" + str(self.rowRange[-1]) +\\\n \"in blocks \" + str(self.slRange[0]) + \" to \" + str(self.slRange[-1])\\\n + \" of \" + mem)\n self.lastBlockOfTable[mem][st][tableIndex] = self.slRange[-1]\n pass",
"def update_cells(self, start, end, vals, sheet=None):\n if sheet:\n self.open_sheet(sheet)\n\n if not self.sheet:\n raise Exception(\"No open worksheet\")\n\n if start == end:\n return\n\n for start_cell, end_cell, val_chunks in self._get_update_chunks(start,\n end,\n vals):\n rng = self._get_range(start_cell, end_cell)\n\n cells = self._retry_range(rng)\n\n if len(val_chunks) != len(cells):\n raise Exception(\"Number of chunked values doesn't match number of cells\")\n\n for val, cell in zip(val_chunks, cells):\n cell.value = val\n\n for cells_chunk in chunks(cells, self._max_update_chunk_size):\n self._retry_update(cells_chunk)",
"def __records_in_range(data_store, range_):\n for index, content in enumerate(data_store):\n position = range_.get_position(index)\n if position == PositionWrtRange.SMALLER:\n pass\n elif position == PositionWrtRange.INSIDE:\n yield Record(index, content)\n else: ## position == PositionWrtRange.LARGER:\n raise StopIteration",
"def test_get_row_by_index(self):\t\n\t\ttest_grid = self.load_grid_181()\n\t\tresult_row = test_grid.get_row(4)\n\n\t\tself.assertEquals(result_row, range(28, 37))",
"def update_shard(shard_id, lower_bound, persister=None):\n persister.exec_stmt(\n RangeShardingSpecification.UPDATE_RANGE,\n {\"params\" : (lower_bound, shard_id)}\n )",
"def get_range(self, model, key, min, max):\n if key not in model.schema.props:\n raise RuntimeError(f\"{key} is not a part of {model.name}'s schema\")\n if not model.schema.props[key].index_key:\n return self.get_item_from_index_set(model, key, min, max)\n else:\n result = []\n for obj in self.storage.get_keys_in_model(model):\n obj_val = getattr(obj, key)\n if obj_val >= min and obj_val <= max:\n result.append(obj)\n return result",
"def offset_range(index_key, start_time, end_time):\n start, end = 0, COLUMN_HEIGHT\n tbase = index_key.get_tbase()\n if tbase == base_time(start_time): start = offset_time(start_time)\n if tbase == base_time(end_time): end = offset_time(end_time)\n return start, end",
"def update_range(self, range_start, range_end, value):\n return self.__update_range(0, 0, len(self.__tree) // 2,\n range_start, range_end, value)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test wide row slices
|
def wide_row_test(self):
cluster = self.cluster
cluster.populate(3).start()
[node1, node2, node3] = cluster.nodelist()
cursor = self.cql_connection(node1).cursor()
self.create_ks(cursor, 'ks', 1)
self.create_cf(cursor, 'cf')
key = 'wide'
for x in xrange(1, 5001):
tools.insert_columns(self, cursor, key, 100, offset=x-1)
for size in (10, 100, 1000):
for x in xrange(1, (50001 - size) / size):
tools.query_columns(self, cursor, key, size, offset=x*size-1)
|
[
"def test_fixed_axes_count_slice(self):\n self.assert_tensor_equal(\n Selection()[:, :, :, 2:4].apply(sample_tensor()),\n sample_tensor()[:, :, :, 2:4])",
"def rowCheck(self, i):\n #row is list of tuples\n #row represents a row of pixels of a photo\n row = self.array[i]\n if row.count(self.outline) > self.size[0]/2:\n return (True, i)\n else: return (False,i)",
"def test_chunk():\n img = _img().copy()\n assert not np.all(img[:10, :10] == 0)\n for i, (chunk, _) in enumerate(transform.chunk_img(img, 10)):\n chunk[:] = i\n for i, (chunk, _) in enumerate(transform.chunk_img(img, 10)):\n assert np.all(chunk == i)\n assert np.all(img[:10, :10] == 0)",
"def test_get_slice_dense(self):\n config.session.execute(\"TRUNCATE TABLE hecuba.istorage\")\n config.session.execute(\"DROP KEYSPACE IF EXISTS hecuba_dislib\")\n\n bn, bm = 5, 5\n x = np.random.randint(100, size=(30, 30))\n ds_data = ds.array(x=x, block_size=(bn, bm))\n data = ds.array(x=x, block_size=(bn, bm))\n data.make_persistent(name=\"hecuba_dislib.test_array\")\n\n slice_indices = [(7, 22, 7, 22), # many row-column\n (6, 8, 6, 8), # single block row-column\n (6, 8, None, None), # single-block rows, all columns\n (None, None, 6, 8), # all rows, single-block columns\n (15, 16, 15, 16), # single element\n # (-10, -5, -10, -5), # out-of-bounds (not\n # implemented)\n # (-10, 5, -10, 5), # out-of-bounds (not implemented)\n (21, 40, 21, 40)] # out-of-bounds (correct)\n\n for top, bot, left, right in slice_indices:\n got = data[top:bot, left:right].collect()\n expected = ds_data[top:bot, left:right].collect()\n\n self.assertTrue(equal(got, expected))\n\n # Try slicing with irregular array\n x = data[1:, 1:]\n data = ds_data[1:, 1:]\n\n for top, bot, left, right in slice_indices:\n got = x[top:bot, left:right].collect()\n expected = data[top:bot, left:right].collect()\n\n self.assertTrue(equal(got, expected))",
"def test_column_row_grid(self):\n img = cv2.imread(constant.TEST_FRAME_PATH)\n grid_size = 3\n height, width, channels = img.shape\n column = int(width / grid_size)\n row = int(height / grid_size)\n img_map, row, column = functions.grid_map(img, grid_size)\n self.assertEqual(height / grid_size, row)\n self.assertEqual(width / grid_size, column)",
"def test_get_rows_square_top(self):\n\n y_dimension = 9\n test_array = self.numpy_it(y_dimension, y_dimension)\n increment = 3\n position = 0\n base = 0\n step = 1\n self.assertEqual(Tile.get_rows(test_array, position, y_dimension, increment, base, step)[2].tolist(),\n [[0, 1, 2, 3, 4, 5, 6, 7, 8],\n [9, 10, 11, 12, 13, 14, 15, 16, 17],\n [18, 19, 20, 21, 22, 23, 24, 25, 26]])\n self.assertEqual((Tile.get_rows(test_array, position, y_dimension, increment, base, step)[0],\n Tile.get_rows(test_array, position, y_dimension, increment, base, step)[1]),\n (0, 3))",
"def test_slice_shape_negative_above(self):\n self.assertEqual(Selection()[3:-1].shape([9]), [5])",
"def test_2d_slice(self):\n\n expected = np.array(\n [\n [\n [1.0, 1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 0.4, 1.0, 1.0],\n [1.0, 0.4, 0.4, 0.4, 1.0],\n [1.0, 1.0, 0.4, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0, 1.0],\n ],\n [\n [1.0, 1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0, 1.0],\n ],\n [\n [1.0, 1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0, 1.0],\n ],\n ],\n )\n kernel = np.array([[0.0, 1.0, 0.0], [1.0, 1.0, 1.0], [0.0, 1.0, 0.0]])\n self.cube.data[2, 2] = 0\n plugin = GeneratePercentilesFromANeighbourhood(2000)\n plugin.percentiles = np.array([10, 50, 90])\n result = plugin.pad_and_unpad_cube(self.cube, kernel)\n self.assertIsInstance(result, Cube)\n self.assertArrayAlmostEqual(result.data, expected)",
"def test_getRowIndices(self):\n is_long = lambda x: len(x) > 10\n is_med = lambda x: len(x) > 3\n is_any = lambda x: len(x) > 0\n self.assertEqual(self.ragged.getRowIndices(is_long), [])\n self.ragged.RowOrder = 'cba'\n self.assertEqual(self.ragged.getRowIndices(is_med), ['c','a'])\n self.ragged.RowOrder = 'abc'\n self.assertEqual(self.ragged.getRowIndices(is_med), ['a','c'])\n self.assertEqual(self.ragged.getRowIndices(is_any), ['a','b','c'])\n #should be able to negate\n self.assertEqual(self.ragged.getRowIndices(is_med, negate=True), ['b'])\n self.assertEqual(self.ragged.getRowIndices(is_any, negate=True), [])",
"def test_index_rows_dense(self):\n config.session.execute(\"TRUNCATE TABLE hecuba.istorage\")\n config.session.execute(\"DROP KEYSPACE IF EXISTS hecuba_dislib\")\n\n bn, bm = 5, 5\n x = np.random.randint(100, size=(10, 10))\n ds_data = ds.array(x=x, block_size=(bn, bm))\n data = ds.array(x=x, block_size=(bn, bm))\n data.make_persistent(name=\"hecuba_dislib.test_array\")\n\n indices_lists = [([0, 5], [0, 5])]\n\n for rows, cols in indices_lists:\n got = data[rows].collect()\n expected = ds_data[rows].collect()\n self.assertTrue(equal(got, expected))\n\n # Try slicing with irregular array\n x = ds_data[1:, 1:]\n data_sliced = data[1:, 1:]\n\n for rows, cols in indices_lists:\n got = data_sliced[rows].collect()\n expected = x[rows].collect()\n\n self.assertTrue(equal(got, expected))",
"def test_slice(setup):\n assert isinstance(setup[\"sliced\"], da.Array)",
"def is_horizontal_win(self, checker):\n for row in range(self.height):\n for col in range(self.width - 3):\n # Check if the next four columns in this row\n # contain the specified checker.\n if self.slots[row][col] == checker and \\\n self.slots[row][col + 1] == checker and \\\n self.slots[row][col + 2] == checker and \\\n self.slots[row][col + 3] == checker:\n return True\n # if we make it here, there were no horizontal wins\n return False",
"def test_large(self):\n tensor = index_tensor([4, 1, 5, 3])\n for axis0 in range(0, 4):\n axis1 = 0\n for axis2 in range(0, 5):\n for axis3 in range(0, 3):\n self.assert_tensor_equal(\n tensor[axis0][axis1][axis2][axis3],\n [axis0, axis1, axis2, axis3])",
"def test_get_rows_square_bottom(self):\n\n y_dimension = 9\n test_array = self.numpy_it(y_dimension, y_dimension)\n increment = 3\n position = y_dimension - increment\n base = 0\n step = 1\n self.assertEqual(Tile.get_rows(test_array, position, y_dimension, increment, base, step)[2].tolist(),\n [[54, 55, 56, 57, 58, 59, 60, 61, 62],\n [63, 64, 65, 66, 67, 68, 69, 70, 71],\n [72, 73, 74, 75, 76, 77, 78, 79, 80]])\n self.assertEqual((Tile.get_rows(test_array, position, y_dimension, increment, base, step)[0],\n Tile.get_rows(test_array, position, y_dimension, increment, base, step)[1]),\n (6, 9))",
"def test_slice_ndarray(setup):\n assert isinstance(setup[\"sliced\"].compute(), np.ndarray)",
"def is_equal_along_dimension(x,dim):\n return np.all([np.array_equal(slice_select(x,dim,i), slice_select(x,dim,i-1)) for i in range(x.shape[dim])])",
"def checkHorizontalWin(self, param):\n for row in self.__l:\n for i in range(15 - 5):\n if row[i:i + 5] == [param] * 5:\n return True\n\n return False",
"def test_sliding_window_for_data_with_outer_dimensions(self):\n data = constant_op.constant([[1, 1, 1], [2, 2, 1], [3, 3, 1], [4, 4, 1],\n [5, 5, 1]])\n\n width = 2\n axis = -2\n\n expected_result = constant_op.constant([[[1, 1, 1], [2, 2, 1]],\n [[2, 2, 1], [3, 3, 1]],\n [[3, 3, 1], [4, 4, 1]],\n [[4, 4, 1], [5, 5, 1]]])\n self._test_sliding_window_op(expected_result, data, width, axis)",
"def test_one_row(self):\n self.assertGreaterEqual(read_dataframe().shape[0], 1)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Initialize a new race registry with no runners entered.
|
def __init__(self) -> None:
self.groups = {}
self.runners = {}
for c in Registry.CATEGORIES:
self.groups[c] = []
|
[
"def initialize(self):\n if self.initialized:\n raise RuntimeError(\"The registry is already initialized\")\n\n for specifier, serializer in self._prematurely.items():\n model = apps.get_model(specifier)\n self._serializers[model] = self._get_serializer(model, serializer)\n\n self._initialized = True",
"def initialize(self):\n if self.__initialized:\n raise SimulatorError('Simulation has already been initialized')\n for sim_obj in self.simulation_objects.values():\n sim_obj.init_before_run()\n self.event_counts.clear()\n self.__initialized = True",
"def initialize(self, trainer):\n pass",
"def _initialize_agents(self):\n\n for agent in self.agents:\n agent.fill_with_binary()\n\n self.best_agent = copy.deepcopy(self.agents[0])",
"def __init__(self, race, name):\r\n self.race = race\r\n self.name = name",
"def __init__(self):\n self.game_handler = UniqueIDGenerator()\n self.player_handler = UniqueIDGenerator()\n self.rooms = {}",
"def initialize_rtree(self):\n self.tree = _RTree(self.initial_entities)",
"def seed_universe(self):\r\n self.add_beacon()",
"def reset():\n global _REGISTRY\n _REGISTRY.clear()",
"def agent_init(self, task_specification=None):\n # This is (for now) actually a dummy method to satisfy the\n # RLGlue interface. It is the programmer's job to check wether an\n # experiment fits the agent object.\n self.agent.reset()",
"def __init__(self, rooms, this_scheduler, test = False):\n if test:\n self.schedule = this_scheduler\n else:\n self.schedule = ref(this_scheduler)\n self.days = [structures.Day(rooms, day_code, self, test)\n for day_code in 'mtwrf']\n self.fitness = 0\n self.valid = True\n self.num_invalid = 0\n self.complete = True\n #Week's copy of courses\n self.sections = []\n self.constraints = {}",
"def __init__(self):\n this = _coin.new_SoEnvironment()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this",
"def test_registry_unnamed(registry: ExperimentRegistry):\n cls = make_experiment('UnnamedChild', always_set=False)\n assert cls not in registry.experiments.values()",
"def nr_initialize():\n global _L3Rebalance\n\n _L3Rebalance.set_state(L3_REBALANCE_STATE.DONE)\n\n if config.section_exists('l3agent-rebalance'):\n section = config.CONF['l3agent-rebalance']\n _nr_timer_interval = int(section.get('timer_interval', 10))\n _L3Rebalance.router_diff_threshold = \\\n int(section.get('router_diff_threshold', 3))\n _L3Rebalance.hold_off = int(section.get('hold_off', 3))\n if _L3Rebalance.router_diff_threshold < 1:\n DLOG.warn(\"Invalid setting for router_diff_threshold: %s, \"\n \"forcing to 1\" %\n _L3Rebalance.router_diff_threshold)\n _L3Rebalance.router_diff_threshold = 1\n if _nr_timer_interval < 1:\n DLOG.warn(\"Invalid setting for timer_interval: %s, forcing to 1\" %\n _nr_timer_interval)\n _nr_timer_interval = 1\n else:\n _nr_timer_interval = 10\n _L3Rebalance.router_diff_threshold = 3\n _L3Rebalance.hold_off = 3\n\n timers.timers_create_timer('nr', 1, _nr_timer_interval, _nr_timer)",
"def __init__(self, **kwargs: Any) -> None:\n self._scheduler = AsyncIOScheduler()\n self._schedules: List[ScheduledTask] = []",
"def __init__(self):\n #initialize the class \"roster\" to a list\n self.class_roster = list()",
"def __init__(self, initial_demands = []):\n self.explicitly_demanded = set()\n self.nodes = {}\n self.provided = set()\n self.parent_ptrs = {}\n for demand in initial_demands:\n self.add_new_demand(demand)",
"def initialize(self) -> None:\n if self._initialized:\n return\n if os.getpid() != self._master_pid:\n raise RuntimeError(\n \"TimeSummary must be initialized in the same process as the \"\n \"one created the instance. Please call initialize() in the \"\n \"main process.\")\n self._cpu_worker.initialize()\n if self._cuda_worker is not None:\n self._cuda_worker.initialize()\n self._initialized = True",
"def __init__(self):\n # Create a initialized state map where all tiles are assumed unknown\n self._state = [TileState.Unknown] * StateMap.TILE_NUMBER\n self._state.append(False) # isClaim bit\n self._state.append(False) # Claim action bit"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Register runner with email and category.
|
def register(self, email: str, category: str) -> None:
# remove the runner from all categories they are
# currently in.
for c in Registry.CATEGORIES:
if email in self.groups[c]:
self.groups[c].remove(email)
self.groups[category].append(email)
self.groups[category].sort()
self.runners[email] = category
|
[
"def add_runner(self, runner):\n category = int(runner.get_time() / 10)\n if runner not in self._runners:\n self._runners.append(runner)\n\n if runner.get_time() < 40:\n if category not in self._categories:\n self._categories[category] = []\n self._categories[category].append(runner)\n else:\n self._categories[int(runner.get_time() / 10)].append(runner)\n else:\n if 4 not in self._categories:\n self._categories[4] = []\n self._categories[4].append(runner)",
"def register_experiment_runner(self, runner_key, runner_class):\n if runner_key in self.experiment_runners:\n raise PluginError('ExperimentRunner already registered for '\n 'key {}'.format(runner_key))\n self.experiment_runners[runner_key] = runner_class",
"def test_user_add_email(self):\n pass",
"def createSubscribersForTask(self):\n for i in range(4):\n email = 'subscriber%s@example.com' % str(i)\n subscriber = GCIProfileHelper(self.gci, self.dev_test)\n subscriber.createOtherUser(email)\n subscriber.createProfile()\n self.task.subscribers.append(subscriber.profile.key())\n self.task.put()",
"def test_can_add_caretaker_by_email(self):\n\n self.create_legacy()\n\n data = {\n 'caretaker': 'Test email3'\n }\n\n response = self.client.put('/legacy/1/caretaker', data=json.dumps(data),\n content_type='application/json')\n\n self.assert200(response)",
"def create_campaign_member():",
"async def register(ctx):\r\n if ctx.guild.id not in bot.hunger_games:\r\n return await ctx.respond(\"There is no active hunger games on going at the moment in this server. Please ask an administrator to setup a game.\", ephemeral=True)\r\n game = bot.hunger_games[ctx.guild.id]\r\n result = await game.add_contestant(ctx.author)\r\n if result:\r\n await ctx.respond(\"You have registered as tribute. May the odds be with you.\")\r\n else:\r\n await ctx.respond(\"You are already registered as a tribute.\", ephemeral=True)",
"def test_create_run_as_user(self):\n pass",
"def test_email_subscribe(self):\n self.user.subscribe_to_replied_threads = UserModel.SUBSCRIBE_ALL\n self.user.save()\n\n response = self.client.post(\n self.api_link, data={\n 'post': \"This is test response!\",\n }\n )\n self.assertEqual(response.status_code, 200)\n\n # user has subscribed to thread\n subscription = self.user.subscription_set.get(thread=self.thread)\n\n self.assertEqual(subscription.category_id, self.category.id)\n self.assertTrue(subscription.send_email)",
"def addPerformer(self, name, address, port):\n self.performers[name] = (address, port)",
"def auto_enroll(sender, **kwargs):\n\n created = kwargs.get('created', False)\n user = kwargs['instance']\n\n key = \"omit-default-selections-{}\".format(slugify(user.email))\n skip = bool(cache.get(key))\n\n if created and user and not skip:\n _enroll_user_in_default_categories.delay(kwargs['instance'])",
"def register(self, runner_class: type[RunnerBase]) -> None:\n self.frameworks[runner_class.name] = runner_class",
"def remove_runner(self, email):\n runner = self.get_runner(email)\n\n if runner is not None:\n self._runners.pop(runner)\n self.get_category(runner.time).pop(runner)",
"def register_report(\n name,\n category,\n report_class,\n options_class,\n modes,\n translated_name,\n status=_(\"Unknown\"),\n description=_unavailable,\n author_name=_(\"Unknown\"),\n author_email=_(\"Unknown\"),\n unsupported=False,\n require_active=True,\n ):\n (junk,standalone_task) = divmod(modes,2**MODE_GUI)\n if standalone_task:\n _register_standalone(report_class,options_class,translated_name,\n name,category,description,\n status,author_name,author_email,unsupported,\n require_active)\n\n (junk,book_item_task) = divmod(modes-standalone_task,2**MODE_BKI)\n if book_item_task:\n book_item_category = book_categories[category]\n register_book_item(translated_name,book_item_category,\n report_class,options_class,name,unsupported,\n require_active)\n\n (junk,command_line_task) = divmod(modes-standalone_task-book_item_task,\n 2**MODE_CLI)\n if command_line_task:\n _register_cl_report(name,category,report_class,options_class,\n translated_name,unsupported, require_active)",
"def test_add_user(self):\n request = self.factory.get(\n '/feeder/subscribe_user_to_feed/?username=Mohit&feedname=Yoga')\n response = subscribe_user_to_feed(request)\n self.assertEqual(response.content, 'Success!')\n\n request = self.factory.get('/feeder/get_user_feeds/?username=Mohit')\n response = get_user_feeds(request)\n self.assertEqual(response.content, 'Yoga')",
"def register_provider(args):\n if len(args) == 0:\n click.echo(\"Usage: mephisto register <provider_type> arg1=value arg2=value\")\n return\n\n from mephisto.abstractions.databases.local_database import LocalMephistoDB\n from mephisto.operations.registry import get_crowd_provider_from_type\n from mephisto.operations.utils import parse_arg_dict, get_extra_argument_dicts\n\n provider_type, requester_args = args[0], args[1:]\n args_dict = dict(arg.split(\"=\", 1) for arg in requester_args)\n\n crowd_provider = get_crowd_provider_from_type(provider_type)\n RequesterClass = crowd_provider.RequesterClass\n\n if len(requester_args) == 0:\n from tabulate import tabulate\n\n params = get_extra_argument_dicts(RequesterClass)\n for param in params:\n click.echo(param[\"desc\"])\n click.echo(tabulate(param[\"args\"].values(), headers=\"keys\"))\n return\n\n try:\n parsed_options = parse_arg_dict(RequesterClass, args_dict)\n except Exception as e:\n click.echo(str(e))\n\n if parsed_options.name is None:\n click.echo(\"No name was specified for the requester.\")\n\n db = LocalMephistoDB()\n requesters = db.find_requesters(requester_name=parsed_options.name)\n if len(requesters) == 0:\n requester = RequesterClass.new(db, parsed_options.name)\n else:\n requester = requesters[0]\n try:\n requester.register(parsed_options)\n click.echo(\"Registered successfully.\")\n except Exception as e:\n click.echo(str(e))",
"def created(self, group, **payload):\n pass",
"def test_email_required_registration(self):\n errorMsg = 'The email address field is required.'\n rv = self.register('mister_test', '', 'password', 'password')\n assert errorMsg in rv.data",
"def test_create_subject(self):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
register the student into ClassList
|
def register(self, student: list) -> None:
self.students[student[0]] = student[1:]
|
[
"def addStudent(self,student):\n self.__classlist.append(student)",
"def add_student_to_class(self, student, start_date=None):\n #student_key = student.key()\n #try:\n #self.students_list.index(student_key)\n #except:\n #self.students_list.append(student_key)\n \n if (not start_date):\n start_date = self.start_date\n students_class = student.assign_class_session(self, self.subject,\n start_date)\n if (students_class):\n #This is set True at the first student assignment and never\n #set False again\n self.students_have_been_assigned = True\n return students_class",
"def add(self, student):\n StudentList.add(self, student)\n self.writeToFile()",
"def addStudent(self, student):\n\t\tself.__students.append(student)",
"def add_student(self, student):\n if student:\n if isinstance(student, list):\n self.students.extend(student)\n for stud in student:\n self.quiz_scoreboard[stud.uid] = 0.00\n self.submission_type[stud.uid] = None\n else:\n self.students.append(student)\n self.quiz_scoreboard[student.uid] = 0.00\n self.submission_type[student.uid] = None",
"def updata_student_info(self,student):\n for item in self.__stu_list:\n if student.id == item.id:\n item.name = student.name\n item.score = student.score\n item.sex = student.sex\n return True\n return False",
"def add_students_to_class(self, student_list, start_date=None):\n if (not start_date):\n start_date = self.start_date\n task_name = \"AddStudentsToClass: \" + self.name\n function = \"SchoolDB.models.ClassSession.static_add_student_to_class\"\n function_args = \"class_session_keystring= '%s'\" %str(self.key())\n if start_date:\n function_args = \"%s, start_date='%d'\" %(function_args, \n start_date.toordinal())\n instance_keylist = [ str(student_key) for student_key in student_list]\n task = SchoolDB.assistant_classes.TaskGenerator(task_name=task_name,\n function=function, function_args=function_args,\n instance_keylist=instance_keylist, \n instances_per_task=15)\n return task.queue_tasks()",
"def singlerun_add_students_to_class(self, student_list, start_date=None):\n if (not start_date):\n start_date = None\n else:\n start_date = date.fromordinal(start_date)\n students_class_records = []\n for student in student_list:\n students_class = self.add_student_to_class(student, start_date)\n if (students_class):\n students_class_records.append(students_class)\n return students_class_records",
"def create_student(name, StudentList):\r\n new_student = Student(name)\r\n StudentList.append(new_student)\r\n return new_student",
"def load_student_class_instance():\n tablename = 'student_class_instance'\n data = jsonify_seed_data(tablename)\n\n for item in data[tablename]:\n new_item = StudentClassInstance(\n student_id=item['student_id'],\n class_instance_id=item['class_instance_id'],\n attendance=item['attendance']\n )\n db.session.add(new_item)\n db.session.commit()",
"def add_student(student_id, first_name, last_name, password, email_address, course_list, view_url, pic_url):\n new_User = User.objects.get_or_create(email = email_address)[0]\n new_User.first_name = first_name\n new_User.last_name = last_name\n new_User.password = password\n new_User.username = username\n new_User.save()\n\n new_student = Student.objects.get_or_create(user = new_User)[0] #get_or_create method returns a tuple, where element 0 is the object\n new_student.course_list = course_list\n\n new_student.save()\n\n return new_student",
"def __init__(self, courseName):\n\t\tself.__courseName = courseName\n\t\tself.__students = []",
"def add(self, *students):\r\n if len(self.__students) + len(students) > 20:\r\n raise ValueError(\"the number of people in group can't be more than 20\")\r\n for student in students:\r\n if not isinstance(student, Student):\r\n raise TypeError\r\n for registered in self.__students:\r\n if student.name == registered.name and student.surname == registered.surname:\r\n raise ValueError(\"this student has been registered already\")\r\n self.__students.append(student)",
"def add_student(self, student_name: str) -> None:\n if len(self.enrolled_students) < self.course_capacity and student_name \\\n not in self.enrolled_students:\n self.enrolled_students.append(student_name)\n self.enrolled_students.sort()\n elif len(self.enrolled_students) >= self.course_capacity and \\\n student_name not in self.waitlist:\n self.waitlist.append(student_name)\n else:\n print('This student is already in this course')",
"def add_student_to_school(student_id, school_id):\n school = get_school_using_id(school_id)\n school.student_list.append(student_id)\n school.save()\n return (True, \"\")",
"def set_student_attributes(s, t):\n l = len(list_students)+1 #!helper\n s.gpa = random.randint(1, 100)\n s.grade_level = t.grade_level #!not DRY\n s.name = ('Student'+ str(l+1) + '_G_' + str(s.grade_level)) #come up with a better naming\n s.current_teacher = t\n # print('This is the new Student:', s) #comment\n # print(s.current_teacher.name, len(s.current_teacher.students))\n # print('No of students for ', s.current_teacher.name, ' at ', len(s.current_teacher.students))\n return s",
"def __callAddStudent(self):\r\n idSubject=input(\" Give ID:\")\r\n name=input(\" Give name:\") \r\n try:\r\n st=self.__lista.createStudent(idSubject, name)\r\n self.__lista.addStudent(st)\r\n print(\"Student \"+st.getName()+\" has been successfully added.\")\r\n except InputError as ex:\r\n print(ex.getErrors())\r\n except IdError as ex:\r\n print(ex.getErrors())\r\n except DuplicateDataError as ex:\r\n print(ex.getErrors())\r\n except RepositoryError() as ex:\r\n print(ex.getErrors())",
"def load_student():\n tablename = 'student'\n data = jsonify_seed_data(tablename)\n\n for item in data[tablename]:\n new_item = Student(\n name_first=item['name_first'],\n name_last=item['name_last'],\n rank_stripes=item['rank_stripes'],\n rank_type=item['rank_type'],\n program=item['program'],\n )\n db.session.add(new_item)\n db.session.commit()",
"def __init__(self, studentID, name):\n self.__studentID = studentID\n self.__name = name"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return the average score of the last n times.
|
def calculate_average(self, n: int) -> float:
    # Average the n most recent scores; assumes the latest scores sit at the
    # front of self.history (index 0 onwards), as in the original loop.
    total = 0
    for i in range(n):
        total += self.history[i]
    return total / n
|
[
"def average_six(n):\n total=0\n for i in range(n):\n total=total+(six_heads())\n return (total/n)",
"def average_loss(stats: Stats, n: int = 10) -> float:\n loss = pd.DataFrame(stats[\"loss\"])[\"singleton\"]\n return loss.iloc[-n:].mean()",
"def get_mean_score_nth_test():\n cursor = connection.cursor()\n cursor.execute(\"\"\"\n SELECT ur.user_id, AVG(ur.is_correct)\n FROM drill_testset_responses AS tsr\n INNER JOIN (\n SELECT r.user_id, r.id AS response_id, o.is_correct\n FROM drill_response AS r\n INNER JOIN drill_multiplechoiceresponse AS mcr\n ON r.id = mcr.response_ptr_id\n INNER JOIN drill_multiplechoiceoption AS o\n ON o.id = mcr.option_id\n ) AS ur\n ON tsr.multiplechoiceresponse_id = ur.response_id\n GROUP BY tsr.testset_id\n ORDER BY ur.user_id\n \"\"\")\n ignore_users = _get_user_ignore_set()\n data = []\n for user_id, rows in groupby(cursor.fetchall(), lambda r: r[0]):\n if user_id in ignore_users:\n continue\n for i, (_user_id, score) in enumerate(rows):\n data.append((i + 1, float(score)))\n\n data.sort()\n return data",
"def get_avg(t):\n l = []\n for pl in range(n):\n l.append(markov[pl][t])\n expr = l[0]\n for i in range(1,n):\n expr = expr+l[i]\n return(1/n*expr)",
"def get_avg(self):\n\t\treturn self.sum / max(len(self.window), 1)",
"def compute_average(n):\n data = []\n start = time() # start time in secs\n for k in range(n):\n data.append(None)\n end = time() # end time\n return (end - start) / n # compute average time",
"def negvalenceavg (score):\n ng = []\n for n in score:\n ng.append(n['neg'])\n return sum(ng) / len (ng)",
"def calc_algo_mean_score(scores):\n algo_total_score = 0\n for score in scores:\n algo_total_score += score\n return algo_total_score / len(scores)",
"def get_average(self, last_n=None):\n if not self.has_samples():\n msg = \"get_average() cannot be called when no samples exist\"\n raise IllegalStateError(msg)\n\n samples = self.get_samples(last_n)\n return reduce(add, samples) / float(len(samples))",
"def _get_average_best_score(self):\n return mean([x['best_score'] for x in self._results])",
"def avg(rank):\n return np.ones((rank, rank)) / (rank ** 2)",
"def margin_of_error_avg_score(n,s,t=1.96):\n return t*s/np.sqrt(n)",
"def get_rank_avg(group, scorerank):\n \n ranksum=0\n for score in (group):\n ranksum+=scorerank[score]\n return ranksum/len(group)",
"def _get_average_best_scores(self):\n return numpy.mean([x['best_scores'] for x in self.results], axis=0)",
"def avg_and_total(iterable):\n\ttotal_count = 0\n\ttotal_score = 0.0\n\n\tfor item in iterable:\n\t\t(score,count) = item\n\t\ttotal_score += score * count\n\t\ttotal_count += count\n\n\treturn total_score / total_count, total_count",
"def mean_score(self):\n pass",
"def macro_average(scores):\n n = len(scores)\n ave_p = sum(s.precision for s in scores) / n\n ave_r = sum(s.recall for s in scores) / n\n return Score(ave_p, ave_r)",
"def old_score(results):\r\n\r\n return 5*results[\"win\"] - 2*results[\"failure\"]",
"def getScore(self):\n\t\ttempScore = 0;\n\t\tcount = [0,0,0,0,0,0]\n\t\tfor k in range(len(self.savedDice)):\n\t\t\tcount[self.savedDice[k]-1] += 1\n\t\tpairs = 0\n\t\tstraight = 0\n\t\tfor j in range(6):\n\t\t\tif count[j] >= 3:\n\t\t\t\ttemp = ((j+1)*100)*(count[j]-2)\n\t\t\t\tif j == 0: #multiple ones get you thousands instead of hundreds.\n\t\t\t\t\ttemp *= 10\n\t\t\t\ttempScore += temp\n\t\t\telif count[j] == 2:\n\t\t\t\tpairs += 1\n\t\t\t\tif j == 0:\n\t\t\t\t\ttempScore += 200\n\t\t\t\telif j == 4:\n\t\t\t\t\ttempScore += 100\n\t\t\telif count[j] == 1:\n\t\t\t\tstraight += 1\n\t\t\t\tif j == 0:\n\t\t\t\t\ttempScore += 100\n\t\t\t\telif j == 4:\n\t\t\t\t\ttempScore += 50\n\t\t\telse: #we have < 3\n\t\t\t\tif j == 0:\n\t\t\t\t\ttempScore += count[j]*100\n\t\t\t\telif j == 4:\n\t\t\t\t\ttempScore += count[j]*50\n\t\tif pairs == 3:\n\t\t\ttempScore += 750\n\t\t\tif count[0] == 2:\n\t\t\t\ttempScore -= 200\n\t\t\tif count[4] == 2:\n\t\t\t\ttempScore -= 100\n\t\telif straight == 6:\n\t\t\ttempScore += 1500 - 150 #150 was already added because of the single 1, and 5\n\t\treturn tempScore"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Modify the price with its discount
|
def discount(self, discount: float) -> None:
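        # Assumes `discount` is the multiplicative factor to keep, e.g. 0.9 for a
        # 10% reduction; pass (1 - rate) here if you have a discount rate instead.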
self.price = self.price * discount
|
[
"def discount_update(self, discount, actor):\n\n finance = self.cart['finance']\n try:\n # validate discount value\n try:\n discount = Decimal(discount)\n except:\n discount = Decimal(0)\n\n subtotal = finance['prod_cost'] + finance['shipping_cost']\n if discount > subtotal:\n discount = subtotal\n if discount < 0:\n discount = Decimal(0)\n\n # we store and display discounts as a negative value\n discount *= -1\n c = get_cursor()\n c.execute(\"\"\"\n update cart\n set discount_cost = %s\n where cart_id = %s\"\"\",\n (discount, self.cart['cart_id']))\n finance['discount_cost'] = discount\n self.recompute()\n self.log(\"Discount set to {}\".format(discount), actor)\n except Exception as e:\n import traceback\n traceback.print_exc()\n print e.__class__.__name__ + \": \" + str(e)\n raise DbError(\"Internal error\")\n return",
"def reduce_price(self, reduction):\r\n self._price = self._price - reduction",
"def apply_discount(price, discount):\n return (money_to_float(price)\n .fold(lambda cost:\n (percent_to_float(discount)\n .fold(lambda savings: cost * (1 - savings)))))",
"def you_save(self):\n if self.discount_price:\n return self.price - self.discount_price\n else:\n return self.price - self.calc_discount_price()",
"def price(self):\n return self._price * (100 - self.discount) / 100",
"def final_price(self):\n return self.price - self.price * self.discount",
"def get_final_price(price, discount_percentage=10):\n return price-( price* discount_percentage / 100)",
"def morning_discount(order: \"Order\") -> None:\n setattr(order, \"discount\", 0.5)",
"def discount_price(product, discount):\n if config_value('TAX', 'DEFAULT_VIEW_TAX'):\n return taxed_discount_price(product, discount)\n else:\n return untaxed_discount_price(product, discount)",
"def set_discount(self, discount):\n self._discount = discount",
"def update_price(self, company: Company):\n pass",
"def apply_to_ticket(self, ticket):\n if self.discount_type == 'Fixed Price':\n ticket.price = self.discount_value\n elif self.discount_type == 'Fixed Discount':\n ticket.price = ticket.price - self.discount_value\n else:\n ticket.price = ticket.price * (100 - self.discount_value) / 100\n\n ticket.add_note(\n 'Used voucher {0}/{1}'.format(self.object_id, self.code)\n )\n\n return ticket",
"def setPriceDozen(self,price):\n self.priceDozen=float(price)",
"def update_price(origin_price: float, price: float):\n return (get_current_price() / origin_price) * price",
"def apply_percent_coupon(self):\r\n return self.price - self.price*self.coupon.percent_amount",
"def fuelPrice(litres, price):\n if litres < 2:\n discount = 0\n elif litres < 4:\n discount = .5*litres\n elif litres < 6:\n discount = .10*litres\n elif litres < 8:\n discount = .15*litres\n elif litres < 10:\n discount = .20*litres\n else:\n discount = .25*litres\n return round(litres*price - discount, 2)",
"def taxed_discount_price(product, discount):\n price = untaxed_discount_price(product, discount)\n taxer = satchmo_tax._get_taxprocessor()\n return price + taxer.by_price(product.taxClass, price)",
"def percent_price_reduction(change):\n \n upcoming_price_changes(change)\n\n # TODO do you wish to continue?\n\n sql_update = \"\"\"\n update `tabItem Price` ip\n \n left join `tabItem` it\n on ip.item_code = it.item_code\n \n set ip.price_list_rate = ip.price_list_rate + (ip.price_list_rate * %s / 100.0)\n\n where ip.selling = 1\n and it.ebay_id REGEXP '[0-9]'\n \n and it.modified < now() - interval 10 day\n \n and ((it.standard_rate >25 and it.delivery_type = 'Standard Parcel')\n or (it.standard_rate >75 and it.delivery_type = 'Pallet'))\n and (select count(sii.name) from `tabSales Invoice Item` sii where sii.item_code = it.item_code and sii.docstatus=1) = 0\n \n \"\"\"%(change)\n\n frappe.db.sql(sql_update, auto_commit=True)\n \n sql_update_it = \"\"\"\n update `tabItem` it\n\n set \n it.standard_rate = it.standard_rate + (it.standard_rate * %s / 100.0),\n it.vat_inclusive_price = it.vat_inclusive_price + (it.vat_inclusive_price * %s / 100.0)\n \n where \n it.ebay_id REGEXP '[0-9]'\n and it.modified < now() - interval 30 day\n \n and ((it.standard_rate >25 and it.delivery_type = 'Standard Parcel')\n or (it.standard_rate >75 and it.delivery_type = 'Pallet'))\n and (select count(sii.name) from `tabSales Invoice Item` sii where sii.item_code = it.item_code and sii.docstatus=1) = 0\n \n \"\"\"%(change, change)\n\n frappe.db.sql(sql_update_it, auto_commit=True)\n\n print(\"Price reduction completed\")",
"def __init__(self, price, discount_strategy = None):\n \n self.price = price\n self.discount_strategy = discount_strategy"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Spins off a process that runs as a daemon.
|
def StartDaemon(self):
# To spin off the process, use what seems to be the "standard" way to spin
# off daemons: fork a child process, make it the session and process group
# leader, then fork it again so that the actual daemon process is no longer
# a session leader.
#
# This is a very simplified (with significantly reduced features) version of
# the python-daemon library at https://pypi.python.org/pypi/python-daemon/.
pid = os.fork()
logging.debug('Forked new process, pid= {0}'.format(pid))
if pid == 0:
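      # setsid() makes this child the leader of a new session and process group,
      # detaching it from the controlling terminal.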
os.setsid()
pid = os.fork()
if pid == 0:
os.chdir('/')
os.umask(0)
else:
# The use of os._exit here is recommended for parents of a daemon
# process to avoid issues with running the cleanup tasks that
# sys.exit() runs by preventing issues from the cleanup being run
# more than once when the two parents exit and later when the daemon
# exits.
os._exit(0)
else:
os._exit(0)
# Set up pidfile and signal handlers.
pidf = open(self.pidfile, 'w')
pidf.write(str(os.getpid()))
pidf.close()
    logging.debug('Registering SIGTERM handler for clean daemon shutdown')
signal.signal(signal.SIGTERM, self.ShutdownDaemon)
self.accounts_manager.Main()
|
[
"def daemonize_start():\n\n if _detach:\n if _fork_and_wait_for_startup() > 0:\n # Running in parent process.\n sys.exit(0)\n\n if sys.platform != 'win32':\n # Running in daemon or monitor process.\n os.setsid()\n\n if _monitor:\n saved_daemonize_fd = _daemonize_fd\n daemon_pid = _fork_and_wait_for_startup()\n if daemon_pid > 0:\n # Running in monitor process.\n _fork_notify_startup(saved_daemonize_fd)\n if sys.platform != 'win32':\n _close_standard_fds()\n _monitor_daemon(daemon_pid)\n # Running in daemon process\n\n if _pidfile:\n _make_pidfile()",
"def post_fork_child(self):\n spawn_control_env = {\n DAEMON_ENTRYPOINT: f\"{self._daemon_entrypoint}:launch_new_pantsd_instance\",\n # The daemon should run under the same sys.path as us; so we ensure\n # this. NB: It will scrub PYTHONPATH once started to avoid infecting\n # its own unrelated subprocesses.\n \"PYTHONPATH\": os.pathsep.join(sys.path),\n }\n exec_env = {**os.environ, **spawn_control_env}\n\n # Pass all of sys.argv so that we can proxy arg flags e.g. `-ldebug`.\n cmd = [sys.executable] + sys.argv\n\n spawn_control_env_vars = \" \".join(f\"{k}={v}\" for k, v in spawn_control_env.items())\n cmd_line = \" \".join(cmd)\n logger.debug(f\"pantsd command is: {spawn_control_env_vars} {cmd_line}\")\n\n # TODO: Improve error handling on launch failures.\n os.spawnve(os.P_NOWAIT, sys.executable, cmd, env=exec_env)",
"def start_daemon():\n attach_madz()\n\n import madz.live_script as madz\n\n daemon = madz.Daemon(**madz_config)\n print(\"Configuring Server...\")\n daemon.configure()\n print(\"Starting Server\")\n daemon.start()",
"def start_daemon():\n _agent = create_agent()\n\n # Setting up MockDaemon and starting process for testing\n _daemon = MockDaemon(_agent)\n _daemon.start()",
"def main():\n if os.environ.get('INVOCATOR') == \"systemd\":\n logger.info('Starting daemon')\n start_daemon()\n else:\n logger.error('Cannot invoke daemon from command line! Use systemd controls.')\n exit(1)",
"def daemon():\n click.echo('Waiting for input..')\n run_daemon()",
"def daemon(method: str):\n return _run_speedify_cmd([\"daemon\", method])",
"def daemonize(self):\n\n try: \n pid = os.fork() \n if pid > 0:\n # exit first parent\n sys.exit(0) \n except OSError as err: \n sys.stderr.write('fork #1 failed: {0}\\n'.format(err))\n sys.exit(1)\n \n # decouple from parent environment\n os.chdir('/') \n os.setsid() \n os.umask(0) \n \n # do second fork\n try: \n pid = os.fork() \n if pid > 0:\n\n # exit from second parent\n sys.exit(0) \n except OSError as err: \n sys.stderr.write('fork #2 failed: {0}\\n'.format(err))\n sys.exit(1) \n \n # redirect standard file descriptors\n sys.stdout.flush()\n sys.stderr.flush()\n si = open(os.devnull, 'r')\n so = open(self.logfile, 'a+')\n se = open(self.logfile, 'a+')\n\n os.dup2(si.fileno(), sys.stdin.fileno())\n os.dup2(so.fileno(), sys.stdout.fileno())\n os.dup2(se.fileno(), sys.stderr.fileno())\n \n # write pidfile\n atexit.register(self.delpid)\n\n pid = str(os.getpid())\n with open(self.pidfile,'w+') as f:\n f.write(pid + '\\n')\n logger.info(\"Created %s\", self.pidfile)",
"def ensure_daemon():\n pass",
"def DaemonStarting(self):\n pass",
"def ensure_daemon():\n if not tqdaemon.status():\n log.info(\"Attempting to start missing tqdaemon.\")\n p = multiprocessing.Process(target=tqdaemon.daemonize)\n p.start()\n p.join(1)\n time.sleep(0.1)",
"def _background(pidfile, stdin='/dev/null', stdout='/dev/null',\n stderr='/dev/null'): # pragma: no cover\n def _fork_and_exit_parent(errmsg, wait=False, write=False):\n try:\n pid = os.fork()\n if pid > 0:\n if write: # write PID of child process to `pidfile`\n tmp = pidfile + '.tmp'\n with open(tmp, 'w') as fp:\n fp.write(str(pid))\n os.rename(tmp, pidfile)\n if wait: # wait for child process to exit\n os.waitpid(pid, 0)\n os._exit(0)\n except OSError as err:\n _log().critical('%s: (%d) %s', errmsg, err.errno, err.strerror)\n raise err\n\n # Do first fork and wait for second fork to finish.\n _fork_and_exit_parent('fork #1 failed', wait=True)\n\n # Decouple from parent environment.\n os.chdir(wf().workflowdir)\n os.setsid()\n\n # Do second fork and write PID to pidfile.\n _fork_and_exit_parent('fork #2 failed', write=True)\n\n # Now I am a daemon!\n # Redirect standard file descriptors.\n si = open(stdin, 'rb', 0)\n so = open(stdout, 'ab+', 0)\n se = open(stderr, 'ab+', 0)\n if hasattr(sys.stdin, 'fileno'):\n os.dup2(si.fileno(), sys.stdin.fileno())\n if hasattr(sys.stdout, 'fileno'):\n os.dup2(so.fileno(), sys.stdout.fileno())\n if hasattr(sys.stderr, 'fileno'):\n os.dup2(se.fileno(), sys.stderr.fileno())",
"def cmd_monitor_daemon( args):\n\tconfig.daemon = True\n\tcmd_monitor( args)",
"def start(self):\n\n #Check to see if the pid file exists to see if the daemon is already running\n proc_id = None\n try:\n with open(self._pid_file, 'r') as pid_f:\n proc_id_str = pid_f.read().strip()\n proc_id = int(proc_id_str)\n\n # If we found a PID file but the process in the PID file does not exists,\n # then we are most likely reading a stale PID file. Go ahead and startup\n # a new instance of the daemon\n if not self.process_exists(proc_id):\n os.remove(self._pid_file)\n proc_id = None\n\n except IOError:\n proc_id = None\n\n if proc_id != None:\n log_message(\"The 'Skate Flair Service' was already running.\")\n sys.exit(1)\n\n # Start the daemon\n log_message(\"The 'Skate Flair Service' is about to become a daemon.\")\n self.daemonize()\n \n # Now that we are a daemon we need to switch over to using the logger\n open_logger()\n\n log_message(\"The 'Skate Flair Service' is now a daemon, lets run with it.\")\n self.run()\n\n return",
"def daemonize_daemon():\n _agent = create_agent()\n\n # Setting up MockDaemon and testing daemonize method\n _daemon = MockDaemon(_agent)\n _daemon._daemonize()",
"def service_start(svc):\n # TODO Change to subprocess\n system('systemctl daemon-reload')\n system('systemctl start {}'.format(svc))",
"def spwan_v2(self, command: str):\n try:\n pid = os.fork()\n if pid > 0:\n # parent process, return and keep running\n return\n except OSError:\n pass\n\n os.setsid()\n\n # do second fork\n try:\n pid = os.fork()\n if pid > 0:\n\n # loop = asyncio.get_running_loop()\n # loop.stop()\n\n # exit from second parent\n sys.exit(0)\n except OSError:\n pass\n\n # loop = asyncio.get_running_loop()\n # loop.stop()\n\n os.execl(\"/bin/sh\", \"sh\", \"-c\", command)\n\n os._exit(127)",
"def launch(self):\n self.processdev.start()\n pid = self.processdev.pid\n p = psutil.Process(self.processdev.pid)\n p.nice(psutil.HIGH_PRIORITY_CLASS)\n print(str(pid) + \"est le pid\")",
"def spawn_daemon(func):\n # do the UNIX double-fork magic, see Stevens' \"Advanced \n # Programming in the UNIX Environment\" for details (ISBN 0201563177)\n try: \n pid = os.fork() \n if pid > 0:\n # parent process, return and keep running\n return\n except OSError, e:\n print >>sys.stderr, \"fork #1 failed: %d (%s)\" % (e.errno, e.strerror) \n sys.exit(1)\n\n os.setsid()\n\n # do second fork\n try: \n pid = os.fork() \n if pid > 0:\n # exit from second parent\n sys.exit(0) \n except OSError, e: \n print >>sys.stderr, \"fork #2 failed: %d (%s)\" % (e.errno, e.strerror) \n sys.exit(1)\n\n # do stuff\n func()\n\n # all done\n os._exit(os.EX_OK)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create household_structures before dispatch, if they don't exist.
|
def pre_dispatch(self, plot, **kwargs):
survey = kwargs.get('survey', None)
if survey:
surveys = [survey]
else:
surveys = self.get_surveys()
HouseholdStructure = get_model('bcpp_household', 'householdstructure')
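        # Ensure a HouseholdStructure exists for every (household, survey) pair
        # on this plot; any missing ones are created below.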
for survey in surveys:
for household in plot.get_contained_households():
if not HouseholdStructure.objects.using(self.get_using_source()).filter(household=household, survey=survey).exists():
# create household_structure, signal takes care of adding the members
HouseholdStructure.objects.using(self.get_using_source()).create(household=household,
survey=survey,
member_count=0,
note='created on dispatch')
|
[
"def create_structures(dont_load_entities: bool = False) -> object:\n\n if not dont_load_entities:\n load_entities()\n\n default_webhooks = Webhook.objects.filter(is_default=True)\n for corporation in EveCorporationInfo.objects.all():\n EveEntity.objects.get_or_create(\n id=corporation.corporation_id,\n defaults={\n \"category\": EveEntity.Category.CORPORATION,\n \"name\": corporation.corporation_name,\n },\n )\n my_owner = Owner.objects.create(corporation=corporation)\n for x in default_webhooks:\n my_owner.webhooks.add(x)\n\n if int(corporation.corporation_id) in [2001, 2002]:\n alliance = EveAllianceInfo.objects.get(alliance_id=3001)\n corporation.alliance = alliance\n corporation.save()\n\n for character in EveCharacter.objects.all():\n EveEntity.objects.get_or_create(\n id=character.character_id,\n defaults={\n \"category\": EveEntity.Category.CHARACTER,\n \"name\": character.character_name,\n },\n )\n corporation = EveCorporationInfo.objects.get(\n corporation_id=character.corporation_id\n )\n if corporation.alliance:\n character.alliance_id = corporation.alliance.alliance_id\n character.alliance_name = corporation.alliance.alliance_name\n character.save()\n\n StructureTag.objects.get(name=\"tag_a\")\n tag_b = StructureTag.objects.get(name=\"tag_b\")\n tag_c = StructureTag.objects.get(name=\"tag_c\")\n Structure.objects.all().delete()\n for structure in entities_testdata[\"Structure\"]:\n x = structure.copy()\n x[\"last_updated_at\"] = now()\n x[\"owner\"] = Owner.objects.get(\n corporation__corporation_id=x[\"owner_corporation_id\"]\n )\n del x[\"owner_corporation_id\"]\n\n if \"services\" in x:\n del x[\"services\"]\n\n obj = Structure.objects.create(**x)\n if obj.state != 11:\n obj.state_timer_start = now() - timedelta(days=randrange(3) + 1)\n obj.state_timer_start = obj.state_timer_start + timedelta(\n days=randrange(4) + 1\n )\n\n if obj.id in [1000000000002, 1000000000003]:\n obj.tags.add(tag_c)\n\n if obj.id in [1000000000003]:\n obj.tags.add(tag_b)\n\n if \"services\" in structure:\n for service in structure[\"services\"]:\n StructureService.objects.create(\n structure=obj,\n name=service[\"name\"],\n state=StructureService.State.from_esi_name(service[\"state\"]),\n )\n obj.save()",
"def create(self):\n logger.info('Creating BlockPrizeStructures for %s' % self.block)\n\n for default_prize_structure in DefaultPrizeStructure.objects.filter(\n site_sport=self.block.site_sport):\n # If the BlockPrizeStructure for this DefaultPrizeStructure doesn't exist, create it.\n if not BlockPrizeStructure.objects.filter(\n block=self.block,\n prize_structure=default_prize_structure.prize_structure\n ).count() > 0:\n logger.info('Creating non-existant BlockPrizeStructure for %s - %s' % (\n default_prize_structure, self.block))\n try:\n bps = BlockPrizeStructure()\n bps.block = self.block\n bps.prize_structure = default_prize_structure.prize_structure\n bps.save()\n\n except Exception as e:\n # Couldn't create it, but maybe it already existed? Either way, we don't want\n # this to stop the entire contest pool creation process, so capture the error\n # and keep chugging.\n logger.error(e)\n client.captureException()\n pass",
"def household_structure(self):\n if not self._household_structure:\n if isinstance(self.dashboard_model_instance, HouseholdStructure):\n self._household_structure = self.dashboard_model_instance\n else:\n try:\n self._household_structure = HouseholdStructure.objects.get(pk=self.dashboard_id)\n except HouseholdStructure.DoesNotExist:\n pass\n# try:\n# self._household_structure = HouseholdStructure.objects.get(\n# household__pk=self.household.pk, survey=self.survey)\n# except (HouseholdStructure.DoesNotExist, AttributeError):\n# pass\n return self._household_structure",
"def _fetch_upwell_structures(self, token: Token) -> bool:\n from .eveuniverse import EsiNameLocalization\n\n corporation_id = self.corporation.corporation_id\n structures = list()\n try:\n # fetch all structures incl. localizations for services\n structures_w_lang = esi_fetch_with_localization(\n esi_path=\"Corporation.get_corporations_corporation_id_structures\",\n args={\"corporation_id\": corporation_id},\n token=token,\n languages=EsiNameLocalization.ESI_LANGUAGES,\n has_pages=True,\n )\n except OSError as ex:\n message_id = (\n f\"{__title__}-fetch_upwell_structures-{self.pk}-{type(ex).__name__}\"\n )\n title = f\"{__title__}: Failed to update upwell structures for {self}\"\n message = (\n f\"{self}: Failed to update upwell structures \"\n f\"from ESI for due to: {ex}\"\n )\n logger.exception(message)\n notify_admins_throttled(\n message_id=message_id,\n title=title,\n message=message,\n level=\"danger\",\n timeout=STRUCTURES_NOTIFY_THROTTLED_TIMEOUT,\n )\n return False\n\n is_ok = True\n # reduce data\n structures = self._compress_services_localization(\n structures_w_lang, EveUniverse.ESI_DEFAULT_LANGUAGE\n )\n\n # fetch additional information for structures\n if not structures:\n logger.info(\"%s: No Upwell structures retrieved from ESI\", self)\n else:\n logger.info(\n \"%s: Fetching additional infos for %d Upwell structures from ESI\",\n self,\n len(structures),\n )\n for structure in structures:\n try:\n structure_info = esi_fetch(\n \"Universe.get_universe_structures_structure_id\",\n args={\"structure_id\": structure[\"structure_id\"]},\n token=token,\n )\n structure[\"name\"] = Structure.extract_name_from_esi_respose(\n structure_info[\"name\"]\n )\n structure[\"position\"] = structure_info[\"position\"]\n except OSError as ex:\n message_id = (\n f\"{__title__}-fetch_upwell_structures-details-\"\n f\"{self.pk}-{type(ex).__name__}\"\n )\n title = (\n f\"{__title__}: Failed to update details for \"\n f\"structure from {self}\"\n )\n message = (\n f\"{self}: Failed to update details for structure \"\n f\"with ID {structure['structure_id']} from ESI due to: {ex}\"\n )\n logger.warning(message, exc_info=True)\n notify_admins_throttled(\n message_id=message_id,\n title=title,\n message=message,\n level=\"warning\",\n timeout=STRUCTURES_NOTIFY_THROTTLED_TIMEOUT,\n )\n structure[\"name\"] = \"(no data)\"\n is_ok = False\n\n logger.info(\n \"%s: Storing updates for %d upwell structures\",\n self,\n len(structures),\n )\n for structure in structures:\n Structure.objects.update_or_create_from_dict(structure, self)\n\n if STRUCTURES_DEVELOPER_MODE:\n self._store_raw_data(\"structures\", structures, corporation_id)\n\n self._remove_structures_not_returned_from_esi(\n structures_qs=self.structures.filter_upwell_structures(),\n new_structures=structures,\n )\n return is_ok",
"def make_structured_dict(self):\n self.struct_game_dict = {'AI': [], 'StaticObject': [], 'Player': [],\n 'MovableGameObject': [], 'NetworkedObject': [],\n 'Meeting': [], 'ClimableObject': [], 'Effect': [],\n 'AnimateSpriteObject':[]}\n\n for game_obj in self.game_objects.values():\n self.add_to_structured_list(game_obj)",
"def test_link_new_structures_to_existing(self): # pylint: disable=C0103\n # we essentially need to find a SprayDay object that was sent to us\n # as a new structure but yet it is sufficiently close to an existing\n # Household object. Unfortunately as at 3/12/2018 this does not exist\n # in any of our test fixtures and so we shall have to artificially\n # create one\n\n # first we load our test data\n self._load_fixtures()\n\n # next we find a suitable SprayDay object\n sprayday = SprayDay.objects.first()\n\n # lets change its details a little bit\n # change osmstructure data to make it look like a new structure\n if sprayday.data.get(f\"{settings.MSPRAY_UNIQUE_FIELD}:way:id\"):\n del sprayday.data[f\"{settings.MSPRAY_UNIQUE_FIELD}:way:id\"]\n sprayday.data[f\"{settings.MSPRAY_UNIQUE_FIELD}:node:id\"] = 1234\n\n sprayday.osmid = -1337 # new structure-like osmid\n sprayday.household = None # no household\n sprayday.save() # save it!\n\n # now link it\n link_new_structures_to_existing(\n target_area=sprayday.location, distance=5\n )\n\n sprayday.refresh_from_db()\n\n self.assertEqual(\n 1234,\n sprayday.data[f\"original_{settings.MSPRAY_UNIQUE_FIELD}:node:id\"],\n )\n self.assertIsNotNone(sprayday.household)\n self.assertIsNone(\n sprayday.data.get(f\"{settings.MSPRAY_UNIQUE_FIELD}:node:id\")\n )\n self.assertEqual(sprayday.geom, sprayday.household.geom)\n self.assertEqual(sprayday.bgeom, sprayday.household.bgeom)\n self.assertEqual(sprayday.osmid, sprayday.household.hh_id)\n self.assertEqual(\n sprayday.household.hh_id,\n sprayday.data[f\"{settings.MSPRAY_UNIQUE_FIELD}:way:id\"],\n )\n self.assertEqual(1, sprayday.spraypoint_set.all().count())",
"def make_House(name):\n if not name in (h.name for h in houses):\n new_house = House(name)\n houses.append(new_house)\n return new_house",
"def add_sections_if_necessary(self):\n query = SchoolDB.models.Section.all(keys_only = True)\n query.ancestor(self.school)\n section_keys = query.fetch(500)\n for section_key in section_keys:\n section_summary = self.get_section_summary(section_key)\n if (not section_summary):\n section_summary = self.create_section_summary(section_key)\n logging.info(\n \"Created section summary information for new section %s\")\\\n %unicode(Section.get(section_key))",
"def setupTablesInBlock(self):\n \n self.tablesInBlock = {}\n\n\n for mem in self.switch.memoryTypes:\n self.tablesInBlock[mem] = {}\n for sl in range(sum(self.switch.numBlocks[mem])):\n self.tablesInBlock[mem][sl] = {}\n pass\n pass\n\n # TODO(lav): Useless code?\n memIndex = 0\n mem = self.switch.memoryTypes[memIndex]\n blocks = self.getBlocksInStage(mem, 0)\n while len(blocks) == 0:\n memIndex += 1\n mem = self.switch.memoryTypes[memIndex]\n blocks = self.getBlocksInStage(mem, 0)\n pass\n pass",
"def _default_structure(self, release):\n # If web in procfile then honor it\n if release.build.procfile and 'web' in release.build.procfile:\n structure = {'web': 1}\n\n # if there is no SHA, assume a docker image is being promoted\n elif not release.build.sha:\n structure = {'cmd': 1}\n\n # if a dockerfile, assume docker workflow\n elif release.build.dockerfile:\n structure = {'cmd': 1}\n\n # if a procfile exists without a web entry and dockerfile, assume heroku workflow\n # and return empty structure as only web type needs to be created by default and\n # other types have to be manually scaled\n elif release.build.procfile and 'web' not in release.build.procfile:\n structure = {}\n\n # default to heroku workflow\n else:\n structure = {'web': 1}\n\n return structure",
"def vehicle_allocation(\n persons,\n households,\n vehicles,\n tours,\n tours_merged,\n network_los,\n chunk_size,\n trace_hh_id,\n):\n trace_label = \"vehicle_allocation\"\n model_settings_file_name = \"vehicle_allocation.yaml\"\n model_settings = config.read_model_settings(model_settings_file_name)\n\n logsum_column_name = model_settings.get(\"MODE_CHOICE_LOGSUM_COLUMN_NAME\")\n\n estimator = estimation.manager.begin_estimation(\"vehicle_allocation\")\n\n model_spec_raw = simulate.read_model_spec(file_name=model_settings[\"SPEC\"])\n coefficients_df = simulate.read_model_coefficients(model_settings)\n model_spec = simulate.eval_coefficients(model_spec_raw, coefficients_df, estimator)\n\n nest_spec = config.get_logit_model_settings(model_settings)\n constants = config.get_model_constants(model_settings)\n\n locals_dict = {}\n locals_dict.update(constants)\n locals_dict.update(coefficients_df)\n\n # ------ constructing alternatives from model spec and joining to choosers\n vehicles_wide = vehicles.to_frame().pivot_table(\n index=\"household_id\",\n columns=\"vehicle_num\",\n values=\"vehicle_type\",\n aggfunc=lambda x: \"\".join(x),\n )\n\n alts_from_spec = model_spec.columns\n # renaming vehicle numbers to alternative names in spec\n vehicle_alt_columns_dict = {}\n for veh_num in range(1, len(alts_from_spec)):\n vehicle_alt_columns_dict[veh_num] = alts_from_spec[veh_num - 1]\n vehicles_wide.rename(columns=vehicle_alt_columns_dict, inplace=True)\n\n # if the number of vehicles is less than the alternatives, fill with NA\n # e.g. all households only have 1 or 2 vehicles because of small sample size,\n # still need columns for alternatives 3 and 4\n for veh_num, col_name in vehicle_alt_columns_dict.items():\n if col_name not in vehicles_wide.columns:\n vehicles_wide[col_name] = \"\"\n\n # last entry in spec is the non-hh-veh option\n assert (\n alts_from_spec[-1] == \"non_hh_veh\"\n ), \"Last option in spec needs to be non_hh_veh\"\n vehicles_wide[alts_from_spec[-1]] = \"\"\n\n # merging vehicle alternatives to choosers\n choosers = tours_merged.to_frame().reset_index()\n choosers = pd.merge(choosers, vehicles_wide, how=\"left\", on=\"household_id\")\n choosers.set_index(\"tour_id\", inplace=True)\n\n # ----- setup skim keys\n skims = get_skim_dict(network_los, choosers)\n locals_dict.update(skims)\n\n # ------ preprocessor\n preprocessor_settings = model_settings.get(\"preprocessor\", None)\n if preprocessor_settings:\n expressions.assign_columns(\n df=choosers,\n model_settings=preprocessor_settings,\n locals_dict=locals_dict,\n trace_label=trace_label,\n )\n\n logger.info(\"Running %s with %d tours\", trace_label, len(choosers))\n\n if estimator:\n estimator.write_model_settings(model_settings, model_settings_file_name)\n estimator.write_spec(model_settings)\n estimator.write_coefficients(coefficients_df, model_settings)\n estimator.write_choosers(choosers)\n\n tours = tours.to_frame()\n\n # ------ running for each occupancy level selected\n tours_veh_occup_cols = []\n for occup in model_settings.get(\"OCCUPANCY_LEVELS\", [1]):\n logger.info(\"Running for occupancy = %d\", occup)\n # setting occup for access in spec expressions\n locals_dict.update({\"occup\": occup})\n\n choices = simulate.simple_simulate(\n choosers=choosers,\n spec=model_spec,\n nest_spec=nest_spec,\n skims=skims,\n locals_d=locals_dict,\n chunk_size=chunk_size,\n trace_label=trace_label,\n trace_choice_name=\"vehicle_allocation\",\n estimator=estimator,\n )\n\n # matching alt names to choices\n choices = 
choices.map(dict(enumerate(alts_from_spec))).to_frame()\n choices.columns = [\"alt_choice\"]\n\n # last alternative is the non-household vehicle option\n for alt in alts_from_spec[:-1]:\n choices.loc[choices[\"alt_choice\"] == alt, \"choice\"] = choosers.loc[\n choices[\"alt_choice\"] == alt, alt\n ]\n choices.loc[\n choices[\"alt_choice\"] == alts_from_spec[-1], \"choice\"\n ] = alts_from_spec[-1]\n\n # creating a column for choice of each occupancy level\n tours_veh_occup_col = f\"vehicle_occup_{occup}\"\n tours[tours_veh_occup_col] = choices[\"choice\"]\n tours_veh_occup_cols.append(tours_veh_occup_col)\n\n if estimator:\n estimator.write_choices(choices)\n choices = estimator.get_survey_values(\n choices, \"households\", \"vehicle_allocation\"\n )\n estimator.write_override_choices(choices)\n estimator.end_estimation()\n\n pipeline.replace_table(\"tours\", tours)\n\n tracing.print_summary(\n \"vehicle_allocation\", tours[tours_veh_occup_cols], value_counts=True\n )\n\n annotate_settings = model_settings.get(\"annotate_tours\", None)\n if annotate_settings:\n annotate_vehicle_allocation(model_settings, trace_label)\n\n if trace_hh_id:\n tracing.trace_df(tours, label=\"vehicle_allocation\", warn_if_empty=True)",
"def _handle_holdings_info(self, content):\n self._holdings[\"cash\"] = {\"cash\": content[\"cash\"], \"available_cash\": content[\"availableCash\"]}\n self._holdings[\"markets\"] = {}\n for asset in content[\"assets\"]:\n market = asset[\"market\"]\n self._holdings[\"markets\"][market[\"id\"]] = {\"units\": asset[\"units\"],\n \"available_units\": asset[\"availableUnits\"]}",
"def make_block_node(self, person_unit_arrays):\n geocode, arrays = person_unit_arrays\n arrays = list(arrays)\n gqhhvacs = arrays[1].astype(int)\n arrays[1] = arrays[1][:-1]\n\n # Assign arrays to table names in a dictionary and fill in with zeros if array is non-existent\n assert len(arrays) == len(self.data_names)\n data_dict = {n: a.astype(int) if a is not None else np.zeros(self.person_hist_dimensions).astype(int) for n,a in zip(self.data_names, arrays)}\n #for name in data_dict:\n #if data_dict[self.privacy_table_name] is None:\n # data_dict[self.privacy_table_name] = np.zeros(self.person_hist_dimensions).astype(int)\n\n # data_dict = {}\n # for i in range(len(arrays)):\n # data_dict[self.data_names[i]] = arrays[i].astype(int) if arrays[i] is not None else np.zeros(self.person_hist_dimensions).astype(int)\n\n # geocode is a tuple where the [1] entry is empty. We only want the [0] entry.\n geocode = geocode[0]\n logging.info(\"creating geocode: %s\" % geocode)\n\n housing_table_name = self.housing_table_name\n privacy_table_name = self.privacy_table_name\n\n raw = sparse.multiSparse(data_dict[privacy_table_name].astype(int))\n raw_housing = sparse.multiSparse(data_dict[housing_table_name].astype(int))\n levels = tuple(self.config[\"geodict\"][\"geolevel_names\"].split(\",\"))\n\n invar_names = tuple(self.config[CONSTRAINTS][THE_INVARIANTS+\".\"+levels[0]].split(\",\"))\n if invar_names == (\"\",):\n invariants_dict = {}\n else:\n invariants_dict = self.InvariantsCreator(raw=raw, raw_housing=raw_housing, invariant_names=invar_names).calculateInvariants().invariants_dict\n invariants_dict[\"gqhhvacs_vect\"] = gqhhvacs #not used for constraints, but must be passed through. don't need to add hhvacs to node signature anymore this way.\n\n cons_names = tuple(self.config[CONSTRAINTS][THE_CONSTRAINTS+\".\"+levels[0]].split(\",\"))\n \n\n # Make Constraints\n if cons_names == (\"\",):\n constraints_dict = {}\n else:\n constraints_dict = self.ConstraintsCreator(hist_shape=data_dict[self.privacy_table_name].shape,\n invariants=invariants_dict,\n constraint_names=cons_names)\\\n .calculateConstraints().constraints_dict\n\n #raw = data_dict[self.privacy_table_name].astype(int)\n \n block_node = nodes.geounitNode(geocode=geocode, geocodeDict=self.geocodeDict, raw=raw, raw_housing=raw_housing,\n cons=constraints_dict, invar=invariants_dict)\n return block_node",
"def prepareBuildListCompletedCheck(self):\n self.remainingBuildTasks[race_worker[self.race]] = 12\n self.remainingBuildTasks[raceBasicTownhall[self.race]] = 1\n self.raceSpecificUnitAndStructureCreations()\n # add everything from build list\n for element in self.buildList:\n # convert to id\n unitId: UnitTypeId = self.unitToId(element)\n if unitId in self.remainingBuildTasks:\n self.remainingBuildTasks[unitId] += 1\n else:\n self.remainingBuildTasks[unitId] = 1\n self.loggerBase.info(\"Created remaining build tasks data structure: \" + str(self.remainingBuildTasks))",
"def map_to_buildingsync(obj, groupspaces=False):\n #\n allbuilding = obj['All - Building']\n spacefunctions = obj['All - Space Functions']\n metered_energy = obj['All - Metered Energy']\n delivered_energy = obj['All - Delivered Energy']\n summary = obj['L1 - EEM Summary']\n envelope = obj['L2 - Envelope']\n hvac = obj['L2 - HVAC']\n summary_L2 = obj['L2 - EEM Summary']\n lighting_plug_loads = obj['L2 - Lighting Elec & Plug Loads']\n inventory = obj['L2 - Equipment Inventory']\n #\n # All - Building\n #\n # Give the address\n address = createElement('Address')\n if 'Street*' in allbuilding:\n el = createSubElement(address, 'StreetAddressDetail')\n el = createSubElement(el, 'Simplified')\n el = createSubElement(el, 'StreetAddress')\n el.text = allbuilding['Street*']\n easymap(allbuilding, 'City*', 'City', address)\n easymap(allbuilding, 'State*', 'State', address)\n if 'Postal Code*' in allbuilding:\n postalcode = allbuilding['Postal Code*']\n postalcode, plus4 = process_zip(postalcode)\n postalcodeplus4 = postalcode\n if plus4:\n postalcodeplus4 += '-' + plus4\n el = createSubElement(address, 'PostalCode')\n el.text = postalcode\n el = createSubElement(address, 'PostalCodePlus4')\n el.text = postalcodeplus4\n # street address, city, state, zip5, zip5-4\n if len(address) == 0:\n address = None\n # Create contacts if they are present\n contacts = createElement('Contacts')\n auditor = None\n if 'Energy Auditor' in allbuilding:\n auditor = createSubElement(contacts, 'Contact')\n auditor.attrib['ID'] = 'EnergyAuditor'\n addel('ContactRole', auditor, 'Energy Auditor')\n addel('ContactName', auditor, allbuilding['Energy Auditor'])\n keycontact = None\n if 'Key Contact' in allbuilding:\n keycontact = createSubElement(contacts, 'Contact')\n keycontact.attrib['ID'] = 'KeyContact'\n addel('ContactRole', keycontact, 'Other')\n addel('ContactName', keycontact, allbuilding['Key Contact'])\n addudf(keycontact, 'ASHRAE Standard 211 Role', 'Key Contact')\n if 'Client Name' in allbuilding:\n client = createSubElement(contacts, 'Contact')\n client.attrib['ID'] = 'Client'\n addel('ContactRole', client, 'Other')\n addel('ContactName', client, allbuilding['Client Name'])\n addudf(client, 'ASHRAE Standard 211 Role', 'Client')\n if 'Building Owner' in allbuilding:\n owner = createSubElement(contacts, 'Contact')\n owner.attrib['ID'] = 'BuildingOwner'\n addel('ContactRole', owner, 'Other')\n addel('ContactName', owner, allbuilding['Building Owner'])\n addudf(owner, 'ASHRAE Standard 211 Role', 'Owner')\n\n buildings = createElement('Buildings')\n building = createSubElement(buildings, 'Building')\n building.attrib['ID'] = 'Building'\n\n easymap(allbuilding, 'Building Name*', 'PremisesName', building)\n easymap(allbuilding, 'Building Description - Notable Conditions',\n 'PremisesNotes', building)\n # OccupancyClassification should go here, but it can't: the enums don't match\n if 'Occupancy' in allbuilding:\n occupancy = allbuilding['Occupancy']\n if 'Typical number of occupants (during occ hours)' in occupancy:\n levels = createSubElement(building, 'OccupancyLevels')\n level = createSubElement(levels, 'OccupancyLevel')\n addel('OccupantQuantity', level,\n str(occupancy['Typical number of occupants (during occ hours)']))\n typicalocc = createElement('TypicalOccupantUsages')\n if 'Typical occupancy (hours/week)' in occupancy:\n occ = createSubElement(typicalocc, 'TypicalOccupantUsage')\n addel('TypicalOccupantUsageValue', occ,\n str(occupancy['Typical occupancy (hours/week)']))\n addel('TypicalOccupantUsageUnits', 
occ, 'Hours per week')\n if 'Typical occupancy (weeks/year)' in occupancy:\n occ = createSubElement(typicalocc, 'TypicalOccupantUsage')\n addel('TypicalOccupantUsageValue', occ,\n str(occupancy['Typical occupancy (weeks/year)']))\n addel('TypicalOccupantUsageUnits', occ, 'Weeks per year')\n if len(typicalocc) > 0:\n building.append(typicalocc)\n if 'Number of Dwelling Units in Building (Multifamily Only)' in occupancy:\n units = createSubElement(building, 'SpatialUnits')\n addel('SpatialUnitType', units, 'Apartment units')\n addel('NumberOfUnits', units, str(occupancy['Number of Dwelling Units in Building (Multifamily Only)']))\n\n easymap(allbuilding, 'Conditioned Floors Above grade',\n 'ConditionedFloorsAboveGrade', building, f=str)\n easymap(allbuilding, 'Conditioned Floors Below grade',\n 'ConditionedFloorsBelowGrade', building, f=str)\n easymap(allbuilding, 'Building automation system? (Y/N)',\n 'BuildingAutomationSystem', building, yn2tf)\n easymap(allbuilding, 'Historical landmark status? (Y/N)',\n 'HistoricalLandmark', building, yn2tf)\n # Map to FloorAreas\n floorareas = createElement('FloorAreas')\n if 'Total conditioned area' in allbuilding:\n floorarea = createSubElement(floorareas, 'FloorArea')\n addel('FloorAreaType', floorarea, 'Conditioned')\n addel('FloorAreaValue', floorarea, allbuilding['Total conditioned area'])\n if 'Gross floor area' in allbuilding:\n floorarea = createSubElement(floorareas, 'FloorArea')\n addel('FloorAreaType', floorarea, 'Gross')\n addel('FloorAreaValue', floorarea, allbuilding['Gross floor area'])\n if 'Conditioned area (heated only)' in allbuilding:\n floorarea = createSubElement(floorareas, 'FloorArea')\n addel('FloorAreaType', floorarea, 'Cooled only')\n addel('FloorAreaValue', floorarea, allbuilding['Conditioned area (heated only)'])\n if 'Conditioned area (cooled only)' in allbuilding:\n floorarea = createSubElement(floorareas, 'FloorArea')\n addel('FloorAreaType', floorarea, 'Heated only')\n addel('FloorAreaValue', floorarea, allbuilding['Conditioned area (cooled only)'])\n # Map Space Function table to FloorAreas\n if 'Space Function' in allbuilding:\n for key, value in allbuilding['Space Function'].items():\n floorarea = createSubElement(floorareas, 'FloorArea')\n addel('FloorAreaType', floorarea, 'Custom')\n addel('FloorAreaCustomName', floorarea, key)\n addel('FloorAreaValue', floorarea, value)\n\n easymap(allbuilding, 'Year of construction*',\n 'YearOfConstruction', building, f=str)\n\n easymap(allbuilding, 'Year of Prior Energy Audit',\n 'YearOfLastEnergyAudit', building, f=str)\n\n easymap(allbuilding, 'Last Renovation*',\n 'YearOfLastMajorRemodel', building, f=str)\n #\n # All - Space Functions\n #\n # subsections = createElement('Subsections')\n spaces = []\n phvac = {}\n nohvac = []\n for key, value in spacefunctions.items():\n element = createElement('Space')\n # First the stuff that has a slot to go into\n addel('PremisesName', element, key)\n if 'Number of Occupants' in value:\n levels = createSubElement(element, 'OccupancyLevels')\n level = createSubElement(levels, 'OccupancyLevel')\n addel('OccupantQuantity', level,\n str(value['Number of Occupants']))\n typicalocc = createElement('TypicalOccupantUsages')\n if 'Use (hours/week)' in value:\n occ = createSubElement(typicalocc, 'TypicalOccupantUsage')\n addel('TypicalOccupantUsageValue', occ,\n str(value['Use (hours/week)']))\n addel('TypicalOccupantUsageUnits', occ, 'Hours per week')\n if 'Use (weeks/year)' in value:\n occ = createSubElement(typicalocc, 'TypicalOccupantUsage')\n 
addel('TypicalOccupantUsageValue', occ,\n str(value['Use (weeks/year)']))\n addel('TypicalOccupantUsageUnits', occ, 'Weeks per year')\n if len(typicalocc) > 0:\n element.append(typicalocc)\n if 'Gross Floor Area' in value:\n floorareas = createSubElement(element, 'FloorAreas')\n floorarea = createSubElement(floorareas, 'FloorArea')\n addel('FloorAreaType', floorarea, 'Gross')\n addel('FloorAreaValue', floorarea, str(value['Gross Floor Area']))\n # Now for the UDFs\n easymapudf(value, 'Function type',\n 'ASHRAE Standard 211 Function Type', element)\n easymapudf(value, 'Original intended use',\n 'ASHRAE Standard 211 Original Intended Use', element)\n easymapudf(value, 'Percent Conditioned Area',\n 'ASHRAE Standard 211 Percent Conditioned Area', element,\n f=repercentage)\n easymapudf(value, 'Approximate Plug Loads (W/sf)',\n 'ASHRAE Standard 211 Approximate Plug Loads', element, f=str)\n easymapudf(value, 'Principal HVAC Type',\n 'ASHRAE Standard 211 Principal HVAC Type', element, f=str)\n if value['Principal HVAC Type']:\n if value['Principal HVAC Type'] in phvac:\n phvac[value['Principal HVAC Type']].append(element)\n else:\n phvac[value['Principal HVAC Type']] = [element]\n else:\n nohvac.append(element)\n easymapudf(value, 'Principal Lighting Type',\n 'ASHRAE Standard 211 Principal Lighting Type', element, f=str)\n spaces.append(element)\n subsections = []\n subsection = None\n\n # Map the building shape if it is given\n if 'General Building Shape*' in envelope:\n subsections = createSubElement(building, 'Subsections')\n subsection = createSubElement(subsections, 'Subsection')\n addel('FootprintShape', subsection, envelope['General Building Shape*'])\n\n hvacsystems = None\n lightingsystems = None\n dhwsystems = None\n heatrecoverysystems = None\n wallsystems = None\n roofsystems = None\n ceilingsystems = None\n foundationsystems = None\n fenestrationsystems = None\n plugloads = None\n\n # L2 - HVAC, make one system to represent all of it.\n if len(hvac) > 0:\n hvacsystem = createElement('HVACSystem')\n # Plant stuff\n if 'Boiler Type' in hvac:\n el = createSubElement(hvacsystem, 'Plants')\n el = createSubElement(el, 'HeatingPlant')\n el = createSubElement(el, 'Boiler')\n for val in hvac['Boiler Type']:\n addudf(el, 'ASHRAE Std 211 Boiler Type', val)\n # HeatingAndCoolingSystems\n hvacsys = el = createElement('HeatingAndCoolingSystems')\n stuff = ['Heating Source', 'Heating Fuel']\n # Heating Source related info\n if any([el in hvac for el in stuff]):\n el = createSubElement(hvacsys, 'HeatingSources')\n el = createSubElement(el, 'HeatingSource')\n for tag in stuff:\n if tag in hvac:\n for val in hvac[tag]:\n addudf(el, 'ASHRAE Std 211 %s' % tag, val)\n stuff = ['Cooling Source', 'Chiller Input', 'Compressor', 'Condenser']\n # Cooling Source related info\n if any([el in hvac for el in stuff]):\n el = createSubElement(hvacsys, 'CoolingSources')\n el = createSubElement(el, 'CoolingSource')\n for tag in stuff:\n if tag in hvac:\n for val in hvac[tag]:\n addudf(el, 'ASHRAE Std 211 %s' % tag, val)\n if len(hvacsys) > 0:\n hvacsystem.append(hvacsys)\n\n # Tags with nowhere to go\n stuff = ['Zone Controls', 'Central Plant Controls', 'Heat Recovery', 'Outside Air',\n 'Cooling Distribution Equipment Type', 'Heating Distribution Equipment Type']\n for tag in stuff:\n if tag in hvac:\n for val in hvac[tag]:\n addudf(hvacsystem, 'ASHRAE Std 211 %s' % tag, val)\n\n if len(hvacsystem) > 0:\n hvacsystem.attrib['ID'] = 'Std211L2HVAC'\n hvacsystems = createElement('HVACSystems')\n 
hvacsystems.append(hvacsystem)\n\n stuff = ['SHW/DHW Source', 'SHW/DHW Fuel']\n if any([el in hvac for el in stuff]):\n dhwsystems = createElement('DomesticHotWaterSystems')\n dhw = createSubElement(dhwsystems, 'DomesticHotWaterSystem')\n dhw.attrib['ID'] = 'Std211L2HVACDHW'\n for tag in stuff:\n if tag in hvac:\n for val in hvac[tag]:\n addudf(dhw, 'ASHRAE Std 211 %s' % tag, val)\n\n if inventory:\n systems = map_equipment_inventory(inventory)\n if systems['HVACSystem']:\n if not hvacsystems:\n hvacsystems = createElement('HVACSystems')\n for system in systems['HVACSystem']:\n hvacsystems.append(system)\n if systems['HeatRecoverySystem']:\n if not heatrecoverysystems:\n heatrecoverysystems = createElement('HeatRecoverySystems')\n for system in systems['HeatRecoverySystem']:\n heatrecoverysystems.append(system)\n\n # Lighting\n if 'Lighting Source Type(s)' in lighting_plug_loads:\n num = 1\n sources = []\n for src_type, src in lighting_plug_loads['Lighting Source Type(s)'].items():\n source = createElement('LightingSystem')\n source.attrib['ID'] = 'LightingSystem%d' % num\n num += 1\n source.append(bsync_lighting_system_lookup(src_type))\n easyremap(src, 'Ballast Type(s)', 'BallastType', source, bsync_ballast_lookup)\n control = bsync_lighting_control_lookup(src['Control(s)'])\n if control is None:\n easymapudf(src, 'Control(s)', 'ASHRAE Std 211 Lighting Control', source)\n else:\n source.append(control)\n easymapudf(src, 'Space Type(s)*', 'ASHRAE Std 211 Space Type', source)\n easymapudf(src, 'Approx % Area Served', 'ASHRAE Std 211 Approx % Area Served', source, str)\n sources.append(source)\n if len(sources) > 0:\n lightingsystems = createElement('LightingSystems')\n for src in sources:\n lightingsystems.append(src)\n\n # Plug/process loads\n if 'Major Process/Plug Load Type(s)**' in lighting_plug_loads:\n num = 1\n loads = []\n for ld_type, ld in lighting_plug_loads['Major Process/Plug Load Type(s)**'].items():\n load = createElement('PlugLoad')\n addudf(load, 'ASHRAE Std 211 Major Process/Plug Load Type(s)', ld_type)\n easymapudf(ld, 'Key Operational Details***', 'ASHRAE Std 211 Key Operational Details', load)\n loads.append(load)\n if len(loads) > 0:\n plugloads = createElement('PlugLoads')\n for load in loads:\n plugloads.append(load)\n\n # Handle sides\n if ('Total exposed above grade wall area (sq ft)' in envelope or\n 'Total exposed above grade wall area R value' in envelope or\n 'Glazing area, approx % of exposed wall area [10, 25, 50, 75, 90, 100]*' in envelope or\n 'Wall Constructions' in envelope or\n 'Fenestration Frame Types' in envelope or\n 'Fenestration Glass Types' in envelope or\n 'Fenestration Seal Condition' in envelope):\n # Something is there to put in sides, make what we need\n if subsection is None:\n subsections = createSubElement(building, 'Subsections')\n subsection = createSubElement(subsections, 'Subsection')\n sides = createSubElement(subsection, 'Sides')\n side = createSubElement(sides, 'Side')\n # Make a wall system if needed\n wallsystem = None\n if ('Total exposed above grade wall area (sq ft)' in envelope or\n 'Total exposed above grade wall area R value' in envelope or\n 'Glazing area, approx % of exposed wall area [10, 25, 50, 75, 90, 100]*' in envelope or\n 'Wall Constructions' in envelope):\n wallsystems = createElement('WallSystems')\n wallsystem = createSubElement(wallsystems, 'WallSystem')\n wallsystem.attrib['ID'] = 'Wall1'\n easymap(envelope, 'Total exposed above grade wall area R value',\n 'WallRValue', wallsystem, f=str)\n 
easymapudf(envelope, 'Wall Constructions',\n 'ASHRAE Standard 211 Wall Construction', wallsystem, f=lambda x: ', '.join(x))\n # Make window stuff\n fenestrationsystem = None\n if ('Fenestration Frame Types' in envelope or\n 'Fenestration Glass Types' in envelope):\n fenestrationsystems = createElement('FenestrationSystems')\n fenestrationsystem = createSubElement(fenestrationsystems, 'FenestrationSystem')\n fenestrationsystem.attrib['ID'] = 'Fenestration1'\n easymapudf(envelope, 'Fenestration Frame Types',\n 'ASHRAE Standard 211 Fenestration Frame Types',\n fenestrationsystem, f=lambda x: ', '.join(x))\n easymapudf(envelope, 'Fenestration Glass Types',\n 'ASHRAE Standard 211 Fenestration Glass Types',\n fenestrationsystem, f=lambda x: ', '.join(x))\n easymapudf(envelope, 'Fenestration Seal Condition',\n 'ASHRAE Standard 211 Fenestration Seal Condition',\n fenestrationsystem)\n easymapudf(envelope, 'Description of Exterior doors**',\n 'ASHRAE Standard 211 Description of Exterior doors',\n fenestrationsystem)\n # Fill in the side information\n if wallsystem is not None:\n wallid = createSubElement(side, 'WallID')\n wallid.attrib['IDref'] = wallsystem.attrib['ID']\n if 'Total exposed above grade wall area (sq ft)' in envelope:\n addel('WallArea', wallid,\n str(envelope['Total exposed above grade wall area (sq ft)']))\n if fenestrationsystem is not None:\n windowid = createSubElement(side, 'WindowID')\n windowid.attrib['IDref'] = fenestrationsystem.attrib['ID']\n if 'Glazing area, approx % of exposed wall area [10, 25, 50, 75, 90, 100]*' in envelope:\n addel('WindowToWallRatio', windowid,\n str(envelope['Glazing area, approx % of exposed wall area [10, 25, 50, 75, 90, 100]*']))\n # Roof is next\n if ('Roof area (sq ft)' in envelope or\n 'Roof area R value' in envelope or\n 'Cool Roof (Y/N)' in envelope or\n 'Roof condition' in envelope or\n 'Roof Construction' in envelope):\n roofsystems = createElement('RoofSystems')\n roofsystem = createSubElement(roofsystems, 'RoofSystem')\n roofsystem.attrib['ID'] = 'Roof1'\n easymap(envelope, 'Roof area R value', 'RoofRValue',\n roofsystem, f=str)\n easymapudf(envelope, 'Cool Roof (Y/N)',\n 'ASHRAE Standard 211 Cool Roof (Y/N)', roofsystem)\n easymapudf(envelope, 'Roof condition',\n 'ASHRAE Standard 211 Roof Condition', roofsystem)\n easymapudf(envelope, 'Roof Construction',\n 'ASHRAE Standard 211 Roof Construction',\n roofsystem, f=lambda x: ', '.join(x))\n roofid = createSubElement(subsection, 'RoofID')\n roofid.attrib['IDref'] = roofsystem.attrib['ID']\n easymap(envelope, 'Roof area (sq ft)', 'RoofArea', roofid, f=str)\n\n # Make a ceiling system if needed\n if 'Floor Construction' in envelope:\n if ('Steel joist' in envelope['Floor Construction'] or\n 'Wood frame' in envelope['Floor Construction']):\n value = []\n if 'Steel joist' in envelope['Floor Construction']:\n value = ['Steel joist']\n if 'Wood frame' in envelope['Floor Construction']:\n value.append('Wood frame')\n value = ', '.join(value)\n ceilingsystems = createElement('CeilingSystems')\n ceilingsystem = createSubElement(ceilingsystems, 'CeilingSystem')\n ceilingsystem.attrib['ID'] = 'Ceiling1'\n addudf(ceilingsystem, 'ASHRAE Standard 211 Floor Construction',\n str(value))\n ceilingid = createSubElement(subsection, 'CeilingID')\n ceilingid.attrib['IDref'] = ceilingsystem.attrib['ID']\n\n # Foundation systems\n foundationsystem = None\n if ('Foundation Type' in envelope or\n 'Floor Construction' in envelope):\n foundationsystems = createElement('FoundationSystems')\n foundationsystem = 
createSubElement(foundationsystems, 'FoundationSystem')\n foundationsystem.attrib['ID'] = 'Foundation1'\n easymapudf(envelope, 'Foundation Type',\n 'ASHRAE Standard 211 Foundation Type',\n foundationsystem, f=lambda x: ', '.join(x))\n easymapudf(envelope, 'Floor Construction',\n 'ASHRAE Standard 211 Floor Construction',\n foundationsystem, f=lambda x: ', '.join(x))\n foundationid = createSubElement(subsection, 'FoundationID')\n foundationid.attrib['IDref'] = foundationsystem.attrib['ID']\n\n # Map the UDFs from L2 - Envelope\n udfs = createElement('UserDefinedFields')\n appendudf(udfs, 'Below grade wall area (sq ft)', envelope, prefix='ASHRAE Standard 211 ')\n appendudf(udfs, 'Below grade wall area (sq m)', envelope, prefix='ASHRAE Standard 211 ')\n appendudf(udfs, 'Overall Enclosure Tightness Assessment', envelope, prefix='ASHRAE Standard 211 ')\n appendudf(udfs, 'Description of Exterior doors**', envelope, prefix='ASHRAE Standard 211 ')\n appendudf(udfs, 'Below grade wall area R value', envelope, prefix='ASHRAE Standard 211 ')\n appendudf(udfs, 'Above grade wall common area with other conditioned buildings (ft2)', envelope,\n prefix='ASHRAE Standard 211 ')\n appendudf(udfs, 'Above grade wall common area with other conditioned buildings (m2)', envelope,\n prefix='ASHRAE Standard 211 ')\n # appendudf(udfs, 'Fenestration Seal Condition', envelope, prefix = 'ASHRAE Standard 211 ')\n\n if len(udfs) > 0:\n if subsection is None:\n subsections = createSubElement(building, 'Subsections')\n subsection = createSubElement(subsections, 'Subsection')\n subsection.append(udfs)\n\n thermalzones = []\n if len(spaces) > 0:\n if groupspaces:\n # Group spaces by the principle HVAC type\n thermalzones = createElement('ThermalZones')\n for phvactype, spcs in phvac.items():\n tz = createSubElement(thermalzones, 'ThermalZone')\n tzspaces = createSubElement(tz, 'Spaces')\n for space in spcs:\n tzspaces.append(space)\n # Anything with nothing gets its own zone\n for space in nohvac:\n tz = createElement('ThermalZone')\n tzspaces = createSubElement(tz, 'Spaces')\n tzspaces.append(space)\n else:\n # Every space gets its own thermal zone\n thermalzones = createElement('ThermalZones')\n for space in spaces:\n tz = createSubElement(thermalzones, 'ThermalZone')\n tzspaces = createSubElement(tz, 'Spaces')\n tzspaces.append(space)\n if len(thermalzones) > 0:\n if subsection is None:\n subsections = createSubElement(building, 'Subsections')\n subsection = createSubElement(subsections, 'Subsection')\n subsection.append(thermalzones)\n\n # Now for the UDFs from All - Building\n easymapudf(allbuilding, 'Primary Building use type*',\n 'ASHRAE Standard 211 Primary Building Use Type', building)\n easymapudf(allbuilding, 'Year Last Commissioned',\n 'ASHRAE Standard 211 Year Last Commissioned', building, f=str)\n easymapudf(allbuilding, 'Percent owned (%)',\n 'ASHRAE Standard 211 Percent Owned', building, f=repercentage)\n easymapudf(allbuilding, 'Percent leased (%)',\n 'ASHRAE Standard 211 Percent Leased', building, f=repercentage)\n easymapudf(allbuilding, 'Total Number of Floors',\n 'ASHRAE Standard 211 Total Number of Floors', building, f=str)\n if 'Excluded Spaces' in allbuilding:\n allbuilding['Excluded Spaces'] = ', '.join(allbuilding['Excluded Spaces'])\n easymapudf(allbuilding, 'Excluded Spaces',\n 'ASHRAE Standard 211 Excluded Spaces', building)\n\n if 'Occupancy' in allbuilding:\n easymapudf(allbuilding['Occupancy'],\n '% of Dwelling Units currently Occupied (Multifamily Only)',\n 'ASHRAE Standard 211 Percent Dwelling 
Units Currently Occupied',\n building, f=repercentage)\n\n # Wrap up for building\n if len(building) == 0:\n building = None\n buildings = None\n\n # Map energy sources, metered energy, and delivered energy to a report\n report = createElement('Report')\n scenario = None\n resources = None\n\n if ('Energy Sources' in allbuilding\n or 'Utility #1' in metered_energy\n or 'Utility #2' in metered_energy\n or 'Utility #3' in metered_energy\n or delivered_energy != {}):\n scenarios = createSubElement(report, 'Scenarios')\n scenario = createSubElement(scenarios, 'Scenario')\n scenario.attrib['ID'] = 'ASHRAEStandard211Scenario'\n addel('ScenarioName', scenario, 'ASHRAE Standard 211 Scenario')\n resources = createSubElement(scenario, 'ResourceUses')\n\n #\n # Map the energy sources from 'All - Building', does this need to be\n # harmonized with the information from 'All - Metered Energy' below?\n #\n if 'Energy Sources' in allbuilding:\n for el in allbuilding['Energy Sources']:\n resource = createElement('ResourceUse')\n # Nope, enum fail on both\n # easymap(el, 'Energy Source', 'EnergyResource', resource)\n # if 'Type' in el:\n # sub = createSubElement(resource, 'Utility')\n # sub = createSubElement(sub, 'MeteringConfiguration')\n # sub.text = el['Type']\n easymapudf(el, 'Energy Source', 'ASHRAE Standard 211 Energy Source',\n resource)\n easymapudf(el, 'Type', 'ASHRAE Standard 211 Type', resource)\n easymapudf(el, 'ID', 'ASHRAE Standard 211 ID', resource, f=str)\n easymapudf(el, 'Rate schedule', 'ASHRAE Standard 211 Rate Schedule',\n resource, f=str)\n if len(resource) > 0:\n resources.append(resource)\n\n # Add resource uses for metered and delivered energy\n for name in ['Utility #1', 'Utility #2', 'Utility #3']:\n if name in metered_energy:\n resource = createElement('ResourceUse')\n resource.attrib['ID'] = 'Std211ResourceUse' + name.replace(' #', '')\n if metered_energy[name]['Definition']['Units'].startswith(\"=INDEX('Drop Down Lists'!\"):\n # Use default\n metered_energy[name]['Definition']['Units'] = metered_energy_default_units[metered_energy[name]['Type']]\n if metered_energy[name]['Definition']['kBtu/unit'].startswith('=IFERROR(INDEX(EnergyConversionRates,MATCH'):\n # Use default\n metered_energy[name]['Definition']['kBtu/unit'] = str(\n conversion_to_kBtu[metered_energy[name]['Definition']['Units']])\n if metered_energy[name]['Type'] in metered_energy_type_lookup:\n el = createSubElement(resource, 'EnergyResource')\n el.text = metered_energy_type_lookup[metered_energy[name]['Type']]\n else:\n el = createSubElement(resource, 'EnergyResource')\n el.text = 'Other'\n easymapudf(metered_energy[name], 'Type',\n 'ASHRAE Standard 211 Energy Source', resource)\n el = createSubElement(resource, 'ResourceUnits')\n el.text = metered_energy_bsync_units[metered_energy[name]['Type']]\n el = createSubElement(resource, 'UtilityIDs')\n el = createSubElement(el, 'UtilityID')\n el.attrib['IDref'] = 'Std211Metered' + name.replace(' #', '')\n easymapudf(metered_energy[name]['Definition'], 'kBtu/unit', 'ASHRAE Standard 211 kBtu/unit', resource)\n resources.append(resource)\n\n if delivered_energy:\n resource = createElement('ResourceUse')\n resource.attrib['ID'] = 'Std211ResourceUseDelivered1'\n if delivered_energy['Definition']['Conversion to kBTU'].startswith(\"=IFERROR(INDEX(\"):\n # Use default\n delivered_energy['Definition']['Conversion to kBTU'] = str(\n conversion_to_kBtu[delivered_energy['Definition']['Units']])\n el = createSubElement(resource, 'EnergyResource')\n fueltype = 
delivered_energy['Definition']['Delivered Energy Type (if applicable)']\n if fueltype == 'Oil':\n fueltype = 'Fuel oil'\n el.text = fueltype\n el = createSubElement(resource, 'ResourceUnits')\n el.text = bsync_unit_lookup[delivered_energy['Definition']['Units']]\n easymapudf(delivered_energy['Definition'], 'Conversion to kBTU', 'ASHRAE Standard 211 Conversion to kBTU',\n resource)\n if 'Estimated Annual Use**' in delivered_energy['Definition']:\n easymapudf(delivered_energy['Definition'], 'Estimated Annual Use**',\n 'ASHRAE Standard 211 Estimated Annual Use', resource,\n str)\n resources.append(resource)\n\n # Now the time series data\n datapoints = []\n\n keys = {'Utility #1': {'Use': 'Energy', 'Cost': 'Currency', 'Peak': 'Energy'},\n 'Utility #2': {'Use': 'Energy', 'Cost': 'Currency'},\n 'Utility #3': {'Use': 'Energy', 'Cost': 'Currency'}}\n\n reading_type = {'Use': 'Total',\n 'Cost': 'Total',\n 'Peak': 'Peak'}\n\n for name in ['Utility #1', 'Utility #2', 'Utility #3']:\n if name in metered_energy:\n refname = 'Std211ResourceUse' + name.replace(' #', '')\n if 'Data' in metered_energy[name]:\n for pt in metered_energy[name]['Data']:\n start = pt['Start Date']\n end = pt['End Date']\n # Compute the frequency, we don't handle 'Unknown'\n frequency = determine_frequency(start, end)\n for inkey, outkey in keys[name].items():\n ts = createElement('TimeSeries')\n el = createSubElement(ts, 'ReadingType')\n el.text = reading_type[inkey]\n el = createSubElement(ts, 'TimeSeriesReadingQuantity')\n el.text = outkey\n el = createSubElement(ts, 'StartTimeStamp')\n el.text = start.strftime('%Y-%m-%dT00:00:00')\n el = createSubElement(ts, 'EndTimeStamp')\n el.text = end.strftime('%Y-%m-%dT00:00:00')\n el = createSubElement(ts, 'IntervalFrequency')\n el.text = frequency\n el = createSubElement(ts, 'IntervalReading')\n el.text = str(pt[inkey])\n el = createSubElement(ts, 'ResourceUseID')\n el.attrib['IDref'] = refname\n datapoints.append(ts)\n\n if delivered_energy:\n refname = 'Std211ResourceUseDelivered1'\n if 'Data' in delivered_energy:\n for pt in delivered_energy['Data']:\n start = pt['Delivery date']\n for inkey, outkey in {'Volume': 'Other', 'Cost': 'Currency'}.items():\n ts = createElement('TimeSeries')\n el = createSubElement(ts, 'ReadingType')\n el.text = 'Total'\n el = createSubElement(ts, 'TimeSeriesReadingQuantity')\n el.text = outkey\n el = createSubElement(ts, 'StartTimeStamp')\n el.text = start.strftime('%Y-%m-%dT00:00:00')\n el = createSubElement(ts, 'IntervalReading')\n el.text = str(pt[inkey])\n el = createSubElement(ts, 'ResourceUseID')\n el.attrib['IDref'] = refname\n datapoints.append(ts)\n\n if len(datapoints) > 0:\n ts = createSubElement(scenario, 'TimeSeriesData')\n for pt in datapoints:\n ts.append(pt)\n\n if len(scenario) > 0 and (building is not None):\n link = createSubElement(scenario, 'LinkedPremises')\n el = createSubElement(link, 'Building')\n el = createSubElement(el, 'LinkedBuildingID')\n el.attrib['IDref'] = building.attrib['ID']\n\n # Add the utility items\n utilities = createElement('Utilities')\n for name in ['Utility #1', 'Utility #2', 'Utility #3']:\n if name in metered_energy:\n el = createSubElement(utilities, 'Utility')\n el.attrib['ID'] = 'Std211Metered' + name.replace(' #', '')\n el = createSubElement(el, 'UtilityName')\n el.text = name\n if len(utilities) > 0:\n report.append(utilities)\n\n if auditor is not None:\n el = createSubElement(report, 'AuditorContactID')\n el.attrib['IDref'] = auditor.attrib['ID']\n\n easymapudf(allbuilding, 'Date of site 
visit(s)',\n 'ASHRAE Standard 211 Date of site visit(s)', report)\n\n # Wrap up for report\n if len(report) == 0:\n report = None\n #\n # L1 - EEM Summary\n #\n fields = ['Modified System',\n 'Impact on Occupant Comfort or IEQ',\n 'Other Non-Energy Impacts',\n 'Cost',\n 'Savings Impact',\n 'Typical ROI',\n 'Priority']\n # First the low cost items\n measures = createElement('Measures')\n if 'Low-Cost and No-Cost Recommendations' in summary:\n for key, value in summary['Low-Cost and No-Cost Recommendations'].items():\n measure = createSubElement(measures, 'Measure')\n el = createSubElement(measure, 'LongDescription')\n el.text = key\n udfs = createSubElement(measure, 'UserDefinedFields')\n for field in fields:\n if field in value:\n udf = createSubElement(udfs, 'UserDefinedField')\n udfname = createSubElement(udf, 'FieldName')\n udfname.text = field\n udfvalue = createSubElement(udf, 'FieldValue')\n udfvalue.text = value[field]\n udf = createSubElement(udfs, 'UserDefinedField')\n udfname = createSubElement(udf, 'FieldName')\n udfname.text = 'ASHRAE Standard 211 L1 Measure Category'\n udfvalue = createSubElement(udf, 'FieldValue')\n udfvalue.text = 'Low-Cost and No-Cost Recommendations'\n # Change that one thing...\n fields[1] = 'Impact on Occupant Comfort'\n if 'Potential Capital Recommendations' in summary:\n for key, value in summary['Potential Capital Recommendations'].items():\n measure = createSubElement(measures, 'Measure')\n el = createSubElement(measure, 'LongDescription')\n el.text = key\n udfs = createSubElement(measure, 'UserDefinedFields')\n for field in fields:\n if field in value:\n udf = createSubElement(udfs, 'UserDefinedField')\n udfname = createSubElement(udf, 'FieldName')\n udfname.text = field\n udfvalue = createSubElement(udf, 'FieldValue')\n udfvalue.text = value[field]\n udf = createSubElement(udfs, 'UserDefinedField')\n udfname = createSubElement(udf, 'FieldName')\n udfname.text = 'ASHRAE Standard 211 L2 Measure Category'\n udfvalue = createSubElement(udf, 'FieldValue')\n udfvalue.text = 'Potential Capital Recommendations'\n\n #\n # L2 - EEM Summary\n #\n udf_fields = ['Electricity Cost Savings', 'Non-energy Cost Savings']\n # Try to build the utility savings headings\n utility_units = []\n utility_types = []\n for name in ['Utility #1', 'Utility #2', 'Utility #3']:\n if name in metered_energy:\n utility_units.append(metered_energy[name]['Definition']['Units'])\n utility_types.append(metered_energy[name]['Type'])\n if delivered_energy:\n utility_types.append(delivered_energy['Definition']['Delivered Energy Type (if applicable)'])\n utility_units.append(delivered_energy['Definition']['Units'])\n for category, eems in summary_L2.items():\n for key, value in eems.items():\n measure = createSubElement(measures, 'Measure')\n el = createSubElement(measure, 'LongDescription')\n el.text = key\n measure_savings = createElement('MeasureSavingsAnalysis')\n\n annual_by_fuels = createElement('AnnualSavingsByFuels')\n utilnum = 1\n for util_units, util_type in zip(utility_units, utility_types):\n if utilnum == 4:\n header = 'Delivered Energy'\n else:\n header = 'Utility #%d' % utilnum # util_type + ' [' + util_units +']'\n utilnum += 1\n if header in value:\n if value[header]:\n savings = createSubElement(annual_by_fuels, 'AnnualSavingsByFuel')\n el = createSubElement(savings, 'EnergyResource')\n el.text = metered_energy_type_lookup[util_type]\n el = createSubElement(savings, 'ResourceUnits')\n el.text = bsync_unit_lookup[util_units]\n el = createSubElement(savings, 
'AnnualSavingsNativeUnits')\n el.text = str(value[header])\n\n if len(annual_by_fuels) > 0:\n measure_savings.append(annual_by_fuels)\n\n easymap(value, 'Potential Incentives', 'FundingFromIncentives', measure_savings, str)\n\n if len(measure_savings) > 0:\n measure.append(measure_savings)\n\n easymap(value, 'Measure Life (years)', 'UsefulLife', measure, str)\n easymap(value, 'Measure Cost', 'MeasureTotalFirstCost', measure, str)\n\n udfs = createSubElement(measure, 'UserDefinedFields')\n for field in udf_fields:\n if field in value:\n if value[field]:\n udf = createSubElement(udfs, 'UserDefinedField')\n udfname = createSubElement(udf, 'FieldName')\n udfname.text = 'ASHRAE Std 211 ' + field\n udfvalue = createSubElement(udf, 'FieldValue')\n udfvalue.text = value[field]\n udf = createSubElement(udfs, 'UserDefinedField')\n udfname = createSubElement(udf, 'FieldName')\n udfname.text = 'ASHRAE Standard 211 L2 Measure Category'\n udfvalue = createSubElement(udf, 'FieldValue')\n udfvalue.text = category\n\n #\n # Assemble the final result\n #\n root_ns = et.QName(\"http://buildingsync.net/schemas/bedes-auc/2019\", \"BuildingSync\")\n attr_qname = et.QName(\"http://www.w3.org/2001/XMLSchema-instance\", \"schemaLocation\")\n nsmap = {None: \"http://buildingsync.net/schemas/bedes-auc/2019\",\n 'xsi': \"http://www.w3.org/2001/XMLSchema-instance\"}\n bsync = et.Element(root_ns,\n {attr_qname: \"http://buildingsync.net/schemas/bedes-auc/2019 https://github.com/BuildingSync/schema/releases/download/v1.0/BuildingSync.xsd\"},\n nsmap=nsmap)\n # The following five lines are the original ElementTree version\n # bsync = et.Element('Audits')\n # bsync.attrib['xmlns'] = \"http://nrel.gov/schemas/bedes-auc/2014\"\n # bsync.attrib['xmlns:xsi'] = \"http://www.w3.org/2001/XMLSchema-instance\"\n # bsync.attrib['xsi:schemaLocation'] = \"http://nrel.gov/schemas/bedes-auc/2014 ../BuildingSync.xsd\"\n\n # First is Sites\n facilities = None\n if (address is not None) or (keycontact is not None) or (buildings is not None):\n facilities = createSubElement(bsync, 'Facilities')\n facility = createSubElement(facilities, 'Facility')\n sites = createSubElement(facility, 'Sites')\n site = createSubElement(sites, 'Site')\n if address is not None:\n site.append(address)\n if keycontact is not None:\n pcid = createSubElement(site, 'PrimaryContactID')\n pcid.text = keycontact.attrib['ID']\n if buildings is not None:\n site.append(buildings)\n # Second is Systems\n if ((hvacsystems is not None) or (lightingsystems is not None) or (dhwsystems is not None)\n or (heatrecoverysystems is not None) or (wallsystems is not None) or (roofsystems is not None)\n or (ceilingsystems is not None) or (fenestrationsystems is not None) or (foundationsystems is not None)\n or (plugloads is not None)):\n if facilities is None:\n facilities = createSubElement(bsync, 'Facilities')\n facility = createSubElement(facilities, 'Facility')\n systems = createSubElement(facility, 'Systems')\n if hvacsystems is not None:\n systems.append(hvacsystems)\n if lightingsystems is not None:\n systems.append(lightingsystems)\n if dhwsystems is not None:\n systems.append(dhwsystems)\n if heatrecoverysystems is not None:\n systems.append(heatrecoverysystems)\n if wallsystems is not None:\n systems.append(wallsystems)\n if roofsystems is not None:\n systems.append(roofsystems)\n if ceilingsystems is not None:\n systems.append(ceilingsystems)\n if fenestrationsystems is not None:\n systems.append(fenestrationsystems)\n if foundationsystems is not None:\n 
systems.append(foundationsystems)\n if plugloads is not None:\n systems.append(plugloads)\n # Next is Measures\n if measures is not None:\n if facilities is None:\n facilities = createSubElement(bsync, 'Facilities')\n facility = createSubElement(facilities, 'Facility')\n facility.append(measures)\n # Now Reports\n if report is not None:\n if facilities is None:\n facilities = createSubElement(bsync, 'Facilities')\n facility = createSubElement(facilities, 'Facility')\n facility.append(report)\n # Last is Contacts\n if contacts is not None:\n if facilities is None:\n facilities = createSubElement(bsync, 'Facilities')\n facility = createSubElement(facilities, 'Facility')\n facility.append(contacts)\n # Done!\n return bsync",
"def get_available_structures( self ):\n _check_type(self)\n return _get_available(self, \"structure_\")",
"def expand_households():\n\n if setting('NO_INTEGERIZATION_EVER', False):\n logger.warning(\"skipping expand_households: NO_INTEGERIZATION_EVER\")\n inject.add_table('expanded_household_ids', pd.DataFrame())\n return\n\n geographies = setting('geographies')\n household_id_col = setting('household_id_col')\n\n low_geography = geographies[-1]\n\n # only one we really need is low_geography\n seed_geography = setting('seed_geography')\n geography_cols = geographies[geographies.index(seed_geography):]\n\n weights = get_weight_table(low_geography, sparse=True)\n weights = weights[geography_cols + [household_id_col, 'integer_weight']]\n\n # - expand weights table by integer_weight, so there is one row per desired hh\n weight_cols = weights.columns.values\n weights_np = np.repeat(weights.values, weights.integer_weight.values, axis=0)\n expanded_weights = pd.DataFrame(data=weights_np, columns=weight_cols)\n\n if setting('GROUP_BY_INCIDENCE_SIGNATURE'):\n\n # get these in a repeatable order so np.random.choice behaves the same regardless of weight table order\n # i.e. which could vary depending on whether we ran single or multi process due to apportioned/coalesce\n expanded_weights = expanded_weights.sort_values(geography_cols + [household_id_col])\n\n # the household_id_col is really the group_id\n expanded_weights.rename(columns={household_id_col: 'group_id'}, inplace=True)\n\n # the original incidence table with one row per hh, with index hh_id\n household_groups = pipeline.get_table('household_groups')\n household_groups = household_groups[[household_id_col, 'group_id', 'sample_weight']]\n\n # for each group, lists of hh_ids and their sample_weights (as relative probabiliities)\n # [ [ [<group_0_hh_id_list>], [<group_0_hh_prob_list>] ],\n # [ [<group_1_hh_id_list>], [<group_1_hh_prob_list>] ], ... 
]\n HH_IDS = 0\n HH_PROBS = 1\n grouper = household_groups.groupby('group_id')\n group_hh_probs = [0] * len(grouper)\n for group_id, df in grouper:\n hh_ids = list(df[household_id_col])\n probs = list(df.sample_weight / df.sample_weight.sum())\n group_hh_probs[group_id] = [hh_ids, probs]\n\n # get a repeatable random number sequence generator for consistent choice results\n prng = pipeline.get_rn_generator().get_external_rng('expand_households')\n\n # now make a hh_id choice for each group_id in expanded_weights\n def chooser(group_id):\n hh_ids = group_hh_probs[group_id][HH_IDS]\n hh_probs = group_hh_probs[group_id][HH_PROBS]\n return prng.choice(hh_ids, p=hh_probs)\n expanded_weights[household_id_col] = \\\n expanded_weights.group_id.apply(chooser, convert_dtype=True,)\n\n # FIXME - omit in production?\n del expanded_weights['group_id']\n del expanded_weights['integer_weight']\n\n append = inject.get_step_arg('append', False)\n replace = inject.get_step_arg('replace', False)\n assert not (append and replace), \"can't specify both append and replace for expand_households\"\n\n if append or replace:\n t = inject.get_table('expanded_household_ids').to_frame()\n prev_hhs = len(t.index)\n added_hhs = len(expanded_weights.index)\n\n if replace:\n # FIXME - should really get from crosswalk table?\n low_ids_to_replace = expanded_weights[low_geography].unique()\n t = t[~t[low_geography].isin(low_ids_to_replace)]\n\n expanded_weights = pd.concat([t, expanded_weights], ignore_index=True)\n\n dropped_hhs = prev_hhs - len(t.index)\n final_hhs = len(expanded_weights.index)\n op = 'append' if append else 'replace'\n logger.info(\"expand_households op: %s prev hh count %s dropped %s added %s final %s\" %\n (op, prev_hhs, dropped_hhs, added_hhs, final_hhs))\n\n # sort this so results will be consistent whether single or multiprocessing, GROUP_BY_INCIDENCE_SIGNATURE, etc...\n expanded_weights = expanded_weights.sort_values(geography_cols + [household_id_col])\n\n repop = inject.get_step_arg('repop', default=False)\n inject.add_table('expanded_household_ids', expanded_weights, replace=repop)",
"def test_1_BuildObjects(self):\n # print(PrettyFormatAny.form(self.m_xml, 'Tags'))\n self.assertEqual(self.m_pyhouse_obj.House.Rooms, None)",
"def setup_struct(self):\n\n # Set the problem name if the user doesn't\n if 'prob_name' not in self.prob_dict.keys():\n self.prob_dict['prob_name'] = 'struct'\n\n # Create the base root-level group\n root = Group()\n\n # Create the problem and assign the root group\n self.prob = Problem()\n self.prob.root = root\n\n # Loop over each surface in the surfaces list\n for surface in self.surfaces:\n\n # Get the surface name and create a group to contain components\n # only for this surface.\n # This group's name is whatever the surface's name is.\n # The default is 'wing'.\n name = surface['name']\n tmp_group = Group()\n\n # Strip the surface names from the desvars list and save this\n # modified list as self.desvars\n desvar_names = []\n for desvar in self.desvars.keys():\n\n # Check to make sure that the surface's name is in the design\n # variable and only add the desvar to the list if it corresponds\n # to this surface.\n if name[:-1] in desvar:\n desvar_names.append(''.join(desvar.split('.')[1:]))\n\n # Add independent variables that do not belong to a specific component.\n # Note that these are the only ones necessary for structual-only\n # analysis and optimization.\n # Here we check and only add the variables that are desvars or a\n # special var, radius, which is necessary to compute weight.\n indep_vars = [('loads', surface['loads'])]\n for var in surface['geo_vars']:\n if var in desvar_names or 'thickness' in var or var in surface['initial_geo']:\n indep_vars.append((var, surface[var]))\n\n # Add structural components to the surface-specific group\n tmp_group.add('indep_vars',\n IndepVarComp(indep_vars),\n promotes=['*'])\n tmp_group.add('mesh',\n GeometryMesh(surface, self.desvars),\n promotes=['*'])\n tmp_group.add('tube',\n MaterialsTube(surface),\n promotes=['*'])\n tmp_group.add('struct_setup',\n SpatialBeamSetup(surface),\n promotes=['*'])\n tmp_group.add('struct_states',\n SpatialBeamStates(surface),\n promotes=['*'])\n tmp_group.add('struct_funcs',\n SpatialBeamFunctionals(surface),\n promotes=['*'])\n\n # Add bspline components for active bspline geometric variables.\n # We only add the component if the corresponding variable is a desvar\n # or special (radius).\n for var in surface['bsp_vars']:\n if var in desvar_names or var in surface['initial_geo'] or 'thickness' in var:\n n_pts = surface['num_y']\n if var in ['thickness_cp', 'radius_cp']:\n n_pts -= 1\n trunc_var = var.split('_')[0]\n tmp_group.add(trunc_var + '_bsp',\n Bspline(var, trunc_var, surface['num_'+var], n_pts),\n promotes=['*'])\n\n # Add tmp_group to the problem with the name of the surface.\n # The default is 'wing'.\n root.add(name[:-1], tmp_group, promotes=[])\n\n root.add_metadata(surface['name'] + 'yield_stress', surface['yield'])\n root.add_metadata(surface['name'] + 'fem_origin', surface['fem_origin'])\n\n # Actually set up the problem\n self.setup_prob()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Dispatches all member status instances for this subject, e.g. SubjectAbsentee, SubjectUndecided, ...
|
def dispatch_member_status_instances(self, app_label, registered_subject, user_container, **kwargs):
member_status_models = self.get_member_status_models(app_label)
for member_status_cls in member_status_models:
member_status = member_status_cls.objects.filter(registered_subject=registered_subject)
if member_status:
self.dispatch_user_items_as_json(member_status, user_container)
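The loop above assumes Django-style member status models filtered by `registered_subject`. Below is a framework-free sketch of the same filter-then-dispatch pattern; the stand-in classes, the in-memory record list, and the sketch function are illustrative assumptions, not the project's real models or dispatch API.

# Framework-free sketch of the filter-then-dispatch loop (all names here are
# stand-ins for illustration; the real code filters Django querysets instead).
from dataclasses import dataclass

@dataclass
class SubjectAbsentee:
    registered_subject: str

@dataclass
class SubjectUndecided:
    registered_subject: str

records = [SubjectAbsentee("subject-001"), SubjectUndecided("subject-002")]

def dispatch_member_status_sketch(registered_subject, dispatched):
    for member_status_cls in (SubjectAbsentee, SubjectUndecided):
        member_status = [r for r in records
                         if isinstance(r, member_status_cls)
                         and r.registered_subject == registered_subject]
        if member_status:
            # Stands in for dispatch_user_items_as_json(member_status, user_container)
            dispatched.extend(member_status)

sent = []
dispatch_member_status_sketch("subject-001", sent)
assert sent == [SubjectAbsentee("subject-001")]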
|
[
"def transition(status_to, ticket_data):\n if status_to == \"In Progress\":\n email_address, result = get_group_details(ticket_data)\n action_group_membership_change(email_address, result[0], ticket_data)",
"def UserStatus(self, Status):",
"async def _check_registered_sub_status(self):\n await self.bot.wait_until_ready()\n # list of tuples of (mc_uuid, mc_name, resubbed/unsubbed)\n status_changed_list = [] # type: List[Tuple[str, str, bool, Member]]\n # TODO: hopefully won't exceed 100 guilds\n async for guild in self.bot.fetch_guilds(): # type: Guild\n # have to get complete guild as fetch_guild just gives basic info\n guild = self.bot.get_guild(guild.id)\n if guild is None:\n print('Unable to retrieve guild')\n return # TODO: log\n ban_list = await guild.bans()\n banned_user_ids = set(str(be[1].id) for be in ban_list)\n for disc_id, wl_entry in self._working_discord_mc_mapping.items():\n mc_uuid_str = wl_entry['uuid']\n mc_uuid = uuid.UUID(mc_uuid_str)\n if disc_id in banned_user_ids:\n status_changed_list.append((mc_uuid, wl_entry['name'], False, member))\n continue\n member = guild.get_member(int(disc_id)) # type: Member\n if member is None:\n print(f'User {disc_id} could not be retrieved')\n continue # TODO: log\n # if the uuid is not in the whitelist\n if mc_uuid not in self._whitelisted_uuids:\n # check if the user has resubbed\n if any(r.name in self._allowed_roles for r in member.roles):\n status_changed_list.append((mc_uuid, wl_entry['name'], True, member))\n continue\n # if user has none of the allowed roles, they have lost sub\n if all(r.name not in self._allowed_roles for r in member.roles):\n status_changed_list.append((mc_uuid, wl_entry['name'], False, member))\n for mc_user_uuid, mc_username, resubbed, member in status_changed_list:\n if resubbed: # add resubbed users back to whitelist\n await self._add_user_to_whitelist(mc_user_uuid, mc_username)\n await member.add_roles(Object(self._managed_role_id), reason='Resub')\n else:\n removal_reason = 'Banned' if str(member.id) in banned_user_ids else 'Unsub'\n await self._remove_user_from_whitelist(mc_user_uuid)\n await member.remove_roles(Object(self._managed_role_id), reason=removal_reason)\n # TODO: send message in channel to unsubbed user?",
"def test_someSuccesses(self):\n ds = self.makeDeliveries(u'test1@example.com', u'test2@example.com')\n ds[0].mailSent(None, self.scheduler)\n self.assertEqual(set(self.message.iterStatuses()),\n set([exmess.OUTBOX_STATUS, exmess.UNREAD_STATUS,\n exmess.SENT_STATUS]))",
"def status(self, status, persister=None):\n assert(status in Group.GROUP_STATUS)\n # Check the maximum number of threads.\n _utils.check_number_threads(1)\n persister.exec_stmt(Group.UPDATE_STATUS,\n {\"params\":(status, self.__group_id)})\n self.__status = status",
"def public_mmsstatuscb():\n message_sid = request.values.get('MessageSid')\n message_status = request.values.get('MessageStatus')\n logging.info('Message status: %s / %s', message_sid, message_status)\n logging.debug('Message status DUMP: %s', request.values)\n if (message_status == 'failed' or message_status == 'undelivered'):\n logging.error('Message with SID %s has unacceptable status: %s', message_sid, message_status)\n return ('', 204)",
"def _sensible_status_changes(self, current_status, rescinded):\n\n status_changes = defaultdict(lambda: [])\n\n status_changes[('Draft', False)] = [\n self.StatusChange(self.UserRelationship.INITIATOR,\n 'Submitted', False),\n self.StatusChange(self.UserRelationship.INITIATOR,\n 'Cancelled', False)\n ]\n status_changes[('Submitted', False)] = [\n self.StatusChange(self.UserRelationship.INITIATOR,\n 'Submitted', True), # rescind\n self.StatusChange(self.UserRelationship.RESPONDENT,\n 'Accepted', False),\n self.StatusChange(self.UserRelationship.RESPONDENT,\n 'Refused', False)\n ]\n status_changes[('Accepted', False)] = [\n self.StatusChange(self.UserRelationship.INITIATOR,\n 'Accepted', True), # rescind\n self.StatusChange(self.UserRelationship.RESPONDENT,\n 'Accepted', True), # rescind\n self.StatusChange(self.UserRelationship.GOVERNMENT_ANALYST,\n 'Recommended', False),\n self.StatusChange(self.UserRelationship.GOVERNMENT_ANALYST,\n 'Not Recommended', False)\n ]\n status_changes[('Recommended', False)] = [\n self.StatusChange(self.UserRelationship.INITIATOR,\n 'Recommended', True), # rescind\n self.StatusChange(self.UserRelationship.RESPONDENT,\n 'Recommended', True), # rescind\n self.StatusChange(self.UserRelationship.GOVERNMENT_DIRECTOR,\n 'Approved', False),\n self.StatusChange(self.UserRelationship.GOVERNMENT_DIRECTOR,\n 'Declined', False)\n ]\n status_changes[('Not Recommended', False)] = [\n self.StatusChange(self.UserRelationship.INITIATOR,\n 'Not Recommended', True), # rescind\n self.StatusChange(self.UserRelationship.RESPONDENT,\n 'Not Recommended', True), # rescind\n self.StatusChange(self.UserRelationship.GOVERNMENT_DIRECTOR,\n 'Approved', False),\n self.StatusChange(self.UserRelationship.GOVERNMENT_DIRECTOR,\n 'Declined', False)\n ]\n\n return status_changes[(current_status, rescinded)]",
"async def membership(ctx, arg):\n\n target = msg_to_member(ctx.message)\n reply = membership_duration(target)\n await ctx.send(reply)",
"async def members(ctx):\n if not hasattr(members, 'last_members_request'):\n members.last_members_request = datetime(2000, 1, 1)\n \"\"\"/slap people if !members has already been polled recently\"\"\"\n reply = \"\"\n if (datetime.utcnow() - members.last_members_request) < timedelta(hours=6):\n reply = reply + \"That's already been asked\\n\"\n reply = reply + f\"slaps {ctx.author.mention} around a bit with a large trout\\n\"\n \"\"\"Let them see the count anyways\"\"\"\n \"\"\"Show the total number of active DMS members.\"\"\"\n async with aiohttp.ClientSession() as session:\n async with session.get('https://accounts.dallasmakerspace.org/member_count.php') as resp:\n if resp.status != 200:\n reply = reply + f\"Error {resp.status}: I cannot access that info right now.\"\n await ctx.send(reply)\n return\n total = (await resp.json(content_type='text/html'))['total']\n reply = reply + f\"There are currently {total} members.\"\n await ctx.send(reply)\n members.last_members_request = datetime.utcnow()\n return",
"def part_check_member_status(self, username, status):\n member = get_member(username)\n self.assertEqual(member.payment_account.billing_status, status)\n \n pac_type = 'free' if status == 'failed' else member.payment_account.type\n self.assertEqual(member.account_type, pac_type)",
"def update_status(self):\n logging.info(\"Updating status\")\n active_messages = ['Unit is ready']\n for check in self.custom_status_checks:\n _result = check()\n if isinstance(_result, ActiveStatus):\n if _result.message:\n active_messages.append(_result.message)\n else:\n self.unit.status = _result\n return\n\n if self._stored.series_upgrade:\n self.unit.status = BlockedStatus(\n 'Ready for do-release-upgrade and reboot. '\n 'Set complete when finished.')\n return\n\n if self._stored.is_paused:\n self.unit.status = MaintenanceStatus(\n \"Paused. Use 'resume' action to resume normal service.\")\n return\n\n missing_relations = []\n for relation in self.REQUIRED_RELATIONS:\n if not self.model.get_relation(relation):\n missing_relations.append(relation)\n if missing_relations:\n self.unit.status = BlockedStatus(\n 'Missing relations: {}'.format(', '.join(missing_relations)))\n return\n\n _, services_not_running_msg = os_utils.ows_check_services_running(\n self.services(), ports=[])\n if services_not_running_msg is not None:\n self.unit.status = BlockedStatus(services_not_running_msg)\n return\n\n if self._stored.is_started:\n _unique = []\n # Reverse sort the list so that a shorter message that has the same\n # start as a longer message comes first and can then be omitted.\n # eg 'Unit is ready' comes after 'Unit is ready and clustered'\n # and 'Unit is ready' is dropped.\n for msg in sorted(list(set(active_messages)), reverse=True):\n dupes = [m for m in _unique if m.startswith(msg)]\n if not dupes:\n _unique.append(msg)\n self.unit.status = ActiveStatus(', '.join(_unique))\n else:\n self.unit.status = WaitingStatus('Charm configuration in progress')\n\n logging.info(\"Status updated\")",
"def application_form_incomplete():\n logger.info(\"corn job started time membership incomplete mail is = %s\", datetime.datetime.now())\n\n try:\n for membership in MemberShip.objects.filter(status__iexact=PAPER_STATUS['DRAFT']): \n if membership.user.is_active:\n logger.info(membership.user.email)\n form_link = reverse(\"koscientific:application_form\")\n full_link = \"{}{}\".format(settings.DOMAIN_NAME, form_link)\n\n mail_dict = {\n 'subject' : 'Complete your membership details',\n 'plain_message' : \"Hi. your membership form in draft state please click here {} to complete\".format(full_link) ,\n 'recipient_list' : '{}'.format(membership.user.email),\n }\n mail_count = membership.email_membership_aduits.filter(mem_status=EmailMembershipAduit.MEMBERSHIP_INCOMPLETE).count()\n logger.info('mail count', mail_count)\n \n if mail_count > 0:\n first_mail = membership.email_membership_aduits.filter(mem_status=EmailMembershipAduit.MEMBERSHIP_INCOMPLETE).order_by('created_at').first()\n td = timezone.now() - first_mail.created_at\n days, hours, minutes = td.days, td.seconds // 3600, td.seconds % 3600 / 60.0\n logger.info(days, hours, minutes)\n\n if mail_count == 1 and days >= 3:\n # that 3th day\n logger.info('3rd day day mail')\n tigger_membership_incomplete_mail('3rd day mail sending', membership, mail_dict)\n elif mail_count == 2 and days >= 7:\n # that 7th day\n logger.info('7th day day mail')\n tigger_membership_incomplete_mail('7th day mail sending', membership, mail_dict)\n else:\n # that 0th day\n logger.info('first day mail')\n tigger_membership_incomplete_mail('1st day mail sending', membership, mail_dict)\n\n except Exception as e:\n logger.info(\"corn job unable to send membership incomplete mail = %s\", e)\n \n logger.info(\"corn job stoped time is = %s\", datetime.datetime.now())\n logger.info(\"==========================================================\")",
"def test_set_membership_status(mocker) -> None:\n with get_client().context():\n mocker.patch('database.memberships.Memberships.put', return_value=ndb.Key(Memberships, create_id()))\n mocker.patch('database.memberships.Memberships.query', return_value=MembershipsQueryMock())\n\n with test_app().app_context():\n memberships_view: MembershipsView = MembershipsView()\n\n uid: str = membership_mock_data['uid']\n organization_id: str = config_instance.ORGANIZATION_ID\n status: str = membership_mock_data['payment_status']\n response, status = memberships_view.set_membership_payment_status(organization_id=organization_id,\n uid=uid, status=status)\n response_data: dict = response.get_json()\n assert status == status_codes.successfully_updated_code, response_data['message']\n assert response_data.get('payload') is not None, response_data['message']\n\n mocker.stopall()",
"def status():\n createDisplayMeetingInfo()\n createDisplayIntersectedTimes()\n createDisplayResponders()\n return render_template('status.html')",
"def status(self, mailbox, *names):\n cmd = b'STATUS'\n\n preparedMailbox = _prepareMailboxName(mailbox)\n try:\n names = b' '.join(self._statusNames[name] for name in names)\n except KeyError:\n raise ValueError(\"Unknown names: {!r}\".format(\n set(names) - set(self._statusNames)\n ))\n\n args = b''.join([preparedMailbox,\n b\" (\", names, b\")\"])\n resp = (b'STATUS',)\n d = self.sendCommand(Command(cmd, args, wantResponse=resp))\n d.addCallback(self.__cbStatus)\n return d",
"def test_get_team_membership_status(self):\n pass",
"def update(self, adults, contact): \r\n #Blank list to store the infection status of the people in the list.\r\n r = [] \r\n #Counter to determine which position in the list of whether each person is in contact with an \r\n #infected person to use.\r\n i = 0\r\n try:\r\n \r\n #Looping through all the people.\r\n for adult in adults:\r\n #If they have been in contact with an infected person, change their contact status.\r\n a = self.spread(adults, contact)[i]\r\n #Move onto the next number in the list.\r\n i += 1\r\n if a == \"yes\":\r\n contact = \"yes\"\r\n else:\r\n contact = \"no\"\r\n #Define what the status is.\r\n q = adult.updatestatus(contact) \r\n #Moving the people.\r\n adult.movex()\r\n adult.movey()\r\n #Populate the list of infection status.\r\n r.append(q) \r\n except:\r\n print(\"something went wrong\")\r\n #Return the list of which people are infected.\r\n return r",
"def queue_status(targets='all'):",
"def test_someBouncesAndSuccesses(self):\n ds = self.makeDeliveries(u'test1@example.com', u'test2@example.com',\n u'test3@example.com')\n ds[0].mailSent(None, self.scheduler)\n ds[1].failureSending(self.makeBounceError(), self.scheduler)\n self.assertEqual(set(self.message.iterStatuses()),\n set([exmess.SENT_STATUS, exmess.UNREAD_STATUS,\n exmess.OUTBOX_STATUS]))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get labelled predictions by thresholding the raw probability values and joining the class names that pass the threshold
|
def threshold_predictions(preds, classes, prediction_threshold=0.5):
labelled_preds = [' '.join([classes[i] for i, p in enumerate(pred) if p > prediction_threshold])
for pred in preds]
return labelled_preds
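A quick usage sketch for the helper above. The class names and probability values are invented for illustration and assume each row of `preds` holds per-class probabilities in [0, 1].

preds = [
    [0.92, 0.10, 0.73],   # classes 0 and 2 clear the default 0.5 threshold
    [0.05, 0.40, 0.20],   # nothing clears 0.5, so the label string is empty
]
classes = ['cat', 'dog', 'bird']

print(threshold_predictions(preds, classes))                            # ['cat bird', '']
print(threshold_predictions(preds, classes, prediction_threshold=0.3))  # ['cat bird', 'dog']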
|
[
"def __predict_label(self, label_probs):\n def driver(prob):\n candidate = np.argmax(prob)\n if candidate == 0 and prob[0] > self.model_paras['threshold_positive']:\n return 0\n elif candidate == 2 and prob[2] > self.model_paras['threshold_negative']:\n return 2\n else:\n return 1\n\n labels = list(map(driver, label_probs))\n return labels",
"def predictions_linear(input_data,weights,threshold):\n prediction = input_data@weights\n labels_predicted = [1 if x > threshold else -1 for x in prediction]\n return labels_predicted",
"def get_onehot_label_threshold(scores, threshold=0.5):\n scores = np.array(scores)\n predicted_onehot_labels = np.zeros(scores.shape)\n predicted_onehot_labels[np.array(scores) >= threshold] = 1\n scores_max = np.argmax(scores, axis=-1)\n predicted_onehot_labels[np.array(list(range(len(scores)))), scores_max] = 1\n return predicted_onehot_labels",
"def predict(self, testing_data):\n\n\tlabels = list()\n\ttesting_data = np.asarray(testing_data)\n\n\tif len(testing_data.shape) == 1 or testing_data.shape[1] == 1:\n\t testing_data = testing_data.reshape(1,len(testing_data))\n\n\tfor i,vec in enumerate(testing_data):\n\t # initialize gaussian log probabilities for each tag\n\t gaussian_lp = {tag:0.0 for tag in self.unique_labels}\n\t for j,val in enumerate(vec):\n\t\tfor tag in self.unique_labels:\n\t\t # compute conditional probability\n\t\t gs_prob = self.gaussian(val, tag, j)\n\t\t if gs_prob:\t # filter zero probabilities\n\t\t\tgaussian_lp[tag] += np.log(gs_prob) \n\t # multiply priors\n\t for tag in self.unique_labels:\n\t\tgaussian_lp[tag] += self.prior_prob[tag]\n\t labels.append(max(gaussian_lp.items(), key=lambda x:x[1])[0])\n\n\treturn labels",
"def get_label_using_scores_by_threshold(scores, threshold=0.5):\n predicted_labels = []\n predicted_values = []\n scores = np.ndarray.tolist(scores)\n for score in scores:\n count = 0\n index_list = []\n value_list = []\n for index, predict_value in enumerate(score):\n if predict_value > threshold:\n index_list.append(index)\n value_list.append(predict_value)\n count += 1\n if count == 0:\n index_list.append(score.index(max(score)))\n value_list.append(max(score))\n predicted_labels.append(index_list)\n predicted_values.append(value_list)\n return predicted_labels, predicted_values",
"def binarize_preds(predictions: torch.Tensor, threshold=0.5) -> torch.Tensor:\n return predictions.__ge__(threshold).int()",
"def nb_predict(X, class_prob, class_word_prob):\r\n Ypred = []\r\n ###################################################\r\n # Q8.1 Edit here\r\n ###################################################\r\n for i in range(len(X)):\r\n listinx = X[i]\r\n tempp = []\r\n for indexclass in range(len(class_prob)):\r\n p = calculatelog(class_prob[indexclass])\r\n for j in range(len(listinx)):\r\n if class_word_prob[indexclass, listinx[j][0]] == -1:\r\n continue\r\n p += listinx[j][1] * calculatelog(class_word_prob[indexclass, listinx[j][0]])\r\n\r\n tempp.append(p)\r\n\r\n if(tempp[0] > tempp[1]):\r\n Ypred.append(0)\r\n else:\r\n Ypred.append(1)\r\n\r\n return Ypred",
"def compute_labels(inputs, labels, threshold=0.71):\n global model\n\n outputs = model(**inputs, labels=labels)\n logits = outputs[:2][1]\n\n return map_logit(logits.detach().numpy()[0], threshold=threshold)",
"def probability_labels(self, labels):\n\t\tlabels = labels[0]\n\t\tprobability = 1\n\t\tfor (i,j) in self.spans:\n\t\t\tif (i,j) in labels.keys():\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tprobability = probability * 0.5\n\t\tself.probability = probability\n\t\treturn",
"def label_data(self, X_art, y_probs):\n ans = []\n for y_prob in y_probs:\n inv_probs = np.zeros(len(y_prob), dtype=float)\n for i, prob in enumerate(y_prob):\n if prob == 0:\n inv_probs[i] = (2 ** 10)/len(y_prob)\n else:\n inv_probs[i] = 1.0/prob\n inv_probs = inv_probs/np.sum(inv_probs)\n \n # Calculate cumulative probabilities\n stats = [None]*len(inv_probs)\n stats[0] = inv_probs[0]\n for i in range(1, len(inv_probs)):\n stats[i] = stats[i-1] + inv_probs[i]\n ans.append(self._select_index_probabilistically(stats))\n\n return np.array(ans)",
"def threshold_labels(labels, thresh=None, act_class=1):\n#def threshold_labels(labels, thresh, act_class=1):\n finalLabels = np.zeros(labels.shape[1:], dtype=np.int32)\n for j in xrange(finalLabels.shape[0]):\n if thresh is None:\n finalLabels[j,:] = np.argmax(labels[:,j,:], axis=0)\n else:\n actVox = np.where(labels[act_class,j,:]>=thresh)\n finalLabels[j,actVox] = act_class\n return finalLabels",
"def predict(self, x_set):\n def classify(x):\n # Pick top-voted label among the k nearest neighbors.\n label_votes = self.knn_label_votes(x)\n return max(label_votes, key=label_votes.get)\n\n return np.array(list(map(classify, x_set)))",
"def predict_label_probability(texts, labels, text_new):\r\n\r\n train_twitter = texts\r\n test_twitter = text_new\r\n\r\n from sklearn.feature_extraction.text import CountVectorizer\r\n from sklearn.feature_extraction.text import TfidfTransformer\r\n from sklearn.linear_model import LogisticRegression\r\n\r\n count_vect = CountVectorizer()\r\n twitter_train_counts = count_vect.fit_transform(train_twitter)\r\n\r\n tf_transformer = TfidfTransformer(use_idf=False).fit(twitter_train_counts)\r\n twitter_train_tf = tf_transformer.transform(twitter_train_counts)\r\n\r\n\r\n tfidf_transformer = TfidfTransformer()\r\n twitter_train_tfidf = tfidf_transformer.fit_transform(twitter_train_counts)\r\n\r\n twitter_clf = LogisticRegression().fit(twitter_train_tfidf,labels)\r\n\r\n twitter_test_data = count_vect.transform(test_twitter)\r\n twitter_tfidf = tfidf_transformer.transform(twitter_test_data)\r\n\r\n\r\n twitter_predicted = twitter_clf.predict(twitter_tfidf)\r\n\r\n for text, class_label in zip(test_twitter, twitter_predicted):\r\n print('%r => %s' % (text, class_label))\r\n\r\n\r\n class_prob = list(twitter_clf.predict_proba(twitter_tfidf)[:,1])\r\n\r\n return class_prob\r\n pass",
"def __predict_with_threshold(self, clf, features):\n logging.debug('Entering __predict_with_threshold()')\n ti = time()\n predictions = Ensemble.__custom_predict(clf.predict_proba(features)[:, MINORITY_POS], \\\n clf.predict(features), self.threshold)\n logging.debug('prediction time: {:0.4f}'.format(time()-ti))\n logging.debug('Exiting __predict_with_threshold()')\n return predictions",
"def __predict_vote(self, features):\n logging.debug('Entering __predict_vote()')\n preds = self.__predict_with_threshold(self.voteclf, features)\n logging.debug('Exiting __predict_vote()')\n return preds",
"def predict_naive_bayes(D, p_y, p_v_y):\n import numpy as np\n pred =[] # list of integer labels\n p_y_d =[] # list of floats\n for doc in D: #[[a,d,f],[a,c]...]\n temp ={}\n p_d =0\n for i in p_y:\n temp[i]=0\n for word in doc:\n if word in p_v_y[i]:\n temp[i] += np.log(p_v_y[i][word])\n else:\n temp[i] += np.log(p_v_y[i]['<unk>']) \n temp[i]+=np.log(p_y[i])\n p_d+= np.exp(temp[i])\n max_value = max(temp.values())\n for inx in temp:\n if temp[inx] == max_value:\n max_inx = inx \n p_y_d.append(np.exp(max_value)/p_d)\n pred.append(int(max_inx)) # 0 or 1\n\n \n return pred, p_y_d",
"def prioritized_texts_with_label():\n print(\"getting prioritized texts\")\n n_texts = int(request.args.get('n'))\n texts = get_texts_only(n_all_samples)\n prios_texts = classifier.prioritize(map(lambda t: t['statement'], texts))\n # print(prios_texts)\n texts_prioritized = np.array(texts)[np.array(prios_texts)].tolist()\n to_label = texts_prioritized[:int(n_texts/2)]\n for sample in to_label:\n sample['label'] = \"\"\n\n texts_with_labels = random.sample([sample for sample in\n get_texts_with_labels(100, \"majority\")\n if sample['label'] in ['left', 'right']], int(n_texts/2))\n\n # print(texts_priotized)\n result = to_label + texts_with_labels\n random.shuffle(result)\n\n return jsonify({'data': result})",
"def from_prob_to_class(y_pred):\n\n N = y_pred.shape[0]\n y_pred_new = np.zeros(N)\n for i in range(N):\n if y_pred[i] >= 0.5:\n y_pred_new[i] = 1\n else:\n y_pred_new[i] = 0\n return y_pred_new",
"def evaluate(labels, predictions):\n i=0\n j=0\n total_true = 0\n total_wrong = 0\n for label,prediction in zip(labels,predictions):\n if label==1:\n total_true = total_true + 1\n if prediction == 1:\n i = i + 1\n else:\n total_wrong = total_wrong + 1\n if prediction == 0:\n j = j + 1\n sensitivity = float(i/total_true)\n specificity = float(j/total_wrong)\n return(sensitivity, specificity)\n\n\n\n\n raise NotImplementedError"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Click the button to close the dialog
|
def click_button_close(self):
# AutoGen method click_link: None
self.click_element(self.BUTTON_CLOSE)
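A hedged sketch of what `BUTTON_CLOSE` and `click_element` typically resolve to in a Selenium-backed page object; the CSS selector and the helper below are illustrative assumptions, not the project's actual locator or framework code.

from selenium.webdriver.common.by import By

BUTTON_CLOSE = (By.CSS_SELECTOR, "button.dialog-close")   # assumed locator

def click_element(driver, locator):
    # Resolve the (strategy, value) locator tuple and click the matching element.
    driver.find_element(*locator).click()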
|
[
"def close(self):\n\n Dialog.close(self)\n gui.no_modal_dialog=True",
"def on_action_close(self, content):\n self.widget().close()",
"def close_alert(self):\n self.nottreal.view.wizard_window.close_alert()",
"def close_window(self):\r\n Window.close()",
"def closePopup(self):\n try:\n popUp_button = self.driver.find_elements_by_xpath('//button[contains(@id, \"dialog-close\") and contains(@class, \"Button-No-Standard-Style close \")]')\n popUp_button[5].click()\n time.sleep(15)\n except:\n pass",
"def test_close_pushButton_closes_ui(self):\n dialog = project_manager.MainDialog()\n dialog.show()\n \n self.assertEqual(dialog.isVisible(), True)\n \n # now run the UI\n QTest.mouseClick(dialog.close_pushButton, Qt.LeftButton)\n self.assertEqual(dialog.isVisible(), False)",
"def closeWindowCallback(self, event):\n\t\tself.EndModal(self.status)",
"def closing_widget(self):\n pass",
"def CloseButton(*args, **kwargs):\n return _aui.AuiPaneInfo_CloseButton(*args, **kwargs)",
"def close_window(_):\n root.destroy()",
"def close_window(window):\r\n window.destroy()",
"def close_create_from_ado_query_window(self):\n self.visible_element_click(locators.SuiteManagerPageLocators.CLOSE_ICON,2)",
"def closeCityOptionsButton(self):\n\n gui.widgets[self._btn_city_opt_id].close()",
"def close_report(self):\n driver = self.driver\n # Buttons lack convenient labels. Finding by tag name\n button_div = driver.find_element_by_id(\"buttons2\")\n buttons = button_div.find_elements_by_tag_name(\"a\")\n # Click the \"Close Report\" button (assuming its the last one)\n buttons[-1].click()\n # Return Window focus\n driver.switch_to_window(driver.window_handles[-1])",
"def click_close(self) -> None:\r\n self.analyse_instances.clear()\r\n self.w.reinit_start_ui()",
"def close_modal(self) -> None:\n self._actions.close_modal += 1",
"def closeEvent(self, event):\n\n # Remove the viewer widget from the main GUI and exit.\n self.parent_gui.display_widget(None, display=False)\n self.close()",
"def close_modal(self):\n # In chrome, close button is not inside window\n # which causes click failures. To avoid this, just change\n # the position of the popup\n self.bring_model_inside_window()\n self.q(css=\".badges-modal .close\").click()\n EmptyPromise(lambda: not self.modal_displayed(), \"Share modal dismissed\").fulfill()",
"def OnCloseWindow(self):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Verify that the comment text matches the given text
|
def verify_comment_text(self, text=None):
self.element_text_should_be(self.COMMENT, text)
|
[
"def test_comment_check_spam(self):\n check_kwargs = {\n # Akismet guarantees this will be classified spam.\n \"comment_author\": \"viagra-test-123\",\n **self.base_kwargs,\n }\n self.assertTrue(self.api.comment_check(**check_kwargs))",
"def testComment(self):\n cyto = self.session.create_cytokine()\n\n self.util.stringTypeTest(self, cyto, \"comment\")\n\n self.util.stringPropertyTest(self, cyto, \"comment\")",
"def test_comment_check(self):\n self._mock_request(\n \"comment_check\",\n akismet.Akismet.COMMENT_CHECK_URL,\n \"true\",\n {\"comment_author\": \"viagra-test-123\"},\n )",
"def ShouldPassCommentCheck(self, line):\n self.assertEqual(\"\", self.checker.CommentIfAndIncludeCheck(1, line),\n \"Should not be flagged as style error: \" + line)",
"def test_comment(self):\n items = pulldom.parseString(SMALL_SAMPLE)\n for evt, _ in items:\n if evt == pulldom.COMMENT:\n break\n else:\n self.fail(\"No comment was encountered\")",
"def verify(self, plain_text):",
"def is_valid_comment(self, request: Request) -> bool:\n f = request.form\n if f['author'] and ['text']:\n return True\n return False",
"def _get_comment_text():\n comment_samples = [\n \"Malesu mauris nas lum rfusce vehicula bibend. Morbi.\",\n \"Nuncsed quamal felis donec rutrum class ipsumnam teger. Sedin metusd metusdo quamnunc utcras facilis nequen.\",\n \"Adipisci ent neque eger vehicula dis. Miquis auctorpr quamphas purusp phasel duifusce parturi. Ris liberoa ligula lacini risus nean. Arcualiq cubilia aenean nuncnunc ulum fringi uisque abitur rerit setiam. Nean miproin aliquet risusvi tempusp aliquete. Integer nequenu bulum ibulum laoree accumsan ellus mus odio uis. Amet curae ivamus congue aliquama liberofu que.\",\n \"Lorem ipsum dolor sit amet, consectetur adipiscing elit. In justov volutpat mus habitas dapibusc nequenu volutp justo. Quam blandi tur maurisd egesta erossed morbi turpis risus tate. Lacusp facilis class vehicula varius iaculis setiam montes pharetra. Usce ecenas quispr naeos nec nibhphas lacinia roin. Abitur maurisma metusqui justop uscras llam enas. Magnaqu faucibus sduis arcualiq imperd teger egetlor teger.\",\n \"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Conseq tristiq enas duis sociosqu eduis enimsed tudin vel. Lus semnunc risusm nulla parturi atein at placerat. Tiam laut nibhnul turpisn vitaenul eleifen commodo euismo quat posuered. Egestas nullain justop maurisin purusp donec nas liberofu aptent. Nec aliquam tiam puruscra turpisp luctus proin. Lectusin turpisn usce orcivest nullam eget arcuduis tdonec min. Esent cursus vulput aenean bulum lacini congued pretiu. Portamor bulum tate isse llam cidunt estmae.\\n\\nSque leocras fusce nullap fusce convall laoreet nibhnull estsusp. Roin aliquet esent ctetur blandit etiam nequesed viverr. Nislqu sse orciduis lacusp in tasse gravida lla ullam. Itnunc id mauris rerit entum disse lacinia. Oin luctus velit musetiam onec potenti ipsump volutp. Tortor musetiam bibendum onec esent libero esque sim. Enas ras eclass placerat sedin risusut vulput enimdon montes. Rhoncus dolorma estsusp facilis etsed llaut esque cursus. Nisl ullamcor tincid llus nulla iaculis.\",\n ]\n return random.choice(comment_samples)",
"def contains_codechecker_comment(fp):\n pos_before_read = fp.tell()\n if pos_before_read != 0:\n fp.seek(0)\n source_text = fp.read()\n match = \"codechecker_\" in source_text\n fp.seek(pos_before_read)\n if not match:\n return False\n return True",
"def _is_comment(self,line: str) -> bool:\n if line[0].isdigit():\n return False\n else:\n return True",
"def test_comment_check_not_spam(self):\n check_kwargs = {\n # Akismet guarantees this will not be classified spam.\n \"user_role\": \"administrator\",\n **self.base_kwargs,\n }\n self.assertFalse(self.api.comment_check(**check_kwargs))",
"def _is_comment(line):\n code_counter = 0\n code_word = keyword.kwlist\n for word in line:\n if word == code_word:\n code_counter += 1\n return code_counter < num_max_of_python_word_for_comment",
"def _is_comment_line(self, line):\r\n return line[0] in self.comment_chars",
"def ShouldFailCommentCheck(self, line):\n error = self.checker.CommentIfAndIncludeCheck(1, line)\n self.assertNotEqual(\"\", error, \"Should be flagged as style error: \" + line)\n highlight = test_util.GetHighlight(line, error).strip()\n self.assertTrue(highlight.startswith((\"<if\", \"<include\")))",
"def test_already_formatted_block_comment(self):\n \n inp = '2_5_block_comment.txt'\n self.run_single_file_case(inp)",
"def comment_print(comment_sample, valid):\n if valid:\n print(\" Valid Comment:\\t\\t\" + comment_sample)\n else:\n print(\"Invalid Comment:\\t\\t\" + comment_sample)",
"def extract_comments(self, sid, text):\n pass",
"def is_comment(self, line):\r\n return line.startswith(self.comment_chars) or not line",
"def is_comment_sym(text):\n return text in (COMMENT_MARKER, COMMENT_SYM_DEPRECATED)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that setup fails when no directory is found.
|
async def test_setup_fails_on_no_dir(
hass: HomeAssistant, caplog: pytest.LogCaptureFixture
) -> None:
with patch(
"homeassistant.components.python_script.os.path.isdir", return_value=False
):
res = await async_setup_component(hass, "python_script", {})
assert not res
assert "Folder python_scripts not found in configuration folder" in caplog.text
|
[
"def test_check_dir_existence_sub_dir_not_found(self):\n self.assertFalse(self.existing_dirs.append('unexpected_dir'))",
"def test_default_output_dir_exists():\n\n assert os.path.exists(\"corems_output\")",
"def test_DataDirPresent(self):\n\t\tself.assertEqual(os.path.isdir('Data'), True)",
"def test_processdir_not_implemented(self, groomer):\n with pytest.raises(ImplementationRequired):\n groomer.processdir('.', '.')",
"def test_discover_conf_py_directory_not_found():\n with tempfile.TemporaryDirectory() as tempdir:\n with pytest.raises(FileNotFoundError):\n discover_conf_py_directory(tempdir)",
"def test_check_dir_existence_root_is_wrong(self):\n self.assertFalse(check_dir_existence('/some/wrong/path', self.existing_dirs))",
"def test_nonexisting_path_raises():\n with pytest.raises(NotADirectoryError, match=\"Definitions directory not found: foo\"):\n nc.Nomenclature(\"foo\")",
"def test_setup(self):\n cfgdir = mkdtemp()\n cmd = f'kepler setup --path {cfgdir}'.split()\n try:\n out = check_output(cmd)\n self.assertTrue(out.decode('utf-8').rstrip().endswith('Welcome to Kepler!'))\n self.check_config(cfgdir)\n finally:\n rmtree(cfgdir)",
"def test_exit_if_configfile_not_found_when_rootdir_explicitly_specified(tmp_path):\n os.chdir(tmp_path)\n cwd = Path.cwd()\n with pytest.raises(SystemExit):\n get_configdict(rootdir_path=cwd)",
"def test_make_sure_path_exists(self):\n new_directory = os.path.join(self.tmp_dir_name, 'new_directory')\n pycgmIO.make_sure_path_exists(new_directory)\n assert os.path.isdir(new_directory)",
"def test_setup_project():\n setup_project('testing')\n dir_list = ['/archive', '/db', '/jobpool/short', '/jobpool/priority', '/jobpool/long', '/lost+found',\n '/screeninglib/geometrylib', '/screeninglib/structurelib', '/job_templates']\n file_list = ['/testing.config', '/screeninglib/building_blocks.dat', '/screeninglib/config.dat']\n for dir in dir_list:\n assert True == os.path.isdir('testing' + dir)\n for file in file_list:\n assert True == os.path.isfile('testing' + file)",
"def testInitPresence(self):\n for fileName in self.files:\n if os.path.isdir(fileName):\n self.assertTrue(\n os.path.isfile(\n os.path.join(fileName, '__init__.py')\n )\n )",
"def testCheckDirectory(self):\n with self.assertRaises(ValueError):\n check_parent(\"\")\n with self.assertRaises(ValueError):\n check_parent(None)\n check_parent(\"sample/not_here.tif\")\n check_parent(\"output/create_me/create_me_as_well/\")\n check_parent(\"output/create_me_too/file_here.tif\")\n self.assertTrue(os.path.exists(\"output/create_me/\"))\n self.assertTrue(os.path.exists(\"output/create_me/create_me_as_well\"))\n self.assertTrue(os.path.exists(\"output/create_me_too/\"))",
"def verifyOutputDir(self, dirname):\n print \"Verifing output dir %s\" % dirname\n if (not path.exists(dirname)):\n print \"Path doesn't exist\"\n makedirs(dirname)",
"def test_create_db_dir_if_not_exist(self):\n databasemanager.DatabaseManager(driftwood())",
"def test_safe_copy_makedir_doesnt_exist(self):\n pass",
"def test_create_non_existing_path():\n\t\n\twith workspace() as ws:\n\t\tws.run(\n\t\t\t'venv --no-activate foo/venv',\n\t\t\texpect_error = True,\n\t\t\texpect_stderr_contains = 'does not exist')\n\t\t\n\t\tws.check_dir()",
"def test_discover_package_doc_dir_not_found():\n with tempfile.TemporaryDirectory() as tempdir:\n with pytest.raises(FileNotFoundError):\n discover_package_doc_dir(tempdir)",
"def test_run_no_build_dir(self):\n environment.set_value('BUILD_DIR', '')\n fuzzer = TestEngineFuzzer()\n with self.assertRaisesRegex(builtin.BuiltinFuzzerError, 'BUILD_DIR'):\n fuzzer.run('/input', '/output', 1)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that enumerate is accepted and executed.
|
async def test_using_enumerate(hass: HomeAssistant) -> None:
source = """
for index, value in enumerate(["earth", "mars"]):
hass.states.set('hello.{}'.format(index), value)
"""
hass.async_add_job(execute, hass, "test.py", source, {})
await hass.async_block_till_done()
assert hass.states.is_state("hello.0", "earth")
assert hass.states.is_state("hello.1", "mars")
|
[
"def test_my_enumerate(self):\r\n self.assertTrue(list(my_enumerate([\"ejona\"])) == list(enumerate([\"ejona\"])))\r\n self.assertFalse(list(my_enumerate([\"ejona\"])) == list(enumerate([\"ejonaejona\"])))\r\n self.assertTrue(list(my_enumerate([3, 33, 11, 44, 67])) == list(enumerate([3, 33, 11, 44, 67])))",
"def test_run_intcode(initial: List, expected: List) -> None:\n program = Intcode(initial)\n program.run()\n assert program.get_program() == expected",
"def test_enumerate(self):\n labels = [\"III\", \"IXI\", \"IYY\", \"YIZ\", \"XYZ\", \"III\"]\n with self.assertWarns(DeprecationWarning):\n pauli = PauliTable.from_labels(labels)\n for idx, i in enumerate(pauli):\n with self.assertWarns(DeprecationWarning):\n self.assertEqual(i, PauliTable(labels[idx]))",
"def test_incorrect_start(start):\n with raises(TypeError):\n next(ienumerate([21], start))",
"def test_enumerate_fields(self):\n #Run command\n result_json = self.run_cmd(\"\")\n \n #validate JSON returned\n data_validator ={\n \"type\": \"object\",\n \"properties\": {\n \"enum\": { \"$ref\": \"#/pScheduler/PluginEnumeration/Archiver\" }\n },\n \"additionalProperties\": False,\n \"required\": [\"enum\"]\n }\n valid, error = json_validate({\"enum\": result_json}, data_validator)\n assert valid, error\n #verify name is as expected\n self.assertEqual(result_json['name'], self.name)",
"def test_enumerate_fields(self):\n #Run command\n result_json = self.run_cmd(\"\")\n\n #validate JSON returned\n data_validator ={\n \"type\": \"object\",\n \"properties\": {\n \"enum\": { \"$ref\": \"#/pScheduler/PluginEnumeration/Test\" }\n },\n \"additionalProperties\": False,\n \"required\": [\"enum\"]\n }\n valid, error = json_validate({\"enum\": result_json}, data_validator)\n assert valid, error\n #verify name is as expected\n self.assertEqual(result_json['name'], self.name)\n #scheduling class is as expected\n self.assertEqual(result_json['scheduling-class'], self.scheduling_class)",
"def _enumerate():\n res = hwi_enumerate()\n res += specter_enumerate()\n return res",
"def test_enum_open_first(device, be_opts, cli_args):\n\n if \"spdk\" == be_opts[\"be\"]:\n pytest.skip(\"SPDK does not support enumeration post open()/close()\")\n\n # Open/close device. This will make it disappear from the enumeration on SPDK\n dev = dev_from_params(device, be_opts)\n xnvme.xnvme_dev_close(dev)\n\n global DEVICE_COUNT\n DEVICE_COUNT = 0\n\n xnvme.xnvme_enumerate(None, None, callback_func, None)\n assert DEVICE_COUNT > 0",
"def test_reversed_enumeration(self):\n test_list = range(10)\n expected = [\n (0, 9), (-1, 8), (-2, 7), (-3, 6), (-4, 5),\n (-5, 4), (-6, 3), (-7, 2), (-8, 1), (-9, 0)\n ]\n result = [l for l in reverse_enumerate(test_list)]\n self.assertEquals(expected, result)",
"def test_scan_index(self, mock_helpers, mock_client):\n # cast to list to force evaluation of the generator\n response = list(scan_index(\"foo\", ExampleModel))\n mock_helpers.scan.assert_called_once_with(mock_client.return_value, index=\"foo\")\n assert response == list(mock_helpers.scan.return_value)",
"def test_two_actions():\n assert intcode([1,1,1,4,99,5,6,0,99]) == [30,1,1,4,2,5,6,0,99], \\\n \"Should be [30,1,1,4,2,5,6,0,99].\"",
"def test_reversed_enumeration_option_params(self):\n test_list = range(10)\n expected = [\n (9, 9), (8, 8), (7, 7), (6, 6), (5, 5),\n (4, 4), (3, 3), (2, 2), (1, 1), (0, 0)\n ]\n result = [l for l in reverse_enumerate(test_list, 9)]\n self.assertEquals(expected, result)",
"def test_create_index_variable_loop():\n create_index_variable_loop() # Should raise exception if fails",
"def test_list_runs_io(self):\n pass",
"def run_intcode(state):\n while state.ic < len(state.intcode):\n opcode = state.intcode[state.ic] % 100\n if opcode not in opcodes:\n raise Exception(f'Illegal opcode {opcode} at index {state.ic}')\n opcodes[opcode](state)\n if state.ic == 0:\n return",
"def test_iterconstantsIdentity(self):\n constants = list(self.STATUS.iterconstants())\n again = list(self.STATUS.iterconstants())\n self.assertIs(again[0], constants[0])\n self.assertIs(again[1], constants[1])",
"def test_iter(self):\n\n enum = self.test_construct()\n\n _struct = {\n 'BLUE': 0x0,\n 'RED': 0x1,\n 'GREEN': 0x2}\n\n for key, value in enum:\n assert key in _struct\n assert _struct[key] is value\n _struct[key] = True\n\n assert all(_struct.itervalues()) # make sure all values touched",
"def test_iter(self):\n labels = [\"III\", \"IXI\", \"IYY\", \"YIZ\", \"XYZ\", \"III\"]\n with self.assertWarns(DeprecationWarning):\n pauli = PauliTable.from_labels(labels)\n for idx, i in enumerate(iter(pauli)):\n with self.assertWarns(DeprecationWarning):\n self.assertEqual(i, PauliTable(labels[idx]))",
"def test_iterconstantsIdentity(self):\n constants = list(self.METHOD.iterconstants())\n again = list(self.METHOD.iterconstants())\n self.assertIs(again[0], constants[0])\n self.assertIs(again[1], constants[1])\n self.assertIs(again[2], constants[2])\n self.assertIs(again[3], constants[3])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test datetime and time modules exposed.
|
async def test_exposed_modules(
hass: HomeAssistant, caplog: pytest.LogCaptureFixture
) -> None:
caplog.set_level(logging.ERROR)
source = """
hass.states.set('module.time', time.strftime('%Y', time.gmtime(521276400)))
hass.states.set('module.time_strptime',
time.strftime('%H:%M', time.strptime('12:34', '%H:%M')))
hass.states.set('module.datetime',
datetime.timedelta(minutes=1).total_seconds())
"""
hass.async_add_executor_job(execute, hass, "test.py", source, {})
await hass.async_block_till_done()
assert hass.states.is_state("module.time", "1986")
assert hass.states.is_state("module.time_strptime", "12:34")
assert hass.states.is_state("module.datetime", "60.0")
# No errors logged = good
assert caplog.text == ""
|
[
"def test_setup_datetime(self):\n\n # Build the package\n self.run_setup('build')\n\n import stsci.testpackage\n\n assert hasattr(stsci.testpackage, '__setup_datetime__')\n prev = stsci.testpackage.__setup_datetime__\n now = datetime.now()\n # Rebuild\n # So that there's less chance for ambiguity\n time.sleep(1)\n self.run_setup('build')\n\n reload(stsci.testpackage.version)\n reload(stsci.testpackage)\n\n import stsci.testpackage\n\n assert hasattr(stsci.testpackage, '__setup_datetime__')\n assert stsci.testpackage.__setup_datetime__ > now\n assert stsci.testpackage.__setup_datetime__ > prev",
"def test_time(self):\n self.logTestName()\n self.assertEqual(self.t, self.r)",
"def test_user_tracked_times(self):\n pass",
"def test_time_series(self):\n\n assert False",
"async def test_set_datetime_time(hass: HomeAssistant) -> None:\n await async_setup_component(\n hass, DOMAIN, {DOMAIN: {\"test_time\": {\"has_time\": True, \"has_date\": False}}}\n )\n\n entity_id = \"input_datetime.test_time\"\n\n dt_obj = datetime.datetime(2017, 9, 7, 19, 46, 30)\n\n await async_set_date_and_time(hass, entity_id, dt_obj)\n\n state = hass.states.get(entity_id)\n assert state.state == dt_obj.strftime(FORMAT_TIME)\n assert state.attributes[\"has_time\"]\n assert not state.attributes[\"has_date\"]\n\n assert state.attributes[\"timestamp\"] == (19 * 3600) + (46 * 60) + 30",
"async def test_set_datetime_2(hass: HomeAssistant) -> None:\n await async_setup_component(\n hass, DOMAIN, {DOMAIN: {\"test_datetime\": {\"has_time\": True, \"has_date\": True}}}\n )\n\n entity_id = \"input_datetime.test_datetime\"\n\n dt_obj = datetime.datetime(\n 2017, 9, 7, 19, 46, 30, tzinfo=dt_util.get_time_zone(hass.config.time_zone)\n )\n\n await async_set_datetime(hass, entity_id, dt_obj)\n\n state = hass.states.get(entity_id)\n assert state.state == dt_obj.strftime(FORMAT_DATETIME)\n assert state.attributes[\"has_time\"]\n assert state.attributes[\"has_date\"]\n\n assert state.attributes[\"year\"] == 2017\n assert state.attributes[\"month\"] == 9\n assert state.attributes[\"day\"] == 7\n assert state.attributes[\"hour\"] == 19\n assert state.attributes[\"minute\"] == 46\n assert state.attributes[\"second\"] == 30\n assert state.attributes[\"timestamp\"] == dt_obj.timestamp()",
"def test_time_valid_init(generic_task):\n assert generic_task.get_time_valid() == '0000'",
"async def test_set_datetime(hass: HomeAssistant) -> None:\n await async_setup_component(\n hass, DOMAIN, {DOMAIN: {\"test_datetime\": {\"has_time\": True, \"has_date\": True}}}\n )\n\n entity_id = \"input_datetime.test_datetime\"\n\n dt_obj = datetime.datetime(\n 2017, 9, 7, 19, 46, 30, tzinfo=dt_util.get_time_zone(hass.config.time_zone)\n )\n\n await async_set_date_and_time(hass, entity_id, dt_obj)\n\n state = hass.states.get(entity_id)\n assert state.state == dt_obj.strftime(FORMAT_DATETIME)\n assert state.attributes[\"has_time\"]\n assert state.attributes[\"has_date\"]\n\n assert state.attributes[\"year\"] == 2017\n assert state.attributes[\"month\"] == 9\n assert state.attributes[\"day\"] == 7\n assert state.attributes[\"hour\"] == 19\n assert state.attributes[\"minute\"] == 46\n assert state.attributes[\"second\"] == 30\n assert state.attributes[\"timestamp\"] == dt_obj.timestamp()",
"def test_date_arithmetic(self) -> None:\r\n result1 = date_arithmetic()\r\n result2 = date_arithmetic()\r\n self.assertEqual(result1, result2)",
"def test_import_time(pytester: pytest.Pytester) -> None:\n r = pytester.run(\n sys.executable, \"-We\", \"-c\", \"import aiohttp_debugtoolbar\", timeout=0.6\n )\n\n assert not r.stdout.str()\n assert not r.stderr.str()",
"def test_export_schedule(self):\n pass",
"def test_get_time_tracking_entry(self):\n pass",
"def test_create_time_tracking_entry(self):\n pass",
"def test_date():\n assert format_event_time(datetime.date(2016, 1, 15)) == '2016-01-15'",
"def test_datetime(snapshot):\n expect = datetime.datetime(2017, 11, 19)\n snapshot.assert_match(expect)",
"def test_object_times(self):\n ps = PlexosOutput('coad/test/mda_output.zip')\n expected = [datetime(2020, 4, 16, x) for x in range(24)]\n self.assertEqual(expected, ps['Line']['B1_B2'].get_data_times('Flow'))",
"def test_timeframes(self):\n pass",
"def tests(self):\n pass",
"def test_version_time_ok(self):\n self.execute('version time 2.0 \"%s\"' % self._test_date)\n rv, output = self.execute('version list')\n self.assertEqual(0, rv, output)\n self.assertExpectedResult(output)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test time.sleep warns once.
|
async def test_sleep_warns_one(
hass: HomeAssistant, caplog: pytest.LogCaptureFixture
) -> None:
caplog.set_level(logging.WARNING)
source = """
time.sleep(2)
time.sleep(5)
"""
with patch("homeassistant.components.python_script.time.sleep"):
hass.async_add_executor_job(execute, hass, "test.py", source, {})
await hass.async_block_till_done()
assert caplog.text.count("time.sleep") == 1
|
[
"def _sleep(self):\n self.kill()",
"def rand_sleep():\n time.sleep(random.uniform(0.75, 1.5))",
"def test_dummy_timer (self):\n with Timeout(None):\n sleep(DELAY)\n sleep(DELAY)",
"def test_sleep():\n job = Sleep()\n\n current = {\"index\": 0}\n\n def update_state(state, meta):\n assert state == \"RUNNING\"\n index = current[\"index\"]\n assert meta[\"log\"][index][\"level\"] == INFO\n assert meta[\"log\"][index][\"message\"].startswith(\n \"This is repetition {}\".format(index + 1)\n )\n current[\"index\"] += 1\n\n job.update_state = update_state\n\n job.run()",
"def sleepDelay(ms):\r\n time.sleep(ms/1000.0)",
"async def sleep(self, ctx):\r\n\r\n await self.client.change_presence(status=discord.Status.invisible)\r\n\r\n Database.Bot[\"sleeping\"] = True\r\n\r\n await ctx.send(\r\n f'Bot going to sleep.. will not respond again until `{Database.Main[ctx.guild.id].get(\"prefix\", \".\")}wake` is sent'\r\n )",
"def send_warning(user_id, message, seconds):\n\n\tprint 'in send warning'\n\ttime.sleep(seconds - 2)\n\tprint 'after sleep'\n\tut.send_response(message + str(seconds) + ' seconds.', user_id)",
"def test_kill_event() -> None:\n with FailureNotifier(\"test\", verbose=0, debug=True):\n print(\"Sleeping...\")\n time.sleep(30.0)\n print(\"Done Sleeping, you were too late!\")\n raise ValueError(\"Fuck\")",
"def contains_sleep2():\n output = check50.run(\"grep -c -w 'sleep 100 &' typescript\").stdout()\n if output == \"0\\n\":\n help = \"Make sure that you try all commands in the lab. To start the script command so that it appends to you typescript file, use 'script -a typescript'\"\n raise check50.Failure(help)",
"def responsive_sleep(self, seconds, wait_reason=''):\n for x in xrange(int(seconds)):\n if (self.config.wait_log_interval and\n not x % self.config.wait_log_interval):\n print '%s: %dsec of %dsec' % (wait_reason,\n x,\n seconds)\n time.sleep(1.0)",
"async def _sleep_on_error(self, delay: float = 1.0, deviation: float = 1.0):\n await asyncio.sleep(delay - deviation + 2 * deviation * random.random())",
"def wait_for_time():\n while rospy.Time().now().to_sec() == 1:\n pass",
"def _delay(self):\n time.sleep(random.randint(self.min_delay,self.max_delay)/1000.0)",
"def does_fall_asleep(self):\n return self.times_slept != []",
"def catch_alarm():\n comm_time_to_call_heart_beat = True",
"def wait():\n t = random.triangular(config.WAIT_MIN, config.WAIT_MAX)\n time.sleep(t)",
"def _thread_sleep(self) -> None:\n local_jm_interval = 2\n if isinstance(self._launcher, (LocalLauncher)):\n time.sleep(local_jm_interval)\n else:\n time.sleep(CONFIG.jm_interval)",
"def doSleep(self):\n if os.environ.get(\"TRAVIS\"):\n time.sleep(10)\n else:\n time.sleep(20)\n return",
"def logSleep(self, timeSlept):\n self.slept = timeSlept/3600"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Retrieve our generic cookie for Subscene. This cookie defines which subtitle language will be returned to us.
|
def _get_cookie(cls, referer):
return {"Cookie" : "LanguageFilter=" + cls.SELECTED_LANGUAGE + "; "
"ShowSubtitleDetails=true; " +
"ShowSubtitlePreview=false;",
"Referer" : referer}
|
[
"def get_lang_from_cookie(request, supported):\n from django.conf import settings\n lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)\n if lang_code and lang_code in supported:\n return lang_code\n else:\n return None",
"def insert_lang_cookie(response):\n\n if 'lang' in request.args:\n response.set_cookie('lang', request.args['lang'])\n\n return response",
"def read_cookie(self):\n if \"Cookie\" in self.headers:\n v = self.headers['Cookie']\n c = SimpleCookie()\n c.load(v)\n if 'sid' in c:\n value = c['sid'].value\n return value\n return None",
"def get_cookie(self, key):\n return self.state.get_cookie(key)",
"async def as_cookie(self) -> str:",
"def cookie(self):\r\n return self._cookie",
"def getSubtitleContent(cls, version_sub_stage):\r\n\r\n url = version_sub_stage.version_code\r\n subtitle_page = cls._my_perform_request(url)\r\n subtitle_url = Utils.getregexresults(\r\n SUBSCENE_REGEX.SUBTITLE_URL_PARSER, \r\n subtitle_page)\r\n\r\n # If for some reason we failed.\r\n if not subtitle_url:\r\n WriteDebug(\"Failed getting the subtitle url in page: %s\" % url)\r\n return None\r\n \r\n # regex results returned by Utils.getregexresults are returned as list.\r\n subtitle_url = subtitle_url[0]\r\n return Utils.DownloadSubAsBytesIO(\r\n SUBSCENE_PAGES.DOMAIN.replace('www.', ''), \r\n subtitle_url,\r\n cls._build_referer(url))",
"def session_cookie_name(self):\n r=Loader.capi.cppcms_capi_session_get_session_cookie_name(self.d)\n self.check()\n return r.decode()",
"def _decode_cookie(self, value):\n if not value:\n return None\n parts = value.split('|')\n if len(parts) != 3:\n return None\n name = self._cookie_name\n value, timestamp, sign = parts\n\n if self.session_max_age is not None:\n if int(timestamp) < int(time.time()) - self.session_max_age:\n return None\n\n expected_sign = self._get_signature(name, value, timestamp)\n if not hmac.compare_digest(expected_sign, sign):\n # TODO: log warning\n return None\n return value",
"def read_cookie(self, name):\n return self.request.cookies.get(name)",
"def get_cookie(self, key, default=None, secret=None):\n value = self.cookies.get(key)\n if value:\n value = urllib.unquote(value)\n if secret and value:\n dec = cookie_decode(value, secret) # (key, value) tuple or None\n return dec[1] if dec and dec[0] == key else default\n return value or default",
"def sub(self):\n return self.claims.get('sub')",
"def get_cookie():\n if \"HTTP_COOKIE\" in os.environ:\n cookie_string = os.environ.get(\"HTTP_COOKIE\")\n cookie = c.SimpleCookie()\n cookie.load(cookie_string)\n\n try:\n value = cookie[\"uname\"].value\n except Exception:\n value = \"\"\n\n return value",
"def _cookie_value(request):\n if _COOKIE_NAME in request.COOKIES:\n value = request.COOKIES[_COOKIE_NAME]\n blob = _deserialized_cookie_value(value)\n if blob is None:\n _debug_log('insane cookie value %s' % value)\n return _default_cookie_value(request.user.username)\n try:\n cookie_value = pyatdl_pb2.VisitorInfo0.FromString(blob)\n except message.Error as e:\n _debug_log('cookie message.Error %s' % unicode(e))\n return _default_cookie_value(request.user.username)\n if cookie_value.sanity_check == _SANITY_CHECK and cookie_value.username_hash == _username_hash(request.user.username):\n return cookie_value\n else:\n _debug_log('cookie sanity check %s' % cookie_value.sanity_check)\n return _default_cookie_value(request.user.username)\n return _default_cookie_value(request.user.username)",
"def get_language(khoros_object, identifier=None, category_details=None):\n return get_category_field(khoros_object, 'language', identifier, category_details)",
"def download_subtitle(self, subtitles, filename):\n sub = subtitles[0]\n return self.download_subtitle_by_id(sub.get_id(), sub.get_download_url(), filename)",
"def __create_sub(self, elements):\n country = PipocasSubtitleCountry(elements['country'], elements['country_flag'])\n return PipocasSubtitle(elements['id'], elements['release'], elements['poster'],\n country, elements['hits'], elements['rating'],\n elements['votes'], elements['download'])",
"def get_sub(self, sid):\n\n\t\t# Single stories don't have a sub-corpus.\n\t\tif (sid == 'a-tale-of-two-cities' or sid == 'peregrine-pickle' or \\\n\t\t\tsid == 'pride-and-prejudice' or sid == 'to-the-lighthouse' or \\\n\t\t\tsid == 'tristram-shandy'):\n\t\t\treturn None\n\t\telif self.btmsm.belongs(sid):\n\t\t\treturn 'BOTH_TEXTS_MINUS_SHAKES'\n\t\telif self.contcm.belongs(sid):\n\t\t\treturn 'contemporary'\n\t\telif self.mfacm.belongs(sid):\n\t\t\treturn 'mfa'\n\t\telif self.nf19Cm.belongs(sid):\n\t\t\treturn 'nonfiction-19C'\n\t\telif self.nf21Cm.belongs(sid):\n\t\t\treturn 'nonfiction-21C'\n\t\telif self.nycm.belongs(sid):\n\t\t\treturn 'ny-times'\n\t\telif self.pipcm.belongs(sid):\n\t\t\treturn 'klab'\n\t\telif self.percm.belongs(sid):\n\t\t\treturn 'period-novels'\n\t\telif self.stancm.belongs(sid):\n\t\t\treturn 'stanford'\n\t\telif self.wilkcm.belongs(sid):\n\t\t\treturn 'wilkens'\n\t\telse:\n\t\t\traise ValueError(\"Unrecognized story id, \" + sid + \".\")",
"def get_language():\n from django.conf import settings as st\n from django.utils import translation\n return request.session.get('language', st.LANGUAGE_CODE)\n return \"vi\""
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Build a referer URL for Subscene. page should start with a single slash, i.e. "/".
|
def _build_referer(cls, page):
return "http://" + SUBSCENE_PAGES.DOMAIN + page
|
[
"def get_page_url(self):\n mission_part = 'mission=' + self.mission\n id_num = self.image_id.split('-')[2]\n id_part = 'roll=E&frame=' + id_num\n page_url = infopage + mission_part + '&' + id_part\n return page_url",
"def create_wiki_url(page: str) -> str:\n return f\"https://genshin-impact.fandom.com{page}\"",
"def url(self):\n if self.slug:\n return self.slug\n return url_for('page_view',page_id=self.id)",
"def __get_url():\n url = request.args.get('url', request.referrer)\n if url is None:\n return None\n parts = urlparse(url)\n return parts.netloc + parts.path",
"def generateChargifyAPIURL(page_shortname, resource_id):\n url = 'https://%s.chargify.com/%s/%s.json' % (\n CONFIG['chargify_sub_domain'], page_shortname, resource_id)\n return url",
"def sub_page_URL_generator(vendor,page_URL,pageCount):\n search_query = \"?\"+scrape_elements.websites[vendor]['page-query']\n constructed = page_URL + search_query + \"=\" + str(pageCount)\n return constructed",
"def complete_url(half_url):\n # Join the url with the href of world news\n full_url = url + half_url\n return full_url",
"def generateHostedURL(page_shortname, resource_id):\n token = generateSecretToken(page_shortname, resource_id)\n url = 'https://%s.chargify.com/%s/%s/%s' % (\n CONFIG['chargify_sub_domain'], page_shortname, resource_id, token)\n return url",
"def construct_scrape_page_url(self, date, base=BASE_URL):\n return base + '?date=' + date",
"def homepage_url(self):\n return self.request.link(self.app.org)",
"def get_instance_frontpage_url(self):\n if self.__class__.__no_role_and_flatten_rolefrontpage_url():\n return self.rolefrontpage_url()\n else:\n return reverse('{}-frontpage'.format(self.id))",
"def GetMainPageUrl(self):\n\n return \"/\"",
"def permalink(self):\n return \"/\".join(\n filter(\n None,\n [\n self.page.region.slug,\n self.language.code,\n self.ancestor_path,\n self.slug,\n ],\n )\n )",
"def changePageURL(self) -> None:\r\n offset = int(re.search(r\"\\d+\\Z\", self.root_url).group(0)) + 60\r\n self.root_url = re.sub(r\"\\d+\\Z\", \"\", self.root_url) + str(offset)",
"def _url(route):\n return \"%s%s\" % (c['base_address'], route)",
"def _build_url(self, route):\n return \"{0}/{1}\".format(self.base_url, route)",
"def construct_url(context, request):",
"def build_base_url(self):\n self.__base_url = \"https://www.tripadvisor.%s\" % (self.__review_language)",
"def portal_url():\n return portal().absolute_url()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Performs a simple request, adding the cookies needed for Subscene. If remove_tabs is true, the function removes all the "\t"s in the returned content.
|
def _my_perform_request(cls, page, remove_tabs = True):
referer = cls._build_referer(page)
cookie = cls._get_cookie(referer)
content = Utils.PerformRequest(
# Subscene's server will return 301 code (Redirection) when we
# request pages with the HOST set to www.subscene.com, so we
# replace it with simply subscene.com.
SUBSCENE_PAGES.DOMAIN.replace('www.', ''),
page,
'',
Utils.HttpRequestTypes.GET,
cookie)
if remove_tabs:
content = content.replace("\t", "")
return content
|
[
"def main(request, response):\n\n token = \"ArQvBL/jhDJ62HaUm/ak0dIUYDjZAfeCQTXwa92cOrHZbL7R+bhb3qrVO2pHWkgJPgvIzvLX5m3wfaUJfOKY0Q4AAABqeyJvcmlnaW4iOiAiaHR0cHM6Ly93d3cud2ViLXBsYXRmb3JtLnRlc3Q6ODQ0NCIsICJmZWF0dXJlIjogIk9yaWdpbklzb2xhdGlvbkhlYWRlciIsICJleHBpcnkiOiAyMDAwMDAwMDAwfQ==\"\n\n header_order = request.GET.first(\"headerOrder\")\n if header_order == \"otoi\":\n response.headers.set(\"Origin-Trial\", token)\n response.headers.set(\"Origin-Isolation\", \"?1\")\n elif header_order == \"oiot\":\n response.headers.set(\"Origin-Isolation\", \"?1\")\n response.headers.set(\"Origin-Trial\", token)\n else:\n raise AssertionError(\"Invalid headerOrder\")\n\n response.headers.set(\"Content-Type\", \"text/html\")\n\n return \"\"\"\n <!DOCTYPE html>\n <meta charset=\"utf-8\">\n <title>Helper page for origin isolation tests</title>\n\n <script type=\"module\">\n window.onmessage = e => {\n if (e.data.constructor === WebAssembly.Module) {\n parent.postMessage(\"WebAssembly.Module message received\", \"*\");\n } else if (e.data.command === \"set document.domain\") {\n document.domain = e.data.newDocumentDomain;\n parent.postMessage(\"document.domain is set\", \"*\");\n }\n };\n\n window.onmessageerror = () => {\n parent.postMessage(\"messageerror\", \"*\");\n };\n </script>\n \"\"\"",
"def data_not_persist_across_requests_on_method_level():\n s = requests.Session()\n resp = s.get('http://httpbin.org/cookies', cookies={'from-my':'browser'})\n print resp.text\n resp = s.get('http://httpbin.org/cookies')\n print resp.text",
"def sub_add(self, url, flags='select', title=None, lang=None):\n self.command('sub_add', url.encode(fs_enc), *_drop_nones(flags, title, lang))",
"def start_session(url: str):\n session = HTMLSession()\n response = session.get(url)\n\n return response",
"def load_torrent(url, cookies=None):\n return get(url, cookies=cookies).content",
"def establish_a_session():\n new_session = requests.Session()\n\n jar = requests.cookies.RequestsCookieJar()\n jar.set('view_mature', 'true' if named_args.adult else 'false')\n jar.set('d_browse_bookshelf', '2') # grid-like view\n\n new_session.cookies = jar\n return new_session",
"def enable_cookie(self, cookies=None):\r\n if self.environ.get('HTTP_COOKIE'):\r\n cookies = [SimpleCookie(self.environ.get('HTTP_COOKIE'))]\r\n\r\n if cookies:\r\n for cookie in cookies:\r\n for morsel in cookie.values():\r\n morsel['path'] = '/'\r\n # TODO: fixme\r\n k, v = cookie.output().split(':')[0:2]\r\n self.headers += [(k,v)]\r\n else:\r\n cookie = SimpleCookie()\r\n cookie['JSESSIONID'] = 'dummy'\r\n cookie['JSESSIONID']['path'] = '/'\r\n k, v = cookie.output().split(':')\r\n self.headers += [(k,v)]",
"def dummy_get_response(request: http.HttpRequest):\n return http.HttpResponse(\"Session test\")",
"def dl_sub(page):\n # start_time = time.time()\n soup = scrape_page(page)\n div = soup.find(\"div\", {\"class\": \"download\"})\n down_link = \"https://subscene.com\" + div.find(\"a\").get(\"href\")\n r = requests.get(down_link, stream=True)\n filelist = []\n for found_sub in re.findall(\n \"filename=(.+)\", r.headers[\"content-disposition\"]\n ):\n with open(found_sub.replace(\"-\", \" \"), \"wb\") as f:\n for chunk in r.iter_content(chunk_size=150):\n if chunk:\n f.write(chunk)\n filelist = zip_extractor(found_sub.replace(\"-\", \" \"))\n print(\n \"Subtitle ({}) - Downloaded\\nList of files zipped: {}\".format(\n found_sub.replace(\"-\", \" \").capitalize(), filelist\n )\n )\n return filelist\n # print(\"--- download_sub took %s seconds ---\" % (time.time() - start_time))",
"def sanitize_cookies_get(request, *args):\n return _sanitize_common(request, 'cookies', *args)",
"def get_page(url):\n request = Request(url)\n request.add_header('User-Agent',\n 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0)')\n \n while True:\n try:\n cookie_jar.add_cookie_header(request)\n response = urlopen(request)\n cookie_jar.extract_cookies(response, request)\n break\n except:\n time.sleep(2)\n print url + \"Error\"\n continue\n\n html = response.read()\n response.close()\n cookie_jar.save()\n return html",
"def generatetab(_tabname, _tabcontent):\n try:\n addtolog(\"log\", u\"<p><b>Creación del tab\" + _tabname + u\"</b></p><ul>\")\n htmlfile = path + \"/tabs/\" + _tabname.replace(' ', '_') + \".html\"\n html = open(htmlfile, \"w\")\n html.write(_tabcontent.encode('utf8'))\n html.close()\n\n addtolog(\"log\", u\"</ul>\")\n except Exception, e:\n addtolog(\"log\", u\"<li id='error\" + unicode(\n len(stufftoreturn[\"error\"])) + \"'>\" + WRONGIMG + u\"Error message:\" + e.message + u\"</li>\")\n addtolog(\"log\", u\"</ul>\")\n addtolog(\"error\", u\"<a href='#error\" + unicode(\n len(stufftoreturn[\"error\"])) + \"'><p>\" + WRONGIMG + u\"Error en la creación del tab</p>\")\n addtolog(\"error\", u\"<p>\" + WRONGIMG + u\"Error message:\" + e.message + u\"</p></a>\")",
"def _chop_cookies(r: WSGIRequest) -> WSGIRequest:\n if not wagtailcache_settings.WAGTAIL_CACHE_IGNORE_COOKIES:\n return r\n\n if r.COOKIES and not (\n settings.CSRF_COOKIE_NAME in r.COOKIES\n or settings.SESSION_COOKIE_NAME in r.COOKIES\n ):\n r.COOKIES = {}\n return r",
"def Download(id, filename):\n # Cleanup temp dir, we recomend you download/unzip your subs in temp folder and\n # pass that to XBMC to copy and activate\n if os.path.isdir(_temp):shutil.rmtree(_temp)\n xbmcvfs.mkdirs(_temp)\n if not os.path.isdir(_temp):xbmcvfs.mkdir(_temp)\n unpacked = str(uuid.uuid4())\n unpacked = unpacked.replace(\"-\",\"\")\n unpacked = unpacked[0:6]\n xbmcvfs.mkdirs(_temp + \"/\" + unpacked)\n _newtemp = xbmc.translatePath(os.path.join(_temp, unpacked)).decode(\"utf-8\")\n\n subtitles_list = []\n username = _addon.getSetting( 'LDuser' )\n password = _addon.getSetting( 'LDpass' )\n login_postdata = urllib.urlencode({'username' : username, 'password' : password, 'login' : 'Login', 'sid' : ''})\n cj = cookielib.CookieJar()\n my_opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))\n my_opener.addheaders = [('Referer', main_url + 'modules.php?name=Your_Account')]\n urllib2.install_opener(my_opener)\n request = urllib2.Request(main_url + 'forum/ucp.php?mode=login', login_postdata)\n response = urllib2.urlopen(request).read()\n content = my_opener.open(main_url + 'modules.php?name=Downloads&d_op=getit&lid=' + id + '&username=' + username)\n content = content.read()\n #### If user is not registered or User\\Pass is misspelled it will generate an error message and break the script execution!\n if 'Apenas Disponvel para utilizadores registados.' in content.decode('utf8', 'ignore'):\n xbmcplugin.endOfDirectory(int(sys.argv[1]))\n xbmc.executebuiltin(('Notification(%s,%s,%d)' % (_scriptname , _language(32019).encode('utf8'),5000)))\n if content is not None:\n header = content[:4]\n if header == 'Rar!':\n local_tmp_file = pjoin(_newtemp, str(uuid.uuid4())+\".rar\")\n packed = True\n elif header == 'PK\u0003\u0004':\n local_tmp_file = pjoin(_newtemp, str(uuid.uuid4())+\".zip\")\n packed = True\n else:\n # never found/downloaded an unpacked subtitles file, but just to be sure ...\n # assume unpacked sub file is an '.srt'\n local_tmp_file = pjoin(_newtemp, \"ldivx.srt\")\n subs_file = local_tmp_file\n packed = False\n log(u\"Saving subtitles to '%s'\" % (local_tmp_file,))\n try:\n with open(local_tmp_file, \"wb\") as local_file_handle:\n\n local_file_handle.write(content)\n local_file_handle.close()\n xbmc.sleep(500)\n except: log(u\"Failed to save subtitles to '%s'\" % (local_tmp_file,))\n if packed:\n xbmc.executebuiltin(\"XBMC.Extract(%s, %s)\" % (local_tmp_file.encode(\"utf-8\"), _newtemp))\n xbmc.sleep(1000)\n\n ## IF EXTRACTION FAILS, WHICH HAPPENS SOMETIMES ... BUG?? ... 
WE WILL BROWSE THE RAR FILE FOR MANUAL EXTRACTION ##\n searchsubs = recursive_glob(_newtemp, SUB_EXTS)\n searchsubscount = len(searchsubs)\n if searchsubscount == 0:\n dialog = xbmcgui.Dialog()\n subs_file = dialog.browse(1, _language(32024).encode('utf8'), 'files', '.srt|.sub|.aas|.ssa|.smi|.txt', False, True, _newtemp+'/').decode('utf-8')\n subtitles_list.append(subs_file)\n ## ELSE WE WILL GO WITH THE NORMAL PROCEDURE ##\n else:\n log(u\"Unpacked files in '%s'\" % (_newtemp,))\n os.remove(local_tmp_file)\n searchsubs = recursive_glob(_newtemp, SUB_EXTS)\n searchsubscount = len(searchsubs)\n log(u\"count: '%s'\" % (searchsubscount,))\n for file in searchsubs:\n # There could be more subtitle files in _temp, so make\n # sure we get the newly created subtitle file\n if searchsubscount == 1:\n # unpacked file is a newly created subtitle file\n log(u\"Unpacked subtitles file '%s'\" % (file.decode('utf-8'),))\n try: subs_file = pjoin(_newtemp, file.decode(\"utf-8\"))\n except: subs_file = pjoin(_newtemp, file.decode(\"latin1\"))\n subtitles_list.append(subs_file)\n break\n else:\n # If there are more than one subtitle in the temp dir, launch a browse dialog\n # so user can choose. If only one subtitle is found, parse it to the addon.\n\n dirs = os.walk(os.path.join(_newtemp,'.')).next()[1]\n dircount = len(dirs)\n if dircount == 0:\n filelist = os.listdir(_newtemp)\n for subfile in filelist:\n shutil.move(os.path.join(_newtemp, subfile), _temp+'/')\n os.rmdir(_newtemp)\n dialog = xbmcgui.Dialog()\n subs_file = dialog.browse(1, _language(32024).encode('utf8'), 'files', '.srt|.sub|.aas|.ssa|.smi|.txt', False, False, _temp+'/').decode(\"utf-8\")\n subtitles_list.append(subs_file)\n break\n else:\n for dir in dirs:\n shutil.move(os.path.join(_newtemp, dir), _temp+'/')\n os.rmdir(_newtemp)\n dialog = xbmcgui.Dialog()\n subs_file = dialog.browse(1, _language(32024).encode('utf8'), 'files', '.srt|.sub|.aas|.ssa|.smi|.txt', False, False, _temp+'/').decode(\"utf-8\")\n subtitles_list.append(subs_file)\n break\n else: subtitles_list.append(subs_file)\n return subtitles_list",
"def make_request(self):\n page = requests.get(self._url)\n tree = html.fromstring(page.content)\n\n return tree",
"def all_section(main_url):\n soup = parse_html(request_url(url))\n ul = soup.find(\"div\", class_=\"MainLMenu tab\").ul\n section_list = []\n for li in ul.find_all(\"li\"):\n section_list.append(li.a.get('href'))\n # Remove the section which we will not consider\n # Like the video section and others\n remove = [0, 1, -1, -1, -1]\n for i in remove:\n section_list.remove(section_list[i])\n return section_list",
"def subdiv(currentSubdLevel=bool, proxyMode=int, maxPossibleLevel=int, faceStats=bool, smallOffsets=bool, currentLevel=bool, displayLoad=bool, deepestLevel=int, edgeStats=bool):\n pass",
"def gettab(self, arg='', addp=None):\n rurl = self.PREFIX + self.TABLES + arg + self.SUFFIX\n return self._append_addp(rurl, addp)",
"def make_reorder_tabs_request(self, data):\n return self.client.post(\n self.url_reorder,\n data=data,\n content_type=\"application/json\",\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Will try to locate the series number string in the movie name and extract it, returning the movie name without it, along with a representation of the season number in the format "SXX".
|
def _extract_season_number(cls, movie_name):
# The regex will return the season string. We'll remove it from the
# movie_name.
for season, code in SUBSCENE_CONSTS.TV_SERIES_SEASONS.iteritems():
# Concatenate the "Season" to the number.
season = ' '.join([season, SUBSCENE_CONSTS.TV_SERIES_SEASON])
if season in movie_name:
movie_name = movie_name.replace(season, code)
return movie_name
|
[
"def _getSeason(self):\n # TODO: Add a RegEx for matching out the Season Number\n pass",
"def get_singlename_year(name, sex, year):\n\n #identify filename for that year\n filename = \"names/yob\" + str(year) + \".txt\"\n\n #open the file and search for that name and sex, extracting the number\n with open(filename) as f:\n text = f.read()\n pat = name + \",\" + sex + \",(\\d+)\"\n result = re.findall(pat, text) \n\n if result == []:\n number = 0 #set number to zero if name isn't on the list\n else:\n number = int(result[0])\n return number #returns the number for that name in that year",
"def _remove_year(cls, movie_name):\r\n result = Utils.getregexresults(\r\n SUBSCENE_REGEX.YEAR_REMOVER, \r\n movie_name, \r\n False)\r\n if not result: \r\n return None\r\n return result[0]",
"def get_season_number(p):\n d = os.path.basename(p)\n\n for f in SEASON_FMTS:\n match = re.search(f, d)\n if match:\n return int(match.group(1))\n\n raise ValueError('not a season: {p}')",
"def _season_code(season):\n season = str(season)\n pat1 = re.compile(r'^[0-9]{4}$') # 1994 | 9495\n pat2 = re.compile(r'^[0-9]{2}$') # 94\n pat3 = re.compile(r'^[0-9]{4}-[0-9]{4}$') # 1994-1995\n pat4 = re.compile(r'^[0-9]{4}-[0-9]{2}$') # 1994-95\n pat5 = re.compile(r'^[0-9]{2}-[0-9]{2}$') # 94-95\n\n if re.match(pat1, season):\n if int(season[2:]) == int(season[:2]) + 1:\n if season == '1920' or season == '2021':\n msg = ('Season id \"{}\" is ambiguous: interpreting as \"{}-{}\"'\n .format(season, season[:2], season[-2:]))\n warnings.warn(msg)\n return season # 9495\n elif season[2:] == '99':\n return ''.join([season[2:], '00']) # 1999\n else:\n return ''.join([season[-2:], '{:02d}'.format(int(season[-2:]) + 1)]) # 1994\n elif re.match(pat2, season):\n if season == '99':\n return ''.join([season, '00']) # 99\n else:\n return ''.join([season, '{:02d}'.format(int(season) + 1)]) # 94\n elif re.match(pat3, season):\n return ''.join([season[2:4], season[-2:]]) # 1994-1995\n elif re.match(pat4, season):\n return ''.join([season[2:4], season[-2:]]) # 1994-95\n elif re.match(pat5, season):\n return ''.join([season[:2], season[-2:]]) # 94-95\n else:\n return season",
"def extract_year(title: str):\n year = -1\n match = regex.search('\\((\\d{4})\\)$', title.strip())\n if match:\n year = int(match.group(1).strip())\n title = title[:match.start()].strip()\n return title, year",
"def getMovieNameParts(movieName):\n\n # Trim the movie name down to just the main part of the name\n # up until a '-', '[', '(', or the extension\n # if no match, then the whole movieName\n match = re.search(r'(.*?)(?=\\s*[([.-])|(.+)', movieName)\n # if match:\n # print(f\"**{match.group()}**\")\n # else:\n # print(f\"##{movieName}##\")\n mainName = match.group()\n suffixName = movieName[len(mainName):]\n\n return mainName, suffixName",
"def get_season_from_game_id(game_id):\n if game_id[4] == '9':\n return '20' + game_id[3] + game_id[4] + '-' + str(int(game_id[3]) + 1) + '0'\n else:\n return '20' + game_id[3] + game_id[4] + '-' + game_id[3] + str(int(game_id[4]) + 1)",
"def getMovieTitle(movieID):\n f = open(\"ml-100k/u.item\")\n\n for line in f.readlines():\n d = line.split(\"|\")\n if int(d[0]) == movieID:\n return d[1]\n f.close()",
"def getseasonindex(animeseason):\n return float(f\"{animeseason['year']}.{constants.SEASONS.index(animeseason['season'])}\")",
"def extract_title(name):\n \n try:\n title = re.search(r',\\s(.+?)\\.', name).groups()[0]\n except:\n title = ''\n \n if title in [\"Mr\", \"Mrs\", \"Miss\"]:\n return(title)\n else:\n return('Rare')",
"def format_player_name_for_sports_ref(player_name):\n formatted_name = player_name.replace(\" III\", \"\")\n formatted_name = formatted_name.replace(\".\", \"\")\n formatted_name = formatted_name.replace(\"'\", \"\")\n formatted_name = formatted_name.replace(\" \", \"-\")\n formatted_name = formatted_name.lower()\n name_split = formatted_name.split(\" \")\n\n return formatted_name",
"def extract_board_name(doc_title):\n\n board_name = doc_title.split(\";\")[0].strip()\n board_name = board_name.replace(\"PDQ \", \"\").strip()\n board_name = board_name.replace(\" Editorial Board\", \"\").strip()\n if board_name.startswith(\"Cancer Complementary\"):\n board_name = board_name.replace(\"Cancer \", \"\").strip()\n return board_name",
"def _getSubtitleNumber(entry):\n return entry['SN']",
"def student_name_from(repo_name: str) -> str:\n m = re.search(github_prefix + \"-(.*)$\", repo_name)\n if not m:\n return \"\" # something funny in the name, so therefore not matching\n else:\n return m.group(1)",
"def find_movie_name(url):\n html_page = BeautifulSoup(requests.get(url).text, \"html.parser\")\n html_page.prettify()\n bio = html_page.find(class_='infobox vevent')\n\n if (bio is None):\n logging.warning(\"Cannot find name of Movie\")\n return url\n return bio.th.string",
"def Titanic_get_title(str_passenger_name):\n first_part, second_part = str(str_passenger_name).split(\", \", 1)\n title, third_part =str(second_part).split(\".\", 1)\n if title in ['Mr', 'Mrs', 'Miss']:\n return str(title)\n else:\n return \"Rare\"",
"def get_scene_name_from_recording_name(name_recording):\n return name_recording.partition('_')[0]",
"def season(self):\r\n year = int(self.eid[0:4])\r\n month = int(self.eid[4:6])\r\n if month <= 3:\r\n year -= 1\r\n return year"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Removes the year from the movie name. The year is located inside parentheses. Returns None on failure.
|
def _remove_year(cls, movie_name):
result = Utils.getregexresults(
SUBSCENE_REGEX.YEAR_REMOVER,
movie_name,
False)
if not result:
return None
return result[0]
|
[
"def extract_year(title: str):\n year = -1\n match = regex.search('\\((\\d{4})\\)$', title.strip())\n if match:\n year = int(match.group(1).strip())\n title = title[:match.start()].strip()\n return title, year",
"def get_filename_year(filename):\n new_filename = filename\n filename_year = None\n matches = re.findall(\"\\s\\(\\d+\\)\", new_filename)\n if not matches:\n matches = re.findall(\"\\s\\d+\", new_filename)\n if matches: \n match = matches[-1] # last match\n now = datetime.datetime.now() \n year_string = str(match)\n year = int(year_string.replace(\"(\", \"\").replace(\")\", \"\"))\n if new_filename.endswith(year_string):\n if year > 1945 and year <= now.year: \n filename_year = str(year)\n new_filename = filename.replace(year_string, \"\") \n return new_filename, filename_year",
"def remove_year_from_date(date):\n return float(str(date)[4:])",
"def get_year(text):\n # type: (str) -> int\n year = re.search(r\"\\d{4}\", text)\n return int(year.group()) if year else 0",
"def _parse_year(key):\n\n base_str = key\n year = re.search('_[1-2][0-9]{3}$', key, flags=re.IGNORECASE)\n if year is not None:\n base_str = key.replace(year.group(0), '')\n year = int(year.group(0).lstrip('_'))\n\n # unlikely to be a year before 1800 or after 2200\n if year < 1800 or year > 2200:\n year = None\n base_str = key\n\n return year, base_str",
"def remove_movie(self, title, year):\n title = re.sub(r'[?|$|!|:|#]', r'', title)\n movie_meta = '%s (%d)' % (title, year)\n folder = re.sub(\n pattern=r'[?|$|!|:|#]',\n repl=r'',\n string=self.db[self.movies_label][movie_meta]['alt_title'])\n progress = xbmcgui.DialogProgress()\n progress.create(self.kodi_helper.get_local_string(1210), movie_meta)\n progress.update(50)\n time.sleep(0.5)\n del self.db[self.movies_label][movie_meta]\n self._update_local_db(filename=self.db_filepath, db=self.db)\n dirname = self.nx_common.check_folder_path(\n path=os.path.join(self.movie_path, folder))\n filename = os.path.join(self.movie_path, folder, movie_meta + '.strm')\n if xbmcvfs.exists(dirname):\n xbmcvfs.delete(filename)\n xbmcvfs.rmdir(dirname)\n return True\n return False\n time.sleep(1)\n progress.close()",
"def format_year(year: str) -> str:\n return str(int(year)) + ' year'",
"def get_exported_movie_year(self, title):\n year = '0000'\n folder = self.nx_common.check_folder_path(\n path=os.path.join(self.movie_path, title))\n if xbmcvfs.exists(folder):\n file = xbmcvfs.listdir(folder)\n year = str(file[1]).split('(', 1)[1].split(')', 1)[0]\n return int(year)",
"def get_year(ax_id):\n modern_ax_id = re.compile(r\"([0-9]{2})([0-9]{2})\\.([0-9]+)\")\n search_modern = re.search(modern_ax_id, ax_id)\n if search_modern:\n year = \"20\" + search_modern[1]\n else:\n old_ax_id = re.compile(r\"([a-zA-Z]+[-]?[a-zA-Z]+)/([0-9]{2})([0-9]+)\")\n search_old = re.search(old_ax_id, ax_id)\n # get century right\n if search_old[2][0] == \"9\":\n year = \"19\" + search_old[2]\n else:\n year = \"20\" + search_old[2]\n return year",
"def get_year(ax_id):\n modern_ax_id = re.compile(r'([0-9]{2})([0-9]{2})\\.([0-9]+)')\n search_modern = re.search(modern_ax_id, ax_id)\n if search_modern:\n year = '20' + search_modern[1]\n else:\n old_ax_id = re.compile(r'([a-zA-Z]+[-]?[a-zA-Z]+)/([0-9]{2})([0-9]+)')\n search_old = re.search(old_ax_id, ax_id)\n # get century right\n if search_old[2][0] == \"9\":\n year = '19' + search_old[2]\n else:\n year = '20' + search_old[2]\n return year",
"def parse_year(raw):\n years = re.findall('[0-9]{4}', raw)\n\n if years:\n return int(years[0])",
"def _extract_season_number(cls, movie_name):\r\n # The regex will return the season string. We'll remove it from the\r\n # movie_name.\r\n for season, code in SUBSCENE_CONSTS.TV_SERIES_SEASONS.iteritems():\r\n # Concatenate the \"Season\" to the number.\r\n season = ' '.join([season, SUBSCENE_CONSTS.TV_SERIES_SEASON])\r\n if season in movie_name:\r\n movie_name = movie_name.replace(season, code)\r\n\r\n return movie_name",
"def _getYearFromDesc(desc):\n year = desc.split(\"\\n\")[2]\n return int(year[-5:-1])",
"def parse_nr_year(identifier: Optional[str]) -> Optional[Tuple[int, int]]:\n if identifier is None:\n return None\n number, year = identifier.split('/')\n return int(number), year_shorthand_to_full(year)",
"def extract_year(string: str) -> int:\n expr = r\"(?:19|20)\\d{2}\"\n matches = re.findall(expr, string)\n if matches:\n year = matches[0]\n else:\n raise Exception(\"The string does not have any valid year.\")\n\n return int(year)",
"def model_year(self):\n if self._edid[0x10] == 255:\n return self._edid[0x11] + 1990\n else:\n return None",
"def get_year(msg):\n year = input(msg)\n if re.match(\"[1-3][0-9]{3}\", year) and len(year) == 4:\n return year\n else:\n print(\"Enter correct year!\")\n return get_year(msg)",
"def getMovieNameParts(movieName):\n\n # Trim the movie name down to just the main part of the name\n # up until a '-', '[', '(', or the extension\n # if no match, then the whole movieName\n match = re.search(r'(.*?)(?=\\s*[([.-])|(.+)', movieName)\n # if match:\n # print(f\"**{match.group()}**\")\n # else:\n # print(f\"##{movieName}##\")\n mainName = match.group()\n suffixName = movieName[len(mainName):]\n\n return mainName, suffixName",
"def getYear():"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Adds a single movie to the list as a MovieSubStage. Removes the season numbering and the year from the movie_name field.
|
def _add_movie(movie):
movie_name = movie['MovieName']
movie_code = movie['MovieCode']
# Try to extract the season numbering (it might be a season result).
movie_name = cls._extract_season_number(movie_name)
# Remove the year.
movie_name = cls._remove_year(movie_name)
# And convert to global format.
movie_name = Utils.FormatMovieName(movie_name, False)
stage = MovieSubStage(
cls.PROVIDER_NAME,
movie_name,
movie_code,
default_versum)
# There might be duplication in the results.
if stage not in movie_sub_stages:
movie_sub_stages.append(stage)
|
[
"def add_movie(movies):\n new_title = get_valid_selection(\"Title\")\n new_year = get_valid_year()\n new_category = get_valid_selection(\"Category\")\n movies.add_movie(Movie(new_title, new_year, new_category, False))\n print(\"{} ({} from {}) added to movie list\".format(new_title, new_category, new_year))\n movies.sort_movies(SORT_CONDITION)",
"def add_movie(self, movie: Movie):\r\n raise NotImplementedError",
"def add_movie(self, new_movie):\r\n self.movies.append(Movie(new_movie[0], new_movie[1], new_movie[2], new_movie[3]))",
"def addFrameToMovie(frame, movie):\n # frame = None\n # movie = None\n # if a.__class__ == Movie:\n # movie = a\n # frame = b\n # else:\n # movie = b\n # frame = a\n\n if not (isinstance(movie,Movie) and isinstance(frame, str)):\n # if movie.__class__ != Movie or frame.__class__ != String:\n repValError(\"addFrameToMovie(frame, movie): frame is not a string or movie is not a Movie objectd\")\n\n movie.addFrame(frame)",
"def add_movie():\n movies.append(create_movie())\n print(\"\\nYour movie was successfully added!\")\n print(f\"Movies currently on your database: {len(movies)}\")",
"def add_movie(self, title, alt_title, year, video_id, build_url):\n title = re.sub(r'[?|$|!|:|#]', r'', title)\n movie_meta = '%s (%d)' % (title, year)\n folder = re.sub(r'[?|$|!|:|#]', r'', alt_title)\n dirname = self.nx_common.check_folder_path(\n path=os.path.join(self.movie_path, folder))\n filename = os.path.join(dirname, movie_meta + '.strm')\n progress = xbmcgui.DialogProgress()\n progress.create(self.kodi_helper.get_local_string(650), movie_meta)\n if xbmcvfs.exists(filename):\n return\n if not xbmcvfs.exists(dirname):\n xbmcvfs.mkdirs(dirname)\n if self.movie_exists(title=title, year=year) is False:\n progress.update(50)\n time.sleep(0.5)\n self.db[self.movies_label][movie_meta] = {'alt_title': alt_title}\n self._update_local_db(filename=self.db_filepath, db=self.db)\n url = build_url({'action': 'play_video', 'video_id': video_id})\n self.write_strm_file(path=filename, url=url, title_player=movie_meta)\n progress.update(100)\n time.sleep(1)\n progress.close()",
"def add_movie(conn, *, id_parse=ACTOR_ID_PARSE, info_cap=MAX_INFO_SIZE):\n print('adding new movie')\n printc('b',\n '** Note ** : if release time is left blank, current date will be assumed. '\n 'To enter actors, provide each actor\\'s id #, space-separated. Actor ids are '\n 'not required, but a director id is. If the actor is a main actor, '\n 'enter the actor id with a * at its end (without space), e.g. 12345*.'\n )\n title, genre, url, rating, budget, gross_income, director_id, studio, actors, info = menu_selections(\n 'title', 'genre', 'url (at most 100 chars)', 'rating (e.g. G, PG-13)',\n 'budget ($)', 'gross revenue($)', 'director id', 'studio (at most 20 chars)',\n 'actor ids\\0', f'additional info/summary [{info_cap} chars max]\\0'\n )\n info = truncate(info, info_cap)\n # just take the date as today\n# date = custom_select(\n# \"Enter release date (empty field sets date to today)\", get_date)[1]\n# if not date:\n# date = dt.date.today()\n \n actors, is_main = zip(*(\n actor_id.groups() for actor_id in id_parse.finditer(actors)\n ))\n is_main = tuple('t' if m else 'f' for m in is_main)\n roles = tuple(truncate(input(f'enter role for actor {a} (at most 50 chars): '),50) for a in actors)\n \n\n conn.autocommit = False\n with conn.cursor() as cur:\n # IMPORTANT -- make this a transaction that succeeds only if both parts\n # (adding movie and actors) succeeds\n try:\n cur.execute(\n \"\"\"\n INSERT INTO movie\n (title, genre, url, rating, budget, gross_income, director_id, studio, summary, date_released)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, CURRENT_DATE) RETURNING id;\"\"\",\n (title, genre, url, rating, budget, gross_income, director_id, studio, info)\n )\n movie_id = cur.fetchone()[0]\n \n execute_batch(cur,\n \"\"\"\n INSERT INTO act\n (actor_id, movie_id, if_main, role)\n VALUES (%s, %s, %s, %s);\"\"\",\n list(zip(actors, [movie_id]*len(actors), is_main, roles))\n )\n\n printc('g', f'movie {title} inserted with id {movie_id}')\n conn.commit()\n except Exception as e:\n print('add_movie: error:', repr(e))\n conn.rollback()\n \n conn.autocommit = True",
"def index_movie(self, movie):\n self.es.index(INDEX_MOVIES, 'movie', movie.to_dict(include_subs=False), id=movie.id)\n elasticsearch.helpers.bulk(self.es, [{\n \"_id\": \"%d-%d\" % (movie.id, sub.sub_id),\n \"_index\": INDEX_SUBTITLES,\n \"_type\": \"subtitle\",\n \"_source\": sub.to_dict()\n } for sub in movie.subtitles])",
"def add_movie(self, title, year, plot, rating):\n try:\n self.table.put_item(\n Item={\n 'year': year,\n 'title': title,\n 'info': {'plot': plot, 'rating': Decimal(str(rating))}})\n except ClientError as err:\n logger.error(\n \"Couldn't add movie %s to table %s. Here's why: %s: %s\",\n title, self.table.name,\n err.response['Error']['Code'], err.response['Error']['Message'])\n raise",
"def adding_stage_ctlr(self, bot, update):\n in_msg = update.message.text\n try:\n Stage.add_stage(in_msg)\n self._send_msg(bot, update, \"\"\"{} *'{}' added successfully*\\n\\nThe *available stages* are:\\n{}\"\"\".format(emojis['tick'], in_msg, u\"\\n\".join(Stage.get_names())), markdown=True)\n # return to add activity menu\n self._fsm.trigger('cancel', bot=bot, update=update)\n except Exception as e:\n self._send_msg(bot, update, \"*{}*\\n\\nLet's *try another name*:\".format(e), markdown=True)",
"def add_video(self, video):\n self._videos[video.video_id] = video",
"def _create_movie_task(videoid, movie):\n name = '{title} ({year})'.format(title=movie['title'], year=movie['year'])\n return [_create_item_task(name, FOLDER_MOVIES, videoid, name, name)]",
"def AddFile(moviefile_uncpath):\n\n global num_errors, verbose_mode\n global movies_conn\n\n utils.Msg(\"Adding file: \\\"\"+moviefile_uncpath+\"\\\" ...\", verbose_mode)\n\n movie = GetMovieInfo(moviefile_uncpath)\n if movie == None:\n return\n\n c = \"\" # column list for INSERT\n v = \"\" # value list for INSERT\n c += \"idMovie, \"\n v += \"NULL, \"\n c += \"idCabinet, \"\n v += \"%(idCabinet)s, \"\n c += \"idMediumType, \"\n v += \"'FILE', \"\n c += \"idStatus, \"\n v += \"%(status)s, \"\n c += \"Uncut, \"\n v += \"%(fn_uncut)s, \"\n c += \"Language, \"\n v += \"%(fn_language)s, \"\n c += \"SubtitleLanguage, \"\n v += \"%(fn_subtitle_language)s, \"\n c += \"Duration, \"\n v += \"%(duration_min)s, \"\n c += \"idQuality, \"\n v += \"%(idVideoQuality)s, \"\n c += \"DesiredDisplayAspectRatioWidth, \"\n v += \"%(fn_dar_width)s, \"\n c += \"DesiredDisplayAspectRatioHeight, \"\n v += \"%(fn_dar_height)s, \"\n c += \"DisplayAspectRatio, \"\n v += \"%(video_dar)s, \"\n c += \"OriginalDisplayAspectRatio, \"\n v += \"%(video_dar_org)s, \"\n c += \"idContainerFormat, \"\n v += \"%(idContainerFormat)s, \"\n c += \"idVideoFormat, \"\n v += \"%(idVideoFormat)s, \"\n c += \"VideoFormatProfile, \"\n v += \"%(video_format_profile)s, \"\n c += \"VideoSamplingWidth, \"\n v += \"%(video_width)s, \"\n c += \"VideoSamplingHeight, \"\n v += \"%(video_height)s, \"\n c += \"VideoBitrate, \"\n v += \"%(video_bitrate_kbps)s, \"\n c += \"VideoFramerate, \"\n v += \"%(video_framerate_fps)s, \"\n c += \"idVideoFramerateMode, \"\n v += \"%(idVideoFramerateMode)s, \"\n # c += \"VideoQualityFactor, \"\n # v += # TBD: Get this value from MediaInfo\n c += \"idAudioFormat, \"\n v += \"%(idAudioFormat)s, \"\n c += \"AudioFormatProfile, \"\n v += \"%(audio_format_profile)s, \"\n # c += \"idAudioChannelType, \"\n # v += # TBD: Get this value from MediaInfo\n c += \"TechnicalFlaws, \"\n v += \"%(fn_techcomm)s, \"\n c += \"AudioBitrate, \"\n v += \"%(audio_bitrate_kbps)s, \"\n c += \"idAudioBitrateMode, \"\n v += \"%(idAudioBitrateMode)s, \"\n c += \"AudioSamplingRate, \"\n v += \"%(audio_samplingrate_hz)s, \"\n c += \"FilePath, \"\n v += \"%(file_path)s, \"\n c += \"FolderPath, \"\n v += \"%(folder_path)s, \"\n now = str(datetime.datetime.now())[0:19]\n c += \"TSUpdated, \"\n v += \"'\"+now+\"', \"\n c += \"TSVerified, \"\n v += \"'\"+now+\"', \"\n c += \"Title, \"\n v += \"%(title)s, \"\n c += \"ReleaseYear, \"\n v += \"%(year)s, \"\n c += \"SeriesTitle, \"\n v += \"%(series_title)s, \"\n c += \"EpisodeTitle, \"\n v += \"%(episode_title)s, \"\n c += \"EpisodeId\"\n v += \"%(episode_id)s\"\n\n # movie[\"fn_threed\"]+\", \"\n # movie[\"fn_partial\"]+\", \"\n\n sql = \"INSERT INTO Medium (\"+c+\") VALUES ( \"+v+\")\"\n\n medium_cursor = movies_conn.cursor(MySQLdb.cursors.Cursor)\n\n medium_cursor.execute(sql,movie)\n\n medium_cursor.close()\n\n movies_conn.commit()",
"def _remove_year(cls, movie_name):\r\n result = Utils.getregexresults(\r\n SUBSCENE_REGEX.YEAR_REMOVER, \r\n movie_name, \r\n False)\r\n if not result: \r\n return None\r\n return result[0]",
"def create_movie(self, name=\"Test Movie\", year=2000, genre_id=50):\n\t\tgenre = Genre.objects.filter(pk=genre_id).first()\n\t\treturn Movie.objects.create(name=name, year=year, genre=genre)",
"def _extract_season_number(cls, movie_name):\r\n # The regex will return the season string. We'll remove it from the\r\n # movie_name.\r\n for season, code in SUBSCENE_CONSTS.TV_SERIES_SEASONS.iteritems():\r\n # Concatenate the \"Season\" to the number.\r\n season = ' '.join([season, SUBSCENE_CONSTS.TV_SERIES_SEASON])\r\n if season in movie_name:\r\n movie_name = movie_name.replace(season, code)\r\n\r\n return movie_name",
"def insert_movie(addname: str, addyear: int, addposter: str, nomatchresult: int) -> None:\n with UseDatabase(dbconfig) as cursor:\n _SQL = \"\"\"insert into moviedata\n (name, year, poster, moviematch, nomatch, disliked)\n values\n (%s, %s, %s, %s, %s, %s)\"\"\"\n cursor.execute(_SQL, (addname, addyear, addposter, 0, nomatchresult, 0))",
"def scrape_subtitle_id(movie_id):\n url = urls.movie_page(movie_id)\n tree = lxml.html.fromstring(requests.get(url).content)\n\n # Links to the subtitle files are stored in a container\n links = tree.xpath('//*[@id=\"moviehash\"]/a/@href')\n\n return filter(lambda x: x is not None, map(_extract_id, links))",
"def __insert_movie(self, session, imdb_id, movie_title, movie_year):\n print(f\"start {self.__insert_movie.__name__}: {movie_title} {movie_year}\")\n\n # Create a new movie row with value of has_role of passed in param and insert it into Movies table\n new_movie = movie.Movie(\n movie_id=imdb_id,\n movie_title=movie_title,\n movie_year=movie_year,\n inserted_dtm=datetime.now()\n )\n\n # Add the new movie to database\n session.add(new_movie)\n session.commit()\n\n msg = f\"end {self.__insert_movie.__name__}: inserted movie {movie_title} ({movie_year})\"\n print_and_log(msg)\n return new_movie"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|