query (string, lengths 9–9.05k) | document (string, lengths 10–222k) | negatives (list, lengths 19–20) | metadata (dict) |
---|---|---|---|
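Each row's metadata records a single "triplet" objective over ("query", "document", "negatives"). As a minimal, hedged sketch of how a row of this table could be expanded into training triplets (the helper name and the abbreviated row literal below are illustrative, not part of the dataset):

```python
# Illustrative only: expand one row of this table into (query, positive, negative)
# triplets, following the "triplet" objective stored in each row's metadata.
from typing import Dict, List, Tuple


def row_to_triplets(row: Dict) -> List[Tuple[str, str, str]]:
    """Pair the query with its positive document and each hard negative."""
    query = row["query"]
    positive = row["document"]
    return [(query, positive, negative) for negative in row["negatives"]]


# Abbreviated example row, based on the first row shown below.
example_row = {
    "query": "Convert the given byte value to GB.",
    "document": 'def to_gb(byte_value):\n    return "{:.2f}".format(int(byte_value)/1073741824)',
    "negatives": ["def megabytes_to_gigabytes(mb):\n    return mb / 1024"],
    "metadata": {"objective": {"paired": [], "self": [], "triplet": [["query", "document", "negatives"]]}},
}

print(row_to_triplets(example_row)[0][0])  # "Convert the given byte value to GB."
```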
Convert the given byte value to GB.
|
def to_gb(byte_value):
    return "{:.2f}".format(int(byte_value)/1073741824)
|
[
"def convert_byte_to_gb(attribute_value):\r\n try:\r\n attribute_value = int(attribute_value) / 1024\r\n new_attribute_value = str(attribute_value) + ' GB'\r\n return new_attribute_value\r\n except:\r\n traceback.print_exc()\r\n return ''",
"def convert_bytes_gb(bytes_: int) -> int:\n return round(float(bytes_) / (1024 ** 3))",
"def megabytes_to_gigabytes(mb):\n return mb / 1024",
"def bytes_to_mb(byte):\n return round(byte / 1000 / 1000, 2)",
"def byte_to_megabyte(byte_number):\n\n try:\n return int(byte_number / 1048576)\n except:\n return None",
"def size_converter(_bytes: int) -> str:\n KB = _bytes / float(1 << 10)\n MB = _bytes / float(1 << 20)\n GB = _bytes / float(1 << 30)\n\n if GB > 1:\n return f\"{round(GB, 2):,} GB\"\n elif MB > 1:\n return f\"{round(MB, 2):,} MB\"\n\n return f\"{round(KB, 2):,} KB\"",
"def unit_to_bytes(value: str) -> int:\n m = re.match('^([0-9]+)(.*)$', value)\n if m is None:\n raise InvalidParamError('Value is not a bytes unit')\n xvalue = int(m.group(1))\n xunit = m.group(2)\n if xunit == \"Ki\":\n xvalue *= 1024\n elif xunit == \"Mi\":\n xvalue *= 1024 * 1024\n elif xunit == \"Gi\":\n xvalue *= 1024 * 1024 * 1024\n elif xunit == \"Ti\":\n xvalue *= 1024 * 1024 * 1024 * 1024\n elif xunit == \"Pi\":\n xvalue *= 1024 * 1024 * 1024 * 1024 * 1024\n elif xunit == \"Ei\":\n xvalue *= 1024 * 1024 * 1024 * 1024 * 1024 * 1024\n elif xunit == \"K\":\n xvalue *= 1000\n elif xunit == \"M\":\n xvalue *= 1000 * 1000\n elif xunit == \"G\":\n xvalue *= 1000 * 1000 * 1000\n elif xunit == \"T\":\n xvalue *= 1000 * 1000 * 1000 * 1000\n elif xunit == \"P\":\n xvalue *= 1000 * 1000 * 1000 * 1000 * 1000\n elif xunit == \"E\":\n xvalue *= 1000 * 1000 * 1000 * 1000 * 1000 * 1000\n else:\n raise InvalidParamError('Unknown byte unit \"{}\"'.format(xunit))\n return xvalue",
"def test_convert_to_bytes(self):\n converted_gigabytes = config_functions.convert_to_bytes(500, 'g')\n self.assertEqual(converted_gigabytes, 536870912000)\n converted_kilobytes = config_functions.convert_to_bytes(524288000, 'k')\n self.assertEqual(converted_kilobytes, 536870912000)\n converted_megabytes = config_functions.convert_to_bytes(512000, 'm')\n self.assertEqual(converted_megabytes, 536870912000)",
"def _mb_to_bytes(size_mb):\n return '0:%s' % (size_mb * 1000 * 1000)",
"def mb_to_bytes(megabytes):\n return megabytes * 1000 * 1000",
"def test_convert_from_bytes(self):\n converted_gigabytes = config_functions.convert_from_bytes(536870912000, 'g')\n self.assertEqual(converted_gigabytes, 500)\n converted_kilobytes = config_functions.convert_from_bytes(536870912000, 'k')\n self.assertEqual(converted_kilobytes, 524288000)\n converted_megabytes = config_functions.convert_from_bytes(536870912000, 'm')\n self.assertEqual(converted_megabytes, 512000)",
"def convert_magnitude(byte_value):\n \n if byte_value < 1024:\n \n # Bytes\n size_as_string = '%dB' % byte_value\n\n elif byte_value < 1048576:\n\n # Kilo.\n size_as_string = '%.2fK' % (1.0 * byte_value / 1024)\n\n elif byte_value < 1073741824:\n\n # Mega\n size_as_string = '%.2fM' % (1.0 * byte_value / 1048576)\n\n else:\n\n # Giga\n size_as_string = '%.2fG' % (1.0 * byte_value / 1073741824)\n \n ######################\n return size_as_string\n ######################",
"def convert_to_bytes(size: str) -> int:\n\tunits = {\n\t\t\"B\": 1,\n\n\t\t\"kB\": 10**3,\n\t\t\"MB\": 10**6,\n\t\t\"GB\": 10**9,\n\t\t\"TB\": 10**12,\n\n\t\t# These are typical shortcuts that users take, we support them as well\n\t\t\"k\": 10**3,\n\t\t\"K\": 10**3,\n\t\t\"M\": 10**6,\n\t\t\"G\": 10**9,\n\t\t\"T\": 10**12,\n\n\t}\n\tsize = size.strip() # remove leading and trailing whitespace\n\n\tif size.isdigit():\n\t\t# size is just a number, so it's already in bytes\n\t\treturn int(size)\n\n\t# size has a unit, find where the number part ends\n\tfor i, char in enumerate(size):\n\t\tif not char.isdigit() and char != '.':\n\t\t\tbreak\n\telse:\n\t\t# no unit found\n\t\traise ValueError(\"Invalid size string: {}\".format(size))\n\n\tnumber = size[:i]\n\tunit = size[i:].strip()\n\n\tif unit not in units:\n\t\traise ValueError(\"Invalid unit: {}\".format(unit))\n\n\treturn int(float(number) * units[unit])",
"def _convert_size(input_size):\n if input_size == 0:\n return '0B'\n\n size_name = (\"B\", \"KB\", \"MB\", \"GB\", \"TB\", \"PB\", \"EB\", \"ZB\", \"YB\")\n i = int(math.floor(math.log(input_size, 1024)))\n power = math.pow(1024, i)\n size = round(input_size / power, 2)\n return '%s %s' % (size, size_name[i])",
"def bytes_to_megabytes(num):\r\n var_mb = num / 1024.0 / 1024.0\r\n\r\n return round(var_mb, 1)",
"def to_unit_memory(number):\n kb = 1024\n\n number /= kb\n\n if number < 100:\n return '{} Kb'.format(round(number, 2))\n\n number /= kb\n if number < 300:\n return '{} Mb'.format(round(number, 2))\n\n number /= kb\n\n return '{} Gb'.format(round(number, 2))",
"def test_as_gibibytes(self):\n self.assertEqual(1, FileSize(1024 * 1024 * 1024).as_gibibytes)",
"def parse_bytes(strvalue):\n if not isinstance(strvalue, basestring):\n return strvalue\n\n strvalue = strvalue.replace(\" \", \"\")\n scales = {\n \"KB\": 1024,\n \"MB\": 1024**2,\n \"GB\": 1024**3\n }\n if strvalue[-2:] in scales:\n scale = scales[strvalue[-2:]]\n strvalue = strvalue[:-2]\n else:\n scale = 1\n size = int(strvalue) * scale\n return size",
"def test_as_gigabytes(self):\n self.assertEqual(1, FileSize(1000 * 1000 * 1000).as_gigabytes)"
] |
{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
|
NODE sends a message containing an invalid public key to OTHER. OTHER should drop it
|
def test_invalid_public_key(self):
    node, other = self.create_nodes(2)
    other.send_identity(node)
    message = node.create_bin_key_text('Should drop')
    packet = node.encode_message(message)
    # replace the valid public-key with an invalid one
    public_key = node.my_member.public_key
    self.assertIn(public_key, packet)
    invalid_packet = packet.replace(public_key, "I" * len(public_key))
    self.assertNotEqual(packet, invalid_packet)
    # give invalid message to OTHER
    other.give_packet(invalid_packet, node)
    self.assertEqual(other.fetch_messages([u"bin-key-text", ]), [])
|
[
"def test_send_find_value_unknown(port, version, public_key, private_key):\n item = {\n 'uuid': str(uuid.uuid4()),\n 'recipient': REMOTE_NODE_PUBLIC_KEY,\n 'sender': public_key,\n 'reply_port': 1908,\n 'version': version,\n 'key': sha512('an un-findable key'.encode('utf-8')).hexdigest(),\n }\n msg = seal_message('findvalue', item, private_key)\n result = send_message(port, msg)\n assert result.status_code == 200\n reply = result.json()\n assert reply['uuid'] == item['uuid']\n assert reply['sender'] == REMOTE_NODE_PUBLIC_KEY\n assert reply['recipient'] == public_key\n assert reply['message'] == 'nodes'\n assert reply['reply_port'] == port\n assert reply['version'] == version\n assert 'nodes' in reply\n assert isinstance(reply['nodes'], list)\n assert len(reply['nodes']) == 1 # the node only knows about us!\n assert 'seal' in reply\n assert check_seal(from_dict(reply))",
"def encrypt(self, public_key, message):",
"def test_m2_bad_key(self):\n key = salt.crypt.get_rsa_pub_key(self.key_path)\n assert key.check_key() == 1",
"def verify(self, public_key, message, signature):",
"def sendPublicKey(g, p, s):\n\tstatus = \"120 PubKey \" + str(computePublicKey(g, p, s))\n\treturn status",
"def test_PluggableTransport_checkArguments_obfs4_missing_publickey(self):\n pt = bridges.PluggableTransport()\n self.assertRaises(\n bridges.MalformedPluggableTransport,\n pt.updateFromStemTransport,\n self.fingerprint, 'obfs4', ('34.230.223.87', 37341, [\n ('iat-mode=1,'\n 'node-id=2a79f14120945873482b7823caabe2fcde848722')]))",
"def send_public_key(self):\n self.send(str(self.PUBLIC_KEY[0]) + \",\" + str(self.PUBLIC_KEY[1]))",
"def clean(self):\n super(SignedSSHKey, self).clean()\n if not self.certificate.startswith('ssh-rsa-cert-v01@openssh.com'):\n raise BadRequestError(\"Certificate is not a valid signed RSA key.\")",
"def msg_key(self, msg):\n if ConnectionType.RELAY_BLOCK in self.CONNECTION_TYPE:\n self.node.block_processing_service.process_block_key(msg, self)\n else:\n self.log_error(log_messages.UNEXPECTED_KEY_MESSAGE, msg)",
"def rsa_encrypt(msg, public_key):\n pass",
"def privateMessage(con, nick, message):",
"def __prepareErrorMSGForSender(self):\n dm = DistributerManager.DistributerManager()\n dkm = DistributerKeyManager.DistributerKeyManager()\n userInfo = 'YOUR MESSAGE FROM ' + self.get_timestamp() + ' COULD NOT SEND TO ' + ', '.join(list(self.get_addr_msg_dict().keys()))\n self.set_user_info(userInfo)\n self.set_dist_keys(None)\n addrFingerprintKeyInf = {}\n try:\n senderFingerprint = dm.getFingerprint(self.get_sender_addr(), self.get_dist_addr())\n senderKey = dkm.getKeyFromUser(senderFingerprint)\n addrFingerprintKeyInf[self.get_sender_addr()] = (senderFingerprint, senderKey)\n self.set_addr_fingerprint_key_inf(addrFingerprintKeyInf)\n self.__prepareSigAndEncMsg()\n except (InvalidDistributerAddressException, NoFingerprintException, DBConnectionException, NoKeyException):\n self.__prepareSigMsg()",
"def __prepareSigMsg(self):\n try: \n userInfoTmp = 'FIRST ERROR: ' + self.get_user_info()\n addressMsgDict = {}\n if self.get_dist_key_idsig() is None:\n (_distKeyIDEnc, distKeyIDSig) = self.gnupg.getKeyIDsFromDist(self.get_dist_addr())\n self.set_dist_key_idsig(distKeyIDSig)\n userInfoTmp = userInfoTmp + '\\nNO WAY TO ENCRYPT THIS MESSAGE' + '\\nMAYBE YOU NEED TO CONTACT THE ADMINISTRATOR'\n msg = _util.generateMIMEMsg('plain', userInfoTmp, None, None, None, None)\n signature = self.gnupg.signMsg(msg, self.get_dist_key_idsig())\n msgSig = _util.generateMIMEMsg('signed', msg, signature, self.get_dist_addr(), self.get_sender_addr(), '')\n addressMsgDict[self.get_sender_addr()] = msgSig\n except (NoDistributerKeyIDsException, SigningException) as e:\n userInfoTmp = userInfoTmp + ' \\nNO WAY TO SIGN AND ENCRYPT THIS MESSAGE: ' + e.__str__() + '\\nPLEASE CONTACT THE ADMINISTRATOR'\n msg = _util.generateMIMEMsg('plain', userInfoTmp, None, self.get_dist_addr(), self.get_sender_addr(), None)\n addressMsgDict[self.get_sender_addr()] = msg\n self.set_addr_msg_dict(addressMsgDict)",
"def publicMessage(con, nick, message):",
"def test_key_upload_with_bad_format_key(): \r\n test_data = good_data.copy()\r\n test_data['gen_upload_choice'] = '2'\r\n \r\n data = 'this is a fake key'\r\n fh = open(os.path.join(os.getcwd(), 'fakekey'), 'w')\r\n fh.write(data)\r\n fh.close()\r\n fh = open('fakekey')\r\n test_data['pubkey'] = fh\r\n \r\n response = c.post('/html/register', test_data, follow=True)\r\n fh.close()\r\n os.remove(os.path.join(os.getcwd(), 'fakekey'))\r\n \r\n assert(response.status_code == 200)\r\n assert(\"p class=\\\"warning\\\"\" in response.content)",
"def mix_client_n_hop(public_keys, address, message):\n G = EcGroup()\n # assert G.check_point(public_key)\n assert isinstance(address, bytes) and len(address) <= 256\n assert isinstance(message, bytes) and len(message) <= 1000\n\n # Encode the address and message\n # use those encoded values as the payload you encrypt!\n address_plaintext = pack(\"!H256s\", len(address), address)\n message_plaintext = pack(\"!H1000s\", len(message), message)\n #print(\"add:\",address)\n #print(\"mes:\",message)\n\n ## Generate a fresh public key\n private_key = G.order().random()\n client_public_key = private_key * G.generator()\n\n ## ADD CODE HERE\n #similar to task 2 but more steps involved and conditional statements\n\n #generate blinded public keys list, first entry is just the first public key\n public_keys_blinded = [public_keys[0]]\n #generate shared key for later use\n shared_element = private_key * public_keys[0]\n key_material = sha512(shared_element.export()).digest()\n\n #if n > 1, hence if there are multiple hops, then:\n if (len(public_keys) > 1):\n #generate and initialise a blinding factor to create unlinkability between public keys\n blinding_factor = Bn.from_binary(key_material[48:])\n #blind the public key\n new_ec_public_key = blinding_factor * public_keys[1]\n #insert the blinded public key to the start of the list, so encryption takes place in a FILO sequence\n public_keys_blinded.insert(0,new_ec_public_key)\n\n #repeat the steps above for any remaining unblinded public keys:\n if (len(public_keys) > 2):\n for i,publk in enumerate(public_keys[2:]):\n #print(\"public_keys_blinded:\", public_keys_blinded)\n #print(\"i\", i)\n #print(\"pub:\",len(public_keys))\n #generate shared key\n shared_element = private_key * public_keys_blinded[0]\n key_material = sha512(shared_element.export()).digest()\n #update blinding factor for the next key\n blinding_factor = Bn.from_binary(key_material[48:]) * blinding_factor\n #blind the key\n new_ec_public_key = blinding_factor * publk\n #store the blinded public key into the list, at the start of list\n public_keys_blinded.insert(0,new_ec_public_key)\n\n\n #when encrypting, the blinded public keys should be processed in an inverse order to the normal public keys,\n #from the the last entry, working to the first entry to ensure correct order for decryption\n #don't need to reverse the order of the blinded public keys anymore, since they were inserted appropriately\n\n #reversed_public_keys_blinded = (reversed(public_keys_blinded))\n #print(\"reversed:\", reversed_public_keys_blinded)\n\n #initialise final hmacs list to be returned\n hmacs = []\n #initialise the address and message ciphertext variables for the interations later\n address_cipher = 0\n message_cipher = 0\n\n # using each element in the list, i.e. 
each key:\n for i, public_key_rb in enumerate(public_keys_blinded):\n #generate shared key\n shared_element = private_key * public_key_rb\n key_material = sha512(shared_element.export()).digest()\n #initisalise key variables\n hmac_key = key_material[:16]\n address_key = key_material[16:32]\n message_key = key_material[32:48]\n #set IV to benotep 16 zeros\n iv = b\"\\x00\"*16\n #if not first round then keep iterating over the ciphertexts, else use plaintext to create the ciphertexts\n if (address_cipher != 0 and message_cipher != 0):\n #encrypting the address and message using the previous ciphertext\n address_cipher = aes_ctr_enc_dec(address_key, iv, address_cipher)\n message_cipher = aes_ctr_enc_dec(message_key, iv, message_cipher)\n else:\n #encrypting the address and message using the plaintext\n address_cipher = aes_ctr_enc_dec(address_key, iv, address_plaintext)\n message_cipher = aes_ctr_enc_dec(message_key, iv, message_plaintext)\n\n aes = Cipher(\"AES-128-CTR\")\n #create the HMAC\n h = Hmac(b\"sha512\", hmac_key)\n #encrypting HMACS:\n new_hmacs = []\n #for each HMAC in the final list:\n for j, other_mac in enumerate(hmacs):\n # Ensure the IV is different for each HMAC\n iv = pack(\"H14s\", j, b\"\\x00\"*14)\n #encrypt the HMAC, taken directly from decrypting function above\n hmac_plaintext = aes_ctr_enc_dec(hmac_key, iv, other_mac)\n hmac_plaintext = hmac_plaintext[:20]\n #store the encrypted hmac into the list used to return all hmacs\n new_hmacs += [hmac_plaintext]\n #append the encrypted HMAC to the HMAC created above\n h.update(hmac_plaintext)\n #append the address and message ciphertexts to the HMAC\n h.update(address_cipher)\n h.update(message_cipher)\n #format the HMAC to make sure it can be processed by the decoding function properly\n expected_mac = h.digest()\n expected_mac = expected_mac[:20]\n #insert the formatted HMAC covering the ciphertexts into the final list of HMACS,\n #at the start of the list because this gets checked first in the decryption function\n new_hmacs.insert(0, expected_mac)\n #set the return variable of all HMACS to the complete list HMACS \n hmacs = new_hmacs\n\n #print(\"hmacs:\", hmacs)\n\n return NHopMixMessage(client_public_key, hmacs, address_cipher, message_cipher)",
"def testConnectWithoutKeySharingFails(looper, txnPoolNodeSetNotStarted):\n with pytest.raises(RemoteNotFound):\n sendMessageToAll(txnPoolNodeSetNotStarted, txnPoolNodeSetNotStarted[0])",
"def on_invalid_msg(self, channel: Channel, session: Session, exc: InvalidMessageError) -> None:",
"def test_bad_key(self):\n locker = Locker.create(self.tempdir, self.content_path(), b'01234567')\n\n success = locker.unpack(self.work_path('unpacked', create=True), b'bogus', locker.mac)\n\n self.assertFalse(success)",
"def api_checkkey():\n config = GitReceiveConfig.load(g.cintf.db)\n key = parse_public_key(request.args['key'])\n if not key[:2] in [k[:2] for k in config.auth_keys]:\n return 'unauthorized'\n return 'ok'"
] |
{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
|
NODE sends a message containing an invalid signature to OTHER. OTHER should drop it
|
def test_invalid_signature(self):
    node, other = self.create_nodes(2)
    other.send_identity(node)
    message = node.create_full_sync_text('Should drop')
    packet = node.encode_message(message)
    # replace the valid signature with an invalid one
    invalid_packet = packet[:-node.my_member.signature_length] + 'I' * node.my_member.signature_length
    self.assertNotEqual(packet, invalid_packet)
    # give invalid message to OTHER
    other.give_packet(invalid_packet, node)
    self.assertEqual(other.fetch_messages([u"full-sync-text", ]), [])
|
[
"def test_submit_invalid_signed_message(self):\n r = self._submit_message('Not a PGP-signed message.')\n self.assertIn(err_messages['not_signed'], r.data)\n\n # Submit a signed message that's been modified.\n f = open(os.path.join(self.files, 'invalid.sig'))\n invalid_msg = f.read()\n f.close()\n r = self._submit_message(invalid_msg)\n self.assertIn(err_messages['invalid_sig'], r.data)",
"def test_valid_fees_invalid_payload_sig(\n helpers,\n fees_set,\n address_main,\n mint_tokens\n):\n request = helpers.request.nym()\n request = add_fees_request_with_address(\n helpers,\n fees_set,\n request,\n address_main\n )\n sig = getattr(request, f.SIG.nm)\n # Reverse the signature of NYM txn sender, making it invalid\n sig = sig[::-1]\n setattr(request, f.SIG.nm, sig)\n with pytest.raises(RequestNackedException):\n helpers.sdk.send_and_check_request_objects([request])",
"def __prepareSigMsg(self):\n try: \n userInfoTmp = 'FIRST ERROR: ' + self.get_user_info()\n addressMsgDict = {}\n if self.get_dist_key_idsig() is None:\n (_distKeyIDEnc, distKeyIDSig) = self.gnupg.getKeyIDsFromDist(self.get_dist_addr())\n self.set_dist_key_idsig(distKeyIDSig)\n userInfoTmp = userInfoTmp + '\\nNO WAY TO ENCRYPT THIS MESSAGE' + '\\nMAYBE YOU NEED TO CONTACT THE ADMINISTRATOR'\n msg = _util.generateMIMEMsg('plain', userInfoTmp, None, None, None, None)\n signature = self.gnupg.signMsg(msg, self.get_dist_key_idsig())\n msgSig = _util.generateMIMEMsg('signed', msg, signature, self.get_dist_addr(), self.get_sender_addr(), '')\n addressMsgDict[self.get_sender_addr()] = msgSig\n except (NoDistributerKeyIDsException, SigningException) as e:\n userInfoTmp = userInfoTmp + ' \\nNO WAY TO SIGN AND ENCRYPT THIS MESSAGE: ' + e.__str__() + '\\nPLEASE CONTACT THE ADMINISTRATOR'\n msg = _util.generateMIMEMsg('plain', userInfoTmp, None, self.get_dist_addr(), self.get_sender_addr(), None)\n addressMsgDict[self.get_sender_addr()] = msg\n self.set_addr_msg_dict(addressMsgDict)",
"def test_verify_bad_signature(self):\n rsa = RSA.load_key(self.privkey)\n message = \"This is the message string\"\n digest = sha.sha(message).digest() \n\n otherMessage = \"Abracadabra\"\n otherDigest = sha.sha(otherMessage).digest() \n otherSignature = rsa.sign(otherDigest)\n\n self.assertRaises(RSA.RSAError, rsa.verify, \n digest, otherSignature)",
"def send(self, recipient, message):\n\t\tpass",
"def test_verify_invalid_signature(self):\n f = open(os.path.join(self.files, 'invalid.sig'))\n invalid = f.read()\n f.close()\n\n verified, err_msg = gpg.verify('notasignedmessage')\n self.assertFalse(verified)\n self.assertEqual(err_msg, err_messages['not_signed'])\n\n verified, err_msg = gpg.verify(invalid)\n self.assertFalse(verified)\n self.assertEqual(err_msg, err_messages['invalid_sig'])",
"def __prepareErrorMSGForSender(self):\n dm = DistributerManager.DistributerManager()\n dkm = DistributerKeyManager.DistributerKeyManager()\n userInfo = 'YOUR MESSAGE FROM ' + self.get_timestamp() + ' COULD NOT SEND TO ' + ', '.join(list(self.get_addr_msg_dict().keys()))\n self.set_user_info(userInfo)\n self.set_dist_keys(None)\n addrFingerprintKeyInf = {}\n try:\n senderFingerprint = dm.getFingerprint(self.get_sender_addr(), self.get_dist_addr())\n senderKey = dkm.getKeyFromUser(senderFingerprint)\n addrFingerprintKeyInf[self.get_sender_addr()] = (senderFingerprint, senderKey)\n self.set_addr_fingerprint_key_inf(addrFingerprintKeyInf)\n self.__prepareSigAndEncMsg()\n except (InvalidDistributerAddressException, NoFingerprintException, DBConnectionException, NoKeyException):\n self.__prepareSigMsg()",
"def on_invalid_msg(self, channel: Channel, session: Session, exc: InvalidMessageError) -> None:",
"def test_malformed_rpc(node_factory):\n l1 = node_factory.get_node()\n\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n sock.connect(l1.rpc.socket_path)\n\n # No ID\n sock.sendall(b'{\"jsonrpc\":\"2.0\",\"method\":\"getinfo\",\"params\":[]}')\n obj, _ = l1.rpc._readobj(sock, b'')\n assert obj['error']['code'] == -32600\n\n # No method\n sock.sendall(b'{\"id\":1, \"jsonrpc\":\"2.0\",\"params\":[]}')\n obj, _ = l1.rpc._readobj(sock, b'')\n assert obj['error']['code'] == -32600\n\n # Complete crap\n sock.sendall(b'[]')\n obj, _ = l1.rpc._readobj(sock, b'')\n assert obj['error']['code'] == -32600\n\n # Bad ID\n sock.sendall(b'{\"id\":{}, \"jsonrpc\":\"2.0\",\"method\":\"getinfo\",\"params\":[]}')\n obj, _ = l1.rpc._readobj(sock, b'')\n assert obj['error']['code'] == -32600\n\n # Bad method\n sock.sendall(b'{\"id\":1, \"method\": 12, \"jsonrpc\":\"2.0\",\"params\":[]}')\n obj, _ = l1.rpc._readobj(sock, b'')\n assert obj['error']['code'] == -32600\n\n # Unknown method\n sock.sendall(b'{\"id\":1, \"method\": \"unknown\", \"jsonrpc\":\"2.0\",\"params\":[]}')\n obj, _ = l1.rpc._readobj(sock, b'')\n assert obj['error']['code'] == -32601\n\n sock.close()",
"def verify(self, public_key, message, signature):",
"def test_handle_invalid(self):\n # setup\n invalid_performative = SigningMessage.Performative.SIGN_TRANSACTION\n incoming_message = self.build_incoming_message(\n message_type=SigningMessage,\n dialogue_reference=(\"1\", \"\"),\n performative=invalid_performative,\n terms=self.terms,\n raw_transaction=SigningMessage.RawTransaction(\n \"some_ledger_id\", {\"some_key\": \"some_value\"}\n ),\n to=str(self.skill.skill_context.skill_id),\n )\n\n # operation\n with patch.object(self.signing_handler.context.logger, \"log\") as mock_logger:\n self.signing_handler.handle(incoming_message)\n\n # after\n mock_logger.assert_any_call(\n logging.WARNING,\n f\"cannot handle signing message of performative={invalid_performative} in dialogue={self.signing_dialogues.get_dialogue(incoming_message)}.\",\n )",
"def verify(self, key, msg, sig): # pragma: no cover\n raise NotImplementedError()",
"def test_Bridge_verifyExtraInfoSignature_good_signature(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.assertIsNone(self.bridge._verifyExtraInfoSignature(self.extrainfo))",
"def test_handle_invalid(self):\n # setup\n invalid_performative = SigningMessage.Performative.SIGN_TRANSACTION\n incoming_message = self.build_incoming_message(\n message_type=SigningMessage,\n dialogue_reference=(\"1\", \"\"),\n performative=invalid_performative,\n terms=self.terms,\n raw_transaction=SigningMessage.RawTransaction(\n \"some_ledger_id\", {\"some_key\": \"some_value\"}\n ),\n to=str(self.skill.skill_context.skill_id),\n )\n\n # operation\n with patch.object(self.logger, \"log\") as mock_logger:\n self.signing_handler.handle(incoming_message)\n\n # after\n mock_logger.assert_any_call(\n logging.INFO,\n f\"received {incoming_message.performative} from decision_maker, message={incoming_message}\",\n )\n\n mock_logger.assert_any_call(\n logging.WARNING,\n f\"cannot handle signing message of performative={invalid_performative} in dialogue={self.signing_dialogues.get_dialogue(incoming_message)}.\",\n )",
"def testObsoleteSignatureVerification(self):\n gnupg = self._get_gnupg()\n self.assertFalse(gnupg.is_unusable)\n signed_file = self._get_data_file('signed_by_355304E4.gpg')\n\n verif = gnupg.verify_file(path=signed_file)\n self.assertFalse(verif.is_valid)",
"def testInvalidSignature(self):\n gnupg = self._get_gnupg()\n self.assertFalse(gnupg.is_unusable)\n verif = gnupg.verify_file(path='/etc/passwd')\n self.assertFalse(verif.is_valid)",
"def test_handle_message_wrong_target(self):\n\n msg = Message(name='start', target='fake-id_10', origin='fake-id')\n self.root.state = 'active'\n self.foreach.state = 'active'\n result = self.root.handle_message(self.ch, msg)\n self.assertEqual(result, 'ignored')",
"def unsigned_token_handler(error_message):\n return jsonify({\n 'status': 400,\n 'message': 'Cannot verify the provided token'\n }), 400",
"def test_send_find_value_unknown(port, version, public_key, private_key):\n item = {\n 'uuid': str(uuid.uuid4()),\n 'recipient': REMOTE_NODE_PUBLIC_KEY,\n 'sender': public_key,\n 'reply_port': 1908,\n 'version': version,\n 'key': sha512('an un-findable key'.encode('utf-8')).hexdigest(),\n }\n msg = seal_message('findvalue', item, private_key)\n result = send_message(port, msg)\n assert result.status_code == 200\n reply = result.json()\n assert reply['uuid'] == item['uuid']\n assert reply['sender'] == REMOTE_NODE_PUBLIC_KEY\n assert reply['recipient'] == public_key\n assert reply['message'] == 'nodes'\n assert reply['reply_port'] == port\n assert reply['version'] == version\n assert 'nodes' in reply\n assert isinstance(reply['nodes'], list)\n assert len(reply['nodes']) == 1 # the node only knows about us!\n assert 'seal' in reply\n assert check_seal(from_dict(reply))",
"def test_verify_bad_method(self):\n rsa = RSA.load_key(self.privkey)\n message = \"This is the message string\"\n digest = md5.md5(message).digest() \n signature = rsa.sign(digest, 'sha1')\n self.assertRaises(ValueError, rsa.verify,\n digest, signature, 'bad_digest_method')"
] |
{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
|
Return the suffixes that match the given principal
|
def _get_suffixes_for_principal(self, config, value, principal):
    suffixes_principals = [(suffix, self._format_principal(value, suffix))
                           for suffix in config.keys()]
    return [s for s, p in suffixes_principals if p == principal]
|
[
"def suffixes(self) -> Dict[str, Union[Tuple[str, ...], Dict[str, Tuple[str, ...]]]]:\n return self._normalize(\"suffixes\")",
"def suffixes(self) -> List[str]:\n\t\treturn self.path.suffixes",
"def suffixes (self, suffix = ''):\n results = []\n\n if self.is_word and suffix != \"\":\n results.append(suffix)\n\n if len(self.children) > 0:\n for char in self.children:\n results += self.children[char].suffixes(suffix+char)\n\n return results",
"def get_suffixes(ims: OrderedDict) -> Tuple[str, str]:\n if ims[\"package\"] == \"tlul_pkg\" and ims[\"struct\"] == \"tl\":\n return (\"_h2d\", \"_d2h\")\n\n return (\"_req\", \"_rsp\")",
"def stringSuffixes(s):\n suffixes = list() \n \n for i in range(len(s)):\n suffixes.append(s[i:])\n \n return suffixes",
"def ListSuffixes(directories):\n\tcommon = CommonPrefix(directories)\n\tsuffixes = [re.sub('^'+common,'',directory) for directory in directories]\n\treturn suffixes",
"def match(s, rule, rule_dict):\n\n # check base cases \n if not s:\n return [] # string already consumed\n\n if rule == [['a']]:\n return [ s[1:] ] if s[0] == 'a' else []\n \n if rule == [['b']]: \n return [ s[1:] ] if s[0] == 'b' else []\n\n all_suffixes = []\n\n for subrules in rule: # rule is list of options\n prefixes = [s]\n\n for subrule in subrules:\n prefixes = advance(prefixes, subrule, rule_dict)\n\n for p in prefixes:\n all_suffixes.append(p) \n\n return all_suffixes",
"def prefixes_and_suffixes(P):\n n = len(P)\n prefixes = set()\n suffixes = set()\n for idx in range(n+1):\n prefixes.add(P[0:idx])\n suffixes.add(P[idx:n+1])\n return prefixes & suffixes # Return union of prefixes and suffixes",
"def is_suffix(self, current_suffix, original, debug=DEBUG): #current_suffix is more like current_prefix\n if (current_suffix == \"\"): #exit conditions\n return \"*\";\n else:\n # 3 conditions for possible suffix\n split = (len(original)-len(current_suffix)) #the position at which the word is split 12 - 11 = 11 or -1\n first_part_uncut = original[0:split+1]\n first_part = original[0:split]\n first_part_cut = first_part[0:-1]\n second_part = original[split:];\n if ((len(first_part) != 0) and (first_part in self.words_check)): #find_prefix(forward_trie, first_part)[0] \n second_condition = self.forward_trie.probability(first_part_cut, first_part, DEBUG)\n if ((second_condition > 1 - threshold) and (second_condition < 1 + threshold)): #close to 1 (#TODO: Test for closer values)\n #third condition\n third_condition = self.forward_trie.probability(first_part, first_part_uncut, DEBUG)\n if (third_condition < 1):\n self.word_score_suffix[second_part] = self.word_score_suffix.get(second_part, 0) + (reward) + 1; #20 instead of 19 because they'll be -1'd anyway. It avoids a few elses #morphemes might not in the original wordlist \n self.word_score_suffix[second_part] = self.word_score_suffix.get(second_part, 0) + punish;\n self.is_suffix(current_suffix[0:-1], original, DEBUG) #recursively cut off the last letter",
"def get_config_suffixes(ibs, configid_list):\n cfgsuffix_list = ibs.db.get(CONFIG_TABLE, ('config_suffix',), configid_list)\n return cfgsuffix_list",
"def bad_suffix_heuristic(pattern):\n bad_suffixes = {}\n for i in range(0, len(pattern)):\n bad_suffixes[pattern[i]] = i\n return bad_suffixes",
"def __ensure_suffix_stem(t, suffix):\n tpath = str(t)\n if not tpath.endswith(suffix):\n stem = tpath\n tpath += suffix\n\n return tpath, stem\n else:\n stem, ext = os.path.splitext(tpath)\n\n return t, stem",
"def suffix(pattern):\r\n return pattern[1:len(pattern)]",
"def _suffixes_from_device(devcls):\n for cpt, attr in devcls._sig_attrs.items():\n if hasattr(cpt, \"defn\"):\n items = [(cls, suffix) for cls, suffix, kwargs in cpt.defn.values()]\n elif hasattr(cpt, \"suffix\"):\n items = [(cpt.cls, cpt.suffix)]\n else:\n items = []\n\n for cls, suffix in items:\n yield suffix\n if issubclass(cls, EpicsSignalWithRBV):\n yield \"{}_RBV\".format(suffix)",
"def glob_suffixes(root_path: Path, suffixes: Union[List[str], str]) -> List[Path]:\n if isinstance(suffixes, str):\n suffixes = [suffixes]\n return sorted([f for f in root_path.glob(\"**/*\") if file_suffix_in(f, suffixes)])",
"def getFilesBySuffix(self, suffix=None):\n if not suffix:\n return self.getPrimaryFiles()\n result = []\n for f in self.Files:\n if f.cdbf_name.lower().endswith(suffix.lower()):\n result.append(f)\n return result",
"def findParentSuffix(self, suffix):\n rdns = ldap.explode_dn(suffix)\n del rdns[0]\n\n while len(rdns) > 0:\n suffix = ','.join(rdns)\n try:\n mapent = self.getMTEntry(suffix)\n return suffix\n except NoSuchEntryError:\n del rdns[0]\n\n return \"\"",
"def presuffix_heuristic(pattern):\n presuffixes = [None] * (len(pattern) + 1)\n presuffixes[len(pattern) - 1] = 0\n presuffixes[len(pattern)] = 0\n\n for i in range(len(pattern) - 2, -1, -1):\n j = len(pattern) - presuffixes[i + 1]\n while pattern[i] != pattern[j-1] and presuffixes[j] > 0:\n j = len(pattern) - presuffixes[j]\n if pattern[i] == pattern[j-1]:\n presuffixes[i] = len(pattern) - j + 1\n else:\n presuffixes[i] = 0\n\n return presuffixes",
"def getNameAndSuffix(name):\n\n\tx = name.split(\".\")\n\tif len(x) == 1: # if there is not any suffix add empty record\n\t\tx.append(\"\")\n\t\n\tsuffixes = \".\".join(x[1:])\t# there may be more periods inthe name\n\treturn (x[0], suffixes) # convert list to tuple"
] |
{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
|
Return the id and the values of the LocalRolesField objects on the current context
|
def field_and_values_list(self):
    fields = get_localrole_fields(self.fti)
    field_and_values = []
    for fieldname, _field in fields:
        try:
            if not base_hasattr(self.context, fieldname):
                continue
        except RequiredMissing:
            continue
        values = getattr(self.context, fieldname) or []
        if not isinstance(values, list):
            values = [values]
        for value in values:
            field_and_values.append((fieldname, value))
    return field_and_values
|
[
"def get_role_id(self):\n lis1 = []\n for roleids in self.mysession.query(Role.roleID.label('roleID')).all():\n lis1.append(roleids.roleID)\n return lis1",
"def get_local_roles(obj, principal):\n ctype = ContentType.objects.get_for_model(obj)\n\n if isinstance(principal, User):\n return [prr.role for prr in PrincipalRoleRelation.objects.filter(\n user=principal, content_id=obj.id, content_type=ctype)]\n else:\n return [prr.role for prr in PrincipalRoleRelation.objects.filter(\n group=principal, content_id=obj.id, content_type=ctype)]",
"def get_role_id_user(self):\n lis2 = []\n for roleids in self.mysession.query(User.roleID.label('roleID')).all():\n lis2.append(roleids.roleID)\n return lis2",
"def get_staff_roles(request: Request):\n return staff_roles",
"def get_roles(self) -> List[str]:\n pass",
"def get_roles(self, model_id):\n doc = self.__get_model(model_id)\n return doc['roles']",
"def test_local_roles():\n vocab = roles.LocalRolesChoices\n\n assert len(vocab) == 9\n assert vocab['system'].value == 'system'\n assert vocab['system'].name == 'system'\n assert vocab['system'].label == 'r:system'",
"def getRoles(self):\n rolesList = []\n roles = self.userTree.find('roles')\n for role in roles.findall('role'):\n rolesList.append(role.text)\n return(rolesList)",
"def get_roles(self) -> RoleList:\n\n # only 1 role for map (eg ARAM)\n if len(self.current_queue.roles) == 1:\n return self.current_queue.roles\n\n if self.assigned_role is not None:\n roles = self.data.primary_roles_data()\n roles.move_to_front(self.assigned_role)\n return roles\n else:\n return self.data.primary_roles_data()",
"def user_roles(request):\n return {\n 'ROLE_PATIENT': UserProfile.ROLE_PATIENT,\n 'ROLE_THERAPIST': UserProfile.ROLE_THERAPIST,\n 'ROLE_RESEARCHER': UserProfile.ROLE_RESEARCHER\n }",
"def key_roles_for_entity(context):\n request = context['request']\n entity = entity_for_page(request.current_page)\n memberships = Membership.objects.filter(entity = entity).exclude(role=\"\").order_by('membership_order',) \n duplicates = set()\n membership_list = []\n for membership in memberships:\n if membership.role_plural not in duplicates:\n duplicates.add(membership.role_plural)\n membership_list.extend(memberships.filter(role_plural = membership.role_plural))\n # returns a list of memberships, in the right order - we use a regroup tag to group them by person in the template \n return {'membership_list': membership_list}",
"def get_custom_roles_for(object_type):\n if getattr(flask.g, \"global_role_names\", None) is None:\n flask.g.global_role_names = collections.defaultdict(dict)\n query = db.session.query(\n AccessControlRole.object_type,\n AccessControlRole.id,\n AccessControlRole.name,\n ).filter(\n AccessControlRole.object_type.isnot(None), # noqa\n )\n for type_, id_, name_ in query:\n flask.g.global_role_names[type_][id_] = name_\n return flask.g.global_role_names[object_type]",
"def __repr__(self):\n return f\"{self.role}: {self.id}\"",
"async def roles(self, ctx):\n pass",
"def get_user_roles(screen_def):\n \n selected_users = get_selections(screen_def,'username')\n if debug:\n logger.debug('selected_users = ')\n logger.debug((selected_users))\n # If there are selections, concatenate them into a comma\n # separated list \n if selected_users != None: \n user_names = \"('\" + \"','\".join(selected_users) + \"')\" \n if debug:\n logger.debug((\"user_names =\" + user_names))\n \n # Build a query for all the selected users\n # that will be displayed in a user roles screen\n screen_def = screens.get_screen_def('UserRoles')\n if debug: \n logger.debug(\"screen = {}\".format(screen_def['name']))\n screen_def['query'] = \\\n \"\"\"SELECT grantee,\n granted_role,\n admin_option,\n default_role\n FROM dba_role_privs\n WHERE grantee IN \"\"\" + user_names + \"\"\" \n ORDER BY var_order_by\n \"\"\"\n \n if debug:\n logger.debug(\"query = {}\".format(screen_def['query']))\n \n # Set the title of the user roles screen and display the results of the \n # query\n screen_def['title'] = 'Roles granted to selected users'\n display_window(screen_def)",
"def __revealRoles(self):\n log(\"MState __revealRoles\",4)\n r = \"GG, here were the roles:\"\n\n savedRolesSortedKeys = sorted(self.savedRoles, key=(lambda x: ALL_ROLES.index(self.savedRoles[x])))\n\t\t\n for player_id in savedRolesSortedKeys:\n role = self.savedRoles[player_id]\n r += \"\\n\" + self.mainComm.getName(player_id) + \": \" + role\n return r",
"def prepare(_next, self):\n _next(self)\n if not self.roles_support:\n return\n if not has_permission('manage', self.model.parent, self.request):\n # XXX: yafowil selection display renderer\n return\n value = []\n if self.action_resource == 'edit':\n value = self.model.model.roles\n roles_widget = factory(\n 'field:label:select',\n name='principal_roles',\n value=value,\n props={\n 'label': 'Roles',\n 'multivalued': True,\n 'vocabulary': self.roles_vocab,\n 'format': 'single',\n 'listing_tag': 'ul',\n 'listing_label_position': 'after',\n },\n )\n save_widget = self.form['save']\n self.form.insertbefore(roles_widget, save_widget)",
"def get_player_roles(self) -> List[str]:\n pass",
"def get_config(self, fieldname):\n if not base_hasattr(self.fti, 'localroles'):\n return {}\n return self.fti.localroles.get(fieldname, {})"
] |
{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
|
Return the config from FTI for a given fieldname
|
def get_config(self, fieldname):
    if not base_hasattr(self.fti, 'localroles'):
        return {}
    return self.fti.localroles.get(fieldname, {})
|
[
"def get_config_fields(self):\n raise NotImplementedError",
"def _get_field_by_name(model, field):\n field_dict = {x.name: x for x in model._meta.get_fields()} # noqa\n return field_dict[field]",
"def get_value(self, config_field):\n raise NotImplementedError",
"def read_attr(self, fieldname):\n return self._read_dict(fieldname)",
"def fieldtype(self):\n return self.__class__.__name__",
"def get_fabric(self, field, field_name=..., fake=...):\r\n ...",
"def get_type_of(self, field_type):\n for entry in self.fields.all():\n if entry.field.field_type.lower() == field_type:\n return entry.value\n return ''",
"def generate_field(name, data):\n assert 'type' in data\n field = TYPES_TO_FIELDS.get(data['type'], Unknown)()\n return field",
"def _meta(self, field, **kwargs):\n try:\n return self.meta[field][0]\n except (KeyError, IndexError):\n if 'default' in kwargs:\n return kwargs['default']\n else:\n raise KeyError('Required metadata not found: %s' % field)",
"def Get(self, f = None):\n return self.__fields[f]",
"def get_custom_fields(self, env, customfield={}):\r\n if not customfield: # return full list\r\n return TicketSystem(env.compmgr).get_custom_fields()\r\n else: # only return specific item with cfname\r\n all = TicketSystem(env.compmgr).get_custom_fields()\r\n for item in all:\r\n if item['name'] == customfield['name']:\r\n return item\r\n return None # item not found\r",
"def get_default(field):\r\n ...",
"def get_field_data(self, field):\n return self._get_field_type_data(field)[1]",
"def get_config_form():\n raise NotImplementedError",
"def field(self):\n\n _field = self.model._meta.fields.get(self.field_name, None)\n\n if isinstance(self._accessor, six.text_type):\n spec = self._accessor\n if spec[0] == ':':\n key_paths = spec[1:].split('.')\n # can be used to access nested JSONField\n for p in key_paths:\n try:\n p = int(p)\n except ValueError:\n pass\n _field = _field[p]\n elif callable(self._accessor):\n _field = self._accessor(_field)\n\n ctx = self.model._meta.database.get_sql_context()\n if self.field_type:\n _field = _field.cast(self.field_type().ddl_datatype(ctx).sql)\n\n return _field",
"def get(self, field, default=None):\n return self.fields.get(field, default)",
"def get_field(self, name):\n return self._fields[name]",
"def _get_field_name(cls, rule_content):\n return rule_content.get(cls.fieldname, None)",
"def get_field(model, name):\n return model._meta.get_field(name)"
] |
{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
|
Return a resource representing all property templates in this dataset.
|
def property_templates(self) -> PropertyTemplateCollection:
    return PropertyTemplateCollection(self.project_id, self.uid, self.session)
|
[
"def file_properties_templates_list_for_team(self):\n arg = None\n r = self.request(\n file_properties.templates_list_for_team,\n 'file_properties',\n arg,\n None,\n )\n return r",
"def _get_instance_templates(self):\r\n return [(instance.name, instance.t)\r\n for instance in self.get_instances()]",
"def templates(self):\n if self._templates is None:\n templates = {}\n dom = self._get_xml(self.TEMPLATES_PATH)\n for e in dom.getElementsByTagName('template'):\n name = e.getAttribute('name')\n if name in templates:\n raise ServiceError(\n \"Two templates with same name: \" + name)\n else:\n templates[name] = e.toxml()\n self._templates = templates\n return self._templates",
"def all_templates(self):\n if self._all_templates is None:\n all_templates = {}\n dom = self._get_xml(self.ALL_TEMPLATES_PATH)\n for e in dom.getElementsByTagName('template'):\n user = e.getAttribute('userName')\n name = e.getAttribute('name')\n if user in all_templates:\n templates = all_templates[user]\n templates[name] = e.toxml()\n else:\n templates = {}\n templates[name] = e.toxml()\n all_templates[user] = templates\n self._all_templates = all_templates\n return self._all_templates",
"def properties():\n properties = PropertyProfile.query.all()\n return render_template('properties.html',properties=properties)",
"def _all_templates(self):\n for startmodel in self._all_starting_models():\n for template in startmodel.templates:\n yield template",
"def get_registered_properties():\n return _metaschema_properties",
"def get_policy_templates(self, **kwargs):\n baseURL = self.baseURL + \"policy-templates/list\"\n return self._make_request(\"get\",baseURL)",
"def _resources(self):\r\n return self._resources_mapper()",
"def nested_resources(self):\n return self.items()",
"def iter_templates(self):\n for page in self.iter_templates_pages():\n results = page.json()['results']\n for item in results:\n yield item",
"def get_resources(self):\n\t\treturn self.model.all()",
"def PROPERTIES(self):\n return \"properties\"",
"def getProperties(self):\n return self.properties",
"def templates(self) -> Optional[Sequence['outputs.UpstreamTemplateResponse']]:\n return pulumi.get(self, \"templates\")",
"def components(self) -> List:\n\n return self._template",
"def get_templates(self):\n index_templates = {}\n for path in glob.iglob(self.data_path + '/template/*.json'):\n logger.debug('Reading index template setup from {}'.format(path))\n index_template = None\n with open(path) as f:\n index_template = json.load(f)\n template_name = index_template['name']\n setup_body = index_template['body']\n index_templates[template_name] = setup_body\n return index_templates",
"def all(self):\n return self._get_resourceset_class()(\n self._client,\n self._path,\n )",
"def get_properties(self):\n return self._properties",
"def test_api_v3_entity_templates_get(self):\n pass"
] |
{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
|
Return a resource representing all condition templates in this dataset.
|
def condition_templates(self) -> ConditionTemplateCollection:
    return ConditionTemplateCollection(self.project_id, self.uid, self.session)
|
[
"def list_question_templates(self):\n return self.query(\"\"\"{\n allQuestionTemplates {\n edges {\n node {\n id\n scId\n questionType\n text\n expectationType\n answerTemplate\n answerValidation\n storyTemplate\n compatibleSpecTypes\n }\n }\n }\n }\"\"\")",
"def all_templates(self):\n if self._all_templates is None:\n all_templates = {}\n dom = self._get_xml(self.ALL_TEMPLATES_PATH)\n for e in dom.getElementsByTagName('template'):\n user = e.getAttribute('userName')\n name = e.getAttribute('name')\n if user in all_templates:\n templates = all_templates[user]\n templates[name] = e.toxml()\n else:\n templates = {}\n templates[name] = e.toxml()\n all_templates[user] = templates\n self._all_templates = all_templates\n return self._all_templates",
"def templates(self):\n if self._templates is None:\n templates = {}\n dom = self._get_xml(self.TEMPLATES_PATH)\n for e in dom.getElementsByTagName('template'):\n name = e.getAttribute('name')\n if name in templates:\n raise ServiceError(\n \"Two templates with same name: \" + name)\n else:\n templates[name] = e.toxml()\n self._templates = templates\n return self._templates",
"def get_all_resource(self):\n query = APIData.query()\n query = query.filter(APIData.indexed_data == \"TYPE->RESOURCE\")\n query = query.filter(APIData.indexed_data == \"DATASET_ID->\" + str(self.key.id()))\n\n resources = query.fetch()\n resource_dict = []\n\n # if resources:\n # for r in resources:\n # resource_dict.append(r.to_api_object())\n\n return resources",
"def all(self):\n return self._get_resourceset_class()(\n self._client,\n self._path,\n )",
"def templates_query(self):\n if self.tempchanged:\n try:\n fname = self.templatedf.loc[(self.templatedf.teff == self.teff.value) &\n (self.templatedf.logg == self.grav.value) &\n (self.templatedf.met == self.met.value)].iloc[0].name\n fname = self.templatedir + fname\n kwargs = self.kwargs\n kwargs['wavearr'] = self.spec.spectral_axis.value\n temp_spec = self.cutspec(freader(fname, **self.kwargs))\n except (IndexError, FileNotFoundError, OSError):\n self.gottemplate = False\n return\n self.templatefname = fname\n self.temp_spec = temp_spec\n self.gottemplate = True\n return",
"def get_all_templates(obj):\n category = obj.category if isinstance(obj, Event) else obj\n return set(DesignerTemplate.find_all(DesignerTemplate.category_id.in_(categ['id'] for categ in category.chain)))",
"def BIF_templates(self):\n network_template = Template(\"network $name {\\n}\\n\")\n # property tag may or may not be present in model,and since no of properties\n # can be more than one, will replace them according to format otherwise null\n variable_template = Template(\n \"\"\"variable $name {\n type discrete [ $no_of_states ] { $states };\n$properties}\\n\"\"\"\n )\n property_template = Template(\" property $prop ;\\n\")\n # $variable_ here is name of variable, used underscore for clarity\n probability_template = Template(\n \"\"\"probability ( $variable_$separator_$parents ) {\n table $values ;\n}\\n\"\"\"\n )\n\n conditional_probability_template_total = Template(\n \"\"\"probability ( $variable_$separator_$parents ) {\n$values\n}\\n\"\"\"\n )\n\n conditional_probability_template = Template(\"\"\" ( $state ) $values;\\n\"\"\")\n\n return (\n network_template,\n variable_template,\n property_template,\n probability_template,\n conditional_probability_template_total,\n conditional_probability_template,\n )",
"def templates():\n return [\n Template(\"dummy\", [\n Decompressor,\n DummyService,\n ])\n ]",
"def get_template_filters(self):\n models = Pool._pool[self.database_name]['model']\n filters = []\n\n for model_name, model in models.iteritems():\n for f_name, f in inspect.getmembers(\n model, predicate=inspect.ismethod):\n\n if hasattr(f, '_template_filter'):\n filter = getattr(Pool().get(model_name), f_name)\n filters.append((filter.func_name, filter))\n\n return filters",
"def get_resources(self):\n\t\treturn self.model.all()",
"def write_initial_conditions(self):\n\t lines = []\n\t # fro each initial conditions defined in json\n\t for initial_cond in self.ic:\n\t\t # find the type wanted\n\t\t ICType = initial_cond['ICType']\n\t\t # and now depending on the type we define different ics\n\t\t if ICType == 'Sinusoidal':\n\t \t\t# find the functor \n\t \t\twith open(self.templatePath + '/templates_IC/functor_IC_sinusoidal.py','r') as functor_file:\n\t \t\t\tfor line in functor_file:\n\t \t\t\t\tif 'token_pulse_width' in line:\n\t \t\t\t\t\tlines.append(self.tab+'pulse_width = '+str(initial_cond['PulseWidth'])+'\\n')\n\t \t\t\t\telif 'token_amplitude' in line:\n\t \t\t\t\t\tlines.append(self.tab+'A = '+str(initial_cond['Amplitude'])+'\\n')\n\t \t\t\t\telse:\n\t \t\t\t\t\tlines.append(self.tab + line)\n\t return lines",
"def get_datadict_template():\n return {column: \"\" for column in get_columns()}",
"def _all_templates(self):\n for startmodel in self._all_starting_models():\n for template in startmodel.templates:\n yield template",
"def fetch_machine_specs_templates(self):\n sql = \"\"\"SELECT\n *\n FROM\n machine_specs\n WHERE\n is_template = 1\n ORDER BY\n name\n \"\"\" \n rows = self._execute(sql)\n \n return [rfp_machine_specs.MachineSpecsTemplate(row) for row in rows]",
"def response_templates(self) -> pulumi.Output[Optional[Mapping[str, str]]]:\n return pulumi.get(self, \"response_templates\")",
"def get_template_names(self):\r\n if self.template_name is None:\r\n return []\r\n else:\r\n return [self.template_name]",
"def _get_instance_templates(self):\r\n return [(instance.name, instance.t)\r\n for instance in self.get_instances()]",
"def conditions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['PrometheusSpecStorageVolumeClaimTemplateStatusConditionsArgs']]]]:\n return pulumi.get(self, \"conditions\")",
"def get_template_names(self):\n return [self.page.get_template(\n self.request,\n *self.args,\n **self.kwargs\n )]"
] |
{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
|
Return a resource representing all parameter templates in this dataset.
|
def parameter_templates(self) -> ParameterTemplateCollection:
    return ParameterTemplateCollection(self.project_id, self.uid, self.session)
|
[
"def parameter_template(self) -> Template:\n return self.__parameter_template",
"def _get_instance_templates(self):\r\n return [(instance.name, instance.t)\r\n for instance in self.get_instances()]",
"def get_all_resource(self):\n query = APIData.query()\n query = query.filter(APIData.indexed_data == \"TYPE->RESOURCE\")\n query = query.filter(APIData.indexed_data == \"DATASET_ID->\" + str(self.key.id()))\n\n resources = query.fetch()\n resource_dict = []\n\n # if resources:\n # for r in resources:\n # resource_dict.append(r.to_api_object())\n\n return resources",
"def generative_parameters(self):\n pass",
"def all_templates(self):\n if self._all_templates is None:\n all_templates = {}\n dom = self._get_xml(self.ALL_TEMPLATES_PATH)\n for e in dom.getElementsByTagName('template'):\n user = e.getAttribute('userName')\n name = e.getAttribute('name')\n if user in all_templates:\n templates = all_templates[user]\n templates[name] = e.toxml()\n else:\n templates = {}\n templates[name] = e.toxml()\n all_templates[user] = templates\n self._all_templates = all_templates\n return self._all_templates",
"def templates(self):\n if self._templates is None:\n templates = {}\n dom = self._get_xml(self.TEMPLATES_PATH)\n for e in dom.getElementsByTagName('template'):\n name = e.getAttribute('name')\n if name in templates:\n raise ServiceError(\n \"Two templates with same name: \" + name)\n else:\n templates[name] = e.toxml()\n self._templates = templates\n return self._templates",
"def all(self):\n return self._get_resourceset_class()(\n self._client,\n self._path,\n )",
"def static_cpu_templates_params():\n for name in sorted(get_supported_cpu_templates()):\n yield pytest.param(name, id=\"static_\" + name)",
"def get_template_names(self):\n return [self.page.get_template(\n self.request,\n *self.args,\n **self.kwargs\n )]",
"def iter_templates(self):\n for page in self.iter_templates_pages():\n results = page.json()['results']\n for item in results:\n yield item",
"def get_policy_templates(self, **kwargs):\n baseURL = self.baseURL + \"policy-templates/list\"\n return self._make_request(\"get\",baseURL)",
"def list_question_templates(self):\n return self.query(\"\"\"{\n allQuestionTemplates {\n edges {\n node {\n id\n scId\n questionType\n text\n expectationType\n answerTemplate\n answerValidation\n storyTemplate\n compatibleSpecTypes\n }\n }\n }\n }\"\"\")",
"def get_resources(self):\n\t\treturn self.model.all()",
"def _resources(self):\r\n return self._resources_mapper()",
"def templates(self) -> Optional[Sequence['outputs.UpstreamTemplateResponse']]:\n return pulumi.get(self, \"templates\")",
"def list_parameters(self):\n endpoint = self.build_url(\"/parameters\")\n return self.request('get', endpoint)",
"def get_all_resources(self) -> Generator[GenomicResource, None, None]:",
"def response_templates(self) -> pulumi.Output[Optional[Mapping[str, str]]]:\n return pulumi.get(self, \"response_templates\")",
"def _all_templates(self):\n for startmodel in self._all_starting_models():\n for template in startmodel.templates:\n yield template",
"def templates():\n return [\n Template(\"dummy\", [\n Decompressor,\n DummyService,\n ])\n ]"
] |
{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
|
Return a resource representing all material templates in this dataset.
|
def material_templates(self) -> MaterialTemplateCollection:
    return MaterialTemplateCollection(self.project_id, self.uid, self.session)
|
[
"def _all_templates(self):\n for startmodel in self._all_starting_models():\n for template in startmodel.templates:\n yield template",
"def templates(self):\n if self._templates is None:\n templates = {}\n dom = self._get_xml(self.TEMPLATES_PATH)\n for e in dom.getElementsByTagName('template'):\n name = e.getAttribute('name')\n if name in templates:\n raise ServiceError(\n \"Two templates with same name: \" + name)\n else:\n templates[name] = e.toxml()\n self._templates = templates\n return self._templates",
"def all(self):\n return self._get_resourceset_class()(\n self._client,\n self._path,\n )",
"def get_materials():\n\n return Material.query.all()",
"def fetch_machine_specs_templates(self):\n sql = \"\"\"SELECT\n *\n FROM\n machine_specs\n WHERE\n is_template = 1\n ORDER BY\n name\n \"\"\" \n rows = self._execute(sql)\n \n return [rfp_machine_specs.MachineSpecsTemplate(row) for row in rows]",
"def get_device_templates(self):\n response = self.request(PATH_DEVICE_TEMPLATES)\n\n if response.status_code == requests.codes.ok:\n return response.json()\n else:\n print(\"Unrecognised status for fetch device templates\" + response.status_code)",
"def all_templates(self):\n if self._all_templates is None:\n all_templates = {}\n dom = self._get_xml(self.ALL_TEMPLATES_PATH)\n for e in dom.getElementsByTagName('template'):\n user = e.getAttribute('userName')\n name = e.getAttribute('name')\n if user in all_templates:\n templates = all_templates[user]\n templates[name] = e.toxml()\n else:\n templates = {}\n templates[name] = e.toxml()\n all_templates[user] = templates\n self._all_templates = all_templates\n return self._all_templates",
"def get_sets(self, material):\n try:\n set_name = str(material[\"name\"]) + '_' + str(material[\"id\"])\n j = 0\n setlist = []\n for _set in self.source_dictionary.get(\"model_info\").get(\"sets\").get(set_name):\n setlist.append(None)\n setlist[j] = self.source_dictionary.get(\"model_info\").get(\"sets\").get(set_name)[j]\n setlist[j][\"texture\"] = (self.get_textures(_set))\n j += 1\n return setlist\n except TypeError:\n print(\"Please define correct source file first\")",
"def get_resources(self):\n\t\treturn self.model.all()",
"def get_all_resource(self):\n query = APIData.query()\n query = query.filter(APIData.indexed_data == \"TYPE->RESOURCE\")\n query = query.filter(APIData.indexed_data == \"DATASET_ID->\" + str(self.key.id()))\n\n resources = query.fetch()\n resource_dict = []\n\n # if resources:\n # for r in resources:\n # resource_dict.append(r.to_api_object())\n\n return resources",
"def _resources(self):\r\n return self._resources_mapper()",
"def get_all_resources(self) -> Generator[GenomicResource, None, None]:",
"def iter_templates(self):\n for page in self.iter_templates_pages():\n results = page.json()['results']\n for item in results:\n yield item",
"def templates(self) -> Optional[Sequence['outputs.UpstreamTemplateResponse']]:\n return pulumi.get(self, \"templates\")",
"def _get_instance_templates(self):\r\n return [(instance.name, instance.t)\r\n for instance in self.get_instances()]",
"def generate_materials_xml(self):\n\n xml_strings = []\n\n for mat in self.number.mat_to_ind:\n root = ET.Element(\"material\")\n root.set(\"id\", mat)\n\n density = ET.SubElement(root, \"density\")\n density.set(\"units\", \"sum\")\n\n temperature = ET.SubElement(root, \"temperature\")\n mat_id = self.number.mat_to_ind[mat]\n temperature.text = str(self.materials[mat_id].temperature)\n\n for nuc in self.number.nuc_to_ind:\n if nuc in self.participating_nuclides:\n val = 1.0e-24*self.number.get_atom_density(mat, nuc)\n\n # If nuclide is zero, do not add to the problem.\n if val > 0.0:\n if self.settings.round_number:\n val_magnitude = np.floor(np.log10(val))\n val_scaled = val / 10**val_magnitude\n val_round = round(val_scaled, 8)\n\n val = val_round * 10**val_magnitude\n\n nuc_element = ET.SubElement(root, \"nuclide\")\n nuc_element.set(\"ao\", str(val))\n nuc_element.set(\"name\", nuc)\n else:\n # Only output warnings if values are significantly\n # negative. CRAM does not guarantee positive values.\n if val < -1.0e-21:\n print(\"WARNING: nuclide \", nuc, \" in material \", mat,\n \" is negative (density = \", val, \" at/barn-cm)\")\n self.number[mat, nuc] = 0.0\n\n for sab in self.materials[mat_id].sab:\n sab_el = ET.SubElement(root, \"sab\")\n sab_el.set(\"name\", sab)\n\n if _have_lxml:\n fragment = ET.tostring(root, encoding=\"unicode\", pretty_print=\"true\")\n xml_strings.append(fragment)\n else:\n clean_xml_indentation(root, spaces_per_level=2)\n fragment = ET.tostring(root, encoding=\"unicode\", pretty_print=\"true\")\n xml_strings.append(fragment)\n\n xml_string = \"\".join(xml_strings)\n\n # Append beginning, end text.\n if self.rank == 0:\n xml_string = \"<?xml version='1.0' encoding='utf-8'?>\\n<materials>\\n\" + xml_string\n if self.rank == self.size:\n xml_string += \"\\n</materials>\"\n\n xml_bytes = np.fromstring(xml_string, dtype=np.uint8)\n\n # Use MPI-IO to write to disk.\n # First, communicate to all nodes the length of their string.\n str_len = np.zeros(self.size, np.int32)\n\n str_my_len = np.zeros(1, np.int32)\n str_my_len[0] = len(xml_string)\n self.comm.Allgather([str_my_len, MPI.INT], [str_len, MPI.INT])\n\n # Compute index start.\n start_ind = np.sum(str_len[0:self.rank])\n\n # Open/create file\n handle = MPI.File.Open(self.comm, \"materials.xml\", MPI.MODE_WRONLY|MPI.MODE_CREATE)\n\n handle.Seek(start_ind, MPI.SEEK_SET)\n handle.Write(xml_bytes)\n handle.Close()\n\n self.comm.barrier()",
"def get_templates(self):\n index_templates = {}\n for path in glob.iglob(self.data_path + '/template/*.json'):\n logger.debug('Reading index template setup from {}'.format(path))\n index_template = None\n with open(path) as f:\n index_template = json.load(f)\n template_name = index_template['name']\n setup_body = index_template['body']\n index_templates[template_name] = setup_body\n return index_templates",
"def list_question_templates(self):\n return self.query(\"\"\"{\n allQuestionTemplates {\n edges {\n node {\n id\n scId\n questionType\n text\n expectationType\n answerTemplate\n answerValidation\n storyTemplate\n compatibleSpecTypes\n }\n }\n }\n }\"\"\")",
"def getRRDTemplates(self):\n result = super(Device, self).getRRDTemplates()\n # Check if version of the system\n # modeled by OperatingSystem plugin is Windows 2003.\n # https://jira.hyperic.com/browse/HHQ-5553\n if '2003' in self.getOSProductName():\n for template in result:\n ad = self.getRRDTemplateByName('Active Directory 2003')\n if ad:\n if 'Active Directory' in template.id:\n result[result.index(template)] = ad\n if self.msexchangeversion:\n for template in result:\n exchange = self.getRRDTemplateByName(self.msexchangeversion)\n if exchange:\n if 'MSExchange' in template.id:\n result[result.index(template)] = exchange\n return result",
"def get_email_templates(self):\n call = \"rest/asset/v1/emailTemplates.json\"\n method = \"GET\"\n return self.__generic_api_call(call, method)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return a resource representing all measurement templates in this dataset.
|
def measurement_templates(self) -> MeasurementTemplateCollection:
    return MeasurementTemplateCollection(self.project_id, self.uid, self.session)
|
[
"def all_templates(self):\n if self._all_templates is None:\n all_templates = {}\n dom = self._get_xml(self.ALL_TEMPLATES_PATH)\n for e in dom.getElementsByTagName('template'):\n user = e.getAttribute('userName')\n name = e.getAttribute('name')\n if user in all_templates:\n templates = all_templates[user]\n templates[name] = e.toxml()\n else:\n templates = {}\n templates[name] = e.toxml()\n all_templates[user] = templates\n self._all_templates = all_templates\n return self._all_templates",
"def templates(self):\n if self._templates is None:\n templates = {}\n dom = self._get_xml(self.TEMPLATES_PATH)\n for e in dom.getElementsByTagName('template'):\n name = e.getAttribute('name')\n if name in templates:\n raise ServiceError(\n \"Two templates with same name: \" + name)\n else:\n templates[name] = e.toxml()\n self._templates = templates\n return self._templates",
"def _get_instance_templates(self):\r\n return [(instance.name, instance.t)\r\n for instance in self.get_instances()]",
"def get_device_templates(self):\n response = self.request(PATH_DEVICE_TEMPLATES)\n\n if response.status_code == requests.codes.ok:\n return response.json()\n else:\n print(\"Unrecognised status for fetch device templates\" + response.status_code)",
"def _all_templates(self):\n for startmodel in self._all_starting_models():\n for template in startmodel.templates:\n yield template",
"def fetch_machine_specs_templates(self):\n sql = \"\"\"SELECT\n *\n FROM\n machine_specs\n WHERE\n is_template = 1\n ORDER BY\n name\n \"\"\" \n rows = self._execute(sql)\n \n return [rfp_machine_specs.MachineSpecsTemplate(row) for row in rows]",
"def templates():\n return [\n Template(\"dummy\", [\n Decompressor,\n DummyService,\n ])\n ]",
"def peak_templates(self):\n peak_templates = []\n for peak_descr in self:\n expanded_dims = [dim_group.dimensions for dim_group in peak_descr]\n templates = product(*expanded_dims)\n for template in templates:\n peak_templates.append(PeakTemplate(template))\n return peak_templates",
"def iter_templates(self):\n for page in self.iter_templates_pages():\n results = page.json()['results']\n for item in results:\n yield item",
"def all(self):\n return self._get_resourceset_class()(\n self._client,\n self._path,\n )",
"def list_question_templates(self):\n return self.query(\"\"\"{\n allQuestionTemplates {\n edges {\n node {\n id\n scId\n questionType\n text\n expectationType\n answerTemplate\n answerValidation\n storyTemplate\n compatibleSpecTypes\n }\n }\n }\n }\"\"\")",
"def get_templates(self):\n index_templates = {}\n for path in glob.iglob(self.data_path + '/template/*.json'):\n logger.debug('Reading index template setup from {}'.format(path))\n index_template = None\n with open(path) as f:\n index_template = json.load(f)\n template_name = index_template['name']\n setup_body = index_template['body']\n index_templates[template_name] = setup_body\n return index_templates",
"def templates(self) -> Optional[Sequence['outputs.UpstreamTemplateResponse']]:\n return pulumi.get(self, \"templates\")",
"def create_report_from_measurements(\n template: Path,\n measurementsdata: Dict[str, List]) -> str:\n with open(template, 'r') as resourcetemplatefile:\n resourcetemplate = resourcetemplatefile.read()\n tm = Template(resourcetemplate)\n\n content = tm.render(\n data=measurementsdata\n )\n\n return content",
"def get_all_resource(self):\n query = APIData.query()\n query = query.filter(APIData.indexed_data == \"TYPE->RESOURCE\")\n query = query.filter(APIData.indexed_data == \"DATASET_ID->\" + str(self.key.id()))\n\n resources = query.fetch()\n resource_dict = []\n\n # if resources:\n # for r in resources:\n # resource_dict.append(r.to_api_object())\n\n return resources",
"def get_all_measurements():\n measurements = Measurement.objects.all()\n return measurements",
"def getRRDTemplates(self):\n result = super(Device, self).getRRDTemplates()\n # Check if version of the system\n # modeled by OperatingSystem plugin is Windows 2003.\n # https://jira.hyperic.com/browse/HHQ-5553\n if '2003' in self.getOSProductName():\n for template in result:\n ad = self.getRRDTemplateByName('Active Directory 2003')\n if ad:\n if 'Active Directory' in template.id:\n result[result.index(template)] = ad\n if self.msexchangeversion:\n for template in result:\n exchange = self.getRRDTemplateByName(self.msexchangeversion)\n if exchange:\n if 'MSExchange' in template.id:\n result[result.index(template)] = exchange\n return result",
"def all_templates_names(self):\n if self._all_templates_names is None:\n all_templates_names = {}\n dom = self._get_xml(self.ALL_TEMPLATES_PATH)\n for e in dom.getElementsByTagName('template'):\n user = e.getAttribute('userName')\n name = e.getAttribute('name')\n if user in all_templates_names:\n all_templates_names[user].append(name)\n else:\n templates_names = []\n templates_names.append(name)\n all_templates_names[user] = templates_names\n self._all_templates_names = all_templates_names\n return self._all_templates_names",
"def get_resources(self):\n\t\treturn self.model.all()",
"def get_template_names(self):\r\n return [self.page_instance.get_template(self.request)]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return a resource representing all process templates in this dataset.
|
def process_templates(self) -> ProcessTemplateCollection:
    return ProcessTemplateCollection(self.project_id, self.uid, self.session)
|
[
"def list_by_template(self,\n uid: Union[UUID, str, LinkByUID, GEMDProcessTemplate]\n ) -> Iterator[ProcessSpec]:\n return self._get_relation('process-templates', uid=uid)",
"def iter_templates(self):\n for page in self.iter_templates_pages():\n results = page.json()['results']\n for item in results:\n yield item",
"def _all_templates(self):\n for startmodel in self._all_starting_models():\n for template in startmodel.templates:\n yield template",
"def get_templates(self):\n index_templates = {}\n for path in glob.iglob(self.data_path + '/template/*.json'):\n logger.debug('Reading index template setup from {}'.format(path))\n index_template = None\n with open(path) as f:\n index_template = json.load(f)\n template_name = index_template['name']\n setup_body = index_template['body']\n index_templates[template_name] = setup_body\n return index_templates",
"def _get_instance_templates(self):\r\n return [(instance.name, instance.t)\r\n for instance in self.get_instances()]",
"def templates(self):\n if self._templates is None:\n templates = {}\n dom = self._get_xml(self.TEMPLATES_PATH)\n for e in dom.getElementsByTagName('template'):\n name = e.getAttribute('name')\n if name in templates:\n raise ServiceError(\n \"Two templates with same name: \" + name)\n else:\n templates[name] = e.toxml()\n self._templates = templates\n return self._templates",
"def _create_template(self, num_instances):\n conf_name = self.properties['launch_configuration_name']\n conf = self.stack.resource_by_refid(conf_name)\n instance_definition = copy.deepcopy(conf.t)\n instance_definition['Type'] = 'OS::Nova::Server'\n # resolve references within the context of this stack.\n fully_parsed = self.stack.resolve_runtime_data(instance_definition)\n\n resources = {}\n for i in range(num_instances):\n resources[\"%s-%d\" % (self.name, i)] = fully_parsed\n return {\"Resources\": resources}",
"def fetchTemplates(self):\n\n templates = {}\n\n # fetch templates from DB for relevant parser ID\n sql = \"\"\"\n SELECT \n FT.template_id as TID, \n template_name, \n field_name, \n regex, \n regex_group, \n regex_match_index \n FROM \n FieldTemplates FT\n JOIN \n ParserToFieldTemplateMapping PTFTM \n USING \n (template_id) \n JOIN \n Fields F \n ON \n (FT.field_id=F.field_id)\n JOIN \n FieldTemplateRegex FTR \n ON \n (FT.regex_id=FTR.regex_id) \n WHERE \n PTFTM.parser_id = %d\n AND \n FT.status = 1\n ORDER BY TID\n \"\"\"\n\n # execute query\n with self.inquisitionDbHandle.cursor() as dbCursor:\n dbCursor.execute(sql % (int(self.parserID)))\n\n # fetch results\n dbResults = dbCursor.fetchall()\n for row in dbResults:\n # add each template to template store\n templates[row['TID']] = Template(row['TID'], row['field_name'], row['regex'], row['regex_group'],\n row['regex_match_index'], row['template_name'])\n\n self.lgr.debug('loaded template SUCCESSFULLY :: ' + str(templates[row['TID']]))\n\n dbCursor.close()\n\n return templates",
"def all_templates(self):\n if self._all_templates is None:\n all_templates = {}\n dom = self._get_xml(self.ALL_TEMPLATES_PATH)\n for e in dom.getElementsByTagName('template'):\n user = e.getAttribute('userName')\n name = e.getAttribute('name')\n if user in all_templates:\n templates = all_templates[user]\n templates[name] = e.toxml()\n else:\n templates = {}\n templates[name] = e.toxml()\n all_templates[user] = templates\n self._all_templates = all_templates\n return self._all_templates",
"def _get_template(self) -> List[str]:\n with open(self.template_location, \"r\") as file:\n return file.readlines()",
"def templates(self) -> Optional[Sequence['outputs.UpstreamTemplateResponse']]:\n return pulumi.get(self, \"templates\")",
"def list_question_templates(self):\n return self.query(\"\"\"{\n allQuestionTemplates {\n edges {\n node {\n id\n scId\n questionType\n text\n expectationType\n answerTemplate\n answerValidation\n storyTemplate\n compatibleSpecTypes\n }\n }\n }\n }\"\"\")",
"def get_template_names(self):\r\n return [self.page_instance.get_template(self.request)]",
"def get_templates(self) -> dict[str, str]:\n templates = {}\n for file in self.files:\n file_path = Path(file)\n template_path = get_template_path(file_path)\n if file in self.custom_templates:\n template = self.custom_templates[file]\n template = self.clean_text(template)\n elif self.templates.has_template(template_path):\n template = self.templates.get_template(template_path).source\n else:\n try:\n template = self.get_text_template(file)\n except Exception as e:\n raise ConfigServiceTemplateError(\n f\"node({self.node.name}) service({self.name}) file({file}) \"\n f\"failure getting template: {e}\"\n )\n template = self.clean_text(template)\n templates[file] = template\n return templates",
"def templates():\n return [\n Template(\"dummy\", [\n Decompressor,\n DummyService,\n ])\n ]",
"def _wikipedia_Page_templatePages(self):\n return [template for template in toolserver.Generators.getTemplatelinks(self)]",
"def get_device_templates(self):\n response = self.request(PATH_DEVICE_TEMPLATES)\n\n if response.status_code == requests.codes.ok:\n return response.json()\n else:\n print(\"Unrecognised status for fetch device templates\" + response.status_code)",
"def _discover_templates():\n vms = []\n for file in os.listdir(paths.packer_templates):\n json = os.path.join(paths.packer_templates,\n file, file + '.json')\n if os.path.exists(json):\n vms.append(file)\n return vms",
"def get_all_resources(self) -> Generator[GenomicResource, None, None]:",
"def getRRDTemplates(self):\n result = super(Device, self).getRRDTemplates()\n # Check if version of the system\n # modeled by OperatingSystem plugin is Windows 2003.\n # https://jira.hyperic.com/browse/HHQ-5553\n if '2003' in self.getOSProductName():\n for template in result:\n ad = self.getRRDTemplateByName('Active Directory 2003')\n if ad:\n if 'Active Directory' in template.id:\n result[result.index(template)] = ad\n if self.msexchangeversion:\n for template in result:\n exchange = self.getRRDTemplateByName(self.msexchangeversion)\n if exchange:\n if 'MSExchange' in template.id:\n result[result.index(template)] = exchange\n return result"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return a resource representing all process runs in this dataset.
|
def process_runs(self) -> ProcessRunCollection:
    return ProcessRunCollection(self.project_id, self.uid, self.session)
|
[
"def processes(self):\n r = requests.get(self.uri+'processes')\n r.raise_for_status()\n return r.json()",
"def processes(self):\n ret = self._get_attr(\"processes\")\n return [IGuestProcess(a) for a in ret]",
"async def get_info_all_process():\n return supervisord_daemon.all_process_info()",
"def list_user_defined_processes(self) -> List[dict]:\n data = self.get(\"/process_graphs\").json()[\"processes\"]\n return VisualList(\"processes\", data=data)",
"def load_all_runs(self) -> Sequence[RunResult]:",
"def read_memory(self):\n # Get the procfs status information for each process.\n #\n # Done in a loop instead of a comprehension because we need to handle\n # the case where one of these processes is no longer alive, which will\n # cause a DoesNotExist exception.\n statuses = []\n for process in self.processes:\n try:\n statuses.append(process.status)\n except (DoesNotExist, UnknownProcess):\n # Process is no longer running.\n continue\n\n # Dispatch metrics for each worker.\n for idx, status in enumerate(statuses):\n try:\n vmsize = Values(\n plugin_instance='vmsize',\n values=(status['VmSize'],),\n )\n\n vmrss = Values(\n plugin_instance='vmrss',\n values=(status['VmRSS'],),\n )\n except (DoesNotExist, UnknownProcess):\n # Process is no longer running.\n continue\n\n for metric in (vmsize, vmrss):\n metric.dispatch(\n plugin=self.name,\n type='gauge',\n type_instance=str(idx),\n )",
"def list_runs(self):\n res = self.api_client.ListRuns()\n return res.response().result",
"def get_workers(self):\n with self._engine.begin() as conn:\n worker_rows = conn.execute(\n select([cl_worker, cl_worker_dependency.c.dependencies]).select_from(\n cl_worker.outerjoin(\n cl_worker_dependency,\n cl_worker.c.worker_id == cl_worker_dependency.c.worker_id,\n )\n )\n ).fetchall()\n worker_run_rows = conn.execute(cl_worker_run.select()).fetchall()\n\n worker_dict = {\n (row.user_id, row.worker_id): {\n 'user_id': row.user_id,\n 'worker_id': row.worker_id,\n 'group_uuid': row.group_uuid,\n 'tag': row.tag,\n 'cpus': row.cpus,\n 'gpus': row.gpus,\n 'memory_bytes': row.memory_bytes,\n 'free_disk_bytes': row.free_disk_bytes,\n 'checkin_time': row.checkin_time,\n 'socket_id': row.socket_id,\n # run_uuids will be set later\n 'run_uuids': [],\n 'dependencies': row.dependencies\n and self._deserialize_dependencies(row.dependencies),\n 'shared_file_system': row.shared_file_system,\n 'tag_exclusive': row.tag_exclusive,\n 'exit_after_num_runs': row.exit_after_num_runs,\n 'is_terminating': row.is_terminating,\n 'preemptible': row.preemptible,\n }\n for row in worker_rows\n }\n for row in worker_run_rows:\n worker_dict[(row.user_id, row.worker_id)]['run_uuids'].append(row.run_uuid)\n return list(worker_dict.values())",
"def getProcesses(self):\n return self.dbg.enumerate_processes()",
"def runs(self):\n\t\treturn copy.copy(self._runs)",
"def load_all_runs(self) -> List[RunResult]:\n results = []\n with open(self.store_location, mode='rb') as f:\n while True:\n try:\n r = self.serializer.load(f)\n results.append(r)\n except EOFError:\n break\n\n return results",
"def executions(self):\n return self._client.get_executions(self.name)",
"def get_runs(self):\n try:\n return self.__dict__['runs']\n except KeyError:\n json = self._connection._make_request('routes/%s/runs/' % self.id)\n obj_list = [BusRun(\n j[\"id\"],\n j['display_name'],\n j['direction_name'],\n self,\n self._connection,\n ) for j in json.get(\"items\")]\n self.__dict__['runs'] = obj_list\n return obj_list",
"def stats_per_process(self):\n values = cmd_across_all_procs(self._server_per_proc, 'stats')\n\n return values",
"def host_processes(self, session):\n url = utils.urljoin(self.base_path, self.id, 'host_info', 'processes')\n resp = session.get(url, endpoint_filter=self.service).json()\n return resp['info']",
"def _get_running_builds(self):\n running_builds = self._get_resource(self._RUNNING_BUILDS_RESOURCE)\n if self._COUNT_ATTRIBUTE in running_builds and running_builds[self._COUNT_ATTRIBUTE] > 0:\n return running_builds[self._BUILD_ATTRIBUTE]\n return []",
"def _get_process_info(self):\n process_infos = []\n if CONF.process_info.proc_detail:\n logger.debug('More information about the collection process.')\n processss = psutil.process_iter(attrs=['name', 'exe', 'pid',\n 'username', 'cmdline',\n 'memory_percent', 'status',\n 'create_time',\n 'cpu_percent', 'cpu_num'])\n else:\n processss = psutil.process_iter(attrs=[\n 'name', 'exe', 'pid', 'status'])\n for process in processss:\n p_info = process.info\n if p_info.get('create_time', None):\n p_info['create_time'] = utils.str_time(p_info['create_time'])\n else:\n pass\n process_infos.append(p_info)\n logger.info('Collect all process information.')\n return process_infos",
"def get_processes(self):\n processes = {}\n for proc in psutil.process_iter():\n try:\n pinfo = proc.as_dict(attrs=['pid', 'name'])\n processes[pinfo['name']] = pinfo['pid']\n except psutil.NoSuchProcess:\n pass\n return(processes)",
"def get_processes(process_id=None):\n return get_response(api_.describe_processes(request, process_id))",
"def workers(self):\n return self._wrap_get('/workers')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return a resource representing all measurement runs in this dataset.
|
def measurement_runs(self) -> MeasurementRunCollection:
    return MeasurementRunCollection(self.project_id, self.uid, self.session)
|
[
"def get_all_measurements():\n measurements = Measurement.objects.all()\n return measurements",
"def list_runs(self):\n res = self.api_client.ListRuns()\n return res.response().result",
"def load_all_runs(self) -> Sequence[RunResult]:",
"def runs(self):\n\t\treturn copy.copy(self._runs)",
"def get(self):\n measurements = {}\n for monitorUrl in self.monitors:\n measurements = self.loadFromSingleMonitor(\n measurements,\n monitorUrl,\n self.request.getMeasurements(monitorUrl, self.authToken)\n )\n return measurements",
"def load_all_runs(self) -> List[RunResult]:\n results = []\n with open(self.store_location, mode='rb') as f:\n while True:\n try:\n r = self.serializer.load(f)\n results.append(r)\n except EOFError:\n break\n\n return results",
"def get_all_resource(self):\n query = APIData.query()\n query = query.filter(APIData.indexed_data == \"TYPE->RESOURCE\")\n query = query.filter(APIData.indexed_data == \"DATASET_ID->\" + str(self.key.id()))\n\n resources = query.fetch()\n resource_dict = []\n\n # if resources:\n # for r in resources:\n # resource_dict.append(r.to_api_object())\n\n return resources",
"def all(self):\n return self._get_resourceset_class()(\n self._client,\n self._path,\n )",
"def get_all_metrics(self):\n if self.collect_all:\n return self.all_metric_results\n else:\n return {}",
"def get_runs(self):\n try:\n return self.__dict__['runs']\n except KeyError:\n json = self._connection._make_request('routes/%s/runs/' % self.id)\n obj_list = [BusRun(\n j[\"id\"],\n j['display_name'],\n j['direction_name'],\n self,\n self._connection,\n ) for j in json.get(\"items\")]\n self.__dict__['runs'] = obj_list\n return obj_list",
"def all_data(self) -> SampleSet:\n if self._all_data is None:\n self._all_data = self._get_all_data()\n return self._all_data",
"def test_get_run_resources(self):\n pass",
"def run(self):\n return self.cdb.db.query(\"dataset\", self.query)",
"def get_measure_units(self):\n self.__cursor.execute(\"exec [technics].[get_measure_units]\")\n return self.__cursor.fetchall()",
"def test_meter_run_read(self):\n auth_headers = { \"Authorization\": \"Bearer \" + \"tokstr\" }\n\n for meter_run, consumption_metadata in zip(self.meter_runs,self.consumption_metadatas):\n\n response = self.client.get('/api/v1/meter_runs/{}/'.format(meter_run.id), **auth_headers)\n assert response.status_code == 200\n assert response.data[\"project\"] == self.project.id\n assert response.data[\"consumption_metadata\"] == consumption_metadata.id\n\n model_parameters_baseline = json.loads(response.data[\"model_parameter_json_baseline\"])\n model_parameters_reporting = json.loads(response.data[\"model_parameter_json_reporting\"])\n\n assert type(model_parameters_baseline) == dict\n assert type(model_parameters_reporting) == dict\n\n assert len(response.data[\"serialization\"]) > 7000\n\n assert len(response.data[\"dailyusagebaseline_set\"]) == 1461\n assert len(response.data[\"dailyusagereporting_set\"]) == 1461\n\n assert response.data[\"meter_type\"] == \"DFLT_RES_E\" or \\\n response.data[\"meter_type\"] == \"DFLT_RES_NG\"\n\n assert type(response.data[\"annual_usage_baseline\"]) == float\n assert type(response.data[\"annual_usage_reporting\"]) == float\n assert type(response.data[\"gross_savings\"]) == float\n assert type(response.data[\"annual_savings\"]) == float\n assert type(response.data[\"cvrmse_baseline\"]) == float\n assert type(response.data[\"cvrmse_reporting\"]) == float",
"def get_resources(self):\n js = 'return window.performance.getEntriesByType(\"resource\");'\n try:\n resources = self._wait().until(\n lambda driver: driver.execute_script(js),\n message='Resources not generated yet or there are none')\n return [ResourceTiming(**resource) for resource in resources]\n except TimeoutException:\n return None # because there were no Resources captured for the current web page",
"def results(self):\r\n return pd.Series(\r\n {\r\n \"metric_bo\": getattr(self, \"metric_bo\", None),\r\n \"time_bo\": getattr(self, \"time_bo\", None),\r\n \"metric_train\": getattr(self, \"metric_train\", None),\r\n \"metric_test\": getattr(self, \"metric_test\", None),\r\n \"time_fit\": getattr(self, \"time_fit\", None),\r\n \"mean_bagging\": getattr(self, \"mean_bagging\", None),\r\n \"std_bagging\": getattr(self, \"std_bagging\", None),\r\n \"time_bagging\": getattr(self, \"time_bagging\", None),\r\n \"time\": getattr(self, \"time\", None),\r\n },\r\n name=self.name,\r\n )",
"def measurements(self):\n return dict([(x['name'], x) for x in self.meta['measurements']])",
"def get_resources(self):\n\t\treturn self.model.all()",
"def listRuns(self, minrun=-1, maxrun=-1, logical_file_name=\"\", block_name=\"\", dataset=\"\"):\n\ttry:\n\t\tconn = self.dbi.connection()\n\t\ttran=False\n\t\tret=self.runlist.execute(conn, minrun, maxrun, logical_file_name, block_name,\n\t\tdataset, tran)\n\t\treturn ret\n\n\texcept Exception, ex:\n\t\traise ex\n\t\t\n\tfinally:\n\t\tconn.close()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return a resource representing all material runs in this dataset.
|
def material_runs(self) -> MaterialRunCollection:
    return MaterialRunCollection(self.project_id, self.uid, self.session)
|
[
"def get_materials():\n\n return Material.query.all()",
"def get_all_resource(self):\n query = APIData.query()\n query = query.filter(APIData.indexed_data == \"TYPE->RESOURCE\")\n query = query.filter(APIData.indexed_data == \"DATASET_ID->\" + str(self.key.id()))\n\n resources = query.fetch()\n resource_dict = []\n\n # if resources:\n # for r in resources:\n # resource_dict.append(r.to_api_object())\n\n return resources",
"def all(self):\n return self._get_resourceset_class()(\n self._client,\n self._path,\n )",
"def get_all_resources(self) -> Generator[GenomicResource, None, None]:",
"def get_resources(self):\n\t\treturn self.model.all()",
"def load_all_runs(self) -> Sequence[RunResult]:",
"def runs(self):\n\t\treturn copy.copy(self._runs)",
"def _resources(self):\r\n return self._resources_mapper()",
"def all_resources(self) -> Iterator[Resource]:\n yield from self.articles.by_ref.values()\n yield from self.assets.by_ref.values()\n yield from self.includes.by_ref.values()",
"def list_runs(self):\n res = self.api_client.ListRuns()\n return res.response().result",
"def get_items(self):\n\n self.logger.info(\"Dielectric Builder Started\")\n\n self.logger.info(\"Setting indexes\")\n self.ensure_indicies()\n\n q = dict(self.query)\n q.update(self.materials.lu_filter(self.dielectric))\n q[\"dielectric\"] = {\"$exists\": 1}\n mats = self.materials.distinct(self.materials.key, q)\n\n self.logger.info(\"Found {} new materials for dielectric data\".format(len(mats)))\n\n return self.materials.query(criteria=q, properties=[self.materials.key, \"dielectric\", \"piezo\", \"structure\"])",
"def assets(self):\n self.allassets=self.createdSRTs+self.createdMRTs#keeps the record of all assets in the wind farm\n return self.allassets",
"def _get_materials(self) -> \"adsk::core::Ptr< adsk::core::Materials >\" :\n return _core.MaterialLibrary__get_materials(self)",
"def get_materials(self):\n with open(\"materials.json\", \"r\") as read_file:\n self.materials = json.load(read_file)\n read_file.close()\n self.set_material(self.materials[list(self.materials.keys())[0]])",
"def dryrun_all(self, run_as_root: bool = False) -> Result:\n return Result(\n {\n r: self.dryrun(r, run_as_root=run_as_root)\n for r in self.resources.values()\n }\n )",
"def get_materials(self, iterations):\n materials = []\n for i in range(iterations+1):\n name = f\"{NAME}_Mat{i}\"\n material = bpy.data.materials.get(name)\n if not material:\n material = bpy.data.materials.new(name)\n materials.append(material)\n return materials",
"def get_all_materials(self, memo=None):\n\n materials = OrderedDict()\n\n # Append all Cells in each Cell in the Universe to the dictionary\n cells = self.get_all_cells(memo)\n for cell in cells.values():\n materials.update(cell.get_all_materials(memo))\n\n return materials",
"def get_sets(self, material):\n try:\n set_name = str(material[\"name\"]) + '_' + str(material[\"id\"])\n j = 0\n setlist = []\n for _set in self.source_dictionary.get(\"model_info\").get(\"sets\").get(set_name):\n setlist.append(None)\n setlist[j] = self.source_dictionary.get(\"model_info\").get(\"sets\").get(set_name)[j]\n setlist[j][\"texture\"] = (self.get_textures(_set))\n j += 1\n return setlist\n except TypeError:\n print(\"Please define correct source file first\")",
"def collect_resource_entries(self, resource: GenomicResource) -> Manifest:",
"def test_get_run_resources(self):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return a resource representing all ingredient runs in this dataset.
|
def ingredient_runs(self) -> IngredientRunCollection:
    return IngredientRunCollection(self.project_id, self.uid, self.session)
|
[
"def get_recipe_ingredients():\n\n \"\"\"IN USE\"\"\"\n\n return RecipeIngredient.query.all()",
"def get(self):\n auth_header = request.headers.get('authorization')\n data = get_all_ingredient.parse_args(request)\n return MealBusiness.get_all_ingredient(auth_token=auth_header,data=data)",
"def get_ingredients(self):\n return self.pizza.get_ingredients()",
"def ingredient(self):\n return self._ingredient",
"def get_ingredients():\n\n \"\"\"IN USE\"\"\"\n\n return Ingredient.query.all()",
"def get_recipes():\n\n \"\"\"IN USE\"\"\"\n\n return Recipe.query.all()",
"def get_recipes():\n\n return Recipe.query.all()",
"def recipes(self) -> pulumi.Output[Sequence['outputs.SoftwareRecipeResponse']]:\n return pulumi.get(self, \"recipes\")",
"def get_user_ingredients(self):\r\n return self.user_ingredients",
"def all(self):\n return self._get_resourceset_class()(\n self._client,\n self._path,\n )",
"def ingredients(self):\n ingredients = []\n for dish_key in self.dishes:\n dish = dish_key.get()\n for ingredient in dish.ingredients:\n ingredients.append(ingredient.pretty)\n\n return sorted(set(ingredients))",
"def get(self, request, pk):\n ingredient = Ingredient.objects.get(id=pk)\n recipes = ingredient.ingredientrecipe_set.all()\n ctx = {'ingredient': ingredient,\n 'recipes': recipes}\n\n return TemplateResponse(request,\n 'snack_puzzle/ingredient_detail.html',\n ctx)",
"def recipe_names(self):\n return self._recipe_names",
"def get_all_resource(self):\n query = APIData.query()\n query = query.filter(APIData.indexed_data == \"TYPE->RESOURCE\")\n query = query.filter(APIData.indexed_data == \"DATASET_ID->\" + str(self.key.id()))\n\n resources = query.fetch()\n resource_dict = []\n\n # if resources:\n # for r in resources:\n # resource_dict.append(r.to_api_object())\n\n return resources",
"def get(self):\n return IngredientsList.query.all(), 200",
"def recipes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SoftwareRecipeArgs']]]]:\n return pulumi.get(self, \"recipes\")",
"def get_all_resources(self) -> Generator[GenomicResource, None, None]:",
"def get_recipe_inputs(self):\n return self.recipe_settings.get('inputs')",
"def recipe_get(utensil_id):\n get_utensil(utensil_id)\n where_clause = db.models.RecipeUtensils.utensil == utensil_id\n\n recipes = list(api.recipes.select_recipes(where_clause))\n recipes, _ = schemas.recipe_schema_list.dump({'recipes': recipes})\n return recipes",
"def get_ingredients(self, drink_type: str) -> List[str]:\n return [ingredient for ingredient in self.get_receipe(drink_type)]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return a resource representing all process specs in this dataset.
|
def process_specs(self) -> ProcessSpecCollection:
    return ProcessSpecCollection(self.project_id, self.uid, self.session)
|
[
"def processes(self):\n r = requests.get(self.uri+'processes')\n r.raise_for_status()\n return r.json()",
"def processes(self):\n ret = self._get_attr(\"processes\")\n return [IGuestProcess(a) for a in ret]",
"def list_user_defined_processes(self) -> List[dict]:\n data = self.get(\"/process_graphs\").json()[\"processes\"]\n return VisualList(\"processes\", data=data)",
"def get_processes(process_id=None):\n return get_response(api_.describe_processes(request, process_id))",
"async def get_info_all_process():\n return supervisord_daemon.all_process_info()",
"def list():\n for pf in Processors.getProcessFactories():\n for n in pf.getNames(): \n yield (n.namespaceURI, n.localPart)",
"def _get_process_info(self):\n process_infos = []\n if CONF.process_info.proc_detail:\n logger.debug('More information about the collection process.')\n processss = psutil.process_iter(attrs=['name', 'exe', 'pid',\n 'username', 'cmdline',\n 'memory_percent', 'status',\n 'create_time',\n 'cpu_percent', 'cpu_num'])\n else:\n processss = psutil.process_iter(attrs=[\n 'name', 'exe', 'pid', 'status'])\n for process in processss:\n p_info = process.info\n if p_info.get('create_time', None):\n p_info['create_time'] = utils.str_time(p_info['create_time'])\n else:\n pass\n process_infos.append(p_info)\n logger.info('Collect all process information.')\n return process_infos",
"def iter_procs(self):\n for row in self:\n if row.service_def:\n yield row",
"def _getProcessOutputs(self, process):\n processOutputs = [] \n for data in process.ProcessOutputs.Output:\n output = {} \n output[\"Identifier\"] = str(data.Identifier.value()) \n ita = self._getTitleAbstract(data)\n for key in ita.keys():\n output[key] = ita[key] \n \n if data.ComplexOutput != None:\n output[\"ComplexOutput\"] = self._getComplexData(data.ComplexOutput)\n if data.LiteralOutput != None:\n output[\"LiteralOutput\"] = self._getLiteralData(data.LiteralOutput)\n \n processOutputs.append(output)\n \n return processOutputs",
"def get_process_schema():\n clean_expired_sessions()\n\n dictio = {}\n # reads the session\n session = request.args.get('session', type=str)\n # reads the requested process name\n process = request.args.get('process', default='receipt', type=str)\n\n logging.info(\n \"get_process_schema start session=\" + str(session) + \" process=\" + str(process))\n\n if check_session_validity(session):\n user = get_user_from_session(session)\n if lh.check_user_log_visibility(user, process):\n Commons.semaphore_matplot.acquire()\n try:\n # reads the decoration\n decoration = request.args.get('decoration', default='freq', type=str)\n # reads the typeOfModel\n type_of_model = request.args.get('typeOfModel', default='dfg', type=str)\n # reads the simplicity\n simplicity = request.args.get('simplicity', default=constants.DEFAULT_DEC_FACTOR, type=float)\n variant = type_of_model + \"_\" + decoration\n parameters = {\"decreasingFactor\": simplicity}\n handler = lh.get_handler_for_process_and_session(process, session)\n filters_chain = handler.get_filters_chain_repr()\n ps_repr = process + \"@@\" + variant + \"@@\" + str(simplicity) + \"@@\" + filters_chain\n saved_obj = lh.get_object_memory(ps_repr) if Configuration.enable_process_caching else None\n if saved_obj is not None:\n base64 = saved_obj[0]\n model = saved_obj[1]\n format = saved_obj[2]\n this_handler = saved_obj[3]\n activities = saved_obj[4]\n start_activities = saved_obj[5]\n end_activities = saved_obj[6]\n gviz_base64 = saved_obj[7]\n graph_rep = saved_obj[8]\n type_of_model = saved_obj[9]\n decoration = saved_obj[10]\n second_model = saved_obj[11]\n second_format = saved_obj[12]\n activity_key = saved_obj[13]\n log_summary = saved_obj[14]\n else:\n base64, model, format, this_handler, activities, start_activities, end_activities, gviz_base64, graph_rep, type_of_model, decoration, second_model, second_format, activity_key, log_summary = handler.get_schema(\n variant=variant,\n parameters=parameters)\n lh.save_object_memory(ps_repr, [base64, model, format, this_handler, activities, start_activities,\n end_activities, gviz_base64, graph_rep, type_of_model, decoration,\n second_model, second_format, activity_key, log_summary])\n if model is not None:\n if type(model) is not str:\n model = model.decode('utf-8')\n\n dictio = {\"base64\": base64.decode('utf-8'), \"model\": model, \"format\": format, \"handler\": this_handler,\n \"activities\": activities,\n \"start_activities\": start_activities, \"end_activities\": end_activities,\n \"gviz_base64\": gviz_base64.decode('utf-8'), \"graph_rep\": graph_rep,\n \"type_of_model\": type_of_model, \"decoration\": decoration,\n \"second_model\": second_model, \"second_format\": second_format, \"activity_key\": activity_key}\n for key in log_summary:\n dictio[key] = log_summary[key]\n except:\n logging.error(traceback.format_exc())\n Commons.semaphore_matplot.release()\n\n logging.info(\n \"get_process_schema complete session=\" + str(session) + \" process=\" + str(process) + \" user=\" + str(\n user))\n ret = jsonify(dictio)\n return ret",
"def workflowSpecs():\n return listWorkflowSpecs()",
"def host_processes(self, session):\n url = utils.urljoin(self.base_path, self.id, 'host_info', 'processes')\n resp = session.get(url, endpoint_filter=self.service).json()\n return resp['info']",
"def get_processes(self):\n processes = {}\n for proc in psutil.process_iter():\n try:\n pinfo = proc.as_dict(attrs=['pid', 'name'])\n processes[pinfo['name']] = pinfo['pid']\n except psutil.NoSuchProcess:\n pass\n return(processes)",
"def test_metricset_process(self):\n\n fields = [\"process.entity_id\", \"process.pid\", \"process.ppid\", \"process.name\", \"process.executable\",\n \"process.args\", \"process.start\", \"process.working_directory\", \"user.id\", \"user.group.id\"]\n\n # Windows does not have effective and saved IDs, and user.name is not always filled for system processes.\n if sys.platform != \"win32\":\n fields.extend([\"user.effective.id\", \"user.saved.id\", \"user.effective.group.id\", \"user.saved.group.id\",\n \"user.name\", \"user.group.name\"])\n\n # process.hash.max_file_size: 1 - To speed things up during testing, we effectively disable hashing.\n # errors_allowed|warnings_allowed=True - Disabling hashing causes the dataset to add an error to the event\n # and log a warning. That should not fail the test.\n self.check_metricset(\"system\", \"process\", COMMON_FIELDS + fields, {\"process.hash.max_file_size\": 1},\n errors_allowed=True, warnings_allowed=True)",
"def get_all_resource(self):\n query = APIData.query()\n query = query.filter(APIData.indexed_data == \"TYPE->RESOURCE\")\n query = query.filter(APIData.indexed_data == \"DATASET_ID->\" + str(self.key.id()))\n\n resources = query.fetch()\n resource_dict = []\n\n # if resources:\n # for r in resources:\n # resource_dict.append(r.to_api_object())\n\n return resources",
"def get_all_resources(self) -> Generator[GenomicResource, None, None]:",
"def stats_per_process(self):\n values = cmd_across_all_procs(self._server_per_proc, 'stats')\n\n return values",
"def get_resource_info(self):\n return self._res_list",
"def list(request):\n api_client = request.user.agave_oauth.api_client\n\n project_uuid = request.GET.get('project_uuid', None)\n specimen_uuid = request.GET.get('specimen_uuid', None)\n\n if not specimen_uuid and not project_uuid:\n messages.warning(request, 'Missing project or specimen UUID, cannot find processes.')\n return HttpResponseRedirect(reverse('ids_projects:project-list-private'))\n\n project = None\n specimen = None\n process = None\n\n try:\n if not specimen_uuid:\n project = Project(api_client=api_client, uuid=project_uuid)\n processes = project.processes\n else:\n specimen = Specimen(api_client=api_client, uuid=specimen_uuid)\n processes = specimen.processes\n project = Specimen.project\n\n except Exception as e:\n exception_msg = 'Unable to load project, specimen, or processes. %s' % e\n logger.error(exception_msg)\n messages.warning(request, exception_msg)\n return HttpResponseRedirect(reverse('ids_projects:project-list-private'))\n\n context = { 'project': project,\n 'specimen' : specimen,\n 'processes': processes\n }\n\n return render(request, 'ids_projects/processes/index.html', context)",
"def read_all():\n\n # Create the list of solutionresources from our data\n solutionresource = SolutionResource.query.order_by(SolutionResource.key).all()\n app.logger.debug(pformat(solutionresource))\n # Serialize the data for the response\n solutionresource_schema = SolutionResourceSchema(many=True)\n data = solutionresource_schema.dump(solutionresource)\n return data"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return a resource representing all measurement specs in this dataset.
|
def measurement_specs(self) -> MeasurementSpecCollection:
    return MeasurementSpecCollection(self.project_id, self.uid, self.session)
|
[
"def get_all_measurements():\n measurements = Measurement.objects.all()\n return measurements",
"def measurements(self):\n return dict([(x['name'], x) for x in self.meta['measurements']])",
"def collect_data_spec(self):\n pass",
"def get(self):\n measurements = {}\n for monitorUrl in self.monitors:\n measurements = self.loadFromSingleMonitor(\n measurements,\n monitorUrl,\n self.request.getMeasurements(monitorUrl, self.authToken)\n )\n return measurements",
"def _make_physical_measurements(self, **kwargs):\n resource = json.loads(self.measurement_json)\n\n if 'resource' in kwargs:\n resource = json.loads(kwargs.pop('resource'))\n\n for k, default_value in (\n (\"physicalMeasurementsId\", 1),\n (\"participantId\", self.participant.participantId),\n (\"createdSiteId\", 1),\n (\"finalizedSiteId\", 2),\n (\"origin\", 'hpro'),\n (\"collectType\", PhysicalMeasurementsCollectType.SITE),\n (\"originMeasurementUnit\", OriginMeasurementUnit.UNSET)\n ):\n if k not in kwargs:\n kwargs[k] = default_value\n\n record = PhysicalMeasurements(**kwargs)\n self.dao.store_record_fhir_doc(record, resource)\n return record",
"def get_spec_data(self):\n print \"Start get spec threshold data...\"\n\n # get spec threshold data\n spec_threshold_sensor, spec_threshold_sdr = self.get_spec_threshold_discrete_data(\"Threshold Sensors\",\n conf.Start_SpecFile,\n conf.End_SpecFile)\n # get spec discrete data\n spec_discrete_sensor, spec_discrete_sdr = self.get_spec_threshold_discrete_data(\"Discrete Sensors\",\n conf.Start_Discrete_SpecFile,\n conf.End_Discrete_SpecFile)\n return spec_threshold_sensor, spec_threshold_sdr, spec_discrete_sdr",
"def populate_measurements(self):\n self._populate(settings.ROOT_MEASUREMENTS, 'measurement', 'meas')",
"def get_specs(self) -> dict:\n return self.specs.find({})[0]",
"def get_all_resource(self):\n query = APIData.query()\n query = query.filter(APIData.indexed_data == \"TYPE->RESOURCE\")\n query = query.filter(APIData.indexed_data == \"DATASET_ID->\" + str(self.key.id()))\n\n resources = query.fetch()\n resource_dict = []\n\n # if resources:\n # for r in resources:\n # resource_dict.append(r.to_api_object())\n\n return resources",
"def get_measurement_types():\n\n all_measures = ['temperature']\n\n ####################\n return all_measures\n ####################",
"def all(self):\n return self._get_resourceset_class()(\n self._client,\n self._path,\n )",
"def get_measure_units(self):\n self.__cursor.execute(\"exec [technics].[get_measure_units]\")\n return self.__cursor.fetchall()",
"def get_measurements(self, measure_regexp):\n query = \"SHOW MEASUREMENTS WITH MEASUREMENT =~ {}\".format(\n measure_regexp)\n results = self._make_query(query)\n return [x['name'] for y in results for x in y['measurements']]",
"def listStationsMetaData(self):\n docList = self.getMeasurementsDocuments(type=\"IMSData\")\n return pandas.DataFrame([x.desc for x in docList])",
"def get_data_spec(self):\n return self._data_spec.copy()",
"def stats(self):\n return [\n {\n \"title\": \"all-resources\",\n \"resource__path\": [],\n \"resource__total_strings\": self.total_strings,\n \"pretranslated_strings\": self.pretranslated_strings,\n \"strings_with_errors\": self.strings_with_errors,\n \"strings_with_warnings\": self.strings_with_warnings,\n \"unreviewed_strings\": self.unreviewed_strings,\n \"approved_strings\": self.approved_strings,\n }\n ]",
"def get_measurement_map(self) -> dict:\n\n data = self.get_map()\n return data[\"measurements\"]",
"def get_measurements(self, id, key):\n m = self._get_measurement_raw(id, key)\n m = m.get('body', {}).get('measuregrps', {})\n if not m:\n return\n\n for entry in m:\n # Category 1 is actual measure, as opposed to objective.\n # Skip all others.\n if entry['category'] != 1:\n continue\n date = datetime.datetime.fromtimestamp(entry['date'])\n for measure in entry['measures']:\n name = measure['type']\n name = self.TYPES.get(name, str(name))\n # actual value = value * 10^unit\n val = measure.get('value', 0) * (10 ** measure.get('unit', 0))\n yield date, name, val",
"def initiate_measurement_group(self):\r\n ds = Dataset()\r\n ds.RelationshipType = \"CONTAINS\"\r\n ds.ValueType = \"CONTAINER\"\r\n ds.ConceptNameCodeSequence = generate_sequence(\"ConceptNameCodeSequence\", [{\r\n \"CodeValue\": \"125007\",\r\n \"CodingSchemeDesignator\": \"DCM\",\r\n \"CodeMeaning\": \"Measurement Group\"}])\r\n ds.ContinuityOfContent = \"SEPARATE\",\r\n # ds.ContentTemplateSequence = generate_sequence(\"ContentTemplateSequence\", [{\r\n # \"MappingResource\": \"DCMR\",\r\n # \"MappingResourceUID\": \"1.2.840.10008.8.1.1\",\r\n # \"TemplateIdentifier\": \"1411\"}])\r\n return ds",
"def list_table_specs(self, dataset_id: str, project_id: str, location: str):\n url_suffix = f'datasets/{dataset_id}/tableSpecs'\n submit_operation_response = self._make_api_call(\n method='GET',\n url_suffix=url_suffix,\n payload=None,\n project_id=project_id,\n compute_region=location)\n return submit_operation_response['tableSpecs']"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return a resource representing all material specs in this dataset.
|
def material_specs(self) -> MaterialSpecCollection:
    return MaterialSpecCollection(self.project_id, self.uid, self.session)
|
[
"def get_materials():\n\n return Material.query.all()",
"def _get_materials(self) -> \"adsk::core::Ptr< adsk::core::Materials >\" :\n return _core.MaterialLibrary__get_materials(self)",
"def create_materials(self):\n Mat = namedtuple('Mat', ['name', 'is_waste'])\n Mat.__new__.__defaults__ = (None, False)\n self.materials = {}\n self.compositions = {}\n self.fractions = {}\n material_names = [\n Mat('Plastic', is_waste=True),\n Mat('Crude Oil'),\n Mat('Petrol'),\n Mat('Milk'),\n Mat('Packaged Milk'),\n Mat('Packaged Cucumber'),\n Mat('Cucumber'),\n Mat('Human Waste', is_waste=True),\n Mat('Other Waste', is_waste=True)\n ]\n\n Frac = namedtuple('Fraction', ['composition', 'material', 'fraction'])\n Frac.__new__.__defaults__ = (None, None, 0.0)\n fractions = [Frac('Packaged Milk', 'Milk', 0.25),\n Frac('Packaged Milk', 'Plastic', 0.75),\n Frac('Packaged Cucumber', 'Plastic', 0.15),\n Frac('Packaged Cucumber', 'Cucumber', 0.85)\n ]\n\n for mat in material_names:\n material = MaterialFactory(\n name=mat.name,\n keyflow=self.kic)\n self.materials[mat.name] = material\n Factory = WasteFactory if mat.is_waste else ProductFactory\n composition = Factory(name=mat.name)\n self.compositions[mat.name] = composition\n\n for frac in fractions:\n fraction = ProductFractionFactory(\n fraction=frac.fraction,\n material=self.materials[frac.material],\n composition=self.compositions[frac.composition],\n publication=self.pub,\n )\n self.fractions[frac.material] = fraction",
"def get_sets(self, material):\n try:\n set_name = str(material[\"name\"]) + '_' + str(material[\"id\"])\n j = 0\n setlist = []\n for _set in self.source_dictionary.get(\"model_info\").get(\"sets\").get(set_name):\n setlist.append(None)\n setlist[j] = self.source_dictionary.get(\"model_info\").get(\"sets\").get(set_name)[j]\n setlist[j][\"texture\"] = (self.get_textures(_set))\n j += 1\n return setlist\n except TypeError:\n print(\"Please define correct source file first\")",
"def get_materials(self):\n with open(\"materials.json\", \"r\") as read_file:\n self.materials = json.load(read_file)\n read_file.close()\n self.set_material(self.materials[list(self.materials.keys())[0]])",
"def get_material_names (self, obj):\n index = 0\n mats = []\n for mat in obj.data.materials:\n mats.append (\"Material_%d\" % (index))\n return mats",
"def all(self):\n return self._get_resourceset_class()(\n self._client,\n self._path,\n )",
"def get_items(self):\n\n self.logger.info(\"Dielectric Builder Started\")\n\n self.logger.info(\"Setting indexes\")\n self.ensure_indicies()\n\n q = dict(self.query)\n q.update(self.materials.lu_filter(self.dielectric))\n q[\"dielectric\"] = {\"$exists\": 1}\n mats = self.materials.distinct(self.materials.key, q)\n\n self.logger.info(\"Found {} new materials for dielectric data\".format(len(mats)))\n\n return self.materials.query(criteria=q, properties=[self.materials.key, \"dielectric\", \"piezo\", \"structure\"])",
"def get_all_resource(self):\n query = APIData.query()\n query = query.filter(APIData.indexed_data == \"TYPE->RESOURCE\")\n query = query.filter(APIData.indexed_data == \"DATASET_ID->\" + str(self.key.id()))\n\n resources = query.fetch()\n resource_dict = []\n\n # if resources:\n # for r in resources:\n # resource_dict.append(r.to_api_object())\n\n return resources",
"def material(self):\n for session in self.course.sessions.values():\n for material in session.materials:\n if self == material.lesson:\n return material",
"def generate_materials_xml(self):\n\n xml_strings = []\n\n for mat in self.number.mat_to_ind:\n root = ET.Element(\"material\")\n root.set(\"id\", mat)\n\n density = ET.SubElement(root, \"density\")\n density.set(\"units\", \"sum\")\n\n temperature = ET.SubElement(root, \"temperature\")\n mat_id = self.number.mat_to_ind[mat]\n temperature.text = str(self.materials[mat_id].temperature)\n\n for nuc in self.number.nuc_to_ind:\n if nuc in self.participating_nuclides:\n val = 1.0e-24*self.number.get_atom_density(mat, nuc)\n\n # If nuclide is zero, do not add to the problem.\n if val > 0.0:\n if self.settings.round_number:\n val_magnitude = np.floor(np.log10(val))\n val_scaled = val / 10**val_magnitude\n val_round = round(val_scaled, 8)\n\n val = val_round * 10**val_magnitude\n\n nuc_element = ET.SubElement(root, \"nuclide\")\n nuc_element.set(\"ao\", str(val))\n nuc_element.set(\"name\", nuc)\n else:\n # Only output warnings if values are significantly\n # negative. CRAM does not guarantee positive values.\n if val < -1.0e-21:\n print(\"WARNING: nuclide \", nuc, \" in material \", mat,\n \" is negative (density = \", val, \" at/barn-cm)\")\n self.number[mat, nuc] = 0.0\n\n for sab in self.materials[mat_id].sab:\n sab_el = ET.SubElement(root, \"sab\")\n sab_el.set(\"name\", sab)\n\n if _have_lxml:\n fragment = ET.tostring(root, encoding=\"unicode\", pretty_print=\"true\")\n xml_strings.append(fragment)\n else:\n clean_xml_indentation(root, spaces_per_level=2)\n fragment = ET.tostring(root, encoding=\"unicode\", pretty_print=\"true\")\n xml_strings.append(fragment)\n\n xml_string = \"\".join(xml_strings)\n\n # Append beginning, end text.\n if self.rank == 0:\n xml_string = \"<?xml version='1.0' encoding='utf-8'?>\\n<materials>\\n\" + xml_string\n if self.rank == self.size:\n xml_string += \"\\n</materials>\"\n\n xml_bytes = np.fromstring(xml_string, dtype=np.uint8)\n\n # Use MPI-IO to write to disk.\n # First, communicate to all nodes the length of their string.\n str_len = np.zeros(self.size, np.int32)\n\n str_my_len = np.zeros(1, np.int32)\n str_my_len[0] = len(xml_string)\n self.comm.Allgather([str_my_len, MPI.INT], [str_len, MPI.INT])\n\n # Compute index start.\n start_ind = np.sum(str_len[0:self.rank])\n\n # Open/create file\n handle = MPI.File.Open(self.comm, \"materials.xml\", MPI.MODE_WRONLY|MPI.MODE_CREATE)\n\n handle.Seek(start_ind, MPI.SEEK_SET)\n handle.Write(xml_bytes)\n handle.Close()\n\n self.comm.barrier()",
"def get_materials(self, iterations):\n materials = []\n for i in range(iterations+1):\n name = f\"{NAME}_Mat{i}\"\n material = bpy.data.materials.get(name)\n if not material:\n material = bpy.data.materials.new(name)\n materials.append(material)\n return materials",
"def test_get_ifc_materials(self):\n pass",
"def material(self):\n return self.edb_padstack.GetData().GetMaterial()",
"def _resources(self):\r\n return self._resources_mapper()",
"def get_all_resources(self) -> Generator[GenomicResource, None, None]:",
"def _process_material_definition(self):\n try:\n matdef = [str(m) for m in self.material]\n except TypeError:\n matdef = [str(self.material)]\n\n return matdef",
"def materialFactory(name):\n from pyre.inventory import facility\n return facility(name, factory=Material)",
"def get_resources(self):\n\t\treturn self.model.all()",
"def get_material_categorys():\n return json.dumps(Material_category.get_all_category_names())"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return a resource representing all ingredient specs in this dataset.
|
def ingredient_specs(self) -> IngredientSpecCollection:
    return IngredientSpecCollection(self.project_id, self.uid, self.session)
|
[
"def get_recipe_ingredients():\n\n \"\"\"IN USE\"\"\"\n\n return RecipeIngredient.query.all()",
"def get(self):\n auth_header = request.headers.get('authorization')\n data = get_all_ingredient.parse_args(request)\n return MealBusiness.get_all_ingredient(auth_token=auth_header,data=data)",
"def get_ingredients():\n\n \"\"\"IN USE\"\"\"\n\n return Ingredient.query.all()",
"def get_ingredients(self):\n return self.pizza.get_ingredients()",
"def ingredients(self):\n ingredients = []\n for dish_key in self.dishes:\n dish = dish_key.get()\n for ingredient in dish.ingredients:\n ingredients.append(ingredient.pretty)\n\n return sorted(set(ingredients))",
"def ingredient(self):\n return self._ingredient",
"def recipes(self) -> pulumi.Output[Sequence['outputs.SoftwareRecipeResponse']]:\n return pulumi.get(self, \"recipes\")",
"def get_recipes():\n\n return Recipe.query.all()",
"def get_recipes():\n\n \"\"\"IN USE\"\"\"\n\n return Recipe.query.all()",
"def get(self, request, pk):\n ingredient = Ingredient.objects.get(id=pk)\n recipes = ingredient.ingredientrecipe_set.all()\n ctx = {'ingredient': ingredient,\n 'recipes': recipes}\n\n return TemplateResponse(request,\n 'snack_puzzle/ingredient_detail.html',\n ctx)",
"def test_retrieve_ingredient_list(self):\n\t\tIngredient.objects.create(user=self.user, name='Oatmeal')\n\t\tIngredient.objects.create(user=self.user, name='Coconut')\n\n\t\tres = self.client.get(INGREDIENT_URL)\n\n\t\tingredients = Ingredient.objects.all().order_by('-name')\n\t\tserializer = IngredientSerializer(ingredients, many=True)\n\t\tself.assertEqual(res.status_code, status.HTTP_200_OK)\n\t\tself.assertEqual(res.data, serializer.data)",
"def get_user_ingredients(self):\r\n return self.user_ingredients",
"def ingredients_formatted(self, pretty: bool = False, sort: str = None, include_comments: bool = False) -> List[\n str]:\n return [ing.ingredient_formatted(pretty=pretty, include_comments=include_comments) for ing in\n self.ingredients(sort)]",
"def all(self):\n return self._get_resourceset_class()(\n self._client,\n self._path,\n )",
"def get_ingredients(self, drink_type: str) -> List[str]:\n return [ingredient for ingredient in self.get_receipe(drink_type)]",
"def recipe_names(self):\n return self._recipe_names",
"def get(self):\n return IngredientsList.query.all(), 200",
"def getRecipesByIngredients(ingredients):\n\n QueryHelpers.ensureListIsntEmpty(ingredients)\n lowercaseIngredients = [i for i in map(lambda x:x.lower(), ingredients)]\n with sessionInstance() as session:\n allIngredientsInRecipes = session.query(models.IngredientsInRecipes).all()\n\n filteredIngredientsInRecipes = filter(lambda x: x.ingredient.name.lower() in lowercaseIngredients, allIngredientsInRecipes)\n\n # The following makes the assumption that ingredient names are unique (which is enforced by the Ingredients model)\n recipes = []\n counts = {}\n for ingredientInRecipe in filteredIngredientsInRecipes:\n if ingredientInRecipe.recipe_id in counts:\n counts[ingredientInRecipe.recipe_id] = counts[ingredientInRecipe.recipe_id] + 1\n assert counts[ingredientInRecipe.recipe_id] <= len(ingredients)\n else:\n counts[ingredientInRecipe.recipe_id] = 1\n\n if counts[ingredientInRecipe.recipe_id] == len(ingredients):\n recipes.append(ingredientInRecipe.recipe)\n\n return [recipe.summaryDict() for recipe in recipes]",
"def recipes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SoftwareRecipeArgs']]]]:\n return pulumi.get(self, \"recipes\")",
"def get_ingredients(cls, response: HtmlResponse) -> Union[str, None]:\n ings = response.css(\".wprm-recipe-ingredients ::text\")\n return \" \".join(ing.get() for ing in ings) if ings else None"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Register a data model object to the appropriate collection.
|
def register(self, model: DataConcepts, *, dry_run=False) -> DataConcepts:
return self.gemd._collection_for(model).register(model, dry_run=dry_run)
|
[
"def register_data(self):\n raise NotImplementedError",
"def register_model(name: str) -> None:\n # Add the model to the list of valid models.\n VALID_MODELS.append(name)",
"def register(cls_list):\n global REGISTERED_MODELS\n REGISTERED_MODELS = cls_list",
"def register(self, model: Dataset) -> Dataset:\n path = self._get_path()\n dumped_dataset = model.dump()\n dumped_dataset[\"deleted\"] = None\n\n # Only use the idempotent put approach if a) a unique name is provided, and b)\n # the session is configured to use it (default to False for backwards compatibility).\n if model.unique_name is not None and self.session.use_idempotent_dataset_put:\n # Leverage the create-or-update endpoint if we've got a unique name\n data = self.session.put_resource(path, scrub_none(dumped_dataset))\n else:\n\n if model.uid is None:\n # POST to create a new one if a UID is not assigned\n data = self.session.post_resource(path, scrub_none(dumped_dataset))\n\n else:\n # Otherwise PUT to update it\n data = self.session.put_resource(\n self._get_path(model.uid), scrub_none(dumped_dataset))\n\n full_model = self.build(data)\n full_model.project_id = self.project_id\n return full_model",
"def _registerField(self,name,data):\n self._registeredFields.append(name)\n self.__setattr__(name, data)",
"def register_model(self, type, finish, html):\n if type in self._models:\n raise Exception(\"Model type '%s' has already been registered.\" % type)\n\n self._models[type] = {\"finish\":finish, \"html\":html}\n cherrypy.log.error(\"Registered new model '%s'\" % type)",
"def register_models() -> None:\n db.create_all()\n db.session.commit()",
"def add_member(self, model):\n\n #Need to throw exception if model is missing, etc\n model_type = ContentType.objects.get_for_model(model)\n tmap = TaxonomyMap()\n tmap.content_type = model_type\n tmap.object_id = model.id\n tmap.taxonomy_item = self\n tmap.save()",
"def register_schema(cls, app, model_name=None):\n if model_name:\n models = [\n model_name,\n cls.driver.Model._decl_class_registry[model_name.capitalize()]\n ]\n else:\n models = cls.driver.Model._decl_class_registry\n\n for model_name, model_cls in models.iteritems():\n if model_name.startswith('_'):\n continue\n if getattr(model_cls, '_eve_schema', None):\n dict_update(app.config['DOMAIN'], model_cls._eve_schema)",
"def register():\n\n data = collect_data()\n\n log.debug('data is: {0}'.format(json.dumps(data, default=lambda o: o.__dict__)))\n api_submit('/api/register', data, method='put')",
"def add_model(self, model):\n self.models.append(model)\n return self",
"def register_model(self, model_clazz):\n if not model_clazz._meta.abstract:\n clazz_name = get_fqclassname_forclass(model_clazz)\n self.app_models[clazz_name] = model_clazz",
"def register(self, key, obj):\n if key in self._objects:\n raise KeyError('There is already an object registered for the \"%s\" key' % key)\n\n self._set_instance(key, obj)",
"def register(self, model_or_iterable, admin_class=None, **options):\r\n if isinstance(model_or_iterable, ModelBase) and not admin_class:\r\n admin_class = ModelAdmin\r\n \r\n if isinstance(model_or_iterable, TopLevelDocumentMetaclass) and not admin_class:\r\n admin_class = DocumentAdmin\r\n\r\n # Don't import the humongous validation code unless required\r\n #if admin_class and settings.DEBUG:\r\n # from mongoadmin.validation import validate\r\n #else:\r\n validate = lambda model, adminclass: None\r\n\r\n if isinstance(model_or_iterable, ModelBase) or \\\r\n isinstance(model_or_iterable, TopLevelDocumentMetaclass):\r\n model_or_iterable = [model_or_iterable]\r\n\r\n for model in model_or_iterable:\r\n if isinstance(model, TopLevelDocumentMetaclass):\r\n init_document_options(model)\r\n \r\n if hasattr(model._meta, 'abstract') and model._meta.abstract:\r\n raise ImproperlyConfigured('The model %s is abstract, so it '\r\n 'cannot be registered with admin.' % model.__name__)\r\n\r\n if model in self._registry:\r\n raise AlreadyRegistered('The model %s is already registered' % model.__name__)\r\n\r\n # Ignore the registration if the model has been\r\n # swapped out.\r\n if model._meta.swapped:\r\n continue\r\n\r\n # If we got **options then dynamically construct a subclass of\r\n # admin_class with those **options.\r\n if options:\r\n # For reasons I don't quite understand, without a __module__\r\n # the created class appears to \"live\" in the wrong place,\r\n # which causes issues later on.\r\n options['__module__'] = __name__\r\n admin_class = type(\"%sAdmin\" % model.__name__, (admin_class,), options)\r\n\r\n # Validate (which might be a no-op)\r\n validate(admin_class, model)\r\n\r\n # Instantiate the admin class to save in the registry\r\n self._registry[model] = admin_class(model, self)",
"def register(self, model_or_iterable, controller_class=None, **options):\n from django.db.models.base import ModelBase\n if not controller_class:\n from .controller import Controller\n controller_class = Controller\n if isinstance(model_or_iterable, ModelBase):\n model_or_iterable = [model_or_iterable]\n for model in model_or_iterable:\n super(Backend, self).register(controller_class, model, **options)",
"def register_model(self, model, bundle):\n if model in self._model_registry:\n raise AlreadyRegistered('The model %s is already registered' \\\n % model)\n\n if bundle.url_params:\n raise Exception(\"A primary model bundle cannot have dynamic \\\n url_parameters\")\n\n self._model_registry[model] = bundle",
"def registerViewForModel(view, model):\n components.registerAdapter(view, model, interfaces.IView)",
"def addedDataObject(ob, event):\n log.info('Added data object')\n ob.index_object()",
"def handle_new_model(request, model_name):\n dbsession = DBSession()\n data = JSONAPIValidator(not_empty=True).to_python(request.body)\n item = COMPONENTS[model_name]['class'].from_dict(data, dbsession)\n if item:\n with transaction.manager:\n dbsession.add(item)\n dbsession.flush()\n item_data, item_included = item.as_dict(request=request)\n response = {'data': item_data}\n if item_included:\n response['included'] = filter_list(item_included)\n return response\n return {}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Update a data model object using the appropriate collection.
|
def update(self, model: DataConcepts) -> DataConcepts:
return self.gemd._collection_for(model).update(model)
|
[
"def update(self, collection, model, id):\n self._validate_collection(collection)\n return {\n \"command\": \"update\",\n \"kwargs\": {\n \"type\": collection,\n \"model\": model,\n \"id\": id,\n }\n }",
"def update(self, new_data):\n self.query.filter_by(id=self.id).update(new_data)",
"def _update(self, _filter, update_data, upsert, many):\n try : \n if (many == False) : \n self.collection.update_one(_filter,update_data,upsert=upsert)\n if (many == True):\n self.collection.update_many(_filter, update_data,upsert=upsert)\n except : \n print(\"ERROR : _update\")",
"def update(self):\n\n self.__check_update_ok()\n self.db.update_dataset_record(self.dataset_dict)",
"def update(self, obj, id):",
"def update_in_db(self, data):\n UserModel.query.filter_by(id=self.id).update(data)\n db.session.commit()",
"def find_one_and_update(collection, _id, data):\n return DB.DATABASE[collection].find_one_and_update({\"_id\": _id}, {\"$set\": data})",
"def update(self):\r\n arg_str = p2e._base._util._convert_args_to_string(\"object.update\", self._object._eco_id)\r\n p2e._app.Exec(arg_str)",
"def update(self, **kwargs):\n ret_val = super(ManagerUtilsQuerySet, self).update(**kwargs)\n post_bulk_operation.send(sender=self, model=self.model)\n return ret_val",
"def update(self, where_dict, changes_dict):\n def updater(document):\n # If document matches where clause, update\n if self.check_document(document, where_dict):\n document.update(changes_dict)\n return document\n \n self.collection = list(map(updater, self.collection))",
"def update(self, obj):\n with self.session() as session:\n return self.update_with_session(session, obj)",
"def update_model(self, x):\n self.model.update(x)",
"def handle_update(self, data):\n if self.skip_update(data):\n logger.debug('skipping update data', data)\n return\n if data['id'] in self.index:\n self.index[data['id']] = data\n\n for callback in self.callbacks.get('update') or set():\n callback(data)",
"def update(self, request, *args, **kwargs):\n data = request.data\n instance = self.get_object()\n if 'items' in data:\n items = instance.items.all()\n items = {i.id: i for i in items}\n for item in data['items']:\n matched_item = items.get(item['id'], None)\n if matched_item is None:\n continue\n matched_item.status = item['status']\n if 'comments' in item:\n matched_item.comments = item['comments']\n items = items.values()\n if len(items) > 0:\n models.StudentAttendanceItem.objects.bulk_update(\n items, ['status', 'comments']\n )\n instance.average_attendance = self.get_average_attendance(items)\n instance.save()\n return Response(status=status.HTTP_200_OK)",
"def update_collection(self, collection, doc):\n\n\t\ttry:\t\n\t\t\tself.db[collection].update({'_id' : ObjectId(doc['_id'])},\n\t\t\t\t\t\t\t\t\tdoc,\n\t\t\t\t\t\t\t\t\tupsert = False)\n\t\texcept Exception as e:\n\t\t\tlogging.error(\"[{}] : {}\".format(sys._getframe().f_code.co_name,e))\n\t\t\texit(1)",
"def test_update_methods(self):\n state = exp_domain.State.create_default_state('ABC')\n question_data = state.to_dict()\n\n test_object = {\n 'question_id': 'col1.random',\n 'title': 'abc',\n 'question_data': question_data,\n 'question_data_schema_version': 1,\n 'collection_id': 'col1',\n 'language_code': 'en'\n }\n\n question = question_domain.Question.from_dict(test_object)\n question.update_title('hello')\n self.assertEqual(question.title, 'hello')\n\n question.update_question_data({})\n self.assertEqual(question.question_data, {})\n\n question.update_language_code('es')\n self.assertEqual(question.language_code, 'es')",
"def update_instance(self, instance: Model, field: Field, value: Any):",
"def update_model():\n model = request.get_json() or {}\n try:\n write_model(model)\n return jsonify('Success')\n except Exception:\n abort(400)",
"def salesforce_collection_update(self, objects):\n for obj in objects:\n assert obj[\n \"id\"\n ], \"Should be a list of objects with Ids returned by Salesforce Collection Insert\"\n if STATUS_KEY in obj:\n del obj[STATUS_KEY]\n\n assert len(objects) <= SF_COLLECTION_INSERTION_LIMIT, (\n \"Cannot update more than %s objects with this keyword\"\n % SF_COLLECTION_INSERTION_LIMIT\n )\n\n records = self.cumulusci.sf.restful(\n \"composite/sobjects\",\n method=\"PATCH\",\n json={\"allOrNone\": True, \"records\": objects},\n )\n\n for record, obj in zip(records, objects):\n obj[STATUS_KEY] = record\n\n for idx, (record, obj) in enumerate(zip(records, objects)):\n if record[\"errors\"]:\n raise AssertionError(\n \"Error on Object {idx}: {record} : {obj}\".format(**vars())\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Delete a GEMD resource from the appropriate collection.
|
def delete(self, uid: Union[UUID, str, LinkByUID, DataConcepts], *, dry_run=False):
if isinstance(uid, DataConcepts):
collection = self.gemd._collection_for(uid)
else:
collection = self.gemd
return collection.delete(uid, dry_run=dry_run)
|
[
"def delete_collection(collection):\r\n collection.delete_many({})",
"def delete(self, entity):",
"def delete(self):\n if self.data:\n self.data.delete()\n super(Resource, self).delete()",
"def delete(self):\n failed, model, entity = self._get_model_and_entity(True, True)\n if failed: return\n entity.delete()\n self._serve({})",
"def delete_resource(self):\r\n results = ResponsesREST.SERVER_ERROR.value\r\n query = \"DELETE FROM Resource WHERE routeSave = %s; \"\r\n param = [self.route_save]\r\n result = self.connect.send_query(query, param)\r\n if result:\r\n results = ResponsesREST.SUCCESSFUL.value\r\n return results",
"def test_delete_entity_from_dictionary_and_DB(self):\n # Create a resource\n id = {\"_id\": \"/agreement/resource-for-deletion\"}\n resource = core_model.Resource(id[\"_id\"], None, [])\n\n entities = EntityDictionary(None)\n entities[id[\"_id\"]] = resource\n\n # delete the resource\n\n del entities[id[\"_id\"]]\n db_res = self.db.entities.find_one(id)\n\n self.assertEqual(db_res, None)\n self.assertEqual(len(entities), 0)",
"def delete(self):\n self.gridfs.delete(self.file_id)",
"def delete(self):\n self.collection.remove({'_id': self['_id']})",
"def delete_linked_resource(self, resource, rel, media_type): # NOQA\n return self.delete_resource(find_link(resource, rel, media_type).href)",
"def _delete_resource(self, res_type, context, res_id):\n\n res_dict = self._encode_resource(resource_id=res_id)\n LOG.debug(\"delete_%(res_type)s(): %(res_id)s\",\n {'res_type': res_type, 'res_id': res_id})\n status_code, res_info = self._request_backend(context, res_dict,\n res_type, 'DELETE')\n if status_code != requests.codes.ok:\n self._raise_contrail_error(status_code, info=res_info,\n obj_name=res_type)",
"def delete(event, context, helper): # pylint: disable=unused-argument\n logger.info(event)\n props = event['ResourceProperties']\n provider = config.get_provider(props.get('Tenant'))\n try:\n # deleting a grant id that doesn't exist does not raise exception\n # see e2e/test_errors.py\n provider.delete_grant(event['PhysicalResourceId'])\n except Auth0Error as err:\n logger.error(err)\n if 'Path validation error' in err.message:\n logger.error('physical resource id is not a \\\n valid grant. Assuming this failed to create.')\n return\n raise",
"def remove_resource(self, rm):\n pass",
"def delete(self, commit=True):\n for ds in self.datasets:\n ds.dmps.remove(self)\n\n db.session.delete(self)\n\n if commit:\n db.session.commit()",
"def delete_record():",
"def resource_data_delete(resource, key):\r\n return IMPL.resource_data_delete(resource, key)",
"def delete_relationship(self, rel_id) -> Relationship:",
"def test_delete(self):\n rgs, gfs = self._create_save_to_db()\n kf_id = ReadGroupGenomicFile.query.first().kf_id\n\n # Send get request\n response = self.client.delete(url_for(RG_GF_URL,\n kf_id=kf_id),\n headers=self._api_headers())\n # Check status code\n self.assertEqual(response.status_code, 200)\n # Check response body\n response = json.loads(response.data.decode(\"utf-8\"))\n # Check database\n rgf = ReadGroupGenomicFile.query.get(kf_id)\n self.assertIs(rgf, None)",
"def delete(self,cn):\n try:\n collection.delete_one({\"_id\": str(cn)})\n except:\n print(\"Kan de data niet verwijderen\")",
"def handleDeleteEvent(self, deletedResource):\n\n\t\tri = deletedResource.ri\n\t\tgroups = CSE.storage.searchByTypeFieldValue(C.tGRP, 'mid', ri)\n\t\tfor group in groups:\n\t\t\tgroup['mid'].remove(ri)\n\t\t\tgroup['cnm'] = group.cnm - 1\n\t\t\tCSE.storage.updateResource(group)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create a new dataset in the collection, or update an existing one. If the Dataset has an ID present, then we update the existing resource, else we create a new one. This differs from super().register() in that None fields are scrubbed, and the JSON response is not assumed to come in a dictionary with a single entry 'dataset'. Both of these behaviors are in contrast to the behavior of projects. Eventually they will be unified in the backend, and one register() method will suffice.
|
def register(self, model: Dataset) -> Dataset:
path = self._get_path()
dumped_dataset = model.dump()
dumped_dataset["deleted"] = None
# Only use the idempotent put approach if a) a unique name is provided, and b)
# the session is configured to use it (default to False for backwards compatibility).
if model.unique_name is not None and self.session.use_idempotent_dataset_put:
# Leverage the create-or-update endpoint if we've got a unique name
data = self.session.put_resource(path, scrub_none(dumped_dataset))
else:
if model.uid is None:
# POST to create a new one if a UID is not assigned
data = self.session.post_resource(path, scrub_none(dumped_dataset))
else:
# Otherwise PUT to update it
data = self.session.put_resource(
self._get_path(model.uid), scrub_none(dumped_dataset))
full_model = self.build(data)
full_model.project_id = self.project_id
return full_model
|
[
"def update(self, dataset_id, name=None, description=None):\n\n dataset = models.Dataset(\n name=name,\n description=description\n )\n\n repository = self.build_repository(repositories.UpdateDataset)\n return repository.update(dataset_id, dataset)",
"def modify_resource(self):\n \"\"\" Create or update resources \"\"\"\n json_payload = cherrypy.request.json\n if not self._verify_auth_token(json.dumps(json_payload)):\n return json.dumps({'success': False, 'msg': 'User operation not authorized!'})\n\n payload = json_payload['json']\n name = payload['filename']\n vendor = payload['server']\n url = payload['url']\n type = payload['type']\n desc = payload['desc']\n session = cherrypy.request.db\n try:\n resource = Resource.get_resource_by_name_vendor(session, name, vendor)\n if not resource:\n resource = Resource(name, desc, type, vendor, url)\n session.add(resource)\n else:\n resource.name = name\n resource.vendor = vendor\n resource.url = url\n resource.type = type\n resource.description = desc\n session.commit()\n except DBAPIError, err:\n log.error('Database operation error %s' % err)\n return json.dumps({'success': False, 'msg': 'Sync resource failed!'})\n return json.dumps({'success': True, 'msg': 'Sync resource success!'})",
"def dataset(self, dataset):\n if self._dataset is not None:\n self._dataset.delete()\n\n if dataset is not None:\n self._dataset = self._model.add(dataset, self)",
"def update_dataset(self, datasetresourcename: str, dataset_patch: DatasetPATCH, query_params: Dict[str, object] = None) -> Dataset:\n if query_params is None:\n query_params = {}\n\n path_params = {\n \"datasetresourcename\": datasetresourcename,\n }\n\n path = Template(\"/catalog/v2alpha2/datasets/${datasetresourcename}\").substitute(path_params)\n url = self.base_client.build_url(path)\n data = dataset_patch.to_dict()\n response = self.base_client.patch(url, json=data, params=query_params)\n return handle_response(response, Dataset)",
"def update_or_create(self, pk, data):\n if pk:\n obj = self.get(pk)\n if obj.get('id'):\n print(obj)\n return self.update(obj['id'], data)\n return self.create(data)",
"def _create(self):\n data = {\n 'device_id': self.id, \n 'name': \"No name\",\n \"registered\": False\n }\n self._tydb.upsert(data, Query().device_id == self.id)",
"def append(self, dataset, identifier):\n\n if isinstance(dataset, str):\n dataset = self._dataset_class(dataset_path=dataset)\n\n if not isinstance(dataset, self._dataset_class):\n raise CompatibilityException('Incompatible dataset. '\n 'You can only add instances of '\n 'type {}'.format(self._dataset_class))\n\n if len(dataset.description)>0:\n identifier = dataset.description\n\n if not self._is_init:\n self._ids = set(dataset.samplet_ids)\n self.targets = dataset.targets\n self._target_sizes = dataset.target_sizes\n\n self.num_samplets = len(self._ids)\n self._modalities[identifier] = dataset.data\n self.feature_names[identifier] = dataset.feature_names\n self.num_features.append(dataset.num_features)\n\n # maintaining a no-data pyradigm Dataset internally to reuse its methods\n self._dataset = copy(dataset)\n # replacing its data with zeros\n self._dataset.data = {id_: np.zeros(1) for id_ in self._ids}\n\n if hasattr(dataset, 'attr'):\n self._common_attr = dataset.attr\n self._common_attr_dtype = dataset.attr_dtype\n else:\n self._common_attr = dict()\n self._common_attr_dtype = dict()\n\n self._attr = dict()\n\n self._is_init = True\n else:\n # this also checks for the size (num_samplets)\n if set(dataset.samplet_ids) != self._ids:\n raise CompatibilityException(\n 'Differing set of IDs in two datasets.'\n ' Unable to add this dataset to the MultiDataset.')\n\n if dataset.targets != self.targets:\n raise CompatibilityException(\n 'Targets for some IDs differ in the two datasets.'\n ' Unable to add this dataset to the MultiDataset.')\n\n if identifier not in self._modalities:\n self._modalities[identifier] = dataset.data\n self.feature_names[identifier] = dataset.feature_names\n self.num_features.append(dataset.num_features)\n else:\n raise KeyError('{} already exists in MultiDataset'\n ''.format(identifier))\n\n if hasattr(dataset, 'attr'):\n if len(self._common_attr) < 1:\n # no attributes were set at all - simple copy sufficient\n self._common_attr = dataset.attr.copy()\n self._common_attr_dtype = dataset.attr_dtype.copy()\n else:\n for a_name in dataset.attr:\n if a_name not in self._common_attr:\n self._common_attr[a_name] = dataset.attr[a_name]\n self._common_attr_dtype[a_name] = \\\n dataset.attr_dtype[a_name]\n elif self._common_attr[a_name] != dataset.attr[a_name]:\n raise ValueError(\n 'Values and/or IDs differ for attribute {}. '\n 'Ensure all datasets have common attributes '\n 'with the same values'.format(a_name))\n\n\n # each addition should be counted, if successful\n self.modality_count += 1",
"def upsert(self, data):\n\t\turl = '/samples/upsert'\n\t\treturn post(url, data)",
"def update(self):\n\n self.__check_update_ok()\n self.db.update_dataset_record(self.dataset_dict)",
"def putData(self, data):\n try:\n self.getDataset().insert_one(data)\n except errors.DuplicateKeyError:\n updateData = {'$set': data}\n self.getDataset().update_one(\n {'_id': data['_id']}, updateData)",
"def _create_dataset_if_necessary(client, dataset_id):\n dataset_reference = bigquery.dataset.DatasetReference(client.project, dataset_id)\n try:\n dataset = client.get_dataset(dataset_reference)\n return\n except NotFound:\n pass\n dataset = bigquery.Dataset(dataset_reference)\n dataset.location = client.location\n print(f\"Creating dataset: {dataset_id}\")\n dataset = client.create_dataset(dataset)",
"def upsert(self, kind: VersionedDataKind, item: dict):",
"def register(self, pid, record, url, **kwargs):\n local_success = super().register(pid, record)\n if not local_success:\n return False\n\n if self.is_api_client_setup:\n # PIDS-FIXME: move to async task, exception handling included\n try:\n doc = DataCite43JSONSerializer().dump_one(record)\n self.api_client.public_doi(\n metadata=doc, url=url, doi=pid.pid_value)\n except DataCiteError as e:\n logging.warning(\"DataCite provider errored when updating \" +\n f\"DOI for {pid.pid_value}\")\n self._log_errors(e)\n\n return False\n else:\n logging.warning(\"DataCite client not configured. \" +\n f\"Cannot register DOI for {pid.pid_value}\")\n\n return True",
"def test_add_resource_which_is_already_in_dictionary(self):\n id = {\"_id\": \"/agreement/4545-4545454-sdasdas-blah\"}\n\n res_0 = core_model.Resource(\"11235\", None, None)\n res_1 = core_model.Resource(\"18512\", None, None)\n\n resources = EntityDictionary(None)\n resources[id[\"_id\"]] = res_0\n resources[id[\"_id\"]] = res_1\n\n self.assertEqual(self.db.entities.find().count(), 1)\n self.assertEqual(self.db.entities.find_one(id)[\"identifier\"],\n res_1.identifier)\n LOG.info(\"Overwriting existing entry in dict reflected in DB\")",
"def merge(self, dataset):\n self.__dataset.update(**dataset)",
"def create_or_update(\n self,\n data_source_name, # type: str\n body=None, # type: Any\n **kwargs # type: Any\n ):\n # type: (...) -> Any\n cls = kwargs.pop('cls', None) # type: ClsType[Any]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n\n content_type = kwargs.pop('content_type', \"application/json\") # type: Optional[str]\n\n if body is not None:\n json = body\n else:\n json = None\n\n request = build_data_sources_create_or_update_request(\n data_source_name=data_source_name,\n content_type=content_type,\n json=json,\n template_url=self.create_or_update.metadata['url'],\n )\n path_format_arguments = {\n \"Endpoint\": self._serialize.url(\"self._config.endpoint\", self._config.endpoint, 'str', skip_quote=True),\n }\n request.url = self._client.format_url(request.url, **path_format_arguments)\n\n pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200, 201]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n raise HttpResponseError(response=response)\n\n if response.status_code == 200:\n if response.content:\n deserialized = response.json()\n else:\n deserialized = None\n\n if response.status_code == 201:\n if response.content:\n deserialized = response.json()\n else:\n deserialized = None\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized",
"def dataset_create(self, **kwargs):\n print(\"Creating RENKU dataset...\")\n opts = {\n \"dataset_name\": \"Dataset name\"\n }\n for key, val in opts.items():\n if key not in kwargs.keys():\n if key in self.__dict__.keys():\n kwargs[key] = self.__dict__[key]\n else:\n kwargs[key] = input(val + \": \")\n\n cmd = Command([self.renku_cli,\n 'dataset',\n 'create',\n kwargs[\"dataset_name\"]\n ]\n )\n print(cmd.stdout.read().decode() + cmd.stderr.read().decode())\n return self.__get_dataset_metadata(kwargs[\"dataset_name\"])",
"def create(self, pk=None, **kwargs):\n pk = pk or next(self.pk_gen)\n obj = self.cls(**kwargs)\n dobj = self.DataObj(obj, pk)\n self.dset.add(dobj)",
"def put_dataset_release(body: Dict) -> Response:\n\n return put_item(DatasetRelease, body)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
List datasets using pagination. Leaving page and per_page as default values will yield all elements in the collection, paginating over all available pages.
|
def list(self, *, per_page: int = 1000) -> Iterator[Dataset]:
return super().list(per_page=per_page)
|
[
"async def paginate(\n self, url: str, page_sz: Optional[int] = None, **params\n ) -> List[Dict]:\n\n # always make a copy of the Caller provided parameters so we\n # do not trample any of their settings.\n\n _params = params.copy()\n\n # fetch the first page of data, which will also tell us the the total\n # number of pages we need to fetch.\n\n _params[\"limit\"] = page_sz or self.DEFAULT_PAGE_SZ\n res = await self.get(url, params=_params)\n res.raise_for_status()\n body = res.json()\n records = body[\"data\"]\n total_pages = body[\"total_pages\"]\n\n # fetch the remaining pages concurrently; remember that the `range`\n # function does not include the ending value ... so +1 the total_pages.\n\n tasks = list()\n for page in range(2, total_pages + 1):\n _params[\"page\"] = page\n tasks.append(self.get(url, params=_params.copy()))\n\n for next_r in asyncio.as_completed(tasks):\n res = await next_r\n body = res.json()\n records.extend(body[\"data\"])\n\n # return the full list of all records.\n\n return records",
"def paginate(self, resource, page=1, page_size=100, **kwargs):\n\n response = resource(page=page, page_size=page_size, **kwargs)\n items = response[\"results\"]\n\n if response[\"page\"] * response[\"page_size\"] >= response[\"count\"]:\n return items\n else:\n return [\n *items,\n *self.paginate(resource, page=page + 1, page_size=page_size, **kwargs),\n ]",
"def list_datasets():\r\n\r\n dir_info = APTDataDirectory.get_all_datasets(app.config['WORKING_PATH'])\r\n dir_valid = dict([(dir, isinstance(info, APTDataDirectory)) for dir, info in dir_info.items()])\r\n return render_template(\"dataset_list.html\", dir_info=dir_info, dir_valid=dir_valid,\r\n navbar=[('List Datasets', '#')])",
"def query(self, dataset, **options):\n url = self.url + \"/\" + dataset\n\n query_chunks = None\n for field, v in options.items():\n if \"in(\" in str(v) and len(str(v)) > 1950:\n values = re.split(r\"in\\((.*?)\\)\", options[field])[1].split(\",\")\n chunksize = int(floor(1950 / len(max(values))))\n query_chunks = (field, [x for x in _chunks(values, chunksize)])\n\n while True:\n if self.links:\n response = self.session.get(self.url + self.links[\"next\"][\"url\"])\n else:\n if query_chunks and query_chunks[1]:\n options[query_chunks[0]] = self.in_(query_chunks[1].pop(0))\n\n response = self.session.get(url, params=options)\n\n if not response.ok:\n raise DAQueryException(\n \"Non-200 response: {} {}\".format(\n response.status_code, response.text\n )\n )\n\n records = response.json()\n\n if not len(records):\n self.links = None\n\n if query_chunks and query_chunks[1]:\n continue\n\n break\n\n if \"next\" in response.links:\n self.links = response.links\n\n for record in records:\n yield record",
"def test_list_recordsets_multiple_pages(rs_fixture):\n client = rs_fixture.client\n rs_zone = rs_fixture.zone\n\n # first page of 2 items\n list_results_page = client.list_recordsets_by_zone(rs_zone[\"id\"], max_items=2, status=200)\n rs_fixture.check_recordsets_page_accuracy(list_results_page, size=2, offset=0, next_id=True, max_items=2)\n\n # second page of 5 items\n start = list_results_page[\"nextId\"]\n list_results_page = client.list_recordsets_by_zone(rs_zone[\"id\"], start_from=start, max_items=5, status=200)\n rs_fixture.check_recordsets_page_accuracy(list_results_page, size=5, offset=2, next_id=True, start_from=start, max_items=5)\n\n # third page of 6 items\n start = list_results_page[\"nextId\"]\n list_results_page = client.list_recordsets_by_zone(rs_zone[\"id\"], start_from=start, max_items=16, status=200)\n rs_fixture.check_recordsets_page_accuracy(list_results_page, size=15, offset=7, next_id=False, start_from=start, max_items=16)",
"def _list_all_pages(endpoint_obj, list_params: dict, *args, **kwargs):\n\n params = list_params.copy()\n\n # set default pagination count if not provided\n if 'count' not in params:\n params['count'] = '200'\n else:\n params['count'] = str(params['count'])\n\n # get first response\n response = endpoint_obj.list(*args, params=params, **kwargs)\n output = list(response.value)\n\n # keep getting pages while they are available\n while response.pagination.has_previous_page():\n time.sleep(1)\n try:\n response = endpoint_obj.list(\n *args,\n params=response.pagination.url_params_previous_page,\n **kwargs)\n\n output += list(response.value)\n except:\n pass\n\n return output",
"def ls(self, prefix='', offset=0, limit=20):\n if not prefix:\n prefix = ''\n\n params = {\n 'action': 'query',\n 'list': 'allpages',\n 'apprefix': prefix,\n 'aplimit': limit + 1\n }\n params.update(DEFAULT_PARAMS)\n resp = json.loads(self.opener.open(API_ROOT, urllib.urlencode(params)).read())\n page_json = resp['query']['allpages']\n page_list = [p['title'] for p in page_json]\n return page_list",
"def get_data(self, *args, **kwargs):\n params = {}\n if \"params\" in kwargs:\n params = kwargs[\"params\"]\n\n if \"per_page\" not in params:\n params[\"per_page\"] = 200\n\n kwargs[\"params\"] = params\n data = super(Manager, self).get_data(*args, **kwargs)\n unpaged_data = self.__deal_with_pagination(args[0], data, params)\n\n return unpaged_data",
"def do_pagination(self, request, queryset):\n limit_max = getattr(settings, 'WAGTAILAPI_LIMIT_MAX', 20)\n\n try:\n offset = int(request.GET.get('offset', 0))\n assert offset >= 0\n except (ValueError, AssertionError):\n raise BadRequestError(\"offset must be a positive integer\")\n\n try:\n limit = int(request.GET.get('limit', min(20, limit_max)))\n\n if limit > limit_max:\n raise BadRequestError(\"limit cannot be higher than %d\" % limit_max)\n\n assert limit >= 0\n except (ValueError, AssertionError):\n raise BadRequestError(\"limit must be a positive integer\")\n\n start = offset\n stop = offset + limit\n\n return queryset[start:stop]",
"def get_records(self, backend=None):\n request_params = copy.deepcopy(self.params)\n request_params['offset'] = int(request_params.get('offset', 0))\n requested_count = int(request_params.get('limit', 0))\n # if no limit is specified we request all the records and use the default page size\n if requested_count == 0:\n request_params['limit'] = self.page_size\n else:\n # set the limit to the smaller value so that we don't request a large number of records\n # when all we actually need is one (for example)\n request_params['limit'] = min(self.page_size, requested_count)\n\n # if there is an offset already in the request params then we can't fulfill this request\n # using the solr or versioned-datastore cursor/search after pagination techniques\n if request_params['offset'] > 0:\n backend = None\n before, after = self.backends.get(backend, (self._default_before, self._default_after))\n\n before(request_params)\n count = 0\n while True:\n try:\n response = requests.post(self.api_url, json=request_params, headers=self.headers)\n response.raise_for_status()\n except requests.exceptions.HTTPError as e:\n raise StreamError(\"Failed fetching URL {}: {}\".format(self.api_url, e))\n\n result = response.json()['result']\n if not result['records']:\n return\n for record in result['records']:\n yield record\n count += 1\n if count == requested_count:\n return\n after(request_params, result)",
"def paginate(docs, per_page=10):\n return [docs[i: i + per_page] for i in range(0, len(docs), per_page)]",
"def page(self):\n limit = self.get_limit()\n offset = self.get_offset()\n count = self.get_count()\n objects = self.get_slice(limit, offset)\n meta = {\n 'offset': offset,\n 'limit': limit,\n 'total_count': count,\n }\n\n if limit and self.method.upper() == 'GET':\n meta['previous'] = self.get_previous(limit, offset)\n meta['next'] = self.get_next(limit, offset, count)\n\n return {\n self.collection_name: objects,\n 'page_meta': meta,\n }",
"def get_listings(self, query, limit=50, pages=10, delay=1):\n\n if not isinstance(limit, int) or limit < 1:\n raise ValueError(f'Items per page ({limit}) must be a positive integer.')\n\n if not isinstance(pages, int) or pages < 1:\n raise ValueError(f'Number of pages ({pages}) must be a positive integer.')\n\n listings = None\n last_page = False\n\n for i in range(pages):\n try:\n # get listings on current page\n result = self.get_homes(query, items_per_grid=limit, offset=i*limit)\n time.sleep(delay)\n except Exception:\n print(f'Error encountered for {query} on page {i+1}')\n break\n\n # handle case when API returns results, but no listings\n if 'listings' not in result['explore_tabs'][0]['sections'][0]:\n print(f'No results for {query} on page {i+1}')\n break\n\n # convert current listings to DataFrame and append to all listings\n current_listings = result['explore_tabs'][0]['sections'][0]['listings']\n df_list = pd.DataFrame([x['listing'] for x in current_listings])\n df_price = pd.DataFrame([x['pricing_quote'] for x in current_listings])\n df = df_list.merge(df_price, left_index=True, right_index=True)\n listings = listings.append(df) if listings is not None else df\n\n # check if there are additional pages\n # looping once more after has_next_page is false returns a few more results\n if not result['explore_tabs'][0]['pagination_metadata']['has_next_page']:\n if last_page:\n print(f'Finished searching {query}')\n break\n else:\n last_page = True\n\n # drop duplicate listings just in case\n if listings is not None:\n listings = listings.drop_duplicates(subset='id')\n\n return listings",
"def search_all(self, params={}):\n params['limit'] = self.single_page_limit\n params['offset'] = 0\n while True:\n h_url = self.query_url.format(query=urlencode(params, True))\n #print h_url\n r = requests.get(h_url).json()\n rows = r.get('rows')\n params['offset'] += len(rows)\n if params['offset'] > self.multi_page_limit:\n break\n if len(rows) is 0:\n break\n for row in rows:\n yield row",
"def test_results_per_page(self):\r\n self.manager.create_api(self.Person, methods=['POST', 'GET'])\r\n for n in range(25):\r\n response = self.app.post('/api/person', data=dumps({}))\r\n assert 201 == response.status_code\r\n response = self.app.get('/api/person?results_per_page=20')\r\n assert 200 == response.status_code\r\n data = loads(response.data)\r\n assert 20 == len(data['objects'])\r\n # Fall back to default number of results per page on bad requests.\r\n response = self.app.get('/api/person?results_per_page=-1')\r\n assert 200 == response.status_code\r\n data = loads(response.data)\r\n assert 10 == len(data['objects'])\r\n # Only return max number of results per page.\r\n response = self.app.get('/api/person?results_per_page=30')\r\n assert 200 == response.status_code\r\n data = loads(response.data)\r\n assert 25 == len(data['objects'])",
"async def fetch_paginated(\n client, bearer_token: str, url: str, data_key: str\n) -> List[Dict[str, Any]]:\n results: List[Dict[str, Any]] = []\n\n page_url = url # we'll modify it as we go\n for _ in range(MaxNPages):\n response = await client.get(\n page_url,\n headers={\n \"Authorization\": f\"Bearer {bearer_token}\",\n \"Accept\": \"application/json\",\n },\n )\n response.raise_for_status()\n data = response.json()\n if not isinstance(data, dict):\n raise RuntimeError(\"Intercom did not return a JSON Object\")\n if data_key not in data:\n raise RuntimeError(f'Intercom did not return \"{data_key}\" data')\n\n results.extend(data[data_key])\n\n if \"pages\" in data and data[\"pages\"][\"next\"]:\n page_url = data[\"pages\"][\"next\"]\n else:\n break\n\n return results",
"def list(self, limit=None, marker=None, return_raw=False):\r\n uri = \"/%s\" % self.uri_base\r\n pagination_items = []\r\n if limit is not None:\r\n pagination_items.append(\"limit=%s\" % limit)\r\n if marker is not None:\r\n pagination_items.append(\"marker=%s\" % marker)\r\n pagination = \"&\".join(pagination_items)\r\n if pagination:\r\n uri = \"%s?%s\" % (uri, pagination)\r\n return self._list(uri, return_raw=return_raw)",
"def paging_results(self):\n\n return 10",
"async def get_all(self, url: str, top: int = -1, skip: int = 0) -> typing.AsyncIterator[dict]:\n param_sep = \"&\" if \"?\" in url else \"?\"\n # -1 means everything\n if top == -1:\n top = float(\"inf\")\n while True:\n # Respect the max specified\n count = min(top, 50)\n top -= count\n\n request_url = url + f\"{param_sep}$skip={skip}&$top={count}\"\n async with self._session.get(request_url) as resp:\n page = (await resp.json())[\"d\"][\"results\"]\n\n for i in page:\n yield i #NOSONAR\n if not page or top == 0:\n break\n skip += len(page)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get a Dataset with the given unique name.
|
def get_by_unique_name(self, unique_name: str) -> Dataset:
if unique_name is None:
raise ValueError("You must supply a unique_name")
path = self._get_path(query_terms={"unique_name": unique_name})
data = self.session.get_resource(path)
if len(data) == 1:
return self.build(data[0])
elif len(data) > 1:
raise RuntimeError("Received multiple results when requesting a unique dataset")
else:
raise NotFound(path)
|
[
"def get_dataset(self, name):\n return Dataset(self.get_dataset_path(name))",
"def dataset(self, name):\n return Dataset(name, client=self)",
"def get_saved_dataset(self, name: str) -> SavedDataset:\n if not flags_helper.is_test():\n warnings.warn(\n \"Retrieving datasets is an experimental feature. \"\n \"This API is unstable and it could and most probably will be changed in the future. \"\n \"We do not guarantee that future changes will maintain backward compatibility.\",\n RuntimeWarning,\n )\n\n dataset = self._registry.get_saved_dataset(name, self.project)\n provider = self._get_provider()\n\n retrieval_job = provider.retrieve_saved_dataset(\n config=self.config, dataset=dataset\n )\n return dataset.with_retrieval_job(retrieval_job)",
"def get_dataset_by_name(\n dataset_name: str,\n subset_name: Optional[str] = None,\n cache_root: Optional[str] = None,\n inverse_triples: bool = False,\n self_loops: bool = False,\n split: Union[None, str, float] = None,\n **kwargs\n) -> KnowledgeGraphAlignmentDataset:\n dataset_name = dataset_name.lower()\n if dataset_name == 'dbp15k_full':\n dataset = DBP15KFull(\n subset=subset_name,\n cache_root=cache_root,\n split=split,\n inverse_triples=inverse_triples,\n self_loops=self_loops,\n )\n elif dataset_name == 'dbp15k_jape':\n dataset = DBP15K(\n subset=subset_name,\n cache_root=cache_root,\n inverse_triples=inverse_triples,\n split=split,\n self_loops=self_loops,\n )\n elif 'wk3l' in dataset_name:\n # size = kwargs.pop('size', '15k')\n size = re.search('wk3l([0-9]+k)', dataset_name).group(1)\n if split == '30':\n split = 0.3\n dataset = WK3l(\n cache_root=cache_root,\n subset=subset_name,\n size=size,\n inverse_triples=inverse_triples,\n split=split,\n self_loops=self_loops,\n )\n elif dataset_name == 'dwy100k':\n dataset = DWY100K(\n subset=subset_name,\n cache_root=cache_root,\n inverse_triples=inverse_triples,\n self_loops=self_loops,\n )\n else:\n raise KeyError(f'Could not load dataset: \"{dataset_name}\"')\n return dataset",
"def get_dataset(self, dataset_id: str) -> Optional[Dataset]:",
"def get_dataset_by_label(self, label):\n match = self._rx_set_label.match(label)\n if match:\n g = int(match.group(1))\n s = int(match.group(2))\n return self.get_dataset(g, s)\n else:\n raise ValueError",
"def get_dataset(self, dataset_id) -> Dataset:\n return self._get_single(Entity.Dataset, dataset_id)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[str]] = None,\n self_link: Optional[pulumi.Input[str]] = None,\n time_zone: Optional[pulumi.Input[str]] = None) -> 'Dataset':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _DatasetState.__new__(_DatasetState)\n\n __props__.__dict__[\"location\"] = location\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"project\"] = project\n __props__.__dict__[\"self_link\"] = self_link\n __props__.__dict__[\"time_zone\"] = time_zone\n return Dataset(resource_name, opts=opts, __props__=__props__)",
"def get_tf_dataset(self, name):\n data_sources = glob.glob(\n os.path.join(self.data_dir, name, '*.tfrecords'))\n # Build dataset provider\n dataset = tf.data.TFRecordDataset(data_sources)\n dataset = dataset.map(self.get_parser_op())\n dataset = dataset.repeat(repeat)\n\n return dataset",
"def dataset(filename):\n from crds import data_file\n if data_file.is_dataset(filename):\n return filename\n else:\n raise ValueError(\"Parameter\", repr(filename),\n \"does not appear to be a dataset filename.\")",
"def load_dataset(dataset_name='mnist'):\n allowed_image_names = ['mnist', 'cifar10', 'cifar100', 'fmnist']\n\n if dataset_name in allowed_image_names:\n return load_image_dataset(dataset_name)\n\n ucr_split = dataset_name.split('/')\n if len(ucr_split) > 1 and ucr_split[0].lower() == 'ucr':\n # is a ucr dataset time series dataset\n id = -1\n\n try:\n id = int(ucr_split[-1])\n except ValueError:\n # assume it is a name of the time series dataset\n\n try:\n id = ucr_utils.DATASET_NAMES.index(ucr_split[-1].lower())\n except ValueError:\n print(\"Could not match %s to either id or name of dataset !\" % (ucr_split[-1]))\n\n if id < 0:\n raise ValueError('Could not match %s to either id or name of dataset !' % (ucr_split[-1]))\n\n return load_ucr_dataset(id, normalize_timeseries=True)\n\n else:\n raise ValueError(\"Could not parse the provided dataset name : \", dataset_name)",
"def findDataset(h5_group, ds_name):\n from .pycro_data import PycroDataset\n\n # print 'Finding all instances of', ds_name\n ds = []\n\n def __find_name(name, obj):\n if ds_name in name.split('/')[-1] and isinstance(obj, h5py.Dataset):\n try:\n ds.append([name, PycroDataset(obj)])\n except TypeError:\n ds.append([name, obj])\n except:\n raise\n return\n\n h5_group.visititems(__find_name)\n\n return ds",
"def get_precreated_dataset( precreated_datasets, name ):\n names = [ d.name for d in precreated_datasets ]\n if names.count( name ) > 0:\n return precreated_datasets.pop( names.index( name ) )\n else:\n return None",
"def __guess_dataset_by_filename(filename):\n try:\n fields = os.path.split(filename)\n if fields:\n if fields[-1].startswith('dataset_') and fields[-1].endswith('.dat'): # dataset_%d.dat\n return Dataset.get(int(fields[-1][len('dataset_'): -len('.dat')]))\n except Exception:\n pass # some parsing error, we can't guess Dataset\n return None",
"def load():\n data = _get_data()\n names = data.dtype.names\n dataset = Dataset(data=data, names=names)\n return dataset",
"def new_dataset(dataset_name: str):\n icedata.template.generate_dataset(dataset_name)",
"def get_imdb(name):\n if not __sets.has_key(name):\n if not __sets.has_key(name.split(\"-\")[0]):\n raise KeyError('Unknown dataset: {}'.format(name))\n else:\n cfg.SET_VARIANT=name.split(\"-\")[1]\n return __sets[name.split(\"-\")[0]]()\n return __sets[name]()",
"def open_dataset(name, cache=True, cache_dir=_default_cache_dir,\n github_url='https://github.com/bradyrx/climdata',\n branch='master', extension=None, **kws):\n if name.endswith('.nc'):\n name = name[:-3]\n longdir = _os.path.expanduser(cache_dir)\n fullname = name + '.nc'\n localfile = _os.sep.join((longdir, fullname))\n md5name = name + '.md5'\n md5file = _os.sep.join((longdir, md5name))\n\n if not _os.path.exists(localfile):\n # This will always leave this directory on disk.\n # May want to add an option to remove it.\n if not _os.path.isdir(longdir):\n _os.mkdir(longdir)\n\n if extension is not None:\n url = '/'.join((github_url, 'raw', branch, extension, fullname))\n _urlretrieve(url, localfile)\n url = '/'.join((github_url, 'raw', branch, extension, md5name))\n _urlretrieve(url, md5file)\n else:\n url = '/'.join((github_url, 'raw', branch, fullname))\n _urlretrieve(url, localfile)\n url = '/'.join((github_url, 'raw', branch, md5name))\n _urlretrieve(url, md5file)\n\n localmd5 = file_md5_checksum(localfile)\n with open(md5file, 'r') as f:\n remotemd5 = f.read()\n if localmd5 != remotemd5:\n _os.remove(localfile)\n msg = \"\"\"\n MD5 checksum does not match, try downloading dataset again.\n \"\"\"\n raise IOError(msg)\n\n ds = _open_dataset(localfile, **kws)\n\n if not cache:\n ds = ds.load()\n _os.remove(localfile)\n\n return ds",
"def get_by_name(cls, name: str) -> \"DataDictionary\":\n cls.logger.debug(\"Get CDS data dictionary with %s name\", name)\n return DataDictionary(\n data_dictionary_json=cls.send_message_json(\n \"GET\",\n f\"Get {name} CDS data dictionary\",\n f\"{cls._url}/api/v1/dictionary/{name}\",\n auth=cls.auth),\n fix_schema=False\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sample program demonstrating what this system can do: adds 1 store, adds 2 customers, and adds 8 videos; the customers rent/return videos.
|
def main():
store1 = Store(address1)
store1.add_customer(Customer(first_name1, last_name1, phone_number1, dob, email))
store1.add_customer(Customer(first_name2, last_name2, phone_number2, dob, email))
video1 = store1.add_video(Video("300"))
video2 = store1.add_video(Video("Spaceballs"))
video3 = store1.add_video(Video("Frozen"))
video4 = store1.add_video(Video("World War Z"))
video5 = store1.add_video(Video("Sister Act"))
video6 = store1.add_video(Video("The Mighty Ducks"))
video7 = store1.add_video(Video("Invincible"))
video8 = store1.add_video(Video("Dances With Wolves"))
store1.rent_video(phone_number1, video3)
store1.rent_video(phone_number1, video4)
store1.rent_video(phone_number1, video5)
store1.rent_video(phone_number2, video7)
store1.rent_video(phone_number2, video8)
print "Rented: ", store1.populate_videos(store1.rented_video_IDs())
store1.rent_video(phone_number1, video8) # try to rent something that has already been rented
store1.return_video(video4)
store1.return_video(video1) # try to return something that has not been rented
print "Rented: ", store1.populate_videos(store1.rented_video_IDs())
print " ### Customer: %s is currently renting: %s" % (store1.customers[phone_number1], store1.populate_videos(store1.customers[phone_number1].rented_video_IDs))
|
[
"def netflix_build_actual_ratings () :\r\n \r\n global verbose\r\n global MOVIES_DIR, PROBE_PATH\r\n global actualRatings, probe\r\n \r\n # allows for testing with hard-coded probe data\r\n if probe == None :\r\n f = open(PROBE_PATH)\r\n probe = f.read()\r\n f.close()\r\n probe = probe.splitlines()\r\n \r\n movieID = 0\r\n thisMovieRatings = {}\r\n tempRatings = []\r\n \r\n for thisID in probe:\r\n i = thisID.find(\":\")\r\n \r\n if i > -1 : # This is Movie ID - read movie entries\r\n \r\n lineLen = len(thisID)\r\n thisFile = MOVIES_DIR + \"mv_00\" + ((6-lineLen)*\"0\") + thisID[0:lineLen-1] + \".txt\"\r\n movieID = int(thisID[0:lineLen-1])\r\n assert(movieID > 0)\r\n assert(movieID <= NUM_MOVIES)\r\n \r\n f = open(thisFile, 'r')\r\n thisFile = f.readlines()\r\n f.close()\r\n \r\n thisMovieRatings.clear()\r\n for j in range (1, len(thisFile)) :\r\n temp = thisFile[j].partition(\",\")\r\n tempRating = ord(temp[2][0])-48\r\n assert(tempRating > 0)\r\n assert(tempRating <= 5)\r\n thisMovieRatings[temp[0]] = tempRating\r\n \r\n if verbose :\r\n print \"Grabbing Actual Ratings for Movie \" + thisID[0:lineLen-1]\r\n \r\n else : #this is a Customer ID, find and add their rating\r\n tempRatings.append(thisMovieRatings[thisID])\r\n \r\n actualRatings = tuple(tempRatings)",
"def _process_next_case(self):\n rated = self.cb.next_test_case()\n\n sim_users = self.retrieve(rated.user) # Retrieves a set of user ids\n sim_movies = self.reuse(rated.user, neighbors=sim_users) # Set of MovieInfo\n\n feedback, retain_rated_case, _ = self.review(rated, sim_movies)\n self.retain(rated, feedback, retain_rated_case)",
"def fetch_data(movies):\n reviews = list()\n for key, val in movies.items():\n\n # sending request to access the particular url\n movie_url = val[1]\n print(\"Getting Data of Movie : {}\".format(key))\n response = requests.get(movie_url)\n soup = BeautifulSoup(response.content, 'lxml')\n content = soup.find_all('section', class_ = \"ipc-page-section ipc-page-section--base\")\n \n review_url = soup.find_all('a', class_ = \"ipc-title ipc-title--section-title ipc-title--base ipc-title--on-textPrimary ipc-title-link-wrapper\")\n review_url = \"https://www.imdb.com\" + review_url[2]['href']\n \n review_url_response = requests.get(review_url)\n review_url_soup = BeautifulSoup(review_url_response.content, 'lxml')\n \n # here we have got several reviews from a single movie.\n total_reviews = review_url_soup.find_all('div', class_ = \"review-container\")\n # here, it made us necessary to iterate a loop, because it contains several reviews, and every review is important to us.\n for review in total_reviews:\n # using exception handling in case, if there is no title or review or rating is not present.\n try:\n rating = review.find(\"div\", class_ = \"ipl-ratings-bar\")\n rating = rating.find('span').text.strip().split(\"/\")[0]\n except:\n rating = \" \"\n try:\n title = review.find('a', class_ = \"title\").text.strip()\n except: \n title = \"NaN\"\n try:\n review_content = review.find('div', class_ = \"text show-more__control\").text.strip()\n except:\n review_content = None\n \n\n # Appending data to the list\n reviews.append((rating, title, review_content))\n \n print(\"Total Reviews Fetch from the data are : {}\".format(len(reviews)))\n \n return reviews # return type: list of tuples",
"def add_vehicle_to_showroom(vehicle_list, visitor_count):\n model_no = input(\"Model number of car: \")\n print(\"\")\n if validate_model(model_no, vehicle_list) == False: # check given model_no already exists or not\n return visitor_count\n while (True):\n print(\"Type of car : \")\n print(\"1.Normal Vehicle \\t 2.Sports Vehicle \\t 3.Heavy Vehicle\")\n print(\"(Type '1/2/3' to select)\")\n car_option = input()\n print(\"\")\n if car_option == '1':\n vehicle_list[str(model_no)] = add_normal_vehicle(model_no) # Add Normal vehicle to list\n break\n elif car_option == '2':\n visitor_count += 20 # visitor number increase by 20\n vehicle_list[str(model_no)] = add_sports_vehicle(model_no) # Add sports vehicle to list\n break\n elif car_option == '3':\n\n vehicle_list[str(model_no)] = add_heavy_vehicle(model_no) # Add heavy vehicle to list\n break\n else:\n print(\"Invalid choice. Please try again.\")\n continue\n print(\"\\nVehicle model no. \", model_no, \" is added.\")\n return visitor_count",
"def main() -> None:\n user_preferences, user_input = main_runner()\n\n for i in range(len(user_preferences)):\n if user_preferences[i] == 'Genre':\n user_preferences[i] = 'genre'\n elif user_preferences[i] == 'Release Year':\n user_preferences[i] = 'release_year'\n elif user_preferences[i] == 'Language':\n user_preferences[i] = 'language'\n else:\n user_preferences[i] = 'duration'\n\n start_year = user_input['release_year'][0]\n stop_year = user_input['release_year'][1]\n year_range = set(range(start_year, stop_year))\n\n genre = user_input['genres']\n\n duration_str = user_input['duration']\n\n if duration_str == 'Short(<60 min)':\n duration_tpl = (41, 60)\n elif duration_str == 'Medium (60-180 min)':\n duration_tpl = (60, 181)\n else:\n duration_tpl = (181, 809)\n\n duration_range = set(range(duration_tpl[0], duration_tpl[1]))\n\n language = user_input['language']\n\n user = Movie('user', 'User', year_range, {genre}, duration_range, {language}, 5.0)\n\n graph = load_dataset('IMDb movies.csv', user)\n movies = graph.recommend_movies(user.movie_id, user_preferences)\n\n display_recommended_movies(movies)",
"def default_movies_for_user(userscore, services, num_movies, watched_movies):\n \n movies = []\n alreadyseen = []\n total = 0\n genrescore = userscore.copy()\n for genre in genrescore:\n total += genrescore[genre]\n\n for genre in genrescore:\n genrescore[genre] = genrescore[genre] / total\n\n for genre in genrescore:\n genrescore[genre] = math.ceil(genrescore[genre] * num_movies)\n\n moviessofar = 0\n services_string = '|'.join(services)\n watchprovidersstring = \"&with_watch_providers=\" + services_string + \"&watch_region=US\"\n if services == []:\n watchprovidersstring = ''\n page = 1\n response = requests.get(\"https://api.themoviedb.org/3/discover/movie?api_key=\" + tmdb_api_key +\n \"&language=en-US®ion=US&sort_by=popularity.desc&include_adult=false&include_video=false&page=\" + str(page) +\n watchprovidersstring + \"&with_watch_monetization_types=flatrate\")\n data = response.json()['results']\n\n for genre in genrescore:\n while moviessofar < genrescore[genre]:\n for result in data:\n if result['title'] not in alreadyseen and (str(result['id']) not in watched_movies) and moviessofar < genrescore[genre] and str(genre) in str(result['genre_ids']):\n movie = {}\n movie['id'] = result['id']\n movie['title'] = result['title']\n movie['genre_ids'] = result['genre_ids']\n movie['image'] = 'https://image.tmdb.org/t/p/w500' + result['poster_path']\n sources = sources_from_tmdbID(movie['id'])\n if sources != 'None':\n sources_with_service = [sources[x] for x in sources if str(sources[x]) in services] \n movie['source'] = sources_with_service\n movies.append(movie)\n alreadyseen.append(result['title'])\n moviessofar += 1\n page += 1\n if moviessofar < genrescore[genre]:\n response = requests.get(\"https://api.themoviedb.org/3/discover/movie?api_key=\" + tmdb_api_key +\n \"&language=en-US®ion=US&sort_by=popularity.desc&include_adult=false&include_video=false&page=\" + str(page) +\n watchprovidersstring + \"&with_watch_monetization_types=flatrate\")\n data = response.json()['results']\n moviessofar = 0\n\n random.shuffle(movies)\n if len(movies) - num_movies > 0:\n return movies[:-(len(movies) - num_movies)]\n return movies",
"def statisticsExample():\n\n \"\"\"\n 1. We initialize the required layers of the application\n \"\"\"\n\n '''\n Start client Controller\n '''\n clientRepo = Repository()\n clientValidator = ClientValidator()\n clientController = ClientController(clientValidator, clientRepo)\n\n aaron = clientController.create(100, \"1820203556699\", \"Aaron\")\n bob = clientController.create(101, \"2750102885566\", \"Bob\")\n carol = clientController.create(102, \"1820604536579\", \"Carol\")\n\n '''\n Start car Controller\n '''\n carRepo = Repository()\n carValidator = CarValidator()\n carController = CarController(carValidator, carRepo)\n\n audiA3 = carController.create(200, \"CJ 01 AAA\", \"Audi\", \"A3\")\n audiA4 = carController.create(201, \"CJ 01 BBB\", \"Audi\", \"A4\")\n audiA5 = carController.create(202, \"CJ 01 CCC\", \"Audi\", \"A5\")\n audiA6 = carController.create(203, \"CJ 01 DDD\", \"Audi\", \"A6\")\n audiA7 = carController.create(204, \"CJ 01 EEE\", \"Audi\", \"A7\")\n vwpolo = carController.create(205, \"CJ 01 FFF\", \"VW\", \"Polo\")\n vwpassat = carController.create(206, \"CJ 01 GGG\", \"VW\", \"Passat\")\n vwgolf = carController.create(207, \"CJ 01 HHH\", \"VW\", \"Golf\")\n dacialodgy = carController.create(208, \"CJ 01 ERT\", \"Dacia\", \"Lodgy\")\n daciaduster = carController.create(209, \"CJ 01 YTH\", \"Dacia\", \"Duster\")\n\n '''\n Start rental Controller\n '''\n rentRepo = Repository()\n rentValidator = RentalValidator()\n rentController = RentalController(rentValidator, rentRepo, carRepo, clientRepo)\n\n rentController.createRental(300, aaron, audiA3, date(2015, 11, 20), date(2015, 11, 22))\n rentController.createRental(301, carol, audiA5, date(2015, 11, 24), date(2015, 11, 25))\n rentController.createRental(302, carol, audiA6, date(2015, 12, 10), date(2015, 12, 12))\n rentController.createRental(303, aaron, audiA4, date(2015, 11, 21), date(2015, 11, 25))\n rentController.createRental(304, aaron, audiA3, date(2015, 11, 24), date(2015, 11, 27))\n rentController.createRental(305, bob, audiA5, date(2015, 11, 26), date(2015, 11, 27))\n rentController.createRental(306, carol, audiA6, date(2015, 12, 15), date(2015, 12, 20))\n rentController.createRental(307, bob, audiA4, date(2015, 12, 1), date(2015, 12, 10))\n rentController.createRental(308, carol, audiA4, date(2015, 12, 11), date(2015, 12, 15))\n rentController.createRental(309, aaron, audiA5, date(2015, 11, 28), date(2015, 12, 2))\n\n rentController.createRental(310, aaron, vwpolo, date(2015, 11, 20), date(2015, 11, 22))\n rentController.createRental(311, carol, vwgolf, date(2015, 11, 24), date(2015, 11, 25))\n rentController.createRental(312, carol, vwpassat, date(2015, 12, 10), date(2015, 12, 12))\n rentController.createRental(313, aaron, dacialodgy, date(2015, 11, 21), date(2015, 11, 25))\n rentController.createRental(314, aaron, vwpolo, date(2015, 11, 24), date(2015, 11, 27))\n rentController.createRental(315, bob, vwgolf, date(2015, 11, 26), date(2015, 11, 27))\n rentController.createRental(316, carol, vwgolf, date(2015, 12, 15), date(2015, 12, 20))\n rentController.createRental(317, bob, daciaduster, date(2015, 12, 1), date(2015, 12, 10))\n rentController.createRental(318, carol, daciaduster, date(2015, 12, 11), date(2015, 12, 15))\n rentController.createRental(319, aaron, vwpassat, date(2015, 11, 28), date(2015, 12, 2))\n\n \"\"\"\n Statistic 1:\n - \"Most rented cars\". The list of cars, sorted by the number of times they were rented\n \"\"\"\n print(\"Most rented cars. 
The list of cars, sorted by the number of times they were rented\")\n print(\"Times\".ljust(10) + \" Car\".ljust(40))\n for cr in rentController.mostOftenRentedCars(): \n print (cr)\n\n print(\"-\"*70)\n\n \"\"\"\n Statistic 2:\n - \"Most rented cars\". The list of cars, sorted by the number of days they were rented\n \"\"\"\n print(\"Most rented cars. The list of cars, sorted by the number of days they were rented\")\n print(\"Days\".ljust(10) + \" Car\".ljust(40))\n for cr in rentController.mostRentedCars():\n print (cr)\n\n print(\"-\"*70)\n \n \"\"\"\n Statistic 3:\n - \"Most rented car make\". The list of car makes, sorted by the number of rentals\n \"\"\"\n print(\"Most rented car make. The list of car makes, sorted by the number of rentals\")\n print(\"Times\".ljust(10) + \" Car make\".ljust(40))\n for cr in rentController.mostOftenRentedCarMake():\n print (cr)",
"def main():\n client = build(\"shopping\", SHOPPING_API_VERSION, developerKey=DEVELOPER_KEY)\n resource = client.products()\n # Note the 'q' parameter, which will contain the value of the search query\n request = resource.list(source=\"public\", country=\"US\", q=\"digital camera\")\n response = request.execute()\n pprint.pprint(response)",
"def populate_movie_details():\n\n toy_story = media.Movie(\n \"Toy story\",\n \"A story of a boy and his toys\",\n \"http://upload.wikimedia.org/wikipedia/en/1/13/Toy_Story.jpg\",\n \"https://www.youtube.com/watch?v=KYz2wyBy3kc\"\n )\n\n avatar = media.Movie(\n \"Avatar\",\n \"A marine on an alien planet\",\n \"http://upload.wikimedia.org/wikipedia/en/b/b0/Avatar-Teaser-Poster.jpg\",\n \"https://www.youtube.com/watch?v=d1_JBMrrYw8\"\n )\n\n sup = media.Movie(\n \"Up\",\n \"A house lifted by baloons\",\n \"http://upload.wikimedia.org/wikipedia/en/0/05/Up_%282009_film%29.jpg\",\n \"https://www.youtube.com/watch?v=pkqzFUhGPJg\"\n )\n\n interstellar = media.Movie(\n \"Interstellar\",\n \"Finding new life in space\",\n \"http://upload.wikimedia.org/wikipedia/en/b/bc/Interstellar_film_poster.jpg\",\n \"https://www.youtube.com/watch?v=nyc6RJEEe0U\"\n )\n\n big_hero_6 = media.Movie(\n \"Big Hero 6\",\n \"Boy genius builds robots and saves world\",\n \"http://upload.wikimedia.org/wikipedia/en/4/4b/Big_Hero_6_%28film%29_poster.jpg\",\n \"https://www.youtube.com/watch?v=8IdMPpKMdcc\"\n )\n\n the_lego_movie = media.Movie(\n \"The Lego Movie\",\n \"Everything is awesome, Everything is cool when you're part of a team!\",\n \"http://upload.wikimedia.org/wikipedia/en/1/10/The_Lego_Movie_poster.jpg\",\n \"https://www.youtube.com/watch?v=fZ_JOBCLF-I\"\n )\n\n movies = [toy_story, avatar, sup, interstellar, big_hero_6, the_lego_movie]\n\n return movies",
"def main():\n start_time = time.time()\n api_key = get_api_key('api_key.txt')\n\n list_groups_of_trending_ids = get_list_group_ids('trending-yt.csv')\n list_groups_of_nontrending_ids = get_list_group_ids('nontrending-yt.csv')\n # initialize trending and nontrending details objects\n nontrending_details = VideoDetails()\n trending_details = VideoDetails()\n # modify 2nd parameter of slice to specify number of groups of 50 to query\n slice_trending = slice(0, len(list_groups_of_trending_ids))\n slice_nontrending = slice(0, len(list_groups_of_nontrending_ids))\n\n # get video stats for each list of videos\n print(\"Getting nontrending video data...\")\n for group in tqdm(list_groups_of_nontrending_ids[slice_nontrending]):\n get_video_details(group, nontrending_details, api_key)\n print(\"Getting trending video data...\")\n for group in tqdm(list_groups_of_trending_ids[slice_trending]):\n get_video_details(group, trending_details, api_key)\n\n # convert class objects to tuples\n nontrending_details = details_object_to_array(nontrending_details)\n trending_details = details_object_to_array(trending_details)\n\n print(\"\\n\\nNontrending video details \\n ------------------------- \\n\")\n for details in nontrending_details:\n print(details)\n\n print(\"\\n\\nTrending video details \\n ------------------------- \\n\")\n for details in trending_details:\n print(details)\n\n nontrending_stats_mat = np.array(nontrending_details).astype(np.float)\n trending_stats_mat = np.array(trending_details).astype(np.float)\n np.save('nontrending_stats', nontrending_stats_mat)\n np.save('trending_stats', trending_stats_mat)\n print(\"Took \" + str(time.time() - start_time) + \" seconds.\")",
"def soldout():",
"def main():\n movies = []\n f = open('movie_list.txt', 'r')\n while True:\n title = f.readline()\n pic = f.readline()\n vid = f.readline()\n movies.append(media.Movie(title, pic, vid));\n\n line = f.readline()\n if not line: \n break\n\n open_movies_page(movies)",
"def add_movie(conn, *, id_parse=ACTOR_ID_PARSE, info_cap=MAX_INFO_SIZE):\n print('adding new movie')\n printc('b',\n '** Note ** : if release time is left blank, current date will be assumed. '\n 'To enter actors, provide each actor\\'s id #, space-separated. Actor ids are '\n 'not required, but a director id is. If the actor is a main actor, '\n 'enter the actor id with a * at its end (without space), e.g. 12345*.'\n )\n title, genre, url, rating, budget, gross_income, director_id, studio, actors, info = menu_selections(\n 'title', 'genre', 'url (at most 100 chars)', 'rating (e.g. G, PG-13)',\n 'budget ($)', 'gross revenue($)', 'director id', 'studio (at most 20 chars)',\n 'actor ids\\0', f'additional info/summary [{info_cap} chars max]\\0'\n )\n info = truncate(info, info_cap)\n # just take the date as today\n# date = custom_select(\n# \"Enter release date (empty field sets date to today)\", get_date)[1]\n# if not date:\n# date = dt.date.today()\n \n actors, is_main = zip(*(\n actor_id.groups() for actor_id in id_parse.finditer(actors)\n ))\n is_main = tuple('t' if m else 'f' for m in is_main)\n roles = tuple(truncate(input(f'enter role for actor {a} (at most 50 chars): '),50) for a in actors)\n \n\n conn.autocommit = False\n with conn.cursor() as cur:\n # IMPORTANT -- make this a transaction that succeeds only if both parts\n # (adding movie and actors) succeeds\n try:\n cur.execute(\n \"\"\"\n INSERT INTO movie\n (title, genre, url, rating, budget, gross_income, director_id, studio, summary, date_released)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, CURRENT_DATE) RETURNING id;\"\"\",\n (title, genre, url, rating, budget, gross_income, director_id, studio, info)\n )\n movie_id = cur.fetchone()[0]\n \n execute_batch(cur,\n \"\"\"\n INSERT INTO act\n (actor_id, movie_id, if_main, role)\n VALUES (%s, %s, %s, %s);\"\"\",\n list(zip(actors, [movie_id]*len(actors), is_main, roles))\n )\n\n printc('g', f'movie {title} inserted with id {movie_id}')\n conn.commit()\n except Exception as e:\n print('add_movie: error:', repr(e))\n conn.rollback()\n \n conn.autocommit = True",
"def userReviews():\n usersList = files.readUsers()\n beersList = files.readBeers()\n breweryList = files.readBreweries()\n breweryToBeers = files.readBreweryToBeers()\n\n total = 0\n totalUsersComplete = 0\n for userHash, user in usersList.iteritems():\n totalUsersComplete += 1\n # if the data has been normalized, old data will not\n # have usernames. Ignore older users which may have\n # already gotten reviews\n if user.username:\n userId = user.uid\n username = user.username\n user.username = None\n userReviewCount = 0\n offsetTotal = 0\n ratings = {}\n\n print 'Processing ' + str(userId) + ': ' + username\n # each response returns at most 25 reviews. To get more user\n # reviews, call again with an offset get at most 50 reviews\n # from the same user\n while (userReviewCount < 2):\n print username + ': ' + str(userReviewCount + 1)\n data = untappd.getUserReviewData(username, offsetTotal)\n offset = data['response']['beers']['count']\n offsetTotal += offset\n reviews = data['response']['beers']['items']\n for review in reviews:\n userRating = review['rating_score']\n if userRating > 0:\n beerInfo = review['beer']\n breweryInfo = review['brewery']\n # fill in beer information\n if hash(str(beerInfo['bid'])) not in beersList:\n stylesList = []\n style = unicode(beerInfo['beer_style']).encode(\"utf-8\")\n styles = style.lower().title().split('/')\n for style in styles:\n style = style.strip()\n stylesList.append(style)\n beerAttribs = {\n 'bid': str(beerInfo['bid']),\n 'name': unicode(beerInfo['beer_name']).encode(\"utf-8\"),\n 'label': beerInfo['beer_label'],\n 'abv': beerInfo['beer_abv'],\n 'ibu': beerInfo['beer_ibu'],\n 'style': stylesList,\n 'description': unicode(beerInfo['beer_description']).encode(\"utf-8\"),\n 'rating': beerInfo['rating_score'],\n 'numRatings': 1,\n 'brewery': str(breweryInfo['brewery_id'])\n }\n beer = UT.UntappdBeer(beerAttribs)\n beersList[hash(beer.bid)] = beer\n else:\n beersList[hash(str(beerInfo['bid']))].numRatings += 1\n # fill in brewery information\n if hash(str(breweryInfo['brewery_id'])) not in breweryList:\n breweryAttribs = {\n 'breweryId': str(breweryInfo['brewery_id']),\n 'name': unicode(breweryInfo['brewery_name']).encode(\"utf-8\"),\n 'label': breweryInfo['brewery_label'],\n 'country': unicode(breweryInfo['country_name']).encode(\"utf-8\"),\n 'location': unicode(breweryInfo['location']).encode(\"utf-8\")\n }\n brewery = UT.UntappdBrewery(breweryAttribs)\n breweryList[hash(brewery.breweryId)] = brewery\n\n # map breweery_id to a list of beers produced there\n if hash(str(breweryInfo['brewery_id'])) not in breweryToBeers:\n # store the current beer in a list of beers of\n # the brewery\n breweryToBeers[hash(str(breweryInfo['brewery_id']))] = {str(breweryInfo['brewery_id']): [str(beerInfo['bid'])]}\n else:\n # add current beer to brewery's list of beers\n breweryToBeers[hash(str(breweryInfo['brewery_id']))][str(breweryInfo['brewery_id'])].append(str(beerInfo['bid']))\n\n # add list of beer ratings to user\n ratings[str(beerInfo['bid'])] = userRating\n userReviewCount += 1\n user.ratings = ratings\n\n # store the dictionaries after new data so user doesn't kill process before writing\n # with open('../data/users.json', 'wb') as usersFile:\n # json = jpickle.encode(usersList)\n # usersFile.write(json)\n # with open('../data/beers.json', 'wb') as beersFile:\n # json = jpickle.encode(beersList)\n # beersFile.write(json)\n # with open('../data/breweries.json', 'wb') as breweriesFile:\n # json = jpickle.encode(breweryList)\n # breweriesFile.write(json)\n 
# with open('../data/breweryToBeers.json', 'wb') as breweryToBeersFile:\n # json = jpickle.encode(breweryToBeers)\n # breweryToBeersFile.write(json)\n\n # if the offset is less than 25, then there are no more reviews to retrieve\n if offset < 25:\n break\n writeJSONFile('../data/users.json', usersList)\n writeJSONFile('../data/beers.json', beersList)\n writeJSONFile('../data/breweries.json', breweryList)\n writeJSONFile('../data/breweryToBeers.json', breweryToBeers)\n\n total += len(ratings)\n print str(userId) + ': ' + username + ', Processed: ' + str(len(ratings)) + ' reviews'\n print 'Total Reviews: ' + str(total)\n print 'Total Users Completed: ' + str(totalUsersComplete)\n sleep(37 * (userReviewCount))\n else:\n total += len(user.ratings)",
"def get_video_rental_history(video):\n results = db.session.query(Rental, Video, Customer) \\\n .select_from(Rental).join(Video).join(Customer).all()\n response = []\n for rental, video, customer, in results:\n response.append({\n \"id\":customer.id,\n \"name\":customer.name,\n \"postal_code\":customer.postal_code,\n \"checkout_date\":customer.registered_at,\n \"due_date\":rental.due_date,\n })\n return jsonify(response), 200",
"def recommend(n_clicks, num_recs, upperlimit, lowerlimit, input_box):\n\n context = clean_text(input_box)\n print(upperlimit, num_recs, n_clicks)\n if context != '':\n if lowerlimit:\n hd2vrecommendations = hd2v_wvindvout_recommend(context, hd2vreducedmodel) \n bm25recommendations = solr_recommend(context, 'mag_en_cs_50_all')\n citedbm25_recommendations = solr_cited_recommend(context, 'mag_en_cs_50_cited_all')\n if not hd2vrecommendations or not bm25recommendations or not citedbm25_recommendations:\n return html.Div([\n html.Br(),\n html.Br(),\n html.H2('No recommendations returned.'),\n ])\n hybrid_recommendations = hybrid_recommend(hd2vrecommendations, bm25recommendations, citedbm25_recommendations)\n # magid, title, year, citations, abstract\n if upperlimit:\n all_recommendations = get_paper_details(hybrid_recommendations)\n reduced_recommendations = [recomm for recomm in all_recommendations if recomm[3]<=500]\n reduced_recommendations = get_topn(reduced_recommendations, num_recs)\n else:\n reduced_recommendations = get_paper_details(get_topn(hybrid_recommendations, num_recs))\n #recommended_titles = [details[1] for details in get_paper_details(reduced_recommendations)]\n return html.Div([\n html.Br(),\n html.Br(),\n html.H2('Recommendations:'),\n html.Ol([html.Li(html.A(recomm[1], \n href='https://academic.microsoft.com/paper/{}'.format(recomm[0]),\n title=' Year: {}\\nAbstract:{}'\\\n .format(recomm[2], recomm[4]))\n ) \n for recomm in reduced_recommendations])\n ])\n else:\n hd2vrecommendations = hd2v_wvindvout_recommend(context, hd2vmodel)\n bm25recommendations = solr_recommend(context, 'mag_en_cs_all')\n citedbm25_recommendations = solr_cited_recommend(context, 'mag_en_cs_cited_all')\n if not hd2vrecommendations or not bm25recommendations or not citedbm25_recommendations:\n return html.Div([\n html.Br(),\n html.Br(),\n html.H2('No recommendations returned.'),\n ])\n hybrid_recommendations = hybrid_recommend(hd2vrecommendations, bm25recommendations, citedbm25_recommendations)\n # magid, title, year, citations, abstract\n if upperlimit:\n all_recommendations = get_paper_details(hybrid_recommendations)\n reduced_recommendations = [recomm for recomm in all_recommendations if recomm[3]<=500]\n reduced_recommendations = get_topn(reduced_recommendations, num_recs)\n else:\n #print(hybrid_recommendations)\n reduced_recommendations = get_paper_details(get_topn(hybrid_recommendations, num_recs))\n #recommended_titles = [details[1] for details in get_paper_details(reduced_recommendations)]\n return html.Div([\n html.Br(),\n html.Br(),\n html.H2('Recommendations:'),\n html.Ol([html.Li(html.A(recomm[1], \n href='https://academic.microsoft.com/paper/{}'.format(recomm[0]),\n title=' Year: {}\\nAbstract:{}'\\\n .format(recomm[2], recomm[4]))\n ) \n for recomm in reduced_recommendations])\n ])",
"def create_recommendations(self, test_case='all'):\n\n\tprint()\n\tprint('TEST - Create Recommendations')\n\tprint(test_case)\n\n\t# Init\n\tname_dic = {\n\t\t\t\t\t# Products\n\t\t\t\t\t'prod_0':\t\t'ACNETOPIC 200ML',\t\t\t\t# Topic\n\t\t\t\t\t'prod_1':\t\t'KIT POST LASER CO2 COOPER',\t# Kit\n\t\t\t\t\t'prod_2':\t\t'TARJETA VIP',\t\t\t\t\t# Card\n\t\t\t\t\t'prod_3':\t\t'OTROS',\t\t\t\t\t\t# Other\n\t\t\t\t\t'prod_4':\t\t'COMISION DE ENVIO',\t\t\t# Comission\n\n\t\t\t\t\t# Lasers\n\t\t\t\t\t'co2': \t\t\t'LASER CO2 FRACCIONAL - Cuello - Rejuvenecimiento - Grado 1 - 1 sesion',\t# Co2\n\t\t\t\t\t'exc':\t\t\t'LASER EXCILITE - Abdomen - Alopecias - 1 sesion - 15 min',\t\t\t\t\t# Excilite\n\t\t\t\t\t'ipl':\t\t\t'LASER M22 IPL - Abdomen - Depilacion - 1 sesion - 15 min',\t\t\t\t\t# Ipl\n\t\t\t\t\t'ndy':\t\t\t'LASER M22 ND YAG - Localizado Cuerpo - Hemangiomas - 1 sesion - 15 min',\t# Ndyag\n\t\t\t\t\t'qui':\t\t\t'QUICKLASER - Cuello - Rejuvenecimiento - Grado 1 - 1 sesion',\t\t\t\t# Quick\n\n\t\t\t\t\t# Cosmetology\n\t\t\t\t\t'cos_0':\t\t'CARBOXITERAPIA - Cuerpo - Rejuvenecimiento - 1 sesion - 30 min',\t\t\t\t# Carboxitherapy\n\t\t\t\t\t'cos_1':\t\t'PUNTA DE DIAMANTES - Rostro - Limpieza profunda - 1 sesion - 30 min',\t\t\t# Diamond Tip\n\t\t\t\t\t'cos_2':\t\t'LASER TRIACTIVE + CARBOXITERAPIA - Rostro + Papada + Cuello - Reafirmacion - 10 sesiones - 30 min',\t# Laser Triactive + Carbo\n\n\t\t\t\t\t# Medical\n\t\t\t\t\t'med_0':\t\t'BOTOX - 1 Zona - Rejuvenecimiento Zona - 1 sesion',\t\t\t\t\t\t\t\t\t\t# Botox\n\t\t\t\t\t'med_1':\t\t'CRIOCIRUGIA - Todo Rostro - Acne - 1 sesion',\t\t\t\t\t\t\t\t\t\t\t\t# Cryo\n\t\t\t\t\t'med_2':\t\t'ACIDO HIALURONICO - 1 Jeringa - Rejuvenecimiento Facial - 1 sesion - FILORGA UNIVERSAL',\t# Hialuronic\n\t\t\t\t\t'med_3':\t\t'INFILTRACIONES',\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# Infil\n\t\t\t\t\t'med_4':\t\t'MESOTERAPIA NCTF - Todo Rostro - Rejuvenecimiento Facial - 5 sesiones',\t\t\t\t\t# Meso\n\t\t\t\t\t'med_5':\t\t'PLASMA - Todo Rostro - Rejuvenecimiento Facial - 1 sesion',\t\t\t\t\t\t\t\t# Plasma\n\t\t\t\t\t'med_6':\t\t'REDUX - 1 Zona - Rejuvenecimiento Zona - 1 sesion',\t\t\t\t\t\t\t\t\t\t# Redux\n\t\t\t\t\t'med_7':\t\t'ESCLEROTERAPIA - Piernas - Varices - 1 sesion',\t\t\t\t\t\t\t\t\t\t\t# Sclero\n\t\t\t\t\t'med_8':\t\t'VITAMINA C ENDOVENOSA',\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# Vitamin\n\n\t\t\t\t\t# New Services\n\t\t\t\t\t'gyn':\t\t\t'LASER CO2 FRACCIONAL - Monalisa Touch / Revitalizacion',\n\t\t\t\t\t'echo':\t\t\t'ECOGRAFIAS ESPECIALES - Cadera Pediatrica (Bilateral) - 1 sesion',\n\t\t\t\t\t'prom':\t\t\t'CARBOXITERAPIA - Localizado Cuerpo - Rejuvenecimiento Facial - 6 sesiones',\n\t\t}\n\n\ttst_list_all = [\n\t\t\t\t\t'prod_0',\n\t\t\t\t\t'prod_1',\n\t\t\t\t\t'prod_2',\n\t\t\t\t\t'prod_3',\n\t\t\t\t\t'prod_4',\n\n\t\t\t\t\t'co2',\n\t\t\t\t\t'exc',\n\t\t\t\t\t'ipl',\n\t\t\t\t\t'ndy',\n\t\t\t\t\t'qui',\n\n\t\t\t\t\t'cos_0',\n\t\t\t\t\t'cos_1',\n\t\t\t\t\t'cos_2',\n\n\t\t\t\t\t'med_0',\n\t\t\t\t\t'med_1',\n\t\t\t\t\t'med_2',\n\t\t\t\t\t'med_3',\n\t\t\t\t\t'med_4',\n\t\t\t\t\t'med_5',\n\t\t\t\t\t'med_6',\n\t\t\t\t\t'med_7',\n\t\t\t\t\t'med_8',\n\n\t\t\t\t\t'gyn',\n\t\t\t\t\t'echo',\n\t\t\t\t\t'prom',\n\t]\n\n\ttst_list_prod = [\n\t\t\t\t\t'prod_0',\n\t\t\t\t\t'prod_1',\n\t\t\t\t\t'prod_2',\n\t\t\t\t\t'prod_3',\n\t\t\t\t\t'prod_4',\n\t]\n\n\ttst_list_laser = [\n\t\t\t\t\t'co2',\n\t\t\t\t\t'exc',\n\t\t\t\t\t'ipl',\n\t\t\t\t\t'ndy',\n\t\t\t\t\t'qui',\n\t]\n\n\ttst_list_cos = [\n\t\t\t\t\t'cos_0',\n\t\t\t\t\t'cos_1',\n\t\t\t\t\t'cos_2',\n\t]\n\n\ttst_list_med = 
[\n\t\t\t\t\t'med_0',\n\t\t\t\t\t'med_1',\n\t\t\t\t\t'med_2',\n\t\t\t\t\t'med_3',\n\t\t\t\t\t'med_4',\n\t\t\t\t\t'med_5',\n\t\t\t\t\t'med_6',\n\t\t\t\t\t'med_7',\n\t\t\t\t\t'med_8',\n\t]\n\n\ttst_list_new = [\n\t\t\t\t\t'gyn',\n\t\t\t\t\t'echo',\n\t\t\t\t\t'prom',\n\t]\n\n\ttst_list_empty = []\n\n\n\t# Test cases\n\tif test_case in ['all']:\n\t\ttst_list = tst_list_all\n\n\telif test_case in ['laser']:\n\t\ttst_list = tst_list_laser\n\n\telif test_case in ['product']:\n\t\ttst_list = tst_list_prod\n\n\telif test_case in ['medical']:\n\t\ttst_list = tst_list_med\n\n\telif test_case in ['cosmetology']:\n\t\ttst_list = tst_list_cos\n\n\telif test_case in ['new']:\n\t\ttst_list = tst_list_new\n\n\telif test_case in [False]:\n\t\ttst_list = tst_list_empty\n\n\telse:\n\t\tprint('This should not happen !!!')\n\n\n\t# Loop\n\tfor tst in tst_list:\n\t\t\n\t\t# Init\n\t\tname = name_dic[tst]\n\t\t\n\t\t# Search\n\t\tproduct = self.env['product.template'].search([\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t('name', '=', name),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t('pl_price_list', 'in', ['2019']),\n\t\t\t\t\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t\t\t\t\t\t#order='date_order desc',\n\t\t\t\t\t\t\t\t\t\t\t\tlimit=1,\n\t\t\t\t\t\t\t\t\t)\n\n\t\t# Check Exceptions\n\t\ttry:\n\t\t\tproduct.ensure_one()\n\n\t\texcept ProductErrorException:\n\t\t\tmsg_name = \"ERROR: Record Must be One Only.\"\n\t\t\tclass_name = type(product).__name__\n\t\t\tobj_name = name\n\t\t\tmsg = msg_name + '\\n' + class_name + '\\n' + obj_name\n\t\t\traise ProductErrorException('msg')\n\n\n\t\tproduct_id = product.id\n\n\t\t# Check if product complete \n\t\tprint()\n\t\tprint('Check product_template complete')\n\t\tprint(product)\n\t\tprint(product.name)\n\t\tprint(product.pl_price_list)\n\t\tprint(product.pl_family)\n\t\tprint(product.pl_subfamily)\n\t\tprint(product.pl_zone)\n\t\tprint(product.pl_pathology)\n\t\tprint(product.pl_sessions)\n\t\tprint(product.pl_level)\n\t\tprint(product.pl_time)\n\t\tprint(product.pl_zone)\n\t\tprint(product.pl_treatment)\n\t\tprint()\n\n\n\t\t# *** Create recommendation\n\t\tservice = self.env['openhealth.service'].create({\n\t\t\t'service': \t\t\tproduct_id,\n\t\t\t'family': \t\t\tproduct.pl_family,\n\t\t\t'subfamily': \t\tproduct.pl_subfamily,\n\t\t\t'zone': \t\t\tproduct.pl_zone,\n\t\t\t'pathology': \t\tproduct.pl_pathology,\n\t\t\t'sessions': \t\tproduct.pl_sessions,\n\t\t\t'level': \t\t\tproduct.pl_level,\n\t\t\t'time': \t\t\tproduct.pl_time,\n\t\t\t'price_applied': \tproduct.pl_price_list,\n\t\t\t'sel_zone': \t\tproduct.pl_zone,\n\t\t\t'pl_treatment': \tproduct.pl_treatment,\n\n\t\t\t'treatment': \t\tself.id,\n\t\t})\n\t\n\t\n\t\t# Check if service complete \n\t\t#print()\n\t\t#pint(service)\n\t\t#print(service.name)\n\t\t#print(service.pl_treatment)\n\t\t#print(service.family)\n\t\t#print(service.subfamily)\n\t\t#print(service.zone)\n\t\t#print(service.pathology)\n\t\t#print(service.sessions)\n\t\t#print(service.level)\n\t\t#print(service.time)\n\t\t#print(service.price_applied)\n\t\t#print(service.sel_zone)\n\t\t#print(service.treatment)\n\t\t#print()",
"def movielist(actor):\n #query the api endpoint to get id of the actor from the movie db\n actorendpoint='http://api.tmdb.org/3/search/person'\n parameters1={'api_key':TMDB_KEY,'query':actor}\n json_actorid=requests.get(actorendpoint,params=parameters1)\n actoridjson=json.loads(json_actorid.text)\n #get the actor id from the json data\n actorid=str(actoridjson['results'][0]['id'])\n #append the actor id to the api endpoint for scraping movie credits data for the actor\n movieendpoint='https://api.themoviedb.org/3/person/'+actorid+'/movie_credits'\n parameters2={'api_key':TMDB_KEY}\n json_movies_data=requests.get(movieendpoint,params=parameters2)\n actorjson=json_movies_data.json()\n #Get the list of movies from the returned json data\n movieslist=[mov['original_title'] for mov in actorjson['cast']]\n movieids=[]\n print('Fetching '+actor+' Movie List:')\n #use the movie names list to query the movie db api for movie ids\n for movie in movieslist:\n movieendpoint='http://api.tmdb.org/3/search/movie'\n parameters3={'api_key':TMDB_KEY,'query':movie}\n json_movieid=requests.get(movieendpoint,params=parameters3)\n movieidjson=json_movieid.json()\n movieid=str(movieidjson['results'][0]['id'])\n movieids.append(movieid)\n print('.',end='')\n print()\n #return the movie names and movie ids lists\n return movieslist,movieids",
"def populate_list(movies, debug_on=False):\n global KEY\n movie_objects = []\n\n # Go through each title to find and generate each movie instance.\n for i in range(0, len(movies)):\n query = movies[i].replace(\" \", \"+\")\n movie_exists = False\n\n # Search OMDB site to obtain data and initialize Movie object.\n request = Request('http://www.omdbapi.com/?t=%s' % query)\n try:\n response = urlopen(request)\n data = json.loads(response.read())\n # if data obtained successfully, initialize with data.\n if data.get(\"Title\"):\n movie_objects.append(\n media.Movie(data[\"Title\"],\n data[\"Poster\"],\n data[\"Plot\"])\n )\n movie_exists = True\n # On failure to retrieve data,\n # initialize Movie object with set default values.\n else:\n movie_objects.append(\n media.Movie(\n movies[i],\n \"images/notFound.png\",\n \"Movie Data not found: %s\" % movies[i],\n \"https://www.youtube.com/watch?v=GfAnyT9QitU\"\n )\n )\n print ('DataError: could not find movie \"%s\" in database'\n % movies[i])\n # On failure to connect to the OMDB site,\n # initialize Movie object with set default values\n # and notify of URL error.\n except URLError, e:\n movie_objects.append(\n media.Movie(\n movies[i],\n \"images/notFound.png\",\n \"Movie Data not found: %s\" % movies[i],\n \"https://www.youtube.com/watch?v=GfAnyT9QitU\"\n )\n )\n print 'URLError: could not access site.', e\n\n # If the data was collected successfully,\n # proceed with collection of trailer url.\n if movie_exists:\n video = Request(\n 'https://www.googleapis.com/youtube/v3/search?part=id&q=' +\n query +\n '+trailer&max-results=1&key=' + KEY)\n # Search YouTube to obtain trailer url.\n try:\n response = urlopen(video)\n vid_data = json.loads(response.read())\n video = vid_data['items'][0]\n movie_objects[i].trailer_youtube_url = (\n \"https://www.youtube.com/watch?v=\" +\n video['id']['videoId'])\n # On failure to connect to YouTube,\n # set trailer url to default.\n except URLError, e:\n movie_objects[i].trailer_youtube_url = (\n \"https://www.youtube.com/watch?v=GfAnyT9QitU\")\n print ('URLError: Could not access site'\n 'to retrieve video:', e)\n\n # If debug flag set to True,\n # print the new Movie instance's data to console.\n if debug_on:\n movie_objects[i].debug_print()\n\n return movie_objects"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Resize and subtract mean from video input
|
def preprocess_input(video):
intervals = np.ceil(np.linspace(0, video.shape[0] - 1, 16)).astype(int)
frames = video[intervals]
# Reshape to 128x171
reshape_frames = np.zeros((frames.shape[0], 128, 171, frames.shape[3]))
for i, img in enumerate(frames):
img = imresize(img, (128, 171), 'bicubic')
reshape_frames[i, :, :, :] = img
mean_path = get_file('c3d_mean.npy',
C3D_MEAN_PATH,
cache_subdir='models',
md5_hash='08a07d9761e76097985124d9e8b2fe34')
# Subtract mean
mean = np.load(mean_path)
reshape_frames -= mean
# Crop to 112x112
reshape_frames = reshape_frames[:, 8:120, 30:142, :]
# Add extra dimension for samples
reshape_frames = np.expand_dims(reshape_frames, axis=0)
return reshape_frames
|
[
"def video_mean(self):\r\n self.imganalysis_averageimage = np.mean(self.videostack, axis = 0)\r\n self.pw_averageimage.setImage(self.imganalysis_averageimage)\r\n self.samplingrate_cam = self.Spincamsamplingrate.value() \r\n self.cam_time_label = np.arange(self.videostack.shape[0])/self.samplingrate_cam\r\n \r\n fig = plt.figure(figsize=(8.0, 5.8))\r\n fig.suptitle(\"Mean intensity of raw video\")\r\n plt.imshow(self.imganalysis_averageimage)\r\n fig.savefig(os.path.join(self.main_directory, 'Analysis results//Mean intensity of raw video.png'), dpi=1000)\r\n plt.show()\r\n \r\n self.mean_camera_counts = []\r\n for i in range(self.videostack.shape[0]):\r\n self.mean_camera_counts.append(np.mean(self.videostack[i]))\r\n \r\n fig2, ax2 = plt.subplots(figsize=(8.0, 5.8))\r\n fig2.suptitle(\"Mean intensity trace of raw video\")\r\n plt.plot(self.cam_time_label, self.mean_camera_counts)\r\n ax2.set_xlabel('time(s)')\r\n ax2.set_ylabel('Pixel values')\r\n fig2.savefig(os.path.join(self.main_directory, 'Analysis results//Mean intensity trace of raw video.png'), dpi=1000)\r\n plt.show()",
"def segment(self):\n frames = []\n for i, frame in enumerate(self.video):\n frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)\n frames.append(frame)\n frames = np.array(frames)\n avg = frames.mean(axis=0)\n avg_blur = cv2.GaussianBlur(avg, (0, 0), 3)\n segmentation = []\n cv2.startWindowThread()\n cv2.namedWindow(\"segmentation\")\n for frame in frames:\n sub = (avg_blur - 30) - cv2.GaussianBlur(frame, (0, 0), 3)\n segmentation.append(sub)\n cv2.imshow(\"segmentation\", sub)\n cv2.waitKey(1)\n self.segmentation = np.array(segmentation)",
"def preprocess_frame(self, frame):\n\n # Greyscale frame\n img = np.mean(frame,-1)\n\n # Normalize Pixel Values\n img = img/255.0\n\n # Remove black bar at the bottom\n img = img[:-12]\n\n # Resize\n #img = misc.imresize(img, (resized_image_res))\n\n return img",
"def process_frame(self, frame):\n frame = numpy.float32(frame)\n if self.avg_frame is None:\n self.avg_frame = frame\n else:\n self.avg_frame = cv2.accumulateWeighted(frame, self.avg_frame,\n ALPHA)\n return cv2.convertScaleAbs(self.avg_frame)",
"def update_video(self):\r\n if not self.curr_frame < self.min_frame and not self.curr_frame > self.max_frame:\r\n frame = self.frames[self.curr_frame]\r\n self.photo = PIL.ImageTk.PhotoImage(image = PIL.Image.fromarray(frame))\r\n self.video_display.create_image(0, 0, image = self.photo, anchor = tk.NW)\r\n self.curr_frame = self.curr_frame + 1\r\n else:\r\n self.curr_frame = self.min_frame\r\n\r\n self.video_slider.advance_slider(self.curr_frame)",
"def difference_render(func, device=0, dist=10):\n\n cap = cv2.VideoCapture(device)\n _, last_frame = cap.read()\n\n while True:\n _, this_frame = cap.read()\n cv2.imshow(\n 'frame',\n func(\n np.abs(\n this_frame.astype(np.int) - last_frame.astype(np.int)).astype(np.uint8)))\n last_frame = np.roll(this_frame, dist)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n cap.release()\n cv2.destroyAllWindows()",
"def maxsquare(self):\n # This ffmpeg filter can throw the error: \"Padded dimensions cannot be smaller than input dimensions.\" since the preview is off by one. Add one here to make sure.\n # FIXME: not sure where in some filter chains this off-by-one error is being introduced, but probably does not matter since it does not affect any annotations \n # and since the max square always preserves the scale and the upper left corner of the source video. \n # FIXME: this may trigger an inefficient resizing operation during load()\n if not self.issquare():\n d = max(self.shape())\n self._ffmpeg = self._ffmpeg.filter('pad', d+1, d+1, 0, 0)\n self.shape(shape=(d+1, d+1))\n return self.crop(vipy.geometry.BoundingBox(xmin=0, ymin=0, width=int(d), height=int(d)))\n else:\n return self",
"def _resize(self):\n avg_frames = 87 #this is the average image frame length in the entire dataset\n for i in range(len(self.data)):\n image = self.data[i]\n self.data[i] = resize(image, width=avg_frames, height=len(image))",
"def watch(self, name=None, roi=None, pad=10):\n if name is None or isinstance(name, int):\n with h5py.File(self.data_file, 'r+') as h:\n keys = list(h['meanmovs'].keys())\n if name is None:\n for i,k in enumerate(keys):\n print('{}\\t{}'.format(i,k))\n return None\n else:\n name = keys[name]\n\n mov = self.get_meanmov(name=name)\n if roi is not None:\n roi = self.get_roi()[roi]\n aw = np.argwhere(roi)\n ymin,xmin = np.min(aw,axis=0) - pad\n ymax,xmax = np.max(aw,axis=0) + pad\n ymin,xmin,ymax,xmax = [int(np.round(i)) for i in [ymin,xmin,ymax,xmax]]\n ymin,xmin = [max(0,i) for i in [ymin,xmin]]\n ymax = min(self.shape[1], ymax)\n xmax = min(self.shape[0], xmax)\n mov = mov[:,ymin:ymax,xmin:xmax]\n\n mov.play()\n return mov",
"def on_roi_param_changed(self):\r\n limits = self.get_roi_limits()\r\n x_start, x_end, y_start, y_end, z_start, z_end = self.clip_roi_box_vals(limits)\r\n x_size = x_end - x_start\r\n y_size = y_end - y_start\r\n z_size = z_end - z_start\r\n self.update_est_data_size(z_size, y_size, x_size)",
"def resizeVideo(self, width, height):\n if self.vid_opened:\n img = self.image_holder.resize(width, height)\n self.changeFrameTo(img)",
"def bin_video(video, binning):\n return np.array([np.mean([\n frame[\n ox:-binning-np.shape(video)[1]%binning:binning, \n oy:-binning-np.shape(video)[2]%binning:binning\n ]\n for ox in range(binning) \n for oy in range(binning)\n ], axis=0) for frame in video]).astype(np.int16)",
"def __resize(self, frame):\n height, width, layers = frame.shape\n frame = cv2.resize(frame, (int(width/self.__SCALE_REATIO), int(height/self.__SCALE_REATIO))) \n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n gray = np.float32(gray)\n\n return frame, gray",
"def on_window_resize(self, event):\n image_width = event.width\n image_height = int(event.width / self.aspect_ratio)\n\n if image_height > event.height:\n image_height = event.height\n image_width = int(event.height * self.aspect_ratio)\n\n self.cv_displayed_image = cv2.resize(self.cv_image, (image_width, image_height))\n self.zoom_ratio = self.cv_displayed_image.shape[1] / self.cv_image.shape[1]\n self.add_rectangles()\n self.show_cv_image(self.cv_displayed_image)",
"def video_set(self, event):\n self.filter.set(10)\n self.update_image(0)",
"def demostrating_video_stab(filename, new_size=(320, 240), tracking_mode=True):\n if tracking_mode:\n from .curve import tracking\n\n def decorator(func):\n funcs = {}\n for i in range(4):\n @tracking(track_len=20, detect_interval=5)\n def f(prev, cur):\n return func(prev, cur)\n funcs[i] = f\n return funcs\n\n @decorator\n def tracked(prev, cur):\n return get_grey_images(prev, cur)\n\n print('Video ' + filename + ' processing')\n R = get_cov_from_video(filename, new_size)*1e-2\n Q, P = np.diag([1e-8, 1e-7, 4e-3, 1e-7, 1e-8, 4e-3]), np.eye(6)\n F, H = np.eye(6), np.eye(6)\n X = np.zeros((6, 1))\n kf_6 = KalmanFilterND(X, F, H, P, Q, R)\n # -----------------------------------------------------------------\n R = np.ones((2, 2))*1e-6\n Q, P = np.diag([1e-3, 1e-3]), np.eye(2)\n H = np.eye(2)\n F = np.eye(2)\n X = np.zeros((2, 1))\n kf_2 = KalmanFilterND(X, F, H, P, Q, R)\n # ------------------------------------------------------------------\n R = np.ones((3, 3))*1e-6\n F = np.eye(3)\n H = np.eye(3)\n X = np.zeros(3)\n P = np.ones(3)\n Q = np.diag([4e-3, 4e-3, 1e-7])\n kf_3 = KalmanFilterND(X, F, H, P, Q, R)\n # ------------------------------------------------------------------\n cap, n_frames, fps, prev = video_open(filename, new_size)\n\n old, smoothed_affine, smoothed_translational, smoothed_similarity = [], [], [], []\n # video writer args\n fourcc = cv2.VideoWriter_fourcc(*'H264')\n fps = cap.get(5)\n video_stab = filename[:-4] + 'stab.mp4'\n out = cv2.VideoWriter(video_stab, fourcc, fps, new_size)\n cumulative_transform = np.insert(np.array([[1, 0], [0, 1]]), [2], [0], axis=1)\n last_affine = cumulative_transform.copy()\n cumulative_smoothed1 = cumulative_transform.copy()\n cumulative_smoothed2 = cumulative_transform.copy()\n cumulative_smoothed3 = cumulative_transform.copy()\n for i in range(n_frames-1):\n # read frames\n ret2, cur = cap.read()\n cur = cv2.resize(cur, new_size, cv2.INTER_AREA)\n # get affine transform between frames\n affine = cv2.estimateRigidTransform(prev, cur, False)\n # Sometimes there is no Affine transform between frames, so we use the last\n if not np.all(affine):\n affine = last_affine\n last_affine = affine\n # Accumulated frame to frame original transform\n cumulative_transform = sum_2_affine(cumulative_transform, affine)\n # save original affine for comparing with stabilized\n old.append(cumulative_transform)\n z = np.array([affine.ravel()]).T # (a1, a2, b1, a3, a4, b2)^T\n z1 = affine[:2, 2:] # b1, b2\n z2 = affine[0][2], affine[1][2], math.atan2(affine[1][0], affine[0][0]) # (b1, b2, a)\n # predict new vector and update\n x1 = kf_6.predict_and_update(z)\n x2 = kf_2.predict_and_update(z1)\n x3 = kf_3.predict_and_update(z2)\n\n # create new Affine transform\n\n smoothed_affine_motion = np.float32(x1.reshape(2, 3))\n affine_motion = compensating_transform(smoothed_affine_motion, cumulative_transform)\n\n a11, a22 = math.cos(x3[2]), math.sin(x3[2])\n smoothed_similarity_motion = np.array([[a11, -a22, x3[0]], [a22, a11, x3[1]]])\n similarity_motion = compensating_transform(smoothed_similarity_motion, cumulative_transform)\n\n smoothed_translational_motion = np.array([[1, 0, x2[0]], [0, 1, x2[1]]])\n translational_motion = compensating_transform(smoothed_translational_motion, cumulative_transform)\n\n # get stabilized frame\n cur1 = warp(cur, affine_motion, new_size)\n cur2 = warp(cur, translational_motion, new_size)\n cur3 = warp(cur, similarity_motion, new_size)\n if i > 1 and tracking_mode:\n tr1, tr2 = tracked[0](prev, cur), tracked[1](prev1, cur1)\n tr3, 
tr4 = tracked[2](prev2, cur2), tracked[3](prev3, cur3)\n else:\n tr1, tr2, tr3, tr4 = cur, cur1, cur2, cur3\n # Accumulated frame to frame smoothed transform\n # smoothed cumulative transform affine model\n cumulative_smoothed1 = sum_2_affine(cumulative_smoothed1, smoothed_affine_motion)\n smoothed_affine.append(cumulative_smoothed1)\n # smoothed cumulative transform similarity model\n cumulative_smoothed2 = sum_2_affine(cumulative_smoothed2, smoothed_similarity_motion)\n smoothed_similarity.append(cumulative_smoothed2)\n # smoothed cumulative transform translational model\n cumulative_smoothed3 = sum_2_affine(cumulative_smoothed3, smoothed_translational_motion)\n smoothed_translational.append(cumulative_smoothed3)\n # concatenate original and stabilized frames\n result = concatenate_n_images(tr1, tr2, tr3, tr4)\n cv2.imshow('Original/smoothed', result)\n out.write(tr2)\n prev, prev1 = tr1, tr2\n prev, prev1, prev2, prev3 = tr1, tr2, tr3, tr4\n if cv2.waitKey(np.int(1000//fps)) & 0xFF == ord('q'):\n break\n\n cap.release()\n out.release()\n cv2.destroyAllWindows()\n\n # plot affine transform params trajectories\n trajectory(old, 'r')\n trajectory(smoothed_affine, 'g')\n trajectory(smoothed_similarity, 'b')\n trajectory(smoothed_translational, 'y')\n\n plt.show()",
"def shrink(img, win_size):\n res = np.zeros((img.shape[0] // win_size, img.shape[1] // win_size))\n\n for i in range(res.shape[0]):\n for j in range(res.shape[1]):\n res[i, j] = np.mean(img[i * win_size:(i + 1) * win_size,\n j * win_size:(j + 1) * win_size])\n\n return res",
"def frame_mean ( frame , expression , cuts = '' ) : \n return frame_moment ( frame , order = 1 , expression = expression , cuts = cuts )",
"def process_image(self):\n # resize image to fit video resolution without cropping\n scale = min(self._video_resolution[0] / self._img.width, self._video_resolution[1] / self._img.height)\n self._img_scaled = self._img.resize((int(self._img.width * scale), int(self._img.height * scale)))\n\n # calculate image offset to center image in video\n self._img_offset = ((self._video_resolution[0] - self._img_scaled.width) // 2,\n (self._video_resolution[1] - self._img_scaled.height) // 2)\n\n self._img_processed = True"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Instantiates a C3D Keras model
|
def C3D(weights='sports1M'):
if weights not in {'sports1M', None}:
        raise ValueError('weights should be either sports1M or None')
if K.image_data_format() == 'channels_last':
        shape = (16, 112, 112, 3)
else:
shape = (3, 16, 112, 112)
model = Sequential()
model.add(Conv3D(64, 3, activation='relu', padding='same', name='conv1', input_shape=shape))
model.add(MaxPooling3D(pool_size=(1,2,2), strides=(1,2,2), padding='same', name='pool1'))
model.add(Conv3D(128, 3, activation='relu', padding='same', name='conv2'))
model.add(MaxPooling3D(pool_size=(2,2,2), strides=(2,2,2), padding='valid', name='pool2'))
model.add(Conv3D(256, 3, activation='relu', padding='same', name='conv3a'))
model.add(Conv3D(256, 3, activation='relu', padding='same', name='conv3b'))
model.add(MaxPooling3D(pool_size=(2,2,2), strides=(2,2,2), padding='valid', name='pool3'))
model.add(Conv3D(512, 3, activation='relu', padding='same', name='conv4a'))
model.add(Conv3D(512, 3, activation='relu', padding='same', name='conv4b'))
model.add(MaxPooling3D(pool_size=(2,2,2), strides=(2,2,2), padding='valid', name='pool4'))
model.add(Conv3D(512, 3, activation='relu', padding='same', name='conv5a'))
model.add(Conv3D(512, 3, activation='relu', padding='same', name='conv5b'))
model.add(ZeroPadding3D(padding=(0,1,1)))
model.add(MaxPooling3D(pool_size=(2,2,2), strides=(2,2,2), padding='valid', name='pool5'))
model.add(Flatten())
model.add(Dense(4096, activation='relu', name='fc6'))
model.add(Dropout(0.5))
model.add(Dense(4096, activation='relu', name='fc7'))
model.add(Dropout(0.5))
model.add(Dense(487, activation='softmax', name='fc8'))
if weights == 'sports1M':
model.load_weights(cfg.c3d_model_weights)
return model
|
[
"def __generateC3DModel(self, input_shape, custom_weights_path):\n\n c3dModel = C3D(input_shape=input_shape, weights_path=custom_weights_path)\n\n model = c3dModel.generateModel()\n\n new_model = tf.keras.Model(inputs=model.input, outputs=model.layers[16].output)\n\n return new_model",
"def build_model(self, num_classes, num_filters, kernel_size, input_shape, depth):\n model = Sequential()\n _3d_input_shape = (input_shape[0], input_shape[1], depth, input_shape[2])\n kernel_size = (kernel_size[0], kernel_size[1], kernel_size[2])\n model.add(Conv3D(num_filters[0], kernel_size=kernel_size, input_shape=_3d_input_shape\n , padding='same'))\n model.add(Activation('relu'))\n model.add(Conv3D(num_filters[0], kernel_size=kernel_size, padding='same'))\n model.add(Activation('relu'))\n model.add(MaxPooling3D(pool_size=(3, 3, 3), padding='same'))\n model.add(Conv3D(num_filters[1], kernel_size=kernel_size, padding='same'))\n model.add(Activation('relu'))\n model.add(Conv3D(num_filters[1], kernel_size=kernel_size, padding='same'))\n model.add(Activation('relu'))\n model.add(MaxPooling3D(pool_size=(3, 3, 3), padding='same'))\n model.add(Flatten())\n model.add(Dense(512, activation='relu'))\n model.add(Dropout(0.25))\n model.add(Dense(num_classes, activation='softmax'))\n return model",
"def P3D19(**kwargs):\r\n model = P3D(Bottleneck, [2, 3], **kwargs)\r\n\r\n return model",
"def P3D35(**kwargs):\r\n model = P3D(Bottleneck, [4, 5], **kwargs)\r\n\r\n return model",
"def build_model(classes, height, width):\n print(\"> Building Keras neural network...\")\n network_model = model.simple_3(classes=classes, height=height, width=width)\n return network_model",
"def init_model_scratch(args):\n img_size = args.img_size\n channels = args.channels\n num_class = args.num_class\n inputs = Input(shape=(img_size, img_size, channels), name='input')\n conv1 = Conv2D(16, (3,3), padding='same', activation='relu', name='conv1')(inputs)\n pool1 = MaxPooling2D(name='pool1')(conv1)\n conv2 = Conv2D(32, (3,3), padding='same', activation='relu', name='conv2')(pool1)\n pool2 = MaxPooling2D(name='pool2')(conv2)\n conv3 = Conv2D(64, (3,3), padding='same', activation='relu', name='conv3')(pool2)\n pool3 = MaxPooling2D(name='pool3')(conv3)\n flatten = Flatten(name='flatten')(pool3)\n fc1 = Dense(units=128, activation='relu', name='fc1')(flatten)\n dropout = Dropout(rate=0.5, name='dropout')(fc1)\n predictions = Dense(units=num_class, activation='softmax', name='prediction')(dropout)\n model = models.Model(inputs=inputs, outputs=predictions)\n model.compile(\n optimizer=optimizers.Adam(),\n loss=\"categorical_crossentropy\",\n metrics=[\"accuracy\"]\n )\n\n return model",
"def construct_model():\n #Building the MLP\n \n model = Sequential()\n model.add(Flatten(input_shape=(64,64,3)))\n model.add(Dense(1000, activation=\"relu\"))\n model.add(Dropout(0.2))\n model.add(Dense(512, activation=\"relu\"))\n model.add(Dropout(0.2))\n model.add(Dense(3, activation=\"softmax\"))\n\n #8 Compilation of MLP\n model.compile(loss='categorical_crossentropy',\n optimizer='adam', \n metrics=['accuracy'])\n \n print(model.summary())\n print('Initialise and Compilation of CNN complete')\n\n \n \n return model",
"def unet_model_3d(n_labels,shape,W,lr=1e-5, pool_size=(2, 2, 2), initial_learning_rate=0.00001, deconvolution=False,\n depth=3, n_base_filters=16, include_label_wise_dice_coefficients=False, metrics=dice_coefficient,\n batch_normalization=False, activation_name=\"sigmoid\"):\n inputs = Input(shape)\n print('Input shape:',shape)\n current_layer = inputs\n levels = list()\n\n # add levels with max pooling\n for layer_depth in range(depth):\n layer1 = create_convolution_block(input_layer=current_layer, n_filters=n_base_filters*(2**layer_depth),\n batch_normalization=batch_normalization)\n layer2 = create_convolution_block(input_layer=layer1, n_filters=n_base_filters*(2**layer_depth)*2,\n batch_normalization=batch_normalization)\n if layer_depth < depth - 1:\n current_layer = MaxPooling3D(pool_size=pool_size)(layer2)\n levels.append([layer1, layer2, current_layer])\n else:\n current_layer = layer2\n levels.append([layer1, layer2])\n\n # add levels with up-convolution or up-sampling\n for layer_depth in range(depth-2, -1, -1):\n up_convolution = get_up_convolution(pool_size=pool_size, deconvolution=deconvolution,\n n_filters=current_layer._keras_shape[4])(current_layer)\n concat = concatenate([up_convolution, levels[layer_depth][1]], axis=4)\n current_layer = create_convolution_block(n_filters=levels[layer_depth][1]._keras_shape[1],\n input_layer=concat, batch_normalization=batch_normalization)\n current_layer = create_convolution_block(n_filters=levels[layer_depth][1]._keras_shape[1],\n input_layer=current_layer,\n batch_normalization=batch_normalization)\n\n\n if n_labels>1:\n final_convolution = Conv3D(n_labels, 1)(current_layer)\n o = Reshape((shape[0] * shape[1]* shape[2],n_labels), input_shape=(shape[0], shape[1], shape[2],n_labels))(final_convolution)\n activation_name=\"softmax\"\n# o = (Permute((2, 1)))(o)\n if n_labels==1:\n o = Conv3D(n_labels, (1, 1, 1))(current_layer)\n activation_name=\"sigmoid\"\n act = Activation(activation_name)(o)\n model = Model(inputs=inputs, outputs=act)\n\n if not isinstance(metrics, list):\n metrics = [metrics]\n\n if include_label_wise_dice_coefficients and n_labels > 1:\n label_wise_dice_metrics = [get_label_dice_coefficient_function(index) for index in range(n_labels)]\n if metrics:\n metrics = metrics + label_wise_dice_metrics\n else:\n metrics = label_wise_dice_metrics\n if W !='':\n model.load_weights(W)\n if n_labels>1:\n# model.compile(loss=weighted_dice_coefficient_loss, optimizer = Adam(lr = initial_learning_rate) , metrics=metrics )\n model.compile(loss=\"categorical_crossentropy\", optimizer=Adam(lr = lr) , metrics=['accuracy'] )\n if n_labels==1:\n model.compile(loss=\"binary_crossentropy\", optimizer = Adam(lr = lr) , metrics=['accuracy'] )\n# model.compile(optimizer=Adam(lr=initial_learning_rate), loss=dice_coefficient_loss, metrics=metrics)\n model.summary()\n return model",
"def create_model(self):\n # Calculate the input shape\n self.__calculate_the_input_shape()\n\n self.__keras_model = Sequential()\n self.__keras_model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=self.input_shape))\n self.__keras_model.add(Conv2D(64, (3, 3), activation='relu'))\n self.__keras_model.add(MaxPooling2D(pool_size=(2, 2)))\n self.__keras_model.add(Dropout(0.25))\n self.__keras_model.add(Flatten())\n self.__keras_model.add(Dense(128, activation='relu'))\n self.__keras_model.add(Dropout(0.5))\n self.__keras_model.add(Dense(self.__number_of_classes, activation='softmax'))",
"def P3D199(modality='RGB', **kwargs):\n model = P3D(Bottleneck, [3, 8, 36, 3], modality=modality, **kwargs)\n return model",
"def __init__(self):\n super(TestModel, self).__init__(name='test_model')\n self.layer1 = keras.layers.Dense(10, activation='relu')",
"def initialize_model(model_type, input_shapes, architecture):\n if model_type == 'composition':\n model = CompositionModel(input_shapes, **architecture)\n\n elif model_type == 'coverage':\n model = CoverageModel(*input_shapes, **architecture)\n\n else:\n compo_model = initialize_model(\"composition\",\n input_shapes['composition'],\n architecture['composition'])\n cover_model = initialize_model(\"coverage\",\n input_shapes['coverage'],\n architecture['coverage'])\n model = CoCoNet(compo_model, cover_model, **architecture['merge'])\n\n return model.to(DEVICE)",
"def build_model():\n model_weights = np.load('models/sound8.npy').item()\n\n filter_parameters = [{'name': 'conv1', 'num_filters': 16, 'padding': 32,\n 'kernel_size': 64, 'conv_strides': 2,\n 'pool_size': 8, 'pool_strides': 8},\n\n {'name': 'conv2', 'num_filters': 32, 'padding': 16,\n 'kernel_size': 32, 'conv_strides': 2,\n 'pool_size': 8, 'pool_strides': 8},\n\n {'name': 'conv3', 'num_filters': 64, 'padding': 8,\n 'kernel_size': 16, 'conv_strides': 2},\n\n {'name': 'conv4', 'num_filters': 128, 'padding': 4,\n 'kernel_size': 8, 'conv_strides': 2},\n\n {'name': 'conv5', 'num_filters': 256, 'padding': 2,\n 'kernel_size': 4, 'conv_strides': 2,\n 'pool_size': 4, 'pool_strides': 4},\n\n {'name': 'conv6', 'num_filters': 512, 'padding': 2,\n 'kernel_size': 4, 'conv_strides': 2},\n\n {'name': 'conv7', 'num_filters': 1024, 'padding': 2,\n 'kernel_size': 4, 'conv_strides': 2},\n\n {'name': 'conv8', 'num_filters': 1000, 'padding': 0,\n 'kernel_size': 8, 'conv_strides': 2},\n\n {'name': 'conv8_2', 'num_filters': 401, 'padding': 0,\n 'kernel_size': 8, 'conv_strides': 2},\n ]\n\n inputs = Input(shape=(None, 1)) # define inputs\n\n x = inputs\n for layer in filter_parameters:\n if 'conv8' not in layer['name']:\n x = ZeroPadding1D(padding=layer['padding'])(x)\n else:\n x = ZeroPadding1D(padding=layer['padding'])(conv7_layer_output)\n\n conv_layer = Conv1D(layer['num_filters'],\n kernel_size=layer['kernel_size'],\n strides=layer['conv_strides'],\n padding='valid', name=layer['name'])\n\n weights = model_weights[layer['name']]['weights'].reshape(conv_layer.get_weights()[0].shape)\n biases = model_weights[layer['name']]['biases']\n conv_layer.set_weights([weights, biases])\n\n x = conv_layer(x)\n\n if 'conv8' not in layer['name']: # except the last layers\n gamma = model_weights[layer['name']]['gamma']\n beta = model_weights[layer['name']]['beta']\n mean = model_weights[layer['name']]['mean']\n var = model_weights[layer['name']]['var']\n\n batch_norm = BatchNormalization()\n batch_norm.set_weights([gamma, beta, mean, var])\n x = batch_norm(x)\n x = Activation('relu')(x)\n if 'pool_size' in layer:\n x = MaxPooling1D(pool_size=layer['pool_size'],\n strides=layer['pool_strides'],\n padding='valid')(x)\n if layer['name'] == 'conv7':\n conv7_layer_output = x\n elif layer['name'] == 'conv8':\n imagenet_output = x\n elif layer['name'] == 'conv8_2':\n places_output = x\n\n model = Model(inputs=inputs,outputs=[imagenet_output, places_output])\n return model",
"def c3d_clstm(inputs, num_classes, reuse, is_training):\n with tf.device('/gpu:0'):\n with tf.variable_scope('Conv3D_ConvLSTM', reuse=reuse):\n tl.layers.set_name_reuse(reuse)\n if inputs.get_shape().ndims!=5:\n raise Exception(\"The input dimension of 3DCNN must be rank 5\")\n network_input = tl.layers.InputLayer(inputs, name='input_layer') #Input Layer\n # 3DCNN-BN Layer 1\n conv3d_1 = tl.layers.Conv3dLayer(network_input,\n act=tf.identity,\n shape=[3,3,3,3,64],\n strides=[1,1,1,1,1],\n padding='SAME',\n name='Conv3d_1')\n conv3d_1 = tl.layers.BatchNormLayer(layer=conv3d_1, \n act=tf.nn.relu,\n is_train=is_training,\n name='BatchNorm_1')\n pool3d_1 = tl.layers.PoolLayer(conv3d_1,\n ksize=[1,1,2,2,1],\n strides=[1,1,2,2,1],\n padding='SAME',\n pool = tf.nn.max_pool3d,\n name='Pool3D_1')\n # 3DCNN-BN Layer 2\n conv3d_2_3x3 = tl.layers.Conv3dLayer(pool3d_1, \n act=tf.identity, \n shape=[3,3,3,64,128], \n strides=[1,1,1,1,1],\n padding='SAME',\n name='Conv3d_2_3x3')\n conv3d_2_3x3 = tl.layers.BatchNormLayer(layer=conv3d_2_3x3, \n act=tf.nn.relu,\n is_train=is_training, \n name='BatchNorm_2_3x3')\n pool3d_2 = tl.layers.PoolLayer(conv3d_2_3x3,\n ksize=[1,2,2,2,1],\n strides=[1,2,2,2,1],\n padding='SAME',\n pool = tf.nn.max_pool3d,\n name='Pool3D_2')\n # 3DCNN-BN Layer 3\n conv3d_3a_3x3 = tl.layers.Conv3dLayer(pool3d_2, \n act=tf.identity, \n shape=[3,3,3,128,256],\n strides=[1,1,1,1,1],\n padding='SAME',\n name='Conv3d_3a_3x3')\n conv3d_3b_3x3 = tl.layers.Conv3dLayer(conv3d_3a_3x3, \n act=tf.identity, \n shape=[3,3,3,256,256],\n strides=[1,1,1,1,1],\n padding='SAME',\n name='Conv3d_3b_3x3')\n conv3d_3_3x3 = tl.layers.BatchNormLayer(layer=conv3d_3b_3x3, \n act=tf.nn.relu,\n is_train=is_training, \n name='BatchNorm_3_3x3')\n# pool3d_3 = tl.layers.PoolLayer(conv3d_3_3x3,\n# ksize=[1,2,2,2,1],\n# strides=[1,2,2,2,1],\n# padding='SAME',\n# pool = tf.nn.max_pool3d,\n# name='Pool3D_3')\n # ConvLstm Layer\n shape3d = conv3d_3_3x3.outputs.get_shape().as_list()\n num_steps = shape3d[1]\n convlstm1 = tl.layers.RNNLayer(conv3d_3_3x3,\n cell_fn=clstm.ConvLSTMCell,\n cell_init_args={'state_is_tuple':False},\n n_hidden=256,\n initializer=tf.random_uniform_initializer(-0.1, 0.1),\n n_steps=num_steps,\n return_last=False,\n return_seq_2d=False,\n name='clstm_layer_1')\n convlstm2 = tl.layers.RNNLayer(convlstm1,\n cell_fn=clstm.ConvLSTMCell,\n cell_init_args={'state_is_tuple':False},\n n_hidden=384,\n initializer=tf.random_uniform_initializer(-0.1, 0.1),\n n_steps=num_steps,\n return_last=True,\n return_seq_2d=False,\n name='clstm_layer_2')\n # SPP Layer 1\n spp_bin_1 = tl.layers.PoolLayer(convlstm2,\n ksize=[1,28,28,1],\n strides=[1,28,28,1],\n padding='SAME',\n pool = tf.nn.max_pool,\n name='SPP_1')\n spp_bin_1 = tl.layers.FlattenLayer(spp_bin_1, \n name='Flatten_SPP_1')\n spp_bin_2 = tl.layers.PoolLayer(convlstm2,\n ksize=[1,14,14,1],\n strides=[1,14,14,1],\n padding='SAME',\n pool = tf.nn.max_pool,\n name='SPP_2')\n spp_bin_2 = tl.layers.FlattenLayer(spp_bin_2, \n name='Flatten_SPP_2')\n spp_bin_4 = tl.layers.PoolLayer(convlstm2,\n ksize=[1,7,7,1],\n strides=[1,7,7,1],\n padding='SAME',\n pool = tf.nn.max_pool,\n name='SPP_4')\n spp_bin_4 = tl.layers.FlattenLayer(spp_bin_4, \n name='Flatten_SPP_4')\n spp_bin_7 = tl.layers.PoolLayer(convlstm2,\n ksize=[1,4,4,1],\n strides=[1,4,4,1],\n padding='SAME',\n pool = tf.nn.max_pool,\n name='SPP_8')\n spp_bin_7 = tl.layers.FlattenLayer(spp_bin_7, \n name='Flatten_SPP_7')\n concat_spp = tl.layers.ConcatLayer(layer=[spp_bin_1,\n spp_bin_2,\n spp_bin_4,\n spp_bin_7],\n 
concat_dim=1,\n name='Concat_SPP')\n # FC Layer 1\n classes = tl.layers.DropconnectDenseLayer(concat_spp, \n keep=0.5,\n n_units=num_classes,\n act=tf.identity,\n name='Classes')\n return classes",
"def create_model(self, parameters={}):\n if self.model_name == \"DBSCAN\":\n self.model = cluster.DBSCAN()\n elif self.model_name == \"KMeans\":\n self.model = cluster.KMeans()\n elif self.model_name == \"hdbscan\":\n self.model_glo = hdbscan\n self.model = self.model_glo.HDBSCAN()\n\n for key, value in parameters.items():\n setattr(self.model, key, value)\n return",
"def init_specific_model(model_type, img_size, latent_dim):\n model_type = model_type.lower().capitalize()\n if model_type not in MODELS:\n err = \"Unkown model_type={}. Possible values: {}\"\n raise ValueError(err.format(model_type, MODELS))\n\n encoder = get_encoder(model_type)\n decoder = get_decoder(model_type)\n model = VAE(img_size, encoder, decoder, latent_dim)\n model.model_type = model_type # store to help reloading\n return model",
"def triple_cnn(model_params, shape):\n from keras.layers import Dense, Dropout, Flatten, InputLayer, MaxPooling2D, ZeroPadding2D\n from keras.layers.normalization import BatchNormalization\n from keras.layers.convolutional import Conv2D\n from keras.models import Sequential\n\n model = Sequential()\n\n model.add(InputLayer(input_shape=(shape[1], shape[2], shape[3])))\n model.add(BatchNormalization())\n\n model.add(Conv2D(model_params['conv_filters'],\n model_params['kernel_size'],\n strides=model_params['kernel_stride'],\n activation=model_params['cnn_activation'],\n padding='same'))\n model.add(BatchNormalization())\n\n model.add(Conv2D(model_params['conv_filters'],\n model_params['kernel_size'],\n strides=model_params['kernel_stride'],\n activation=model_params['cnn_activation'],\n padding='same'))\n model.add(BatchNormalization())\n\n model.add(Conv2D(model_params['conv_filters'],\n model_params['kernel_size'],\n strides=model_params['kernel_stride'],\n activation=model_params['cnn_activation'],\n padding='same'))\n model.add(BatchNormalization())\n\n model.add(MaxPooling2D(padding='same'))\n model.add(Dropout(0.5))\n\n model.add(Flatten())\n\n model.add(Dense(model_params['dense_1'], activation=model_params['activate_1']))\n model.add(Dense(model_params['dense_1'], activation=model_params['activate_1']))\n model.add(Dense(28, activation='softmax'))\n\n model.compile(loss=model_params['loss'],\n optimizer=model_params['optimizer'],\n metrics=['accuracy'])\n\n print(model.summary())\n return model",
"def __init__(self, model_type, cv_param_list, cv_param_name, **model_kwargs):\n self.model_type = model_type\n self.model_kwargs = model_kwargs\n self.cv_params = cv_param_list\n self.cv_param_name = cv_param_name\n self.cv_model = None\n self.cv_hyperparams = {}",
"def create_model(self):\n\n # The data\n X = theano.shared(np.zeros((self.num_training_samples, self.num_pred)))\n y = theano.shared(np.zeros(self.num_training_samples, dtype=int))\n\n self.shared_vars = {\n 'model_input': X,\n 'model_output': y\n }\n\n model = pm.Model()\n with model:\n # Priors\n alpha = np.ones(self.num_cats)\n pi = pm.Dirichlet('pi', alpha, shape=self.num_cats)\n mu = pm.Normal('mu', mu=0, sd=100, shape=(self.num_cats, self.num_pred))\n sigma = pm.HalfNormal('sigma', 100, shape=(self.num_cats, self.num_pred))\n\n # Assign classes to data points\n z = pm.Categorical('z', pi, shape=self.num_training_samples, observed=y)\n\n # The components are independent and normally distributed\n xi = pm.Normal('xi', mu=mu[z], sd=sigma[z], observed=X)\n\n return model"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Populate SIGMA, SIGMA_SPECTRUM, WEIGHT, WEIGHT_SPECTRUM columns in the MS
|
def apply_weights(self, rms):
if rms.shape != self.data.shape:
        abort('The rms array used to populate SIGMA, SIGMA_SPECTRUM, WEIGHT, and WEIGHT_SPECTRUM does not have the expected dimensions:\n'\
              'rms.shape = '+str(rms.shape)+'. Expected dimensions: '+str(self.data.shape))
tab = pt.table(self.msname, readonly=False,ack=False)
tab.putcol("SIGMA", rms[:,0,:])
tab.putcol("SIGMA_SPECTRUM", rms)
tab.putcol("WEIGHT", 1/rms[:,0,:]**2)
tab.putcol("WEIGHT_SPECTRUM", 1/rms**2)
tab.close()
    info('SIGMA and WEIGHT columns updated based on thermal noise only; no frequency dependence (e.g., tropospheric opacity) yet. '+ \
         'SIGMA_SPECTRUM and WEIGHT_SPECTRUM are populated with frequency-independent SIGMA and WEIGHT values.')
|
[
"def defineSigmaLevels():\n # A and B values for the definition of sigma levelist\n # Since there are 72 model levels, there are 73 half levels, so it is for A and B values\n # the unit of A is hPa!!!!!!!!!!!!\n # from surface to TOA\n A = np.array([\n 0.000000e+00, 4.804826e-02, 6.593752e+00, 1.313480e+01, 1.961311e+01, 2.609201e+01,\n 3.257081e+01, 3.898201e+01, 4.533901e+01, 5.169611e+01, 5.805321e+01, 6.436264e+01,\n 7.062198e+01, 7.883422e+01, 8.909992e+01, 9.936521e+01, 1.091817e+02, 1.189586e+02,\n 1.286959e+02, 1.429100e+02, 1.562600e+02, 1.696090e+02, 1.816190e+02, 1.930970e+02,\n 2.032590e+02, 2.121500e+02, 2.187760e+02, 2.238980e+02, 2.243630e+02, 2.168650e+02,\n 2.011920e+02, 1.769300e+02, 1.503930e+02, 1.278370e+02, 1.086630e+02, 9.236572e+01,\n 7.851231e+01, 6.660341e+01, 5.638791e+01, 4.764391e+01, 4.017541e+01, 3.381001e+01,\n 2.836781e+01, 2.373041e+01, 1.979160e+01, 1.645710e+01, 1.364340e+01, 1.127690e+01,\n 9.292942e+00, 7.619842e+00, 6.216801e+00, 5.046801e+00, 4.076571e+00, 3.276431e+00,\n 2.620211e+00, 2.084970e+00, 1.650790e+00, 1.300510e+00, 1.019440e+00, 7.951341e-01,\n 6.167791e-01, 4.758061e-01, 3.650411e-01, 2.785261e-01, 2.113490e-01, 1.594950e-01,\n 1.197030e-01, 8.934502e-02, 6.600001e-02, 4.758501e-02, 3.270000e-02, 2.000000e-02,\n 1.000000e-02,],dtype=float)\n # reverse A\n A = A[::-1] * 100 # change unit to Pa\n # the unit of B is 1!!!!!!!!!!!!\n # from surfac eto TOA\n B = np.array([\n 1.000000e+00, 9.849520e-01, 9.634060e-01, 9.418650e-01, 9.203870e-01, 8.989080e-01,\n 8.774290e-01, 8.560180e-01, 8.346609e-01, 8.133039e-01, 7.919469e-01, 7.706375e-01,\n 7.493782e-01, 7.211660e-01, 6.858999e-01, 6.506349e-01, 6.158184e-01, 5.810415e-01,\n 5.463042e-01, 4.945902e-01, 4.437402e-01, 3.928911e-01, 3.433811e-01, 2.944031e-01,\n 2.467411e-01, 2.003501e-01, 1.562241e-01, 1.136021e-01, 6.372006e-02, 2.801004e-02,\n 6.960025e-03, 8.175413e-09, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00,\n 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00,\n 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00,\n 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00,\n 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00,\n 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00,\n 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00,\n 0.000000e+00,],dtype=float)\n # reverse B\n B = B[::-1]\n\n return (A, B)",
"def RMS(data):\n data = np.power(data, 2)\n data = np.mean(data, axis=1)\n data = np.sqrt(data)\n return data",
"def __get_sigmas(self):\n stack_sigma = {}\n _stack = self.stack\n\n _file_path = os.path.abspath(os.path.dirname(__file__))\n _database_folder = os.path.join(_file_path, 'reference_data', self.database)\n\n _list_compounds = _stack.keys()\n for _compound in _list_compounds:\n _list_element = _stack[_compound]['elements']\n stack_sigma[_compound] = {}\n for _element in _list_element:\n stack_sigma[_compound][_element] = {}\n _list_isotopes = _stack[_compound][_element]['isotopes']['list']\n _list_file_names = _stack[_compound][_element]['isotopes']['file_names']\n _list_isotopic_ratio = _stack[_compound][_element]['isotopes']['isotopic_ratio']\n _iso_file_ratio = zip(_list_isotopes, _list_file_names, _list_isotopic_ratio)\n stack_sigma[_compound][_element]['isotopic_ratio'] = _list_isotopic_ratio\n\n # _dict_sigma_isotopes_sum = {}\n _sigma_all_isotopes = 0\n _energy_all_isotopes = 0\n for _iso, _file, _ratio in _iso_file_ratio:\n stack_sigma[_compound][_element][_iso] = {}\n # print(_iso, _file, _ratio)\n if _compound in _utilities.h_bond_list:\n if _iso == '1-H':\n _utilities.is_element_in_database(element='H', database='Bonded_H')\n _database_folder_h = os.path.join(_file_path, 'reference_data', 'Bonded_H')\n sigma_file = os.path.join(_database_folder_h, 'H-{}.csv'.format(_compound))\n if _compound == 'ZrH':\n print(\"NOTICE:\\n\"\n \"Your entry {} contains bonded H, and has experimental data available.\\n\"\n \"Therefore, '1-H' cross-section has been replaced by the data \"\n \"reported at https://t2.lanl.gov/nis/data/endf/endfvii-thermal.html\".format(_compound))\n else:\n print(\"NOTICE:\\n\"\n \"Your entry {} contains bonded H, and has experimental data available.\\n\"\n \"Therefore, '1-H' cross-section has been replaced by the data \"\n \"reported at https://doi.org/10.1103/PhysRev.76.1750\".format(_compound))\n else:\n sigma_file = os.path.join(_database_folder, _file)\n else:\n sigma_file = os.path.join(_database_folder, _file)\n _dict = _utilities.get_sigma(database_file_name=sigma_file,\n e_min=self.energy_min,\n e_max=self.energy_max,\n e_step=self.energy_step)\n stack_sigma[_compound][_element][_iso]['energy_eV'] = _dict['energy_eV']\n stack_sigma[_compound][_element][_iso]['sigma_b'] = _dict['sigma_b'] * _ratio\n stack_sigma[_compound][_element][_iso]['sigma_b_raw'] = _dict['sigma_b']\n\n # sigma for all isotopes with their isotopic ratio\n _sigma_all_isotopes += _dict['sigma_b'] * _ratio\n _energy_all_isotopes += _dict['energy_eV']\n\n # energy axis (x-axis) is averaged to take into account differences between x-axis of isotopes\n _mean_energy_all_isotopes = _energy_all_isotopes / len(_list_isotopes)\n stack_sigma[_compound][_element]['energy_eV'] = _mean_energy_all_isotopes\n stack_sigma[_compound][_element]['sigma_b'] = _sigma_all_isotopes\n\n self.stack_sigma = stack_sigma",
"def test_ms_weights(sma_mir, tmp_path, onewin):\n from casacore import tables\n\n ms_uv = UVData()\n testfile = os.path.join(tmp_path, \"out_ms_weights.ms\")\n if onewin:\n sma_mir.select(freq_chans=np.arange(16384))\n\n sma_mir.nsample_array[0, :, :] = np.tile(\n np.arange(sma_mir.Nfreqs / sma_mir.Nspws), (sma_mir.Npols, sma_mir.Nspws)\n ).T\n sma_mir.write_ms(testfile)\n\n tb_main = tables.table(testfile, readonly=False, ack=False)\n tb_main.removecols(\"WEIGHT_SPECTRUM\")\n tb_main.close()\n\n ms_uv.read(testfile, use_future_array_shapes=True)\n\n # Check that the values do indeed match expected (median) value\n assert np.all(ms_uv.nsample_array == np.median(sma_mir.nsample_array))\n\n ms_uv.read(testfile, read_weights=False, use_future_array_shapes=True)\n # Check that the values do indeed match expected (median) value\n assert np.all(ms_uv.nsample_array == 1.0)",
"def feature_scale(self):\n \n #-------------------------------------------------------------------------\n # List of quantitative features to be standardized\n #-------------------------------------------------------------------------\n list_quant_feature = ['Quantity','UnitPrice']\n self._list_quant_feature = list_quant_feature.copy()\n\n #-------------------------------------------------------------------------\n # Standardization is applied over quantitative features in list.\n #-------------------------------------------------------------------------\n self.std_scale, X_quantitative_std = \\\n p5_util.df_features_standardize(self.df_invoice, list_quant_feature)\n\n\n df_quant_std = pd.DataFrame(X_quantitative_std\\\n , index=self.df_invoice.index)\n \n #-------------------------------------------------------------------------\n # Columns from standardized dataframe are renamed\n #-------------------------------------------------------------------------\n df_quant_std.rename(columns={0:'Quantity',1:'UnitPrice'},inplace=True)\n\n #-------------------------------------------------------------------------\n # Standardized values dataframe is aggregated to df_invoice\n #-------------------------------------------------------------------------\n list_col_drop = ['Quantity','UnitPrice']\n list_col_keep = \\\n [col for col in self.df_invoice.columns if col not in list_col_drop ]\n self.df_invoice = self.df_invoice[list_col_keep]\n\n self.df_invoice = pd.concat([self.df_invoice,df_quant_std], axis=1)\n \n return",
"def _compute_samples_stats(self): \n samples = self.samples\n keys_yu = {'I','s','rho_ini','rho_ter','theta'}\n keys_y = {'d_ini','d_ter','T','p'}\n samples_stats= {}\n for key in {'mean','sd','5','50','95'}: samples_stats[key]={}\n for key in keys_yu: \n samples_stats['mean'][key]=np.mean(samples[key],axis=2)\n samples_stats['sd'][key]=np.sqrt(np.var(samples[key],axis=2))\n samples_stats['5'][key]=np.percentile(samples[key],q=5,axis=2)\n samples_stats['50'][key]=np.percentile(samples[key],q=50,axis=2)\n samples_stats['95'][key]=np.percentile(samples[key],q=95,axis=2)\n for key in keys_y:\n samples_stats['mean'][key]=np.mean(samples[key],axis=1)\n samples_stats['sd'][key]=np.sqrt(np.var(samples[key],axis=1))\n samples_stats['5'][key]=np.percentile(samples[key],q=5,axis=1)\n samples_stats['50'][key]=np.percentile(samples[key],q=50,axis=1)\n samples_stats['95'][key]=np.percentile(samples[key],q=95,axis=1)\n self.samples_stats = samples_stats",
"def get_spectral_values(saveFileName=csv_save, audioDirectory=data_directory):\r\n us8k = 'air_conditioner,car_horn,children_playing,dog_bark,drilling,' \\\r\n 'engine_idling,gun_shot,jackhammer,siren,street_music'.split(sep=\",\")\r\n\r\n # Create a header for the CSV file\r\n header = 'filename chroma_stft rmse spectral_centroid spectral_bandwidth rolloff zero_crossing_rate'\r\n for i in range(1, 21):\r\n header += f' mfcc{i}'\r\n header += ' label'\r\n header = header.split()\r\n print(header)\r\n\r\n # Save Spectral feature values to a CSV file\r\n on_file = 0\r\n file = open(saveFileName, 'w', newline='')\r\n with file:\r\n writer = csv.writer(file)\r\n writer.writerow(header)\r\n for i in range(1, 11):\r\n for filename in os.listdir(f'{audioDirectory}/fold{i}'):\r\n clip = f'{audioDirectory}/fold{i}/{filename}'\r\n if clip[-3:] == \"wav\":\r\n on_file = on_file + 1\r\n print(f'On File: {on_file}')\r\n y, sr = librosa.load(clip, mono=True)\r\n rms = librosa.feature.rms(y=y)\r\n chroma_stft = librosa.feature.chroma_stft(y=y, sr=sr)\r\n spec_cent = librosa.feature.spectral_centroid(y=y, sr=sr)\r\n spec_bw = librosa.feature.spectral_bandwidth(y=y, sr=sr)\r\n rolloff = librosa.feature.spectral_rolloff(y=y, sr=sr)\r\n zcr = librosa.feature.zero_crossing_rate(y)\r\n mfcc = librosa.feature.mfcc(y=y, sr=sr)\r\n to_append = f'{filename} {np.mean(chroma_stft)} {np.mean(rms)} {np.mean(spec_cent)} {np.mean(spec_bw)} {np.mean(rolloff)} {np.mean(zcr)}'\r\n for e in mfcc:\r\n to_append += f' {np.mean(e)}'\r\n to_append += f' {us8k[int(filename.split(sep=\"-\")[1])]}'\r\n file = open(saveFileName, 'a', newline='')\r\n with file:\r\n writer = csv.writer(file)\r\n writer.writerow(to_append.split())",
"def tweak_standard(self, wave_in, counts_in, counts_ivar_in, gpm_in, meta_table):\n return wave_in, counts_in, counts_ivar_in, gpm_in",
"def music_spec(sig, Fs, windowSize, stepSize, ORDER):\n iData = sliding_window(sig, dLen, sSize, fillvalue=0)\n out = np.zeros([512, np.int(len(sig)/sSize)]) #this is hardwired in pseudospectrum_MUSIC\n count = 0\n for win in iData:\n data = [ii for ii in win]\n #data = data - np.mean(data, axis=0)\n pfreq,p = pseudospectrum_MUSIC(np.array(data),ORDER,None,Fs,None)\n out[:,count] = p \n count = count + 1\n \n maxSpec1Cols = np.amax(out, axis=0)\n maxSpec1Cols[maxSpec1Cols==0] = 1e-6\n Sxx1Norm = out / maxSpec1Cols\n \n f = np.linspace(0, Fs/2, num=out.shape[0])\n t = np.arange(out.shape[1])\n return t, f, out, Sxx1Norm",
"def build_s_mat(f_min=-1.0, f_max=30.0, f_step=0.1, \\\n im_min=-1, im_max=30.0, im_step=0.1, Hz=True):\n\n f = arange(f_min, f_max, f_step)\n im = arange(im_min, im_max, im_step)\n \n nr = len(im)\n nc = len(f)\n s = zeros((nr,nc), dtype='D')\n\n for i in range(nr):\n for j in range(nc):\n s[i,j] = -f[j] + 1.0j*im[i]\n\n if Hz:\n s = s*2.0*pi\n\n return s",
"def _smooth_price_data(self, sigma):\n self.High = features.gaussian_filter(self.High_raw, sigma)\n self.Low = features.gaussian_filter(self.Low_raw, sigma)\n self.Close = features.gaussian_filter(self.Close_raw, sigma)\n self.Open = features.gaussian_filter(self.Open_raw, sigma)\n self.Volume = features.gaussian_filter(self.Volume_raw, sigma)",
"def test2_SingleObservationScaleByChan(self):\n\n os.system(\"mv \" + self.inpms + \" \" + self.inpms + \".test2\")\n self.inpms += \".test2\"\n record = {}\n\n tblocal = tbtool()\n tblocal.open(self.inpms)\n cols = tblocal.colnames()\n tblocal.close()\n if 'MODEL_DATA' in cols:\n raise ValueError, \"The input MS, \" + self.inpms + \" already has a MODEL_DATA col\" + str(cols)\n\n try:\n #print \"\\nRunning setjy(field='Uranus').\"\n print \"\\nRunning setjy(field='Titan').\"\n #sjran = setjy(vis=self.inpms, field='Uranus', spw='', modimage='',\n sjran = setjy(vis=self.inpms, field='Titan', spw='', modimage='',\n scalebychan=True, fluxdensity=-1,\n standard='Butler-JPL-Horizons 2010', usescratch=True)\n except Exception, e:\n #print \"\\nError running setjy(field='Uranus')\"\n print \"\\nError running setjy(field='Titan')\"\n raise e\n try:\n tblocal.open(self.inpms)\n cols = tblocal.colnames()\n \n if 'MODEL_DATA' not in cols:\n raise AssertionError, \"setjy(field='Uranus') did not add a MODEL_DATA column\"\n else:\n #record['wvr'] = tblocal.getcell('MODEL_DATA', 0)\n #record['auto1'] = tblocal.getcell('MODEL_DATA', 18)\n #record['long1'] = tblocal.getcell('MODEL_DATA', 19)\n #record['auto4'] = tblocal.getcell('MODEL_DATA', 2)\n #record['long4'] = tblocal.getcell('MODEL_DATA', 3)\n # Titan\n if self.ismms:\n\t\t #record['auto0'] = tblocal.getcell('MODEL_DATA', 45)\n\t\t #record['long0'] = tblocal.getcell('MODEL_DATA', 78)\n\t\t #record['auto3'] = tblocal.getcell('MODEL_DATA', 2835)\n\t\t #record['long3'] = tblocal.getcell('MODEL_DATA', 2868)\n querystr = 'FIELD_ID==1'\n auto0query = querystr+' AND DATA_DESC_ID==0 AND ANTENNA1==0 AND ANTENNA2==0 AND TIME/(24*3600) IN [{MJD(2011/04/22/00:07:03),MJD(2011/04/22/00:07:13)}]'\n subt = tblocal.query(auto0query)\n record['auto0'] = subt.getcell('MODEL_DATA', 0)\n subt.close()\n long0query = querystr+' AND DATA_DESC_ID==0 AND ANTENNA1==3 AND ANTENNA2==7 AND TIME/(24*3600) IN [{MJD(2011/04/22/00:07:03),MJD(2011/04/22/00:07:13)}]'\n subt = tblocal.query(long0query)\n\t\t record['long0'] = subt.getcell('MODEL_DATA', 0)\n subt.close()\n auto3query = querystr+' AND DATA_DESC_ID==3 AND ANTENNA1==0 AND ANTENNA2==0 AND TIME < 2011/04/22/00:07:03'\n subt = tblocal.query(auto3query)\n\t\t record['auto3'] = subt.getcell('MODEL_DATA', 0)\n subt.close()\n long3query = querystr+' AND DATA_DESC_ID==3 AND ANTENNA1==3 AND ANTENNA2==7 AND TIME < 2011/04/22/00:07:03'\n subt = tblocal.query(long3query)\n\t\t record['long3'] = subt.getcell('MODEL_DATA', 0)\n subt.close()\n else:\n\t\t record['auto0'] = tblocal.getcell('MODEL_DATA', 45)\n\t\t record['long0'] = tblocal.getcell('MODEL_DATA', 78)\n\t\t record['auto3'] = tblocal.getcell('MODEL_DATA', 405)\n\t\t record['long3'] = tblocal.getcell('MODEL_DATA', 438)\n tblocal.close()\n # record['history'] = self.get_last_history_line(self.inpms, origin='setjy::imager::setjy()', hint=\"V=0] Jy\")\n #record['history'] = self.get_last_history_line(self.inpms, origin='imager::setjy()', hint=\"V=0] Jy\")\n if not self.ismms: record['history'] = self.get_last_history_line(self.inpms, origin='imager::setjy()', hint=\"V=0] Jy\")\n self.result = record\n except AssertionError, e:\n print \"\\nError accesing MODEL_DATA\"\n tblocal.close()\n raise e\n\n \"\"\"Flux density in HISTORY (scalebychan)?\"\"\"\n #self.check_history(self.result['history'], [\"Uranus\", \"V=0] Jy\"])\n if not self.ismms: self.check_history(self.result['history'], [\"Titan\", \"V=0] Jy\"])\n\n #\"\"\"WVR spw with scalebychan\"\"\"\n 
#self.check_eq(self.result['wvr'], numpy.array([[25.93320656+0.j,\n # 26.88228607+0.j]]),\n # 0.003)\n\n \"\"\"Zero spacing of spw 1 with scalebychan\"\"\"\n # 8 (decreasing freq!) chans, XX & YY.\n #self.check_eq(self.result['auto1'],\n # numpy.array([[65.49415588+0.j, 65.42105865+0.j,\n # 65.34798431+0.j, 65.27491760+0.j,\n # 65.20187378+0.j, 65.12883759+0.j,\n # 65.05581665+0.j, 64.98281097+0.j],\n # [65.49415588+0.j, 65.42105865+0.j,\n # 65.34798431+0.j, 65.27491760+0.j,\n # 65.20187378+0.j, 65.12883759+0.j,\n # 65.05581665+0.j, 64.98281097+0.j]]),0.0001)\n # Titan ------------\n # check spw0, YY chan 0, 1920, 3839\n self.check_eq(self.result['auto0'][1][0], 3.30965233+0.j, 0.0001)\n self.check_eq(self.result['auto0'][1][1920], 3.31375313+0j, 0.0001)\n self.check_eq(self.result['auto0'][1][3839], 3.31785417+0j, 0.0001)\n\n \"\"\"Long spacing of spw 1 with scalebychan\"\"\"\n #self.check_eq(self.result['long1'],\n # numpy.array([[4.92902184+0.j, 4.96826363+0.j,\n # 5.00747252+0.j, 5.04664850+0.j,\n # 5.08579159+0.j, 5.12490082+0.j,\n # 5.16397619+0.j, 5.20301771+0.j],\n # [4.92902184+0.j, 4.96826363+0.j,\n # 5.00747252+0.j, 5.04664850+0.j,\n # 5.08579159+0.j, 5.12490082+0.j,\n # 5.16397619+0.j, 5.20301771+0.j]]),0.0001)\n # Titan\n self.check_eq(self.result['long0'][1][0],(2.77658414+6.98719121e-12j),0.0001)\n self.check_eq(self.result['long0'][1][1920],(2.77936244+6.99878090e-12j),0.0001)\n self.check_eq(self.result['long0'][1][3839],(2.78213906+7.01037362e-12j),0.0001)\n\n # spw 4 only has 1 chan, so it should be the same as without scalebychan.\n #\"\"\"Zero spacing of spw 4 with scalebychan\"\"\"\n #self.check_eq(self.result['auto4'], numpy.array([[69.33396912+0.j],[69.33396912+0.j]]),0.0001)\n \"\"\"Zero spacing of spw 3 with scalebychan\"\"\"\n self.check_eq(self.result['auto3'][1][0], (3.0934467+0j),0.0001)\n self.check_eq(self.result['auto3'][1][1920], (3.08946729+0j),0.0001)\n self.check_eq(self.result['auto3'][1][3839], (3.08549213+0j),0.0001)\n\n #\"\"\"Long spacing of spw 4 with scalebychan\"\"\"\n #self.check_eq(self.result['long4'], numpy.array([[2.83933783+0.j],[2.83933783+0.j]]),0.0001)\n\n \"\"\"Long spacing of spw 3 with scalebychan\"\"\"\n self.check_eq(self.result['long3'][1][0],(2.62812424+6.38091359e-12j) ,0.0001)\n self.check_eq(self.result['long3'][1][1920],(2.62534332+6.36981873e-12j) ,0.0001)\n self.check_eq(self.result['long3'][1][3839],(2.62256360+6.35873776e-12j) ,0.0001)\n\n return sjran",
"def run_sm_experiment(sample_data):\n # statsmodels feeds in (n_samples, n_channels)\n sm_var = VAR(endog=sample_data.squeeze().T)\n sm_params = sm_var.fit(maxlags=5, trend='n')",
"def GET_SPH():\n nmax = 52\n gamval = np.zeros(nmax)\n lamval = np.zeros(nmax)\n bval = np.zeros(nmax)\n\n gamval[0] = 1.00001\n gamval[1] = 1.0001\n gamval[2] = 1.001\n gamval[3] = 1.01\n gamval[4] = 1.03\n gamval[5] = 1.05\n gamval[6] = 1.07\n gamval[7] = 1.10\n gamval[8] = 1.15\n gamval[9] = 1.2\n gamval[10] = 1.3\n gamval[11] = 1.4\n gamval[12] = 1.5\n gamval[13] = 1.6\n gamval[14] = 1.66667\n gamval[15] = 1.7\n gamval[16] = 1.8\n gamval[17] = 1.86\n gamval[18] = 1.88\n gamval[19] = 1.9\n gamval[20] = 2.0\n gamval[21] = 2.010\n gamval[22] = 2.012\n gamval[23] = 2.2\n gamval[24] = 2.2215\n gamval[25] = 2.2217\n gamval[26] = 2.4\n gamval[27] = 2.5518\n gamval[28] = 2.55194\n gamval[29] = 2.6\n gamval[30] = 2.8\n gamval[31] = 3.0\n gamval[32] = 3.2\n gamval[33] = 3.4\n gamval[34] = 3.6\n gamval[35] = 3.8\n gamval[36] = 4.0\n gamval[37] = 4.5\n gamval[38] = 5.0\n gamval[39] = 5.5\n gamval[40] = 6.0\n gamval[41] = 6.5\n gamval[42] = 7.0\n gamval[43] = 8.0\n gamval[44] = 10.0\n gamval[45] = 15.0\n gamval[46] = 20.0\n gamval[47] = 30.0\n gamval[48] = 50.0\n gamval[49] = 100.0\n gamval[50] = 1000.0\n gamval[51] = 9999.0\n\n lamval[0] = 1.0044047883\n lamval[1] = 1.0135647885\n lamval[2] = 1.0401005736\n lamval[3] = 1.1088100742\n lamval[4] = 1.1671691602\n lamval[5] = 1.2015664277\n lamval[6] = 1.2269581432\n lamval[7] = 1.2563291060\n lamval[8] = 1.2928404943\n lamval[9] = 1.3207565353\n lamval[10] = 1.3628123548\n lamval[11] = 1.3943607838\n lamval[12] = 1.4195913539\n lamval[13] = 1.4405288149\n lamval[14] = 1.4526927211\n lamval[15] = 1.4583285785\n lamval[16] = 1.4737227445\n lamval[17] = 1.4820184714\n lamval[18] = 1.4846461951\n lamval[19] = 1.4872097129\n lamval[20] = 1.4991468274\n lamval[21] = 1.5002661592\n lamval[22] = 1.5004885113\n lamval[23] = 1.5193750470\n lamval[24] = 1.5213088378\n lamval[25] = 1.5213266323\n lamval[26] = 1.5358986669\n lamval[27] = 1.5465622206\n lamval[28] = 1.5465714207\n lamval[29] = 1.5496663736\n lamval[30] = 1.5613198923\n lamval[31] = 1.5713126233\n lamval[32] = 1.5799755842\n lamval[33] = 1.5875567751\n lamval[34] = 1.5942459679\n lamval[35] = 1.6001909794\n lamval[36] = 1.6055087137\n lamval[37] = 1.6166309698\n lamval[38] = 1.6254243269\n lamval[39] = 1.6325476141\n lamval[40] = 1.6384333257\n lamval[41] = 1.6433769444\n lamval[42] = 1.6475870992\n lamval[43] = 1.6543738548\n lamval[44] = 1.6637583967\n lamval[45] = 1.6760512867\n lamval[46] = 1.6821004429\n lamval[47] = 1.6880830534\n lamval[48] = 1.6928204564\n lamval[49] = 1.6963447551\n lamval[50] = 1.6994953607\n lamval[51] = 1.6998093041\n\n bval[0] = 0.541777\n bval[1] = 0.607335\n bval[2] = 0.758422\n bval[3] = 0.988008\n bval[4] = 0.996617\n bval[5] = 0.931071\n bval[6] = 0.860781\n bval[7] = 0.769242\n bval[8] = 0.658324\n bval[9] = 0.584657\n bval[10] = 0.496984\n bval[11] = 0.448082\n bval[12] = 0.417547\n bval[13] = 0.397073\n bval[14] = 0.386974\n bval[15] = 0.382711\n bval[16] = 0.372341\n bval[17] = 0.367499\n bval[18] = 0.366070\n bval[19] = 0.364725\n bval[20] = 0.359085\n bval[21] = 0.358608\n bval[22] = 0.358514\n bval[23] = 0.351834\n bval[24] = 0.351293\n bval[25] = 0.351288\n bval[26] = 0.348072\n bval[27] = 0.346707\n bval[28] = 0.346707\n bval[29] = 0.346472\n bval[30] = 0.346267\n bval[31] = 0.346985\n bval[32] = 0.348323\n bval[33] = 0.350078\n bval[34] = 0.352112\n bval[35] = 0.354327\n bval[36] = 0.356656\n bval[37] = 0.362682\n bval[38] = 0.368678\n bval[39] = 0.374437\n bval[40] = 0.379873\n bval[41] = 0.384959\n bval[42] = 0.389698\n bval[43] = 0.398201\n bval[44] 
= 0.411949\n bval[45] = 0.434177\n bval[46] = 0.447247\n bval[47] = 0.461834\n bval[48] = 0.474726\n bval[49] = 0.485184\n bval[50] = 0.495226\n bval[51] = 0.496265\n\n return gamval, lamval, bval",
"def calc_flux_mag(self):\n\n if 'flux' in self._data:\n self._fluxes = self._data['flux']\n self._mags = self.flux2mag(self._fluxes)\n elif 'fluxes' in self._data:\n self._fluxes = self._data['fluxes']\n self._mags = self.flux2mag(self._fluxes)\n elif 'mag' in self._data:\n self._mags = self._data['mag']\n self._fluxes = self.mag2flux(self._mags)\n elif 'mags' in self._data:\n self._mags = self._data['mags']\n self._fluxes = self.mag2flux(self._mags)\n elif 'magnitudes' in self._data:\n self._mags = self._data['magnitudes']\n self._fluxes = self.mag2flux(self._mags)\n else:\n raise KeyError('Cannot find \"fluxes\" or \"mags\" in photometric data')\n\n if 'fluxerr' in self._data:\n self._fluxerr = self._data['fluxerr']\n self._magerr = self.fluxerr2magerr(self.fluxes, self._fluxerr)\n elif 'magerr' in self._data:\n self._magerr = self._data['magerr']\n self._fluxerr = self.magerr2fluxerr(self._mags, self._magerr)\n else:\n self._magerr = np.array([])\n self._fluxerr = np.array([])\n\n if 'mjd' in self._data:\n self._mjds = self._data['mjd']\n elif 'mjds' in self._data:\n self._mjds = self._data['mjds']\n else:\n raise KeyError('Cannot find \"mjd\" or \"mjds\" in photometric data')",
"def setMasses(self):\n self.ave_masses = {'X': 0.0000, 'G': 57.0513, 'A': 71.0779, 'S': 87.0773, 'P': 97.1152,\n 'V': 99.1311, 'T':101.1039, 'C':103.1429, 'L':113.1576, 'I':113.1576,\n 'J':113.1576, 'N':114.1026, 'O':114.1472, 'B':114.5950, 'D':115.0874,\n 'Q':128.1292, 'K':128.1723, 'Z':128.6216, 'E':129.1140, 'M':131.1961,\n 'H':137.1393, 'F':147.1739, 'R':156.1857, 'Y':163.1733, 'W':186.2099,\n 'U':150.0379, '*': 0.00000, '-': 0.00000, 'water':18.02}\n self.mono_masses = {'X': 0.000000, 'G': 57.021464, 'A': 71.037114, 'S': 87.032028, 'P':97.052764,\n 'V': 99.068414, 'T':101.047679, 'C':103.009185, 'L':113.084064, 'I':113.084064,\n 'J':113.084064, 'N':114.042927, 'O':114.147200, 'B':114.595000, 'D':115.026943,\n 'Q':128.058578, 'K':128.094963, 'Z':128.621600, 'E':129.042593, 'M':131.040485,\n 'H':137.058912, 'F':147.068414, 'R':156.101111, 'Y':163.063320, 'W':186.079313,\n 'U':150.953630, '*': 0.000000, '-': 0.000000, 'water':18.01057}\n return",
"def factor_ms(self, mean_stress, ms_correction=True):\n if not ms_correction:\n return 1\n if self.h2x <= mean_stress <= self.h3x:\n return (self.sigma_D - self.M * mean_stress) / self.sigma_D\n elif self.h1x <= mean_stress < self.h2x:\n return (self.sigma_D - self.M * self.sigma_D / (self.M - 1)) / self.sigma_D\n elif mean_stress < self.h1x:\n return (mean_stress + self.Rp / self.gamma_M) / self.sigma_D\n else:\n return (\n self.Rp / self.gamma_M\n - (self.Rp / self.gamma_M - self.sigma_D) / (1 - self.M)\n ) / self.sigma_D",
"def test1_UBandModelwithQBandMS(self):\n\n # The MS is in Q band, so deliberately choose the U band model so that the structure\n # is not too far off, but whether or not its flux density is scaled makes a difference.\n\n print \"Running multiple setjy with different parameters...\"\n for use_oldstandard in [False, True]:\n # for debugging ...\n #for use_oldstandard in [True]:\n selStandard = (\"Perley-Taylor 99\" if use_oldstandard else \"Perley-Butler 2010\")\n print \"!!!!! Run with standard=\\\"%s\\\" !!!!!\" % selStandard\n self.result[use_oldstandard] = self.run_setjy(use_oldstandard)\n\n \n print \"!!!! Run with standard=\\\"manual\\\", fluxdensity !!!!!\"\n self.result['fluxdens'] = self.run_setjy(False, 1234.0)\n print \"!!!! Run with standard=\\\"manual\\\", fluxdensity and spix !!!!!\"\n self.result['spix'] = self.run_setjy(False,1234.0 * (43.42064/35.0)**0.7,-0.7,\"35.0GHz\")\n\n # check on HISTORY sub-table entries - does not check for values\n \"\"\"Flux density in HISTORY (old standard)?\"\"\"\n #no scaling\n #self.check_history(self.result[True]['history'],[\"Scaling spw 1's model image to I =\"])\n if not self.ismms: self.check_history(self.result[True]['history'],[\"fld ind 12) spw 1 [I=\"])\n \"\"\"Flux density in HISTORY (new default standard)?\"\"\"\n if not self.ismms: self.check_history(self.result[False]['history'],[\"Scaling spw(s) [0, 1]'s model image to I =\"])\n #\"\"\"Flux density in HISTORY (fluxdensity)?\"\"\" <= no flux density is written in HISTORY, just input flux dens.\n #self.check_history(self.result['fluxdens']['history'],[\"Scaling spw 1's model image to I =\"])\n \"\"\"Flux density in HISTORY (spix)?\"\"\"\n #self.check_history(self.result['spix']['history'],[\"Scaling spw 1's model image to I =\"])\n if not self.ismms: self.check_history(self.result['spix']['history'],[\"Flux density as a function of frequency\"])\n\n # computed flux check\n # -different standards\n \"\"\" Returned flux density (using old standard) \"\"\"\n # fieldid = 12\n self.assertTrue(self.result[True]['setjyran'].has_key('12'))\n self.check_eq(self.result[True]['setjyran']['12']['1']['fluxd'][0],0.91134687,0.0001)\n \"\"\" Returned flux density (default standard=Perley-Butler 2010) \"\"\"\n self.assertTrue(self.result[False]['setjyran'].has_key('12'))\n #self.check_eq(self.result[False]['setjyran']['12']['1']['fluxd'][0],0.0,0.0001)\n # Updated value for the updated run_setjy 2014-05-01 TT\n self.check_eq(self.result[False]['setjyran']['12']['1']['fluxd'][0],1.0510757,0.0001)\n #\n # -manual mode (fluxdensity specification)\n \"\"\" Returned flux density (with input fluxdensity) \"\"\"\n self.assertTrue(self.result['fluxdens']['setjyran'].has_key('12'))\n self.check_eq(self.result['fluxdens']['setjyran']['12']['1']['fluxd'][0],1234.0,0.0001)\n \"\"\" Returned flux density (with input fluxdensity and spix) \"\"\"\n self.assertTrue(self.result['spix']['setjyran'].has_key('12'))\n #self.check_eq(self.result['spix']['setjyran']['12']['1']['fluxd'][0],1233.91240671,0.0001)\n # Updated value for the updated run_setjy 2014-05-01 TT\n self.check_eq(self.result['spix']['setjyran']['12']['1']['fluxd'][0],1234.0328507,0.0001)\n #\n # -for standard='Perley-Butler 2010, with model image\n \"\"\"modimage != '' and fluxdensity == 0 -> no scaling?\"\"\"\n #self.check_eq(self.result[False]['short'], 2.712631, 0.05)\n # Updated value for the updated run_setjy 2014-05-01 TT\n self.check_eq(self.result[False]['short'], 1.0508747, 0.05)\n #self.check_eq(self.result[False]['long'], 2.4080808, 
0.05)\n # Updated value for the updated run_setjy 2014-05-01 TT\n self.check_eq(self.result[False]['long'], 0.9328917, 0.05)\n #\n # -for standard='Perley-Taylor 99' (no model specification is allowed)\n \"\"\"Perley-Taylor 99 standard?\"\"\"\n self.check_eq(self.result[True]['short'], 0.911185, 0.025)\n #self.check_eq(self.result[True]['long'], 0.808885, 0.025)\n # Updated value for the updated run_setjy 2014-05-01 TT\n self.check_eq(self.result[True]['long'], 0.9114067, 0.025)\n #\"\"\"modimage != '' and fluxdensity > 0\"\"\" this is no longer supported in the task\n \"\"\"fluxdensity > 0\"\"\" # should be = input fluxdensity for model vis\n self.check_eq(self.result['fluxdens']['short'], 1234.0, 0.05)\n self.check_eq(self.result['fluxdens']['long'], 1234.0, 0.05)\n #\"\"\"modimage != '', fluxdensity > 0, and spix = -0.7\"\"\" with modimage no longer supproted\n \"\"\"fluxdensity > 0, and spix = -0.7\"\"\"\n #self.check_eq(self.result['spix']['short'], 1233.7, 0.5)\n #self.check_eq(self.result['spix']['long'], 1095.2, 0.5)\n self.check_eq(self.result['spix']['short'], 1234.0, 0.5)\n self.check_eq(self.result['spix']['long'], 1234.0, 0.5)\n\n return True",
"def m_s(ms2, scale, f, alphasMZ=0.1185, loop=3):\n if scale == 2 and f == 3:\n return ms2 # nothing to do\n _sane(scale, f)\n crd = rundec.CRunDec()\n alphas_2 = alpha_s(2, 3, alphasMZ=alphasMZ, loop=loop)\n if f == 3:\n alphas_scale = alpha_s(scale, f, alphasMZ=alphasMZ, loop=loop)\n return crd.mMS2mMS(ms2, alphas_2, alphas_scale, f, loop)\n elif f == 4:\n crd.nfMmu.Mth = 1.3\n crd.nfMmu.muth = 1.3\n crd.nfMmu.nf = 4\n return crd.mL2mH(ms2, alphas_2, 2, crd.nfMmu, scale, loop)\n elif f == 5:\n mc = 1.3\n crd.nfMmu.Mth = mc\n crd.nfMmu.muth = mc\n crd.nfMmu.nf = 4\n msmc = crd.mL2mH(ms2, alphas_2, 2, crd.nfMmu, mc, loop)\n crd.nfMmu.Mth = 4.8\n crd.nfMmu.muth = 4.8\n crd.nfMmu.nf = 5\n alphas_mc = alpha_s(mc, 4, alphasMZ=alphasMZ, loop=loop)\n return crd.mL2mH(msmc, alphas_mc, mc, crd.nfMmu, scale, loop)\n else:\n raise ValueError(f\"Invalid input: f={f}, scale={scale}\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
insert mean delays (i.e. nonturbulent) due to dry and wet components
|
def trop_calc_mean_delays(self):
delay = self.trop_ATM_dispersion() / speed_of_light
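    # scale the zenith delay by the airmass factor 1/sin(elevation) to get the delay along each line of sight,
    # then convert to phase per channel: phase = 2*pi*nu*tau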
self.delay_alltimes = delay / np.sin(self.elevation_tropshape)
phasedelay_alltimes = 2*np.pi * delay / np.sin(self.elevation_tropshape) * self.chan_freq.reshape((1, self.chan_freq.shape[0], 1))
np.save(II('$OUTDIR')+'/atm_output/phasedelay_alltimes', phasedelay_alltimes)
np.save(II('$OUTDIR')+'/atm_output/delay_alltimes', self.delay_alltimes)
self.phasedelay_alltimes = phasedelay_alltimes
|
[
"def propigate_delays(self, elements, math):\n pass",
"def update_delay(self, delay):",
"def delay() -> None:\n print(\"DELAY \" + str(int(numpy.random.normal(MU, SIGMA))))",
"def get_delay_minimum(self, synapse_info):",
"def _delay(self):\n time.sleep(random.randint(self.min_delay,self.max_delay)/1000.0)",
"def new_random_delay():\n return random.randrange(100, 200, 3)",
"def embed_delay(ts, dim, tau):\n ts = ts.flatten()\n new_ts = np.zeros([len(ts), dim])\n\n l = np.int32(np.floor(dim / 2.0))\n for i, o in zip(range(0, dim), range(-l, l + 1)):\n data = np.roll(ts, -tau + o)\n new_ts[:, i] = data\n new_ts = new_ts[:-(dim - 1) * tau, ]\n\n return new_ts",
"def grpdelay(b, a=1, nfft=512, whole='none', Fs=2.*pi):\n#==================================================================\n if whole !='whole':\n nfft = 2*nfft\n#\n w = Fs * np.arange(0, nfft)/nfft\n \n try: len(a)\n except TypeError: \n a = 1; oa = 0\n c = b\n try: len(b)\n except TypeError: print 'No proper filter coefficients: len(a) = len(b) = 1 !'\n else: \n oa = len(a)-1; # order of a(z)\n c = np.convolve(b,a[::-1]) # c(z) = b(z)*a(1/z)*z^(-oa); a[::-1] reverses a\n try: len(b)\n except TypeError: b=1; ob=0; \n else: \n ob = len(b)-1; # order of b(z) \n\n oc = oa + ob; # order of c(z)\n \n cr = c * np.arange(0,oc+1) # multiply with ramp -> derivative of c wrt 1/z\n\n num = np.fft.fft(cr,nfft)\n den = np.fft.fft(c,nfft)\n#\n minmag = 10 * np.spacing(1) # equivalent to matlab \"eps\"\n polebins = np.where(abs(den)<minmag)[0] # find zeros, convert tuple to array\n if np.size(polebins) > 0: # check whether polebins array is empty\n print '*** grpdelay warning: group delay singular -> setting to 0 at:'\n for i in polebins:\n print 'f = {0} '.format((Fs*i/nfft))\n num[i] = 0;\n den[i] = 1; \n\n tau_g = np.real(num / den) - oa;\n# \n if whole !='whole':\n nfft = nfft/2\n tau_g = tau_g[0:nfft]\n w = w[0:nfft]\n\n return tau_g, w",
"def update(self):\n if not hasattr(self,\"verbose\"):\n self.verbose = 0\n if not hasattr(self,\"deltas\") or self.deltas is None:\n self.deltas = [np.zeros(dw.shape) for w,dw,n in self.weights()]\n for ds,(w,dw,n) in zip(self.deltas,self.weights()):\n ds.ravel()[:] = self.momentum * ds.ravel()[:] + self.learning_rate * dw.ravel()[:]\n w.ravel()[:] += ds.ravel()[:]\n if self.verbose:\n LOG.info(\"{} {} {}\".format(n, (np.amin(w), np.amax(w)), (np.amin(dw), np.amax(dw))))",
"def warmup(self):\n\t\treturn int(self._warmup/self.tick_period) * self.tick_period",
"def avg_time_distortion(self, step_pattern):\n if len(self.data['time_distortion'][step_pattern]) != self.data['num_queries']:\n print('Not every query aligned, align the remaining queries')\n return\n else:\n I = self.data['num_queries']\n avg_td = sum(self.data['time_distortion'][step_pattern].values())/I\n return avg_td",
"def time_update(self):\r\n self.time = []\r\n t = [0] + self.time_final_all_section()\r\n for i in range(self.number_of_section):\r\n self.time.append((t[i+1] - t[i]) / 2.0 * self.tau[i]\r\n + (t[i+1] + t[i]) / 2.0)\r\n return np.concatenate([i for i in self.time])",
"def delay():\n latency = 0.49\n sleep(latency)",
"def get_intrinsic_delays(self):\n\n pass",
"def test_mean_waiting_time_formula_using_direct_for_class_1_individuals_example():\n all_states = [\n (0, 0),\n (0, 1),\n (0, 2),\n (0, 3),\n (1, 3),\n (2, 3),\n (3, 3),\n ]\n pi = np.array(\n [\n [0.11428571, 0.22857143, 0.22857143, 0.22857143],\n [np.nan, np.nan, np.nan, 0.11428571],\n [np.nan, np.nan, np.nan, 0.05714286],\n [np.nan, np.nan, np.nan, 0.02857143],\n ]\n )\n\n mean_wait = mean_waiting_time_formula_using_direct_approach(\n all_states=all_states,\n pi=pi,\n class_type=0,\n lambda_1=1,\n lambda_2=1,\n mu=1,\n num_of_servers=2,\n threshold=3,\n system_capacity=3,\n buffer_capacity=3,\n )\n\n assert round(mean_wait, NUMBER_OF_DIGITS_TO_ROUND) == round(\n 0.20000000175, NUMBER_OF_DIGITS_TO_ROUND\n )",
"def sir_mean_trajectory(sim_dir, plot = True):\n FN = 500;\n\n all_traj, times = consolidate_trajectories(sim_dir, FORCE_NUMBER=FN);\n\n fpt = np.mean(times[:,-1])\n for i in range(0,np.shape(times)[1]):\n times[:,i] = times[:,i] - times[:,-1]\n\n mean_traj_times, step_size = np.linspace( -2*fpt, np.max(times),\n #mean_traj_times, step_size = np.linspace( np.min(times), np.max(times),\n FN, retstep=True );\n mean_traj = np.zeros( ( len(mean_traj_times), np.shape(all_traj)[2]) );\n normalization = np.zeros( len( mean_traj_times ) );\n\n # Populate the mean position at each time point\n for traj_idx in range(0, np.shape(all_traj)[0]):\n for time_idx in range(1, np.shape(all_traj)[1]):\n # check that it's not zero time\n if ( times[traj_idx,time_idx]-times[traj_idx,time_idx-1] != 0.0\n and -int(np.floor(times[traj_idx,time_idx]/step_size))<FN):\n mean_traj[-1+int(np.floor(times[traj_idx,time_idx]\n /step_size)),:] \\\n += all_traj[traj_idx,time_idx,:]\n normalization[-1+int(np.floor(times[traj_idx,time_idx]\n /step_size) )]+= 1\n\n mean_traj = mean_traj / normalization[:,np.newaxis]\n\n #sir_dstbn_fp(all_traj, plot) # TODO : somewhere else\n\n if plot:\n fig = plt.figure(); ax = plt.gca()\n for i in range(0, len(mean_traj_times) - 1 ):\n\n ax.plot(mean_traj[i:i+2,0], mean_traj[i:i+2,1],\n color=plt.cm.plasma(int(255*i/len(mean_traj_times))))\n plt.ylim(bottom=0.0); #plt.xlim(left=0.0)\n min_ylim, max_ylim = plt.ylim()\n min_xlim, max_xlim = plt.xlim()\n plt.xlabel(r'number susceptible ($S$)')\n plt.ylabel(r'number infected ($I$)')\n plt.show()\n\n return mean_traj",
"def _full_times(self, index):\n # Number of points in the buffer arrays\n n_before = int(self._buffers[index][0]/self.dt)\n if self._buffers[index][0]%self.dt:\n n_before += 1\n n_after = int(self._buffers[index][1]/self.dt)\n if self._buffers[index][1]%self.dt:\n n_after += 1\n # Proper starting points of buffer arrays to preserve dt\n t_min = self.times[0] - n_before*self.dt\n t_max = self.times[-1] + n_after*self.dt\n return np.concatenate((\n np.linspace(t_min, self.times[0], n_before, endpoint=False),\n self.times,\n np.linspace(self.times[-1], t_max, n_after+1)[1:]\n ))",
"def delayed_insert(self):\n return \"\"\"--delayed-insert\"\"\"",
"def test_flexible_raft_delay(drop_ratio):\n cluster_size = [i for i in range(3, 8, 2)]\n for i in cluster_size:\n delay = []\n for j in range(0, min(i//2+1, 4)):\n res = singleBenchmark(50, 10, i, i+1-j, j, drop_ratio, 0, delay=True) if j != 0 else singleBenchmark(50, 10, i, 0, 0, drop_ratio, 0, delay=True)\n delay.append(res)\n filename = \"result_%d_%f\" % (i, drop_ratio)\n with open(filename, 'a') as f:\n f.write(\"Delay with cluster size = %d & drop ratio = %f\\n\" % (i, drop_ratio))\n f.write(str(delay)+\"\\n\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
this will change the pointing error for each antenna every pointing_timescale, which one could essentially think of as a scan length (e.g. 10 minutes)
|
def pointing_constant_offset(self,pointing_rms, pointing_timescale,PB_FWHM230):
self.PB_FWHM = PB_FWHM230 / (self.chan_freq.mean() / 230e9) # convert 230 GHz PB to current obs frequency
self.num_mispoint_epochs = max(1, int(np.floor(self.obslength / (pointing_timescale * 60.)))) # could be number of scans, for example
self.mjd_per_ptg_epoch = (self.mjd_obs_end - self.mjd_obs_start) / self.num_mispoint_epochs
self.mjd_ptg_epoch_timecentroid = np.arange(self.mjd_obs_start,self.mjd_obs_end,
self.mjd_per_ptg_epoch) + (self.mjd_per_ptg_epoch/2.)
self.pointing_offsets = pointing_rms.reshape(self.Nant,1) * np.random.randn(self.Nant,self.num_mispoint_epochs) # units: arcsec
for ant in range(self.Nant):
ind = (self.mjd_ptg_epoch_timecentroid < self.mjd_ant_rise[ant]) \
| (self.mjd_ptg_epoch_timecentroid > self.mjd_ant_set[ant])
self.pointing_offsets[ant,ind] = np.nan # this masks out pointing offsets for stowed antennas
PB_model = ['gaussian']*self.Nant # primary beam model set in input config file. Hardwired to Gaussian for now.
amp_errors = np.zeros([self.Nant,self.num_mispoint_epochs])
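    # map each antenna's pointing offsets to a primary-beam amplitude attenuation per mispointing epoch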
for ant in range(self.Nant):
        if PB_model[ant] == 'cosine3':
amp_errors[ant,:] = np.cos(self.pointing_offsets[ant,:]/206265.)**3 #placeholder, incorrect
elif PB_model[ant] == 'gaussian':
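            # Gaussian primary beam: attenuation = exp(-offset**2 / (2*sigma**2)), with sigma = FWHM/2.35 (approx. 2*sqrt(2*ln2))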
amp_errors[ant,:] = np.exp(-0.5*(self.pointing_offsets[ant,:]/(self.PB_FWHM[ant]/2.35))**2)
self.pointing_amp_errors = amp_errors
|
[
"def pid(self):\n\n # Calculating Error for altitude, latitude, longitude\n self.check_obstacle()\n self.waypoint_setter()\n rospy.loginfo(\"##Setpoint:%s, %s, %s\",str(self.setpoint[1]),str(self.setpoint[2]),str(self.setpoint[0]))\n self.error_in_meters()\n self.marker_status()\n self.err[0] = self.setpoint[0] - self.altitude_coord\n self.err[1] = self.setpoint[1] - self.latitude_coord\n self.err[2] = self.setpoint[2] - self.longitude_coord\n\n # Calculating Change in Error for altitude, latitude, longitude\n\n self.changerror[0] = self.err[0] - self.lasterror[0]\n self.changerror[1] = self.err[1] - self.lasterror[1]\n self.changerror[2] = self.err[2] - self.lasterror[2]\n\n # Calculating sum of Error for altitude, latitude, longitude\n\n self.errorsum[0] = (self.errorsum[0] + self.err[0])*self.sample_time\n self.errorsum[1] = (self.errorsum[1] + self.err[1])*self.sample_time\n self.errorsum[2] = (self.errorsum[2] + self.err[2])*self.sample_time\n\n # Calculating Output which is to be sent to attitude_controller.py through edrone/cmd pub\n\n self.output[0] = self.k_p[0]*self.err[0] + self.k_i[0]*self.errorsum[0] + self.k_d[0]*self.changerror[0]/self.sample_time\n self.output[1] = self.k_p[1]*self.err[1] + self.k_i[1]*self.errorsum[1] + self.k_d[1]*self.changerror[1]/self.sample_time\n self.output[2] = self.k_p[2]*self.err[2] + self.k_i[2]*self.errorsum[2] + self.k_d[2]*self.changerror[2]/self.sample_time\n\n # Equation for Throttle , Pitch, Roll and Yaw values for attitude_controller.py\n\n self.drone_cmd = edrone_cmd()\n self.drone_cmd.rcThrottle = self.output[0] + 1500\n self.drone_cmd.rcPitch = 1500 + 6*self.output[1]\n self.drone_cmd.rcRoll = 1500 + 6*self.output[2]\n self.drone_cmd.rcYaw = 1500\n\n # Storing Current error\n\n self.lasterror[0] = self.err[0]\n self.lasterror[1] = self.err[1]\n self.lasterror[2] = self.err[2]\n # Publishing msg drone_cmd\n self.drone_pub.publish(self.drone_cmd)",
"def draw_raPointErrorsRaw(self):\n axe, _ = self.generateFlat(widget=self.raPointErrorsRaw)\n x = self.index\n y = self.errorRA_S\n p = self.pierside\n xLabel = 'Star Number'\n yLabel = 'Error per Star [arcsec]'\n self.plotFigureFlat(axe, x, y, p, xLabel, yLabel, False, 3)\n return True",
"def AdaptTimeStep(self): \r\n \r\n estimator_LowOrder=self.dy_TestOrder[:]*self.TimeStep\r\n \r\n estimator_HighOrder=self.dYtmp[:]*self.TimeStep \r\n \r\n AbsError=np.abs(estimator_HighOrder-estimator_LowOrder)\r\n MaxError=np.max(AbsError) \r\n scale=1.0\r\n\r\n \r\n if (MaxError>self.zero):\r\n \r\n if(self.AdaptativeError>=MaxError):\r\n \r\n scale=((self.AdaptativeError/(MaxError))**(self.AdaptativeOrderSup)*(self.AlphatimeStep))\r\n else: \r\n scale=((self.AdaptativeError/(MaxError))**(self.AdaptativeOrderInf)*(self.AlphatimeStep)) \r\n self.TimeStep=scale*self.TimeStep",
"def draw_raPointErrorsRawRef(self):\n axe, _ = self.generateFlat(widget=self.raPointErrorsRawRef)\n x = self.angularPosRA\n y = self.errorRA_S\n p = self.pierside\n xLabel = 'RA Encoder Abs [deg]'\n yLabel = 'Error per Star [arcsec]'\n self.plotFigureFlat(axe, x, y, p, xLabel, yLabel, True, 3)\n return True",
"def modify_ti(self, sol_points = None, max_psin = 1.1, decay_length = 0.015,\n rad_loc_for_exp_decay = 1.0, reduce_ti = True, ti_min = 1, plotit = False):\n\n tiexp = self.data['pedData']['fitVals']['tisplpsi']['y']\n tiexppsi = self.data['pedData']['fitVals']['tisplpsi']['x']\n\n ti_mod = tiexp.copy()\n xrad = tiexppsi.copy()\n\n if reduce_ti:\n saved_ratio_file_loc = \\\n '/fusion/projects/results/solps-iter-results/wilcoxr/T_D_C_ratio.txt'\n\n print('Reducing T_D according to ratio of T_D / T_C from ' + saved_ratio_file_loc)\n\n try:\n with open(saved_ratio_file_loc, 'r') as f:\n lines = f.readlines()\n\n psin_ratio = []\n T_DC_ratio = [] # The ratio T_D / T_C from 171558\n\n for line in lines:\n elements = line.split()\n if elements[0] != '#':\n psin_ratio.append(np.float(elements[0]))\n T_DC_ratio.append(np.float(elements[1]))\n\n T_ratio_fit = np.interp(tiexppsi, np.array(psin_ratio),\n np.array(T_DC_ratio), left=1)\n # if > given range, chooses endpoint\n ti_reduced = tiexp * T_ratio_fit\n\n except FileNotFoundError:\n print(\"Can't retrieve T_D/T_C ratio file, not reducing Ti\")\n ti_reduced = tiexp\n\n ti_mod = ti_reduced\n\n\n # Modify Ti profile to decay exponentially outside separatrix\n if decay_length is not None:\n outer_inds = np.where(tiexppsi >= rad_loc_for_exp_decay)[0]\n val_at_exp_decay_start = np.interp(rad_loc_for_exp_decay, tiexppsi, ti_mod)\n\n if sol_points is not None:\n xrad = np.delete(xrad, outer_inds)\n ti_mod = np.delete(ti_mod, outer_inds)\n\n extra_points = np.linspace(rad_loc_for_exp_decay, max_psin, sol_points + 1)\n xrad = np.append(xrad, extra_points)\n outer_inds = np.where(xrad >= rad_loc_for_exp_decay)[0]\n ti_mod = np.append(ti_mod, np.ones(sol_points + 1))\n\n ti_mod[outer_inds] = (val_at_exp_decay_start - ti_min * 1e-3) * \\\n np.exp(-(xrad[outer_inds]-rad_loc_for_exp_decay) / decay_length) + ti_min * 1e-3\n\n if plotit:\n psi_TS = self.data['pedData']['fitPsiProf']\n teexp = self.data['pedData']['fitProfs']['teprof']\n\n plt.figure()\n plt.plot(psi_TS, teexp, 'g', lw=1, label = 'T$_e$ (TS)')\n plt.plot(tiexppsi, tiexp, '--sk', lw=2, label='T$_{C+6}$ (CER)')\n if reduce_ti:\n plt.plot(tiexppsi, ti_reduced, '-xr', ms=8, mew=2, lw=2,\n label='T$_D$ (inferred)')\n plt.plot(xrad, ti_mod, '-ob', lw=3, label = 'Final T$_D$')\n plt.xlabel('$\\psi_n$')\n plt.ylabel('T$_i$ (keV)')\n plt.legend(loc='best')\n plt.grid('on')\n plt.show(block=False)\n\n self.data['pedData']['fitVals']['ti_mod'] = {'x':xrad, 'y':ti_mod}",
"def calc_errors(self, location, goal):\n # rospy.loginfo('location', location)\n # rospy.loginfo('goal', goal)\n along = self.along_axis_error(location, goal)\n off = self.off_axis_error(location, goal)\n heading = self.heading_error(location, goal)\n # rospy.loginfo('a: %d o: %d h: %d' % (along, off, heading,))\n return (along, off, heading,)",
"def speed_violation(gps_data, type, speed_limit, time):\n time_elapsed = 0\n first_point = True\n list_violations = []\n for i in range(len(gps_data) - 1):\n time0 = gps_data[i-1].get(\"time\")\n lon1 = gps_data[i].get(\"longitude\")\n lat1 = gps_data[i].get(\"latitude\")\n time1 = gps_data[i].get(\"time\")\n lon2 = gps_data[i+1].get(\"longitude\")\n lat2 = gps_data[i+1].get(\"latitude\")\n time2 = gps_data[i+1].get(\"time\")\n\n if type == \"Explicit\":\n if(gps_data[i].get(\"speed\") is None):\n speed = speed_between_points(lon1, lat1, time1, lon2, lat2, time2)\n else:\n speed = gps_data[i].get(\"speed\")\n elif type == \"Location\":\n speed = speed_between_points(lon1, lat1, time1, lon2, lat2, time2)\n\n if speed >= speed_limit: \n if first_point == True:\n starting_point = gps_data[i]\n first_point = False\n else: \n time_elapsed += time1.timestamp() - time0.timestamp() \n else:\n if sec_to_minute(time_elapsed) >= time:\n violation = {\n 'duration': time_elapsed, \n 'lat1': starting_point['latitude'],\n 'long1': starting_point['longitude'],\n 'time1': starting_point['time'],\n 'lat2': gps_data[i-1]['latitude'],\n 'long2': gps_data[i-1]['longitude'],\n 'time2': gps_data[i-1]['time']\n }\n\n list_violations.append(violation)\n time_elapsed = 0\n first_point = True\n\n return list_violations",
"def scan_laser_piezo(bristol,PM,vstart=-5,vend=5,scanpts = 50,PowerMeterOn=False,save_data=True,lasercurrent=np.nan,potreading=np.nan,PDOn=False,LFOn=False,LFauto=None,wdir=None):\n toptica_bounded_write(vstart)\n voltage = np.linspace(vstart,vend,scanpts)\n FP_FSR_V = 4.783\n lpFP = LivePlot(1, 2, 5, 3, '.', 'Time (s)',\"PD (V)\")\n \n lpSpec = LivePlot(1,2,5,3,'o','Wavelength (nm-air)','Count')\n # lpLRFP = LivePlotLR(1, 1, 8, 5, 'o', 'Laser piezo (V)', 'Peak center (V)','Peak amplitude (V)')\n lpLRFP = LivePlotLR(1, 1, 8, 5, 'o', 'Laser piezo (V)', 'Peak center (V)','Peak center (V)')\n\n if PDOn:\n lpLR = LivePlotLR(1, 1, 8, 5, 'o', 'Laser piezo (V)', 'Wavelength (nm-air)','Photodiode (V)')\n elif PowerMeterOn:\n lpLR = LivePlotLR(1, 1, 8, 5, 'o', 'Laser piezo (V)', 'Wavelength (nm-air)','Thorlabs power (mW)')\n else:\n lpLR = LivePlotLR(1, 1, 8, 5, 'o', 'Laser piezo (V)', 'Wavelength (nm-air)','Bristol power (mW)')\n\n volt_so_far=[]\n powerlist=[]\n lambdalist=[]\n thorpowerlist = []\n PDvoltlist = []\n lambdaSpecList = []\n fwhmSpecList = []\n\n FPlambda=[]\n FPpower=[]\n \n \n now = datetime.datetime.now()\n date_str = now.strftime(\"%y%m%d %H%M\")[:6]\n time_str = now.strftime(\"%y%m%d %H%M%S\")[-6:]\n \n \n if LFOn: \n # setup spectrometer\n base_name = 'spectrometer test'\n acq_time = 0.2\n save_data = True\n\n #deal with LightField settings\n LFauto.set_acquisition_time(acq_time)\n LFauto.set_path(wdir)\n LFauto.set_filename(base_name)\n LFauto.set_filename_increment()\n \n \n for ind,v in enumerate(voltage):\n toptica_bounded_write(v)\n volt_so_far.append(v)\n\n # read fabry perot - need to start the read and write channel at the same time\n \n ao_pts_half=500\n t0_this,v_ao_scan_this,aiV_this = FP_scan(ao_pts_half=ao_pts_half,v_final=10.0)\n best_vals = FP_fit(t0_this[0:ao_pts_half-1],v_ao_scan_this[0:ao_pts_half-1], aiV_this[0:ao_pts_half-1],userange=ao_pts_half)\n if ind==0:\n FPlambda.append(best_vals[1])\n else:\n FPlambda.append(unwrap_by_pt(best_vals[1],FPlambda[ind-1],FP_FSR_V))\n\n FPpower.append(-best_vals[0])\n\n# yfit = lorentziansin(v_ao_scan_this, best_vals[0], best_vals[1],best_vals[2],best_vals[3],best_vals[4],best_vals[5],best_vals[6])\n yfit = lorentzianFSR(v_ao_scan_this, best_vals[0], best_vals[1],best_vals[2],best_vals[3],best_vals[4])\n print(f'FWHM {best_vals[2]:.4f}, FSR {best_vals[4]:.4f}, Finesse {best_vals[4]/best_vals[2]:.4f}')\n lpFP.plot_live(t0_this[0:ao_pts_half-1],aiV_this[0:ao_pts_half-1],yfit[0:ao_pts_half-1])\n lpFP.ax1.set_title(f'Center at {best_vals[1]:.3f} V')\n plt.tight_layout()\n time.sleep(0.1)\n\n # lpLRFP.plot_live(volt_so_far, FPlambda,FPpower)\n lpLRFP.plot_live(volt_so_far, FPlambda,FPlambda)\n\n # read wavemeter\n currentLambda,currentPower = bristol.readBristol()\n lambdalist.append(currentLambda)\n powerlist.append(currentPower)\n\n # read Thorlabs powermeter\n if PDOn:\n tarray,aiV = read_mult_volt(ai_scan_rate = 1000,ai_pts = 100,min_val=-10.0,max_val=10.0)\n PDvoltlist.append(np.average(aiV))\n thorpowerlist.append(np.nan)\n lpLR.plot_live(volt_so_far, lambdalist,PDvoltlist)\n lpLR.ax1r.set_ylim([0,1.1*max(PDvoltlist)])\n elif PowerMeterOn: \n currentPMpower = PM.measure_power(737)*1e3\n thorpowerlist.append(currentPMpower)\n PDvoltlist.append(np.nan)\n lpLR.plot_live(volt_so_far, lambdalist,thorpowerlist)\n lpLR.ax1r.set_ylim([0,1.1*max(thorpowerlist)])\n else:\n thorpowerlist.append(np.nan)\n PDvoltlist.append(np.nan)\n lpLR.plot_live(volt_so_far, lambdalist,powerlist)\n lpLR.ax1r.set_ylim([0,1.1*max(powerlist)])\n\n 
mid80 = np.percentile(lambdalist,90)-np.percentile(lambdalist,10)\n if ind>=3: \n lpLR.ax1.set_ylim([np.percentile(lambdalist,10)-mid80/3,np.percentile(lambdalist,90)+mid80/3])\n \n if LFOn:\n fname = \"laser test \"+time_str+\" \" + str(ind).zfill(2)\n LFauto.set_filename(fname)\n LFauto.set_filename_increment()\n LFauto.acquire()\n data_ref = LFauto.load_acquired_data(wdir, fname)\n # fit a Lorentzian\n\n# def lorentzian_bkg_func(x_array, a0, x0, fwhm,bkg):\n# return a0 / ( 1+4*( (x_array-x0)/fwhm )**2 )+bkg\n\n init_vals = [np.amax(data_ref.y), data_ref.x[np.argmax(data_ref.y)],0.08,np.amin(data_ref.y)]\n try:\n best_vals, covar = curve_fit(lorentzian_bkg_func, data_ref.x,data_ref.y, p0=init_vals)\n yfit = lorentzian_bkg_func(data_ref.x, best_vals[0], best_vals[1],best_vals[2],best_vals[3])\n lpFP.plot_live(t0_this,aiV_this,yfit)\n lpFP.ax1.set_title(f'Center at {best_vals[1]:.3f} V')\n plt.tight_layout()\n time.sleep(0.1)\n \n lpSpec.plot_live(data_ref.x,data_ref.y,yfit)\n lpSpec.ax1.set_title(f'Center at {best_vals[1]:.3f}, fwhm {best_vals[2]:.3f}')\n \n lambdaSpecList.append(best_vals[1])\n fwhmSpecList.append(best_vals[2])\n except:\n print('Fit error')\n lambdaSpecList.append(np.nan)\n fwhmSpecList.append(np.nan)\n else:\n lambdaSpecList.append(np.nan)\n fwhmSpecList.append(np.nan)\n\n toptica_bounded_write(0)\n\n# save_data = True\n if save_data:\n data_type = 'TopticaScan'\n\n data_header=f\"\"\"\n current (mA, at 0V) = {lasercurrent}\n pot reading = {potreading}\n volt,lambda (nm-air),power (mW),FP lambda (V),FP power (V), Thorlabs power (mW), PD volt (V), lambda Spectromter (nm-air), fwhm Spectrometer (nm-air)\n \"\"\"\n data_array = np.array([volt_so_far, lambdalist,powerlist,FPlambda,FPpower,thorpowerlist,PDvoltlist,lambdaSpecList,fwhmSpecList]).T\n data_save(data_array, lpLR.fig, data_type, data_header)",
"def tipper_err(self, tipper_err_array):\r\n if self.tipper_err is not None and \\\r\n (self._tipper_err.shape != tipper_err_array.shape):\r\n raise ZError('Shape of new \"tipper_err\" array does not match old' \r\n 'new shape {0} != old shape {1}'.format(tipper_err_array.shape,\r\n self._tipper_err.shape)\r\n )\r\n\r\n # make sure the input array is of required shape\r\n if tipper_err_array is not None:\r\n if len(tipper_err_array.shape) == 3 and \\\r\n tipper_err_array.shape[1:3] == (1, 2):\r\n if tipper_err_array.dtype in ['float', 'int']:\r\n self._tipper_err = tipper_err_array\r\n\r\n assert self._tipper_err.shape == self._tipper.shape\r\n\r\n # for consistency recalculate mag and angle\r\n self.compute_mag_direction()\r\n\r\n # for consistency recalculate amplitude and phase\r\n self.compute_amp_phase()",
"def update_errors(self, errors=None):\n self.errors = np.asarray(errors) if errors is not None else np.sqrt(self.data)",
"def update_lr(self, error, last_error, lr):\n last_error = np.array(last_error).mean()\n if (error < last_error) and (lr < 1.):\n lr = lr * 1.01\n print 'growing learning rate to ', lr\n elif error >= last_error and (lr > 0.):\n lr = lr * 0.8\n print 'shrinking learning rate to ', lr\n return lr",
"def reach_gradient(self):\n\n # Use the text field to say what happened\n self.robot_arm.text = \"Not improved\"\n\n # begin homework 2 : Problem 1\n b_improved = False\n d_scl = 0.1\n d_eps = pi/10000\n # Keep trying smaller increments while nothing improves\n while d_scl > 0.0001 and b_improved == False:\n # calculate the current distance\n pt = self.robot_arm.arm_end_pt()\n dist = pow( pt[0] - self.reach_x.value(), 2) + pow( pt[1] - self.reach_y.value(), 2)\n # Try each angle in turn\n for ang in self.theta_slds:\n save_ang = ang.value()\n\n # Gradient\n ang.set_value( save_ang - d_eps )\n pt_new = self.robot_arm.arm_end_pt()\n dist_new = pow( pt_new[0] - self.reach_x.value(), 2) + pow( pt_new[1] - self.reach_y.value(), 2)\n\n ang_try = save_ang + d_scl * pi\n if (dist_new < dist):\n ang_try = save_ang - 0.99 * d_scl * pi\n\n ang.set_value( ang_try )\n pt_new = self.robot_arm.arm_end_pt()\n dist_new = pow( pt_new[0] - self.reach_x.value(), 2) + pow( pt_new[1] - self.reach_y.value(), 2)\n if (dist_new < dist):\n b_improved = True\n dist = dist_new\n self.robot_arm.text = \"Improved {} eps {}\".format(ang.name, d_scl)\n else:\n ang.set_value( save_ang )\n d_scl = d_scl / 2\n #end homework 2 : Problem 1",
"def draw_raModelErrorsRef(self):\n axe, _ = self.generateFlat(widget=self.raModelErrorsRef)\n x = self.angularPosRA\n y = self.errorRA\n p = self.pierside\n xLabel = 'RA Encoder Abs [deg]'\n yLabel = 'Error per Star [arcsec]'\n self.plotFigureFlat(axe, x, y, p, xLabel, yLabel, True, 3)\n return True",
"def draw_raModelErrors(self):\n axe, _ = self.generateFlat(widget=self.raModelErrors)\n x = self.index\n y = self.errorRA\n p = self.pierside\n xLabel = 'Star Number'\n yLabel = 'Error per Star [arcsec]'\n self.plotFigureFlat(axe, x, y, p, xLabel, yLabel, False, 3)\n return True",
"def gps_quality_check(rs_raw,rs_constants):\n #This is called in input_translators because thats where raw can be modified\n # if gps data is available, fill any gaps with ground values: \n if hasattr(rs_raw,'GPS_MSL_Alt'): \n rs_raw.GPS_MSL_Alt[np.isnan(rs_raw.GPS_MSL_Alt)] = rs_constants['lidar_altitude'] \n rs_raw.latitude[np.isnan(rs_raw.GPS_MSL_Alt)] = rs_constants['latitude']\n rs_raw.longitude[np.isnan(rs_raw.GPS_MSL_Alt)] = rs_constants['longitude'] \n\n maximumAlt = 16000.0\n if np.any(np.greater(rs_raw.GPS_MSL_Alt, maximumAlt)):\n # replace huge altitude values with a default altitude\n print 'hsrl_read_utilities: Warning - Spikes in GPS altitude!'\n rs_raw.GPS_MSL_Alt[np.greater(rs_raw.GPS_MSL_Alt, maximumAlt)] = \\\n rs_constants['lidar_altitude']\n else: #GPS is missing, assume aircraft is on the ground\n #get missing values from calvals_*****.txt file\n print 'mobile installation with missing fields' \n rs_raw.GPS_MSL_Alt = hau.T_Array(rs_constants['lidar_altitude']*np.ones(rs_raw.times.shape))\n if not hasattr(rs_raw,'latitude'):\n rs_raw.latitude = hau.T_Array(rs_constants['latitude']*np.ones(rs_raw.times.shape))\n if not hasattr(rs_raw,'longitude'):\n rs_raw.longitude = hau.T_Array(rs_constants['longitude']*np.ones(rs_raw.times.shape))\n if not hasattr(rs_raw,'pitch_angle'):\n rs_raw.pitch_angle = hau.T_Array(np.zeros(rs_raw.times.shape))\n if not hasattr(rs_raw,'roll_angle'):\n rs_raw.roll_angle = hau.T_Array(np.zeros(rs_raw.times.shape))\n \n if hasattr(rs_raw,'pitch_angle'):\n try:\n if anynan(rs_raw.pitch_angle):\n print 'Pitch angle missing----replacing with zeros'\n rs_raw.pitch_angle[np.isnan(rs_raw.pitch_angle)] = \\\n np.zeros_like(rs_raw.pitch_angle[np.isnan(rs_raw.pitch_angle)])\n rs_raw.roll_angle[np.isnan(rs_raw.roll_angle)] = \\\n np.zeros_like(rs_raw.roll_angle[np.isnan(rs_raw.roll_angle)])\n except TypeError:#not a float\n pass\n return rs_raw",
"def plot_peaks(data, ti, msvt, tot, hst):\r\n plt.figure(figsize=(15,4)) #set size of figure\r\n plt.xlabel(\"Time (ms)\") #name x axis\r\n plt.ylabel(\"Angular velocity (deg/s)\") #name y axis\r\n plt.plot(ti, data) #plot data (angular velocity) vs time\r\n \r\n #finding nearest timepoint in angular velocity data to the peaks/throughs found\r\n #this is needed because the peaks/throughs don't perfectly match the angular velocity data further in the decimals\r\n #index of this point is taken and used for plotting\r\n t1 = []\r\n \r\n for value in msvt:\r\n q = find_nearest(ti, value) #apply function, q = timestamp of peak\r\n z = np.where(ti==q) #find index of this peak \r\n g = z[0] #take list with index value from tuple z\r\n t1.append(g[0]) #take value of index and append to t1\r\n \r\n t2 = []\r\n for value in tot:\r\n q = find_nearest(ti, value) #apply function, q = timestamp of peak\r\n z = np.where(ti==q) #find index of this peak \r\n g = z[0] #take list with index value from tuple z\r\n t2.append(g[0]) #take value of index and append to t1\r\n \r\n t3 = []\r\n for value in hst:\r\n q = find_nearest(ti, value) #apply function, q = timestamp of peak\r\n z = np.where(ti==q) #find index of this peak \r\n g = z[0] #take list with index value from tuple z\r\n t3.append(g[0]) #take value of index and append to t1\r\n \r\n \r\n plt.plot(msvt, data[t1], \"rx\", \r\n markeredgewidth=3, label='max swing velocity') #put an x on all the msv maxima\r\n plt.plot(tot, data[t2], \"go\", markeredgewidth=3, label='toe off') #put an o on all the toe off maxima\r\n plt.plot(hst, data[t3], \"m+\", markeredgewidth=3, label='heel strike') #put an + on all the heel strike maxima\r\n plt.legend()\r\n return",
"def _error_in_wavelength(detector_angle, return_rot):\n # First, calculate the normal beam direction for this detector angle\n # Detector is in the horizontal plane\n beam_wanted = np.array([sin(-detector_angle[0]), 0.0, cos(detector_angle[0])])\n # Use the utility function to get the rotation matrix and wavelength\n (rot, wavelength) = crystal_calc.get_sample_rotation_matrix_to_get_beam(beam_wanted, hkl, ub_matrix, starting_rot_matrix=None)\n # Error in wavelength\n error = np.abs(wavelength - wl)\n\n # increase error if you are off the range of the goniometer.\n diff_min = detector_angle - det_angle_limits[0]\n if diff_min < 0: error += np.abs(diff_min * 10)\n diff_max = detector_angle - det_angle_limits[1]\n if diff_max > 0: error += diff_max * 10\n\n #print \"For angle\", detector_angle, \" i find it at WL \", wavelength, \" giving an error of \", error\n\n if return_rot:\n return (rot, wavelength, error)\n else:\n return error",
"def update_lamps(self):\n\t\tpass",
"def add_pol_leakage_manual(self):\n\n if self.parang_corrected == False:\n # Compute P-Jones matrices\n self.pjones_mat = np.zeros((self.Nant,self.time_unique.shape[0],2,2),dtype=complex)\n self.djones_mat = np.zeros((self.Nant,self.time_unique.shape[0],2,2),dtype=complex)\n\n for ant in range(self.Nant):\n self.djones_mat[ant,:,0,0] = 1\n self.djones_mat[ant,:,0,1] = self.leakR_real[ant]+1j*self.leakR_imag[ant]\n self.djones_mat[ant,:,1,0] = self.leakL_real[ant]+1j*self.leakL_imag[ant]\n self.djones_mat[ant,:,1,1] = 1\n\n if self.mount[ant] == 'ALT-AZ':\n self.pjones_mat[ant,:,0,0] = np.exp(-1j*self.parallactic_angle[ant,:]) # INI: opposite of feed angle i.e. parang +/- elev\n self.pjones_mat[ant,:,0,1] = 0\n self.pjones_mat[ant,:,1,0] = 0\n self.pjones_mat[ant,:,1,1] = np.exp(1j*self.parallactic_angle[ant,:])\n elif self.mount[ant] == 'ALT-AZ+NASMYTH-L':\n self.pjones_mat[ant,:,0,0] = np.exp(-1j*(self.parallactic_angle[ant,:]-self.elevation_copy_dterms[ant,:]))\n self.pjones_mat[ant,:,0,1] = 0\n self.pjones_mat[ant,:,1,0] = 0\n self.pjones_mat[ant,:,1,1] = np.exp(1j*(self.parallactic_angle[ant,:]-self.elevation_copy_dterms[ant,:]))\n elif self.mount[ant] == 'ALT-AZ+NASMYTH-R':\n self.pjones_mat[ant,:,0,0] = np.exp(-1j*(self.parallactic_angle[ant,:]+self.elevation_copy_dterms[ant,:]))\n self.pjones_mat[ant,:,0,1] = 0\n self.pjones_mat[ant,:,1,0] = 0\n self.pjones_mat[ant,:,1,1] = np.exp(1j*(self.parallactic_angle[ant,:]+self.elevation_copy_dterms[ant,:]))\n \n data_reshaped = self.data.reshape((self.data.shape[0],self.data.shape[1],2,2))\n\n for a0 in range(self.Nant):\n for a1 in range(a0+1,self.Nant):\n bl_ind = self.baseline_dict[(a0,a1)]\n time_ind = 0\n for ind in bl_ind:\n data_reshaped[ind] = np.matmul(self.djones_mat[a0,time_ind], np.matmul(self.pjones_mat[a0,time_ind], np.matmul(data_reshaped[ind], \\\n np.matmul(np.conjugate(self.pjones_mat[a1,time_ind].T), np.conjugate(self.djones_mat[a1,time_ind].T)))))\n time_ind = time_ind + 1\n\n self.data = data_reshaped.reshape(self.data.shape) \n self.save_data()\n\n elif self.parang_corrected == True:\n # Add P-Jones corruptions (parallactic angle rotation) using meqtrees\n # add_pjones(self.output_column)\n\n # Construct station-based leakage matrices (D-Jones)\n #self.pol_leak_mat = np.zeros((self.Nant,2,2),dtype=complex) # To serve as both D_N and D_C\n self.pol_leak_mat = np.zeros((self.Nant,self.time_unique.shape[0],2,2),dtype=complex)\n #self.rotation_mat = np.zeros((self.Nant,self.time_unique.shape[0],2,2),dtype=complex) # To serve as Rot(theta=parang+/-elev)\n \n # Set up D = D_N = D_C, Rot(theta = parallactic_angle +/- elevation). 
Notation following Dodson 2005, 2007.\n for ant in range(self.Nant):\n if self.mount[ant] == 'ALT-AZ':\n self.pol_leak_mat[ant,:,0,0] = 1\n self.pol_leak_mat[ant,:,0,1] = (self.leakR_real[ant]+1j*self.leakR_imag[ant])*np.exp(1j*2*(self.parallactic_angle[ant,:]))\n self.pol_leak_mat[ant,:,1,0] = (self.leakL_real[ant]+1j*self.leakL_imag[ant])*np.exp(-1j*2*(self.parallactic_angle[ant,:]))\n self.pol_leak_mat[ant,:,1,1] = 1\n\n elif self.mount[ant] == 'ALT-AZ+NASMYTH-LEFT':\n self.pol_leak_mat[ant,:,0,0] = 1\n self.pol_leak_mat[ant,:,0,1] = (self.leakR_real[ant]+1j*self.leakR_imag[ant])*np.exp(1j*2*(self.parallactic_angle[ant,:]-self.elevation_copy_dterms[ant,:]))\n self.pol_leak_mat[ant,:,1,0] = (self.leakL_real[ant]+1j*self.leakL_imag[ant])*np.exp(-1j*2*(self.parallactic_angle[ant,:]-self.elevation_copy_dterms[ant,:]))\n self.pol_leak_mat[ant,:,1,1] = 1\n \n elif self.mount[ant] == 'ALT-AZ+NASMYTH-RIGHT':\n self.pol_leak_mat[ant,:,0,0] = 1\n self.pol_leak_mat[ant,:,0,1] = (self.leakR_real[ant]+1j*self.leakR_imag[ant])*np.exp(1j*2*(self.parallactic_angle[ant,:]+self.elevation_copy_dterms[ant,:]))\n self.pol_leak_mat[ant,:,1,0] = (self.leakL_real[ant]+1j*self.leakL_imag[ant])*np.exp(-1j*2*(self.parallactic_angle[ant,:]+self.elevation_copy_dterms[ant,:]))\n self.pol_leak_mat[ant,:,1,1] = 1\n\n # Save to external file as numpy array\n # np.save(II('$OUTDIR')+'/pol_leakage', self.pol_leak_mat)\n\n data_reshaped = self.data.reshape((self.data.shape[0],self.data.shape[1],2,2))\n\n for a0 in range(self.Nant):\n for a1 in range(a0+1,self.Nant):\n bl_ind = self.baseline_dict[(a0,a1)]\n time_ind = 0\n for ind in bl_ind:\n data_reshaped[ind] = np.matmul(self.pol_leak_mat[a0,time_ind], np.matmul(data_reshaped[ind], \\\n np.conjugate(self.pol_leak_mat[a1,time_ind].T)))\n time_ind = time_ind + 1\n \n self.data = data_reshaped.reshape(self.data.shape) \n self.save_data()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Read in the bandpass info from an ASCII table, spline-interpolate it onto the MS frequencies, and apply it to the data
|
def bandpass_correct(self):
info("Applying scalar B-Jones amplitudes")
# Read in the file
bjones_inp = np.loadtxt(self.bandpass_table,dtype=str)
self.bpass_input_freq = bjones_inp[0][1:].astype(np.float64)
self.bpass_input_freq *= 1e9 # convert from GHz to Hz
self.bjones_ampl = bjones_inp[1:,1:].astype(np.float64)
# Interpolate between the frequencies given in the bandpass table
if self.bpass_input_freq[0] > self.chan_freq[0] or self.bpass_input_freq[-1] < self.chan_freq[-1]:
warn("Input frequencies out of range of MS frequencies. Extrapolating in some places.")
bjones_interpolated=np.zeros((self.Nant,self.chan_freq.shape[0]))
for ant in range(self.Nant):
spl = ius(self.bpass_input_freq, self.bjones_ampl[ant],k=self.bandpass_freq_interp_order)
bjones_interpolated[ant] = spl(self.chan_freq)
# apply the B-Jones terms by iterating over baselines
for a0 in range(self.Nant):
for a1 in range(a0+1,self.Nant):
for msfreq_ind in range(self.chan_freq.shape[0]):
bl_ind = self.baseline_dict[(a0,a1)]
self.data[bl_ind,msfreq_ind,:] *= bjones_interpolated[a0,msfreq_ind] * bjones_interpolated[a1,msfreq_ind]
self.save_data()
### plot bandpasses
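A minimal standalone sketch of the spline step above (editorial illustration, not part of the dataset row): it assumes the `ius` used in the snippet is scipy.interpolate.InterpolatedUnivariateSpline, and the frequencies, amplitudes and dummy visibility below are hypothetical.

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

n_ant = 3
table_freq = np.array([86.0, 88.0, 90.0, 92.0]) * 1e9    # bandpass-table frequencies (Hz)
table_ampl = np.array([[1.00, 1.02, 0.98, 1.01],          # one amplitude row per antenna
                       [0.97, 1.00, 1.03, 0.99],
                       [1.01, 0.99, 1.00, 1.02]])
chan_freq = np.linspace(86.5e9, 91.5e9, 32)               # MS channel frequencies (Hz)

# Spline each antenna's tabulated amplitudes onto the MS channels
bjones = np.array([InterpolatedUnivariateSpline(table_freq, table_ampl[a], k=3)(chan_freq)
                   for a in range(n_ant)])

# Scale a dummy visibility on baseline (0, 1): V'(nu) = B_0(nu) * B_1(nu) * V(nu)
vis = np.ones((chan_freq.size, 4), dtype=complex)          # (channel, correlation)
vis *= (bjones[0] * bjones[1])[:, None]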
|
[
"def read_esa_predicts(filename,frequency_hz=401.585625e6):\n\n with open(filename) as f:\n lines = f.readlines()\n\n data_start=False\n lineskip=0\n\n m={}\n k=0\n c=299792458; # Speed of light\n\n for line in lines:\n if not data_start and line.find('KM') > 0:\n data_start=True\n # Dict for data to go into\n n=len(lines)-lineskip-1\n logging.debug(\"Reserving for %d items\"%(n))\n m['timestr']=np.zeros([n],dtype='|S23')\n m['doppler_hz']=np.zeros([n],dtype=float)\n m['doppler_rate_hz_s']=np.zeros([n],dtype=float)\n elif data_start:\n d=line.split()\n if len(d) == 11:\n timestr=d[0].replace('/','-')\n timestr+='T'\n timestr+=d[1]\n timestr+=d[2]\n range_m = float(d[3])*1000;\n range_rate_m_s = float(d[4])*1000;\n range_rate_rate_m_s_s = float(d[5])\n elif len(d) == 10:\n timestr=d[0].replace('/','-')\n timestr+='T'\n timestr+=d[1]\n range_m = float(d[2])*1000;\n range_rate_m_s = float(d[3])*1000;\n range_rate_rate_m_s_s = float(d[4])\n else:\n logging.error(\"Line not understood %s\"%(d))\n sys.exit(-1)\n\n m['timestr'][k] = timestr\n m['doppler_hz'][k] = -range_rate_m_s/c*frequency_hz\n m['doppler_rate_hz_s'][k] = -range_rate_rate_m_s_s/c*frequency_hz\n k+=1\n else:\n lineskip+=1\n continue\n\n # Convert time string to timestamp\n m['utc']=Time(m['timestr'])\n\n logging.debug('Done reading doppler file %s'%(filename))\n logging.info('Predicts filename : %s'%(filename))\n logging.info(' Start time : %s'%(m['utc'][0]))\n logging.info(' End time : %s'%(m['utc'][-1]))\n logging.info(' Duration : %s days'%(m['utc'][-1]-m['utc'][0]))\n logging.info(' Start model : %0.1f Hz, %0.3f Hz/s'\\\n %(m['doppler_hz'][0],m['doppler_rate_hz_s'][0]))\n logging.info(' End model : %0.1f Hz, %0.3f Hz/s'\\\n %(m['doppler_hz'][-1],m['doppler_rate_hz_s'][-1]))\n\n return m",
"def extracted_in(in_file, meta_dict, print_iterator, test):\n\n ## Open the fits file and load the data\n try:\n fits_hdu = fits.open(in_file)\n except IOError:\n print(\"\\tERROR: File does not exist: %s\" % in_file)\n exit()\n\n data = fits_hdu[1].data\n fits_hdu.close()\n\n ## Initializations\n whole_lc = psd_lc.Lightcurve(n_bins=meta_dict['n_bins'])\n n_seg = 0\n exposure = 0\n dt_whole = np.array([])\n df_whole = np.array([])\n i = 0 # start of bin index to make segment of data for inner for-loop\n j = meta_dict['n_bins'] # end of bin index to make segment of data for\n # inner for-loop\n\n ## Loop through segments of the data\n while j <= len(data.field(1)): ## while we haven't reached the end of the\n ## file\n\n n_seg += 1\n start_time = data.field(0)[i]\n end_time = data.field(0)[j-1]\n\n ## Extract the second column of 'data' and assigns it to 'rate'.\n rate = data[i:j].field(1)\n\n power_segment, mean_rate_segment = make_ps(rate)\n assert int(len(power_segment)) == meta_dict['n_bins'], \"ERROR: \"\\\n \"Something went wrong in make_ps. Length of power spectrum\"\\\n \" segment != n_bins.\"\n\n dt_seg = (end_time - start_time) / float(meta_dict['n_bins'])\n df_seg = 1.0 / (meta_dict['n_bins'] * dt_seg)\n\n ## Compute variance and rms of the positive-frequency power in the\n ## reference band. Only keep segments where the variance > 0.\n absrms_pow = raw_to_absrms(power_segment[0:meta_dict['n_bins']/2+1],\n mean_rate_segment, meta_dict['n_bins'], dt_seg, noisy=True)\n\n var, rms = var_and_rms(absrms_pow, df_seg)\n\n if var >= 0.0:\n whole_lc.power += power_segment\n whole_lc.mean_rate += mean_rate_segment\n\n exposure += end_time - start_time\n\n dt_whole = np.append(dt_whole, dt_seg)\n df_whole = np.append(df_whole, df_seg)\n\n if n_seg % print_iterator == 0:\n print(\"\\t\", n_seg)\n\n if (test == True) and (n_seg == 1): # For testing\n break\n\n ## Clear loop variables for the next round\n rate = None\n power_segment = None\n mean_rate_segment = None\n\n ## Increment the counters and indices\n i = j\n j += meta_dict['n_bins']\n ## Since the for-loop goes from i to j-1 (since that's how the range\n ## function works) it's ok that we set i=j here for the next round.\n ## This will not cause double-counting rows or skipping rows.\n\n return whole_lc, n_seg, exposure, dt_whole, df_whole",
"def measureLines(self):\n #create a dictionary for all the corresponding wavelengths of the absorption features\n indexDict = {}\n #list the indices for each important absorption feature: numlo, numhi, denomlo, denomhi \n # THESE ARE ALL IN VACUUM and angstroms!!\n indexDict['CaK'] = [3924.8111, 3944.8163, 3944.8163, 3954.8189]\n indexDict['Cadel'] = [4087.8536, 4117.8618, 4137.8670, 4177.1771]\n indexDict['CaI4217'] = [4217.8880, 4237.8932, 4237.8932, 4257.1981]\n indexDict['Gband'] = [4286.2057, 4316.2136, 4261.1992, 4286.2057]\n indexDict['Hgam'] = [4333.7182, 4348.7222, 4356.2242, 4371.2281] \n indexDict['FeI4383'] = [4379.8305, 4389.8331, 4356.2242, 4371.2281]\n indexDict['FeI4404'] = [4401.0358, 4411.0384, 4416.0397, 4426.0423]\n indexDict['blue'] = [6101.6887, 6301.7424, 4501.2624, 4701.3153]\n indexDict['Hbeta'] = [4848.3542, 4878.3622, 4818.3463, 4848.3542]\n indexDict['MgI'] = [5154.1357, 5194.1463, 5101.4214, 5151.4348]\n indexDict['NaD'] = [5881.6297, 5906.6364, 5911.6378, 5936.6445]\n indexDict['CaI6162'] = [6151.7021, 6176.7088, 6121.6941, 6146.7008]\n indexDict['Halpha'] = [6549.8090, 6579.8171, 6584.8184, 6614.8265]\n indexDict['CaH3'] = [6961.9198, 6991.9279, 7043.9419, 7047.9430]\n indexDict['TiO5'] = [7127.9646, 7136.9670, 7043.9419, 7047.9430]\n indexDict['VO7434'] = [7432.0465, 7472.0573, 7552.0789, 7572.0843]\n indexDict['VO7445'] = [7352.0249, 7402.0384, 0.56250000, 7512.0681, 7562.0816, 0.43750000, 7422.0438, 7472.0573]\n indexDict['VO-B'] = [7862.1626, 7882.1680, 0.50000000, 8082.2220, 8102.2274, 0.50000000, 7962.1896, 8002.2004]\n indexDict['VO7912'] = [7902.1734, 7982.1950, 8102.2274, 8152.2409]\n indexDict['Rb-B'] = [7924.7796, 7934.7823, 0.50000000, 7964.7904, 7974.7931, 0.50000000, 7944.7850, 7954.7877]\n indexDict['NaI'] = [8179.2482, 8203.2547, 8153.2412, 8177.2477]\n indexDict['TiO8'] = [8402.3085, 8417.3125, 8457.3233, 8472.3274]\n indexDict['TiO8440'] = [8442.3193, 8472.3274, 8402.3085, 8422.3139]\n indexDict['Cs-A'] = [8498.4341, 8508.4368, 0.50000000, 8538.4449, 8548.4476, 0.50000000, 8518.4395, 8528.4422]\n indexDict['CaII8498'] = [8485.3309, 8515.3390, 8515.3390, 8545.3471] \n indexDict['CrH-A'] = [8582.3571, 8602.3626, 8623.3682, 8643.3736]\n indexDict['CaII8662'] = [8652.3761, 8677.3828, 8627.3693, 8652.3761]\n indexDict['FeI8689'] = [8686.3853, 8696.3880, 8666.3799, 8676.3826]\n indexDict['color-1'] = [8902.4437, 9102.4979, 7352.0249, 7552.0789]\n indexDict['another_color'] = [7352.0249, 7552.0789, 6101.6887, 6301.7424]\n \n for index in indexDict:\n #check if we should use the single or mutliple region version\n if len(index) == 4: \n numeratorIndex = np.where( self._wavelength > index[0] and self._wavelength < index[1])\n denominatorIndex = np.where( self._wavelength > index[2] and self._wavelength < index[3])\n \n \n elif len(index) == 8: \n \n \n \n \n print('Not implemented')\n\n return lineIndices",
"def interpolated_freq(self, freq):\n i = int(np.round(freq/self.df))\n iinitial = i - 10 if i > 10 else 0\n ifinal = i + 10 if len(self.freq) - i < 10 else len(self.freq)\n return scipy.interpolate.InterpolatedUnivariateSpline(self.freq[iinitial:ifinal], np.abs(self.fdata[iinitial:ifinal]))(freq)",
"def data_extraction(dhdl, freq):\n\n f = open(dhdl,'r')\n lines = f.readlines()\n f.close()\n\n if freq is None:\n freq = 1\n\n state, time = [], [] # units of time: ps\n i = 0 # line number (excluding metatexts)\n for l in lines:\n if l[0] != '#' and l[0] != '@':\n i += 1\n if i % freq == 0:\n time.append(float(l.split()[0]))\n state.append(int(float(l.split()[1])))\n state = np.array(state)\n time = np.array(time) / 1000 # units: ns\n\n return time, state",
"def read_data(pna, data, points, outputfile, power):\n # date = time.strftime('%Y-%m-%d', time.localtime())\n path1 = os.getcwd() + '/save/' + f'/{time.strftime(\"%Y-%m-%d\")}/'\n if not os.path.exists(path1):\n os.mkdir(path1)\n\n # read in frequency\n freq = np.linspace(float(pna.query('SENSe1:FREQuency:START?')), float(pna.query('SENSe1:FREQuency:STOP?')), points)\n\n # read in phase\n if data == 'mag':\n pna.write('CALCulate1:FORMat PHASe')\n else:\n pna.write('CALCulate1:FORMat REAL')\n re = pna.query_ascii_values('CALCulate1:DATA? FDATA', container=np.array)\n\n # read in mag\n if data == 'mag':\n pna.write('CALCulate1:FORMat MLOG')\n else:\n pna.write('CALCulate1:FORMat IMAGinary')\n im = pna.query_ascii_values('CALCulate1:DATA? FDATA', container=np.array)\n\n # open output file and put data points into the file\n file1 = path1 + outputfile[0:-4] + '_' + str(power) + 'dB' + time.strftime('%H-%M-%S', time.localtime()) + '.csv'\n file = open(file1, \"a\")\n count = 0\n\n for i in freq:\n file.write(str(re[count]) + ' ' + str(im[count]) + '\\n')\n count = count + 1\n file.close()\n\n return file1",
"def compute_gsm_splines(self):\n\n print \"computing GSM for Pol %s\" % self.pol\n drift_data, drift_lsts, drift_freqs = self.data, self.lsts, self.freqs\n \n # Extend to full 24 hours then form interpolation spline\n nd = np.zeros((13, 145))\n nd[:, :144] = drift_data\n nd[:, 144] = drift_data[:, 0]\n drift_lsts = np.append(drift_lsts, drift_lsts[0]+24)\n drift_lsts[0] = 0.0\n drift_data = nd\n \n fits = [ [] for ii in range(self.npol + 1)]\n #print fits\n for ii in range(len(drift_lsts)):\n\n fit = self.curve_fit(self.freqs, drift_data[:, ii])\n #if not ii%10:\n #print fit\n \n for jj in range(len(fit)):\n fits[jj].append(fit[jj])\n \n self.gsm_pols = np.array(fits)\n \n self.gsm_spline = []\n for kk in range(self.gsm_pols.shape[0]):\n self.gsm_spline.append(interpolate.interp1d(drift_lsts, self.gsm_pols[kk, :], kind='cubic'))",
"def read(self, poleFile, fbFile):\n text = open(poleFile, 'rU')\n line = text.readline()\n while line.split(':')[0] != 'operation lpc_poles':\n line = text.readline()\n text.readline() \n self.__nx = int(text.readline().rstrip().split(' ')[1])\n self.__dx = 1 / float(text.readline().rstrip().split(' ')[1])\n self.__x1 = float(text.readline().rstrip().split(' ')[1])\n # this line contains the date of the measurement\n text.readline()\n for i in range(self.__nx):\n line = text.readline()\n # replace multiple spaces with a single space\n line = pat1.sub(' ', line)\n # delete the space at the beginning of the line\n line = pat2.sub('', line)\n values = line.strip().split(' ')\n n_poles = (int(values[0]) - 2) / 2\n P = [float(x) for x in values[3:3+n_poles]]\n PB = [float(x) for x in values[3+n_poles:]]\n self.__poles.append(P)\n self.__pole_bandwidths.append(PB)\n text.close()\n\n p = os.popen('fea_print ' + STYLE_FILE + ' ' + fbFile)\n lines = p.readlines()\n if len(lines) < self.__nx:\n print filename\n print \"ERROR: number of samples from .pole file (%d) not equal to output of fea_print (%s)\" % (self.__nx, len(lines))\n sys.exit()\n for i in range(self.__nx):\n time = i * self.__dx + self.__x1\n F = []\n B = []\n fields = lines[i].rstrip('\\n').split('\\t')\n # for now, it's hardcoded into STYLE_FILE that we're asking ESPS to extract three formants\n F.append(int(fields[0]))\n F.append(int(fields[1]))\n F.append(int(fields[2]))\n B.append(int(fields[3]))\n B.append(int(fields[4]))\n B.append(int(fields[5]))\n self.__times.append(time)\n self.__formants.append(F)\n self.__bandwidths.append(B)",
"def test_2d_freq_tp():\n dic, data = ng.pipe.read(NMRPIPE_2D_FREQ_TP)\n assert data.shape == (8, 2)\n assert data.dtype == 'float32'\n assert data[0, 0] == 1.\n assert data[0, 1] == 1.\n assert data[1, 0] == 2.\n check_simple_roundtrip(dic, data)\n check_ppm_limits(dic, data, 0, [54.70, -32.80])\n check_ppm_limits(dic, data, 1, [179.00, 99.00])",
"def treat_channel_data(index):\n ch = traces[index-1]\n freq = self.format_and_eval_string(getattr(self,\n 'freq_%d' % index))*1e6\n\n # Remove points that do not belong to a full period.\n samples_per_period = int(sampling_rate/freq)\n samples_per_trace = int(ch.shape[-1])\n if (samples_per_trace % samples_per_period) != 0:\n extra = samples_per_trace % samples_per_period\n ch = ch.T[:-extra].T\n\n if not avg_bef_demod:\n ntraces, nsamples = np.shape(ch)\n ch = ch.reshape(int(ntraces/num_loop), num_loop, nsamples)\n else:\n nsamples = np.shape(ch)[0]\n phi = np.linspace(0, 2*np.pi*freq*((nsamples-1)*2e-9), nsamples)\n cosin = np.cos(phi)\n sinus = np.sin(phi)\n # The mean value of cos^2 is 0.5 hence the factor 2 to get the\n # amplitude.\n if not avg_bef_demod:\n ch_i = 2*np.mean(ch*cosin, axis=2)\n ch_q = 2*np.mean(ch*sinus, axis=2)\n ch_i_av = ch_i.T[0] if not avg_aft_demod else np.mean(ch_i,\n axis=0)\n ch_q_av = ch_q.T[0] if not avg_aft_demod else np.mean(ch_q,\n axis=0)\n else:\n ch_i = None\n ch_q = None\n ch_i_av = 2*np.mean(ch*cosin)\n ch_q_av = 2*np.mean(ch*sinus)\n self.write_in_database('Ch%d_I' % index, ch_i_av)\n self.write_in_database('Ch%d_Q' % index, ch_q_av)\n\n if getattr(self, 'ch%d_trace' % index):\n ch_av = ch if not avg_aft_demod else np.mean(ch, axis=0)\n self.write_in_database('Ch%d_trace' % index, ch_av)\n\n return freq, cosin, sinus, ch_i, ch_q",
"def test_4d_freq_stream():\n dic, data = ng.pipe.read_lowmem(NMRPIPE_4D_FREQ_STREAM)\n assert data.shape == (2, 3, 4, 5)\n assert data.dtype == 'float32'\n assert data[0, 0, 0, 0] == 1.\n assert data[0, 0, 0, 1] == 1.\n assert data[0, 0, 1, 0] == 1.\n assert data[0, 1, 0, 0] == 1.\n assert data[1, 0, 0, 0] == 2.\n check_ppm_limits(dic, data, 0, [180.00, 80.00])\n check_ppm_limits(dic, data, 1, [186.67, 53.33])\n check_ppm_limits(dic, data, 2, [179.00, 59.00])\n check_ppm_limits(dic, data, 3, [44.70, -35.30])\n check_simple_roundtrip(dic, data, lowmem=True)",
"def _interpolateFourierData(self) -> np.ndarray:\n res = np.empty(\n (\n *self._arrInputFourierData.shape[:2],\n self._operatingFrequencies.shape[0],\n *self._arrInputFourierData.shape[3:],\n ),\n dtype=cDtype,\n )\n\n # evaluate the spline\n for ii3 in range(res.shape[3]):\n for ii4 in range(res.shape[4]):\n for ii1 in range(res.shape[1]):\n for ii0 in range(res.shape[0]):\n # this might trigger spline generation, since\n # self.frequencySplines is a cacheable property.\n res[ii0, ii1, :, ii3, ii4] = np.array(\n self.frequencySplines[ii0, ii1, 0, ii3, ii4](\n self._operatingFrequencies\n )\n ) + 1j * np.array(\n self.frequencySplines[ii0, ii1, 1, ii3, ii4](\n self._operatingFrequencies\n )\n )\n return res",
"def Interpolate(x, y, data):\n\n widthToUse = data.shape[1]\n heightToUse = data.shape[0]\n\n ix=numba.int32(x)\n iy=numba.int32(y)\n\n xIndex = np.zeros((4,), dtype=numba.int32)\n yIndex = np.zeros((4,), dtype=numba.int32)\n\n# Set X indexes\n# p is the index of the rightmost influencing spline\n p = (ix + 2) if (0.0 <= x) else (ix + 1)\n for k in range(4):\n xIndex[k] = -1 if (p<0 or p>=widthToUse) else p\n p -= 1\n\n# Set Y indexes\n p = (iy + 2) if (0.0 <= y) else (iy + 1)\n for k in range(4):\n yIndex[k] = -1 if (p<0 or p>=heightToUse) else p\n p -= 1\n\n\n# Compute how much the sample depart from an integer position\n# [ conditional because int rounds down for positive numbers and up for negative numbers ]\n\n ex = x - ((ix) if (0.0 <= x) else (ix - 1))\n ey = y - ((iy) if (0.0 <= y) else (iy - 1))\n\n xWeight = np.zeros((4,), dtype=numba.float64)\n yWeight = np.zeros((4,), dtype=numba.float64)\n\n\n# Set X weights for the image and derivative interpolation\n for (weight, e) in [(xWeight, ex), (yWeight, ey)]:\n s = 1.0 - e\n weight[0] = 0.5 * e * e * e / 3.0 \n weight[1] = 2.0 / 3.0 - (2.0 - s) * 0.5 * s * s\n weight[2] = 2.0 / 3.0 - (2.0 - e) * 0.5 * e * e \n weight[3] = 0.5 * s * s * s / 3.0 \n\n\n\n ival = 0.0\n for j in range(4):\n s = 0.0\n iy=yIndex[j]\n if iy != -1:\n for i in range(4):\n ix=xIndex[i]\n if ix!=-1:\n s += xWeight[i]*data[iy][ix]\n ival+=yWeight[j] * s\n return ival",
"def __apply_passband(self,order,passband_tab):\n\n # Apply grism sensitibity to filter... i.e. use filter as wavelength basis\n fs = interp1d_picklable(self.SENS_data[order][0],self.SENS_data[order][1],bounds_error=False,fill_value=0.)\n\n xs = []\n ys = []\n overlap = 0\n for i,l in enumerate(np.array(passband_tab[\"col1\"])):\n xs.append(l)\n ys.append(passband_tab[\"col2\"][i] * fs(l))\n if fs(l)>0:\n overlap = 1\n if overlap==0:\n print \"Sensitivity and filter passband do not ovelap. Check units...\"\n \n self.SENS_data[order][1] = np.asarray(ys)\n self.SENS_data[order][0] = np.asarray(xs)\n\n self.SENS[order] = interp1d_picklable(self.SENS_data[order][0],self.SENS_data[order][1],bounds_error=False,fill_value=0.)\n\n return",
"def test_1d_freq():\n dic, data = ng.pipe.read(NMRPIPE_1D_FREQ)\n assert data.shape == (16, )\n assert data.dtype == 'float32'\n assert data[0].real == 1.\n assert data[1].real == 2.\n check_simple_roundtrip(dic, data)\n check_ppm_limits(dic, data, 0, [149.00, 55.25])",
"def test_2d_freq():\n dic, data = ng.pipe.read(NMRPIPE_2D_FREQ)\n assert data.shape == (2, 8)\n assert data.dtype == 'float32'\n assert data[0, 0] == 1.\n assert data[0, 1] == 2.\n assert data[1, 0] == 1.\n check_simple_roundtrip(dic, data)\n check_ppm_limits(dic, data, 0, [179.00, 99.00])\n check_ppm_limits(dic, data, 1, [54.70, -32.80])",
"def get_spectrum(day):\n # First, get *all* of the data\n tel, freq, days, flux, eflux_form, eflux_sys = get_data_all()\n print(day)\n # Don't generate eflux_tot here,\n # because the right thing to do depends on the telescope\n\n # The way we interpolate depends on the telescope.\n # For ATCA data, you can assume that the data is rising as a power law\n # in log-log space.\n # The frequencies are 5.5, 9, 16.7, 21.2, and 34 GHz.\n # ignore upper limits\n choose_tel = np.logical_and(tel == 'ATCA', eflux_form > 0)\n ufreq = np.unique(freq[choose_tel])\n nus = [] # which frequencies to keep\n ms = [] # power-law fit\n bs = [] # power-law fit\n \n for val in ufreq:\n # fit a power law in log-log space\n choose = np.logical_and(choose_tel, freq==val)\n # no upper limits\n\n # only bother if there's >1 point at this frequency\n if sum(choose) > 1:\n nus.append(val)\n # if you allow weights, then the 34 GHz points look really weird\n m,b = np.polyfit(\n np.log10(days[choose]), np.log10(flux[choose]), deg=1)#,\n #w=1/eflux_tot[choose]**2)\n ms.append(m)\n bs.append(b)\n \n # Now, interpolate for that day to get the spectrum\n spec = []\n for ii,val in enumerate(nus):\n # Get light curve for that day\n spec.append(10**(ms[ii] * np.log10(day) + bs[ii]))\n\n # OK, \"nus\" and \"spec\" have been updated for the ATCA band!\n\n # Next: SMA.\n # This is much more complicated, since the emission is not self-absorbed\n # and the frequencies are not consistent.\n \n # I think the right thing to do is to interpolate the data\n # for each frequency...\n\n # The frequencies with by far the most data are 215.5 GHz and 231.5 GHz\n # And the one with definitively the most data is 231.5\n\n # So I think the right thing to do is show the 231.5 GHz light curve.\n # For every day that wasn't observed with the 231.5 GHz receiver,\n # interpolate the spectrum if you can\n\n # the higher frequencies are more difficult.\n # on each day, there are at least two measurements\n # that span 345 GHz.\n # so, on each day interpolate to estimate the flux at 345 GHz.\n tel, freq, days, flux, eflux_form, eflux_sys = get_data_all()\n eflux_tot = np.sqrt(eflux_form**2 + eflux_sys**2)\n\n choose_tel = tel == 'SMA' \n d = [] # day\n f_lo = [] # interpolated flux at 231.5 GHz\n f_hi = [] # interpolated flux at 345 GHz\n uday = np.unique(days)\n\n for ii,val in enumerate(uday):\n choose = np.logical_and(choose_tel, days==val)\n success = False # at least one point added\n\n # lower bands\n keep_flux = flux[choose][freq[choose] == 231.5]\n if len(keep_flux) == 1:\n # Then you can save the value directly\n success = True\n f_lo.append(keep_flux[0])\n elif len(keep_flux) > 1:\n print(\"more than one point on a given day?\")\n\n else:\n # Then you can try to interpolate\n try:\n xfit = freq[choose]\n yfit = flux[choose]\n order = np.argsort(xfit)\n out = np.interp(231.5, xfit[order], yfit[order])\n f_lo.append(out)\n success = True\n except:\n # don't extrapolate\n pass\n\n # upper bands\n keep_flux = flux[choose][freq[choose] == 345]\n if len(keep_flux) == 1:\n # Then you can save the value directly\n f_hi.append(keep_flux[0])\n success = True\n else:\n # Then you can try to interpolate\n try:\n xfit = freq[choose]\n yfit = flux[choose]\n order = np.argsort(xfit)\n out = np.interp(345, xfit[order], yfit[order])\n f_hi.append(out)\n success = True\n except:\n # don't extrapolate\n pass\n if success:\n d.append(val)\n\n # Now, interpolate for that day to get the two-point spectrum\n nus.append(231.5)\n d = np.array(d)\n f_lo = 
np.array(f_lo)\n f_hi = np.array(f_hi)\n order = np.argsort(d)\n spec.append(np.interp(day, d[order], f_lo[order]))\n nus.append(345)\n spec.append(np.interp(day, d[order], f_hi[order]))\n\n # Finally: ALMA\n # For any ALMA points, you should just return the value on the day\n # if it exists.\n # Nothing to interpolate.\n choose = np.logical_and(days == day, tel == 'ALMA')\n for ii,nuval in enumerate(freq[choose]):\n nus.append(nuval)\n spec.append(flux[choose][ii])\n\n return np.array(nus), np.array(spec)",
"def parsepcalfile(infile,band_tone_sel=()):\n\n pcalvalues = {}\n pcalids = []\n times = numpy.zeros(0)\n\n for line in infile:\n line = line.split()\n\n if (difxVersion == 240) and line[0]=='#':\n continue\n\n # Decode the information in the current line\n if (difxVersion == 240):\n # line = ['KY', '57092.6388948', '0.0000119', '1', '8', '16', <pcal data>]\n station = line[0]\n mjd = float(line[1])\n tint = float(line[2])*86400.0 \n dstream = max(int(line[3]), 1)\n nsubband = max(int(line[4]), 1)\n ntones = int(line[5])\n\n # line = ..., '21997' 'R' '-2.03274e-05' '9.69250e-05', ...]\n tone = line[6:]\n vals_per_tone = 4 # freq pol re im\n\n times = numpy.append(times, [mjd])\n if len(band_tone_sel)==0:\n selected = [(b,t) for b in range(nsubband) for t in range(ntones)]\n else:\n selected = band_tone_sel\n\n # Pick selected tones from current PCal line\n npol = 1 # in DiFX post-2.4.0 trunk, dstream# replaces npol in the text file... \n for pol in range(npol):\n for (band,tonenr) in selected:\n if (tonenr >= ntones) or (band >= nsubband):\n continue\n i = vals_per_tone * (pol*(nsubband/npol)*ntones + band*ntones + tonenr)\n pc = tone[i:(i+vals_per_tone)]\n if (pc[0] == '-1') or (pc[0] == '0'):\n continue\n\n id = pc[0] + pc[1] # + ' tone ' + str(tonenr)\n if not(id in pcalvalues):\n pcalvalues[id] = numpy.zeros(0)\n pcalids.append(id)\n print ('New band added: %s' % (id))\n pcalvalues[id] = numpy.append(pcalvalues[id], [float(pc[2]) + 1j*float(pc[3])])\n\n pcaldata = (pcalvalues,pcalids,times,tint)\n return pcaldata",
"def EStokTP_freqs(lines): \n lines = lines.strip('\\n')\n lines = lines.split('[1/cm]')[1].split('Zero')[0] \n lines = lines.split()\n nfreqs = lines[0]\n freqs = lines[1:]\n freqs = np.array(map(float, freqs))\n freqs = np.sort(freqs)[::-1]\n return freqs.tolist()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Add constant station-based polarization leakage (D-Jones term)
|
def add_pol_leakage_manual(self):
if self.parang_corrected == False:
# Compute P-Jones matrices
self.pjones_mat = np.zeros((self.Nant,self.time_unique.shape[0],2,2),dtype=complex)
self.djones_mat = np.zeros((self.Nant,self.time_unique.shape[0],2,2),dtype=complex)
for ant in range(self.Nant):
self.djones_mat[ant,:,0,0] = 1
self.djones_mat[ant,:,0,1] = self.leakR_real[ant]+1j*self.leakR_imag[ant]
self.djones_mat[ant,:,1,0] = self.leakL_real[ant]+1j*self.leakL_imag[ant]
self.djones_mat[ant,:,1,1] = 1
if self.mount[ant] == 'ALT-AZ':
self.pjones_mat[ant,:,0,0] = np.exp(-1j*self.parallactic_angle[ant,:]) # INI: opposite of feed angle i.e. parang +/- elev
self.pjones_mat[ant,:,0,1] = 0
self.pjones_mat[ant,:,1,0] = 0
self.pjones_mat[ant,:,1,1] = np.exp(1j*self.parallactic_angle[ant,:])
elif self.mount[ant] == 'ALT-AZ+NASMYTH-L':
self.pjones_mat[ant,:,0,0] = np.exp(-1j*(self.parallactic_angle[ant,:]-self.elevation_copy_dterms[ant,:]))
self.pjones_mat[ant,:,0,1] = 0
self.pjones_mat[ant,:,1,0] = 0
self.pjones_mat[ant,:,1,1] = np.exp(1j*(self.parallactic_angle[ant,:]-self.elevation_copy_dterms[ant,:]))
elif self.mount[ant] == 'ALT-AZ+NASMYTH-R':
self.pjones_mat[ant,:,0,0] = np.exp(-1j*(self.parallactic_angle[ant,:]+self.elevation_copy_dterms[ant,:]))
self.pjones_mat[ant,:,0,1] = 0
self.pjones_mat[ant,:,1,0] = 0
self.pjones_mat[ant,:,1,1] = np.exp(1j*(self.parallactic_angle[ant,:]+self.elevation_copy_dterms[ant,:]))
data_reshaped = self.data.reshape((self.data.shape[0],self.data.shape[1],2,2))
for a0 in range(self.Nant):
for a1 in range(a0+1,self.Nant):
bl_ind = self.baseline_dict[(a0,a1)]
time_ind = 0
for ind in bl_ind:
data_reshaped[ind] = np.matmul(self.djones_mat[a0,time_ind], np.matmul(self.pjones_mat[a0,time_ind], np.matmul(data_reshaped[ind], \
np.matmul(np.conjugate(self.pjones_mat[a1,time_ind].T), np.conjugate(self.djones_mat[a1,time_ind].T)))))
time_ind = time_ind + 1
self.data = data_reshaped.reshape(self.data.shape)
self.save_data()
elif self.parang_corrected == True:
# Add P-Jones corruptions (parallactic angle rotation) using meqtrees
# add_pjones(self.output_column)
# Construct station-based leakage matrices (D-Jones)
#self.pol_leak_mat = np.zeros((self.Nant,2,2),dtype=complex) # To serve as both D_N and D_C
self.pol_leak_mat = np.zeros((self.Nant,self.time_unique.shape[0],2,2),dtype=complex)
#self.rotation_mat = np.zeros((self.Nant,self.time_unique.shape[0],2,2),dtype=complex) # To serve as Rot(theta=parang+/-elev)
# Set up D = D_N = D_C, Rot(theta = parallactic_angle +/- elevation). Notation following Dodson 2005, 2007.
for ant in range(self.Nant):
if self.mount[ant] == 'ALT-AZ':
self.pol_leak_mat[ant,:,0,0] = 1
self.pol_leak_mat[ant,:,0,1] = (self.leakR_real[ant]+1j*self.leakR_imag[ant])*np.exp(1j*2*(self.parallactic_angle[ant,:]))
self.pol_leak_mat[ant,:,1,0] = (self.leakL_real[ant]+1j*self.leakL_imag[ant])*np.exp(-1j*2*(self.parallactic_angle[ant,:]))
self.pol_leak_mat[ant,:,1,1] = 1
elif self.mount[ant] == 'ALT-AZ+NASMYTH-LEFT':
self.pol_leak_mat[ant,:,0,0] = 1
self.pol_leak_mat[ant,:,0,1] = (self.leakR_real[ant]+1j*self.leakR_imag[ant])*np.exp(1j*2*(self.parallactic_angle[ant,:]-self.elevation_copy_dterms[ant,:]))
self.pol_leak_mat[ant,:,1,0] = (self.leakL_real[ant]+1j*self.leakL_imag[ant])*np.exp(-1j*2*(self.parallactic_angle[ant,:]-self.elevation_copy_dterms[ant,:]))
self.pol_leak_mat[ant,:,1,1] = 1
elif self.mount[ant] == 'ALT-AZ+NASMYTH-RIGHT':
self.pol_leak_mat[ant,:,0,0] = 1
self.pol_leak_mat[ant,:,0,1] = (self.leakR_real[ant]+1j*self.leakR_imag[ant])*np.exp(1j*2*(self.parallactic_angle[ant,:]+self.elevation_copy_dterms[ant,:]))
self.pol_leak_mat[ant,:,1,0] = (self.leakL_real[ant]+1j*self.leakL_imag[ant])*np.exp(-1j*2*(self.parallactic_angle[ant,:]+self.elevation_copy_dterms[ant,:]))
self.pol_leak_mat[ant,:,1,1] = 1
# Save to external file as numpy array
# np.save(II('$OUTDIR')+'/pol_leakage', self.pol_leak_mat)
data_reshaped = self.data.reshape((self.data.shape[0],self.data.shape[1],2,2))
for a0 in range(self.Nant):
for a1 in range(a0+1,self.Nant):
bl_ind = self.baseline_dict[(a0,a1)]
time_ind = 0
for ind in bl_ind:
data_reshaped[ind] = np.matmul(self.pol_leak_mat[a0,time_ind], np.matmul(data_reshaped[ind], \
np.conjugate(self.pol_leak_mat[a1,time_ind].T)))
time_ind = time_ind + 1
self.data = data_reshaped.reshape(self.data.shape)
self.save_data()
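A minimal numpy sketch of the per-visibility corruption applied above, V' = (D_p P_p) V (D_q P_q)^H (editorial illustration only): the leakage terms and parallactic angles are hypothetical, and only the plain ALT-AZ feed rotation is shown.

import numpy as np

def pjones(chi):
    # Feed rotation for an ALT-AZ mount in a circular basis: diag(e^{-i chi}, e^{+i chi})
    return np.diag([np.exp(-1j * chi), np.exp(1j * chi)])

def djones(d_r, d_l):
    # Constant station-based leakage (D-Jones)
    return np.array([[1.0, d_r], [d_l, 1.0]], dtype=complex)

V = np.array([[1.0 + 0.0j, 0.1j], [0.05 + 0.0j, 0.9 + 0.0j]])       # one 2x2 visibility
Jp = djones(0.02 + 0.01j, -0.015j) @ pjones(np.deg2rad(30.0))        # station p
Jq = djones(-0.01 + 0.02j, 0.03 + 0.0j) @ pjones(np.deg2rad(45.0))   # station q

V_corrupted = Jp @ V @ Jq.conj().T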
|
[
"def ode_rhs(self):\n\n #: Bandpass l_ce\n #b, a = signal.butter(2, 50, 'low', analog=True)\n #l_ce_filt = signal.lfilter(b, a, self._l_ce.sym)\n\n l_ce_tol = cas.fmax(self._l_ce.sym, 0.0)\n _stim = cas.fmax(0.01, cas.fmin(self._stim.sym, 1.))\n\n #: Algrebaic Equation\n l_mtc = self._l_slack.val + self._l_opt.val + self._delta_length.sym\n l_se = l_mtc - l_ce_tol\n\n #: Muscle Acitvation Dynamics\n self._dA.sym = (\n _stim - self._activation.sym)/GeyerMuscle.tau_act\n\n #: Muscle Dynamics\n #: Series Force\n _f_se = (self._f_max.val * (\n (l_se - self._l_slack.val) / (\n self._l_slack.val * self.e_ref))**2) * (\n l_se > self._l_slack.val)\n\n #: Muscle Belly Force\n _f_be_cond = self._l_opt.val * (1.0 - self.w)\n\n _f_be = (\n (self._f_max.val * (\n (l_ce_tol - self._l_opt.val * (1.0 - self.w)) / (\n self._l_opt.val * self.w / 2.0))**2)) * (\n l_ce_tol <= _f_be_cond)\n\n #: Force-Length Relationship\n val = cas.fabs(\n (l_ce_tol - self._l_opt.val) / (self._l_opt.val * self.w))\n exposant = GeyerMuscle.c * val**3\n _f_l = cas.exp(exposant)\n\n #: Force Parallel Element\n _f_pe_star = (self._f_max.val * (\n (l_ce_tol - self._l_opt.val) / (self._l_opt.val * self.w))**2)*(\n l_ce_tol > self._l_opt.val)\n\n #: Force Velocity Inverse Relation\n _f_v_eq = ((\n self._f_max.val * self._activation.sym * _f_l) + _f_pe_star)\n\n f_v_cond = cas.logic_and(\n _f_v_eq < self.tol, _f_v_eq > -self.tol)\n\n _f_v = cas.if_else(f_v_cond, 0.0, (_f_se + _f_be) / ((\n self._f_max.val * self._activation.sym * _f_l) + _f_pe_star))\n\n f_v = cas.fmax(0.0, cas.fmin(_f_v, 1.5))\n\n self._v_ce.sym = cas.if_else(\n f_v < 1.0, self._v_max.sym * self._l_opt.val * (\n 1.0 - f_v) / (1.0 + f_v * GeyerMuscle.K),\n self._v_max.sym*self._l_opt.val * (f_v - 1.0) / (\n 7.56 * GeyerMuscle.K *\n (f_v - GeyerMuscle.N) + 1.0 - GeyerMuscle.N\n ))\n\n #: Active, Passive, Tendon Force Computation\n _f_v_ce = cas.if_else(\n self._v_ce.sym < 0.,\n (self._v_max.sym*self._l_opt.val - self._v_ce.sym) /\n (self._v_max.sym*self._l_opt.val + GeyerMuscle.K * self._v_ce.sym),\n GeyerMuscle.N + (GeyerMuscle.N - 1) * (\n self._v_max.sym*self._l_opt.val + self._v_ce.sym\n ) / (\n 7.56 * GeyerMuscle.K * self._v_ce.sym - self._v_max.sym*self._l_opt.val\n ))\n\n self._a_force = self._activation.sym * _f_v_ce * _f_l * self._f_max.val\n self._p_force = _f_pe_star*_f_v - _f_be\n self._t_force = _f_se\n\n self._alg_tendon_force.sym = self._z_tendon_force.sym - self._t_force\n self._alg_active_force.sym = self._z_active_force.sym - self._a_force\n self._alg_passive_force.sym = self._z_passive_force.sym - self._p_force\n self._alg_v_ce.sym = self._z_v_ce.sym - self._v_ce.sym\n self._alg_l_mtc.sym = self._z_l_mtc.sym - l_mtc\n self._alg_dact.sym = self._z_dact.sym - self._dA.sym\n\n return True",
"def meanSolar():",
"def __init__(self, params: parameters_lib.SwirlLMParameters):\n super(ConstantDensity, self).__init__(params)\n\n self.rho = params.rho",
"def young_SFR(self):\n self.young_SFR_MsunPyr = self.mstar_young.sum() / 10.0e6",
"def correct_gaseous(self,filepathtogasploutput=None,Wband=False):\n\n if filepathtogasploutput is None:\n print('Please supply filepath to gaspl output')\n\n return\n\n import scipy.io \n import scipy.interpolate\n d = scipy.io.loadmat(filepathtogasploutput)\n #create interp funcs so we can plug in the apr gate structure\n ka_func = scipy.interpolate.interp1d(d['alt'].ravel(),d['L'].ravel(),kind='cubic',bounds_error=False) \n ku_func = scipy.interpolate.interp1d(d['alt'].ravel(),d['L2'].ravel(),kind='cubic',bounds_error=False)\n \n if Wband:\n w_func = scipy.interpolate.interp1d(d['alt'].ravel(),d['L3'].ravel(),kind='cubic',bounds_error=False)\n\n k_ku = ku_func(self.xrds.alt3d.values)\n k_ka = ka_func(self.xrds.alt3d.values)\n\n k_ku = k_ku*0.03 #conver to db/gate\n k_ka = k_ka*0.03 #conver to db/gate\n\n k_ku[np.isnan(k_ku)] = 0\n k_ka[np.isnan(k_ka)] = 0\n\n k_ku = 2*np.cumsum(k_ku,axis=(0))\n k_ka = 2*np.cumsum(k_ka,axis=(0))\n\n ku_new = self.xrds.Ku.values + k_ku \n da = xr.DataArray(ku_new,\n dims={'range':np.arange(0,550),'cross_track':np.arange(0,24),\n 'along_track':np.arange(self.xrds.Ku.shape[2])},\n coords={'lon3d': (['range','cross_track','along_track'],self.xrds.lon3d),\n 'lat3d': (['range','cross_track','along_track'],self.xrds.lat3d),\n 'time3d': (['range','cross_track','along_track'],self.xrds.time3d),\n 'alt3d':(['range','cross_track','along_track'],self.xrds.alt3d)})\n\n da.fillna(value=-9999)\n da.attrs['units'] = 'dBZ'\n da.attrs['standard_name'] = 'Ku-band Reflectivity'\n\n self.xrds['Ku'] = da\n\n ka_new = self.xrds.Ka.values + k_ka\n da = xr.DataArray(ka_new,\n dims={'range':np.arange(0,550),'cross_track':np.arange(0,24),\n 'along_track':np.arange(self.xrds.Ku.shape[2])},\n coords={'lon3d': (['range','cross_track','along_track'],self.xrds.lon3d),\n 'lat3d': (['range','cross_track','along_track'],self.xrds.lat3d),\n 'time3d': (['range','cross_track','along_track'],self.xrds.time3d),\n 'alt3d':(['range','cross_track','along_track'],self.xrds.alt3d)})\n\n da.fillna(value=-9999)\n da.attrs['units'] = 'dBZ'\n da.attrs['standard_name'] = 'Ka-band Reflectivity'\n\n self.xrds['Ka'] = da\n\n if Wband:\n k_w = w_func(self.xrds.alt3d.values)\n k_w = k_w*0.03 \n k_w[np.isnan(k_w)] = 0\n k_w = 2*np.cumsum(k_w,axis=(0))\n\n w_new = self.xrds.W.values + k_w\n da = xr.DataArray(w_new,\n dims={'range':np.arange(0,550),'cross_track':np.arange(0,24),\n 'along_track':np.arange(self.xrds.Ku.shape[2])},\n coords={'lon3d': (['range','cross_track','along_track'],self.xrds.lon3d),\n 'lat3d': (['range','cross_track','along_track'],self.xrds.lat3d),\n 'time3d': (['range','cross_track','along_track'],self.xrds.time3d),\n 'alt3d':(['range','cross_track','along_track'],self.xrds.alt3d)})\n\n da.fillna(value=-9999)\n da.attrs['units'] = 'dBZ'\n da.attrs['standard_name'] = 'W-band Reflectivity'\n\n self.xrds['W'] = da\n\n return",
"def dzdypartial(self, x, y, rang, murvir):\n\n return 10**(2*y)*rang**2 / ((murvir + rang*10**y) *\n (murvir + rang*10**y) * np.log(1. + 10**y*rang/murvir)\\\n - rang*10**y)\\\n - 10**(2*y) / ((1. + 10**y)*((1. + 10**y)*np.log(1. + 10**y)\\\n - 10**y))",
"def _rdaam_diffusivity(self):\r\n self._damage()\r\n \r\n R = 0.008314472\r\n \r\n product = lambda lmbd : np.exp(lmbd * self.t1) - np.exp(lmbd * self.t2)\r\n \r\n if self.mineral =='apatite':\r\n rhov = (8/8 * self.U238 * 3.19 * product(self.lmbd238)\r\n + 7/8 * self.U235 * 3.19 * product(self.lmbd235)\r\n + 6/8 * self.Th232 * 3.19 * product(self.lmbd232)\r\n )\r\n \r\n # Parameters: Flowers et al. (2009)\r\n lambdaf = 8.46e-17\r\n lambdaD = 1.55125e-10\r\n eta = 0.91\r\n L = 8.15e-4\r\n \r\n anneal_d = self.damage * (lambdaf / lambdaD) * rhov * eta * L\r\n anneal_d = np.sum(anneal_d, axis=0)\r\n \r\n omega = 1e-22\r\n psi = 1e-13\r\n Do = 0.6071\r\n Ea = 122.3\r\n Et = 34\r\n \r\n trap_diff = psi * anneal_d + omega * anneal_d**3\r\n \r\n diffusivities = 1e8 * ((Do * np.exp(-Ea / (R * self.T_mean)))\r\n / (trap_diff * np.exp(Et / (R * self.T_mean)) + 1))\r\n \r\n self.diffusivities = diffusivities\r\n \r\n if self.mineral =='zircon':\r\n alphai = (8 * self.U238 * product(self.lmbd238)\r\n + 7 * self.U235 * product(self.lmbd235)\r\n + 6 * self.Th232 * product(self.lmbd232)\r\n )\r\n \r\n anneal_d = alphai * np.flip(self.damage)\r\n anneal_d = np.sum(anneal_d, axis=0)\r\n \r\n # Parameters: Guenthner et al. (2013)\r\n Ba = 5.48E-19\r\n SV = 1.669\r\n D0l = 193188\r\n El = 165\r\n D0N17 = 0.0034\r\n EaN17 = 71\r\n Lint_lattice = 45920\r\n \r\n a = self.r * 1e-4\r\n fa = 1 - np.exp(-Ba * anneal_d)\r\n DI = 1 - np.exp(-Ba * anneal_d * 3)\r\n Lint = (4.2 / (fa * SV) - 2.5)\r\n Tau = (Lint_lattice / Lint)**2\r\n DTaua2 = (1 / Tau) * D0l * np.exp(-El / (R * self.T_mean)) / (a * (1 - DI))**2\r\n DN17a2 = D0N17 * np.exp(-EaN17 / (R * self.T_mean)) / (a * DI)**2\r\n \r\n diffusivities = self.r**2 * (DI / DN17a2 + (1 - DI) / DTaua2)**-1\r\n \r\n self.diffusivities = diffusivities",
"def lsr_nonsense():\n\n RO = 8.\n VO = 220.\n BOVY_TIME_CONVERSION = bovy_conversion.time_in_Gyr(VO, RO) * 1000 # Myr/bovy_time\n\n perimeter = 2 * np.pi * 8 * u.kpc\n velocity = 220 * u.km / u.s\n # for reference, LSR (at 8 kpc, with V = 220 km/s) should take this long\n # to complete one orbit\n orbit_time = (perimeter / velocity).to(\"Myr\")\n\n max_age = orbit_time.value / BOVY_TIME_CONVERSION\n ntimes = 100\n ts = np.linspace(0, max_age, ntimes)\n\n # demo a star (with vT=220, vR=0, vZ=0, z=0, phi=0.1 pi) staying\n # fixed in our coordinate frame\n R, vR, vT, z, vz, phi = 1., 0., 1., 0., 0., 0.\n LSR_coords = [R, vR, vT, z, vz, phi]\n lsr = Orbit(vxvv=LSR_coords, solarmotion='schoenrich', vo=220, ro=8)\n lsr.integrate(ts, mp, method='odeint')\n\n lsr_data = lsr.getOrbit()\n lsrR = RO * lsr_data[:,0]\n lsrphi = lsr_data[:,5]\n\n lsrX = lsrR * np.cos(lsrphi)\n lsrY = lsrR * np.sin(lsrphi)\n lsrZ = RO * lsr_data[:,3]\n\n R, vR, vT, z, vz, phi = 1., 0., 1., 0., 0., 0.25*np.pi\n rot_lsr_coords = [R, vR, vT, z, vz, phi]\n rot_lsr = Orbit(vxvv=rot_lsr_coords, solarmotion='schoenrich', vo=220, ro=8)\n rot_lsr.integrate(ts, mp, method='odeint')\n\n rot_lsr_data = rot_lsr.getOrbit()\n\n # putting into corotating cartesian system centred on LSR\n XYZUVW_rot = galpy_coords_to_xyzuvw(rot_lsr_data, ts)\n plt.clf()\n plt.plot(XYZUVW_rot[:,0], XYZUVW_rot[:,1])\n plt.savefig(\"temp_plots/rotXY.png\")\n\n\n orbit_time = (perimeter / velocity).to(\"Myr\")\n ts = np.linspace(0., 10*orbit_time.value, 1000) / BOVY_TIME_CONVERSION\n ra, dec, dist, mu_ra, mu_dec, vlos = 0., 0., 0., 0., 0., 0.\n solar_coords = [ra, dec, dist, mu_ra, mu_dec, vlos]\n sun = Orbit(vxvv=solar_coords, radec=True,\n solarmotion='schoenrich') # should just be the sun's orbit\n sun.integrate(ts, mp, method='odeint')\n\n # get the orbit [R, vR, vT, z, vz, phi] (pos scaled by ro, vel scaled by vo)\n sun_data = sun.getOrbit()\n XYZUVW_sun = galpy_coords_to_xyzuvw(sun_data, ts)\n plt.clf()\n plt.plot(XYZUVW_sun[:,0], XYZUVW_sun[:,1])\n plt.savefig(\"temp_plots/sunXY.png\")\n plt.clf()\n plt.plot(XYZUVW_sun[:,0], XYZUVW_sun[:,2])\n plt.savefig(\"temp_plots/sunXZ.png\")",
"def lumped_cpw(freq,\n line_width,\n line_gap,\n substrate_thickness,\n film_thickness,\n dielectric_constant=11.45,\n loss_tangent=10**-5,\n london_penetration_depth=30 * 10**-9):\n s = line_width\n w = line_gap\n h = substrate_thickness\n t = film_thickness\n eRD = dielectric_constant\n tanD = loss_tangent\n lambdaLT = london_penetration_depth\n wfreq = freq * 2 * np.pi\n\n Kk0, Kk01, Kk1, Kk11 = elliptic_int_constants(s, w, h)\n\n C = 2 * e0 * (eRD - 1) * (Kk1 / Kk11) + 4 * e0 * (Kk0 / Kk01)\n\n #filling factor\n q = 0.5 * (Kk1 * Kk01) / (Kk11 * Kk0)\n\n #Admittance\n G = wfreq * C * q * tanD\n\n #Effective Dielectric Constant\n etfSqrt = effective_dielectric_constant(freq, s, w, h, t, q, Kk0, Kk01, eRD)\n\n #External Inducatance\n Z0 = (30 * np.pi / etfSqrt) * Kk01 / Kk0\n Lext = Z0**2 * C\n Cstar = 2 * e0 * (etfSqrt**2 - 1) * (Kk1 / Kk11) + 4 * e0 * (Kk0 / Kk01)\n\n #Kinetic Inductance\n A1 = (-t / np.pi) + (1 / 2) * np.sqrt((2 * t / np.pi)**2 + s**2)\n B1 = s**2 / (4 * A1)\n C1 = B1 - (t / np.pi) + np.sqrt((t / np.pi)**2 + w**2)\n D1 = 2 * t / np.pi + C1\n\n LkinStep = (u0 * lambdaLT * C1 / (4 * A1 * D1 * Kk0))\n\n Lkin1 = LkinStep * 1.7 / (np.sinh(t / (2 * lambdaLT)))\n Lkin2 = LkinStep * 0.4 / (np.sqrt(\n (((B1 / A1)**2) - 1) * (1 - (B1 / D1)**2)))\n\n Lk = Lkin1 + Lkin2\n\n return Lk, Lext, C, G, Z0, etfSqrt**2, Cstar",
"def stillinger_weber(displacement: DisplacementFn,\n sigma=2.0951,\n A=7.049556277,\n B=0.6022245584,\n lam=21.0,\n gamma=1.2,\n epsilon=2.16826,\n three_body_strength=1.0,\n cutoff=3.77118) -> Callable[[Array], Array]:\n two_body_fn = partial(_sw_radial_interaction, sigma, B, cutoff)\n three_body_fn = partial(_sw_angle_interaction, gamma, sigma, cutoff)\n three_body_fn = vmap(vmap(vmap(three_body_fn, (0, None)), (None, 0)))\n\n def compute_fn(R, **kwargs):\n d = partial(displacement, **kwargs)\n dR = space.map_product(d)(R, R)\n dr = space.distance(dR)\n first_term = util.high_precision_sum(two_body_fn(dr)) / 2.0 * A\n second_term = lam * util.high_precision_sum(three_body_fn(dR, dR)) / 2.0\n return epsilon * (first_term + three_body_strength * second_term)\n return compute_fn",
"def AdvectionRK4_floating(particle, fieldset, time):\n if particle.beach==0:\n particle.distance=fieldset.distance2shore[time,particle.depth,particle.lat,particle.lon]\n d2=particle.depth\n if particle.lon>180:\n particle.lon-=360\n if particle.lon<-180:\n particle.lon+=360\n (u1, v1) = fieldset.UV[time, d2, particle.lat, particle.lon]\n (uS1, vS1) = fieldset.Ust[time, d2, particle.lat, particle.lon],fieldset.Vst[time, d2, particle.lat, particle.lon]\n # lon1, lat1 = (particle.lon + u1*.5*particle.dt, particle.lat + v1*.5*particle.dt)\n lon1, lat1 = (particle.lon + (u1+uS1)*.5*particle.dt, particle.lat + (v1+vS1)*.5*particle.dt)\n\n if lon1>180:\n lon1-=360\n if lon1<-180:\n lon1+=360\n (u2, v2) = fieldset.UV[time + .5 * particle.dt, d2, lat1, lon1]\n (uS2, vS2) = fieldset.Ust[time + .5 * particle.dt, d2, lat1, lon1],fieldset.Vst[time + .5 * particle.dt, d2, lat1, lon1]\n # lon2, lat2 = (particle.lon + u2*.5*particle.dt, particle.lat + v2*.5*particle.dt)\n lon2, lat2 = (particle.lon + (u2+uS2)*.5*particle.dt, particle.lat + (v2+vS2)*.5*particle.dt)\n\n if lon2>180:\n lon2-=360\n if lon2<-180:\n lon2+=360\n (u3, v3) = fieldset.UV[time + .5 * particle.dt, d2, lat2, lon2]\n (uS3, vS3) = fieldset.Ust[time + .5 * particle.dt, d2, lat2, lon2],fieldset.Vst[time + .5 * particle.dt, d2, lat2, lon2]\n # lon3, lat3 = (particle.lon + u3*particle.dt, particle.lat + v3*particle.dt)\n lon3, lat3 = (particle.lon + (u3+uS3)*particle.dt, particle.lat + (v3+vS3)*particle.dt)\n \n if lon3>180:\n lon3-=360\n if lon3<-180:\n lon3+=360\n (u4, v4) = fieldset.UV[time + particle.dt, d2, lat3, lon3]\n (uS4, vS4) = fieldset.Ust[time + particle.dt, d2, lat3, lon3],fieldset.Vst[time + particle.dt, d2, lat3, lon3]\n \n # particle.lon += (u1 + 2*u2 + 2*u3 + u4) / 6. * particle.dt\n particle.lon += ((u1+uS1) + 2*(u2+uS2) + 2*(u3+uS3) + (u4+uS4)) / 6. * particle.dt\n if particle.lon>180:\n particle.lon-=360\n if particle.lon<-180:\n particle.lon+=360\n # particle.lat += (v1 + 2*v2 + 2*v3 + v4) / 6. * particle.dt\n particle.lat += ((v1+vS1) + 2*(v2+vS2) + 2*(v3+vS3) + (v4+vS4)) / 6. * particle.dt",
"def __init__(self,xsym,ax,Lmax,Mmax,lmax,parity='natural',ax2=None,psum=None,Lmin=None,Mmin=None,lsum=None): \n\n # set the defaults\n def default_if_None(val,deflt): \n if val is None: \n return deflt\n else:\n return val\n\n self.xsym=xsym\n self.ax =ax\n self.bx =default_if_None(ax2,ax)\n self.ax.show('radial axis 1')\n self.bx.show('radial axis 2')\n L0=default_if_None(Lmin,Lmax)\n M0=default_if_None(Mmin,Mmax)\n ls=default_if_None(lsum,2*lmax)+1\n ks=default_if_None(psum,self.ax.order()+self.bx.order())\n\n self.gaunt=GauntCoeffTable(2*lmax)\n self.bang=[]\n self.len=-1\n block_i0=0\n count=0\n for L in range(L0,Lmax+1):\n for M in range(M0,Mmax+1):\n for l1 in range(lmax+1):\n for l2 in range(lmax+1):\n if l1+l2>ls: continue\n if parity=='natural' and (L+l1+l2)%2==1: continue\n if parity=='unnatural' and (L+l1+l2)%2==0: continue\n if xsym!=0 and l1<l2: continue # skip exchange symmetric angular part\n self.bang.append(BasTwoAngle(L,M,l1,l2))\n ba=self.bang[-1]\n\n # generate product basis\n for e1 in self.ax.e:\n for e2 in self.bx.e:\n ba.brad.append(BasTwoRadial(e1.centrifugal(l1),e2.centrifugal(l2)))\n br=ba.brad[-1]\n for k1 in range(e1.n):\n for k2 in range(e2.n):\n count+=1\n br.k1.append(k1)\n br.k2.append(k2)\n itotal=block_i0+e1.i0+k1+self.ax.len()*(e2.i0+k2)\n# print 'block',L,M,l1,l2,itotal,block_i0\n self.len=max(self.len,itotal+1)\n br.i.append(itotal)\n block_i0=block_i0+self.ax.len()*self.bx.len() \n print 'total',self.len",
"def smooth_gauge(w1,w2):\n m = uij(w1,w2) # matrix of wavefunctions\n U, s, V = np.linalg.svd(m, full_matrices=True) # sing val decomp\n R = (U*V).H # rotation matrix\n wnew = w2.copy()*0. # initialize\n wold = w2.copy() # old waves\n for ii in range(R.shape[0]):\n for jj in range(R.shape[0]):\n wnew[ii] += R[jj,ii]*wold[jj]\n return wnew",
"def rate_dislocation_annihilation(rho,T,sigma,omega,Q_cl,b,d_check,mu,tau):\n rho_dot = (rho - 1E10)*climb_velocity(T,sigma,omega,Q_cl,b)/(calc_d_hat(mu,b,tau) - d_check)\n \n return rho_dot",
"def calculate_soil_water_fac(self):\n # turn into fraction...\n smc_topsoil = self.state.pawater_topsoil / self.params.wcapac_topsoil\n smc_root = self.state.pawater_root / self.params.wcapac_root\n \n if self.control.sw_stress_model == 0:\n wtfac_topsoil = smc_topsoil**self.params.qs \n wtfac_root = smc_root**self.params.qs \n \n elif self.control.sw_stress_model == 1:\n wtfac_topsoil = self.calc_sw_modifier(smc_topsoil, \n self.params.ctheta_topsoil, \n self.params.ntheta_topsoil)\n \n wtfac_root = self.calc_sw_modifier(smc_root, \n self.params.ctheta_root, \n self.params.ntheta_root)\n \n elif self.control.sw_stress_model == 2:\n \n # Stomatal limitaiton\n # Exponetial function to reduce g1 with soil water limitation\n # based on Zhou et al. 2013, AFM, following Makela et al 1996.\n # For the moment I have hardwired the PFT parameter as I am still\n # testing.\n # Because the model is a daily model we are assuming that LWP is\n # well approximated by the night SWP.\n \n if float_eq(smc_topsoil, 0.0):\n psi_swp_topsoil = -1.5\n else:\n arg1 = self.params.psi_sat_topsoil\n arg2 = smc_topsoil /self.params.theta_sat_topsoil\n arg3 = -self.params.b_topsoil\n psi_swp_topsoil = arg1 * arg2**arg3\n \n if float_eq(smc_root, 0.0):\n psi_swp_root = -1.5\n else:\n arg1 = self.params.psi_sat_root\n arg2 = smc_root/self.params.theta_sat_root\n arg3 = -self.params.b_root\n psi_swp_root = arg1 * arg2**arg3\n \n # multipliy these by g1, same as eqn 3 in Zhou et al. 2013.\n b = 0.66\n \n wtfac_topsoil = exp(b * psi_swp_topsoil)\n wtfac_root = exp(b * psi_swp_root)\n \n #print self.state.pawater_root,wtfac_root \n return (wtfac_topsoil, wtfac_root)",
"def dispersiveLJ_derivative2(self, r):\n\t\treturn -42/pow(r,8)",
"def bandpass_correct(self):\n info(\"Applying scalar B-Jones amplitudes\")\n # Read in the file\n bjones_inp = np.loadtxt(self.bandpass_table,dtype=str)\n self.bpass_input_freq = bjones_inp[0][1:].astype(np.float64)\n self.bpass_input_freq *= 1e9 # convert from GHz to Hz\n self.bjones_ampl = bjones_inp[1:,1:].astype(np.float64)\n \n\n # Interpolate between the frequencies given in the bandpass table\n if self.bpass_input_freq[0] > self.chan_freq[0] or self.bpass_input_freq[-1] < self.chan_freq[-1]:\n warn(\"Input frequencies out of range of MS frequencies. Extrapolating in some places.\")\n\n bjones_interpolated=np.zeros((self.Nant,self.chan_freq.shape[0]))\n for ant in range(self.Nant):\n spl = ius(self.bpass_input_freq, self.bjones_ampl[ant],k=self.bandpass_freq_interp_order)\n bjones_interpolated[ant] = spl(self.chan_freq)\n\n # apply the B-Jones terms by iterating over baselines\n for a0 in range(self.Nant):\n for a1 in range(a0+1,self.Nant):\n for msfreq_ind in range(self.chan_freq.shape[0]):\n bl_ind = self.baseline_dict[(a0,a1)]\n self.data[bl_ind,msfreq_ind,:] *= bjones_interpolated[a0,msfreq_ind] * bjones_interpolated[a1,msfreq_ind]\n self.save_data()\n\n\n ### plot bandpasses",
"def _gradient_one_sample(self, sample: np.ndarray, params: np.ndarray) -> np.ndarray:\n w = self.vgbs.embedding(params)\n jac = self.vgbs.embedding.jacobian(params)\n\n h = self.h_reparametrized(sample, params)\n\n if self.vgbs.threshold:\n diff = sample - self.vgbs.mean_clicks_by_mode(params)\n else:\n diff = sample - self.vgbs.mean_photons_by_mode(params)\n\n return h * (diff / w) @ jac",
"def vat_rate():"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the paths of all .wav files found recursively in the path.
|
def recursive_wav_paths(path):
absolute_paths = []
for folder, subs, files in os.walk(path):
for file in files:
extension = os.path.splitext(file)[1]
if extension.lower() == '.wav':
file_path = os.path.join(folder, file)
absolute_paths.append(os.path.abspath(file_path))
return absolute_paths
|
[
"def _get_wav_files(dir_path):\n files = []\n for file in os.listdir(dir_path):\n if file.endswith(\".wav\"):\n files.append(file)\n return files",
"def _get_files(path):\n ret_val = []\n for root, _, files in os.walk(path):\n for f in files:\n ret_val.append(os.path.join(root, f))\n return ret_val",
"def get_audios(path):\n supported_formats = [\".wav\", \".mp3\", \".ogg\", \".flac\", \".m4a\"]\n return [\n item\n for sublist in [[os.path.join(dir, file) for file in files]\n for dir, _, files in list(os.walk(path))]\n for item in sublist if os.path.splitext(item)[1] in supported_formats\n ]",
"def get_all_file_paths_in_path(path: str):\n def join_paths(dir_path, filenames):\n return (joinpath(path, dir_path, filename) for \\\n filename in filenames)\n files_iter = (join_paths(dir_path, filenames) for \\\n dir_path, _, filenames in walk(path))\n return chain.from_iterable(files_iter)",
"def get_filenames(path):\n return [f for f in listdir(path) if isfile(join(path, f))]",
"def audio_paths(texts, audio_dir):\n return [ \"{}/{}.wav\".format(audio_dir, encode(text)) for text in texts ]",
"def get_timing_file_paths(path):\r\n\r\n\tpaths = []\r\n\tfor root, dirs, files in os.walk(path):\r\n\t\tfor file in files:\r\n\t\t\tif file.endswith(\"cpp.timing.txt\"):\r\n\t\t\t\tfile_path = os.path.join(root, file)\r\n\t\t\t\tpaths.append(file_path)\r\n\r\n\treturn paths",
"def get_filenames(self, path):\r\n \r\n files = []\r\n try:\r\n with scandir(path) as input_dir: \r\n for entry in input_dir:\r\n if entry.is_file():\r\n files.append(entry.name)\r\n except FileNotFoundError:\r\n print(\"The directory \" + path + \" was not found\")\r\n exit(1)\r\n \r\n if files:\r\n print(str(len(files)) + \" files found:\")\r\n for file in files: print(\"\\t\"+file)\r\n return files\r\n else:\r\n print(\"No files found at \" + path + \"; exiting\")\r\n exit(2)",
"def get_orig_media_dirs(path):\n dirs = []\n for dirname, dirnames, filenames in os.walk(path):\n for subdir in dirnames:\n dirs.append(\n os.path.join(dirname, subdir))\n return dirs",
"def searchFiles(self):\n list_of_files = []\n for dirname, dirnames, filenames in os.walk(self.path):\n for filename in filenames:\n if filename.startswith(self.prefix) and filename.endswith(self.suffix):\n list_of_files.append(os.path.join(self.path, filename))\n return list_of_files",
"def get_all_names_from_path(path):\n return [f for f in listdir(path) if isfile(join(path, f))]",
"def get_clip_paths(path: str) -> list:\n return [\n os.path.join(path, file) for file in os.listdir(path) if file.endswith(\".mp4\")\n ]",
"def find_files(suffix, path):\n\n if os.path.isfile(path): # edge case, when suppied path is a file by itself -- added as per suggestion in code review\n if path.endswith(suffix):\n return [path]\n\n path_list = []\n for file_name in os.listdir(path):\n file_path = os.path.join(path, file_name)\n \n if os.path.isfile(file_path) and file_path.endswith(suffix):\n path_list.append(file_path)\n \n elif os.path.isdir(file_path):\n path_list.extend(find_files(suffix, file_path))\n \n return path_list",
"def mp3files():\n\tBase_dir = os.path.dirname(os.path.dirname((os.path.abspath(__file__))))\n\tmp3_source = os.path.join(Base_dir,'raw_data','mp3_files')\n\tmp3list = []\n\tfor paths,dirs,files in scandir.walk(mp3_source):\n\t#for paths,dirs,files in scandir.walk(r'D:\\Audio\\forJarvis'):\n\t\t\"\"\"if want to search mp3 files from all you HDD then \n\t\tprovide all drives path postions instead of D:\\\\Audio\n\t\tadd extra back slash where ever back slash occur. \n\t\t\"\"\"\n\t\tfor file in files:\n\t\t\tif file.endswith('.mp3'):\n\t\t\t\tfullpath =mp3list.append(os.path.join(paths,file))\n\t#print mp3list\n\t#print len(mp3list)\n\treturn mp3list",
"def _findFilesInPath(self, startpath):\n allfiles = []\n if not os.access(startpath, os.R_OK):\n log().info(\"Skipping inaccessible path %s\" % startpath)\n return allfiles\n\n for subf in os.listdir(unicode(startpath)):\n newpath = os.path.join(startpath, subf)\n newpath = os.path.abspath(newpath)\n if os.path.isfile(newpath):\n if not self._checkExtension(subf):\n continue\n elif self._blacklistedFilename(subf):\n continue\n else:\n allfiles.append(newpath)\n else:\n if self.recursive:\n allfiles.extend(self._findFilesInPath(newpath))\n #end if recursive\n #end if isfile\n #end for sf\n return allfiles",
"def _find_paths(dir_path, file_pattern):\n pattern = os.path.join(dir_path, \"**\", file_pattern)\n return glob.glob(pattern, recursive=True)",
"def _get_files(self, path: str) -> List[str]:\n files = glob(os.path.normpath(os.path.join(PROJECT_BASE_PATH, path + '*')))\n return [\n file for file in files\n if file.endswith(self.allowed_extensions)\n ]",
"def find_files(suffix, path):\n # Recursion\n result = []\n\n if not bool(path):\n return []\n\n if not bool(suffix):\n suffix = None\n\n if os.path.isdir(path): # if the current path is a file\n if path.endswith(suffix): # if the file has extension suffix='.c'\n result.append(path)\n else:\n children = os.listdir(path)\n \n for child in children:\n full_path = os.path.join(path, child)\n\n if os.path.isdir(full_path):\n result += find_files(suffix, full_path)\n elif os.path.isfile(full_path) and full_path.endswith(suffix):\n result.append(full_path)\n\n return result\n '''\n # Iterative\n result = []\n nodesToExpand = [path] # stack\n\n while nodesToExpand:\n full_path = nodesToExpand.pop()\n if os.path.isfile(full_path) and full_path.endswith(suffix):\n result.append(full_path)\n elif os.path.isdir(full_path):\n for child in os.listdir(full_path):\n nodesToExpand.append(os.path.join(full_path, child))\n return sorted(result)\n '''",
"def find_all_raw_files():\n\n # Adapt path if script is not run in main directory\n path_add = check_path_location()\n\n # Find all csv, dat or mat files in raw (subfolders)\n wild_paths = ['*/', '*/*/', '*/*/*/', '*/*/*/*/', '*/*/*/*/*/'] # Slight overkill, but to be sure\n all_files = list()\n for wild_path in wild_paths:\n all_files += glob(path_add + path_to_raw_data + wild_path + '*.csv')\n all_files += glob(path_add + path_to_raw_data + wild_path + '*.dat')\n all_files += glob(path_add + path_to_raw_data + wild_path + '*.mat')\n\n # Remove path, only return filename with extension\n for i, file in enumerate(all_files):\n all_files[i] = basename(file)\n\n return all_files"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Calculate the spectrogram of a reference recording located at path. Code written by Bongjun Kim.
|
def reference_spectrogram(path, augmentations: audaugio.ChainBase):
try:
y, sr = librosa.load(path, sr=44100)
except audioop.error as e:
logger = logging.getLogger('logger')
logger.warning("Could not load {0}\n{1}".format(path, e))
return None
augmented_audio = augmentations(y, sr)
spectrograms = []
for audio in augmented_audio:
if audio.shape[0] < 4 * sr:
pad = np.zeros((4 * sr - audio.shape[0]))
y_fix = np.append(audio, pad)
else:
y_fix = audio[0:int(4 * sr)]
s = librosa.feature.melspectrogram(y=y_fix, sr=sr, n_fft=1024, hop_length=1024, power=2)
s = librosa.power_to_db(s, ref=np.max)
s = s[:, 0:128]
spectrograms.append(s)
return spectrograms
|
[
"def get_spectrogram_data(frame_rate, np_frames):\n # Set format details for plot.\n #fig = plt.figure(num=None, figsize=(12, 7.5), dpi=300)\n #ax = fig.add_subplot(111)\n #ax.xaxis.set_major_locator(ticker.MultipleLocator(1))\n #ax.xaxis.set_minor_locator(ticker.MultipleLocator(0.1))\n #ax.yaxis.set_major_locator(ticker.MultipleLocator(2000))\n #ax.yaxis.set_minor_locator(ticker.MultipleLocator(500))\n #ax.tick_params(axis='both', direction='inout')\n #plt.title(f\"Spectrogram of:\\n{input_file}\")\n plt.title(f\"Spectrogram\")\n plt.xlabel('Time (seconds)')\n plt.ylabel('Frequency (Hz)')\n\n # If NFFT is too high, then there the horizontal (frequency) resolution is\n # too fine, and there are multiple bands for each formant. However, if\n # NFFT is too low, then the whole image is rather blurry and even the\n # formants are not well differentiated (i.e. at the default vaules for NFFT\n # and noverlap). noverlap that is half of NFFT seems to minimize background\n # noise, as well.\n noverlap = 128 # default: 128; other: 256\n NFFT = 256 # default: 256; other: 512\n\n # Create the plot.\n spectrum, frequencies, times, img = plt.specgram(\n np_frames,\n Fs=frame_rate,\n cmap='gnuplot',\n noverlap=noverlap,\n NFFT=NFFT,\n )\n return spectrum, frequencies, times, img",
"def spectrogram(samples):\n S, freqs, times = mlab.specgram(samples, NFFT=4096, Fs=44100,\n window=mlab.window_hanning,\n noverlap=(4096 // 2))\n return S, freqs, times",
"def compute_spectrogram(self):\n f, t, sxx = ss.spectrogram(self.audio_array, fs=self.sample_freq, window='hann', nfft=NFFT, nperseg=NFFT,\n noverlap=NFFT * .5)\n sxx = 10*np.log10(sxx) # decibels\n sxx[np.where(np.isnan(sxx) | np.isinf(sxx))] = 0 # if somehow the sxx was negative, we put the value to 0\n self.spectrogram = f, t, sxx\n return self.spectrogram",
"def create_spectogram1(track_id):\n filename = get_path(track_id)\n y, sr = librosa.load(filename)\n spectrogram = librosa.feature.melspectrogram(y = y, sr = sr, n_fft = 2048, hop_length = 1024)\n spectrogram = librosa.power_to_db(spectrogram, ref = np.max)\n return spectrogram[:, 473:601]",
"def create_spectrogram(self, audio_path):\n audio_name = audio_path.split(\"/\")[-1].replace(\".wav\", \"\")\n fs, w = wavfile.read(audio_path)\n if len(w.shape) == 2:\n w = w[:, 0]\n dur = len(w) / fs\n\n cmap = plt.cm.get_cmap('Greys')\n cmap.set_under('w')\n f, t, sxx = scipy.signal.spectrogram(w, fs=fs, window='hann', nperseg=int(fs / 12.32),\n noverlap=int(self.overlap * (fs / 12.32)), mode='psd', nfft=16000)\n sxx_db = 10 * np.log10(abs(sxx[:1500, :]) / 2 * 10e-5)\n\n dpi = 50\n fig = plt.figure(figsize=(dur * self.sec_size // dpi, self.sec_size * 2 // dpi), dpi=dpi, frameon=False)\n ax = plt.Axes(fig, [0., 0., 1., 1.])\n ax.set_axis_off()\n fig.add_axes(ax)\n\n extent = (0, dur * self.sec_size // dpi, 0, self.sec_size * 2 // dpi)\n plt.imshow(sxx_db[::-1, :], cmap=cmap, extent=extent, norm=mpl.colors.Normalize(vmin=-50, vmax=0, clip=False))\n plt.savefig(osp.join(self.out_path, '%s.jpeg' % audio_name), dpi=dpi, frameon=False)\n\n # Resize saved image in case of bad matplotlib result\n img = imread(osp.join(self.out_path, '%s.jpeg' % audio_name))\n img = resize(img, (dur * self.sec_size, self.sec_size * 2)[::-1])\n imsave(osp.join(self.out_path, '%s.jpeg' % audio_name), img)",
"def calculate_spectrogram(framerate, data, chunk_len, stride, window_func=np.hamming):\n\tnum_chunks = int((len(data)-chunk_len)/float(stride))+1\n\twindow = window_func(chunk_len)\n\tchunks = [data[i*stride:i*stride+chunk_len] for i in range(num_chunks)]\n\twindowed_chunks = [window*chunk for chunk in chunks]\n\t# fourier transform each chunk, get abs magnitude\n\tspectra = np.array([np.abs(np.fft.fft(chunk)) for chunk in windowed_chunks])\n\treturn spectra",
"def imitation_spectrogram(path, augmentations: audaugio.ChainBase):\n try:\n y, sr = librosa.load(path, sr=16000)\n except audioop.error as e:\n logger = logging.getLogger('logger')\n logger.warning(\"Could not load {0}\\n{1}\".format(path, e))\n return None\n\n augmented_audio = augmentations(y, sr)\n\n spectrograms = []\n for audio in augmented_audio:\n # zero-padding\n if audio.shape[0] < 4 * sr:\n pad = np.zeros((4 * sr - audio.shape[0]))\n y_fix = np.append(audio, pad)\n else:\n y_fix = audio[0:int(4 * sr)]\n s = librosa.feature.melspectrogram(y=y_fix, sr=sr, n_fft=133,\n hop_length=133, power=2, n_mels=39,\n fmin=0.0, fmax=5000)\n s = s[:, :482]\n s = librosa.power_to_db(s, ref=np.max)\n spectrograms.append(s)\n return spectrograms",
"def create_spectrogram(voice_sample):\n\n in_fpath = Path(voice_sample.replace('\"', \"\").replace(\"'\", \"\"))\n original_wav, sampling_rate = librosa.load(str(in_fpath))\n\n # Plot the signal read from wav file\n fig = plt.figure()\n #plt.subplot(111)\n plt.title(f\"Spectrogram of file {voice_sample}\")\n\n plt.plot(original_wav)\n plt.xlabel(\"Sample\")\n plt.ylabel(\"Amplitude\")\n\n # plt.subplot(212)\n # plt.specgram(original_wav, Fs=sampling_rate)\n # plt.xlabel(\"Time\")\n # plt.ylabel(\"Frequency\")\n # # plt.savefig(voice_sample.split(\".\")[0] + \"_spectogram.png\")\n return fig",
"def mel_spectogram(aud):\n mel = librosa.feature.melspectrogram(aud,\n sr=SAMPLING_RATE,\n n_fft=N_FFT,\n hop_length=H_L,\n n_mels=MEL_CHANNELS)\n # mel = np.log(mel + 1e-5)\n return mel",
"def spectrogram(samples, sample_rate, frame_len, fps, batch=50):\n if len(samples) < frame_len:\n return np.empty((0, frame_len // 2 + 1), dtype=samples.dtype)\n win = np.hanning(frame_len).astype(samples.dtype)\n hopsize = sample_rate // fps\n num_frames = max(0, (len(samples) - frame_len) // hopsize + 1)\n batch = min(batch, num_frames)\n if batch <= 1 or not samples.flags.c_contiguous:\n rfft = rfft_builder(samples[:frame_len], n=frame_len)\n spect = np.vstack(np.abs(rfft(samples[pos:pos + frame_len] * win))\n for pos in range(0, len(samples) - frame_len + 1,\n int(hopsize)))\n else:\n rfft = rfft_builder(np.empty((batch, frame_len), samples.dtype),\n n=frame_len, threads=1)\n frames = np.lib.stride_tricks.as_strided(\n samples, shape=(num_frames, frame_len),\n strides=(samples.strides[0] * hopsize, samples.strides[0]))\n spect = [np.abs(rfft(frames[pos:pos + batch] * win))\n for pos in range(0, num_frames - batch + 1, batch)]\n if num_frames % batch:\n spect.append(spectrogram(\n samples[(num_frames // batch * batch) * hopsize:],\n sample_rate, frame_len, fps, batch=1))\n spect = np.vstack(spect)\n return spect",
"def make_spectro_old(\n fname, \n sample_rate=22050, \n n_fft=1024,\n hl=256, \n n_mels=512,\n cmap='magma',\n show=True, \n save=False\n ):\n \n # update this with os.path.join()\n fpath = \"../audio/\" + fname + \".wav\"\n y, sr = librosa.load(fpath,\n sr=sample_rate,\n duration=5.0,\n )\n \n # make the spectrogram matrix on mel scale\n M = librosa.feature.melspectrogram(y=y,\n sr=sample_rate,\n hop_length=hl, \n n_mels=n_mels\n )\n \n # creates figure of same aspect ratio as original\n w, h = figaspect(M)\n fig = plt.figure(figsize=(w,h), dpi=108)\n \n # these next two create a subplot with no margins\n ax = plt.subplot(111)\n plt.subplots_adjust(left=0, bottom=0, right=1, top=1, \n wspace=0, hspace=0\n )\n \n # creates visuals for display or saving\n if show or save:\n librosa.display.specshow(librosa.power_to_db(M, ref=np.max),\n sr=sample_rate,\n hop_length=hl,\n y_axis='mel', # mel, log, fft\n x_axis='time', # time\n cmap=cmap\n )\n\n if show:\n plt.show()\n \n if save:\n img_fpath = \"../specs/\" + fname + \".png\"\n plt.savefig(img_fpath, dpi=fig.dpi)\n plt.close(fig)\n \n return M",
"def get_Spectrogram(audio, Fs, fft_size=200, step_size=1, spec_thresh=2.5, window_type=0):\n return abs(move_spec(\n aid_spectrogram(audio.astype('float64'), log=True, thresh=spec_thresh, fft_size=fft_size, step_size=step_size,\n window_type=0))) / 2.5",
"def spectrum(self):\n spectrum = self.inherit(numpy.fft.fftshift(numpy.fft.fft(self, axis=-1), axes=-1)).view(MRSSpectrum)\n return spectrum",
"def mp3_to_spectrogram(file):\r\n y, sr = librosa.load(file, mono=False)\r\n mspec = librosa.feature.melspectrogram(y=y, sr=sr)\r\n temp_n_mels, temp_t_frames = mspec.shape\r\n\r\n if temp_n_mels > get_n_mels():\r\n set_n_mels(temp_n_mels)\r\n\r\n if temp_t_frames > get_t_frames():\r\n set_t_frames(temp_t_frames)\r\n\r\n return mspec\r\n # return mspec[0]\r",
"def save_spectrogram_tisv():\n print(\"start text independent utterance feature extraction\")\n os.makedirs(hp.data.train_path, exist_ok=True) # make folder to save train file\n os.makedirs(hp.data.test_path, exist_ok=True) # make folder to save test file\n\n utter_min_len = (hp.data.tisv_frame * hp.data.hop + hp.data.window) * hp.data.sr # lower bound of utterance length\n total_speaker_num = len(audio_path)\n train_speaker_num= (total_speaker_num//10)*9 # split total data 90% train and 10% test\n print(\"total speaker number : %d\"%total_speaker_num)\n print(\"train : %d, test : %d\"%(train_speaker_num, total_speaker_num-train_speaker_num))\n for i, folder in enumerate(audio_path):\n print(\"%dth speaker processing...\"%i)\n utterances_spec = []\n for utter_name in os.listdir(folder):\n if utter_name[-4:] == '.wav':\n utter_path = os.path.join(folder, utter_name) # path of each utterance\n times, segs = VAD_chunk(2, utter_path)\n #print(\"+++++++++++++++++++++++++++++\", len(segs))\n for i, seg in enumerate(segs):\n if (times[i][1]-times[i][0]) > 0.2: # If partial utterance is sufficient long,\n #utter_part = utter[interval[0]:interval[1]] # save first and last 180 frames of spectrogram.\n S = librosa.core.stft(y=seg, n_fft=hp.data.nfft,\n win_length=int(hp.data.window * hp.data.sr), hop_length=int(hp.data.hop * hp.data.sr))\n S = np.abs(S) ** 2\n mel_basis = librosa.filters.mel(sr=hp.data.sr, n_fft=hp.data.nfft, n_mels=hp.data.nmels)\n S = np.log10(np.dot(mel_basis, S) + 1e-6) # log mel spectrogram of utterances\n #samples = random.sample(S, 3 * hp.data.tisv_frame)\n #first = samples[]\n print(\"************************\", S.shape)\n #if(len(S) < 360):\n # print(\"less than 360\", len(S))\n # continue\n for i in range(0, S.shape[1] - hp.data.tisv_frame, hp.data.tisv_frame):\n #print(\"Appending of shape\", S[:, i * hp.data.tisv_frame: (i + 1) * hp.data.tisv_frame].shape)\n utterances_spec.append(S[:, i * hp.data.tisv_frame: (i + 1) * hp.data.tisv_frame ])\n #utterances_spec.append(S[:, :hp.data.tisv_frame]) # first 180 frames of partial utterance\n #utterances_spec.append(S[:, -hp.data.tisv_frame:]) # last 180 frames of partial utterance\n #print(\"Shape of S\", S[-2].shape, S[-1].shape)\n #concat_seg, concat_times = concat_segs(times, segs)\n #STFT_frames, STFT_times = get_STFTs(concat_seg, concat_times)\n #STFT_frames = np.stack(STFT_frames, axis=2)\n #STFT_frames = np.transpose(STFT_frames, axes=(2,1,0))\n\n #utter, sr = librosa.core.load(utter_path, hp.data.sr) # load utterance audio\n #intervals = librosa.effects.split(utter, top_db=30) # voice activity detection \n # this works fine for timit but if you get array of shape 0 for any other audio change value of top_db\n # for vctk dataset use top_db=100\n \"\"\"for interval in intervals:\n if (interval[1]-interval[0]) > utter_min_len: # If partial utterance is sufficient long,\n utter_part = utter[interval[0]:interval[1]] # save first and last 180 frames of spectrogram.\n S = librosa.core.stft(y=utter_part, n_fft=hp.data.nfft,\n win_length=int(hp.data.window * sr), hop_length=int(hp.data.hop * sr))\n S = np.abs(S) ** 2\n mel_basis = librosa.filters.mel(sr=hp.data.sr, n_fft=hp.data.nfft, n_mels=hp.data.nmels)\n S = np.log10(np.dot(mel_basis, S) + 1e-6) # log mel spectrogram of utterances\n utterances_spec.append(S[:, :hp.data.tisv_frame]) # first 180 frames of partial utterance\n utterances_spec.append(S[:, -hp.data.tisv_frame:]) # last 180 frames of partial utterance\n \"\"\"\n utterances_spec = np.array(utterances_spec)\n 
print(\"utterances_spec\", utterances_spec.shape)\n if(utterances_spec.shape[0] == 0):\n continue\n #print(utterances_spec.shape)\n if i<train_speaker_num: # save spectrogram as numpy file\n np.save(os.path.join(hp.data.train_path, \"speaker%d.npy\"%i), utterances_spec)\n else:\n np.save(os.path.join(hp.data.test_path, \"speaker%d.npy\"%(i-train_speaker_num)), utterances_spec)",
"def plotSpectrogram(self):\n\n\t\t#max freq represetnedf (nyquist constrained)\n\t\timgHeight = self.sampleRate/2\n\n\t\tself.p.y_range.end = imgHeight * self.numChannels\n\n\t\timgWidth = self.signalDuration\n\t\tself.p.x_range.end = imgWidth\n\n\t\tfor channelNum in self.activeChannels:\n\t\t\tchannelSignal = self.signal[:,channelNum]\n\n\t\t\tfreqs,times,data = self.log_specgram(channelSignal,self.sampleRate)\n\n\t\t\tself.p.image(image=[data], x=0, y=imgHeight*channelNum, dw=imgWidth, dh=imgHeight, palette=\"Spectral11\")",
"def _MelSpectrogram(self, signal):\n p = self.params\n # FFT.\n real_frequency_spectrogram = tf.signal.rfft(signal, [self._fft_size])\n magnitude_spectrogram = tf.abs(real_frequency_spectrogram)\n if p.compute_energy:\n magnitude_spectrogram = tf.square(magnitude_spectrogram)\n\n # Shape of magnitude_spectrogram is num_frames x (fft_size/2+1)\n # Mel_weight is [num_spectrogram_bins, num_mel_bins]\n mel_weight_matrix = tf.signal.linear_to_mel_weight_matrix(\n num_mel_bins=p.num_bins,\n num_spectrogram_bins=self._fft_size // 2 + 1,\n sample_rate=p.sample_rate,\n lower_edge_hertz=p.lower_edge_hertz,\n upper_edge_hertz=p.upper_edge_hertz,\n dtype=tf.float32)\n # Weight matrix implemented in the magnitude domain.\n batch_size, num_frames, fft_channels = py_utils.GetShape(\n magnitude_spectrogram, 3)\n mel_spectrogram = tf.matmul(\n tf.reshape(magnitude_spectrogram,\n [batch_size * num_frames, fft_channels]), mel_weight_matrix)\n mel_spectrogram = tf.reshape(mel_spectrogram,\n [batch_size, num_frames, p.num_bins])\n\n return mel_spectrogram",
"def spectrogram(files, adv_ms, len_ms, specfmt=\"dB\", mel_filters_N=12):\n\n # If not a list, make it so number one...\n if not isinstance(files, list):\n files = [files]\n\n # Set up frame stream and pass to DFT streamer\n framestream = MultiFileAudioFrames(files, adv_ms, len_ms)\n dftstream = DFTStream(framestream, specfmt=specfmt, mels_N=mel_filters_N)\n\n # Grab the spectra\n spectra = []\n for s in dftstream:\n spectra.append(s)\n\n # Convert to matrix\n spectra = np.asarray(spectra)\n\n # Time axis in s\n adv_s = framestream.get_frameadv_ms() / 1000\n t = [s * adv_s for s in range(spectra.shape[0])]\n\n return [spectra, t, dftstream.get_Hz()]",
"def audio_file_to_mel_spectrogram(filepath, num_bins, hop_length):\n # Read in audio file\n x, sr = librosa.load(filepath, sr=22050)\n\n # Get image window (aka image length)\n window = create_image_window(x, hop_length)\n\n # fourier transform\n stft = np.abs(librosa.stft(x, n_fft=2048, hop_length=hop_length))\n\n # Get Mel Spectrogram Features\n mel_spect = librosa.feature.melspectrogram(y=window, sr=sr,\n S=stft ** 2,\n n_fft=hop_length*2,\n n_mels=num_bins,\n hop_length=hop_length)\n # Convert to Db\n mel_spect = librosa.power_to_db(mel_spect, ref=np.max)\n\n return mel_spect"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Calculate the spectrogram of an imitation located at path. Code written by Bongjun Kim.
|
def imitation_spectrogram(path, augmentations: audaugio.ChainBase):
try:
y, sr = librosa.load(path, sr=16000)
except audioop.error as e:
logger = logging.getLogger('logger')
logger.warning("Could not load {0}\n{1}".format(path, e))
return None
augmented_audio = augmentations(y, sr)
spectrograms = []
for audio in augmented_audio:
# zero-padding
if audio.shape[0] < 4 * sr:
pad = np.zeros((4 * sr - audio.shape[0]))
y_fix = np.append(audio, pad)
else:
y_fix = audio[0:int(4 * sr)]
s = librosa.feature.melspectrogram(y=y_fix, sr=sr, n_fft=133,
hop_length=133, power=2, n_mels=39,
fmin=0.0, fmax=5000)
s = s[:, :482]
s = librosa.power_to_db(s, ref=np.max)
spectrograms.append(s)
return spectrograms
|
[
"def create_spectrogram(self, audio_path):\n audio_name = audio_path.split(\"/\")[-1].replace(\".wav\", \"\")\n fs, w = wavfile.read(audio_path)\n if len(w.shape) == 2:\n w = w[:, 0]\n dur = len(w) / fs\n\n cmap = plt.cm.get_cmap('Greys')\n cmap.set_under('w')\n f, t, sxx = scipy.signal.spectrogram(w, fs=fs, window='hann', nperseg=int(fs / 12.32),\n noverlap=int(self.overlap * (fs / 12.32)), mode='psd', nfft=16000)\n sxx_db = 10 * np.log10(abs(sxx[:1500, :]) / 2 * 10e-5)\n\n dpi = 50\n fig = plt.figure(figsize=(dur * self.sec_size // dpi, self.sec_size * 2 // dpi), dpi=dpi, frameon=False)\n ax = plt.Axes(fig, [0., 0., 1., 1.])\n ax.set_axis_off()\n fig.add_axes(ax)\n\n extent = (0, dur * self.sec_size // dpi, 0, self.sec_size * 2 // dpi)\n plt.imshow(sxx_db[::-1, :], cmap=cmap, extent=extent, norm=mpl.colors.Normalize(vmin=-50, vmax=0, clip=False))\n plt.savefig(osp.join(self.out_path, '%s.jpeg' % audio_name), dpi=dpi, frameon=False)\n\n # Resize saved image in case of bad matplotlib result\n img = imread(osp.join(self.out_path, '%s.jpeg' % audio_name))\n img = resize(img, (dur * self.sec_size, self.sec_size * 2)[::-1])\n imsave(osp.join(self.out_path, '%s.jpeg' % audio_name), img)",
"def get_spectrogram_data(frame_rate, np_frames):\n # Set format details for plot.\n #fig = plt.figure(num=None, figsize=(12, 7.5), dpi=300)\n #ax = fig.add_subplot(111)\n #ax.xaxis.set_major_locator(ticker.MultipleLocator(1))\n #ax.xaxis.set_minor_locator(ticker.MultipleLocator(0.1))\n #ax.yaxis.set_major_locator(ticker.MultipleLocator(2000))\n #ax.yaxis.set_minor_locator(ticker.MultipleLocator(500))\n #ax.tick_params(axis='both', direction='inout')\n #plt.title(f\"Spectrogram of:\\n{input_file}\")\n plt.title(f\"Spectrogram\")\n plt.xlabel('Time (seconds)')\n plt.ylabel('Frequency (Hz)')\n\n # If NFFT is too high, then there the horizontal (frequency) resolution is\n # too fine, and there are multiple bands for each formant. However, if\n # NFFT is too low, then the whole image is rather blurry and even the\n # formants are not well differentiated (i.e. at the default vaules for NFFT\n # and noverlap). noverlap that is half of NFFT seems to minimize background\n # noise, as well.\n noverlap = 128 # default: 128; other: 256\n NFFT = 256 # default: 256; other: 512\n\n # Create the plot.\n spectrum, frequencies, times, img = plt.specgram(\n np_frames,\n Fs=frame_rate,\n cmap='gnuplot',\n noverlap=noverlap,\n NFFT=NFFT,\n )\n return spectrum, frequencies, times, img",
"def spectrogram(samples):\n S, freqs, times = mlab.specgram(samples, NFFT=4096, Fs=44100,\n window=mlab.window_hanning,\n noverlap=(4096 // 2))\n return S, freqs, times",
"def compute_spectrogram(self):\n f, t, sxx = ss.spectrogram(self.audio_array, fs=self.sample_freq, window='hann', nfft=NFFT, nperseg=NFFT,\n noverlap=NFFT * .5)\n sxx = 10*np.log10(sxx) # decibels\n sxx[np.where(np.isnan(sxx) | np.isinf(sxx))] = 0 # if somehow the sxx was negative, we put the value to 0\n self.spectrogram = f, t, sxx\n return self.spectrogram",
"def calculate_spectrogram(framerate, data, chunk_len, stride, window_func=np.hamming):\n\tnum_chunks = int((len(data)-chunk_len)/float(stride))+1\n\twindow = window_func(chunk_len)\n\tchunks = [data[i*stride:i*stride+chunk_len] for i in range(num_chunks)]\n\twindowed_chunks = [window*chunk for chunk in chunks]\n\t# fourier transform each chunk, get abs magnitude\n\tspectra = np.array([np.abs(np.fft.fft(chunk)) for chunk in windowed_chunks])\n\treturn spectra",
"def reference_spectrogram(path, augmentations: audaugio.ChainBase):\n try:\n y, sr = librosa.load(path, sr=44100)\n except audioop.error as e:\n logger = logging.getLogger('logger')\n logger.warning(\"Could not load {0}\\n{1}\".format(path, e))\n return None\n\n augmented_audio = augmentations(y, sr)\n\n spectrograms = []\n for audio in augmented_audio:\n if audio.shape[0] < 4 * sr:\n pad = np.zeros((4 * sr - audio.shape[0]))\n y_fix = np.append(audio, pad)\n else:\n y_fix = audio[0:int(4 * sr)]\n s = librosa.feature.melspectrogram(y=y_fix, sr=sr, n_fft=1024, hop_length=1024, power=2)\n s = librosa.power_to_db(s, ref=np.max)\n s = s[:, 0:128]\n spectrograms.append(s)\n return spectrograms",
"def create_spectogram1(track_id):\n filename = get_path(track_id)\n y, sr = librosa.load(filename)\n spectrogram = librosa.feature.melspectrogram(y = y, sr = sr, n_fft = 2048, hop_length = 1024)\n spectrogram = librosa.power_to_db(spectrogram, ref = np.max)\n return spectrogram[:, 473:601]",
"def plotSpectrogram(self):\n\n\t\t#max freq represetnedf (nyquist constrained)\n\t\timgHeight = self.sampleRate/2\n\n\t\tself.p.y_range.end = imgHeight * self.numChannels\n\n\t\timgWidth = self.signalDuration\n\t\tself.p.x_range.end = imgWidth\n\n\t\tfor channelNum in self.activeChannels:\n\t\t\tchannelSignal = self.signal[:,channelNum]\n\n\t\t\tfreqs,times,data = self.log_specgram(channelSignal,self.sampleRate)\n\n\t\t\tself.p.image(image=[data], x=0, y=imgHeight*channelNum, dw=imgWidth, dh=imgHeight, palette=\"Spectral11\")",
"def create_spectrogram(voice_sample):\n\n in_fpath = Path(voice_sample.replace('\"', \"\").replace(\"'\", \"\"))\n original_wav, sampling_rate = librosa.load(str(in_fpath))\n\n # Plot the signal read from wav file\n fig = plt.figure()\n #plt.subplot(111)\n plt.title(f\"Spectrogram of file {voice_sample}\")\n\n plt.plot(original_wav)\n plt.xlabel(\"Sample\")\n plt.ylabel(\"Amplitude\")\n\n # plt.subplot(212)\n # plt.specgram(original_wav, Fs=sampling_rate)\n # plt.xlabel(\"Time\")\n # plt.ylabel(\"Frequency\")\n # # plt.savefig(voice_sample.split(\".\")[0] + \"_spectogram.png\")\n return fig",
"def plot_spectrogram(self):\n f, t, sxx = self.compute_spectrogram()\n plt.pcolormesh(t, f, sxx)\n plt.ylabel('Frequency [Hz]')\n plt.xlabel('Time (sec)')\n plt.show()",
"def get_spectrogram_slice(self, seg, start_time, end_time):\n seg_uname = segment_to_unique_name(seg)\n seg_spec = self.segment_specs[seg_uname]\n stim_spec_t,stim_spec_freq,stim_spec = seg_spec.get_spectrogram(float(start_time), float(end_time))\n\n return stim_spec_t,stim_spec_freq,stim_spec",
"def mel_spectogram(aud):\n mel = librosa.feature.melspectrogram(aud,\n sr=SAMPLING_RATE,\n n_fft=N_FFT,\n hop_length=H_L,\n n_mels=MEL_CHANNELS)\n # mel = np.log(mel + 1e-5)\n return mel",
"def _MelSpectrogram(self, signal):\n p = self.params\n # FFT.\n real_frequency_spectrogram = tf.signal.rfft(signal, [self._fft_size])\n magnitude_spectrogram = tf.abs(real_frequency_spectrogram)\n if p.compute_energy:\n magnitude_spectrogram = tf.square(magnitude_spectrogram)\n\n # Shape of magnitude_spectrogram is num_frames x (fft_size/2+1)\n # Mel_weight is [num_spectrogram_bins, num_mel_bins]\n mel_weight_matrix = tf.signal.linear_to_mel_weight_matrix(\n num_mel_bins=p.num_bins,\n num_spectrogram_bins=self._fft_size // 2 + 1,\n sample_rate=p.sample_rate,\n lower_edge_hertz=p.lower_edge_hertz,\n upper_edge_hertz=p.upper_edge_hertz,\n dtype=tf.float32)\n # Weight matrix implemented in the magnitude domain.\n batch_size, num_frames, fft_channels = py_utils.GetShape(\n magnitude_spectrogram, 3)\n mel_spectrogram = tf.matmul(\n tf.reshape(magnitude_spectrogram,\n [batch_size * num_frames, fft_channels]), mel_weight_matrix)\n mel_spectrogram = tf.reshape(mel_spectrogram,\n [batch_size, num_frames, p.num_bins])\n\n return mel_spectrogram",
"def specimg(Fs, signal, tone, nfft, noverlap, wbins, complex=False):\n nperseg = nfft if nfft < 256 or noverlap >= 256 else 256\n f, t, Sxx = spectrogram(signal, Fs, nfft=nfft, noverlap=noverlap, nperseg=nperseg, scaling='density')\n fbin = (tone/(Fs/2))* (len(f)-1)\n if complex:\n fbin /= 2\n center_bin = int(round(fbin))\n return f[center_bin-wbins:center_bin+wbins+1], t, Sxx[center_bin-wbins:center_bin+wbins+1,:]",
"def get_Spectrogram(audio, Fs, fft_size=200, step_size=1, spec_thresh=2.5, window_type=0):\n return abs(move_spec(\n aid_spectrogram(audio.astype('float64'), log=True, thresh=spec_thresh, fft_size=fft_size, step_size=step_size,\n window_type=0))) / 2.5",
"def save_spectrogram_tisv():\n print(\"start text independent utterance feature extraction\")\n os.makedirs(hp.data.train_path, exist_ok=True) # make folder to save train file\n os.makedirs(hp.data.test_path, exist_ok=True) # make folder to save test file\n\n utter_min_len = (hp.data.tisv_frame * hp.data.hop + hp.data.window) * hp.data.sr # lower bound of utterance length\n total_speaker_num = len(audio_path)\n train_speaker_num= (total_speaker_num//10)*9 # split total data 90% train and 10% test\n print(\"total speaker number : %d\"%total_speaker_num)\n print(\"train : %d, test : %d\"%(train_speaker_num, total_speaker_num-train_speaker_num))\n for i, folder in enumerate(audio_path):\n print(\"%dth speaker processing...\"%i)\n utterances_spec = []\n for utter_name in os.listdir(folder):\n if utter_name[-4:] == '.wav':\n utter_path = os.path.join(folder, utter_name) # path of each utterance\n times, segs = VAD_chunk(2, utter_path)\n #print(\"+++++++++++++++++++++++++++++\", len(segs))\n for i, seg in enumerate(segs):\n if (times[i][1]-times[i][0]) > 0.2: # If partial utterance is sufficient long,\n #utter_part = utter[interval[0]:interval[1]] # save first and last 180 frames of spectrogram.\n S = librosa.core.stft(y=seg, n_fft=hp.data.nfft,\n win_length=int(hp.data.window * hp.data.sr), hop_length=int(hp.data.hop * hp.data.sr))\n S = np.abs(S) ** 2\n mel_basis = librosa.filters.mel(sr=hp.data.sr, n_fft=hp.data.nfft, n_mels=hp.data.nmels)\n S = np.log10(np.dot(mel_basis, S) + 1e-6) # log mel spectrogram of utterances\n #samples = random.sample(S, 3 * hp.data.tisv_frame)\n #first = samples[]\n print(\"************************\", S.shape)\n #if(len(S) < 360):\n # print(\"less than 360\", len(S))\n # continue\n for i in range(0, S.shape[1] - hp.data.tisv_frame, hp.data.tisv_frame):\n #print(\"Appending of shape\", S[:, i * hp.data.tisv_frame: (i + 1) * hp.data.tisv_frame].shape)\n utterances_spec.append(S[:, i * hp.data.tisv_frame: (i + 1) * hp.data.tisv_frame ])\n #utterances_spec.append(S[:, :hp.data.tisv_frame]) # first 180 frames of partial utterance\n #utterances_spec.append(S[:, -hp.data.tisv_frame:]) # last 180 frames of partial utterance\n #print(\"Shape of S\", S[-2].shape, S[-1].shape)\n #concat_seg, concat_times = concat_segs(times, segs)\n #STFT_frames, STFT_times = get_STFTs(concat_seg, concat_times)\n #STFT_frames = np.stack(STFT_frames, axis=2)\n #STFT_frames = np.transpose(STFT_frames, axes=(2,1,0))\n\n #utter, sr = librosa.core.load(utter_path, hp.data.sr) # load utterance audio\n #intervals = librosa.effects.split(utter, top_db=30) # voice activity detection \n # this works fine for timit but if you get array of shape 0 for any other audio change value of top_db\n # for vctk dataset use top_db=100\n \"\"\"for interval in intervals:\n if (interval[1]-interval[0]) > utter_min_len: # If partial utterance is sufficient long,\n utter_part = utter[interval[0]:interval[1]] # save first and last 180 frames of spectrogram.\n S = librosa.core.stft(y=utter_part, n_fft=hp.data.nfft,\n win_length=int(hp.data.window * sr), hop_length=int(hp.data.hop * sr))\n S = np.abs(S) ** 2\n mel_basis = librosa.filters.mel(sr=hp.data.sr, n_fft=hp.data.nfft, n_mels=hp.data.nmels)\n S = np.log10(np.dot(mel_basis, S) + 1e-6) # log mel spectrogram of utterances\n utterances_spec.append(S[:, :hp.data.tisv_frame]) # first 180 frames of partial utterance\n utterances_spec.append(S[:, -hp.data.tisv_frame:]) # last 180 frames of partial utterance\n \"\"\"\n utterances_spec = np.array(utterances_spec)\n 
print(\"utterances_spec\", utterances_spec.shape)\n if(utterances_spec.shape[0] == 0):\n continue\n #print(utterances_spec.shape)\n if i<train_speaker_num: # save spectrogram as numpy file\n np.save(os.path.join(hp.data.train_path, \"speaker%d.npy\"%i), utterances_spec)\n else:\n np.save(os.path.join(hp.data.test_path, \"speaker%d.npy\"%(i-train_speaker_num)), utterances_spec)",
"def handle_create_spectrograms(state):\n states = []\n\n if state == 'ALL':\n states = ['FOCUSED', 'UNFOCUSED', 'DROWSY']\n else:\n states = [state]\n\n # need to check if state-data directory exists in path\n if not os.path.isdir(STATE_DATA_OUTPUT):\n print('Error: Directory \\'{0}\\' with raw input data doesnt exists!'.format(STATE_DATA_OUTPUT))\n exit(1)\n\n # iterate through states that we need to generate spectrogram images for\n for curr_state in states:\n output_root = os.path.join(CWD, curr_state)\n\n create_output_directory(output_root)\n\n path_to_search = os.path.join(STATE_DATA_OUTPUT, '**', curr_state)\n state_data_files = glob.glob(path_to_search, recursive=True)\n\n for filename in state_data_files:\n output_subpath = filename.replace(STATE_DATA_OUTPUT, '')\n output_subpath = output_subpath.replace(curr_state, '')\n output_filepath = '{0}{1}'.format(output_root, output_subpath)\n\n os.makedirs(output_filepath)\n\n # need to get data from file\n data = load_raw_state_data(filename)\n\n output_image = os.path.join(output_filepath, curr_state)\n\n # 128, 256, 10mins, ./FOCUSED/eeg_record7/10/FOCUSED\n interate_data(FREQUENCY, M, data, output_image)",
"def spectrogram(intensity, taxis, faxis):\r\n\r\n fig, ax = plt.subplots()\r\n plt.xlabel('Time (s)')\r\n plt.ylabel('Frequency (Hz)')\r\n k = ax.pcolormesh(taxis, faxis, intensity)\r\n c = plt.colorbar(k)\r\n c.set_label('Intensity (dB rel.)')\r\n plt.show()",
"def spectrograph(self):\n from pfs.utils.fibers import spectrographFromFiberId\n return spectrographFromFiberId(self.fiberId)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Output relative mouse move position based on angles and accelerations.
|
def handle_mouse_move(self, angles, acc):
if angles['theta'] > THETA_TRESHOLD and acc['a_x'] < -ACC_TRESHOLD:
print('move left')
pyautogui.moveRel(-10, 0)
elif angles['theta'] > THETA_TRESHOLD and acc['a_x'] > ACC_TRESHOLD:
print('move right')
pyautogui.moveRel(10, 0)
elif angles['theta'] > THETA_TRESHOLD and acc['a_y'] < -ACC_TRESHOLD:
print('move up')
pyautogui.moveRel(0, -10)
        elif angles['theta'] > THETA_TRESHOLD and acc['a_y'] > ACC_TRESHOLD:
print('move down')
pyautogui.moveRel(0, 10)
|
[
"def get_mouse_position(self):\r\n\t\treturn -Vector.origin[0] + pygame.mouse.get_pos()[0], \\\r\n\t\tVector.origin[1] - pygame.mouse.get_pos()[1]",
"def mousepos():\n data = display.Display().screen().root.query_pointer()._data\n return data[\"root_x\"], data[\"root_y\"]",
"def display_mouse_position(self,xOffset=0, yOffset=0):\n print('Press Ctrl-C to quit.')\n if xOffset != 0 or yOffset != 0:\n print('xOffset: %s yOffset: %s' % (xOffset, yOffset))\n resolution = self.size()\n try:\n while True:\n # Get and print the mouse coordinates.\n x, y = self.position()\n positionStr = 'X: ' + str(x - xOffset).rjust(4) + ' Y: ' + str(y - yOffset).rjust(4)\n if (x - xOffset) < 0 or (y - yOffset) < 0 or (x - xOffset) >= resolution[0] or (y - yOffset) >= \\\n resolution[1]:\n pixelColor = ('NaN', 'NaN', 'NaN')\n else:\n pixelColor = screenshot().getpixel((x, y))\n positionStr += ' RGB: (' + str(pixelColor[0]).rjust(3)\n positionStr += ', ' + str(pixelColor[1]).rjust(3)\n positionStr += ', ' + str(pixelColor[2]).rjust(3) + ')'\n sys.stdout.write(positionStr)\n sys.stdout.write('\\b' * len(positionStr))\n sys.stdout.flush()\n except KeyboardInterrupt:\n sys.stdout.write('\\n')\n sys.stdout.flush()",
"def print_angles(self):\n print('The shoulder is at position:', self.shoulder.position)\n print('The Tilt is at position:', self.tilt.position)\n print('The Pan is at position:', self.pan.position)",
"def mouse_move(self, x_float, y_float):\n x = int(x_float * self.screen_x)\n y = int(y_float * self.screen_y)\n self.last_x = x\n self.last_y = y\n\n win32api.SetCursorPos((x, y))",
"def on_mouse_motion(self, x, y):\n\tself.mousePos = (x,y)",
"def mouse_controller():\n mouse = Controller()\n for k in range(250,350):\n mouse.position = (k,k+50) # For the moving effect",
"def mouse_position():\n pos = pygame.mouse.get_pos()\n return pygame.math.Vector2(pos[0], pos[1])",
"def track_mouse():\n for i in range(1999):\n time.sleep(.1)\n x,y = bot.position()\n print(str(x) + \", \" + str(y))",
"def get_angle(self):\n mx, my = self.mouse.get_pos()\n angle = math.degrees(math.atan2((mx - C_X), (my - C_Y)))\n if angle < 0:\n angle += 360\n return int(angle)",
"def rotate(self, player_delta_vector_m: Vec3):\n\n base.camera.setH(-controls.mouse_pos['x']\n * controls.MOUSE_SENSITIVITY_DEG)\n\n controls.limit_mouse_pos(self.__MIN_PITCH_DEG, self.__max_pitch_deg)\n\n if controls.mouse_pos['y'] * controls.MOUSE_SENSITIVITY_DEG \\\n < self.__MIN_PITCH_DEG:\n base.camera.setP(self.__MIN_PITCH_DEG)\n\n elif controls.mouse_pos['y'] * controls.MOUSE_SENSITIVITY_DEG \\\n > self.__max_pitch_deg:\n base.camera.setP(self.__max_pitch_deg)\n\n else:\n base.camera.setP(controls.mouse_pos['y']\n * controls.MOUSE_SENSITIVITY_DEG)\n\n camera_yaw_deg = base.camera.getH()\n camera_yaw_rad = math.radians(camera_yaw_deg)\n camera_yaw_quarter = rotation.get_angle_quarter(camera_yaw_deg)\n\n pos_x_of_x_offset_m = 0\n pos_y_of_x_offset_m = 0\n\n if camera_yaw_quarter == 1 or 2:\n pos_x_of_x_offset_m = math.cos(camera_yaw_rad) \\\n * self.__relative_offset_m.getX()\n\n pos_y_of_x_offset_m = math.sin(camera_yaw_rad) \\\n * self.__relative_offset_m.getX()\n\n elif camera_yaw_quarter == 3:\n angle_rad = math.radians(camera_yaw_deg - 90)\n\n pos_x_of_x_offset_m = -math.sin(angle_rad) \\\n * self.__relative_offset_m.getX()\n\n pos_y_of_x_offset_m = math.cos(angle_rad) \\\n * self.__relative_offset_m.getX()\n\n elif camera_yaw_quarter == 4:\n angle_rad = math.radians(camera_yaw_deg - 180)\n\n pos_x_of_x_offset_m = -math.cos(angle_rad) \\\n * self.__relative_offset_m.getX()\n\n pos_y_of_x_offset_m = -math.sin(angle_rad) \\\n * self.__relative_offset_m.getX()\n\n camera_pitch_rad = math.radians(base.camera.getP())\n\n # Pitch cosinus - looking down.\n x = player_delta_vector_m.getX() + pos_x_of_x_offset_m \\\n + (math.sin(camera_yaw_rad) * self.__relative_offset_m.getY()\n * math.cos(camera_pitch_rad))\n\n y = player_delta_vector_m.getY() + pos_y_of_x_offset_m \\\n + (math.cos(camera_yaw_rad) * -self.__relative_offset_m.getY()\n * math.cos(camera_pitch_rad))\n\n z = player_delta_vector_m.getZ() + self.__relative_offset_m.getZ() \\\n + (math.sin(-camera_pitch_rad) * self.__relative_offset_m.getY())\n\n base.camera.setPos(x, y, z)",
"def print_position(self) -> None:\n self.hkl_now = list(self.calculate_hkl_from_angles())\n self.pseudo_dict_to_update = self.get_pseudo_angles_from_motor_angles()\n print(\"\")\n print(\n \"HKL now = \",\n format_5_decimals(self.hkl_now[0]),\n format_5_decimals(self.hkl_now[1]),\n format_5_decimals(self.hkl_now[2]),\n )\n print(\"\")\n print(\n \"Alpha = {}\".format(\n format_5_decimals(self.pseudo_dict_to_update[\"alpha\"])\n )\n )\n print(\n \"Beta = {}\".format(\n format_5_decimals(self.pseudo_dict_to_update[\"beta\"])\n )\n )\n print(\n \"Psi = {}\".format(\n format_5_decimals(self.pseudo_dict_to_update[\"psi\"])\n )\n )\n print(\n \"Tau = {}\".format(\n format_5_decimals(self.pseudo_dict_to_update[\"tau\"])\n )\n )\n print(\n \"Qaz = {}\".format(\n format_5_decimals(self.pseudo_dict_to_update[\"qaz\"])\n )\n )\n print(\n \"Naz = {}\".format(\n format_5_decimals(self.pseudo_dict_to_update[\"naz\"])\n )\n )\n print(\n \"Omega = {}\".format(\n format_5_decimals(self.pseudo_dict_to_update[\"omega\"])\n )\n )\n print(\"\")\n print(\n \"Del = {}\".format(\n format_5_decimals(self.experiment_file_dict[\"motors\"][\"del\"][\"value\"])\n )\n )\n print(\n \"Eta = {}\".format(\n format_5_decimals(self.experiment_file_dict[\"motors\"][\"eta\"][\"value\"])\n )\n )\n print(\n \"Chi = {}\".format(\n format_5_decimals(self.experiment_file_dict[\"motors\"][\"chi\"][\"value\"])\n )\n )\n print(\n \"Phi = {}\".format(\n format_5_decimals(self.experiment_file_dict[\"motors\"][\"phi\"][\"value\"])\n )\n )\n print(\n \"Nu = {}\".format(\n format_5_decimals(self.experiment_file_dict[\"motors\"][\"nu\"][\"value\"])\n )\n )\n print(\n \"Mu = {}\".format(\n format_5_decimals(self.experiment_file_dict[\"motors\"][\"mu\"][\"value\"])\n )\n )\n print(\"\")",
"def _update_position(self, (x, y), (q, z), angle):\n self._current_position = (x, y)\n self._current_direction = (q, z)\n self._current_angle = angle\n self._senderPi_position.sent_position(x, y)\n self._senderPi_direction.sent_direction(self.get_angle())\n self._current_angle = (angle * 180.0) / pi\n self.add_to_console(\"[ \" + str(datetime.now().time())[:11] + \" ] \"\n + \"Current position: \" + str(self.get_position()))",
"def getMouseLocation(self):\n return Point(self._x, self._y)",
"def getMousePos(self):\n\n return pygame.mouse.get_pos()",
"def getPos(self):\n\t\treturn self.__robot.x(), self.__robot.y(), self.__robot.a()",
"def move_cursor(self, y, x):\n print(self.term.move(y, x))",
"def _rotate(self):\n mousepos = pygame.mouse.get_pos()\n dx = self.rect.centerx - mousepos[0]\n dy = self.rect.centery - mousepos[1]\n # Note: dy != 0 since we divide by it; the > N is to prevent erratic\n # wobbling when the mouse is near the center.\n if abs(dx) > 2 and abs(dy) > 2:\n tangent = float(dx) / float(dy)\n angle = math.degrees(math.atan(tangent))\n if (dy < 0):\n angle = 180.0 + angle\n self.angle = angle",
"def mouse_motion(self, canvas_x, canvas_y):\r\n ###if self.doing_mouse_motion:\r\n ### return # Block multiple calls\r\n pc = CanvasCoords(self.sc, canvas_x=canvas_x, canvas_y=canvas_y)\r\n pc_lat = pc.lat\r\n pc_long = pc.long\r\n self.doing_mouse_motion = True\r\n if self.in_point is not None and self.in_point_is_down:\r\n SlTrace.lg(f\"mouse_motion(in_point): canvas_x,y={canvas_x:.0f}, {canvas_y:.0f}\", \"mouse_motion\")\r\n point = self.in_point\r\n if self.in_point_start is not None:\r\n lat_start = self.in_point_start[0]\r\n long_start = self.in_point_start[1]\r\n lat_delta = pc_lat-lat_start\r\n long_delta = pc_long-long_start\r\n lat_new = lat_start + lat_delta\r\n long_new = long_start + long_delta\r\n SlTrace.lg(f\" lat,long_start: {lat_start:.6f}, {long_start:.6f}\"\r\n f\" lat_delta:{lat_delta:.6f}, {long_delta:.6f} lat,long_new:{lat_new:.6f},{long_new:.6f}\", \"mouse_motion\")\r\n point.move(lat=lat_new, long=long_new)\r\n else:\r\n if self.track_sc:\r\n self.sc_track_update(canvas_x, canvas_y)\r\n ###self.doing_mouse_motion = False\r"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Computes the pathname of the latest ragel trie.
|
def LatestRagelTriePath(top_level_dir, bitness):
if bitness not in (32, 64):
raise AssertionError('invalid bitness: ', bitness)
ragel_dirs = {32: 'ragel_trie_x86_32', 64: 'ragel_trie_x86_64'}
tries = os.listdir(os.path.join(top_level_dir, ragel_dirs[bitness]))
if not tries:
raise AssertionError('no tries found: ', top_level_dir, bitness)
# For now, we assume that all of the tries start with the date,
# so we pick the maximum.
return os.path.join(top_level_dir, ragel_dirs[bitness], max(tries))
|
[
"def lastpath(self):\n if self._lastpath is None:\n return \"\"\n maplist = self.mapstr[:].split()\n for xval, yval, tree in self._lastpath:\n maplist[xval] = f\"{maplist[xval][:yval]}{tree}{maplist[xval][yval + 1:]}\"\n\n return \"\\n\".join(maplist)",
"def _lookup_last_dir():\n return _load_pickle().get('last_directory', '')",
"def get_latest_symlink_path(repodir):\n return os.path.join(repodir, LATESTREPO)",
"def path_tail(path: str) -> str:\n return os.path.split(path)[0]",
"def getRucioPath(self, scope, lfn, prefix='rucio'):\n\n # <prefix=rucio>/<scope>/md5(<scope>:<lfn>)[0:2]/md5(<scope:lfn>)[2:4]/<lfn>\n\n hash_hex = hashlib.md5('%s:%s' % (scope, lfn)).hexdigest()\n\n paths = [prefix] + scope.split('.') + [hash_hex[0:2], hash_hex[2:4], lfn]\n paths = filter(None, paths) # remove empty parts to avoid double /-chars\n return '/'.join(paths)\n\n #scope = os.path.join(*scope.split('.')) # correct scope\n #return os.path.join(prefix, scope, hash_hex[0:2], hash_hex[2:4], lfn)",
"def get_path_ref_high(path=None):\n if path == None:\n path = my.pwd()\n\n #print \"2\",path\n ### check ob wir in ti sind\n get.from_path_string_job_is(path=path,job=\"ti\")\n\n #print \"3 \"\n ## get sc for refpath\n sc = get.from_path_string_details_supercell(path=path)\n\n\n ## get /home/glensk/v/PAW_PBE/Al/ti_divak_fcc4 oder so, der pfad in welchem\n pathout = get.get_path_job_type_cell(path=path)\n\n #print \"4\",path\n ref = my.checkdir(pathout+\"/ref_high_\"+sc,create=True)\n return ref",
"def get_tree_path(h_tree, item_id):\n path_names = get_tree_path_names(h_tree, item_id)\n path_names.reverse()\n return '/'.join(path_names)",
"def get_path(self) -> str:\n return f'{self.parent.path}.{self.key}' if self.parent else self.key",
"def find_best_path(lattice):\n # FIXME *** IMPLEMENT ME ***\n\n best_path = []\n start = lattice[len(lattice) - 1]['<s>'][1]\n best_path.append(start)\n count = 0\n for num in range(len(lattice) - 2, 1, -1):\n best_path.insert(0,lattice[num][best_path[count]][1])\n count += 1\n \n return best_path",
"def get_path(self, path):\n return path[len(self.base)+2:]",
"def _dir_revhash(gid: int) -> str:\n dir_hash = list(reversed(str(gid)))\n dir_hash.pop()\n return path.join(*dir_hash) if dir_hash else path.curdir",
"def find_latest_checkpoint(self) -> str:\n files = glob.glob(os.path.join(self.storage_dir, \"*_state.pth\"))\n latest_file_path = max(files, key=os.path.getctime)\n latest_file_path = os.path.join(self.storage_dir, latest_file_path)\n return latest_file_path",
"def get_raxml_phylippath(dir):\n nick = get_msa_nickname(dir)\n return dir + \"/\" + ap.params[\"geneid\"] + SEP + nick + SEP + \"raxml\" + SEP + \"phylip\"",
"def fullpath(self):\n top = self\n path = []\n while top:\n path.insert(0, str(top))\n top = top.directory\n return dpath(os.path.join(*path), **self.connection)",
"def hash(self):\r\n return self.relpath",
"def get_last_checkpoint():\n checkpoint_dir = get_checkpoint_dir()\n checkpoints = [f for f in pathmgr.ls(checkpoint_dir) if _NAME_PREFIX in f]\n last_checkpoint_name = sorted(checkpoints)[-1]\n return os.path.join(checkpoint_dir, last_checkpoint_name)",
"def last_model_path(self) -> str:\n m = self.last_model()\n if m < 1:\n return None\n path = os.path.join(self.model_dir, str(m), MODEL_NAME)\n if not os.path.exists(path):\n self.clear()\n return None\n return path",
"def list_longest_path(node: Union[BTNode, None]) -> list:",
"def get_lava_job_id():\n lava_id = []\n list_dir = [f for f in os.listdir('/') if re.match(r'lava',f)]\n for d in list_dir:\n print('[DEBUG] lava id: %s' % d)\n lava_id.append(os.path.join('/', d))\n cur_lava_id = max(lava_id, key=os.path.getmtime).replace('/lava-', '')\n print('[DEBUG] Current lava id: %s' % cur_lava_id)\n return cur_lava_id"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
A horizontal dual spin box widget designed for the XY size of a maze.
|
def __init__(
self,
minimum: XY,
maximum: XY,
initialValue: XY,
parent: Optional[QWidget] = None,
label: str = "by",
*args: Tuple[Any, Any],
**kwargs: Tuple[Any, Any],
) -> None:
valid = True
        if (initialValue.x < minimum.x) or (initialValue.x > maximum.x):
            valid = False
        elif (initialValue.y < minimum.y) or (initialValue.y > maximum.y):
            valid = False
if not valid:
raise ValueError(
f"Initial value for XYPicker must be between {minimum} and {maximum}."
)
super(XYPicker, self).__init__(parent=parent, *args, **kwargs)
self.setContentsMargins(0, 0, 0, 0)
mazeSizePickerLayout = QHBoxLayout()
mazeSizePickerLayout.setContentsMargins(0, 0, 0, 0)
self.__xSpinBox = QSpinBox()
self.__ySpinBox = QSpinBox()
self.__xSpinBox.setMinimum(minimum.x)
self.__ySpinBox.setMinimum(minimum.y)
self.__xSpinBox.setMaximum(maximum.x)
self.__ySpinBox.setMaximum(maximum.y)
self.__xSpinBox.setValue(initialValue.x)
self.__ySpinBox.setValue(initialValue.y)
mazeSizePickerLayout.addWidget(self.__xSpinBox)
mazeSizePickerLayout.addWidget(QLabel(label))
mazeSizePickerLayout.addWidget(self.__ySpinBox)
self.setLayout(mazeSizePickerLayout)
|
[
"def inflatebox(factor, lft, bot, rt, top):\n midx = (rt + lft)/2\n halfwidth = factor*(rt - lft)/2\n midy = (top + bot)/2\n halfheight = factor*(top - bot)/2\n return midx - halfwidth, midy - halfheight, \\\n midx + halfwidth, midy + halfheight",
"def draw_box(self, boxsize):\n self.go_to(Point(-boxsize, -boxsize))\n self.draw_square(boxsize * 2)\n self.go_home()",
"def box_type_1(self, X, Y, name, ident, box_width, box_height):\r\n boxW2 = box_width / 2\r\n boxH2 = box_height / 2\r\n x0, y0 = X - boxW2, Y - boxH2 # Top_left of box\r\n x1, y1 = X + boxW2, Y + boxH2 # Bottom_right of box\r\n width = x1 - x0\r\n height = y1 - y0\r\n \r\n box = gui.SvgRectangle(x0, y0, width, height)\r\n box.set_stroke(width=2, color='black')\r\n box.set_fill(color='yellow')\r\n box_name = gui.SvgText(X, Y, name)\r\n box_name.attributes['text-anchor'] = 'middle'\r\n box_id = gui.SvgText(X, Y + 15, str(ident))\r\n box_id.attributes['text-anchor'] = 'middle'\r\n self.sheet.append([box, box_name, box_id])\r\n\r\n mid_north = [X, Y - boxH2]\r\n mid_south = [X, Y + boxH2]\r\n mid_east = [X + boxW2, Y]\r\n mid_west = [X - boxW2, Y]\r\n\r\n return mid_north, mid_south, mid_east, mid_west",
"def setMinBoxArea(self, value) -> None:\n ...",
"def _define_size(self):\n rect = wx.Rect(0, 0, 0, 0)\n self.rect.height = self.PADDING\n for line in self.lines:\n # iterate through each line and set the TextBox around the line with\n # the largest width and height\n if line.rect.width > rect.width:\n rect.width = line.rect.width + self.PADDING\n if line.rect.height > rect.height:\n rect.height = line.rect.height\n # grow our TextBox to fit the next line of Text include Padding\n self.rect.height += line.rect.height\n self.rect.height += self.PADDING\n self.rect.width = rect.width",
"def __init__(self, ax, name, label, valmin, valmax, valinit=0.5, width=1, valfmt='%1.2f', \n closedmin=True, closedmax=True, slidermin=None,\n slidermax=None, dragging=True, **kwargs):\n AxesWidget.__init__(self, ax)\n\n self.valmin = valmin\n self.valmax = valmax\n self.val = valinit\n self.valinit = valinit\n self.ax = ax\n self.width = width\n self.poly = ax.axvspan(valmax-self.width,valmax, 0, 1, **kwargs)\n self.name = name\n #axhspan\n #self.vline = ax.axvline(valinit, 0, 1, color='r', lw=1)\n\n self.valfmt = valfmt\n ax.set_yticks([])\n ax.set_xlim((valmin, valmax))\n ax.set_xticks([])\n ax.set_navigate(False)\n\n self.connect_event('button_press_event', self._update)\n self.connect_event('button_release_event', self._update)\n if dragging:\n self.connect_event('motion_notify_event', self._update)\n self.label = ax.text(-0.02, 0.5, label, transform=ax.transAxes,\n verticalalignment='center',\n horizontalalignment='right')\n\n self.valtext = ax.text(1.02, 0.5, valfmt % valinit,\n transform=ax.transAxes,\n verticalalignment='center',\n horizontalalignment='left')\n\n self.cnt = 0\n self.observers = {}\n\n self.closedmin = closedmin\n self.closedmax = closedmax\n self.slidermin = slidermin\n self.slidermax = slidermax\n self.drag_active = False",
"def size_calc(self):\n #rospy.loginfo(\"box_size: {}\".format(self.box_size))\n width = self.flag_box[1][0] - self.flag_box[0][0]\n height = self.flag_box[1][1] - self.flag_box[0][1]\n # self.box_size = width*height\n #print(\"AREA\", width*height)\n box_area = width*height\n if box_area <= 320 and box_area >= 250:\n self.count += 1\n else:\n self.count == 0\n print(\"COUNT\", self.count)\n self.box_x = (self.flag_box[0][0]+self.flag_box[1][0])/2\n #rospy.loginfo(\"x: {} , y: {}\".format(self.box_x, box[0][1]))",
"def box(self):\n self._write_pos(0, 0, None, \"┌\", None) # ┌╭\n for x in range(1, self.window_size[1]):\n self._write_pos(0, x, None, \"─\", None)\n\n self._write_pos(0, self.window_size[1] - 1, None, \"┐\", None) # ┐╮\n for y in range(1, self.window_size[0]):\n self._write_pos(y, self.window_size[1] - 1, None, \"│\", None)\n\n self._write_pos(\n self.window_size[0], self.window_size[1] - 1, None, \"┘\", None\n ) # ┘╯\n\n for x in range(self.window_size[1] - 2, 0, -1):\n self._write_pos(self.window_size[0], x, None, \"─\", None)\n\n self._write_pos(self.window_size[0], 0, None, \"└\", None) # └╰\n\n for y in range(self.window_size[0] - 1, 0, -1):\n self._write_pos(y, 0, None, \"│\", None)",
"def onTimer(self):\r\n\t\t\r\n\t\t#self.setMinimumSize(10,10)\r\n\t\tself.setMaximumSize(10000,10000)",
"def draw_bar(self): \n for i in range(0, self._max_step):\n self.create_rectangle(i*(300/self._max_step),0,(i+1)*(300/self._max_step),20,\\\n fill='',outline='white')",
"def plot_box( x0, y0, xl, yl=None, rotang=None, **style ):\n \n if None == rotang:\n rotang = 0\n \n if None == yl:\n yl = xl\n \n dx=xl/2.0\n dy=yl/2.0\n \n xx = np.array([ x0-dx, x0+dx, x0+dx, x0-dx])\n yy = np.array([ y0-dy, y0-dy, y0+dy, y0+dy])\n \n dx = xx-x0\n dy = yy-y0 \n cosa = np.cos( np.deg2rad( -rotang) )\n sina = np.sin( np.deg2rad( -rotang) ) \n rx = ( dx * cosa + dy * sina ) + x0\n ry = (-dx * sina + dy * cosa ) + y0\n\n # plot it\n add_region(rx,ry, **style)",
"def _layout_pixel_size(self):\n #Pixel Size\n pixel_size_txt = wx.StaticText(self, -1, 'Pixel Size:')\n x_pixel_size_txt = wx.StaticText(self, -1, 'x = ')\n self.x_pixel_size_tcl = wx.TextCtrl(self, -1,\n size=(_BOX_WIDTH, 20), style=0)\n y_pixel_size_txt = wx.StaticText(self, -1, 'y = ')\n self.y_pixel_size_tcl = wx.TextCtrl(self, -1,\n size=(_BOX_WIDTH, 20), style=0)\n z_pixel_size_txt = wx.StaticText(self, -1, 'z = ')\n self.z_pixel_size_tcl = wx.TextCtrl(self, -1,\n size=(_BOX_WIDTH, 20), style=0)\n pixel_size_unit_txt = wx.StaticText(self, -1, 'Unit: ')\n self.pixel_size_unit_tcl = wx.TextCtrl(self, -1,\n size=(_BOX_WIDTH, 20), style=0)\n self.pixel_sizer.AddMany([(pixel_size_txt, 0, wx.LEFT | wx.RIGHT, 10),\n (x_pixel_size_txt, 0, wx.LEFT, 17),\n (self.x_pixel_size_tcl, 0, wx.RIGHT, 10),\n (y_pixel_size_txt, 0, wx.EXPAND),\n (self.y_pixel_size_tcl, 0, wx.RIGHT, 10),\n (z_pixel_size_txt, 0, wx.EXPAND),\n (self.z_pixel_size_tcl, 0, wx.RIGHT, 10),\n (pixel_size_unit_txt, 0, wx.EXPAND),\n (self.pixel_size_unit_tcl, 0, wx.RIGHT, 10)])",
"def circuit_diagram_widget() -> wid.Box:\n # The max circuit height corresponds to a 20Q circuit with flat\n # classical register.\n top_out = wid.Output(\n layout=wid.Layout(\n width=\"100%\",\n height=\"auto\",\n max_height=\"1000px\",\n overflow=\"hidden scroll\",\n )\n )\n\n top = wid.Box(children=[top_out], layout=wid.Layout(width=\"100%\", height=\"auto\"))\n\n return top",
"def __init__(self, x1_size, x2_size, x_shift, y_size, z_size):\n self.x1_size = x1_size\n self.x2_size = x2_size\n self.x_shift = x_shift\n self.y_size = y_size\n self.z_size = z_size #thickness size\n self.location = Point(0, 0, 0)",
"def box_size(self, time=0):\n\n\t\treturn self[time].configuration.box[0]",
"def resize(event): \n\n drawTet(tet,tetColor)",
"def periodic_box(fname, dsize, render):\r\n geo_to_stl(fname + \"Tessellation.geo\")\r\n vtk_tools.stl_to_periodic_box(\r\n fname + \"Tessellation.stl\", fname + \"TessellationBox.stl\", [0, 0, 0],\r\n [dsize, dsize, dsize], render\r\n )",
"def update(self):\n self.rect.topleft = (self.x * BOX_LENGTH, self.y * BOX_LENGTH)",
"def draw(self, t, size):"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the X and Y values of this input widget. Returns Tuple[int, int]: the X and Y values of the number picker spin boxes.
|
def getValues(self) -> XY:
return XY(
self.__xSpinBox.value(),
self.__ySpinBox.value(),
)
|
[
"def _get_plot_coordinates(self) -> Tuple[int, int]:\n return self._x0 + AXIS_SPACE_PX, self._y0 # y does not need to be added AXIS_SPACE_PX, since it is at bottom",
"def unpack_coords(self):\n y = self.flat_value/Point.width\n x = abs((y * self.width) - self.flat_value)\n return x, y",
"def as_tuple(self) -> tuple:\n return self.xl, self.yl, self.xh, self.yh",
"def int_pair(self):\n return tuple([int(self.x), int(self.y)])",
"def get_coords(self):\n return self.x1, self.y1, self.x2, self.y2",
"def get_element_location(self, value):\n try:\n location = self.get_element(value).location_in_view\n x = location['x']\n y = location['y']\n return x, y\n except AttributeError as msg:\n raise msg",
"def xy(self):\n return self._x, self._y",
"def get_position(self):\n return self._rect.x, self._rect.y",
"def get_selected_box(event):\n\n # -- init absolute start position\n x_start = 0\n y_start = 0\n if isinstance(event.widget, Label):\n x_start = event.widget.winfo_x()\n y_start = event.widget.winfo_y()\n\n x = (event.x + x_start) // Map.BOX_DIM\n y = (event.y + y_start) // Map.BOX_DIM\n\n return x, y",
"def pos_to_tuple(self):\n return self.pos.x, self.pos.y",
"def getPositionAndSize(self):\n (x, y) = self.getXY()\n w = self.getWidth()\n h = self.getHeight()\n return x, y, w, h",
"def coordinates(self):\n\t\tplayer = self.value['Map']['World']['Player']\n\t\treturn player['X'], player['Y']",
"def position(self):\n self._pedbcomponents._edb.Geometry.PointData(self._pedbcomponents._edb_value(0.0),\n self._pedbcomponents._edb_value(0.0))\n if is_ironpython:\n out = self.pin.GetPositionAndRotationValue()\n else:\n out = self.pin.GetPositionAndRotationValue(\n self._pedbcomponents._edb.Geometry.PointData(self._pedbcomponents._edb_value(0.0),\n self._pedbcomponents._edb_value(0.0)),\n self._pedbcomponents._edb_value(0.0),)\n if out[0]:\n return [out[1].X.ToDouble(), out[1].Y.ToDouble()]",
"def _coords(self, x, y):\n return y, x * 2",
"def position(self):\n\t\t\n\t\treturn tuple(self._position)",
"def get_coords(self):\n\t\treturn self.x, self.y, self.z",
"def __get_x_y(self, number):\n return number % self.map.width, number / self.map.width",
"def get_xy_for_electrode(self, idx):\n # check if grid exists\n if self.grid is None:\n raise ValueError('Grid is not set.')\n else:\n x, y = self.grid[:, idx]\n return int(x-1), int(y-1)",
"def get_interpolation_points(self):\n return self._lower_points, self._upper_points"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Parse a cUR50 TFRecord record into a tuple of tensors
|
def cUR50_parser(record):
keys_to_features = {
"uniref_id": tf.FixedLenFeature([], tf.string),
"seq_len": tf.FixedLenFeature([], tf.int64),
"seq": tf.FixedLenFeature([], tf.string),
"seq_phyche": tf.VarLenFeature(tf.float32),
}
parsed = tf.parse_single_example(record, keys_to_features)
uniref_id = parsed["uniref_id"]
uniref_id = tf.cast(uniref_id, tf.string)
seq_len = parsed["seq_len"]
seq_len = tf.cast(seq_len, tf.int32)
seq = parsed["seq"]
#seq = tf.reshape(seq, [-1])
seq_phyche = tf.sparse_tensor_to_dense(parsed["seq_phyche"])
return uniref_id, seq_len, seq, seq_phyche
|
[
"def _parse_record(example_proto):\n\n example = tf.parse_single_example(example_proto, feature)\n im = tf.decode_raw(example['image'], tf.float32)\n im = tf.reshape(im, (img_rows, img_cols, 1))\n\n label = tf.decode_raw(example['label'], tf.int32)\n label = tf.reshape(label, (4, 1))\n\n return (im, label)",
"def parse_tfrecord_example(serialized: bytes) -> typing.Tuple[tf.Tensor, tf.Tensor]:\n features = {\n \"feature/value\": tf.io.FixedLenFeature(shape=(), dtype=tf.string),\n \"label/value\": tf.io.FixedLenFeature(shape=(), dtype=tf.string),\n }\n example = tf.io.parse_single_example(serialized, features)\n x = tf.io.decode_raw(example[\"feature/value\"], feature_dtype)\n y = tf.io.decode_raw(example[\"label/value\"], label_dtype)\n # The shapes are encoded in the TFRecord file, but we cannot use\n # them dynamically (aka reshape according to the shape in this example).\n if feature_shape is not None:\n x = tf.reshape(x, shape=feature_shape)\n if label_shape is not None:\n y = tf.reshape(y, shape=label_shape)\n return x, y",
"def _decode_record(record, name_to_features):\n\texample = tf.parse_example(record, name_to_features)\n\treturn example",
"def _decode_record(record):\n name_to_features = {\n \"input_ids\":\n tf.io.FixedLenFeature([max_encoder_length], tf.int64),\n \"segment_ids\":\n tf.io.FixedLenFeature([max_encoder_length], tf.int64),\n \"masked_lm_positions\":\n tf.io.FixedLenFeature([max_predictions_per_seq], tf.int64),\n \"masked_lm_ids\":\n tf.io.FixedLenFeature([max_predictions_per_seq], tf.int64),\n \"masked_lm_weights\":\n tf.io.FixedLenFeature([max_predictions_per_seq], tf.float32),\n \"next_sentence_labels\":\n tf.io.FixedLenFeature([1], tf.int64),\n }\n example = tf.io.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.cast(t, tf.int32)\n example[name] = t\n\n return example",
"def _decode_record(record, name_to_features):\n # example = tf.parse_single_example(record, name_to_features)\n example = tf.io.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.cast(t, tf.int32)\n example[name] = t\n\n return example",
"def __extract_fn(self, tfrecord):\n feature_description = {\n 'image': tf.io.FixedLenFeature([], tf.string),\n 'label': tf.io.FixedLenFeature([], tf.int64),\n 'size': tf.io.FixedLenFeature([2], tf.int64)\n }\n # Extract the data record\n sample = tf.io.parse_single_example(tfrecord, feature_description)\n image = tf.io.decode_image(sample['image'], channels=3)\n image = tf.reshape(image, [sample['size'][0], sample['size'][1], 3]) # TODO this line should be useless ?\n label = sample['label']\n return (image, label)",
"def parse_record_keras(raw_record, is_training, dtype):\n image, label = jr_main.parse_record(raw_record, is_training, dtype)\n label = tf.sparse_to_dense(label, (jr_main.NUM_CLASSES,), 1)\n return image, label",
"def read_tfrecord_viz(example):\n features = {\n \"image\": tf.io.FixedLenFeature([], tf.string), # tf.string = bytestring (not text string)\n \"class\": tf.io.FixedLenFeature([], tf.int64), # shape [] means scalar\n }\n # decode the TFRecord\n example = tf.io.parse_single_example(example, features)\n\n image = tf.image.decode_jpeg(example['image'], channels=3)\n image = tf.cast(image, tf.uint8)\n image = tf.reshape(image, [TARGET_SIZE,TARGET_SIZE, 3])\n\n class_label = tf.cast(example['class'], tf.int32)\n\n return image, class_label",
"def parse_record(record, training):\n ### YOUR CODE HERE\n # Reshape from [depth * height * width] to [depth, height, width].\n depth_major = record.reshape((3, 32, 32))\n\n # Convert from [depth, height, width] to [height, width, depth]\n image = np.transpose(depth_major, [1, 2, 0])\n\n ### END CODE HERE\n\n image = preprocess_image(image, training) # If any.\n\n image = np.transpose(image, [2, 0, 1])\n \n return image",
"def read_tfrecord(\n tfrecord_infile='{}-00000-of-00001.gz'.format(TFRECORD_OUTFILE),\n idx=0):\n raw_dataset = get_raw_dataset(tfrecord_infile)\n\n parsed_dataset = raw_dataset.map(_parse_function)\n\n parsed_record = get_record(parsed_dataset, idx)\n\n return convert_parsed_record_to_ndarray(parsed_record)",
"def _parse_proto(buf):\n feature_map = {\n \"frames\":\n tf.FixedLenFeature(shape=[_NUM_VIEWS], dtype=tf.string),\n \"top_down_frame\":\n tf.FixedLenFeature(shape=[1], dtype=tf.string),\n \"cameras\":\n tf.FixedLenFeature(\n shape=[_NUM_VIEWS * _NUM_RAW_CAMERA_PARAMS], dtype=tf.float32),\n \"captions\":\n tf.VarLenFeature(dtype=tf.string),\n \"simplified_captions\":\n tf.VarLenFeature(dtype=tf.string),\n \"meta_shape\":\n tf.VarLenFeature(dtype=tf.string),\n \"meta_color\":\n tf.VarLenFeature(dtype=tf.string),\n \"meta_size\":\n tf.VarLenFeature(dtype=tf.string),\n \"meta_obj_positions\":\n tf.VarLenFeature(dtype=tf.float32),\n \"meta_obj_rotations\":\n tf.VarLenFeature(dtype=tf.float32),\n \"meta_obj_colors\":\n tf.VarLenFeature(dtype=tf.float32),\n }\n\n example = tf.parse_single_example(buf, feature_map)\n images = tf.concat(example[\"frames\"], axis=0)\n images = tf.map_fn(\n tf.image.decode_jpeg,\n tf.reshape(images, [-1]),\n dtype=tf.uint8,\n back_prop=False)\n top_down = tf.image.decode_jpeg(tf.squeeze(example[\"top_down_frame\"]))\n cameras = tf.reshape(example[\"cameras\"], shape=[-1, _NUM_RAW_CAMERA_PARAMS])\n captions = tf.sparse_tensor_to_dense(example[\"captions\"], default_value=\"\")\n simplified_captions = tf.sparse_tensor_to_dense(\n example[\"simplified_captions\"], default_value=\"\")\n meta_shape = tf.sparse_tensor_to_dense(\n example[\"meta_shape\"], default_value=\"\")\n meta_color = tf.sparse_tensor_to_dense(\n example[\"meta_color\"], default_value=\"\")\n meta_size = tf.sparse_tensor_to_dense(example[\"meta_size\"], default_value=\"\")\n meta_obj_positions = tf.sparse_tensor_to_dense(\n example[\"meta_obj_positions\"], default_value=0)\n meta_obj_positions = tf.reshape(meta_obj_positions, shape=[-1, 3])\n meta_obj_rotations = tf.sparse_tensor_to_dense(\n example[\"meta_obj_rotations\"], default_value=0)\n meta_obj_rotations = tf.reshape(meta_obj_rotations, shape=[-1, 4])\n meta_obj_colors = tf.sparse_tensor_to_dense(\n example[\"meta_obj_colors\"], default_value=0)\n meta_obj_colors = tf.reshape(meta_obj_colors, shape=[-1, 4])\n\n data_tensors = {\n \"images\": images,\n \"cameras\": cameras,\n \"captions\": captions,\n \"simplified_captions\": simplified_captions,\n \"top_down\": top_down\n }\n if _PARSE_METADATA:\n data_tensors.update({\n \"meta_shape\": meta_shape,\n \"meta_color\": meta_color,\n \"meta_size\": meta_size,\n \"meta_obj_positions\": meta_obj_positions,\n \"meta_obj_rotations\": meta_obj_rotations,\n \"meta_obj_colors\": meta_obj_colors\n })\n return data_tensors",
"def _parse_example(serialized_example):\n data_fields = {\n \"inputs\": tf.VarLenFeature(tf.int64),\n \"targets\": tf.VarLenFeature(tf.int64)\n }\n parsed = tf.parse_single_example(serialized_example, data_fields)\n inputs = tf.sparse_tensor_to_dense(parsed[\"inputs\"])\n targets = tf.sparse_tensor_to_dense(parsed[\"targets\"])\n return inputs, targets",
"def parse_imagenet_record(raw_record, is_training, dtype, cpu_preprocess_fn=None, seed=None):\n image_buffer, label, bbox = parse_example_proto(raw_record)\n\n image = preprocess_image(\n image_buffer=image_buffer,\n bbox=bbox,\n output_height=DEFAULT_IMAGE_SIZE,\n output_width=DEFAULT_IMAGE_SIZE,\n num_channels=NUM_CHANNELS,\n cpu_preprocess_fn=cpu_preprocess_fn,\n is_training=is_training,\n seed=seed,\n )\n image = tf.cast(image, dtype)\n\n # Subtract one so that labels are in [0, 1000), and cast to int32 for\n # Keras model.\n label = tf.cast(tf.cast(tf.reshape(label, shape=[1]), dtype=tf.int32) - 1, dtype=tf.int32)\n return image, label",
"def _ParseSequenceExample(record, feature_map, context_map):\n context, features = tf.io.parse_single_sequence_example(\n serialized=record,\n context_features=context_map,\n sequence_features=feature_map)\n # Add all keys from context to features. Keys must not overlap.\n common_keys = set(context.keys()) & set(features.keys())\n if common_keys:\n raise ValueError(\n 'Keys {} are present in context and features.'.format(common_keys))\n features.update(context)\n return features",
"def parse_flir_record_metadata(stream: BinaryIO, record_nr: int) -> Optional[Tuple[int, int, int, int]]:\n # FLIR record entry (ref 3):\n # 0x00 - int16u record type\n # 0x02 - int16u record subtype: RawData 1=BE, 2=LE, 3=PNG; 1 for other record types\n # 0x04 - int32u record version: seen 0x64,0x66,0x67,0x68,0x6f,0x104\n # 0x08 - int32u index id = 1\n # 0x0c - int32u record offset from start of FLIR data\n # 0x10 - int32u record length\n # 0x14 - int32u parent = 0 (?)\n # 0x18 - int32u object number = 0 (?)\n # 0x1c - int32u checksum: 0 for no checksum\n entry = 32 * record_nr\n stream.seek(entry)\n record_type = int.from_bytes(stream.read(2), \"big\")\n if record_type < 1:\n return None\n\n _ = int.from_bytes(stream.read(2), \"big\")\n _ = int.from_bytes(stream.read(4), \"big\")\n _ = int.from_bytes(stream.read(4), \"big\")\n record_offset = int.from_bytes(stream.read(4), \"big\")\n record_length = int.from_bytes(stream.read(4), \"big\")\n _ = int.from_bytes(stream.read(4), \"big\")\n _ = int.from_bytes(stream.read(4), \"big\")\n _ = int.from_bytes(stream.read(4), \"big\")\n return (entry, record_type, record_offset, record_length)",
"def iterate_tfrecord(filename, decode=False):\n for record in tf.python_io.tf_record_iterator(filename):\n example = tf.train.Example()\n example.ParseFromString(record)\n if decode:\n yield decode_image_tensor(example)\n else:\n yield example",
"def parse_flir_record_metadata(stream: BinaryIO, record_nr: int) -> Optional[Tuple[int, int, int, int]]:\n # FLIR record entry (ref 3):\n # 0x00 - int16u record type\n # 0x02 - int16u record subtype: RawData 1=BE, 2=LE, 3=PNG; 1 for other record types\n # 0x04 - int32u record version: seen 0x64,0x66,0x67,0x68,0x6f,0x104\n # 0x08 - int32u index id = 1\n # 0x0c - int32u record offset from start of FLIR data\n # 0x10 - int32u record length\n # 0x14 - int32u parent = 0 (?)\n # 0x18 - int32u object number = 0 (?)\n # 0x1c - int32u checksum: 0 for no checksum\n entry = 32 * record_nr\n stream.seek(entry)\n record_type = int.from_bytes(stream.read(2), 'big')\n if record_type < 1:\n return None\n\n _ = int.from_bytes(stream.read(2), 'big')\n _ = int.from_bytes(stream.read(4), 'big')\n _ = int.from_bytes(stream.read(4), 'big')\n record_offset = int.from_bytes(stream.read(4), 'big')\n record_length = int.from_bytes(stream.read(4), 'big')\n _ = int.from_bytes(stream.read(4), 'big')\n _ = int.from_bytes(stream.read(4), 'big')\n _ = int.from_bytes(stream.read(4), 'big')\n return entry, record_type, record_offset, record_length",
"def _extract_image_and_label(record):\n features = tf.parse_single_example(\n record,\n features={\n 'image_raw': tf.FixedLenFeature([], tf.string),\n 'label': tf.FixedLenFeature([], tf.int64)\n })\n\n image = tf.decode_raw(features['image_raw'], tf.uint8)\n image.set_shape(image_size * image_size * 3)\n image = tf.reshape(image, [image_size, image_size, 3])\n\n image = tf.cast(image, tf.float32) * (2. / 255) - 1.\n\n label = tf.cast(features['label'], tf.int32)\n label += label_offset\n\n return image, label",
"def _record_row_parser(buf):\n column, value = buf.split(':', 1)\n\n return column.strip(), value.strip()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Open a TFRecords file in the cpdb format, parse it, and return a tf.data.Dataset object
|
def cpdb_dataset(tfrecords):
dataset = tf.data.TFRecordDataset(tfrecords)
dataset = dataset.map(lambda x: cpdb_parser(x))
return dataset
|
[
"def read_tfrecord_dataset(filepaths):\n return tf.data.TFRecordDataset(filenames=filepaths).map(parse_tf_example)",
"def read_tfrecord(\n tfrecord_infile='{}-00000-of-00001.gz'.format(TFRECORD_OUTFILE),\n idx=0):\n raw_dataset = get_raw_dataset(tfrecord_infile)\n\n parsed_dataset = raw_dataset.map(_parse_function)\n\n parsed_record = get_record(parsed_dataset, idx)\n\n return convert_parsed_record_to_ndarray(parsed_record)",
"def make_dataset(tfrecord_file, batch_size):\n dataset = tf.data.TFRecordDataset(tfrecord_file)\n dataset = dataset.map(parse_example)\n padded_shapes = (\n tf.TensorShape([1]), # length\n tf.TensorShape([1]), # label\n tf.TensorShape([None]) # words\n )\n dataset = dataset.padded_batch(batch_size, padded_shapes)\n\n return dataset",
"def _read_tfrecord_examples(filename):\n result = {}\n for raw_record in tf.data.TFRecordDataset([filename]):\n tf_example = tf.train.Example()\n tf_example.ParseFromString(raw_record.numpy())\n id_feature = tf_example.features.feature['id'].bytes_list\n result[id_feature.value[0].decode('utf-8')] = tf_example\n return result",
"def load_tfrecord_dataset(tfrecord_name, batch_size, shuffle=True):\n #raw_dataset = tf.data.TFRecordDataset(tfrecord_name)\n raw_dataset = tf.data.Dataset.list_files(tfrecord_name)\n\n raw_dataset = raw_dataset.interleave(tf.data.TFRecordDataset,\n num_parallel_calls=tf.data.experimental.AUTOTUNE,\n deterministic=False)\n\n raw_dataset = raw_dataset.repeat()\n\n\n dataset = raw_dataset.map(\n _parse_tfrecord(),\n num_parallel_calls=tf.data.experimental.AUTOTUNE\n )\n dataset = dataset.batch(batch_size=batch_size, drop_remainder=True)\n dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)\n\n dataset = dataset.shuffle(buffer_size=10)\n\n return dataset",
"def load_tf_records(source_file, preprocessing, shuffle=True, batch_size=32,\n repeat_count=-1, greyscale=False, num_threads=4, cache=False,\n take=-1):\n dataset = tf.data.TFRecordDataset(source_file, buffer_size=1024*1024)\n\n def decoding(data):\n features = tf.parse_single_example(data,\n features={\n 'A/width': tf.FixedLenFeature([], tf.int64),\n 'A/height': tf.FixedLenFeature([], tf.int64),\n 'A/filename': tf.FixedLenFeature([], tf.string),\n 'A/encoded': tf.FixedLenFeature([], tf.string),\n 'B/width': tf.FixedLenFeature([], tf.int64),\n 'B/height': tf.FixedLenFeature([], tf.int64),\n 'B/filename': tf.FixedLenFeature([], tf.string),\n 'B/encoded': tf.FixedLenFeature([], tf.string),\n 'key': tf.FixedLenFeature([], tf.string),\n 'num': tf.FixedLenFeature([], tf.int64),\n })\n\n channels = 1 if greyscale else 3\n features[\"A/image\"] = decode_image(features[\"A/encoded\"], channels)\n features[\"B/image\"] = decode_image(features[\"B/encoded\"], channels)\n return features\n\n if take > 0:\n dataset = dataset.take(take)\n\n if cache:\n # when caching, it makes sense to decode only once\n dataset = dataset.map(decoding, num_parallel_calls=num_threads)\n dataset = dataset.cache()\n else:\n # otherwise, combine decoding and preprocessing so we use just a single map\n preprocessing = lambda x: preprocessing(decoding(x))\n\n dataset = dataset.repeat(repeat_count)\n if shuffle:\n dataset = dataset.shuffle(buffer_size=256)\n dataset = dataset.map(preprocessing, num_parallel_calls=num_threads)\n batched = dataset.batch(batch_size)\n\n return batched.prefetch(10)",
"def get_tf_dataset(self, name):\n data_sources = glob.glob(\n os.path.join(self.data_dir, name, '*.tfrecords'))\n # Build dataset provider\n dataset = tf.data.TFRecordDataset(data_sources)\n dataset = dataset.map(self.get_parser_op())\n dataset = dataset.repeat(repeat)\n\n return dataset",
"def iterate_tfrecord(filename, decode=False):\n for record in tf.python_io.tf_record_iterator(filename):\n example = tf.train.Example()\n example.ParseFromString(record)\n if decode:\n yield decode_image_tensor(example)\n else:\n yield example",
"def read_tfrecords_dir(\n dirname: str,\n image_width: int = 256,\n image_height: int = 256,\n image_channels: int = 3,\n):\n filenames = glob.glob(os.path.join(dirname, \"*.tfrecord\"))\n\n print(f\"tfrecords: {filenames}\")\n\n raw_dataset = tf.data.TFRecordDataset(filenames=filenames)\n\n dataset = raw_dataset.map(\n lambda d: tf_tools._parse_fn(\n example_serialized=d,\n img_width=image_width,\n img_height=image_height,\n img_channels=image_channels,\n )\n )\n\n return dataset",
"def check_data(self, tfrecords_filename):\n record_iterator = tf.python_io.tf_record_iterator(path=tfrecords_filename)\n\n for string_record in record_iterator:\n # Parse the next example\n example = tf.train.Example()\n example.ParseFromString(string_record)\n\n # Get the features you stored (change to match your tfrecord writing code)\n seq = (example.features.feature['seq_raw']\n .bytes_list\n .value[0])\n\n label = (example.features.feature['label_raw']\n .bytes_list\n .value[0])\n\n # Convert to a numpy array (change dtype to the datatype you stored)\n seq_array = np.fromstring(seq, dtype=np.float64)\n label_array = np.fromstring(label, dtype=np.float64)\n\n # Print the image shape; does it match your expectations?\n print(seq_array.shape)\n print(label_array.shape)",
"def get_data(fn):\n rows = []\n dbf = dbflib.open(fn)\n for i in range(dbf.record_count()):\n rows.append(dbf.read_record(i))\n\n return pd.DataFrame(rows)",
"def decode_tf_records_to_pandas(decoding_features_spec,\n tf_records_path,\n max_n_examples=None,\n random_filter_keep_rate=1.0,\n filter_fn=None):\n\n if not max_n_examples:\n max_n_examples = float('inf')\n\n reader = tf.TFRecordReader()\n filenames = tf.train.match_filenames_once(tf_records_path)\n filename_queue = tf.train.string_input_producer(filenames,\n num_epochs=1)\n\n _, serialized_example = reader.read(filename_queue)\n read_data = tf.parse_single_example(\n serialized=serialized_example, features=decoding_features_spec)\n\n sess = tf.InteractiveSession()\n sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n sess.run(tf.tables_initializer())\n tf.train.start_queue_runners(sess)\n\n d = []\n new_line = sess.run(read_data)\n count = 0\n while new_line:\n if filter_fn:\n keep_line = filter_fn(new_line)\n else:\n keep_line = True\n keep_line = keep_line and (random.random() < random_filter_keep_rate)\n\n if keep_line:\n d.append(new_line)\n count += 1\n if count >= max_n_examples:\n break\n if not (count % 100000):\n logging.info('Loaded {} lines.'.format(count))\n\n try:\n new_line = sess.run(read_data)\n except tf.errors.OutOfRangeError:\n logging.info('End of file.')\n break\n\n res = pd.DataFrame(d)\n return res",
"def create_waveform_dataset_from_tfrecord_files(dir_path):\n \n # Get TFRecord file paths.\n file_paths = _get_file_paths_sorted_by_name(dir_path)\n \n # Concatenate files to create non-repeating dataset.\n dataset = tf.data.TFRecordDataset(file_paths)\n \n # Parse example protos.\n dataset = dataset.map(\n _parse_example,\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n \n return dataset",
"def read_raw_dataset(dataset_file):\n \n df = pd.read_pickle(\"./datasets/pickle_files/df_pickle_\" + dataset_file)\n return df",
"def parse_reduce_file(reduce_file):\n with open(reduce_file, 'r') as file:\n while True:\n line = file.readline()\n if not line:\n break\n if file.readline().lower().startswith('data'):\n break\n file_str = file.read()\n if file_str:\n file_lines = file_str.split('\\r\\n')\n type_fns = ReduceTypes.types\n record_list = []\n for record_str in file_lines[3:]:\n if record_str:\n record_list.append([fun(rec) for fun, rec in zip(type_fns, record_str.split('\\t'))])\n return DataFrame.from_records(record_list, columns=ReduceTypes.names, index='DptNum')\n #return reduce_frame",
"def continuous_dataset(filepath):\n dataset = tf.data.TextLineDataset(filepath)\n # Append 0 to start in case there's blank rows.\n dataset = dataset.map(tf.string_to_number)\n return dataset",
"def __get_tfrecord_files_from_dataset_info_file(self):\n yaml_file = os.path.join(self.data_dir, 'dataset_info.yaml')\n with open(yaml_file, 'r') as stream:\n try:\n dataset_info = yaml.safe_load(stream)\n except yaml.YAMLError as e:\n print('Error parsing file', yaml_file)\n raise e\n tfrecord_files = [os.path.join(self.data_dir, path) for path in\n dataset_info[\"splits\"][self.split][\"tfrecord_files\"]]\n return tfrecord_files",
"def read_and_process_data(self, filename):\n dataset = NER.Dataset()\n with codecs.open(filename, 'r', 'utf-8') as f:\n for line in f.readlines():\n field = line.strip().split(',')\n if len(field) == 3:\n # Special case: The token is a comma \",\"\n self.process_data(dataset, ',', 'O')\n else:\n self.process_data(dataset, field[0], field[1])\n return dataset\n return None",
"def readRecord(recordName):\n filenameQueue = tf.train.string_input_producer([recordName])\n reader = tf.TFRecordReader()\n _, serializedExample = reader.read(filenameQueue)\n features = tf.parse_single_example(serializedExample, features={\n \"image_raw\": tf.FixedLenFeature([], tf.string)\n })\n\n image = features[\"image_raw\"]\n image = tf.decode_raw(image, tf.uint8)\n image = preprocess(image)\n return image"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Speaks the string input
|
def speak(text):
proc.stdin.write('(SayText "%s")\n' % text)
|
[
"def handle_speak(event):\n bus.emit(Message('speak', event))",
"def stringReceived(self, string):\n raise NotImplementedError()",
"def string(self, string):\n\n self.__emulate_keyboard('type', string)",
"def printAndSay(self, string): \n tts = gTTS(text=string, lang='en')\n tts.save(f\"speech.{self.carID}.mp3\")\n print(string)\n os.system(f\"ffplay -nodisp -autoexit -volume 100 -loglevel quiet \\\n speech.{self.carID}.mp3\")",
"def get_string(self):\n self.text = input(\"Please enter string: \")",
"def say(self, msg):\n if self.silent:\n return\n\n tts_msg = (msg.replace(u'½', u' and a half')\n .replace(u' AM', ' A.M.')\n .replace(u' PM', ' P.M.')).lstrip()\n if tts_msg.startswith('and a half'):\n tts_msg = tts_msg[6:]\n\n tts_msg = tts_msg.encode('utf-8')\n\n proc = subprocess.Popen([\"festival\", \"--tts\"], # nosec\n stdin=subprocess.PIPE)\n proc.stdin.write(tts_msg)\n proc.stdin.close()",
"def say(self, text):\n if self.speaker:\n print \"Speech:\", text\n else:\n print \"Speech:\", text",
"def speak(msg):\n if(pu.io.myIP()=='192.168.5.115'):\n os.system(\"say -v Veena \"+str(msg))",
"def _typed(self, event) -> None:\n\t\tself._active.typed(event)",
"def hook_StreamString(state, level, format_ea, str_ea):\n DeepManticore(state).api_stream_string(level, format_ea, str_ea)",
"def input_command():\r\n\r\n r = sr.Recognizer()\r\n m = sr.Microphone()\r\n with m as source:\r\n r.adjust_for_ambient_noise(source)\r\n with m as source:\r\n print('listening...')\r\n audio = r.listen(source)\r\n\r\n try:\r\n print('recognizing...')\r\n query = r.recognize_google(audio)\r\n print(f\"\\\"{query}\\\"\")\r\n except Exception as e:\r\n print(e)\r\n speak(\"Sorry!! Didn't catch that\")\r\n query = input_command()\r\n return query",
"def speak_srv(self, req):\n\n self.buffer_requests(req)\n return \"\"",
"def handle_action(self, text, **kwargs):\n\n # Use lowercase for processing.\n text = text.lower()\n\n logger.debug(\"Received text: '%s'\", text)\n\n if any(word in text for word in self.STOP_LISTENING):\n self.speak(\"As you wish boss. Enjoy your day.\")\n sys.exit()\n elif any(word == text for word in self.NAMES):\n self.speak(\"Yes boss?\")\n elif \"how are you\" in text:\n self.speak(\"I'm fine, thank you.\")\n elif \"thank you\" in text:\n self.speak(\"Any time boss.\")\n elif any(\"day\" in text for word in self.TIME_COMMANDS):\n self.speak(time.strftime(\"%x\"))\n elif any(word in text for word in self.TIME_COMMANDS):\n self.speak(time.strftime(\"%X\"))\n elif any(word in text for word in self.GREETING) or text==\"hi\":\n self.speak(\"Hello boss.\")\n elif any(string == text for string in self.SWEAR):\n self.speak(\"I'm sorry boss, but I was not built to execute such a task.\")\n elif \"open\" in text:\n obj = text.split(\"open \")[1]\n\n if obj not in self.OPEN_ACTIONS:\n self.speak(\"I'm sorry boss, but I'm unable to recognize the open command\")\n else:\n if \"browser\" in obj:\n self.speak(\"Opening browser.\")\n webbrowser.open(\"https://www.google.com/\")\n elif \"map\" in obj:\n self.speak(\"Opening maps.\")\n webbrowser.open(\"https://www.google.com/maps/\")\n elif \"gmail\" in obj:\n self.speak(\"Opening gmail.\")\n webbrowser.open(\"https://mail.google.com/\")\n elif \"facebook\" in obj:\n self.speak(\"Opening facebook.\")\n webbrowser.open(\"https://www.facebook.com/\")\n elif \"youtube\" in obj:\n self.speak(\"Opening youtube.\")\n webbrowser.open(\"https://www.youtube.com/\")\n elif \"home folder\" in obj:\n self.speak(\"Opening home folder.\")\n folder = os.path.expanduser(\"~\")\n os.system('xdg-open \"%s\"' % folder)\n elif \"music\" in obj:\n self.speak(\"Opening music folder.\")\n folder = os.path.expanduser(\"~/Music\")\n os.system('xdg-open \"%s\"' % folder)\n elif \"pictures\" in obj:\n self.speak(\"Opening pictures folder.\")\n folder = os.path.expanduser(\"~/Pictures\")\n os.system('xdg-open \"%s\"' % folder)\n elif \"videos\" in obj:\n self.speak(\"Opening videos folder.\")\n folder = os.path.expanduser(\"~/Videos\")\n os.system('xdg-open \"%s\"' % folder)\n elif \"downloads\" in obj:\n self.speak(\"Opening downloads folder.\")\n folder = os.path.expanduser(\"~/Downloads\")\n os.system('xdg-open \"%s\"' % folder)\n elif \"documents\" in obj:\n self.speak(\"Opening documents folder.\")\n folder = os.path.expanduser(\"~/Documents\")\n os.system('xdg-open \"%s\"' % folder)\n elif \"search\" in text:\n txt_split = text.split(\" \")\n if txt_split[0]==\"wolfram\" and txt_split[1]==\"search\":\n self.handle_wolframalpha_search(text.split(\"search \")[1])\n else:\n self.speak(\"Opening google search.\")\n url = \"https://www.google.com.tr/search?q={}\".format(text.split(\"search \")[1])\n webbrowser.open(url)\n elif \"calculate\" in text:\n self.handle_wolframalpha_search(text.split(\"calculate \")[1])\n elif any(word in text for word in self.WEATHER):\n if ' in ' in text:\n place = text.split(\" in \")[1]\n out = data_output(data_organizer(data_fetch(url_builder_city(place))))\n if 'brief' in text:\n self.speak(out[0])\n elif 'detailed' in text:\n self.speak(out[1])\n else:\n self.speak(out[0])\n else:\n IP = ipgetter.myip()\n match = geocoder.ip(IP)\n coords = match.latlng\n data = data_organizer(data_fetch(url_builder_coords(coords[0], coords[1])))\n out = data_output(data)\n\n if 'brief' in text:\n self.speak(out[0])\n elif 'detailed' in text:\n 
self.speak(out[1])\n else:\n self.speak(out[0])\n elif \"detect\" in text or \"facial detection\" in text:\n if \"faces\" in text:\n faces = detect_faces()\n if faces == 1:\n self.speak(\"I'm detecting \"+str(faces)+\" face. I stored the frame in the data folder.\")\n else:\n self.speak(\"I'm detecting \"+str(faces)+\" faces. I stored the frame in the data folder.\")",
"def say(self, phrase):\n if self.use_Nao:\n print(phrase)\n naoqiutils.speak(phrase)\n else:\n print(phrase)",
"def ubercam(string):\n pass",
"async def speak(ctx):\n await bot.say(\"<:reinbark:384084080578396160> *bork bork*\")\n # await bot.say('Bork', tts=True)\n print(\"bot has spoken\")",
"def now_speaking(self, text):\n self.nottreal.view.wizard_window.msg_queue.remove(text)\n self.nottreal.view.wizard_window.msg_history.add(text)\n self.change_state(VUIState.SPEAKING)",
"def lick():\n return \"*licks ice cream cone*\"",
"def handle(phrase):\n print('Heard: {}'.format(phrase))\n\n if bool(re.search(r'\\blight on\\b', phrase, re.IGNORECASE)):\n GPIO.output(18, GPIO.LOW)\n if bool(re.search(r'\\blight off\\b', phrase, re.IGNORECASE)):\n GPIO.output(18, GPIO.HIGH)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Clone `endpoint` into the indicated `destination` folder
|
def clone_to_folder(destination, endpoint):
click.echo('... cloning ' + endpoint + ' to ' + destination)
execute('git clone -q ' + endpoint)
|
[
"def clone_files(session, uuid, source, target):\n payload = {'site': uuid, 'path': 'environments/'+target+'/files'}\n data = {\n 'clone-from-environment': source,\n }\n return api.request(session, payload, 'POST', data)",
"def clone_url(self):\n raise NotImplementedError",
"def copy(self, src_urlstr, dest_urlstr):\n if src_urlstr[0:3] == \"ws:\":\n if dest_urlstr[0:3] == \"ws:\":\n raise Exception(\n \"source and destination may not both reference a workspace\"\n )\n pathparts = wsurl_to_tokens(src_urlstr)\n return self.download(pathparts[0], pathparts[1], dest_urlstr)\n if dest_urlstr[0:3] == \"ws:\":\n pathparts = wsurl_to_tokens(dest_urlstr)\n return self.upload(src_urlstr, pathparts[0], pathparts[1])\n raise Exception(\"source and destination may not both be local\")",
"def git_clone(self, url, target):\n pass",
"def test_clone_to_dest(self):\n repo = 'git@github.com:user/repository'\n dest = 'weird_destination'\n unbox.main([repo, dest])\n subprocess.check_call.assert_called_with(['git', 'clone', repo, dest])",
"def copy_file(server, source, target):\n with setup_server_connection(server) as connection:\n Transfer(connection).put(local=source, remote=target)",
"def _cmd_paste(self) -> None:\n if API_CURRENT in self._request and API_SRC in self._request and API_DEST in self._request:\n cur_dir = self._find_dir(self._request[API_CURRENT], None)\n src = self._find_dir(self._request[API_SRC], None)\n dest = self._find_dir(self._request[API_DEST], None)\n\n if not cur_dir or not src or not dest or not 'targets[]' in self._request:\n self._response[RSP_ERROR] = \"Invalid parameters\"\n return\n\n files = self._request['targets[]']\n if not isinstance(files, list):\n files = [files]\n\n cut = False\n if API_CUT in self._request and self._request[API_CUT] == '1':\n cut = True\n\n if not self._is_allowed(src, ACCESS_READ) or not self._is_allowed(dest, ACCESS_WRITE):\n self._response[RSP_ERROR] = \"Access denied\"\n return\n\n for file_hash in files:\n f = self._find(file_hash, src)\n if not f:\n self._response[RSP_ERROR] = \"File not found\"\n return\n new_dest = dest.joinpath(f.name)\n\n if str(dest).find(str(f)) == 0:\n self._response[RSP_ERROR] = \"Unable to copy into itself\"\n return\n\n if cut:\n if not self._is_allowed(f, ACCESS_RM):\n self._response[RSP_ERROR] = \"Move failed\"\n self._set_error_data(str(f), \"Access denied\")\n self._content(cur_dir, True)\n return\n # TODO thumbs\n if new_dest.exists():\n self._response[RSP_ERROR] = \"Unable to move files\"\n self._set_error_data(str(f), \"File or folder with the same name already exists\")\n self._content(cur_dir, True)\n return\n try:\n f.rename(new_dest)\n self._rm_tmb(f)\n continue\n except:\n self._response[RSP_ERROR] = \"Unable to move files\"\n self._set_error_data(str(f), \"Unable to move\")\n self._content(cur_dir, True)\n return\n else:\n if not self._copy(f, new_dest):\n self._response[RSP_ERROR] = \"Unable to copy files\"\n self._content(cur_dir, True)\n return\n continue\n\n self._content(cur_dir, True)\n else:\n self._response[RSP_ERROR] = \"Invalid parameters\"\n return",
"def move_to(self, destination):\n\n def _update_folder(url):\n self.set_property(\"ServerRelativeUrl\", url)\n\n def _move_to(destination_folder):\n \"\"\"\n :type destination_folder: Folder\n \"\"\"\n destination_url = \"/\".join([destination_folder.serverRelativeUrl, self.name])\n qry = ServiceOperationQuery(self, \"MoveTo\", {\"newUrl\": destination_url})\n self.context.add_query(qry).after_query_execute(_update_folder, destination_url)\n\n def _source_folder_resolved():\n if isinstance(destination, Folder):\n destination.ensure_property(\"ServerRelativeUrl\", _move_to, destination)\n else:\n self.context.web.ensure_folder_path(destination).after_execute(_move_to)\n\n self.ensure_properties([\"ServerRelativeUrl\", \"Name\"], _source_folder_resolved)\n return self",
"def copyTo(target=None, new_id=None):",
"def copy_remote(src_path, dst_path):\n assert ':' not in src_path, src_path\n idx = dst_path.find(':')\n dst = dst_path[:idx]\n file_path = dst_path[idx+1:]\n assert ':' not in file_path, dst_path\n if os.path.isfile(src_path):\n cmd = 'scp %s %s' % (src_path, dst_path)\n else:\n cmd = 'scp -r %s %s' % (src_path, dst_path)\n res = run(cmd, shell=True, stdout=PIPE).stdout.decode('utf-8')\n return res",
"def test_copy_move(self):\n # XXX: python-easwebday does not support webdav COPY/MOVE operation yet.\n # with tmp_repo() as ra:\n # with tmp_repo() as rb:\n # roota = ra.get_dir('/')\n # rootb = rb.get_dir('/')\n pass",
"def upload_filepath(ipppssoot, src_filepath, dest_filepath):\n if dest_filepath.startswith(\"s3\"):\n # make copies locally to be included in tarfile for s3\n output_dir = get_output_path(\"file:outputs\", ipppssoot)\n os.makedirs(output_dir, exist_ok=True)\n local_outpath = os.path.join(output_dir, os.path.basename(dest_filepath))\n shutil.copy(src_filepath, local_outpath)\n else:\n os.makedirs(os.path.dirname(dest_filepath), exist_ok=True)\n shutil.copy(src_filepath, dest_filepath)",
"def clone(self, source_name, snapshot_id, dest_name):\n wrap_popen('collie', 'vdi', 'clone', '-s', snapshot_id, source_name,\n dest_name)",
"def copy_content(origin=CONTENT_FOLDER, destination=os.path.join(BUILD_FOLDER, GALLERY_WEB_PATH)):\n copy_tree(origin, destination)",
"def clone(self, branch):\n targetdir = self.basedir + \"/\" + branch + \"/\" + self.postfix\n g = git.cmd.Git()\n g.clone(self.url, targetdir, branch=branch, depth=1)",
"def replicate_url_to_bucket(url, s3_bucket, root_dir='html', append_to_path='', copy_only_if_missing=False):\n s3_file_path = get_path_from_url(url, 'html', append_to_path=append_to_path)\n\n if copy_only_if_missing and s3_file_exists(s3_file_path, s3_bucket):\n return s3_file_path\n else:\n print(\"Replicating \" + url + \" to \" + s3_file_path)\n boto3.client('s3').upload_fileobj(\n retrieve_url(url),\n s3_bucket,\n s3_file_path\n )\n return s3_file_path",
"def copy_folder_or_file(self, path_from, path_to):\n url = self._base_url + \"/resources/copy\"\n\n payload = {'path': path_to, 'from': path_from}\n r = requests.post(url, headers=self.base_headers, params=payload)\n self._check_code(r)",
"def pyo_copy(src, dst):\n\n if src.is_dir():\n for i in src.iterdir():\n pyo_copy(i, dst / i.name)\n return\n\n if not (str(src).endswith(\".pyo\") or str(src).endswith(\".pem\")):\n return\n\n dst.parent.mkdir(parents=True, exist_ok=True)\n shutil.copy(src, dst)",
"def copy_to_tmp(origin: Path, dest: Path) -> Path:\n shutil.copy(origin.as_posix(), dest.as_posix())\n\n return dest / origin.name"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
r"""Returns the GMSD between `x` and `y`, without downsampling and color space conversion. `_gmsd` is an auxiliary function for `gmsd` and `GMSD`.
|
def _gmsd(
x: torch.Tensor,
y: torch.Tensor,
kernel: torch.Tensor,
value_range: float = 1.,
c: float = 0.00261, # 170. / (255. ** 2)
alpha: float = 0.,
) -> torch.Tensor:
c *= value_range ** 2
# Gradient magnitude
pad = kernel.size(-1) // 2
gm_x = tensor_norm(filter2d(x, kernel, padding=pad), dim=1)
gm_y = tensor_norm(filter2d(y, kernel, padding=pad), dim=1)
gm_xy = gm_x * gm_y
# Gradient magnitude similarity
gms_num = (2. - alpha) * gm_xy + c
gms_den = gm_x ** 2 + gm_y ** 2 - alpha * gm_xy + c
gms = gms_num / gms_den
# Gradient magnitude similarity deviation
gmsd = (gms - gms.mean((-1, -2), keepdim=True)) ** 2
gmsd = torch.sqrt(gmsd.mean((-1, -2)))
return gmsd
|
[
"def _msgmsd(\n x: torch.Tensor,\n y: torch.Tensor,\n kernel: torch.Tensor,\n weights: torch.Tensor,\n alpha: float = 0.5,\n **kwargs,\n) -> torch.Tensor:\n\n gmsds = []\n\n for i in range(weights.numel()):\n if i > 0:\n x = F.avg_pool2d(x, kernel_size=2, ceil_mode=True)\n y = F.avg_pool2d(y, kernel_size=2, ceil_mode=True)\n\n gmsds.append(_gmsd(x, y, kernel, alpha=alpha, **kwargs))\n\n msgmsd = torch.stack(gmsds, dim=-1) ** 2\n msgmsd = torch.sqrt((msgmsd * weights).sum(dim=-1))\n\n return msgmsd",
"def get_gsd(self, pxlh):\n if self.units == 'm':\n k_unit = 100\n elif self.units == 'cm':\n k_unit = 1\n elif self.units == 'in':\n k_unit = 2.54\n else:\n print(\"GSD unit error. Returning [-1, -1]\")\n return [-1, -1]\n\n return [(self.cam_sensor_width*pxlh*k_unit)/(\n self.cam_focal_length*self.cam_image_width),\n (self.cam_sensor_height*pxlh*k_unit)/(\n self.cam_focal_length*self.cam_image_height)]",
"def utmToWgs(x, y):\n return utm_projection(x, y, inverse=True)",
"def msd(x, y):\n # WARNING: We hardcode the max and min value here\n max_ = 5\n min_ = 1\n\n if len(x) == 0:\n return -np.inf\n else:\n return 1 - (1 / len(x)) * np.sum(((x - y) / (max_ - min_)) ** 2)",
"def WGS_to_DMS(wgs):\n # Break down latitude degrees into D M S components\n lat_deg = int(wgs.get_latitude_degrees())\n lat_ms = (wgs.get_latitude_degrees() - lat_deg) * 60.0 * 60.0\n lat_m = int(lat_ms / 60.0)\n lat_s = lat_ms % 60.0\n\n # Break down longitude degrees into D M S components\n lon_deg = int(wgs.get_longitude_degrees())\n lon_ms = (wgs.get_longitude_degrees() - lon_deg) * 60.0 * 60.0\n lon_m = int(lon_ms / 60)\n lon_s = lon_ms % 60\n\n # Build coordinate object\n return DMSCoordinate(lat_deg, lat_m, lat_s, lon_deg, lon_m, lon_s)",
"def ddiffmdGm(self, t_measure, Gp, Gm, dGm):\r\n f1 = self.diff_M(t_measure, Gp, Gm+dGm)\r\n f0 = self.diff_M(t_measure, Gp, Gm )\r\n return (f1-f0)/dGm",
"def getGDDMinMax(self):\r\n gddmin = int(self['GDDMIN'])\r\n gddmax = int(self['GDDMAX'])\r\n return((gddmin, gddmax))",
"def ddiffpdGm(self, t_measure, Gp, Gm, dGm):\r\n f1 = self.diff_P(t_measure, Gp, Gm+dGm)\r\n f0 = self.diff_P(t_measure, Gp, Gm )\r\n return (f1-f0)/dGm",
"def mse_grad(x, y):\n return 2.0 * (x - y) / x.size",
"def mrd(self, x, y):\n return numpy.max(numpy.abs(x - y) / x)",
"def grad_x(self, x, y):\n raise NotImplementedError('Grad oracle is not implemented.')",
"def wgsToUtm(x, y):\n return utm_projection(x, y)",
"def dmse(f_x, y): \n return 2 * (f_x - y)",
"def setGDD(self, gddValue):\r\n gddMinMax = self.getGDDMinMax()\r\n if int(gddValue) < gddMinMax[0] or int(gddValue) > gddMinMax[1]:\r\n# print 'apparently outside range'\r\n return\r\n self['GDD'] = gddValue",
"def get_segment(xg, yg, p1, p2):\n dl = (np.gradient(xg)[1].mean() +\n np.gradient(yg)[0].mean()) / 2\n size = int(np.sqrt((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2) / dl)\n xs = np.linspace(p1[0], p2[0], size)\n ys = np.linspace(p1[1], p2[1], size)\n\n return xs, ys",
"def DMS_to_WGS(dms):\n # Build latitude value\n lat = (dms.latitude.degrees\n + (dms.latitude.minutes / 60.0)\n + (dms.latitude.seconds / 3600.0)\n )\n # Check N/S - South is negative\n lat *= -1 if dms.latitude.hemisphere == 'S' else 1\n\n # Build longitude value\n lon = (dms.longitude.degrees\n + (dms.longitude.minutes / 60.0)\n + (dms.longitude.seconds / 3600.0)\n )\n # Check E/W - West is negative\n lon *= -1 if dms.longitude.hemisphere == 'W' else 1\n\n return WGSCoordinate(float(lat), float(lon), WGS_COORD_MODE_DEG);",
"def dense_gauss_kernel(sigma, x, y=None):\r\n\r\n xf = pylab.fft2(x) # x in Fourier domain\r\n x_flat = x.flatten()\r\n xx = pylab.dot(x_flat.transpose(), x_flat) # squared norm of x\r\n \r\n if y is not None:\r\n # general case, x and y are different\r\n yf = pylab.fft2(y)\r\n y_flat = y.flatten()\r\n yy = pylab.dot(y_flat.transpose(), y_flat)\r\n else:\r\n # auto-correlation of x, avoid repeating a few operations\r\n yf = xf\r\n yy = xx\r\n\r\n # cross-correlation term in Fourier domain\r\n xyf = pylab.multiply(xf, pylab.conj(yf))\r\n\r\n # to spatial domain\r\n xyf_ifft = pylab.ifft2(xyf)\r\n #xy_complex = circshift(xyf_ifft, floor(x.shape/2))\r\n row_shift, col_shift = pylab.floor(pylab.array(x.shape)/2).astype(int)\r\n xy_complex = pylab.roll(xyf_ifft, row_shift, axis=0)\r\n xy_complex = pylab.roll(xy_complex, col_shift, axis=1)\r\n xy = pylab.real(xy_complex)\r\n\r\n # calculate gaussian response for all positions\r\n scaling = -1 / (sigma**2)\r\n xx_yy = xx + yy\r\n xx_yy_2xy = xx_yy - 2 * xy\r\n k = pylab.exp(scaling * pylab.maximum(0, xx_yy_2xy / x.size))\r\n\r\n #print(\"dense_gauss_kernel x.shape ==\", x.shape)\r\n #print(\"dense_gauss_kernel k.shape ==\", k.shape)\r\n\r\n return k",
"def rms(x,y=None):\n\n x = np.asarray(x)\n\n if y is None:\n return np.sqrt(np.mean(np.square(x)))\n else:\n y = np.asarray(y)\n return np.sqrt( np.sum(y*np.square(x)) / np.sum(y) )",
"def ddiffmdGp(self, t_measure, Gp, Gm, dGp):\r\n f1 = self.diff_M(t_measure, Gp+dGp, Gm)\r\n f0 = self.diff_M(t_measure, Gp , Gm)\r\n return (f1-f0)/dGp"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
r"""Returns the MSGMSD between `x` and `y`, without color space conversion. `_msgmsd` is an auxiliary function for `msgmsd` and `MSGMSD`.
|
def _msgmsd(
x: torch.Tensor,
y: torch.Tensor,
kernel: torch.Tensor,
weights: torch.Tensor,
alpha: float = 0.5,
**kwargs,
) -> torch.Tensor:
gmsds = []
for i in range(weights.numel()):
if i > 0:
x = F.avg_pool2d(x, kernel_size=2, ceil_mode=True)
y = F.avg_pool2d(y, kernel_size=2, ceil_mode=True)
gmsds.append(_gmsd(x, y, kernel, alpha=alpha, **kwargs))
msgmsd = torch.stack(gmsds, dim=-1) ** 2
msgmsd = torch.sqrt((msgmsd * weights).sum(dim=-1))
return msgmsd
|
[
"def msd(x, y):\n # WARNING: We hardcode the max and min value here\n max_ = 5\n min_ = 1\n\n if len(x) == 0:\n return -np.inf\n else:\n return 1 - (1 / len(x)) * np.sum(((x - y) / (max_ - min_)) ** 2)",
"def rmsd_no_align(frame1, frame2):\n ## find the displacement for each coordinate\n disp = frame1 - frame2\n ##find squared displacement\n numpy.multiply(disp, disp, disp)\n sd = numpy.sum(disp, axis=1) # \"squared displacement\"\n ## mean squared displacement, etc.\n msd = sd.sum() / len(sd)\n rmsd = math.sqrt(msd)\n\n# print msd\n# print rmsd\n return rmsd",
"def test(x: np.ndarray, y: np.ndarray, kernel: kernels.KernelFunc = kernels.exp_quad,\n linear: bool = False, unbiased: bool = True, chunksize: int = None, seed: int = None,\n **kwargs) -> Tuple[float, float, float]:\n mean, sem = mmd2(x, y, kernel, linear, unbiased, chunksize, seed, **kwargs)\n pval = np.nan\n text = f\"MMD\\u00b2_{'l' if linear else ('u' if unbiased else 'b')} \\u2248 {mean:.6g}\"\n if np.isfinite(sem):\n text += f\" \\u00b1 {sem:.6g}\"\n if linear:\n z, pval = asymptotic_linear_test(mean, sem)\n text += f\" (z={z:.4g}, p={pval:.4f})\"\n print(text)\n return mean, sem, pval",
"def utmToWgs(x, y):\n return utm_projection(x, y, inverse=True)",
"def mrd(self, x, y):\n return numpy.max(numpy.abs(x - y) / x)",
"def dmse(f_x, y): \n return 2 * (f_x - y)",
"def unpack_twist_msg(msg, stamped=False):\n if stamped:\n v = msg.twist.linear\n w = msg.twist.angular\n else:\n v = msg.linear\n w = msg.angular\n\n return (v.x, v.y, v.z), (w.x, w.y, w.z)",
"def _dist_matrix(self, x, y):\n dm = dtw_distance(x, y, self.window, self.normalize)\n\n return dm",
"def DMs(DMstart,DMend,dDM):\n\n #NDMs = np.log10(float(DMend)/float(DMstart))/np.log10(1.0+dDM)\n NDMs = (DMend-DMstart)/dDM\n\n return int(np.round(NDMs))",
"def mcdEdxNEW( xzyeT ):\n\n xzyeTStrip = []\n for i in range(len(xzyeT)):\n if xzyeT[i][1] >= -0.49375:\n xzyeTStrip.append(xzyeT[i])\n\n \n if len(xzyeTStrip) < 2:\n return (0,1)\n \n xzyeTStrip = np.asarray(xzyeTStrip)\n upsPointIndex = np.argmin(xzyeTStrip[:,1])\n\n xzyeT5cm = []\n for i in range(len(xzyeTStrip)):\n if np.linalg.norm( xzyeTStrip[i][:3] - xzyeTStrip[upsPointIndex][:3] ) <= 5:\n xzyeT5cm.append( xzyeTStrip[i] )\n\n xzyeT5cm = np.asarray(xzyeT5cm)\n \n end = np.argmax(xzyeT5cm[:,1])\n start = np.argmin(xzyeT5cm[:,1])\n\n dE = (xzyeT5cm[start][3] - xzyeT5cm[end][3])*1000\n dx = np.linalg.norm(xzyeT5cm[start][:3] - xzyeT5cm[end][:3])\n\n return (dE,dx)",
"def convert_dms_cm(dms_val):\n return 413836.537 * (1 / (dms_val ** 1.416))",
"def find_msd(direct, frames):\n x = pd.DataFrame()\n for i in range(frames):\n #reading the x coordinates from the txt files\n s = pd.read_csv(direct + \"/images/fronts/\"+str(i)+\".png_sx.txt\", sep = \" \")\n s.columns = [0,1]\n x[i] = s[0]\n #computes the MSD of the dataframe with the x coordinates\n msd = an.MSD(x)\n #saving it\n msd.to_csv(direct + \"/msd.txt\", header = None, index = False,sep=' ')\n print(colors.green|\"msd saved in files 'msd.txt'\")\n\n return msd",
"def normalize_msg(msgU,msgD,msgL,msgR):\r\n\r\n avg=np.mean(msgU,axis=2)\r\n msgU -= avg[:,:,np.newaxis]\r\n avg=np.mean(msgD,axis=2)\r\n msgD -= avg[:,:,np.newaxis]\r\n avg=np.mean(msgL,axis=2)\r\n msgL -= avg[:,:,np.newaxis]\r\n avg=np.mean(msgR,axis=2)\r\n msgR -= avg[:,:,np.newaxis]\r\n\r\n return msgU,msgD,msgL,msgR",
"def calculate_msd_d(positions):\n number_particles, duration, dimensions = positions.shape\n msd = numpy.mean(\n numpy.square(positions - positions[:, 0:1, :]), axis=(0, -1)\n )\n time = numpy.arange(duration)[..., numpy.newaxis]\n slope = numpy.linalg.lstsq(time, msd, rcond=None)[0][0]\n d = slope / (2 * dimensions)\n return msd, d",
"def WGS_to_DMS(wgs):\n # Break down latitude degrees into D M S components\n lat_deg = int(wgs.get_latitude_degrees())\n lat_ms = (wgs.get_latitude_degrees() - lat_deg) * 60.0 * 60.0\n lat_m = int(lat_ms / 60.0)\n lat_s = lat_ms % 60.0\n\n # Break down longitude degrees into D M S components\n lon_deg = int(wgs.get_longitude_degrees())\n lon_ms = (wgs.get_longitude_degrees() - lon_deg) * 60.0 * 60.0\n lon_m = int(lon_ms / 60)\n lon_s = lon_ms % 60\n\n # Build coordinate object\n return DMSCoordinate(lat_deg, lat_m, lat_s, lon_deg, lon_m, lon_s)",
"def test_dm_text(self):\n msg = {\n \"id_str\": \"1\",\n 'text': 'This is a dm.',\n \"sender_id\": 1,\n \"sender_id_str\": \"1\",\n \"sender_screen_name\": \"fakeuser\",\n \"sender\": {},\n \"recipient_id\": 2,\n \"recipient_id_str\": \"2\",\n \"recipient_screen_name\": \"fakeuser2\",\n \"recipient\": {},\n }\n self.assertEqual('This is a dm.', self.messagetools.dm_text(msg))",
"def mmms(*args):\n return _seb.mmms(*args)",
"def correct_dcm(dcm):\n x = dcm.pixel_array + 1000\n px_mode = 4096\n x[x>=px_mode] = x[x>=px_mode] - px_mode\n dcm.PixelData = x.tobytes()\n dcm.RescaleIntercept = -1000\n return dcm.pixel_array, dcm.RescaleIntercept",
"def get_wmd(s1, s2, dists, w2i, get_flow=False):\n s1, s2 = s1.split(), s2.split()\n h1, h2, words = get_histograms(s1, s2, w2i)\n D = dists[np.ix_(words, words)]\n\n if get_flow:\n return pyemd.emd_with_flow(h1, h2, D)\n\n return pyemd.emd(h1, h2, D)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set configured GPIO pin direction
|
def set_direction(self, direction):
|
[
"def setup_pin(self, pin):\n # TODO add some extra checks here. Maybe verify BCM?\n GPIO.setup(pin, GPIO.OUT)",
"def set_pin_mode(self, pin, mode):\n if isRasPi:\n GPIO.setup(pin, mode)\n self.pin_config[pin] = mode\n return self.get_pin_mode(pin)",
"def set_pin_direction(self, pin, direction):\n if type(pin) is list:\n for p in pin:\n self.set_pin_direction(p, direction)\n return\n\n pin_id = self._pin_mapping.get(pin, None)\n if pin_id and type(direction) is ahio.Direction:\n self._set_pin_direction(pin_id, direction)\n else:\n raise KeyError(\"Requested pin is not mapped: %s\" % pin)",
"def __change_direction(self):\n\n self.current_direction = self.next_direction",
"def set_pin_value(self, pin, value):\n if isRasPi:\n GPIO.output(pin, value)\n return value",
"def toggle(gpio_pin):\r\n digitalWrite(gpio_pin, digitalRead(gpio_pin) ^ 1)",
"def setup(self, pin, mode, state=0):\n if not (pin in valid_pins):\n show_error_and_exit(\n \"This pin is out of range! Please use valid pins!\")\n else:\n if not (mode in {self.IN, self.OUT}):\n print('invalid mode')\n sys.exit()\n else:\n self.pin_modes[pin] = mode\n if mode == self.IN:\n if not (state in {self.PUD_UP, self.PUD_DOWN, NO_SIGNAL}):\n show_error_and_exit('invalid input state!')\n else:\n self.pin_states[pin] = state\n else:\n if not (state in {self.LOW, self.HIGH, NO_SIGNAL}):\n show_error_and_exit('invalid output state!')\n else:\n self.pin_states[pin] = state",
"def SetDirection(self, *args) -> \"void\":\n return _ITKIOImageBaseBasePython.itkImageIOBase_SetDirection(self, *args)",
"def set_gpio_pin_mode(self, identifier: int, pin_mode: GPIOPinMode) -> None:\n self._mode[identifier] = pin_mode",
"def togglePin(self,pin):\n if -2 == pin:\n self.stData = 1 - self.stData\n GPIO.output(self.pinDATA, self.dataLevel[self.stData and 1 or 0])\n elif -3 == pin:\n self.stClock = 1 - self.stClock\n GPIO.output(self.pinCLOCK, self.stClock and GPIO.HIGH or GPIO.LOW)\n elif -4 == pin:\n self.stLatch = 1 - self.stLatch\n GPIO.output(self.pinLATCH, self.stLatch and GPIO.HIGH or GPIO.LOW)",
"def direction_but_pressed(self):\n if self.power_on:\n if self.direction == 'forward':\n self.config_direction('reverse')\n else:\n self.config_direction('forward')\n\n self.throttle_frame.set_direction(self.direction )",
"def setup(self, channel, mode=\"out\"):\n self.gpio_setup(channel, mode)",
"def pin_activate(pin_id):\n GPIO.setup(pin_id, GPIO.OUT)\n return",
"def SetDirection(self, direction: 'itkMatrixD44') -> \"void\":\n return _itkImagePython.itkImageBase4_SetDirection(self, direction)",
"def SetDirection(self, direction: 'itkMatrixD22') -> \"void\":\n return _itkImagePython.itkImageBase2_SetDirection(self, direction)",
"def led_set(state):\n l = Pin(LED, Pin.OUT)\n l.value(state)",
"def set_pullup(self, pin, is_pullup):\n with self:\n self._set_register_value(\"GPPU\", pin, is_pullup)",
"def change_pin(self, pin):\r\n self.pin = pin",
"def motor_ctrl(motor, dir):\n gpio.output(sett[\"STBY\"], gpio.HIGH)\n\n if motor == MOTOR_A:\n if dir == FORWARD:\n gpio.output(sett[\"AIN1\"], gpio.HIGH) # Set AIN1 \\ direction of motor A\n gpio.output(sett[\"AIN2\"], gpio.LOW) # Set AIN2 / \n gpio.output(sett[\"PWMA\"], gpio.HIGH) # Set AIN2 / \n elif dir == BACKWARD:\n gpio.output(sett[\"AIN1\"], gpio.LOW) # Set AIN1 \\ direction of motor A\n gpio.output(sett[\"AIN2\"], gpio.HIGH) # Set AIN2 / \n gpio.output(sett[\"PWMA\"], gpio.HIGH) # Set AIN2 / \n elif dir == STOP:\n gpio.output(sett[\"AIN1\"], gpio.LOW)\n gpio.output(sett[\"AIN2\"], gpio.LOW)\n gpio.output(sett[\"PWMA\"], gpio.LOW) # Set AIN2 / \n elif motor == MOTOR_B: \n if dir == FORWARD:\n gpio.output(sett[\"BIN1\"], gpio.HIGH) # Set AIN1 \\ direction of motor A\n gpio.output(sett[\"BIN2\"], gpio.LOW) # Set AIN2 / \n gpio.output(sett[\"PWMB\"], gpio.HIGH) # Set AIN2 / \n elif dir == BACKWARD:\n gpio.output(sett[\"BIN1\"], gpio.LOW) # Set AIN1 \\ direction of motor A\n gpio.output(sett[\"BIN2\"], gpio.HIGH) # Set AIN2 / \n gpio.output(sett[\"PWMB\"], gpio.HIGH) # Set AIN2 / \n elif dir == STOP:\n gpio.output(sett[\"BIN1\"], gpio.LOW)\n gpio.output(sett[\"BIN2\"], gpio.LOW)\n gpio.output(sett[\"PWMB\"], gpio.LOW) # Set AIN2 / "
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This function sends a monitoring record to a Java Gateway and receives the reply as JSON.
|
def send_to_network(gateway, line):
values = line.split(',') + [0.0, 0.0, 0.0, 0.0]
raw_record = gateway.jvm.MonitoringRecord(*values)
record = gateway.entry_point.mappingFunc('1', raw_record).toJson()
return record
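A minimal usage sketch, assuming the JVM side is reachable through a py4j JavaGateway and already exposes the MonitoringRecord class and mappingFunc entry point referenced above; the CSV line is a hypothetical placeholder:

from py4j.java_gateway import JavaGateway

gateway = JavaGateway()  # connects to a JVM running a py4j GatewayServer
line = "2016-01-01 00:00:00,machine-01,42.0"  # hypothetical monitoring CSV line
reply_json = send_to_network(gateway, line)
print(reply_json)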
|
[
"def send(self):\n json_report = None\n try:\n json_report = json.dumps(self.report)\n except Exception as err:\n print(\"Could not convert the report to JSON. Threw exception: {}\".format(err))\n print('Report: {}'.format(self.report))\n\n if json_report:\n try:\n response = requests.post('https://metrics-api.iopipe.com/v0/event', data=json.dumps(self.report))\n print('POST response: {}'.format(response))\n print(json.dumps(self.report, indent=2))\n self._sent = True\n except Exception as err:\n print('Error reporting metrics to IOPipe. {}'.format(err))\n print(json.dumps(self.report, indent=2))",
"def report_log(target, record_object):\n return requests.post(\n os.path.join(target, \"lofka/service/push\"),\n data=json.dumps(record_object).encode()\n )",
"def send_event(self, request):\n # type: (Dict) -> Optional[httplib.HTTPResponse]\n resp = None\n conn = None\n try:\n rb_json = self._encoder.encode(request)\n headers = self.get_headers(request)\n with StopWatch() as sw:\n conn = get_connection(self._agent_url)\n conn.request(\"POST\", self._endpoint, rb_json, headers)\n resp = get_connection_response(conn)\n if resp.status < 300:\n log.debug(\"sent %d in %.5fs to %s. response: %s\", len(rb_json), sw.elapsed(), self.url, resp.status)\n else:\n log.debug(\"failed to send telemetry to the Datadog Agent at %s. response: %s\", self.url, resp.status)\n except Exception:\n log.debug(\"failed to send telemetry to the Datadog Agent at %s.\", self.url)\n finally:\n if conn is not None:\n conn.close()\n return resp",
"def send_metric(s):\n pass",
"def send_record(test_record = None):\n if test_record is None:\n return\n try:\n temp = test_record.copy()\n if 'latency' in temp:\n del temp['latency']\n bandwidth_req = WSGI_URL + '?' + urllib.urlencode(temp)\n req = urllib2.urlopen(bandwidth_req)\n temp = test_record.copy()\n if 'bandwidth' in temp:\n del temp['bandwidth']\n latency_req = WSGI_URL + '?' + urllib.urlencode(temp)\n req = urllib2.urlopen(latency_req)\n except Exception, e:\n pass",
"def generate_json(rubrik_monitor):\n\n logger = logging.getLogger(\"rubrik\")\n\n headers = {'content-type': 'application/json, charset=utf-8'}\n\n # The token stores the REST API credentials and is used for each call to the server.\n token = rubrik_monitor.token\n if token is None:\n token = rubrik_monitor.token = get_rubrik_token(rubrik_monitor)\n if token is None: # If still no token, just fail and come back in 60 seconds.\n rubrik_monitor.json = json.dumps({\"error\": \"Error getting login token from Rubrik\"}, indent=4)\n return\n\n try:\n # Summary Report\n r = rubrik_monitor.session.get(\n RUBRIK_URL + \"/api/v1/report/backup_jobs/summary?report_type=daily\",\n auth=HTTPBasicAuth(token, ''), verify=False, headers=headers)\n if r.status_code != 200:\n raise RubrikNotConnectedException(\"Error getting /api/v1/report/backup_jobs/summary?report_type=daily: \" + r.text)\n rubrik_monitor.data.success_count = json.loads(r.text)[\"successCount\"]\n rubrik_monitor.data.failure_count = json.loads(r.text)[\"failureCount\"]\n rubrik_monitor.data.running_count = json.loads(r.text)[\"runningCount\"]\n\n # Storage stats\n # Note that \"used\" here includes system space. We're more interested in snapshot space\n # so we'll get a used value in the next query.\n r = rubrik_monitor.session.get(\n RUBRIK_URL + \"/api/v1/stats/system_storage\", auth=HTTPBasicAuth(token, ''), verify=False, headers=headers)\n if r.status_code != 200:\n raise RubrikNotConnectedException(\"Error getting /api/v1/stats/system_storage: \" + r.text)\n # Grab the data and convert to gigabytes (rounding up)\n rubrik_monitor.data.total = int(json.loads(r.text)[\"total\"] / (1000 * 1000 * 1000))\n rubrik_monitor.data.available = int(json.loads(r.text)[\"available\"] / (1000 * 1000 * 1000))\n\n # Snapshot stats\n # For some reason this value is returned as a string by the API.\n r = rubrik_monitor.session.get(\n RUBRIK_URL + \"/api/v1/stats/snapshot_storage/physical\", auth=HTTPBasicAuth(token, ''), verify=False, headers=headers)\n if r.status_code != 200:\n raise RubrikNotConnectedException(\"Error getting /api/v1/stats/snapshot_storage/physical: \" + r.text)\n # Grab the data, convert from string and convert to gigabytes (rounding up)\n rubrik_monitor.data.used = int(int(json.loads(r.text)[\"value\"]) / (1000 * 1000 * 1000))\n\n # Average Storage Growth Per Day\n r = rubrik_monitor.session.get(\n RUBRIK_URL + \"/api/v1/stats/average_storage_growth_per_day\",\n auth=HTTPBasicAuth(token, ''), verify=False, headers=headers)\n if r.status_code != 200:\n raise RubrikNotConnectedException(\"Error getting /api/v1/stats/average_storage_growth_per_day: \" + r.text)\n # Grab data and convert to gigabytes (rounding up)\n rubrik_monitor.data.avg_growth_per_day = int(json.loads(r.text)[\"bytes\"] / (1000 * 1000 * 1000))\n\n # Physical Ingest per day (each stat covers a 24 hour day)\n # The values returned with -1day are different than when using -2day or higher (and they seem wrong)\n # So we pull in the values for -2day instead.\n r = rubrik_monitor.session.get(\n RUBRIK_URL + \"/api/v1/stats/physical_ingest_per_day/time_series?range=-2day\",\n auth=HTTPBasicAuth(token, ''), verify=False, headers=headers)\n if r.status_code != 200:\n raise RubrikNotConnectedException(\"Error getting /api/v1/stats/physical_ingest_\"\n \"per_day/time_series?range=-2day: \" + r.text)\n # Grab data and convert to gigabytes (rounding up)\n rubrik_monitor.data.ingested_yesterday = int(json.loads(r.text)[-2][\"stat\"] / (1000 * 1000 * 1000))\n 
rubrik_monitor.data.ingested_today = int(json.loads(r.text)[-1][\"stat\"] / (1000 * 1000 * 1000))\n\n # Node Status\n r = rubrik_monitor.session.get(\n RUBRIK_URL + \"/api/v1/cluster/me/node\", auth=HTTPBasicAuth(token, ''), verify=False, headers=headers)\n if r.status_code != 200:\n raise RubrikNotConnectedException(\"Error getting /api/v1/cluster/me/node: \" + r.text)\n status_json = json.loads(r.text)\n system_status = \"OK\"\n for x in range(0, status_json[\"total\"]):\n if status_json[\"data\"][x][\"status\"] != \"OK\":\n system_status = status_json[\"data\"][x][\"status\"]\n rubrik_monitor.data.node_status = system_status\n\n # Current Streams\n r = rubrik_monitor.session.get(\n RUBRIK_URL + \"/api/v1/stats/streams/count\", auth=HTTPBasicAuth(token, ''), verify=False, headers=headers)\n if r.status_code != 200:\n raise RubrikNotConnectedException(\"Error getting /api/v1/stats/streams/count: \" + r.text)\n streams = json.loads(r.text)[\"count\"]\n\n # IOPS/Throughput\n r = rubrik_monitor.session.get(\n RUBRIK_URL + \"/api/v1/cluster/me/io_stats?range=-10sec\",\n auth=HTTPBasicAuth(token, ''), verify=False, headers=headers)\n if r.status_code != 200:\n raise RubrikNotConnectedException(\"Error getting /api/v1/cluster/me/io_stats?range=-10sec: \" + r.text)\n iops_reads = json.loads(r.text)[\"iops\"][\"readsPerSecond\"][0][\"stat\"]\n iops_writes = json.loads(r.text)[\"iops\"][\"writesPerSecond\"][0][\"stat\"]\n throughput_reads = json.loads(r.text)[\"ioThroughput\"][\"readBytePerSecond\"][0][\"stat\"]\n throughput_writes = json.loads(r.text)[\"ioThroughput\"][\"writeBytePerSecond\"][0][\"stat\"]\n # convert byte_reads from Bytes to Megabytes\n throughput_reads = int(throughput_reads / (1024 * 1024)) # Round up\n throughput_writes = int(throughput_writes / (1024 * 1024)) # Round up\n\n # PhysicalIngest (Live data)\n r = rubrik_monitor.session.get(\n RUBRIK_URL + \"/api/v1/stats/physical_ingest/time_series?range=-10sec\", auth=HTTPBasicAuth(token, ''),\n verify=False,\n headers=headers)\n if r.status_code != 200:\n raise RubrikNotConnectedException(\"Error getting /api/v1/stats/physical_\"\n \"ingest/time_series?range=-10sec \" + r.text)\n ingest = json.loads(r.text)[0][\"stat\"]\n # convert byte_reads from Bytes to Megabytes\n ingest = int(ingest / (1024 * 1024)) # Round up\n\n # Save stat datapoints to our persistent monitor object\n rubrik_monitor.data.iops.append(iops_reads + iops_writes)\n rubrik_monitor.data.throughput.append(throughput_reads + throughput_writes)\n rubrik_monitor.data.ingest.append(ingest)\n rubrik_monitor.data.streams.append(streams)\n\n # If we already have the max number of datapoints in our list, delete the oldest item\n if len(rubrik_monitor.data.iops) > MAX_DATAPOINTS:\n del (rubrik_monitor.data.iops[0])\n del (rubrik_monitor.data.throughput[0])\n del (rubrik_monitor.data.ingest[0])\n del (rubrik_monitor.data.streams[0])\n\n # Format our output as json under the stats name\n output = json.dumps({\"stats\": rubrik_monitor.data.__dict__})\n\n # ====================================\n # Generate JSON output and assign to rubrik_monitor object (for return back to caller module)\n rubrik_monitor.json = output\n\n logger.debug(rubrik_monitor.json)\n\n except Exception as error:\n logger.error(\"Error getting data from Rubrik: \" + str(error))\n rubrik_monitor.json = json.dumps({\"error\": \"Error getting data from Rubrik\"}, indent=4)\n rubrik_monitor.token = None # Reset login\n rubrik_monitor.session = None # Reset HTTP session",
"def send_metrics(metrics):\n\n conn_info = (CARBON_HOST, CARBON_PORT)\n now = int(time())\n # print(\"Connecting to: %s:%d\" % conn_info)\n # for (metric, value) in metrics.items():\n # line = f\"{metric} {value} {now}\\n\"\n # print(line)\n\n try:\n with socket.socket() as sock:\n print(\"Connecting to: %s:%d\" % conn_info)\n sock.connect(conn_info)\n for (metric, value) in metrics.items():\n line = f\"{metric} {value} {now}\\n\"\n print(\"Sending:\\n%s\" % line)\n sock.send(bytes(line, \"utf-8\"))\n except socket.error:\n raise SystemExit(\"Couldn't connect to %s:%d\" % conn_info)",
"def onStepDetected(timestamp):\n send_socket.send(json.dumps({'user_id' : user_id, 'sensor_type' : 'SENSOR_SERVER_MESSAGE', 'message' : 'SENSOR_STEP', 'data': {'timestamp' : timestamp}}) + \"\\n\")",
"def send_back_json(self, return_dict, all_alocated, connection):\n\n logging.info(\"Sending back Json to client ID %s\" % self.client_connections_map[connection])\n\n response = {}\n if all_alocated:\n\n response['statement'] = 'You got everything you requested for'\n\n else:\n\n response['statement'] = 'Not all machines were allocated. check log for more info'\n response['data'] = return_dict\n\n serialized_res = json.dumps(response)\n connection.send(serialized_res)\n # self.connection_lock.release()",
"def handle_message(value: str):\n status = parse_message(value)\n log.info(f'Received a metric: {status}')\n save_to_db(status)",
"def emit(self, record):\r\n try:\r\n import httplib, urllib\r\n host = self.host\r\n h = httplib.HTTP(host)\r\n url = self.url\r\n data = urllib.urlencode(self.mapLogRecord(record))\r\n if self.method == \"GET\":\r\n if (string.find(url, '?') >= 0):\r\n sep = '&'\r\n else:\r\n sep = '?'\r\n url = url + \"%c%s\" % (sep, data)\r\n h.putrequest(self.method, url)\r\n # support multiple hosts on one IP address...\r\n # need to strip optional :port from host, if present\r\n i = string.find(host, \":\")\r\n if i >= 0:\r\n host = host[:i]\r\n h.putheader(\"Host\", host)\r\n if self.method == \"POST\":\r\n h.putheader(\"Content-type\",\r\n \"application/x-www-form-urlencoded\")\r\n h.putheader(\"Content-length\", str(len(data)))\r\n h.endheaders()\r\n if self.method == \"POST\":\r\n h.send(data)\r\n h.getreply() #can't do anything with the result\r\n except (KeyboardInterrupt, SystemExit):\r\n raise\r\n except:\r\n self.handleError(record)",
"def call():\n response = VoiceResponse()\n response.say('Hello. Please leave your question after the beep.')\n response.record()\n response.hangup()\n\n # Message is not saved here but on Twilio console.\n # Add a link to it in the queue\n Qqueue.enqueue(\"RECORDED CALL --- https://www.twilio.com/console/voice/recordings/recording-logs/\")\n Qqueue.save()\n\n return str(response)",
"def send_results(self, measure_prefix, hist_id, mw):\n datastr = 'Experiment,SOURCE=imageanalysis,name=\"%s\" measure=%s,'%(mw.objectName(), measure_prefix)\n datastr +=','.join(['%s=%s'%(key.replace(' ', '_'), val) for key, val in mw.histo_handler.temp_vals.items()\n if mw.histo_handler.types[key] == float]) \n datastr += ' ' + str(int(time.time()*1e9)) + '\\n'\n \n msg = \"POST /write?db=arduino HTTP/1.1\\nHost: 129.234.190.191\\n\"\n msg += \"User-Agent: PyDex\\nConnection: close\\n\"\n msg += \"Content-Type: application/x-www-form-urlencoded\\n\"\n msg += \"Content-Length: %s\\n\\n\"%len(datastr)\n msg += datastr\n try:\n _ = simple_msg('129.234.190.191', 8086, msg)\n except Exception as e:\n error(\"Settings window failed to send results to influxdb\\n\"+str(e))",
"def publishJSON(self):\n\n\t\t# setup AWS and connect\n\t\tAWSClient = AWSIoTMQTTClient(\"basicPubSub\")\n\t\tAWSClient.configureEndpoint(myhost, 8883)\n\t\tAWSClient.configureCredentials(rootCA, privatePath, certPath)\n\n\t\tAWSClient.configureAutoReconnectBackoffTime(1,32,20)\n\t\tAWSClient.configureOfflinePublishQueueing(-1)\n\t\tAWSClient.configureDrainingFrequency(2)\n\t\tAWSClient.configureConnectDisconnectTimeout(10)\n\t\tAWSClient.configureMQTTOperationTimeout(10)\n\t\tAWSClient.connect()\n\n\t\t# create JSON object\n\t\tjsonData = {}\t\n\t\tjsonData['type'] = 'overhead'\n\t\tjsonData['mqtt_lat'] = self.mqtt_latency\n\t\tjsonData['mqtt_ovh'] = self.mqtt_size\n\t\tjsonData['ws_lat'] = self.ws_latency\n\t\tjsonData['ws_ovh'] = self.ws_size\n\t\tjsonData['coap_lat'] = self.coap_latency\n\t\tjsonData['coap_ovh'] = self.coap_size\n\t\tstrData= json.dumps(jsonData)\n\t\t\n\t\t# publish JSON\n\t\tAWSClient.publish(\"AccessControl/performance\", str(strData), 1)\n\t\tprint(\"Published data!\")\n\n\t\t# reset stats for new values\n\t\tself.clearVals()",
"def testNotifications(self):\n req = {\n 'jsonrpc': '2.0',\n 'method': 'greet',\n }\n msg = json.dumps(req)\n self.sock.sendall(msg.encode())\n time.sleep(0.1)\n res = self.sock.recv(1024).decode()\n self.assertEqual(res, '')",
"def send_record_firehose(firehose, kinesis_stream, data):\n delivery_stream_name = kinesis_stream\n response = firehose.put_record(\n DeliveryStreamName=delivery_stream_name,\n Record={\n 'Data': json.dumps(data)\n }\n )\n return response",
"def record_worker(post_dict):\n\n # If we got a Django QueryDict in the argument, adjust it from\n # a multi-value dictionary to an ordinary dictionary.\n if hasattr(post_dict, 'dict'):\n post_dict = post_dict.dict()\n\n cf_data = json.loads(post_dict['payload'])\n\n # There are two known paths towards the 'judgments' value.\n judgments_paths = [('results', ),\n (0, 'results')]\n judgments_data = _find_unique_in_cf_json(cf_data, 'judgments',\n judgments_paths)\n # Find the CID.\n judgment_data = _cf_json_getitem(judgments_data, 0)\n cid = _find_unique_in_cf_json(judgment_data, 'cid', ('unit_data', ))\n\n # Update the XML session file.\n with XMLSession(cid=cid) as session:\n session.record_judgment(judgment_data)",
"def index(self,entry,):\n index_json = \"\"\"{ \"index\": {\"_index\": \"%s\", \"_type\": \"%s\"} }\"\"\" %(self.index_name, self.record_type)\n text_message = json.dumps(entry)\n wire_message = \"%s\\n%s\\n\"%(index_json,text_message)\n if self.debug:\n print wire_message\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n recd = s.sendto(wire_message,(self.udp_host,self.udp_port))\n s.close()",
"def create_watch(self, handle, config, as_json=True, timestamp_format=APITimestampFormat.NANOSECOND):\n return self._xjtrans(\"/views/%s/watches\" % handle, \"POST\", config, as_json, timestamp_format)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Finds all the shared characters from left to right. find_uninflected_stem('rAmaH', 'rAmo') => -1+aH
|
def find_uninflected_stem(stem, form):
i = 0
while i <= len(stem) - 1 and i <= len(form) - 1 and stem[i] == form[i]:
i += 1
stem_ending = stem[i:]
form_ending = form[i:]
if stem_ending == '' and form_ending == '':
operation = ''
else:
form_ending_len = len(form_ending)
operation = '-{}+{}'.format(form_ending_len, stem_ending)
return operation
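A short usage check based on the code above: the shared prefix 'rAm' leaves a one-character form ending, so the operation string records the length of that ending after a minus sign, followed by the stem ending to restore.

print(find_uninflected_stem('rAmaH', 'rAmo'))   # -> '-1+aH'
print(find_uninflected_stem('rAmaH', 'rAmaH'))  # -> '' (identical strings need no operation)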
|
[
"def fold_stem(l_seq, r_seq):\n fc = RNA.fold_compound(l_seq + r_seq)\n fc.hc_add_from_db('<'*len(l_seq) + '>'*len(r_seq))\n fc, mfe = fc.pf()\n return fc, mfe",
"def test_search_small(self):\n seq = \"GCCTGGAAAGGC\"\n motif = [(4, \"CTGGAAAG\")]\n self.assertEqual(stem.search(motif, seq), [seq])",
"def get_rackwords(self):\n\n combs = []\n words = []\n for no in range(1, len(self.rack) + 1):\n combs1 = list(\"\".join(lets) for lets in itertools.combinations(self.rack, no))\n combs.extend(combs1)\n combs = list(set(combs))\n for seq in combs:\n if len(seq) > 1: # one letter words not allowed as first turn\n if seq == \" \":\n if self.lett2:\n words.extend([(0, lt) for lt in self.lett2])\n else:\n while seq[0] == \" \":\n seq = seq[1:] + seq[0]\n letts = [r for r in seq if r != seq[0]]\n words.extend(list(self.graph.contains_lett(list(seq[0]), letts)))\n words = [w[1] for w in words if len(w[1]) > 1]\n words = set(tuple(w) for w in words)\n return list(list(w) for w in words)",
"def word_plays(hand, board_letters):\r\n # find prefix + L + suffix; L from board_letters, rest from hand\r\n results = set()\r\n for pre in find_prefixes(hand): #find_prefixes(hand, '', set()):\r\n for L in board_letters:\r\n add_suffixes(removed(hand, pre), pre+L, results)\r\n return results",
"def __find_original_word(self, bunsetsu):\n return (bunsetsu.mrph_list()[0].genkei, bunsetsu.mrph_list()[0].hinsi)",
"def shared_k_mers(k: int, strand_1: str, strand_2: str) -> list:\n comp_2 = get_complement(strand_2, True)\n k_mer_dict = defaultdict(list)\n positions = []\n len_2 = len(strand_2)\n for i in range(len(strand_1) - k + 1):\n k_mer_dict[strand_1[i:i + k]].append(i)\n\n for i in range(len(strand_2) - k + 1):\n if strand_2[i:i + k] in k_mer_dict:\n for pos in k_mer_dict[strand_2[i:i + k]]:\n positions.append((pos, i))\n\n if comp_2[i:i + k] in k_mer_dict:\n for pos in k_mer_dict[comp_2[i:i + k]]:\n rev = len_2 - k - i\n positions.append((pos, rev))\n\n return positions",
"def stem(s):\r\n special=['s']\r\n one_singular=['y','e','a']\r\n singular=['on','er','us','en','st']\r\n plural=['ie','ey','es']\r\n three_end=['ier','ing','dom','er','ism','ist','ion','ous','iou']\r\n four_end=['ible','able','ment','ness','ship','sion','tion','ance','ence','ious']\r\n two_prefix=['re','un','co','de']\r\n three_prefix=['pre','dis']\r\n if len(s)>=3 and s[-1] in special:\r\n if s[-3:-1] in plural:\r\n return s[:-3]\r\n if s[-4:-1] in three_end:\r\n return s[:-4]\r\n if len(s)>=5:\r\n if s[-5:-1]in four_end:\r\n return s[:-5]\r\n if s[:2] in two_prefix:\r\n return s[2:]\r\n if s[:3] in three_prefix:\r\n return s[3:]\r\n if s[-2:-1] in one_singular:\r\n return s[:-2]\r\n else:\r\n return s[:-1]\r\n if len(s)>=3:\r\n if s[:2] in two_prefix:\r\n return s[2:]\r\n if s[:3] in three_prefix:\r\n return s[3:]\r\n if s[-1] in one_singular:\r\n return s[:-1]\r\n if s[-2:] in plural:\r\n return s[:-2]\r\n if s[-3:] in three_end:\r\n return s[:-3]\r\n if len(s)>=5:\r\n if s[-4]in four_end:\r\n return s[:-4] \r\n else:\r\n return s\r\n if s[-1]in one_singular:\r\n return s[:-1]\r\n if s[-2:] in singular:\r\n return s\r\n if s[-2:]in plural:\r\n return s\r\n else:\r\n return s",
"def test_search_big(self):\n seq = \"GCCTGGAAAGGC\"\n filler = \"A\"*50\n big_seq = filler + seq + filler\n motif = [(54, 'CTGGAAAG')]\n self.assertEqual(stem.search(motif, big_seq), [seq])",
"def process_common_prefix(self):\n total_count = 0\n # This counts the number of iteration\n it_count = 0\n while True:\n it_count += 1\n\n count = 0\n # Always make a new copy here because LF will add\n # new terminals\n temp = self.non_terminal_set.copy()\n\n for symbol in temp:\n ret = symbol.left_factorize()\n if ret is True:\n count += 1\n\n if count == 0:\n break\n else:\n total_count += count\n\n dbg_printf(\"Left factorized for %d symbols in %d iterations\",\n total_count,\n it_count)\n\n return",
"def spellings(self):\n scale_candidates = []\n nb_alt_prev = 7\n for tonic_base in self.tonic.closest_white_keys():\n note_names = []\n bad = False\n nb_alt = 0\n for i, cur_base in enumerate(Note.whites_from(tonic_base)):\n cur_note = self.notes[i]\n name = cur_note.name_with_base_white(cur_base)\n note_names.append(name)\n\n if Note.sharp_sym in name or Note.flat_sym in name:\n nb_alt += 1\n if Note.sharp_sym * 2 in name or Note.flat_sym * 2 in name:\n bad = True\n\n if not bad:\n if nb_alt < nb_alt_prev:\n scale_candidates = []\n scale_candidates.append(tuple(note_names))\n nb_alt_prev = nb_alt\n\n return scale_candidates",
"def standard2(word_nm,harakat):\n if len(word_nm)!=len(harakat):\n return u\"\";\n else:\n word=u\"\";\n i=0;\n word_nm,harakat=geminating(word_nm,harakat);\n if len(word_nm)!=len(harakat):\n return u\"\";\n## حالة عدم الابتداء بسكون\n##إذا كان الحرف الثاني مضموما تكون الحركة الأولى مضمومة، وإلا تكون مكسورة\n if len(harakat)!=0 and harakat[0]==SUKUN:\n word_nm=ALEF+word_nm\n if len(harakat)>=2 and harakat[1]in (DAMMA, WAW_HARAKA):\n harakat=DAMMA+harakat\n else:\n harakat=KASRA+harakat\n\n word_nm=tahmeez2(word_nm,harakat);\n if len(word_nm)!=len(harakat):\n return u\"\";\n word_nm,harakat=homogenize(word_nm,harakat);\n if len(word_nm)!=len(harakat):\n return u\"\";\n\n\n#### حالة عدم الابتداء بسكون\n####إذا كان الحرف الثاني مضموما تكون الحركة الأولى مضمومة، وإلا تكون مكسورة\n## if len(harakat)!=0 and harakat[0]==SUKUN:\n#### if word_nm.startswith(ALEF_HAMZA_ABOVE):\n#### word_nm=ALEF+word_nm\n#### else: word_nm=ALEF+word_nm;\n##\n## if len(harakat)>=2 and harakat[1]in (DAMMA, WAW_HARAKA):\n## harakat=DAMMA+harakat\n#### معالجة حالة البدء بساكن لا سيما إن كان همزة على الألف\n## if word_nm.startswith(ALEF_HAMZA_ABOVE):\n## word_nm=ALEF+WAW_HAMZA+word_nm[1:]\n## else: word_nm=ALEF+word_nm;\n## else:\n## harakat=KASRA+harakat\n## if word_nm.startswith(ALEF_HAMZA_ABOVE):\n## word_nm=ALEF+YEH_HAMZA+word_nm[1:]\n## else: word_nm=ALEF+word_nm;\n while i <len(word_nm):\n if harakat[i]==ALEF_HARAKA:\n word+=word_nm[i]+FATHA+ALEF;\n i+=1;\n elif harakat[i]==ALEF_WAW_HARAKA:\n word+=word_nm[i]+FATHA+ALEF;\n i+=1;\n elif harakat[i]==ALEF_YEH_HARAKA :\n if i+1<len(word_nm):\n \tword+=word_nm[i]+FATHA+ALEF;\n else :\n \tword+=word_nm[i]+FATHA+ALEF_MAKSURA;\n## \tword+=word_nm[i]+FATHA+\"*\";\n i+=1;\n elif harakat[i]==WAW_HARAKA:\n word+=word_nm[i]+DAMMA+WAW;\n i+=1;\n elif harakat[i]==YEH_HARAKA:\n word+=word_nm[i]+KASRA+YEH;\n i+=1;\n elif harakat[i]==ALTERNATIVE_YEH_HARAKA:\n word+=word_nm[i]+KASRA+YEH;\n i+=1;\n elif harakat[i]==NOT_DEF_HARAKA:\n word+=word_nm[i];\n i+=1;\n\n else:\n word+=word_nm[i]+harakat[i];\n i+=1;\n if word.endswith(FATHA+YEH+FATHA):\n \tword=word[:-2]+ALEF_MAKSURA;\n elif word.endswith(FATHA+WAW+FATHA):\n \tword=word[:-2]+ALEF;\n##-\tتحويل همزة القطع على الألف بعدها فتحة وهمزة القطع على الألف بعدها سكون إلى ألف ممدودة\n\n\tword=word.replace( u\"%s%s%s\"%(ALEF_HAMZA_ABOVE,FATHA,ALEF),ALEF_MADDA);\n\tword=word.replace( u\"%s%s\"%(ALEF_MADDA,FATHA),ALEF_MADDA);\n\tword=word.replace( u\"%s%s\"%(ALEF_MADDA,ALEF),ALEF_MADDA);\n\tword=word.replace( u\"%s%s%s%s\"%(ALEF_HAMZA_ABOVE,FATHA,ALEF_HAMZA_ABOVE,SUKUN),ALEF_MADDA);\n\tword=word.replace( u\"%s%s%s%s\"%(ALEF_HAMZA_ABOVE,FATHA,ALEF_HAMZA_ABOVE,FATHA),ALEF_MADDA);\n\tword=word.replace( u\"%s%s%s%s\"%(ALEF,KASRA,HAMZA,SUKUN),ALEF+KASRA+YEH_HAMZA+SUKUN);\n\tword=word.replace( u\"%s%s%s%s\"%(ALEF,DAMMA,HAMZA,SUKUN),ALEF+DAMMA+WAW_HAMZA+SUKUN);\n\tword=word.replace( u\"%s%s%s%s\"%(ALEF_HAMZA_ABOVE,DAMMA,WAW_HAMZA,SUKUN),ALEF_HAMZA_ABOVE+DAMMA+WAW);\n\tword=word.replace( u\"%s%s%s%s\"%(WAW_HAMZA,SUKUN,YEH_HAMZA,KASRA),YEH_HAMZA+SHADDA+KASRA);\n\tword=word.replace( u\"%s%s%s%s\"%(WAW_HAMZA,SUKUN,ALEF_HAMZA_ABOVE,FATHA),ALEF_HAMZA_ABOVE+SHADDA+FATHA);\n\tword=word.replace( u\"%s%s%s%s\"%(ALEF_HAMZA_ABOVE,SUKUN,YEH_HAMZA,KASRA),YEH_HAMZA+SHADDA+KASRA);\n\n## معالجة ألف التفريق\n\tword=word.replace( ALEF_WASLA,ALEF);\n## معالجة ألف الوصل الزائدة عند إضافتها إلى أول الفعل المثال\n\tword=word.replace( u\"%s%s%s%s\"%(ALEF,DAMMA,YEH,SUKUN),ALEF+DAMMA+WAW);\n\n\n\treturn word;\n\t# إعلال و إبدال الهمزة.",
"def stem(s):\r\n if(s[-1:] == 's'):\r\n if(len(s) < 3):\r\n s = s\r\n else:\r\n s = s[:-1]\r\n return stem(s)\r\n \r\n\r\n elif(s[-3:] == 'ing'):\r\n if(len(s) < 5):\r\n s = s\r\n else:\r\n if(s[-4] == s[-5]):\r\n s = s[:-4]\r\n else:\r\n s = s[:-3]\r\n elif(s[-2:] == 'er'):\r\n s = s[:-2]\r\n\r\n elif(s[-2:] == 'ed'):\r\n if(len(s) < 4):\r\n s = s\r\n else:\r\n s = s[:-2]\r\n elif(s[-2:] == 'es'):\r\n if(len(s) < 4):\r\n s = s\r\n else:\r\n s = s[:-2]\r\n elif(s[-3:] == 'est'):\r\n if(len(s) < 5):\r\n s = s\r\n else:\r\n s = s[:-3]\r\n elif(s[-3:] == 'less'):\r\n if(len(s) < 5):\r\n s = s\r\n else:\r\n s = s[:-3]\r\n elif(s[-1:] == 'y'):\r\n if(len(s) < 5):\r\n s = s \r\n else:\r\n s = s[:-1] + 'i'\r\n return s",
"def delete_common_words(data):",
"def get_stem(word):\r\n #stub\r\n #PLACEHOLDER\r\n\r\n ps = PorterStemmer()\r\n \r\n return word",
"def _stem_words(stemmer, words):\n return [stemmer.stem(word.lower()) for word in words]",
"def disambiguate(sentence, lesk=simple_lesk):\n words = sentence.split(' ')\n\n best_sense = []\n\n for word in words:\n best_sense.append(lesk(word, sentence))\n\n return best_sense",
"def _get_base_parts(self, word: str) -> set:\n stemmer = SnowballStemmer('english')\n basics = set()\n splitter = self.splitter\n\n for part in splitter.split(word):\n part = part.lower()\n cost = splitter.word_cost.get(part)\n if cost:\n basics.add(stemmer.stem(part).lower())\n\n return basics",
"def test_search_end(self):\n seq = \"GCCTGGAAAGGC\"\n filler = \"A\" * 50\n big_seq = filler*2 + seq\n motif = [(104, 'CTGGAAAG')]\n self.assertEqual(stem.search(motif, big_seq), [seq])",
"def test_stem_words():\n\n # check for stemmed version of certain words\n test_str = 'The dog was running up the sidewalk.' # should be stemmed\n stemmed_text = text_utilities.stem_words(test_str)\n stemmed_text = stemmed_text.split(' ')\n assert not stemmed_text[3] == 'running'"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Finds the path of the tshark executable. If the user has provided a path or specified a location in config.ini, it will be used. Otherwise, default locations will be searched.
|
def get_process_path(tshark_path=None, process_name="tshark"):
config = get_config()
possible_paths = [config.get(process_name, "%s_path" % process_name)]
# Add the user provided path to the search list
if tshark_path is not None:
possible_paths.insert(0, tshark_path)
# Windows search order: configuration file's path, common paths.
if sys.platform.startswith('win'):
for env in ('ProgramFiles(x86)', 'ProgramFiles'):
program_files = os.getenv(env)
if program_files is not None:
possible_paths.append(
os.path.join(program_files, 'Wireshark', '%s.exe' % process_name)
)
# Linux, etc. search order: configuration file's path, the system's path
else:
os_path = os.getenv(
'PATH',
'/usr/bin:/usr/sbin:/usr/lib/tshark:/usr/local/bin'
)
for path in os_path.split(':'):
possible_paths.append(os.path.join(path, process_name))
for path in possible_paths:
if os.path.exists(path):
if sys.platform.startswith('win'):
path = path.replace("\\", "/")
return path
raise TSharkNotFoundException(
'TShark not found. Try adding its location to the configuration file. '
'Searched these paths: {}'.format(possible_paths)
)
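A hedged usage sketch; it assumes the surrounding module already provides get_config and TSharkNotFoundException as used above, and the explicit path is a hypothetical hint (it is tried first, then the config.ini entry, then the platform defaults):

try:
    tshark = get_process_path(tshark_path="/opt/wireshark/bin/tshark")
    print("using tshark at", tshark)
except TSharkNotFoundException as err:
    print(err)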
|
[
"def whereis(progName, logger: logging.Logger = None):\n cFuncName = colored(os.path.basename(__file__), 'yellow') + ' - ' + colored(sys._getframe().f_code.co_name, 'green')\n\n if platform == \"win32\":\n filename, file_extension = os.path.splitext(progName)\n if file_extension != '.exe' or file_extension != '.com':\n progName = progName + '.exe'\n\n for path in os.environ.get('PATH', '').split(os.pathsep):\n exeProgram = os.path.join(path, progName)\n if os.path.exists(exeProgram) and not os.path.isdir(exeProgram) and os.access(exeProgram, os.X_OK):\n return exeProgram\n\n # not found, so display this\n user_paths = os.environ['PATH'].split(os.pathsep)\n if logger is not None:\n logger.info('{func:s} !!! progName {prog:s} not found in PATH {path!s}'.format(func=cFuncName, prog=progName, path=user_paths))\n else:\n sys.stderr.write('progName %s not found in PATH %s\\n' % (colored(progName, 'red'), user_paths))\n\n return None",
"def locate_program(name):\n prog_path = shutil.which(name)\n if not prog_path:\n return None\n return Path(prog_path)",
"def get_path(self, tool):\n\t\tpaths = os.getenv('PATH').split(':')\n\t\ttool_path = None\n\t\tfor path in paths:\n\t\t\tif os.path.isfile(path+\"/\"+tool):\n\t\t\t\ttool_path = path+\"/\"+tool\n\t\tif tool_path is None:\n\t\t\tprint 'Error: Unable to locate '+tool+' in PATH.'\n\t\t\tsys.exit(1)\n\t\treturn tool_path",
"def find_executable(self, executable, path=None):\n\t\tif os.path.isfile(executable):\n\t\t\treturn executable\n\n\t\tif path is None:\n\t\t\tpath = os.environ['PATH']\n\t\tpaths = string.split(path, os.pathsep)\n\t\n\t\tfor path in paths:\n\t\t\tfullname = os.path.join(path, executable)\n\t\t\tif os.path.isfile(fullname):\n\t\t\t\treturn fullname\n\t\treturn ''",
"def which(searchFile) :\n for searchPath in os.environ[\"PATH\"].split(os.pathsep):\n test=os.path.join(searchPath,searchFile)\n if os.path.isfile(test): return test\n\n return None",
"def search_for_configuration_file():\n # ./paper-git.cfg\n config_path = os.path.abspath('paper-git.cfg')\n if os.path.exists(config_path):\n return config_path\n # ./var/etc/paper-git.cfg\n config_path = os.path.abspath(\n os.path.join('var', 'etc', 'paper-git.cfg'))\n if os.path.exists(config_path):\n return config_path\n # /etc/paper-git.cfg\n config_path = os.path.join('/etc', 'paper-git.cfg')\n if os.path.exists(config_path):\n return config_path\n # None of the above configuration files exists.\n return None",
"def get_PATH_environment_variable():\n return os.getenv('PATH', default='')",
"def locateProg(progName, logger: logging.Logger = None):\n cFuncName = colored(os.path.basename(__file__), 'yellow') + ' - ' + colored(sys._getframe().f_code.co_name, 'green')\n\n logger.info('{func:s}: locate programs {prog:s}'.format(func=cFuncName, prog=progName))\n\n exePROG = whereis(progName, logger)\n if exePROG is None:\n if logger is not None:\n logger.info('{func:s} !!! did not found executable {prog:s} in path. Program Exits\\n'.format(func=cFuncName, prog=progName))\n else:\n sys.stderr.write(colored(('!!! did not found executable %s in path. Program Exits\\n' % progName), 'red'))\n sys.exit(amc.E_NOT_IN_PATH)\n\n logger.info('{func:s}: {prog:s} is {cmd:s}'.format(func=cFuncName, prog=progName, cmd=colored(exePROG, 'green')))\n\n return exePROG",
"def _discover_sdk_path():\n # adapted from {http://code.google.com/p/bcannon/source/browse/\n # sites/py3ksupport-hrd/run_tests.py}\n\n # Poor-man's `which` command.\n for path in os.environ['PATH'].split(':'):\n if os.path.isdir(path) and 'dev_appserver.py' in os.listdir(path):\n break\n else:\n raise RuntimeError(\"couldn't find appcfg.py on $PATH\")\n\n # Find out where App Engine lives so we can import it.\n app_engine_path = os.path.join(os.path.dirname(path), 'google_appengine')\n if not os.path.isdir(app_engine_path):\n raise RuntimeError('%s is not a directory' % app_engine_path)\n return app_engine_path",
"def get_config_path(name):\n sp_root_dir = join(environ.get('HOME', '/'), '.stormpath')\n return join(sp_root_dir, 'cli', name)",
"def get_path(executable, log=None):\n code, out, err = run_cmd('which {}'.format(executable))\n if code != 0 or err == '{} not found'.format(executable):\n raise PathError('{} is not in your path'.format(executable), log)\n else:\n return os.path.abspath(out)",
"def script_path(sname):\n\n return examples_dir / \"scripts\" / Path(sname)",
"def _detect(env):\n try:\n return env['genparse']\n except KeyError:\n pass\n\n genparse = env.WhereIs('genparse', env['SDK_TOOLS'] + '/bin')\n if genparse:\n return genparse\n\n raise SCons.Errors.StopError(\n GenParseNotFound,\n \"Could not find AT Parser (genparse.exe)\")",
"def bin_path():\n path = \"bin\"\n\n if platform.system() == \"Windows\":\n path = \"Scripts\"\n\n return path",
"def FindExecutable( exe_name ):\n\n import gview\n\n if os.name == 'nt':\n (root, ext) = os.path.splitext(exe_name)\n if ext != '.exe':\n exe_name = exe_name + '.exe'\n\n if os.path.isfile(exe_name):\n return exe_name\n\n if os.path.isfile(os.path.join(gview.home_dir,'bin',exe_name)):\n return os.path.join(gview.home_dir,'bin',exe_name)\n\n exe_path = os.environ['PATH']\n if (os.name == 'nt'):\n path_items = exe_path.split(';')\n else:\n path_items = exe_path.split(':')\n\n for item in path_items:\n exe_path = os.path.join(item,exe_name)\n if os.path.isfile(exe_path):\n return exe_path\n\n return None",
"def findExecutable(name, alwaysAddSuffix=False):\n\n\texecNames = _getExecNames(name, alwaysAddSuffix)\n\tif os.path.isabs(name):\n\t\tfor e in execNames:\n\t\t\tif os.access(e, os.X_OK):\n\t\t\t\treturn e\n\t\treturn None\n\tpath = os.getenv(\"PATH\", os.defpath)\n\tpathList = path.split(os.pathsep)\n\tchimera = os.getenv('CHIMERA')\n\tif chimera:\n\t\tpathList.insert(0, os.path.join(chimera, 'bin'))\n\tdel chimera\n\tfor p in pathList:\n\t\tfor e in execNames:\n\t\t\tfilename = os.path.join(p, e)\n\t\t\tif os.access(filename, os.X_OK):\n\t\t\t\treturn filename\n\treturn _findInstalledApp(execNames)",
"def find_path():\n if sys.platform == \"linux2\" or sys.platform == \"linux\":\n extension = \".so\"\n elif sys.platform == \"darwin\":\n extension = \".dylib\"\n elif sys.platform == \"win32\":\n extension = \".dll\"\n else:\n print(\"Unknown system type!\")\n return (True,0,0)\n\n path_lgc = imp.find_module('localgraphclustering')[1]\n return path_lgc+\"/src/lib/graph_lib_test/libgraph\"+extension",
"def find_runfile(runfile=None):\n # Obtain env value\n names = []\n if runfile is not None:\n names.append(runfile)\n names.append(DEFAULT_RUNFILE_NAME)\n # Create .py version if necessary\n if not names[0].endswith('.py'):\n names += [names[0] + '.py']\n # Does the name contain path elements?\n if os.path.dirname(names[0]):\n # If so, expand home-directory markers and test for existence\n for name in names:\n expanded = os.path.expanduser(name)\n if os.path.exists(expanded):\n if name.endswith('.py') or _is_package(expanded):\n return os.path.abspath(expanded)\n else:\n # Otherwise, start in cwd and work downwards towards filesystem root\n path = '.'\n # Stop before falling off root of filesystem (should be platform\n # agnostic)\n while os.path.split(os.path.abspath(path))[1]:\n for name in names:\n joined = os.path.join(path, name)\n if os.path.exists(joined):\n if name.endswith('.py') or _is_package(joined):\n return os.path.abspath(joined)\n path = os.path.join('..', path)\n # Implicit 'return None' if nothing was found",
"def which(path, exefile):\n for p in (path or \"\").split(';'):\n next = os.path.join(p, exefile)\n if os.path.exists(next):\n return next\n\n return \"\""
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns 'Y' for tshark versions >= 1.10.0 and 'R' for older versions.
|
def get_tshark_display_filter_flag(tshark_version):
if tshark_version >= LooseVersion("1.10.0"):
return '-Y'
else:
return '-R'
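A quick usage sketch, assuming LooseVersion comes from distutils.version as the comparison above implies:

from distutils.version import LooseVersion

print(get_tshark_display_filter_flag(LooseVersion("2.6.8")))  # -> '-Y'
print(get_tshark_display_filter_flag(LooseVersion("1.8.0")))  # -> '-R'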
|
[
"def yn(value: bool) -> str:\n return \"Y\" if value else \"N\"",
"def yn_bool(yn_flag: str) -> bool:\n\n return True if yn_flag.upper() == 'Y' else False",
"def test_radio_version_inc(self):\n assert bs.return_radio_version(\"10.3.2.2639\") == \"10.3.2.2640\"",
"def get_roaster_state(self):\n value = self._current_state.value\n if(value == b'\\x02\\x01'):\n return 'idle'\n elif(value == b'\\x04\\x04'):\n return 'cooling'\n elif(value == b'\\x08\\x01'):\n return 'sleeping'\n # handle null bytes as empty strings\n elif(value == b'\\x00\\x00' or value == b''):\n return 'connecting'\n elif(value == b'\\x04\\x02'):\n return 'roasting'\n else:\n return 'unknown'",
"def test_less_than_with_older_codename(self):\n assert salt_version.less_than(\"Nitrogen\") is False",
"def myst_version():\n return 0.13",
"def ts_get_version():\n ts_version = ts.__version__\n lm.write_log_with_timestamp('tushare version: ' + ts_version)\n return ts_version",
"def test_radio_version(self):\n assert bs.return_radio_version(\"10.3.2.2639\", \"10.3.2.5460\") == \"10.3.2.5460\"",
"def ch10ver(self):\r\n rccver = {0: \"106-05 or earlier\",\r\n 7: \"106-07\",\r\n 8: \"106-09\",\r\n 9: \"106-11\",\r\n 10: \"106-13\",\r\n 11: \"106-15\",\r\n 12: \"106-17\",\r\n 13: \"106-19\"}\r\n return rccver[self.tmats_info.Ch10Version]",
"def test_equal_older_codename(self):\n assert salt_version.equal(\"Nitrogen\") is False",
"def test_equal_newer_codename(self):\n assert salt_version.equal(\"Fluorine\") is False",
"def riskType(self):\n if self.riskIncrease == True:\n return 'ARI'\n else:\n return 'ARR'",
"def y(self):\n y_str = self.get('y')\n return int(y_str)",
"def test_str(self, pokedex):\n version = pokedex.query(versions.Version).get(1)\n assert str(version) == 'R'",
"def getRSelMode(self,targetDevice):\n if (targetDevice in self.adc_based_acquisition):\n return \"e5x\"\n elif (targetDevice in [\"SAML22\"]):\n return \"l22\"\n elif (targetDevice in [\"PIC32CZCA80\", \"PIC32CZCA90\"]):\n return \"pic32cz\"\n else:\n return \"std\"",
"def test_equal_older_codename_new_version(self):\n assert salt_version.equal(\"Nitrogen\") is False",
"def zigbee_stack_str(ver):\n if ver == OTA_UPG_HDR_ZIGBEE_STACK_2006:\n ver_str = \"2006\"\n elif ver == OTA_UPG_HDR_ZIGBEE_STACK_2007:\n ver_str = \"2007\"\n elif ver == OTA_UPG_HDR_ZIGBEE_STACK_PRO:\n ver_str = \"Pro\"\n elif ver == OTA_UPG_HDR_ZIGBEE_STACK_IP:\n ver_str = \"IP\"\n else:\n ver_str = \"Unknown\"\n return ver_str",
"def _is_rack_switch(self):\n (_stdin, stdout, _stderr) = self.client.exec_command(self.SHOW_VERSION_COMMAND)\n output = stdout.read().decode()\n for line in output.splitlines():\n if self.VERSION_TAG in line:\n self.version = line.split()[2]\n elif self.MTM_TAG in line:\n self.machine_type_model = line.split()[-1]\n elif self.SERIAL_NUM_TAG in line:\n self.serial_number = line.split()[-1]\n\n # if MTM is found, must be a rack switch\n if self.machine_type_model == \"\":\n return False\n else:\n return True",
"def pyearR(self):\n return self.patterns.year"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Plot pixels in 3D.
|
def plot3d(pixels, colors_rgb, axis_labels=list("RGB"),
axis_limits=[(0, 255), (0, 255), (0, 255)], plot=False):
# Create figure and 3D axes
fig = plt.figure(figsize=(8, 8))
ax = Axes3D(fig)
# Set axis limits
ax.set_xlim(*axis_limits[0])
ax.set_ylim(*axis_limits[1])
ax.set_zlim(*axis_limits[2])
# Set axis labels and sizes
ax.tick_params(axis='both', which='major', labelsize=14, pad=8)
ax.set_xlabel(axis_labels[0], fontsize=16, labelpad=16)
ax.set_ylabel(axis_labels[1], fontsize=16, labelpad=16)
ax.set_zlabel(axis_labels[2], fontsize=16, labelpad=16)
# Plot pixel values with colors given in colors_rgb
ax.scatter(
pixels[:, :, 0].ravel(),
pixels[:, :, 1].ravel(),
pixels[:, :, 2].ravel(),
c=colors_rgb.reshape((-1, 3)), edgecolors='none')
if plot:
# Read a color image
img = cv2.imread("275.png")
# Select a small fraction of pixels to plot by subsampling it
scale = max(img.shape[0], img.shape[1], 64) / 64 # at most 64 rows and columns
img_small = cv2.resize(img, (np.int(img.shape[1] / scale), np.int(img.shape[0] / scale)),
interpolation=cv2.INTER_NEAREST)
# Convert subsampled image to desired color space(s)
img_small_RGB = cv2.cvtColor(img_small, cv2.COLOR_BGR2RGB) # OpenCV uses BGR, matplotlib likes RGB
img_small_HSV = cv2.cvtColor(img_small, cv2.COLOR_BGR2HSV)
img_small_rgb = img_small_RGB / 255. # scaled to [0, 1], only for plotting
# Plot and show
plot3d(img_small_RGB, img_small_rgb)
plt.show()
plot3d(img_small_HSV, img_small_rgb, axis_labels=list("HSV"))
plt.show()
return ax # return Axes3D object for further manipulation
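A minimal usage sketch; the file name is a hypothetical placeholder and the subsampling simply mirrors the demo branch embedded in the function above:

import cv2
import matplotlib.pyplot as plt

img = cv2.imread("some_image.png")                      # hypothetical input image (BGR)
small = cv2.resize(img, (64, 64), interpolation=cv2.INTER_NEAREST)
rgb = cv2.cvtColor(small, cv2.COLOR_BGR2RGB)
plot3d(rgb, rgb / 255.)                                 # pixel positions in RGB space
plt.show()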
|
[
"def plot_3d(pts):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n xs, ys, zs = zip(*pts)\n ax.scatter(xs, ys, zs, c='r', marker='o')\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_zlabel('Z')\n plt.show()",
"def plot_original_3d(self, path=\"images\"):\n raise NotImplementedError(\"nyi\")",
"def scatter3D(X, Y, Z):\n print('Plot in 3D...')\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(X, Y, Z, c=np.abs(Z), cmap=cm.coolwarm)\n ax.set_xlabel('M (slope)')\n ax.set_ylabel('B (intercept)')\n ax.set_zlabel('Z Label')\n plt.show()",
"def plot_3d(x, y, z, df, cmap = plt.cm.seismic_r):\n\n fig = plt.figure(figsize = (10, 10))\n \n ax = fig.add_subplot(111, projection='3d')\n \n # 3d scatterplot\n ax.scatter(df[x], df[y],\n df[z], c = df[z], \n cmap = cmap, s = 40)\n\n # Plot labeling\n ax.set_xlabel(x)\n ax.set_ylabel(y)\n ax.set_zlabel(z)\n\n plt.title('{} as function of {} and {}'.format(\n z, x, y), size = 18);",
"def draw(xs,ys,zs):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n xs,ys = np.meshgrid(xs,ys)\n ax.plot_surface(xs,ys,zs)\n plt.show()",
"def plotGlobe3D():",
"def plot_3d_surface(array, title=''):\n\n\tny,nx=array.shape\n\tx = np.linspace(0, 1, nx)\n\ty = np.linspace(0, 1, ny)\n\txv, yv = np.meshgrid(x, y)\n\n\tfig = plt.figure()\n\tax = fig.add_subplot(111, projection='3d')\n\tax.plot_surface(xv, yv, array, cmap=cm.coolwarm)\n\tif title != '': plt.title(title)\n\tplt.show()",
"def drawGeometry(posSrc, posMic):\r\n fig = plt.figure(figsize=(8, 6))\r\n ax = fig.add_subplot(111, projection='3d')\r\n ax.scatter3D(posMic[:,0], posMic[:,1], posMic[:,2], marker='.')\r\n ax.scatter3D(posSrc[:,0], posSrc[:,1], posSrc[:,2], marker='*')\r\n ax.set_xlabel(\"x (m)\")\r\n ax.set_ylabel(\"y (m)\")\r\n ax.set_zlabel(\"z (m)\")\r\n plt.show()",
"def surface_plot():\n X = np.linspace(-2, 2, 100)\n Y = np.linspace(-1, 3, 100)\n [x, y] = np.meshgrid(X, Y)\n z = h(x, y)\n\n plt.style.use('classic')\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n\n ax.plot_surface(x, y, z, rstride=1, cstride=1, cmap=plt.cm.viridis, linewidth=0, antialiased=False)\n ax.set_xlabel('$x$')\n ax.set_ylabel('$y$')\n ax.set_zlabel('$h(x, y)$')\n plt.show()",
"def show_3d(self, colors=None, colormap='gray', scale_factor=0.02, \n display=True):\n \n from enthought.mayavi import mlab\n \n if colors is None:\n colors = self[2,:]\n\n # I want at most 50K points\n stride = 1 + len(self)/50000\n\n pts = self[:,::stride]\n colors = colors[::stride]\n\n # Draw clusters in point cloud\n fig = mlab.figure()\n mlab.points3d(pts[0,:], pts[1,:], pts[2,:], \\\n colors, colormap=colormap, figure=fig, \\\n scale_mode='none', scale_factor=scale_factor)\n\n mlab.view(180,180)\n \n if show:\n mlab.show() \n else:\n return fig",
"def point_3d(self, x, y, z):\n self._point_3d(x, y, z)",
"def plot3d(mat, path):\n data = []\n for r, row in enumerate(mat):\n for c, val in enumerate(row):\n data.append([r, c, val])\n\n df = pd.DataFrame(data)\n\n # Transform it to a long format\n df.columns = [\"X\", \"Y\", \"Z\"]\n\n # And transform the old column name in something numeric\n # df['X']=pd.Categorical(df['X'])\n # df['X']=df['X'].cat.codes\n\n # Make the plot\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n ax.plot_trisurf(df['Y'], df['X'], df['Z'],\n cmap=plt.cm.viridis, linewidth=0.2)\n\n for item in ([ax.title, ax.xaxis.label, ax.yaxis.label, ax.zaxis.label] +\n ax.get_xticklabels() + ax.get_yticklabels() + ax.get_zticklabels()):\n item.set_fontsize(22)\n ax.tick_params(axis='both', which='major', pad=-1)\n\n plt.xticks([0, 5, 10])\n plt.yticks([0, 5, 10])\n ax.set_zticks([0, 6, 12])\n\n if path is None:\n plt.show()\n else:\n plt.savefig(path)\n plt.close()",
"def ThreeDPositionPlot(self):\r\n try:\r\n numberOfParticles = len(self.LoadSim.Simulation[0])\r\n lengthOfSimulation = len(self.LoadSim.Time)\r\n # creates a list of three lists per particle.\r\n inputData = [[[], [], []] for i in range(numberOfParticles)]\r\n for i in range(lengthOfSimulation):\r\n for j in range(numberOfParticles):\r\n for k in range(3):\r\n inputData[j][k].append(self.LoadSim.Simulation[i][j].position[k])\r\n\r\n fig = plt.figure()\r\n ax = fig.gca(projection='3d')\r\n for j in range(numberOfParticles):\r\n ax.plot(inputData[j][0], inputData[j][1], inputData[j][2]\r\n , label='%s'%(self.LoadSim.Simulation[0][j].name))\r\n plt.title(\"Position of particles over time\")\r\n ax.set_xlabel(\"x position (m)\"), ax.set_ylabel(\"y position (m)\"), ax.set_zlabel(\"z position (m)\")\r\n ax.legend()\r\n plt.savefig(\"%s 3D position.jpg\"%(self.fileName))\r\n plt.show()\r\n\r\n except:\r\n AttributeError\r\n print(\"You cannot plot this figure with the data you have provided.\")",
"def plotPointCloud(object, model=None):\n\n # Layout for plot\n fig = make_subplots(\n rows=2, cols=2,\n vertical_spacing=0.05,\n horizontal_spacing=0.05,\n specs=[[{'type': 'scatter3d'}, {'type': 'scatter3d'}],\n [{'type': 'scatter3d'}, {'type': 'scatter3d'}]])\n \n objlst = Data(object)\n\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n\n for i in range(4):\n point_cloud = objlst[np.random.randint(len(objlst))]\n\n if model is not None and 'Autoencoder' in model.name:\n point_cloud = model(point_cloud[None,:].to(device))\n point_cloud = torch.squeeze(point_cloud,0)\n \n if model is not None and 'Generator' in model.name:\n noise = noiseFunc(0, 0.2, 1, device)\n point_cloud = model(noise)\n point_cloud = torch.squeeze(point_cloud,0)\n\n\n np_point_cloud = point_cloud.detach().cpu().numpy()\n \n fig.add_trace(\n go.Scatter3d(x=np_point_cloud[:,0], \n y=np_point_cloud[:,1], \n z=np_point_cloud[:,2],\n mode='markers',\n marker=dict(size=1,\n color=np_point_cloud[:,2], \n colorscale='Viridis', \n opacity=0.8\n )),\n row=i//2 + 1, col=i%2 + 1\n )\n \n fig.update_layout(\n showlegend=False,\n height=800,\n width=1500\n )\n\n fig.show()",
"def render3D(self):\n mesh = trimesh.Trimesh(vertices=self.verts, faces=self.faces)\n mesh.show(resolution=(512, 512))",
"def plot_3D(self, with_triangulation=False):\n plotly = ensure_plotly()\n\n plots = []\n\n vertices = self.tri.vertices\n if with_triangulation:\n Xe, Ye, Ze = [], [], []\n for simplex in self.tri.simplices:\n for s in itertools.combinations(simplex, 2):\n Xe += [vertices[i][0] for i in s] + [None]\n Ye += [vertices[i][1] for i in s] + [None]\n Ze += [vertices[i][2] for i in s] + [None]\n\n plots.append(plotly.graph_objs.Scatter3d(\n x=Xe, y=Ye, z=Ze, mode='lines',\n line=dict(color='rgb(125,125,125)', width=1),\n hoverinfo='none'\n ))\n\n Xn, Yn, Zn = zip(*vertices)\n colors = [self.data[p] for p in self.tri.vertices]\n marker = dict(symbol='circle', size=3, color=colors,\n colorscale='Viridis',\n line=dict(color='rgb(50,50,50)', width=0.5))\n\n plots.append(plotly.graph_objs.Scatter3d(\n x=Xn, y=Yn, z=Zn, mode='markers',\n name='actors', marker=marker,\n hoverinfo='text'\n ))\n\n axis = dict(\n showbackground=False,\n showline=False,\n zeroline=False,\n showgrid=False,\n showticklabels=False,\n title='',\n )\n\n layout = plotly.graph_objs.Layout(\n showlegend=False,\n scene=dict(xaxis=axis, yaxis=axis, zaxis=axis),\n margin=dict(t=100),\n hovermode='closest')\n\n fig = plotly.graph_objs.Figure(data=plots, layout=layout)\n\n return plotly.offline.iplot(fig)",
"def plt_3d_slices(slice_dict):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection = '3d')\n num_slices = len(slice_dict)\n \n for key in slice_dict:\n intersects = slice_dict[key]\n idx = 0\n while idx < len(intersects):\n if intersects[idx].shape[0] == 1:\n x_vec, y_vec, z_vec = intersects[idx][0][0], intersects[idx][0][1], intersects[idx][0][2]\n ax.scatter(x_vec, y_vec, z_vec, color = 'green', s = 1)\n if intersects[idx].shape[0] == 2:\n x_vec, y_vec, z_vec = [], [], []\n x_points, y_points, z_points = intersects[idx][:,0], intersects[idx][:,1], intersects[idx][:,2] \n x_vec, y_vec, z_vec = np.hstack((x_vec, x_points)), np.hstack((y_vec,y_points)), np.hstack((z_vec, z_points))\n ax.plot(x_vec, y_vec, z_vec ,color = 'green')\n if intersects[idx].shape[0] == 3:\n x_vec, y_vec, z_vec = [], [], []\n x_points, y_points, z_points = intersects[idx][:,0], intersects[idx][:,1], intersects[idx][:,2] \n x_vec, y_vec, z_vec = np.hstack((x_vec, x_points)), np.hstack((y_vec,y_points)), np.hstack((z_vec, z_points))\n ax.plot(x_vec, y_vec, z_vec, color = 'blue')\n idx+=1\n plt.show()",
"def draw3d(atom, **kwargs):\n return atom.molecule.draw3d(highlight_atoms=[atom], **kwargs)",
"def scatter3D(x, y, z, ax=None, trim=1, colors='k'):\n \n if not ax:\n from mpl_toolkits.mplot3d import Axes3D\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d') \n \n if len(colors)==1:\n x, y, z = trim_data(x, y, z, trim=trim)\n else:\n x, y, z, colors = trim_data(x, y, z, colors, trim=trim)\n \n ax.scatter(x, y, z, c=colors, s=5, marker='.', edgecolor = 'face')\n return ax"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Update the atom's Morgan index.
|
def refresh_morgan(self):
self.morgan = self.new_morgan
self.new_morgan=0
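A hedged sketch of how this refresh step typically sits inside a Morgan-algorithm relaxation loop; molecule_atoms and the neighbours attribute are illustrative assumptions, not part of the original class:

# one relaxation round: compute new indices from neighbours, then commit them
for atom in molecule_atoms:                      # hypothetical iterable of atom objects
    atom.new_morgan = sum(n.morgan for n in atom.neighbours)
for atom in molecule_atoms:
    atom.refresh_morgan()                        # morgan <- new_morgan, new_morgan reset to 0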
|
[
"def show_atom_index(self):\r\n try:\r\n self.show_atom_index_judge = True\r\n self.show_atom_element_judge = False\r\n\r\n self.plot(self.Atomsobject)\r\n except Exception as e:\r\n print(e)",
"def modIndex(self, suffix, attr, mod):\n entries_backend = self.getBackendsForSuffix(suffix, ['cn'])\n # assume 1 local backend\n dn = \"cn=%s,cn=index,%s\" % (attr, entries_backend[0].dn)\n self.modify_s(dn, mod)",
"def update_index(self, entity, **kwargs):",
"def reindexContact(self, ob,contact):\n pass",
"def update_indexes():\n appcfg(\"vacuum_indexes\", quiet=True, cwd=opts.proj.dirs.dist)\n appcfg(\"update_indexes\", quiet=True, cwd=opts.proj.dirs.dist)\n print(\"---> update_indexes success\\n\")",
"def update_after_success(self, new_atom_position):\n i = np.argmin(cdist(self.all_atom_absolute_nm, new_atom_position.reshape((-1,2))))\n new_atom_position = self.all_atom_absolute_nm[i,:]\n self.atoms = np.delete(self.atoms, (self.atoms == new_atom_position).all(axis=1).nonzero(), axis=0)\n self.designs = np.delete(self.designs, (self.designs == self.design_chosen).all(axis=1).nonzero(), axis=0)\n self.anchors.append(new_atom_position)",
"def update(self):\n self.weight_mom[self.index] = self.sub_weight_mom\n self.weight[self.index] = self.sub_weight",
"def adjustIndexNode(self,f):\r\n if not self.indexmap.has_key(f):\r\n return\r\n inode = self.indexmap[f]\r\n if inode.cnt == 0:\r\n self.removeindexnode(f)",
"def set_index(self):\n \n HarvestManUrlParser.IDX += 1\n self.index = HarvestManUrlParser.IDX",
"def update_archive(self, cnt=1):\n my = self.date.strftime('%B %Y') # September-2008\n sy = self.date.strftime('%Y') #2008\n sm = self.date.strftime('%m') #09\n\n\n archive = Archive.query().filter(Archive.monthyear==my).get()\n if self.entrytype == 'post':\n if not archive:\n archive = Archive(monthyear=my, year=sy, month=sm, entrycount=1)\n self.monthyear = my\n archive.put()\n else:\n # ratchet up the count\n archive.entrycount += cnt\n archive.put()\n self.blog.entrycount += cnt\n self.blog.put()",
"def reindex(self):\n for idx, line in enumerate(self.line_map):\n line.index = idx\n if line.annotations:\n for x in line.annotations:\n x.line_num = idx",
"def _assign_index(self, vocab, item2id, vocab_size, item_num):\n sorted_vocab = sorted(vocab.items(),\n key = lambda x: x[1], reverse = True)\n if vocab_size > 0:\n sorted_vocab = sorted_vocab[:vocab_size]\n for item, _ in sorted_vocab:\n if item in item2id:\n continue\n item2id[item] = item_num\n item_num += 1",
"def update_largest_index(self):\n while len(self.main_list[self.largest_affinity]) < 1:\n self.largest_affinity -= 1",
"def reindexObject(idxs=[]):",
"def reindex(self, item):\n assert item.id is not None and item.id != u''\n search.indexer.set(item)",
"def _update_indexes(self):\n ntemp = 0\n ntarg = 0\n for pos in self.positions:\n if pos.temp!='-':\n ntemp+=1\n if pos.targ!='-':\n ntarg+=1\n pos.ntemp = ntemp\n pos.ntarg = ntarg",
"def _re_number(self):\n new_dataset_indices = []\n for g, graph in enumerate(self.graphs):\n graph._force_index(g)\n for s, graph_set in enumerate(graph.sets):\n graph_set._force_index(s)\n new_dataset_indices.append((g,s))\n for i, dataset in enumerate(self.datasets):\n dataset._force_index(*new_dataset_indices[i])",
"def update(self, item, index):\n\n # Iterate through linked list until index is reached\n current = self.head\n i = 0\n while i < index:\n current = current.next\n i += 1\n\n # Increment the value at that key by 1 \n current.data[item] += 1",
"def _update_index(self):\n self.current_index = (self.current_index + 1) % self.nb_intervals"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Include the Morgan index in the atom's name.
|
def include_morgan_in_name(self):
self.name=self.old_name+str(self.morgan)
|
[
"def getMarkerName(index):",
"def atom_name(self):\n return self.atom.name.strip()",
"def index_file_name(name: str) -> str:\n return name + '-idx.json'",
"def get_index_name(msg_date):\n return 'email-message-index-{}'.format(msg_date.format('YYYYMM'))",
"def _make_index_name(z_type, column_name):\n\n table_abbrev = \"mea_\" + z_type.replace(\"_\",\"\")[:3]\n column_abbrev = ''.join([x[0] for x in column_name.split('_')])\n md5 = hashlib.md5(\n '{}.{}'.format(z_type, column_name).encode('utf-8')). \\\n hexdigest()\n hashlen = NAME_LIMIT - (len(table_abbrev) + len(column_abbrev) +\n 3 * len('_') + len('ix'))\n return '_'.join([table_abbrev, column_abbrev, md5[:hashlen], 'ix'])",
"def show_atom_index(self):\r\n try:\r\n self.show_atom_index_judge = True\r\n self.show_atom_element_judge = False\r\n\r\n self.plot(self.Atomsobject)\r\n except Exception as e:\r\n print(e)",
"def _shard_name(self, n):\n return self.output_prefix + '.' + str(n)",
"def renderName(self, torrentdata):\n if len(torrentdata[\"group\"][\"musicInfo\"][\"artists\"]) > self.config[\"pattern\"][\"listindividualartists\"]:\n artist = self.config[\"pattern\"][\"variousartists\"]\n else:\n artist = self.config[\"pattern\"][\"artistjoiner\"].join(sorted([artist[\"name\"] for artist in torrentdata[\"group\"][\"musicInfo\"][\"artists\"]]))\n\n fileformat = torrentdata[\"torrent\"][\"format\"]\n\n formatdata = {\n \"artist\": artist,\n \"album\": torrentdata[\"group\"][\"name\"],\n \"year\": torrentdata[\"group\"][\"year\"],\n \"format\": fileformat\n }\n name = self.config[\"pattern\"][\"string\"] % formatdata\n\n return name",
"def format_index_name(prefix, index, index_type=None):\n if index_type is None or index_type == index:\n return \"_\".join([prefix, index])\n else:\n return \"_\".join([prefix, index, index_type])",
"def _assembly_organism_name(self, refseq_archaea_assembly_file, refseq_bacteria_assembly_file,\n genbank_archaea_assembly_file, genbank_bacteria_assembly_file, output_organism_name_file):\n\n fout = open(output_organism_name_file, 'w')\n for assembly_file in [refseq_archaea_assembly_file, refseq_bacteria_assembly_file,\n genbank_archaea_assembly_file, genbank_bacteria_assembly_file]:\n with open(assembly_file) as f:\n f.readline()\n header = f.readline().strip().split('\\t')\n org_name_index = header.index('organism_name')\n\n for line in f:\n line_split = line.strip().split('\\t')\n\n gid = line_split[0]\n if gid.startswith('GCA_'):\n gid = 'GB_' + gid\n else:\n gid = 'RS_' + gid\n org_name = line_split[org_name_index]\n fout.write('%s\\t%s\\n' % (gid, org_name))\n fout.close()",
"def label_species_atoms(spcs):\n index=0\n for spc in spcs:\n for atom in spc.mol.atoms:\n atom.label = str(index)\n index+=1",
"def get_atom_name(self, atom: xlib.Atom) -> str:\n return xlib.get_atom_name(display=self.dpy, atom=atom)",
"def _get_atom_index(parm, name) :\n for i, atom in enumerate(parm.atoms) :\n if atom.name == name :\n return i\n return None",
"def _index_name(table, field, index_type):\n return \"{}_{}_{}\".format(table, field, index_type)",
"def cindex(self):\n return self.short_spec_name.capitalize()",
"def get_name(n, l, m):\n return '%d%s%d' % (n, OrbLet[l] if l < len(OrbLet) else '_%d_' % l, m)",
"def _get_index_id_from_name(self) -> Optional[str]:\n pass",
"def new_index_from_name(base_name):\n return base_name + \".\" + str(int(time.time()))",
"def str_atom(atom: int) -> str:\n return ELEMENT_NAMES[atom]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Duplicates an object within a scene
|
def duplicate(scene, ob):
copy = ob.copy()
# some ops will fail (like triangle mesh) if the object we're operating on
# is hidden. i think its safe to unhide it
copy.hide = False
copy.data = ob.data.copy()
scene.objects.link(copy)
return copy
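The function above targets the pre-2.80 Blender Python API (ob.copy(), ob.data.copy(), scene.objects.link, the boolean hide flag). The sketch below exercises it with hypothetical test doubles rather than real bpy types, so the call pattern can be followed outside Blender; it assumes the duplicate() function above is in scope.

import copy as _copy

# Hypothetical test doubles covering only the slice of the legacy Blender API
# that duplicate() touches; they are not real bpy classes.
class FakeData:
    def copy(self):
        return _copy.copy(self)

class FakeObject:
    def __init__(self):
        self.hide = True  # starts hidden; duplicate() unhides the copy
        self.data = FakeData()

    def copy(self):
        return _copy.copy(self)

class FakeObjects:
    def __init__(self):
        self.linked = []

    def link(self, ob):
        self.linked.append(ob)

class FakeScene:
    def __init__(self):
        self.objects = FakeObjects()

scene, ob = FakeScene(), FakeObject()
dup = duplicate(scene, ob)
print(dup is not ob, dup.hide, dup in scene.objects.linked)  # True False True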
|
[
"def duplicate_helper_object(scene, remote_groups, ob, instance):\n newob = find_cached(scene, ob, instance)\n if newob: return newob\n newob = ob.copy()\n newob.data = newob.data.copy()\n scene.objects.link(newob)\n newob.layers = scene.layers\n newob.matrix_world = ob.matrix_world\n make_suffix(newob, ob.name, ob.library.filepath, instance)\n if newob.type == 'ARMATURE':\n old_active = scene.objects.active\n scene.objects.active = newob\n newob.select = True\n bpy.ops.object.editmode_toggle()\n for bone in newob.data.edit_bones:\n if not 'CASHIFY' in bone.keys():\n newob.data.edit_bones.remove(bone)\n bpy.ops.object.editmode_toggle()\n scene.objects.active = old_active\n for bone in newob.pose.bones:\n while len(bone.constraints):\n bone.constraints.remove(bone.constraints[0])\n local_groups = group_object_from_remote(ob, newob, remote_groups, instance)\n return newob",
"def _do_duplicate_scene(self):\n try:\n song = self.song()\n song.duplicate_scene(list(song.scenes).index(self._scene))\n self.component_message('Scene Duplicated', get_name(self._scene))\n except:\n pass",
"def clone( self ):\r\n\t\tcloneObject = mxs.cross3dhelper.cloneObjects([self._nativePointer], expandHierarchy=True)\r\n\t\treturn self.__class__(self.scene(), cloneObject[0])",
"def duplicateSurface(local=bool, name=\"string\", constructionHistory=bool, object=bool):\n pass",
"def bake_objects(self):\n\n selected_objects = cmds.ls(sl=True)\n del self.objects_to_shade[:]\n for obj in selected_objects:\n self.objects_to_shade.append(obj)\n print self.objects_to_shade",
"def duplicate(objects, renameChildren=bool, returnRootsOnly=bool, parentOnly=bool, instanceLeaf=bool, smartTransform=bool, inputConnections=bool, name=\"string\", upstreamNodes=bool):\n pass",
"def test_deepcopied(self):\n ############################################################\n # Test if the MayaVi2 visualization can be deep-copied.\n\n # Pop the source object.\n s = self.scene\n source = s.children.pop()\n # Add it back to see if that works without error.\n s.children.append(source) \n cp = source.children[0].children[-1]\n s = self.scene\n\n self.check()\n\n # Now deepcopy the source and replace the existing one with\n # the copy. This basically simulates cutting/copying the\n # object from the UI via the right-click menu on the tree\n # view, and pasting the copy back.\n source1 = copy.deepcopy(source)\n s.children[0] = source1\n s = self.scene\n self.check()\n #from enthought.mayavi.tools.show import show\n #show()",
"def dup_object(self): # real signature unknown; restored from __doc__\n pass",
"def simpleCopySelection():\n # ideas / tests / original:\n # push into current group..\n\n App = FreeCAD\n Gui = FreeCADGui\n\n selection = FreeCADGui.Selection.getSelection()\n\n for obj in selection:\n obj_new = object_create_copy(obj)\n obj_new.ViewObject.Visibility = True\n obj.ViewObject.Visibility = False\n # try to add it at same tree location\n obj_parent = find_Parent(obj)\n if obj_parent:\n obj_parent.addObject(obj_new)\n\n #\n\n App.ActiveDocument.recompute()",
"def polyDuplicateAndConnect(removeOriginalFromShaders=bool, renameChildren=bool):\n pass",
"def duplicate(self, cell):\n n = Node_Instance(self.matching, cell)\n \n # note that IRI nodes and objects are not duplicated\n # they don't need to be -- they shouldn't be modified\n # objects in complete triples are just that-- complete\n # objects in incomplete triples are replaced when completed\n \n n.triples = self.triples[:]\n n.incomplete_triples = self.incomplete_triples[:]\n \n return n",
"def copy_move_by_vec(self, vector):\r\n vector = p2e._base._util.scale_1000(vector)\r\n \r\n arg_str = p2e._base._util._convert_args_to_string(\"object.duplicate\", \r\n self._object._eco_id, \r\n vector[0], \r\n vector[1], \r\n vector[2])\r\n p2e._app.Exec(arg_str)\r\n \r\n #get the id of the new object\r\n eco_id = p2e.model.scan.num_objects() - 1\r\n \r\n #create the object\r\n return _ObjectRoot(eco_id, None)",
"def _setDupliObject(self, obj):\n bge_wrappers = {\n types.KX_GameObject : KX_EnemyGameObject,\n types.BL_ArmatureObject : BL_EnemyArmatureObject,\n }\n\n bge_class = obj.__class__\n assert(bge_class in bge_wrappers)\n\n # replace the BGE object class with our own\n self._dupli_object = bge_wrappers[bge_class](obj, self)\n\n # setup the logic bricks\n self._setupLogicBricks()",
"def dup (self):\n r = TrackI(comment=self.comment)\n regions = self.regions\n new_regions = {}\n chrs = regions.keys()\n chrs.sort()\n for chrom in chrs:\n new_regions[chrom]=[]\n new_regions[chrom].extend(regions[chrom])\n r.regions = new_regions\n return r",
"def snap_object():\n selection = cmds.ls(selection=True)\n if len(selection) >= 2:\n pos = cmds.xform(selection[-1], q=True, ws=True, t=True)\n rot = cmds.xform(selection[-1], q=True, ws=True, ro=True)\n for item in selection[:-1]:\n cmds.xform(item, ws=True, t=pos)\n cmds.xform(item, ws=True, ro=rot)\n LOG.info('Snapped {} to {}'.format(item, selection[-1]))",
"def add_to_world(self, game_obj):\n self.game_objects[game_obj.id] = game_obj",
"def update_objects(self):\n\t\tself.update_projectiles()",
"def copyKey(objects, time=(), hierarchy=\"string\", animLayer=\"string\", float=(), includeUpperBound=bool, clipboard=\"string\", shape=bool, controlPoints=bool, forceIndependentEulerAngles=bool, attribute=\"string\", animation=\"string\", index=int, option=\"string\"):\n pass",
"def _replace_selection(self):\n try:\n new = self.current_sprite_class.create_for_editor(self.selection.points)\n except (ValueError, TypeError, IndexError):\n pass\n else:\n self.hole.groups['all'].add(new)\n self.hole.groups['collidibles'].add(new)\n self.selection.kill()\n self.selection = new"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Calculate the result; the incoming meter_value is expected to be negative (hence the multiplication by -1) because it represents consumption
|
def _calc_result(self):
    return self.pv_value + self.meter_value * (-1)
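A minimal sketch of the calculation above; the enclosing class name and the sample readings are hypothetical, chosen only to show the sign flip on the consumption value.

# Hypothetical holder for the two readings; attribute names follow the snippet above.
class ResultCalculator:
    def __init__(self, pv_value, meter_value):
        self.pv_value = pv_value
        self.meter_value = meter_value

    def _calc_result(self):
        # meter_value arrives as a negative consumption figure, so flip its sign
        return self.pv_value + self.meter_value * (-1)

calc = ResultCalculator(pv_value=3000, meter_value=-4500)
print(calc._calc_result())  # 3000 + (-4500) * (-1) = 7500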
|
[
"def meter_value(self):\n return int(\n (self.amountused / self.amounttotal)\n * self.arcrange + self.arcoffset\n )",
"def electric_meter(self, data):\n # convert power diff from kwh to kws\n #self.watts = (self.powerDiff * 3600 /self.timeDiff)\n\n dtime = data.get('Time')\n self.newTime = parser.parse(dtime)\n\n self.meterID = data.get('Message').get('ID')\n self.currentTime = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')\n \n self.newConsumption = data.get('Message').get('Consumption')\n \n self.meter_type = \"Electric\"\n\n if not self.meterID in config.meters.keys():\n if config.debug:print(\"first time seeing this id: {}\".format(self.meterID))\n config.meters[self.meterID] = {\"Time\": self.newTime, \"ID\":self.meterID, \"Consumption\": self.newConsumption}\n return False\n else:\n\n self.oldConsumption = config.meters[self.meterID].get('Consumption')\n self.oldTime = config.meters[self.meterID].get('Time')\n\n # level shift.\n config.meters[self.meterID]['Consumption'] = self.newConsumption\n config.meters[self.meterID]['Time'] = self.newTime\n\n\n self.timeDiff = self.newTime - self.oldTime\n\n ##### DEbUG TAKE OUT.\n #if self.meterID in config.myMeters:print(data)\n\n if(self.timeDiff.total_seconds() < 0):print(\"Error: Time Diff Negative. Customer: %s. %d - %d = %d\" % (self.meterID, self.newTime, self.oldTime, self.timeDiff))\n\n self.wattDiff = self.newConsumption - self.oldConsumption\n\n #if(self.wattDiff != 0):\n #if(self.wattDiff):\n if data.get('Message').get('Consumption'):\n\n #print(data)\n self.kwhPerMin = (self.wattDiff / (self.timeDiff.total_seconds() / 60)) / 100 # <-\n\n\n # if numbers are way out of range throw error\n if self.meterID in config.myMeters:\n print(\"[%s] Customer %s Using %f kwh per minute. (consumption: %d) - (time elapsed: %d s) ### %d\" % (self.currentTime, self.meterID, self.kwhPerMin, self.wattDiff, self.timeDiff.total_seconds(),self.newConsumption))\n else:\n print(\"[%s] Customer %s Using %f kwh per minute. (consumption: %d) - (time elapsed: %d s)\" % (self.currentTime, self.meterID, self.kwhPerMin, self.wattDiff, self.timeDiff.total_seconds()))\n \n self.log_data(data,self.wattDiff,self.kwhPerMin,\"kwh/min\")\n\n else:\n # consumption data hasn't changed. time shift back and wait some more.\n config.meters[self.meterID]['Time'] = self.oldTime\n config.meters[self.meterID]['Consumption'] = self.oldConsumption #redundant?\n self.log_data(data,0,0,\"kwh/min\")\n return True",
"def read_value(self, channel):\n value = None\n reply = self.comm(47 + channel)\n if self.ranges[channel]['action'] == 'voltage':\n num_value = reply - 2 ** 15\n scale = 1.0 * 2 ** 15 / float(self.ranges[channel]['fullrange'])\n value = num_value / scale\n if self.ranges[channel]['action'] == 'tc':\n scale = 1.0 * 2 ** 16 / 1400\n value = (reply/scale) - 150\n return value",
"def _Gain(self, value):\n v = (((self.max-self.min))-float(value))\n v = int(v*10)/10.0\n return v",
"def gas_meter(self, data):\n\n dtime = data.get('Time')\n\n self.newTime = parser.parse(dtime)\n self.meterID = data.get('Message').get('ID')\n self.currentTime = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')\n\n self.newConsumption = data.get('Message').get('Consumption')\n \n self.meter_type = \"Gas\"\n\n if not self.meterID in config.meters.keys():\n if config.debug:print(\"first time seeing this id: {}\".format(self.meterID))\n config.meters[self.meterID] = {\"Time\": self.newTime, \"ID\":self.meterID, \"Consumption\": self.newConsumption}\n return False\n else:\n\n self.oldConsumption = config.meters[self.meterID].get('Consumption')\n self.oldTime = config.meters[self.meterID].get('Time')\n\n # level shift.\n config.meters[self.meterID]['Consumption'] = self.newConsumption\n config.meters[self.meterID]['Time'] = self.newTime\n\n\n self.timeDiff = self.newTime - self.oldTime\n\n ##### DEbUG TAKE OUT.\n #if self.meterID in config.myMeters:print(data)\n\n if(self.timeDiff.total_seconds() < 0):print(\"Error: Time Diff Negative. Customer: %s. %d - %d = %d\" % (self.meterID, self.newTime, self.oldTime, self.timeDiff))\n\n self.mcfDiff = self.newConsumption - self.oldConsumption\n\n #if(self.wattDiff != 0):\n #if(self.mcfDiff):\n \n if data.get('Message').get('Consumption'):\n #print(data)\n self.mcfPerMin = (self.mcfDiff / (self.timeDiff.total_seconds() / 60)) / 1000 # <-\n\n # if numbers are way out of range throw error\n if self.meterID in config.myMeters:\n print(\"[%s] Customer %s Using %f mcf per minute. (consumption: %d) - (time elapsed: %d s) ### %d\" % (self.currentTime, self.meterID, self.mcfPerMin, self.mcfDiff, self.timeDiff.total_seconds(),self.newConsumption))\n else:\n print(\"[%s] Customer %s Using %f mcf per minute. (consumption: %d) - (time elapsed: %d s)\" % (self.currentTime, self.meterID, self.mcfPerMin, self.mcfDiff, self.timeDiff.total_seconds()))\n\n self.log_data(data,self.mcfDiff,self.mcfPerMin,\"mcf/min\")\n \n else:\n # consumption data hasn't changed. time shift back and wait some more.\n config.meters[self.meterID]['Time'] = self.oldTime\n config.meters[self.meterID]['Consumption'] = self.oldConsumption #redundant?\n \n self.log_data(data,0,0,\"mcf/min\")\n\n return True",
"def meters_cust_interrupts(self) -> float:\n return float(self.dss_obj.MetersF(ctypes.c_int32(3), ctypes.c_double(0)))",
"def _normalize_reading(self, value):\n return value / float(self.max_sensor_val)",
"def getEnergy(self) -> float:\n ...",
"def get_sweep_average(self): #tested and documented\n self.send_message(\"AVS?\")\n msg = self.flush_buffer()\n if msg==\"OFF\":\n return 0\n else:\n return int(msg)",
"def get_sensor_value(self):\r\n \r\n tsl = tsl2591.Tsl2591() # initialize\r\n full, ir = tsl.get_full_luminosity() # read raw values (full spectrum and ir spectrum)\r\n lux = tsl.calculate_lux(full, ir) # convert raw values to lux\r\n print ('Lux:', lux)\r\n digital = round(lux,1)\r\n return(digital)\r\n \r\n return(1.0)",
"def measure_v(self):\n self._ser.write('MEAS?')\n __value = float(self._ser.read()[:-1])\n print(f'IT6861A OUT Voltage: {__value}V')\n return __value",
"def get_measurement(self):\n self._co2 = None\n\n if self.interface == 'UART':\n self.ser.flushInput()\n time.sleep(1)\n self.ser.write(\"\\xff\\x01\\x86\\x00\\x00\\x00\\x00\\x00\\x79\")\n time.sleep(.01)\n resp = self.ser.read(9)\n if len(resp) != 0:\n high_level = struct.unpack('B', resp[2])[0]\n low_level = struct.unpack('B', resp[3])[0]\n co2 = high_level * 256 + low_level\n return co2\n\n elif self.interface == 'I2C':\n self.write_register(self.FCR, 0x07)\n self.send(self.cmd_measure)\n try:\n co2 = self.parse(self.receive())\n except Exception:\n co2 = None\n return co2\n\n return None",
"def water_meter(self, data):\n \n dtime = data.get('Time')\n \n self.newTime = parser.parse(dtime)\n \n self.meterID = data.get('Message').get('ID') \n self.currentTime = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')\n \n self.currentConsumption = data.get('Message').get('Consumption')\n \n self.meter_type = \"Water\"\n \n if \"900\" in data.get(\"Type\"):\n #Neptune R900 meters. Cu3/GPM 1/10\n self.newConsumption = data.get('Message').get('Consumption') / 10.0\n else:\n #Assuming others are 1:1 \n self.newConsumption = data.get('Message').get('Consumption') \n\n if not self.meterID in config.meters.keys():\n if config.debug:print(\"first time seeing this id: {}\".format(self.meterID))\n config.meters[self.meterID] = {\"Time\": self.newTime, \"ID\":self.meterID, \"Consumption\": self.newConsumption}\n return False\n else:\n \n self.oldConsumption = config.meters[self.meterID].get('Consumption')\n self.oldTime = config.meters[self.meterID].get('Time')\n \n # level shift.\n config.meters[self.meterID]['Consumption'] = self.newConsumption\n config.meters[self.meterID]['Time'] = self.newTime\n \n\n self.timeDiff = self.newTime - self.oldTime\n \n ##### DEbUG TAKE OUT.\n #if self.meterID in config.myMeters:print(data)\n \n if(self.timeDiff.total_seconds() < 0):print(\"Error: Time Diff Negative. Customer: %s. %d - %d = %d\" % (self.meterID, self.newTime, self.oldTime, self.timeDiff))\n \n self.waterDiff = (self.newConsumption - self.oldConsumption) \n \n if(self.waterDiff != 0):\n # water meter only updates a static export every 7-15 minutes and repeats ~30. ignore unless something changed.\n if \"900\" in data.get(\"Type\"):\n #Neptune R900 meters. Cu3/GPM 1/10\n self.waterPerMin = self.waterDiff / (self.timeDiff.total_seconds() / 60) \n\n else:\n #Assuming others are 1:1\n self.waterPerMin = self.waterDiff / (self.timeDiff.total_seconds() / 60)\n\n \n ### disply whats new and write to database.\n if self.meterID in config.myMeters:\n print(\"[%s] Customer %s Using %f gallons per min. (consumption: %d) - (time elapsed: %d s) ### %d\" % (self.currentTime, self.meterID, self.waterPerMin, self.waterDiff, self.timeDiff.total_seconds(),self.currentConsumption))\n else:\n print(\"[%s] Customer %s Using %f gallons per min. (consumption: %d) - (time elapsed: %d s)\" % (self.currentTime, self.meterID, self.waterPerMin, self.waterDiff, self.timeDiff.total_seconds()))\n \n self.log_data(data,self.waterDiff,self.waterPerMin,\"gallons/min\")\n\n \n else:\n # consumption data hasn't changed. time shift back and wait some more.\n config.meters[self.meterID]['Time'] = self.oldTime\n config.meters[self.meterID]['Consumption'] = self.oldConsumption #redundant?\n \n # log no change to db for graph. test.\n self.log_data(data,0,0,\"gallons/min\")\n \n return True",
"def depth_to_meter(self, depth):\r\n\r\n meter = 0\r\n if depth < 2047:\r\n meter = int(round(1000.0 / ((depth * -0.00307) + 3.33)))\r\n\r\n return meter",
"def measure_v(self):\n self._ser.write('MEAS:VOLT?')\n __value = float(self._ser.read()[:-1])\n print(f'C62012P OUT Voltage: {__value}V')\n return __value",
"def _calculate_accumulated_energy(self, outside_illumination):\n acc = outside_illumination * self.max_pv_absorption * self.timeframe\n self.battery['delta'] = acc\n self.battery['current'] = truncate(arg=(acc + self.battery['current']),\n upper=self.battery['max'])",
"def getMeterReading(self):\n return self._MeterReading",
"def get_meas(self, ch):\r\n\t\tif (ch==1) or (ch==2):\r\n\t\t\tself.autorange(ch)\r\n\t\t\tbuffer = self.dev.ctrl_transfer(bmRequestType = 0xC0, bRequest = self.GET_MEAS_KILL60HZ, wValue = 0, wIndex = ch, data_or_wLength = 6) \r\n\t\t\tret = []\r\n\t\t\tvalue = ((buffer[1]<<8)|buffer[0])-((buffer[3]<<8)|buffer[2])\r\n\t\t\tif buffer[5]==self.SRCV_MEASI:\r\n\t\t\t\tvalue = value*self.get_meas_imult[buffer[4]]\r\n\t\t\t\tunits = 1\r\n\t\t\telse:\r\n\t\t\t\tvalue = value*self.get_meas_vmult[buffer[4]]\r\n\t\t\t\tunits = 0\r\n\t\t\tret.append(value)\r\n\t\t\tret.append(units)\r\n\t\t\treturn ret\r\n\t\telse:\r\n\t\t\tprint \"Illegal channel number specified.\\n\"",
"def get_max_volume(self) -> float:"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Provide a simulated photovoltaic value
|
def _get_simulated_photovoltaic_value(self):
    return random.randint(5000, 9000)
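A standalone sketch of the simulated reading; the 5000 to 9000 range comes from the snippet above, while the interpretation of the value as watts is an assumption.

import random

# Module-level version of the method above: draws a pseudo-random PV reading
# (assumed to be in watts) between 5000 and 9000, inclusive.
def get_simulated_photovoltaic_value():
    return random.randint(5000, 9000)

sample = get_simulated_photovoltaic_value()
assert 5000 <= sample <= 9000
print(sample)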
|
[
"def change_value(image):\n\n out = None\n\n ### YOUR CODE HERE\n out = 0.5 * image ** 2\n ### END YOUR CODE\n\n return out",
"def vat_rate():",
"def volumen_cilindro(radio, altura):\n volumen = pi * radio ** 2 * altura\n return volumen",
"def getValue(self, *args) -> \"PyObject *\":\n return _coin.SbImage_getValue(self, *args)",
"def generate_image(self) -> None:",
"def get_image(self):",
"def get_imaginary(self):\n return self.imaginary",
"def get_volume(self):\n\t\treturn 2 * power(PI, 2) * self.b * self.c * self.r",
"def volts(self, value):\n volt = ((value - 2048) * 10.) / 2048.\n return volt",
"def _pyforaComputedValueArg(self):\n return self.computedValue",
"def getValue(self, *args) -> \"SbImage const &\":\n return _coin.SoSFImage_getValue(self, *args)",
"def getOGTagsImage(self):",
"def _get_seed_value_from_image(self, img):\n img_val = img.ravel()[0]\n img_str = \"{:.4f}\".format(img_val)\n\n _, decimal_str = img_str.split(\".\")\n seed_val = int(decimal_str)\n\n return seed_val",
"def value(self):\n return super(CompositeOutputDevice, self).value",
"def iva(precio):\n iva = precio * 0.16\n return precio + iva",
"def getGpValue(self) -> int:\n ...",
"def __repr__(self):\n\n return f\"<Photo {self.photo_id} for apt {self.p_apartment_url}>\"",
"def image(self, obj):",
"def volume(self):\n\t\treturn (4/3) * PI * power(self.r, 3)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Run the consume method of PVSimulator
|
def main():
pv_simulator = PVSimulator()
pv_simulator.consume()
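The PVSimulator class is not shown in this record; with a hypothetical minimal stand-in such as the one below in scope, the main() entry point above becomes runnable.

# Hypothetical stand-in; the real PVSimulator presumably consumes meter
# readings from a message queue, which is out of scope for this sketch.
class PVSimulator:
    def consume(self):
        print("consuming meter values...")

main()  # prints: consuming meter values...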
|
[
"def produce_consume():\n logger = logging.getLogger(__name__)\n\n even_consumer = actors.Printer.start(\"Even Printer\")\n odd_consumer = actors.Printer.start(\"Odd Printer\")\n producer = NumberGenerator.start(\"RNG\")\n producer.proxy().register(even_consumer, 'even number')\n producer.proxy().register(odd_consumer, 'odd number')\n\n logger.info(\"Producing for 2 seconds at an interval of 0.1 seconds...\")\n producer.tell({'command': 'start producing', 'interval': 0.1})\n time.sleep(2)\n producer.tell({'command': 'stop producing'})\n time.sleep(2)\n logger.info(\"Producing for 2 seconds at an interval of 0.5 seconds...\")\n producer.tell({'command': 'start producing', 'interval': 0.5})\n time.sleep(2)\n producer.tell({'command': 'stop producing'})\n time.sleep(2)\n logger.info(\"Producing for 2 seconds...\")\n producer.tell({'command': 'start producing'})\n time.sleep(2)\n producer.tell({'command': 'stop producing'})\n logger.info(\"Quitting\")\n\n pykka.ActorRegistry.stop_all() # stop actors in LIFO order",
"def start_consuming(self):",
"def main():\n\n producer = KafkaProducer(bootstrap_servers=KAFKA_SERVER, api_version=KAFKA_API_VERSION,\n value_serializer=lambda v: json.dumps(v).encode('utf-8'))\n\n # Get the UUID of the OpenNebula VIM\n token = identity.bearer_token(OSM_ADMIN_CREDENTIALS.get('username'), OSM_ADMIN_CREDENTIALS.get('password'))\n one_vim_uuid = get_opennebula_vim_uuid(token)\n\n # Get the list of VM ids of the OpenNebula NSs\n vm_ids = get_opennebula_vm_ids(token, one_vim_uuid)\n logger.info('The list of VMs {} have been detected given the VIM uuid `{}``'.format(vm_ids, one_vim_uuid))\n\n # Get the metrics for each running VM in OpenNebula instantiated due to the OSM\n for vm_id in vm_ids:\n # Get the info of the VM by given session and VM id\n one_vm_info = OneVMInfo()\n response = one_vm_info.get(XML_RPC_SERVER, XML_RPC_SESSION, vm_id)\n raw_response = response.text\n\n # Parse the response and keep the monitoring metrics as a dict\n monitoring_info, last_poll = export_data_from_one_vm_info(raw_response)\n\n if last_poll is None:\n logger.warning(\"The last poll is {}\".format(last_poll))\n return\n\n # Convert the unix time in UCT iso8601 format\n timestamp = convert_unix_timestamp_to_datetime_str(float(last_poll))\n\n for metric, value in monitoring_info.items():\n metric_type = metric.lower()\n payload = {\"vdu_uuid\": vm_id, \"type\": metric_type, \"value\": value,\n \"unit\": get_unit_by_metric(metric_type), \"timestamp\": timestamp}\n\n # Publish the metric\n request = producer.send(KAFKA_OPENNEBULA_TOPIC, payload)\n try:\n # set timeout in 5 sec\n request.get(timeout=5)\n except KafkaError as ke:\n logger.error(ke)\n producer.close()",
"def run(self):\n try:\n if self.init():\n if self.mbProto['type'] == 'RTU':\n self.master.open()\n while True:\n for mbEvt in self.instance_list:\n if \"rw\" in mbEvt.operation:\n mbEvt.read_data()\n time.sleep(5)\n if mbEvt.value is not None:\n mbEvt.write_data(mbEvt.value)\n elif \"wo\" in mbEvt.operation:\n if mbEvt.value is not None:\n mbEvt.write_data(mbEvt.value)\n else:\n\n mbEvt.read_data()\n\n time.sleep(5)\n except Exception as e:\n logger.info(\"Found error: %s\" % e)",
"def start_consume_memory(duthost, container):\n mem_size = container.mem_size_to_allocate()\n cmd = \"\"\"python3 -c 'import ctypes, time; arr = (ctypes.c_uint8 * {})(); time.sleep(1000)'\"\"\".format(mem_size)\n logger.info(\"Executing python command to consume %s in %s container\", mem_size, container.name)\n docker_cmd = 'docker exec {} {} &'.format(container.name, cmd)\n duthost.shell(docker_cmd, module_ignore_errors=True)",
"def start_consuming(self):\n logger.debug('Issuing consumer related RPC commands')\n self.add_on_cancel_callback()\n self._consumer_tag = self._channel.basic_consume(self.queue, self.on_message)",
"def _run(self):\n for p in list(self.process.all_outputs(unique=True)):\n if p.output_type == 'Analyte':\n \n project = p.samples[0].project.name\n new_container_name = self.find_available_container(project, '96 well plate')\n p.container.name = new_container_name\n p.container.put()\n\n break",
"def _consumer(self):\n while True:\n self.progress_bar.next(0)\n if self.state == 'play':\n representation = self.managed_objects['representations'] \\\n .candidate(int(self.bandwidth))\n if representation:\n self.managed_objects['download'].add(representation)\n else:\n time.sleep(0.01)\n else:\n time.sleep(0.01)",
"def run(self):\n\t\tself.client.loop_start()\n\t\tself.discover_and_notify()\n\t\tself.publish()",
"def test_consume(self, mock_publish, mock_pika):\n work = mock.Mock()\n work.return_value = mock.MagicMock()\n consume(work, mock_pika, 'queue', 'routing')\n mock_pika.channel.assert_called()",
"def execute(self):\n\n # open the value first to allow the water to flow as soon as the pump runs\n try:\n self.valve.activate()\n except AttributeError:\n if self.valve is not None:\n GPIO.output(self.valve, GPIO.LOW)\n\n # start the pump\n self.pump.activate()\n\n # wait the wanted time\n time.sleep(self.duration)\n\n # close the valve first to shut down the flow as fast as possible\n try:\n self.valve.deactivate()\n except AttributeError:\n if self.valve is not None:\n GPIO.output(self.valve, GPIO.HIGH)\n\n # stop the pump\n self.pump.deactivate()",
"def test_add_vmdk(self, fake_consume_task):\n fake_dev = MagicMock()\n fake_the_vm = MagicMock()\n fake_the_vm.config.hardware.device = [fake_dev]\n disk_size = 1\n\n virtual_machine.add_vmdk(fake_the_vm, disk_size)\n\n self.assertTrue(fake_consume_task.called)",
"def run(self):\n print(\"-------------------------------------------------------\")\n print(\"error: procccessor is empty.\")\n print(\" Advice:\")\n print(\" procccessor is wrong type.\")\n print(\"-------------------------------------------------------\")",
"def start_consuming(self):\n self.logger.info('Issuing consumer related RPC commands')\n self.add_on_cancel_callback()\n if self._consumer_tag is None:\n self._consumer_tag = self._channel.basic_consume(self.on_message,\n self.queue)\n else:\n self._channel.basic_consume(self.on_message, self.queue, consumer_tag=self._consumer_tag)",
"def test_pvc_snapshot_performance(self, teardown_factory, pvc_size):\n\n tests_numbers = 3 # number of tests to run\n\n # Getting the total Storage capacity\n ceph_cluster = CephCluster()\n ceph_capacity = ceph_cluster.get_ceph_capacity()\n\n log.info(f'Total capacity size is : {ceph_capacity}')\n log.info(f'PVC Size is : {pvc_size}')\n log.info(f'Needed capacity is {int(int(pvc_size) * 5)}')\n if int(ceph_capacity) < int(pvc_size) * 5:\n log.error(\n f'PVC size is {pvc_size}GiB and it is too large for this system'\n f' which have only {ceph_capacity}GiB'\n )\n return\n # Calculating the file size as 25% of the PVC size\n # in the end the PVC will be 75% full\n filesize = self.pvc_obj.size * 0.25\n # Change the file size to MB and from int to str\n file_size = f'{int(filesize * 1024)}M'\n\n snap_yaml = constants.CSI_RBD_SNAPSHOT_YAML\n if self.interface == constants.CEPHFILESYSTEM:\n snap_yaml = constants.CSI_CEPHFS_SNAPSHOT_YAML\n\n all_results = []\n\n for test_num in range(tests_numbers):\n test_results = {\n 'test_num': test_num + 1,\n 'dataset': (test_num + 1) * filesize * 1024, # size in MiB\n 'create': {'time': None, 'speed': None},\n 'restore': {'time': None, 'speed': None},\n }\n log.info(f'Starting test phase number {test_num}')\n # Step 1. Run I/O on a pod file.\n file_name = f'{self.pod_obj.name}-{test_num}'\n log.info(f'Starting IO on the POD {self.pod_obj.name}')\n # Going to run only write IO to fill the PVC for the snapshot\n self.pod_obj.fillup_fs(size=file_size, fio_filename=file_name)\n\n # Wait for fio to finish\n fio_result = self.pod_obj.get_fio_results()\n err_count = fio_result.get('jobs')[0].get('error')\n assert err_count == 0, (\n f\"IO error on pod {self.pod_obj.name}. \"\n f\"FIO result: {fio_result}\"\n )\n log.info('IO on the PVC Finished')\n\n # Verify presence of the file\n file_path = pod.get_file_path(self.pod_obj, file_name)\n log.info(f\"Actual file path on the pod {file_path}\")\n assert pod.check_file_existence(self.pod_obj, file_path), (\n f\"File {file_name} doesn't exist\"\n )\n log.info(f\"File {file_name} exists in {self.pod_obj.name}\")\n\n # Step 2. Calculate md5sum of the file.\n orig_md5_sum = pod.cal_md5sum(self.pod_obj, file_name)\n\n # Step 3. Take a snapshot of the PVC and measure the time of creation.\n snap_name = self.pvc_obj.name.replace(\n 'pvc-test', f'snapshot-test{test_num}'\n )\n log.info(f'Taking snapshot of the PVC {snap_name}')\n snap_obj = pvc.create_pvc_snapshot(\n self.pvc_obj.name,\n snap_yaml,\n snap_name,\n helpers.default_volumesnapshotclass(self.interface).name,\n )\n snap_obj.ocp.wait_for_resource(\n condition='true', resource_name=snap_obj.name,\n column=constants.STATUS_READYTOUSE, timeout=60\n )\n teardown_factory(snap_obj)\n snap_con_name = snap_obj.ocp.get(\n resource_name=snap_name,\n out_yaml_format=True\n )[\"status\"][\"boundVolumeSnapshotContentName\"]\n log.info(f'Snap content is {snap_con_name}')\n test_results['create']['time'] = helpers.measure_snapshot_creation_time(\n self.interface, snap_obj.name, snap_con_name\n )\n test_results['create']['speed'] = int(\n test_results['dataset'] / test_results['create']['time']\n )\n log.info(f' Test {test_num} dataset is {test_results[\"dataset\"]} MiB')\n log.info(f'Snapshot creation time is : {test_results[\"create\"][\"time\"]} sec.')\n log.info(f'Snapshot speed is : {test_results[\"create\"][\"speed\"]} MB/sec')\n\n # Step 4. 
Restore the PVC from the snapshot and measure the time\n # Same Storage class of the original PVC\n sc_name = self.pvc_obj.backed_sc\n\n # Size should be same as of the original PVC\n pvc_size = str(self.pvc_obj.size) + \"Gi\"\n\n # Create pvc out of the snapshot\n # Both, the snapshot and the restore PVC should be in same namespace\n\n log.info('Restoring from the Snapshot')\n restore_pvc_name = self.pvc_obj.name.replace(\n 'pvc-test', f'restore-pvc{test_num}'\n )\n restore_pvc_yaml = constants.CSI_RBD_PVC_RESTORE_YAML\n if self.interface == constants.CEPHFILESYSTEM:\n restore_pvc_yaml = constants.CSI_CEPHFS_PVC_RESTORE_YAML\n\n log.info('Resorting the PVC from Snapshot')\n restore_pvc_obj = pvc.create_restore_pvc(\n sc_name=sc_name, snap_name=snap_obj.name,\n namespace=snap_obj.namespace, size=pvc_size,\n pvc_name=restore_pvc_name,\n restore_pvc_yaml=restore_pvc_yaml\n )\n helpers.wait_for_resource_state(\n restore_pvc_obj,\n constants.STATUS_BOUND,\n timeout=3600 # setting this to 60 Min. since it can be take long time to restore,\n # and we want it to finished.\n )\n teardown_factory(restore_pvc_obj)\n restore_pvc_obj.reload()\n log.info('PVC was restored from the snapshot')\n test_results['restore']['time'] = helpers.measure_pvc_creation_time(\n self.interface, restore_pvc_obj.name\n )\n test_results['restore']['speed'] = int(\n test_results['dataset'] / test_results['restore']['time']\n )\n log.info(f'Snapshot restore time is : {test_results[\"restore\"][\"time\"]}')\n log.info(f'restore sped is : {test_results[\"restore\"][\"speed\"]} MB/sec')\n\n # Step 5. Attach a new pod to the restored PVC\n restore_pod_obj = helpers.create_pod(\n interface_type=self.interface, pvc_name=restore_pvc_obj.name,\n namespace=snap_obj.namespace,\n pod_dict_path=constants.NGINX_POD_YAML\n )\n\n # Confirm that the pod is running\n helpers.wait_for_resource_state(\n resource=restore_pod_obj,\n state=constants.STATUS_RUNNING\n )\n teardown_factory(restore_pod_obj)\n restore_pod_obj.reload()\n\n # Step 6. Verify that the file is present on the new pod also.\n log.info(\n f\"Checking the existence of {file_name} \"\n f\"on restore pod {restore_pod_obj.name}\"\n )\n assert pod.check_file_existence(restore_pod_obj, file_path), (\n f\"File {file_name} doesn't exist\"\n )\n log.info(f\"File {file_name} exists in {restore_pod_obj.name}\")\n\n # Step 7. 
Verify that the md5sum matches\n log.info(\n f\"Verifying that md5sum of {file_name} \"\n f\"on pod {self.pod_obj.name} matches with md5sum \"\n f\"of the same file on restore pod {restore_pod_obj.name}\"\n )\n assert pod.verify_data_integrity(\n restore_pod_obj,\n file_name,\n orig_md5_sum\n ), 'Data integrity check failed'\n log.info(\"Data integrity check passed, md5sum are same\")\n\n all_results.append(test_results)\n\n # logging the test summery, all info in one place for easy log reading\n c_speed, c_runtime, r_speed, r_runtime = (0 for i in range(4))\n log.info('Test summery :')\n for tst in all_results:\n c_speed += tst['create']['speed']\n c_runtime += tst['create']['time']\n r_speed += tst['restore']['speed']\n r_runtime += tst['restore']['time']\n log.info(\n f\"Test {tst['test_num']} results : dataset is {tst['dataset']} MiB\"\n f\"Take snapshot time is {tst['create']['time']} \"\n f\"at {tst['create']['speed']} MiB/Sec \"\n f\"Restore from snapshot time is {tst['restore']['time']} \"\n f\"at {tst['restore']['speed']} MiB/Sec \"\n )\n log.info(f' Average snapshot creation time is {c_runtime / tests_numbers} sec.')\n log.info(f' Average snapshot creation speed is {c_speed / tests_numbers} MiB/sec')\n log.info(f' Average snapshot restore time is {r_runtime / tests_numbers} sec.')\n log.info(f' Average snapshot restore speed is {r_speed / tests_numbers} MiB/sec')",
"def run(self):\n self.run_mc()",
"def test_stop_start(self):\r\n log.info(\"CONFIG: %s\", self._agent_config())\r\n self.create_sample_data_set_dir('node59p1_step2.dat', TELEM_DIR, \"node59p1.dat\")\r\n\r\n self.assert_initialize(final_state=ResourceAgentState.COMMAND)\r\n\r\n # Slow down processing to 1 per second to give us time to stop\r\n self.dataset_agent_client.set_resource({DriverParameter.RECORDS_PER_SECOND: 1})\r\n self.assert_start_sampling()\r\n\r\n # Verify we get one sample\r\n try:\r\n # Read the first file and verify the data\r\n result = self.data_subscribers.get_samples(DataParticleType.CONTROL, 1)\r\n result1 = self.data_subscribers.get_samples(DataParticleType.SAMPLE, 3)\r\n result.extend(result1)\r\n\r\n log.debug(\"RESULT: %s\", result)\r\n\r\n # Verify values\r\n self.assert_data_values(result, 'test_data_1-2.txt.result.yml')\r\n\r\n # Setup for Recovered\r\n self.create_sample_data_set_dir('SAMI_P0080_180713_integration_control_ph.txt', RECOVERED_DIR)\r\n # Read the first recovered file\r\n result = self.data_subscribers.get_samples(RecoveredDataParticleType.METADATA, 2)\r\n # Note - increase default timeout for Instrument particles\r\n result1 = self.data_subscribers.get_samples(RecoveredDataParticleType.INSTRUMENT, 10, 20)\r\n result.extend(result1)\r\n\r\n # Verify Recovered values\r\n self.assert_data_values(result, 'SAMI_P0080_180713_control_ph.yml')\r\n\r\n self.assert_sample_queue_size(DataParticleType.CONTROL, 0)\r\n self.assert_sample_queue_size(DataParticleType.SAMPLE, 0)\r\n self.assert_sample_queue_size(RecoveredDataParticleType.METADATA, 0)\r\n self.assert_sample_queue_size(RecoveredDataParticleType.INSTRUMENT, 0)\r\n\r\n # Second part of test\r\n\r\n self.create_sample_data_set_dir('node59p1_step4.dat', TELEM_DIR, \"node59p1.dat\")\r\n # Now read the first record of the second file then stop\r\n result = self.data_subscribers.get_samples(DataParticleType.SAMPLE, 3)\r\n log.debug(\"RESULT 1: %s\", result)\r\n\r\n # Stop sampling\r\n self.assert_stop_sampling()\r\n self.assert_sample_queue_size(DataParticleType.CONTROL, 0)\r\n self.assert_sample_queue_size(DataParticleType.SAMPLE, 0)\r\n\r\n # Restart sampling and ensure we get the last records of the file\r\n self.assert_start_sampling()\r\n result2 = self.data_subscribers.get_samples(DataParticleType.SAMPLE, 3)\r\n log.debug(\"RESULT 2: %s\", result2)\r\n result.extend(result2)\r\n log.debug(\"RESULT: %s\", result)\r\n self.assert_data_values(result, 'test_data_3-4.txt.result.yml')\r\n self.assert_sample_queue_size(DataParticleType.CONTROL, 0)\r\n self.assert_sample_queue_size(DataParticleType.SAMPLE, 0)\r\n\r\n # Test recovered file\r\n self.create_sample_data_set_dir('SAMI_P0080_180713_integration_control_ph_2.txt', RECOVERED_DIR)\r\n # Now read the first three records of the second recovered file then stop\r\n result = self.data_subscribers.get_samples(RecoveredDataParticleType.METADATA, 2)\r\n result1 = self.data_subscribers.get_samples(RecoveredDataParticleType.INSTRUMENT, 1)\r\n result.extend(result1)\r\n\r\n # Stop sampling\r\n self.assert_stop_sampling()\r\n\r\n # Restart sampling and ensure we get the last records of the file\r\n self.assert_start_sampling()\r\n result2 = self.data_subscribers.get_samples(RecoveredDataParticleType.INSTRUMENT, 5)\r\n result.extend(result2)\r\n self.assert_data_values(result, 'SAMI_P0080_180713_control_ph_2.yml')\r\n self.assert_sample_queue_size(RecoveredDataParticleType.METADATA, 0)\r\n self.assert_sample_queue_size(RecoveredDataParticleType.INSTRUMENT, 0)\r\n\r\n except SampleTimeout as 
e:\r\n log.error(\"Exception trapped: %s\", e, exc_info=True)\r\n self.fail(\"Sample timeout.\")",
"def test_producer_process(self):\n pp = ProducerProcess(prod_function, {'add': 5}, self.queue, self.pipe[0])\n pp.start()\n pp.join()\n\n # The queue is the correct size.\n self.assertTrue(self.queue.qsize() == NUM_RESULTS)\n\n # All of the results are in the range.\n while self.queue.qsize() > 0:\n result = self.queue.get()\n self.assertTrue(result in range(1, 11))\n\n # Producer's range is NUM_RESULTS + 1 (add variable).\n self.assertTrue(len(self.pipe[1].recv()[\"result\"]) == NUM_RESULTS + 1)",
"def main():\n machine = get_this_machine()\n\n while True:\n # TODO: write a better method to update machine data\n update_data(machine)\n dispatch_info(machine)\n print 'Data dispatched'\n time.sleep(DELTA)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Creates a list that has `length` number of elements, and each element is the integer 1. Returns the list.
|
def create_ones_list(length):
    return [1] * length
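A quick usage check of the function above:

print(create_ones_list(4))  # [1, 1, 1, 1]
print(create_ones_list(0))  # []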
|
[
"def build_list(length):\n return build_list_with_step(length, 1)",
"def count(length):\n return list(range(length))",
"def build_list_with_step(length, step):\n lst = []\n i = 0\n while len(lst) < length:\n if i % step == 0:\n lst.append(i)\n i += 1\n return lst",
"def zeros(size):\n return [0] * size",
"def mkOnes(length):\n\n zeroes = ''\n for i in xrange(length):\n zeroes = zeroes + '1'\n return zeroes",
"def GenRandomSet(length, width = 10.0):\r\n X = []\r\n for i in range(length):\r\n X.append((np.random.rand(2)-1.)*width/2.0)\r\n return X",
"def get_empty_array(n):\n r = ['' for i in range(n)]\n \n return r",
"def normalize_length(_list, length, cut_type='tail'):\n real_length = len(_list)\n if real_length == 0:\n return [0] * length, 0\n\n if real_length <= length:\n if not isinstance(_list[0], list):\n _list.extend([0] * (length - real_length))\n else:\n _list.extend([[]] * (length - real_length))\n return _list, real_length\n\n if cut_type == 'head':\n return _list[:length], length\n if cut_type == 'tail':\n return _list[-length:], length",
"def make_dict_lists(length):\n new_dict = {}\n for idx in range(length):\n new_dict[idx] = [0 for dummy_idx in range(idx)]\n # new_dict[idx] = [0] * idx\n return new_dict",
"def append_blank_elements(some_list, desired_length):\n if len(some_list) < desired_length:\n number = desired_length - len(some_list)\n blank = [''] * number\n some_list.extend(blank)\n return some_list",
"def length(xs):\n T = xs.get_type().args[0]\n return Const(\"length\", TFun(ListType(T), NatType))(xs)",
"def generateBoolListIndividual(length):\r\n from random import choices\r\n individual = choices([True, False], k = length)\r\n return individual",
"def make_dict_lists(length):\n dict_answer = {}\n for idx in range(length):\n dict_answer[idx] = [0] * idx\n return dict_answer",
"def generateDiscreteListIndividual(length, options):\r\n from random import choices\r\n individual = choices(options, k = length)\r\n return individual",
"def random_list(length, input):\n input = list(input) if input == \"dr\" else list(map(int, list(input)))\n return random.choice(input, size=length)",
"def test_non_empty_immutable_sequence() -> None:\n assert len(wrap([0])) == 1\n assert bool(wrap([0])) is True\n assert list(wrap([0])) == [0]",
"def test_empty_immutable_sequence() -> None:\n assert len(wrap([])) == 0\n assert bool(wrap([])) is False\n assert list(wrap([])) == []",
"def slices(sequence, length):\n\n if length > len(sequence) or length == 0:\n raise ValueError('Length is greater than sequence')\n\n slices = []\n\n index_counter = 0\n\n while index_counter + length <= len(sequence):\n slices.append([int(i) for i in sequence[index_counter:index_counter+length]])\n index_counter += 1\n\n return slices",
"def _make_typed_list_for_neighbors(size):\n res = TypedList()\n\n # Creates a prototype for the inner lists, with type inference via append-and-pop.\n proto_list = TypedList()\n proto_list.append(np_ncount_t(0))\n proto_list.pop()\n for i in range(size):\n res.append(proto_list.copy()) # Just copy the empty prototype\n return res"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns True if the length of the list is even. Returns False otherwise.
|
def is_even(values):
    return len(values) % 2 == 0
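A quick usage check of the function above with a few example lists:

print(is_even([1, 2, 3, 4]))  # True
print(is_even([1, 2, 3]))     # False
print(is_even([]))            # True, since zero is even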
|
[
"def is_even(x):\n return True",
"def is_even(name):\n \n return get_name_length(name) % 2 == 0",
"def even_number_of_evens(numbers):\n\n # Check to see if the list is empty\n if numbers == []:\n return False\n else:\n # Set a `number_of_evens` variable that will be incremented each time\n # an even number is found\n evens = 0\n \n # Iterate of over each item and if it's an even number, increment the\n # `evens` variable\n for number in numbers:\n if number % 2 == 0:\n evens += 1\n \n if evens == 0:\n return False\n else:\n return evens % 2 == 0",
"def isEven(number: int) -> bool:\n return ((number % 2) == 0)",
"def ends_with_even(a:int) -> bool:\n x = a % 10\n return(not x % 2)",
"def check_if_number_even(n):\n if (n % 2) == 0:\n return True\n else:\n return False",
"def is_odd(n):\r\n\treturn n % 2 != 0",
"def even_only(list):\n x = []\n for i in range(len(list)):\n if list[i] %2 == 0:\n x.append(list[i])\n return x",
"def even_only(list):\n new_list = []\n for i in list:\n if i % 2 == 0:\n new_list.append(i)\n return new_list",
"def is_even(k):\n if k == 0:\n return True\n elif abs(k) == 1:\n return False\n else:\n return is_even(abs(k) - 2)",
"def ends_with_odd(a:int) -> bool:\n x = a % 10\n return(bool(x % 2))",
"def odd_len(self):\n return len(list(filter(lambda x: x == 1, self.parity_vector)))",
"def find_an_even(l):\n for e in l:\n if e%2 == 0:\n return e\n raise ValueError('Argument does not contain a even number.')",
"def all_even(number_list):\n\n # do the opposite of the above function (this time, find positive #s)\n\n even_elements = [num for num in number_list if num % 2 == 0]\n return even_elements",
"def _two_pairs(self, cards: List[Card]) -> bool:\n counts_per_rank = self._get_counts_per_rank(cards)\n return list(counts_per_rank).count(2) == 2",
"def is_evenly_divisible(i, N):\n for j in range(1, N+1):\n if i % j != 0:\n return False\n return True",
"def oddpairs(seq):\n pairs = itertools.combinations(seq, 2)\n return any(isoddproduct(a,b) for a,b in pairs)",
"def squares_of_even_elements_in_odd_positions(list_arg):\n\n new_list = []\n for i in range(len(list_arg)):\n if i % 2 != 0 and list_arg[i] % 2 == 0:\n new_list.append(list_arg[i] ** 2)\n return new_list",
"def all_even(number_list):\n even_numbers = []\n for item in number_list:\n if item % 2 == 0:\n #modulo: if you can divide it by two and there is no remainder\n even_numbers.append(item)\n\n return even_numbers"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
If the length of string_list is even, return string_list without changing anything. If the length is not even, append the string "SIKE" to the end of string_list, then return string_list.
|
def make_even(string_list):
    if len(string_list) % 2 != 0:
        string_list.append("SIKE")
    return string_list
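A quick usage check of the function above; note that an odd-length input is mutated in place as well as returned:

print(make_even(["a", "b"]))       # ['a', 'b']
print(make_even(["a", "b", "c"]))  # ['a', 'b', 'c', 'SIKE']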
|
[
"def format_list(my_list):\n my_list[-1] = \"and \" + my_list[-1] #add the and requirement to appear before the last item\n print(my_list, type(my_list))\n new_even_list = my_list[1::2]\n print(new_even_list, type(new_even_list))\n formated_string = \", \".join(new_even_list)\n print(formated_string, type(formated_string)) \n #last_item = my_list.pop()\n #my_list.insert(0,last_item)\n return(formated_string)",
"def even_only(list):\n new_list = []\n for i in list:\n if i % 2 == 0:\n new_list.append(i)\n return new_list",
"def even_only(list):\n x = []\n for i in range(len(list)):\n if list[i] %2 == 0:\n x.append(list[i])\n return x",
"def intersperse(lst, item, prepend_if_nonzero=False):\n result = [item] * (len(lst) * 2 - 1)\n result[0::2] = lst\n if len(lst) > 0 and prepend_if_nonzero:\n result = [item] + result\n return result",
"def _shorten_list(self, s):\n\n new_lst = []\n\n if isinstance(self._list[0], list):\n for inclusion_report in self._list:\n word = inclusion_report[0]\n new_lst.append(word)\n self._list = new_lst\n new_lst = []\n\n for word in self._list:\n inclusion_report = self._does_include(s, word)\n if inclusion_report:\n new_lst.append(inclusion_report)\n\n self._list = new_lst",
"def all_even(number_list):\n even_numbers = []\n for item in number_list:\n if item % 2 == 0:\n #modulo: if you can divide it by two and there is no remainder\n even_numbers.append(item)\n\n return even_numbers",
"def squares_of_even_elements_in_odd_positions(list_arg):\n\n new_list = []\n for i in range(len(list_arg)):\n if i % 2 != 0 and list_arg[i] % 2 == 0:\n new_list.append(list_arg[i] ** 2)\n return new_list",
"def mergeStringLists(firstList: list, secondList: list) -> list:\n\n\t\t\tif not secondList: return firstList\t#nothing to add\n\t\t\tif not firstList: return [i for i in secondList]\t#return copy of secondList\n\n\t\t\tfor curString in secondList:\n\t\t\t\tif not curString in firstList:\n\t\t\t\t\twarnOnCloseMatch(curString, firstList)\n\t\t\t\t\tfirstList.append(curString)\n\t\t\treturn firstList",
"def middle(list):\n new_list = list[1:-1]\n return new_list",
"def both_ends(s): \n l = len(s)\n new_s = '' \n \n if l < 2:\n return new_s\n else:\n new_s = s[0] + s[1] + s[l-2] + s[l-1]\n return new_s",
"def getWordsWithSameEnd(word, wordList, n):\n wordst=[]#initializes the list\n \n\n for name in wordList:\n \n if word=='existt':\n name[-n:],word[-n:],n,'gghhh'\n \n if name[-n:]==word[-n:]:\n wordst.append(name)#appends words with last n letters\n return wordst",
"def every_other_item(my_list):\n # return a slice of the list that skips every 2nd number\n\n every_other_item = my_list[::2]\n \n return every_other_item",
"def concat_list(str_list):\r\n new_string = '' #this empty string will fill up with\r\n # strings\r\n for component in str_list:\r\n new_string += component + ' '#a new component is added to\r\n #the string every time.\r\n return(new_string[:-1]) #deletes the space at the end\r",
"def all_even(number_list):\n\n # do the opposite of the above function (this time, find positive #s)\n\n even_elements = [num for num in number_list if num % 2 == 0]\n return even_elements",
"def middle(list):\n new = list[1:-1]\n return new",
"def separateLists(original_list):\n\teL = []\t# even list\n\toL = [] # odd list\n\tfor num in original_list: # iterate over list\n\t\tif num % 2: # evals true if # odd\n\t\t\toL.append(num) # add odd number to oL\n\t\telse: # if even\n\t\t\teL.append(num) # add even number to eL\n\t\tif DFLAG: print(\"SL num val: \", num) # debug msg\n\n\tif DFLAG: print(\"SL RV's\\neL= \",eL,\"\\noL= \",oL) # debug msg\n\treturn eL,oL # return sorted lists",
"def list_to_string(input_list, seperator):\n output = input_list[0]\n for item in input_list[1:]:\n output = string_concatenator(output, item, seperator)\n return output",
"def reserve_list(input_strings: list) -> list:\n new_list = []\n reverse = True\n for elem in input_strings:\n if elem == \"python\":\n reverse = False\n elif elem == \"java\":\n reverse = True\n if reverse:\n if elem == \"java\":\n new_list.append(elem)\n else:\n new_list.append(elem[::-1])\n else:\n new_list.append(elem)\n new_list = new_list[::-1]\n return new_list",
"def reverse_pair(sentence):\n list = sentence.split()\n l = []\n for s in list:\n l = [s] + l\n return ' '.join(l)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Counts how many times `target` appears in `values` and returns an int.
|
def count_value_1(values, target):
    return values.count(target)
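A quick usage check of the function above:

print(count_value_1([1, 2, 2, 3, 2], 2))  # 3
print(count_value_1(["a", "b"], "z"))     # 0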
|
[
"def count_in_sorted(arr, target, target_inc):\n return lowest_index(arr, target_inc) - lowest_index(arr, target)",
"def get_target_counts(camera, target_scaling, scaling_tolerance):\n try:\n bit_depth = camera.bit_depth.to_value(u.bit)\n except NotImplementedError:\n bit_depth = 16\n\n target_counts = int(target_scaling * 2 ** bit_depth)\n counts_tolerance = int(scaling_tolerance * 2 ** bit_depth)\n\n return target_counts, counts_tolerance",
"def incrementCount(self, label, label_value, target_value):\n self.count[ label ][ label_value ][ target_value ] += 1",
"def count_equal(input_list: List[A], target: A) -> int:\n\n count = 0\n for n in input_list:\n if n == target:\n count += 1\n\n return count",
"def index(values:List[int], target_value:int) -> int:\n return binary_search(\n ordered_indexed_values = sorted(\n enumerate(values),\n key=lambda index_value_pair:index_value_pair[-1]\n ),\n target_value=target_value\n )",
"def search_sorted_1(data, target):\n count = 0\n for item in data:\n count += 1\n if item == target:\n return count # Found it\n return count # Didn't find it",
"def collect_reducer_count(values):\n return len(values)",
"def pairs_with_sum(nums, target):\n frequency = {}\n counter = 0\n\n for i in nums:\n if i not in frequency:\n frequency[i] = 1\n else:\n frequency[i] += 1\n\n for i in frequency:\n find = target - i\n\n if (i == find and frequency[i] <= 1):\n continue\n\n if find in frequency and frequency[find] != 0 and frequency[i] != 0:\n appearances = min(frequency[i], frequency[find])\n\n if find == i:\n appearances //= 2\n\n counter += appearances\n frequency[find] -= appearances\n frequency[i] -= appearances\n\n return counter",
"def target_sizes(self):\n return Counter(self.targets.values())",
"def get_num_gt(target):\n for i in range(50):\n if target[i][1] == 0:\n return i\n\n raise ValueError(\"Target should have 0 value\")",
"def compare_counts(result, target, delta=0):\n # Don't use get_counts method which converts hex\n output = result.data(0)[\"counts\"]\n assertDictAlmostEqual(output, target, delta=delta)",
"def _map_to_counts(regions, targets, idx_map):\n num_targets = len(np.unique(targets))\n mapping = {region: np.zeros(num_targets) for region in np.unique(regions)}\n for region in mapping:\n region_targets = targets[idx_map[region]]\n for t in region_targets:\n mapping[region][t] += 1\n return mapping",
"def count(self, value: object) -> int:\n\n # Initialize variables\n cur_node = self.sentinel.next\n count = 0\n\n # Iterate through list, adding 1 to count for each time value is found.\n while cur_node != self.sentinel:\n if cur_node.value == value:\n count += 1\n\n cur_node = cur_node.next\n\n return count",
"def count(self, key):\n number = 0\n\n for value in self._values:\n if key == value:\n number += 1\n return number",
"def count(s,value):\n\ttotal = 0\n\tfor x in s:\n\t\tif x == value:\n\t\t\ttotal = total +1\n\treturn total",
"def countSubStringMatch(target,key):\r\n \r\n target0 = target\r\n instances = 0\r\n x = 0\r\n y = 0\r\n while(x!=-1):\r\n x=find(target,key,y)\r\n if(x==-1):\r\n print 'Number of times that ', key,' appears in ',target0, 'is:',instances\r\n return instances\r\n\r\n else:\r\n instances+=1\r\n y=x\r\n\r\n return None",
"def count(self, value: _T) -> int:\n return self._data.count(value)",
"def count(self, value: int) -> int:\n count = 0\n current = self.head\n\n while current:\n if current.value == value:\n count += 1\n current = current.next\n\n return count",
"def count_values(dic):\n values = dic.values()\n check = []\n count = 0\n for i in values:\n if i not in check:\n count += 1\n check.append(i)\n return count"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Constructor function initializes object with title and year
|
def __init__(self, title, year):
self.title = title
self.year = year
    # id is a field that is required later for rendering the website
self.id = "-".join(title.split())
|
[
"def __init__(self,season,year):\n seasondict = buildseasondict(season,year)\n self.season = seasondict[\"season\"]\n self.year = seasondict[\"year\"]",
"def __init__(self, author, title):\r\n self.author = author\r\n self.title = title",
"def __init__(self, year: int, start_m: int = 0, end_m: int = 11):\n self._year = year\n self._first = year_starts_on(year)\n self._start_m = min(start_m, end_m)\n self._end_m = max(start_m, end_m)",
"def __init__(self, source: str, year: int, bare: bool) -> None:\n if not source and not year:\n self.data = self._generate_random_data()\n self.year = self._get_year_from_data()\n elif source and not year:\n self.data = self._parse_file(source)\n self.year = self._get_year_from_data()\n elif not source and year:\n if calendar.isleap(year):\n self.data = self._generate_random_data(days=366)\n else:\n self.data = self._generate_random_data()\n self.year = year\n else: # if there are both data & year\n self.data = self._parse_file(source)\n self.year = year\n self.match_data_and_year()\n\n if bare:\n self.week_load = False\n else:\n self.week_load = True",
"def __init__(self, section_title,articles_raw,date):\n self.section_title = section_title\n self.articles_raw = articles_raw\n self.date = date",
"def __init__(self,name,age,year):\r\n #Person.__init__(self,name, age)\r\n super().__init__(name,age)\r\n self.year = year",
"def __init__(self, gregorian_year):\n # from wikipedia (http://en.wikipedia.org/wiki/Sexagenary_cycle)\n self.solar_year = str(gregorian_year)\n self.year_in_cicle = str(int(gregorian_year) - (60 * (int(gregorian_year) - 3) // 60))\n self._table = LunarYear._get_year()",
"def __init__(self,year=2014):\n self.year = year\n self.df = salaries_preprocessing_by_year()\n self.df = self.df.reset_index(1)",
"def __init__(self, title=None, length=None):\n self.title = self.Title\n if title is not None:\n self.title = title\n\n self.mlength = self.Length\n if length is not None:\n self.mlength = length\n\n self.name = self.title.lower()",
"def __init__(\n\t\tself,\n\t\treal_title,\n\t\ttitle,\n\t\tlink):\n\t\t\n\t\tassert isinstance(real_title,str)\n\t\tassert isinstance(title,str)\n\t\tassert isinstance(link,str)\n\n\t\tself.real_title = real_title\n\t\tself.title = title\n\t\tself.link = link",
"def __init__(self, movie_title, movie_storyline, poster_image,\n trailer_youtube, director, release_date):\n self.title = movie_title\n self.storyline = movie_storyline\n self.poster_image_url = poster_image\n self.trailer_youtube_url = trailer_youtube\n self.director = director\n self.release_date = release_date",
"def __init__(self, data=None, titlefilter=''):\n self._titlefilter = titlefilter\n if data:\n self.parse(data)",
"def __init__(self, artist, title, minutes, seconds):\n\n self.title = title\n self.artist = artist\n self.minutes = minutes\n self.seconds = seconds",
"def __init__(self):\n try:\n HTMLParser.__init__(self)\n\n self.good_data = True\n\n self.title = None\n self.in_title = False\n self.is_same_month = False\n\n self.in_tbody = False\n self.in_abbr = False\n self.in_td = False\n\n self.year = None\n self.month = None\n\n self.tr_column_count = 3\n\n self.weather = {}\n self.weather_current_key = None\n\n self.temps_list = []\n\n except Exception as error:\n print(f\"WeatherScrapper::__init__::{error}\")",
"def __init__ (self, movie_title, movie_storyline, poster_image, trailer_youtube):\n self.title = movie_title\n self.storyline = movie_storyline\n self.poster_image_url = poster_image\n self.trailer_youtube_url = trailer_youtube",
"def __init__(\n self,\n dt_start=dt.datetime(1, 1, 1),\n dt_end=dt.datetime(1, 1, 1),\n months='',\n allow_whole_year=True,\n ):\n ###################################################\n # INPUT CHECK #\n ###################################################\n for d in (dt_start, dt_end):\n assert isinstance(d, dt.date)\n assert isinstance(allow_whole_year, bool)\n\n # months:\n if months.lower() == 'year':\n mon = 'jfmamjjasond'\n else:\n mon = months.lower()\n allowed_month_seqs = 'jfmamjjasondjfmamjjasond'\n allowed_months = (\n 'jan', 'feb', 'mar', 'apr', 'may', 'jun',\n 'jul', 'aug', 'sep', 'oct', 'nov', 'dec',\n )\n if not isinstance(mon, str):\n raise TypeError('months must be str.')\n if len(mon) == 1:\n msg = 'months must be a str of at least two month initial letters.'\n raise ValueError(msg)\n if len(mon) > 12:\n raise ValueError('months must not contain more than 12 letters.')\n if mon not in allowed_months:\n if mon[:3].lower() not in allowed_month_seqs:\n msg = 'Not a sequence of month initial letters: %s' % mon\n raise ValueError(msg)\n\n ###################################################\n # INITIALIZE #\n ###################################################\n # self.start will be in year 1\n # self.end will be between 0 and 1 year later than self.end, and will\n # thus be in year 1 or 2\n if mon == '':\n start = year_one(dt_start)\n end = year_one(dt_end)\n elif mon in allowed_month_seqs:\n first_month = allowed_month_seqs.find(mon) + 1\n last_month = (first_month + len(mon)) % 12\n if last_month == 0:\n last_month = 12\n start = dt.datetime(1, first_month, 1)\n end = dt.datetime(1, last_month, 1)\n elif mon[:3].lower() in allowed_months:\n first_month = allowed_months.index(mon) + 1\n last_month = (first_month + 1) % 12\n if last_month == 0:\n last_month = 12\n start = dt.datetime(1, first_month, 1)\n end = dt.datetime(1, last_month, 1)\n\n\n # make sure that end is not earlier than start:\n if end < start:\n end = end.replace(year=2)\n if end == start and allow_whole_year:\n end = end.replace(year=2)\n\n self.start = start\n self.end = end",
"def __init__(self, title, url, congress):\n self.title = title\n self.url = url\n self.congress = congress",
"def __init__(self, title, developer, publisher):\r\n self.title = title\r\n self.developer = developer\r\n self.publisher = publisher\r\n self.esrb = \"rp\" # Default value\r",
"def __init__(self, m_and_y, sales):\r\n self.month_and_year = m_and_y\r\n self.total_sales = sales"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets poster image of movie
|
def set_poster(self, poster):
self.poster = poster
|
[
"def update_poster_path(self, movie, poster_path):\n movie.poster_path = poster_path\n movie.save()",
"async def _poster(self, ctx, *, value=None):\r\n key = 'poster'\r\n # test key for url\r\n if ctx.message.server.id not in self.guilds:\r\n data = _unknown_guild(ctx)\r\n await self.bot.send_message(ctx.message.channel, embed=data)\r\n return\r\n\r\n data = self._get_embed(ctx)\r\n if len(ctx.message.attachments) > 0:\r\n print('alliance poster attachment detected')\r\n image = ctx.message.attachments[0]\r\n print(json.dumps(image))\r\n value = image['url']\r\n print(value)\r\n if value is None or value.lower() == \"none\":\r\n data = self._update_guilds(ctx, key, None)\r\n if send_request(value) is True:\r\n data = self._update_guilds(ctx, key, value)\r\n data.set_image(url=value)\r\n elif send_request(value) is False:\r\n data.title = 'Image Verification Failed:sparkles:'\r\n await self.bot.send_message(ctx.message.channel, embed=data)",
"def get_poster_url(self):\n return self.poster_url",
"def download_poster(movie, api_key=api_key, base_url=base_image_url, pics_folder=pics_folder):\n name = movie[\"name\"]\n poster_path = get_poster_url(name, api_key)\n filename = poster_path.split(\"/\")[1]\n URL = base_url + poster_path\n response = urlopen(URL)\n status = response.getcode()\n # print(status)\n img = response.read()\n file = open(pics_folder + filename, \"wb\")\n file.write(img)\n file.close\n return filename",
"def get_poster(self, url):\n poster = urllib2.urlopen(url).read()\n img_name = 'uploads/posters/%s' % url.split('/')[-1]\n f = open(img_name, 'wb')\n f.write(poster)\n f.close()\n return img_name",
"def setImage(self, path):\n\t\tpass",
"def set_image(self):\r\n self.sc.set_image()",
"def __init__ (self, movie_title, movie_storyline, poster_image, trailer_youtube):\n self.title = movie_title\n self.storyline = movie_storyline\n self.poster_image_url = poster_image\n self.trailer_youtube_url = trailer_youtube",
"def setImage(self, image, normalize = None):\n \n self.viewer.setImage(image, normalize)\n self.updateCaption()",
"def get_poster(url, store_path='.\\\\'):\n print('Getting poster url...')\n movie_page = requests.get(url).content\n posters_url = bs(movie_page, 'lxml').find('a', class_='nbgnbg')['href']\n posters_page = requests.get(posters_url).content\n poster_url = bs(posters_page, 'lxml').find('div', class_='cover').find('a')['href']\n poster_page = requests.get(poster_url).content\n pic_url = bs(poster_page, 'lxml').find('a', class_='mainphoto').find('img')['src']\n print('It is at {}'.format(pic_url))\n res = requests.get(pic_url)\n\n if not path.exists(store_path):\n makedirs(store_path)\n with open(path.join(store_path, 'poster.jpg'), 'wb') as f:\n f.write(res.content)\n\n print('The poster is stored at {}'.format(store_path))",
"def set_image_src(self, image_src):\n # load the image\n self.image_src = image_src\n self.image = simplegui.load_image(self.image_src)",
"def __init__(self, movie_title, movie_storyline, poster_image,\n trailer_youtube, director, release_date):\n self.title = movie_title\n self.storyline = movie_storyline\n self.poster_image_url = poster_image\n self.trailer_youtube_url = trailer_youtube\n self.director = director\n self.release_date = release_date",
"def set_camimage(self, camimg, scale = None):\n self._camimg = camimg\n if camimg is None:\n return\n self.render()",
"def getPosterImageURL(movie):\n\n valid = \"image/movies/\" + movie.id + \"/poster.png\"\n invalid = \"image/movies/0000 Unknown/poster.png\"\n\n return valid if os.path.isfile(valid) else invalid",
"def __init__(self, master, movie_data):\n super().__init__(master)\n self.transient()\n self.focus_set()\n\n # Display Movie Poster\n try:\n if movie_data['Poster'] != 'N/A':\n urllib.request.urlretrieve(movie_data['Poster'], \"poster.jpg\")\n image = Image.open(\"poster.jpg\")\n image = image.resize((160,240))\n else:\n image = Image.open(\"default_poster.jpg\")\n except urllib.error.HTTPError: # windows error\n image = Image.open(\"default_poster.jpg\")\n except urllib.error.URLError: # mac error\n image = Image.open(\"default_poster.jpg\")\n photo = ImageTk.PhotoImage(image)\n label = tk.Label(self, image=photo)\n label.image = photo # keep a reference!\n label.grid(row=0, column=0)\n\n # display all data\n Movie_Data_Frame = tk.Frame(self)\n year = str(movie_data['Year'])[:4]\n rating = str(movie_data['imdbRating'])\n runtime = movie_data['Runtime'].replace(' min', '')\n tk.Label(Movie_Data_Frame, text=movie_data['Title'], wraplength=300, font=('Helvetica',20)).grid(sticky='w')\n tk.Label(Movie_Data_Frame, text='Year: ' + year).grid(sticky='w')\n tk.Label(Movie_Data_Frame, text='imdb Rating: ' + rating).grid(sticky='w')\n tk.Label(Movie_Data_Frame, text='Runtime: ' + str(runtime) + ' mins').grid(sticky='w')\n tk.Label(Movie_Data_Frame, text='Plot: ' + movie_data['Plot'], wraplength=300, justify=tk.LEFT).grid(sticky='w')\n tk.Label(Movie_Data_Frame, text='Genre: ' + movie_data['Genre'], wraplength=300, justify=tk.LEFT).grid(sticky='w')\n tk.Label(Movie_Data_Frame, text='Director: ' + movie_data['Director']).grid(sticky='w')\n tk.Label(Movie_Data_Frame, text='Actors: ' + movie_data['Actors'], wraplength=300, justify=tk.LEFT).grid(sticky='w')\n tk.Label(Movie_Data_Frame, text='Awards: ' + movie_data['Awards'], wraplength=300, justify=tk.LEFT).grid(sticky='w')\n Movie_Data_Frame.grid(row=0, column=1)\n tk.Button(self, text='Save Movie', command=lambda : self.writeToFile(movie_data)).grid(sticky=\"nsew\")",
"def background_image(self, value: str):\r\n self._background_image = value",
"def setImageSink(self, widget=None):\n #self._logger.debug(\"gstPlayeR: Setting the image sink.\") \n \n # If no widget is given, set it to the default\n if not widget:\n widget = self.videoWindow\n\n self.videoWindowConfigure(widget)\n \n # Set the image sink\n self._player.setImgSink(widget)\n\n return False",
"def set_motion_image(self):\n\n\t\tcurrent_direction = self.all_images[self.direction]\n\t\tself.image = current_direction[self.motion_image]",
"def set_avatar(person, file_path, client=default):\n person = normalize_model_parameter(person)\n return raw.upload(\n \"/pictures/thumbnails/persons/%s\" % person[\"id\"],\n file_path,\n client=client\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets trailer of movie
|
def set_trailer(self, trailer):
self.trailer = trailer
|
[
"def get_trailer(movie_id, api_key):\n\n # initialize trailer variable with a placeholder YouTube video\n trailer = \"https://www.youtube.com/watch?v=D-CQVnuiR1I\"\n\n # structure the URL for the API request\n url = \"https://api.themoviedb.org/3/movie/\"\n url += str(movie_id) #\n url += \"/videos?api_key=\" + api_key\n\n # send the request\n request = requests.get(url)\n\n # check if the response is OK (200 status code)\n if request.status_code != 200:\n # return the placeholder trailer\n return trailer\n\n # convert the response to json format\n videos = request.json()\n\n # go through each video associated with the movie to find YouTube URL\n for video in videos['results']:\n # check if the video is a YouTube video and if it is a trailer\n if video['site'] == \"YouTube\" and video['type'] == \"Trailer\":\n trailer = \"http://youtube.com/watch?v=\" + video['key']\n break\n\n return trailer",
"def __init__ (self, movie_title, movie_storyline, poster_image, trailer_youtube):\n self.title = movie_title\n self.storyline = movie_storyline\n self.poster_image_url = poster_image\n self.trailer_youtube_url = trailer_youtube",
"def make_trailer(trailer_url):\n match = re.search('\\?v=(.*)', trailer_url)\n if match:\n return 'plugin://plugin.video.youtube/play/?video_id=%s' % (\n match.group(1))",
"def show_trailer(self):\n webbrowser.open(self.youtube)",
"def get_movie_trailer(movie_id):\n\n # Create the request url with the api key and the movie id\n video_url = 'https://api.themoviedb.org/3/movie/' + str(movie_id) \\\n + '/videos?api_key=' + str(API_KEY) + '&language=en-US'\n\n video_response = urllib.urlopen(video_url)\n\n # Convert the api response to json obj to extract the list movies form it\n video_content = json.loads(video_response.read())\n\n # Check if the movie hasn't any trailers return False\n if len(video_content['results']) < 1:\n return False\n\n # Otherwise if the movie has a trailer concatenate the youtube video key to\n # the youtube website and return it\n trailer_youtube_url = 'https://www.youtube.com/watch?v=' + \\\n video_content['results'][0]['key']\n return trailer_youtube_url",
"def __init__(self, movie_title, movie_storyline, poster_image,\n trailer_youtube, director, release_date):\n self.title = movie_title\n self.storyline = movie_storyline\n self.poster_image_url = poster_image\n self.trailer_youtube_url = trailer_youtube\n self.director = director\n self.release_date = release_date",
"def download_imdb_trailer(self, movie_id):\n #session = Session()\n\n try:\n response = requests.get(IMDB_TRAILER_URL % movie_id)\n root = html.fromstring(response.content)\n for a in root.xpath(\"//div[@class='search-results']/ol/li/div/a\"):\n vid = a.xpath(\"./@data-video\")[0]\n vurl = a.xpath(\"./@href\")[0]\n vpic = a.xpath(\"./img/@src\")[0]\n\n response = requests.get(vurl)\n container_url = root.xpath(\"//iframe[@id='video-player-container']/@src\")[0].strip()\n response = requests.get(container_url)\n root = html.fromstring(response.content)\n data = root.xpath(\"//script[@class='imdb-player-data']/text()\")[0]\n vdata = json.loads(data)\n for v in vdata['videoPlayerObject']['video']['videoInfoList']:\n if v['videoMimeType'] == 'video/mp4':\n video_url = v['videoUrl']\n break\n\n response = requests.get(video_url)\n filename = u\"%s/%s.mp4\" % (movie_id, vid)\n files = [('file', (filename, StringIO.StringIO(response.content))), ]\n response = requests.post(\"http://61.155.215.52:3000/upload\",\n files=files)\n if response.content != 'ok':\n raise Exception(\"Upload failure!\")\n\n sql = \"\"\"insert into trailer_source(movie_id, imdb_id, create_date, link)\n values(:movie_id, :imdb_id, :date, :link)\n \"\"\"\n data = {'movie_id': movie_id, 'imdb_id': vid, 'create_date': ''}\n session.execute(sql, data)\n except Exception as exc:\n raise self.retry(exc=exc, countdown=60)\n\n session.commit()\n\n return filename, response.content",
"def add_movie(self, title, alt_title, year, video_id, build_url):\n title = re.sub(r'[?|$|!|:|#]', r'', title)\n movie_meta = '%s (%d)' % (title, year)\n folder = re.sub(r'[?|$|!|:|#]', r'', alt_title)\n dirname = self.nx_common.check_folder_path(\n path=os.path.join(self.movie_path, folder))\n filename = os.path.join(dirname, movie_meta + '.strm')\n progress = xbmcgui.DialogProgress()\n progress.create(self.kodi_helper.get_local_string(650), movie_meta)\n if xbmcvfs.exists(filename):\n return\n if not xbmcvfs.exists(dirname):\n xbmcvfs.mkdirs(dirname)\n if self.movie_exists(title=title, year=year) is False:\n progress.update(50)\n time.sleep(0.5)\n self.db[self.movies_label][movie_meta] = {'alt_title': alt_title}\n self._update_local_db(filename=self.db_filepath, db=self.db)\n url = build_url({'action': 'play_video', 'video_id': video_id})\n self.write_strm_file(path=filename, url=url, title_player=movie_meta)\n progress.update(100)\n time.sleep(1)\n progress.close()",
"def set_poster(self, poster):\n self.poster = poster",
"def update_poster_path(self, movie, poster_path):\n movie.poster_path = poster_path\n movie.save()",
"def video(self, value):\n self._video = value",
"def add_movie(self, new_movie):\r\n self.movies.append(Movie(new_movie[0], new_movie[1], new_movie[2], new_movie[3]))",
"def main_video():\n annotate_movie(\"project_video.mp4\", \"annotated_project_video.mp4\")\n # annotate_movie(\"challenge_video.mp4\", \"annotated_challenge_video.mp4\")",
"def write(self, frame):\n self.video_writer.write(frame)",
"async def set_footer(self, footer: str):\n self.preview_embed.set_footer(text=footer)",
"def stopWritingVideo(self):\n self.__videoFilename = None\n self.__videoEncoding = None\n self.__videoWriter = None",
"def populate_movie_details():\n\n toy_story = media.Movie(\n \"Toy story\",\n \"A story of a boy and his toys\",\n \"http://upload.wikimedia.org/wikipedia/en/1/13/Toy_Story.jpg\",\n \"https://www.youtube.com/watch?v=KYz2wyBy3kc\"\n )\n\n avatar = media.Movie(\n \"Avatar\",\n \"A marine on an alien planet\",\n \"http://upload.wikimedia.org/wikipedia/en/b/b0/Avatar-Teaser-Poster.jpg\",\n \"https://www.youtube.com/watch?v=d1_JBMrrYw8\"\n )\n\n sup = media.Movie(\n \"Up\",\n \"A house lifted by baloons\",\n \"http://upload.wikimedia.org/wikipedia/en/0/05/Up_%282009_film%29.jpg\",\n \"https://www.youtube.com/watch?v=pkqzFUhGPJg\"\n )\n\n interstellar = media.Movie(\n \"Interstellar\",\n \"Finding new life in space\",\n \"http://upload.wikimedia.org/wikipedia/en/b/bc/Interstellar_film_poster.jpg\",\n \"https://www.youtube.com/watch?v=nyc6RJEEe0U\"\n )\n\n big_hero_6 = media.Movie(\n \"Big Hero 6\",\n \"Boy genius builds robots and saves world\",\n \"http://upload.wikimedia.org/wikipedia/en/4/4b/Big_Hero_6_%28film%29_poster.jpg\",\n \"https://www.youtube.com/watch?v=8IdMPpKMdcc\"\n )\n\n the_lego_movie = media.Movie(\n \"The Lego Movie\",\n \"Everything is awesome, Everything is cool when you're part of a team!\",\n \"http://upload.wikimedia.org/wikipedia/en/1/10/The_Lego_Movie_poster.jpg\",\n \"https://www.youtube.com/watch?v=fZ_JOBCLF-I\"\n )\n\n movies = [toy_story, avatar, sup, interstellar, big_hero_6, the_lego_movie]\n\n return movies",
"def UpdateFile(moviefile_uncpath):\n\n global num_errors, verbose_mode\n\n utils.Msg(\"Updating file: \\\"\"+moviefile_uncpath+\"\\\" ...\", verbose_mode)\n\n movie = GetMovieInfo(moviefile_uncpath)\n if movie == None:\n return\n\n cv = \"\" # column=value list for UPDATE\n cv += \"idMovie = NULL, \"\n cv += \"idMediumType = 'FILE', \"\n cv += \"idStatus = %(status)s, \"\n cv += \"Uncut = %(fn_uncut)s, \"\n cv += \"Language = %(fn_language)s, \"\n cv += \"SubtitleLanguage = %(fn_subtitle_language)s, \"\n cv += \"Duration = %(duration_min)s, \"\n cv += \"idQuality = %(idVideoQuality)s, \"\n cv += \"DesiredDisplayAspectRatioWidth = %(fn_dar_width)s, \"\n cv += \"DesiredDisplayAspectRatioHeight = %(fn_dar_height)s, \"\n cv += \"DisplayAspectRatio = %(video_dar)s, \"\n cv += \"OriginalDisplayAspectRatio = %(video_dar_org)s, \"\n cv += \"idContainerFormat = %(idContainerFormat)s, \"\n cv += \"idVideoFormat = %(idVideoFormat)s, \"\n cv += \"VideoFormatProfile = %(video_format_profile)s, \"\n cv += \"VideoSamplingWidth = %(video_width)s, \"\n cv += \"VideoSamplingHeight = %(video_height)s, \"\n cv += \"VideoBitrate = %(video_bitrate_kbps)s, \"\n cv += \"VideoFramerate = %(video_framerate_fps)s, \"\n cv += \"idVideoFramerateMode = %(idVideoFramerateMode)s, \"\n # cv += \"VideoQualityFactor = '', \" # TBD: Get this value from MediaInfo\n cv += \"idAudioFormat = %(idAudioFormat)s, \"\n cv += \"AudioFormatProfile = %(audio_format_profile)s, \"\n # cv += \"idAudioChannelType = '', \" # TBD: Get this value from MediaInfo\n cv += \"TechnicalFlaws = %(fn_techcomm)s, \"\n cv += \"AudioBitrate = %(audio_bitrate_kbps)s, \"\n cv += \"idAudioBitrateMode = %(idAudioBitrateMode)s, \"\n cv += \"AudioSamplingRate = %(audio_samplingrate_hz)s, \"\n now = str(datetime.datetime.now())[0:19]\n cv += \"TSUpdated = '\"+now+\"', \"\n cv += \"TSVerified = '\"+now+\"', \"\n cv += \"Title = %(title)s, \"\n cv += \"ReleaseYear = %(year)s, \"\n cv += \"SeriesTitle = %(series_title)s, \"\n cv += \"EpisodeTitle = %(episode_title)s, \"\n cv += \"EpisodeId = %(episode_id)s, \"\n cv += \"FolderPath = %(folder_path)s\"\n\n # movie[\"fn_threed\"]+\", \"\n # movie[\"fn_partial\"]+\", \"\n\n sql = \"UPDATE Medium SET \"+cv+\" WHERE FilePath = %(file_path)s AND idCabinet = %(idCabinet)s\"\n\n medium_cursor = movies_conn.cursor(MySQLdb.cursors.Cursor)\n\n medium_cursor.execute(sql,movie)\n\n medium_cursor.close()\n\n movies_conn.commit()",
"def addFrameToMovie(frame, movie):\n # frame = None\n # movie = None\n # if a.__class__ == Movie:\n # movie = a\n # frame = b\n # else:\n # movie = b\n # frame = a\n\n if not (isinstance(movie,Movie) and isinstance(frame, str)):\n # if movie.__class__ != Movie or frame.__class__ != String:\n repValError(\"addFrameToMovie(frame, movie): frame is not a string or movie is not a Movie objectd\")\n\n movie.addFrame(frame)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Handles the Add Teacher button being clicked
|
def addTeacherBtn_clicked(self):
first = str(self.ui.firstNameLineEdit.text()).strip()
first = sanitize(first)
last = str(self.ui.lastNameLineEdit.text()).strip()
last = sanitize(last)
address = str(self.ui.addressLineEdit.text()).strip()
address = sanitize(address)
city = str(self.ui.cityLineEdit.text()).strip()
city = sanitize(city)
postal = str(self.ui.postalCodeLineEdit.text()).replace(" ", "")
postal = sanitize(postal)
postal = stripPostal(postal)
daytimePhone = str(self.ui.daytimePhoneLineEdit.text()).strip()
daytimePhone = sanitize(daytimePhone)
daytimePhone = stripPhoneNumber(daytimePhone)
eveningPhone = str(self.ui.eveningPhoneLineEdit.text()).strip()
eveningPhone = sanitize(eveningPhone)
eveningPhone = stripPhoneNumber(eveningPhone)
email = str(self.ui.emailLineEdit.text()).strip()
email = sanitize(email)
# Check for empty fields
if first is None or first == "":
QMessageBox.warning(self, 'Missing Field', 'Must have a First Name', QMessageBox.Ok)
return
if last is None or last == "":
QMessageBox.warning(self, 'Missing Field', 'Must have a Last Name', QMessageBox.Ok)
return
if email is None or email == "":
if QMessageBox.question(self, 'Missing Email', 'Are you sure you want to leave Email blank?', QMessageBox.Yes|QMessageBox.No) == QMessageBox.No:
return
# Check for valid fields
elif validEmail(email) == False:
QMessageBox.warning(self, 'Invalid Email', email + ' is not a valid email format', QMessageBox.Ok)
return
if validateName(first) == False:
if QMessageBox.question(self, 'Validate First Name', 'Are you sure \'' + first + '\' is correct?', QMessageBox.Yes|QMessageBox.No) == QMessageBox.No:
return
if validateName(last) == False:
if QMessageBox.question(self, 'Validate Last Name', 'Are you sure \'' + last + '\' is correct?', QMessageBox.Yes|QMessageBox.No) == QMessageBox.No:
return
# Check for duplicated teacher
tList = dbInteractionInstance.getTeachersWithName(first=first, last=last)
if len(tList) > 0:
s = ""
for t in tList:
s += "{0} {1}, email: {2}\n".format(t.first, t.last, t.email)
if QMessageBox.question(self, 'Possible Duplicate',
'This name exists in the database already:\n{0}\nDo you still want to add this person?'.format(s),
QMessageBox.Yes|QMessageBox.No) == QMessageBox.No:
return
self.teacher = Teacher(first, last, address, city, postal, daytimePhone, eveningPhone, email)
result = dbInteractionInstance.addTeacher(self.teacher)
if result == "":
QMessageBox.information(self, 'Add Teacher/Contact', 'Successfully added new teacher/contact', QMessageBox.Ok)
self.clearFields()
if self.closeAfterAdd:
self.accept()
else:
QMessageBox.critical(self, 'Add Teacher/Contact', 'Failed to add new teacher/contact\n{0}'.format(result), QMessageBox.Ok)
|
[
"def createNewTeacherBtn_clicked(self):\n dialog = AddTeacherDialog(testing=self.testing, closeAfterAdd=True)\n # For Modal dialog\n result = dialog.exec_()\n\n if result == True:\n t = dialog.getTeacher()\n self.ui.teacherLineEdit.setText(t.first + ' ' + t.last)\n self.teacherId = dbInteractionInstance.getLastTeacherId()",
"def chooseTeacherBtn_clicked(self):\n dialog = ChooseTeacherDialog()\n # For Modal dialog\n result = dialog.exec_()\n\n if result == True:\n self.teacherId = dialog.getTeacherId()\n # Use the id to get the name for displaychoose\n t = dbInteractionInstance.getTeacherFromId(self.teacherId)\n name = name = t.first + \" \" + t.last\n self.ui.teacherLineEdit.setText(name)",
"def editTeacherBtn_clicked(self):\n dialog = ChooseTeacherDialog()\n # For Modal dialog\n result = dialog.exec_()\n\n if result == True:\n teacherId = dialog.getTeacherId()\n # Open edit dialog with teacher\n dialog = EditTeacherDialog(teacherId=teacherId)\n dialog.exec_()",
"def addTeacherToCourse(self, teacher):\r\n self.extra_teachers.append(teacher)",
"def deleteTeacherBtn_clicked(self):\n dialog = ChooseTeacherDialog()\n # For Modal dialog\n result = dialog.exec_()\n\n if result == True:\n teacherId = dialog.getTeacherId()\n if QMessageBox.question(self, \"Cannot be undone!\", \"Warning! This action cannot be undone. \\nAre you sure you want to delete this teacher/contact?\", QMessageBox.Yes|QMessageBox.No) == QMessageBox.Yes:\n # Delete the teacher\n dbInteractionInstance.deleteTeacherFromId(teacherId)",
"def test24_add_teacher_by_name(self):\n self.group_page.CreateGroupWindow().select_teacher(TEST_TEACHERS_NAME)\n teachers_list = self.group_page.CreateGroupWindow(). \\\n get_values_from_added_teachers_list()\n self.assertIn(TEST_TEACHERS_NAME, teachers_list)",
"def test18_add_default_teacher(self):\n self.group_page.CreateGroupWindow().add_teacher()\n teachers_list = self.group_page.CreateGroupWindow(). \\\n get_values_from_added_teachers_list()\n self.assertIn(TEST_TEACHERS_NAME, teachers_list)",
"def add_teacher():\n\n teacher_fname = request.form.get('teacher_fname')\n teacher_lname = request.form.get('teacher_lname')\n teacher_email = request.form.get('teacher_email')\n teacher_phone = request.form.get('teacher_phone')\n teacher_password = request.form.get('teacher_password')\n\n # check to see if teacher already exists\n # more explicit, less complicated: get the first teacher that has the email\n teacher = db.session.query(Teacher).filter(Teacher.teacher_email == teacher_email).first()\n\n if teacher:\n return jsonify({'status': 'error- email already in use'})\n\n # now create the teacher:\n teacher = crud.create_teacher(teacher_fname, teacher_lname, teacher_email, teacher_phone, hash_input(teacher_password))\n\n if not teacher: \n return jsonify({'status': 'error-please try again'})\n\n return jsonify({'status': 'ok', 'full_name':teacher.full_name, 'email':teacher.teacher_email, 'pw':teacher.teacher_password})",
"def clearTeacherBtn_clicked(self):\n self.ui.teacherLineEdit.clear()\n self.teacherId = None",
"def teacher(request):\n if request.method == 'POST':\n form = TeacherForm(request.POST)\n if form.is_valid():\n return HttpResponseRedirect('/thanks/')\n elif request.user.is_authenticated:\n return redirect('menu:index')\n else:\n form = TeacherForm()\n\n return render(request, 'home/teacher.html', {'form': form})",
"def addTeacherCourse(self, course):\r\n self.courses.append(course)",
"def new_teacher(profile, teacher, student):\n triggering = profile.notify_joined_my_village\n data = {'teacher-id': teacher.id, 'student-id': student.id}\n _record(profile, types.NEW_TEACHER, triggering=triggering, data=data)",
"def __callUpdateSubjectTeacher(self): \r\n idGiven=input(\" Give ID:\")\r\n newTeacher=input(\" Give a new teacher:\") \r\n try:\r\n self.__table.createSubject(idGiven, \"subject\", newTeacher)\r\n self.__table.updateSubjectTeacher(idGiven, newTeacher)\r\n print(\"Teacher's name has been successfully changed.\")\r\n except InputError as ex:\r\n print(ex.getErrors())\r\n except IdNotFound as ex:\r\n print(ex.getErrors())",
"def createNewParticipantBtn_clicked(self):\n dialog = AddParticipantDialog(testing=self.testing, closeAfterAdd=True)\n # For Modal dialog\n result = dialog.exec_()\n\n if result == True:\n p = dialog.getParticipant()\n self.ui.participantLineEdit.setText(p.first + ' ' + p.last)\n self.participantId = dbInteractionInstance.getLastParticipantId()",
"def form_valid(self, form):\n form.instance.teacher = self.request.user\n return super(AddressCreate, self).form_valid(form)",
"def create(self, validated_data):\n return Teacher.objects.create(**validated_data)",
"def create_teacher(teacher_name):\r\n return Teacher(teacher_name)",
"def __callAddSubject(self):\r\n idSubject=input(\" Give ID:\")\r\n name=input(\" Give subject name:\")\r\n teacher=input(\" Give teacher's name:\") \r\n try:\r\n sub=self.__table.createSubject(idSubject,name,teacher)\r\n self.__table.addSubject(sub)\r\n print(\"Subject \"+sub.getName()+\" has been successfully added.\")\r\n except InputError as ex:\r\n print(ex.getErrors())\r\n except IdError as ex:\r\n print(ex.getErrors())",
"def student_add_form():\n\n return render_template(\"add_student.html\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Flushes the messages sent to the bot during downtime so that the bot does not start spamming when it comes back online.
|
def flush_messages(bot):
updates = bot.get_updates()
while updates:
print("Flushing {} messages.".format(len(updates)))
time.sleep(1)
updates = bot.get_updates(updates[-1]["update_id"] + 1)
|
[
"async def flush(ctx):\r\n\tisAdmin = ctx.message.author.permissions_in(ctx.message.channel).administrator\r\n\t# Only allow admins to change server stats\r\n\tif not isAdmin:\r\n\t\treturn\r\n\t# Flush settings\r\n\tawait quickFlush()\r\n\tmsg = 'Flushed settings to disk.'\r\n\tawait bot.send_message(ctx.message.channel, msg)",
"async def spam(self) -> None:\n spam:str = self.msg.split(\" \",1)[1]\n n, spam = spam.split(' ', 1)\n \n for i in range(int(n)):\n await self.message.channel.send(spam)\n sleep(0.6)\n await self.mensagem()\n \n spam = n = i = None\n del spam, n, i",
"def heartbeat_thread(self):\n while True:\n self.send_message(msg.Heartbeat())\n time.sleep(0.5)",
"def flushQueuedMessages(self):\n if self.queued_time is not None:\n self.getProxy(0).massDelayedActions(self.queued_time, \n self.queued_messages)\n self.queued_messages = []\n self.queued_time = None",
"async def _unfriendtime(self, ctx, days: int):\n await self.config.guild(ctx.guild).unfriendafter.set(days)\n await ctx.send(f\"Inactivity days till auto unfriend is {days} days.\")",
"async def sleep(self, ctx):\r\n\r\n await self.client.change_presence(status=discord.Status.invisible)\r\n\r\n Database.Bot[\"sleeping\"] = True\r\n\r\n await ctx.send(\r\n f'Bot going to sleep.. will not respond again until `{Database.Main[ctx.guild.id].get(\"prefix\", \".\")}wake` is sent'\r\n )",
"def send_smses():\n smses = Sms.objects.filter(sent=False)\n fail = 0\n\n for sms in smses:\n if fail < 3:\n try:\n message = unicode(sms.message, \"utf-8\")\n send_sms(sms.harambee.candidate_id, message)\n except (ValueError, httplib2.ServerNotFoundError):\n fail += 1\n continue\n\n sms.sent = True\n sms.time_sent = timezone.now()\n try:\n sms.save()\n except IntegrityError:\n fail += 1",
"def reset(update, context):\n for item in context.bot_data['queue']:\n item.set_handled(False)\n\n context.bot.send_message(chat_id=update.effective_chat.id,\n text=\"Looking for all queued periods again.\")",
"async def spam_all_channels(ctx,*,arg):\r\n await ctx.message.delete()\r\n await ctx.send('✅ **Spamming initiated!** Type `stop` to stop.')\r\n\r\n def check_reply(m):\r\n return m.content == 'stop' and m.author == ctx.author and m.channel == ctx.channel\r\n\r\n async def spam_text():\r\n while True:\r\n for tc in ctx.guild.text_channels:\r\n await tc.send(f'{arg}')\r\n \r\n spam_text_task = client.loop.create_task(spam_text())\r\n await client.wait_for('message', check=check_reply)\r\n spam_text_task.cancel()\r\n await ctx.send('✅ **Spamming complete!**')",
"def announce_and_reboot(self, intent_message):\n result_sentence = i18n.RESULT_REBOOT\n self.hermes.publish_end_session(intent_message.session_id, result_sentence)\n self.config['global']['reboot_site_id'] = intent_message.site_id\n self.config.write()\n reboot_command = self.config['global']['reboot_command']\n Timer(DELAY_SHUTDOWN, self.reboot, [intent_message.site_id,\n reboot_command]).start()",
"async def celebrate(ctx):\n await ctx.send(\n ctx.message.author.mention + ' Here have some well deserved crab' +\n ' rave to celebrate your latest succes!!\\n' +\n 'https://youtu.be/LDU_Txk06tM?t=70'\n )",
"async def purge(self, ctx, msgs: int, *, txt=None):\n await self.bot.delete_message(ctx.message)\n if msgs < 10000:\n async for message in self.bot.logs_from(ctx.message.channel, limit=msgs):\n try:\n if txt:\n if txt.lower() in message.content.lower():\n await self.bot.delete_message(message)\n else:\n await self.bot.delete_message(message)\n except:\n pass\n else:\n await self.bot.send_message(ctx.message.channel, bot_prefix + 'Too many messages to delete. Enter a number < 10000')",
"def send_goodbye_msg(self):\n self.send(self.GOODBYE_MSG)",
"async def __delayed_handshake(self):\n await asyncio.sleep(1)\n self.create_task(self.local_client.register_local_data_watcher(), 'local data watcher')\n self.create_task(self.local_client.register_classic_games_updater(), 'classic games updater')",
"def post(chat, message, args):\n if message.sender.id != 26170256: #Only admin command\n message.reply(\"This command it's only for the admin of the bot\")\n return\n\n c.execute('SELECT * FROM users')\n users_list = c.fetchall()\n\n message = \" \".join(message.text.split(\" \", 1)[1:])\n\n n = 0\n\n for res in users_list:\n n += 1\n\n if n < 50:\n continue\n\n try:\n bot.chat(res[0]).send(message)\n chat.send(\"Post sent to \"+str(res[0]))\n except botogram.api.ChatUnavailableError:\n c.execute('DELETE FROM users WHERE user_id={}'.format(res[0]))\n chat.send(\"The user \"+str(res[0])+\" has blocked your bot, so I removed him from the database\")\n conn.commit()\n except Exception as e:\n chat.send(\"*Unknow error :(*\\n\"+str(e))\n\n chat.send(\"<b>Done!</b>\\nThe message has been delivered to all users\") #Yeah\n conn.commit()",
"async def clean(self, ctx, max_messages:int):\n if max_messages > 1500:\n await self.bot.say(\"2 many messages\")\n return\n count = 0\n async for message in self.bot.logs_from(ctx.message.channel, limit=max_messages+1):\n if message.author == self.bot.user:\n asyncio.ensure_future(self.bot.delete_message(message))\n await asyncio.sleep(0.21)\n count += 1\n x = await self.bot.say(\"Removed `{0}` messages out of `{1}` searched messages\".format(count, max_messages))\n await asyncio.sleep(10)\n try:\n await self.bot.delete_message(ctx.message)\n except:\n pass\n await self.bot.delete_message(x)",
"def announce_and_shutdown(self, intent_message):\n result_sentence = i18n.RESULT_SHUTDOWN\n self.hermes.publish_end_session(intent_message.session_id, result_sentence)\n shutdown_command = self.config['global']['shutdown_command']\n Timer(DELAY_SHUTDOWN, self.shutdown, [intent_message.site_id,\n shutdown_command]).start()",
"def send_heartbeat(self):\n self._heartbeats += 1\n self._missed_heartbeats += 1\n self.send('~h~%d' % self._heartbeats)",
"def _dispatch_delayed(self):\n for message in self._messages:\n if self._time >= message.dispatch_time:\n self.dispatch(message)\n self._messages.remove(message)",
"def kill_all_heartbeats():\n from grater.utils import ANSI\n\n term_width = shutil.get_terminal_size((160, 10)).columns - 3\n print(term_width * '=')\n heartbeat_procs = ps_aux(b'table_heartbeat')\n botbeat_proc = ps_aux(b'bot_heartbeat')\n\n print(f'{ANSI[\"red\"]}'\n f'[X] Killing {len(heartbeat_procs)} tablebeats and {len(botbeat_proc)} botbeats...'\n f'{ANSI[\"reset\"]}')\n kill(heartbeat_procs)\n kill(botbeat_proc)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Compiles and returns a regular expression for word tokenization
|
def _word_tokenizer_re(self):
try:
return self._re_word_tokenizer
except AttributeError:
self._re_word_tokenizer = re.compile(
self._word_tokenize_fmt %
{
'NonWord': self._re_non_word_chars,
'MultiChar': self._re_multi_char_punct,
'WordStart': self._re_word_start,
},
re.UNICODE | re.VERBOSE
)
return self._re_word_tokenizer
|
[
"def regex_from_tokens(tokens, word_boundary=True, capture=True):\n tokens_ = tokens[:]\n\n # The longest tokens are first in the list\n tokens_.sort(key=lambda word: len(word), reverse=True)\n\n # Some tokens might contain parentheses or other problematic characters\n tokens_ = [re.escape(word) for word in tokens_]\n\n # Build regular expression\n regex = '(?:' + \"|\".join(tokens_) + ')'\n if word_boundary:\n regex = r\"\\b\" + regex + r\"\\b\"\n if capture:\n regex = '(' + regex + ')'\n\n return regex",
"def _wordToRegex(self, word):\n if word != \"\" and word[0].isalpha() and word[-1].isalpha():\n return \"\\\\b%s\\\\b\" % re.escape(word)\n else: \n return r\"\\b%s\\b\" % re.escape(word)",
"def __build_word_numeral(*args):\n re_ = None\n for word_list in args:\n for word in word_list:\n if not re_:\n re_ = r'(?:(?=\\w+)'\n else:\n re_ += '|'\n re_ += word\n re_ += ')'\n return re_",
"def TokenRegex(regex, spaces=RE_SPACES):\n return Token(Regex(regex), spaces=spaces)",
"def _make_re_from_phrase(phrase):\n paragraph_text = r'(^.+\\w.+\\n)*' # need \\S to ensure not just whitespace\n\n # TODO: check slowdown due to inclusion of '^.*' at start\n tmp = '^.*' + re.escape(phrase) + r'.*\\n' + paragraph_text + r'\\s+'\n tmp = tmp.replace(\"\\\\ \", \"(\\\\s|\\\\n)*\")\n tmp = tmp.replace(\":\", \"(:|\\\\s|\\\\n)*\")\n return re.compile(tmp, re.I | re.M) # make it case insensitive",
"def compile_word_ins(word):\n if word.isupper():\n terms = [('%s*%s' % (10**i, d)) for (i, d) in enumerate(word[::-1])]\n return '(' + '+'.join(terms) + ')'\n else:\n return word",
"def compile_regex(regex):\n return re.compile(regex, re.U)",
"def compile(self):\n return re.compile(self.pattern, self.flags)",
"def compileRegexp(class_):\n if not class_.allowParseDep:\n return\n\n d = dict(flagFormat=class_.flagFormat, depFormat=class_.depFormat,\n WORD=class_.WORD, IDENT=class_.IDENT)\n\n # zero or more space-separated flags\n flagFmt = '(?:\\( *(%(flagFormat)s?(?: +%(flagFormat)s)*) *\\))?'\n # add ^ and $ to ensure we match the entire string passed in\n regexp = ('^ *(%(depFormat)s) *' + flagFmt + ' *$') % d\n # word is a slightly larger group of chars than ident -\n # includes . and +, because those are used in paths and\n # sonames. May need to be larger some day, and probably\n # could be more restrictive for some groups. Should not contain\n # /, as that's used as a special char in many dep classes.\n regexp = regexp.replace('WORD', d['WORD'])\n regexp = regexp.replace('IDENT',d['IDENT'])\n class_.regexpStr = regexp\n class_.regexp = re.compile(regexp)",
"def regenerate_match_re(self):\n def find_broken_token_regex():\n \"\"\"Tries to find which token regex is broken.\n\n Returns:\n (str, str). Tuple of token name and token regex.\n \"\"\"\n trs = r\"\"\n for token in self.__table.values():\n if token.pattern_str: # Skip tokens with empty pattern\n trs += r\"(?P<{}>{})\".format(token.name, token.pattern_str)\n try:\n re.compile(trs, re.MULTILINE)\n except Exception:\n return (token.name, token.pattern_str)\n trs += r\"|\"\n\n token_re_str = r\"\"\n for token in self.__table.values():\n if token.pattern_str: # Skip tokens with empty pattern\n token_re_str += r\"(?P<{}>{})|\".format(token.name, token.pattern_str)\n # Remove trailing '|'\n token_re_str = token_re_str[0:-1]\n # Finally try to compile the regex\n try:\n self.__token_re = re.compile(token_re_str, re.MULTILINE)\n except Exception as e:\n tb = sys.exc_info()[2]\n token_name, broken_regex = find_broken_token_regex()\n emsg = str(e) + \" With token '{}' and regexp: '{}' and whole regexp: {}\".format(token_name, broken_regex, token_re_str)\n raise TokenizerRegexpError(emsg).with_traceback(tb)",
"def tokenization(text):\r\n list_of_punctuations_and_more = ['(', ')', ',', ':', '!', ' ', '\\n', '.', '']\r\n tokens = []\r\n token = ''\r\n for idx, character in enumerate(text):\r\n if any(character in s for s in list_of_punctuations_and_more):\r\n if '\\'' in token:\r\n splitted_word = token.split('\\'')\r\n for contraction in get_contractions():\r\n if contraction[0] == splitted_word[1]:\r\n if contraction[0] == 't':\r\n is_on_list = True\r\n for additional_contraction in get_additional_contractions():\r\n if additional_contraction[0] == splitted_word[0]:\r\n tokens.append(additional_contraction[1])\r\n is_on_list = False\r\n if is_on_list:\r\n tokens.append(splitted_word[0][:-1])\r\n tokens.append(contraction[1])\r\n else:\r\n tokens.append(splitted_word[0])\r\n tokens.append(contraction[1])\r\n else:\r\n tokens.append(token)\r\n tokens.append(character)\r\n token = ''\r\n else:\r\n token = token + character\r\n\r\n unwanted_characters = {'', ' ', '\\n'}\r\n tokens = [ele for ele in tokens if ele not in unwanted_characters] # remove unwanted characters\r\n print('Tokens: ', tokens)\r\n return tokens",
"def tokenize(word):\n return [m.group(0) for m in re.finditer(r'[aeiouy]+|(.)\\1*', word)]",
"def pattern_word(word, plural):\n if isinstance(word, str):\n #rword = re.escape(word)\n if plural is True:\n reg_word = r'\\b' + word + r'S?\\b'\n else:\n reg_word = r'\\b' + word + r'\\b'\n return reg_word\n else:\n raise Warning('input is not a string')",
"def get_word_tokens(doc: str):\n return word_tokenize(doc)",
"def make_regex(self):\n forwards_str = ')|('.join(self.forwards)\n reverses_str = ')|('.join(self.reverses)\n re_str = '^.*((' + forwards_str +')).*((' + reverses_str + ')).*$'\n return re.compile(re_str)",
"def gen_text_preprocessor():\n def clean_str(string):\n misspellings = {\n r'pur ': 'purple',\n r'fea-': 'feather',\n r'wh-': 'white',\n r'whie': 'white',\n r'wh ': 'white',\n r'or ': 'orange',\n r'or-': 'orange',\n r'orge': 'orange',\n r'winngs': 'wings',\n r'feathes': 'feathers',\n }\n\n for expr, subst in misspellings.items():\n string = re.sub(expr, subst, string)\n\n # Replace '(' with ' '\n string = re.sub(r'\\(', ' ', string)\n string = re.sub(r',', ' ', string)\n string = re.sub(r'-', ' ', string)\n string = re.sub(r'~+', ' ', string)\n\n # Replace multiple spaces with a single space.\n string = re.sub(r'\\s+', ' ', string).strip()\n\n string = re.sub(r'\"+', '', string)\n return string\n\n return data.Pipeline(clean_str)",
"def get_token_types(self):\r\n \r\n # With help from: https://deplinenoise.wordpress.com/2012/01/04/python-tip-regex-based-tokenizer/\r\n SCANNER = re.compile(r'''\r\n (\\s+) | # whitespace\r\n (//)[^\\n]* | # comments\r\n 0[xX]([0-9A-Fa-f]+) | # hexadecimal integer literals\r\n (\\d+) | # integer literals\r\n (<<|>>) | # multi-char punctuation\r\n ([][(){}<>=,;:*+-/|&~]) | # punctuation \r\n ([A-Za-z_][A-Za-z0-9_]*) | # identifiers\r\n \"\"\"(.*?)\"\"\" | # multi-line string literal\r\n \"((?:[^\"\\n\\\\]|\\\\.)*)\" | # regular string literal\r\n (.) | # an error!\r\n ''', re.DOTALL | re.VERBOSE)\r\n \r\n for match in re.finditer(SCANNER, self.scanner.modified_source_text): \r\n \r\n (space, comment, hexint, integer, mpunct, \r\n punct, word, mstringlit, stringlit, badchar) = match.groups()\r\n \r\n if word: \r\n #-------------------------------------------------------------------\r\n # check if word is an keyword\r\n #-------------------------------------------------------------------\r\n if word in self.symbols.keyword: \r\n keyword_token = Token(word, \"keyword\") \r\n self.token_list.append(keyword_token)\r\n #-------------------------------------------------------------------\r\n # check if word is an identifier\r\n #-------------------------------------------------------------------\r\n else:\r\n identifier_token = Token(word, \"identifier\") \r\n self.token_list.append(identifier_token)\r\n #-------------------------------------------------------------------\r\n # check if word is an integerConstant\r\n #-------------------------------------------------------------------\r\n if integer:\r\n Int_token = Token(integer, \"integerConstant\") \r\n self.token_list.append(Int_token)\r\n #-------------------------------------------------------------------\r\n # check if word is an symbol \r\n #-------------------------------------------------------------------\r\n if punct: \r\n symbol_token = Token(punct, \"symbol\") \r\n self.token_list.append(symbol_token)\r\n #-------------------------------------------------------------------\r\n # check if word is an stringConstant\r\n #------------------------------------------------------------------- \r\n if stringlit: \r\n string_token = Token(stringlit, \"stringConstant\") \r\n self.token_list.append(string_token) \r\n #-------------------------------------------------------------------\r\n # append EOF token\r\n #------------------------------------------------------------------- \r\n EOF_token = Token(self.endmark, \"EOF\") \r\n self.token_list.append(EOF_token) \r\n \r\n return self.token_list",
"def compile_tokens(tokens, pc, context):\n\n it = iter(tokens)\n ignore = False\n subtokens = None\n\n for token in it:\n # Handle comments. Whether or not a Forth permits nested comments is\n # pretty up-in-the-air; this Forth does not permit nesting of\n # comments.\n if token == \"(\":\n ignore = True\n continue\n elif token == \")\":\n ignore = False\n continue\n\n if ignore:\n continue\n\n # Look for subroutines.\n if token == \":\":\n subtokens = []\n continue\n elif token == \";\":\n if not subtokens:\n raise Exception(\"Empty word definition!\")\n name = subtokens[0]\n pc = subroutine(name, subtokens[1:], pc, context)\n continue\n elif subtokens is not None:\n subtokens.append(token)\n continue\n\n raise Exception(\"Lone word %r in tokenizer!\" % token)\n\n return pc",
"def compile_word(word):\n if word.isupper():\n s = ''\n to_multiply = 1\n for letter in word[::-1]:\n s += '%d*%s+' % (to_multiply, letter)\n to_multiply *= 10\n completed_string = '(' + s[:-1] +')'\n return completed_string\n else:\n return word"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Compiles and returns a regular expression to find contexts including possible sentence boundaries.
|
def period_context_re(self):
try:
return self._re_period_context
    except AttributeError:
self._re_period_context = re.compile(
self._period_context_fmt %
{
'NonWord': self._re_non_word_chars,
'SentEndChars': self._re_sent_end_chars,
},
re.UNICODE | re.VERBOSE)
return self._re_period_context
|
[
"def compile(self):\n return re.compile(self.pattern, self.flags)",
"def getContext(self, word = None, scope = 10, exact = False):\n\n\t\tif word == None:\n\t\t\treturn None\n\t\telif exact == False:\n\t\t\tword = word.lower()\n\n\t\ttextList = self.tokens(includePunctuation = False, lc = True)\n\t\tconcList = []\n\n\t\tif len(textList) < scope:\n\t\t\tscope = len(textList)/2\n\n\t\tfor i in range(len(textList)):\n\t\t\ttoken = textList[i]\n\n\t\t\tconcLeft = \"\"\n\t\t\tconcRght = \"\"\n\n\t\t\tif token == word:\n\t\t\t\tif (i + 1) == (len(textList)):\n\t\t\t\t\t#print textList[-scope:i]\n\t\t\t\t\tconcLeft = \" \".join(textList[i - scope - 1:i])\n\n\t\t\t\telif i == 0:\n\t\t\t\t\t#print textList[i:scope]\n\t\t\t\t\tconcRght = \" \".join(textList[i:i + scope])\n\n\t\t\t\telif (i < scope) and (i > 0):\n\t\t\t\t\tconcLeft = \" \".join(textList[0:i])\n\t\t\t\t\tconcRght = \" \".join(textList[i + 1:i + scope])\n\n\t\t\t\telse:\n\t\t\t\t\tconcLeft = \" \".join(textList[i - scope - 1:i])\n\t\t\t\t\tconcRght = \" \".join(textList[i + 1:i + scope])\n\n\t\t\tif (len(concLeft) == 0) and (len(concRght) == 0):\n\t\t\t\tpass\n\n\t\t\telif (len(concLeft) > 0) and (len(concRght) > 0):\n\t\t\t\tconcList.append([concLeft, token, concRght])\n\n\t\t\telif (len(concLeft) > 0) and (len(concRght) == 0):\n\t\t\t\tconcList.append([concLeft, token, \"###\"])\n\n\t\t\telif (len(concLeft) == 0) and (len(concRght) > 0):\n\t\t\t\tconcList.append([\"!!!\", token, concRght])\n\n\t\treturn concList",
"def get_compiled(self, name: str) -> re.compile:\n rx = re.compile(self.regexp)\n if self.flag_multiline:\n rx.flags ^= re.MULTILINE\n if self.flag_dotall:\n rx.flags ^= re.DOTALL\n return rx",
"def context_expr():\n contexts = yield sepBy1(\n about_context ^\n incoordsys_context ^\n innode_context ^\n at_context ^\n with_context, listsep())\n yield normalspaces()\n expr = yield expression\n return s.Construct(s.CONTEXT_EXPR, contexts, expr)",
"def regex_from_tokens(tokens, word_boundary=True, capture=True):\n tokens_ = tokens[:]\n\n # The longest tokens are first in the list\n tokens_.sort(key=lambda word: len(word), reverse=True)\n\n # Some tokens might contain parentheses or other problematic characters\n tokens_ = [re.escape(word) for word in tokens_]\n\n # Build regular expression\n regex = '(?:' + \"|\".join(tokens_) + ')'\n if word_boundary:\n regex = r\"\\b\" + regex + r\"\\b\"\n if capture:\n regex = '(' + regex + ')'\n\n return regex",
"def make_regex(self):\n forwards_str = ')|('.join(self.forwards)\n reverses_str = ')|('.join(self.reverses)\n re_str = '^.*((' + forwards_str +')).*((' + reverses_str + ')).*$'\n return re.compile(re_str)",
"def compile_regex(regex):\n return re.compile(regex, re.U)",
"def _make_re_from_phrase(phrase):\n paragraph_text = r'(^.+\\w.+\\n)*' # need \\S to ensure not just whitespace\n\n # TODO: check slowdown due to inclusion of '^.*' at start\n tmp = '^.*' + re.escape(phrase) + r'.*\\n' + paragraph_text + r'\\s+'\n tmp = tmp.replace(\"\\\\ \", \"(\\\\s|\\\\n)*\")\n tmp = tmp.replace(\":\", \"(:|\\\\s|\\\\n)*\")\n return re.compile(tmp, re.I | re.M) # make it case insensitive",
"def compileRegexp(class_):\n if not class_.allowParseDep:\n return\n\n d = dict(flagFormat=class_.flagFormat, depFormat=class_.depFormat,\n WORD=class_.WORD, IDENT=class_.IDENT)\n\n # zero or more space-separated flags\n flagFmt = '(?:\\( *(%(flagFormat)s?(?: +%(flagFormat)s)*) *\\))?'\n # add ^ and $ to ensure we match the entire string passed in\n regexp = ('^ *(%(depFormat)s) *' + flagFmt + ' *$') % d\n # word is a slightly larger group of chars than ident -\n # includes . and +, because those are used in paths and\n # sonames. May need to be larger some day, and probably\n # could be more restrictive for some groups. Should not contain\n # /, as that's used as a special char in many dep classes.\n regexp = regexp.replace('WORD', d['WORD'])\n regexp = regexp.replace('IDENT',d['IDENT'])\n class_.regexpStr = regexp\n class_.regexp = re.compile(regexp)",
"def build_custom_regex(text):\n\n # Match the final question mark\n text = re.sub(r\"\\?\", \"\\?\", text)\n # Because of optinal expensions, we need to be lenient on space matching. This will allow to skip some spaces\n text = re.sub(r\"\\s\", \"\\\\\\s*\", text)\n # Hack, because the templates in the dataset somehow don't match the templates exactly\n text = re.sub(\"another\", \"(?:another|a)\", text)\n text = re.sub(\"other\", \"(?:other)?\", text)\n # Replace all attributes by their possibilities, possibly in a group\n text = SIZE_REGEX.sub(partial(add_group, ALL_SIZES), text)\n text = COLOR_REGEX.sub(partial(add_group, ALL_COLORS), text)\n text = MATERIAL_REGEX.sub(partial(add_group, ALL_MATERIALS), text)\n text = SHAPE_REGEX.sub(partial(add_group, ALL_SHAPES), text)\n text = RELATION_REGEX.sub(partial(add_group, ALL_RELATIONS), text)\n # Optional text\n text = OPTIONAL_REGEX.sub(r\"(?:\\1)?\", text)\n # To match plurals in our groups, we detect -s suffixes\n text = PLURAL_REGEX.sub(r\")s)?\\1\", text)\n return re.compile(text)",
"def _word_tokenizer_re(self):\n try:\n return self._re_word_tokenizer\n except AttributeError:\n self._re_word_tokenizer = re.compile(\n self._word_tokenize_fmt %\n {\n 'NonWord': self._re_non_word_chars,\n 'MultiChar': self._re_multi_char_punct,\n 'WordStart': self._re_word_start,\n },\n re.UNICODE | re.VERBOSE\n )\n return self._re_word_tokenizer",
"def _compile(pattern):\n\n # flags = re.VERBOSE | re.MULTILINE | re.IGNORECASE\n\n if isinstance(pattern, list):\n result = [re.compile(p) for p in pattern]\n else:\n result = re.compile(pattern)\n\n return result",
"def compile_response_regex(regexp):\n return re.compile(regexp, re.IGNORECASE | re.DOTALL)",
"def _re_compile(regex):\n\n return re.compile(regex, re.I | re.UNICODE)",
"def compile_match(pattern):\n\n regexp = \"\"\n\n while pattern:\n if pattern.startswith(\"**\"):\n regexp += r'.*'\n pattern = pattern[2:]\n elif pattern[0] == \"*\":\n regexp += r'[^/]*/?'\n pattern = pattern[1:]\n elif pattern[0] == '[':\n regexp += r'['\n pattern = pattern[1:]\n\n while pattern and pattern[0] != ']':\n regexp += pattern[0]\n pattern = pattern[1:]\n\n pattern = pattern[1:]\n regexp += ']'\n\n else:\n regexp += re.escape(pattern[0])\n pattern = pattern[1:]\n\n regexp += \"$\"\n\n return re.compile(regexp, re.I)",
"def compile_tokens(tokens, pc, context):\n\n it = iter(tokens)\n ignore = False\n subtokens = None\n\n for token in it:\n # Handle comments. Whether or not a Forth permits nested comments is\n # pretty up-in-the-air; this Forth does not permit nesting of\n # comments.\n if token == \"(\":\n ignore = True\n continue\n elif token == \")\":\n ignore = False\n continue\n\n if ignore:\n continue\n\n # Look for subroutines.\n if token == \":\":\n subtokens = []\n continue\n elif token == \";\":\n if not subtokens:\n raise Exception(\"Empty word definition!\")\n name = subtokens[0]\n pc = subroutine(name, subtokens[1:], pc, context)\n continue\n elif subtokens is not None:\n subtokens.append(token)\n continue\n\n raise Exception(\"Lone word %r in tokenizer!\" % token)\n\n return pc",
"def _make_regex(self, *scopes):\n cmds = []\n # We go through all commands, and collect those\n # who are in one of the given scopes:\n for name in Cmd.commands:\n for scope in scopes:\n if Cmd.commands[name].scope == scope:\n cmds.append(name)\n # Build the regex using the the \"or\" operator\n cmd_list = '|'.join(cmd for cmd in cmds)\n regex = re.compile(\n \"^(?P<command>{})(?:\\s+(?P<arguments>.*))?$\".format(cmd_list)\n )\n return regex",
"def extract_statements(\n text=None, \n nlp=None, \n make_sentence=False, \n n_min_word_paragraph=50, \n n_max_word_paragraph=200\n ):\n \n # remove non ASCII characters\n text = remove_non_ascii(text)\n \n \n lines = []\n prev = \"\"\n n_words = 0\n for line in text.split('\\n'):\n # aggregate consecutive lines where text may be broken down\n # only if next line starts with a space or previous does not end with punctation mark and between\n if((line.startswith(' ') or not prev.endswith(('.','?', '!'))) and n_words <= n_max_word_paragraph):\n prev = prev + ' ' + line\n n_words = len(prev.split())\n \n # min words in paragraph\n elif n_words <=n_min_word_paragraph:\n prev = prev + ' ' + line\n n_words = len(prev.split())\n \n else:\n # new paragraph\n lines.append(prev)\n prev = line\n n_words = 0\n \n # don't forget left-over paragraph\n lines.append(prev)\n # clean paragraphs from extra space, unwanted characters, urls, etc.\n # best effort clean up, consider a more versatile cleaner\n sentences = []\n for line in lines:\n \n # removing header number\n line = re.sub(r'^\\s?\\d+(.*)$', r'\\1', line)\n # removing trailing spaces\n line = line.strip()\n # words may be split between lines, ensure we link them back together\n line = re.sub('\\\\s?-\\\\s?', '-', line)\n # remove space prior to punctuation\n line = re.sub(r'\\s?([,:;\\.])', r'\\1', line)\n # ESG contains a lot of figures that are not relevant to grammatical structure\n line = re.sub(r'\\d{5,}', r' ', line)\n # remove mentions of URLs\n line = re.sub(r'((http|https)\\:\\/\\/)?[a-zA-Z0-9\\.\\/\\?\\:@\\-_=#]+\\.([a-zA-Z]){2,6}([a-zA-Z0-9\\.\\&\\/\\?\\:@\\-_=#])*', r' ', line)\n # remove multiple spaces\n line = re.sub('\\\\s+', ' ', line)\n \n # split paragraphs into well defined sentences using spacy\n if make_sentence:\n try:\n for part in list(nlp(line).sents):\n part_strip = str(part).strip()\n # remove senteces with only 30 characters\n if len(part_strip) > 30:\n sentences.append(part_strip)\n except ValueError:\n print(\"Check if nlp model was loaded\")\n else:\n sentences.append(line)\n \n return sentences",
"def _contexts_building(self, use_stems=False, window=2):\n\n # loop through sentences\n for i, sentence in enumerate(self.sentences):\n\n # lowercase the words\n words = [w.lower() for w in sentence.words]\n\n # replace with stems if needed\n if use_stems:\n words = sentence.stems\n\n # block container\n block = []\n\n # loop through words in sentence\n for j, word in enumerate(words):\n\n # skip and flush block if word is not in vocabulary\n if word not in self.words:\n block = []\n continue\n\n # add the left context\n self.contexts[word][0].extend(\n [w for w in block[max(0, len(block) - window):len(block)]]\n )\n\n # add the right context\n for w in block[max(0, len(block) - window):len(block)]:\n self.contexts[w][1].append(word)\n\n # add word to the current block\n block.append(word)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Yields pairs of tokens from the given iterator such that each input token will appear as the first element in a yielded tuple. The last pair will have None as its second element.
|
def _pair_iter(it):
it = iter(it)
prev = next(it)
for el in it:
yield (prev, el)
prev = el
yield (prev, None)
|
[
"def pairs(seq):\n iterable, copied = tee(seq)\n next(copied)\n for x, y in zip(iterable, copied):\n yield x, y",
"def iter_pairs(l, last=True):\r\n i = iter(l)\r\n b = i.next()\r\n done = 0\r\n while not done:\r\n a = b\r\n try:\r\n b = i.next()\r\n except StopIteration:\r\n if not last:\r\n raise\r\n b = None\r\n done = 1\r\n yield a, b",
"def pairs(l):\n for i in range(int(len(l) / 2)):\n yield l[2*i], l[2*i+1]",
"def _first_iter_vals(iters):\n for it in iters:\n try:\n yield it.next()\n except StopIteration:\n yield None",
"def next_pair(self):\n for i in range(self.n_sentences):\n sent1 = self.corpus1.sentences[i, :self.corpus1.sent_lengths[i]]\n sent2 = self.corpus2.sentences[i, :self.corpus2.sent_lengths[i]]\n yield sent1, sent2",
"def iterate_binary_ops(tokens):\n\n itr = iter(tokens)\n while True:\n try:\n yield (next(itr), next(itr))\n except StopIteration:\n break",
"def this_and_prev(iterable):\n try:\n item = next(iterable)\n while True:\n next_item = next(iterable)\n yield item, next_item\n item = next_item\n except StopIteration:\n return",
"def pairwise(iterable: Iterable[_T], fillvalue: _S = None) -> Iterable[Tuple[_T, Union[_T, _S]]]:\n a, b = tee(iterable)\n next(b, None)\n if fillvalue is not None:\n return zip_longest(a, b, fillvalue=fillvalue)\n return zip(a, b)",
"def pairs(items: \"list[T]\") -> \"list[(T, T)]\":\n return [(items[i], items[i+1]) for i in range(len(items)-1)]",
"def triples(self):\n for tokens in self.list_tokens:\n if len(tokens) < 3:\n return\n\n for i in range(len(tokens) - 2):\n yield (tokens[i], tokens[i+1], tokens[i+2])",
"def joinit(iterable, delimiter):\n it = iter(iterable)\n yield next(it)\n for x in it:\n yield delimiter\n yield x",
"def getTokens(data_iter, place):\n for english, german in data_iter:\n if place == 0:\n yield engTokenize(english)\n else:\n yield deTokenize(german)",
"def previous(iterator, previous = None):\n for i in iterator:\n yield (previous, i)\n previous = i",
"def init_pairs(self):\n for text in self.corpus:\n for i in range(len(text)-1):\n yield (text[i], text[i+1])",
"def transform_to_tuple(items):\n for item in items:\n if not isinstance(item, tuple):\n yield None, item\n else:\n yield item",
"def merge(self, tokens):\r\n tokens = iter(tokens)\r\n (lasttype, lastval) = tokens.next()\r\n for ttype, value in tokens:\r\n if ttype is lasttype:\r\n lastval += value\r\n else:\r\n yield(lasttype, lastval)\r\n (lasttype, lastval) = (ttype, value)\r\n if lastval.endswith('\\n'):\r\n lastval = lastval[:-1]\r\n if lastval:\r\n yield(lasttype, lastval)",
"def ipeek(iterable):\r\n firstitem = iterable.next()\r\n return (firstitem,_PeekedIter(firstitem,iterable))",
"def lookahead(iterable):\n # Get an iterator and pull the first value.\n it = iter(iterable)\n last = next(it)\n # Run the iterator to exhaustion (starting from the second value).\n for val in it:\n # Report the *previous* value (more to come).\n yield last, True\n last = val\n # Report the last value.\n yield last, False",
"def picking(items: Iterable[T]) -> Iterable[tuple[T, list[T]]]:\n items = list(items)\n for k, item in enumerate(items):\n yield item, items[:k]+items[k+1:]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The type with its final period removed if it has one.
|
def type_no_period(self):
if len(self.type) > 1 and self.type[-1] == '.':
return self.type[:-1]
return self.type
|
[
"def remove_type(self, unit_type):\n new_polymer = []\n unit_type = unit_type.lower()\n for unit in self.units:\n if unit.lower() == unit_type:\n continue\n else:\n new_polymer.append(unit)\n \n self.units = new_polymer",
"def SoType_removeType(name: 'SbName') -> \"SbBool\":\n return _coin.SoType_removeType(name)",
"def removeType(name: 'SbName') -> \"SbBool\":\n return _coin.SoType_removeType(name)",
"def remove_ptype(self, name):\n i = self.ptype_name.index(name)\n if i in self.active_types:\n self.active_types.remove(i)\n for param in self.ptype_param:\n self.ptype_param[param].remove(i)\n self.ptype_param_methods.remove(i)\n self.ptype_ff.remove(i)\n self.ptype_orientation.remove(i)\n self.ptype_position.remove(i)\n self.ptype_name.remove(i)\n if i in self.active_types:\n self.FT_valid = False",
"def end_type(self):\n return self.container['end_type']",
"def clearTypes(self):\n\t\tself.types.clear()",
"def _remove_atom(self, chain, type):\n for residue in chain.copy():\n for atom in residue.copy():\n if atom.is_disordered():\n chain[residue.id].detach_child(atom.id)\n elif residue.resname == \"GLY\" and type == \"CB\" and atom.id == \"CA\":\n continue\n elif atom.id != type:\n chain[residue.id].detach_child(atom.id)",
"def delete_type_name(self, name):\n _core.LLVMDeleteTypeName(self.ptr, name)",
"def clear_genus_type_terms(self):\n pass",
"def removeMayaType(self, mayaType):\n try:\n apiEnum = self.mayaTypesToApiEnums.pop( mayaType )\n except KeyError: pass\n else:\n enums = self.apiEnumsToMayaTypes[apiEnum]\n enums.pop( mayaType, None )\n if not enums:\n self.apiEnumsToMayaTypes.pop(apiEnum)\n self.apiEnumsToApiTypes.pop(apiEnum)\n try:\n apiType = self.mayaTypesToApiTypes.pop( mayaType, None )\n except KeyError: pass\n else:\n # due to lazy loading we are not guaranteed to have an entry\n if apiType in self.apiTypesToMayaTypes:\n types = self.apiTypesToMayaTypes[apiType]\n _logger.debug('removeMayaType %s: %s' % (mayaType, types))\n types.pop( mayaType, None )\n if not types:\n self.apiTypesToMayaTypes.pop(apiType)\n self.apiTypesToApiEnums.pop(apiType)",
"def removeTypeFilter(self, type, filter):\n raise NotImplementedError(\"method 'removeTypeFilter' not implemented\")",
"def prune(tp):\n if isinstance(tp, TypeVariable):\n if tp.instance is not None:\n tp.instance = prune(tp.instance)\n return tp.instance\n return tp",
"def _clear_type_cache():\n\tpass",
"def remove_component(self, component_type: _Any) -> None:\n del self._components[component_type]\n\n self.clear_cache()",
"def unpublish(self, cls):\r\n self.classes.pop(cls, None)",
"def clear_genus_type_terms(self):\n self._clear_terms('genustypeid')",
"def removeLast(self):\n\t\tnode = self.head\n\t\twhile(node.after is not self.tail):\n\t\t\tnode = node.after\n\t\tnode.after = None",
"def _last_period(period: pd.Period, freq: str) -> pd.Period:\n if not freq[0].isdigit():\n return period.asfreq(freq, 'e')\n m = re.match(\"(\\d+)(\\w+)\", freq)\n num = int(m.group(1))\n sub_freq = m.group(2)\n return (period.asfreq(sub_freq, 'e') - num + 1).asfreq(freq)",
"def end_type(self, end_type):\n\n self.container['end_type'] = end_type"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The type with its final period removed if it is marked as a sentence break.
|
def type_no_sentperiod(self):
if self.sentbreak:
return self.type_no_period
return self.type
|
[
"def type_no_period(self):\n if len(self.type) > 1 and self.type[-1] == '.':\n return self.type[:-1]\n return self.type",
"def fix_missing_period(self,line):\n\n if line == \"\": \n return line\n if line[-1] in self.END_TOKENS: \n return line\n return line + \" .\"",
"def fix_missing_period(line):\n if \"@highlight\" in line: return line\n if line==\"\": return line\n if line[-1] in END_TOKENS: return line\n return line + \" .\"",
"def _normalizeTypestring(self, typestring):\r\n\t\tts = typestring\r\n\t\t# remove space at begin and end, also sub remaining space with only one\r\n\t\tts = self.reRemoveStart.sub(\"\", ts)\r\n\t\tts = self.reRemoveEnd.sub(\"\", ts)\r\n\t\tts = self.reSpace.sub(\" \", ts)\r\n\t\treturn ts",
"def fix_type_pre(value):\n new_value = value\n new_value = re.sub(r'\\.{3}[^\\.]',r' <nobr> . . . </nobr> ', new_value)\n new_value = re.sub(r'\\.{4}',r' <nobr> . . . . </nobr> ', new_value)\n return new_value",
"def remove_org_type(original_text: str) -> str:\n return remove_org_type_pattern.sub(repl=\"\", string=original_text).strip()",
"def strip_type_comment(line: str) -> str:\n match = re.search(TYPE_COM, line)\n assert match, line\n if match.group(1).lstrip().startswith('ignore'):\n # Keep # type: ignore[=code] comments.\n return line\n rest = line[:match.start()]\n\n typ = match.group(1)\n _, sub_comment = split_sub_comment(typ)\n if sub_comment is None:\n # Just keep exactly the same kind of endline.\n trailer = re.search(_TRAILER, typ)\n assert trailer\n sub_comment = typ[trailer.start():]\n\n if rest:\n new_line = rest + sub_comment\n else:\n # A type comment on line of its own.\n new_line = line[:line.index('#')] + sub_comment.lstrip(' \\t')\n return new_line",
"def end_token(self):\n return \"end type\"",
"def strip_type(self, op):\n op = op[:-2]\n op = op + \" \"\n return op.lower()",
"def end_with_punctuation(self, sentence):\n if not re.match(r'[\\.?!]$', sentence[-1]):\n self.error_list.append(\"Every sentence should end with either of '.', '?' or '!'.\")",
"def check_end_punctuations(self) -> str:\n if not self.contain_content('!') and not self.contain_content('?') \\\n and not self.contain_content('.'):\n return 'This sentence is not ended with exclamation mark, period mark or question mark.'\n if self.contain_type('SBARQ') or self.contain_type('SQ'):\n if self.find_the_last() == '?':\n return 'The question sentence ended correctly.'\n else:\n if self.find_the_last() == '.' or self.find_the_last() == '!':\n return 'This sentence has a good end punctuation.'\n else:\n return 'The end of this sentence is very likely to have a wrong punctuation.'",
"def format_trailing_period(title, _config):\n return re.sub(r'^(.*)\\.$', r'\\1', title)",
"def remove_namespaces_whitespaces(type_str):\n return type_str.replace('sycl::', '').replace(\n ' ', '_').replace('std::', '')",
"def remove_filext(s):\n dot = s.rfind('.')\n if dot == -1: return s\n return s[:dot]",
"def stripperiods(aText):\n import re\n # Replace by a space in case comma directly connects two words.\n aText = re.sub(r\"\\.\", \" \", aText)\n # Remove any multi-space sections (some may result from the above step)\n return re.sub(\" {2,}\", \" \", aText)",
"def desegment(self, text: str) -> str:\n return super().desegment(text)",
"def end_type(self):\n return self.container['end_type']",
"def remove_type(self, unit_type):\n new_polymer = []\n unit_type = unit_type.lower()\n for unit in self.units:\n if unit.lower() == unit_type:\n continue\n else:\n new_polymer.append(unit)\n \n self.units = new_polymer",
"def SoType_removeType(name: 'SbName') -> \"SbBool\":\n return _coin.SoType_removeType(name)",
"def _remove_type_1_tags(rtf_text):\n brackets = re.compile(r\"[{}]\")\n headers = re.compile(r\"\\\\f[2-6]\")\n bold = re.compile(r\"\\\\b[0-3]?\")\n font = re.compile(r\"\\\\fcharset0 .*?(?= |\\\\|;|\\n);?\")\n remainder = re.compile(r\"\\\\.*?(?=\\\\| |;|\\n);?\")\n rtf_text = headers.sub('', rtf_text)\n rtf_text = bold.sub('', rtf_text)\n rtf_text = font.sub('', rtf_text)\n rtf_text = remainder.sub('', rtf_text)\n rtf_text = brackets.sub('', rtf_text)\n return rtf_text"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
True if the token's first character is uppercase.
|
def first_upper(self):
return self.tok[0].isupper()
|
[
"def check_word_capitalization(word):\n return_value = False\n if (len(word) > 1):\n return_value = True if (word[0].isupper() and word[1].islower()) else False",
"def _has_capital(the_string):\n if any(char in ascii_uppercase for char in the_string):\n return True\n else:\n return False",
"def contains_uppercase(s):\n return contain_upper_regexp.search(s) is not None",
"def str_is_upper(str):\n return str.upper() == str",
"def upper_case_words(token_list):\n return [x.isupper() and len(x[0]) > 1 for x in token_list].count(True)",
"def thisGlyphIsUppercase( glyphName, thisFont=Glyphs.font ):\n\ttry:\n\t\tif glyphName and thisFont.glyphs[glyphName].subCategory == \"Uppercase\":\n\t\t\treturn True\n\t\treturn False\n\texcept Exception as e:\n\t\tprint(\"Cannot determine case for: %s\" % glyphName)\n\t\tprint(\"Error: %s\" % e)\n\t\treturn False",
"def is_camelcase_string(s):\n return CAMELCASE_DETECT_RE.match(s) is not None",
"def is_mostly_upper_case(string, threshold=0.67):\n n=0\n for c in string:\n if c.isupper() or c.isspace():\n n=n+1\n if float(n) / len(string) >= threshold:\n return True\n else:\n False",
"def _is_yelling(text):\n return text.isupper()",
"def is_first_letter(val):\n return ord(val[0].lower()) in range(ord('a'), ord('z') + 1)",
"def exclude_uppercase(self) -> Optional[bool]:\n return pulumi.get(self, \"exclude_uppercase\")",
"def has_multiple_caps(noun):\n return re.match(r'[A-Z].*[A-Z]', noun)",
"def IsUppercaseVowel(letter):\n\n if (letter == 'A' or letter == 'E' or letter == 'I' or letter == 'O' or\n letter == 'U'):\n return True;",
"def camel(s: str)-> bool:\n return s != s.lower() and s != s.upper() and \"_\" not in s",
"def is_capital(city: Record) -> bool:\n return city.attributes[\"FEATURECLA\"].startswith(\"Admin-0 capital\")",
"def check_upper(name: str) -> None:\n check_common(name)\n if name.upper() != name:\n raise ValueError(f\"no lower case allowed: {repr(name)}\")",
"def user32_IsCharUpper(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"ch\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def get_is_capital_count(self):\n with open(self.file_path) as f:\n for line in f:\n split_words = split_line(line)\n del split_words[-1]\n for word_idx in range(len(split_words)):\n cur_word, cur_tag = split_word_tag(split_words[word_idx])\n if any(char.isupper() for char in cur_word):\n if (cur_tag) not in self.is_capital_count_dict:\n self.is_capital_count_dict[(cur_tag)] = 1\n else:\n self.is_capital_count_dict[(cur_tag)] += 1",
"def print_all_uppercased(s):\n\twords = s.split()\n\tfor word in words:\n\t\tif word[0].isupper():\n\t\t\tprint word",
"def check_upper_cases(text):\n\n uppers = 0\n for char in text:\n if char.isupper():\n uppers = uppers + 1\n\n return uppers"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
True if the token's first character is lowercase.
|
def first_lower(self):
return self.tok[0].islower()
|
[
"def to_lower(token):\r\n return token.lower() if token else None",
"def is_first_letter(val):\n return ord(val[0].lower()) in range(ord('a'), ord('z') + 1)",
"def contains_lowercase(s):\n return contain_lower_regexp.search(s) is not None",
"def IsNameStartChar(c):\n if c <= u\"z\":\n if c >= u\"a\":\n return True\n elif c <= u\"Z\":\n if c >= u\"A\":\n return True\n else:\n return c == u\":\"\n else:\n return c == u\"_\"\n else:\n return NameStartCharClass.test(c)",
"def _single_prefix_char(token: str, parser: argparse.ArgumentParser) -> bool:\n return len(token) == 1 and token[0] in parser.prefix_chars",
"def check_lower(name: str) -> None:\n check_common(name)\n if name.lower() != name:\n raise ValueError(f\"no upper case allowed: {repr(name)}\")",
"def is_camelcase_string(s):\n return CAMELCASE_DETECT_RE.match(s) is not None",
"def check_word_capitalization(word):\n return_value = False\n if (len(word) > 1):\n return_value = True if (word[0].isupper() and word[1].islower()) else False",
"def test_get_to_lowercase(self):\n assert self.parser.clean(\n \"OpenClassrooms\"\n ) == \"openclassrooms\"",
"def lower(text):\n return text.lower()",
"def user32_IsCharLower(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"ch\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def to_lowercase(tokens):\n return [token.lower() for token in tokens]",
"def first_letter(self, letter):\n return self[0] == letter",
"def getFirstLetter(self):\n for letter in self.normalizedTitle():\n if letter.isalpha():\n return letter.lower()\n # Index all digits as '0' because we treat them as the same first letter.\n elif letter.isdigit():\n return '0'\n # Terms without an alphanumeric character should show up somewhere\n return '0'",
"def lcfirst(s):\n return s[0].lower()+s[1:]",
"def lowercase(self, string):\n return string.lower()",
"def _lower(s):\n return s.translate(_lower_table)",
"def starts_with_a_vowel(w):\n return w[0].lower() in \"aeiou\"",
"def test_lower(self):\n self.assertEqual(to_lower('HELLO'), 'hello')",
"def check_lower_cases(text):\n\n lowers = 0\n for char in text:\n if char.islower():\n lowers = lowers + 1\n\n return lowers"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
True if the token text is that of an ellipsis.
|
def is_ellipsis(self):
return self._RE_ELLIPSIS.match(self.tok)
|
[
"def has_more_tokens(self):",
"def hasMoreTokens(self):\r\n return len(self.lines) != 0",
"def does_end_token_exist(self) -> bool:",
"def is_maybe_off_by_one(text, anno):\n span = anno.text_span()\n start = span.char_start\n end = span.char_end\n start_ok = start == 0 or text[start - 1].isspace()\n end_ok = end == len(text) or text[end].isspace()\n return not (start_ok and end_ok)",
"def is_real_sentence(only_token, sentence):\n \n first_word = \"\"\n if only_token:\n first_word = sentence[0]\n else:\n first_word = sentence[0][0]\n\n if '---------------------' in first_word or first_word == '-DOCSTART-':\n return False\n else:\n return True",
"def is_content_sentence(symbol_stream):\n return any(symbol[0] is not None and in_ranges(symbol[0], WORD_RANGES)\n for symbol in symbol_stream)",
"def is_user_mention(self):\n temp = nltk.TweetTokenizer(strip_handles=True)\n result = temp.tokenize(self.token)\n if result == []:\n return True\n else:\n return False",
"def _Truncate(self, tokens, overflow):\n self._truncated = True\n marker_string = '...'\n marker_width = len(marker_string)\n marker_token = (Token.Markdown.Truncated, marker_string)\n if tokens and overflow:\n word, available = overflow # pylint: disable=unpacking-non-sequence\n if marker_width == available:\n # Exactly enough space for the marker.\n pass\n elif (marker_width + 1) <= available:\n # The marker can replace the trailing characters in the overflow word.\n word = ' ' + self._UnFormat(word)[:available-marker_width-1]\n tokens.append((self._current_token_type, word))\n else:\n # Truncate the token list so the marker token can fit.\n truncated_tokens = []\n available = self._width\n for token in tokens:\n word = token[self.TOKEN_TEXT_INDEX]\n width = self._attr.DisplayWidth(word)\n available -= width\n if available <= marker_width:\n trim = marker_width - available\n if trim:\n word = word[:-trim]\n truncated_tokens.append((token[self.TOKEN_TYPE_INDEX], word))\n break\n truncated_tokens.append(token)\n tokens = truncated_tokens\n tokens.append(marker_token)\n return tokens",
"def text_contains_sentbreak(self, text):\n found = False # used to ignore last token\n for t in self._annotate_tokens(self._tokenize_words(text)):\n if found:\n return True\n if t.sentbreak:\n found = True\n return False",
"def _is_token(self, expr):\n return isinstance(expr, dict) and 'prototype' in expr \\\n and expr['prototype'] is not None",
"def _is_subword(self, token):\n token = self._tokenizer.convert_tokens_to_string(token)\n return True",
"def is_terminated_or_truncated(self) -> bool:\n return self[SampleBatch.TERMINATEDS][-1] or (\n SampleBatch.TRUNCATEDS in self and self[SampleBatch.TRUNCATEDS][-1]\n )",
"def is_end_word(self) -> bool:\n return self._end_of_word",
"def is_mention_subset(small_mention_text, large_mention_text):\n small_mention_tokens = small_mention_text.split()\n large_mention_tokens = large_mention_text.split()\n if small_mention_text in large_mention_text:\n return True\n elif len(large_mention_tokens) > 2:\n if small_mention_tokens == \\\n [large_mention_tokens[0], large_mention_tokens[-1]]:\n return True\n return False",
"def is_punctuation_mark(self, word: str) -> bool:\n return bool(re.match(r\"[%s]\" % self.allowed_punctuation_marks, word))",
"def is_suffix(self):\n return self.first == None",
"def validate_token_format(self):\n token_slices = self.token.split(';')\n if len(token_slices) == 3:\n # We need to check if any of the token slices is empty\n for token_slice in token_slices:\n if len(token_slice) == 0:\n return False\n return True\n return False",
"def check_end_punctuations(self) -> str:\n if not self.contain_content('!') and not self.contain_content('?') \\\n and not self.contain_content('.'):\n return 'This sentence is not ended with exclamation mark, period mark or question mark.'\n if self.contain_type('SBARQ') or self.contain_type('SQ'):\n if self.find_the_last() == '?':\n return 'The question sentence ended correctly.'\n else:\n if self.find_the_last() == '.' or self.find_the_last() == '!':\n return 'This sentence has a good end punctuation.'\n else:\n return 'The end of this sentence is very likely to have a wrong punctuation.'",
"def contains_entity(tag):\n return bool(ENTITY_CHARS_RE.search(tag))",
"def is_unstructured_text(self):\r\n\r\n return not self.label.isupper()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
True if the token text is that of an initial.
|
def is_initial(self):
return self._RE_INITIAL.match(self.tok)
|
[
"def _is_expansion_initial_acronym(acro: str, full: str) -> bool:\n words = full.split()\n if len(words) == 1:\n return True\n last_word = words[-1]\n initial = last_word[0]\n pos = acro.lower().rfind(initial.lower()) # Last occurence of initial in the acronym.\n if pos < 0:\n return False\n return _is_possible_expansion(acro[pos:], last_word)",
"def isEmpty(self) -> bool:\n return self.token_t.type == \"\"",
"def _single_prefix_char(token: str, parser: argparse.ArgumentParser) -> bool:\n return len(token) == 1 and token[0] in parser.prefix_chars",
"def grammatical(self, text):\n return text == self.parse(text)",
"def is_superlative(tok):\n text = tok.text.lower()\n if text in irr_superlatives:\n return True\n elif re.search(\"est$\", text):\n return text[:-3] == tok.lemma_\n return False",
"def does_end_token_exist(self) -> bool:",
"def is_user_mention(self):\n temp = nltk.TweetTokenizer(strip_handles=True)\n result = temp.tokenize(self.token)\n if result == []:\n return True\n else:\n return False",
"def is_real_sentence(only_token, sentence):\n \n first_word = \"\"\n if only_token:\n first_word = sentence[0]\n else:\n first_word = sentence[0][0]\n\n if '---------------------' in first_word or first_word == '-DOCSTART-':\n return False\n else:\n return True",
"def has_more_tokens(self):",
"def check_token(self, kind):\n return kind == self.cur_token.kind",
"def is_unstructured_text(self):\r\n\r\n return not self.label.isupper()",
"def _is_sentinel(self, lit, cls):\n return cls in self.sentinels[lit]",
"def _is_token(self, expr):\n return isinstance(expr, dict) and 'prototype' in expr \\\n and expr['prototype'] is not None",
"def match(self, token):\n return token == self.token",
"def __isanon(self, term):\n\t\treturn term == '_' or term == '?'",
"def begin_token(self) -> str:",
"def is_initial(self):\n if State.initial_states is None:\n return False\n\n if isinstance(State.initial_states, list):\n return self.name in State.initial_states\n return self.name == self.initial_states",
"def is_athenz_role_token(token):\n return token.startswith('v=Z1;')",
"def is_maybe_off_by_one(text, anno):\n span = anno.text_span()\n start = span.char_start\n end = span.char_end\n start_ok = start == 0 or text[start - 1].isspace()\n end_ok = end == len(text) or text[end].isspace()\n return not (start_ok and end_ok)",
"def IsLiteral(self) -> bool:"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
True if the token text is all alphabetic.
|
def is_alpha(self):
return self._RE_ALPHA.match(self.tok)
|
[
"def covers_alphabet(sentence: str) -> bool:\n # greater than or equal to include , ; ! etc.\n return set(sentence.lower()) >= set(\"abcdefghijklmnopqrstuvwxyz\")",
"def is_alphabetic(word_str):\n return re.match(r'^[a-zA-Z]+$', word_str) is not None",
"def isAlpha(self, char):\n return char in self._alphabet",
"def is_alpha(uchar):\n return (u'\\u0041' <= uchar <= u'\\u005a') or (u'\\u0061' <= uchar <= u'\\u007a')",
"def _is_in_alphabet(self, char: str):\n in_alphabet = False\n for rotor in self.rotors:\n in_alphabet = rotor._is_char_in_alphabet(char)\n if in_alphabet:\n break\n \n return in_alphabet",
"def contains_all_letters(text):\n\t\n\t# use a flag to hold our return value, to support having only one return\n\treturn_value = True\n \n # use a set to get the unique values from the input text into a \n # quickly searchable data structure, force everything to be lowercase\n # so that we don't have to search for upper and lower\n\ts = set(text.lower())\n\n\t# if the number of unique characters in the string is less than the\n # size of the alphabet, it cannot contain the full alphabet\n\tif len(s) >= 26:\n\t\t\n\t # the .ascii_lowercase method returns a string containing the lowercase\n\t # alphabet, iterate through looking for each of the letters\n\t\tfor a in string.ascii_lowercase:\n\t\t\t# if at any time we cannot find a letter, we can stop searching\n\t\t\tif not a in s:\n\t\t\t\treturn_value = False\n\t\t\t\tbreak\n\n\telse:\n\t\treturn_value = False\n\n\treturn return_value",
"def checkAlphabet(self, sequence):\n ok = [ch for ch in sequence if ch in self.E]\n if len(ok) < len(sequence):\n return False \n return True",
"def _isAlphaNum(self, word): #$NON-NLS-1$\r\n rVal = False\r\n word = getNoneString(word)\r\n if word:\r\n count = 0\r\n for c in word:\r\n if c in ZBaseSpellCheckContext.NON_ALPHANUM:\r\n count = count + 1\r\n rVal = count == len(word)\r\n return rVal",
"def test_isInAlphabet(self):\n sEncodings = StringUtils.stringEncodings()\n lEncodings = StringUtils.languageEncodings()\n \n self.assertTrue(isInAlphabet(\"Howdy\", sEncodings['ASCII'],lEncodings['ENGLISH']))\n self.assertTrue(isInAlphabet(\"First\", sEncodings['ASCII'], lEncodings['ENGLISH']))\n self.assertFalse(isInAlphabet(\"0123456789\", sEncodings['ASCII'], lEncodings['ENGLISH']))\n self.assertTrue(isInAlphabet(\"g\", sEncodings['ASCII'], lEncodings['ENGLISH']))",
"def IsLetter(c):\n return IsBaseChar(c) or IsIdeographic(c)",
"def _validate_alphabet(self):\n Sequence.validate_alphabet(str(self), self.alphabet)",
"def is_in_alphabet(self, cur_ngram):\r\n for letter in cur_ngram:\r\n if letter not in self.alphabet:\r\n return False\r\n\r\n return True",
"def contain_alpha (line):\n for c in line: \n if c.isalpha (): \n return True \n return False",
"def is_isogram(word):\n alphabet_freq = Counter()\n for c in word.lower():\n if c.isalpha():\n alphabet_freq[c] += 1\n if alphabet_freq[c] > 1:\n return False\n\n return True",
"def my_isalpha(s):\n registry_1 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n registry_2 = registry_1.lower()\n alpha = True\n if len(s) > 0:\n for i in range(0, len(s)):\n if s[i] not in registry_1 or s[i] not in registry_2:\n alpha = False\n return(alpha)",
"def is_letter(character: str) -> bool:\n return ord('a') <= ord(character) <= ord('z')",
"def alphanumeric(string):\n for char in string:\n if char.isalpha() or char.isdigit():\n continue\n else:\n return False\n return True",
"def contains_letters(string):\n return bool(re.search(r'[a-z]', string, re.IGNORECASE))",
"def is_first_letter(val):\n return ord(val[0].lower()) in range(ord('a'), ord('z') + 1)",
"def single_letter(word):\n\tif len(word)==1 and word!='a' and word!='I':\n\t\treturn True\n\treturn False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Perform the first pass of annotation, which makes decisions based purely on the word type of each token.
|
def _annotate_first_pass(self, tokens):
for aug_tok in tokens:
self._first_pass_annotation(aug_tok)
yield aug_tok
|
[
"def onApplyAnnotation(self):\r\n # get fiducial, output and ref nodes\r\n fiducialNode = self.inputFiducialsNodeSelector.currentNode()\r\n outputVolumeNode = self.outputSelector.currentNode()\r\n refNode = self.refSelector.currentNode()\r\n\r\n # Run the annotation stuff\r\n self.logic = WaspLogic()\r\n self.logic.runAnn(fiducialNode, outputVolumeNode, refNode)",
"def take_action(self, parsed_args):\n args = sys.argv[1:]\n self.log.info('Annotation Development')\n self.log.debug('debugging [Annotation]')\n\n url = parsed_args.url\n doc = parsed_args.doc\n self.log.info('Arguments: '+ str(args) + '\\n')\n\n if url:\n req_ob = requests.get(str(url).strip())\n soup = BeautifulSoup(req_ob.content, \"html.parser\")\n\n \n try: \n abstract = soup.find_all(\"p\", {\"id\" : \"p-2\"})[0]\n abs_text = trimlines(abstract.text).encode('ascii','ignore')\n data = {'content' : str(abs_text)}\n\n response = requests.get(server_url + '/annotations/entities', params = data)\n\n if response.status_code == 200:\n annotated_data = response.json()\n self.app.stdout.write(str(annotated_data))\n hpo_terms = []\n\n if parsed_args.output:\n fopen = open(str(parsed_args.output) + '_annotated_data.txt', 'w')\n fopen.write(str(annotated_data) + '\\n')\n\n fopen.close()\n\n for ob in annotated_data:\n token = ob['token']\n if 'Phenotype' in token['categories']:\n term = str(token['terms'][0])\n if term not in hpo_terms:\n hpo_terms.append(token['terms'][0])\n\n self.app.stdout.write('\\n HPO Terms:\\n')\n for term in hpo_terms:\n self.app.stdout.write(str(term) + '\\n')\n\n if parsed_args.output:\n fopen = open(str(parsed_args.output) + '_hpo_terms.txt', 'w' )\n fopen.write('HPO Terms:\\n')\n for term in hpo_terms:\n fopen.write(str(term) + '\\n')\n\n fopen.close()\n else:\n self.app.stdout.write(str(response.status_code))\n except:\n self.app.stdout.write(\"Abstract Not found\\n\")\n \n if doc:\n html_doc = open(str(doc), 'r')\n soup = BeautifulSoup(html_doc, 'html.parser')\n\n try:\n self.app.stdout.write('Title:' + str(soup.title.get_text()) + '\\n')\n except:\n pass\n\n try:\n meta_list = soup.find_all('meta', {'name' : 'dc.Description'})\n content_list= [k.get('content') for k in meta_list]\n content = ' '.join(content_list)\n data = {'content' : str(content)}\n \n response = requests.get(server_url + '/annotations/entities', params = data)\n\n if response.status_code == 200:\n annotated_data = response.json()\n self.app.stdout.write(str(annotated_data))\n hpo_terms = []\n\n if parsed_args.output:\n fopen = open(str(parsed_args.output) + '_annotated_data.txt', 'w')\n fopen.write(str(annotated_data) + '\\n')\n\n fopen.close()\n\n for ob in annotated_data:\n token = ob['token']\n if 'Phenotype' in token['categories']:\n term = str(token['terms'][0])\n if term not in hpo_terms:\n hpo_terms.append(token['terms'][0])\n\n self.app.stdout.write('\\n HPO Terms:\\n')\n for term in hpo_terms:\n self.app.stdout.write(str(term) + '\\n')\n\n if parsed_args.output:\n fopen = open(str(parsed_args.output) + '_hpo_terms.txt', 'w' )\n fopen.write('HPO Terms:\\n')\n for term in hpo_terms:\n fopen.write(str(term) + '\\n')\n\n fopen.close()\n else:\n self.app.stdout.write(str(response.status_code)+ '\\n')\n\n except:\n self.app.stdout.write('Meta Data not Found\\n')",
"def annotate(args):\n from .annotation.annotation import annotate as anno\n anno(args)",
"def check(self, nodeset, document):\n if self.annotation is not None:\n for node in nodeset:\n annotation = self.annotation.format(\n node=node,\n check=self)\n node.annotate(annotation)",
"def all_annotations(num, test) -> None:\n return None",
"def annotate(self):\n for line in self.line_map:\n if line.is_tier_line:\n line.annotations = self._extract_annots(line.tier, line.onset,\n line.offset, line.content,\n line.index)\n self.annotated = True",
"def _setup_prediction_op(self):",
"def train_simple_object_detector(*args, **kwargs): # real signature unknown; restored from __doc__\n pass",
"def setAnnotation(*args, **kwargs):\n \n pass",
"def test_create_annotations(self):\n segmentation = adapter.SFFSegmentation() # annotation\n segmentation.name = u\"name\"\n segmentation.software_list = adapter.SFFSoftwareList()\n segmentation.software_list.append(\n adapter.SFFSoftware(\n name=u\"Software\",\n version=u\"1.0.9\",\n processing_details=u\"Processing details\"\n )\n )\n segmentation.details = u\"Details\"\n # global external references\n segmentation.global_external_references = adapter.SFFGlobalExternalReferenceList()\n segmentation.global_external_references.append(\n adapter.SFFExternalReference(\n resource=u'one',\n url=u'two',\n accession=u'three'\n )\n )\n segmentation.global_external_references.append(\n adapter.SFFExternalReference(\n resource=u'four',\n url=u'five',\n accession=u'six'\n )\n )\n segmentation.segments = adapter.SFFSegmentList()\n segment = adapter.SFFSegment()\n biol_ann = adapter.SFFBiologicalAnnotation()\n biol_ann.name = u\"Segment1\"\n biol_ann.description = u\"Some description\"\n # external refs\n biol_ann.external_references = adapter.SFFExternalReferenceList()\n biol_ann.external_references.append(\n adapter.SFFExternalReference(\n resource=u\"sldjflj\",\n accession=u\"doieaik\"\n )\n )\n biol_ann.external_references.append(\n adapter.SFFExternalReference(\n resource=u\"sljd;f\",\n accession=u\"20ijalf\"\n )\n )\n biol_ann.external_references.append(\n adapter.SFFExternalReference(\n resource=u\"lsdjlsd\",\n url=u\"lsjfd;sd\",\n accession=u\"23ijlsdjf\"\n )\n )\n biol_ann.number_of_instances = 30\n segment.biological_annotation = biol_ann\n # colour\n segment.colour = adapter.SFFRGBA(\n red=1,\n green=0,\n blue=1,\n alpha=0\n )\n segmentation.segments.append(segment)\n # export\n # segmentation.export(os.path.join(TEST_DATA_PATH, u'sff', u'v0.7', u'test_annotated_segmentation.sff'))\n # assertions\n self.assertEqual(segmentation.name, u'name')\n self.assertEqual(segmentation.version, segmentation._local.schema_version) # automatically set\n software = segmentation.software_list[0]\n self.assertEqual(software.name, u\"Software\")\n self.assertEqual(software.version, u\"1.0.9\")\n self.assertEqual(software.processing_details, u\"Processing details\")\n self.assertEqual(segmentation.details, u\"Details\")\n # global external references\n self.assertEqual(segmentation.global_external_references[0].resource, u'one')\n self.assertEqual(segmentation.global_external_references[0].url, u'two')\n self.assertEqual(segmentation.global_external_references[0].accession, u'three')\n self.assertEqual(segmentation.global_external_references[1].resource, u'four')\n self.assertEqual(segmentation.global_external_references[1].url, u'five')\n self.assertEqual(segmentation.global_external_references[1].accession, u'six')\n # segment: biological_annotation\n self.assertEqual(segment.biological_annotation.name, u\"Segment1\")\n self.assertEqual(segment.biological_annotation.description, u\"Some description\")\n self.assertEqual(len(segment.biological_annotation.external_references), 3)\n self.assertEqual(segment.biological_annotation.external_references[0].resource, u\"sldjflj\")\n self.assertEqual(segment.biological_annotation.external_references[0].accession, u\"doieaik\")\n self.assertEqual(segment.biological_annotation.external_references[1].resource, u\"sljd;f\")\n self.assertEqual(segment.biological_annotation.external_references[1].accession, u\"20ijalf\")\n self.assertEqual(segment.biological_annotation.external_references[2].resource, u\"lsdjlsd\")\n 
self.assertEqual(segment.biological_annotation.external_references[2].url, u\"lsjfd;sd\")\n self.assertEqual(segment.biological_annotation.external_references[2].accession, u\"23ijlsdjf\")\n self.assertEqual(segment.biological_annotation.number_of_instances, 30)\n # colour\n self.assertEqual(segment.colour.value, (1, 0, 1, 0))",
"def runner_on_first(self, runner_on_first):\n\n self._runner_on_first = runner_on_first",
"def some_annotations(num: int, test) -> None:\n return None",
"def _annotate_tokens(self, tokens):\n # Make a preliminary pass through the document, marking likely\n # sentence breaks, abbreviations, and ellipsis tokens.\n tokens = self._annotate_first_pass(tokens)\n\n # Make a second pass through the document, using token context\n # information to change our preliminary decisions about where\n # sentence breaks, abbreviations, and ellipsis occurs.\n tokens = self._annotate_second_pass(tokens)\n\n return tokens",
"def isFirst(entity):",
"def step1(self): # real signature unknown; restored from __doc__\n pass",
"def test_map_1(self):\n annotations = self.get_annotations(0)\n detections = self.get_detections_from(annotations)\n self.assertEqual(1., float(self.map(annotations, detections)[0]))",
"def _mkannotation(\n pa: typ.Dict[str, typ.Any],\n page: Page\n) -> typ.Optional[Annotation]:\n\n subtype = pa.get('Subtype')\n annot_type = None\n assert isinstance(subtype, PSLiteral)\n try:\n annot_type = ANNOT_SUBTYPES[subtype]\n except KeyError:\n pass\n\n if annot_type is None:\n if subtype not in IGNORED_ANNOT_SUBTYPES:\n logger.warning(\"Unsupported %s annotation ignored on %s\", subtype.name, page)\n return None\n\n contents = pa.get('Contents')\n if contents is not None:\n # decode as string, normalise line endings, replace special characters\n contents = cleanup_text(pdfminer.utils.decode_text(contents))\n\n rgb: typ.Optional[RGB] = None\n color = pa.get('C')\n if color is not None:\n if (isinstance(color, list)\n and len(color) == 3\n and all(isinstance(e, (int, float)) and 0 <= e <= 1 for e in color)):\n rgb = RGB(*color)\n else:\n logger.warning(\"Invalid color %s in annotation on %s\", color, page)\n\n # Rect defines the location of the annotation on the page\n rect = pdftypes.resolve1(pa.get('Rect'))\n\n # QuadPoints are defined only for \"markup\" annotations (Highlight, Underline, StrikeOut,\n # Squiggly), where they specify the quadrilaterals (boxes) covered by the annotation.\n quadpoints = pdftypes.resolve1(pa.get('QuadPoints'))\n\n author = pdftypes.resolve1(pa.get('T'))\n if author is not None:\n author = pdfminer.utils.decode_text(author)\n\n created = None\n dobj = pa.get('CreationDate')\n # some pdf apps set modification date, but not creation date\n dobj = dobj or pa.get('ModDate')\n # poppler-based apps (e.g. Okular) use 'M' for some reason\n dobj = dobj or pa.get('M')\n createds = pdftypes.resolve1(dobj)\n if createds is not None:\n createds = pdfminer.utils.decode_text(createds)\n created = decode_datetime(createds)\n\n return Annotation(page, annot_type, quadpoints, rect,\n contents, author=author, created=created, color=rgb)",
"def test_get_annotations(self):\n\n itemuri = \"http://localhost:3000/catalog/cooee/items/1-012\"\n docurl = \"http://localhost:3000/documents/cooee/1-012-plain.txt\"\n\n ann = self.api.get_annotations(itemuri)\n \n self.assertIn('alveo:annotations', ann)\n self.assertIn('commonProperties', ann)\n self.assertIn('@context', ann)\n \n self.assertEqual(2, len(ann['alveo:annotations']))\n \n self.assertEqual('dada:TextAnnotation', ann['alveo:annotations'][0]['@type'])\n \n self.assertEqual(docurl, ann['commonProperties']['alveo:annotates'])\n \n ann = self.api.get_annotations(itemuri, {\"user\":\"Steve.Cassidy@mq.edu.au\"})\n ann = self.api.get_annotations(itemuri, {\"priorTo\":datetime.strptime(\"2013-12-20T12:20:00\", '%Y-%m-%dT%I:%M:%S')})\n \n pass",
"def test_correct_annotations(self):\n for doc in self.prj.documents:\n if doc.id == 26608:\n assert len(doc.annotations(self.prj.get_label_by_id(579))) == 1"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return a tokenized copy of s.
|
def tokenize(self, s):
if overridden(self.batch_tokenize):
return self.batch_tokenize([s])[0]
else:
raise NotImplementedError()
|
[
"def tokenize(self):",
"def __tokenize(self, is_useful=None):\n unfiltered_tokens = nltk.tokenize.word_tokenize(self.document)\n if is_useful:\n return filter(is_useful, unfiltered_tokens)\n else:\n return unfiltered_tokens",
"def tokenize(self) :\n\n\t\traw1 = re.sub(r'\\W+', ' ', self.raw.lower())\n\t\treturn raw1.split()",
"def tokenize(self, sentence):\n ...",
"def sentence_tokenizer(self):\n\n return self._sentence_tokenizer",
"def tokenize_text(self):\n offset = 0\n self.tokens = []\n self.lexes = []\n self.sentences = []\n while offset < self.length:\n (space, word) = self.slurp_token(offset)\n if word[2]:\n tokens = self._split_word(word)\n self.tokens.append(tokens)\n offset = word[1]\n self._set_sentences()\n self._split_contractions()\n self._set_lexes()\n return TokenizedText(self.sentences, self.lexes)",
"def _get_tokens(self):\n return new_tokens",
"def _get_tokenized_rep(self, field):\n return \" \".join([x.text for x in self._tokenizer.tokenize(field.strip())])",
"def get_sent_tokens(doc: str):\n return sent_tokenize(doc)",
"def tokens(s):\n words = re.findall(r\"\\b[\\w']+\\b\", s)\n return words",
"def tag_tokenized(self, sentence):\n return self.tagger.tag(sentence)",
"def _custom_tokenizer(self, text):\n normalized_string = self._pre_tokenizer.pre_tokenize_str(text)\n words = [string[0] for string in normalized_string]\n offsets = [string[1] for string in normalized_string]\n spaces = []\n for i in range(len(words)):\n if i == len(words) - 1:\n spaces.append(False)\n break\n spaces.append(True if offsets[i][1] != offsets[i+1][0] else False)\n # default is None\n spaces = None if not spaces else spaces\n return Doc(self.spacy_tokenizer.vocab, words=words, spaces=spaces)",
"def tokenize(lines):\n return word_tokenize(lines)",
"def getTokens(self) -> List[str]:\n return self.__newTokens.copy()",
"def tokens(self):\n return self._tokens",
"def reconstruct_tokenized(tokenized_text: List[List[str]]) -> Generator[AlignedToken, None, None]:\n SPACES_BEFORE: str = \"([“«\"\n NO_SPACE_BEFORE: str = \".,:!?)]”»\"\n\n orig_pos: int = 0\n adj_pos: int = 0\n\n for s_idx, s in enumerate(tokenized_text):\n if s_idx > 0:\n yield AlignedToken(\"\\n\", (orig_pos, orig_pos + 1), (adj_pos, adj_pos + 1))\n orig_pos += 1\n adj_pos += 1\n\n prev_token: str = \"\"\n for w_idx, w in enumerate(s):\n w_stripped = w.strip()\n\n if not w_stripped:\n # If original text contained a space(-es), let's adjust original position for it\n # + one space after\n orig_pos += len(w)\n if w_idx > 0:\n orig_pos += 1\n\n continue\n\n if w_idx > 0:\n if w_stripped not in NO_SPACE_BEFORE and not prev_token in SPACES_BEFORE:\n yield AlignedToken(\" \", (orig_pos, orig_pos + 1), (adj_pos, adj_pos + 1))\n orig_pos += 1\n adj_pos += 1\n else:\n # If we are omitting the space (for example, before comma), we\n # adjusting original position as if it's there\n orig_pos += 1\n\n yield AlignedToken(w_stripped, (orig_pos, orig_pos + len(w)), (adj_pos, adj_pos + len(w_stripped)))\n\n orig_pos += len(w)\n adj_pos += len(w_stripped)\n\n prev_token = w_stripped",
"def tokenize(s):\n hashset = set()\n if s == '':\n return hashset\n for i in xrange(len(s) - 5):\n hashset.add(s[i:i + 5])\n return hashset",
"def tokens(text, tok_size=3):\n return [text[i : i + tok_size] for i in range(len(text) - tok_size + 1)]",
"def untokenize(tokens) :\n if len(tokens)>0 and tokens and hasattr(tokens[0], '__iter__') :\n return [untokenize(t) for t in tokens]\n return \"\".join([\" \"+i if not i.startswith(\"'\") and i not in punctuation else i for i in tokens]).strip()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Classifies candidate periods as sentence breaks, yielding a dict for each that may be used to understand why the decision was made. See format_debug_decision() to help make this output readable.
|
def debug_decisions(self, text):
for match in self._lang_vars.period_context_re().finditer(text):
decision_text = match.group() + match.group('after_tok')
tokens = self._tokenize_words(decision_text)
tokens = list(self._annotate_first_pass(tokens))
while not tokens[0].period_final:
tokens.pop(0)
yield dict(period_index=match.end() - 1,
text=decision_text,
type1=tokens[0].type,
type2=tokens[1].type,
type1_in_abbrs=bool(tokens[0].abbr),
type1_is_initial=bool(tokens[0].is_initial),
type2_is_sent_starter=tokens[1].type_no_sentperiod in self._params.sent_starters,
type2_ortho_heuristic=self._ortho_heuristic(tokens[1]),
type2_ortho_contexts=set(self._params._debug_ortho_context(tokens[1].type_no_sentperiod)),
collocation=(tokens[0].type_no_sentperiod, tokens[1].type_no_sentperiod) in self._params.collocations,
reason=self._second_pass_annotation(tokens[0], tokens[1]) or REASON_DEFAULT_DECISION,
break_decision=tokens[0].sentbreak, )
|
[
"def get_decisionCPTs(self, mode=None):\n cptdict = {}\n if mode == 'basename':\n try:\n for bn in list(self.bn_part.keys()):\n if self.bn_part[bn][0].player != 'nature':\n cptdict[bn] = self.bn_part[bn][0].CPT\n except AttributeError:\n raise TypeError('Use mode=\"basename\" for iterSemiNFG only')\n else:\n for p in self.players:\n for n in self.partition[p]:\n cptdict[n.name] = n.CPT\n return cptdict",
"def generate_decision_table(self):\n for rule in self.rules:\n values = []\n cols = []\n # Antecedent\n for cedent in rule[1]:\n cols.append(cedent.split(\"<:> \")[0])\n value = cedent.split(\"<:> \")[1]\n if value.lower() == \"nan\":\n values.append(np.NaN)\n else:\n values.append(value)\n # Subsequent\n cols.append(rule[0].split(\"<:> \")[0])\n value = rule[0].split(\"<:> \")[1]\n if value.lower() == \"nan\":\n values.append(np.NaN)\n else:\n values.append(value)\n\n df2 = pd.DataFrame([values], columns=cols)\n self.decision_table = self.decision_table.append(df2, sort=False, ignore_index=True)\n self.support.append(rule[2])\n self.confidence.append(rule[3])",
"def sentence_rendered(self):\n all = { }\n for sent in self.sentence_set.filter(kind__in=('R', 'C')).order_by('date'):\n if sent.kind == 'C' or sent.amount_kind == 'C':\n all.setdefault('required', []).append(sent)\n else:\n all.setdefault('options', []).append(sent)\n return all",
"def break_down_naive(result):\n break_down = {}\n if result.p_pos > result.p_neg:\n break_down[\"class\"] = \"Positive\"\n break_down[\"polarity\"] = \"{:.2f}\".format(result.p_pos)\n elif result.p_pos < result.p_neg:\n break_down[\"class\"] = \"Negative\"\n break_down[\"polarity\"] = \"{:.2f}\".format(result.p_neg)\n else:#if the text is neutral the pos and neg is 0.5 exactly\n break_down[\"class\"] = \"Neutral\"\n break_down[\"polarity\"] = \"0.5\"\n\n return break_down",
"def theorize_text(s, classifier, data, dict_result = True):\n\n\tpredictions = classifier.decision_function([s]) #we want to know probabilities! this returns a list of lists of values\n\tguess_values = defaultdict()\n\t\n\t#populate dictionary with decisiion function per author\n\tfor index1, prediction in enumerate(predictions): #loop through predictions (f there are multiple )\n\t\tfor index2, value in enumerate(prediction): #loop through each guess and the probability\n\t\t\tguess_values[data.target_names[index2]] = value #save prediction to dictionary, getting name of author corresponding to index in prediction \n\tif dict_result == True:\n\t\treturn guess_values #return dictionary of guesses for the given string\n\telse:\n\t\toutput = \"\"\n\t\tfor author, value in guess_values.items():\n\t\t\toutput += author + \": \" + str(value)+\"\\n\\n\"\n\treturn output",
"def write_sentence_results_exp(sent_txt, inname, prop_cand_group, ofh, actors,\n sactors, groles, pospro=POSPRO):\n # create individual propositions of relevant characteristics\n all_indivs = []\n if prop_cand_group.prop_candidates:\n for prop in prop_cand_group.prop_candidates:\n # skip incomplete props\n if not prop.A0 or not prop.UTT:\n continue\n # skip prop if actor in utt\n #TODO: should it be if actor == utt ?\n utts_with_actor = 0\n for utt in prop.UTT:\n if ut.find_actors_in_argu(actors, utt.surface):\n utts_with_actor += 1\n if len(prop.UTT) == utts_with_actor:\n continue\n indivs = []\n # sentence id is id of pm for first ok prop\n sent_id = prop.pm.sentence\n # remove non-countries for now\n for a0 in [a for a in prop.A0 if a.atype != \"UNK\"]:\n pospro_penalty = 0.0\n sorted_utts = sorted([u for u in prop.UTT],\n key=lambda usu: usu.start)\n msg = \" \".join([u.surface for u in sorted_utts\n if not ut.find_actors_in_argu(actors,\n u.surface)])\n # postprocess actors\n if pospro:\n norm_a0_sfc, pospro_penalty = ut.post_process_prop(\n a0.surface, prop.pm.surface, msg)\n else:\n norm_a0_sfc = a0.surface\n # ut.post_process returns False if proposition should be skipped\n if not norm_a0_sfc:\n continue\n # the filtering can create propositions equal to existing ones\n if (norm_a0_sfc, prop.pm.surface, prop.pm.type, msg) in indivs:\n continue\n # add confidence score\n conf_sco = ut.score_proposition(\n norm_a0_sfc, prop.pm.surface, msg,\n [pospro_penalty], sactors, groles)\n indivs.append((norm_a0_sfc, prop.pm.surface, prop.pm.ptype,\n msg, conf_sco))\n all_indivs.extend(indivs)\n # write out\n if all_indivs:\n ofh.write(u\"{}-{}\\t{}\\n\".format(\n re.sub(r\"(\\.txt|\\.html).+$\", r\"\\1\", inname), sent_id, sent_txt))\n for ip in all_indivs:\n ofh.write(u\"{}\\t{}\\t{}\\t{}\\t{}\\n\".format(ip[0], ip[1], ip[2], ip[3],\n ip[-1]))\n ofh.write(\"\\n\")",
"def _get_periods(self):\n values = RevtreeModule.PERIODS\n periods = []\n days = values.keys()\n days.sort()\n for d in days:\n periods.append( { 'value' : d, 'label' : values[d] } )\n return periods",
"def calc_compartment_periods(\n periods: Dict[str, float], periods_calculated: dict\n) -> Dict[str, float]:\n final_periods = {**periods}\n for calc_period_name, calc_period_def in periods_calculated.items():\n period = calc_period_def[\"total_period\"]\n props_def = calc_period_def[\"proportions\"]\n total_props = 0\n for comp_name, prop in props_def.items():\n final_periods[comp_name] = period * prop\n total_props += prop\n\n assert total_props == 1, f\"Proportions for {calc_period_name} must sum to 1\"\n\n return final_periods",
"def _calculate_classification_report(self) -> dict:\n pass",
"def calculateSentenceProbs(sentences, n, n_1gram, ngram, mode, zero_prob):\n\tdict_p = {}\n\tfor sentence in sentences:\n\t\tif sentence != \"\":\n\t\t\t# Split sentence into words\n\t\t\tseq = sentence.split()\n\t\t\t# calculate probability of sentence\n\t\t\tp = calculateSentenceProb(seq, n, n_1gram, ngram, mode, zero_prob)\n\t\t\tdict_p[sentence] = p\n\treturn dict_p",
"def predict(report_to_predict_file, output_file, title, date):\r\n\t# parse text from file\r\n\treport_to_predict = \"\"\r\n\twith open(report_to_predict_file, 'r', newline = '', encoding = 'ISO-8859-1') as filetoread:\r\n\t\tdata = filetoread.read()\r\n\t\treport_to_predict = prp.remove_u(data)\r\n\t\r\n\t# load postprocessingand min-max confidence score for both tactics and techniques predictions\r\n\tparameters = joblib.load(\"classification_tools/data/configuration.joblib\")\r\n\tmin_prob_tactics = parameters[2][0]\t\r\n\tmax_prob_tactics = parameters[2][1]\r\n\tmin_prob_techniques = parameters[3][0]\r\n\tmax_prob_techniques = parameters[3][1]\r\n\t\r\n\tpred_tactics, predprob_tactics, pred_techniques, predprob_techniques = clt.predict(report_to_predict, parameters)\r\n\t\r\n\t# change decision value into confidence score to display\r\n\tfor i in range(len(predprob_tactics[0])):\r\n\t\tconf = (predprob_tactics[0][i] - min_prob_tactics) / (max_prob_tactics - min_prob_tactics)\r\n\t\tif conf < 0:\r\n\t\t\tconf = 0.0\r\n\t\telif conf > 1:\r\n\t\t\tconf = 1.0\r\n\t\tpredprob_tactics[0][i] = conf*100\r\n\tfor j in range(len(predprob_techniques[0])):\r\n\t\tconf = (predprob_techniques[0][j] - min_prob_techniques) / (max_prob_techniques - min_prob_techniques)\r\n\t\tif conf < 0:\r\n\t\t\tconf = 0.0\r\n\t\telif conf > 1:\r\n\t\t\tconf = 1.0\r\n\t\tpredprob_techniques[0][j] = conf*100\r\n\t\r\n\t#prepare results to display\r\n\tttps = []\r\n\tto_print_tactics = []\r\n\tto_print_techniques = []\r\n\tfor ta in range(len(pred_tactics[0])):\r\n\t\tif pred_tactics[0][ta] == 1:\r\n\t\t\tttps.append(clt.CODE_TACTICS[ta])\r\n\t\t\tto_print_tactics.append([1, clt.NAME_TACTICS[ta], predprob_tactics[0][ta]])\r\n\t\telse:\r\n\t\t\tto_print_tactics.append([0, clt.NAME_TACTICS[ta], predprob_tactics[0][ta]])\r\n\tfor te in range(len(pred_techniques[0])):\r\n\t\tif pred_techniques[0][te] == 1:\r\n\t\t\tttps.append(clt.CODE_TECHNIQUES[te])\r\n\t\t\tto_print_techniques.append([1, clt.NAME_TECHNIQUES[te], predprob_techniques[0][te]])\r\n\t\telse:\r\n\t\t\tto_print_techniques.append([0, clt.NAME_TECHNIQUES[te], predprob_techniques[0][te]])\r\n\tto_print_tactics = sorted(to_print_tactics, key = itemgetter(2), reverse = True)\r\n\tto_print_techniques = sorted(to_print_techniques, key = itemgetter(2), reverse = True)\r\n\tprint(\"Predictions for the given report are : \")\r\n\tprint(\"Tactics :\")\r\n\tfor tpta in to_print_tactics:\r\n\t\tif tpta[0] == 1:\r\n\t\t\tprint(Fore.YELLOW + '' + tpta[1] + \" : \" + str(tpta[2]) + \"% confidence\")\r\n\t\telse:\r\n\t\t\tprint(Fore.CYAN + '' + tpta[1] + \" : \" + str(tpta[2]) + \"% confidence\")\r\n\tprint(Style.RESET_ALL)\r\n\tprint(\"Techniques :\")\r\n\tfor tpte in to_print_techniques:\r\n\t\tif tpte[0] == 1:\r\n\t\t\tprint(Fore.YELLOW + '' + tpte[1] + \" : \"+str(tpte[2])+\"% confidence\")\r\n\t\telse:\r\n\t\t\tprint(Fore.CYAN + '' + tpte[1] + \" : \"+str(tpte[2])+\"% confidence\")\r\n\tprint(Style.RESET_ALL)\r\n\tif output_file != '':\r\n\t\tsave_stix_file(report_to_predict, title, date, ttps, output_file)\r\n\t\tprint(\"Results saved in \" + output_file)",
"def print_candidate_stats(self):\n if not self.docs_entities:\n print(\"No candidates info.\")\n return\n\n # Number of entities with no candidates (no data points)\n n_no_cand = 0\n # Number of entities where ground truth is among the candidates\n n_pos_labels = 0\n # Number of entities where GT is not among the candidates\n n_no_pos_labels = 0\n # Number of candidates excluding the GT candidate\n n_neg_labels = 0\n\n # Total number of named entities\n n_ne = 0\n # Only named entities in the wikidata KB\n n_ne_in_kb = 0\n # Number of named entities not linked to Wikidata KB\n n_ne_bs = 0\n # Number of candidates that belong to entities with no GT\n n_b_cands = 0\n\n for doc_entities in self.docs_entities:\n for entity in doc_entities:\n n_ne += 1\n\n if len(entity['Candidates']) == 0:\n n_no_cand += 1\n elif entity['GroundTruth'] in entity['Candidates']:\n n_pos_labels += 1\n n_neg_labels += len(entity['Candidates']) - 1\n else:\n n_no_pos_labels += 1\n n_neg_labels += len(entity['Candidates'])\n\n if entity['GroundTruth'] == 'B':\n n_ne_bs += 1\n n_b_cands += len(entity['Candidates'])\n else:\n n_ne_in_kb += len(entity['Candidates'])\n\n n_cand = n_pos_labels + n_neg_labels\n\n print(f\"{n_ne: >7,} named entities in total\")\n print(f\"{n_cand: >7,} candidates in total \"\n f\"(total number of data points)\")\n print(f\"{n_pos_labels: >7,} / {n_cand: >7,} positive labels \"\n f\"({100 * n_pos_labels / n_cand: >5.2f} % all all labels )\")\n print(f\"{n_neg_labels: >7,} / {n_cand: >7,} negative labels \"\n f\"({100 * n_neg_labels / n_cand: >5.2f} % all all labels )\")\n\n print(f\"{n_no_cand: >7,} / {n_ne: >7,} \"\n f\"named entities have no candidates\")\n print(f\"{n_no_pos_labels: >7,} / {n_ne: >7,} \"\n f\"named entities where correct label is not among candidates\")\n print(f\"{n_ne_in_kb: >7,} / {n_cand: >7,} \"\n f\"candidates tagged with GT in Wikidata KB\")\n print(f\"{n_ne_bs: >7,} / {n_cand: >7,} \"\n f\"candidates for named entities not in Wikidata KB\")\n\n print(f\"{n_cand/n_ne:.1f} average number of candidates per entity\")",
"def splitSentences(self,txt):\n \n txt = txt.split()\n #txt = txt.split(\"\\s\") #DM to account for longer documents in formative evaluation - change back for impression sections only\n\n #attribute side header to each corresponding sentence\n sentences = []\n wordLoc = 0\n \n\n while(wordLoc < len(txt) ):\n currentWord = txt[wordLoc]\n if( currentWord[-1] in '.?!' ):\n if( currentWord in self.exceptionTerms ):\n wordLoc += 1\n # per discussion with A.G. dropped this exception, since assuming numbers only use decimal points if there \n # are actual decimal point digits expressed and thus the period would not be the last character of the word.\n #elif( self.digits.intersection(currentWord) and \n #not set('()').intersection(currentWord)): # word doesn't include parentheses. Is this necessary?\n #wordLoc += 1\n else:\n sentences.append(unicode(\" \"+' '.join(txt[:wordLoc+1]))) \n txt = txt[wordLoc+1:]\n wordLoc = 0\n else:\n wordLoc += 1\n\n # if any texts remains (due to failure to identify a final sentence termination,\n # then take all remaining text and put into a sentence\n if( txt ):\n sentences.append(unicode(\" \"+' '.join(txt)) )\n \n #print sentences;raw_input()\n return sentences",
"def __set_decision_classes__(self):\n decisions = []\n for decision_object in self.objects:\n decisions.append(decision_object.decision)\n return universal_tools.get_unique_and_frequency(decisions)",
"def write_sentence_results_exp_free(sent_txt, inname, prop_cand_group, ofh, actors,\n sactors, groles, pospro=POSPRO):\n # create individual propositions of relevant characteristics\n all_indivs = []\n if prop_cand_group.prop_candidates:\n for prop in prop_cand_group.prop_candidates:\n indivs = []\n # sentence id is id of pm for first ok prop\n sent_id = prop.pm.sentence\n # DONT remove non-countries for now (XXXXX never matches)\n for a0 in [a for a in prop.A0 if a.atype != \"XXXXX\"]:\n pospro_penalty = 0.0\n sorted_utts = sorted([u for u in prop.UTT],\n key=lambda usu: usu.start)\n msg = \" \".join([u.surface for u in sorted_utts\n if not ut.find_actors_in_argu(actors,\n u.surface)])\n # postprocess actors\n if pospro:\n norm_a0_sfc, pospro_penalty = ut.post_process_prop(\n a0.surface, prop.pm.surface, msg)\n else:\n norm_a0_sfc = a0.surface\n # ut.post_process returns False if proposition should be skipped\n if not norm_a0_sfc:\n continue\n # the filtering can create propositions equal to existing ones\n if (norm_a0_sfc, prop.pm.surface, prop.pm.type, msg) in indivs:\n continue\n # add confidence score\n conf_sco = ut.score_proposition(\n norm_a0_sfc, prop.pm.surface, msg,\n [pospro_penalty], sactors, groles)\n indivs.append((norm_a0_sfc, prop.pm.surface, prop.pm.ptype,\n msg, conf_sco))\n all_indivs.extend(indivs)\n # write out\n if all_indivs:\n ofh.write(u\"{}-{}\\t{}\\n\".format(\n re.sub(r\"(\\.txt|\\.html).+$\", r\"\\1\", inname), sent_id, sent_txt))\n for ip in all_indivs:\n # now outputting all fields so would not need to do 0, 1, 2, -1 ...\n ofh.write(u\"{}\\t{}\\t{}\\t{}\\t{}\\n\".format(ip[0], ip[1], ip[2], ip[3],\n ip[-1]))\n ofh.write(\"\\n\")",
"def crime_category_breakdown():\n db_request = main_db_call()\n all_crimes = [item[0] for item in db_request]\n sub_offense = offense_counter(all_crimes)\n sub_pie = color_applicator(sub_offense)\n sub_dict = {}\n for i, thing in enumerate(sub_pie):\n for key, category in UPPER_DICT.items():\n if sub_pie[i][0] in category:\n sub_dict.setdefault(key, [])\n sub_dict[key].append(sub_pie[i])\n return sub_dict",
"def calculate_probabilities(self):\n\n # o numero total de palavras de cada categoria\n total_positive = sum(self.pos_freq.values())\n total_negative = sum(self.neg_freq.values())\n\n vocab_size = len(self.counts.keys())\n\n # TODO: calculate priors for classes\n\n # Para cada palavra no dicionario de frequencia ...\n for word, freq in self.counts.iteritems():\n\n # ajustando entrada para o dicionario de probabilidades\n self.probs.setdefault(word, {}) # adiciona palavra em probs\n self.probs[word].setdefault(\"pos\", 0)\n self.probs[word].setdefault(\"neg\", 0)\n\n # ocorrencias da palavra nas classes (+1 for Laplace smoothing)\n positive_count = freq.get(\"pos\", 0) + 1\n negative_count = freq.get(\"neg\", 0) + 1\n\n pw_given_pos = (float(positive_count) /\n (total_positive + vocab_size + 1))\n pw_given_neg = (float(negative_count) /\n (total_negative + vocab_size + 1))\n\n self.probs[word][\"pos\"] = pw_given_pos\n self.probs[word][\"neg\"] = pw_given_neg\n\n # probabilities for the unknown word\n self.probs[self.unk_word] = {}\n self.probs[self.unk_word]['pos'] = 1.0 / (total_positive + vocab_size + 1)\n self.probs[self.unk_word]['neg'] = 1.0 / (total_negative + vocab_size + 1)",
"def predictModesStep(self):\n predictedProb = []\n for (i, section_entry) in enumerate(self.toPredictSections):\n logging.debug('~' * 10 +\n \"About to get predicted value for section %s (%s -> %s)\" %\n (section_entry.get_id(),\n section_entry.data.start_fmt_time, section_entry.data.end_fmt_time) +\n '~' * 10)\n if section_entry.data.sensed_mode == ecwma.MotionTypes.AIR_OR_HSR:\n predictedProb.append({'AIR_OR_HSR': 1})\n else:\n predictedProb.append(get_prediction(i, section_entry))\n\n return predictedProb",
"def breakdown_vp(self):\n # First print a breakdown of the number of points gained from each category.\n print('VP breakdown:')\n headers = ['Faction', 'Total VP', 'Round', 'Boosters', 'Endgame', 'Techs', 'Adv. Techs', 'Feds', 'QIC Actions',\n 'Tracks', 'Resources', 'Leech']\n rows = []\n for faction, stats in self.faction_stats.items():\n rows.append([\n faction,\n stats.vp,\n stats.vp_from_round_scoring,\n stats.vp_from_boosters,\n stats.vp_from_endgame,\n stats.vp_from_techs,\n stats.vp_from_adv_techs,\n stats.vp_from_feds,\n stats.vp_from_qic_act,\n stats.vp_from_tracks,\n stats.vp_from_resources,\n stats.vp_lost_from_leech,\n ])\n print(tabulate(rows, headers=headers))\n print()\n # Next, print a breakdown of what percentage of the total VP each category contributed.\n print('VP Percentages:')\n headers.remove('Total VP')\n rows = []\n for faction, stats in self.faction_stats.items():\n rows.append([\n faction,\n stats.vp_from_round_scoring / stats.vp * 100,\n stats.vp_from_boosters / stats.vp * 100,\n stats.vp_from_endgame / stats.vp * 100,\n stats.vp_from_techs / stats.vp * 100,\n stats.vp_from_adv_techs / stats.vp * 100,\n stats.vp_from_feds / stats.vp * 100,\n stats.vp_from_qic_act / stats.vp * 100,\n stats.vp_from_tracks / stats.vp * 100,\n stats.vp_from_resources / stats.vp * 100,\n stats.vp_lost_from_leech / stats.vp * 100,\n ])\n print(tabulate(rows, headers=headers, floatfmt='.2f'))\n print()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Given a text, returns a list of the (start, end) spans of sentences in the text.
|
def span_tokenize(self, text):
return [(sl.start, sl.stop) for sl in self._slices_from_text(text)]
|
[
"def split_sentences(cls, text):\n last_index = 0\n intervaled = []\n for match in cls.SENTENCE_SPLITTER.finditer(text):\n begin, end = match.span()\n intervaled.append(text[last_index:begin])\n intervaled.append(text[begin:end])\n last_index = end\n intervaled.append(text[last_index:])\n\n if len(intervaled) > 1 and not intervaled[-1].strip():\n end = intervaled.pop()\n sep = intervaled.pop()\n intervaled[-1] += sep + end\n\n return intervaled",
"def span_tokenize(self, text):\n return [sl for sl in self._slices_from_text(text)]",
"def segment(text: str):\n\tlines = segment_by_lines(text)\n\tsentences = []\n\tfor line in lines:\n\t\tstripped_line = line.strip()\n\t\tif stripped_line != '':\n\t\t\tsentences.extend(segment_by_punctuation(stripped_line))\n\treturn sentences",
"def sentences_from_text(self, text, realign_boundaries=True):\n sents = [text[sl] for sl in self._slices_from_text(text)]\n if realign_boundaries:\n sents = self._realign_boundaries(sents)\n return sents",
"def extract_sentences(text: str) -> List[str]:\n doc = nlp(text)\n return [sent.text for sent in doc.sents]",
"def get_sentences(text):\n\n\n lines = re.findall(r'\\s*([A-Z].+?[\\?\\!\\.])\\s+',text,flags=re.MULTILINE | re.DOTALL)\n \n\n return [line.replace('\\n',' ') for line in lines]",
"def __parse_sentences_from_text(text: str) -> List[Sentence]:\n doc = nlp(text)\n return doc.sentences",
"def split_into_sentences(text: str) -> typing.List[str]:\n\n return nltk.sent_tokenize(text)",
"def segment_by_lines(text: str):\n\treturn text.splitlines()",
"def get_sentences(raw_text):\n return [\"\".join(s['sentences']) for s in format_doc(raw_text)]",
"def create_idx_spans(self, text):\n idx_spans, shift = [], 0\n while shift < len(text):\n candi_spans = flatten([windowed(range(shift, len(text)+shift), length) for length in range(1, self.L)])\n idx_spans.extend(candi_spans)\n shift += 1\n\n return idx_spans",
"def text_boundaries(texts: Iterable[Text]) -> List[Tuple[int, int]]:\n # TODO docs\n def aggregate_boundaries(boundaries: pvector, text):\n return (\n boundaries + [(boundaries[-1][1], boundaries[-1][1] + len(text))]\n if boundaries else v((0, len(text)))\n )\n\n return list(reduce(aggregate_boundaries, texts, v()))",
"def get_chunks(text, start_tok, end_tok):\n ltext = text.lower()\n idx = -1\n result = []\n while True:\n start = ltext.find(start_tok.lower(), idx+1)\n if start == -1:\n break\n end = ltext.find(end_tok.lower(), start+len(start_tok))\n if start != -1 and end != -1:\n chunk = text[start:end+len(end_tok)]\n result.append(chunk)\n idx = max([idx, start, end]) # sanity\n\n return result",
"def sentence_split(text, properties={'annotators': 'ssplit', 'outputFormat': 'json'}):\n annotated = nlp.annotate(text, properties)\n sentence_split = list()\n for sentence in annotated['sentences']:\n s = [t['word'] for t in sentence['tokens']]\n k = [item.lower() for item in s if item not in [\",\", \".\", '...', '..']]\n sentence_split.append(\" \".join(k))\n return sentence_split",
"def sentences_from_text_legacy(self, text):\n tokens = self._annotate_tokens(self._tokenize_words(text))\n return self._build_sentence_list(text, tokens)",
"def splitSentences(self,txt):\n \n txt = txt.split()\n #txt = txt.split(\"\\s\") #DM to account for longer documents in formative evaluation - change back for impression sections only\n\n #attribute side header to each corresponding sentence\n sentences = []\n wordLoc = 0\n \n\n while(wordLoc < len(txt) ):\n currentWord = txt[wordLoc]\n if( currentWord[-1] in '.?!' ):\n if( currentWord in self.exceptionTerms ):\n wordLoc += 1\n # per discussion with A.G. dropped this exception, since assuming numbers only use decimal points if there \n # are actual decimal point digits expressed and thus the period would not be the last character of the word.\n #elif( self.digits.intersection(currentWord) and \n #not set('()').intersection(currentWord)): # word doesn't include parentheses. Is this necessary?\n #wordLoc += 1\n else:\n sentences.append(unicode(\" \"+' '.join(txt[:wordLoc+1]))) \n txt = txt[wordLoc+1:]\n wordLoc = 0\n else:\n wordLoc += 1\n\n # if any texts remains (due to failure to identify a final sentence termination,\n # then take all remaining text and put into a sentence\n if( txt ):\n sentences.append(unicode(\" \"+' '.join(txt)) )\n \n #print sentences;raw_input()\n return sentences",
"def get_all_spans(text, max_ngram_length):\n start_indexes = []\n for index, char in enumerate(text):\n if not char.isalnum():\n continue\n if index == 0 or not text[index - 1].isalnum():\n start_indexes.append(index)\n if index + 1 == len(text) or not text[index + 1].isalnum():\n for start_index in start_indexes[-max_ngram_length:]:\n yield start_index, index + 1",
"def split_sentences(self, text):\n return tokenize.sent_tokenize(text)[0]",
"def get_sentences_from_parsed_text(doc):\n \n # Open file and split at sentence boarders \n with open(doc,\"r\") as infile:\n infile = infile.read()\n sentences = infile.split('\\n\\n')[:-1]\n\n return sentences"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Given a text, generates the sentences in that text by only testing candidate sentence breaks. If realign_boundaries is True, includes in the sentence closing punctuation that follows the period.
|
def sentences_from_text(self, text, realign_boundaries=True):
sents = [text[sl] for sl in self._slices_from_text(text)]
if realign_boundaries:
sents = self._realign_boundaries(sents)
return sents
|
[
"def split_into_sentences(text):\n if \".)\" in text: text = text.replace(\".)\", \"<prd>)\")\n sentences = text.split(\".\")\n text = text.replace(\"<prd>\", \".\")\n for s in sentences:\n s = s.replace(\"<prd>\", \".\")\n return sentences",
"def split_sentences(text):\n text = re.sub(r' ([^ .])\\.', r' \\1~.~', text)\n text = text.replace('Inc.', 'Inc~.~')\n for c in '!?':\n text = text.replace(c + ' ', '. ')\n sents = text.split('. ')\n sents = [i.replace('~.~', '.') for i in sents]\n if sents[-1][-1] in '.!?':\n sents[-1] = sents[-1][:-1]\n print(sents)\n return sents",
"def splitSentences(self,txt):\n \n txt = txt.split()\n #txt = txt.split(\"\\s\") #DM to account for longer documents in formative evaluation - change back for impression sections only\n\n #attribute side header to each corresponding sentence\n sentences = []\n wordLoc = 0\n \n\n while(wordLoc < len(txt) ):\n currentWord = txt[wordLoc]\n if( currentWord[-1] in '.?!' ):\n if( currentWord in self.exceptionTerms ):\n wordLoc += 1\n # per discussion with A.G. dropped this exception, since assuming numbers only use decimal points if there \n # are actual decimal point digits expressed and thus the period would not be the last character of the word.\n #elif( self.digits.intersection(currentWord) and \n #not set('()').intersection(currentWord)): # word doesn't include parentheses. Is this necessary?\n #wordLoc += 1\n else:\n sentences.append(unicode(\" \"+' '.join(txt[:wordLoc+1]))) \n txt = txt[wordLoc+1:]\n wordLoc = 0\n else:\n wordLoc += 1\n\n # if any texts remains (due to failure to identify a final sentence termination,\n # then take all remaining text and put into a sentence\n if( txt ):\n sentences.append(unicode(\" \"+' '.join(txt)) )\n \n #print sentences;raw_input()\n return sentences",
"def extract_statements(\n text=None, \n nlp=None, \n make_sentence=False, \n n_min_word_paragraph=50, \n n_max_word_paragraph=200\n ):\n \n # remove non ASCII characters\n text = remove_non_ascii(text)\n \n \n lines = []\n prev = \"\"\n n_words = 0\n for line in text.split('\\n'):\n # aggregate consecutive lines where text may be broken down\n # only if next line starts with a space or previous does not end with punctation mark and between\n if((line.startswith(' ') or not prev.endswith(('.','?', '!'))) and n_words <= n_max_word_paragraph):\n prev = prev + ' ' + line\n n_words = len(prev.split())\n \n # min words in paragraph\n elif n_words <=n_min_word_paragraph:\n prev = prev + ' ' + line\n n_words = len(prev.split())\n \n else:\n # new paragraph\n lines.append(prev)\n prev = line\n n_words = 0\n \n # don't forget left-over paragraph\n lines.append(prev)\n # clean paragraphs from extra space, unwanted characters, urls, etc.\n # best effort clean up, consider a more versatile cleaner\n sentences = []\n for line in lines:\n \n # removing header number\n line = re.sub(r'^\\s?\\d+(.*)$', r'\\1', line)\n # removing trailing spaces\n line = line.strip()\n # words may be split between lines, ensure we link them back together\n line = re.sub('\\\\s?-\\\\s?', '-', line)\n # remove space prior to punctuation\n line = re.sub(r'\\s?([,:;\\.])', r'\\1', line)\n # ESG contains a lot of figures that are not relevant to grammatical structure\n line = re.sub(r'\\d{5,}', r' ', line)\n # remove mentions of URLs\n line = re.sub(r'((http|https)\\:\\/\\/)?[a-zA-Z0-9\\.\\/\\?\\:@\\-_=#]+\\.([a-zA-Z]){2,6}([a-zA-Z0-9\\.\\&\\/\\?\\:@\\-_=#])*', r' ', line)\n # remove multiple spaces\n line = re.sub('\\\\s+', ' ', line)\n \n # split paragraphs into well defined sentences using spacy\n if make_sentence:\n try:\n for part in list(nlp(line).sents):\n part_strip = str(part).strip()\n # remove senteces with only 30 characters\n if len(part_strip) > 30:\n sentences.append(part_strip)\n except ValueError:\n print(\"Check if nlp model was loaded\")\n else:\n sentences.append(line)\n \n return sentences",
"def split_sentences(cls, text):\n last_index = 0\n intervaled = []\n for match in cls.SENTENCE_SPLITTER.finditer(text):\n begin, end = match.span()\n intervaled.append(text[last_index:begin])\n intervaled.append(text[begin:end])\n last_index = end\n intervaled.append(text[last_index:])\n\n if len(intervaled) > 1 and not intervaled[-1].strip():\n end = intervaled.pop()\n sep = intervaled.pop()\n intervaled[-1] += sep + end\n\n return intervaled",
"def text_boundaries(texts: Iterable[Text]) -> List[Tuple[int, int]]:\n # TODO docs\n def aggregate_boundaries(boundaries: pvector, text):\n return (\n boundaries + [(boundaries[-1][1], boundaries[-1][1] + len(text))]\n if boundaries else v((0, len(text)))\n )\n\n return list(reduce(aggregate_boundaries, texts, v()))",
"def get_sentences(text):\n\n\n lines = re.findall(r'\\s*([A-Z].+?[\\?\\!\\.])\\s+',text,flags=re.MULTILINE | re.DOTALL)\n \n\n return [line.replace('\\n',' ') for line in lines]",
"def sentences_from_text_legacy(self, text):\n tokens = self._annotate_tokens(self._tokenize_words(text))\n return self._build_sentence_list(text, tokens)",
"def segment(text: str):\n\tlines = segment_by_lines(text)\n\tsentences = []\n\tfor line in lines:\n\t\tstripped_line = line.strip()\n\t\tif stripped_line != '':\n\t\t\tsentences.extend(segment_by_punctuation(stripped_line))\n\treturn sentences",
"def split_clauses(sent_text: str, nlp: Language) -> list:\n initial_sents = []\n if 'Quotation' in sent_text:\n initial_sents.extend(_chunk_quotations(sent_text))\n else:\n initial_sents.append(sent_text)\n final_with_conn_words = []\n for initial_sent in initial_sents:\n if initial_sent.startswith('Quotation'):\n final_with_conn_words.append(initial_sent)\n continue\n new_sents = _split_by_conjunctions(initial_sent, nlp)\n split_sents = []\n # Split by advcl IF these have their own subject/verb\n # Example: 'When I went to the store, I met George.' ('when ...' is an adverbial clause)\n for sent in new_sents:\n adv_sents = _split_advcl_clauses(sent, nlp)\n # Split by ccomp IF these have their own subject/verb\n # Example: 'He said Joe is ill.' ('Joe is ill' is a clausal complement)\n for adv_sent in adv_sents:\n comp_sents = _split_complement_clauses(adv_sent, nlp)\n for comp_sent in comp_sents:\n split_sents.extend(_split_by_conjunctions(comp_sent, nlp))\n # Check relcl\n split_sents2 = []\n for sent in split_sents:\n split_sents2.extend(_split_relcl_clauses(sent, nlp))\n # Check for advcls that are not directly associated with the root verb but still have a subj or obj and verb\n for sent in split_sents2:\n sent_span = next(nlp(sent).sents)\n advcl_verbs = []\n for token in sent_span:\n advcl_verbs.extend([child for child in token.children if child.dep_ == 'advcl'])\n new_chunks = []\n for advcl_verb in advcl_verbs: # There are some advcls remaining that are not associated w/ the root verb\n connectors = [conn for conn in advcl_verb.children if conn.dep_ in ('advmod', 'mark')]\n # Process the verb and the first connector (there should only be 1)\n if connectors:\n connector = connectors[0]\n chunks = _get_chunks(advcl_verb, connector, sent_span, 'advcl', None)\n revised_chunks = []\n for chunk in chunks:\n # Remove the connector from the middle of the text of the sentence\n revised_chunks.append(chunk.replace(f' {connector} ', space))\n new_chunks.extend(revised_chunks)\n final_with_conn_words.extend(new_chunks if new_chunks else [sent])\n # Chunks may still have beginning or trailing 'mark' words (such as 'that' in 'she claimed that')\n final_chunks = []\n for clause in final_with_conn_words:\n # Relative and connector words may be present at the beginning or end of the clauses, and should be removed\n # TODO: Is the ordering (relcl to conj) correct?\n for word in relative_clause_words:\n if word in clause.lower():\n clause = _remove_connector_text(clause, word)\n for word in conjunction_words:\n if word in clause.lower():\n clause = _remove_connector_text(clause, word)\n # May still have \"special mark\"s that need to be addressed in the semantics\n for word in special_marks:\n if word in clause.lower():\n revised_clause = _remove_connector_text(clause, word)\n if clause != revised_clause:\n clause = f'{revised_clause}$&{word}'\n final_chunks.append(_remove_start_end_commas(clause))\n return final_chunks",
"def sentence_split(text, properties={'annotators': 'ssplit', 'outputFormat': 'json'}):\n annotated = nlp.annotate(text, properties)\n sentence_split = list()\n for sentence in annotated['sentences']:\n s = [t['word'] for t in sentence['tokens']]\n k = [item.lower() for item in s if item not in [\",\", \".\", '...', '..']]\n sentence_split.append(\" \".join(k))\n return sentence_split",
"def wrap_text(text, wrap_chars):\r\n\r\n # split text on sentence punctuation and remove empty strings\r\n text_paragraph = re.split('([^.?!]+[.?!])', text)\r\n text_paragraph = [i for i in text_paragraph if i != '']\r\n\r\n # wrap text lines, append space item for paragraph spacing line\r\n text_list = []\r\n for text_line in text_paragraph:\r\n text_line_wrap = textwrap.wrap(\r\n text_line, int(wrap_chars), break_on_hyphens=False\r\n )\r\n for text_line_wrap_line in text_line_wrap:\r\n text_list.append(text_line_wrap_line.strip())\r\n text_list.append(' ')\r\n\r\n # remove last added space item for paragraph spacing line, return list\r\n if len(text_list) > 0:\r\n while text_list[-1] == ' ':\r\n text_list.pop()\r\n\r\n return text_list",
"def paper_tokenize(text, sentences_as_lists=False, preserve_order=False):\n permitted_titles = set(Reader().open_file(PERMITTED_TITLES_SOURCE))\n\n # Split the text into sections\n if preserve_order:\n split_text_1 = re.split(\"@&#\", text)\n split_text = zip(split_text_1, range(len(split_text_1)))\n else:\n split_text = re.split(\"@&#\", text)\n\n # The key value. This value is changed if a permitted section title is encountered in the list.\n state = \"\"\n\n # After the for loop, this dictionary will have keys relating to each permitted section, and values corresponding\n # to the text of that section\n sentences_with_states = defaultdict(str)\n\n section_counts = defaultdict(int)\n\n if preserve_order:\n for text, pos in split_text:\n\n # Hack for proper sentence tokenization because NLTK tokeniser doesn't work properly for tokenising papers\n text = text.replace(\"etal.\", \"etal\")\n text = text.replace(\"et al.\", \"etal\")\n text = text.replace(\"Fig.\", \"Fig\")\n text = text.replace(\"fig.\", \"fig\")\n text = text.replace(\"Eq.\", \"Eq\")\n text = text.replace(\"eq.\", \"eq\")\n text = text.replace(\"pp.\", \"pp\")\n text = text.replace(\"i.e.\", \"ie\")\n text = text.replace(\"e.g.\", \"eg\")\n text = text.replace(\"ref.\", \"ref\")\n text = text.replace(\"Ref.\", \"Ref\")\n text = text.replace(\"etc.\", \"etc\")\n text = text.replace(\"Figs.\", \"Figs\")\n text = text.replace(\"figs.\", \"figs\")\n text = text.replace(\"No.\", \"No\")\n text = text.replace(\"eqs.\", \"eqs\")\n\n # Checks if text is a section title\n if text.lower() in permitted_titles:\n state = text\n section_counts[state] += 1\n else:\n if sentences_as_lists:\n if section_counts[state] > 1:\n state = state + \"_\" + str(section_counts[state])\n sentences_with_states[state] = ([preprocess_sentence(x) for x in sent_tokenize(text)], pos)\n else:\n if section_counts[state] > 1:\n state = state + \"_\" + str(section_counts[state])\n sentences_with_states[state] = (text, pos)\n\n if not preserve_order:\n for text in split_text:\n\n # Hack for proper sentence tokenization because NLTK tokeniser doesn't work properly for tokenising papers\n text = text.replace(\"etal.\", \"etal\")\n text = text.replace(\"et al.\", \"etal\")\n text = text.replace(\"Fig.\", \"Fig\")\n text = text.replace(\"fig.\", \"fig\")\n text = text.replace(\"Eq.\", \"Eq\")\n text = text.replace(\"eq.\", \"eq\")\n text = text.replace(\"pp.\", \"pp\")\n text = text.replace(\"i.e.\", \"ie\")\n text = text.replace(\"e.g.\", \"eg\")\n text = text.replace(\"ref.\", \"ref\")\n text = text.replace(\"Ref.\", \"Ref\")\n text = text.replace(\"etc.\", \"etc\")\n text = text.replace(\"Figs.\", \"Figs\")\n text = text.replace(\"figs.\", \"figs\")\n text = text.replace(\"No.\", \"No\")\n text = text.replace(\"eqs.\", \"eqs\")\n\n # Checks if text is a section title\n if text.lower() in permitted_titles:\n state = text\n section_counts[state] += 1\n else:\n if sentences_as_lists:\n if section_counts[state] > 1:\n state = state + \"_\" + str(section_counts[state])\n sentences_with_states[state] = [preprocess_sentence(x) for x in sent_tokenize(text)]\n else:\n if section_counts[state] > 1:\n state = state + \"_\" + str(section_counts[state])\n sentences_with_states[state] = text\n\n return sentences_with_states",
"def __parse_sentences_from_text(text: str) -> List[Sentence]:\n doc = nlp(text)\n return doc.sentences",
"def realign_punctuated_text(df, text, skip_1st=0, margin=2):\n # Built-in str.split doesn't retain starting/trailing spaces correctly.\n # Probably would be fine but just keep this since it took a while to get\n # right and I don't want to break it.\n words = re.split(' ', text)\n rows = []\n start_i = 0\n for i, chunk in df.iterrows():\n chunk_words = re.split(' ', chunk.text)\n length = len(chunk_words)\n punct_words = words[start_i:start_i + length + margin]\n suff = ' '.join(chunk_words[-2:])\n scores = []\n bigrams = zip(punct_words[skip_1st:], punct_words[skip_1st + 1:])\n # Avoid list comp so we can exit early if we find a perfect match.\n for j, gram in enumerate(bigrams):\n score = fuzz.ratio(suff, ' '.join(gram).lower())\n if score == 100:\n argmax = j\n break\n scores.append(score)\n else:\n argmax = np.argmax(scores)\n if max(scores) < 80:\n warnings.warn(\n 'Max score < 80. Your rows may have gotten misaligned '\n f'at row {i}: {chunk.text}'\n )\n punct_len = skip_1st + argmax + 2\n rows.append(' '.join(words[start_i:start_i + punct_len]))\n start_i += punct_len\n\n new_df = pd.DataFrame(rows, columns=['text'])\n return pd.concat((new_df, df.reset_index()[['start', 'duration']].copy()),\n axis=1)",
"def _split_paragraphs(self, text):\n\n import re\n import textwrap\n\n text = textwrap.dedent(text).strip()\n text = re.sub('\\n\\n[\\n]+', '\\n\\n', text)\n\n last_sub_indent = None\n paragraphs = list()\n for line in text.splitlines():\n (indent, sub_indent) = self._indents(line)\n is_text = len(line.strip()) > 0\n\n if is_text and indent == sub_indent == last_sub_indent:\n paragraphs[-1] += ' ' + line\n else:\n paragraphs.append(line)\n\n if is_text:\n last_sub_indent = sub_indent\n else:\n last_sub_indent = None\n\n return paragraphs",
"def find_conclusion_sentences(self):\n for sentence in self.knowledge:\n new_mines=sentence.known_mines()\n new_safes=sentence.known_safes()\n if len(new_mines)>0:\n for mine in new_mines:\n self.mark_mine(mine)\n elif len(new_safes)>0:\n for safe in new_safes:\n self.mark_safe(safe)\n else:\n continue #skips next lines and goes to next sentence\n # if known_mines or safes is successful, all cells are marked mine or safe\n # then \"concluded\" sentence can be removed from knowledge base\n self.knowledge.remove(sentence) # only runs when if or elif is true because of \"continue\"",
"def spoil_text(text: str, modify_articles_rate=0.5, modify_prepositions_rate=0.25,\n modify_synonyms_rate=0.2) -> str:\n tokens = text.split(' ')\n tokens = list(filter(lambda token: len(token) > 0 and not token.isspace(), tokens))\n\n articles = ['a', 'an', 'the', '']\n prepositions = ['on', 'in', 'into', 'at']\n for i, token in enumerate(tokens):\n if token in articles:\n if np.random.binomial(1, modify_articles_rate) == 1:\n tokens[i] = np.random.choice(articles)\n elif token in prepositions:\n if np.random.binomial(1, modify_prepositions_rate) == 1:\n tokens[i] = np.random.choice(prepositions)\n elif np.random.binomial(1, modify_synonyms_rate) == 1:\n synonyms = [l.name() for syn in wordnet.synsets(token)[:1] for l in syn.lemmas()]\n if len(synonyms) > 0:\n syn = np.random.choice(synonyms)\n tokens[i] = syn.replace('_', ' ')\n return ' '.join(tokens)",
"def _build_sentence_list(self, text, tokens):\n # Most of the work here is making sure that we put the right\n # pieces of whitespace back in all the right places.\n\n # Our position in the source text, used to keep track of which\n # whitespace to add:\n pos = 0\n\n # A regular expression that finds pieces of whitespace:\n WS_REGEXP = re.compile(r'\\s*')\n\n sentence = ''\n for aug_tok in tokens:\n tok = aug_tok.tok\n\n # Find the whitespace before this token, and update pos.\n ws = WS_REGEXP.match(text, pos).group()\n pos += len(ws)\n\n # Some of the rules used by the punkt word tokenizer\n # strip whitespace out of the text, resulting in tokens\n # that contain whitespace in the source text. If our\n # token doesn't match, see if adding whitespace helps.\n # If so, then use the version with whitespace.\n if text[pos:pos+len(tok)] != tok:\n pat = '\\s*'.join(re.escape(c) for c in tok)\n m = re.compile(pat).match(text,pos)\n if m: tok = m.group()\n\n # Move our position pointer to the end of the token.\n assert text[pos:pos+len(tok)] == tok\n pos += len(tok)\n\n # Add this token. If it's not at the beginning of the\n # sentence, then include any whitespace that separated it\n # from the previous token.\n if sentence:\n sentence += ws\n sentence += tok\n\n # If we're at a sentence break, then start a new sentence.\n if aug_tok.sentbreak:\n yield sentence\n sentence = ''\n\n # If the last sentence is emtpy, discard it.\n if sentence:\n yield sentence"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns True if the given text includes a sentence break.
|
def text_contains_sentbreak(self, text):
found = False # used to ignore last token
for t in self._annotate_tokens(self._tokenize_words(text)):
if found:
return True
if t.sentbreak:
found = True
return False
|
[
"def check_sentence(text):\n result = re.search(r\"^[A-Z][a-z\\s]*[.?!]$\", text)\n return result != None",
"def isWordIn(self, text):\n temp = text\n temp2 = \"\"\n temp = temp.lower()\n for c in temp:\n if c in \"\"\"!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~\"\"\":\n c = \" \"\n temp2 += c\n temp2 = temp2.split()\n\n\n if self.myWord.lower() in temp2:\n return True\n else:\n return False",
"def any_text_contains(\n self, text: str, deep: bool = True, separator: str = \"\", strip: bool = False\n ) -> bool:\n ...",
"def document_contains_text(docbody):\n found_non_space = 0\n for line in docbody:\n if not line.isspace():\n # found a non-whitespace character in this line\n found_non_space = 1\n break\n return found_non_space",
"def is_word_in(text):\r\n # translation table for conversion\r\n table = string.maketrans(\"\",\"\")\r\n # parse text to remove formatting\r\n text = text.lower().translate(table, string.punctuation)\r\n # iterate each word in text and check if word is there\r\n for words in text:\r\n if word.lower() in text:\r\n## print \"word:\", word\r\n## print True\r\n return True\r\n return False",
"def is_word_in_text(what_word, where_word) -> bool:\n return where_word.find(what_word) != -1",
"def is_html_like(text):\n if isinstance(text, str):\n text = text.strip()\n if text.startswith(\"<\"):\n return True\n return False\n return False",
"def is_real_sentence(only_token, sentence):\n \n first_word = \"\"\n if only_token:\n first_word = sentence[0]\n else:\n first_word = sentence[0][0]\n\n if '---------------------' in first_word or first_word == '-DOCSTART-':\n return False\n else:\n return True",
"def hasRawText(self, text):\r\n r = re.compile(r'<(p|blockquote|div|form|table|ul|ol|pre|h\\d)[^>]*?>.*</\\1>',\r\n re.S).sub('', text.strip()).strip()\r\n r = re.compile(r'<(hr|br)[^>]*?/>').sub('', r)\r\n return '' != r",
"def is_sentence(self):\n return self.parent == 'S'",
"def is_unstructured_text(self):\r\n\r\n return not self.label.isupper()",
"def count_sentences(text):\n return len([w for w in text.replace('? ', '. ').split('.') if w != \"\"])",
"def is_html(text):\n if text is not None and '<html' in text[:300].lower():\n return True\n return False",
"def allInText(self, words, text):\n index = 0\n for word in words:\n index = text[index:].find(word)\n if index == -1:\n return False\n return True",
"def _is_line_break(self, char):\n return ord(char) == 10 or ord(char) == 13",
"def paragraph_mentions(text: str, keyword: str) -> bool:\n soup = BeautifulSoup(text, 'html5lib')\n paragraphs = [p.get_text() for p in soup('p')]\n\n return any(keyword.lower() in paragraph.lower()\n for paragraph in paragraphs)",
"def has_text(self):",
"def sentence_split(text: str,\n split_newline: Union[str, bool] = True,\n merge_apostrophe_word: bool = False,\n ) -> Generator[str, Any, None]:\n for sentence_tokens in sentence_split_tokens(text,\n split_newline=split_newline,\n merge_apostrophe_word=merge_apostrophe_word):\n sentence = ''.join(token.text for token in sentence_tokens).strip()\n if sentence:\n yield sentence",
"def test_sentence_segmentation(self):\n\n input = 'This is the first paragraph.\\n\\n\\nThis is the second paragraph.'\n re_paragraph_splitter = '\\n\\n+'\n result = self.datacleaner.sentence_segmentation(input, re_paragraph_splitter)\n self.assertEqual(result, ['This is the first paragraph.', 'This is the second paragraph.'])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Given a text, generates the sentences in that text. Annotates all tokens, rather than just those with possible sentence breaks. Should produce the same results as ``sentences_from_text``.
|
def sentences_from_text_legacy(self, text):
tokens = self._annotate_tokens(self._tokenize_words(text))
return self._build_sentence_list(text, tokens)
|
[
"def __parse_sentences_from_text(text: str) -> List[Sentence]:\n doc = nlp(text)\n return doc.sentences",
"def split_into_sentences(text: str) -> typing.List[str]:\n\n return nltk.sent_tokenize(text)",
"def _build_sentence_list(self, text, tokens):\n # Most of the work here is making sure that we put the right\n # pieces of whitespace back in all the right places.\n\n # Our position in the source text, used to keep track of which\n # whitespace to add:\n pos = 0\n\n # A regular expression that finds pieces of whitespace:\n WS_REGEXP = re.compile(r'\\s*')\n\n sentence = ''\n for aug_tok in tokens:\n tok = aug_tok.tok\n\n # Find the whitespace before this token, and update pos.\n ws = WS_REGEXP.match(text, pos).group()\n pos += len(ws)\n\n # Some of the rules used by the punkt word tokenizer\n # strip whitespace out of the text, resulting in tokens\n # that contain whitespace in the source text. If our\n # token doesn't match, see if adding whitespace helps.\n # If so, then use the version with whitespace.\n if text[pos:pos+len(tok)] != tok:\n pat = '\\s*'.join(re.escape(c) for c in tok)\n m = re.compile(pat).match(text,pos)\n if m: tok = m.group()\n\n # Move our position pointer to the end of the token.\n assert text[pos:pos+len(tok)] == tok\n pos += len(tok)\n\n # Add this token. If it's not at the beginning of the\n # sentence, then include any whitespace that separated it\n # from the previous token.\n if sentence:\n sentence += ws\n sentence += tok\n\n # If we're at a sentence break, then start a new sentence.\n if aug_tok.sentbreak:\n yield sentence\n sentence = ''\n\n # If the last sentence is emtpy, discard it.\n if sentence:\n yield sentence",
"def get_sentences(text):\n\n\n lines = re.findall(r'\\s*([A-Z].+?[\\?\\!\\.])\\s+',text,flags=re.MULTILINE | re.DOTALL)\n \n\n return [line.replace('\\n',' ') for line in lines]",
"def segment(text: str):\n\tlines = segment_by_lines(text)\n\tsentences = []\n\tfor line in lines:\n\t\tstripped_line = line.strip()\n\t\tif stripped_line != '':\n\t\t\tsentences.extend(segment_by_punctuation(stripped_line))\n\treturn sentences",
"def extract_sentences(text: str) -> List[str]:\n doc = nlp(text)\n return [sent.text for sent in doc.sents]",
"def sentences_from_text(self, text, realign_boundaries=True):\n sents = [text[sl] for sl in self._slices_from_text(text)]\n if realign_boundaries:\n sents = self._realign_boundaries(sents)\n return sents",
"def sentence_split(text, properties={'annotators': 'ssplit', 'outputFormat': 'json'}):\n annotated = nlp.annotate(text, properties)\n sentence_split = list()\n for sentence in annotated['sentences']:\n s = [t['word'] for t in sentence['tokens']]\n k = [item.lower() for item in s if item not in [\",\", \".\", '...', '..']]\n sentence_split.append(\" \".join(k))\n return sentence_split",
"def split_sentences(cls, text):\n last_index = 0\n intervaled = []\n for match in cls.SENTENCE_SPLITTER.finditer(text):\n begin, end = match.span()\n intervaled.append(text[last_index:begin])\n intervaled.append(text[begin:end])\n last_index = end\n intervaled.append(text[last_index:])\n\n if len(intervaled) > 1 and not intervaled[-1].strip():\n end = intervaled.pop()\n sep = intervaled.pop()\n intervaled[-1] += sep + end\n\n return intervaled",
"def sentence_tokenize(input_text):\n sent_lst = []\n sent_pipe = PARSER.create_pipe(\"sentencizer\")\n PARSER.add_pipe(sent_pipe)\n doc = PARSER(input_text)\n for sent in doc.sents:\n sent_lst.append(sent.text)\n return sent_lst",
"def generate_sentence(text, num_of_words=15):\n result_dict = markov_chain(text)\n final_result_sentence = __generate_text(result_dict, num_of_words)\n return final_result_sentence",
"def splitSentences(self,txt):\n \n txt = txt.split()\n #txt = txt.split(\"\\s\") #DM to account for longer documents in formative evaluation - change back for impression sections only\n\n #attribute side header to each corresponding sentence\n sentences = []\n wordLoc = 0\n \n\n while(wordLoc < len(txt) ):\n currentWord = txt[wordLoc]\n if( currentWord[-1] in '.?!' ):\n if( currentWord in self.exceptionTerms ):\n wordLoc += 1\n # per discussion with A.G. dropped this exception, since assuming numbers only use decimal points if there \n # are actual decimal point digits expressed and thus the period would not be the last character of the word.\n #elif( self.digits.intersection(currentWord) and \n #not set('()').intersection(currentWord)): # word doesn't include parentheses. Is this necessary?\n #wordLoc += 1\n else:\n sentences.append(unicode(\" \"+' '.join(txt[:wordLoc+1]))) \n txt = txt[wordLoc+1:]\n wordLoc = 0\n else:\n wordLoc += 1\n\n # if any texts remains (due to failure to identify a final sentence termination,\n # then take all remaining text and put into a sentence\n if( txt ):\n sentences.append(unicode(\" \"+' '.join(txt)) )\n \n #print sentences;raw_input()\n return sentences",
"def sentence_extractor(self):\n self.text_sentences = []\n for text in self.texts:\n sentences = nltk.sent_tokenize(text)\n tokens_sentences = []\n for sentence in sentences:\n tokens = nltk.word_tokenize(sentence)\n if self.text_cleaner is not None:\n tokens = self.text_cleaner(tokens)\n if self.stem_words:\n tokens = stem_words(tokens)\n \n tokens_sentences.append(tokens)\n self.text_sentences.append(tokens_sentences)",
"def tokenize(text):\n cde_p = Paragraph(text)\n tokens = cde_p.tokens\n toks = []\n for sentence in tokens:\n toks.append([])\n for tok in sentence:\n toks[-1].append(tok.text)\n return toks",
"def get_sentences(raw_text):\n return [\"\".join(s['sentences']) for s in format_doc(raw_text)]",
"def _merge_sentences(text):\n\n return [word for sentence in text for word in sentence]",
"def sentences_from_tokens(self, tokens):\n tokens = iter(self._annotate_tokens(self._Token(t) for t in tokens))\n sentence = []\n for aug_tok in tokens:\n sentence.append(aug_tok.tok)\n if aug_tok.sentbreak:\n yield sentence\n sentence = []\n if sentence:\n yield sentence",
"def split_into_sentences(text):\n if \".)\" in text: text = text.replace(\".)\", \"<prd>)\")\n sentences = text.split(\".\")\n text = text.replace(\"<prd>\", \".\")\n for s in sentences:\n s = s.replace(\"<prd>\", \".\")\n return sentences",
"def sentence_tokenize(self, text_list):\n return [sent_tokenize(text, language=self.lang) for text in text_list]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Given a sequence of tokens, generates lists of tokens, each list corresponding to a sentence.
|
def sentences_from_tokens(self, tokens):
tokens = iter(self._annotate_tokens(self._Token(t) for t in tokens))
sentence = []
for aug_tok in tokens:
sentence.append(aug_tok.tok)
if aug_tok.sentbreak:
yield sentence
sentence = []
if sentence:
yield sentence
|
[
"def make_token_seq(seq):\n ret = []\n for name in seq: ret.append(make_token(name))\n return ret",
"def _build_sentence_list(self, text, tokens):\n # Most of the work here is making sure that we put the right\n # pieces of whitespace back in all the right places.\n\n # Our position in the source text, used to keep track of which\n # whitespace to add:\n pos = 0\n\n # A regular expression that finds pieces of whitespace:\n WS_REGEXP = re.compile(r'\\s*')\n\n sentence = ''\n for aug_tok in tokens:\n tok = aug_tok.tok\n\n # Find the whitespace before this token, and update pos.\n ws = WS_REGEXP.match(text, pos).group()\n pos += len(ws)\n\n # Some of the rules used by the punkt word tokenizer\n # strip whitespace out of the text, resulting in tokens\n # that contain whitespace in the source text. If our\n # token doesn't match, see if adding whitespace helps.\n # If so, then use the version with whitespace.\n if text[pos:pos+len(tok)] != tok:\n pat = '\\s*'.join(re.escape(c) for c in tok)\n m = re.compile(pat).match(text,pos)\n if m: tok = m.group()\n\n # Move our position pointer to the end of the token.\n assert text[pos:pos+len(tok)] == tok\n pos += len(tok)\n\n # Add this token. If it's not at the beginning of the\n # sentence, then include any whitespace that separated it\n # from the previous token.\n if sentence:\n sentence += ws\n sentence += tok\n\n # If we're at a sentence break, then start a new sentence.\n if aug_tok.sentbreak:\n yield sentence\n sentence = ''\n\n # If the last sentence is emtpy, discard it.\n if sentence:\n yield sentence",
"def tokenize_batch(self, seqs: List[str]) -> List[List[str]]:\n return [self.tokenize(s) for s in seqs]",
"def group_sentences(tokens, max_len=200):\n sentence = []\n for token in tokens:\n if token is None:\n if sentence and len(sentence) <= max_len:\n yield sentence\n sentence = []\n else:\n sentence.append(token)\n if sentence and len(sentence) <= max_len:\n yield sentence",
"def _segment_tokens(tokens: List[str]) -> List[List[str]]:\n output = []\n for word in tokens:\n word_subwords = []\n new_word = [out for segment in bpe_codes._isolate_glossaries(word)\n for out in apply_bpe.encode(segment,\n bpe_codes.bpe_codes,\n bpe_codes.bpe_codes_reverse,\n bpe_codes.vocab,\n bpe_codes.separator,\n bpe_codes.version,\n bpe_codes.cache,\n bpe_codes.glossaries)]\n\n for item in new_word[:-1]:\n word_subwords.append(item + bpe_codes.separator)\n word_subwords.append(new_word[-1])\n\n output.append(word_subwords)\n\n return output",
"def get_tokens(annotated_sentence):\n\n return [seq for seq in annotated_sentence.split('\\n') if seq]",
"def tokenize(self, sequence):\n return [x for x in sequence]",
"def sentence_tokenize(self, text_list):\n return [sent_tokenize(text, language=self.lang) for text in text_list]",
"def sentence_tokenize(input_text):\n sent_lst = []\n sent_pipe = PARSER.create_pipe(\"sentencizer\")\n PARSER.add_pipe(sent_pipe)\n doc = PARSER(input_text)\n for sent in doc.sents:\n sent_lst.append(sent.text)\n return sent_lst",
"def tag_tokenized_sentences(self, sentences):\n return [self.tag_tokenized(sentence) for sentence in sentences]",
"def tokenize(text):\n cde_p = Paragraph(text)\n tokens = cde_p.tokens\n toks = []\n for sentence in tokens:\n toks.append([])\n for tok in sentence:\n toks[-1].append(tok.text)\n return toks",
"def tokenize(sentence):\n doc = nlp(sentence)\n token, tag, ner, lemma = [], [], [], []\n for word in doc:\n token.append(word.text)\n tag.append(word.tag_)\n ner.append(word.ent_type_)\n lemma.append(word.lemma_)\n return token, tag, ner, lemma",
"def _token_seq_generator(self):\n\t\tfor document in self.documents:\n\t\t\tyield [\n\t\t\t\ttok.lower() for sentence in document.sentences for tok in\n\t\t\t\tself._gen_grams(self._filter(sentence.__dict__[self.tokens]))\n\t\t\t]",
"def ids2tokens(vocab, tokids):\n return [Doc(vocab, words=[vocab[t].orth_ for t in ids]) for ids in tokids]",
"def sentence_combination(list_1, list_2):\n return [(term_1 + ' ' + term_2) for term_1 in list_1 for term_2 in list_2]",
"def prep_text(mission):\n sentences = nltk.sent_tokenize(mission)\n sentences = [nltk.word_tokenize(sent) for sent in sentences]\n return sentences",
"def trigram_creation(tokens):\n global trigrams_list\n trigrams_list = []\n for n in range(0, len(tokens) - 2):\n trigrams_list.append([str(tokens[n]) + ' ' + str(tokens[n + 1]), tokens[n + 2]])",
"def tokenize(self, sentence):\n ...",
"def tokens(text, tok_size=3):\n return [text[i : i + tok_size] for i in range(len(text) - tok_size + 1)]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Given a set of tokens augmented with markers for linestart and paragraphstart, returns an iterator through those tokens with full annotation including predicted sentence breaks.
|
def _annotate_tokens(self, tokens):
# Make a preliminary pass through the document, marking likely
# sentence breaks, abbreviations, and ellipsis tokens.
tokens = self._annotate_first_pass(tokens)
# Make a second pass through the document, using token context
# information to change our preliminary decisions about where
# sentence breaks, abbreviations, and ellipsis occurs.
tokens = self._annotate_second_pass(tokens)
return tokens
|
[
"def _annotate_first_pass(self, tokens):\n for aug_tok in tokens:\n self._first_pass_annotation(aug_tok)\n yield aug_tok",
"def generate_tokenized_sentences(paragraph: str) -> Iterator[str]:\n word_tokenizer = RegexpTokenizer(r'[-\\'\\w]+')\n\n for sentence in sent_tokenize(paragraph):\n tokenized_sentence = word_tokenizer.tokenize(sentence)\n if tokenized_sentence:\n tokenized_sentence.append('[END]')\n yield tokenized_sentence",
"def sentences_from_tokens(self, tokens):\n tokens = iter(self._annotate_tokens(self._Token(t) for t in tokens))\n sentence = []\n for aug_tok in tokens:\n sentence.append(aug_tok.tok)\n if aug_tok.sentbreak:\n yield sentence\n sentence = []\n if sentence:\n yield sentence",
"def get_tokens(annotated_sentence):\n\n return [seq for seq in annotated_sentence.split('\\n') if seq]",
"def annotate(self, registry: Registry = None) -> Words:\n if not registry:\n registry = self.registry\n\n for entry in registry:\n # Get data points for phrase and its start and end\n edtext_start = entry[\"data\"][0]\n edtext_end = entry[\"data\"][1] + 1\n edtext_lvl = entry[\"lvl\"] + 1 # Reledmac 1-indexes the levels.\n edtext = self.words[edtext_start:edtext_end]\n\n # Identify search words and ellipsis\n search_ws, ellipsis = self._define_search_words(edtext)\n\n if ellipsis:\n # If we have a lemma note with ellipsis, we need to establish\n # context for both ellipsis elements (which may be nested\n # inside the edtext).\n ell_sidx = edtext.index(search_ws[0], default=0) + edtext_start\n ell_eidx = edtext.rindex(search_ws[1], default=0) + edtext_start\n\n el1_ctxt = self._get_contexts(self.words, ell_sidx)\n el2_ctxt = self._get_contexts(self.words, ell_eidx)\n contexts = el1_ctxt + el2_ctxt\n else:\n # Establish the context\n ctxt_before = self._get_context_before(self.words, edtext_start)\n ctxt_after = self._get_context_after(self.words, edtext_end)\n contexts = [w.get_text() for w in ctxt_before] + [\n w.get_text() for w in ctxt_after\n ]\n\n # Is there a match in either context?\n if search_ws and self._in_context(contexts, search_ws, ellipsis):\n\n # Annotate the edtext\n # -------------------\n if ellipsis:\n sidx = edtext.index(search_ws[0], default=0)\n eidx = edtext.rindex(search_ws[1], default=0)\n if self._in_context(el1_ctxt, search_ws[0:1], ellipsis):\n self._add_sameword(edtext[sidx : sidx + 1], edtext_lvl)\n if self._in_context(el2_ctxt, search_ws[-1:], ellipsis):\n self._add_sameword(edtext[eidx : eidx + 1], edtext_lvl)\n else:\n try:\n with temp_settings({\"sensitive_context_match\": False}):\n sidx, eidx = self._find_index(edtext, search_ws)\n except TypeError:\n raise ValueError(\n \"Looks like edtext and lemma content \"\n \"don't match in \"\n \"'{}'\".format(edtext.write())\n )\n\n self._process_annotation(edtext, sidx, eidx, edtext_lvl)\n\n # Annotate the lemma if relevant\n # ------------------\n if r\"\\lemma\" in edtext[-1].ann_apps[-1].cont:\n # get the relevant app Element\n app_note = edtext[-1].ann_apps[-1]\n # split up the apparatus note into before, lem, after\n s, e = self._find_lemma_pos(app_note)\n if ellipsis:\n # Tokenize the lemma words and ellipsis\n # Annotate the lemma word where the context matches\n # We want to annotate words even though they may not\n # be first or last index in tokenized text. 
So we get\n # the indexes of those (list comp `idxs`) and then\n # use those to index into the tokenized list in\n # replacing.\n lemma = self._find_ellipsis_words(app_note.cont[s:e])\n idxs = [i for i, w in enumerate(lemma) if w.content]\n if self._in_context(el1_ctxt, search_ws[0:1], ellipsis):\n lemma[idxs[0]] = self._add_sameword(\n lemma[idxs[0] : idxs[0] + 1], level=0\n )[0]\n if self._in_context(el2_ctxt, search_ws[-1:], ellipsis):\n lemma[idxs[-1]] = self._add_sameword(\n lemma[idxs[-1] : idxs[-1] + 1], level=0\n )[0]\n\n else:\n lemma = Tokenizer(app_note.cont[s:e]).wordlist\n lemma = self._process_annotation(lemma, 0, len(lemma), 0)\n\n # patch app note up again with new lemma content\n bef = app_note.cont[:s]\n after = app_note.cont[e:]\n new = bef + lemma.write() + after\n # update the app note Element with the new content\n edtext[-1].update_element(app_note, new)\n\n # Then annotate the contexts\n # ------------------------------\n if ellipsis:\n for pos, word in zip([ell_sidx, ell_eidx], search_ws):\n ctxt = self._get_context_before(\n self.words, pos\n ) + self._get_context_after(self.words, pos + 1)\n if self._in_context(ctxt, [word], ellipsis):\n self._annotate_context(ctxt, [word])\n else:\n for ctxt in [ctxt_before, ctxt_after]:\n self._annotate_context(ctxt, search_ws)\n\n return self.words",
"def annotations(self):\n for line in self.fp:\n self.lineno += 1\n if not line or line[0] == '!':\n # This is a comment line\n continue\n try:\n # append the organism name to the line, the file.\n # Some wiggleling is necessary, because the last\n # part of the line is actually a newline and three tab\n line = line[0:-2] + self.organism_name\n yield Annotation(line)\n except TypeError as ex:\n raise SyntaxError(\"cannot parse annotation\", self.lineno)",
"def _parse_and_tokenize(\n self, sequences, candidate_labels, hypothesis_template, padding=True, add_special_tokens=True, **kwargs\n ):\n sequence_pairs = self._args_parser(sequences, candidate_labels, hypothesis_template)\n inputs = self.tokenizer(\n sequence_pairs,\n add_special_tokens=add_special_tokens,\n return_tensors=self.framework,\n padding=padding,\n truncation=\"only_first\",\n )\n\n return inputs",
"def tokenize(self):",
"def text_preceding_points(view, points):\n lines = [view.line(point) for point in points]\n lines_to_point = [sublime.Region(line.begin(), point) for line, point in zip(lines, points)]\n return [view.substr(region) for region in lines_to_point]",
"def _token_seq_generator(self):\n\t\tfor document in self.documents:\n\t\t\tyield [\n\t\t\t\ttok.lower() for sentence in document.sentences for tok in\n\t\t\t\tself._gen_grams(self._filter(sentence.__dict__[self.tokens]))\n\t\t\t]",
"def _segment_tokens(tokens: List[str]) -> List[List[str]]:\n output = []\n for word in tokens:\n word_subwords = []\n new_word = [out for segment in bpe_codes._isolate_glossaries(word)\n for out in apply_bpe.encode(segment,\n bpe_codes.bpe_codes,\n bpe_codes.bpe_codes_reverse,\n bpe_codes.vocab,\n bpe_codes.separator,\n bpe_codes.version,\n bpe_codes.cache,\n bpe_codes.glossaries)]\n\n for item in new_word[:-1]:\n word_subwords.append(item + bpe_codes.separator)\n word_subwords.append(new_word[-1])\n\n output.append(word_subwords)\n\n return output",
"def _parse_entities(self, tagged_text):\n return (m.groups() for m in INLINEXML_EPATTERN.finditer(tagged_text))",
"def citation_marks(self) -> Iterator[Dict[str, Any]]:\n for mark in self.mark_regex.finditer(self.marked_text):\n mark: re.Match\n start, end = mark.span()\n yield {\n 'marked_text': self.marked_text, # memory-safe, no copy is made.\n 'span': (start, end)\n }",
"def extract_spans(in_file, out_file, kb = None):\n if kb is not None:\n kb = load_embedding(kb)\n\n counter = 0\n flag = False\n spans = []\n with open(in_file) as handle:\n for line in handle:\n line = line.strip()\n if line.startswith(\"DOCSTART\") or line.startswith(\"DOCEND\") or line == \"*NL*\" or len(line) == 0:\n continue\n elif line.startswith(\"MMSTART\"):\n assert not flag\n entity = line.strip().split()[-1]\n\n if kb is not None:\n entity = normalize_entity(kb, entity, kb.prefix) \n \n spans.append([entity, [], counter, counter-1])\n flag = True\n elif line.startswith(\"MMEND\"):\n flag = False\n elif flag:\n spans[-1][-1] += 1\n spans[-1][1].append(line)\n counter += 1\n else:\n counter += 1\n\n spans.sort(key = lambda x:(x[-2], x[-1]))\n\n with open(out_file, \"w\") as whandle:\n for entity, surface, start, end in spans:\n surface = \" \".join(surface)\n whandle.write(f\"{start}\\t{end}\\t{entity}\\t{surface}\\n\")",
"def __segment_bounding_box_generator(pts, other_pt_lists = None):\r\n segs=list(zip(pts[:-1],pts[1:]))\r\n for i, obj in enumerate(list(zip(range(len(segs)),segs))):\r\n # Get left, bottom, right and top coordinate values\r\n seg=obj[1]\r\n p1=seg[0]\r\n p2=seg[1]\r\n left, bottom, right, top=__bounding_box(p1,p2)\r\n # Yield tuple as desired by rtree (sequence: LBRT)\r\n yield (i,(left, bottom, right, top),None)\r\n # do the same for the other lines\r\n if other_pt_lists != None:\r\n cur_id = len(pts)*2+1\r\n for pt_list in other_pt_lists:\r\n segs=list(zip(pt_list[:-1],pt_list[1:]))\r\n for i, obj in enumerate(list(zip(range(len(segs)),segs))):\r\n # Get left, bottom, right and top coordinate values\r\n seg=obj[1]\r\n p1=seg[0]\r\n p2=seg[1]\r\n left, bottom, right, top=__bounding_box(p1,p2)\r\n # Yield tuple as desired by rtree (sequence: LBRT)\r\n yield (cur_id,(left, bottom, right, top),None)\r\n cur_id +=1",
"def _crawl_indent_points(elements: ReflowSequenceType) -> Iterator[_IndentPoint]:\n last_line_break_idx = None\n indent_balance = 0\n untaken_indents: Tuple[int, ...] = ()\n for idx, elem in enumerate(elements):\n if isinstance(elem, ReflowPoint):\n indent_impulse, indent_trough = elem.get_indent_impulse()\n\n # Is it a line break? AND not a templated one.\n if has_untemplated_newline(elem) and idx != last_line_break_idx:\n yield _IndentPoint(\n idx,\n indent_impulse,\n indent_trough,\n indent_balance,\n last_line_break_idx,\n True,\n untaken_indents,\n )\n last_line_break_idx = idx\n has_newline = True\n # Is it otherwise meaningful as an indent point?\n # NOTE: a point at idx zero is meaningful because it's like an indent.\n # NOTE: Last edge case. If we haven't yielded yet, but the\n # next element is the end of the file. Yield.\n elif (\n indent_impulse\n or indent_trough\n or idx == 0\n or elements[idx + 1].segments[0].is_type(\"end_of_file\")\n ):\n yield _IndentPoint(\n idx,\n indent_impulse,\n indent_trough,\n indent_balance,\n last_line_break_idx,\n False,\n untaken_indents,\n )\n has_newline = False\n\n # Strip any untaken indents above the new balance.\n # NOTE: We strip back to the trough, not just the end point\n # if the trough was lower than the impulse.\n untaken_indents = tuple(\n x\n for x in untaken_indents\n if x\n <= (\n indent_balance + indent_impulse + indent_trough\n if indent_trough < indent_impulse\n else indent_balance + indent_impulse\n )\n )\n\n # After stripping, we may have to add them back in.\n if indent_impulse > indent_trough and not has_newline:\n for i in range(indent_trough, indent_impulse):\n untaken_indents += (indent_balance + i + 1,)\n\n # Update values\n indent_balance += indent_impulse",
"def get_sub_paras(para, tokenizer, max_seq_length, doc_stride, total):\n if not para[\"context\"]:\n return []\n max_tokens_for_doc = max_seq_length - 2 # -2 for [CLS] and [SEP]\n para_tokens, para_char_to_token = bert_utils_v2.preprocess_text(\n para[\"context\"], tokenizer)\n\n # Get mention token start and ends.\n mentions = []\n for im, ment in enumerate(para[\"mentions\"]):\n if ment[\"start\"] + len(ment[\"text\"]) - 1 >= len(para_char_to_token):\n tf.logging.warn(\"Mention not within paragraph: (%s, %s)\",\n json.dumps(ment), para[\"context\"])\n continue\n st_tok = para_char_to_token[ment[\"start\"]][0]\n en_tok = para_char_to_token[ment[\"start\"] + len(ment[\"text\"]) - 1][1]\n mentions.append({\n \"kb_id\": ment[\"kb_id\"],\n \"text\": ment[\"text\"],\n \"start_token\": st_tok,\n \"end_token\": en_tok,\n \"orig_index\": im,\n })\n if not mentions:\n return []\n\n # Get sub para spans.\n sub_paras = []\n start_offset = 0\n while start_offset < len(para_tokens):\n length = len(para_tokens) - start_offset\n if length > max_tokens_for_doc:\n length = max_tokens_for_doc\n sub_paras.append((start_offset, length))\n if start_offset + length == len(para_tokens):\n break\n start_offset += min(length, doc_stride)\n if not sub_paras:\n return []\n\n # Assign each mention to a sub_para.\n sub_para_to_mentions = {i: [] for i in range(len(sub_paras))}\n for ment in mentions:\n best_score, best_index = None, None\n for ii, subp in enumerate(sub_paras):\n subp_end = subp[0] + subp[1] - 1\n if ment[\"start_token\"] < subp[0] or ment[\"end_token\"] > subp_end:\n continue\n score = min(ment[\"start_token\"] - subp[0], subp_end - ment[\"end_token\"])\n if best_score is None or score > best_score:\n best_score = score\n best_index = ii\n if best_index is None:\n best_index = 0\n ment[\"start_token\"] -= sub_paras[best_index][0]\n ment[\"end_token\"] -= sub_paras[best_index][0]\n if (ment[\"start_token\"] < sub_paras[best_index][1] and\n ment[\"end_token\"] < sub_paras[best_index][1]):\n sub_para_to_mentions[best_index].append(ment)\n\n # Create a list of sub_para objects.\n sub_para_objects = []\n for ii, subp in enumerate(sub_paras):\n sub_para_objects.append({\n \"id\": total[0],\n \"mentions\": sub_para_to_mentions[ii],\n \"tokens\": para_tokens[subp[0]:subp[0] + subp[1]],\n })\n total[0] += 1\n\n return sub_para_objects",
"def process_predictions(predictions, output_path):\n\n # Only use this bit if I haven't reconsituted the sentence piece tokens into full tokens\n entity_pmids = []\n entity_labels = []\n whole_tokens = []\n sub_token = False\n entity_label = \"\"\n entity_pmid = \"\"\n prev_label = \"\"\n token_main = \"\"\n token_cnt = 0\n # Very annoying loops to reconstitute bert tokens\n for pred in predictions:\n line = pred.split(\"\\t\")\n label = line[2].strip()\n pmid = line[0]\n token = line[1]\n if label == \"X\":\n sub_token = True\n token_sub = token\n token_main += token_sub\n else:\n # Some tokens will have no sub tokens, some will, so I have to keep track\n # of both cases.\n if sub_token == True or (sub_token == False and token_cnt > 0):\n whole_tokens.append(token_main)\n entity_pmids.append(entity_pmid)\n entity_labels.append(entity_label)\n entity_label = label\n entity_pmid = pmid\n token_main = token\n sub_token = False\n token_cnt += 1\n\n combined_labels = []\n combined_pmids = []\n combined_tokens = []\n i_token_state = False\n b_token_state = False\n o_label_state = False\n b_token = \"\"\n prev_label = \"\"\n token_label = \"\"\n entity_pmid = \"\"\n i_cnt = 0\n b_cnt = 0\n cnt = 0\n for pmid, token, label in zip(entity_pmids, whole_tokens, entity_labels):\n if label == \"O\":\n prev_label = \"O\"\n o_label_state = True\n continue\n elif label.startswith(\"B\"):\n # Account for entities that have B- and I- labels and those that have just B-\n # Check if the loop previously visited the I condition.\n if i_token_state == True or (b_token_state == True and i_token_state == False):\n if b_token != \"\":\n combined_labels.append(token_label)\n combined_pmids.append(entity_pmid)\n combined_tokens.append(b_token)\n i_token_state = False\n b_token_state = True\n o_label_state = False\n entity_pmid = pmid\n b_token = token\n token_label = label\n b_cnt += 1\n elif label.startswith(\"I\"):\n # Append an inner entity to the previous entity\n i_cnt += 1\n i_token_state = True\n b_token_state = False\n b_token += \" \" + token\n prev_label = label\n cnt += 1 \n\n print(\"Inner and Beginning entity count\")\n print(i_cnt, b_cnt)\n with open(output_path,'w') as writer:\n for pmid, token, label in zip(combined_pmids, combined_tokens, combined_labels):\n writer.write(\"{0}\\t{1}\\t{2}\\n\".format(pmid, token, label))",
"def split_sentences(self):\n all_sentences = [self.title] + split_abstract(self.abstract)\n\n full_text = \"{} {}\".format(self.title, self.abstract)\n\n sent_idx = 0 # starting index of current sentence\n annot_idx = 0 # index of annotation that is within current sentence\n\n res = []\n M = len(self.annotations)\n for i, sentence in enumerate(all_sentences):\n # The sentence splitter isn't perfect. It recognizes \"i.v.\" as a\n # sentence. Since there can be multiple instances of \"sentences\"\n # like \"i.v.\" (e.g., PMID 10840460), we need to make sure that\n # we are checking for the first instance starting at the current\n # position (since find always finds the first instance otherwise).\n assert full_text.find(sentence, sent_idx) == sent_idx, (\n \"PMID {0} {1} text mismatch!\".format(self.pmid, sentence))\n\n sent_stop = sent_idx + len(sentence)\n\n start_annot = annot_idx\n while annot_idx < M and self.annotations[annot_idx].stop <= sent_stop:\n annot_idx += 1\n\n # should be one past\n res.append(Sentence(self.pmid, i, sentence,\n sent_idx, sent_stop, self.annotations[start_annot : annot_idx]))\n\n sent_idx += len(sentence) + 1 # all sentences separated by one space\n\n return res"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Given the original text and the list of augmented word tokens, construct and return a tokenized list of sentence strings.
|
def _build_sentence_list(self, text, tokens):
# Most of the work here is making sure that we put the right
# pieces of whitespace back in all the right places.
# Our position in the source text, used to keep track of which
# whitespace to add:
pos = 0
# A regular expression that finds pieces of whitespace:
WS_REGEXP = re.compile(r'\s*')
sentence = ''
for aug_tok in tokens:
tok = aug_tok.tok
# Find the whitespace before this token, and update pos.
ws = WS_REGEXP.match(text, pos).group()
pos += len(ws)
# Some of the rules used by the punkt word tokenizer
# strip whitespace out of the text, resulting in tokens
# that contain whitespace in the source text. If our
# token doesn't match, see if adding whitespace helps.
# If so, then use the version with whitespace.
if text[pos:pos+len(tok)] != tok:
                pat = r'\s*'.join(re.escape(c) for c in tok)
                m = re.compile(pat).match(text, pos)
                if m:
                    tok = m.group()
# Move our position pointer to the end of the token.
assert text[pos:pos+len(tok)] == tok
pos += len(tok)
# Add this token. If it's not at the beginning of the
# sentence, then include any whitespace that separated it
# from the previous token.
if sentence:
sentence += ws
sentence += tok
# If we're at a sentence break, then start a new sentence.
if aug_tok.sentbreak:
yield sentence
sentence = ''
        # If the last sentence is empty, discard it.
if sentence:
yield sentence
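
A minimal usage sketch for the method above, assuming it sits at module level as shown. The AugTok namedtuple is a hypothetical stand-in for the tokenizer's augmented tokens (only the .tok and .sentbreak attributes are read), and since the method never touches self, None is passed in its place.

import re
from collections import namedtuple

AugTok = namedtuple("AugTok", ["tok", "sentbreak"])  # hypothetical stand-in

text = "Hello world. Bye now."
toks = [AugTok("Hello", False), AugTok("world", False), AugTok(".", True),
        AugTok("Bye", False), AugTok("now", False), AugTok(".", True)]

# The generator re-inserts the original whitespace and starts a new sentence
# after every token flagged as a sentence break.
print(list(_build_sentence_list(None, text, toks)))
# expected: ['Hello world.', 'Bye now.']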
|
[
"def engTokenize(text):\n return [token.text for token in eng.tokenizer(text)]",
"def prep_text(mission):\n sentences = nltk.sent_tokenize(mission)\n sentences = [nltk.word_tokenize(sent) for sent in sentences]\n return sentences",
"def sentence_tokenize(self, text_list):\n return [sent_tokenize(text, language=self.lang) for text in text_list]",
"def word_tokenize(self, text_list):\n return [word_tokenize(text, language=self.lang) for text in text_list]",
"def sentences_from_text_legacy(self, text):\n tokens = self._annotate_tokens(self._tokenize_words(text))\n return self._build_sentence_list(text, tokens)",
"def sentence_tokenize(input_text):\n sent_lst = []\n sent_pipe = PARSER.create_pipe(\"sentencizer\")\n PARSER.add_pipe(sent_pipe)\n doc = PARSER(input_text)\n for sent in doc.sents:\n sent_lst.append(sent.text)\n return sent_lst",
"def tokenize_2(review_list):\n texts_list = []\n for doc in tqdm(review_list):\n\n # Parse the doc into tokens\n tokenizer = RegexpTokenizer(r'\\w+')\n raw = doc.lower()\n tokens = tokenizer.tokenize(raw)\n\n # Remove stop words\n en_stop = stopwords.words('english')\n stopped_tokens = [i for i in tokens if not i in en_stop]\n\n # Stem the words\n p_stemmer = PorterStemmer()\n texts = [p_stemmer.stem(i) for i in stopped_tokens]\n texts_list.append(texts)\n return texts_list",
"def tokenization(text):\r\n list_of_punctuations_and_more = ['(', ')', ',', ':', '!', ' ', '\\n', '.', '']\r\n tokens = []\r\n token = ''\r\n for idx, character in enumerate(text):\r\n if any(character in s for s in list_of_punctuations_and_more):\r\n if '\\'' in token:\r\n splitted_word = token.split('\\'')\r\n for contraction in get_contractions():\r\n if contraction[0] == splitted_word[1]:\r\n if contraction[0] == 't':\r\n is_on_list = True\r\n for additional_contraction in get_additional_contractions():\r\n if additional_contraction[0] == splitted_word[0]:\r\n tokens.append(additional_contraction[1])\r\n is_on_list = False\r\n if is_on_list:\r\n tokens.append(splitted_word[0][:-1])\r\n tokens.append(contraction[1])\r\n else:\r\n tokens.append(splitted_word[0])\r\n tokens.append(contraction[1])\r\n else:\r\n tokens.append(token)\r\n tokens.append(character)\r\n token = ''\r\n else:\r\n token = token + character\r\n\r\n unwanted_characters = {'', ' ', '\\n'}\r\n tokens = [ele for ele in tokens if ele not in unwanted_characters] # remove unwanted characters\r\n print('Tokens: ', tokens)\r\n return tokens",
"def _merge_sentences(text):\n\n return [word for sentence in text for word in sentence]",
"def tokenize(text):\n cde_p = Paragraph(text)\n tokens = cde_p.tokens\n toks = []\n for sentence in tokens:\n toks.append([])\n for tok in sentence:\n toks[-1].append(tok.text)\n return toks",
"def tokenize(text):\n\n # normalize case and remove punctuation\n text = re.sub(r\"[^a-zA-Z0-9]\", \" \", text.lower())\n\n #splitting the sentence into words:\n tokens = word_tokenize(text)\n\n #secondly, lemmatize the words\n lemmatizer = WordNetLemmatizer()\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).strip()\n clean_tokens.append(clean_tok)\n return clean_tokens",
"def sentence_extractor(self):\n self.text_sentences = []\n for text in self.texts:\n sentences = nltk.sent_tokenize(text)\n tokens_sentences = []\n for sentence in sentences:\n tokens = nltk.word_tokenize(sentence)\n if self.text_cleaner is not None:\n tokens = self.text_cleaner(tokens)\n if self.stem_words:\n tokens = stem_words(tokens)\n \n tokens_sentences.append(tokens)\n self.text_sentences.append(tokens_sentences)",
"def tokenizedText(files, directory):\n tokens =[]\n for filename in files:\n if '.txt' in filename:\n lines = open(directory + '/'+ filename, 'r').read()\n sentences = re.compile(r'(?<=[.!?;])\\s*').split(lines)\n sentences_with_tag = '';\n for sentence in sentences:\n sentences_with_tag += ' START ' + sentence + ' END '\n try:\n tokens += word_tokenize(sentences_with_tag.decode('utf8')) \n except:\n pass\n return tokens",
"def reconstruct_tokenized(tokenized_text: List[List[str]]) -> Generator[AlignedToken, None, None]:\n SPACES_BEFORE: str = \"([“«\"\n NO_SPACE_BEFORE: str = \".,:!?)]”»\"\n\n orig_pos: int = 0\n adj_pos: int = 0\n\n for s_idx, s in enumerate(tokenized_text):\n if s_idx > 0:\n yield AlignedToken(\"\\n\", (orig_pos, orig_pos + 1), (adj_pos, adj_pos + 1))\n orig_pos += 1\n adj_pos += 1\n\n prev_token: str = \"\"\n for w_idx, w in enumerate(s):\n w_stripped = w.strip()\n\n if not w_stripped:\n # If original text contained a space(-es), let's adjust original position for it\n # + one space after\n orig_pos += len(w)\n if w_idx > 0:\n orig_pos += 1\n\n continue\n\n if w_idx > 0:\n if w_stripped not in NO_SPACE_BEFORE and not prev_token in SPACES_BEFORE:\n yield AlignedToken(\" \", (orig_pos, orig_pos + 1), (adj_pos, adj_pos + 1))\n orig_pos += 1\n adj_pos += 1\n else:\n # If we are omitting the space (for example, before comma), we\n # adjusting original position as if it's there\n orig_pos += 1\n\n yield AlignedToken(w_stripped, (orig_pos, orig_pos + len(w)), (adj_pos, adj_pos + len(w_stripped)))\n\n orig_pos += len(w)\n adj_pos += len(w_stripped)\n\n prev_token = w_stripped",
"def sentence_extractor(self):\n self.text_sentences = []\n for text in self.texts:\n sentences = nltk.sent_tokenize(text)\n tokens_sentences = []\n for sentence in sentences:\n # tokens = nltk.word_tokenize(sentence)\n tokens = GetNounPhrases(sentence)\n if self.text_cleaner is not None:\n tokens = self.text_cleaner(tokens)\n if self.stem_words:\n tokens = stem_words(tokens)\n \n tokens_sentences.append(tokens)\n self.text_sentences.append(tokens_sentences)",
"def split_into_sentences(text: str) -> typing.List[str]:\n\n return nltk.sent_tokenize(text)",
"def tokenize(text):\n # for this function we will be using the same cleaning steps as used in clean_tokenzie exercise during the NLP pipeline lesson\n #as learnt in building NLP pipeline process, we will clean data first by first replacing anything that is not a digit or an alphabet with space\n text = re.sub(r\"[^a-zA-Z0-9]\", \" \", text.lower())\n # Now that the text has been cleaned i.e. punctuations have been removed we can tokenize\n tokens = word_tokenize(text)\n #after using tokenization, we can initiate lemmatizer\n lemmatizer = WordNetLemmatizer()\n #Now we can initiate Stemming\n stemmer = PorterStemmer()\n clean_tokens = []\n for tok in tokens:\n #we will now lemmatize token using verb, and make it lower case and remove empty space\n clean_tok = lemmatizer.lemmatize(tok, pos='v').lower().strip()\n #now we can stem token by using stemmer\n clean_tok = stemmer.stem(clean_tok)\n clean_tokens.append(clean_tok)\n\n return clean_tokens",
"def process_text(text, stem=True):\r\n #text = text.translate(None,string.punctuation)\r\n tokens = word_tokenize(text)\r\n \r\n if stem:\r\n stemmer = PorterStemmer()\r\n tokens = [stemmer.stem(t) for t in tokens]\r\n \r\n return tokens",
"def tokenize(text):\n # regex for URLs to be replaced with a placeholder\n url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n detected_urls = re.findall(url_regex,text)\n for url in detected_urls:\n text = text.replace(url,\"urlplaceholder\")\n # the words in the text input to then be split, tokenised and lemmatized, removing stop words. \n words = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n tokens = [lemmatizer.lemmatize(word) for word in words if word not in stopwords.words(\"english\")]\n return tokens"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
r""" Return the offsets of the tokens in s, as a sequence of ``(start, end)`` tuples, by splitting the string at each successive match of regexp.
|
def regexp_span_tokenize(s, regexp):
left = 0
for m in finditer(regexp, s):
        # Each separator match ends the piece that began at `left`; skip the
        # empty piece produced when the string starts with a separator.
        right, nxt = m.span()
        if right != 0:
            yield left, right
        left = nxt
    # The final piece runs from the last separator to the end of the string.
    yield left, len(s)
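
A short usage sketch, assuming finditer has been imported from re (the call inside the function requires it). Splitting on whitespace yields the span of each whitespace-delimited piece.

from re import finditer  # the function above calls finditer directly

s = "Good muffins cost $3.88"
print(list(regexp_span_tokenize(s, r"\s")))
# expected: [(0, 4), (5, 12), (13, 17), (18, 23)]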
|
[
"def span_tokenize(self, text):\n return [(sl.start, sl.stop) for sl in self._slices_from_text(text)]",
"def preprocess_with_offsets(text: str) -> List[Tuple[int, str]]:\n\n def finditer():\n offset = 0\n\n for mo in __PARAGRAPH_SEP.finditer(text):\n yield (offset, text[offset:mo.start()])\n offset = mo.end()\n\n yield (offset, text[offset:])\n\n return list(finditer())",
"def split_pos(s):\n w = []\n pos = []\n # for i, t in enumerate(s):\n for i in range(len(s)):\n t = s[i]\n ind = t.rindex('/')\n w.append(t[:ind])\n pos.append(t[ind + 1:])\n return w, pos",
"def find_all(s, pattern):\n shift_on_match = 1\n i = 0\n indexes = []\n while 1:\n i = string.find(s, pattern, i)\n if i >= 0:\n indexes.append(i)\n i = i + shift_on_match\n else:\n break\n return indexes",
"def find(self, str):\n return [m.start(0) + self.preLength for m in\n self.re.finditer(str)]",
"def get_stripped_offsets(text: str, tag: str) -> Offset:\n stripped_text = text.strip()\n start = text.find(stripped_text)\n end = start + len(stripped_text)\n return Offset(start, end, tag)",
"def mapSplits(splits, string, stringOffset):\n begin = 0\n tuples = []\n for split in splits:\n offset = string.find(split, begin)\n assert offset != -1\n tuples.append( (split, PorterStemmer.stem(split), (offset,len(split))) )\n begin = offset + len(split)\n return tuples",
"def get_chunks(text, start_tok, end_tok):\n ltext = text.lower()\n idx = -1\n result = []\n while True:\n start = ltext.find(start_tok.lower(), idx+1)\n if start == -1:\n break\n end = ltext.find(end_tok.lower(), start+len(start_tok))\n if start != -1 and end != -1:\n chunk = text[start:end+len(end_tok)]\n result.append(chunk)\n idx = max([idx, start, end]) # sanity\n\n return result",
"def extract_character_spans(string):\n\n if isinstance(string, basestring):\n string = numpy.fromstring(string, \"S1\")\n elif isinstance(string, numpy.ndarray) and string.dtype == numpy.dtype(\"S1\"):\n pass\n else:\n raise ValueError(\"Unable to process input string: %r\" % string)\n\n char_edges = list(numpy.flatnonzero(string[1:] != string[:-1]) + 1)\n\n return numpy.array([(string[start], start, end) for start, end in zip([0] + char_edges, char_edges + [len(string)])], dtype=[(\"type\", \"S1\"), (\"start\", int), (\"end\", int)])",
"def find_all_substring_indexes(string: str, substring: str) -> list:\n start = 0\n while True:\n start = string.find(substring, start)\n if start == -1:\n return\n yield start\n start += len(substring)",
"def split_newlines(s):\n return re.findall(NEWLINE_PTR, s)",
"def find_all_regex(self, find_s: str, s: str) -> list[int]:\n flags = re.MULTILINE\n if self.ignore_case:\n flags |= re.IGNORECASE\n return [m.start() for m in re.finditer(find_s, s, flags)]",
"def span_tokenize(self, text):\n return [sl for sl in self._slices_from_text(text)]",
"def _substring_indexes(substring, string):\n last_found = -1 # Begin at -1 so the next position to search from is 0\n while True:\n # Find next index of substring, by starting after its last known position\n last_found = string.find(substring, last_found + 1)\n if last_found == -1: \n break # All occurrences have been found\n yield last_found",
"def _find_indexes(self, word: str, text: str):\n temp = re.match(r\"\\[([0-9\\-]{0,}):([0-9\\-]{0,})\\]\", word)\n if temp:\n start = int(temp.group(1)) if temp.group(1) != \"\" else 0\n end = int(temp.group(2)) if temp.group(2) != \"\" else len(text)\n start = len(text) + start if start < 0 else start\n end = len(text) + end if end < 0 else end\n return [(start, end)]\n indexes = []\n index = text.find(word)\n while index != -1:\n indexes.append((index, index + len(word)))\n index = text.find(word, index + len(word))\n return indexes",
"def findAll(s,t):\n indices = []\n i = s.find(t)\n while i > -1:\n indices.append(i)\n i = s.find(t,i+1)\n return indices",
"def get_tokens_before_cursor(self):\n count = 0\n result = []\n for c in self.get_tokens():\n if count + len(c[1]) < self.cursor_position:\n result.append(c)\n count += len(c[1])\n elif count < self.cursor_position:\n result.append((c[0], c[1][:self.cursor_position - count]))\n break\n else:\n break\n return result",
"def _slice_tokens(tokens: Tokens, delimiters: Iterable[Text]) -> SplitResult:\n pos = len(tokens)\n for pos, t in enumerate(tokens, 1):\n if t.surface in delimiters:\n break\n return tokens[:pos], tokens[pos:]",
"def get_first_and_last_occurences(s):\r\n first_occ = dict()\r\n last_occ = dict()\r\n for i, run in enumerate(s):\r\n if run.char not in first_occ:\r\n first_occ[run.char] = i\r\n last_occ[run.char] = i\r\n last_occ[run.char] = i\r\n return first_occ, last_occ"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Entry point for a user adding a new stock: displays a message requesting the user to enter a stock symbol.
|
def addNewStock(bot, update):
if update.message.chat.username is None:
# User has no username
update.message.reply_text(
"It seems you do not have a Telegram Username.\nI'll need your username in order to function :( /start me up when you have one! (You can set your username in Settings.)")
else:
# User has username
update.message.reply_text(
"Enter the ticker symbol of the stock you'd like to add:")
return ADDTICKERSYMBOL
|
[
"def create_stock():\n return {\n \"code\": \"success\",\n \"message\": \"stock created\"\n }",
"def display_stock():",
"def addStock(self, stock_id, quantity , unit_price, commission_price, date, trans_type):\n self.conn.execute(\n \"\"\"INSERT INTO portfolio (stock_id, quantity , unit_price, commission_price, date, trans_type) values (?,?,?,?,?,?) \"\"\",\n (stock_id, int(quantity) , unit_price, commission_price, date, trans_type))\n\n if trans_type.lower() == \"sell\":\n self.addUninvested(float((int(quantity) * float(unit_price)) + float(commission_price)))\n elif trans_type.lower() == \"buy\":\n self.subtractUninvested(float((int(quantity) * float(unit_price)) + float(commission_price)))\n self.conn.commit()",
"def test_add_stock_details_no_stock(self):\n self.assertFalse(self.stock.add_stock_details('NOSTOCKNAME'))",
"async def info(self, ctx, stock: str):\n info = self.db.get_stock(stock)\n if len(info) == 0:\n await ctx.channel.send(embed=self.embed(\"No stock called '%s' found in database.\" % stock))\n return\n rating, maxrating = await self.cf.get_rating(stock), await self.cf.get_best_rating(stock)\n market = 0\n for owner, quantity in info:\n if owner == -1:\n market = quantity\n e = Embed(title=\"Stock info for %s\" % stock, color=Color.dark_blue())\n e.add_field(name=\"Current Value\", value=\"**$%.2f**\" % self.stock_value(rating), inline=False)\n e.add_field(name=\"Max. Value\", value=\"$%.2f\" % self.stock_value(maxrating), inline=False)\n e.add_field(name=\"Available Stocks in market\", value=\"%d\" % market, inline=False)\n e.set_footer(text=\"Requested by \"+str(ctx.author), icon_url=ctx.author.avatar_url)\n await ctx.channel.send(embed=e)",
"def quote():\n\tif request.method == \"POST\":\n\t\tquery = request.form.get(\"stock\")\n\t\tstock = lookup(query)\n\t\treturn render_template(\"quote.html\", name=stock['name'] ,price=stock['price'])\n\telse:\n\t\treturn render_template(\"quote.html\")",
"def buy_stock(self, stock_symbol, quantity, price):\n picked_stock = self.get_stock(stock_symbol)\n\n timestamp = time.time()\n new_trade = Trade(\n timestamp,\n quantity,\n Trade.BUY_INDICATOR,\n price\n )\n\n picked_stock.record_trade(new_trade)",
"def add():\n product=db.product[request.args[0]]\n orderline = db.orderline.insert(salesorder=request.vars.salesorder,\n product = product.id,\n price = product.price)\n calc_orderline(db, db.orderline[orderline])\n calc_salesorder(db, db.salesorder[request.vars.salesorder])\n response.js = \"closedialog();refresh('orderline')\"\n return dict()",
"def show_not_enough_stock_message(self, quantity):\n msg = widgets.QMessageBox()\n msg.setIcon(widgets.QMessageBox.Warning)\n msg.setText(\"Not enough stock available\")\n msg.setInformativeText(\"You have requested {0} of item {1} and only {2} are available in stock so the order cannot be fulfilled.\".format(quantity, self.item.name, self.item.loc['stock']))\n msg.setWindowTitle(\"Stock issue\")\n msg.setStandardButtons(widgets.QMessageBox.Ok)\n msg.exec_()",
"def create_new_stock(ticker, name):\n if not validate_ticker(ticker):\n raise Exception(\"Invalid Ticker\")\n stock = Stock(name=name, ticker=ticker)\n stock.save()\n return stock",
"def add_fake_stock(self, api):\n range_start = int(PyMkmHelper.prompt_string(\"Range pid start\"))\n range_end = int(PyMkmHelper.prompt_string(\"Range pid end\"))\n if PyMkmHelper.prompt_bool(\"Sure?\"):\n print(\"Adding fake stock...\")\n product_list = []\n for product_no in range(range_start, range_end):\n product_list.append(\n {\n \"idProduct\": product_no,\n \"idLanguage\": 1,\n \"count\": 1,\n \"price\": 1,\n \"comments\": \"TEST ARTICLE DO NOT BUY\",\n \"condition\": \"PO\",\n \"isFoil\": \"false\",\n }\n )\n\n api.add_stock(product_list)",
"def test_add_stock_details(self):\n s = self.stock.add_stock_details('AAPL')\n self.assertEqual(s.country, ('US',))\n self.assertEqual(s.currency, ('USD',))\n self.assertEqual(s.name, ('Apple Inc',))\n self.assertEqual(s.phone, ('14089961010',))",
"def buyStock(self, amount, asset): # buyStock function\n\tamount_f = float(amount) \n\tif amount_f * asset.price > self.cash: # if there is not enough cash to buy\n\t self.tr += \"Failed to buy the stock\" # record of failed transaction\n\t return \"Not enough cash in portfolio\"\n\tif amount != int(amount): # if the amount input is not proper\n\t self.tr += \"Failed to buy the stock\" # record of failed transaction\n\t return \"You can only buy stocks as whole\"\n\telse: \n\t self.stock += amount_f # add to stocks when you can buy\n\t self.cash -= amount_f * asset.price # subtract the corr. amount from cash\n \t self.tr += \"Bought {0} {1}\\n\".format(amount, asset) # record of transaction",
"def add_to_stock(self, ingredient: str, quantity: int) -> None:\n existing_stock = StockManager.get_instance().stock.get(ingredient, 0)\n StockManager.get_instance().stock[ingredient] = existing_stock + quantity",
"def check_stock(self):\n quantity = int(self.quantityEdit.text())\n \n if len(self.item) > 0 and not self.stock_item:#item pd.Series() is set and not adding stock\n if quantity > self.item.loc['stock']:\n self.show_not_enough_stock_message(quantity)",
"async def buy(self, ctx, stock: str, amount: int):\n if not self.trading:\n await ctx.channel.send(embed=self.embed(\"Trading has been disabled currently!\"))\n return\n if ctx.author.id not in self.users:\n await ctx.channel.send(embed=self.embed(\"You need to set your handle using the `+register` command first.\"))\n return\n if amount <= 0:\n await ctx.channel.send(embed=self.embed(\"You must buy atleast 1 stock.\"))\n return\n info = self.db.get_stock(stock)\n rating = await self.cf.get_rating(stock)\n money = self.db.get_balance(ctx.author.id)\n if len(info) == 0:\n await ctx.channel.send(embed=self.embed(\"No stock called '%s' found in database.\" % stock, 0xFF0000))\n return\n market = 0\n owned = 0\n owns = False\n for owner, quantity in info:\n if owner == ctx.author.id:\n owns = True\n owned = quantity\n if owner == -1:\n market = quantity\n if amount > market:\n await ctx.channel.send(embed=self.embed(\"You cannot buy more stocks than avaiable in the market!\"))\n return\n cost = amount * self.stock_value(rating)\n if cost > money:\n await ctx.channel.send(embed=self.embed(\"You do not have enough money to purchase %d stocks!\" % amount))\n return\n self.db.set_balance(ctx.author.id, money - cost)\n if owns:\n self.db.update_holding(ctx.author.id, stock, owned + amount)\n else:\n self.db.create_holding(ctx.author.id, stock, owned + amount)\n self.db.update_market(stock, market - amount)\n\n await ctx.channel.send(\n embed=self.embed(ctx.author.mention + \", Successfully purchased %d stocks of **%s** for **$%.2f!**\"\n \"\\n\\n Your new balance is **$%.2f**.\"\n % (amount, stock, cost, money-cost), 0x00FF00))",
"def symbolAdded(self, symbol: ghidra.program.model.symbol.Symbol, type: int, addr: ghidra.program.model.address.Address, oldValue: object, newValue: object) -> None:\n ...",
"def stock_interface(request, ticker):\n\n ticker = ticker.upper()\n stock = get_object_or_404(\n Stock, Q(stock_ticker=ticker) | Q(stock_name__iexact=ticker))\n ticker = stock.stock_ticker\n feed = feedparser.parse(\n \"http://articlefeeds.nasdaq.com/nasdaq/symbols?symbol=\" + ticker)\n sanitized_feed = []\n for entry in feed.entries:\n description = re.sub(\"<.*?>\", \"\", entry.description)\n sanitized_feed.append({\n 'title': entry.title,\n 'link': entry.link,\n 'description': description + \"...\"\n })\n stock = Stock.objects.get(stock_ticker=ticker)\n risk_history = []\n price_history = []\n for risk in stock.stock_risk.all().order_by('risk_date'):\n risk_history.append(_calculate_risk(risk))\n for price in stock.stock_price.all().order_by('date'):\n price_history.append(_calculate_price(price))\n\n context = {\n 'stock_name': stock.stock_name,\n 'stock_ticker': ticker,\n 'stock_sector': stock.stock_sector,\n 'stock_feeds': sanitized_feed,\n 'risk_history': json.dumps(risk_history),\n 'price_history': json.dumps(price_history),\n 'current_price': stock.stock_price.all().order_by('date').last().value,\n 'sentiment_value': sentiment.get_stock_sentiment(ticker)\n }\n return render_to_response('modal/stock_interface.html', context)",
"def insert(self):\n self.cursor.insertText(self.symbol)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Permanently removes the user from the application and ends the conversation.
|
def exit(bot, update, user_data):
update.message.reply_text(
"Thank you for using me! All your data has been cleared and you will no longer receive notifications.")
bots.clearChatFromApp(update.message.chat.id)
user_data.clear()
return ConversationHandler.END
|
[
"def end() -> None:\n session.pop(KEY_USER_ID, None)\n session.pop(KEY_USER_AUTH_TOKEN, None)\n session.permanent = False",
"def remove_user():\r\n user_input = input(\"| Enter the name of the User |\")\r\n aduser.ADUser.from_cn(user_input).delete()\r\n return \"| User removed |\"",
"def delete_user(self):\n \n User.user_list.remove(self)",
"def delete_oauth_for_user_application(sender, instance, **kwargs):\n if instance:\n Application.objects.filter(name=instance.uuid).delete()",
"def delete_user():\n os.remove(_user_path())",
"def decline_invitation(user: models.User, game: models.Game):\n if game.invited != user:\n raise RequestError(2111)\n _end_socket_session(game.host, game)\n game.delete_instance()",
"def handler_delete_user(sender, instance, **kwargs):\n rocket_admin = RocketChat(\n settings.ROCKETCHAT_USER,\n settings.ROCKETCHAT_PASSWORD\n )\n rocket_admin.users_delete(instance.chat.chat_user_id)",
"def del_user_window(self):\n global DEL_WINDOW\n DEL_WINDOW = DeleteUserWindow(self.server, self.database)",
"def delete_user():\n if session.get('user_id'):\n response = nb_session.delete(\n f'https://{nation_slug}.nationbuilder.com/api/v1/people/{session[\"user_id\"]}',\n )\n session.pop('user_id')\n session.clear()\n\n return redirect(url_for('people'))",
"def logout():\n session.pop('user')\n pass",
"def close_game(self):\n\t\tfor key in self.RestReceiver.get_controlled_entities():\n\t\t\tself.client.unsubscribe(key)\n\t\tself.ACTIVE = False",
"def persona_logout():\n if 'user_id' in session:\n del session['user_id']\n return 'OK'",
"def delete_User(self):\n User.user_lst.remove(self)",
"def del_account(self):\n self.user_choice.initialize_connection()\n self.user_choice.cursor.execute(\"DELETE FROM users WHERE pseudo = %s;\", (self.pseudo,))\n self.user_choice.connection.commit()\n self.user_choice.close_connection()",
"def clear(user_id=None):",
"def unbecome_user(request):\n session = request.session\n if 'impersonate_id' in session:\n del session['impersonate_id']\n session.save()\n return HttpResponseRedirect(request.GET.get('next', reverse('threads')))",
"def remove_user(self, user, room_number):\n if user in self.users:\n self.users.remove(user)\n user.set_room(room_number)\n self.write_to_logs(f\">>> Client has been disconnected. {user} <<<\")",
"def end_connection(self, username):\n if username in self.connected_users:\n del(self.connected_users[username])\n\n if username in self.message_queues:\n del(self.message_queues[username])\n\n self.send_message(username, \"Left chatty.\", message_type=\"status\")",
"def remove(self, user: Optional[str] = None):\n raise NotImplementedError"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sends registered users a notification if their saved threshold was exceeded. JEN first updates prices for all stocks saved in the application. For each stock with an exceeded threshold, JEN sends a notification to the corresponding user.
|
def notifyUsersIfThresholdExceeded(bot, job):
bots.updatePriceOfExistingStocks()
userIDs, messages = bots.extractTriggeredStocks()
    for user_id, message in zip(userIDs, messages):
        print(user_id, message)
        bot.send_message(chat_id=user_id,
                         text=message, parse_mode='HTML')
|
[
"def notification_trigger(self):\n self.today = self.entry_date.strftime(\"%Y-%m-%d\")\n #finding notify items\n self.df_notify = self.df_user.loc[self.df_user[\"notify (days)\"] <= self.today] \n self.name_notify = list(self.df_notify[\"title\"])\n #EXPIRED THINGS\n self.df_exp_dead = self.df_user.loc[self.df_user[\"expiration (days)\"] < self.today]\n self.names_expired = list(self.df_exp_dead[\"title\"])\n #NOTIFY ITEMS\n self.list_notify_notexpired = [x for x in self.name_notify if x not in self.names_expired]\n\n self.result.config(text=\"EXPIRES SOON:\")\n self.result3.config(text=\", \".join(self.list_notify_notexpired))\n self.result4.config(text=\"EXPIRED ITEMS: \"+\", \".join(self.names_expired))",
"def poll(self):\n amount, currency = get_bitcoin_sell_price()\n\n if not self.min_limit <= amount <= self.max_limit:\n message = \"Alert: Current price of 1 BTC is {:.2f} {}\".format(amount, currency)\n send_sms(self.recipient, message)\n print(message)\n else:\n print(\"Current price of 1 BTC is {:.2f} {}. Within bounds, doing nothing.\".format(amount, currency))",
"def notify_grades(bot, _):\n all_users = db.fetch_all_logins()\n for user in all_users:\n logging.debug(f'updating grades for {user[0]}')\n chat_id = user[0]\n grade_count = user[1]\n login = user[2]\n\n try:\n current_grades = _fetch_grades(login)\n except HTWAuthenticationException:\n db.remove_login(chat_id)\n bot.send_message(chat_id=chat_id,\n text='Der Server der HTW meint dein Login sei nicht (mehr) valide. Ich habe diesen aus '\n 'meiner Datenbank entfernt und werde *nicht* mehr für dich nach neuen Noten schauen. '\n 'Solltest du nur dein Passwort geändert haben, so sende mir bitte erneut den '\n '/login Befehl mit deinen Logindetails.',\n parse_mode=ParseMode.MARKDOWN)\n continue\n except HTWServerException as e:\n # stop the entire update if the server is down\n logging.error(f'grades api error {e}')\n return\n\n if grade_count == -1:\n # grades have never been fetched for this user, not sending a notification at this time\n db.update_grade_count_for_user(login.s_number, len(current_grades))\n time.sleep(2)\n continue\n\n if len(current_grades) == grade_count:\n time.sleep(2)\n continue\n\n grade_diff = len(current_grades) - grade_count\n db.update_grade_count_for_user(login.s_number, len(current_grades))\n\n if grade_diff < 0:\n bot.send_message(chat_id=chat_id, text='{} Noten wurden entfernt! /noten?'.format(-grade_diff))\n elif grade_diff == 1:\n bot.send_message(chat_id=chat_id, text='Eine neue Note ist verfügbar! /noten?'.format(grade_diff))\n else:\n bot.send_message(chat_id=chat_id, text='{} neue Noten sind verfügbar! /noten?'.format(grade_diff))\n\n time.sleep(2) # just in case, don't want to stress the endpoint too much",
"def update_stats():\n\n html_message = render_to_string('stats_email.html', {\n 'users': CustomUser.objects.filter(is_active=True).order_by('username'),\n 'pair_min': pair_minimum,\n 'gen_date': datetime.now().date()\n })\n\n try:\n send_mail('AE Pairs monthly report', '', secret_dict['em_user'],\n [admin['email'] for admin in secret_dict['admins']], html_message=html_message)\n\n except SMTPException as e:\n logger.warning('An exception occurred while sending the monthly report. SMTP exception: {0}.'.format(e))\n\n with open('Monthly_report_date_{0}.html'.format(datetime.now().date()), 'w') as file:\n file.write(html_message)\n\n for user in CustomUser.objects.all():\n user.profit = 0\n user.pairs_count = 0\n user.save(update_fields=['profit', 'pairs_count'])\n\n logger.info('Users statistics update completed.')",
"def main():\n bots.setup_database()\n updater = Updater(token=TOKEN)\n jobQueue = updater.job_queue\n dispatcher = updater.dispatcher\n\n # Set price updater and notifier to execute every 24 hours\n job_minute = jobQueue.run_repeating(\n notifyUsersIfThresholdExceeded, interval=86400, first=0)\n\n # Set the conversation handler\n conv_handler = ConversationHandler( # Handles different commands, states.\n entry_points=[CommandHandler('start', start)],\n states={\n MENU: [RegexHandler('^(' + emoji.emojize(':heavy_plus_sign: Add a stock :heavy_plus_sign:', use_aliases=True)+')$', addNewStock),\n RegexHandler(\n '^(' + emoji.emojize(':eyes: View all stocks :eyes:', use_aliases=True)+')$', viewUserStocks),\n RegexHandler(\n '^(' + emoji.emojize(':cross_mark: Delete a stock :cross_mark:', use_aliases=True)+')$', deleteStock),\n MessageHandler(Filters.text, unknownCommand, pass_user_data=True)],\n ADDTICKERSYMBOL: [MessageHandler(Filters.text, addTickerOffer, pass_user_data=True)],\n ADDTICKERVERIFICATION: [MessageHandler(Filters.text, addTickerVerification, pass_user_data=True)],\n ADDTICKERTRIGGER: [MessageHandler(Filters.text, addTickerTrigger, pass_user_data=True)],\n ADDTICKERCONFIRMATION: [MessageHandler(Filters.text, addTickerConfirmation, pass_user_data=True)],\n DELETESTOCK: [MessageHandler(Filters.text, deleteIdentifiedStock)]\n },\n fallbacks=[CommandHandler('exit', exit, pass_user_data=True),\n CommandHandler('help', instructions, pass_user_data=True),\n CommandHandler('seeya', seeya, pass_user_data=True),\n RegexHandler('^Main Menu$', start),\n CommandHandler('menu', start)]\n )\n\n dispatcher.add_handler(conv_handler)\n dispatcher.add_error_handler(error)\n updater.start_polling()\n updater.idle()",
"def get(self):\n datastore_hooks.SetPrivilegedRequest()\n sheriffs_to_email_query = sheriff.Sheriff.query(\n sheriff.Sheriff.stoppage_alert_delay > 0)\n for sheriff_entity in sheriffs_to_email_query:\n _SendStoppageAlertEmail(sheriff_entity)",
"def ninety_five_percent_spend_email(self):\n accounts = Client.objects.filter(salesprofile__ppc_status=1)\n\n now = datetime.datetime.now()\n email_records_this_month = SentEmailRecord.objects.filter(month=now.month, year=now.year)\n already_sent_email_accounts = [record.account for record in email_records_this_month]\n\n accounts_at_issue = [account for account in accounts if\n account.default_budget is not None and account.default_budget.calculated_spend > (\n 0.95 * account.default_budget.calculated_budget)]\n\n for account in accounts_at_issue:\n if account in already_sent_email_accounts:\n continue\n\n team_lead_emails = [team_lead.user.email for team_lead in account.team_leads]\n msg_body = str(account) + ' has reached 95% of its montly spend.'\n send_mail(msg_body, msg_body, settings.EMAIL_HOST_USER, team_lead_emails, fail_silently=False,\n html_message=msg_body)\n SentEmailRecord.objects.create(account=account, email_type=0, month=now.month, year=now.year)\n\n return 'ninety_five_percent_spend_email'",
"def notify_custom(self, request):\n selected_users = request.POST.getlist(\"_selected_action\")\n email_address = User.objects.filter(pk__in=selected_users).values('email')\n send_email.delay('notification', 'email content', email_address)\n self.message_user(request, \"an email notification sent to users\")",
"def fetch_notifications_esi(self, user: User = None) -> None:\n notifications_count_all = 0\n self.notifications_last_update_ok = None\n self.notifications_last_update_at = now()\n self.save()\n token = self.fetch_token(rotate_characters=True)\n\n try:\n notifications = self._fetch_notifications_from_esi(token)\n except OSError as ex:\n message_id = (\n f\"{__title__}-fetch_notifications-{self.pk}-{type(ex).__name__}\"\n )\n title = f\"{__title__}: Failed to update notifications for {self}\"\n message = f\"{self}: Failed to update notifications from ESI due to {ex}\"\n logger.exception(message)\n notify_admins_throttled(\n message_id=message_id,\n title=title,\n message=message,\n level=\"danger\",\n timeout=STRUCTURES_NOTIFY_THROTTLED_TIMEOUT,\n )\n self.notifications_last_update_ok = False\n self.save()\n raise ex\n else:\n notifications_count_new = self._store_notifications(notifications)\n self._process_moon_notifications()\n if notifications_count_new > 0:\n logger.info(\n \"%s: Received %d new notifications from ESI\",\n self,\n notifications_count_new,\n )\n self._process_timers_for_notifications(token)\n notifications_count_all += notifications_count_new\n\n else:\n logger.info(\"%s: No new notifications received from ESI\", self)\n\n self.notifications_last_update_ok = True\n self.save()\n\n if user:\n self._send_report_to_user(\n topic=\"notifications\",\n topic_count=notifications_count_all,\n user=user,\n )",
"def dao_timeout_notifications(cutoff_time, limit=100000):\n updated_at = datetime.utcnow()\n current_statuses = [NOTIFICATION_SENDING, NOTIFICATION_PENDING]\n new_status = NOTIFICATION_TEMPORARY_FAILURE\n\n notifications = (\n Notification.query.filter(\n Notification.created_at < cutoff_time,\n Notification.status.in_(current_statuses),\n Notification.notification_type.in_([SMS_TYPE, EMAIL_TYPE]),\n )\n .limit(limit)\n .all()\n )\n\n Notification.query.filter(\n Notification.id.in_([n.id for n in notifications]),\n ).update({\"status\": new_status, \"updated_at\": updated_at}, synchronize_session=False)\n\n db.session.commit()\n return notifications",
"def update_mrt_alert(context: CallbackContext):\n db.execute(\"SELECT * FROM mrt_updates ORDER BY datetime DESC\")\n latest_msg = db.fetchone()\n # If latest_msg is not None. None occurs when mrt_updates table is empty\n if latest_msg is not None:\n latest_msg = latest_msg[0]\n\n if get_mrt_alerts() != latest_msg and get_mrt_alerts() != 'All Train Services Working Normally 👍':\n db.execute(\"SELECT * FROM all_users WHERE receive_alerts='Yes'\")\n users = db.fetchall()\n for user in users:\n context.bot.send_message(chat_id=user[0], text=get_mrt_alerts())\n db.execute(\"INSERT INTO mrt_updates VALUES (%s, %s) ON CONFLICT (message) DO NOTHING\",\n (get_mrt_alerts(), str(datetime.utcnow() + timedelta(hours=8)).split('.')[0]))",
"def check_stock(self):\n quantity = int(self.quantityEdit.text())\n \n if len(self.item) > 0 and not self.stock_item:#item pd.Series() is set and not adding stock\n if quantity > self.item.loc['stock']:\n self.show_not_enough_stock_message(quantity)",
"def notify_all_superusers(user):\n if user:\n try:\n superusers = Users.objects.filter(is_superuser=True)\n for superuser in superusers:\n Notifications.objects.create(\n notification_name=\"New user \" + user.login + \" registered\",\n notification_information=\"Registered as \" + user.name + \" \" + user.lastname,\n category=notification_category['registered_new_user'],\n is_read=False,\n create_time=timezone.now(),\n user_id=superuser.id\n )\n except DatabaseError:\n error(user.id, _(\"DataBase - Problem with create a notifications\"))",
"def scheduled_stocktake_reports():\n\n # Sleep a random number of seconds to prevent worker conflict\n time.sleep(random.randint(1, 5))\n\n # First let's delete any old stocktake reports\n delete_n_days = int(common.models.InvenTreeSetting.get_setting('STOCKTAKE_DELETE_REPORT_DAYS', 30, cache=False))\n threshold = datetime.now() - timedelta(days=delete_n_days)\n old_reports = part.models.PartStocktakeReport.objects.filter(date__lt=threshold)\n\n if old_reports.count() > 0:\n logger.info(f\"Deleting {old_reports.count()} stale stocktake reports\")\n old_reports.delete()\n\n # Next, check if stocktake functionality is enabled\n if not common.models.InvenTreeSetting.get_setting('STOCKTAKE_ENABLE', False, cache=False):\n logger.info(\"Stocktake functionality is not enabled - exiting\")\n return\n\n report_n_days = int(common.models.InvenTreeSetting.get_setting('STOCKTAKE_AUTO_DAYS', 0, cache=False))\n\n if report_n_days < 1:\n logger.info(\"Stocktake auto reports are disabled, exiting\")\n return\n\n if not check_daily_holdoff('STOCKTAKE_RECENT_REPORT', report_n_days):\n logger.info(\"Stocktake report was recently generated - exiting\")\n return\n\n # Let's start a new stocktake report for all parts\n part.stocktake.generate_stocktake_report(update_parts=True)\n\n # Record the date of this report\n record_task_success('STOCKTAKE_RECENT_REPORT')",
"def miner_handler(ema: float):\n miner_on = miner_is_on()\n logger.info(f'current gas price EMA: {ema} GWEI')\n logger.info(f'miner status: {\"on\" if miner_on else \"off\"}')\n if ema >= GAS_PRICE_THRESHOLD and not miner_on:\n logger.info(f'turning miner on...')\n toggle_miner_on_off()\n elif ema < GAS_PRICE_THRESHOLD and miner_on:\n logger.info(f'turning miner off...')\n toggle_miner_on_off()\n else:\n logger.info('no action necessary')",
"def powerIsLarger(self):\n self.userPkmn.battleDelegate.stats[self.stat] = 20\n self.targetPkmn.battleDelegate.stats[self.stat] = 25\n power = self.delegate.getPower(self.user, self.target)\n \n assert power > StatRatioRangeDelegate.base, \"Power should be larger when user's stat decreases\"",
"def update_stockcounter(self, stock):\n\n bg = stock.get_mw_price()\n self.update_portfolio()\n stock.counter = int(float(self.buyingpower / bg / stock.tradeshares))\n print \" --- Updated Net Worth: %s | Buying Power: %s ---\" % (self.networth, self.buyingpower)",
"def _notify_for_ob(cls): # pylint: disable=too-many-locals\n unpaid_status = (\n InvoiceStatus.SETTLEMENT_SCHEDULED.value, InvoiceStatus.PARTIAL.value, InvoiceStatus.CREATED.value)\n notification_date = datetime.today() - timedelta(days=current_app.config.get('NOTIFY_AFTER_DAYS'))\n # Get distinct accounts with pending invoices for that exact day\n notification_pending_accounts = db.session.query(InvoiceModel.payment_account_id).distinct().filter(and_(\n InvoiceModel.invoice_status_code.in_(unpaid_status),\n InvoiceModel.payment_method_code == PaymentMethod.ONLINE_BANKING.value,\n # cast is used to get the exact match stripping the timestamp from date\n cast(InvoiceModel.created_on, Date) == notification_date.date()\n )).all()\n current_app.logger.debug(f'Found {len(notification_pending_accounts)} invoices to notify admins.')\n for payment_account in notification_pending_accounts:\n try:\n payment_account_id = payment_account[0]\n total = db.session.query(func.sum(InvoiceModel.total).label('total')).filter(and_(\n InvoiceModel.invoice_status_code.in_(unpaid_status),\n InvoiceModel.payment_account_id == payment_account_id,\n InvoiceModel.payment_method_code == PaymentMethod.ONLINE_BANKING.value\n )).group_by(InvoiceModel.payment_account_id).all()\n pay_account: PaymentAccountModel = \\\n PaymentAccountModel.find_by_id(payment_account_id)\n\n cfs_account = CfsAccountModel.find_effective_by_account_id(payment_account_id)\n\n # emit account mailer event\n addition_params_to_mailer = {'transactionAmount': float(total[0][0]),\n 'cfsAccountId': cfs_account.cfs_account,\n 'authAccountId': pay_account.auth_account_id,\n }\n mailer.publish_mailer_events('ob.outstandingInvoice', pay_account, addition_params_to_mailer)\n except Exception as e: # NOQA # pylint: disable=broad-except\n capture_message(f'Error on notifying mailer OB Pending invoice: account id={pay_account.id}, '\n f'auth account : {pay_account.auth_account_id}, ERROR : {str(e)}', level='error')\n current_app.logger.error(e)",
"def send_to_pubsub_topic(self, stocks):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Main function to be executed. If the database does not exist, JEN sets it up. A job that updates the prices of existing stocks is added to the jobQueue and runs once every 24 hours; during each run, JEN checks whether any user's threshold has been exceeded and sends a notification accordingly. The polling process runs continuously.
|
def main():
bots.setup_database()
updater = Updater(token=TOKEN)
jobQueue = updater.job_queue
dispatcher = updater.dispatcher
# Set price updater and notifier to execute every 24 hours
    job_daily = jobQueue.run_repeating(
        notifyUsersIfThresholdExceeded, interval=86400, first=0)
# Set the conversation handler
conv_handler = ConversationHandler( # Handles different commands, states.
entry_points=[CommandHandler('start', start)],
states={
MENU: [RegexHandler('^(' + emoji.emojize(':heavy_plus_sign: Add a stock :heavy_plus_sign:', use_aliases=True)+')$', addNewStock),
RegexHandler(
'^(' + emoji.emojize(':eyes: View all stocks :eyes:', use_aliases=True)+')$', viewUserStocks),
RegexHandler(
'^(' + emoji.emojize(':cross_mark: Delete a stock :cross_mark:', use_aliases=True)+')$', deleteStock),
MessageHandler(Filters.text, unknownCommand, pass_user_data=True)],
ADDTICKERSYMBOL: [MessageHandler(Filters.text, addTickerOffer, pass_user_data=True)],
ADDTICKERVERIFICATION: [MessageHandler(Filters.text, addTickerVerification, pass_user_data=True)],
ADDTICKERTRIGGER: [MessageHandler(Filters.text, addTickerTrigger, pass_user_data=True)],
ADDTICKERCONFIRMATION: [MessageHandler(Filters.text, addTickerConfirmation, pass_user_data=True)],
DELETESTOCK: [MessageHandler(Filters.text, deleteIdentifiedStock)]
},
fallbacks=[CommandHandler('exit', exit, pass_user_data=True),
CommandHandler('help', instructions, pass_user_data=True),
CommandHandler('seeya', seeya, pass_user_data=True),
RegexHandler('^Main Menu$', start),
CommandHandler('menu', start)]
)
dispatcher.add_handler(conv_handler)
dispatcher.add_error_handler(error)
updater.start_polling()
updater.idle()
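
The function above references a bot TOKEN and the conversation-state constants (MENU, ADDTICKERSYMBOL, and so on) that the snippet never defines. A minimal sketch of how that module-level setup might look, assuming the token is read from an environment variable; the variable name and the use of range(6) are illustrative assumptions, not taken from the source.

import os

# Hypothetical setup; the real project may define these differently.
TOKEN = os.environ.get("TELEGRAM_BOT_TOKEN", "")
(MENU, ADDTICKERSYMBOL, ADDTICKERVERIFICATION,
 ADDTICKERTRIGGER, ADDTICKERCONFIRMATION, DELETESTOCK) = range(6)

if __name__ == '__main__':
    main()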
|
[
"def create_jobs_and_queue(self):\n new_job_exists = False\n\n #c = self.db.cursor(cursor_factory=psycopg2.extras.DictCursor)\n #c.execute(\"SELECT * FROM deepstyle_job WHERE job_status='Q'\")\n c = self.safe_execute_sql(\"SELECT * FROM deepstyle_job WHERE job_status='Q'\", curs_fact=True)\n\n row = c.fetchone()\n\n while row is not None:\n try:\n self.job_queue.put(job(j_id=row['id'],\n im_name1= row['input_image'],\n im_name2= row['style_image'],\n output_name= row['output_image'],\n content_weight=row['content_weight'],\n content_blend=row['content_weight_blend'],\n style_weight=row['style_weight'],\n style_scale=row['style_scale'],\n style_blend=row['style_blend_weights'],\n style_layer_weight_exp=row['style_layer_weight_exp'],\n iterations=row['iterations'],\n preserve_color=row['preserve_color'],\n width=row['output_width'])\n )\n\n # Set queue status of current row's id to be 'queued'\n self.safe_execute_sql(\"UPDATE deepstyle_job SET job_status='P' WHERE id = (%s)\", True, (row['id'],))\n # c.execute(\"UPDATE deepstyle_job SET job_status='P' WHERE id = (%s)\", (row['id'],))\n new_job_exists = True\n self.logger.log.info(\"Job %d set In Progress\" % row['id'])\n\n\n except Exception as e:\n self.logger.log.error(\"Job %d could not be set In Progress\" % row['id'])\n self.logger.log.exception(e)\n\n #z = self.db.cursor()\n #z.execute(\"UPDATE deepstyle_job SET job_status='F' WHERE id = (%s)\", (row['id'],))\n self.safe_execute_sql(\"UPDATE deepstyle_job SET job_status='F' WHERE id = (%s)\", True, (row['id'],))\n\n try:\n row = c.fetchone()\n except:\n break\n\n c.close()\n\n if new_job_exists:\n self.db.commit()",
"def worker():\n global PREV_WORKER_TIME # pylint: disable=global-statement\n global NEXT_WORKER_TIME # pylint: disable=global-statement\n PREV_WORKER_TIME = NEXT_WORKER_TIME # pylint: disable=used-before-assignment\n NEXT_WORKER_TIME = time()\n\n running_jobs_count = 0\n\n inventory = Collection(\"inventory\")\n\n for prcuuid in inventory.find_objuuids(type=\"procedure\"):\n procedure = inventory.get_object(prcuuid)\n\n if \"enabled\" not in procedure.object:\n logging.warning('setting \"enabled\" to false')\n procedure.object[\"enabled\"] = False\n procedure.set()\n\n if \"seconds\" not in procedure.object:\n logging.warning('setting \"seconds\" to \"0\"')\n procedure.object[\"seconds\"] = \"0\"\n procedure.set()\n\n if \"minutes\" not in procedure.object:\n logging.warning('setting \"minutes\" to \"*\"')\n procedure.object[\"minutes\"] = \"*\"\n procedure.set()\n\n if \"hours\" not in procedure.object:\n logging.warning('setting \"hours\" to \"*\"')\n procedure.object[\"hours\"] = \"*\"\n procedure.set()\n\n if \"dayofmonth\" not in procedure.object:\n logging.warning('setting \"dayofmonth\" to \"*\"')\n procedure.object[\"dayofmonth\"] = \"*\"\n procedure.set()\n\n if \"dayofweek\" not in procedure.object:\n logging.warning('setting \"dayofweek\" to \"*\"')\n procedure.object[\"dayofweek\"] = \"*\"\n procedure.set()\n\n if \"year\" not in procedure.object:\n logging.warning('setting \"year\" to \"*\"')\n procedure.object[\"year\"] = \"*\"\n procedure.set()\n\n if procedure.object[\"enabled\"] in (True, \"true\"):\n for epoch_time in range(int(PREV_WORKER_TIME), int(NEXT_WORKER_TIME)):\n now = datetime.fromtimestamp(epoch_time).now()\n # pylint: disable=too-many-boolean-expressions\n if (\n eval_cron_field(procedure.object[\"seconds\"], now.second) and\n eval_cron_field(procedure.object[\"minutes\"], now.minute) and\n eval_cron_field(procedure.object[\"hours\"], now.hour) and\n eval_cron_field(procedure.object[\"dayofmonth\"], now.day) and\n eval_cron_field(procedure.object[\"dayofweek\"], now.weekday()) and\n eval_cron_field(procedure.object[\"year\"], now.year)\n ):\n for hstuuid in procedure.object[\"hosts\"]:\n queue_procedure(hstuuid, procedure.objuuid, None)\n break\n\n try:\n JOB_LOCK.acquire()\n\n # Concurrency conditioning\n for key in list(JOBS.keys()):\n try:\n assert int(JOBS[key][\"host\"][\"concurrency\"]) > 0\n except (AssertionError, KeyError, ValueError):\n logging.warning('host concurrency defaulting to 1')\n JOBS[key][\"host\"][\"concurrency\"] = \"1\"\n\n try:\n assert int(JOBS[key][\"console\"][\"concurrency\"]) > 0\n except (AssertionError, KeyError, ValueError):\n logging.warning('console concurrency defaulting to 1')\n JOBS[key][\"console\"][\"concurrency\"] = \"1\"\n\n running_jobs_counts = {}\n for key in list(JOBS.keys()):\n running_jobs_counts[JOBS[key][\"host\"][\"objuuid\"]] = 0\n running_jobs_counts[JOBS[key][\"console\"][\"objuuid\"]] = 0\n\n for key in list(JOBS.keys()):\n if JOBS[key][\"process\"] is not None:\n if JOBS[key][\"process\"].is_alive():\n running_jobs_count += 1\n running_jobs_counts[JOBS[key][\"host\"][\"objuuid\"]] += 1\n running_jobs_counts[JOBS[key][\"console\"][\"objuuid\"]] += 1\n else:\n release_display_row(JOBS[key][\"display row\"])\n del JOBS[key]\n\n for key in list(JOBS.keys()):\n if running_jobs_count < int(get_config()[\"concurrency\"]):\n if JOBS[key][\"process\"] is None:\n if (\n running_jobs_counts[JOBS[key][\"host\"][\"objuuid\"]] < \\\n int(JOBS[key][\"host\"][\"concurrency\"]) and\n 
running_jobs_counts[JOBS[key][\"console\"][\"objuuid\"]] < \\\n int(JOBS[key][\"console\"][\"concurrency\"])\n ):\n\n JOBS[key][\"process\"] = Process(\n target=run_procedure,\n args=(\n JOBS[key][\"host\"],\n JOBS[key][\"procedure\"],\n JOBS[key][\"console\"],\n JOBS[key][\"ctruuid\"],\n JOBS[key][\"display row\"]\n )\n )\n\n JOBS[key][\"start time\"] = time()\n JOBS[key][\"process\"].start()\n\n running_jobs_count += 1\n running_jobs_counts[JOBS[key][\"host\"][\"objuuid\"]] += 1\n running_jobs_counts[JOBS[key][\"console\"][\"objuuid\"]] += 1\n\n kvstore.touch(\"queueState\")\n except Exception as exception: # pylint: disable=broad-except\n logging.error(exception)\n finally:\n JOB_LOCK.release()\n start_timer()",
"def database_start(job_name, db=None, url=None):\n\n parts = urlparse(db)\n db_name = parts.path[1:]\n msg = 'Running expensive time consuming job on *{name}*'.format(\n name=db_name\n )\n\n requests.post(url, json={\n 'attachments': [{\n 'fallback': msg,\n 'color': 'warning',\n 'title': 'Running job on %s' % db_name,\n 'author_name': job_name,\n \"text\": msg,\n }]\n })",
"def _cron(self):\n while True:\n self.check_update()\n sleep(60)",
"def run_job():",
"def run(self):\n try:\n # Connection to SQLite database\n self._dbconn = sqlite3.connect(self.database_path)\n except sqlite3.Error, e:\n raise LagartoException(e.args[0])\n\n while self._run_db:\n # Gets the SQL query from the queue\n query = self._queue.get()\n self._db_response = None\n self._db_error = None \n \n try:\n c = self._dbconn.cursor()\n # Create table\n c.execute(query)\n \n if (query.startswith(\"SELECT\")):\n # Get response\n self._db_response = c.fetchall()\n else:\n # Commit changes\n self._dbconn.commit()\n except sqlite3.Error, e:\n self._db_error = e.args[0]\n finally:\n # Job done\n self._queue.task_done()",
"def start(self):\n if self.running:\n raise AlreadyRunningException\n self._stopped = False\n self.log.debug('eric master start...')\n\n self.start_subscribe_thread()\n\n while True:\n now = datetime.now(self.timezone)\n wait_seconds = None\n with distributed_lock(**settings.DISTRIBUTED_LOCK_CONFIG):\n for job_id, job_key, serialized_job in self.jobstore.get_due_jobs(now):\n # enqueue due job into redis queue\n self._enqueue_job(job_key, serialized_job)\n # update job's information, such as next_run_time\n job = Job.deserialize(serialized_job)\n last_run_time = Job.get_serial_run_times(job, now)\n if last_run_time:\n next_run_time = Job.get_next_trigger_time(job, last_run_time[-1])\n if next_run_time:\n job.next_run_time = next_run_time\n self.update_job(job)\n else:\n # if job has no next run time, then remove it from jobstore\n self.remove_job(job)\n\n # get next closet run time job from jobstore and set it to be wake up time\n closest_run_time = self.jobstore.get_closest_run_time()\n\n if closest_run_time is not None:\n wait_seconds = min(max(timedelta_seconds(closest_run_time - now), 0), self.MIN_WAIT_TIME)\n self.log.debug('Next wakeup is due at %s (in %f seconds)' % (closest_run_time, wait_seconds))\n self._event.wait(wait_seconds if wait_seconds is not None else self.MIN_WAIT_TIME)\n self._event.clear()",
"def check_database(self):\n orgs = Organization.objects.filter(github_path__isnull=False)\n projects = Project.objects.filter(github_path__isnull=False)\n\n self.queue_items(orgs)\n self.queue_items(projects)\n\n self.check_database_timer = DaemonTimer(self.interval_check_database, self.check_database)\n self.check_database_timer.start()",
"def monitorDatabase(self):\n self.running = True\n self.setupXMLRPC()\n self.setupBonjour()\n while self.running:\n # Grab the latest device and orders data\n self.cursor.execute(\"SELECT * FROM device_loc\")\n self.device_list = []\n for device in self.cursor:\n device = Device(device)\n self.device_list.append(device)\n self.cursor.execute(\"SELECT * FROM orders_db\")\n self.orders_list = []\n for order in self.cursor:\n order = Order(order)\n self.orders_list.append(order)\n \n # Look for devices with assignments and non-zero locations\n for device in self.device_list:\n if device.lat != 0 and device.long != 0 and device.status_code > 0:\n try:\n order = self.orders_list[self.orders_list.index(device.status_code)]\n except ValueError as error:\n pass\n try:\n self.dispatched_orders.index(order.id)\n except ValueError as error:\n if order.lat != 0 and order.long != 0:\n # Dispatch the order\n self.dispatchOrder(order, device)\n self.dispatched_orders.append(order)\n time.sleep(1)",
"def run_job(self):\n\n try:\n job_item = self.job_queue.get(block=False, timeout=1)\n except Exception:\n self.log.debug(\n \"Directord server found nothing to do, cooling down\"\n \" the poller.\"\n )\n return 512, time.time()\n else:\n restrict_sha256 = job_item.get(\"restrict\")\n if restrict_sha256:\n if job_item[\"task_sha256sum\"] not in restrict_sha256:\n self.log.debug(\n \"Job restriction %s is unknown.\", restrict_sha256\n )\n return 512, time.time()\n\n job_targets = job_item.pop(\"targets\", list())\n # NOTE(cloudnull): We run on all targets if query is used.\n run_query = job_item[\"verb\"] == \"QUERY\"\n\n if job_targets and not run_query:\n targets = list()\n for job_target in job_targets:\n job_target = job_target.encode()\n if job_target in self.workers:\n targets.append(job_target)\n else:\n self.log.critical(\n \"Target %s is in an unknown state.\", job_target\n )\n return 512, time.time()\n else:\n targets = self.workers.keys()\n\n if job_item.get(\"run_once\", False) and not run_query:\n self.log.debug(\"Run once enabled.\")\n targets = [targets[0]]\n\n if run_query:\n job_item[\"targets\"] = [i.decode() for i in targets]\n\n task = job_item.get(\"task\", utils.get_uuid())\n job_info = self.create_return_jobs(\n task=task, job_item=job_item, targets=targets\n )\n self.log.debug(\"Sending job:%s\", job_item)\n for identity in targets:\n if job_item[\"verb\"] in [\"ADD\", \"COPY\"]:\n for file_path in job_item[\"from\"]:\n job_item[\"file_sha256sum\"] = utils.file_sha256(\n file_path=file_path\n )\n if job_item[\"to\"].endswith(os.sep):\n job_item[\"file_to\"] = os.path.join(\n job_item[\"to\"],\n os.path.basename(file_path),\n )\n else:\n job_item[\"file_to\"] = job_item[\"to\"]\n\n if job_item[\"file_to\"] not in job_info[\"TRANSFERS\"]:\n job_info[\"TRANSFERS\"].append(job_item[\"file_to\"])\n\n self.log.debug(\n \"Sending file transfer message for\"\n \" file_path:%s to identity:%s\",\n file_path,\n identity.decode(),\n )\n self.driver.socket_send(\n socket=self.bind_job,\n identity=identity,\n command=job_item[\"verb\"].encode(),\n data=json.dumps(job_item).encode(),\n info=file_path.encode(),\n )\n else:\n self.log.debug(\n \"Sending job message for job:%s to identity:%s\",\n job_item[\"verb\"].encode(),\n identity.decode(),\n )\n self.driver.socket_send(\n socket=self.bind_job,\n identity=identity,\n command=job_item[\"verb\"].encode(),\n data=json.dumps(job_item).encode(),\n )\n\n self.log.debug(\"Sent job %s to %s\", task, identity)\n else:\n self.return_jobs[task] = job_info\n\n return 128, time.time()",
"def run(self):\n\n self.logger.info(\"Workload Scaler is running\")\n\n if self.management_type == 'prometheus_alert_api':\n\n \"\"\"\n python3 run.py\n -w Deployment -n php-apache -ns default -s 1 -max 10 -min 2 -ti 60\n -mt prometheus_alert_api -ph localhost -pp 9090\n -son php-apache-scaling-out -sin php-apache-scaling-in\n \"\"\"\n\n self.logger.info(self.common_log + f\"(host: {self.host}, port: {self.port}, \"\n f\"scaling_out_name: {self.scaling_out_name}, \"\n f\"scaling_in_name: {self.scaling_in_name})\")\n\n manager = PrometheusAlertAPI(\n self.workload,\n self.name,\n self.namespace,\n self.scaling_range,\n self.max_number,\n self.min_number,\n self.kube_config,\n self.host,\n self.port,\n self.scaling_out_name,\n self.scaling_in_name,\n )\n\n while True:\n manager.control_alert_and_trigger_scaling()\n self.logger.info(f\"Waiting {self.time_interval} seconds for the next query if alarm is firing\")\n sleep(self.time_interval)\n elif self.management_type == 'prometheus_metric_api':\n\n \"\"\"\n python3 run.py\n -w Deployment -n php-apache -ns default -s 1 -max 10 -min 2 -ti 60\n -mt prometheus_metric_api -ph localhost -pp 9090\n -mn apache_accesses_total -l kubernetes_name=apache-exporter\n -sotv 0.8 -sitv 0.2 -r 300\n \"\"\"\n\n self.logger.info(self.common_log + f\"(host: {self.host}, port: {self.port}, \"\n f\"metric_name: {self.metric_name}, labels: {self.label_list}, \"\n f\"scaling_out_threshold_value: {self.scaling_out_threshold_value}, \"\n f\"scaling_in_threshold_value: {self.scaling_in_threshold_value}, \"\n f\"range_value:{self.rate_value})\")\n manager = PrometheusMetricAPI(\n self.workload,\n self.name,\n self.namespace,\n self.scaling_range,\n self.max_number,\n self.min_number,\n self.kube_config,\n self.host,\n self.port,\n self.metric_name,\n self.label_list,\n self.scaling_out_threshold_value,\n self.scaling_in_threshold_value,\n self.rate_value\n )\n while True:\n manager.control_and_trigger_scaling()\n self.logger.info(f\"Waiting {self.time_interval} seconds for the next query if there is any\"\n f\" violation\")\n sleep(self.time_interval)\n else:\n self.logger.error(f\"Not valid management_type: {self.management_type}\")\n raise Exception(\"Not valid management_type\")",
"def setup_job_queues(self):\n self.conn = Redis('localhost', 6379)\n self.generate_queue = Queue('generate', connection=self.conn, default_timeout=\"1h\")\n self.email_queue = Queue('notify_email', connection=self.conn)",
"def scheduled_stocktake_reports():\n\n # Sleep a random number of seconds to prevent worker conflict\n time.sleep(random.randint(1, 5))\n\n # First let's delete any old stocktake reports\n delete_n_days = int(common.models.InvenTreeSetting.get_setting('STOCKTAKE_DELETE_REPORT_DAYS', 30, cache=False))\n threshold = datetime.now() - timedelta(days=delete_n_days)\n old_reports = part.models.PartStocktakeReport.objects.filter(date__lt=threshold)\n\n if old_reports.count() > 0:\n logger.info(f\"Deleting {old_reports.count()} stale stocktake reports\")\n old_reports.delete()\n\n # Next, check if stocktake functionality is enabled\n if not common.models.InvenTreeSetting.get_setting('STOCKTAKE_ENABLE', False, cache=False):\n logger.info(\"Stocktake functionality is not enabled - exiting\")\n return\n\n report_n_days = int(common.models.InvenTreeSetting.get_setting('STOCKTAKE_AUTO_DAYS', 0, cache=False))\n\n if report_n_days < 1:\n logger.info(\"Stocktake auto reports are disabled, exiting\")\n return\n\n if not check_daily_holdoff('STOCKTAKE_RECENT_REPORT', report_n_days):\n logger.info(\"Stocktake report was recently generated - exiting\")\n return\n\n # Let's start a new stocktake report for all parts\n part.stocktake.generate_stocktake_report(update_parts=True)\n\n # Record the date of this report\n record_task_success('STOCKTAKE_RECENT_REPORT')",
"def main(self):\n self.validate_servers()\n for interval, checks in self.checks.iteritems():\n logger.debug(\"Create scheduler job for interval %d and checks %s\",\n interval, checks)\n self.sched.add_interval_job(self.run_checks, minutes=interval,\n args=(checks,), misfire_grace_time=10)\n logger.debug(\"Starting scheduler\")\n try:\n # blocking\n self.sched.start()\n except (KeyboardInterrupt, SystemExit):\n # This accepts sigterm cleanly\n logger.debug(\"stopping scheduler\")\n self.sched.shutdown()",
"def main():\n\n if len(sys.argv) < 2:\n print \"Usage: ./kafka_producer_stock.py stock-topic\"\n sys.exit(1)\n\n if len(sys.argv) >= 3:\n wait_time = float(sys.argv[2])\n else:\n wait_time = 0\n\n # Set up kafka brokers\n ipfile = open(ipfile_path, 'r')\n ips = ipfile.read()[:-1]\n ipfile.close()\n ips = ips.split('\\n')\n\n producer_user = (KafkaProducer(bootstrap_servers=ips, \n value_serializer=lambda v: json.dumps(v).encode('utf-8')))\n\n ticker_list = create_stock_list(ticker_list_path)\n\n # simulate data \n while True:\n cur_time = datetime.datetime.now()\n time_string = str(cur_time.hour).zfill(2) + \":\" + str(cur_time.minute).zfill(2) \\\n + \":\" + str(cur_time.second).zfill(2)\n for ticker_name in ticker_list:\n stock_price = float(random.randint(20, 20000)/10)\n stock_record = {\"ticker\": ticker_name, \"price\": stock_price, \"time\": time_string}\n \n # send the messages to separate topics\n producer_user.send(sys.argv[1], stock_record) \n time.sleep(wait_time)",
"def cron_hourly(self):\n self.poll() # Performs a /status/scan\n self.show_messages() # Display any messages (in real client this could be handled various ways\n self.expire_data()",
"def runAllJobs():\n minSeconds = appConf.getint(\"TrendCron\", \"minSeconds\")\n\n enabled = db.PlaceJob.selectBy(enabled=True)\n queued = enabled.filter(jobs.orCondition())\n\n print(\"Starting PlaceJob cron_jobs\")\n print(f\" queued items: {queued.count()}\")\n\n for placeJob in queued:\n start = time.time()\n\n requestTrends(placeJob)\n\n duration = time.time() - start\n print(\" took {0}s\".format(int(duration)))\n diff = minSeconds - duration\n if diff > 0:\n time.sleep(diff)",
"async def run(self):\n last_update = await self._get_last_update()\n if not last_update or last_update['created_at'].date() != datetime.datetime.utcnow().date():\n await self._update_prices()\n else:\n self._schedule_next_update()",
"def main_loop(self):\n \n while self.running:\n # handle_network() will block for at most 1 second during\n # the select() syscall\n self.handle_network()\n self.check_queue()\n self.handle_cron()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This function takes an array size and a number of trials as arguments. It reports progress and returns the average time taken to run the giftwrap algorithm on randomly generated point lists of the given size.
|
def analyse_time(size_to_test, no_of_trials):
    # time.perf_counter only exists from Python 3.3 onwards; fall back to
    # time.clock on older interpreters.
    if sys.version_info < (3, 3):
        get_time = time.clock
    else:
        get_time = time.perf_counter
    total_time = 0
    for trial in range(no_of_trials):
        list_to_test = generate_random_array(size_to_test)
        start = get_time()
        sol = giftwrap_e(list_to_test)
        end = get_time()
        total_time += (end - start)
    avg_time_per_trial = total_time / no_of_trials
    print('finished timing for array with {} random points'.format(size_to_test))
    # Uncomment to plot the hull computed on the last trial:
    # draw_graph(list_to_test, sol)
    return avg_time_per_trial
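
A quick driver sketch for the timing helper above; it assumes sys, time and the generate_random_array and giftwrap_e helpers are already available in the surrounding module (they are not defined here), and the sizes and trial count are illustrative only.

# Hypothetical driver for analyse_time; imports and helper functions are
# expected to come from the surrounding module.
if __name__ == '__main__':
    for n in [100, 1000, 10000]:       # illustrative input sizes
        avg = analyse_time(n, 5)       # 5 trials per size (arbitrary choice)
        print('average time for {} points: {:.6f}s'.format(n, avg))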
|
[
"def time_it(input_list):\n for i in range(501):\n start = time.time()\n radix_sort(input_list)\n time_passed = time.time() - start\n avg_time = time_passed / 500\n return avg_time",
"def timing_analysis(func, start, stop, inc, runs):\n\n for n in range(start, stop, inc): # for every input size n\n acc = 0.0 # initialize accumulator\n\n for i in range(runs): # repeat runs times:\n acc += timing(func, n) # run func on input size n\n # and accumulates run times\n # print average run times for input size n\n format_str = 'Run time of {}({}) is {:.7f} seconds.'\n print(format_str.format(func.__name__, n, acc / runs))",
"def print_timeit_table(code_strings):\n print '{0:40}:{1:>7}'.format('Code', 'Time Taken')\n print '-'*51\n for code in code_strings:\n loops, time, representation = timeit(code, output='return')\n print '{0:40}:{1:7.1f}{2:>3}'.format(code, time, representation)",
"def print_times(v, L):\n # Get list.index's running time.\n t1 = time.perf_counter()\n L.index(v)\n t2 = time.perf_counter()\n index_time = (t2 - t1) * 1000.0\n\n # Get the other three running times.\n while_time = time_it(linear_search_1.linear_search, L, v)\n for_time = time_it(linear_search_2.linear_search, L, v)\n sentinel_time = time_it(linear_search_3.linear_search, L, v)\n print(\"{0}\\t{1:>6.1f}\\t{2:>6.1f}\\t{3:>6.1f}\\t{4:>6.1f}\".format(\n v, while_time, for_time, sentinel_time, index_time))",
"def main():\n list_size = [500, 1000, 10000]\n sort_result = {'insertion':0, 'shell':0, 'python':0}\n for i in list_size:\n list_count = 0\n while list_count < 100:\n random_number_list = get_me_random_list(i)\n sort_result['insertion']+=insertion_sort(random_number_list)\n sort_result['shell'] += shell_sort(random_number_list)\n sort_result['python'] += python_sort(random_number_list)\n list_count+=1\n\n for key, val in sort_result.items():\n print('%s sort took %10.7f seconds to run, on average'%(key, val/100))",
"def main():\n parser = argparse.ArgumentParser(description='Count steps to Philosophy wikipedia page')\n parser.add_argument('-n', dest=\"num\", default=500, type=int, help=\"choose how many pages to run on\")\n parser.add_argument('-v', dest=\"verbose\", action='store_true', default=False,\n help=\"verbose flag\")\n args = parser.parse_args()\n\n num_random = int(args.num)\n verbose = args.verbose\n\n if num_random < 1:\n print \"Input n must be a positive integer\"\n return\n\n wiki_list = get_random_wikis(num_random)\n\n all_the_parents, num_steps_dict = dict(), dict()\n avg_time_per_page, all_counts = [], []\n total_time = 0\n\n for i, wiki_href in enumerate(wiki_list):\n if verbose:\n print \"\\n0 : {}\".format(wiki_href)\n else:\n print \"{} : {}\".format(i+1, wiki_href)\n start_time = datetime.datetime.now()\n count = count_steps(wiki_href, 0, all_the_parents, verbose)\n end_time = datetime.datetime.now()\n total_time += (end_time - start_time).seconds + (end_time - start_time).microseconds/1E6\n avg_time_per_page.append(total_time/float(i+1))\n if count in num_steps_dict.keys():\n num_steps_dict[count].append(wiki_href)\n else:\n num_steps_dict[count] = [wiki_href]\n if count > -1:\n all_counts.append(count)\n\n num_successful = len(all_counts)\n print(\"Percentage successful: %.2f\" % (100.0*num_successful/num_random))\n\n if len(all_counts) > 0:\n print \"Average: %.2f\" % numpy.mean(all_counts)\n print \"Standard Deviation: %.2f\" % numpy.std(all_counts)\n\n # # To plot data and time per page\n # # How many pages took n steps\n # max_num_pages = max([len(x) for x in num_steps_dict.values()])\n # steps_per_page = [len(num_steps_dict.get(i+1) or []) for i in range(max_num_pages)]\n\n # # Plot number of steps frequency\n # plt.plot([i+1 for i in range(max_num_pages)], steps_per_page)\n # plt.title('Number of Steps to Philosophy')\n # plt.xlabel('Number of steps')\n # plt.ylabel('Number of pages taking n steps')\n # plt.savefig('steps_to_philosophy.png')\n # plt.clf()\n # # Plot average time per page\n # plt.plot([i+1 for i in range(num_random)], avg_time_per_page)\n # plt.axis([0, num_random, 0, math.ceil(max(avg_time_per_page))])\n # plt.title('Average Time Per Page')\n # plt.xlabel('Number of pages tested')\n # plt.ylabel('Average time per page (in seconds)')\n # plt.savefig('avg_time_per_page.png')",
"def nucdivind(tuplelist, numsnps):\n\tif numsnps == 1:\n\t\tnucdivlist = []\n\t\tfor pop in tuplelist:\n\t\t\talleledic1 = {}\n\t\t\tzipped = zip(pop[0].split('/'), pop[1].split('/'))\n\t\t\tfor allele in zipped:\n\t\t\t\tif allele[0] in alleledic1:\n\t\t\t\t\talleledic1[allele[0]] += int(allele[1])\n\t\t\t\telse:\n\t\t\t\t\talleledic1[allele[0]] = int(allele[1])\n\t\t\tsumalleles = sum([alleleidepth for allelei, alleleidepth in alleledic1.items()])\n\t\t\tnucdiv = 1 - (sum([binomial(alleleidepth, 2) for allelei, alleleidepth in alleledic1.items()]) / binomial(sumalleles, 2))\n\t\t\tnucdivlist.append(((nucdiv / 42), sumalleles))\n\t\treturn nucdivlist\n\telif numsnps == 2:\n\t\tnucdivlist = []\n\t\tfor pop in tuplelist:\n\t\t\talleledic1 = {}\n\t\t\talleledic2 = {}\n\t\t\tzipped = zip(pop[0].split('/'), pop[1].split('/'))\n\t\t\tfor allele in zipped:\n\t\t\t\tif allele[0][0] in alleledic1:\n\t\t\t\t\talleledic1[allele[0][0]] += int(allele[1])\n\t\t\t\telse:\n\t\t\t\t\talleledic1[allele[0][0]] = int(allele[1])\n\t\t\t\tif allele[0][1] in alleledic2:\n\t\t\t\t\talleledic2[allele[0][1]] += int(allele[1])\n\t\t\t\telse:\n\t\t\t\t\talleledic2[allele[0][1]] = int(allele[1])\n\t\t\tsumalleles = sum([alleleidepth for allelei, alleleidepth in alleledic1.items()])\n\t\t\tnucdiv1 = 1 - (sum([binomial(alleleidepth, 2) for allelei, alleleidepth in alleledic1.items()]) / binomial(sumalleles, 2))\n\t\t\tnucdiv2 = 1 - (sum([binomial(alleleidepth, 2) for allelei, alleleidepth in alleledic2.items()]) / binomial(sumalleles, 2))\n\t\t\tnucdivlist.append((((nucdiv1 + nucdiv2) / 42), sumalleles))\n\t\treturn nucdivlist\n\telif numsnps == 3:\n\t\tnucdivlist = []\n\t\tfor pop in tuplelist:\n\t\t\talleledic1 = {}\n\t\t\talleledic2 = {}\n\t\t\talleledic3 = {}\n\t\t\tzipped = zip(pop[0].split('/'), pop[1].split('/'))\n\t\t\tfor allele in zipped:\n\t\t\t\tif allele[0][0] in alleledic1:\n\t\t\t\t\talleledic1[allele[0][0]] += int(allele[1])\n\t\t\t\telse:\n\t\t\t\t\talleledic1[allele[0][0]] = int(allele[1])\n\t\t\t\tif allele[0][1] in alleledic2:\n\t\t\t\t\talleledic2[allele[0][1]] += int(allele[1])\n\t\t\t\telse:\n\t\t\t\t\talleledic2[allele[0][1]] = int(allele[1])\n\t\t\t\tif allele[0][2] in alleledic3:\n\t\t\t\t\talleledic3[allele[0][2]] += int(allele[1])\n\t\t\t\telse:\n\t\t\t\t\talleledic3[allele[0][2]] = int(allele[1])\n\t\t\tsumalleles = sum([alleleidepth for allelei, alleleidepth in alleledic1.items()])\n\t\t\tnucdiv1 = 1 - (sum([binomial(alleleidepth, 2) for allelei, alleleidepth in alleledic1.items()]) / binomial(sumalleles, 2))\n\t\t\tnucdiv2 = 1 - (sum([binomial(alleleidepth, 2) for allelei, alleleidepth in alleledic2.items()]) / binomial(sumalleles, 2))\n\t\t\tnucdiv3 = 1 - (sum([binomial(alleleidepth, 2) for allelei, alleleidepth in alleledic3.items()]) / binomial(sumalleles, 2))\n\t\t\tnucdivlist.append((((nucdiv1 + nucdiv2 + nucdiv3) / 42), sumalleles))\n\t\treturn nucdivlist",
"def runtime_example1():\n\n # this need to be runned outside of this function\n %timeit rand_nums = np.random.rand(1000)\n\n # here we save the runtime to a variable using \"-o\" after\n # the %timeit clause\n times = %timeit -o rand_nums = np.random.rand(1000)\n return times",
"def time_track_print():\n\tglobal _time_track_dict\n#\tif not _time_track_dict.values(): return\n\tmax_time = max(_time_track_dict.values())\n\ttupel_list = [(fn_name, \"%.2f%%\" % (100*exe_time/max_time), \"%fs\" % exe_time) for (fn_name, exe_time) in sorted(_time_track_dict.items(), key=operator.itemgetter(1), reverse=True)]\n\tmax_len_item_1 = max([len(x) for (x,_,_) in tupel_list])\n\tmax_len_item_2 = max([len(x) for (_,x,_) in tupel_list])\n\tmax_len_item_3 = max([len(x) for (_,_,x) in tupel_list])\n\tfor (x,y,z) in tupel_list:\n\t\tprint x.ljust(max_len_item_1 + 3), y.rjust(max_len_item_2), z.rjust(max_len_item_3 + 3)",
"def calculate_time_difs(times_list, fails_list):\n total_times_list = list()\n incomp_times_list = list()\n comp_times_list = list()\n\n for i, machine in enumerate(times_list):\n total_machine_list = list()\n incomp_machine_list = list()\n comp_machine_list = list()\n\n for j, failure in enumerate(machine):\n total_failure_list = list()\n incomp_failure_list = list()\n comp_failure_list = list()\n\n for index in range(len(failure) - 1):\n total_failure_list.append((failure[index + 1] -\n failure[index]).days)\n\n if fails_list[i][j][index] == 0:\n incomp_failure_list.append((failure[index + 1] -\n failure[index]).days)\n\n elif fails_list[i][j][index] == 1:\n comp_failure_list.append((failure[index + 1] -\n failure[index]).days)\n\n total_machine_list.append(total_failure_list)\n incomp_machine_list.append(incomp_failure_list)\n comp_machine_list.append(comp_failure_list)\n\n total_times_list.append(total_machine_list)\n incomp_times_list.append(incomp_machine_list)\n comp_times_list.append(comp_machine_list)\n\n return total_times_list, incomp_times_list, comp_times_list",
"def multi_results(benchmark):\n # Read in results\n tensat_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n taso_root = os.path.join(os.path.dirname(tensat_root), \"TASO\")\n\n taso_benchmark_name = benchmark\n if benchmark == 'nasneta':\n taso_benchmark_name = 'nasnet_a'\n elif benchmark == 'vgg':\n taso_benchmark_name = 'vgg19-7'\n taso_runtime_file = os.path.join(taso_root, \"examples/{}_time.txt\".format(taso_benchmark_name))\n\n with open(taso_runtime_file, 'r') as f:\n content = f.readlines()\n\n orig_runtimes = []\n for line in content[-5:]:\n times = line.split('\\t')\n orig_runtimes.append(float(times[0]))\n orig_mean = np.mean(orig_runtimes)\n\n\n # iter=0\n mean_iter_0, mean_sat_iter_0, mean_ext_iter_0, mean_nodes_iter_0 = get_iter_stats(benchmark, tensat_root, iter=0)\n\n # iter=1\n mean_iter_1, mean_sat_iter_1, mean_ext_iter_1, mean_nodes_iter_1 = get_iter_stats(benchmark, tensat_root, iter=1)\n\n # iter=2\n mean_iter_2, mean_sat_iter_2, mean_ext_iter_2, mean_nodes_iter_2 = get_iter_stats(benchmark, tensat_root, iter=2)\n\n # iter=3\n mean_iter_3, mean_sat_iter_3, mean_ext_iter_3, mean_nodes_iter_3 = get_iter_stats(benchmark, tensat_root, iter=3)\n\n # Plot runtime & optimizer time v.s. iter\n speedup = [orig_mean/mean_iter_0, orig_mean/mean_iter_1, orig_mean/mean_iter_2]\n optimizer_time = [mean_sat_iter_0+mean_ext_iter_0, mean_sat_iter_1+mean_ext_iter_1, mean_sat_iter_2+mean_ext_iter_2]\n if mean_iter_3 > 0:\n speedup.append(orig_mean/mean_iter_3)\n optimizer_time.append(mean_sat_iter_3+mean_ext_iter_3)\n\n speedup = [(i-1)*100 for i in speedup]\n\n nodes = [mean_nodes_iter_0, mean_nodes_iter_1, mean_nodes_iter_2, mean_nodes_iter_3]\n\n result = {}\n result['speedup'] = speedup\n result['optimizer'] = optimizer_time\n result['nodes'] = nodes\n\n return result",
"def trials(N = 1000, n = 100):\n data = {}\n trial = [0 for i in range(n+1)]\n test_wins = 0\n \n for test in range(N):\n test_wins = sim(n = n)\n trial[test_wins] += 1\n \n data[(0,0)] = trial\n \n for die1 in range (1,7):\n \n for die2 in range (die1, 7):\n trial = [0 for i in range(n+1)]\n \n for test in range(N):\n test_wins = sim(n = n, weight1 = die1, weight2 = die2)\n trial[test_wins] += 1\n \n data[(die1, die2)] = trial\n \n trial = [0 for i in range(n+1)] \n \n for test in range(N):\n test_wins = sim(n = n, weight1 = die1)\n trial[test_wins] += 1 \n \n data[(die1, 0)] = trial\n \n return data",
"def check_runs(tile):\r\n results = []\r\n for j in tile:\r\n threesame = j\r\n for i in tile:\r\n num_runs = ''\r\n if(int(i[0]) == int(threesame[0])+1\r\n and i[1] != threesame[1]):\r\n num_runs = i\r\n results = ''\r\n for k in tile:\r\n if(int(k[0]) == int(num_runs[0])+1 and\r\n k[1] != num_runs[1]\r\n and k[1] != threesame[1]):\r\n results = k\r\n results.append([threesame,\r\n num_runs, results])\r\n\r\n return results",
"def statistics(benchmarks):\n optimal_lengths = [bench[0].path_length for bench in benchmarks]\n optimal_time = [bench[0].time_elapsed for bench in benchmarks]\n random_lengths = [bench[1].path_length for bench in benchmarks]\n random_time = [bench[1].time_elapsed for bench in benchmarks]\n print \"Random Planner Mean Path Length and Standard Deviation: \", np.mean(random_lengths), np.std(random_lengths)\n print \"Random Planner Mean Elapsed Time and Standard Deviation: \", np.mean(random_time), np.std(random_time)\n print \"Optimal Planner Mean Path Length and Standard Deviation: \", np.mean(optimal_lengths), np.std(optimal_lengths)\n print \"Optimal Planner Mean Elapsed Time and Standard Deviation: \", np.mean(optimal_time), np.std(optimal_time)",
"def print_timeit(code_strings):\n print\n for code in code_strings:\n print code\n print '-'*50\n timeit(code)\n print '-'*50\n print",
"def print_perf_info(self, result_list, output_file=None):\n pass",
"def statistics(runtime = 100):\n count = 0\n results = []\n while count < runtime:\n d = Dice(20,4,4,3)\n results.append((d.ToHit(), d.ToWound(), d.WoundsDealt()))\n count += 1\n\n print(\"The maximum number of hits:\" + str(max([r[0] for r in results])))\n print(\"The minimum number of hits:\" + str(min([r[0] for r in results])))\n print(\"The average number of hits:\" + str(sum([r[0] for r in results])/len([r[0] for r in results])))\n print(\"The maximum number of wounds:\" + str(max([r[1] for r in results])))\n print(\"The minimum number of wounds:\" + str(min([r[1] for r in results])))\n print(\"The average number of wounds:\" + str(sum([r[1] for r in results])/len([r[1] for r in results])))\n print(\"The maximum number of saves:\" + str(max([r[2] for r in results])))\n print(\"The minimum number of saves:\" + str(min([r[2] for r in results])))\n print(\"The average number of saves:\" + str(sum([r[2] for r in results])/len([r[2] for r in results])))",
"def generic_list_test(reporter, test_id, n, fns, *args, **kwargs):\n results = time_multi_list(n, fns, *args, **kwargs)\n reporter.report(test_id, n, results, str(args))",
"def time_function(function, array, string):\n import time\n t0=time.clock()\n function(array)\n t1=time.clock()\n\n print '{}: {} seconds'.format(string, t1-t0)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Save a contact probability matrix as an RR file.
|
def save_rr_file(filename, probs, domain, sequence,
method='dm-contacts-resnet'):
assert len(sequence) == probs.shape[0]
assert len(sequence) == probs.shape[1]
with tf.io.gfile.GFile(filename, 'w') as f:
f.write(RR_FORMAT.format(domain, method, sequence))
for i in range(probs.shape[0]):
for j in range(i + 1, probs.shape[1]):
f.write('{:d} {:d} {:d} {:d} {:f}\n'.format(
i + 1, j + 1, 0, 8, probs[j, i]))
f.write('END\n')
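
A hedged usage sketch for the writer above; it assumes RR_FORMAT is a module-level header template with three placeholders (domain, method, sequence) and that TensorFlow is installed for tf.io.gfile. The target name, output path and random matrix are made up for illustration.

# Illustrative call to save_rr_file with a tiny random contact map.
import numpy as np

sequence = 'MKVL'
probs = np.random.uniform(size=(len(sequence), len(sequence)))
save_rr_file('/tmp/T0949.rr', probs, domain='T0949', sequence=sequence)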
|
[
"def export_matrix(self):\n self.matrix_filename = f'similarity_matrix_{self.m1}_{self.m2}_{self.type}_{self.parce}_{self.net}_{self.corr}'\n\n self.path_matrix_final = f'{self.output}/similarity_matrices/{self.type}/{self.parce}/{self.corr}'\n if not os.path.exists(self.path_matrix_final):\n os.makedirs(self.path_matrix_final)\n\n with open(f'{self.path_matrix_final}/{self.matrix_filename}', 'w') as f:\n np.savetxt(f, self.sim_matrix, fmt='%1.3f')\n f.flush\n \n return None",
"def save_matrix(self, matrix):\n print(\"dumping \")\n path = self._create_path(self.dataset)\n print(path)\n print(matrix.sum())\n np.save(path, matrix)\n print(\"dumped to %s\" % path)",
"def save(self, filename=None, mode=\"homer\", usePFM=False):\n assert filename, \"no filename specified\"\n\n matrix_to_use = self.__matrix\n if usePFM:\n assert self.__original_PFM is not None, \"pwm.save: No PFM is avaialble for this pwm\"\n matrix_to_use = self.__original_PFM\n\n if mode == \"homer\":\n oh = open(filename, \"w\")\n\n oh.write(\">%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n\" % (self.name, self.name, 0, 0, 0, \"T:0(0),B:0(0),P(0)\"))\n for i in matrix_to_use:\n nl = numpy.array([0.0, 0.0, 0.0, 0.0]) if sum(i) == 0 else i/float(sum(i))\n print(nl)\n oh.write(\"%s\\n\" % \"\\t\".join([str(b) for b in nl])) \n\n elif mode == \"counts\":\n oh = open(filename, \"w\")\n\n oh.write(\">%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n\" % (self.name, self.name, 0, 0, 0, \"T:0(0),B:0(0),P(0)\"))\n for i in matrix_to_use:\n oh.write(\"%s\\n\" % \"\\t\".join(str(b) for b in nl)) \n\n return(None)",
"def save_csv(self, filename) -> None:\n np.savetxt(filename, self.matrix, fmt='%d', delimiter=',',\n header=','.join(self.class_names))",
"def save_coefficients(mtx, dist, path):\n print(\"Saving the coefficients\")\n cv_file = cv2.FileStorage(path, cv2.FILE_STORAGE_WRITE)\n cv_file.write(\"K\", mtx)\n cv_file.write(\"D\", dist)\n # note you *release* you don't close() a FileStorage object\n cv_file.release()",
"def save_moc_matrix(moc_matrix, outfile):\n moc_matrix.to_csv(outfile, sep=\",\", header=True, index=True)\n print(f\"MoC matrix saved in {outfile}.\")",
"def save_mat(ar, filename):\n mat_dict = {'data': ar}\n sio.savemat(filename, mat_dict, do_compression=True)\n return",
"def saveCoefficients(mtx, dist, path):\n cv_file = cv2.FileStorage(path, cv2.FILE_STORAGE_WRITE)\n cv_file.write(\"mtx\", mtx)\n cv_file.write(\"dist\", dist)\n # note you *release* you don't close() a FileStorage object\n cv_file.release()",
"def save_txt(output_path, matrix):\n np.savetxt(output_path, matrix)",
"def _write_matrix(matrix, output_matrix):\n numpy.savetxt(output_matrix, matrix, delimiter=' ', newline='\\n')",
"def write_numpy_matrix_csv(matrix, output_file):\n\n numpy.savetxt(output_file, matrix, delimiter=\";\")",
"def export_to_mat(self, filename=None):\n xv, yv, cor = self.AC_2D\n\n if filename is None:\n filename = self.path.split('.')[0] + '.mat'\n\n scipy.io.savemat(filename, mdict={'x': xv, 'y': yv, 'AC': cor})",
"def __write_to_file(output_dir, p_values, nans, fname):\n fname = output_dir + \"/\" + fname\n \n f = open(fname, 'w')\n f.write('name\\tp-val\\tenrinched in\\n')\n p_values.sort()\n \n for tp in p_values:\n pval = (\"%.12f\" % __round_sig(tp[0])).rstrip('0')\n attr_name = str(tp[1])\n enriched_in = str(tp[2])\n f.write(attr_name + \"\\t\" + pval + \"\\t\" + enriched_in + \"\\n\")\n\n for n in nans:\n attr_name = str(n[1])\n f.write(attr_name + \"\\tn/a\\n\")\n\n f.close()",
"def save(self, M, filename):\n m, n = M.shape\n\n np.savetxt(filename, M, fmt='%d', header=\"{} {}\".format(m, n), comments='')",
"def save(self, filename):\n if (filename[-5:] != '.hmat'):\n filename += '.hmat'\n h5f = h5py.File(filename, 'w')\n h5f.create_dataset('matrix', data=self.matrix, compression = 'gzip', compression_opts=9)\n h5f.create_dataset('idx', data=self.idx, compression = 'gzip', compression_opts=9)\n h5f.create_dataset('applyedMethods', data=cPickle.dumps(self._applyedMethods))\n if hasattr(self,\"genome\") and hasattr(self,\"resolution\"):\n h5f.create_dataset('genome',data = cPickle.dumps(self.genome))\n h5f.create_dataset('resolution',data = cPickle.dumps(self.resolution))\n else:\n warnings.warn(\"No genome and resolution is specified, attributes are recommended for matrix.\")\n \n h5f.close()",
"def _write_niftyreg_matrix(matrix, txt_path):\n matrix = np.linalg.inv(matrix)\n np.savetxt(txt_path, matrix, fmt='%.8f')",
"def write_to_npz(filename, ranks, trace_cnt, key_probs):\n print(\"Saving file\")\n output_file = filename\n np.savez(output_file, ranks=ranks, trace_cnt=trace_cnt, key_probs=key_probs)",
"def write_scores(correlation, resultsfile, runresfile):\r\n\r\n filename = os.path.basename(resultsfile)\r\n\r\n with open(runresfile, 'a') as outfile:\r\n outfile.write(str(filename))\r\n outfile.write('\\t')\r\n outfile.write(str(correlation))\r\n outfile.write('\\n')",
"def __writeToFile(self, score):\n with open(self.file, \"w\") as f:\n f.write(str(score))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Save torsions to a file as a pickle of a dict.
|
def save_torsions(torsions_dir, filebase, sequence, torsions_probs):
filename = os.path.join(torsions_dir, filebase + '.torsions')
t_dict = dict(probs=torsions_probs, sequence=sequence)
with tf.io.gfile.GFile(filename, 'w') as fh:
pickle.dump(t_dict, fh, protocol=2)
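
A small usage sketch, assuming numpy is available; the (L, 36, 36) probability shape is an assumption about the torsion binning, and the directory and filebase are made up for illustration.

# Illustrative call to save_torsions with a random probability tensor.
import numpy as np

seq = 'MKVL'
torsion_probs = np.random.uniform(size=(len(seq), 36, 36))  # assumed binning
save_torsions('/tmp', 'T0949', seq, torsion_probs)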
|
[
"def pickle_save_dict(f, d):\n import pickle\n pickle.dump( d, open( f, 'wb' ) )",
"def pickle_dump(what, file):\n with open(file, 'wb') as f:\n pickle.dump(what, f)",
"def storeMoments(filename, data):\n\tfileObject = open(filename, 'wb')\n\tpickle.dump(data, fileObject)\n\tprint ('Data successfully written on disk')",
"def save_as(self, filename: str):\n\n toml.dump(self.to_dict(), filename)",
"def save_to_file(the_experiment, filename):\n #Pickle dumps\n datas = dumps(the_experiment)\n f = open(filename, 'w')\n f.write(datas)\n f.close()",
"def save(self):\n my_dict = {}\n for obj in self.__objects:\n my_dict[obj] = self.__objects[obj].to_dict()\n\n with open(self.__file_path, mode='w') as f:\n json.dump(my_dict, f)",
"def poincare_save(data, filename):\n with open(filename, 'wb') as output:\n pickle.dump(data, output)",
"def save_pickled(self, obj, filename):\n path = os.path.join(pickle_dir, filename)\n with open(path, 'wb+') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)",
"def write_tour_list(self, tours, filename):\n write_file = file(filename, 'wb')\n cPickle.dump(tours, write_file)\n write_file.close()",
"def serialize(obj, file):\n\tpickle.dump(obj, file, protocol=pickle.HIGHEST_PROTOCOL)",
"def save(self, filename):\n\t\tf = open(filename,\"w\")\n\t\tpickle.dump(self.stats,f)\n\t\tf.close()",
"def createPickle(pickleFile, file):\r\n os.chdir(r'D:\\PEFilesIamges\\DataSet')\r\n with open(pickleFile, 'wb') as fileObject:\r\n pkl.dump(file, fileObject)\r\n fileObject.close()",
"def save_trajectories(self, trajectories):\n data = {}\n for trajectory in trajectories:\n data[trajectory.name] = [self.properties_to_json(trajectory), self.points_to_json(trajectory)]\n try:\n with open(self.file_name, \"w\") as f:\n json.dump(data, f, indent=4, sort_keys=True)\n except PermissionError as ie:\n print(ie)",
"def save_teds(datateds, fileteds):\n with open(fileteds + '.ted', 'w') as auxfile:\n json.dump(datateds, auxfile, indent=1)",
"def save(self,file):\n assert \".pymodel\" in file\n with open(file,\"w\") as stream:\n pickle.dump(self,stream)",
"def Write_Data_To_Pickle(data, file_name):\n pickle.dump( data, open( file_name, \"wb\" ) )",
"def dump(self, path):\n data = self.to_dic()\n save_dict(data, path)",
"def save(self):\n pickle.dump(self.keyValue, open(\"brain.dump\", \"w+\"))\n print \"Successfully saved file\"",
"def saveData(fname, grating, params, lines, meta):\r\n pickle.dump((grating, params, lines, meta), open(fname, \"wb\"))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Take weather data in weewx format and transform it into a param/value dict.
|
def format_weather_data(data_str):
## Data sample direct from weewx (shortened)
# "altimeter: 72.317316, ... maxSolarRad: None, ... windGustDir: 359.99994, windSpeed: 5.1645e-09"
# Replace "None" values with 0's
data_str = data_str.replace("None", "0.0")
# Grab the list of param/values
pairs_list = [p.strip() for p in data_str.strip().split(',')]
# Capture each param/value in a dict
pairs_dict = {}
for p in pairs_list:
k,v = p.split(':')
pairs_dict[k.strip()] = v.strip()
return pairs_dict
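
For reference, a shortened weewx-style record run through the parser above:

sample = ("altimeter: 72.317316, maxSolarRad: None, "
          "windGustDir: 359.99994, windSpeed: 5.1645e-09")
parsed = format_weather_data(sample)
print(parsed['maxSolarRad'])   # '0.0' -- None values are zeroed
print(parsed['windSpeed'])     # '5.1645e-09'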
|
[
"def get_weather(self):\n to_ret = {}\n weather = self.get_location_weather()\n\n to_ret['relative_humidity'] = weather['main']['humidity']\n to_ret['temperature'] = weather['main']['temp']\n to_ret['wind_speed'] = weather['wind']['speed']\n to_ret['wind_direction'] = weather['wind']['deg']\n to_ret['text'] = str(weather)\n \n return self.source_label, to_ret",
"def map_weather(response):\n return {\n \"id\": response[\"id\"],\n \"city\": f\"{response['name']}, {response['sys']['country']}\",\n \"temp\": response[\"main\"][\"temp\"],\n \"weather\": response[\"weather\"][0]\n }",
"def parse_weather(weather_data_raw):\n\n parsed_weather = {}\n parsed_weather['sunrise'] = dt.fromtimestamp(weather_data_raw.get(\"city\").get(\"sunrise\")).time()\n parsed_weather['sunset'] = dt.fromtimestamp(weather_data_raw.get(\"city\").get(\"sunset\")).time()\n\n\n for period in weather_data_raw['list']:\n # limiting the parsed weather data to weather for the next day\n if dt.fromtimestamp(period.get(\"dt\")).date() == dt.today().date() + timedelta(days=1):\n time_period = dt.fromtimestamp(period.get(\"dt\"))\n # the dict key for each period is a 2-dight 24-hour time, e.g 15 for 3.00pm\n parsed_weather[str(time_period.time())[:2]] = [\n str(time_period.time())[:2],\n round(period.get(\"main\").get(\"temp\")),\n period.get(\"weather\")[0].get(\"main\").center(15),\n str(period.get(\"clouds\").get(\"all\")).zfill(3),\n str(round(period.get(\"wind\").get(\"speed\"))).zfill(3)\n ]\n return parsed_weather",
"def wx_arguments(self):\n latest_wx = self.fetch_wx() # pylint: disable = invalid-name\n parsed_wx = Metar(latest_wx[\"code\"])\n present_weather = parsed_wx.present_weather()\n if not present_weather:\n present_weather = \"no precipitation\"\n arguments = {\n \"radar_id\": \"BGM\",\n \"latest_wx\": {\n \"wx_time\": latest_wx.get(\"time\"),\n \"location\": \"Ithaca Airport\",\n \"temp\": latest_wx.get(\"temp\"),\n \"dewpt\": latest_wx.get(\"dewpt\"),\n \"humidity\": latest_wx.get(\"humidity\"),\n \"wind_speed\": latest_wx.get(\"wind_speed\"),\n \"wind_alpha\": wind_alpha(latest_wx.get(\"wind_dir\")),\n \"wind_dir\": latest_wx.get(\"wind_dir\"),\n \"pressure\": latest_wx.get(\"pressure\"),\n \"sky\": latest_wx.get(\"sky\"),\n \"present_weather\": present_weather\n }\n }\n return arguments",
"def _normalise_data(data: Dict[str, Any]) -> Dict[str, Any]:\n\n return {\n \"now\": {\n \"date\": Weather._get_date(data[\"current\"][\"dt\"], data[\"timezone\"]),\n \"time\": Weather._get_time(data[\"current\"][\"dt\"], data[\"timezone\"]),\n \"description\": data[\"current\"][\"weather\"][0][\"description\"],\n \"id\": data[\"current\"][\"weather\"][0][\"icon\"],\n\n \"temperature\": round(data[\"current\"][\"temp\"]),\n \"feels like\": round(data[\"current\"][\"feels_like\"]),\n\n \"clouds\": Weather._get_percentage(data[\"current\"].get(\"clouds\", 0)),\n \"pressure\": data[\"current\"].get(\"pressure\", None),\n \"wind\": {\n \"speed\": round(data[\"current\"].get(\"wind_speed\", 0)),\n \"direction\": Weather._get_direction(data[\"current\"].get(\"wind_deg\", None)),\n },\n\n \"humidity\": Weather._get_percentage(data[\"current\"].get(\"humidity\", 0)),\n\n \"uv index\": Weather._get_uv_index(data[\"current\"][\"uvi\"]),\n },\n \"forecast\": {\n \"daily\": [\n {\n \"date\": Weather._get_date(data[\"daily\"][i][\"dt\"], data[\"timezone\"]),\n \"description\": data[\"daily\"][i][\"weather\"][0][\"description\"],\n \"id\": data[\"daily\"][i][\"weather\"][0][\"icon\"],\n\n \"temperature\": {\n \"min\": round(data[\"daily\"][i][\"temp\"][\"min\"]),\n \"max\": round(data[\"daily\"][i][\"temp\"][\"max\"]),\n },\n\n \"clouds\": Weather._get_percentage(data[\"daily\"][i].get(\"clouds\", 0)),\n \"pressure\": data[\"daily\"][i].get(\"pressure\", None),\n \"wind\": {\n \"speed\": round(data[\"daily\"][i].get(\"wind_speed\", 0)),\n \"direction\": Weather._get_direction(data[\"daily\"][i].get(\"wind_deg\", None)),\n },\n\n \"humidity\": Weather._get_percentage(data[\"daily\"][i].get(\"humidity\", 0)),\n\n \"sunrise\": Weather._get_time(data[\"daily\"][i][\"sunrise\"], data[\"timezone\"]),\n \"sunset\": Weather._get_time(data[\"daily\"][i][\"sunset\"], data[\"timezone\"]),\n\n } for i in range(len(data[\"daily\"]))\n ],\n \"hourly\": [\n {\n \"date\": Weather._get_date(data[\"hourly\"][i][\"dt\"], data[\"timezone\"]),\n \"time\": Weather._get_time(data[\"hourly\"][i][\"dt\"], data[\"timezone\"]),\n \"description\": data[\"hourly\"][i][\"weather\"][0][\"description\"],\n \"id\": data[\"hourly\"][i][\"weather\"][0][\"icon\"],\n\n \"temperature\": round(data[\"hourly\"][i][\"temp\"]),\n\n \"clouds\": Weather._get_percentage(data[\"hourly\"][i].get(\"clouds\", 0)),\n \"pressure\": data[\"hourly\"][i].get(\"pressure\", None),\n \"wind\": {\n \"speed\": round(data[\"hourly\"][i].get(\"wind_speed\", 0)),\n \"direction\": Weather._get_direction(data[\"hourly\"][i].get(\"wind_deg\", None)),\n },\n\n \"humidity\": Weather._get_percentage(data[\"hourly\"][i].get(\"humidity\", 0)),\n\n } for i in range(len(data[\"hourly\"]))\n ],\n },\n }",
"def get_weather(self):\n self.yql_query = f'select * from weather.forecast where woeid={self.woeID}'\n self.yql_url = self.baseurl + urlencode({'q': self.yql_query}) + self.format\n result = requests.get(self.yql_url).text\n weather_data = json.loads(result)\n\n return weather_data",
"def weather_param(file, epsilon, Format, S):\n df_weather = weather_data_to_df(file, S['Period_start'], S['Period_end'], S['Time_step'])\n df_weather.drop(df_weather.tail(1).index, inplace=True)\n \n # External temperature - format Ext_T[Day_index,Hour_index]\n Ext_T = reshape_day_hour((df_weather['Temperature'].values), *Format)\n Ext_T[np.abs(Ext_T) < epsilon] = 0\n \n # Global irradiance\n Irradiance = reshape_day_hour((df_weather['Irradiance'].values), *Format)\n Irradiance[np.abs(Irradiance) < epsilon] = 0\n \n return Ext_T, Irradiance, df_weather.index",
"def weather_context_processor():\n\n return dict(weather2=current_weather)",
"def weather_data(soup):\n timeslots = soup.find_all(class_=\"wr-time-slot wr-js-time-slot\")\n weather_today = {} \n\n for timeslot in timeslots:\n for tag in timeslot.find_all('span', class_='wr-time-slot-primary__hours'):\n time = (tag.getText())\n for tag in timeslot.find_all('div', class_='wr-weather-type__icon'):\n weathertype = tag.get('title')\n \n #Organises the weather at each time into a dictionary\n weather_today.update({\n time : weathertype\n })\n\n return weather_today",
"def get_weather(city_name, weather_api):\n\n response = requests.get(\n \"https://community-open-weather-map.p.rapidapi.com/weather?mode=json&q={}\".format(city_name),\n headers={\n \"X-RapidAPI-Host\": \"community-open-weather-map.p.rapidapi.com\",\n \"X-RapidAPI-Key\": weather_api\n })\n\n content = json.loads(response.content.decode('utf8').replace(\"'\", '\"'))\n\n res_dict = dict()\n res_dict['humidity'] = content['main']['humidity']\n res_dict['temp'] = round((content['main']['temp'] - 273.15), 1)\n res_dict['temp_max'] = round((content['main']['temp_max'] - 273.15), 1)\n res_dict['temp_min'] = round((content['main']['temp_min'] - 273.15), 1)\n res_dict['weather'] = content['weather'][0]['main']\n\n return res_dict",
"def raw_owm_to_df(*, data: Dict) -> pd.DataFrame:\n # 1) Only store what we don't know. Dropping location info.\n weather_data = data[\"Weather\"]\n # 2) Flatten nested data\n temperature_data = weather_data.pop(\"temperature\")\n for k, v in temperature_data.items():\n weather_data[k] = v\n pressure_data = weather_data.pop(\"pressure\")\n weather_data[\"pressure\"] = pressure_data[\"press\"]\n weather_data[\"sea_level\"] = pressure_data[\"sea_level\"]\n wind_data = weather_data.pop(\"wind\")\n weather_data[\"wind_speed\"] = wind_data[\"speed\"]\n weather_data[\"wind_direction\"] = wind_data[\"deg\"]\n rain_data = weather_data.pop(\"rain\")\n for k, v in rain_data.items():\n weather_data[f\"rain_{k}\"] = v\n snow_data = weather_data.pop(\"snow\")\n for k, v in snow_data.items():\n weather_data[f\"snow_{k}\"] = v\n # 3) Get index\n time_stamp = weather_data.pop(\"reference_time\")\n index = [pd.Timestamp(time_stamp, unit=\"s\", tz=\"utc\")]\n # 4) Gen data frame\n for k, v in weather_data.items():\n if v is None:\n weather_data[k] = np.nan\n df = pd.DataFrame(weather_data, index=index)\n\n return df",
"def sample_weather_scenario():\n times = pd.date_range('1/1/2000', periods=72, freq='6H')\n latitude = np.linspace(0, 10, 11)\n longitude = np.linspace(0, 10, 11)\n wsp_vals = np.full((72, 11, 11), 10.0)\n wdi_vals = np.full((72, 11, 11), 0.0)\n cusp_vals = np.full((72, 11, 11), 0.0)\n cudi_vals = np.full((72, 11, 11), 0.0)\n wadi_vals = np.full((72, 11, 11), 0.0)\n wahi_vals = np.full((72, 11, 11), 0.0)\n wisp = xr.DataArray(wsp_vals, dims=['time', 'lon_b', 'lat_b'],\n coords={'time': times,\n 'lon_b': longitude,\n 'lat_b': latitude})\n widi = xr.DataArray(wdi_vals, dims=['time', 'lon_b', 'lat_b'],\n coords={'time': times,\n 'lon_b': longitude,\n 'lat_b': latitude})\n cusp = xr.DataArray(cusp_vals, dims=['time', 'lon_b', 'lat_b'],\n coords={'time': times,\n 'lon_b': longitude,\n 'lat_b': latitude})\n cudi = xr.DataArray(cudi_vals, dims=['time', 'lon_b', 'lat_b'],\n coords={'time': times,\n 'lon_b': longitude,\n 'lat_b': latitude})\n wahi = xr.DataArray(cusp_vals, dims=['time', 'lon_b', 'lat_b'],\n coords={'time': times,\n 'lon_b': longitude,\n 'lat_b': latitude})\n wadi = xr.DataArray(cudi_vals, dims=['time', 'lon_b', 'lat_b'],\n coords={'time': times,\n 'lon_b': longitude,\n 'lat_b': latitude})\n return wisp, widi, cusp, cudi, wahi, wadi",
"def weatherdata(self, weatherdata):\n self._data[\"WEATHER DATA\"] = weatherdata",
"def get_local_weather(station: str) -> dict[str, str]:\n\n url = f\"http://w1.weather.gov/xml/current_obs/{station}.xml\"\n response = urlopen(url)\n xmlroot = ElementTree.fromstring(response.read())\n weatherdata: dict[str, str] = {\n \"observation_time_rfc822\": None,\n \"temp_c\": None,\n \"relative_humidity\": None,\n \"pressure_mb\": None,\n \"weather\": None,\n }\n for tag in weatherdata:\n element = xmlroot.find(tag)\n if element is not None:\n weatherdata[tag] = element.text\n return weatherdata",
"def openweathermap_get(api_city):\n\n url = f'http://api.openweathermap.org/data/2.5/weather?q={api_city[1]}&appid={api_city[0]}'\n response = requests.get(url)\n data = json.loads(response.text)\n\n temperature = data['main']['temp']\n current_weather = data['weather'][0]['main']\n description = data['weather'][0]['description']\n weather_param = []\n weather_param = [api_city[1], current_weather, description, temperature]\n return weather_param",
"def get_weather_info(req):\n\n CITYID = \"2964574\"\n WEATHER = \"http://api.openweathermap.org/data/2.5/forecast\"\n APIKEY = \"89b3e577901486c8ad601fab00edd389\"\n\n r = requests.get(WEATHER, params={\"APPID\": APIKEY, \"id\": CITYID})\n js = json.loads(r.text)\n\n for i in range(len(js['list']) - 1, 0, -1):\n date, time = js['list'][i]['dt_txt'].split(' ')\n time = datetime.datetime.strptime(time, \"%H:%M:%S\")\n req_time = datetime.datetime.strptime(req['time'], \"%H:%M\")\n\n wind_speed = 0.0\n rain = 0.0\n\n if date == req['date'] and time <= req_time:\n wind_speed = js['list'][i]['wind']['speed']\n if js['list'][i]['rain'] != {}:\n rain = js['list'][i]['rain']['3h']/3\n break\n\n return rain, wind_speed",
"def get_weather_data(requested_date, location):\n output = {}\n requested_date = datetime.date(requested_date.year, requested_date.month,\n requested_date.day)\n valid_input, error_text = validate_date_input(requested_date)\n if valid_input:\n weather_json, error_text = get_forecast(requested_date, location)\n if error_text:\n output[\"Status\"] = ERROR_STATUS\n output[\"ErrorDescription\"] = error_text\n else:\n output[\"Status\"] = SUCCESS_STATUS\n output[\"ErrorDescription\"] = None\n output[\"MinTempFar\"] = round((weather_json['MinTempCel'] * 9 / 5)\n + 32)\n output[\"MaxTempFar\"] = round((weather_json['MaxTempCel'] * 9 / 5)\n + 32)\n output.update(weather_json)\n else:\n output[\"Status\"] = ERROR_STATUS\n output[\"ErrorDescription\"] = error_text\n return output",
"def getWeatherInfo(msg):\n parsed_json = json.loads(str(msg.payload))\n latitude = parsed_json[\"Data\"][\"gpsLatitude\"]\n longitude = parsed_json[\"Data\"][\"gpsLongitude\"]\n observation_list = owm.weather_around_coords(float(latitude), float(longitude))\n w = observation_list[0].get_weather()\n humidity = w.get_humidity()\n temperature = w.get_temperature('celsius')\n sendMQTTData(temperature, humidity)",
"def clean_weather_api_response(raw_data: dict = dict()):\n cleaned_data = {}\n if raw_data['cod'] == 200:\n temperature = raw_data['main']['temp']\n temperature = int(temperature - 273.15)\n normalized_city = unicodedata.normalize('NFKD', raw_data.get('name', 'N/A')).encode('ascii', 'ignore')\n normalized_city = str(normalized_city).replace(\"b'\", \"\").replace(\"'\",\"\")\n cleaned_data = {\n \"location_name\": f\"{normalized_city}, {raw_data.get('sys', {}).get('country')}\",\n \"temperature\": f\"{temperature} C\",\n \"wind\": raw_data[\"wind\"],\n \"cloudines\": raw_data[\"weather\"][0][\"description\"].capitalize(),\n \"presure\": f\"{raw_data['main']['pressure']} hpa\",\n \"humidity\": f\"{raw_data['main']['humidity']}%\",\n \"sunrise\": f\"{datetime.datetime.fromtimestamp(raw_data['sys']['sunrise']).strftime('%H:%M')}\",\n \"sunset\": f\"{datetime.datetime.fromtimestamp(raw_data['sys']['sunset']).strftime('%H:%M')}\",\n \"geo_coordinates\": f\"[{raw_data['coord']['lat']}, {raw_data['coord']['lon']}]\",\n \"requested_time\": f\"{datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\",\n }\n else:\n cleaned_data['message'] = raw_data['message']\n return cleaned_data"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Inject the Eetlijst client from cache, if available. Otherwise, create a new one.
|
def inject_client(func):
@functools.wraps(func)
def _inner():
username = request.args.get("username")
password = request.args.get("password")
if not username or not password:
return abort(400)
# Fetch eetlijst client from cache
key = username + "-" + password
client = cache.get(key)
if client:
try:
client = cPickle.loads(client)
except cPickle.UnpicklingError:
client = None
if not client:
app.logger.debug("Creating new client")
try:
client = eetlijst.Eetlijst(username=username, password=password,
login=True)
except eetlijst.LoginError:
return abort(401)
else:
app.logger.debug("Continuing existing client")
# Invoke original method
try:
result = func(client)
# Store in cache again
cache.set(key, cPickle.dumps(client,
protocol=cPickle.HIGHEST_PROTOCOL), timeout=60)
except:
app.logger.debug("Client state NOT updated due to exception")
raise
return result
return _inner
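
A hypothetical Flask route built on the decorator above; the URL and response body are illustrative only. The point is that the wrapped view receives an authenticated client, and the client's session state is re-cached after the call.

@app.route("/ping")
@inject_client
def ping(client):
    # `client` is an authenticated eetlijst.Eetlijst instance here.
    return "session ready for " + request.args.get("username", "")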
|
[
"def _create_client(self):\r\n self.association_refresh_time = {}\r\n auth_plugin = k_loading.load_auth_from_conf_options(\r\n cfg.CONF, 'placement')\r\n client = k_loading.load_session_from_conf_options(\r\n cfg.CONF, 'placement', auth=auth_plugin)\r\n client.additional_headers = {'accept': 'application/json'}\r\n return client",
"def get_memcache_client():\n # TODO avoid global variables\n # implement a memcache client initialized per HTTP incoming request\n global shared_memcache_client\n if shared_memcache_client is not None:\n return shared_memcache_client\n\n client = memcache.Client()\n shared_memcache_client = client\n return client",
"def getClientOrCreate(self, guid, name, team=None):\n client = self.clients.getByCID(guid)\n if client is None:\n client = self.clients.newClient(guid, guid=guid, team=TEAM_UNKNOWN)\n client.last_update_time = time.time()\n client.save()\n client.ping = None\n client.score = None\n client.kills = None\n client.deaths = None\n if name:\n old_name = client.name\n client.name = name\n if old_name != name:\n client.save()\n if team:\n client.team = self.getTeam(team)\n return client",
"def get_client(self, args):\n try:\n # Load existing session, so as to keep current dir etc.\n with open(self.session_path, \"rb\") as fhandle:\n client = pickle.load(fhandle)\n except (IOError, pickle.PickleError):\n # Init a new RadonClient\n client = self.create_client(args)\n \n if args[\"--url\"]:\n if client.url != args[\"--url\"]:\n # Init a fresh RadonClient\n client = self.create_client(args)\n client.session = requests.Session()\n return client",
"def client(ert_storage_client, monkeypatch):\n return ert_storage_client",
"def __set_cache(ctx, cls, source=None, repository=None):\n ctx.cache.set(source, repository)",
"def __init__(self):\n self.clients = {}",
"def start_using_cache(self, cache):\n self._cache = cache\n self._counter = -1\n self.use_cache = True",
"def _get_client_from_cache(self, client_id):\n data = self._read_uaa_cache()\n\n # Only if we've cached any for this issuer\n if self.uri not in data:\n return\n\n for client in data[self.uri]:\n if client['id'] == client_id:\n return client",
"def cache_load(forced=False):\n global cache_main\n if forced or (cache_main is None):\n cache_main = apt.Cache(memonly=True)\n return cache_main",
"def client():\n if es:\n return es\n else:\n raise StorageClientException(STORAGE_NAME,\n \"client used before initialization.\")",
"def _get_keystone_client(client, conf, keystone_ep, lcp_id):\n global _KEYSTONES\n try:\n if keystone_ep not in _KEYSTONES:\n # Instantiate the Keystone client according to the configuration\n _KEYSTONES[keystone_ep] = client.Client(\n username=conf.mech_id,\n password=conf.password,\n tenant_name=conf.tenant_name,\n auth_url=keystone_ep + '/v' + conf.version)\n\n return _KEYSTONES[keystone_ep]\n except Exception:\n logger.critical(\n 'CRITICAL|CON{}KEYSTONE001|Cannot reach Keystone EP: {} of '\n 'region {}. Please contact Keystone team.'.format(\n dictator.get('service_name', 'ORM'), keystone_ep, lcp_id))\n raise",
"def init_cache(self):\n\t\tself.cache = {}\n\t\ttry:\n\t\t\twith open(os.path.join(self.root, \"make.cache\"), 'r') as f:\n\t\t\t\tcache_raw = f.read()\n\n\t\t\tself.cache = json.loads(cache_raw)\n\t\texcept IOError:\n\t\t\tpass",
"def client():\n client = Client(\n RAISE_NOTIMPLEMENTEDERROR_FOR_UNIMPLEMENTED_API_ENDPOINTS=True,\n VERBOSE_RESPONSE_LOGGING=True,\n VERIFY_WEBUI_CERTIFICATE=False,\n )\n client.auth_log_in()\n client.app.preferences = dict(\n # enable RSS fetching\n rss_processing_enabled=True,\n # prevent banning IPs\n web_ui_max_auth_fail_count=1000,\n web_ui_ban_duration=1,\n )\n client.func = staticmethod(partial(get_func, client))\n try:\n add_torrent(client, ORIG_TORRENT_URL, ORIG_TORRENT_HASH)\n except Exception:\n pytest.exit(\"failed to add orig_torrent during setup\")\n return client",
"def create_client(self, name):\n if name in self._clients:\n return self._clients[name]\n\n if name not in self._registry:\n return None\n\n overwrite, config = self._registry[name]\n client_cls = config.pop('client_cls', None)\n\n if client_cls and client_cls.OAUTH_APP_CONFIG:\n kwargs = client_cls.OAUTH_APP_CONFIG\n kwargs.update(config)\n else:\n kwargs = config\n\n kwargs = self.generate_client_kwargs(name, overwrite, **kwargs)\n framework = self.framework_integration_cls(name, self.cache)\n if client_cls:\n client = client_cls(framework, name, **kwargs)\n elif kwargs.get('request_token_url'):\n client = self.oauth1_client_cls(framework, name, **kwargs)\n else:\n client = self.oauth2_client_cls(framework, name, **kwargs)\n\n self._clients[name] = client\n return client",
"def _init_client(self):\n pass",
"def get_client(self, service, region, public=True, cached=True):\r\n if not self.authenticated:\r\n raise exc.NotAuthenticated(\"You must authenticate before trying \"\r\n \"to create clients.\")\r\n clt = ep = None\r\n mapped_service = self.service_mapping.get(service) or service\r\n svc = self.services.get(mapped_service)\r\n if svc:\r\n ep = svc.endpoints.get(region)\r\n if ep:\r\n if cached:\r\n clt = ep.client if public else ep.client_private\r\n else:\r\n clt = ep.get_new_client(public=public)\r\n if not clt:\r\n raise exc.NoSuchClient(\"There is no client available for the \"\r\n \"service '%s' in the region '%s'.\" % (service, region))\r\n return clt",
"def _init_cache(self):\r\n logging.debug('Looking for cache file: %s', self.cachefile)\r\n if os.path.exists(self.cachefile) and os.path.getsize(self.cachefile) > 0:\r\n with open(self.cachefile, 'rb') as self.open_cache:\r\n self.cache = pickle.load(self.open_cache)\r\n logging.debug('Cache: %s', (self.cache))\r\n else:\r\n logging.debug('Cache file not found. Creating from scratch')\r\n self._build_cache()",
"def init_client(self):\n self._transport = RequestsHTTPTransport(url=self._url,\n use_json=True,\n headers={\n \"Content-type\":\n \"application/json\",\n \"Authorization\":\n \"bearer \" +\n str(self._token).strip()\n },\n verify=False)\n self._client = Client(retries=3,\n transport=self._transport,\n fetch_schema_from_transport=False)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Low-d representation is a 21-element vector. The first 20 elements are the counts of each amino acid present in the active site; the last element is the average distance between the centers of the residues.
|
def get_low_d_rep(active_site):
aas = [sum(res.type == AA for res in active_site.residues) for AA in AAs]
aas.append(np.nanmean(distance_matrix(active_site.residues).flatten()))
return(np.array(aas))
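
The featurizer above leans on an AAs alphabet and a distance_matrix helper from the surrounding module; the definitions below are only a plausible sketch of what those could look like (three-letter residue codes, pairwise distances between residue centers), not the module's actual code. The residue attributes used here (atoms, coords) are likewise assumptions.

import numpy as np

AAs = ['ALA', 'ARG', 'ASN', 'ASP', 'CYS', 'GLN', 'GLU', 'GLY', 'HIS', 'ILE',
       'LEU', 'LYS', 'MET', 'PHE', 'PRO', 'SER', 'THR', 'TRP', 'TYR', 'VAL']

def distance_matrix(residues):
    # Pairwise Euclidean distances between residue centers, assuming each
    # residue exposes an `atoms` list whose members carry `coords` (x, y, z).
    centers = np.array([np.mean([a.coords for a in r.atoms], axis=0)
                        for r in residues])
    diff = centers[:, None, :] - centers[None, :, :]
    return np.sqrt((diff ** 2).sum(axis=-1))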
|
[
"def get_totalleng(self):\n length_count = 0\n for base in (self.sequence):\n if base:\n length_count += 1\n exon_count = 0\n for base in (self.sequence):\n if base == exon:\n exon_count += 1\n if base != exon:\n self.exon_l.append(exon_count)\n exon_count = 0\n self.exonlength = sum(int(exon_count) for exon_count in self.exon_l)\n intron_count = 0\n for base in (self.sequence):\n if base == intron:\n intron_count += 1\n if base != intron:\n self.intron_l.append(intron_count)\n intron_count = 0\n self.intronlength = sum(int(intron_count) for intron_count in self.intron_l)\n self.percent_exonic = ((self.exonlength) / (length_count)) * 100\n self.percent_intronic = ((self.intronlength) / (length_count)) * 100\n self.average_exon = sum(self.exon_l) * 1.0 / len(self.exon_l)\n variance_exon = sum(map(lambda x: (x - self.average_exon) ** 2, self.exon_l))\n self.standard_deviation_exon = math.sqrt(self.average_exon(variance_exon))\n self.average_intron = sum(self.intron_l) * 1.0 / len(self.intron_l)\n variance_intron = sum(map(lambda x: (x - self.average_intron) ** 2, self.intron_l))\n self.standard_deviation_intron = math.sqrt(variance_intron / (len(self.intron_l) - 1))",
"def lowIa( Av ) :\n return( SNANAdust( Av, sigma=0.15, tau=0.15, R0=1 ) )",
"def centroid(micro):\r\n return micro['ls']/micro['n']",
"def segmentByEnergy(self,thr,width,min_width=450):\n data = np.abs(self.data)\n E = np.zeros(len(data))\n E[width] = np.sum(data[:2*width+1])\n for i in range(width+1,len(data)-width):\n E[i] = E[i-1] - data[i-width-1] + data[i+width]\n E = E/(2*width)\n\n # TODO: Automatic energy gain (normalisation method)\n\n # This thing is noisy, so I'm going to median filter it. SoundID doesn't seem to?\n Em = np.zeros(len(data))\n for i in range(width,len(data)-width):\n Em[i] = np.median(E[i-width:i+width])\n for i in range(width):\n Em[i] = np.median(E[0:2*i])\n Em[-i] = np.median(E[-2 * i:])\n\n # TODO: Better way to do this?\n threshold = np.mean(Em) + thr*np.std(Em)\n\n # Pick out the regions above threshold and the argmax of each, assuming they are wide enough\n starts = []\n ends = []\n insegment = False\n for i in range(len(data)-1):\n if not insegment:\n if Em[i]<threshold and Em[i+1]>threshold:\n starts.append(i)\n insegment = True\n if insegment:\n if Em[i]>threshold and Em[i+1]<threshold:\n ends.append(i)\n insegment = False\n if insegment:\n ends.append(len(data))\n maxpoints = []\n Emm = np.zeros(len(data))\n for i in range(len(starts)):\n if ends[i] - starts[i] > min_width:\n maxpoints.append(np.argmax(Em[starts[i]:ends[i]]))\n Emm[starts[i]:ends[i]] = Em[starts[i]:ends[i]]\n\n # TODO: SoundID appears to now compute the 44 LPC coeffs for each [midpoint-width:midpoint+width]\n # TODO: And then compute the geometric distance to templates\n\n segs = []\n for i in range(len(starts)):\n segs.append([float(starts[i])/self.fs,float(ends[i])/self.fs])\n return segs",
"def heterozygosity(self):\n allele_freqs = [1-sum(self.aaf)] + self.aaf\n return 1 - sum(map(lambda x: x**2, allele_freqs))",
"def compute_load(self):\n #pdb.set_trace()\n load = [ 0 for _ in range(self.signal_space.length) ]\n meanings = self.meaning_space.meanings()\n for position in range(self.signal_space.length):\n comparisons = 0\n for meaning in meanings:\n utterances = self.speak(meaning, pick=False)\n for utterance in utterances:\n neighbors = self.signal_space.compute_neighbors(utterance,position)\n for neighbor in neighbors:\n understandings = self.hear(neighbor, pick=False)\n for understanding in understandings:\n mdist = self.meaning_space.hamming(meaning,understanding)\n load[position] += (mdist / self.meaning_space.length)\n comparisons += 1\n load[position] /= comparisons\n #pdb.set_trace()\n return load",
"def identitfy_variants(self, min_freq_threshold=0.0, time_series_data=False):\n\t \tself.min_freq_threshold = min_freq_threshold\n\t \t#get time-point info\n\t \ttpoints = set()\n\t \ttpoint_totals = {}\n\t \tif time_series_data:\n\t \t\tfor i in self.data:\n\t \t\t\ttpoint = float(i['id'].split('_')[0])\n\t \t\t\ttpoints.update([tpoint])\n\t \t\t\ttry:\n\t \t\t\t\ttpoint_totals[tpoint] += i['count']\n\t \t\t\texcept KeyError:\n\t \t\t\t\ttpoint_totals[tpoint] = float(i['count'])\n\t \t\tfirst_tpoint = min(tpoints)\n\t \telse:\n\t \t\ttpoints = set([1])\n\t \t\ttpoint_totals[1] = self.total\n\t \t\tfirst_tpoint = 1\n\t \t#this will be a 3 dimensional variable. 1st dim: a list, each element corresponds to a column in the alignment. 2nd dim: a dic, indexed by time-point, so each element corresponds to a time-point. 3rd dim: a dic, indexed by allele, so each element corresponds to each allele found at that position, at that time-point. Definition is the freq for that allele, at that time-point, in that position.\n\t \tvariants = []\n\t \tnum_cols = len(self.data[0]['seq'])",
"def _get_lun_distribution_info(self, luns):\n\n ctr_info = [0, 0]\n for lun in luns:\n if (lun[6].startswith(VOL_AND_SNAP_NAME_PREFIX) and\n lun[8] == 'THICK'):\n if lun[4] == 'A':\n ctr_info[0] += 1\n else:\n ctr_info[1] += 1\n return ctr_info",
"def calc_aa_propensity(seq):\n\n # count absolute number of each residue in the input string\n number_each_aa_dict = {}\n\n all_aa = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y']\n # create an dictionary of the numbers {\"A\" : 57, \"C\" : 5, ...} etc\n for aa in all_aa:\n number_each_aa_dict[aa] = seq.count(aa)\n\n # create a dictionary to hold the propensity of each residue\n aa_propensity_dict = {}\n length = len(seq)\n for aa in number_each_aa_dict:\n aa_propensity_dict[aa] = number_each_aa_dict[aa] / length\n\n # turn the dictionary into a pd.Series\n aa_prop_ser = pd.Series(aa_propensity_dict)\n # normalise so that all the aa propensities add up to 1.0\n # this is important if \"X\" or \"U\" is in the sequences\n aa_prop_norm_ser = aa_prop_ser / aa_prop_ser.sum()\n # name the index column\n aa_prop_norm_ser.index.name = \"freq\"\n return aa_prop_norm_ser",
"def d10_to_ls(d10):\n filcenter_dist = d10 * 2 / 3\n face_dist = filcenter_dist - 0.5 * 9 - 0.5 * 16\n return face_dist",
"def len2Idx(self, l_um):\n assert l_um < self.params.L\n segPos = np.array([seg.x for seg in self.axon]) * self.params.L\n return np.argmin(np.abs(segPos-l_um))",
"def return_MST_elongation(distance_arr):\n graph_half_length = numpy.average(distance_arr) \n g_unique, counts = numpy.unique(distance_arr, return_counts=True)\n graph_half_width = numpy.average(counts) / 2.\n mst_elongation = float(graph_half_length) / float(graph_half_width) + 1 \n\n return mst_elongation",
"def aaf(self):\n num_chroms = 0.0\n allele_counts = Counter()\n for s in self.samples:\n if s.gt_type is not None:\n for a in s.gt_alleles:\n allele_counts.update([a])\n num_chroms += 1\n return [allele_counts[str(i)]/num_chroms for i in range(1, len(self.ALT)+1)]",
"def light_distance(self):\n \treturn self.sight_dist()",
"def ang_size(D, L):\n D = u.Quantity(D, u.Mpc)\n L = u.Quantity(L, u.m)\n d = D.to(u.m)\n r = (u.arcsec)*L*206265.0/d\n return r",
"def _getLog2NormalizedReadcounts(self):\n df = self._makeDFFromCSV(FILENAME_NORMALIZED)\n return df.set_index(cn.GENE_ID)",
"def lehmer_mean(self, SF):\n if not len(SF):\n return 0\n return float(sum([f ** 2 for f in SF])) / sum(SF)",
"def signed_area(self):\n flat = self.flatten()\n area = 0\n for s in flat.asSegments():\n area = area + (s.start.x * s.end.y) - (s.start.y * s.end.x)\n area = area / 2.0\n return area",
"def lav_top_id(lav):\n top_id = 0\n for hit in lav:\n for alignment in hit:\n total_id = 0\n total_length = 0\n for block in alignment:\n length = block.query['length']\n total_length += length\n total_id += length * block.identity\n id_pct = total_id / total_length\n logging.debug(\" {}\".format(round(id_pct, 2)))\n if id_pct > top_id:\n top_id = id_pct\n return top_id"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Assumes L is a list of lists whose elements are ints. Mutates L such that it reverses its elements and also reverses the order of the int elements in every element of L. It does not return anything.
|
def deep_reverse(L):
L.reverse()
for i in L:
i.reverse()
|
[
"def deep_reverse(L):\n temp = list(L)\n for i in range(len(L)):\n # reverse top list\n L[len(L) - 1 - i] = temp[i]\n\n # reverse inner list\n inL = L[len(L) - 1 - i]\n temp2 = list(inL)\n for j in range(len(inL)):\n inL[len(inL) - 1 - j] = temp2[j]",
"def deepReverse(L):\n if L == []:\n return []\n if isinstance(L[-1], list):\n return [deepReverse(L[-1])] + deepReverse(L[0:len(L)-1])\n return [L[-1]] + deepReverse(L[0:len(L)-1])",
"def reverse(L):\r\n return L[::-1]",
"def reverse_list(l):\n new_l = l\n new_l.reverse()\n\n return new_l",
"def reverse_list(l):\n\n return l[::-1]",
"def reversal(input_list):\r\n input_list.reverse()\r\n for e in input_list:\r\n if type(e) == list:\r\n reversal(e)",
"def reverse_lists(lists):\n\n return list(map(list, map(reversed, lists)))",
"def sort_reverse(list_of_integers):",
"def reverselist(lista):\n return list(reversed(lista))",
"def reverse_rec(int_list): # must use recursion\n if int_list is None: #If list is None, ValueError is raised \n raise ValueError\n if len(int_list) == 0: #If list is empty, return empty list\n return []\n else:\n out_list = [int_list.pop()] + reverse_rec(int_list) #Takes last item of list and adds the rest of the list reversed\n return out_list\n pass",
"def reverse_linked_list1(list_to_reverse):\n\n my_new_list = LinkedList()\n all_elements = []\n\n while len(list_to_reverse) > 0:\n element = list_to_reverse.pop()\n all_elements.append(element)\n\n for e in all_elements[::-1]:\n my_new_list.add(e)\n\n return my_new_list",
"def elements_reversed(seq):\n return seq[::-1]",
"def reversedEnumerate(l):\n return zip(range(len(l)-1, -1, -1), l[::-1])",
"def test_list_reverse():\n print(\"Computed:\", list_reverse([]), \"Expected: []\")\n print(\"Computed:\", list_reverse([1]), \"Expected: [1]\")\n print(\"Computed:\", list_reverse([1, 2, 3]), \"Expected: [3, 2, 1]\")\n print(\"Computed:\", list_reverse([2, 3, 1]), \"Expected: [1, 3, 2]\")",
"def reversed(seq):\n\n l=list(seq)\n l.reverse()\n return l",
"def test_list_reverse():\r\n print \"Computed:\", list_reverse([]), \"Expected: []\"\r\n print \"Computed:\", list_reverse([1]), \"Expected: [1]\"\r\n print \"Computed:\", list_reverse([1, 2, 3]), \"Expected: [3, 2, 1]\"\r\n print \"Computed:\", list_reverse([2, 3, 1]), \"Expected: [1, 3, 2]\"\r\n print \"Computed:\", list_reverse([1, 2, 3, 4, 5, 6, 7, 8]), \"Expected: [8, 7, 6, 5, 4, 3, 2, 1]\"",
"def reverse_rec(int_list): # must use recursion\n\n # raises ValueError if list is None\n if int_list == None:\n raise ValueError\n else:\n rev_list = list() # creates reversed list\n index = len(int_list)-1 # defines initial index\n return recursion(index, rev_list, int_list) # calls recursion function\n pass",
"def reverse(self):\n self.flips.reverse()\n for e in self.flips:\n self.permute(e, False)\n self.flips = []",
"def reverse_rlist_iterative(s):\n \"*** YOUR CODE HERE ***\"\n newlist = rlist(first(s), empty_rlist)\n while rest(s) != empty_rlist:\n s = rest(s)\n newlist = rlist(first(s), newlist)\n return newlist"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
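As a quick illustration of the deep_reverse row above, the following sketch shows the in-place mutation the query describes; the sample list values are illustrative assumptions and are not part of the dataset itself:

def deep_reverse(L):
    # reverse the order of the sub-lists in place
    L.reverse()
    # then reverse each sub-list in place as well
    for i in L:
        i.reverse()

L = [[1, 2], [3, 4], [5, 6, 7]]   # hypothetical input, for illustration only
deep_reverse(L)
print(L)  # [[7, 6, 5], [4, 3], [2, 1]] -- L is mutated in place; the function returns None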