query (string, lengths 9–9.05k) | document (string, lengths 10–222k) | negatives (list, lengths 19–20) | metadata (dict)
---|---|---|---|
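Each row pairs a natural-language query describing a test's expected behaviour with the matching test source (`document`) and 19–20 hard-negative code snippets. A minimal sketch of loading and inspecting a row with the Hugging Face `datasets` library follows; the repository id is a placeholder, not this dataset's actual name.

```python
# Sketch only: "user/code-retrieval-triplets" is a placeholder repo id.
from datasets import load_dataset

ds = load_dataset("user/code-retrieval-triplets", split="train")
row = ds[0]
print(row["query"])                              # natural-language test description
print(row["document"].splitlines()[0])           # first line of the positive test code
print(len(row["negatives"]))                     # 19 or 20 hard-negative snippets
print(row["metadata"]["objective"]["triplet"])   # [["query", "document", "negatives"]]
```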
Parsing a server descriptor when the bridge's networkstatus document didn't have a digest of the server descriptor should raise a MissingServerDescriptorDigest.
|
def test_Bridge_checkServerDescriptor(self):
        # Create a networkstatus descriptor without a server descriptor digest:
        filename = self._networkstatusFile + "-missing-digest"
        fh = open(filename, 'w')
        invalid = BRIDGE_NETWORKSTATUS.replace("c4EVu2rO/iD/DJYBX/Ll38DGQWI", "foo")
        fh.seek(0)
        fh.write(invalid)
        fh.flush()
        fh.close()

        realdigest = "738115BB6ACEFE20FF0C96015FF2E5DFC0C64162"

        #networkstatus = descriptors.parseNetworkStatusFile(filename)
        #self.bridge.updateFromNetworkStatus(networkstatus[0])
        #self.assertRaises(bridges.MissingServerDescriptorDigest,
        #                  self.bridge.updateFromNetworkStatus,
        #                  networkstatus[0])
|
[
"def test_Bridge_checkServerDescriptor_digest_missing(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n\n self.bridge.descriptorDigest = None\n self.assertRaises(bridges.MissingServerDescriptorDigest,\n self.bridge._checkServerDescriptor,\n self.serverdescriptor)",
"def test_Bridge_checkServerDescriptor_digest_mismatch_sd(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n\n self.bridge.descriptorDigest = 'deadbeef'\n self.assertRaises(bridges.ServerDescriptorDigestMismatch,\n self.bridge._checkServerDescriptor,\n self.serverdescriptor)",
"def test_Bridge_checkServerDescriptor_digest_mismatch_ns(self):\n # Create a networkstatus descriptor without a server descriptor digest:\n filename = self._networkstatusFile + \"-mismatched-digest\"\n fh = open(filename, 'w')\n invalid = BRIDGE_NETWORKSTATUS.replace(\"c4EVu2rO/iD/DJYBX/Ll38DGQWI\",\n \"c4EVu2r1/iD/DJYBX/Ll38DGQWI\")\n fh.seek(0)\n fh.write(invalid)\n fh.flush()\n fh.close()\n\n realdigest = \"738115BB6ACEFE20FF0C96015FF2E5DFC0C64162\"\n networkstatus = descriptors.parseNetworkStatusFile(filename)\n self.bridge.updateFromNetworkStatus(networkstatus[0])\n #self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n\n self.assertRaises(bridges.ServerDescriptorDigestMismatch,\n self.bridge.updateFromServerDescriptor,\n self.serverdescriptor)",
"def test_Bridge_updateFromServerDescriptor_no_networkstatus(self):\n self.assertRaises(bridges.ServerDescriptorWithoutNetworkstatus,\n self.bridge.updateFromServerDescriptor,\n self.serverdescriptor)",
"def test_Bridge_updateFromServerDescriptor_ignoreNetworkstatus_no_networkstatus(self):\n self.bridge.updateFromServerDescriptor(self.serverdescriptor,\n ignoreNetworkstatus=True)\n self.assertIsNone(self.bridge.descriptors['networkstatus'])\n self.assertIsNotNone(self.bridge.descriptors['server'])",
"def test_Bridge_descriptorDigest(self):\n realdigest = \"738115BB6ACEFE20FF0C96015FF2E5DFC0C64162\"\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.assertEqual(self.bridge.descriptorDigest, realdigest)",
"def test_01_server_reply_unparseable_reply(self):\n self.fake_sfile.reply_buf = ['not even remotely parseable\\r\\n']\n self.failUnlessRaises(gnats.GnatsNetworkException,\n self.conn._server_reply)",
"def test_Bridge_updateFromExtraInfoDescriptor_bad_signature_changed(self):\n # Make the signature uppercased\n BEGIN_SIG = '-----BEGIN SIGNATURE-----'\n doc, sig = BRIDGE_EXTRAINFO.split(BEGIN_SIG)\n ei = BEGIN_SIG.join([doc, sig.upper()])\n self._writeExtrainfo(ei)\n self._parseAllDescriptorFiles()\n\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.assertEqual(len(self.bridge.transports), 0)\n self.assertIsNone(self.bridge.descriptors['extrainfo'])",
"def get_image_info_from_digest(self, digest: str) -> Union[dict, None]:\n url = self.dockerd_socket_baseurl + \"/images/json\"\n try:\n response = self.session.get(url)\n except requests.exceptions.ConnectionError:\n emit.debug(\n \"Cannot connect to /var/run/docker.sock , please ensure dockerd is running.\",\n )\n return None\n\n if response.status_code != 200:\n emit.debug(f\"Bad response when validating local image: {response.status_code}\")\n return None\n\n for image_info in response.json():\n if image_info[\"RepoDigests\"] is None:\n continue\n if any(digest in repo_digest for repo_digest in image_info[\"RepoDigests\"]):\n return image_info\n return None",
"def _check_descriptor_dependencies(self, session, descriptor):\n if not descriptor.get(\"netslice-subnet\"):\n return\n for nsd in descriptor[\"netslice-subnet\"]:\n nsd_id = nsd[\"nsd-ref\"]\n filter_q = self._get_project_filter(session)\n filter_q[\"id\"] = nsd_id\n if not self.db.get_list(\"nsds\", filter_q):\n raise EngineException(\"Descriptor error at 'netslice-subnet':'nsd-ref'='{}' references a non \"\n \"existing nsd\".format(nsd_id), http_code=HTTPStatus.CONFLICT)",
"def gattc_read_descriptor(\n self,\n descriptor: Union[_DescriptorHandle, _DescriptorTuple],\n /,\n ) -> bytes:\n ...",
"async def parse_node_server_defs(self, slot: str):\n _LOGGER.info(\"Parsing node server slot %s\", slot)\n node_server_profile = {\n key: value\n for (key, value) in self._profiles.items()\n if key.startswith(slot)\n }\n\n node_defs_impl = getDOMImplementation()\n editors_impl = getDOMImplementation()\n node_defs_xml = node_defs_impl.createDocument(None, TAG_ROOT, None)\n editors_xml = editors_impl.createDocument(None, TAG_ROOT, None)\n nls_lookup: dict = {}\n\n for file, contents in node_server_profile.items():\n contents_xml = \"\"\n file = file.lower()\n if file.endswith(\".xml\"):\n try:\n contents_xml = minidom.parseString(contents).firstChild\n except XML_ERRORS:\n _LOGGER.error(\n \"%s while parsing Node Server %s file %s\",\n XML_PARSE_ERROR,\n slot,\n file,\n )\n continue\n if \"nodedef\" in file:\n node_defs_xml.firstChild.appendChild(contents_xml)\n if \"editors\" in file:\n editors_xml.firstChild.appendChild(contents_xml)\n if \"nls\" in file and \"en_us\" in file:\n nls_list = [\n line\n for line in contents.split(\"\\n\")\n if not line.startswith(\"#\") and line != \"\"\n ]\n if nls_list:\n nls_lookup = dict(re.split(r\"\\s?=\\s?\", line) for line in nls_list)\n self._node_server_nls.append(\n NodeServerNLS(\n slot=slot,\n nls=nls_lookup,\n )\n )\n\n # Process Node Def Files\n node_defs = node_defs_xml.getElementsByTagName(TAG_NODE_DEF)\n for node_def in node_defs:\n node_def_id = attr_from_element(node_def, ATTR_ID)\n nls_prefix = attr_from_element(node_def, ATTR_NLS)\n sts = node_def.getElementsByTagName(TAG_ST)\n statuses = {}\n for st in sts:\n status_id = attr_from_element(st, ATTR_ID)\n editor = attr_from_element(st, ATTR_EDITOR)\n statuses.update({status_id: editor})\n\n cmds_sends = node_def.getElementsByTagName(TAG_SENDS)[0]\n cmds_accepts = node_def.getElementsByTagName(TAG_ACCEPTS)[0]\n cmds_sends_cmd = cmds_sends.getElementsByTagName(TAG_CMD)\n cmds_accepts_cmd = cmds_accepts.getElementsByTagName(TAG_CMD)\n sends_commands = []\n accepts_commands = []\n\n for cmd in cmds_sends_cmd:\n sends_commands.append(attr_from_element(cmd, ATTR_ID))\n for cmd in cmds_accepts_cmd:\n accepts_commands.append(attr_from_element(cmd, ATTR_ID))\n\n status_names = {}\n name = node_def_id\n if nls_lookup:\n if (name_key := f\"ND-{node_def_id}-NAME\") in nls_lookup:\n name = nls_lookup[name_key]\n for st in statuses:\n if (key := f\"ST-{nls_prefix}-{st}-NAME\") in nls_lookup:\n status_names.update({st: nls_lookup[key]})\n\n self._node_server_node_definitions.append(\n NodeServerNodeDefinition(\n node_def_id=node_def_id,\n name=name,\n nls_prefix=nls_prefix,\n slot=slot,\n statuses=statuses,\n status_names=status_names,\n sends_commands=sends_commands,\n accepts_commands=accepts_commands,\n )\n )\n # Process Editor Files\n editors = editors_xml.getElementsByTagName(ATTR_EDITOR)\n for editor in editors:\n editor_id = attr_from_element(editor, ATTR_ID)\n editor_range = editor.getElementsByTagName(TAG_RANGE)[0]\n uom = attr_from_element(editor_range, ATTR_UNIT_OF_MEASURE)\n subset = attr_from_element(editor_range, ATTR_SUBSET)\n nls = attr_from_element(editor_range, ATTR_NLS)\n\n values = None\n if nls_lookup and uom == \"25\":\n values = {\n key.partition(\"-\")[2]: value\n for (key, value) in nls_lookup.items()\n if key.startswith(nls)\n }\n\n self._node_server_node_editors.append(\n NodeServerNodeEditor(\n editor_id=editor_id,\n unit_of_measurement=uom,\n subset=subset,\n nls=nls,\n slot=slot,\n values=values,\n )\n )\n\n _LOGGER.debug(\"ISY parsed node server profiles\")",
"def _check_descriptor_dependencies(self, session, descriptor):\n if session[\"force\"]:\n return\n member_vnfd_index = {}\n if descriptor.get(\"constituent-vnfd\") and not session[\"force\"]:\n for vnf in descriptor[\"constituent-vnfd\"]:\n vnfd_id = vnf[\"vnfd-id-ref\"]\n filter_q = self._get_project_filter(session)\n filter_q[\"id\"] = vnfd_id\n vnf_list = self.db.get_list(\"vnfds\", filter_q)\n if not vnf_list:\n raise EngineException(\"Descriptor error at 'constituent-vnfd':'vnfd-id-ref'='{}' references a non \"\n \"existing vnfd\".format(vnfd_id), http_code=HTTPStatus.CONFLICT)\n # elif len(vnf_list) > 1:\n # raise EngineException(\"More than one vnfd found for id='{}'\".format(vnfd_id),\n # http_code=HTTPStatus.CONFLICT)\n member_vnfd_index[vnf[\"member-vnf-index\"]] = vnf_list[0]\n\n # Cross references validation in the descriptor and vnfd connection point validation\n for vld in get_iterable(descriptor.get(\"vld\")):\n for referenced_vnfd_cp in get_iterable(vld.get(\"vnfd-connection-point-ref\")):\n # look if this vnfd contains this connection point\n vnfd = member_vnfd_index.get(referenced_vnfd_cp[\"member-vnf-index-ref\"])\n for vnfd_cp in get_iterable(vnfd.get(\"connection-point\")):\n if referenced_vnfd_cp.get(\"vnfd-connection-point-ref\") == vnfd_cp[\"name\"]:\n break\n else:\n raise EngineException(\n \"Error at vld[id='{}']:vnfd-connection-point-ref[member-vnf-index-ref='{}']:vnfd-\"\n \"connection-point-ref='{}' references a non existing conection-point:name inside vnfd '{}'\"\n .format(vld[\"id\"], referenced_vnfd_cp[\"member-vnf-index-ref\"],\n referenced_vnfd_cp[\"vnfd-connection-point-ref\"], vnfd[\"id\"]),\n http_code=HTTPStatus.UNPROCESSABLE_ENTITY)",
"def test_04_read_server_parsed(self):\n self.fake_sfile.set_reply_buf('x\\037y\\036\\r\\na\\037b\\037c\\036\\r\\n.\\r\\n')\n out = self.conn._read_server(True)\n self.assertEquals(len(out), 2)\n self.assertEquals(len(out[0]), 2)\n self.assertEquals(len(out[1]), 3)\n self.assertEquals(out[1][1], 'b')",
"def test_create_server_invalid_flavor(self):\n\n newServer = self.os.servers.create(name=\"testserver2\",\n\t image=\"http://glance1:9292/v1/images/1\",\n\t flavor=\"http://172.19.0.3:8774/v1.1/flavors/99999999\")",
"def decode_sentinelhub_err_msg(response):\n try:\n server_message = []\n for elem in decode_data(response.content, MimeType.XML):\n if 'ServiceException' in elem.tag or 'Message' in elem.tag:\n server_message.append(elem.text.strip('\\n\\t '))\n return ''.join(server_message)\n except ElementTree.ParseError:\n return response.text",
"def get_descriptors( type_descriptor):\n log.debug(\"Get %s descriptors\", type_descriptor)\n try:\n client = Client()\n if type_descriptor == 'nsd':\n result = client.nsd_list()\n\n elif type_descriptor == 'vnfd':\n result = client.vnfd_list()\n\n except Exception as e:\n log.exception(e)\n result = {}\n return result",
"def test_02_server_reply_bad_reply_type(self):\n self.fake_sfile.reply_buf = ['666+Evil Reply Type\\r\\n']\n self.failUnlessRaises(gnats.GnatsNetworkException,\n self.conn._server_reply)",
"def make_server_description(server, hosts):\n ismaster_response = {}\n ismaster_response['tags'] = server['tags']\n ismaster_response['ok'] = True\n ismaster_response['hosts'] = hosts\n\n server_type = server['type']\n\n if server_type != \"Standalone\" and server_type != \"Mongos\":\n ismaster_response['setName'] = True\n if server_type == \"RSPrimary\":\n ismaster_response['ismaster'] = True\n elif server_type == \"RSSecondary\":\n ismaster_response['secondary'] = True\n elif server_type == \"Mongos\":\n ismaster_response['msg'] = 'isdbgrid'\n\n return ServerDescription(clean_node(server['address']),\n IsMaster(ismaster_response),\n round_trip_time=server['avg_rtt_ms'])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
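The `objective` metadata above declares how a row is meant to be consumed: `query` as the anchor, `document` as the positive, and each entry of `negatives` as a hard negative. A minimal training sketch with sentence-transformers is shown below; the base model, batch size, and the tiny inline `example_row` are illustrative assumptions, not part of the dataset.

```python
# Sketch under assumptions: base encoder and hyperparameters are illustrative only.
from torch.utils.data import DataLoader
from sentence_transformers import SentenceTransformer, InputExample, losses

# A stand-in for one parsed dataset row (real rows hold full test bodies).
example_row = {
    "query": "Parsing a server descriptor without a digest should raise MissingServerDescriptorDigest.",
    "document": "def test_Bridge_checkServerDescriptor_digest_missing(self): ...",
    "negatives": ["def test_01_server_reply_unparseable_reply(self): ...",
                  "def get_descriptors(type_descriptor): ..."],
}

# One (anchor, positive, hard negative) InputExample per negative in the row.
train_examples = [InputExample(texts=[example_row["query"], example_row["document"], neg])
                  for neg in example_row["negatives"]]

model = SentenceTransformer("all-MiniLM-L6-v2")           # hypothetical base encoder
loader = DataLoader(train_examples, shuffle=True, batch_size=16)
loss = losses.MultipleNegativesRankingLoss(model)          # treats texts[2] as a hard negative
model.fit(train_objectives=[(loader, loss)], epochs=1, warmup_steps=10)
```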
Parsing a server descriptor whose digest doesn't match the one given in the bridge's networkstatus document should raise a ServerDescriptorDigestMismatch.
|
def test_Bridge_checkServerDescriptor_digest_mismatch_ns(self):
        # Create a networkstatus descriptor without a server descriptor digest:
        filename = self._networkstatusFile + "-mismatched-digest"
        fh = open(filename, 'w')
        invalid = BRIDGE_NETWORKSTATUS.replace("c4EVu2rO/iD/DJYBX/Ll38DGQWI",
                                               "c4EVu2r1/iD/DJYBX/Ll38DGQWI")
        fh.seek(0)
        fh.write(invalid)
        fh.flush()
        fh.close()

        realdigest = "738115BB6ACEFE20FF0C96015FF2E5DFC0C64162"
        networkstatus = descriptors.parseNetworkStatusFile(filename)
        self.bridge.updateFromNetworkStatus(networkstatus[0])
        #self.bridge.updateFromServerDescriptor(self.serverdescriptor)

        self.assertRaises(bridges.ServerDescriptorDigestMismatch,
                          self.bridge.updateFromServerDescriptor,
                          self.serverdescriptor)
|
[
"def test_Bridge_checkServerDescriptor_digest_mismatch_sd(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n\n self.bridge.descriptorDigest = 'deadbeef'\n self.assertRaises(bridges.ServerDescriptorDigestMismatch,\n self.bridge._checkServerDescriptor,\n self.serverdescriptor)",
"def test_Bridge_checkServerDescriptor(self):\n # Create a networkstatus descriptor without a server descriptor digest:\n filename = self._networkstatusFile + \"-missing-digest\"\n fh = open(filename, 'w')\n invalid = BRIDGE_NETWORKSTATUS.replace(\"c4EVu2rO/iD/DJYBX/Ll38DGQWI\", \"foo\")\n fh.seek(0)\n fh.write(invalid)\n fh.flush()\n fh.close()\n\n realdigest = \"738115BB6ACEFE20FF0C96015FF2E5DFC0C64162\"\n\n #networkstatus = descriptors.parseNetworkStatusFile(filename)\n #self.bridge.updateFromNetworkStatus(networkstatus[0])\n #self.assertRaises(bridges.MissingServerDescriptorDigest,\n # self.bridge.updateFromNetworkStatus,\n # networkstatus[0])",
"def test_Bridge_checkServerDescriptor_digest_missing(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n\n self.bridge.descriptorDigest = None\n self.assertRaises(bridges.MissingServerDescriptorDigest,\n self.bridge._checkServerDescriptor,\n self.serverdescriptor)",
"def test_Bridge_descriptorDigest(self):\n realdigest = \"738115BB6ACEFE20FF0C96015FF2E5DFC0C64162\"\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.assertEqual(self.bridge.descriptorDigest, realdigest)",
"def test_Bridge_updateFromServerDescriptor_no_networkstatus(self):\n self.assertRaises(bridges.ServerDescriptorWithoutNetworkstatus,\n self.bridge.updateFromServerDescriptor,\n self.serverdescriptor)",
"def test_Bridge_updateFromServerDescriptor_ignoreNetworkstatus_no_networkstatus(self):\n self.bridge.updateFromServerDescriptor(self.serverdescriptor,\n ignoreNetworkstatus=True)\n self.assertIsNone(self.bridge.descriptors['networkstatus'])\n self.assertIsNotNone(self.bridge.descriptors['server'])",
"def get_image_info_from_digest(self, digest: str) -> Union[dict, None]:\n url = self.dockerd_socket_baseurl + \"/images/json\"\n try:\n response = self.session.get(url)\n except requests.exceptions.ConnectionError:\n emit.debug(\n \"Cannot connect to /var/run/docker.sock , please ensure dockerd is running.\",\n )\n return None\n\n if response.status_code != 200:\n emit.debug(f\"Bad response when validating local image: {response.status_code}\")\n return None\n\n for image_info in response.json():\n if image_info[\"RepoDigests\"] is None:\n continue\n if any(digest in repo_digest for repo_digest in image_info[\"RepoDigests\"]):\n return image_info\n return None",
"def test_Bridge_updateFromExtraInfoDescriptor_bad_signature_changed(self):\n # Make the signature uppercased\n BEGIN_SIG = '-----BEGIN SIGNATURE-----'\n doc, sig = BRIDGE_EXTRAINFO.split(BEGIN_SIG)\n ei = BEGIN_SIG.join([doc, sig.upper()])\n self._writeExtrainfo(ei)\n self._parseAllDescriptorFiles()\n\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.assertEqual(len(self.bridge.transports), 0)\n self.assertIsNone(self.bridge.descriptors['extrainfo'])",
"def parse_digests(digests):\n def _atom(orig):\n s = orig\n if s.startswith(\"hash://\"):\n s = os.path.split(s[len(\"hash://\"):])[1]\n if ':' in s:\n # e.g. \"md5:asdaddas\"\n s = s.split(':')[-1]\n if '_' in s:\n # e.g. \"sha1_asdsads\"\n s = s.split('_')[-1]\n s = s.lower()\n res = {32: ('md5', s),\n 40: ('sha1', s),\n 64: ('sha256', s),\n 128: ('sha512', s)}.get(len(s), None)\n if not res:\n raise ValueError(\"invalid digest string: %s\" % (orig,))\n return res\n\n if isinstance(digests, (dict,)):\n return dict([_atom(v) for v in digests.values()])\n if not isinstance(digests, (tuple, list)):\n digests = (digests,)\n return dict([_atom(digest) for digest in digests])",
"def test_01_server_reply_unparseable_reply(self):\n self.fake_sfile.reply_buf = ['not even remotely parseable\\r\\n']\n self.failUnlessRaises(gnats.GnatsNetworkException,\n self.conn._server_reply)",
"def gattc_read_descriptor(\n self,\n descriptor: Union[_DescriptorHandle, _DescriptorTuple],\n /,\n ) -> bytes:\n ...",
"def _check_descriptor_dependencies(self, session, descriptor):\n if not descriptor.get(\"netslice-subnet\"):\n return\n for nsd in descriptor[\"netslice-subnet\"]:\n nsd_id = nsd[\"nsd-ref\"]\n filter_q = self._get_project_filter(session)\n filter_q[\"id\"] = nsd_id\n if not self.db.get_list(\"nsds\", filter_q):\n raise EngineException(\"Descriptor error at 'netslice-subnet':'nsd-ref'='{}' references a non \"\n \"existing nsd\".format(nsd_id), http_code=HTTPStatus.CONFLICT)",
"def test_create_server_invalid_flavor(self):\n\n newServer = self.os.servers.create(name=\"testserver2\",\n\t image=\"http://glance1:9292/v1/images/1\",\n\t flavor=\"http://172.19.0.3:8774/v1.1/flavors/99999999\")",
"def test_bad_info_hash(self):\n\t\trequest = self._build_announce_request_object(\n\t\t\tinfo_hash='\\x98H\\x16\\xfd2\\x96\"\\x87n\\x14\\x90v4&No3.\\x9f\\xb2'\n\t\t)\n\t\tresponse_data = bencode.bdecode(views.announce(request).data)\n\t\tfailure = {\n\t\t\t'failure reason': 'info_hash not found in the database',\n\t\t\t'failure code': 200,\n\t\t}\n\t\tself.assertEqual(response_data, failure)",
"def test_directives_wrong_misformatted_signature(self):\n handle_servername = ServerName(**{\n \"domain\" : self.valid_domain,\n }\n )\n self.assertRaises(\n ValueError,\n setattr,\n handle_servername,\n \"directives\",\n {\"signature\" : \"wrong_format\"}\n )\n del handle_servername",
"def _check_descriptor_dependencies(self, session, descriptor):\n if session[\"force\"]:\n return\n member_vnfd_index = {}\n if descriptor.get(\"constituent-vnfd\") and not session[\"force\"]:\n for vnf in descriptor[\"constituent-vnfd\"]:\n vnfd_id = vnf[\"vnfd-id-ref\"]\n filter_q = self._get_project_filter(session)\n filter_q[\"id\"] = vnfd_id\n vnf_list = self.db.get_list(\"vnfds\", filter_q)\n if not vnf_list:\n raise EngineException(\"Descriptor error at 'constituent-vnfd':'vnfd-id-ref'='{}' references a non \"\n \"existing vnfd\".format(vnfd_id), http_code=HTTPStatus.CONFLICT)\n # elif len(vnf_list) > 1:\n # raise EngineException(\"More than one vnfd found for id='{}'\".format(vnfd_id),\n # http_code=HTTPStatus.CONFLICT)\n member_vnfd_index[vnf[\"member-vnf-index\"]] = vnf_list[0]\n\n # Cross references validation in the descriptor and vnfd connection point validation\n for vld in get_iterable(descriptor.get(\"vld\")):\n for referenced_vnfd_cp in get_iterable(vld.get(\"vnfd-connection-point-ref\")):\n # look if this vnfd contains this connection point\n vnfd = member_vnfd_index.get(referenced_vnfd_cp[\"member-vnf-index-ref\"])\n for vnfd_cp in get_iterable(vnfd.get(\"connection-point\")):\n if referenced_vnfd_cp.get(\"vnfd-connection-point-ref\") == vnfd_cp[\"name\"]:\n break\n else:\n raise EngineException(\n \"Error at vld[id='{}']:vnfd-connection-point-ref[member-vnf-index-ref='{}']:vnfd-\"\n \"connection-point-ref='{}' references a non existing conection-point:name inside vnfd '{}'\"\n .format(vld[\"id\"], referenced_vnfd_cp[\"member-vnf-index-ref\"],\n referenced_vnfd_cp[\"vnfd-connection-point-ref\"], vnfd[\"id\"]),\n http_code=HTTPStatus.UNPROCESSABLE_ENTITY)",
"def test_02_server_reply_bad_reply_type(self):\n self.fake_sfile.reply_buf = ['666+Evil Reply Type\\r\\n']\n self.failUnlessRaises(gnats.GnatsNetworkException,\n self.conn._server_reply)",
"def diff(self, digest: str, otherdigest: str) -> bool:\n return digest != otherdigest",
"def test_mismatchedOpaqueChecksum(self):\n credentialFactory = FakeDigestCredentialFactory('md5', 'test realm')\n\n d = credentialFactory.getChallenge(clientAddress)\n\n def _test(challenge):\n key = '%s,%s,%s' % (challenge['nonce'],\n clientAddress.host,\n '0')\n\n digest = md5(key + 'this is not the right pkey').hexdigest()\n\n badChecksum = '%s-%s' % (digest,\n key.encode('base64').strip('\\n'))\n\n self.assertRaises(\n error.LoginFailed,\n credentialFactory.verifyOpaque,\n badChecksum,\n challenge['nonce'],\n clientAddress.host)\n return d.addCallback(_test)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Parsing a server descriptor when the corresponding networkstatus descriptor didn't include a server bridge.descriptorDigest that matches should raise a ServerDescriptorDigestMismatch exception.
|
def test_Bridge_checkServerDescriptor_digest_mismatch_sd(self):
        self.bridge.updateFromNetworkStatus(self.networkstatus)

        self.bridge.descriptorDigest = 'deadbeef'
        self.assertRaises(bridges.ServerDescriptorDigestMismatch,
                          self.bridge._checkServerDescriptor,
                          self.serverdescriptor)
|
[
"def test_Bridge_checkServerDescriptor(self):\n # Create a networkstatus descriptor without a server descriptor digest:\n filename = self._networkstatusFile + \"-missing-digest\"\n fh = open(filename, 'w')\n invalid = BRIDGE_NETWORKSTATUS.replace(\"c4EVu2rO/iD/DJYBX/Ll38DGQWI\", \"foo\")\n fh.seek(0)\n fh.write(invalid)\n fh.flush()\n fh.close()\n\n realdigest = \"738115BB6ACEFE20FF0C96015FF2E5DFC0C64162\"\n\n #networkstatus = descriptors.parseNetworkStatusFile(filename)\n #self.bridge.updateFromNetworkStatus(networkstatus[0])\n #self.assertRaises(bridges.MissingServerDescriptorDigest,\n # self.bridge.updateFromNetworkStatus,\n # networkstatus[0])",
"def test_Bridge_checkServerDescriptor_digest_mismatch_ns(self):\n # Create a networkstatus descriptor without a server descriptor digest:\n filename = self._networkstatusFile + \"-mismatched-digest\"\n fh = open(filename, 'w')\n invalid = BRIDGE_NETWORKSTATUS.replace(\"c4EVu2rO/iD/DJYBX/Ll38DGQWI\",\n \"c4EVu2r1/iD/DJYBX/Ll38DGQWI\")\n fh.seek(0)\n fh.write(invalid)\n fh.flush()\n fh.close()\n\n realdigest = \"738115BB6ACEFE20FF0C96015FF2E5DFC0C64162\"\n networkstatus = descriptors.parseNetworkStatusFile(filename)\n self.bridge.updateFromNetworkStatus(networkstatus[0])\n #self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n\n self.assertRaises(bridges.ServerDescriptorDigestMismatch,\n self.bridge.updateFromServerDescriptor,\n self.serverdescriptor)",
"def test_Bridge_checkServerDescriptor_digest_missing(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n\n self.bridge.descriptorDigest = None\n self.assertRaises(bridges.MissingServerDescriptorDigest,\n self.bridge._checkServerDescriptor,\n self.serverdescriptor)",
"def test_Bridge_descriptorDigest(self):\n realdigest = \"738115BB6ACEFE20FF0C96015FF2E5DFC0C64162\"\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.assertEqual(self.bridge.descriptorDigest, realdigest)",
"def test_Bridge_updateFromServerDescriptor_no_networkstatus(self):\n self.assertRaises(bridges.ServerDescriptorWithoutNetworkstatus,\n self.bridge.updateFromServerDescriptor,\n self.serverdescriptor)",
"def test_Bridge_updateFromServerDescriptor_ignoreNetworkstatus_no_networkstatus(self):\n self.bridge.updateFromServerDescriptor(self.serverdescriptor,\n ignoreNetworkstatus=True)\n self.assertIsNone(self.bridge.descriptors['networkstatus'])\n self.assertIsNotNone(self.bridge.descriptors['server'])",
"def test_Bridge_updateFromExtraInfoDescriptor_bad_signature_changed(self):\n # Make the signature uppercased\n BEGIN_SIG = '-----BEGIN SIGNATURE-----'\n doc, sig = BRIDGE_EXTRAINFO.split(BEGIN_SIG)\n ei = BEGIN_SIG.join([doc, sig.upper()])\n self._writeExtrainfo(ei)\n self._parseAllDescriptorFiles()\n\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.assertEqual(len(self.bridge.transports), 0)\n self.assertIsNone(self.bridge.descriptors['extrainfo'])",
"def gattc_read_descriptor(\n self,\n descriptor: Union[_DescriptorHandle, _DescriptorTuple],\n /,\n ) -> bytes:\n ...",
"def _check_descriptor_dependencies(self, session, descriptor):\n if not descriptor.get(\"netslice-subnet\"):\n return\n for nsd in descriptor[\"netslice-subnet\"]:\n nsd_id = nsd[\"nsd-ref\"]\n filter_q = self._get_project_filter(session)\n filter_q[\"id\"] = nsd_id\n if not self.db.get_list(\"nsds\", filter_q):\n raise EngineException(\"Descriptor error at 'netslice-subnet':'nsd-ref'='{}' references a non \"\n \"existing nsd\".format(nsd_id), http_code=HTTPStatus.CONFLICT)",
"def get_image_info_from_digest(self, digest: str) -> Union[dict, None]:\n url = self.dockerd_socket_baseurl + \"/images/json\"\n try:\n response = self.session.get(url)\n except requests.exceptions.ConnectionError:\n emit.debug(\n \"Cannot connect to /var/run/docker.sock , please ensure dockerd is running.\",\n )\n return None\n\n if response.status_code != 200:\n emit.debug(f\"Bad response when validating local image: {response.status_code}\")\n return None\n\n for image_info in response.json():\n if image_info[\"RepoDigests\"] is None:\n continue\n if any(digest in repo_digest for repo_digest in image_info[\"RepoDigests\"]):\n return image_info\n return None",
"def _check_descriptor_dependencies(self, session, descriptor):\n if session[\"force\"]:\n return\n member_vnfd_index = {}\n if descriptor.get(\"constituent-vnfd\") and not session[\"force\"]:\n for vnf in descriptor[\"constituent-vnfd\"]:\n vnfd_id = vnf[\"vnfd-id-ref\"]\n filter_q = self._get_project_filter(session)\n filter_q[\"id\"] = vnfd_id\n vnf_list = self.db.get_list(\"vnfds\", filter_q)\n if not vnf_list:\n raise EngineException(\"Descriptor error at 'constituent-vnfd':'vnfd-id-ref'='{}' references a non \"\n \"existing vnfd\".format(vnfd_id), http_code=HTTPStatus.CONFLICT)\n # elif len(vnf_list) > 1:\n # raise EngineException(\"More than one vnfd found for id='{}'\".format(vnfd_id),\n # http_code=HTTPStatus.CONFLICT)\n member_vnfd_index[vnf[\"member-vnf-index\"]] = vnf_list[0]\n\n # Cross references validation in the descriptor and vnfd connection point validation\n for vld in get_iterable(descriptor.get(\"vld\")):\n for referenced_vnfd_cp in get_iterable(vld.get(\"vnfd-connection-point-ref\")):\n # look if this vnfd contains this connection point\n vnfd = member_vnfd_index.get(referenced_vnfd_cp[\"member-vnf-index-ref\"])\n for vnfd_cp in get_iterable(vnfd.get(\"connection-point\")):\n if referenced_vnfd_cp.get(\"vnfd-connection-point-ref\") == vnfd_cp[\"name\"]:\n break\n else:\n raise EngineException(\n \"Error at vld[id='{}']:vnfd-connection-point-ref[member-vnf-index-ref='{}']:vnfd-\"\n \"connection-point-ref='{}' references a non existing conection-point:name inside vnfd '{}'\"\n .format(vld[\"id\"], referenced_vnfd_cp[\"member-vnf-index-ref\"],\n referenced_vnfd_cp[\"vnfd-connection-point-ref\"], vnfd[\"id\"]),\n http_code=HTTPStatus.UNPROCESSABLE_ENTITY)",
"def test_create_server_invalid_flavor(self):\n\n newServer = self.os.servers.create(name=\"testserver2\",\n\t image=\"http://glance1:9292/v1/images/1\",\n\t flavor=\"http://172.19.0.3:8774/v1.1/flavors/99999999\")",
"def test_Bridge_updateFromExtraInfoDescriptor_pt_died(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.assertEqual(len(self.bridge.transports), 4)\n\n # Remove the obfs3 transport from the extrainfo descriptor:\n self.extrainfo.transport.pop('obfs3')\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.assertEqual(len(self.bridge.transports), 3)\n\n for pt in self.bridge.transports:\n self.failIfEqual(pt.methodname, 'obfs3')",
"def parse_digests(digests):\n def _atom(orig):\n s = orig\n if s.startswith(\"hash://\"):\n s = os.path.split(s[len(\"hash://\"):])[1]\n if ':' in s:\n # e.g. \"md5:asdaddas\"\n s = s.split(':')[-1]\n if '_' in s:\n # e.g. \"sha1_asdsads\"\n s = s.split('_')[-1]\n s = s.lower()\n res = {32: ('md5', s),\n 40: ('sha1', s),\n 64: ('sha256', s),\n 128: ('sha512', s)}.get(len(s), None)\n if not res:\n raise ValueError(\"invalid digest string: %s\" % (orig,))\n return res\n\n if isinstance(digests, (dict,)):\n return dict([_atom(v) for v in digests.values()])\n if not isinstance(digests, (tuple, list)):\n digests = (digests,)\n return dict([_atom(digest) for digest in digests])",
"def test_create_server_invalid_image(self):\n newServer = self.os.servers.create(name=\"testserver2\",\n\t image=\"http://glance1:9292/v1/images/9999\",\n\t flavor=\"http://172.19.0.3:8774/v1.1/flavors/3\")",
"def decode_sentinelhub_err_msg(response):\n try:\n server_message = []\n for elem in decode_data(response.content, MimeType.XML):\n if 'ServiceException' in elem.tag or 'Message' in elem.tag:\n server_message.append(elem.text.strip('\\n\\t '))\n return ''.join(server_message)\n except ElementTree.ParseError:\n return response.text",
"def test_01_server_reply_unparseable_reply(self):\n self.fake_sfile.reply_buf = ['not even remotely parseable\\r\\n']\n self.failUnlessRaises(gnats.GnatsNetworkException,\n self.conn._server_reply)",
"def test_bad_info_hash(self):\n\t\trequest = self._build_announce_request_object(\n\t\t\tinfo_hash='\\x98H\\x16\\xfd2\\x96\"\\x87n\\x14\\x90v4&No3.\\x9f\\xb2'\n\t\t)\n\t\tresponse_data = bencode.bdecode(views.announce(request).data)\n\t\tfailure = {\n\t\t\t'failure reason': 'info_hash not found in the database',\n\t\t\t'failure code': 200,\n\t\t}\n\t\tself.assertEqual(response_data, failure)",
"def test_Bridge_verifyExtraInfoSignature_good_signature(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.assertIsNone(self.bridge._verifyExtraInfoSignature(self.extrainfo))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Parsing a server descriptor when the corresponding networkstatus descriptor didn't include a server bridge.descriptorDigest should raise a MissingServerDescriptorDigest exception.
|
def test_Bridge_checkServerDescriptor_digest_missing(self):
        self.bridge.updateFromNetworkStatus(self.networkstatus)

        self.bridge.descriptorDigest = None
        self.assertRaises(bridges.MissingServerDescriptorDigest,
                          self.bridge._checkServerDescriptor,
                          self.serverdescriptor)
|
[
"def test_Bridge_checkServerDescriptor(self):\n # Create a networkstatus descriptor without a server descriptor digest:\n filename = self._networkstatusFile + \"-missing-digest\"\n fh = open(filename, 'w')\n invalid = BRIDGE_NETWORKSTATUS.replace(\"c4EVu2rO/iD/DJYBX/Ll38DGQWI\", \"foo\")\n fh.seek(0)\n fh.write(invalid)\n fh.flush()\n fh.close()\n\n realdigest = \"738115BB6ACEFE20FF0C96015FF2E5DFC0C64162\"\n\n #networkstatus = descriptors.parseNetworkStatusFile(filename)\n #self.bridge.updateFromNetworkStatus(networkstatus[0])\n #self.assertRaises(bridges.MissingServerDescriptorDigest,\n # self.bridge.updateFromNetworkStatus,\n # networkstatus[0])",
"def test_Bridge_checkServerDescriptor_digest_mismatch_sd(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n\n self.bridge.descriptorDigest = 'deadbeef'\n self.assertRaises(bridges.ServerDescriptorDigestMismatch,\n self.bridge._checkServerDescriptor,\n self.serverdescriptor)",
"def test_Bridge_checkServerDescriptor_digest_mismatch_ns(self):\n # Create a networkstatus descriptor without a server descriptor digest:\n filename = self._networkstatusFile + \"-mismatched-digest\"\n fh = open(filename, 'w')\n invalid = BRIDGE_NETWORKSTATUS.replace(\"c4EVu2rO/iD/DJYBX/Ll38DGQWI\",\n \"c4EVu2r1/iD/DJYBX/Ll38DGQWI\")\n fh.seek(0)\n fh.write(invalid)\n fh.flush()\n fh.close()\n\n realdigest = \"738115BB6ACEFE20FF0C96015FF2E5DFC0C64162\"\n networkstatus = descriptors.parseNetworkStatusFile(filename)\n self.bridge.updateFromNetworkStatus(networkstatus[0])\n #self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n\n self.assertRaises(bridges.ServerDescriptorDigestMismatch,\n self.bridge.updateFromServerDescriptor,\n self.serverdescriptor)",
"def test_Bridge_descriptorDigest(self):\n realdigest = \"738115BB6ACEFE20FF0C96015FF2E5DFC0C64162\"\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.assertEqual(self.bridge.descriptorDigest, realdigest)",
"def test_Bridge_updateFromServerDescriptor_no_networkstatus(self):\n self.assertRaises(bridges.ServerDescriptorWithoutNetworkstatus,\n self.bridge.updateFromServerDescriptor,\n self.serverdescriptor)",
"def test_Bridge_updateFromServerDescriptor_ignoreNetworkstatus_no_networkstatus(self):\n self.bridge.updateFromServerDescriptor(self.serverdescriptor,\n ignoreNetworkstatus=True)\n self.assertIsNone(self.bridge.descriptors['networkstatus'])\n self.assertIsNotNone(self.bridge.descriptors['server'])",
"def gattc_read_descriptor(\n self,\n descriptor: Union[_DescriptorHandle, _DescriptorTuple],\n /,\n ) -> bytes:\n ...",
"def _check_descriptor_dependencies(self, session, descriptor):\n if not descriptor.get(\"netslice-subnet\"):\n return\n for nsd in descriptor[\"netslice-subnet\"]:\n nsd_id = nsd[\"nsd-ref\"]\n filter_q = self._get_project_filter(session)\n filter_q[\"id\"] = nsd_id\n if not self.db.get_list(\"nsds\", filter_q):\n raise EngineException(\"Descriptor error at 'netslice-subnet':'nsd-ref'='{}' references a non \"\n \"existing nsd\".format(nsd_id), http_code=HTTPStatus.CONFLICT)",
"def get_image_info_from_digest(self, digest: str) -> Union[dict, None]:\n url = self.dockerd_socket_baseurl + \"/images/json\"\n try:\n response = self.session.get(url)\n except requests.exceptions.ConnectionError:\n emit.debug(\n \"Cannot connect to /var/run/docker.sock , please ensure dockerd is running.\",\n )\n return None\n\n if response.status_code != 200:\n emit.debug(f\"Bad response when validating local image: {response.status_code}\")\n return None\n\n for image_info in response.json():\n if image_info[\"RepoDigests\"] is None:\n continue\n if any(digest in repo_digest for repo_digest in image_info[\"RepoDigests\"]):\n return image_info\n return None",
"def get_descriptors( type_descriptor):\n log.debug(\"Get %s descriptors\", type_descriptor)\n try:\n client = Client()\n if type_descriptor == 'nsd':\n result = client.nsd_list()\n\n elif type_descriptor == 'vnfd':\n result = client.vnfd_list()\n\n except Exception as e:\n log.exception(e)\n result = {}\n return result",
"def test_Bridge_updateFromExtraInfoDescriptor_bad_signature_changed(self):\n # Make the signature uppercased\n BEGIN_SIG = '-----BEGIN SIGNATURE-----'\n doc, sig = BRIDGE_EXTRAINFO.split(BEGIN_SIG)\n ei = BEGIN_SIG.join([doc, sig.upper()])\n self._writeExtrainfo(ei)\n self._parseAllDescriptorFiles()\n\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.assertEqual(len(self.bridge.transports), 0)\n self.assertIsNone(self.bridge.descriptors['extrainfo'])",
"def _check_descriptor_dependencies(self, session, descriptor):\n if session[\"force\"]:\n return\n member_vnfd_index = {}\n if descriptor.get(\"constituent-vnfd\") and not session[\"force\"]:\n for vnf in descriptor[\"constituent-vnfd\"]:\n vnfd_id = vnf[\"vnfd-id-ref\"]\n filter_q = self._get_project_filter(session)\n filter_q[\"id\"] = vnfd_id\n vnf_list = self.db.get_list(\"vnfds\", filter_q)\n if not vnf_list:\n raise EngineException(\"Descriptor error at 'constituent-vnfd':'vnfd-id-ref'='{}' references a non \"\n \"existing vnfd\".format(vnfd_id), http_code=HTTPStatus.CONFLICT)\n # elif len(vnf_list) > 1:\n # raise EngineException(\"More than one vnfd found for id='{}'\".format(vnfd_id),\n # http_code=HTTPStatus.CONFLICT)\n member_vnfd_index[vnf[\"member-vnf-index\"]] = vnf_list[0]\n\n # Cross references validation in the descriptor and vnfd connection point validation\n for vld in get_iterable(descriptor.get(\"vld\")):\n for referenced_vnfd_cp in get_iterable(vld.get(\"vnfd-connection-point-ref\")):\n # look if this vnfd contains this connection point\n vnfd = member_vnfd_index.get(referenced_vnfd_cp[\"member-vnf-index-ref\"])\n for vnfd_cp in get_iterable(vnfd.get(\"connection-point\")):\n if referenced_vnfd_cp.get(\"vnfd-connection-point-ref\") == vnfd_cp[\"name\"]:\n break\n else:\n raise EngineException(\n \"Error at vld[id='{}']:vnfd-connection-point-ref[member-vnf-index-ref='{}']:vnfd-\"\n \"connection-point-ref='{}' references a non existing conection-point:name inside vnfd '{}'\"\n .format(vld[\"id\"], referenced_vnfd_cp[\"member-vnf-index-ref\"],\n referenced_vnfd_cp[\"vnfd-connection-point-ref\"], vnfd[\"id\"]),\n http_code=HTTPStatus.UNPROCESSABLE_ENTITY)",
"def test_Bridge_updateFromExtraInfoDescriptor_pt_died(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.assertEqual(len(self.bridge.transports), 4)\n\n # Remove the obfs3 transport from the extrainfo descriptor:\n self.extrainfo.transport.pop('obfs3')\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.assertEqual(len(self.bridge.transports), 3)\n\n for pt in self.bridge.transports:\n self.failIfEqual(pt.methodname, 'obfs3')",
"def make_server_description(server, hosts):\n ismaster_response = {}\n ismaster_response['tags'] = server['tags']\n ismaster_response['ok'] = True\n ismaster_response['hosts'] = hosts\n\n server_type = server['type']\n\n if server_type != \"Standalone\" and server_type != \"Mongos\":\n ismaster_response['setName'] = True\n if server_type == \"RSPrimary\":\n ismaster_response['ismaster'] = True\n elif server_type == \"RSSecondary\":\n ismaster_response['secondary'] = True\n elif server_type == \"Mongos\":\n ismaster_response['msg'] = 'isdbgrid'\n\n return ServerDescription(clean_node(server['address']),\n IsMaster(ismaster_response),\n round_trip_time=server['avg_rtt_ms'])",
"async def parse_node_server_defs(self, slot: str):\n _LOGGER.info(\"Parsing node server slot %s\", slot)\n node_server_profile = {\n key: value\n for (key, value) in self._profiles.items()\n if key.startswith(slot)\n }\n\n node_defs_impl = getDOMImplementation()\n editors_impl = getDOMImplementation()\n node_defs_xml = node_defs_impl.createDocument(None, TAG_ROOT, None)\n editors_xml = editors_impl.createDocument(None, TAG_ROOT, None)\n nls_lookup: dict = {}\n\n for file, contents in node_server_profile.items():\n contents_xml = \"\"\n file = file.lower()\n if file.endswith(\".xml\"):\n try:\n contents_xml = minidom.parseString(contents).firstChild\n except XML_ERRORS:\n _LOGGER.error(\n \"%s while parsing Node Server %s file %s\",\n XML_PARSE_ERROR,\n slot,\n file,\n )\n continue\n if \"nodedef\" in file:\n node_defs_xml.firstChild.appendChild(contents_xml)\n if \"editors\" in file:\n editors_xml.firstChild.appendChild(contents_xml)\n if \"nls\" in file and \"en_us\" in file:\n nls_list = [\n line\n for line in contents.split(\"\\n\")\n if not line.startswith(\"#\") and line != \"\"\n ]\n if nls_list:\n nls_lookup = dict(re.split(r\"\\s?=\\s?\", line) for line in nls_list)\n self._node_server_nls.append(\n NodeServerNLS(\n slot=slot,\n nls=nls_lookup,\n )\n )\n\n # Process Node Def Files\n node_defs = node_defs_xml.getElementsByTagName(TAG_NODE_DEF)\n for node_def in node_defs:\n node_def_id = attr_from_element(node_def, ATTR_ID)\n nls_prefix = attr_from_element(node_def, ATTR_NLS)\n sts = node_def.getElementsByTagName(TAG_ST)\n statuses = {}\n for st in sts:\n status_id = attr_from_element(st, ATTR_ID)\n editor = attr_from_element(st, ATTR_EDITOR)\n statuses.update({status_id: editor})\n\n cmds_sends = node_def.getElementsByTagName(TAG_SENDS)[0]\n cmds_accepts = node_def.getElementsByTagName(TAG_ACCEPTS)[0]\n cmds_sends_cmd = cmds_sends.getElementsByTagName(TAG_CMD)\n cmds_accepts_cmd = cmds_accepts.getElementsByTagName(TAG_CMD)\n sends_commands = []\n accepts_commands = []\n\n for cmd in cmds_sends_cmd:\n sends_commands.append(attr_from_element(cmd, ATTR_ID))\n for cmd in cmds_accepts_cmd:\n accepts_commands.append(attr_from_element(cmd, ATTR_ID))\n\n status_names = {}\n name = node_def_id\n if nls_lookup:\n if (name_key := f\"ND-{node_def_id}-NAME\") in nls_lookup:\n name = nls_lookup[name_key]\n for st in statuses:\n if (key := f\"ST-{nls_prefix}-{st}-NAME\") in nls_lookup:\n status_names.update({st: nls_lookup[key]})\n\n self._node_server_node_definitions.append(\n NodeServerNodeDefinition(\n node_def_id=node_def_id,\n name=name,\n nls_prefix=nls_prefix,\n slot=slot,\n statuses=statuses,\n status_names=status_names,\n sends_commands=sends_commands,\n accepts_commands=accepts_commands,\n )\n )\n # Process Editor Files\n editors = editors_xml.getElementsByTagName(ATTR_EDITOR)\n for editor in editors:\n editor_id = attr_from_element(editor, ATTR_ID)\n editor_range = editor.getElementsByTagName(TAG_RANGE)[0]\n uom = attr_from_element(editor_range, ATTR_UNIT_OF_MEASURE)\n subset = attr_from_element(editor_range, ATTR_SUBSET)\n nls = attr_from_element(editor_range, ATTR_NLS)\n\n values = None\n if nls_lookup and uom == \"25\":\n values = {\n key.partition(\"-\")[2]: value\n for (key, value) in nls_lookup.items()\n if key.startswith(nls)\n }\n\n self._node_server_node_editors.append(\n NodeServerNodeEditor(\n editor_id=editor_id,\n unit_of_measurement=uom,\n subset=subset,\n nls=nls,\n slot=slot,\n values=values,\n )\n )\n\n _LOGGER.debug(\"ISY parsed node server profiles\")",
"def distinguishingDescriptor(self, descriptor):\n if descriptor == slipnet.letter:\n return False\n if descriptor == slipnet.group:\n return False\n for number in slipnet.numbers:\n if number == descriptor:\n return False\n return True",
"def advapi32_IsValidSecurityDescriptor(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"pSecurityDescriptor\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def _inspect_descriptor(descriptor):\n # TODO memoize to cache these results\n data_keys = descriptor.data_keys\n is_external = defaultdict(lambda: False)\n for data_key, data_key_dict in data_keys.items():\n if (data_key_dict and 'external' in data_key_dict):\n is_external[data_key] = bool(data_key_dict['external'])\n return is_external",
"def get_server_status(server):\n\n global green\n global yellow\n global red\n\n try:\n health = get('https://{host}:{port}/status'.format(host=server[1], port=server[2]), timeout=0.4, verify=False).json()\n cpu = health['cpu']\n ram = health['ram']\n\n if cpu > 85 or ram > 85:\n icon = 'yellow.png'\n yellow += 1\n else:\n icon = 'green.png'\n green += 1\n\n row = '''<tr><td><a href=\"server?server={name}\">{name}</a></td><td>{host}:{port}\n </td><td>{cpu}%</td><td>{ram}%</td><td><img src=\"images/{icon}\"></td><td>\n <a onclick=\"return confirm('Are you sure you want to remove this server?')\"\n href=\"remove-server?server={name}\">X</a></td></tr>'''\\\n .format(name=server[0], host=server[1], port=server[2], cpu=str(cpu), ram=str(ram), icon=icon)\n\n return row\n\n except (exceptions.RequestException, ValueError):\n row = '''<tr><td><a href=\"server?server={name}\">{name}</a></td><td>{host}:{port}</td><td>N/A</td><td>N/A</td><td>\n <img src=\"images/red.png\"></td><td><a onclick=\"return confirm('Are you sure you want to remove this server?')\"\n href=\"remove-server?server={name}\">X</a></td></tr>'''.format(name=server[0], host=server[1], port=server[2])\n\n red += 1\n\n return row\n\n except KeyError:\n row = '''<tr><td><a href=\"server?server={name}\">{name}</a></td><td>{host}:{port}</td><td>N/A</td><td>N/A</td>\n <td>Not Auth</td><td><a onclick=\"return confirm('Are you sure you want to remove this server?')\"\n href=\"remove-server?server={name}\">X</a></td></tr>'''.format(name=server[0], host=server[1], port=server[2])\n\n red += 1\n\n return row"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Calling getBridgeLine with a valid request should return a bridge line.
|
def test_Bridge_getBridgeLine_request_valid(self):
        self.bridge.updateFromNetworkStatus(self.networkstatus)
        self.bridge.updateFromServerDescriptor(self.serverdescriptor)
        self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)

        request = BridgeRequestBase()
        request.isValid(True)
        line = self.bridge.getBridgeLine(request)

        self.assertIsNotNone(line)
        self.assertIn('179.178.155.140:36489', line)
        self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)
|
[
"def test_Bridge_getBridgeLine_no_include_fingerprint(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n line = self.bridge.getBridgeLine(request, includeFingerprint=False)\n\n self.assertIsNotNone(line)\n self.assertIn('179.178.155.140:36489', line)\n self.assertNotIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_no_vanilla_addresses(self):\n request = BridgeRequestBase()\n request.isValid(True)\n\n self.assertIsNone(self.bridge.getBridgeLine(request))",
"def test_Bridge_getBridgeLine_request_without_block_in_IR(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertIn('179.178.155.140:36489', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_bridge_prefix(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n line = self.bridge.getBridgeLine(request, bridgePrefix=True)\n\n self.assertIsNotNone(line)\n self.assertIn('179.178.155.140:36489', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)\n self.assertTrue(line.startswith('Bridge'))",
"def test_Bridge_getBridgeLine_request_invalid(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(False)\n\n self.assertIsNone(self.bridge.getBridgeLine(request))",
"def test_Bridge_getBridgeLine_blocked_and_request_without_block(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('ir')\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNone(line)",
"def test_Bridge_getBridgeLine_IPv6(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withIPv6()\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertTrue(\n line.startswith('[6bf3:806b:78cd:d4b4:f6a7:4ced:cfad:dad4]:36488'))\n self.assertNotIn('179.178.155.140:36493', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_IPv6_no_fingerprint(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withIPv6()\n line = self.bridge.getBridgeLine(request, includeFingerprint=False)\n\n self.assertIsNotNone(line)\n self.assertTrue(\n line.startswith('[6bf3:806b:78cd:d4b4:f6a7:4ced:cfad:dad4]:36488'))\n self.assertNotIn('179.178.155.140:36493', line)\n self.assertNotIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_blocked_obfs3_and_request_without_block_obfs4(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('ir', methodname=\"obfs3\")\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n request.withPluggableTransportType('obfs4')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertIn('obfs4', line)\n self.assertIn('179.178.155.140:36493', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_blocked_pt_and_request_without_block_pt(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('ir', methodname=\"obfs3\")\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n request.withPluggableTransportType('obfs3')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNone(line)",
"def test_Bridge_getBridgeLine_googlygooglybegone(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withPluggableTransportType('googlygooglybegone')\n\n self.assertRaises(bridges.PluggableTransportUnavailable,\n self.bridge.getBridgeLine,\n request)",
"def test_Bridge_constructBridgeLine_IPv6(self):\n bridge = bridges.Bridge()\n addrport = (u'6bf3:806b:78cd::4ced:cfad:dad4', 36488, 6)\n\n bridgeline = bridge._constructBridgeLine(addrport,\n includeFingerprint=False,\n bridgePrefix=True)\n self.assertEqual(bridgeline, 'Bridge [6bf3:806b:78cd::4ced:cfad:dad4]:36488')",
"def test_Bridge_getBridgeLine_obfs3_IPv6(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withIPv6()\n request.withPluggableTransportType('obfs3')\n\n self.assertRaises(bridges.PluggableTransportUnavailable,\n self.bridge.getBridgeLine,\n request)",
"def test_integration_getConfigLine_vanilla_withFingerprint(self):\n bridge = bridges.Bridge('fpr', '23.23.23.23', 2323,\n id_digest=self.id_digest,\n or_addresses=self.or_addresses)\n bridgeLine = bridge.getConfigLine(includeFingerprint=True)\n self.assertIsNotNone(bridgeLine)\n self.assertSubstring(self.fingerprint, bridgeLine)\n ip = bridgeLine.split(':')[0]\n self.assertTrue(ipaddr.IPAddress(ip))",
"def test_integration_getConfigLine_vanilla_withoutFingerprint(self):\n #self.skip = True\n bridge = bridges.Bridge('nofpr', '23.23.23.23', 2323, self.fingerprint,\n or_addresses=self.or_addresses)\n bridgeLine = bridge.getConfigLine()\n ip = bridgeLine.split(':')[0]\n self.assertTrue(ipaddr.IPAddress(ip))",
"def test_PluggableTransport_getTransportLine_bridge_prefix(self):\n pt = bridges.PluggableTransport(self.fingerprint,\n \"voltronPT\", \"1.2.3.4\", 443,\n {'sharedsecret': 'foobar',\n 'password': 'unicorns'})\n bridgeLine = pt.getTransportLine(bridgePrefix=True)\n self.assertTrue(bridgeLine.startswith(\"Bridge \"))",
"def getline(self, bno):\r\n return self.breakpt[bno]['line']",
"def line_flight(self, line):\n ret_val = self._line_flight(line)\n return ret_val",
"def draw_bridge_over(self, tile, rotation, payload, track_type, has_tram, source_tile_owner):\n\n bec = self.torb_edge_rgb\n bew = self.bridge_edge_width\n d = self.ss\n bd = 0.25 * d\n\n self.transform_to_tile(tile, rotation)\n\n self.draw_line(-0.5 * d, -bd, 0.5 * d, -bd, bec, bew)\n self.draw_line(-0.5 * d, bd, 0.5 * d, bd, bec, bew)\n\n if payload == \"road\":\n self.draw_road_line(\n -0.5 * d, 0, 0.5 * d, 0,\n line_mode=\"both\", owner=source_tile_owner\n )\n if has_tram:\n self.draw_tram_line(-0.5 * d, 0, 0.5 * d, 0, owner=source_tile_owner)\n else:\n self.draw_rail_line(\n -0.5 * d, 0, 0.5 * d, 0, track_type,\n line_mode=\"both\", owner=source_tile_owner\n )\n\n self.end_transform_to_tile()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Calling getBridgeLine with an invalid request should return None.
|
def test_Bridge_getBridgeLine_request_invalid(self):
        self.bridge.updateFromNetworkStatus(self.networkstatus)
        self.bridge.updateFromServerDescriptor(self.serverdescriptor)
        self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)

        request = BridgeRequestBase()
        request.isValid(False)

        self.assertIsNone(self.bridge.getBridgeLine(request))
|
[
"def test_Bridge_getBridgeLine_no_vanilla_addresses(self):\n request = BridgeRequestBase()\n request.isValid(True)\n\n self.assertIsNone(self.bridge.getBridgeLine(request))",
"def test_Bridge_getBridgeLine_no_include_fingerprint(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n line = self.bridge.getBridgeLine(request, includeFingerprint=False)\n\n self.assertIsNotNone(line)\n self.assertIn('179.178.155.140:36489', line)\n self.assertNotIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_request_without_block_in_IR(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertIn('179.178.155.140:36489', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_blocked_and_request_without_block(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('ir')\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNone(line)",
"def test_Bridge_getBridgeLine_request_valid(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertIn('179.178.155.140:36489', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_blocked_pt_and_request_without_block_pt(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('ir', methodname=\"obfs3\")\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n request.withPluggableTransportType('obfs3')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNone(line)",
"def test_Bridge_getBridgeLine_IPv6_no_fingerprint(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withIPv6()\n line = self.bridge.getBridgeLine(request, includeFingerprint=False)\n\n self.assertIsNotNone(line)\n self.assertTrue(\n line.startswith('[6bf3:806b:78cd:d4b4:f6a7:4ced:cfad:dad4]:36488'))\n self.assertNotIn('179.178.155.140:36493', line)\n self.assertNotIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_blocked_obfs3_and_request_without_block_obfs4(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('ir', methodname=\"obfs3\")\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n request.withPluggableTransportType('obfs4')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertIn('obfs4', line)\n self.assertIn('179.178.155.140:36493', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_googlygooglybegone(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withPluggableTransportType('googlygooglybegone')\n\n self.assertRaises(bridges.PluggableTransportUnavailable,\n self.bridge.getBridgeLine,\n request)",
"def test_Bridge_getBridgeLine_bridge_prefix(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n line = self.bridge.getBridgeLine(request, bridgePrefix=True)\n\n self.assertIsNotNone(line)\n self.assertIn('179.178.155.140:36489', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)\n self.assertTrue(line.startswith('Bridge'))",
"def test_Bridge_getBridgeLine_IPv6(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withIPv6()\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertTrue(\n line.startswith('[6bf3:806b:78cd:d4b4:f6a7:4ced:cfad:dad4]:36488'))\n self.assertNotIn('179.178.155.140:36493', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_integration_getConfigLine_vanilla_withoutFingerprint(self):\n #self.skip = True\n bridge = bridges.Bridge('nofpr', '23.23.23.23', 2323, self.fingerprint,\n or_addresses=self.or_addresses)\n bridgeLine = bridge.getConfigLine()\n ip = bridgeLine.split(':')[0]\n self.assertTrue(ipaddr.IPAddress(ip))",
"def test_Bridge_getBridgeLine_obfs3_IPv6(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withIPv6()\n request.withPluggableTransportType('obfs3')\n\n self.assertRaises(bridges.PluggableTransportUnavailable,\n self.bridge.getBridgeLine,\n request)",
"def test_integration_getConfigLine_vanilla_withFingerprint(self):\n bridge = bridges.Bridge('fpr', '23.23.23.23', 2323,\n id_digest=self.id_digest,\n or_addresses=self.or_addresses)\n bridgeLine = bridge.getConfigLine(includeFingerprint=True)\n self.assertIsNotNone(bridgeLine)\n self.assertSubstring(self.fingerprint, bridgeLine)\n ip = bridgeLine.split(':')[0]\n self.assertTrue(ipaddr.IPAddress(ip))",
"def getline(self, bno):\r\n return self.breakpt[bno]['line']",
"def test_Bridge_constructBridgeLine_IPv6(self):\n bridge = bridges.Bridge()\n addrport = (u'6bf3:806b:78cd::4ced:cfad:dad4', 36488, 6)\n\n bridgeline = bridge._constructBridgeLine(addrport,\n includeFingerprint=False,\n bridgePrefix=True)\n self.assertEqual(bridgeline, 'Bridge [6bf3:806b:78cd::4ced:cfad:dad4]:36488')",
"def test_PluggableTransport_getTransportLine_bridge_prefix(self):\n pt = bridges.PluggableTransport(self.fingerprint,\n \"voltronPT\", \"1.2.3.4\", 443,\n {'sharedsecret': 'foobar',\n 'password': 'unicorns'})\n bridgeLine = pt.getTransportLine(bridgePrefix=True)\n self.assertTrue(bridgeLine.startswith(\"Bridge \"))",
"def get_line(node: Any) -> Optional[int]:\n if node is None:\n return None\n if node.line is not None:\n return node.line\n else:\n return get_line(node.parent)",
"def draw_unknown_bridge_over(self, tiles, tile, rotation, line_mode=\"outer\"):\n\n source_tile = self.seek_bridge_ramp(tiles, tile.row, tile.col, rotation)\n payload_kind = 0\n track_type = 0\n has_tram = False\n source_tile_owner = None\n\n if source_tile:\n payload_kind = source_tile.occupant.payload_kind\n source_tile_owner = source_tile.owner\n if payload_kind == 0:\n track_type = source_tile.occupant.track_type\n if source_tile.occupant.tram_type == 1:\n has_tram = True\n\n if payload_kind == 0:\n self.draw_rail_bridge_over(tile, rotation, track_type, source_tile_owner)\n elif payload_kind == 1:\n self.draw_road_bridge_over(tile, rotation, has_tram, source_tile_owner)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Calling getBridgeLine() on a Bridge without any vanilla addresses should return None.
|
def test_Bridge_getBridgeLine_no_vanilla_addresses(self):
        request = BridgeRequestBase()
        request.isValid(True)

        self.assertIsNone(self.bridge.getBridgeLine(request))
|
[
"def test_Bridge_getBridgeLine_no_include_fingerprint(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n line = self.bridge.getBridgeLine(request, includeFingerprint=False)\n\n self.assertIsNotNone(line)\n self.assertIn('179.178.155.140:36489', line)\n self.assertNotIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_IPv6_no_fingerprint(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withIPv6()\n line = self.bridge.getBridgeLine(request, includeFingerprint=False)\n\n self.assertIsNotNone(line)\n self.assertTrue(\n line.startswith('[6bf3:806b:78cd:d4b4:f6a7:4ced:cfad:dad4]:36488'))\n self.assertNotIn('179.178.155.140:36493', line)\n self.assertNotIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_integration_getConfigLine_vanilla_withoutFingerprint(self):\n #self.skip = True\n bridge = bridges.Bridge('nofpr', '23.23.23.23', 2323, self.fingerprint,\n or_addresses=self.or_addresses)\n bridgeLine = bridge.getConfigLine()\n ip = bridgeLine.split(':')[0]\n self.assertTrue(ipaddr.IPAddress(ip))",
"def test_Bridge_getBridgeLine_request_without_block_in_IR(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertIn('179.178.155.140:36489', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_IPv6(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withIPv6()\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertTrue(\n line.startswith('[6bf3:806b:78cd:d4b4:f6a7:4ced:cfad:dad4]:36488'))\n self.assertNotIn('179.178.155.140:36493', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_bridge_prefix(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n line = self.bridge.getBridgeLine(request, bridgePrefix=True)\n\n self.assertIsNotNone(line)\n self.assertIn('179.178.155.140:36489', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)\n self.assertTrue(line.startswith('Bridge'))",
"def test_Bridge_getBridgeLine_blocked_and_request_without_block(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('ir')\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNone(line)",
"def test_Bridge_getBridgeLine_blocked_pt_and_request_without_block_pt(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('ir', methodname=\"obfs3\")\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n request.withPluggableTransportType('obfs3')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNone(line)",
"def test_Bridge_getBridgeLine_request_valid(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertIn('179.178.155.140:36489', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_integration_getConfigLine_vanilla_withFingerprint(self):\n bridge = bridges.Bridge('fpr', '23.23.23.23', 2323,\n id_digest=self.id_digest,\n or_addresses=self.or_addresses)\n bridgeLine = bridge.getConfigLine(includeFingerprint=True)\n self.assertIsNotNone(bridgeLine)\n self.assertSubstring(self.fingerprint, bridgeLine)\n ip = bridgeLine.split(':')[0]\n self.assertTrue(ipaddr.IPAddress(ip))",
"def test_Bridge_getBridgeLine_blocked_obfs3_and_request_without_block_obfs4(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('ir', methodname=\"obfs3\")\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n request.withPluggableTransportType('obfs4')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertIn('obfs4', line)\n self.assertIn('179.178.155.140:36493', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_request_invalid(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(False)\n\n self.assertIsNone(self.bridge.getBridgeLine(request))",
"def test_Bridge_getBridgeLine_googlygooglybegone(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withPluggableTransportType('googlygooglybegone')\n\n self.assertRaises(bridges.PluggableTransportUnavailable,\n self.bridge.getBridgeLine,\n request)",
"def test_Bridge_constructBridgeLine_IPv6(self):\n bridge = bridges.Bridge()\n addrport = (u'6bf3:806b:78cd::4ced:cfad:dad4', 36488, 6)\n\n bridgeline = bridge._constructBridgeLine(addrport,\n includeFingerprint=False,\n bridgePrefix=True)\n self.assertEqual(bridgeline, 'Bridge [6bf3:806b:78cd::4ced:cfad:dad4]:36488')",
"def test_Bridge_getBridgeLine_obfs3_IPv6(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withIPv6()\n request.withPluggableTransportType('obfs3')\n\n self.assertRaises(bridges.PluggableTransportUnavailable,\n self.bridge.getBridgeLine,\n request)",
"def test_PluggableTransport_getTransportLine_bridge_prefix(self):\n pt = bridges.PluggableTransport(self.fingerprint,\n \"voltronPT\", \"1.2.3.4\", 443,\n {'sharedsecret': 'foobar',\n 'password': 'unicorns'})\n bridgeLine = pt.getTransportLine(bridgePrefix=True)\n self.assertTrue(bridgeLine.startswith(\"Bridge \"))",
"def find_bridge(source, destination):\n \n for b in source.bridges:\n if destination in b.get_edges():\n return b\n return None",
"def get_bridge_ip(hue_nupnp):\r\n try:\r\n response = requests.get(hue_nupnp)\r\n return response.json()[0]['internalipaddress']\r\n except:\r\n sys.exit('Could not resolve Hue Bridge IP address. Please ensure your bridge is connected')",
"def getline(self, bno):\r\n return self.breakpt[bno]['line']"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Calling getBridgeLine() with a valid request for bridges not blocked in Iran should return a bridge line.
|
def test_Bridge_getBridgeLine_request_without_block_in_IR(self):
        self.bridge.updateFromNetworkStatus(self.networkstatus)
        self.bridge.updateFromServerDescriptor(self.serverdescriptor)
        self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)

        request = BridgeRequestBase()
        request.isValid(True)
        request.withoutBlockInCountry('IR')
        line = self.bridge.getBridgeLine(request)

        self.assertIsNotNone(line)
        self.assertIn('179.178.155.140:36489', line)
        self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)
|
[
"def test_Bridge_getBridgeLine_blocked_and_request_without_block(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('ir')\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNone(line)",
"def test_Bridge_getBridgeLine_blocked_pt_and_request_without_block_pt(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('ir', methodname=\"obfs3\")\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n request.withPluggableTransportType('obfs3')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNone(line)",
"def test_Bridge_getBridgeLine_blocked_obfs3_and_request_without_block_obfs4(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('ir', methodname=\"obfs3\")\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n request.withPluggableTransportType('obfs4')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertIn('obfs4', line)\n self.assertIn('179.178.155.140:36493', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_no_vanilla_addresses(self):\n request = BridgeRequestBase()\n request.isValid(True)\n\n self.assertIsNone(self.bridge.getBridgeLine(request))",
"def test_Bridge_getBridgeLine_no_include_fingerprint(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n line = self.bridge.getBridgeLine(request, includeFingerprint=False)\n\n self.assertIsNotNone(line)\n self.assertIn('179.178.155.140:36489', line)\n self.assertNotIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_request_valid(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertIn('179.178.155.140:36489', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_request_invalid(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(False)\n\n self.assertIsNone(self.bridge.getBridgeLine(request))",
"def test_Bridge_getBridgeLine_bridge_prefix(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n line = self.bridge.getBridgeLine(request, bridgePrefix=True)\n\n self.assertIsNotNone(line)\n self.assertIn('179.178.155.140:36489', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)\n self.assertTrue(line.startswith('Bridge'))",
"def test_Bridge_getBridgeLine_IPv6_no_fingerprint(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withIPv6()\n line = self.bridge.getBridgeLine(request, includeFingerprint=False)\n\n self.assertIsNotNone(line)\n self.assertTrue(\n line.startswith('[6bf3:806b:78cd:d4b4:f6a7:4ced:cfad:dad4]:36488'))\n self.assertNotIn('179.178.155.140:36493', line)\n self.assertNotIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_IPv6(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withIPv6()\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertTrue(\n line.startswith('[6bf3:806b:78cd:d4b4:f6a7:4ced:cfad:dad4]:36488'))\n self.assertNotIn('179.178.155.140:36493', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_googlygooglybegone(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withPluggableTransportType('googlygooglybegone')\n\n self.assertRaises(bridges.PluggableTransportUnavailable,\n self.bridge.getBridgeLine,\n request)",
"def test_integration_getConfigLine_vanilla_withoutFingerprint(self):\n #self.skip = True\n bridge = bridges.Bridge('nofpr', '23.23.23.23', 2323, self.fingerprint,\n or_addresses=self.or_addresses)\n bridgeLine = bridge.getConfigLine()\n ip = bridgeLine.split(':')[0]\n self.assertTrue(ipaddr.IPAddress(ip))",
"def test_Bridge_getBridgeLine_obfs3_IPv6(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withIPv6()\n request.withPluggableTransportType('obfs3')\n\n self.assertRaises(bridges.PluggableTransportUnavailable,\n self.bridge.getBridgeLine,\n request)",
"def test_Bridge_setBlockedIn_IR_address(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('IR', address='179.178.155.140')\n self.assertTrue(self.bridge.isBlockedIn('ir'))\n self.assertFalse(self.bridge.isBlockedIn('cn'))",
"def test_integration_getConfigLine_vanilla_withFingerprint(self):\n bridge = bridges.Bridge('fpr', '23.23.23.23', 2323,\n id_digest=self.id_digest,\n or_addresses=self.or_addresses)\n bridgeLine = bridge.getConfigLine(includeFingerprint=True)\n self.assertIsNotNone(bridgeLine)\n self.assertSubstring(self.fingerprint, bridgeLine)\n ip = bridgeLine.split(':')[0]\n self.assertTrue(ipaddr.IPAddress(ip))",
"def test_Bridge_constructBridgeLine_IPv6(self):\n bridge = bridges.Bridge()\n addrport = (u'6bf3:806b:78cd::4ced:cfad:dad4', 36488, 6)\n\n bridgeline = bridge._constructBridgeLine(addrport,\n includeFingerprint=False,\n bridgePrefix=True)\n self.assertEqual(bridgeline, 'Bridge [6bf3:806b:78cd::4ced:cfad:dad4]:36488')",
"def test_PluggableTransport_getTransportLine_bridge_prefix(self):\n pt = bridges.PluggableTransport(self.fingerprint,\n \"voltronPT\", \"1.2.3.4\", 443,\n {'sharedsecret': 'foobar',\n 'password': 'unicorns'})\n bridgeLine = pt.getTransportLine(bridgePrefix=True)\n self.assertTrue(bridgeLine.startswith(\"Bridge \"))",
"def is_bridge(self):\r\n # Since this does not change over time (?) check whether we already\r\n # know the answer. If so, there is no need to go further\r\n if self._is_bridge is not None:\r\n return self._is_bridge\r\n # if not, we have to get it from the zone topology. This will set\r\n # self._is_bridge for us for next time, so we won't have to do this\r\n # again\r\n self._parse_zone_group_state()\r\n return self._is_bridge",
"def getline(self, bno):\r\n return self.breakpt[bno]['line']"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Calling getBridgeLine() with a valid request for bridges not blocked in Iran, when the bridge is completely blocked in Iran, shouldn't return a bridge line.
|
def test_Bridge_getBridgeLine_blocked_and_request_without_block(self):
        self.bridge.updateFromNetworkStatus(self.networkstatus)
        self.bridge.updateFromServerDescriptor(self.serverdescriptor)
        self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)

        self.bridge.setBlockedIn('ir')

        request = BridgeRequestBase()
        request.isValid(True)
        request.withoutBlockInCountry('IR')
        line = self.bridge.getBridgeLine(request)

        self.assertIsNone(line)
|
[
"def test_Bridge_getBridgeLine_request_without_block_in_IR(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertIn('179.178.155.140:36489', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_blocked_pt_and_request_without_block_pt(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('ir', methodname=\"obfs3\")\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n request.withPluggableTransportType('obfs3')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNone(line)",
"def test_Bridge_getBridgeLine_blocked_obfs3_and_request_without_block_obfs4(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('ir', methodname=\"obfs3\")\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n request.withPluggableTransportType('obfs4')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertIn('obfs4', line)\n self.assertIn('179.178.155.140:36493', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_no_include_fingerprint(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n line = self.bridge.getBridgeLine(request, includeFingerprint=False)\n\n self.assertIsNotNone(line)\n self.assertIn('179.178.155.140:36489', line)\n self.assertNotIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_no_vanilla_addresses(self):\n request = BridgeRequestBase()\n request.isValid(True)\n\n self.assertIsNone(self.bridge.getBridgeLine(request))",
"def test_Bridge_getBridgeLine_request_invalid(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(False)\n\n self.assertIsNone(self.bridge.getBridgeLine(request))",
"def test_Bridge_getBridgeLine_request_valid(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertIn('179.178.155.140:36489', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_IPv6_no_fingerprint(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withIPv6()\n line = self.bridge.getBridgeLine(request, includeFingerprint=False)\n\n self.assertIsNotNone(line)\n self.assertTrue(\n line.startswith('[6bf3:806b:78cd:d4b4:f6a7:4ced:cfad:dad4]:36488'))\n self.assertNotIn('179.178.155.140:36493', line)\n self.assertNotIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_bridge_prefix(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n line = self.bridge.getBridgeLine(request, bridgePrefix=True)\n\n self.assertIsNotNone(line)\n self.assertIn('179.178.155.140:36489', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)\n self.assertTrue(line.startswith('Bridge'))",
"def test_Bridge_getBridgeLine_googlygooglybegone(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withPluggableTransportType('googlygooglybegone')\n\n self.assertRaises(bridges.PluggableTransportUnavailable,\n self.bridge.getBridgeLine,\n request)",
"def test_Bridge_getBridgeLine_IPv6(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withIPv6()\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertTrue(\n line.startswith('[6bf3:806b:78cd:d4b4:f6a7:4ced:cfad:dad4]:36488'))\n self.assertNotIn('179.178.155.140:36493', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_setBlockedIn_IR_address(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('IR', address='179.178.155.140')\n self.assertTrue(self.bridge.isBlockedIn('ir'))\n self.assertFalse(self.bridge.isBlockedIn('cn'))",
"def test_integration_getConfigLine_vanilla_withoutFingerprint(self):\n #self.skip = True\n bridge = bridges.Bridge('nofpr', '23.23.23.23', 2323, self.fingerprint,\n or_addresses=self.or_addresses)\n bridgeLine = bridge.getConfigLine()\n ip = bridgeLine.split(':')[0]\n self.assertTrue(ipaddr.IPAddress(ip))",
"def test_Bridge_getBridgeLine_obfs3_IPv6(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withIPv6()\n request.withPluggableTransportType('obfs3')\n\n self.assertRaises(bridges.PluggableTransportUnavailable,\n self.bridge.getBridgeLine,\n request)",
"def test_Bridge_isBlockedIn_IS(self):\n self.assertFalse(self.bridge.isBlockedIn('IS'))",
"def test_integration_getConfigLine_vanilla_withFingerprint(self):\n bridge = bridges.Bridge('fpr', '23.23.23.23', 2323,\n id_digest=self.id_digest,\n or_addresses=self.or_addresses)\n bridgeLine = bridge.getConfigLine(includeFingerprint=True)\n self.assertIsNotNone(bridgeLine)\n self.assertSubstring(self.fingerprint, bridgeLine)\n ip = bridgeLine.split(':')[0]\n self.assertTrue(ipaddr.IPAddress(ip))",
"def test_PluggableTransport_getTransportLine_bridge_prefix(self):\n pt = bridges.PluggableTransport(self.fingerprint,\n \"voltronPT\", \"1.2.3.4\", 443,\n {'sharedsecret': 'foobar',\n 'password': 'unicorns'})\n bridgeLine = pt.getTransportLine(bridgePrefix=True)\n self.assertTrue(bridgeLine.startswith(\"Bridge \"))",
"def test_Bridge_constructBridgeLine_IPv6(self):\n bridge = bridges.Bridge()\n addrport = (u'6bf3:806b:78cd::4ced:cfad:dad4', 36488, 6)\n\n bridgeline = bridge._constructBridgeLine(addrport,\n includeFingerprint=False,\n bridgePrefix=True)\n self.assertEqual(bridgeline, 'Bridge [6bf3:806b:78cd::4ced:cfad:dad4]:36488')",
"def test_Bridge_setBlockedIn_GB_address_port(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n # Should block the obfs4 bridge:\n self.bridge.setBlockedIn('GB', address='179.178.155.140', port=36493)\n self.assertTrue(self.bridge.isBlockedIn('GB'))\n self.assertTrue(self.bridge.isBlockedIn('gb'))\n self.assertTrue(self.bridge.transportIsBlockedIn('GB', 'obfs4'))\n self.assertTrue(self.bridge.addressIsBlockedIn('GB', '179.178.155.140', 36493))\n self.assertFalse(self.bridge.addressIsBlockedIn('gb', '179.178.155.140', 36488))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Calling getBridgeLine() with a valid request for obfs3 bridges not blocked in Iran, when the obfs3 line is blocked in Iran, shouldn't return a bridge line.
|
def test_Bridge_getBridgeLine_blocked_pt_and_request_without_block_pt(self):
        self.bridge.updateFromNetworkStatus(self.networkstatus)
        self.bridge.updateFromServerDescriptor(self.serverdescriptor)
        self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)

        self.bridge.setBlockedIn('ir', methodname="obfs3")

        request = BridgeRequestBase()
        request.isValid(True)
        request.withoutBlockInCountry('IR')
        request.withPluggableTransportType('obfs3')
        line = self.bridge.getBridgeLine(request)

        self.assertIsNone(line)
|
[
"def test_Bridge_getBridgeLine_blocked_obfs3_and_request_without_block_obfs4(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('ir', methodname=\"obfs3\")\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n request.withPluggableTransportType('obfs4')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertIn('obfs4', line)\n self.assertIn('179.178.155.140:36493', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_blocked_and_request_without_block(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('ir')\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNone(line)",
"def test_Bridge_getBridgeLine_request_without_block_in_IR(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertIn('179.178.155.140:36489', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_no_include_fingerprint(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n line = self.bridge.getBridgeLine(request, includeFingerprint=False)\n\n self.assertIsNotNone(line)\n self.assertIn('179.178.155.140:36489', line)\n self.assertNotIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_obfs3_IPv6(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withIPv6()\n request.withPluggableTransportType('obfs3')\n\n self.assertRaises(bridges.PluggableTransportUnavailable,\n self.bridge.getBridgeLine,\n request)",
"def test_Bridge_getBridgeLine_IPv6_no_fingerprint(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withIPv6()\n line = self.bridge.getBridgeLine(request, includeFingerprint=False)\n\n self.assertIsNotNone(line)\n self.assertTrue(\n line.startswith('[6bf3:806b:78cd:d4b4:f6a7:4ced:cfad:dad4]:36488'))\n self.assertNotIn('179.178.155.140:36493', line)\n self.assertNotIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_no_vanilla_addresses(self):\n request = BridgeRequestBase()\n request.isValid(True)\n\n self.assertIsNone(self.bridge.getBridgeLine(request))",
"def test_Bridge_getBridgeLine_request_invalid(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(False)\n\n self.assertIsNone(self.bridge.getBridgeLine(request))",
"def test_Bridge_getBridgeLine_request_valid(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertIn('179.178.155.140:36489', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_IPv6(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withIPv6()\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertTrue(\n line.startswith('[6bf3:806b:78cd:d4b4:f6a7:4ced:cfad:dad4]:36488'))\n self.assertNotIn('179.178.155.140:36493', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_googlygooglybegone(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withPluggableTransportType('googlygooglybegone')\n\n self.assertRaises(bridges.PluggableTransportUnavailable,\n self.bridge.getBridgeLine,\n request)",
"def test_Bridge_getBridgeLine_bridge_prefix(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n line = self.bridge.getBridgeLine(request, bridgePrefix=True)\n\n self.assertIsNotNone(line)\n self.assertIn('179.178.155.140:36489', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)\n self.assertTrue(line.startswith('Bridge'))",
"def test_integration_getConfigLine_vanilla_withoutFingerprint(self):\n #self.skip = True\n bridge = bridges.Bridge('nofpr', '23.23.23.23', 2323, self.fingerprint,\n or_addresses=self.or_addresses)\n bridgeLine = bridge.getConfigLine()\n ip = bridgeLine.split(':')[0]\n self.assertTrue(ipaddr.IPAddress(ip))",
"def test_Bridge_setBlockedIn_IR_address(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('IR', address='179.178.155.140')\n self.assertTrue(self.bridge.isBlockedIn('ir'))\n self.assertFalse(self.bridge.isBlockedIn('cn'))",
"def test_Bridge_constructBridgeLine_IPv6(self):\n bridge = bridges.Bridge()\n addrport = (u'6bf3:806b:78cd::4ced:cfad:dad4', 36488, 6)\n\n bridgeline = bridge._constructBridgeLine(addrport,\n includeFingerprint=False,\n bridgePrefix=True)\n self.assertEqual(bridgeline, 'Bridge [6bf3:806b:78cd::4ced:cfad:dad4]:36488')",
"def test_PluggableTransport_getTransportLine_bridge_prefix(self):\n pt = bridges.PluggableTransport(self.fingerprint,\n \"voltronPT\", \"1.2.3.4\", 443,\n {'sharedsecret': 'foobar',\n 'password': 'unicorns'})\n bridgeLine = pt.getTransportLine(bridgePrefix=True)\n self.assertTrue(bridgeLine.startswith(\"Bridge \"))",
"def test_integration_getConfigLine_vanilla_withFingerprint(self):\n bridge = bridges.Bridge('fpr', '23.23.23.23', 2323,\n id_digest=self.id_digest,\n or_addresses=self.or_addresses)\n bridgeLine = bridge.getConfigLine(includeFingerprint=True)\n self.assertIsNotNone(bridgeLine)\n self.assertSubstring(self.fingerprint, bridgeLine)\n ip = bridgeLine.split(':')[0]\n self.assertTrue(ipaddr.IPAddress(ip))",
"def test_Bridge_isBlockedIn_IS(self):\n self.assertFalse(self.bridge.isBlockedIn('IS'))",
"def test_Bridge_setBlockedIn_CN_obfs2(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('CN', methodname='obfs2')\n self.assertTrue(self.bridge.isBlockedIn('CN'))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Calling getBridgeLine() with a valid request for obfs4 bridges not blocked in Iran, when the obfs3 line is blocked in Iran, should return a bridge line.
|
def test_Bridge_getBridgeLine_blocked_obfs3_and_request_without_block_obfs4(self):
        self.bridge.updateFromNetworkStatus(self.networkstatus)
        self.bridge.updateFromServerDescriptor(self.serverdescriptor)
        self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)

        self.bridge.setBlockedIn('ir', methodname="obfs3")

        request = BridgeRequestBase()
        request.isValid(True)
        request.withoutBlockInCountry('IR')
        request.withPluggableTransportType('obfs4')
        line = self.bridge.getBridgeLine(request)

        self.assertIsNotNone(line)
        self.assertIn('obfs4', line)
        self.assertIn('179.178.155.140:36493', line)
        self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)
|
[
"def test_Bridge_getBridgeLine_blocked_pt_and_request_without_block_pt(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('ir', methodname=\"obfs3\")\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n request.withPluggableTransportType('obfs3')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNone(line)",
"def test_Bridge_getBridgeLine_request_without_block_in_IR(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertIn('179.178.155.140:36489', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_blocked_and_request_without_block(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('ir')\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNone(line)",
"def test_Bridge_getBridgeLine_no_include_fingerprint(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n line = self.bridge.getBridgeLine(request, includeFingerprint=False)\n\n self.assertIsNotNone(line)\n self.assertIn('179.178.155.140:36489', line)\n self.assertNotIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_IPv6_no_fingerprint(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withIPv6()\n line = self.bridge.getBridgeLine(request, includeFingerprint=False)\n\n self.assertIsNotNone(line)\n self.assertTrue(\n line.startswith('[6bf3:806b:78cd:d4b4:f6a7:4ced:cfad:dad4]:36488'))\n self.assertNotIn('179.178.155.140:36493', line)\n self.assertNotIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_obfs3_IPv6(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withIPv6()\n request.withPluggableTransportType('obfs3')\n\n self.assertRaises(bridges.PluggableTransportUnavailable,\n self.bridge.getBridgeLine,\n request)",
"def test_Bridge_getBridgeLine_IPv6(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withIPv6()\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertTrue(\n line.startswith('[6bf3:806b:78cd:d4b4:f6a7:4ced:cfad:dad4]:36488'))\n self.assertNotIn('179.178.155.140:36493', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_request_valid(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertIn('179.178.155.140:36489', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_bridge_prefix(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n line = self.bridge.getBridgeLine(request, bridgePrefix=True)\n\n self.assertIsNotNone(line)\n self.assertIn('179.178.155.140:36489', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)\n self.assertTrue(line.startswith('Bridge'))",
"def test_Bridge_getBridgeLine_no_vanilla_addresses(self):\n request = BridgeRequestBase()\n request.isValid(True)\n\n self.assertIsNone(self.bridge.getBridgeLine(request))",
"def test_Bridge_getBridgeLine_googlygooglybegone(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withPluggableTransportType('googlygooglybegone')\n\n self.assertRaises(bridges.PluggableTransportUnavailable,\n self.bridge.getBridgeLine,\n request)",
"def test_Bridge_getBridgeLine_request_invalid(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(False)\n\n self.assertIsNone(self.bridge.getBridgeLine(request))",
"def test_Bridge_constructBridgeLine_IPv6(self):\n bridge = bridges.Bridge()\n addrport = (u'6bf3:806b:78cd::4ced:cfad:dad4', 36488, 6)\n\n bridgeline = bridge._constructBridgeLine(addrport,\n includeFingerprint=False,\n bridgePrefix=True)\n self.assertEqual(bridgeline, 'Bridge [6bf3:806b:78cd::4ced:cfad:dad4]:36488')",
"def test_integration_getConfigLine_vanilla_withFingerprint(self):\n bridge = bridges.Bridge('fpr', '23.23.23.23', 2323,\n id_digest=self.id_digest,\n or_addresses=self.or_addresses)\n bridgeLine = bridge.getConfigLine(includeFingerprint=True)\n self.assertIsNotNone(bridgeLine)\n self.assertSubstring(self.fingerprint, bridgeLine)\n ip = bridgeLine.split(':')[0]\n self.assertTrue(ipaddr.IPAddress(ip))",
"def test_PluggableTransport_getTransportLine_bridge_prefix(self):\n pt = bridges.PluggableTransport(self.fingerprint,\n \"voltronPT\", \"1.2.3.4\", 443,\n {'sharedsecret': 'foobar',\n 'password': 'unicorns'})\n bridgeLine = pt.getTransportLine(bridgePrefix=True)\n self.assertTrue(bridgeLine.startswith(\"Bridge \"))",
"def test_integration_getConfigLine_vanilla_withoutFingerprint(self):\n #self.skip = True\n bridge = bridges.Bridge('nofpr', '23.23.23.23', 2323, self.fingerprint,\n or_addresses=self.or_addresses)\n bridgeLine = bridge.getConfigLine()\n ip = bridgeLine.split(':')[0]\n self.assertTrue(ipaddr.IPAddress(ip))",
"def test_Bridge_setBlockedIn_IR_address(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('IR', address='179.178.155.140')\n self.assertTrue(self.bridge.isBlockedIn('ir'))\n self.assertFalse(self.bridge.isBlockedIn('cn'))",
"def is_bridge(self):\r\n # Since this does not change over time (?) check whether we already\r\n # know the answer. If so, there is no need to go further\r\n if self._is_bridge is not None:\r\n return self._is_bridge\r\n # if not, we have to get it from the zone topology. This will set\r\n # self._is_bridge for us for next time, so we won't have to do this\r\n # again\r\n self._parse_zone_group_state()\r\n return self._is_bridge",
"def _identify_ridge_lines(matr, max_distances, gap_thresh):\n if(len(max_distances) < matr.shape[0]):\n raise ValueError('Max_distances must have at least as many rows as matr')\n \n all_max_cols = PeakFind._boolrelextrema(matr, numpy.greater, axis=1, order=1)\n #Highest row for which there are any relative maxima\n has_relmax = numpy.where(all_max_cols.any(axis=1))[0]\n if(len(has_relmax) == 0):\n return []\n start_row = has_relmax[-1]\n #Each ridge line is a 3-tuple:\n #rows, cols,Gap number\n ridge_lines = [[[start_row],\n [col],\n 0] for col in numpy.where(all_max_cols[start_row])[0]]\n final_lines = []\n rows = numpy.arange(start_row - 1, -1, -1)\n cols = numpy.arange(0, matr.shape[1])\n for row in rows:\n this_max_cols = cols[all_max_cols[row]]\n \n #Increment gap number of each line,\n #set it to zero later if appropriate\n for line in ridge_lines:\n line[2] += 1\n \n #XXX These should always be all_max_cols[row]\n #But the order might be different. Might be an efficiency gain\n #to make sure the order is the same and avoid this iteration\n prev_ridge_cols = numpy.array([line[1][-1] for line in ridge_lines])\n #Look through every relative maximum found at current row\n #Attempt to connect them with existing ridge lines.\n for ind, col in enumerate(this_max_cols):\n \"\"\"\n If there is a previous ridge line within\n the max_distance to connect to, do so.\n Otherwise start a new one.\n \"\"\"\n line = None\n if(len(prev_ridge_cols) > 0):\n diffs = numpy.abs(col - prev_ridge_cols)\n closest = numpy.argmin(diffs)\n if diffs[closest] <= max_distances[row]:\n line = ridge_lines[closest]\n if(line is not None):\n #Found a point close enough, extend current ridge line\n line[1].append(col)\n line[0].append(row)\n line[2] = 0\n else:\n new_line = [[row],\n [col],\n 0]\n ridge_lines.append(new_line)\n \n #Remove the ridge lines with gap_number too high\n #XXX Modifying a list while iterating over it.\n #Should be safe, since we iterate backwards, but\n #still tacky.\n for ind in range(len(ridge_lines) - 1, -1, -1):\n line = ridge_lines[ind]\n if line[2] > gap_thresh:\n final_lines.append(line)\n del ridge_lines[ind]\n \n out_lines = []\n for line in (final_lines + ridge_lines):\n sortargs = numpy.array(numpy.argsort(line[0]))\n rows, cols = numpy.zeros_like(sortargs), numpy.zeros_like(sortargs)\n rows[sortargs] = line[0]\n cols[sortargs] = line[1]\n out_lines.append([rows, cols])\n \n return out_lines"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Calling getBridgeLine() with a valid request for IPv6 bridges should return a bridge line.
|
def test_Bridge_getBridgeLine_IPv6(self):
        self.bridge.updateFromNetworkStatus(self.networkstatus)
        self.bridge.updateFromServerDescriptor(self.serverdescriptor)
        self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)

        request = BridgeRequestBase()
        request.isValid(True)
        request.withIPv6()
        line = self.bridge.getBridgeLine(request)

        self.assertIsNotNone(line)
        self.assertTrue(
            line.startswith('[6bf3:806b:78cd:d4b4:f6a7:4ced:cfad:dad4]:36488'))
        self.assertNotIn('179.178.155.140:36493', line)
        self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)
|
[
"def test_Bridge_getBridgeLine_IPv6_no_fingerprint(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withIPv6()\n line = self.bridge.getBridgeLine(request, includeFingerprint=False)\n\n self.assertIsNotNone(line)\n self.assertTrue(\n line.startswith('[6bf3:806b:78cd:d4b4:f6a7:4ced:cfad:dad4]:36488'))\n self.assertNotIn('179.178.155.140:36493', line)\n self.assertNotIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_constructBridgeLine_IPv6(self):\n bridge = bridges.Bridge()\n addrport = (u'6bf3:806b:78cd::4ced:cfad:dad4', 36488, 6)\n\n bridgeline = bridge._constructBridgeLine(addrport,\n includeFingerprint=False,\n bridgePrefix=True)\n self.assertEqual(bridgeline, 'Bridge [6bf3:806b:78cd::4ced:cfad:dad4]:36488')",
"def test_Bridge_getBridgeLine_obfs3_IPv6(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withIPv6()\n request.withPluggableTransportType('obfs3')\n\n self.assertRaises(bridges.PluggableTransportUnavailable,\n self.bridge.getBridgeLine,\n request)",
"def test_Bridge_getBridgeLine_bridge_prefix(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n line = self.bridge.getBridgeLine(request, bridgePrefix=True)\n\n self.assertIsNotNone(line)\n self.assertIn('179.178.155.140:36489', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)\n self.assertTrue(line.startswith('Bridge'))",
"def test_Bridge_getBridgeLine_no_include_fingerprint(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n line = self.bridge.getBridgeLine(request, includeFingerprint=False)\n\n self.assertIsNotNone(line)\n self.assertIn('179.178.155.140:36489', line)\n self.assertNotIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_no_vanilla_addresses(self):\n request = BridgeRequestBase()\n request.isValid(True)\n\n self.assertIsNone(self.bridge.getBridgeLine(request))",
"def test_Bridge_getBridgeLine_request_valid(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertIn('179.178.155.140:36489', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def _ParseIp6Neighbors(self):\n ip6neigh = subprocess.Popen(IP6NEIGH, stdout=subprocess.PIPE)\n out, _ = ip6neigh.communicate(None)\n result = []\n\n for line in out.splitlines():\n fields = line.split()\n if len(fields) < 5:\n continue\n ip6 = tr.helpers.NormalizeIPAddr(fields[0])\n dev = fields[2]\n mac = fields[4]\n try:\n type(self)._MacValidator.Set( # pylint:disable=protected-access\n self, mac)\n except ValueError:\n continue\n active = 'REACHABLE' in line\n result.append((mac, ip6, dev, active))\n return result",
"def test_Bridge_getBridgeLine_blocked_obfs3_and_request_without_block_obfs4(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('ir', methodname=\"obfs3\")\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n request.withPluggableTransportType('obfs4')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertIn('obfs4', line)\n self.assertIn('179.178.155.140:36493', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_googlygooglybegone(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withPluggableTransportType('googlygooglybegone')\n\n self.assertRaises(bridges.PluggableTransportUnavailable,\n self.bridge.getBridgeLine,\n request)",
"def test_integration_getConfigLine_vanilla_withFingerprint(self):\n bridge = bridges.Bridge('fpr', '23.23.23.23', 2323,\n id_digest=self.id_digest,\n or_addresses=self.or_addresses)\n bridgeLine = bridge.getConfigLine(includeFingerprint=True)\n self.assertIsNotNone(bridgeLine)\n self.assertSubstring(self.fingerprint, bridgeLine)\n ip = bridgeLine.split(':')[0]\n self.assertTrue(ipaddr.IPAddress(ip))",
"def test_Bridge_getBridgeLine_request_without_block_in_IR(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertIn('179.178.155.140:36489', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def BgpEthernetSegmentV6(self):\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpethernetsegmentv6 import BgpEthernetSegmentV6\n return BgpEthernetSegmentV6(self)._select()",
"def test_integration_getConfigLine_vanilla_withoutFingerprint(self):\n #self.skip = True\n bridge = bridges.Bridge('nofpr', '23.23.23.23', 2323, self.fingerprint,\n or_addresses=self.or_addresses)\n bridgeLine = bridge.getConfigLine()\n ip = bridgeLine.split(':')[0]\n self.assertTrue(ipaddr.IPAddress(ip))",
"def ipv6_gateway(self):\n try:\n return ipaddress.ip_address(self._ipv6['gateway'])\n except (KeyError, ValueError, TypeError):\n return None",
"def loopback_ip6(self):\n ret = self._get_attr(\"loopbackIp6\")\n return ret",
"def test_Bridge_getBridgeLine_blocked_and_request_without_block(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('ir')\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNone(line)",
"def test_resolveOnlyIPv6(self):\n self._resolveOnlyTest([IPv6Address], AF_INET6)",
"def test_Bridge_getBridgeLine_blocked_pt_and_request_without_block_pt(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('ir', methodname=\"obfs3\")\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n request.withPluggableTransportType('obfs3')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNone(line)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Calling getBridgeLine(includeFingerprint=False) with a valid request for IPv6 bridges should return a bridge line without the fingerprint.
|
def test_Bridge_getBridgeLine_IPv6_no_fingerprint(self):
self.bridge.updateFromNetworkStatus(self.networkstatus)
self.bridge.updateFromServerDescriptor(self.serverdescriptor)
self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)
request = BridgeRequestBase()
request.isValid(True)
request.withIPv6()
line = self.bridge.getBridgeLine(request, includeFingerprint=False)
self.assertIsNotNone(line)
self.assertTrue(
line.startswith('[6bf3:806b:78cd:d4b4:f6a7:4ced:cfad:dad4]:36488'))
self.assertNotIn('179.178.155.140:36493', line)
self.assertNotIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)
|
[
"def test_Bridge_getBridgeLine_IPv6(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withIPv6()\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertTrue(\n line.startswith('[6bf3:806b:78cd:d4b4:f6a7:4ced:cfad:dad4]:36488'))\n self.assertNotIn('179.178.155.140:36493', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_constructBridgeLine_IPv6(self):\n bridge = bridges.Bridge()\n addrport = (u'6bf3:806b:78cd::4ced:cfad:dad4', 36488, 6)\n\n bridgeline = bridge._constructBridgeLine(addrport,\n includeFingerprint=False,\n bridgePrefix=True)\n self.assertEqual(bridgeline, 'Bridge [6bf3:806b:78cd::4ced:cfad:dad4]:36488')",
"def test_Bridge_getBridgeLine_no_include_fingerprint(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n line = self.bridge.getBridgeLine(request, includeFingerprint=False)\n\n self.assertIsNotNone(line)\n self.assertIn('179.178.155.140:36489', line)\n self.assertNotIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_obfs3_IPv6(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withIPv6()\n request.withPluggableTransportType('obfs3')\n\n self.assertRaises(bridges.PluggableTransportUnavailable,\n self.bridge.getBridgeLine,\n request)",
"def test_integration_getConfigLine_vanilla_withFingerprint(self):\n bridge = bridges.Bridge('fpr', '23.23.23.23', 2323,\n id_digest=self.id_digest,\n or_addresses=self.or_addresses)\n bridgeLine = bridge.getConfigLine(includeFingerprint=True)\n self.assertIsNotNone(bridgeLine)\n self.assertSubstring(self.fingerprint, bridgeLine)\n ip = bridgeLine.split(':')[0]\n self.assertTrue(ipaddr.IPAddress(ip))",
"def test_integration_getConfigLine_vanilla_withoutFingerprint(self):\n #self.skip = True\n bridge = bridges.Bridge('nofpr', '23.23.23.23', 2323, self.fingerprint,\n or_addresses=self.or_addresses)\n bridgeLine = bridge.getConfigLine()\n ip = bridgeLine.split(':')[0]\n self.assertTrue(ipaddr.IPAddress(ip))",
"def test_Bridge_getBridgeLine_blocked_obfs3_and_request_without_block_obfs4(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('ir', methodname=\"obfs3\")\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n request.withPluggableTransportType('obfs4')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertIn('obfs4', line)\n self.assertIn('179.178.155.140:36493', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_no_vanilla_addresses(self):\n request = BridgeRequestBase()\n request.isValid(True)\n\n self.assertIsNone(self.bridge.getBridgeLine(request))",
"def test_Bridge_getBridgeLine_request_without_block_in_IR(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertIn('179.178.155.140:36489', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_bridge_prefix(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n line = self.bridge.getBridgeLine(request, bridgePrefix=True)\n\n self.assertIsNotNone(line)\n self.assertIn('179.178.155.140:36489', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)\n self.assertTrue(line.startswith('Bridge'))",
"def test_Bridge_getBridgeLine_blocked_pt_and_request_without_block_pt(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('ir', methodname=\"obfs3\")\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n request.withPluggableTransportType('obfs3')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNone(line)",
"def test_Bridge_getBridgeLine_googlygooglybegone(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withPluggableTransportType('googlygooglybegone')\n\n self.assertRaises(bridges.PluggableTransportUnavailable,\n self.bridge.getBridgeLine,\n request)",
"def test_Bridge_getBridgeLine_request_valid(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertIn('179.178.155.140:36489', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_blocked_and_request_without_block(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('ir')\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNone(line)",
"def _ParseIp6Neighbors(self):\n ip6neigh = subprocess.Popen(IP6NEIGH, stdout=subprocess.PIPE)\n out, _ = ip6neigh.communicate(None)\n result = []\n\n for line in out.splitlines():\n fields = line.split()\n if len(fields) < 5:\n continue\n ip6 = tr.helpers.NormalizeIPAddr(fields[0])\n dev = fields[2]\n mac = fields[4]\n try:\n type(self)._MacValidator.Set( # pylint:disable=protected-access\n self, mac)\n except ValueError:\n continue\n active = 'REACHABLE' in line\n result.append((mac, ip6, dev, active))\n return result",
"def test_Bridge_getBridgeLine_request_invalid(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(False)\n\n self.assertIsNone(self.bridge.getBridgeLine(request))",
"def BgpEthernetSegmentV6(self):\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpethernetsegmentv6 import BgpEthernetSegmentV6\n return BgpEthernetSegmentV6(self)._select()",
"def ipv6fragtotpktsforward(self) :\n try :\n return self._ipv6fragtotpktsforward\n except Exception as e:\n raise e",
"def FilterIpv6UnicastFlowSpec(self):\n return self._get_attribute('filterIpv6UnicastFlowSpec')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Calling getBridgeLine() with a request for IPv6 obfs3 bridges (when the Bridge doesn't have any) should raise a PluggableTransportUnavailable exception.
|
def test_Bridge_getBridgeLine_obfs3_IPv6(self):
self.bridge.updateFromNetworkStatus(self.networkstatus)
self.bridge.updateFromServerDescriptor(self.serverdescriptor)
self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)
request = BridgeRequestBase()
request.isValid(True)
request.withIPv6()
request.withPluggableTransportType('obfs3')
self.assertRaises(bridges.PluggableTransportUnavailable,
self.bridge.getBridgeLine,
request)
|
[
"def test_Bridge_getBridgeLine_IPv6(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withIPv6()\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertTrue(\n line.startswith('[6bf3:806b:78cd:d4b4:f6a7:4ced:cfad:dad4]:36488'))\n self.assertNotIn('179.178.155.140:36493', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_IPv6_no_fingerprint(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withIPv6()\n line = self.bridge.getBridgeLine(request, includeFingerprint=False)\n\n self.assertIsNotNone(line)\n self.assertTrue(\n line.startswith('[6bf3:806b:78cd:d4b4:f6a7:4ced:cfad:dad4]:36488'))\n self.assertNotIn('179.178.155.140:36493', line)\n self.assertNotIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_constructBridgeLine_IPv6(self):\n bridge = bridges.Bridge()\n addrport = (u'6bf3:806b:78cd::4ced:cfad:dad4', 36488, 6)\n\n bridgeline = bridge._constructBridgeLine(addrport,\n includeFingerprint=False,\n bridgePrefix=True)\n self.assertEqual(bridgeline, 'Bridge [6bf3:806b:78cd::4ced:cfad:dad4]:36488')",
"def test_Bridge_getBridgeLine_blocked_obfs3_and_request_without_block_obfs4(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('ir', methodname=\"obfs3\")\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n request.withPluggableTransportType('obfs4')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertIn('obfs4', line)\n self.assertIn('179.178.155.140:36493', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_googlygooglybegone(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withPluggableTransportType('googlygooglybegone')\n\n self.assertRaises(bridges.PluggableTransportUnavailable,\n self.bridge.getBridgeLine,\n request)",
"def test_Bridge_getBridgeLine_blocked_pt_and_request_without_block_pt(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('ir', methodname=\"obfs3\")\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n request.withPluggableTransportType('obfs3')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNone(line)",
"def test_Bridge_getBridgeLine_no_vanilla_addresses(self):\n request = BridgeRequestBase()\n request.isValid(True)\n\n self.assertIsNone(self.bridge.getBridgeLine(request))",
"def test_Bridge_getBridgeLine_no_include_fingerprint(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n line = self.bridge.getBridgeLine(request, includeFingerprint=False)\n\n self.assertIsNotNone(line)\n self.assertIn('179.178.155.140:36489', line)\n self.assertNotIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_PluggableTransport_getTransportLine_bridge_prefix(self):\n pt = bridges.PluggableTransport(self.fingerprint,\n \"voltronPT\", \"1.2.3.4\", 443,\n {'sharedsecret': 'foobar',\n 'password': 'unicorns'})\n bridgeLine = pt.getTransportLine(bridgePrefix=True)\n self.assertTrue(bridgeLine.startswith(\"Bridge \"))",
"def test_Bridge_getBridgeLine_bridge_prefix(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n line = self.bridge.getBridgeLine(request, bridgePrefix=True)\n\n self.assertIsNotNone(line)\n self.assertIn('179.178.155.140:36489', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)\n self.assertTrue(line.startswith('Bridge'))",
"def test_Bridge_getBridgeLine_blocked_and_request_without_block(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('ir')\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNone(line)",
"def test_Bridge_getBridgeLine_request_without_block_in_IR(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertIn('179.178.155.140:36489', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_integration_getConfigLine_vanilla_withFingerprint(self):\n bridge = bridges.Bridge('fpr', '23.23.23.23', 2323,\n id_digest=self.id_digest,\n or_addresses=self.or_addresses)\n bridgeLine = bridge.getConfigLine(includeFingerprint=True)\n self.assertIsNotNone(bridgeLine)\n self.assertSubstring(self.fingerprint, bridgeLine)\n ip = bridgeLine.split(':')[0]\n self.assertTrue(ipaddr.IPAddress(ip))",
"def test_integration_getConfigLine_vanilla_withoutFingerprint(self):\n #self.skip = True\n bridge = bridges.Bridge('nofpr', '23.23.23.23', 2323, self.fingerprint,\n or_addresses=self.or_addresses)\n bridgeLine = bridge.getConfigLine()\n ip = bridgeLine.split(':')[0]\n self.assertTrue(ipaddr.IPAddress(ip))",
"def test_Bridge_getBridgeLine_request_valid(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertIn('179.178.155.140:36489', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_request_invalid(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(False)\n\n self.assertIsNone(self.bridge.getBridgeLine(request))",
"def discover(): \n global prefs\n if 'bridge_cache' in prefs:\n try:\n bridgeip = prefs['bridge_cache']['ip']\n reply = requests.get('http://%s/api/' % (bridgeip), timeout=3).json()\n if len(reply) > 0 and 'error' in reply[0] and reply[0]['error']['type'] == 4:\n # good bridge, use it\n return bridgeip\n except requests.exceptions.ConnectTimeout:\n # fallback to rendezvous point\n pass\n\n print(\"Discovering bridge...\")\n try:\n bridgeip = requests.get('https://www.meethue.com/api/nupnp').json()[0]['internalipaddress']\n prefs['bridge_cache'] = {'ip': bridgeip}\n return bridgeip\n except Exception as except_inst:\n print(\"Bridge discovery failed:\", except_inst)\n raise CliFatalError()",
"def _ParseIp6Neighbors(self):\n ip6neigh = subprocess.Popen(IP6NEIGH, stdout=subprocess.PIPE)\n out, _ = ip6neigh.communicate(None)\n result = []\n\n for line in out.splitlines():\n fields = line.split()\n if len(fields) < 5:\n continue\n ip6 = tr.helpers.NormalizeIPAddr(fields[0])\n dev = fields[2]\n mac = fields[4]\n try:\n type(self)._MacValidator.Set( # pylint:disable=protected-access\n self, mac)\n except ValueError:\n continue\n active = 'REACHABLE' in line\n result.append((mac, ip6, dev, active))\n return result",
"def BgpEthernetSegmentV6(self):\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpethernetsegmentv6 import BgpEthernetSegmentV6\n return BgpEthernetSegmentV6(self)._select()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Calling getBridgeLine() with a request for an unknown PT should raise a PluggableTransportUnavailable exception.
|
def test_Bridge_getBridgeLine_googlygooglybegone(self):
self.bridge.updateFromNetworkStatus(self.networkstatus)
self.bridge.updateFromServerDescriptor(self.serverdescriptor)
self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)
request = BridgeRequestBase()
request.isValid(True)
request.withPluggableTransportType('googlygooglybegone')
self.assertRaises(bridges.PluggableTransportUnavailable,
self.bridge.getBridgeLine,
request)
|
[
"def test_Bridge_getBridgeLine_blocked_pt_and_request_without_block_pt(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('ir', methodname=\"obfs3\")\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n request.withPluggableTransportType('obfs3')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNone(line)",
"def test_Bridge_getBridgeLine_no_vanilla_addresses(self):\n request = BridgeRequestBase()\n request.isValid(True)\n\n self.assertIsNone(self.bridge.getBridgeLine(request))",
"def test_Bridge_getBridgeLine_request_without_block_in_IR(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertIn('179.178.155.140:36489', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_no_include_fingerprint(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n line = self.bridge.getBridgeLine(request, includeFingerprint=False)\n\n self.assertIsNotNone(line)\n self.assertIn('179.178.155.140:36489', line)\n self.assertNotIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_blocked_and_request_without_block(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('ir')\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNone(line)",
"def test_Bridge_getBridgeLine_request_invalid(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(False)\n\n self.assertIsNone(self.bridge.getBridgeLine(request))",
"def test_Bridge_getBridgeLine_obfs3_IPv6(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withIPv6()\n request.withPluggableTransportType('obfs3')\n\n self.assertRaises(bridges.PluggableTransportUnavailable,\n self.bridge.getBridgeLine,\n request)",
"def test_PluggableTransport_getTransportLine_bridge_prefix(self):\n pt = bridges.PluggableTransport(self.fingerprint,\n \"voltronPT\", \"1.2.3.4\", 443,\n {'sharedsecret': 'foobar',\n 'password': 'unicorns'})\n bridgeLine = pt.getTransportLine(bridgePrefix=True)\n self.assertTrue(bridgeLine.startswith(\"Bridge \"))",
"def test_Bridge_getBridgeLine_IPv6_no_fingerprint(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withIPv6()\n line = self.bridge.getBridgeLine(request, includeFingerprint=False)\n\n self.assertIsNotNone(line)\n self.assertTrue(\n line.startswith('[6bf3:806b:78cd:d4b4:f6a7:4ced:cfad:dad4]:36488'))\n self.assertNotIn('179.178.155.140:36493', line)\n self.assertNotIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_blocked_obfs3_and_request_without_block_obfs4(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('ir', methodname=\"obfs3\")\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n request.withPluggableTransportType('obfs4')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertIn('obfs4', line)\n self.assertIn('179.178.155.140:36493', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_PluggableTransport_getTransportLine_ptargs_space_delimited(self):\n pt = bridges.PluggableTransport(self.fingerprint,\n \"voltronPT\", \"1.2.3.4\", 443,\n {'sharedsecret': 'foobar',\n 'password': 'unicorns'})\n bridgeLine = pt.getTransportLine()\n self.assertTrue(\n (\"password=unicorns sharedsecret=foobar\" in bridgeLine) or\n (\"sharedsecret=foobar password=unicorns\" in bridgeLine))",
"def test_Bridge_getBridgeLine_IPv6(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withIPv6()\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertTrue(\n line.startswith('[6bf3:806b:78cd:d4b4:f6a7:4ced:cfad:dad4]:36488'))\n self.assertNotIn('179.178.155.140:36493', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_request_valid(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertIn('179.178.155.140:36489', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_bridge_prefix(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n line = self.bridge.getBridgeLine(request, bridgePrefix=True)\n\n self.assertIsNotNone(line)\n self.assertIn('179.178.155.140:36489', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)\n self.assertTrue(line.startswith('Bridge'))",
"def test_integration_getConfigLine_vanilla_withoutFingerprint(self):\n #self.skip = True\n bridge = bridges.Bridge('nofpr', '23.23.23.23', 2323, self.fingerprint,\n or_addresses=self.or_addresses)\n bridgeLine = bridge.getConfigLine()\n ip = bridgeLine.split(':')[0]\n self.assertTrue(ipaddr.IPAddress(ip))",
"def test_PluggableTransport_runChecks_invalid_pt_args(self):\n try:\n pt = bridges.PluggableTransport(self.fingerprint,\n \"voltronPT\", \"1.2.3.4\", 443,\n 'sharedsecret=foobar')\n except Exception as error:\n self.failUnlessIsInstance(error,\n bridges.MalformedPluggableTransport)",
"def test_PluggableTransport_getTransportLine_content_order(self):\n pt = bridges.PluggableTransport(self.fingerprint,\n \"voltronPT\", \"1.2.3.4\", 443,\n {'sharedsecret': 'foobar',\n 'password': 'unicorns'})\n bridgeLine = pt.getTransportLine()\n\n # We have to check for substrings because we don't know which order\n # the PT arguments will end up in the bridge line. We also have to\n # check for the lowercased transport name. Fortunately, the following\n # three are the only ones which are important to have in order:\n self.assertTrue(bridgeLine.startswith(\"voltronpt\"))\n self.assertSubstring(\"voltronpt 1.2.3.4:443 \" + self.fingerprint,\n bridgeLine)\n # These ones can be in any order, but they should be at the end of the\n # bridge line:\n self.assertSubstring(\"password=unicorns\", bridgeLine)\n self.assertSubstring(\"sharedsecret=foobar\", bridgeLine)",
"def test_integration_getConfigLine_vanilla_withFingerprint(self):\n bridge = bridges.Bridge('fpr', '23.23.23.23', 2323,\n id_digest=self.id_digest,\n or_addresses=self.or_addresses)\n bridgeLine = bridge.getConfigLine(includeFingerprint=True)\n self.assertIsNotNone(bridgeLine)\n self.assertSubstring(self.fingerprint, bridgeLine)\n ip = bridgeLine.split(':')[0]\n self.assertTrue(ipaddr.IPAddress(ip))",
"def test_Bridge_constructBridgeLine_IPv6(self):\n bridge = bridges.Bridge()\n addrport = (u'6bf3:806b:78cd::4ced:cfad:dad4', 36488, 6)\n\n bridgeline = bridge._constructBridgeLine(addrport,\n includeFingerprint=False,\n bridgePrefix=True)\n self.assertEqual(bridgeline, 'Bridge [6bf3:806b:78cd::4ced:cfad:dad4]:36488')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Calling getBridgeLine() with bridgePrefix=True should prefix the returned bridge line with 'Bridge '.
|
def test_Bridge_getBridgeLine_bridge_prefix(self):
self.bridge.updateFromNetworkStatus(self.networkstatus)
self.bridge.updateFromServerDescriptor(self.serverdescriptor)
self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)
request = BridgeRequestBase()
request.isValid(True)
line = self.bridge.getBridgeLine(request, bridgePrefix=True)
self.assertIsNotNone(line)
self.assertIn('179.178.155.140:36489', line)
self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)
self.assertTrue(line.startswith('Bridge'))
|
[
"def test_PluggableTransport_getTransportLine_bridge_prefix(self):\n pt = bridges.PluggableTransport(self.fingerprint,\n \"voltronPT\", \"1.2.3.4\", 443,\n {'sharedsecret': 'foobar',\n 'password': 'unicorns'})\n bridgeLine = pt.getTransportLine(bridgePrefix=True)\n self.assertTrue(bridgeLine.startswith(\"Bridge \"))",
"def test_Bridge_constructBridgeLine_IPv6(self):\n bridge = bridges.Bridge()\n addrport = (u'6bf3:806b:78cd::4ced:cfad:dad4', 36488, 6)\n\n bridgeline = bridge._constructBridgeLine(addrport,\n includeFingerprint=False,\n bridgePrefix=True)\n self.assertEqual(bridgeline, 'Bridge [6bf3:806b:78cd::4ced:cfad:dad4]:36488')",
"def test_Bridge_getBridgeLine_IPv6(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withIPv6()\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertTrue(\n line.startswith('[6bf3:806b:78cd:d4b4:f6a7:4ced:cfad:dad4]:36488'))\n self.assertNotIn('179.178.155.140:36493', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_IPv6_no_fingerprint(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withIPv6()\n line = self.bridge.getBridgeLine(request, includeFingerprint=False)\n\n self.assertIsNotNone(line)\n self.assertTrue(\n line.startswith('[6bf3:806b:78cd:d4b4:f6a7:4ced:cfad:dad4]:36488'))\n self.assertNotIn('179.178.155.140:36493', line)\n self.assertNotIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_no_include_fingerprint(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n line = self.bridge.getBridgeLine(request, includeFingerprint=False)\n\n self.assertIsNotNone(line)\n self.assertIn('179.178.155.140:36489', line)\n self.assertNotIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_googlygooglybegone(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withPluggableTransportType('googlygooglybegone')\n\n self.assertRaises(bridges.PluggableTransportUnavailable,\n self.bridge.getBridgeLine,\n request)",
"def test_integration_getConfigLine_vanilla_withFingerprint(self):\n bridge = bridges.Bridge('fpr', '23.23.23.23', 2323,\n id_digest=self.id_digest,\n or_addresses=self.or_addresses)\n bridgeLine = bridge.getConfigLine(includeFingerprint=True)\n self.assertIsNotNone(bridgeLine)\n self.assertSubstring(self.fingerprint, bridgeLine)\n ip = bridgeLine.split(':')[0]\n self.assertTrue(ipaddr.IPAddress(ip))",
"def test_integration_getConfigLine_vanilla_withoutFingerprint(self):\n #self.skip = True\n bridge = bridges.Bridge('nofpr', '23.23.23.23', 2323, self.fingerprint,\n or_addresses=self.or_addresses)\n bridgeLine = bridge.getConfigLine()\n ip = bridgeLine.split(':')[0]\n self.assertTrue(ipaddr.IPAddress(ip))",
"def test_Bridge_getBridgeLine_no_vanilla_addresses(self):\n request = BridgeRequestBase()\n request.isValid(True)\n\n self.assertIsNone(self.bridge.getBridgeLine(request))",
"def test_Bridge_getBridgeLine_blocked_obfs3_and_request_without_block_obfs4(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('ir', methodname=\"obfs3\")\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n request.withPluggableTransportType('obfs4')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertIn('obfs4', line)\n self.assertIn('179.178.155.140:36493', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def bridge_test():\n\ttext = ''\n\tthicknesses = [0.05, 0.1, 0.2]\n\tlengths = [0.4, 0.8, 1.6, 3.2]\n\tnum_bridges = 3\n\tbridge_height = 0.5\n\tbridge_base_width = 0.2\n\tbridge_base_length = 1.6\n\tbuff = 0\n\tmax_x = 0; min_x = 0\n\tmax_y = 0; min_y = 0\n\tfor i in range(len(lengths)):\n\t\tl = lengths[i]\n\t\toffset = {}\n\t\toffset['x'] = max_x+buff\n\t\tfor j in range(len(thicknesses)):\n\t\t\tt = thicknesses[j]\n\t\t\tprint l, t\n\t\t\toffset['y'] = j*(bridge_base_length+buff)\n\t\t\tbridge_base = {}\n\t\t\tbridge_base['corner'] = (offset['x'], offset['y'], 0)\n\t\t\tbridge_base['v1'] = (bridge_base_width, 0, 0)\n\t\t\tbridge_base['v2'] = (0, bridge_base_length, 0)\n\t\t\tbridge_base['v3'] = (0, 0, bridge_height+t)\n\t\t\ttext += write_prism(bridge_base)\n\t\t\tbridge_base['corner'] = (offset['x']+bridge_base_width+l, offset['y'], 0)\n\t\t\tbridge_base['v1'] = (bridge_base_width, 0, 0)\n\t\t\tbridge_base['v2'] = (0, bridge_base_length, 0)\n\t\t\tbridge_base['v3'] = (0, 0, bridge_height+t)\n\t\t\ttext += write_prism(bridge_base)\n\t\t\tfor k in range(num_bridges):\n\t\t\t\tbridge = {}\n\t\t\t\tbridge['corner'] = (offset['x']+bridge_base_width, offset['y']+(k+1)*bridge_base_length/(num_bridges+1)-t/2, bridge_height)\n\t\t\t\tbridge['v1'] = (l, 0, 0)\n\t\t\t\tbridge['v2'] = (0, t, 0)\n\t\t\t\tbridge['v3'] = (0, 0, t)\n\t\t\t\ttext += write_prism(bridge)\n\t\t\tmax_x = max(max_x, offset['x']+2*bridge_base_width+l)\n\t\t\tmax_y = max(max_y, offset['y']+bridge_base_length)\n\tbase = {}\n\tbase['min_x'] = min_x; base['max_x'] = max_x; base['min_y'] = min_y; base['max_y'] = max_y\n\tbase['height'] = 0.5\n\tbase['buffer'] = 3\n\ttext += write_base(base)\n\treturn text",
"def test_Bridge_getBridgeLine_obfs3_IPv6(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withIPv6()\n request.withPluggableTransportType('obfs3')\n\n self.assertRaises(bridges.PluggableTransportUnavailable,\n self.bridge.getBridgeLine,\n request)",
"def test_Bridge_getBridgeLine_request_without_block_in_IR(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertIn('179.178.155.140:36489', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def set_service_line(self):\n # Brighton is on every line, so need to check the destination\n if self.source == \"BTN\":\n stationToFind = self.destination\n else: # Otherwise check which line source resides on\n stationToFind = self.source\n\n for lineName, stationList in util.stations.items():\n if stationToFind in stationList:\n self.line = lineName\n break\n if self.line is None: # Default = Hayward's Heath\n self.line = \"HHE\"",
"def test_Bridge_getBridgeLine_request_valid(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertIn('179.178.155.140:36489', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def bridgeName(self):\n ret = libvirtmod.virNetworkGetBridgeName(self._o)\n if ret is None: raise libvirtError ('virNetworkGetBridgeName() failed', net=self)\n return ret",
"def _get_bridge_name(self):\n command = ovs_vsctl.VSCtlCommand(\n 'find',\n ('Bridge',\n 'datapath_id=%s' % dpid_lib.dpid_to_str(self.datapath_id)))\n self.run_command([command])\n if not isinstance(command.result, list) or len(command.result) != 1:\n raise OVSBridgeNotFound(\n datapath_id=dpid_lib.dpid_to_str(self.datapath_id))\n return command.result[0].name",
"def GetBridgeInfoFromConf():\n bridges = {}\n with open('/usr/local/bluedon/www/cache/waf_bridge.conf', 'r') as f:\n for line in f.readlines():\n bridgeInfo = line.strip().split() # br0 vEth0,vEth1 num\n if len(bridgeInfo) == 3:\n bridges[bridgeInfo[0]] = [bridgeInfo[1]]\n return bridges",
"def _prefix_line(self, str, lines):\n\n new_lines = \"\"\n\n line_list = lines.split(\"\\n\")\n\n # Added since an empty line seems to be added.\n del line_list[-1]\n\n for l in line_list:\n new_lines = new_lines + str + l + \"\\n\"\n\n return new_lines"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Calling getBridgeLine() with includeFingerprint=False should return a bridge line without a fingerprint.
|
def test_Bridge_getBridgeLine_no_include_fingerprint(self):
self.bridge.updateFromNetworkStatus(self.networkstatus)
self.bridge.updateFromServerDescriptor(self.serverdescriptor)
self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)
request = BridgeRequestBase()
request.isValid(True)
line = self.bridge.getBridgeLine(request, includeFingerprint=False)
self.assertIsNotNone(line)
self.assertIn('179.178.155.140:36489', line)
self.assertNotIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)
|
[
"def test_Bridge_getBridgeLine_IPv6_no_fingerprint(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withIPv6()\n line = self.bridge.getBridgeLine(request, includeFingerprint=False)\n\n self.assertIsNotNone(line)\n self.assertTrue(\n line.startswith('[6bf3:806b:78cd:d4b4:f6a7:4ced:cfad:dad4]:36488'))\n self.assertNotIn('179.178.155.140:36493', line)\n self.assertNotIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_integration_getConfigLine_vanilla_withFingerprint(self):\n bridge = bridges.Bridge('fpr', '23.23.23.23', 2323,\n id_digest=self.id_digest,\n or_addresses=self.or_addresses)\n bridgeLine = bridge.getConfigLine(includeFingerprint=True)\n self.assertIsNotNone(bridgeLine)\n self.assertSubstring(self.fingerprint, bridgeLine)\n ip = bridgeLine.split(':')[0]\n self.assertTrue(ipaddr.IPAddress(ip))",
"def test_integration_getConfigLine_vanilla_withoutFingerprint(self):\n #self.skip = True\n bridge = bridges.Bridge('nofpr', '23.23.23.23', 2323, self.fingerprint,\n or_addresses=self.or_addresses)\n bridgeLine = bridge.getConfigLine()\n ip = bridgeLine.split(':')[0]\n self.assertTrue(ipaddr.IPAddress(ip))",
"def test_Bridge_getBridgeLine_IPv6(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withIPv6()\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertTrue(\n line.startswith('[6bf3:806b:78cd:d4b4:f6a7:4ced:cfad:dad4]:36488'))\n self.assertNotIn('179.178.155.140:36493', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_bridge_prefix(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n line = self.bridge.getBridgeLine(request, bridgePrefix=True)\n\n self.assertIsNotNone(line)\n self.assertIn('179.178.155.140:36489', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)\n self.assertTrue(line.startswith('Bridge'))",
"def test_Bridge_getBridgeLine_request_without_block_in_IR(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertIn('179.178.155.140:36489', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_no_vanilla_addresses(self):\n request = BridgeRequestBase()\n request.isValid(True)\n\n self.assertIsNone(self.bridge.getBridgeLine(request))",
"def test_Bridge_constructBridgeLine_IPv6(self):\n bridge = bridges.Bridge()\n addrport = (u'6bf3:806b:78cd::4ced:cfad:dad4', 36488, 6)\n\n bridgeline = bridge._constructBridgeLine(addrport,\n includeFingerprint=False,\n bridgePrefix=True)\n self.assertEqual(bridgeline, 'Bridge [6bf3:806b:78cd::4ced:cfad:dad4]:36488')",
"def test_Bridge_getBridgeLine_blocked_obfs3_and_request_without_block_obfs4(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('ir', methodname=\"obfs3\")\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n request.withPluggableTransportType('obfs4')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertIn('obfs4', line)\n self.assertIn('179.178.155.140:36493', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_blocked_and_request_without_block(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('ir')\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNone(line)",
"def test_Bridge_getBridgeLine_blocked_pt_and_request_without_block_pt(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('ir', methodname=\"obfs3\")\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n request.withPluggableTransportType('obfs3')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNone(line)",
"def test_Bridge_getBridgeLine_googlygooglybegone(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withPluggableTransportType('googlygooglybegone')\n\n self.assertRaises(bridges.PluggableTransportUnavailable,\n self.bridge.getBridgeLine,\n request)",
"def test_PluggableTransport_getTransportLine_bridge_prefix(self):\n pt = bridges.PluggableTransport(self.fingerprint,\n \"voltronPT\", \"1.2.3.4\", 443,\n {'sharedsecret': 'foobar',\n 'password': 'unicorns'})\n bridgeLine = pt.getTransportLine(bridgePrefix=True)\n self.assertTrue(bridgeLine.startswith(\"Bridge \"))",
"def test_Bridge_getBridgeLine_request_valid(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertIn('179.178.155.140:36489', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_obfs3_IPv6(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withIPv6()\n request.withPluggableTransportType('obfs3')\n\n self.assertRaises(bridges.PluggableTransportUnavailable,\n self.bridge.getBridgeLine,\n request)",
"def test_Bridge_getBridgeLine_request_invalid(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(False)\n\n self.assertIsNone(self.bridge.getBridgeLine(request))",
"def draw_unknown_bridge_over(self, tiles, tile, rotation, line_mode=\"outer\"):\n\n source_tile = self.seek_bridge_ramp(tiles, tile.row, tile.col, rotation)\n payload_kind = 0\n track_type = 0\n has_tram = False\n source_tile_owner = None\n\n if source_tile:\n payload_kind = source_tile.occupant.payload_kind\n source_tile_owner = source_tile.owner\n if payload_kind == 0:\n track_type = source_tile.occupant.track_type\n if source_tile.occupant.tram_type == 1:\n has_tram = True\n\n if payload_kind == 0:\n self.draw_rail_bridge_over(tile, rotation, track_type, source_tile_owner)\n elif payload_kind == 1:\n self.draw_road_bridge_over(tile, rotation, has_tram, source_tile_owner)",
"def has_line(self, angles=None, line_length=15):\n return probabilistic_hough_line(\n self.interior(crop=self.crop),\n line_length=line_length,\n line_gap=2,\n theta=angles)",
"def _get_linecard(self):\n return self.__linecard"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Calling getNetworkstatusLastPublished() should tell us the last published time of the Bridge's networkstatus document.
|
def test_Bridge_getNetworkstatusLastPublished(self):
self.bridge.updateFromNetworkStatus(self.networkstatus)
published = self.bridge.getNetworkstatusLastPublished()
self.assertIsNotNone(published)
self.assertIsInstance(published, datetime.datetime)
self.assertEqual(str(published), '2014-12-22 21:51:27')
|
[
"def test_Bridge_getDescriptorLastPublished(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n\n published = self.bridge.getDescriptorLastPublished()\n self.assertIsNotNone(published)\n self.assertIsInstance(published, datetime.datetime)\n self.assertEqual(str(published), '2014-12-22 21:51:27')",
"def _get_last_received_port_profile_info(self):\n return self.__last_received_port_profile_info",
"def get_network_state(self):\n return self.__send_poll(\"network\")",
"def _get_connection_time(self):\n return self.__connection_time",
"def handle_tlm_latest():\n for pkt_type, state in packet_states.items():\n packet_states[pkt_type][\"dntoeu\"] = replace_datetimes(state[\"dntoeu\"])\n\n with Sessions.current() as session:\n counters = session.tlm_counters\n return json.dumps({\"states\": packet_states, \"counters\": counters})",
"def get_network_status(self, network):\n\n with self._lock:\n with sqlite3.connect(self._database_name) as connection:\n status_query = connection.execute(f\"SELECT * from networks WHERE name='{network}'\")\n\n return status_query.fetchone()[4]",
"def recent(self):\n name = 'RECENT'\n typ, dat = self._untagged_response('OK', [None], name)\n if dat[-1]:\n return typ, dat\n typ, dat = self.noop() # Prod server for response\n return self._untagged_response(typ, dat, name)",
"def offline_since(self):\n return self._dt_offline",
"def get_last_status(self,\n headers=None,\n **query_parameters):\n return self.get_last_config_backup_status(\n headers=headers,\n **query_parameters\n )",
"def get_using_network_time():\n ret = salt.utils.mac_utils.execute_return_result(\"systemsetup -getusingnetworktime\")\n\n return (\n salt.utils.mac_utils.validate_enabled(salt.utils.mac_utils.parse_return(ret))\n == \"on\"\n )",
"def test_Bridge_updateFromServerDescriptor_ignoreNetworkstatus_no_networkstatus(self):\n self.bridge.updateFromServerDescriptor(self.serverdescriptor,\n ignoreNetworkstatus=True)\n self.assertIsNone(self.bridge.descriptors['networkstatus'])\n self.assertIsNotNone(self.bridge.descriptors['server'])",
"def checkNetworkStatus(self):\r\n pass",
"def latest_info():",
"def omniPingStatus(self):\n status = -1\n try:\n status = self.netcool.getPingStatus(system=self.getOrganizerName())\n status = self.convertStatus(status)\n except Exception: pass\n return status",
"def GetLastUsedWiredNetwork(self):\n profileList = self.config.sections()\n for profile in profileList:\n if misc.to_bool(self.config.get(profile, \"lastused\")):\n return profile\n return None",
"async def get_server_time(self):\r\n return await self.client_helper(\"get_server_time\")",
"def originalPublishTime(self, key):\n return self.storage.get(key).metadata.originallyPublished",
"def currently_publishing():\n c = CurrentlyPublishingStatus()\n c.update_status()",
"def last_geo_failover_time(self) -> str:\n return pulumi.get(self, \"last_geo_failover_time\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Calling getDescriptorLastPublished() should tell us the last published time of the Bridge's serverdescriptor.
|
def test_Bridge_getDescriptorLastPublished(self):
self.bridge.updateFromNetworkStatus(self.networkstatus)
self.bridge.updateFromServerDescriptor(self.serverdescriptor)
published = self.bridge.getDescriptorLastPublished()
self.assertIsNotNone(published)
self.assertIsInstance(published, datetime.datetime)
self.assertEqual(str(published), '2014-12-22 21:51:27')
|
[
"def test_Bridge_getNetworkstatusLastPublished(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n\n published = self.bridge.getNetworkstatusLastPublished()\n self.assertIsNotNone(published)\n self.assertIsInstance(published, datetime.datetime)\n self.assertEqual(str(published), '2014-12-22 21:51:27')",
"def _get_last_received_port_profile_info(self):\n return self.__last_received_port_profile_info",
"def originalPublishTime(self, key):\n return self.storage.get(key).metadata.originallyPublished",
"def latest_info():",
"def last_modified(self):\n return self.metadata.last_modified",
"def _get_connection_time(self):\n return self.__connection_time",
"def print_last_stats_entry(self):\n pass",
"def recent(self):\n name = 'RECENT'\n typ, dat = self._untagged_response('OK', [None], name)\n if dat[-1]:\n return typ, dat\n typ, dat = self.noop() # Prod server for response\n return self._untagged_response(typ, dat, name)",
"def last_modified(self):\n return remote_to_local_datetime(self.last_modified_string)",
"async def get_server_time(self):\r\n return await self.client_helper(\"get_server_time\")",
"def getLastTime(self):\n return self.lastTime",
"def _last_updated():\n #TODO: implement\n return datetime.datetime.now()",
"def last_modified(self):\n if self.modified:\n return self.modified\n \n latest = never\n for t in self.__items:\n if t.modified > latest:\n latest = t.modified\n \n self.modified = latest\n return self.modified",
"def server_information(self):",
"def get_last_video_link(self):\n\t\treturn self.video_link",
"def last(self) -> MispEvent:\n return self.list(limit=1, direction='desc')[0]",
"def getPublishDateOfLastReleaseData(self):\n sql = \"SELECT date FROM public.deter_publish_date\"\n \n return self.__execSQL(sql)",
"def getDateOfLastReleaseData(self):\n sql = \"SELECT MAX(date) as date \"\n sql +=\"FROM terrabrasilis.deter_table \"\n sql +=\"WHERE date <= (SELECT date FROM public.deter_publish_date)\"\n\n return self.__execSQL(sql)",
"def get_last_seen(self):\n \n return self.last_seen"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Calling isBlockedIn('IS') should return False when the bridge isn't blocked in Iceland.
|
def test_Bridge_isBlockedIn_IS(self):
self.assertFalse(self.bridge.isBlockedIn('IS'))
|
[
"def test_Bridge_setBlockedIn_IR_address(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('IR', address='179.178.155.140')\n self.assertTrue(self.bridge.isBlockedIn('ir'))\n self.assertFalse(self.bridge.isBlockedIn('cn'))",
"def isNotBlocked(self) -> bool:\n\n return not self.isBlocked",
"def test_Bridge_setBlockedIn_CN_obfs2(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('CN', methodname='obfs2')\n self.assertTrue(self.bridge.isBlockedIn('CN'))",
"def test_Bridge_setBlockedIn_GB_address_port(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n # Should block the obfs4 bridge:\n self.bridge.setBlockedIn('GB', address='179.178.155.140', port=36493)\n self.assertTrue(self.bridge.isBlockedIn('GB'))\n self.assertTrue(self.bridge.isBlockedIn('gb'))\n self.assertTrue(self.bridge.transportIsBlockedIn('GB', 'obfs4'))\n self.assertTrue(self.bridge.addressIsBlockedIn('GB', '179.178.155.140', 36493))\n self.assertFalse(self.bridge.addressIsBlockedIn('gb', '179.178.155.140', 36488))",
"def test_Bridge_getBridgeLine_blocked_and_request_without_block(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('ir')\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNone(line)",
"def is_blocked(self, name):\n return name in self._name2plugin and self._name2plugin[name] is None",
"def check_is_blocked(cls, version):\n return version.is_blocked",
"def isBlocked(self, row, col):\n return (row, col) in self.blockedLocs",
"def isin_bond(self):\n return 'bond' in self.flags",
"def blocked(self):\n return self.__blocked",
"def check_page_blocked(self):\n blocker = self.driver.find_element_by_id(\"blockingDiv\")\n return blocker.is_displayed()",
"def inbound_connections_blocked(self):\n if \"inboundConnectionsBlocked\" in self._prop_dict:\n return self._prop_dict[\"inboundConnectionsBlocked\"]\n else:\n return None",
"def test_Bridge_getBridgeLine_blocked_pt_and_request_without_block_pt(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('ir', methodname=\"obfs3\")\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n request.withPluggableTransportType('obfs3')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNone(line)",
"def is_blocked(self, ip):\n blocked = True\n\n if ip in self.allowed_admin_ips:\n blocked = False\n\n for allowed_range in self.allowed_admin_ip_ranges:\n if ipaddress.ip_address(ip) in ipaddress.ip_network(allowed_range):\n blocked = False\n\n return blocked",
"def is_blocked(mymap, objects, x, y):\n #first see if the map tile itself is blocking\n if mymap[x][y].blocked:\n return True\n #now check for any objects that are blocking\n for item in objects:\n if item.blocks and item.x == x and item.y == y:\n return True\n \n return False",
"def graph_object_is_unmanaged_asset(graph_obj: Dict) -> bool:\r\n return graph_obj.get(\"type\") == \"vm\" and graph_obj.get(\"id\", '').startswith(\"ip:\")",
"def _is_ignored_ip_address():\r\n ignore_ip_addresses = current_app.config['SPLIT_IGNORE_IP_ADDRESSES']\r\n return request.remote_addr in ignore_ip_addresses",
"def _bcastIsOwn(self, host):\n netinfo = NetworkInfo()\n local_addresses = netinfo.get_local_addresses()\n return host in local_addresses",
"def test_Bridge_getBridgeLine_request_without_block_in_IR(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertIn('179.178.155.140:36489', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Calling setBlockedIn('CN', 'obfs2') should mark all obfs2 transports of the bridge as being blocked in CN.
|
def test_Bridge_setBlockedIn_CN_obfs2(self):
self.bridge.updateFromNetworkStatus(self.networkstatus)
self.bridge.updateFromServerDescriptor(self.serverdescriptor)
self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)
self.bridge.setBlockedIn('CN', methodname='obfs2')
self.assertTrue(self.bridge.isBlockedIn('CN'))
|
[
"def test_Bridge_setBlockedIn_GB_address_port(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n # Should block the obfs4 bridge:\n self.bridge.setBlockedIn('GB', address='179.178.155.140', port=36493)\n self.assertTrue(self.bridge.isBlockedIn('GB'))\n self.assertTrue(self.bridge.isBlockedIn('gb'))\n self.assertTrue(self.bridge.transportIsBlockedIn('GB', 'obfs4'))\n self.assertTrue(self.bridge.addressIsBlockedIn('GB', '179.178.155.140', 36493))\n self.assertFalse(self.bridge.addressIsBlockedIn('gb', '179.178.155.140', 36488))",
"def test_Bridge_setBlockedIn_IR_address(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('IR', address='179.178.155.140')\n self.assertTrue(self.bridge.isBlockedIn('ir'))\n self.assertFalse(self.bridge.isBlockedIn('cn'))",
"def test_Bridge_isBlockedIn_IS(self):\n self.assertFalse(self.bridge.isBlockedIn('IS'))",
"def set_conn2bb(self):\n self.conn2bb = [None]*self.mg.mol.natoms\n for bba in self.bb2adj:\n for c,ca in bba.items():\n self.conn2bb[c] = self.abb[ca]\n return",
"def test_Bridge_getBridgeLine_blocked_obfs3_and_request_without_block_obfs4(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('ir', methodname=\"obfs3\")\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n request.withPluggableTransportType('obfs4')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNotNone(line)\n self.assertIn('obfs4', line)\n self.assertIn('179.178.155.140:36493', line)\n self.assertIn('2C3225C4805331025E211F4B6E5BF45C333FDD2C', line)",
"def test_Bridge_getBridgeLine_blocked_pt_and_request_without_block_pt(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('ir', methodname=\"obfs3\")\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n request.withPluggableTransportType('obfs3')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNone(line)",
"def test_Bridge_getBridgeLine_blocked_and_request_without_block(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('ir')\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNone(line)",
"def block_portlets(ob, *args, **kw):\n pl_managers = kw['managers']\n blockstatus = kw['blockstatus']\n for pl_managername, pl_manager in pl_managers.items():\n portletManager = getUtility(IPortletManager, name=pl_managername)\n assignable = getMultiAdapter(\n (ob, portletManager, ), ILocalPortletAssignmentManager)\n assignable.setBlacklistStatus(CONTEXT_CATEGORY, blockstatus)",
"def testBlockableWithImproperGlobalWhitelistRule(self):\n santa_blockable = test_utils.CreateSantaBlockable()\n santa_blockable.state = constants.STATE.UNTRUSTED\n santa_blockable.put()\n\n test_rule = rule_models.SantaRule(\n parent=santa_blockable.key,\n rule_type=constants.RULE_TYPE.BINARY,\n policy=constants.RULE_POLICY.WHITELIST,\n in_effect=True)\n test_rule.put()\n\n ballot_box = api.SantaBallotBox(santa_blockable.key.id())\n ballot_box.blockable = santa_blockable\n\n ballot_box._CheckRules()\n\n rule_query = rule_models.SantaRule.query()\n\n self.assertEqual(rule_query.count(), 1)\n\n rule = rule_query.get()\n\n self.assertFalse(rule.in_effect)",
"def update_fabric_network_to_inband(module, switch, task, msg):\n output = ''\n cli = pn_cli(module)\n clicopy = cli\n\n cli = clicopy\n cli += ' fabric-info format fabric-network '\n fabric_network = run_command(module, cli, task, msg).split()[1]\n if fabric_network != 'in-band':\n cli = clicopy\n cli += ' switch ' + switch\n cli += ' fabric-local-modify fabric-network in-band '\n run_command(module, cli, task, msg)\n\n output += ' %s: Updated fabric network to in-band \\n' % switch\n\n return output",
"def update_syllabus_blacklist(sender, instance, **kwargs):\n syllabi = Syllabus.objects.filter(\n course_instance__instructors=instance.instructor)\n if instance.permission_allowed is False:\n syllabi.exclude(blacklisted=True).update(blacklisted=True)\n else:\n for syllabus in syllabi:\n if syllabus.has_permission():\n syllabus.blacklisted = False\n syllabus.save()",
"def testBlockableWithImproperBlacklistRule(self):\n santa_blockable = test_utils.CreateSantaBlockable()\n santa_blockable.state = constants.STATE.UNTRUSTED\n santa_blockable.put()\n\n test_rule = rule_models.SantaRule(\n parent=santa_blockable.key,\n rule_type=constants.RULE_TYPE.BINARY,\n policy=constants.RULE_POLICY.BLACKLIST,\n in_effect=True)\n test_rule.put()\n\n ballot_box = api.SantaBallotBox(santa_blockable.key.id())\n ballot_box.blockable = santa_blockable\n\n ballot_box._CheckRules()\n\n rule_query = rule_models.SantaRule.query()\n\n self.assertEqual(rule_query.count(), 1)\n\n rule = rule_query.get()\n\n self.assertFalse(rule.in_effect)",
"def blacklist(self, peer, query):\n self.checkstat(\"blacklist\")",
"def set_atom2bb(self):\n self.abb = [None]*self.mg.mol.natoms\n for ibb, bb in enumerate(self.mg.clusters):\n for ia in bb:\n self.abb[ia] = ibb\n return",
"def open(self, use_10bit_address=False):\n if not os.path.exists('/dev/i2c-%i' % self.bus_num):\n cape_manager.load('BB-I2C%i' % self.hw_bus, auto_unload=False)\n bbio.common.delay(10)\n # Make sure it initialized correctly:\n assert os.path.exists('/dev/i2c-%i' % self.bus_num), \\\n 'could not enable I2C bus %i' % self.hw_bus\n\n super(I2CBus, self).open(use_10bit_address=use_10bit_address)",
"def blocked_items(self, blocked_items):\n\n self._blocked_items = blocked_items",
"def testUntrustedBlockableWithImproperGlobalWhitelistRules(self):\n santa_blockable = test_utils.CreateSantaBlockable()\n santa_blockable.state = constants.STATE.UNTRUSTED\n santa_blockable.put()\n\n test_rule = rule_models.SantaRule(\n parent=santa_blockable.key,\n rule_type=constants.RULE_TYPE.BINARY,\n policy=constants.RULE_POLICY.WHITELIST,\n in_effect=True)\n test_rule.put()\n\n ballot_box = api.SantaBallotBox(santa_blockable.key.id())\n ballot_box.blockable = santa_blockable\n\n ballot_box._CheckRules()\n\n rule_query = rule_models.SantaRule.query()\n\n self.assertEqual(rule_query.count(), 1)\n\n rule = rule_query.get()\n\n self.assertFalse(rule.in_effect)",
"def reset_connections_to_barrier_and_firewall(self):\n # undo all connections\n for loadpath in self.listLoadpaths:\n for component in loadpath.listComponents:\n component.connectedToBarrier = False\n component.connectedToFirewall = False\n component.leftNode.onBarrier = False\n component.leftNode.onFirewall = False\n component.rightNode.onBarrier = False\n component.rightNode.onFirewall = False\n for crossComp in self.listCrossComponents:\n crossComp.connectedToBarrier = False\n crossComp.connectedToFirewall = False\n crossComp.leftNode.onBarrier = False\n crossComp.leftNode.onFirewall = False\n crossComp.rightNode.onBarrier = False\n crossComp.rightNode.onFirewall = False\n # redo all connections\n for loadpath in self.listLoadpaths:\n leftLimit = min(comp.leftNode.position\n for comp in loadpath.listComponents)\n rightLimit = max(comp.rightNode.position\n for comp in loadpath.listComponents)\n frontNodes = [comp.leftNode\n for comp in loadpath.listComponents\n if comp.leftNode.position == leftLimit]\n backNodes = [comp.rightNode\n for comp in loadpath.listComponents\n if comp.rightNode.position == rightLimit]\n for frontNode in frontNodes:\n frontNode.onBarrier = True\n for comp in frontNode.towardsFirewall:\n comp.link_to_barrier()\n for backNode in backNodes:\n backNode.onFirewall = True\n for comp in backNode.towardsBarrier:\n comp.link_to_firewall()\n self.draw() ##",
"def b2(self, b2):\n\n self._b2 = b2"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Calling setBlockedIn('IR', address) should mark all matching addresses of the bridge as being blocked in IR.
|
def test_Bridge_setBlockedIn_IR_address(self):
self.bridge.updateFromNetworkStatus(self.networkstatus)
self.bridge.updateFromServerDescriptor(self.serverdescriptor)
self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)
self.bridge.setBlockedIn('IR', address='179.178.155.140')
self.assertTrue(self.bridge.isBlockedIn('ir'))
self.assertFalse(self.bridge.isBlockedIn('cn'))
|
[
"def test_Bridge_setBlockedIn_GB_address_port(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n # Should block the obfs4 bridge:\n self.bridge.setBlockedIn('GB', address='179.178.155.140', port=36493)\n self.assertTrue(self.bridge.isBlockedIn('GB'))\n self.assertTrue(self.bridge.isBlockedIn('gb'))\n self.assertTrue(self.bridge.transportIsBlockedIn('GB', 'obfs4'))\n self.assertTrue(self.bridge.addressIsBlockedIn('GB', '179.178.155.140', 36493))\n self.assertFalse(self.bridge.addressIsBlockedIn('gb', '179.178.155.140', 36488))",
"def test_Bridge_isBlockedIn_IS(self):\n self.assertFalse(self.bridge.isBlockedIn('IS'))",
"def test_Bridge_setBlockedIn_CN_obfs2(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('CN', methodname='obfs2')\n self.assertTrue(self.bridge.isBlockedIn('CN'))",
"def set_ir_filter(self, address):\n address = int(address, 0)\n (status, null) = self.__device.set_ir_filter(address)\n self.__device.decode_error_status(status, cmd='set_ir_filter(%d)' % address, print_on_error=True)",
"def blocked_items(self, blocked_items):\n\n self._blocked_items = blocked_items",
"def set_interrupt(self, interrupt_mask):\n self.ipcon.send_request(self, BrickletIO4.FUNCTION_SET_INTERRUPT, (interrupt_mask,), 'B', '')",
"def test_Bridge_getBridgeLine_blocked_and_request_without_block(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('ir')\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNone(line)",
"async def blacklist_add(self, ctx: commands.Context, *, ip):\r\n async with self.config.blacklisted() as data:\r\n data.append(ip)\r\n await ctx.tick()",
"def SetIsMine(self, unused_value):\n raise endpoints.BadRequestException('isMine can\\'t be set.')",
"def blocked(message):\n status_set(WorkloadState.BLOCKED, message)",
"def setBanned(cls, account, equipment, email, reason=None, registry=DEFAULT_ACLS_REGISTRY):\n cls.setRule(account, equipment, email, EquipmentACL.banned(), reason, registry)",
"def set_region_of_interests(self, rois):\n self._region_of_interests = rois",
"def blacklist(self):\n self.blacklisted = True\n self.save()",
"def blacklist(self, peer, query):\n self.checkstat(\"blacklist\")",
"def on_read_ip_range(start, end):\n self.blocklist.add_rule(start, end, BLOCK_RANGE)\n self.num_blocked += 1",
"def addBlocked(self, row, col):\n self.blockedLocs.add( (row, col) )",
"def network_in(self, network_in):\n\n self._network_in = network_in",
"def setIgnored(self, ignore: 'SbBool') -> \"void\":\n return _coin.SoField_setIgnored(self, ignore)",
"def test_Bridge_getBridgeLine_blocked_pt_and_request_without_block_pt(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('ir', methodname=\"obfs3\")\n\n request = BridgeRequestBase()\n request.isValid(True)\n request.withoutBlockInCountry('IR')\n request.withPluggableTransportType('obfs3')\n line = self.bridge.getBridgeLine(request)\n\n self.assertIsNone(line)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Calling setBlockedIn('GB', address, port) should mark all matching addresses and ports of the bridge as being blocked in GB.
|
def test_Bridge_setBlockedIn_GB_address_port(self):
self.bridge.updateFromNetworkStatus(self.networkstatus)
self.bridge.updateFromServerDescriptor(self.serverdescriptor)
self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)
# Should block the obfs4 bridge:
self.bridge.setBlockedIn('GB', address='179.178.155.140', port=36493)
self.assertTrue(self.bridge.isBlockedIn('GB'))
self.assertTrue(self.bridge.isBlockedIn('gb'))
self.assertTrue(self.bridge.transportIsBlockedIn('GB', 'obfs4'))
self.assertTrue(self.bridge.addressIsBlockedIn('GB', '179.178.155.140', 36493))
self.assertFalse(self.bridge.addressIsBlockedIn('gb', '179.178.155.140', 36488))
|
[
"def test_Bridge_setBlockedIn_IR_address(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('IR', address='179.178.155.140')\n self.assertTrue(self.bridge.isBlockedIn('ir'))\n self.assertFalse(self.bridge.isBlockedIn('cn'))",
"def whitelist(self, peer, query):\n self.checkstat(\"whitelist\")",
"def blacklist(self, peer, query):\n self.checkstat(\"blacklist\")",
"def block_ip():\r\n\r\n\tdata=temp_data.get()\r\n\tall_ips=data.split(\";\")\r\n\tfile_ips=[]\r\n\twith open(\"fw_rules.txt\",\"r\") as fw:\r\n\t\tfile_data=fw.read()\r\n\t\tfile_ips=file_data.split('\\n')\r\n\twith open(\"fw_rules.txt\",\"a+\") as fw:\r\n\t\tfor i in all_ips:\r\n\t\t\ti=socket.gethostbyname(i)\r\n\t\t\tprint (i)\r\n\t\t\tif i not in file_ips:\r\n\t\t\t\tfw.write('\\n'+i)",
"def on_read_ip_range(start, end):\n self.blocklist.add_rule(start, end, BLOCK_RANGE)\n self.num_blocked += 1",
"def test_Bridge_isBlockedIn_IS(self):\n self.assertFalse(self.bridge.isBlockedIn('IS'))",
"def test_request_blocked(self):\n # Arrange\n self.ruleset = Callback(lambda ip: False)\n self.ipfilter = IPFilter(self.app, ruleset=self.ruleset)\n\n # Act\n response = self.client.get(\"/\", environ_base=self.request_env)\n\n # Assert\n self.assertEqual(response.status_code, 403)",
"def test_Bridge_setBlockedIn_CN_obfs2(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.bridge.setBlockedIn('CN', methodname='obfs2')\n self.assertTrue(self.bridge.isBlockedIn('CN'))",
"def block():\n\n class RouteExistsError(Exception):\n \"\"\"Exception for when trying to insert a route that already exists.\"\"\"\n pass\n\n routing_table = get_routingtable()\n blocklist = db_read(DB_FILE)\n\n # process the WHITELIST entries\n whitelisted = []\n for entry in WHITELIST:\n if '/' in entry:\n # assume it's a network\n whitelisted.append(ipaddress.ip_network(entry))\n else:\n # single IP address\n whitelisted.append(ipaddress.ip_address(entry))\n\n # add IPs from logfile to our blocklist\n for ip_addr, attempts in getfailed_logins(SOURCE_LOG).items():\n # ignore addresses configured in WHITELIST\n skip = False\n ip_obj = ipaddress.ip_address(ip_addr)\n for item in whitelisted:\n if isinstance(item, (ipaddress.IPv4Address,\n ipaddress.IPv6Address)):\n if ip_obj == item:\n print(\"IP from Logfile ({}) is whitelisted\".format(ip_obj))\n skip = True\n break\n\n elif isinstance(item, (ipaddress.IPv4Network,\n ipaddress.IPv6Network)):\n if ip_obj in item:\n print(\"IP from Logfile ({}) is whitelisted via network {}\"\n .format(ip_obj, item))\n skip = True\n break\n\n # we found a whitelisted address; skip processing it\n if skip:\n continue\n\n if ip_addr in blocklist:\n # ignore ip addresses from log file if already in our blockist\n continue\n\n if len(attempts) >= BAN_THRESHOLD:\n blocklist[ip_addr] = datetime.strftime(datetime.now(),\n \"%Y %b %d %H:%M:%S\")\n else:\n if VERBOSE:\n print(\"{} number of connection attempts below threshold\"\n .format(ip_addr),\n \"({}<{}). Not blocking.\"\n .format(len(attempts), BAN_THRESHOLD))\n\n # then iterate over the IPs in the resulting blocklist and create routes\n for ip_addr in blocklist:\n try:\n for route in routing_table:\n if ip_addr in route:\n raise RouteExistsError(ip_addr)\n if VERBOSE:\n print(\"Blocking IP (blocklist)\" + ip_addr)\n blackhole(\"add\", ip_addr)\n except RouteExistsError as err:\n if VERBOSE:\n print(str(err) + \" is already blackholed\")\n # finally save the block list in its current state\n db_store(blocklist)",
"def add_ban(self, mask):\n mask = mask.lower()\n if mask in self.banned:\n return\n\n self.banned.add(mask)\n self.bot.db.set_plugin_value(PLUGIN, 'banned', list(self.banned))\n self.banned_re = self.re_join(sopel.tools.get_hostmask_regex(b).pattern for b in self.banned)",
"async def blacklist_add(self, ctx: commands.Context, *, ip):\r\n async with self.config.blacklisted() as data:\r\n data.append(ip)\r\n await ctx.tick()",
"def SetIsMine(self, unused_value):\n raise endpoints.BadRequestException('isMine can\\'t be set.')",
"def block_numbers():\n print 'Blocking numbers'\n client = create_client()\n result = client.block_numbers([\"+61412345678\"])\n\n print 'Blocked: %d, failed: %d' % (result._blocked, result._failed)",
"def blocked(message):\n status_set(WorkloadState.BLOCKED, message)",
"async def blacklist_view(self, ctx: commands.Context):\r\n blacklisted = await self.config.blacklisted() or [\"None\"]\r\n await ctx.author.send(\r\n f\"The following IP addresses are blocked: {humanize_list(blacklisted)}\"\r\n )",
"def block_portlets(ob, *args, **kw):\n pl_managers = kw['managers']\n blockstatus = kw['blockstatus']\n for pl_managername, pl_manager in pl_managers.items():\n portletManager = getUtility(IPortletManager, name=pl_managername)\n assignable = getMultiAdapter(\n (ob, portletManager, ), ILocalPortletAssignmentManager)\n assignable.setBlacklistStatus(CONTEXT_CATEGORY, blockstatus)",
"def testBlockableWithImproperGlobalWhitelistRule(self):\n santa_blockable = test_utils.CreateSantaBlockable()\n santa_blockable.state = constants.STATE.UNTRUSTED\n santa_blockable.put()\n\n test_rule = rule_models.SantaRule(\n parent=santa_blockable.key,\n rule_type=constants.RULE_TYPE.BINARY,\n policy=constants.RULE_POLICY.WHITELIST,\n in_effect=True)\n test_rule.put()\n\n ballot_box = api.SantaBallotBox(santa_blockable.key.id())\n ballot_box.blockable = santa_blockable\n\n ballot_box._CheckRules()\n\n rule_query = rule_models.SantaRule.query()\n\n self.assertEqual(rule_query.count(), 1)\n\n rule = rule_query.get()\n\n self.assertFalse(rule.in_effect)",
"def setBanned(cls, account, equipment, email, reason=None, registry=DEFAULT_ACLS_REGISTRY):\n cls.setRule(account, equipment, email, EquipmentACL.banned(), reason, registry)",
"def new_address(self, name, address):\n if address not in self.ip_addresses:\n if any([regex.findall(name) for regex in self.regex_set]):\n self.ip_addresses.update([address])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Send an ARP reply. reply_to is a PacketIn event corresponding to an ARP request. mac is the MAC address to reply with. src_mac is the MAC address that the reply comes from (the L2 address).
|
def send_arp_reply (reply_to, mac, src_mac = None):
if mac is False:
mac = reply_to.connection.eth_addr
elif mac is True:
mac = reply_to.connection.ports[reply_to.port].hw_addr
mac = EthAddr(mac)
if src_mac is None:
src_mac = mac
elif src_mac is False:
src_mac = reply_to.connection.eth_addr
elif src_mac is True:
src_mac = reply_to.connection.ports[reply_to.port].hw_addr
src_mac = EthAddr(src_mac)
arpp = reply_to.parsed.find('arp')
r = arp()
r.opcode = r.REPLY
r.hwdst = arpp.hwsrc
r.protodst = arpp.protosrc
r.hwsrc = mac
r.protosrc = IPAddr(arpp.protodst)
e = ethernet(type=ethernet.ARP_TYPE, src=src_mac, dst=r.hwdst)
e.payload = r
msg = of.ofp_packet_out()
msg.data = e.pack()
msg.actions.append(of.ofp_action_output(port = reply_to.port))
msg.in_port = of.OFPP_NONE
reply_to.connection.send(msg)
|
[
"def send_arp_reply(reply_to, mac, src_mac=None):\n if mac is False:\n mac = reply_to.connection.eth_addr\n elif mac is True:\n mac = reply_to.connection.ports[reply_to.port].hw_addr\n mac = EthAddr(mac)\n\n if src_mac is None:\n src_mac = mac\n elif src_mac is False:\n src_mac = reply_to.connection.eth_addr\n elif src_mac is True:\n src_mac = reply_to.connection.ports[reply_to.port].hw_addr\n src_mac = EthAddr(src_mac)\n\n arpp = reply_to.parsed.find('arp')\n r = arp()\n r.opcode = r.REPLY\n r.hwdst = arpp.hwsrc\n r.protodst = arpp.protosrc\n r.hwsrc = mac\n r.protosrc = IPAddr(arpp.protodst)\n e = ethernet(type=ethernet.ARP_TYPE, src=src_mac, dst=r.hwdst)\n e.payload = r\n msg = of.ofp_packet_out()\n msg.data = e.pack()\n msg.actions.append(of.ofp_action_output(port=reply_to.port))\n msg.in_port = of.OFPP_NONE\n reply_to.connection.send(msg)",
"def send_arp_reply (self, reply_to, mac, src_mac = _default_mac):\n if src_mac is _default_mac:\n src_mac = self.default_reply_src_mac\n return send_arp_reply(reply_to, mac, src_mac)",
"def answer_arp(self, mac):\n packet = self.event.parsed\n if not isinstance(packet.next, arp): return\n a = packet.next\n if a.opcode == arp.REQUEST:\n r = arp()\n r.hwtype = a.hwtype\n r.prototype = a.prototype\n r.hwlen = a.hwlen\n r.protolen = a.protolen\n r.opcode = arp.REPLY\n r.hwdst = a.hwsrc\n r.protodst = a.protosrc\n r.protosrc = a.protodst\n r.hwsrc = mac\n e = ethernet(type=packet.type, src=mac, dst=a.hwsrc)\n e.set_payload(r)\n # log.debug(\"%i %i answering ARP for %s\" % (dpid, inport,str(r.protosrc)))\n msg = of.ofp_packet_out()\n msg.data = e.pack()\n msg.actions.append(of.ofp_action_output(port = of.OFPP_IN_PORT))\n msg.in_port = self.inport\n self.event.connection.send(msg)",
"def send_arp_request(self, ip_addr):\n\n # Forge de la trame :\n frame = [\n ### ETHERNET header###\n # Destination MAC address (=broadcast) :\n pack(\"!6B\", *(0xFF,) * 6),\n # Source MAC address :\n self.smac,\n # Type of protocol (=ARP) :\n pack(\"!H\", 0x0806),\n ### ARP payload###\n # Type of protocol hw/soft (=Ethernet/IP) :\n pack(\"!HHBB\", 0x0001, 0x0800, 0x0006, 0x0004),\n # Operation (=ARP Request) :\n pack(\"!H\", 0x0001),\n # Source MAC address :\n self.smac,\n # Source IP address :\n int_to_bytes(int(self.sip)),\n # Destination MAC address (what we are looking for) (=00*6) :\n pack(\"!6B\", *(0,) * 6),\n # Target IP address:\n int_to_bytes(int(ip_addr)),\n ]\n\n self.transport.write(b\"\".join(frame)) # Sending",
"def test_arp_reply_from_host(self):\n arp_replies = self.rcv_packet(\n 1,\n 0x100,\n {\n \"eth_src\": self.P1_V100_MAC,\n \"eth_dst\": FAUCET_MAC,\n \"arp_code\": arp.ARP_REPLY,\n \"arp_source_ip\": \"10.0.0.1\",\n \"arp_target_ip\": \"10.0.0.254\",\n },\n )[self.DP_ID]\n self.assertTrue(arp_replies)\n self.assertFalse(ValveTestBases.packet_outs_from_flows(arp_replies))",
"def send_fake_arp_replay(self, target_ip, imposter_ip, target_mac):\n my_mac = get_if_hwaddr(self.iface)\n fake_arp_replay = Ether(src=my_mac, dst=target_mac) / ARP(op=2, psrc=imposter_ip, hwsrc=my_mac, pdst=target_ip,\n hwdst=target_mac)\n sendp(fake_arp_replay, verbose=False, iface=self.iface)",
"def parse_arp_packet(eth):\n a = eth.data\n if a.op == dpkt.arp.ARP_OP_REPLY and a.hln == 6 and a.pln == 4:\n ip = format_ip(a.spa)\n mac = format_mac(a.sha)\n update_arp(mac, ip)\n\n ip = format_ip(a.tpa)\n mac = format_mac(a.tha)\n update_arp(mac, ip)\n\n pass",
"def incoming_reply(pkt):\n return pkt[ARP].psrc != str(get_if_addr(conf.iface)) and pkt[ARP].op == 2",
"def reply(self, reply=None, failure=None, log_failure=True):\n if self._reply_to:\n response = marshal_response(reply=reply, failure=failure)\n response.correlation_id = self._correlation_id\n LOG.debug(\"Replying to %s\", self._correlation_id)\n task = ReplyTask(self._reply_to, response, log_failure)\n self.listener.driver._ctrl.add_task(task)\n else:\n LOG.debug(\"Ignoring reply as no reply address available\")",
"def process(self, pkt):\n if ARP in pkt:\n # build arp replay, imposter to imposter\n self.send_fake_arp_replay(pkt[ARP].psrc, pkt[ARP].pdst, pkt[ARP].hwsrc)\n else:\n # build icmp echo-replay, imposter to imposter\n my_mac = get_if_hwaddr(self.iface)\n fake_echo_replay = Ether(src=my_mac, dst=pkt[Ether].src) / IP(src=pkt[IP].dst, dst=pkt[IP].src) / ICMP()\n fake_echo_replay[ICMP].type = 0\n\n if Padding in pkt: # if the target send also a padding - we return in too\n fake_echo_replay /= pkt[Padding]\n\n # send the fake replay back:\n sendp(fake_echo_replay, verbose=False, iface=self.iface)",
"def spoof(target_ip=\"10.0.2.1\", target_mac=\"\", des_ip=\"10.0.2.1\"):\n # Op=2 para response, no request\n # pdst=\"10.0.2.15\" .. la dir ip target\n # hwdst=\"08:00:27:e7:53:c8\" target mac\n # psrc=\"10.0.2.1\" router ip\n if target_mac == \"\":\n target_mac = scan(target_ip)[0][\"mac\"]\n\n catch = scapy.ARP(op=2, pdst=target_ip, hwdst=target_mac, psrc=des_ip)\n scapy.send(catch, verbose=False)",
"def test_bogon_arp_for_controller(self):\n replies = self.rcv_packet(\n 1,\n 0x100,\n {\n \"eth_src\": self.P1_V100_MAC,\n \"eth_dst\": mac.BROADCAST_STR,\n \"arp_code\": arp.ARP_REQUEST,\n \"arp_source_ip\": \"8.8.8.8\",\n \"arp_target_ip\": \"10.0.0.254\",\n },\n )[self.DP_ID]\n # Must be no ARP reply to an ARP request not in our subnet.\n self.assertFalse(ValveTestBases.packet_outs_from_flows(replies))",
"def arp(cmd, *args, **argv):\n \n def arp_show():\n from _arp_deal import arp as show_arp\n arplists = show_arp()\n\tfrom _prettytable import PrettyTable\n\tarp_t = PrettyTable([\"IPadress\", \"HWaddress\", \"Flags\", \"Iface\"])\n\tarp_t.align = \"l\"\n\tarp_t.padding_width = 1\n\tarp_t.border = False\n\tfor arplist in arplists:\n arp_t.add_row(arplist)\n\t\n context.write(\"%s\" % arp_t)\n \n \n context = argv[\"context\"]\n\n leng = len(args)\n if leng:\n cmd_next = args[0]\n args = list(args)\n args.pop(0)\n context.func_next(cmd, cmd_next, args)\n\n else:\n arp_show()",
"def gen_arp_response(target_ip, spoof_ip):\n\n if scan_results := networkscan.get_clients(\n target_ip, 10\n ): # checks to see if the target is reachable on the network\n target = scan_results[0]\n packet = scapy.ARP(\n op=2, # ARP response (op=1 would be ARP request). We are spoofing a request packet\n pdst=target_ip,\n hwdst=target.mac_addr,\n psrc=spoof_ip, # ip adddress we are spoofing (pretending to be)\n )\n return packet",
"def print_arp(pkt):\n if pkt[ARP].op == 1:\n print(pkt[ARP].hwsrc, ' who has ', pkt[ARP].pdst)\n else:\n print(pkt[ARP].psrc, ' is at ', pkt[ARP].hwsrc)",
"def handle_packet(self, rx_bytes, logger):\n ARP_ETHTYPE = b'\\x08\\x06'\n IPv4_ETHTYPE = b'\\x08\\x00'\n IPv6_ETHTYPE = b'\\x86\\xdd'\n ICMP_PROTO = b'\\x01'\n UDP_PROTO = b'\\x11'\n CAPWAP_CTRL_PORT = b'\\x14\\x7e'\n CAPWAP_DATA_PORT = b'\\x14\\x7f'\n WLAN_ASSOC_RESP = b'\\x00\\x10'\n WLAN_DEAUTH = b'\\x00\\xc0'\n WLAN_DEASSOC = b'\\x00\\xa0'\n ARP_REQ = b'\\x00\\x01'\n ARP_REP = b'\\x00\\x02'\n ICMP_REQ = b'\\x08'\n\n def handle_arp():\n def AP_ARP_RESP_TEMPLATE(src_mac, dst_mac, src_ip, dst_ip):\n return (\n dst_mac + src_mac + ARP_ETHTYPE + # Ethernet\n b'\\x00\\x01\\x08\\x00\\x06\\x04\\x00\\x02' + src_mac + src_ip + dst_mac + dst_ip # ARP\n )\n src_ip = rx_bytes[28:32]\n src_ip_str = socket.inet_ntoa(bytes(src_ip))\n dst_ip = rx_bytes[38:42]\n dst_ip_str = socket.inet_ntoa(bytes(dst_ip))\n if src_ip == dst_ip: # GARP\n return\n elif not self.is_ap_ip(dst_ip_str): # check IP\n return\n ap = self._get_ap_by_id(dst_ip_str)\n src_mac = rx_bytes[6:12]\n dst_mac = rx_bytes[:6]\n\n if dst_mac not in (b'\\xff\\xff\\xff\\xff\\xff\\xff', ap.mac_bytes): # check MAC\n logger.warning('Bad MAC (%s) of AP %s' %\n (dst_mac, ap.name))\n return\n\n if rx_bytes[20:22] == ARP_REQ: # 'who-has'\n logger.debug('received ARP who-has')\n tx_pkt = AP_ARP_RESP_TEMPLATE(\n src_mac=ap.mac_bytes,\n dst_mac=src_mac,\n src_ip=dst_ip,\n dst_ip=src_ip,\n )\n self.pkt_pipe.send(tx_pkt)\n\n elif rx_bytes[20:22] == ARP_REP: # 'is-at'\n if src_ip == ap.wlc_ip_bytes:\n # assume response from wlc\n ap.wlc_mac_bytes = src_mac\n ap.wlc_mac = str2mac(src_mac)\n ap.logger.debug(\"received ARP 'is-at\")\n ap._wake_up()\n\n def handle_icmp():\n rx_pkt = Ether(rx_bytes)\n icmp_pkt = rx_pkt[ICMP]\n if icmp_pkt.type == 8: # echo-request\n logger.debug(\"received ping for {}\".format(rx_pkt[IP].dst))\n ap = self._get_ap_by_id(rx_pkt[IP].dst)\n if rx_pkt[IP].dst == ap.ip: # ping to AP\n tx_pkt = rx_pkt.copy()\n tx_pkt.src, tx_pkt.dst = tx_pkt.dst, tx_pkt.src\n tx_pkt[IP].src, tx_pkt[IP].dst = tx_pkt[IP].dst, tx_pkt[IP].src\n tx_pkt[ICMP].type = 'echo-reply'\n del tx_pkt[ICMP].chksum\n self.pkt_pipe.send(bytes(tx_pkt))\n\n def handle_ipv4():\n\n def handle_udp():\n\n def process_capwap_ctrl():\n # do not forward capwap control if not reconstructed\n forward = False\n\n def capwap_reassemble(ap, rx_pkt_buf):\n \"\"\"Return the reassembled packet if 'rx_pkt_buf' is the last fragmented,\n or None if more fragmented packets are expected, or the packet itself if not fragmented.\n The returned packet is a CAPWAP CTRL / PAYLOAD\"\"\"\n capwap_assemble = ap.capwap_assemble\n\n # is_fragment\n if struct.unpack('!B', rx_pkt_buf[3:4])[0] & 0x80:\n rx_pkt = CAPWAP_CTRL(rx_pkt_buf)\n if capwap_assemble:\n assert capwap_assemble[\n 'header'].fragment_id == rx_pkt.header.fragment_id, 'Got CAPWAP fragments with out of order (different fragment ids)'\n control_str = bytes(\n rx_pkt[CAPWAP_Control_Header_Fragment])\n if rx_pkt.header.fragment_offset * 8 != len(capwap_assemble['buf']):\n ap.logger.error(\n 'Fragment offset and data length mismatch')\n capwap_assemble.clear()\n return\n\n capwap_assemble['buf'] += control_str\n\n if rx_pkt.is_last_fragment():\n capwap_assemble['assembled'] = CAPWAP_CTRL(\n header=capwap_assemble['header'],\n control_header=CAPWAP_Control_Header(\n capwap_assemble['buf'])\n )\n else:\n if rx_pkt.is_last_fragment():\n ap.logger.error(\n 'Got CAPWAP first fragment that is also last fragment!')\n return\n if rx_pkt.header.fragment_offset != 0:\n ap.logger.error(\n 'Got out of order CAPWAP fragment, does not start with zero 
offset')\n return\n capwap_assemble['header'] = rx_pkt.header\n capwap_assemble['header'].flags &= ~0b11000\n capwap_assemble['buf'] = bytes(\n rx_pkt[CAPWAP_Control_Header_Fragment])\n capwap_assemble['ap'] = ap\n elif capwap_assemble:\n logger.error(\n 'Got not fragment in middle of assemble of fragments (OOO).')\n capwap_assemble.clear()\n else:\n capwap_assemble['assembled'] = rx_pkt_buf\n return rx_pkt_buf\n\n # forward = False\n\n if (not ap.is_dtls_established or ap.state < APState.DTLS or not ap.wlc_mac_bytes):\n if rx_bytes[42:43] == b'\\0': # capwap header, discovery response\n capwap_bytes = rx_bytes[42:]\n capwap_hlen = (struct.unpack('!B', capwap_bytes[1:2])[\n 0] & 0b11111000) >> 1\n ctrl_header_type = struct.unpack(\n '!B', capwap_bytes[capwap_hlen + 3:capwap_hlen + 4])[0]\n if ctrl_header_type != 2:\n return\n if not ap.wlc_ip:\n ap.wlc_ip_bytes = rx_bytes[26:30]\n ap.wlc_ip = str2ip(ap.wlc_ip_bytes)\n if rx_bytes[26:30] == ap.wlc_ip_bytes:\n ap.wlc_mac_bytes = rx_bytes[6:12]\n ap.wlc_mac = str2mac(ap.wlc_mac_bytes)\n result_code = CAPWAP_PKTS.parse_message_elements(\n capwap_bytes, capwap_hlen, ap, self)\n ap.logger.debug(\n \"received discovery response\")\n ap._wake_up()\n ap.rx_responses[ctrl_header_type] = result_code\n\n elif rx_bytes[42:43] == b'\\1': # capwap dtls header\n # forward message to ap\n logger.debug(\n \"received dtls handshake message destination: %s\" % mac2str(dst_mac))\n try:\n ap.logger.debug(\"packet to service: %s\",\n ap.active_service)\n with self.services_lock:\n self.stl_services[ap.active_service]['pipe']._on_rx_pkt(\n rx_bytes, None)\n except KeyError:\n # no service registered, drop\n pass\n else:\n ap.logger.debug(\n \"dropping non expected packet\")\n if (rx_bytes[46:47] == b'\\x15'): # DTLS alert\n ap.logger.error(\n \"Server sent DTLS alert to AP\")\n ap.got_disconnect = True\n\n return\n\n is_dtls = struct.unpack('?', rx_bytes[42:43])[0]\n if not is_dtls: # dtls is established, ctrl should be encrypted\n ap.logger.error(\n \"received not encrypted capwap control packet, dropping\")\n return\n\n if (rx_bytes[46:47] == b'\\x15'): # DTLS alert\n ap.logger.error(\n \"Server sent DTLS alert to AP\")\n ap.got_disconnect = True\n\n rx_pkt_buf = ap.decrypt(rx_bytes[46:])\n if not rx_pkt_buf:\n return\n # definitely not CAPWAP... 
should we debug it?\n if rx_pkt_buf[0:1] not in (b'\\0', b'\\1'):\n ap.logger.debug('Not CAPWAP, skipping')\n return\n\n ap.last_recv_ts = time.time()\n # get reassembled if needed\n # capwap_assemble = ap.capwap_assemble\n rx_pkt_buf = capwap_reassemble(ap, rx_pkt_buf)\n if not rx_pkt_buf or rx_pkt_buf[0:1] != b'\\0':\n return\n ap.capwap_assemble.clear()\n\n # send to AP services rx_bytes[:46] + rx_pkt_buf\n reconstructed = rx_bytes[:42] + rx_pkt_buf\n # send the last fragmented packet reconstructed, with the last packet's header\n for service in ap.services.values():\n if service.active:\n ap.logger.debug(\n \"forwarding capwap packet to service: {}\".format(service.name))\n service._on_rx_pkt(reconstructed)\n\n capwap_hlen = (struct.unpack('!B', rx_pkt_buf[1:2])[\n 0] & 0b11111000) >> 1\n ctrl_header_type = struct.unpack(\n '!B', rx_pkt_buf[capwap_hlen + 3:capwap_hlen + 4])[0]\n\n if ctrl_header_type == 7: # Configuration Update Request\n\n CAPWAP_PKTS.parse_message_elements(\n rx_pkt_buf, capwap_hlen, ap, self) # get info from incoming packet\n seq = struct.unpack(\n '!B', rx_pkt_buf[capwap_hlen + 4:capwap_hlen + 5])[0]\n tx_pkt = ap.get_config_update_capwap(seq)\n encrypted = ap.encrypt(tx_pkt)\n if encrypted:\n self.pkt_pipe.send(ap.wrap_capwap_pkt(\n b'\\1\\0\\0\\0' + encrypted))\n\n elif ctrl_header_type == 14: # Echo Response\n ap.logger.debug(\"received echo reply\")\n ap.echo_resp_timer = None\n\n elif ctrl_header_type == 17: # Reset Request\n logger.error(\n 'AP %s got Reset request, shutting down' % ap.name)\n ap.got_disconnect = True\n\n elif ctrl_header_type in (4, 6, 12):\n result_code = CAPWAP_PKTS.parse_message_elements(\n rx_pkt_buf, capwap_hlen, ap, self)\n ap.rx_responses[ctrl_header_type] = result_code\n\n else:\n logger.error(\n 'Got unhandled capwap header type: %s' % ctrl_header_type)\n\n def process_capwap_data():\n\n def handle_client_arp():\n ip = dot11_bytes[58:62]\n mac_bytes = dot11_bytes[4:10]\n mac = mac2str(mac_bytes)\n from_mac_bytes = dot11_bytes[10:16]\n client = self._get_client_by_id(mac)\n if not client:\n return\n self.logger.info(\n \"client {} received an arp\".format(mac))\n if not client:\n return\n if client.ap is not ap:\n self.logger.warn('Got ARP to client %s via wrong AP (%s)' %\n (client.mac, ap.name))\n return\n\n if dot11_bytes[40:42] == ARP_REQ: # 'who-has'\n if dot11_bytes[48:52] == dot11_bytes[58:62]: # GARP\n return\n if not hasattr(client, \"ip_bytes\") or not client.ip_bytes:\n return\n tx_pkt = ap.wrap_client_ether_pkt(client, ap.get_arp_pkt(\n 'is-at', src_mac_bytes=client.mac_bytes, src_ip_bytes=client.ip_bytes, dst_ip_bytes=from_mac_bytes))\n self.pkt_pipe.send(tx_pkt)\n\n elif dot11_bytes[40:42] == ARP_REP: # 'is-at'\n client.seen_arp_reply = True\n client.logger.debug(\"received arp reply\")\n ap._wake_up()\n\n def handle_client_icmp():\n mac_bytes = dot11_bytes[4:10]\n mac = mac2str(mac_bytes)\n client = self._get_client_by_id(mac)\n if not client:\n self.logger.error(\"Received ICMP packet for non-existing MAC {}\".format(mac))\n return\n self.logger.info(\n \"client {} received an ICMP\".format(client.mac))\n if client.ap is not ap:\n self.logger.warn('Got ICMP to client %s via wrong AP (%s)' %\n (client.mac, ap.name))\n return\n\n if dot11_bytes[54:55] == ICMP_REQ:\n rx_pkt = Dot11_swapped(dot11_bytes)\n tx_pkt = Ether(src=client.mac, dst=rx_pkt.addr3) / \\\n rx_pkt[IP].copy()\n tx_pkt[IP].src, tx_pkt[IP].dst = tx_pkt[IP].dst, tx_pkt[IP].src\n tx_pkt[ICMP].type = 'echo-reply'\n del tx_pkt[ICMP].chksum\n tx_pkt = 
ap.wrap_client_ether_pkt(client, bytes(tx_pkt))\n self.pkt_pipe.send(tx_pkt)\n\n logger.debug(\"received capwap data\")\n if ord(rx_bytes[45:46]) & 0b1000: # CAPWAP Data Keep-alive\n ap.got_keep_alive = True\n ap.logger.debug(\n \"received CAPWAP Data Keep-alive\")\n ap._wake_up()\n if ap.state >= APState.JOIN:\n assert ap.session_id is not None\n if ap.got_keep_alive:\n if not ap.expect_keep_alive_response:\n # have to respond\n self.pkt_pipe.send(ap.wrap_capwap_pkt(\n CAPWAP_PKTS.keep_alive(ap), dst_port=5247))\n ap.expect_keep_alive_response = True\n else:\n # response to ap's keep alive\n ap.expect_keep_alive_response = False\n else:\n ap.logger.debug(\n \"Received CAPWAP Data Keep-alive for non joined AP\")\n return\n\n dot11_offset = 42 + \\\n ((ord(rx_bytes[43:44]) & 0b11111000) >> 1)\n dot11_bytes = rx_bytes[dot11_offset:]\n\n # assume 802.11 frame for client\n mac_bytes = dot11_bytes[4:10]\n mac = mac2str(mac_bytes)\n\n # send packet to client services that are active\n packet_l3_type = dot11_bytes[32:34]\n try:\n dest_client = self.client_by_id[mac]\n for service in dest_client.services.values():\n if service.active:\n dest_client.logger.debug(\n \"forwarding packet of type {} to service: {}\".format(packet_l3_type, service.name))\n service._on_rx_pkt(dot11_bytes)\n except KeyError:\n # non local client\n pass\n\n if packet_l3_type == ARP_ETHTYPE:\n handle_client_arp()\n\n elif packet_l3_type == IPv4_ETHTYPE and dot11_bytes[43:44] == ICMP_PROTO:\n handle_client_icmp()\n\n udp_port_str = rx_bytes[36:38]\n udp_src = rx_bytes[34:36]\n\n if udp_src == CAPWAP_CTRL_PORT:\n process_capwap_ctrl()\n elif udp_src == CAPWAP_DATA_PORT:\n process_capwap_data()\n return\n\n ip = rx_bytes[30:34] # destination ip (ap)\n ip_str = socket.inet_ntoa(bytes(ip))\n if not self.is_ap_ip(ip_str): # check IP\n return\n ap = self._get_ap_by_id(ip_str)\n dst_mac = rx_bytes[:6]\n if dst_mac not in ('\\xff\\xff\\xff\\xff\\xff\\xff', ap.mac_bytes): # check MAC\n logger.warning('dropped packet: bad MAC (%s), although IP of AP (%s)' % (\n str2mac(dst_mac), str2ip(ip)))\n return\n\n ip_proto = rx_bytes[23:24]\n\n # demultiplex layer-4 protocol\n if ip_proto == ICMP_PROTO:\n handle_icmp()\n elif ip_proto == UDP_PROTO:\n handle_udp()\n else:\n # drop\n logger.debug(\n 'dropped packet: layer-4 protocol not supported: {}'.format(ip_proto))\n return\n\n # by default, forward to AP services, disabled for fragmented capwap control\n # (forwarding the reconstructed packet)\n forward = True\n\n ether_type = rx_bytes[12:14]\n\n # demultiplex layer-3 protocol\n if ether_type == ARP_ETHTYPE:\n handle_arp()\n elif ether_type == IPv4_ETHTYPE:\n handle_ipv4()\n else:\n logger.debug(\n 'dropped packet: layer-3 protocol not supported: {}'.format(ether_type))\n\n # forwarding to ap services\n if forward:\n try:\n mac = mac2str(rx_bytes[:6])\n ap = self.ap_by_mac[mac]\n\n for service in ap.services.values():\n if service.active:\n ap.logger.debug(\n \"forwarding packet to service: {}\".format(service.name))\n service._on_rx_pkt(rx_bytes)\n except KeyError:\n # non local ap\n pass",
"def configure_arp_entry(self, destination_ip=None, **kwargs):\n pass",
"def outgoing_req(pkt):\n return pkt[ARP].psrc == str(get_if_addr(conf.iface)) and pkt[ARP].op == 1",
"def get_arp_table(self):\n\n output = self.device.send_command('show arpentry')\n parser_regexp = (\"(?P<interface>^\\w+)\\s+\"\n \"(?P<ip>\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})\\s+\"\n \"(?P<mac>([0-9A-F]{2}[:-]){5}([0-9A-F]{2}))\\s+\"\n \"(?P<type>(\\w+(/\\w+)*))\")\n\n return self._parse_output(output, parser_regexp)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Send an ARP reply. reply_to is an ARP request PacketIn event. mac is the MAC address to reply with, True for the port MAC or False for the "DPID MAC". src_mac can be a MAC, True/False as above, None to use "mac", or if unspecified, defaults to self.default_src_mac.
|
def send_arp_reply (self, reply_to, mac, src_mac = _default_mac):
if src_mac is _default_mac:
src_mac = self.default_reply_src_mac
return send_arp_reply(reply_to, mac, src_mac)
|
[
"def send_arp_reply(reply_to, mac, src_mac=None):\n if mac is False:\n mac = reply_to.connection.eth_addr\n elif mac is True:\n mac = reply_to.connection.ports[reply_to.port].hw_addr\n mac = EthAddr(mac)\n\n if src_mac is None:\n src_mac = mac\n elif src_mac is False:\n src_mac = reply_to.connection.eth_addr\n elif src_mac is True:\n src_mac = reply_to.connection.ports[reply_to.port].hw_addr\n src_mac = EthAddr(src_mac)\n\n arpp = reply_to.parsed.find('arp')\n r = arp()\n r.opcode = r.REPLY\n r.hwdst = arpp.hwsrc\n r.protodst = arpp.protosrc\n r.hwsrc = mac\n r.protosrc = IPAddr(arpp.protodst)\n e = ethernet(type=ethernet.ARP_TYPE, src=src_mac, dst=r.hwdst)\n e.payload = r\n msg = of.ofp_packet_out()\n msg.data = e.pack()\n msg.actions.append(of.ofp_action_output(port=reply_to.port))\n msg.in_port = of.OFPP_NONE\n reply_to.connection.send(msg)",
"def send_arp_reply (reply_to, mac, src_mac = None):\n if mac is False:\n mac = reply_to.connection.eth_addr\n elif mac is True:\n mac = reply_to.connection.ports[reply_to.port].hw_addr\n mac = EthAddr(mac)\n\n if src_mac is None:\n src_mac = mac\n elif src_mac is False:\n src_mac = reply_to.connection.eth_addr\n elif src_mac is True:\n src_mac = reply_to.connection.ports[reply_to.port].hw_addr\n src_mac = EthAddr(src_mac)\n\n arpp = reply_to.parsed.find('arp')\n r = arp()\n r.opcode = r.REPLY\n r.hwdst = arpp.hwsrc\n r.protodst = arpp.protosrc\n r.hwsrc = mac\n r.protosrc = IPAddr(arpp.protodst)\n e = ethernet(type=ethernet.ARP_TYPE, src=src_mac, dst=r.hwdst)\n e.payload = r\n msg = of.ofp_packet_out()\n msg.data = e.pack()\n msg.actions.append(of.ofp_action_output(port = reply_to.port))\n msg.in_port = of.OFPP_NONE\n reply_to.connection.send(msg)",
"def answer_arp(self, mac):\n packet = self.event.parsed\n if not isinstance(packet.next, arp): return\n a = packet.next\n if a.opcode == arp.REQUEST:\n r = arp()\n r.hwtype = a.hwtype\n r.prototype = a.prototype\n r.hwlen = a.hwlen\n r.protolen = a.protolen\n r.opcode = arp.REPLY\n r.hwdst = a.hwsrc\n r.protodst = a.protosrc\n r.protosrc = a.protodst\n r.hwsrc = mac\n e = ethernet(type=packet.type, src=mac, dst=a.hwsrc)\n e.set_payload(r)\n # log.debug(\"%i %i answering ARP for %s\" % (dpid, inport,str(r.protosrc)))\n msg = of.ofp_packet_out()\n msg.data = e.pack()\n msg.actions.append(of.ofp_action_output(port = of.OFPP_IN_PORT))\n msg.in_port = self.inport\n self.event.connection.send(msg)",
"def send_fake_arp_replay(self, target_ip, imposter_ip, target_mac):\n my_mac = get_if_hwaddr(self.iface)\n fake_arp_replay = Ether(src=my_mac, dst=target_mac) / ARP(op=2, psrc=imposter_ip, hwsrc=my_mac, pdst=target_ip,\n hwdst=target_mac)\n sendp(fake_arp_replay, verbose=False, iface=self.iface)",
"def test_arp_reply_from_host(self):\n arp_replies = self.rcv_packet(\n 1,\n 0x100,\n {\n \"eth_src\": self.P1_V100_MAC,\n \"eth_dst\": FAUCET_MAC,\n \"arp_code\": arp.ARP_REPLY,\n \"arp_source_ip\": \"10.0.0.1\",\n \"arp_target_ip\": \"10.0.0.254\",\n },\n )[self.DP_ID]\n self.assertTrue(arp_replies)\n self.assertFalse(ValveTestBases.packet_outs_from_flows(arp_replies))",
"def send_arp_request(self, ip_addr):\n\n # Forge de la trame :\n frame = [\n ### ETHERNET header###\n # Destination MAC address (=broadcast) :\n pack(\"!6B\", *(0xFF,) * 6),\n # Source MAC address :\n self.smac,\n # Type of protocol (=ARP) :\n pack(\"!H\", 0x0806),\n ### ARP payload###\n # Type of protocol hw/soft (=Ethernet/IP) :\n pack(\"!HHBB\", 0x0001, 0x0800, 0x0006, 0x0004),\n # Operation (=ARP Request) :\n pack(\"!H\", 0x0001),\n # Source MAC address :\n self.smac,\n # Source IP address :\n int_to_bytes(int(self.sip)),\n # Destination MAC address (what we are looking for) (=00*6) :\n pack(\"!6B\", *(0,) * 6),\n # Target IP address:\n int_to_bytes(int(ip_addr)),\n ]\n\n self.transport.write(b\"\".join(frame)) # Sending",
"def incoming_reply(pkt):\n return pkt[ARP].psrc != str(get_if_addr(conf.iface)) and pkt[ARP].op == 2",
"def parse_arp_packet(eth):\n a = eth.data\n if a.op == dpkt.arp.ARP_OP_REPLY and a.hln == 6 and a.pln == 4:\n ip = format_ip(a.spa)\n mac = format_mac(a.sha)\n update_arp(mac, ip)\n\n ip = format_ip(a.tpa)\n mac = format_mac(a.tha)\n update_arp(mac, ip)\n\n pass",
"def process(self, pkt):\n if ARP in pkt:\n # build arp replay, imposter to imposter\n self.send_fake_arp_replay(pkt[ARP].psrc, pkt[ARP].pdst, pkt[ARP].hwsrc)\n else:\n # build icmp echo-replay, imposter to imposter\n my_mac = get_if_hwaddr(self.iface)\n fake_echo_replay = Ether(src=my_mac, dst=pkt[Ether].src) / IP(src=pkt[IP].dst, dst=pkt[IP].src) / ICMP()\n fake_echo_replay[ICMP].type = 0\n\n if Padding in pkt: # if the target send also a padding - we return in too\n fake_echo_replay /= pkt[Padding]\n\n # send the fake replay back:\n sendp(fake_echo_replay, verbose=False, iface=self.iface)",
"def spoof(target_ip=\"10.0.2.1\", target_mac=\"\", des_ip=\"10.0.2.1\"):\n # Op=2 para response, no request\n # pdst=\"10.0.2.15\" .. la dir ip target\n # hwdst=\"08:00:27:e7:53:c8\" target mac\n # psrc=\"10.0.2.1\" router ip\n if target_mac == \"\":\n target_mac = scan(target_ip)[0][\"mac\"]\n\n catch = scapy.ARP(op=2, pdst=target_ip, hwdst=target_mac, psrc=des_ip)\n scapy.send(catch, verbose=False)",
"def put_src_mac(self, src_mac):\n if(len(src_mac.split(':')) != 6):\n print \"Invalid src_mac format\"\n print \"Example valid SRC MAC:\"\n print \" 02:EA:00:00:00:01\"\n return\n (status, null) = self.__device.put_src_mac(src_mac)",
"def configure_arp_entry(self, destination_ip=None, **kwargs):\n pass",
"def reply(self, reply=None, failure=None, log_failure=True):\n if self._reply_to:\n response = marshal_response(reply=reply, failure=failure)\n response.correlation_id = self._correlation_id\n LOG.debug(\"Replying to %s\", self._correlation_id)\n task = ReplyTask(self._reply_to, response, log_failure)\n self.listener.driver._ctrl.add_task(task)\n else:\n LOG.debug(\"Ignoring reply as no reply address available\")",
"def outgoing_req(pkt):\n return pkt[ARP].psrc == str(get_if_addr(conf.iface)) and pkt[ARP].op == 1",
"def test_ip_input_icmp_reply(self):\n #\n # hop limit - ICMP replies\n #\n p_version = (\n Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac)\n / IPv6(src=self.pg0.remote_ip6, dst=self.pg1.remote_ip6, hlim=1)\n / inet6.UDP(sport=1234, dport=1234)\n / Raw(b\"\\xa5\" * 100)\n )\n\n rxs = self.send_and_expect_some(self.pg0, p_version * NUM_PKTS, self.pg0)\n\n for rx in rxs:\n icmp = rx[ICMPv6TimeExceeded]\n # 0: \"hop limit exceeded in transit\",\n self.assertEqual((icmp.type, icmp.code), (3, 0))",
"def print_arp(pkt):\n if pkt[ARP].op == 1:\n print(pkt[ARP].hwsrc, ' who has ', pkt[ARP].pdst)\n else:\n print(pkt[ARP].psrc, ' is at ', pkt[ARP].hwsrc)",
"def gen_arp_response(target_ip, spoof_ip):\n\n if scan_results := networkscan.get_clients(\n target_ip, 10\n ): # checks to see if the target is reachable on the network\n target = scan_results[0]\n packet = scapy.ARP(\n op=2, # ARP response (op=1 would be ARP request). We are spoofing a request packet\n pdst=target_ip,\n hwdst=target.mac_addr,\n psrc=spoof_ip, # ip adddress we are spoofing (pretending to be)\n )\n return packet",
"def restore(target_ip=\"10.0.2.1\", target_mac=\"\", des_ip=\"10.0.2.1\"):\n # Op=2 para response, no request\n # pdst=\"10.0.2.15\" .. la dir ip target\n # hwdst=\"08:00:27:e7:53:c8\" target mac\n # psrc=\"10.0.2.1\" router ip\n if target_mac == \"\":\n target_mac = scan(target_ip)[0][\"mac\"]\n\n des_mac = scan(des_ip)[0][\"mac\"]\n\n catch = scapy.ARP(op=2, pdst=target_ip, hwdst=target_mac, psrc=des_ip,\n hwsrc=des_mac)\n scapy.send(catch, count=4, verbose=False)",
"def reply(self, *args, **kwargs):\n kwargs['reply_to'] = self.message.id\n return self._client.send_message(self.input_chat, *args, **kwargs)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets the stock input and output accounts according to the difference between the old price and the new price.
|
def onchange_price(self, cr, uid, ids, new_price, context=None):
if context is None:
context = {}
product_obj = self.pool.get('product.product').browse(cr, uid, context.get('active_id', False), context=context)
price = product_obj.standard_price
diff = price - new_price
if diff > 0 :
return {'value' : {'enable_stock_in_out_acc':True}}
else :
return {'value' : {'enable_stock_in_out_acc':False}}
|
[
"def _update_buy_amount_from_new_sell_amount(\n buy_amount_old, sell_amount_new, sell_amount_old\n ):\n buy_amount_new = buy_amount_old * sell_amount_new / sell_amount_old\n return buy_amount_new.to_integral_value(rounding=ROUND_UP)",
"def change_price(self, cr, uid, ids, context=None):\n if context is None:\n context = {}\n rec_id = context and context.get('active_id', False)\n assert rec_id, _('Active ID is not set in Context.')\n prod_obj = self.pool.get('product.product')\n res = self.browse(cr, uid, ids, context=context)\n datas = {\n 'new_price' : res[0].new_price,\n 'stock_output_account' : res[0].stock_account_output.id,\n 'stock_input_account' : res[0].stock_account_input.id,\n 'stock_journal' : res[0].stock_journal.id\n }\n prod_obj.do_change_standard_price(cr, uid, [rec_id], datas, context)\n return {'type': 'ir.actions.act_window_close'}",
"def set_price_changes(self):\n self.market_data['pricechange'] = self.market_data['adj_close'].diff(1)\n self.market_data['percentchange'] = (np.log(self.market_data['adj_close']) - np.log(self.market_data['adj_close'].shift(1))).fillna(0)",
"def test_stock_price(self):\n\n # Make a mock object for testing.\n sALE = Stock('ALE', 'common', 23, nan, 60)\n\n # A stock without trades has a ticker price equal to its par value.\n self.assertEqual(sALE.stock_price(), 60)\n\n # Add some mock Trades.\n sALE.buy(500, 25)\n sALE.sell(300, 15)\n self.assertEqual(len(sALE._trades), 2)\n\n # Easy case for ticker price with two Trades.\n self.assertEqual(sALE.stock_price(), ((500*25)+(300*15))/(500+300))\n\n # Add some mock Trades in the distant past (such that they are excluded\n # from the average).\n sALE.buy(100, 87, datetime.datetime.now() -\n datetime.timedelta(minutes=16))\n sALE.buy(23, 34, datetime.datetime.now() -\n datetime.timedelta(minutes=15))\n self.assertEqual(len(sALE._trades), 4)\n\n # Stock price should be unchanged.\n self.assertEqual(sALE.stock_price(), ((500*25)+(300*15))/(500+300))",
"def changeTicker(self, old_stock_id : str, new_stock_id):\n self.conn.execute(\"UPDATE portfolio SET stock_id=? WHERE stock_id=?\", (new_stock_id, old_stock_id))\n self.conn.commit()",
"def sell(self, stock_price, stock_balance):\n\n stock_value = stock_balance * stock_price\n\n new_cash_balance = self.balance + \\\n stock_value - \\\n self.transaction_fee\n\n return (new_cash_balance, 0)",
"def update(self, btcprice):\n if btcprice <= self.buyPrice():\n if usd.hasFunds(self.distributedBalance):\n buy(self.distributedBalance, btcprice)\n else:\n self.usd.insufficientFunds()\n for transaction in self.book:\n if btcprice >= transaction.sellPrice():\n print 'Profit: ',\n self.sell(transaction, btcprice)\n if btcprice <= (transaction.initial_btcprice * 0.999):\n print 'Loss: ',\n self.sell(transaction, btcprice)",
"def price_change(self, name):\n crypto = Cryptocurrency(name, self._allData)\n currentPrice = crypto.get_price('current')\n changedPrice1d = crypto.get_price('1d')\n changedPrice7d = crypto.get_price('7d')\n return currentPrice, changedPrice1d, changedPrice7d",
"def test_update_depends_stock(self):\n with mn.model() as m:\n Foo = mn.stock('Foo', lambda: 1, (), lambda x: x, ('Bar',))\n Bar = mn.constant('Bar', 99)\n\n self.assertEqual(m['Foo'][''], 99)\n m['Bar'][''] = 90\n m.recalculate()\n self.assertEqual(m['Foo'][''], 90)\n m.step()\n self.assertEqual(m['Foo'][''], 91)",
"def update(self, ask):\n\t\tself.price.append(ask)\n\t\tself.strategy()",
"def trigger_stock_move_changes(self):\n old = self.TD['old'] or {}\n new = self.TD['new'] or {}\n dirty_product_ids = []\n for product_id in [ old.get('product_id'), new.get('product_id') ]:\n if not product_id: continue\n dirty_product_ids.append(product_id)\n self.mark_products_dirty(dirty_product_ids)",
"def update(self, target):\n change = (self.coeff * (target - self.price) +\n self.momentum * self.last_change)\n self.last_change = change\n \n limiter = self.buyer and min or max\n self.price = int(limiter(self.price + change, self.limit))",
"def backtest(self):\n # Cut off most recent history closing price since it is not complete and would effect the calculations\n #kline_array = self.client.get_historical_klines(symbol=pair, interval=Client.KLINE_INTERVAL_5MINUTE, start_str= '1' + ' month ago UTC')\n kline_array = self.client.get_historical_klines(symbol=self.pair, interval=self.asset_interval, start_str= self.time_look_back)\n self.closing_times = [dt.datetime.utcfromtimestamp(x[6]/1000) for x in kline_array][0:-1]\n self.closing_price_array = [float(x[4]) for x in kline_array][0:-1]\n self.checked_prices = []\n\n gain, loss = 0, 0\n for x in range(0, len(self.closing_price_array)-1):\n change = self.closing_price_array[x+1] - self.closing_price_array[x]\n self.checked_prices.append(self.closing_price_array[x+1])\n self.checked_times.append(self.closing_times[x+1])\n if change > 0:\n gain += change\n elif change < 0:\n loss += abs(change)\n\n #Get first rsi simple moving average\n if x == self.rsi_period:\n self.avg_gain = self.simple_moving_average(gain, self.rsi_period)\n self.avg_loss = self.simple_moving_average(loss, self.rsi_period)\n self.rsi = self.rsi_calc(self.avg_gain, self.avg_loss)\n self.rsi_array.append(self.rsi)\n gain, loss = 0, 0\n\n #Use wilders moving average to continue calculating rsi values\n elif x > self.rsi_period:\n self.avg_gain = self.wilders_moving_average(self.rsi_period, gain, self.avg_gain)\n self.avg_loss = self.wilders_moving_average(self.rsi_period, loss, self.avg_loss)\n self.rsi = self.rsi_calc(self.avg_gain, self.avg_loss)\n self.rsi_array.append(self.rsi)\n gain, loss = 0, 0\n\n # When there are enough rsi values begin to calculate stoch_rsi\n if len(self.rsi_array) >= self.stoch_period:\n k_fast = self.k_fast_stoch(self.rsi_array[len(self.rsi_array) - self.stoch_period:])\n self.k_fast_array['k_fast'].append(k_fast)\n self.k_fast_array['time'].append(self.closing_times[x])\n\n # When there are enough %K_FAST values begin to calculate %K_SLOW values = sma of n %K_FAST values\n if len(self.k_fast_array['k_fast']) >= self.k_slow_period:\n k_slow = self.simple_moving_average(self.k_fast_array['k_fast'][-1*self.k_slow_period:], self.k_slow_period)\n self.k_slow_array['k_slow'].append(k_slow)\n self.k_slow_array['time'].append(self.closing_times[x])\n\n # When there are enough %K_SLOW values begin to calculate %D_SLOW values = sma of n %K_SLOW values\n if len(self.k_slow_array['k_slow']) >= self.d_slow_period:\n d_slow = self.simple_moving_average(self.k_slow_array['k_slow'][-1*self.d_slow_period:], self.d_slow_period)\n self.d_slow_array['d_slow'].append(d_slow)\n self.d_slow_array['time'].append(self.closing_times[x])\n\n self.bollinger_bands(self.checked_prices, self.sma_period, self.deviation, self.checked_times[x])\n\n #Once all values start to be calculated we can determine whether to buy or sell until we hit the last\n self.buy_sell(current_time = self.checked_times[x])\n\n self.plot_orders() #Plot orders on graph",
"def test_update_depends_stock_chain(self):\n with mn.model() as m:\n Foo = mn.stock('Foo', lambda: 1, (), lambda x: x, ('Bar',))\n Bar = mn.constant('Bar', lambda x: x, 'Baz')\n Baz = mn.constant('Baz', 99)\n\n self.assertEqual(m['Foo'][''], 99)\n m['Baz'][''] = 90\n m.recalculate()\n self.assertEqual(m['Foo'][''], 90)\n m.step()\n self.assertEqual(m['Foo'][''], 91)",
"def change_sale_price(self):\n sale = self.find_brokered_sale_by_id(self.lhs)\n if sale.owner != self.caller.player_ob.Dominion:\n raise self.BrokerError(\"You can only change the price of your own sales.\")\n price = self.get_amount(self.rhs, \"price\")\n if price == sale.price:\n raise self.BrokerError(\n \"The new price must be different from the current price.\"\n )\n sale.change_price(price)\n if not sale.pk:\n self.msg(\n \"You have changed the price to %s, merging with an existing sale.\"\n % price\n )\n return\n amount_remaining = sale.amount\n if sale.broker_type == BrokeredSale.SALE:\n amount_remaining = self.check_for_buyers(sale)\n if amount_remaining:\n self.msg(\"You have changed the price to %s.\" % price)",
"def update_stockcounter(self, stock):\n\n bg = stock.get_mw_price()\n self.update_portfolio()\n stock.counter = int(float(self.buyingpower / bg / stock.tradeshares))\n print \" --- Updated Net Worth: %s | Buying Power: %s ---\" % (self.networth, self.buyingpower)",
"def price_change():\n with db.session.connection(execution_options={\"schema_translate_map\":{\"tenant\":session['schema']}}):\n shift_id = int(request.form.get(\"shift\"))\n cost_price = request.form.get(\"cost_price\")\n selling_price = request.form.get(\"selling_price\")\n cost_price = request.form.get(\"cost_price\")\n product_id = request.form.get(\"product\")\n product= Product.query.filter_by(id=product_id).first()\n try:\n \n price = Price.query.filter(and_(Price.shift_id==shift_id,Price.product_id==product.id)).first()\n price.cost_price = cost_price\n price.selling_price= selling_price\n product.selling_price = selling_price\n product.cost_price = cost_price\n db.session.commit()\n flash('Done','info')\n return redirect(url_for('readings_entry'))\n except:\n\n db.session.rollback()\n flash('Something is wrong, try again','warning')\n return redirect(url_for('readings_entry'))",
"def update_position_price(self):\r\n ticker_cur = self.ticker.prices[self.currency_pair]\r\n \r\n if self.position_type == 'long':\r\n self.cur_price = Decimal(str(ticker_cur['bid']))\r\n else:\r\n self.cur_price = Decimal(str(ticker_cur['ask']))\r\n \r\n self.profit_base = self.calculate_profit_base()\r\n self.profit_perc = self.calculate_profit_perc()",
"def update_asset_value(self) -> None:\n base = \"BALN\"\n quote = \"bnUSD\"\n dex_score = self._dex_score.get()\n oracle_address = self._oracle.get()\n try:\n dex = self.create_interface_score(dex_score, DexInterface)\n oracle = self.create_interface_score(oracle_address, OracleInterface)\n price = dex.getBalnPrice()\n priceData = oracle.get_reference_data('USD', 'ICX')\n self._last_price.set(priceData['rate'] * price // EXA)\n self._price_update_time.set(self.now())\n self.OraclePrice(base + quote, self._oracle_name.get(), dex_score, price)\n except BaseException as e:\n revert(f'{base + quote}, {self._oracle_name.get()}, {dex_score}, Exception: {e}')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Changes the standard price of a product and creates an account move accordingly.
|
def change_price(self, cr, uid, ids, context=None):
if context is None:
context = {}
rec_id = context and context.get('active_id', False)
assert rec_id, _('Active ID is not set in Context.')
prod_obj = self.pool.get('product.product')
res = self.browse(cr, uid, ids, context=context)
datas = {
'new_price' : res[0].new_price,
'stock_output_account' : res[0].stock_account_output.id,
'stock_input_account' : res[0].stock_account_input.id,
'stock_journal' : res[0].stock_journal.id
}
prod_obj.do_change_standard_price(cr, uid, [rec_id], datas, context)
return {'type': 'ir.actions.act_window_close'}
|
[
"def change_sale_price(self):\n sale = self.find_brokered_sale_by_id(self.lhs)\n if sale.owner != self.caller.player_ob.Dominion:\n raise self.BrokerError(\"You can only change the price of your own sales.\")\n price = self.get_amount(self.rhs, \"price\")\n if price == sale.price:\n raise self.BrokerError(\n \"The new price must be different from the current price.\"\n )\n sale.change_price(price)\n if not sale.pk:\n self.msg(\n \"You have changed the price to %s, merging with an existing sale.\"\n % price\n )\n return\n amount_remaining = sale.amount\n if sale.broker_type == BrokeredSale.SALE:\n amount_remaining = self.check_for_buyers(sale)\n if amount_remaining:\n self.msg(\"You have changed the price to %s.\" % price)",
"def onchange_price(self, cr, uid, ids, new_price, context=None):\n if context is None:\n context = {}\n product_obj = self.pool.get('product.product').browse(cr, uid, context.get('active_id', False), context=context)\n price = product_obj.standard_price\n diff = price - new_price\n if diff > 0 :\n return {'value' : {'enable_stock_in_out_acc':True}}\n else :\n return {'value' : {'enable_stock_in_out_acc':False}}",
"def price_change():\n with db.session.connection(execution_options={\"schema_translate_map\":{\"tenant\":session['schema']}}):\n shift_id = int(request.form.get(\"shift\"))\n cost_price = request.form.get(\"cost_price\")\n selling_price = request.form.get(\"selling_price\")\n cost_price = request.form.get(\"cost_price\")\n product_id = request.form.get(\"product\")\n product= Product.query.filter_by(id=product_id).first()\n try:\n \n price = Price.query.filter(and_(Price.shift_id==shift_id,Price.product_id==product.id)).first()\n price.cost_price = cost_price\n price.selling_price= selling_price\n product.selling_price = selling_price\n product.cost_price = cost_price\n db.session.commit()\n flash('Done','info')\n return redirect(url_for('readings_entry'))\n except:\n\n db.session.rollback()\n flash('Something is wrong, try again','warning')\n return redirect(url_for('readings_entry'))",
"def update_price(self, company: Company):\n pass",
"def save(self, *args, **kwargs):\n orders = Order.objects.filter(product=self)\n\n # We exclude completed orders\n orders = orders.exclude(state=\"COM\")\n\n for order in orders:\n order.unit_price = self.unit_price\n order.save()\n\n super().save(*args, **kwargs)",
"def price(self, price):\n CCAPI.set_product_base_price(product_id=self.id, price=price)\n self._price = price",
"def test_transMaxByChangePrice(self):\n self._setupTrans()\n \n # Confirm we can't add a price required item\n self.log.info(\"Adding price required item to transaction\")\n pos.click_speed_key(\"Item 1\")\n pos.enter_keypad(1, after=\"Enter\")\n self._confirmMessage()\n \n # Confirm we can't raise Item 2's price above $5\n self.log.info(\"Overriding Item 2's price\")\n pos.click_function_key(\"Override\")\n # Assume default reason code and enter price\n pos.enter_keypad(501, after=\"Enter\")\n self._confirmMessage(\"Unable to change price on item.\")\n \n self._clearTrans()",
"def update_price(origin_price: float, price: float):\n return (get_current_price() / origin_price) * price",
"def update(self, btcprice):\n if btcprice <= self.buyPrice():\n if usd.hasFunds(self.distributedBalance):\n buy(self.distributedBalance, btcprice)\n else:\n self.usd.insufficientFunds()\n for transaction in self.book:\n if btcprice >= transaction.sellPrice():\n print 'Profit: ',\n self.sell(transaction, btcprice)\n if btcprice <= (transaction.initial_btcprice * 0.999):\n print 'Loss: ',\n self.sell(transaction, btcprice)",
"def update_price_by_product_id(self, updated_product):\n pass",
"def _adjust_price(self):\n\n # Go through each topping and add the money amount for topping\n topping_additional_money = 0\n for topping in self._toppings:\n topping_additional_money += topping.getPrice()\n\n self._price = self._base_price + topping_additional_money",
"def _add_price(self):\n\n instrument = self._instrument\n date = self._price_date\n rate = self._price\n market = acm.FParty['internal']\n\n existing_price = None\n prices = acm.FPrice.Select('instrument = {0}'.format(instrument.Name()))\n for price in prices:\n if price.Market() == market and price.Day() == date:\n if not self._recalculate:\n raise ValueError('Rate already exists for this date.')\n else:\n existing_price = price\n break\n\n if existing_price:\n # If self._recalculate is False, an exception would be raised\n # That means we're recalculating.\n price = existing_price\n else:\n price = acm.FPrice()\n price.Instrument(instrument)\n price.Day(date)\n price.Market(market)\n price.Currency(acm.FInstrument['ZAR'])\n\n price.Ask(rate)\n price.Bid(rate)\n price.High(rate)\n price.Low(rate)\n price.Settle(rate)\n price.Last(rate)\n price.Commit()\n\n log('The price was updated in SACPI.')",
"def reduce_price(self, reduction):\r\n self._price = self._price - reduction",
"def sellPrice(self):\n return self.initial_btcprice * (1 + FEE + self.strategy)",
"def compute_set_product_price(self):\n self.ensure_one()\n phantom_boms = self.bom_ids.filtered(lambda b: b.type == \"phantom\")\n\n if not phantom_boms:\n raise UserError(\n _(\n \"No phantom BoM found for product %s. Please create\"\n \" a phantom BoM to compute the price of the set product.\"\n % self.name\n )\n )\n\n products_2compute = self.product_variant_ids\n date_now = fields.Datetime.now()\n dummy_so = self.env[\"sale.order\"].create(\n {\n \"name\": \"Phantom Bom Price Compute: %s, %s\"\n % (self.id, date_now.strftime(\"%d-%m-%Y\")),\n \"partner_id\": 12515, # Ahmet Altınışık test\n \"partner_invoice_id\": 12515,\n \"partner_shipping_id\": 12515,\n \"pricelist_id\": 136, # USD pricelist\n \"warehouse_id\": 1,\n \"company_id\": 1,\n \"currency_id\": 2, # USD\n \"date_order\": fields.Datetime.now(),\n }\n )\n for product in products_2compute:\n bom = self.env[\"mrp.bom\"].sudo()._bom_find(product=product)\n if not bom.type == \"phantom\":\n continue\n # Create a new sale order line\n dummy_sol = self.env[\"sale.order.line\"].create(\n {\n \"order_id\": dummy_so.id,\n \"product_id\": product.id,\n \"product_uom_qty\": 1,\n \"product_uom\": product.uom_id.id,\n \"price_unit\": product.v_fiyat_dolar,\n }\n )\n # Explode the phantom bom\n dummy_sol.explode_set_contents()\n # Compute the price\n dummy_so.recalculate_prices()\n # Update the product price\n _logger.info(\n \"Updating product price for product %s: %s -> %s\"\n % (product.display_name, product.v_fiyat_dolar, dummy_so.amount_untaxed)\n )\n product.v_fiyat_dolar = dummy_so.amount_untaxed\n # Clear sale order lines\n dummy_so.order_line.unlink()\n # Clear the dummy sale order\n dummy_so.unlink()\n self.env.cr.commit()\n return True",
"def change_price(self, symbol, account, nft_ids, price):\n \n nft_list = []\n if not isinstance(nft_ids, list):\n nft_list = [str(nft_ids)]\n else:\n for n in nft_ids:\n nft_list.append(str(n))\n contract_payload = {\"symbol\": symbol.upper(), \"nfts\": nft_list, \"price\": str(price)}\n json_data = {\"contractName\":\"nftmarket\",\"contractAction\":\"changePrice\",\n \"contractPayload\":contract_payload}\n assert self.blockchain.is_hive\n tx = self.blockchain.custom_json(self.ssc_id, json_data, required_auths=[account])\n return tx",
"def set_priced_current_price_and_period(self, price):\n self.currentPeriod = {\n 'date_utc': None,\n 'open': price,\n 'close': price,\n 'high': price,\n 'low': price\n }\n self.currentPrice = price",
"def update_position_price(self):\r\n ticker_cur = self.ticker.prices[self.currency_pair]\r\n \r\n if self.position_type == 'long':\r\n self.cur_price = Decimal(str(ticker_cur['bid']))\r\n else:\r\n self.cur_price = Decimal(str(ticker_cur['ask']))\r\n \r\n self.profit_base = self.calculate_profit_base()\r\n self.profit_perc = self.calculate_profit_perc()",
"def save(self, *args, **kwargs):\n self.lineitem_total = self.comp.price * self.quantity\n super().save(*args, **kwargs)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Draw a Koch curve.
|
def draw_koch_curve(t, order, x):
if order == 0:
t.forward(x)
else:
for angle in [60, -120, 60, 0]:
draw_koch_curve(t, order-1, x/3)
t.left(angle)
|
[
"def draw_snowflake(t, order, x):\n for i in range(3):\n draw_koch_curve(t, order, x)\n t.rt(120)",
"def drawK():\r\n\r\n turtle.down()\r\n turtle.left(90)\r\n turtle.forward(40)\r\n turtle.left(180)\r\n turtle.forward(20)\r\n turtle.left(45)\r\n turtle.forward(27)\r\n turtle.left(180)\r\n turtle.forward(27)\r\n turtle.right(90)\r\n turtle.forward(27)\r\n\r\n pass\r\n\r\n turtle.up()",
"def plot_curve(self):\r\n plt.plot(self.k_range, self.k_error)\r\n plt.title(\"Error under different choice of K\")\r\n plt.xlabel(\"Value of K for KNN\")\r\n plt.ylabel(\"Error\")\r\n plt.show()",
"def user_input():\n print(\"Welcome to drawing a Koch curve.\\n\")\n order = int(input(\"Please enter the order of magnitude for the Koch curve: \"))\n x = int(input(\"Please enter a length x: \"))\n # Instantiate the Turtle\n bob = turtle.Turtle()\n bob.hideturtle()\n draw_snowflake(bob, order, x)",
"def draw_koch(n):\n\n s = Screen() # create screen\n t = Turtle() # create turtle\n directions = koch(n) # obtain directions to draw koch(n)\n\n for move in directions: # follow specified moves\n if move == 'F':\n t.forward(300 / 3 ** n) # move forward, length normalized\n if move == 'L':\n t.lt(60) # rotate left 60 degrees\n if move == 'R':\n t.rt(120) # rotate right 60 degrees\n\n s.bye()",
"def draw_kotch():\n import turtle\n turt = turtle.Turtle()\n win = turtle.Screen()\n turt.color(\"green\")\n kotch(20*3**2, turt)\n win.exitonclick()",
"def _draw_curve(self):\n print('\\n\\nDrawing polynomials\\n================')\n degree = None\n while not degree:\n degree = input('Please enter the degree of the polynomial [default: 1]: ')\n if degree == '':\n degree = 1\n else:\n try:\n degree = int(degree)\n except ValueError:\n degree = None\n print('{start_color}Unable to parse input. Please try again.{end_color}'.format(\n start_color=self.bcolors.WARNING,\n end_color=self.bcolors.ENDC\n ))\n\n formula_string = ''\n param_letter_code = 97\n for deg in range(degree, 1, -1):\n formula_string += '{param_letter}x^{deg} + '.format(\n param_letter=chr(param_letter_code),\n deg=deg\n )\n param_letter_code += 1\n\n formula_string += '{param_letter}x + '.format(\n param_letter=chr(param_letter_code)\n )\n param_letter_code += 1\n formula_string += '{param_letter}'.format(\n param_letter=chr(param_letter_code)\n )\n\n print(\n '\\n\\nDrawing a polinome of the form: {start_color}{formula}{end_color} from '\n '{start_color}xmin{end_color} to {start_color}xmax{end_color}. '\n '\\nPlease enter the parameters:'.format(\n formula=formula_string,\n start_color=self.bcolors.OKGREEN,\n end_color=self.bcolors.ENDC\n ))\n\n params = self._get_function_parameters(\n tuple(\n (chr(param_letter_code), float) for param_letter_code in range(97, 97 + degree + 1)\n ) +\n (('xMin', float),\n ('xMax', float))\n )\n\n # Draw\n self.range = npy.arange(params[-2], params[-1], .1)\n polynomials = [param * npy.power(self.range, power) for param, power in zip(params, range(degree, 1, -1))]\n y = sum(polynomials) + npy.multiply(self.range, params[-4]) + params[-3]\n plt.plot(self.range, y, \"g-\")\n\n plt.title('Function: $f(x) = {formula_string}$'.format(formula_string=formula_string), fontsize=20)\n plt.grid(True)\n plt.draw()\n plt.show()",
"def main():\n t.setup(800, 800)\n t.pu()\n t.goto(-300, 150)\n t.pd()\n t.pensize(2)\n level = 3\n for i in range(3):\n koch(600, level)\n t.right(120)\n t.hideturtle()\n t.done()",
"def main(start):\r\n #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n # Exercise 1\r\n ##\r\n # Calculate a Koch curve of at least 3 degrees.\r\n # Draw the resulting points.\r\n # Create a Koch snowflake from three, triangulated Koch curves.\r\n #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n\r\n # Initialize a 'start' point for the Koch curve.\r\n\r\n # Specify how many iterations should be performed.\r\n DEGREE = 3\r\n # Instantiate a turtle object.\r\n sheldon = turtle.Turtle()\r\n sheldon.speed(1000)\r\n\r\n # Retrieve the window the turtle will use for drawing.\r\n screen = sheldon.getscreen()\r\n screen.title(\"Koch Curve: \" + str(DEGREE) + \"°\")\r\n screen.reset()\r\n\r\n for x in range(6):\r\n drawSnowflake(sheldon,start ,DEGREE)\r\n start = [start[0]*0.75,start[1]*0.75]\r\n screen.exitonclick()",
"def curveSketchCtx(*args, **kwargs):\n\n pass",
"def draw_roc_curve(fpr, tpr):\n\n plt.plot(fpr, tpr, color='orange', label='ROC')\n plt.plot([0, 1], [0, 1], color='darkblue', linestyle='--')\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('Receiver Operating Characteristic (ROC) Curve')\n plt.legend()\n plt.show()",
"def draw_learning_curve(numbers):\r\n \r\n plt.xlabel('Simulation Epoch')\r\n plt.ylabel('Success Rate')\r\n plt.title('Learning Curve')\r\n plt.grid(True)\r\n\r\n plt.plot(numbers['x'], numbers['success_rate'], 'r', lw=1)\r\n plt.show()",
"def drawChamber( self ):\n self.win = GraphWin( \"Gravity Chamber\",\n self.offset * self.width,\n self.offset * self.height,\n autoflush = False )\n \n self.win.setCoords( -( ( self.offset - 1 ) / 2 ) * self.width,\n -( ( self.offset - 1 ) / 2 ) * self.height,\n ( ( self.offset - 1 ) / 2 + 1 ) * self.width,\n ( ( self.offset - 1 ) / 2 + 1 ) * self.height ) \n Line( Point( 0, 0 ), Point( 0, self.height ) ).draw( self.win )\n Line( Point( 0, self.height ),\n Point( self.width, self.height ) ).draw( self.win )\n Line( Point( self.width, self.height ),\n Point( self.width, 0 ) ).draw( self.win )\n Line( Point( self.width, 0 ), Point( 0, 0 ) ).draw( self.win )\n return",
"def drawkey(win, p):\n text = Text(p, \"Point Values:\" +\n \"\\nYellow: 9\"\n \"\\nRed: 7\"\n \"\\nBlue: 5\"\n \"\\nBlack: 3\"\n \"\\nWhite: 1\")\n text.setSize(15)\n text.draw(win)",
"def curveEPCtx(*args, **kwargs):\n\n pass",
"def _generate_curve(losses, probs_of_exceedance):\n\n mean_losses = collect(loop(losses, lambda x, y: mean([x, y])))\n return shapes.Curve(zip(mean_losses, probs_of_exceedance))",
"def _plot_curve(x, y, title, x_lab, y_lab, save_path=False, show=False):\n plt.title(title)\n plt.plot(x, y, 'k')\n plt.plot([(0, 0), (1, 1)], 'r--')\n plt.xlim([-0.1, 1.1])\n plt.ylim([-0.1, 1.1])\n plt.ylabel(x_lab)\n plt.xlabel(y_lab)\n if save_path is not False:\n plt.savefig(save_path)\n if show:\n plt.show()",
"def draw_smoothly_interpolated_closed_curve(self, points):\n\n control_points = [ self.project_point_to_canvas(point) for point in points ]\n control_vectors = self._compute_autosmooth_control_vectors(control_points, is_path_closed = True)\n path_command = self._make_svg_path_M_and_C_command(control_points + [ control_points[0], ], control_vectors)\n path_command += self._make_svg_path_Z_command()\n self.insert_svg_path_command(path_command)\n return",
"def curve(replace=bool, periodic=bool, objectSpace=bool, bezier=bool, worldSpace=bool, point=\"string\", append=bool, editPoint=\"string\", knot=float, pointWeight=\"string\", degree=float):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Draw a snowflake using three Koch curves
|
def draw_snowflake(t, order, x):
for i in range(3):
draw_koch_curve(t, order, x)
t.rt(120)
|
[
"def main():\n t.setup(800, 800)\n t.pu()\n t.goto(-300, 150)\n t.pd()\n t.pensize(2)\n level = 3\n for i in range(3):\n koch(600, level)\n t.right(120)\n t.hideturtle()\n t.done()",
"def main(start):\r\n #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n # Exercise 1\r\n ##\r\n # Calculate a Koch curve of at least 3 degrees.\r\n # Draw the resulting points.\r\n # Create a Koch snowflake from three, triangulated Koch curves.\r\n #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n\r\n # Initialize a 'start' point for the Koch curve.\r\n\r\n # Specify how many iterations should be performed.\r\n DEGREE = 3\r\n # Instantiate a turtle object.\r\n sheldon = turtle.Turtle()\r\n sheldon.speed(1000)\r\n\r\n # Retrieve the window the turtle will use for drawing.\r\n screen = sheldon.getscreen()\r\n screen.title(\"Koch Curve: \" + str(DEGREE) + \"°\")\r\n screen.reset()\r\n\r\n for x in range(6):\r\n drawSnowflake(sheldon,start ,DEGREE)\r\n start = [start[0]*0.75,start[1]*0.75]\r\n screen.exitonclick()",
"def drawK():\r\n\r\n turtle.down()\r\n turtle.left(90)\r\n turtle.forward(40)\r\n turtle.left(180)\r\n turtle.forward(20)\r\n turtle.left(45)\r\n turtle.forward(27)\r\n turtle.left(180)\r\n turtle.forward(27)\r\n turtle.right(90)\r\n turtle.forward(27)\r\n\r\n pass\r\n\r\n turtle.up()",
"def draw_koch(n):\n\n s = Screen() # create screen\n t = Turtle() # create turtle\n directions = koch(n) # obtain directions to draw koch(n)\n\n for move in directions: # follow specified moves\n if move == 'F':\n t.forward(300 / 3 ** n) # move forward, length normalized\n if move == 'L':\n t.lt(60) # rotate left 60 degrees\n if move == 'R':\n t.rt(120) # rotate right 60 degrees\n\n s.bye()",
"def draw_koch_curve(t, order, x):\n if order == 0:\n t.forward(x)\n else:\n for angle in [60, -120, 60, 0]:\n draw_koch_curve(t, order-1, x/3)\n t.left(angle)",
"def draw_kotch():\n import turtle\n turt = turtle.Turtle()\n win = turtle.Screen()\n turt.color(\"green\")\n kotch(20*3**2, turt)\n win.exitonclick()",
"def user_input():\n print(\"Welcome to drawing a Koch curve.\\n\")\n order = int(input(\"Please enter the order of magnitude for the Koch curve: \"))\n x = int(input(\"Please enter a length x: \"))\n # Instantiate the Turtle\n bob = turtle.Turtle()\n bob.hideturtle()\n draw_snowflake(bob, order, x)",
"def draw_housing():\r\n\r\n tess.pensize(3)\r\n\r\n tess.color(\"black\", \"darkgrey\")\r\n\r\n tess.begin_fill()\r\n\r\n tess.forward(80)\r\n\r\n tess.left(90)\r\n\r\n tess.forward(200)\r\n\r\n tess.circle(40, 180)\r\n\r\n tess.forward(200)\r\n\r\n tess.left(90)\r\n\r\n tess.end_fill()",
"def test3():\n import pylab as pl\n r,p,rho,u,r_s,p_s,rho_s,u_s,shock_speed = \\\n sedov(t=0.05, E0=5.0, rho0=5.0, g=5.0/3.0, nu=2)\n\n print 'rho shock', rho_s\n print 'p shock', p_s\n print 'u shock', u_s\n print 'r shock', r_s\n \n area = pi*r*r\n dv = area.copy()\n dv[1:] = diff(dv)\n\n # thermal and kinetic energy\n te = (p*dv/(5.0/3.0-1))\n ke = (rho*u*u*0.5*dv)\n #pl.plot(arange(te.size), ke, 'x')\n #pl.show()\n print 'r0', r[:2]\n energy = te.sum() + ke.sum()\n mass = 0.5*inner(rho[1:]+rho[:-1],dv[1:])\n\n print 'density', mass / (pi * r_s**2)\n print 'energy', energy\n print 'shock speed', shock_speed\n pl.plot(r/r_s,rho/rho_s, 'b,',label=r'$\\rho/\\rho_s$')\n pl.plot(r/r_s,p/p_s,'r',label=r'$p/p_s$')\n pl.plot(r/r_s,u/u_s, 'g,',label=r'$u/u_s$')\n pl.legend(loc='upper left')\n pl.show()",
"def visualizeQuakes(k, r):\r\n eq_dict = readeqf()\r\n centroids = createCentroids(k, eq_dict)\r\n clusters = createClusters(k, centroids, eq_dict, r)\r\n\r\n w = 1800 #Window width.\r\n h = 900 #Window height.\r\n bg_pic = \"better_worldmap1800_900.gif\"\r\n\r\n t.setup(width=w, height=h)\r\n t.bgpic(bg_pic)\r\n t.speed(\"fastest\")\r\n t.hideturtle()\r\n t.up()\r\n\r\n w_factor = ((w / 2) / 180)\r\n h_factor = ((h / 2) / 90)\r\n\r\n color_list = [\"dark red\", \"dark green\", \"dark blue\", \"dark orange\",\r\n \"dark orchid\", \"dark goldenrod\", \"dark violet\",\r\n \"pink\", \"magenta\", \"sky blue\", \"plum\", \"dark salmon\",\r\n \"goldenrod\", \"chartreuse\", \"dark sea green\", \"cornsilk\",\r\n \"dark olive green\", \"bisque\", \"blanched almond\",\r\n \"dark cyan\", \"royal blue\", \"papaya whip\", \"peach puff\",\r\n \"misty rose\", \"mint cream\", \"lavender blush\", \"hot pink\",\r\n \"dark khaki\", \"cornflower blue\", \"chocolate\"]\r\n\r\n for cluster_index in range(k):\r\n t.color(color_list[cluster_index])\r\n for akey in clusters[cluster_index]:\r\n lon = (eq_dict[akey][0]) * w_factor\r\n lat = (eq_dict[akey][1]) * h_factor\r\n t.goto(lon, lat)\r\n t.dot()\r\n return None",
"def diagram():\n g= GWGraphics()\n g.SCREEN( 8 )\n g.PRINT( \"INVOLUTE of a CIRCLE\" )\n g.LOCATE( 3,12 ); g.PRINT( \"Y\" )\n g.LOCATE( 12,50 ); g.PRINT( \"X\" )\n g.LOCATE( 12,11 ); g.PRINT( \"O\" )\n g.LOCATE( 7,11 ); g.PRINT( \"A\" )\n g.LOCATE( 17,11 ); g.PRINT( \"B\" )\n S=0.42 #! :REM'aspect ratio for screen 2\n R=92; H=91; PI=3.141593\n g.LINE(R,H+R*S,R,25)\n g.LINE(R,H,381,H, pattern=0x5555 )\n g.CIRCLE (R,H,R)\n I=80; L=0\n for Z in range(1,I+1): # FOR Z=1 TO I\n L=L+PI/I\n X=R*((SIN(L)-L*COS(L)))+R\n Y=H-(R*(COS(L)+L*SIN(L)))*S\n g.PSET (X,Y)\n I=10; L=0\n for Z in range(1,I+1): # FOR Z=1 TO I\n L=L+PI/I\n X=R*((SIN(L)-L*COS(L)))+R\n Y=H-(R*(COS(L)+L*SIN(L)))*S\n A=R*SIN(L); B=R*COS(L)*S\n g.LINE(R+A,H-B,X,Y, pattern=0x1111)\n if Z == 7: # IF Z<>7 THEN 500\n g.LINE(R+A,H-B,X,Y)\n g.LINE(R,H,X,Y)\n g.LINE(R+A,H-B,R,H)\n g.LOCATE( 14,21 ); g.PRINT( \"E\" )\n g.LOCATE( 5,37 ); g.PRINT( \"C\" )\n g.CIRCLE (R,H,R/2,start=1.8*PI,end=PI/2 )\n g.LOCATE (11,19); g.PRINT(\"phi\")\n g.LOCATE (7,32); g.PRINT( \"a\" )\n g.LINE(R,Y,X,Y, pattern=0x5555 )\n g.LOCATE (17,50); g.PRINT( \"D\" )\n #530 LOCATE 19:PRINT STRING$(80,45)\n g.display( \"Involute of a Circle\" )",
"def plot_KS_and_RMSE_gamma(x, y, KS, LE1, Fs, Gs, colors, obs_type):",
"def draw_flower_bed():\n # Place the cursor in place, without drawing\n turtle.up()\n turtle.forward(200)\n turtle.left(180)\n turtle.down()\n # Drawing three flowers\n draw_flower_advance()\n draw_flower_advance()\n draw_flower_advance()",
"def SurfacePoints(SRF, INTU, INTV,ATTR):\n\n backPt = {}\n frontPt = {}\n\n thickFactor = 5\n\n Udomain = rs.SurfaceDomain(SRF, 0)\n Vdomain = rs.SurfaceDomain(SRF, 1)\n\n stepU = (Udomain[1] - Udomain[0])/INTU\n stepV = (Vdomain[1] - Vdomain[0])/INTV\n\n #Plotting points of the cubic frame on a surface\n count = 0\n for i in range(INTU + 1):\n for j in range(INTV + 1):\n \n if i == 0 or j == 0 or i == INTU or j == INTV:\n u = Udomain[0] + i * stepU\n v = Vdomain[0] + j * stepV \n \n else:\n\n u = Udomain[0] + i * stepU + rd.random()*2\n v = Vdomain[0] + j * stepV + rd.random()*2\n\n\n pt = rs.EvaluateSurface(SRF, u, v)\n\n # rs.AddTextDot((i,j), pt)\n # rs.AddPoint(pt)\n\n backPt[(i, j)] = pt\n \n #Generate the normal direction points\n VectNorm = rs.SurfaceNormal(SRF, (u, v))\n VectNorm = rs.VectorScale(VectNorm, thickFactor)\n ptFront = rs.PointAdd(VectNorm, pt)\n\n # rs.AddTextDot((i,j), ptFront)\n # rs.AddPoint(ptFront)\n \n frontPt[(i, j)] = ptFront\n count += 1\n\n CenterCubic(ATTR, backPt, frontPt, INTU, INTV)",
"def plot(self):\n\n plt.plot(self.su[:, 0], self.su[:, 1], 'b', label=\"cubic spline\") # spline\n plt.plot(self.control_points[:, 0], self.control_points[:, 1], '-.r', label=\"control polygon\") # control polygon\n plt.scatter(self.control_points[:, 0], self.control_points[:, 1], color='red') # de Boor points\n\n plt.title(\"Cubic Spline\")\n plt.legend()\n plt.xlabel(\"x\")\n plt.ylabel(\"y\")\n plt.grid()\n plt.show()",
"def drawNurbsCurves(x,knots,color=None,alpha=1.0,samplingTolerance=5.0):\n nctrl,ndim = x.shape[-2:]\n nknots = asarray(knots).shape[-1]\n order = nknots-nctrl\n if order > 8:\n import warnings\n warnings.warn('Nurbs curves of degree > 7 can currently not be drawn! You can create some approximation by evaluating the curve at some points.')\n return\n\n if x.ndim == 2:\n x = x.reshape(-1,nctrl,ndim)\n if color is not None and color.ndim == 2:\n color = color.reshape(-1,nctrl,color.shape[-1])\n\n if color is not None:\n pf.debug('Coords shape: %s' % str(x.shape))\n pf.debug('Color shape: %s' % str(color.shape))\n if color.ndim == 1:\n pf.debug('Single color')\n elif color.ndim == 2 and color.shape[0] == x.shape[0]:\n pf.debug('Element color: %s colors' % color.shape[0])\n elif color.shape == x.shape[:-1] + (3,):\n pf.debug('Vertex color: %s colors' % str(color.shape[:-1]))\n else:\n raise ValueError,\"Number of colors (%s) should equal 1 or the number of curves(%s) or the number of curves * number of vertices\" % (color.shape[0],x.shape[0]) \n\n pf.debug(\"Color shape = %s\" % str(color.shape))\n if color.shape[-1] not in (3,4):\n raise ValueError,\"Expected 3 or 4 color components\"\n\n if color is not None:\n pf.debug(\"Final Color shape = %s\" % str(color.shape))\n\n nurb = GLU.gluNewNurbsRenderer()\n if not nurb:\n raise RuntimeError,\"Could not create a new NURBS renderer\"\n\n GLU.gluNurbsProperty(nurb,GLU.GLU_SAMPLING_TOLERANCE,samplingTolerance)\n \n mode = {3:GL.GL_MAP1_VERTEX_3, 4:GL.GL_MAP1_VERTEX_4}[ndim]\n\n if color is not None and color.ndim == 1:\n # Handle single color\n pf.debug('Set single color: OK')\n glColor(color)\n color = None\n \n ki = knots\n for i,xi in enumerate(x):\n if color is not None and color.ndim == 2:\n # Handle element color\n glColor(color[i])\n if knots.ndim > 1:\n ki = knots[i]\n GLU.gluBeginCurve(nurb)\n if color is not None and color.ndim == 3:\n # Handle vertex color\n ci = color[i]\n if ci.shape[-1] == 3:\n # gluNurbs always wants 4 colors\n ci = growAxis(ci,1,axis=-1,fill=alpha)\n GLU.gluNurbsCurve(nurb,ki,ci,GL.GL_MAP1_COLOR_4)\n GLU.gluNurbsCurve(nurb,ki,xi,mode)\n GLU.gluEndCurve(nurb)\n\n GLU.gluDeleteNurbsRenderer(nurb)",
"def DrawSun(Height,spacebetween):\r\n t.up()\r\n t.forward(spacebetween)\r\n t.left(90)\r\n t.forward(2.5*Height)\r\n t.down()\r\n t.circle(Height/3)\r\n t.up()\r\n t.backward(2.5*Height)\r\n t.right(90)",
"def plot_hand_confs(lhkpss,rhkpss,fn=None):\n handconfs = {'left':[],'right':[]};\n nImg = len(lhkpss);\n nPs = len(lhkpss[0]);\n for i in range(nImg):\n handconfs['left'].append(sum(lhkpss[i][:,2])/nPs);\n handconfs['right'].append(sum(rhkpss[i][:,2])/nPs);\n avLeft = sum(handconfs['left'])/nImg;\n avRight = sum(handconfs['right'])/nImg;\n plt.figure();\n plt.scatter(range(nImg),handconfs['left'],c='g',label='Left hand');\n plt.scatter(range(nImg),handconfs['right'],c='r',label='Right hand');\n plt.xlabel('Frame');\n plt.ylabel('Average confidence over 21 keypoints');\n plt.ylim(top=0.85);\n plt.axhline(y=avLeft,c='g',linestyle='dashed',label='Average (left)');\n plt.axhline(y=avRight,c='r',linestyle='dashed',label='Average (right)');\n plt.legend(loc='upper center',ncol=2);\n if fn is None:\n plt.show();\n else:\n plt.savefig(fn);",
"def curveSketchCtx(*args, **kwargs):\n\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Prompt the user to input the length for the Koch curve
|
def user_input():
print("Welcome to drawing a Koch curve.\n")
order = int(input("Please enter the order of magnitude for the Koch curve: "))
x = int(input("Please enter a length x: "))
# Instantiate the Turtle
bob = turtle.Turtle()
bob.hideturtle()
draw_snowflake(bob, order, x)
|
[
"def KochCurveLength(L, iteration):\n print(f\"Steps Taken: {iteration}\")\n num_segs, seg_len = input_segs(iteration)\n print(f\"Number of Segments: {num_segs}\")\n print(f\"Length of Segments: {seg_len}\")\n\n L = L * (num_segs/seg_len)\n\n return print(f\"Curve Length: {Fraction(L).limit_denominator()}\")",
"def input_scale() -> str:\n return input(\"What's the number of the scale or mode you seek? \")",
"def main(start):\r\n #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n # Exercise 1\r\n ##\r\n # Calculate a Koch curve of at least 3 degrees.\r\n # Draw the resulting points.\r\n # Create a Koch snowflake from three, triangulated Koch curves.\r\n #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n\r\n # Initialize a 'start' point for the Koch curve.\r\n\r\n # Specify how many iterations should be performed.\r\n DEGREE = 3\r\n # Instantiate a turtle object.\r\n sheldon = turtle.Turtle()\r\n sheldon.speed(1000)\r\n\r\n # Retrieve the window the turtle will use for drawing.\r\n screen = sheldon.getscreen()\r\n screen.title(\"Koch Curve: \" + str(DEGREE) + \"°\")\r\n screen.reset()\r\n\r\n for x in range(6):\r\n drawSnowflake(sheldon,start ,DEGREE)\r\n start = [start[0]*0.75,start[1]*0.75]\r\n screen.exitonclick()",
"def __init__(self):\r\n self.length = float(input(\"Enter Length : \"))\r\n self.breadth = float(input(\"Enter Breadth : \"))",
"def arcLengthDimension(curve ,surface):\n pass",
"def obtain_n():\n print(\"Input number of terms n (must be > 0):\")\n n = int(input(\"> \"))\n return n",
"def get_length():\n length = int(input(\"How long is the list? \"))\n return length",
"def promptOperationParam():\n\tmaxEirp = input(\"Enter Max EIRP (dBm/MHz) (-137 to +37): \")\n\tfreqRange = promptFrequencyRange()\n\treturn OperationParam(maxEirp, freqRange)",
"def test_input_k(data, k):\n assert type(k) == int",
"def study_session_length():\r\n # The next code group asks user for study time and verifies input\r\n length_proceed = False\r\n # Had to declare study_length variable before while loops to remove\r\n # pycharm warning.\r\n study_length = 0\r\n while not length_proceed:\r\n float_proceed = False\r\n while not float_proceed:\r\n # Ask how long they intend to study\r\n study_length = input(\"In 0.5 hour increments, how many hours do \"\r\n \"you intend to study for? \")\r\n # Verify user input is a float.\r\n float_proceed = is_float(study_length)\r\n study_length = float(study_length)\r\n # Verify user input is above 0 and divisible by 0.5.\r\n length_proceed = is_valid_length(study_length)\r\n return study_length",
"def plot_curve(self):\r\n plt.plot(self.k_range, self.k_error)\r\n plt.title(\"Error under different choice of K\")\r\n plt.xlabel(\"Value of K for KNN\")\r\n plt.ylabel(\"Error\")\r\n plt.show()",
"def prompt_dimension_size(dimension=\"row\"):\n prompt = \"Input number of {}s: \"\n while 1:\n # raw_dim_size = raw_input(prompt.format(dimension))\n raw_dim_size = raw_input()\n try:\n dim_size = int(raw_dim_size)\n except:\n print \"Please input integer value.\"\n else:\n break\n\n return dim_size",
"def take_input():\n size = int(input(\"Input size of the board: \"))\n return size",
"def paramDimension(curve ,surface):\n pass",
"def get_gravity_input(prompt: str) -> float:\n valid_range = between(1.0, 1.2)\n gravity = float(input(prompt))\n valid_range(gravity)\n\n return gravity",
"def draw_koch(n):\n\n s = Screen() # create screen\n t = Turtle() # create turtle\n directions = koch(n) # obtain directions to draw koch(n)\n\n for move in directions: # follow specified moves\n if move == 'F':\n t.forward(300 / 3 ** n) # move forward, length normalized\n if move == 'L':\n t.lt(60) # rotate left 60 degrees\n if move == 'R':\n t.rt(120) # rotate right 60 degrees\n\n s.bye()",
"def getNumEpochsFromUser():\n epochs = inputPrompt(\"How many epochs would you like to train each network?: \", int)\n return epochs",
"def main():\n user_height = input_height()\n check_height(user_height)",
"def draw_koch_curve(t, order, x):\n if order == 0:\n t.forward(x)\n else:\n for angle in [60, -120, 60, 0]:\n draw_koch_curve(t, order-1, x/3)\n t.left(angle)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Wait for the bucket to be available.
|
def _wait_for_bucket(self, retries: int = 0, delay: int = 0) -> None:
try:
waiter = self.client.get_waiter('bucket_exists')
waiter.wait(
Bucket=self._bucket,
WaiterConfig={
'Delay': delay,
'MaxAttempts': retries
}
)
except ClientError as exc:
self._handle_client_error(exc)
|
[
"def __ensure_bucket_availability(self):\n storage_client = storage.Client()\n if storage_client.lookup_bucket(self.__bucket_name) is None:\n # Create the new bucket\n storage_client.create_bucket(self.__bucket_name)",
"def wait(self):\n self._lock.acquire()",
"def wait(self, timeout=None):\n\n try:\n res = self._q.get(timeout=timeout)\n except Queue.Empty:\n pass\n else:\n self._q.put(res)",
"def wait_for_component_toplevel_file(context, package, version, ecosystem, bucket):\n timeout = 300 * 60\n sleep_amount = 10\n\n key = S3Interface.component_key(ecosystem, package, version)\n\n start_time = datetime.datetime.now(datetime.timezone.utc)\n\n for _ in range(timeout // sleep_amount):\n current_date = datetime.datetime.now(datetime.timezone.utc)\n try:\n last_modified = context.s3interface.read_object_metadata(bucket, key,\n \"LastModified\")\n delta = current_date - last_modified\n # print(current_date, \" \", last_modified, \" \", delta)\n if delta.days == 0 and delta.seconds < sleep_amount * 2:\n # print(\"done!\")\n read_core_data_from_bucket(context, \"component toplevel\", package, version,\n ecosystem, bucket)\n return\n except ClientError:\n print(\"No analyses yet (waiting for {t})\".format(t=current_date - start_time))\n time.sleep(sleep_amount)\n raise Exception('Timeout waiting for the job metadata in S3!')",
"def _wait_until_ready(self):\n # Wait for the manager to be ready before we spawn any endpoints\n msg = self._manage_queue.get()\n if not msg == READY:\n raise SyncError('IterableQueue: manager not ready: %s' % msg)\n self._master_sync_pipe.send(READY)",
"def wait(self):\n logging.info(\"waiting for {} jobs to complete\".format(len(self.submissions)))\n while not self.shutdown:\n time.sleep(1)",
"def wait_released(self):\n while not self.is_released():\n time.sleep(0.01)",
"def wait(self, handle):\n return",
"def wait_until_empty(\n self, tube, timeout=float(\"inf\"), poll_interval=0.2, initial_delay=0.0\n ):\n deadline = time.time() + timeout\n if initial_delay > 0.0:\n time.sleep(initial_delay)\n stats = self.stats_tube(tube)\n while (\n stats[\"current-jobs-ready\"] or stats[\"current-jobs-reserved\"]\n ) and time.time() < deadline:\n time.sleep(poll_interval)\n stats = self.stats_tube(tube)",
"def wait_for_update(bucket, key_arn):\n response = client.get_bucket_encryption(Bucket=bucket)\n failure_counter = 0\n while not 'ServerSideEncryptionConfiguration' in response and \\\n 'Rules' in response['ServerSideEncryptionConfiguration'] and \\\n 'ApplyServerSideEncryptionByDefault' in response['ServerSideEncryptionConfiguration']['Rules'][0] and \\\n 'KMSMasterKeyID' in response['ServerSideEncryptionConfiguration']['Rules'][0]['ApplyServerSideEncryptionByDefault'] and \\\n key_arn == response['ServerSideEncryptionConfiguration']['Rules'][0]['ApplyServerSideEncryptionByDefault']['KMSMasterKeyID']:\n if failure_counter > 5:\n print(\"Bucket not reflecting encryption update, aborting\")\n sys.exit(1)\n failure_counter += 1\n time.sleep(10)",
"def wait_for_completion(job, headers={}):\n while is_running(job, headers):\n time.sleep(3)",
"def test_bucket_exists(self):\n self.assertFalse(self.storage.bucket_exists(self.temp_bucket_name))\n self.storage.make_bucket(self.temp_bucket_name)\n self.assertTrue(self.storage.bucket_exists(self.temp_bucket_name))\n self.storage.remove_bucket(self.temp_bucket_name)",
"def wait_to_secret_creation(self, secret_name, namespace):\n try:\n self.get(name=secret_name, namespace=namespace)\n return True\n except K8sNotFoundException:\n return False",
"def _get_bucket(self):\n return self.driver.get_container(self.bucket)",
"def wait(self, timeout=None):\n if hasattr(self, '_result'):\n return\n try:\n self.get(timeout)\n except Exception:\n pass",
"def wait_for_job(self):\n # deactivate any job\n self.job_checker.deactivateJob()\n active_job = None\n\n # progress while we're not\n while not self.shutting_down:\n # grab the first active job\n active_job = self.job_checker.getFirstActiveJob()\n if active_job is not None:\n status = CaptureStatus(active_job[\"status\"])\n if status.running:\n self.job_checker.activateJob(active_job)\n break\n else:\n self.log.debug(\"active job is stopped\")\n else:\n self.log.debug(\"no active job\")\n\n # pause for a while\n time.sleep(5)",
"def wait_to_secret_deletion(self, secret_name, namespace):\n try:\n self.get(name=secret_name, namespace=namespace)\n return False\n except K8sNotFoundException:\n logger.info(\"Finished waiting before the timeout\")\n return True",
"def check_bucket(file,bucketID):\r\n\tbucket = storage_client.bucket(bucketID)\r\n\tcheck = storage.Blob(bucket=bucket, name=file).exists(storage_client)\r\n\treturn check",
"def test_get_bucket_success(self):\n bucket = self.cm.get_bucket(\"testVaultName\")\n self.assertEqual(bucket.name, \"testVaultName\")\n self.assertEqual(bucket.id, 274)",
"def gcs_bucket_exists(bucket_name: str):\n client = storage.Client()\n bucket = client.bucket(bucket_name)\n\n exists = bucket.exists()\n\n return exists"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the current store session for this application.
|
def current_session(cls) -> 'Store':
g = get_application_global()
if g is None:
return cls.get_session()
if 'store' not in g:
g.store = cls.get_session()
store: Store = g.store
return store
|
[
"def get_session():\n return DatabaseService.connector.get_session()",
"def __get_session__(self):\n session = boto3.session.Session()\n return session",
"def _get_session(self):\n session = Session.object_session(self)\n if not session:\n session = sessionmaker(bind=self.engine)()\n return session",
"def session(self):\n ret = self._get_attr(\"session\")\n return ISession(ret)",
"def session(self):\n ret = self.AUTH_ARGS.get(\"session\", \"\")\n return ret",
"def get_session():\n if not hasattr(current_app, 'Session'):\n engine = create_engine(current_app.config.get('DATABASE_URL'),\n convert_unicode=True)\n current_app.Session = sessionmaker(bind=engine)\n\n if not hasattr(request, 'db_session'):\n request.db_session = current_app.Session()\n\n return request.db_session",
"def get_session(self, sessid):\r\n global _SESSIONS\r\n if not _SESSIONS:\r\n from src.server.sessionhandler import SESSIONS as _SESSIONS\r\n return _SESSIONS.session_from_player(self, sessid)",
"def get_or_create_session (self):\n\n session_name = self.session_class.name_prefix + '_session'\n session = cherrypy.session.get (session_name, self.session_class ())\n cherrypy.session[session_name] = session\n return session",
"def session(self) -> ProfileSession:\n return self._session",
"def get_session(self, sessid):\r\n return self.sessions.get(sessid, None)",
"def get_session_key(self):\n return self.model['session_key']",
"def getSession(self, blocking=True):\n if not blocking:\n return self.__sf\n\n self.__lock.acquire(blocking)\n try:\n sf = self.__sf\n if not sf:\n raise omero.ClientError(\"No session available\")\n return sf\n finally:\n self.__lock.release()",
"def get_authenticated_session():\n # initialize a WSK session, specifying email as project identifier\n session = WSK(environment=cfg.LN_ENVIRONMENT, project_id=cfg.LN_PROJECT_ID)\n # authenticate with the web service\n session.authenticate(username=cfg.LN_USERNAME,\n password=cfg.LN_PASSWORD)\n return session",
"def session_store(decoy: Decoy) -> SessionStore:\n return decoy.create_decoy(spec=SessionStore)",
"def session(self):\n\n return {\n \"api_key\": self._api_key,\n \"app_secret\": self._app_secret,\n \"session_key\": self._session_key,\n \"session_secret\": self._session_secret,\n }",
"def session(self):\n ret = self._get_attr(\"session\")\n return IGuestSession(ret)",
"def scoped_session_maker(self):\n return self._scoped_session_maker",
"def get_session(self, user_id):\n return self._repo.get_session(user_id)",
"def get_session_key(self):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Download and unzip a data set folder from a URL
|
def download_and_unzip(url, dataset_name, data_folder):
print("Downloading", dataset_name, "data set...")
data_zip = wget.download(url, out=data_folder)
print("\tunzipping...")
zip_ = zipfile.ZipFile(data_zip, "r")
zip_.extractall(data_folder)
zip_.close()
print("\tdone")
|
[
"def download_and_uncompress_tarball(tarball_url, dataset_dir):",
"def download_and_unzip_data(\n url = \"https://storage.googleapis.com/simpeg/bookpurnong/bookpurnong_inversion.tar.gz\"\n):\n # download the data\n downloads = Utils.download(url)\n\n # directory where the downloaded files are\n directory = downloads.split(\".\")[0]\n\n # unzip the tarfile\n tar = tarfile.open(downloads, \"r\")\n tar.extractall()\n tar.close()\n\n return downloads, directory",
"def download_data(path):\n import requests\n import zipfile\n import os\n\n # download file\n resp = requests.get('http://files.grouplens.org/datasets/movielens/ml-100k.zip', allow_redirects=True, stream=True)\n\n if resp.status_code == 200:\n print('Successfully downloaded the data')\n elif resp.status_code == 404:\n print('File Not Found. Could not download the dataset.')\n \n filename = 'ml-100k.zip'\n zfile = open(filename, 'wb')\n zfile.write(resp.content)\n zfile.close()\n\n zipf = zipfile.ZipFile(filename, 'r') \n zipf.extractall(path)\n zipf.close()\n\n os.remove(filename)",
"def download_training_set(file_url, ts_name):\n\n tmpfile = \"downloads/tmp.zip\"\n\n # Create directories\n for d in [\"data\", \"downloads\"]:\n if not os.path.exists(d):\n os.makedirs(d)\n\n logger.info(\"Downloading training data ...\")\n r = requests.get(file_url)\n if r.status_code == 200:\n with open(tmpfile, \"wb\") as f:\n f.write(r.content)\n logger.info(f\"File saved at {tmpfile}\")\n else:\n logger.error(f\"Error download file from {file_url}\")\n raise ValueError(f\"Error download file from {file_url}\")\n\n try:\n logger.info(f\"Unziping training data from {tmpfile}...\")\n with zipfile.ZipFile(tmpfile, 'r') as zip_ref:\n zip_ref.extractall(\"data\")\n dataset_path = os.path.join(\"data\", ts_name)\n return dataset_path\n\n except Exception as e:\n logger.exception(e)\n raise ValueError(f\"Error while unzip file {tmpfile}\")",
"def download(url, dataset):\n print(\"Downloading data\")\n r = requests.get(url, allow_redirects=True)\n data_file_path = get_directory() + \"/data/raw/\"\n open(data_file_path + dataset, \"wb\").write(r.content)\n tar = tarfile.open(data_file_path + dataset)\n tar.extractall(path=data_file_path)\n tar.close()\n return True",
"def download_data(self):\n res = requests.get(self.url, headers={'User-Agent': 'Mozilla 5.0'})\n soup = BeautifulSoup(res.text, 'html.parser')\n\n try:\n os.mkdir(self.folder)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\n for link in soup.find_all('a', string=\"ZIP\"):\n name = link['href'].rsplit('/', 1)[-1]\n\n filename = os.path.join(self.folder, name)\n\n if os.path.isfile(filename):\n continue\n\n file_url = self.url + link['href']\n file = requests.get(file_url, headers={'User-Agent': 'Mozilla 5.0'})\n\n f = open(filename, 'wb')\n f.write(file.content)",
"def download_extract(url, folder):\n r = requests.get(url)\n z = zipfile.ZipFile(io.BytesIO(r.content))\n z.extractall(folder)",
"def download_similarity_datasets():\n # this is the URL we are downloading\n url = \"http://www.socsci.uci.edu/~mdlee/all.zip\"\n\n # download the file and extract its contents.\n request = requests.get(url)\n dest = os.path.join(\"data\", \"similarity_data\")\n zipfile.ZipFile(BytesIO(request.content)).extractall(dest)\n\n return dest",
"def download_dataset_and_uncompress(dataset_dir: str,\n url: str,\n filename: str=None):\n filename = filename or url.split('/')[-1]\n\n if not os.path.isfile(filename):\n with DLProgress(unit='B',\n unit_scale=True,\n miniters=1,\n desc='download dataset') as pbar:\n urlretrieve(\n url,\n filename,\n pbar.hook)\n\n if not os.path.exists(dataset_dir):\n os.mkdir(dataset_dir)\n\n with tarfile.open(filename, 'r:gz') as tar:\n tar.extractall(dataset_dir)\n tar.close()\n\n statinfo = os.stat(filename)\n print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')",
"def maybe_download_and_extract_dataset(data_url, dest_directory):\n if not data_url:\n return\n print_info(\"Checking destination directory : \" + dest_directory)\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n else:\n print_info(\"SR dataset already exists!\")\n return\n filename = data_url.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n\n def _progress(count, block_size, total_size):\n sys.stdout.write(\n '\\r>> Downloading %s %.1f%%' %\n (filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n\n try:\n filepath, _ = urllib.request.urlretrieve(data_url, filepath, _progress)\n except:\n tf.logging.error('Failed to download URL: %s to folder: %s', data_url,\n filepath)\n tf.logging.error('Please make sure you have enough free space and'\n ' an internet connection')\n raise\n print()\n statinfo = os.stat(filepath)\n tf.logging.info('Successfully downloaded %s (%d bytes)', filename,\n statinfo.st_size)\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)",
"def download_dataset(self):\n data_dir = self.get_download_data_dir()\n cache_dir = self.get_download_cache_dir()\n self.download_dataset_files(data_dir, cache_dir)",
"def unzip_file(url, save_dir):\n with zipfile.ZipFile(url, 'r') as zip_ref:\n zip_ref.extractall(save_dir)",
"def download_and_extract():\n logger.info(\"Downloading. This takes at least 30 mins on a fast connection!\")\n url = 'https://clinicaltrials.gov/AllPublicXML.zip'\n\n # download and extract\n container = tempfile.mkdtemp(\n prefix=settings.STORAGE_PREFIX.rstrip(os.sep), dir=settings.WORKING_VOLUME)\n try:\n data_file = os.path.join(container, \"data.zip\")\n wget_file(data_file, url)\n # Can't \"wget|unzip\" in a pipe because zipfiles have index at end of file.\n with contextlib.suppress(OSError):\n shutil.rmtree(settings.WORKING_DIR)\n subprocess.check_call([\"unzip\", \"-q\", \"-o\", \"-d\", settings.WORKING_DIR, data_file])\n finally:\n shutil.rmtree(container)",
"def download():\n # Download the zip\n target = 'https://github.com/downloads/banterability/pluggablemaps-congressionaldistricts/cd99_110_shp.zip'\n destination = os.path.join(data_dir, 'cd99_110_shp.zip')\n urllib.urlretrieve(target, destination)\n # Unzip it\n fh = open(destination, 'rb')\n zfile = zipfile.ZipFile(fh)\n for name in zfile.namelist():\n path = os.path.join(data_dir, name)\n out = open(path, 'wb')\n out.write(zfile.read(name))\n out.close()\n fh.close()",
"def download_zip(urls, path=\"dataset/RAVDESS\"):\n #pwd = os.getcwd()\n #if os.path.exists(os.path.join(pwd, path)):\n if os.path.exists(path):\n logging.debug(\"Directory already exists. Seeking for zip files.\")\n else:\n #os.mkdir(os.path.join(pwd, path))\n os.mkdir(path)\n #zip_path = os.path.join(pwd,path+\"/zip_files\")\n zip_path = os.path.join(path,\"zip_files\")\n os.mkdir(zip_path)\n # print(zip_path)\n for i in tqdm.tqdm(urls, desc=\"Downloading the Audio zip files: \"):\n # takes the name of the zipfile being downloaded to save\n zip_name = i.split(\"?\")[0].split(\"/\")[-1]\n urllib.request.urlretrieve(i, zip_path + \"/\" + zip_name)\n logging.debug(\"\\nZip Files downloaded\")",
"def download_unzip(url, targetdir):\n filepath = os.path.join(targetdir, url.split('/')[-1])\n\n if not os.path.exists(targetdir):\n print(\"* Creating target directory {}...\".format(targetdir))\n os.makedirs(targetdir)\n\n # Download and unzip if the target directory is empty.\n if not os.listdir(targetdir):\n unzip(download(url, targetdir))\n # Skip downloading if the zipped data is already available.\n elif os.path.exists(filepath):\n print(\"* Found zipped data - skipping download...\")\n unzip(filepath)\n # Skip download and unzipping if the unzipped data is already available.\n else:\n print(\"* Found unzipped data for {}, skipping download and unzip...\"\n .format(targetdir))",
"def download_and_unpack(self, download_dir):\n pass",
"def download_data(root_dir, files):\n for data_url, _ in files:\n logging.info('Downloading: %s', data_url)\n content = net.url_read(data_url)\n if content is None:\n raise Exception('Failed to download %s' % data_url)\n with zipfile.ZipFile(StringIO.StringIO(content)) as zip_file:\n zip_file.extractall(root_dir)",
"def download_and_unzip_celeba():\n file_list = (\"images\", \"partitions\", \"attributes\")\n data_to_path = {}\n\n for url, file_item in zip(\n [_ALIGNED_IMGS_URL, _PARTITIONS_URL, _ATTRIBUTES_URL], file_list):\n filename = url.split('?')[0].split('/')[-1]\n filepath = os.path.join(FLAGS.dataset_dir, filename)\n\n print('Downloading file %s' % filename)\n print(filepath)\n\n if not tf.gfile.Exists(filepath):\n\n def _progress(count, block_size, total_size):\n sys.stdout.write(\n '\\r>> Downloading %.1f%%' %\n (float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n\n filepath, _ = urllib.request.urlretrieve(url, filepath, _progress)\n if '.zip' in filename:\n print('Extracting..')\n with zipfile.ZipFile(filepath, 'r') as f:\n f.extractall(FLAGS.dataset_dir)\n\n with tf.gfile.GFile(filepath) as f:\n size = f.size()\n print('Successfully downloaded and extracted %s, size %s bytes.' %\n (filename, size))\n\n data_to_path[file_item] = filepath\n\n return data_to_path"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Converts categorical features to dummy variables in the data frame
|
def convertColumnsToDummy(df):
#Users categorical information to dummy!
res = pd.get_dummies(df['gender'])
df = df.join(res)
res = pd.get_dummies(df['income'])
df = df.join(res)
res = pd.get_dummies(df['marital'])
df = df.join(res)
res = pd.get_dummies(df['education'])
df = df.join(res)
#Images categorical information to dummy!
res = pd.get_dummies(df['bairro1'], prefix="bairro1")
df = df.join(res)
res = pd.get_dummies(df['graffiti1'], prefix="graffiti1")
df = df.join(res)
res = pd.get_dummies(df['bairro2'], prefix="bairro2")
df = df.join(res)
res = pd.get_dummies(df['graffiti2'], prefix="graffiti2")
df = df.join(res)
return df
|
[
"def dummization(self):\n #TODO: use sklearn ColumnTransformer instead\n\n return pd.get_dummies(\n self.simple_imputer(),\n prefix_sep='_',\n prefix=self.categorical_cols,\n columns=self.categorical_cols,\n drop_first=False\n )",
"def transform_categorical(df):\n categoricals = []\n\n for feature in ['gender', 'race', 'postal_code', 'occupation']:\n categoricals.append(pd.get_dummies(df[feature], prefix=feature))\n del(df[feature])\n\n df = pd.concat([df] + categoricals, axis=1)\n return df",
"def _dumify_categorical_features(df):\n prepped = pd.DataFrame(index=df.index)\n for feature in df.columns:\n # print feature, df.dtypes[feature]\n if df.dtypes[feature] == 'object':\n dummied = _dummy_text_feature(df, feature)\n prepped = prepped.join(dummied)\n else:\n prepped = prepped.join(df[feature])\n return prepped",
"def create_dummy(self, drop_first=True):\n for feature_name, feature_values in self.CATEGORICAL_FEATURES:\n nb_possible_values = len(feature_values)\n # append every possible values of the feature to real feature column\n enhanced_feature_series = self.df[feature_name].append(feature_values)\n # get dummy features\n dummy_features_df = pd.get_dummies(enhanced_feature_series, prefix=feature_name, drop_first=drop_first)[:-nb_possible_values]\n # drop old feature column and add dummy features\n self.df.drop(feature_name, axis=1, inplace=True)\n self.df[dummy_features_df.columns] = dummy_features_df.astype(int)",
"def _convert_categorical_features_to_numeric(self,\n df):\n is_categorical_feature = [False for _ in df]\n\n for i, column in enumerate(df):\n if schema_util.is_categorical_feature(\n schema_util.get_feature(self._schema, column)):\n # Encode categorical columns\n df[column] = np.unique(df[column].values, return_inverse=True)[1]\n is_categorical_feature[i] = True\n return is_categorical_feature",
"def one_hot_encoder(dataframe, nan_as_category = True):\r\n df = dataframe\r\n original_columns = list(df.columns)\r\n df = pd.get_dummies(df, dummy_na= True,drop_first=True)\r\n debug(df.info(memory_usage='deep'))\r\n df = df.loc[:,~df.columns.duplicated()]\r\n debug(df.info(memory_usage='deep'))\r\n new_columns = [c for c in df.columns if c not in original_columns]\r\n const_columns = [c for c in new_columns if df[c].dtype!='object' \\\r\n and np.sum(df[c]) == 0 and np.std(df[c]) == 0]\r\n df.drop(const_columns, axis = 1, inplace = True)\r\n new_columns = list(set(new_columns).difference(set(const_columns)))\r\n return df, new_columns",
"def apply_one_hot_vector_encoding(self,\n df_categorical:pd.DataFrame=None):\n\n assert(isinstance(df_categorical, pd.DataFrame))\n # apply one hot vector enncoding only to colnames in this list\n colnames = self.one_hot_vector_category_column_names\n df_one_hot_vector = pd.get_dummies(df_categorical, columns=colnames)\n\n\n return df_one_hot_vector",
"def _set_dummies(self):\n data_reduced = self.data[self.antecedent]\n self.data_dummies = pd.get_dummies(data_reduced, columns=self.antecedent)",
"def create_dummy(data, target, use_dummies=None):\n\n dummies = []\n\n numerical = list(data.select_dtypes(include=[np.number]))\n categorical_f = [\n col for col in data if col not in numerical and col not in target\n ]\n\n for f in categorical_f:\n if f not in target:\n dummy = pd.get_dummies(data[f], prefix=f, drop_first=False)\n data = pd.concat([data, dummy], axis=1)\n data.drop(f, axis=1, inplace=True)\n\n dummies.extend(dummy)\n\n if use_dummies:\n missing = set(use_dummies) - set(dummies)\n for m in missing:\n data[m] = 0\n\n # set new columns to category\n for dummy in dummies:\n data[dummy] = data[dummy].astype('category')\n\n return data, dummies",
"def expand_categorical_features(df, dependent):\n\n # Find categorical columns\n df_categorical = pd.Series(\n [df[c].value_counts().index[0]\n if df[c].dtype == np.dtype('O')\n for c in df],\n index=df.columns)\n df_categorical = df.fillna(df_fill)\n\n\n data_set_dependent[:, 0] = LabelEncoder().fit_transform(\n data_set_dependent[:, 0]\n )\n\n data_set_dependent = OneHotEncoder(\n categorical_features = [0]\n ).fit_transform(data_set_dependent).toarray()\n\n data_set_independent = LabelEncoder().fit_transform(data_set_independent)",
"def prepare_data(df, num_features, cat_features, target=None):\r\n\r\n\talgo_df = pd.DataFrame()\r\n\t\r\n\tfor feature in num_features:\r\n\t\talgo_df[feature] = df[feature]\r\n\t\r\n\tfor f in cat_features:\r\n\t\tdf_dummy = pd.get_dummies(df[f], prefix=f)\r\n\t\talgo_df = pd.concat((algo_df, df_dummy), axis=1)\r\n\r\n\treturn algo_df",
"def one_hot_encoding(X):\n X_cat = pd.get_dummies(X.select_dtypes(include=['object']))\n X_num = X.select_dtypes(exclude=['object'])\n res = pd.concat([X_num, X_cat], axis=1, sort=False)\n \n return res",
"def __init__(self, df, categorical_features, encoding_types, handle_na=False):\n self.df = df\n self.cat_feats = categorical_features\n self.enc_types = encoding_types\n self.handle_na = handle_na\n self.label_encoder = dict()\n self.binary_encoder = dict()\n self.ohe_encoder = dict()\n\n\n if self.handle_na:\n for c in self.cat_feats:\n self.df.loc[:,c] = self.df.loc[:,c].astype(str).fillna(-9999)\n self.output_df = self.df.copy(deep= True)",
"def get_category_dummies(df, categories=categories):\n cat_list_col = df['cats'].str.split(',')\n dummied = pd.DataFrame(index=df['cats'].index)\n for cat in categories:\n dummied['category_' + cat] = cat_list_col.apply(lambda cats: cat in cats).astype(int)\n \n dummied_df = pd.concat([df, dummied], axis=1)\n\n return dummied_df",
"def design_matrices(df):\n \n X_list = []\n for ii in range(df.shape[1]):\n #TODO: category gives error\n if (df.iloc[:, ii].dtype=='object'):\n X_list.append(pd.get_dummies(df.iloc[:, ii], \n drop_first=True).values)\n else:\n X_list.append(df.iloc[:, ii].values[:, np.newaxis])\n \n return X_list",
"def get_dummy_cat(self):\n cat = pd.get_dummies(self.sales_df.cat_id)[self.sales_df.cat_id.unique()].values\n x = torch.from_numpy(cat).type(torch.get_default_dtype())\n assert x.shape == (self.num_timeseries, self.num_cats)\n return x",
"def __convert_categorical_values(df,\n ordinal_categorical_fields_mapping,\n nominal_categorical_fields=None\n ):\n\n \"\"\"\n addr_state_mapping = {\n label: idx for idx, label in\n enumerate(np.unique(df['addr_state']))\n }\n\n zip_code_mapping = {\n label: idx for idx, label in\n enumerate(np.unique(df['zip_code']))\n }\n\n purpose_cat_mapping = {\n label: idx for idx, label in\n enumerate(np.unique(df['purpose_cat']))\n }\n \"\"\"\n\n # Convert ordinal categorical values to the numerical values\n if ordinal_categorical_fields_mapping is not None:\n df.replace(ordinal_categorical_fields_mapping, inplace=True)\n\n # df.replace(addr_state_mapping, inplace=True)\n # df.replace(zip_code_mapping, inplace=True)\n # df.replace(purpose_cat_mapping, inplace=True)\n\n # Convert nominal categorical values to the one-hot encoded fields\n for field_name in nominal_categorical_fields:\n dummies = pd.get_dummies(df[field_name]).rename(columns=lambda x: 'is_' + field_name + '_' + str(x))\n df = pd.concat([df, dummies], axis=1)\n df = df.drop([field_name], axis=1)\n\n return df",
"def _prepare_features(self, X: np.ndarray):\n # Determine feature types and unique values\n self.feature_types = {}\n for feature_i in range(X.shape[1]):\n # All unique, non-NULL values\n unique_feature_values = get_unique_values(X[:, feature_i])\n # Some values are strings\n if any([isinstance(val, str) for val in unique_feature_values]):\n self.feature_types[feature_i] = 'categorical'\n # All are numbers\n else:\n self.feature_types[feature_i] = 'numerical'",
"def prepare_data(df, num_col, cat_col):\r\n df_num = df[num_col]\r\n df_cat = df[cat_col]\r\n# Standardize numerical data\r\n scaler = preprocessing.StandardScaler().fit(df_num)\r\n df_num = pd.DataFrame(scaler.transform(df_num), columns = df_num.columns, index = df_num.index)\r\n# Prepare categorical data and join with numerical data\r\n df_final = df_num\r\n for i in df_cat.columns:\r\n df_final = df_final.join(pd.get_dummies(df_cat[i], prefix=i),how = 'inner')\r\n return df_final, scaler"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Checks feature importances using the best classifier configurations found in the earlier tests
|
def test_features_importances(classifiers_names, predictors_agrad, answer_agrad, predictors_seg, answer_seg, group=""):
    classifiers = load_classifiers_wodraw(group)  # alternatives: load_classifiers_rnr(group), load_classifiers_3classes(group)
classifiers_agrad = [classifiers[0][0]]
classifiers_seg = [classifiers[1][0]]
for pair in [ ["Pleasantness", predictors_agrad, answer_agrad, classifiers_agrad], ["Safety", predictors_seg, answer_seg, classifiers_seg] ]:
for classifier_index in range(0, len(pair[3])):
clf = pair[3][classifier_index]
clf_name = classifiers_names[classifier_index]
#Training with all data!
clf.fit(pair[1], pair[2])
try:
importances_dic = {}
importances = clf.feature_importances_
for index in range(0, len(list_of_predictors)):
importances_dic[list_of_predictors[index]] = importances[index]
sorted_dic = sorted(importances_dic.items(), key=operator.itemgetter(1), reverse=True)
print ">>>> G " + group + " Q " + pair[0] + " C " + clf_name
#print str(sorted_dic)
print '\n'.join([str(tuple[0]) + " " + str(tuple[1]) for tuple in sorted_dic])
#print "FEATURES " + str(", ".join(list_of_predictors))
#print(clf.feature_importances_)
plot_importances(clf, pair, group)
# RECURSIVE! Create the RFE object and compute a cross-validated score.
#svc = SVC(kernel="linear")
#if pair[0] == "Pleasantness":
# svc = load_classifiers_wodraw(group)[0][0]
#else:
# svc = load_classifiers_wodraw(group)[1][0]
# The "accuracy" scoring is proportional to the number of correct classifications
#rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(pair[2], 5),
# scoring='accuracy')
#rfecv.fit(pair[1], pair[2])
#print("Optimal number of features : %d" % rfecv.n_features_)
#print "Ranking " + str(rfecv.ranking_)
#importances_dic = {}
#importances = rfecv.ranking_
#for index in range(0, len(list_of_predictors)):
# importances_dic[list_of_predictors[index]] = importances[index]
#
#sorted_dic = sorted(importances_dic.items(), key=operator.itemgetter(1))
#print ">>>> G " + group + " Q " + pair[0] + " C " + clf_name
##print str(sorted_dic)
#print '\n'.join([str(tuple[0]) + " " + str(tuple[1]) for tuple in sorted_dic])
# RECURSIVE!
            #SELECT FROM MODEL! Which features?
#print ">>>> G " + group + " Q " + pair[0] + " C " + clf_name
#model = SelectFromModel(clf, prefit=True)
#X_new = model.transform(pair[1])
#print model.inverse_transform(X_new)
#print X_new
#SELECT FROM MODEL!
except Exception as inst:
print "Exception! "
print type(inst)
print inst.args
except:
print "Unexpected error:", sys.exc_info()[0]
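
The try/except above presumably guards against classifiers that do not expose feature_importances_ (e.g. SVC). As a hedged alternative that is not part of the original script, scikit-learn's permutation_importance gives a model-agnostic ranking for any fitted estimator; a minimal sketch on synthetic stand-in data:

from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.inspection import permutation_importance

# Synthetic stand-in for the predictors/answer arrays used above.
X, y = make_classification(n_samples=200, n_features=6, random_state=0)
clf = RandomForestClassifier(random_state=0).fit(X, y)

# Mean drop in accuracy when each feature column is shuffled.
result = permutation_importance(clf, X, y, n_repeats=10, random_state=0)
for index in result.importances_mean.argsort()[::-1]:
    print(index, result.importances_mean[index])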
|
[
"def test_calculated_feature_importances(self):\n # Setting up lorax\n lrx = TheLorax(global_clf, data, id_col='entity_id')\n lrx_out = lrx.explain_example(idx=1, pred_class=1, graph=False)\n\n feature1_contrib = lrx_out.contribution.loc['feature1']\n feature5_contrib = lrx_out.contribution.loc['feature5']\n\n # Test cases for correct feature importances\n self.assertEqual(feature1_contrib, 0.04889021376498209)\n self.assertEqual(feature5_contrib, -0.31556073962118303)\n self.assertFalse('feature3' in lrx_out.contribution)",
"def test_add_overall_feature_importance(self):\n sample_importance = [('feature1', 0.2), ('feature2', 0.4)]\n overall_importance = [('feature1', 0.6), ('feature2', 0.1)]\n\n result = add_overall_feature_importance(sample_importance, overall_importance)\n true_result = [('feature2', 1, 0.1, 2, 1),\n ('feature1', 2, 0.6, 1, -1)]\n\n for i in range(len(true_result)):\n self.assertTupleEqual(true_result[i], result[i])\n\n # Setting up lorax\n lrx = TheLorax(global_clf, data, id_col='entity_id')\n lrx_out = lrx.explain_example(idx=1, pred_class=1, graph=False)\n\n feature1_overall_imp = global_clf.feature_importances_[0]\n\n self.assertEqual(feature1_overall_imp, lrx_out.overall_imp.loc['feature1'])\n self.assertEqual(lrx_out.overall_rank.loc['feature2'], 3)\n self.assertEqual(lrx_out.rank_change.loc['feature5'], -2)",
"def evaluate_features():\n # training set is from Stanford Sentiment Training Set\n training_set = parse_stanford(\"data/stanfordSentimentTreebank/stanfordSentimentTreebank/dictionary.txt\", \n \"data/stanfordSentimentTreebank/stanfordSentimentTreebank/sentiment_labels.txt\")\n # train weights for maxent model\n weights = train_maxent(training_set)\n # sort weights in descending order\n sorted_weights = { sentiment: sorted(weights[sentiment].iteritems(), \n key=lambda x:x[1], \n reverse=True) \n for sentiment in weights}\n\n # evaluate model for the top i weights, in this range (There should be # ~130000 weights total)\n for i in range(10000, 130000, 10000):\n # get the top i weights\n new_weights = {\"positive\": {}, \"negative\": {}, \"neutral\": {}}\n for sentiment in sorted_weights:\n new_weights[sentiment] = {w[0]:weights[sentiment][w[0]] \n for w in sorted_weights[sentiment][:i-1]}\n\n # load the episode that has gold standard features already assigned\n episode = parse_goldstandard(\"data/s1e9_gold.txt\", 1, 9)\n # calculate bag of words sentiments\n word_sentiments = parse_NRC(\"data/NRC-Emotion-Lexicon-v0.92/NRC-Emotion-Lexicon-v0.92/NRC-emotion-lexicon-wordlevel-alphabetized-v0.92.txt\")\n bag_of_words(episode, word_sentiments)\n # calculate maxent sentiments\n run_maxent(episode, new_weights)\n\n # evaulate maxent and bag_of_words sentiments against baseline\n print \"%s max_ent vs gold: %s\" % (i, compare_scores(episode, \n score1=\"maxent_score\", \n score2=\"gold_score\"))\n print \"%s bow vs gold: %s\" % (i, compare_scores(episode, \n \"bow_score\", \n score2=\"gold_score\"))",
"def feature_importances_(self):\n check_is_fitted(self)\n\n return self.tree_.compute_feature_importances()",
"def get_feat_import(self, grid_search):\n if type(grid_search) is not GridSearchCV:\n print(\"ERROR: grid_search is not a valid GridSearchCV object\")\n return 1\n feature_importances = grid_search.best_estimator_.feature_importances_\n # extra_attribs = [\"rooms_per_household\", \"population_per_household\", \"bedrooms_per_room\"]\n attributes = self._numattribs # + extra_attribs\n\n for cat_attrib in self._catattribs:\n cat_encoder = OneHotEncoder(categories='auto')\n cat_encoder.fit_transform(self._test[cat_attrib].values.reshape(-1, 1))\n cat_one_hot_attribs = cat_encoder.categories_[0].tolist()\n attributes += cat_one_hot_attribs\n return sorted(zip(feature_importances, attributes), reverse=True)\n\n # plt.figure(figsize=(12, 8))\n # data = pd.DataFrame({'feature': self._attribs,\n # \"importance\": feature_importances})\n # sns.barplot(data=data, y='feature', x='importance')\n # plt.title('feature importance')",
"def calc_feature_importances_permutation(\n self, x=None, y=None, features=None, classifier=AdaBoostClassifier,\n parameters={'n_estimators': [10, 30, 100, 300, 1000]},\n model_metric='accuracy', num_repeats=1000, scale=True, plt_name='',\n test=False\n ):\n\n # Checks argument values are suitable for running the function\n if x is None:\n x = copy.deepcopy(self.x)\n if y is None:\n y = copy.deepcopy(self.y)\n if features is None:\n features = copy.deepcopy(self.features)\n\n if type(x) != np.ndarray:\n raise TypeError(\n 'Expect \"x\" to be a (2D) array of fluorescence readings'\n )\n\n if len(x.shape) != 2:\n raise ValueError(\n 'Expect \"x\" to be a (2D) array of fluorescence readings'\n )\n\n if type(y) != np.ndarray:\n raise TypeError(\n 'Expect \"y\" to be a (1D) array of class labels'\n )\n\n if len(y.shape) != 1:\n raise ValueError(\n 'Expect \"y\" to be a (1D) array of class labels'\n )\n\n if type(features) != list:\n raise TypeError(\n 'Expect \"features\" to be a list of the column ids in \"x\"'\n )\n\n if x.shape[0] != y.shape[0]:\n raise ValueError(\n 'Mismatch between the number of rows in \"x\" and the number of '\n 'entries in \"y\"'\n )\n\n if x.shape[1] != len(features):\n raise ValueError(\n 'Mismatch between the number of columns in \"x\" and the number '\n 'of column ids in \"features\"'\n )\n\n if not type(parameters) in [dict, OrderedDict]:\n raise TypeError(\n 'Expect \"parameters\" to be a dictionary of parameter names '\n '(keys) and arrays of values to consider for them (values) in a'\n ' grid search'\n )\n\n metrics_list = [\n 'accuracy', 'balanced_accuracy', 'top_k_accuracy',\n 'average_precision','neg_brier_score', 'f1', 'f1_micro', 'f1_macro',\n 'f1_weighted','f1_samples', 'neg_log_loss', 'precision',\n 'precision_micro','precision_macro', 'precision_weighted',\n 'precision_samples', 'recall','recall_micro', 'recall_macro',\n 'recall_weighted', 'recall_samples','jaccard', 'jaccard_micro',\n 'jaccard_macro', 'jaccard_weighted','jaccard_samples', 'roc_auc',\n 'roc_auc_ovr', 'roc_auc_ovo','roc_auc_ovr_weighted',\n 'roc_auc_ovo_weighted'\n ]\n if type(model_metric) == sklearn.metrics._scorer._PredictScorer:\n pass\n else:\n if not model_metric in metrics_list:\n raise ValueError(\n 'Value provided for \"model_metric\" not recognised - please '\n 'specify one of the strings in the list below:\\n'\n '{}'.format(metrics_list)\n )\n\n if type(num_repeats) != int:\n raise TypeError(\n '\"num_repeats\" should be set to a positive integer value'\n )\n else:\n if num_repeats <= 0:\n raise ValueError(\n '\"num_repeats\" should be set to a positive integer value'\n )\n\n if type(scale) != bool:\n raise TypeError(\n '\"scale\" should be set to a Boolean value'\n )\n\n if type(plt_name) != str:\n raise TypeError(\n '\"plt_name\" should be a string value'\n )\n\n # Fits classifiers\n permutation_feature_importances = OrderedDict()\n for col in features:\n permutation_feature_importances[col] = [np.nan for n in range(num_repeats)]\n\n # For speed reasons, perform one grid search to obtain \"optimal\"\n # parameters on the original data, rather than re-running for each\n # bootstrapped dataset => greatly increases function speed whilst having\n # little effect upon performance (an OK set of parameter values is\n # expected to work well for all of the bootstrapped datasets)\n if test is False:\n orig_model = copy.deepcopy(classifier)()\n else:\n try:\n orig_model = copy.deepcopy(classifier)(random_state=1)\n except TypeError:\n orig_model = copy.deepcopy(classifier)()\n 
orig_grid_search = GridSearchCV(\n estimator=orig_model, param_grid=parameters, error_score=np.nan,\n scoring=model_metric\n )\n\n if scale is True:\n scaled_x = RobustScaler().fit_transform(x)\n orig_grid_search.fit(X=scaled_x, y=y)\n else:\n orig_grid_search.fit(X=x, y=y)\n best_params = orig_grid_search.best_params_\n\n for n in range(num_repeats):\n # Uses bootstrapping to create a \"new\" dataset\n temp_x, temp_y = bootstrap_data(x, y, features, scale, test)\n temp_x = temp_x.to_numpy()\n temp_y = np.array(temp_y)\n\n if test is True:\n best_params['random_state'] = 1\n model = copy.deepcopy(classifier)(**best_params)\n model.fit(temp_x, temp_y)\n\n if test is False:\n results = permutation_importance(\n model, temp_x, temp_y, scoring=model_metric, n_jobs=-1\n )\n else:\n results = permutation_importance(\n model, temp_x, temp_y, scoring=model_metric, n_jobs=-1,\n random_state=1\n )\n\n for col, importance in enumerate(results.importances_mean):\n col = features[col]\n permutation_feature_importances[col][n] = importance\n\n plt_name = '{}_Permutation'.format(plt_name)\n if test is False:\n importance_df = make_feat_importance_plots(\n permutation_feature_importances, self.results_dir, plt_name,\n test\n )\n else:\n (\n importance_df, cols, cols_all, all_vals, median_vals,\n lower_conf_limit_vals, upper_conf_limit_vals\n ) = make_feat_importance_plots(\n permutation_feature_importances, self.results_dir, plt_name,\n test\n )\n\n return importance_df, permutation_feature_importances",
"def select_best_features():\n\n sd = StressDetector(wav_path, ALL_FEATURES)\n sd.get_features('./data/complete_features.tsv')\n\n mlp = MLPClassifier(\n random_state=42,\n )\n\n nn = KNeighborsClassifier(\n n_jobs=-1,\n )\n\n svm = SVC(\n random_state=42,\n probability=True,\n )\n\n rf = RandomForestClassifier(\n random_state=42,\n n_jobs=-1,\n )\n\n classifiers = [mlp, nn, svm, rf]\n\n names = [\n \"Neural Net\",\n \"Nearest Neighbors\",\n \"SVM\",\n \"Random Forest\",\n ]\n\n feat_group1 = {\n 'Other Features': ['pos', 'pros'],\n 'Duration Features': ['nucl_dur', 'syll_dur', 'nucl_dur_norm',\n 'nucl_dur_vnorm', 'syll_dur_norm',\n 'nucl_dur_left', 'nucl_dur_right', 'nucl_dur_v_left',\n 'nucl_dur_v_right', 'syll_dur_left', 'syll_dur_right'],\n 'Loudness Features': ['rms', 'int_peak',\n 'rms_norm', 'int_peak_norm',\n 'rms_left', 'rms_right',\n 'int_peak_left', 'int_peak_right',\n ],\n 'Spectral Features': ['spect_b1', 'spect_b2', 'spect_b3',\n 'spect_b1_left', 'spect_b2_left', 'spect_b3_left', 'spect_b1_right', 'spect_b2_right', 'spect_b3_right'],\n 'Pitch Features': ['trajectory', 'f0_max', 'f0_mean', 'f0_meanST', 'f0_max_styl',\n 'f0_max_norm', 'f0_mean_norm', 'f0_max_styl_norm', 'f0_meanST_norm',\n 'intersyllab', 'f0_max_left', 'f0_max_right', 'f0_mean_left', 'f0_mean_right',\n 'f0_max_styl_left', 'f0_max_styl_right', 'f0_meanST_left', 'f0_meanST_right'\n ]\n }\n\n feat_group2 = {\n 'Absolute': [\n 'nucl_dur', 'syll_dur', 'rms', 'int_peak', 'spect_b1', 'spect_b2',\n 'spect_b3', 'trajectory', 'f0_max', 'f0_mean', 'f0_meanST', 'f0_max_styl'\n ],\n 'Normalized': ['nucl_dur_norm',\n 'nucl_dur_vnorm', 'syll_dur_norm',\n 'rms_norm', 'int_peak_norm',\n 'f0_max_norm', 'f0_mean_norm', 'f0_max_styl_norm', 'f0_meanST_norm',\n ],\n 'Contextual': [\n 'nucl_dur_left', 'nucl_dur_right', 'nucl_dur_v_left',\n 'nucl_dur_v_right', 'syll_dur_left', 'syll_dur_right',\n 'rms_left', 'rms_right', 'int_peak_left', 'int_peak_right',\n 'spect_b1_left', 'spect_b2_left', 'spect_b3_left', 'spect_b1_right', 'spect_b2_right', 'spect_b3_right',\n 'intersyllab', 'f0_max_left', 'f0_max_right', 'f0_mean_left', 'f0_mean_right',\n 'f0_max_styl_left', 'f0_max_styl_right', 'f0_meanST_left', 'f0_meanST_right'\n ],\n 'Norm + Cont': ['nucl_dur_norm',\n 'nucl_dur_vnorm', 'syll_dur_norm',\n 'rms_norm', 'int_peak_norm',\n 'f0_max_norm', 'f0_mean_norm', 'f0_max_styl_norm', 'f0_meanST_norm',\n 'nucl_dur_left', 'nucl_dur_right', 'nucl_dur_v_left',\n 'nucl_dur_v_right', 'syll_dur_left', 'syll_dur_right',\n 'rms_left', 'rms_right', 'int_peak_left', 'int_peak_right',\n 'spect_b1_left', 'spect_b2_left', 'spect_b3_left', 'spect_b1_right', 'spect_b2_right', 'spect_b3_right',\n 'intersyllab', 'f0_max_left', 'f0_max_right', 'f0_mean_left', 'f0_mean_right',\n 'f0_max_styl_left', 'f0_max_styl_right', 'f0_meanST_left', 'f0_meanST_right'\n ],\n 'Abs + Cont': ['nucl_dur', 'syll_dur', 'rms', 'int_peak', 'spect_b1', 'spect_b2',\n 'spect_b3', 'trajectory', 'f0_max', 'f0_mean', 'f0_meanST', 'f0_max_styl',\n 'nucl_dur_left', 'nucl_dur_right', 'nucl_dur_v_left',\n 'nucl_dur_v_right', 'syll_dur_left', 'syll_dur_right',\n 'rms_left', 'rms_right', 'int_peak_left', 'int_peak_right',\n 'spect_b1_left', 'spect_b2_left', 'spect_b3_left', 'spect_b1_right', 'spect_b2_right', 'spect_b3_right',\n 'intersyllab', 'f0_max_left', 'f0_max_right', 'f0_mean_left', 'f0_mean_right',\n 'f0_max_styl_left', 'f0_max_styl_right', 'f0_meanST_left', 'f0_meanST_right'\n ],\n 'Abs + Norm + Cont': [\n 'nucl_dur', 'syll_dur', 'rms', 'int_peak', 'spect_b1', 'spect_b2',\n 'spect_b3', 
'trajectory', 'f0_max', 'f0_mean', 'f0_meanST', 'f0_max_styl',\n 'nucl_dur_norm',\n 'nucl_dur_vnorm', 'syll_dur_norm',\n 'rms_norm', 'int_peak_norm',\n 'f0_max_norm', 'f0_mean_norm', 'f0_max_styl_norm', 'f0_meanST_norm',\n 'nucl_dur_left', 'nucl_dur_right', 'nucl_dur_v_left',\n 'nucl_dur_v_right', 'syll_dur_left', 'syll_dur_right',\n 'rms_left', 'rms_right', 'int_peak_left', 'int_peak_right',\n 'spect_b1_left', 'spect_b2_left', 'spect_b3_left', 'spect_b1_right', 'spect_b2_right', 'spect_b3_right',\n 'intersyllab', 'f0_max_left', 'f0_max_right', 'f0_mean_left', 'f0_mean_right',\n 'f0_max_styl_left', 'f0_max_styl_right', 'f0_meanST_left', 'f0_meanST_right'\n ]\n }\n\n for clf, name in zip(classifiers, names):\n outfile_name = f'feature_evaluation/feat_groups1_{name}.tsv'\n outfile_name = outfile_name.replace(' ', '_')\n sd.test_feature_groups(clf, name, feat_group1, outfile_name)\n\n # # ==> remove 'other' features\n\n for clf, name in zip(classifiers, names):\n outfile_name = f'feature_evaluation/feat_groups2_{name}.tsv'\n outfile_name = outfile_name.replace(' ', '_')\n sd.test_feature_groups(clf, name, feat_group2, outfile_name)\n\n # ==> use 'Abs + Cont' and 'Abs + Norm + Cont' for gridsearch\n\n # try to remove similar or collinear measures manually\n\n # e.g. removing syllable based measures\n selected_features = [\n 'nucl_dur', 'nucl_dur_norm', # duration\n 'nucl_dur_vnorm', # duration normalised\n 'nucl_dur_left', 'nucl_dur_right', 'nucl_dur_v_left',\n 'nucl_dur_v_right', # duration context\n 'rms', 'int_peak', # loudness\n 'rms_norm', 'int_peak_norm', # loudness normalised\n 'rms_left', 'rms_right',\n 'int_peak_left', 'int_peak_right', # loudness context\n 'spect_b1', 'spect_b2', 'spect_b3', # spectrum\n 'spect_b1_left', 'spect_b2_left', 'spect_b3_left', 'spect_b1_right', 'spect_b2_right', 'spect_b3_right', # spectrum context\n 'trajectory', 'f0_max', 'f0_mean', 'f0_meanST', 'f0_max_styl', # pitch\n 'f0_max_norm', 'f0_mean_norm', 'f0_max_styl_norm', 'f0_meanST_norm', # pitch normalised\n # pitch context\n 'intersyllab', 'f0_max_left', 'f0_max_right', 'f0_mean_left', 'f0_mean_right',\n 'f0_max_styl_left', 'f0_max_styl_right', 'f0_meanST_left', 'f0_meanST_right'\n ]\n\n sd2 = StressDetector(wav_path, selected_features)\n sd2.get_features('./data/complete_features.tsv')\n\n print(sd2.test_classifiers(classifiers, names))\n\n # ==> worse result than without removing them, leave all features",
"def low_importance(features, target, target_type=\"classification\", importance_thresh=None, n_folds=None):\n # type: (ndarray, ndarray, str, float) -> ndarray\n # Selecting the model based on target_type\n\n if importance_thresh is None:\n importance_thresh = 1 / features.shape[1]\n\n if n_folds is None:\n if (features.shape[0] < 100) and (features.shape[0] > 20):\n n_folds = 5\n elif features.shape[0] < 21 and (features.shape[0] > 3):\n n_folds = 3\n elif features.shape[0] < 4:\n n_folds = 1\n else:\n n_folds = 10\n\n if target_type.lower() == \"regression\":\n model = RandomForestRegressor()\n elif target_type.lower() == \"classification\":\n model = RandomForestClassifier()\n else:\n raise ValueError('Target type must be \"regression\" or \"classification\" ')\n\n feature_importance = [0] * features.shape[1]\n # creating k-fold cross validation with 10 folds\n skf = StratifiedKFold(n_splits=n_folds, random_state=None, shuffle=False)\n for train_index, test_index in skf.split(features, target):\n features_train = features[train_index]\n targets_train = target[train_index]\n features_test = features[test_index]\n targets_test = target[test_index]\n # Fitting the model\n model.fit(features_train, targets_train)\n model.score(features_test, targets_test)\n feature_importance += model.feature_importances_\n # normalizing the feature importance values\n feat_imp_norm = feature_importance / skf.get_n_splits(features, target)\n feat_importance = []\n for i in range(len(feat_imp_norm)):\n feat_importance += [(i, feat_imp_norm[i])]\n feat_importance = np.array(feat_importance)\n feat_importance = feat_importance[feat_importance[:, 1].argsort()]\n x = feat_importance[:, 0]\n y = feat_importance[:, 1]\n x = x.astype(str)\n fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(8, 8), sharex=True, sharey=True)\n axes.barh(x, y, align='center', color=\"orange\")\n axes.set_title(\"Important Values of features using Random Forest\", fontsize=16)\n plt.tight_layout\n plt.show()\n # Creating a template (copy) to sort low important features\n feat_imp_temp = feat_imp_norm.copy()\n low_imp = []\n\n for i in range(feat_imp_temp.shape[0]):\n # finding the index of the minimum value and save it to low_importance\n low_imp += [feat_imp_temp.argmin()]\n # set the minimum to a large number in feat_imp_norm to find the next minimum value\n feat_imp_temp[feat_imp_temp.argmin()] = 1000\n\n low_importance_col = []\n for i in range(len(low_imp)):\n if feat_imp_norm[low_imp[i]] < importance_thresh:\n low_importance_col += [low_imp[i]]\n print('Features with importnace value below the importance threshold of %0.3f: \\t%s\\n'\n % (importance_thresh, low_importance_col))\n low_importance_col = np.array(low_importance_col)\n return low_importance_col",
"def get_feature_importances(self):\n feat_names = [str(compare) for compare in self.comp_eng.raw_compares]\n f_names = [f'f{i}' for i in range(len(feat_names))]\n\n dfs = []\n for type_ in ['weight', 'gain', 'cover', 'total_cover', 'total_gain']:\n booster = self.model.get_booster()\n # booster.feature_names=var_names\n imp_vals = booster.get_score(importance_type=type_)\n feats_imp = pd.DataFrame(imp_vals, index=np.arange(2)).T\n feats_imp.iloc[:, 0] = feats_imp.index\n feats_imp.columns = ['feature', type_]\n feats_imp.sort_values(type_, inplace=True, ascending=False)\n feats_imp.reset_index(drop=True, inplace=True)\n dfs.append(feats_imp)\n df = reduce(lambda x, y: pd.merge(x, y, on='feature'), dfs)\n df = df.replace(dict(zip(f_names, feat_names)))\n\n return df",
"def test_fit():\n X_train, X_test, y_train, y_test = get_testing_data()\n\n fs = ReliefF(n_neighbors=100, n_features_to_keep=5)\n fs.fit(X_train, y_train)\n\n with np.load(\"data/test_arrays.npz\") as arrays:\n correct_top_features = arrays['correct_top_features']\n correct_feature_scores = arrays['correct_feature_scores']\n\n assert np.all(np.equal(fs.top_features, correct_top_features))\n assert np.all(np.equal(fs.feature_scores, correct_feature_scores))",
"def compute_feature_importances(self):\n self.feature_importances = np.zeros(len(self.forest[0].feature_importances_))\n for i in xrange(self.n_trees):\n self.feature_importances = self.feature_importances + self.forest[i].feature_importances_\n\n self.feature_importances = self.feature_importances/self.n_trees",
"def evaluate_features(experiment_seed_tuple):\n\n experiment = experiment_seed_tuple[0]\n seed = experiment_seed_tuple[1]\n\n X_train = experiment[\"train\"]\n X_valid = experiment[\"valid\"]\n X_test = experiment[\"test\"]\n\n Y_train = Y_target[\"train\"]\n Y_valid = Y_target[\"valid\"]\n Y_test = Y_target[\"test\"]\n\n def df_to_dmatrix(features, target):\n x = features.drop(columns=[\"Date\"], errors=\"ignore\") # Ignore if not exist\n y = target\n dmatrix = xgb.DMatrix(x, label=y)\n return dmatrix\n\n dm_train = df_to_dmatrix(X_train, Y_train)\n dm_valid = df_to_dmatrix(X_valid, Y_valid)\n dm_test = df_to_dmatrix(X_test, Y_test)\n\n # set seed in XGBoost params\n CMP_XGB_PARAMS[\"seed\"] = seed\n\n # Determine optimal model size\n evals = [(dm_train, \"train\"), (dm_valid, \"valid\")]\n model_bst = xgb.train(\n params=CMP_XGB_PARAMS,\n dtrain=dm_train,\n evals=evals,\n num_boost_round=CMP_NUM_BOOST_ROUND,\n early_stopping_rounds=CMP_EARLY_STOPPING_ROUNDS,\n )\n best_ntree_limit = model_bst.best_ntree_limit\n\n # OPTIONAL: Append train and valid set and train on both sets\n\n # Retrain on all training data\n evals2 = [(dm_train, \"train\"), (dm_test, \"test\")]\n model_final = xgb.train(\n params=CMP_XGB_PARAMS,\n dtrain=dm_train,\n evals=evals2,\n num_boost_round=best_ntree_limit,\n )\n\n # Feature importance (Information Gain)\n feature_information_gain = model_final.get_score(importance_type=\"gain\")\n feature_importance = pd.DataFrame(\n list(feature_information_gain.items()), columns=[\"feature\", \"information_gain\"]\n )\n feature_importance[\"algorithm\"] = experiment[\"name\"]\n feature_importance[\"seed\"] = seed\n # Reorder columns\n feature_importance = feature_importance[FEATURE_IMPORTANCE_COLUMNS]\n\n # Predict values of test set\n y_pred = model_final.predict(dm_test)\n y_true = dm_test.get_label()\n preds = pd.DataFrame()\n preds[\"y_true\"] = y_true\n preds[\"y_pred\"] = y_pred\n preds[\"algorithm\"] = experiment[\"name\"]\n preds[\"seed\"] = seed\n # Reorder columns\n preds = preds[PREDS_COLUMNS]\n\n # Calculate and save error metrics\n r2 = r2_score(y_true=y_true, y_pred=y_pred)\n mse = mean_squared_error(y_true=y_true, y_pred=y_pred)\n rmse = sqrt(mse)\n metrics = pd.DataFrame(\n [[experiment[\"name\"], seed, mse, rmse, r2]], columns=EVAL_COLUMNS\n )\n\n eval_dict = {\n \"name\": experiment[\"name\"],\n \"metrics\": metrics,\n \"preds\": preds,\n \"feature_importance\": feature_importance,\n }\n\n return eval_dict",
"def testTrainFnChiefFeatureSelectionWithGoodSplits(self):\n with self.cached_session() as sess:\n ensemble_handle = model_ops.tree_ensemble_variable(\n stamp_token=0, tree_ensemble_config=\"\", name=\"tree_ensemble\")\n learner_config = learner_pb2.LearnerConfig()\n learner_config.learning_rate_tuner.fixed.learning_rate = 0.1\n learner_config.num_classes = 2\n learner_config.regularization.l1 = 0\n learner_config.regularization.l2 = 0\n learner_config.constraints.max_tree_depth = 1\n learner_config.constraints.max_number_of_unique_feature_columns = 1\n learner_config.constraints.min_node_weight = 0\n features = {}\n features[\"dense_float_0\"] = array_ops.ones([4, 1], dtypes.float32)\n # Feature 1 is predictive and is in our selected features so it will be\n # used even when we're at the limit.\n features[\"dense_float_1\"] = array_ops.constant([0, 0, 1, 1],\n dtypes.float32)\n\n gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(\n is_chief=True,\n num_ps_replicas=0,\n center_bias=False,\n ensemble_handle=ensemble_handle,\n examples_per_layer=1,\n learner_config=learner_config,\n logits_dimension=1,\n features=features)\n\n predictions = array_ops.constant(\n [[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)\n partition_ids = array_ops.zeros([4], dtypes.int32)\n ensemble_stamp = variables.VariableV1(\n initial_value=0,\n name=\"ensemble_stamp\",\n trainable=False,\n dtype=dtypes.int64)\n\n predictions_dict = {\n \"predictions\":\n predictions,\n \"predictions_no_dropout\":\n predictions,\n \"partition_ids\":\n partition_ids,\n \"ensemble_stamp\":\n ensemble_stamp,\n \"num_trees\":\n 12,\n \"num_used_handlers\":\n array_ops.constant(1, dtype=dtypes.int64),\n \"used_handlers_mask\":\n array_ops.constant([False, True], dtype=dtypes.bool),\n }\n\n labels = array_ops.constant([0, 0, 1, 1], dtypes.float32)\n weights = array_ops.ones([4, 1], dtypes.float32)\n # Create train op.\n train_op = gbdt_model.train(\n loss=math_ops.reduce_mean(\n _squared_loss(labels, weights, predictions)),\n predictions_dict=predictions_dict,\n labels=labels)\n variables.global_variables_initializer().run()\n resources.initialize_resources(resources.shared_resources()).run()\n\n # On first run, expect no splits to be chosen because the quantile\n # buckets will not be ready.\n train_op.run()\n stamp_token, serialized = model_ops.tree_ensemble_serialize(\n ensemble_handle)\n output = tree_config_pb2.DecisionTreeEnsembleConfig()\n output.ParseFromString(serialized.eval())\n self.assertEquals(len(output.trees), 0)\n self.assertEquals(len(output.tree_weights), 0)\n self.assertEquals(stamp_token.eval(), 1)\n\n # Update the stamp to be able to run a second time.\n sess.run([ensemble_stamp.assign_add(1)])\n\n train_op.run()\n stamp_token, serialized = model_ops.tree_ensemble_serialize(\n ensemble_handle)\n output = tree_config_pb2.DecisionTreeEnsembleConfig()\n output.ParseFromString(serialized.eval())\n\n self.assertEquals(len(output.trees), 1)\n self.assertAllClose(output.tree_weights, [0.1])\n self.assertEquals(stamp_token.eval(), 2)\n expected_tree = \"\"\"\n nodes {\n dense_float_binary_split {\n feature_column: 1\n left_id: 1\n right_id: 2\n }\n node_metadata {\n gain: 0.5\n }\n }\n nodes {\n leaf {\n vector {\n value: 0.0\n }\n }\n }\n nodes {\n leaf {\n vector {\n value: -0.5\n }\n }\n }\"\"\"\n self.assertProtoEquals(expected_tree, output.trees[0])",
"def exec_classifiers(self, dataset):\n f = Features()\n pt = param_tuning.ParamTuning()\n\n start_time = time.time()\n Xtrain, Xtest, ytrain, ytest = self._load_and_split_data(dataset)\n print(\"Loaded train/test datasets in {} sec.\".format(time.time() - start_time))\n\n fX_train = f.build(Xtrain)\n fX_test = f.build(Xtest)\n print(\"Build features from train/test data in {} sec\".format(time.time() - start_time))\n\n for clf in config.MLConf.clf_custom_params:\n print('Method {}'.format(clf))\n print('=======', end='')\n print(len(clf) * '=')\n\n tot_time = time.time(); start_time = time.time()\n # 1st phase: train each classifier on the whole train dataset (no folds)\n # estimator = pt.clf_names[clf][0](**config.MLConf.clf_custom_params[clf])\n estimator = pt.clf_names[clf][0](random_state=config.seed_no)\n estimator.set_params(**config.MLConf.clf_custom_params[clf])\n estimator = pt.trainClassifier(fX_train, ytrain, estimator)\n\n print(\"Finished training model on dataset; {} sec.\".format(time.time() - start_time))\n\n start_time = time.time()\n # 2nd phase: test each classifier on the test dataset\n res = pt.testClassifier(fX_test, ytest, estimator)\n self._print_stats(clf, res['metrics'], res['feature_imp'], start_time)\n # if not os.path.exists('output'):\n # os.makedirs('output')\n # np.savetxt(f'output/{clf}_default_stats.csv', res['metrics']['stats'], fmt=\"%u\")\n\n print(\"The whole process took {} sec.\\n\".format(time.time() - tot_time))",
"def test_init_Reg_feature_selector():\n feature_selector = Reg_feature_selector()\n assert feature_selector.strategy == \"l1\"\n assert feature_selector.threshold == 0.3\n assert not feature_selector._Reg_feature_selector__fitOK\n assert feature_selector._Reg_feature_selector__to_discard == []",
"def test_train():\n test_clf = train.train()\n assert isinstance(test_clf, RandomForestClassifier)\n assert 8 == test_clf.n_features_",
"def best_features(self):\n return list()",
"def feature_importance(self, xg_boost=True, extra_trees=False):\n output_folder = self.output_folder\n feature_names = self.feature_names\n\n X = self.X_df\n y = self.y_df\n\n if xg_boost:\n print('\\n********** Method 4: Calculating the feature importance using XGBoost. **********\\n')\n ''' feature importance using XGBoost '''\n feature_names = feature_names\n housing_dmatrix = xgb.DMatrix(X, y, feature_names=feature_names)\n # Create the parameter dictionary: params\n params = {\"objective\": \"reg:squarederror\", \"max_depth\": \"4\"}\n # Train the model: xg_reg\n xg_reg = xgb.train(dtrain=housing_dmatrix, params=params, num_boost_round=10)\n\n feature_imp = dict(\n sorted(xg_reg.get_score(importance_type='weight').items(), key=lambda kv: kv[1], reverse=True))\n print('\\nFeatures - Importance\\n')\n for key, value in feature_imp.items():\n print('%s: %.5f' % (key, value))\n print('\\n')\n\n # Plot the feature importances\n xgb.plot_importance(xg_reg)\n\n if not os.path.exists(output_folder):\n os.makedirs(output_folder)\n fig = plt.gcf()\n fig.set_size_inches(15, 10.5)\n plt.title('XGBoost Feature Importance')\n fig.savefig(output_folder + 'xgb_fs', dpi=100)\n plt.close()\n print('saved plot in {}/{}'.format(output_folder, 'xgb_fs'))\n\n if extra_trees:\n print('\\n********** Method 5: Calculating the feature importance using Extra Trees. **********\\n')\n model = ExtraTreesRegressor(n_estimators=100, random_state=42)\n model.fit(X, y)\n feature_imp = {}\n for i in range(len(model.feature_importances_)):\n # print('%s: %.5f' % (columns[i], model.feature_importances_[i]))\n feature_imp[feature_names[i]] = model.feature_importances_[i]\n feature_imp = dict(sorted(feature_imp.items(), key=lambda kv: kv[1], reverse=True))\n print('\\nFeatures - Importance\\n')\n for key, value in feature_imp.items():\n print('%s: %.5f' % (key, value))\n print('\\n')\n # print(model.feature_importances_)\n # use inbuilt class feature_importances of tree based classifiers\n # plot graph of feature importances for better visualization\n feat_importances = pd.Series(model.feature_importances_, index=X.columns)\n feat_importances.nlargest(20).plot(kind='barh')\n if not os.path.exists(output_folder):\n os.makedirs(output_folder)\n fig = plt.gcf()\n fig.set_size_inches(15, 10.5)\n plt.title('Extra Trees Feature Importance')\n fig.savefig(output_folder + 'extratrees_fs.png', dpi=100)\n plt.close()\n print('saved plot in {}/{}'.format(output_folder, 'extratrees_fs.png'))",
"def test_fit_Reg_feature_selector():\n feature_selector = Reg_feature_selector()\n df_train = pd.read_csv(\"data_for_tests/clean_train.csv\")\n y_train = pd.read_csv(\"data_for_tests/clean_target.csv\", squeeze=True)\n with pytest.raises(ValueError):\n feature_selector.fit(None, y_train)\n with pytest.raises(ValueError):\n feature_selector.fit(df_train, None)\n feature_selector.fit(df_train, y_train)\n assert feature_selector._Reg_feature_selector__fitOK\n feature_selector.set_params(strategy=\"variance\")\n feature_selector.fit(df_train, y_train)\n assert feature_selector._Reg_feature_selector__fitOK\n feature_selector.set_params(strategy=\"rf_feature_importance\")\n feature_selector.fit(df_train, y_train)\n assert feature_selector._Reg_feature_selector__fitOK\n feature_selector.set_params(strategy=\"wrond_strategy\")\n with pytest.raises(ValueError):\n feature_selector.fit(df_train, y_train)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
All room details are stored inside a typeclasses.Attribute object's db_value as a dictionary. For each attribute whose value is a valid dictionary, we'll need to create a RoomDetail object that points to the objectdb_set[0] object representing the room, and do an update_or_create on that RoomDetail with the dictionary values. The attribute will need to be deleted after conversion, or immediately if its value is not a valid dictionary.
|
def convert_room_details(apps, schema_editor):
Attribute = apps.get_model("typeclasses", "Attribute")
RoomDetail = apps.get_model("room_extensions", "RoomDetail")
qs = Attribute.objects.filter(db_key="details")
num = 0
total = len(qs)
fails = 0
bad_dict_count = 0
success = 0
if total:
print(f"\nConverting room details: {total} records.")
for attr in qs:
try:
num += 1
progress = num / total
print(ProgressBar(progress, "Progress: "), end="\r", flush=True)
objdb = attr.objectdb_set.all()[0]
if not isinstance(attr.db_value, dict):
attr.delete()
fails += 1
continue
for name, description in attr.db_value.items():
if not description or not name:
bad_dict_count += 1
continue
try:
name = str(name)
description = str(description)
except (TypeError, ValueError):
bad_dict_count += 1
continue
RoomDetail.objects.update_or_create(
room=objdb,
name=name,
defaults={"description": description},
)
success += 1
attr.delete()
except IndexError:
attr.delete()
if total:
print("\n")
print(f"Total fails: {fails}")
print(f"Total bad dicts: {bad_dict_count}")
print(f"Total successes: {success}")
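
For context, a conversion function like convert_room_details is normally wired into a Django data migration through migrations.RunPython. A minimal sketch follows; the dependency name below is a placeholder rather than a value from the original project.

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ("room_extensions", "0001_initial"),  # placeholder dependency
    ]

    operations = [
        # Forward pass converts the stored dicts; reverse is a no-op.
        migrations.RunPython(convert_room_details, migrations.RunPython.noop),
    ]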
|
[
"def __data_to_attributes(self, data):\n\n if not self.__is_row:\n raise Exception(\"A non data bound class cannot be bound to data without being converted\")\n\n for k, v in self.fields.items():\n if v.default_parameters['null']:\n if k not in data:\n data[k] = None\n\n # Ensure that the retrieved data and the defined model match\n if not data.keys() == self.__fields_keys:\n raise Exception(\"Mismatch between model defined columns and database columns\")\n\n for key, value in self.fields.items():\n # In the future the to_python_type() method on each field class can be uses to convert to the correct type\n # when the given DB package (e.g mysqldb) does not do it by default\n value = data[key]\n\n # Assign the given data values to a class attribute (using __dict__)\n self.__dict__[key] = value\n\n # Store a back up of the data values so the row can be found when updating the model\n self._database_values[key] = value",
"def to_attr(data):\r\n\r\n def iter_db2id(item):\r\n \"\"\"\r\n recursively looping through stored iterables, replacing objects with ids.\r\n (Python only builds nested functions once, so there is no overhead for nesting)\r\n \"\"\"\r\n dtype = type(item)\r\n if dtype in (basestring, int, float): # check the most common types first, for speed\r\n return item\r\n elif hasattr(item, \"id\") and hasattr(item, \"db_model_name\") and hasattr(item, \"db_key\"):\r\n db_model_name = item.db_model_name\r\n if db_model_name == \"typeclass\":\r\n db_model_name = GA(item.dbobj, \"db_model_name\")\r\n return PackedDBobject(item.id, db_model_name, item.db_key)\r\n elif dtype == tuple:\r\n return tuple(iter_db2id(val) for val in item)\r\n elif dtype in (dict, PackedDict):\r\n return dict((key, iter_db2id(val)) for key, val in item.items())\r\n elif hasattr(item, '__iter__'):\r\n return list(iter_db2id(val) for val in item)\r\n else:\r\n return item\r\n\r\n dtype = type(data)\r\n\r\n if dtype in (basestring, int, float):\r\n return (\"simple\",data)\r\n elif hasattr(data, \"id\") and hasattr(data, \"db_model_name\") and hasattr(data, 'db_key'):\r\n # all django models (objectdb,scriptdb,playerdb,channel,msg,typeclass)\r\n # have the protected property db_model_name hardcoded on themselves for speed.\r\n db_model_name = data.db_model_name\r\n if db_model_name == \"typeclass\":\r\n # typeclass cannot help us, we want the actual child object model name\r\n db_model_name = GA(data.dbobj, \"db_model_name\")\r\n return (\"dbobj\", PackedDBobject(data.id, db_model_name, data.db_key))\r\n elif hasattr(data, \"__iter__\"):\r\n return (\"iter\", iter_db2id(data))\r\n else:\r\n return (\"simple\", data)",
"def populate_animal_races_and_breeds(apps, schema_editor):\n Attribute = apps.get_model(\"typeclasses\", \"Attribute\")\n Race = apps.get_model(\"character_extensions\", \"Race\")\n Characteristic = apps.get_model(\"character_extensions\", \"Characteristic\")\n CharacteristicValue = apps.get_model(\"character_extensions\", \"CharacteristicValue\")\n CharacterSheet = apps.get_model(\"character_extensions\", \"CharacterSheet\")\n CharacterSheetValue = apps.get_model(\"character_extensions\", \"CharacterSheetValue\")\n breed = Characteristic.objects.create(name=\"breed\")\n animal = Race.objects.create(name=\"animal\", race_type=LARGE_ANIMAL)\n small_animal = Race.objects.create(name=\"small animal\", race_type=SMALL_ANIMAL)\n # mapping of sheet to sheet value\n sheet_values = {}\n qs = Attribute.objects.filter(db_key=\"species\")\n if qs:\n total = len(qs)\n num = 0\n print(f\"\\nConverting {total} species\")\n for attr in qs:\n num += 1\n progress = num / total\n print(ProgressBar(progress, aPrefix), end=\"\\r\", flush=True)\n # if the attribute is empty skip it\n if not attr.db_value:\n attr.delete()\n continue\n try:\n character = attr.objectdb_set.all()[0]\n npc_type = character.agentob.agent_class.type\n if npc_type == 5:\n race = animal\n else:\n race = small_animal\n # get or create character sheet associated with the object\n try:\n sheet = character.charactersheet\n except CharacterSheet.DoesNotExist:\n sheet = CharacterSheet.objects.create(objectdb=character)\n # get or create the CharacterSheetValue for the sheet/characteristic pair\n if sheet.pk in sheet_values:\n sheet_value = sheet_values[sheet.pk]\n else:\n sheet_value = CharacterSheetValue(\n character_sheet=sheet, characteristic=breed\n )\n sheet_values[sheet.pk] = sheet_value\n # get or create the CharacteristicValue for the attr/characteristic pair\n characteristic_value, _ = CharacteristicValue.objects.get_or_create(\n value=attr.db_value.lower(), characteristic=breed\n )\n # set the sheet_value to point at that characteristic value\n sheet_value.characteristic_value = characteristic_value\n sheet_value.save()\n # add the characteristic_value as allowed for the race\n race.allowed_characteristic_values.add(characteristic_value)\n except (\n IndexError,\n ValueError,\n TypeError,\n ObjectDoesNotExist,\n AttributeError,\n ):\n pass\n attr.delete()",
"def details(self, room, **kwargs):\n if isinstance(room, Room):\n roomId = room.id\n elif isinstance(room, basestring):\n roomId = room\n else:\n raise ValueError(\"missing room Id\")\n apiparm = []\n return Room(self.api.session.get(self._uri_append(roomId), apiparm, **kwargs))",
"def get_attributes(self):\n retdict = {}\n if self.lane_id == None:\n raise ValueError('lane id is not set correctly.')\n retdict['id'] = str(self.lane_id)\n retdict['type'] = enum2str(self.lane_type)\n retdict['level'] = 'false'\n return retdict",
"def from_attr(datatuple):\r\n # nested functions\r\n def id2db(data):\r\n \"\"\"\r\n Convert db-stored dbref back to object\r\n \"\"\"\r\n mclass = CTYPEGET(model=data.db_model).model_class()\r\n try:\r\n return mclass.objects.get(id=data.id)\r\n\r\n except AttributeError:\r\n try:\r\n return mclass.objects.get(id=data.id)\r\n except mclass.DoesNotExist: # could happen if object was deleted in the interim.\r\n return None\r\n\r\n def iter_id2db(item):\r\n \"\"\"\r\n Recursively looping through stored iterables, replacing ids with actual objects.\r\n We return PackedDict and PackedLists instead of normal lists; this is needed in order for\r\n the user to do dynamic saving of nested in-place, such as obj.db.attrlist[2]=3. What is\r\n stored in the database are however always normal python primitives.\r\n \"\"\"\r\n dtype = type(item)\r\n if dtype in (basestring, int, float, long, bool): # check the most common types first, for speed\r\n return item\r\n elif dtype == PackedDBobject or hasattr(item, '__class__') and item.__class__.__name__ == \"PackedDBobject\":\r\n return id2db(item)\r\n elif dtype == tuple:\r\n return tuple([iter_id2db(val) for val in item])\r\n elif dtype in (dict, PackedDict):\r\n return dict(zip([key for key in item.keys()],\r\n [iter_id2db(val) for val in item.values()]))\r\n elif hasattr(item, '__iter__'):\r\n return list(iter_id2db(val) for val in item)\r\n else:\r\n return item\r\n\r\n typ, data = datatuple\r\n\r\n if typ == 'simple':\r\n # single non-db objects\r\n return data\r\n elif typ == 'dbobj':\r\n # a single stored dbobj\r\n return id2db(data)\r\n elif typ == 'iter':\r\n # all types of iterables\r\n return iter_id2db(data)",
"def from_dict(data: dict):\n return Room(data[\"id\"], data[\"creatorId\"], data[\"name\"], data[\"description\"], data[\"inserted_at\"],\n data[\"isPrivate\"], data[\"numPeopleInside\"],\n list(map(UserPreview.from_dict, data[\"peoplePreviewList\"])))",
"def convert_room_info_to_dict(room):\n\tresponse_text = {\n\t\t'name': room.name,\n\t\t'video_url': room.video_url,\n\t\t'video_id': room.video_id,\n\t\t'owner': room.owner.username,\n\t\t'owner_pk': room.owner.pk,\n\t\t'room_pk': room.pk,\n\t\t'description': room.room_description,\n\t\t'created_at': timezone.localtime(room.created_at).strftime(\"%m/%d/%Y %H:%M:%S\"),\n\t}\n\treturn response_text",
"def load_data(obj):\n if not obj:\n return\n\n # Get app, model and key names.\n app = obj.attributes.get(key=\"app\", category=DATA_INFO_CATEGORY, strattr=True)\n if not app:\n return False\n\n model = obj.attributes.get(key=\"model\", category=DATA_INFO_CATEGORY, strattr=True)\n if not model:\n return False\n\n key = obj.attributes.get(key=\"key\", category=DATA_INFO_CATEGORY, strattr=True)\n if not key:\n return False\n\n # Get db model\n model_obj = get_model(app, model)\n if not model_obj:\n logger.log_errmsg(\"%s can not open model %s\" % (key, model))\n return False\n \n # Get data record.\n data_info = model_obj.objects.filter(key=key)\n if not data_info:\n logger.log_errmsg(\"%s can not find key %s\" % (key, key))\n return False\n\n info = data_info[0]\n\n if info.typeclass:\n set_obj_typeclass(obj, info.typeclass)\n if info.name:\n set_obj_name(obj, info.name)\n if info.alias:\n set_obj_alias(obj, info.alias)\n if info.location:\n set_obj_location(obj, info.location)\n if info.home:\n set_obj_home(obj, info.home)\n if info.desc:\n set_obj_desc(obj, info.desc)\n if info.lock:\n set_obj_lock(obj, info.lock)\n if info.destination:\n set_obj_destination(obj, info.destination)\n\n # Set attributes.\n attributes = {}\n if info.attributes:\n try:\n # info.attributes: (string) Attribues in form of python dict. Such as: \"{'age':'22', 'career':'warrior'}\"\n # Convert string to dict\n attributes = ast.literal_eval(info.attributes)\n except Exception, e:\n logger.log_errmsg(\"%s can't load attributes %s: %s\" % (get_info_key(obj), info.attributes, e))\n\n # Add other fields to attributes.\n known_fields = {\"key\",\n \"name\",\n \"alias\",\n \"typeclass\",\n \"location\",\n \"home\",\n \"desc\",\n \"lock\",\n \"destination\",\n \"attributes\"}\n\n for field in model_obj._meta.fields:\n if not field.name in known_fields:\n attributes[field.name] = info.serializable_value(field.name)\n\n set_obj_attributes(obj, attributes)\n\n return True",
"def _init_extended_attrs(self):\n\n db = firestore.client()\n doc = db.collection(self._collection_path).document(self.uid).get()\n for attr in self._extended_attrs:\n value = None\n if doc.exists:\n try:\n value = doc.get(attr)\n except KeyError:\n pass\n\n self.__dict__[attr] = value",
"def _parse_oem_attributes(self):\n oem_json_body = (self.core_resource.json.get('Oem').\n get(self.oem_property_name))\n for attr, field in _collect_oem_fields(self):\n # Hide the Field object behind the real value\n setattr(self, attr, field._load(oem_json_body, self))\n\n for attr, field in _collect_base_fields(self):\n # Hide the Field object behind the real value\n setattr(self, attr, field._load(self.core_resource.json, self))",
"def AttrsToAttribute(attrs) -> Attribute:\n\n if attrs['type'].lower() == 'array':\n data_type = DataType(name=attrs['entryType'])\n else:\n data_type = DataType(name=attrs['type'])\n\n if 'minLength' in attrs:\n data_type.min_length = ParseInt(attrs['minLength'])\n\n if 'length' in attrs:\n data_type.max_length = ParseInt(attrs['length'])\n\n if 'min' in attrs:\n data_type.min_value = ParseInt(attrs['min'], data_type)\n\n if 'max' in attrs:\n data_type.max_value = ParseInt(attrs['max'], data_type)\n\n field = Field(\n data_type=data_type,\n code=ParseInt(attrs['code']),\n name='',\n is_list=(attrs['type'].lower() == 'array')\n )\n\n attribute = Attribute(definition=field)\n\n if attrs.get('optional', \"false\").lower() == 'true':\n attribute.definition.qualities |= FieldQuality.OPTIONAL\n\n if attrs.get('isNullable', \"false\").lower() == 'true':\n attribute.definition.qualities |= FieldQuality.NULLABLE\n\n if attrs.get('readable', \"true\").lower() == 'true':\n attribute.qualities |= AttributeQuality.READABLE\n\n if attrs.get('writable', \"false\").lower() == 'true':\n attribute.qualities |= AttributeQuality.WRITABLE\n\n # TODO(#22937): NOSUBSCRIBE attribute tag is not available - could find no\n # clear source to get this info.\n\n # NOTE: default values are also present in this XML, however generally IDL\n # **DATA** definitions would not care about the defaults. The\n # defaults should be used to initializ storage for devices, hence\n # they are part of endpoint definition/composition. We are not doing\n # that here, so defaults are ignored.\n\n return attribute",
"def convert_room_info_item_2_dict(room_info_item):\n room_dict = {\n # \"hotel_id\": room_info_item.hotel_code,\n \"room_id\": room_info_item.room_id,\n \"room_type\": room_info_item.room_type,\n \"floor\": room_info_item.floor,\n \"net_service\": room_info_item.net_service,\n \"net_service_fee\": room_info_item.net_service_fee,\n \"bed_type\": room_info_item.bed_type,\n \"breakfast\": room_info_item.breakfast,\n \"area\": room_info_item.area,\n }\n return room_dict",
"def save_state(self, sqlite_database_name):\n if os.path.isfile('./'+sqlite_database_name+'.db'):\n return ('That file name already exists, '\\\n + 'please provide a different filename')\n else:\n engine = create_engine('sqlite:///'+sqlite_database_name+'.db')\n Base.metadata.bind = engine\n DBSession = sessionmaker(bind=engine)\n session = DBSession()\n Base.metadata.create_all(engine)\n \"\"\" Check whether living_space dictionary has items to be added to\n the database and add them to the Rooms table in the database\"\"\"\n if len(self.living_space) > 0:\n for key in self.living_space:\n new_room = Rooms(\n room_name = self.living_space[key].room_name,\n members = ' '.join(self.living_space[key].members),\n max_num = self.living_space[key].max_num,\n room_type ='LivingSpace')\n session.add(new_room)\n session.commit()\n \"\"\"Checks whether office dictionary has items to be added to the\n database and add them to the Rooms table in the database\"\"\"\n if len(self.office) > 0:\n for key in self.office:\n new_room = Rooms(\n room_name = self.office[key].room_name,\n members = ' '.join(self.office[key].members),\n max_num = self.office[key].max_num,\n room_type ='Office')\n session.add(new_room)\n session.commit()\n #Checks whether persons dictionay has people to be added to the database\n if len(self.persons) > 0:\n #This For loop is used to traverse all keys in the persons dictionary\n for key in self.persons:\n #This if statement is used to add Staff to People table in the database\n if self.persons[key].person_type.upper() == 'STAFF':\n new_person = People(\n person_id = self.persons[key].person_id,\n last_name = self.persons[key].last_name,\n first_name = self.persons[key].first_name,\n person_type = 'STAFF',\n proom_name = self.persons[key].proom_name)\n session.add(new_person)\n session.commit\n #This if statement is used to add Fellows to People table in the database\n if self.persons[key].person_type.upper() == 'FELLOW':\n new_person = People(\n person_id = self.persons[key].person_id,\n last_name = self.persons[key].last_name,\n first_name = self.persons[key].first_name,\n person_type = 'FELLOW',\n wants_accommodation = self.persons[key].wants_accommodation,\n proom_name = self.persons[key].proom_name,\n lroom_name = self.persons[key].lroom_name)\n session.add(new_person)\n session.commit()\n \"\"\"The if staements below are used to store list names and there\n values as a string in the Lists Database Table\"\"\"\n if len(self.fellows_with_living_room_list) > 0:\n new_list = Lists(\n list_name = 'fellows_with_living_room_list',\n list_string = ' '.join(self.fellows_with_living_room_list)\n )\n session.add(new_list)\n session.commit()\n if len(self.fellows_with_office_list) > 0:\n new_list = Lists(list_name = 'fellows_with_office_list',\n list_string = ' '.join(self.fellows_with_office_list))\n session.add(new_list)\n session.commit()\n if len(self.fellows_who_missed_living_space) > 0:\n new_list = Lists(list_name = 'fellows_who_missed_living_space',\n list_string = ' '.join(self.fellows_who_missed_living_space))\n session.add(new_list)\n session.commit()\n if len(self.fellows_who_dont_want_living_space) > 0:\n new_list = Lists(list_name = 'fellows_who_dont_want_living_space',\n list_string = ' '.join(self.fellows_who_dont_want_living_space))\n session.add(new_list)\n session.commit()\n if len(self.fellows_who_missed_office) > 0:\n new_list = Lists(list_name = 'fellows_who_missed_office',\n list_string = ' '.join(self.fellows_who_missed_office))\n session.add(new_list)\n 
session.commit()\n if len(self.staff_with_office_list) > 0:\n new_list = Lists(list_name = 'staff_with_office_list',\n list_string = ' '.join(self.staff_with_office_list))\n session.add(new_list)\n session.commit()\n if len(self.staff_who_missed_office_list) > 0:\n new_list = Lists(list_name = 'staff_who_missed_office_list',\n list_string = ' '.join(self.staff_who_missed_office_list))\n session.add(new_list)\n session.commit()\n if len(self.all_rooms_list) > 0:\n new_list = Lists(list_name = 'all_rooms_list',\n list_string = ' '.join(self.all_rooms_list))\n session.add(new_list)\n session.commit()\n session.close() #Close Database session\n return \"State saved succesfully\\n\"",
"def fix_attributes_only(self, attrs, onu_db, olt_db):\n successes = 0\n failures = 0\n me_map = self._device.me_map\n\n # Collect up attributes on a per CID/EID basis. This will result in\n # the minimal number of operations to either the database of over\n # the OMCI-CC to the ONU\n\n attr_map = dict()\n for cid, eid, attribute in attrs:\n if (cid, eid) not in attr_map:\n attr_map[(cid, eid)] = {attribute}\n else:\n attr_map[(cid, eid)].add(attribute)\n\n for entity_pair, attributes in attr_map.items():\n cid = entity_pair[0]\n eid = entity_pair[1]\n\n # Skip MEs we cannot encode/decode\n if cid not in me_map:\n self.log.warn('no-me-map-decoder', class_id=cid)\n failures += 1\n continue\n\n if self.deferred.called: # Check if task canceled\n break\n\n # Build up MIB set commands and ONU Set (via OMCI) commands\n # based of the attributes\n me_entry = me_map[cid]\n mib_data_to_save = dict()\n onu_data_to_set = dict()\n olt_attributes = olt_db[cid][eid][ATTRIBUTES_KEY]\n onu_attributes = onu_db[cid][eid][ATTRIBUTES_KEY]\n\n for attribute in attributes:\n map_access = next((attr.access for attr in me_entry.attributes\n if attr.field.name == attribute), set())\n writeable = AA.Writable in map_access or AA.SetByCreate in map_access\n\n # If only in ONU database snapshot, save it to OLT\n if attribute in onu_attributes and attribute not in olt_attributes:\n # On onu only\n mib_data_to_save[attribute] = onu_attributes[attribute]\n\n elif writeable:\n # On olt only or in both. Either way OLT wins\n onu_data_to_set[attribute] = olt_attributes[attribute]\n\n # Now do the bulk operations For both, check to see if the target\n # is still the same as when the audit was performed. If it is, do\n # the commit. If not, mark as a failure so an expedited audit will\n # occur and check again.\n\n if len(mib_data_to_save):\n results = yield self.fix_attributes_only_in_mib(cid, eid, mib_data_to_save)\n successes += results[0]\n failures += results[1]\n\n if len(onu_data_to_set):\n results = yield self.fix_attributes_only_on_olt(cid, eid, onu_data_to_set, olt_db, me_entry)\n successes += results[0]\n failures += results[1]\n\n returnValue((successes, failures))",
"def get_db_avails(building_name, room_name, date):\n # 1. find room. 2. find avail object. 3. get avails filtered by date.\n fa = get_fa_for_room(building_name, room_name)\n freetimeranges = fa.freetimerange_set.filter(date=date)\n return [(ftr.time.start, ftr.time.end) for ftr in freetimeranges]",
"def __init__(self, room_name):\n self.name = room_name\n self.description = None\n self.linked_rooms = {}\n self.character = None\n self.item = None",
"def testForDBModel(self):\n class Books(db.Model):\n item_freq = db.StringProperty()\n freq = db.IntegerProperty()\n details = db.TextProperty()\n released = db.BooleanProperty()\n\n entity = Books()\n entity.item_freq = '5'\n entity.freq = 4\n entity.details = 'Test Entity'\n entity.released = True\n entity.put()\n\n expected_dict = {'freq': 4, 'item_freq': '5', 'details': 'Test Entity',\n 'released': True}\n self.assertEqual(melange_db.toDict(entity), expected_dict)",
"def model2dict(self):\n\n def to_primitive(obj_list):\n data = []\n for item in obj_list:\n if isinstance(item, str) or isinstance(item, int):\n data.append(item)\n else:\n data.append(item.as_dict())\n return data\n\n result = {}\n for attribute_name, attribute_obj in self.get_attributes().items():\n if isinstance(attribute_obj, attributes.MapAttribute):\n result[attribute_name] = getattr(self, attribute_name).as_dict()\n elif isinstance(attribute_obj, attributes.ListAttribute):\n result[attribute_name] = to_primitive(getattr(self, attribute_name))\n else:\n result[attribute_name] = getattr(self, attribute_name)\n return result"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create a test binary image.
|
def create_test_img_bin(sz_img):
img = np.zeros(sz_img)
img[3:7, 2:8] = 1
return img
|
[
"def make_binary_image(im):",
"def test_create(self):\n \n image = StringIO.StringIO(base64.b64decode(image_base64))\n \n img = self.pdfu.images.create(image, 'path/to/test_image.jpg')\n\n self.assertTrue(img)\n self.assertEqual(img.get('src'), 'path/to/test_image.jpg')\n self.assertEqual(img.get('uri'), '/v1/images/' + img.get('id'))\n \n doc = self.pdfu.documents.create(\n source = '<doc size=\"b5\"><page><row><cell>Hello World!</cell><cell><img src=\"path/to/test_image.jpg\" /></cell></row></page></doc>',\n pdf = True\n )\n self.assertTrue(re.match('%PDF', doc))\n \n img = self.pdfu.images.create('unicorn.jpg', 'path/to/test_image2.jpg')\n\n self.assertTrue(img)\n self.assertEqual(img.get('src'), 'path/to/test_image2.jpg')\n self.assertEqual(img.get('uri'), '/v1/images/' + img.get('id'))",
"def create_binary_image(img_read, filename, folder):\n img_conv = rgb2gray(img_read)\n\n img_gaussian = filters.gaussian(img_conv, filter_binary_gaussian_strength)\n img_threshold = filters.threshold_mean(img_conv)\n\n # Threshold comparison\n img_binary = img_gaussian < img_threshold\n\n imsave(folder + filename + \"_binary\" + '.png', img_as_uint(img_binary))\n\n return img_binary",
"def create_test_image(self):\n return np.tile(np.arange(0, 255).repeat(2), (100, 1)).astype(np.float32) / 255",
"def test_png(self):\n\n test_image = np.random.randint(0, 256, size=(256, 224, 3)).astype(\"uint8\")\n with tempfile.TemporaryDirectory() as tempdir:\n filename = os.path.join(tempdir, \"test_image.png\")\n itk_np_view = itk.image_view_from_array(test_image, is_vector=True)\n itk.imwrite(itk_np_view, filename)\n output_name = \"test_image/test_image_trans.png\"\n self._cmp(filename, (3, 224, 256), \"itkreader\", \"itkreader\", output_name, \".png\")\n self._cmp(filename, (3, 224, 256), \"itkreader\", \"PILReader\", output_name, \".png\")\n self._cmp(filename, (3, 224, 256), \"itkreader\", \"nibabelreader\", output_name, \".png\")",
"def generate_image(self) -> None:",
"def create_image(self, **kw):\n cmd = \"rbd create \" + kw.get(\"image_name\") + \" -s 1G\"\n if kw.get(\"features\"):\n cmd = cmd + \" --image-feature \" + kw[\"features\"]\n self.exec_cmd(cmd)",
"def test_add_image_with_image_data_as_file(self):\n fixture = {'name': 'fake public image',\n 'is_public': True,\n 'disk_format': 'vhd',\n 'container_format': 'ovf',\n 'size': 19,\n 'properties': {'distro': 'Ubuntu 10.04 LTS'},\n }\n\n image_data_fixture = r\"chunk00000remainder\"\n\n tmp_image_filepath = '/tmp/rubbish-image'\n\n if os.path.exists(tmp_image_filepath):\n os.unlink(tmp_image_filepath)\n\n tmp_file = open(tmp_image_filepath, 'wb')\n tmp_file.write(image_data_fixture)\n tmp_file.close()\n\n new_image = self.client.add_image(fixture, open(tmp_image_filepath))\n new_image_id = new_image['id']\n self.assertEquals(3, new_image_id)\n\n if os.path.exists(tmp_image_filepath):\n os.unlink(tmp_image_filepath)\n\n new_meta, new_image_chunks = self.client.get_image(3)\n\n new_image_data = \"\"\n for image_chunk in new_image_chunks:\n new_image_data += image_chunk\n\n self.assertEquals(image_data_fixture, new_image_data)\n for k, v in fixture.items():\n self.assertEquals(v, new_meta[k])",
"def create(IMGSIZE=...) -> retval:\n ...",
"def test_image_create(self):\n with self.mock_post('images/private/123') as m:\n i = self.client.image_create(654, 'Test-Image', 'This is a test')\n\n self.assertIsNotNone(i)\n self.assertEqual(i.id, 'private/123')\n\n self.assertEqual(m.call_url, '/images')\n\n self.assertEqual(m.call_data, {\n \"disk_id\": 654,\n \"label\": \"Test-Image\",\n \"description\": \"This is a test\",\n })",
"def test_write_small():\n data = random_data('uint8', (1, 1))\n with TempFileName('small') as fname:\n imwrite(fname, data)\n assert_valid(fname)\n with TiffFile(fname) as tif:\n assert len(tif.pages) == 1\n page = tif.pages[0]\n assert page.is_contiguous\n assert page.planarconfig == CONTIG\n assert page.photometric != RGB\n assert page.imagewidth == 1\n assert page.imagelength == 1\n assert page.samplesperpixel == 1\n image = tif.asarray()\n assert_array_equal(data, image)\n assert__str__(tif)",
"def testimg():\n return testimage().array()",
"def create_output_image(img, instances):\n pass",
"def test_binary_string_image_to_numpy_image():\n\n expected = np.ones(shape=(32, 32, 3))\n\n _, encoded_image = cv2.imencode('.jpg', expected)\n binary_string = encoded_image.tostring()\n\n actual = traffic.utilities.binary_rgb_image_string_to_numpy_image(binary_string)\n\n assert np.all(expected == actual)",
"def test_create_imagenet(self):\n width_crop = 192\n nb_training = 2\n nb_validation = 2\n path_to_training = 'datasets/imagenet/pseudo_data/pseudo_training_data.npy'\n path_to_validation = 'datasets/imagenet/pseudo_data/pseudo_validation_data.npy'\n \n # The images in the folder \"datasets/imagenet/pseudo_data/\"\n # are large. Therefore, none of them is dumped.\n datasets.imagenet.imagenet.create_imagenet('datasets/imagenet/pseudo_data/',\n width_crop,\n nb_training,\n nb_validation,\n path_to_training,\n path_to_validation)\n pseudo_training_data = numpy.load(path_to_training)\n path_to_folder_vis = 'datasets/imagenet/pseudo_visualization/'\n for i in range(nb_training):\n tls.save_image(os.path.join(path_to_folder_vis, 'training_{}.png'.format(i)),\n pseudo_training_data[i, :, :, 0])\n pseudo_validation_data = numpy.load(path_to_validation)\n for i in range(nb_validation):\n tls.save_image(os.path.join(path_to_folder_vis, 'validation_{}.png'.format(i)),\n pseudo_validation_data[i, :, :, 0])",
"def test_create_image(self):\n with self.override_role():\n self._create_image()",
"def _GenerateTestBits(self, tempdir):\n build_root = cros_build_lib.GetSysroot(board=self.board)\n cwd = os.path.join(build_root, BOARD_BUILD_DIR)\n tarball_funcs = [commands.BuildAutotestControlFilesTarball,\n commands.BuildAutotestPackagesTarball,\n commands.BuildAutotestTestSuitesTarball,\n commands.BuildAutotestServerPackageTarball]\n for tarball_func in tarball_funcs:\n tarball_func(build_root, cwd, tempdir)",
"def create_new_image(self):\n logging.info('Starting image \\'' + self.name + '\\' creation')",
"def img_file(tmpdir: str) -> str:\n from PIL.Image import fromarray\n\n img_file_ = tmpdir.join(\"test_img.bmp\")\n img = fromarray(np.random.randint(255, size=(10, 10, 3)).astype(\"uint8\"))\n img.save(str(img_file_))\n return str(img_file_)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
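A minimal, self-contained check of the create_test_img_bin snippet above, added for illustration only; the (10, 10) image size and the expected count of 24 foreground pixels are assumptions derived from the 4x6 slice, not values stated in the dataset record.

import numpy as np

def create_test_img_bin(sz_img):
    # Same logic as the record above: zeros everywhere, a 4x6 block of ones.
    img = np.zeros(sz_img)
    img[3:7, 2:8] = 1
    return img

img = create_test_img_bin((10, 10))      # assumed size
assert img.shape == (10, 10)
assert int(img.sum()) == 24              # rows 3..6 x columns 2..7 = 4 * 6 ones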
Load data saved in a NumPy .npz file.
|
def load_npz(npz_file):
data = np.load(npz_file, encoding="latin1")
return data['arr_0']
|
[
"def loadnpz(npzfile):\n return np.load(npzfile, allow_pickle=True)",
"def load_data_from_npz():\n \n file_data = np.load(\"data.npz\")\n latitudes = file_data[\"lats\"][:] \n longitudes = file_data[\"lons\"][:]\n times = file_data[\"time\"][:] \n air_temperatures = file_data[\"air\"][:] \n \n return latitudes, longitudes, times, air_temperatures",
"def load_npz(file, obj, path='', strict=True):\n with numpy.load(file) as f:\n d = NpzDeserializer(f, path=path, strict=strict)\n d.load(obj)",
"def load_npy(self, filename):\n self.set_data(np.load(filename))",
"def load_ndarray(filename):\r\n if filename.endswith('.npy'):\r\n compress = False\r\n status = 'uncompressed' # Everything OK!\r\n elif filename.endswith('.7z'):\r\n compress = True\r\n status = 'compressed'\r\n else:\r\n file_npy = filename + '.npy'\r\n if file_npy in os.listdir():\r\n filename = file_npy\r\n compress = False\r\n status = 'uncompressed'\r\n else:\r\n file_7z = filename + '.7z'\r\n if file_7z in os.listdir():\r\n filename = file_7z\r\n compress = True\r\n status = 'compressed'\r\n else:\r\n raise FileNotFoundError\r\n\r\n # ---------------------------------\r\n size = os.stat(filename).st_size\r\n print('Loading {0:,} [{1}] bytes from disk... File: {2}'\r\n .format(size, status, filename))\r\n\r\n if compress:\r\n if shutil.which('7z') is None:\r\n raise FileNotFoundError('7z not found on the PATH!')\r\n subprocess.Popen('7z e ' + filename + ' -mmt', shell=True).wait()\r\n ndarray = np.load(filename[:-3] + '.npy')\r\n else:\r\n ndarray = np.load(filename)\r\n print('Succesfully loaded {0!s}-array ({1:,} bytes) from {2}!'\r\n .format(ndarray.shape, size, filename))\r\n\r\n return ndarray",
"def test_load_file(self):\n loader = Loader('./tests/example.npz')\n loader.load_file()\n self.assertIsNotNone(loader.data)",
"def nploadbz(fname):\n f = bz2.BZ2File(fname, \"r\")\n d = np.load(f)\n f.close()\n return d",
"def load_npy(name):\n\twith open(name, \"rb\") as fr:\n\t\treturn np.load(fr)",
"def save_npz(self):\n path_npz = \"data/\"+self.iD+\"-\"+str(self.N)+\".npz\" # @todo: create folder if not.\n np.savez(path_npz, self)",
"def read_npzdata(folder, file, *arg):\n #import pdb; pdb.set_trace()\n full_path = os.path.join(folder, file)\n fold.file_exists(full_path)\n npz_data = np.load(full_path)\n \n parameters = []\n for param in arg:\n param_read = npz_data[param]\n parameters.append(param_read)\n \n del npz_data, param_read\n return parameters",
"def load_data(filename):\n\n data = scipy.io.loadmat(_os.path.join(data_dir, filename))\n return data",
"def save_npy(self, filename):\n np.save(filename, self.data)",
"def load_data_array(fname):\n data = np.genfromtxt(fname)\n #data = np.load(fname)\n return data",
"def load_numpy_object_demo(file_name: Path)\\\n -> Union[np.ndarray, np.recarray]:\n return np.load(str(file_name), allow_pickle=True)",
"def load_example_data():\n from pkg_resources import resource_stream\n data = np.load(resource_stream(__name__, 'example_data/CCF1.npy'))\n return data",
"def np_from_file(file_path, compresslevel):\n if not tf.io.gfile.exists(file_path):\n raise FileNotFoundError(file_path)\n res = []\n with tf.io.gfile.GFile(file_path, 'rb') as f:\n with gzip.GzipFile(fileobj=f, compresslevel=compresslevel) as gzipf:\n while True:\n try:\n res.append(np.load(gzipf, allow_pickle=False))\n except Exception: # pylint: disable=broad-except\n break\n return res",
"def load_npy():\n cell_data = []\n arr = np.load(INPUT_NPY_PATH + '/' + FILE_TO_READ)\n label_arr = np.load(INPUT_NPY_PATH + '/' + FILE_TO_READ.split('.')[0] + '_labels.npy')\n\n IMAGE_ID = FILE_TO_READ.split('.')[0] + '.jpg'\n\n # read table image; the path is where you store the images for each table\n img_cv = cv2.imread(IMAGE_PATH.format(IMAGE_ID))\n\n # add image name, should be deleted after we have image id as input\n row_num = 0\n for row in arr:\n if label_arr[row_num] == 0 or row[0] == row[2] or row[1] == row[3]:\n row_num += 1\n continue\n row = row.tolist()\n row.insert(0, label_arr[row_num]) # insert cell data type\n cell_data.append(row)\n row_num += 1\n\n sort_data(cell_data, img_cv)",
"def save_images_to_npz(path):\n path = Path(path).resolve()\n image_data = read_image_data()\n np.savez_compressed(path, **{data[\"path\"]: data[\"data\"] for data in image_data})",
"def _load_numpy_array(self, image):\n\n self._send_to_ztv(('load-numpy-array', image))",
"def load_dataarray(filename_or_obj, **kwargs):\n if \"cache\" in kwargs:\n raise TypeError(\"cache has no effect in this context\")\n\n with open_dataarray(filename_or_obj, **kwargs) as da:\n return da.load()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
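For context on the 'arr_0' key used by load_npz above: numpy.savez stores positional (unnamed) arrays under the default keys 'arr_0', 'arr_1', and so on. A minimal round-trip sketch, with an illustrative temporary file path:

import os
import tempfile
import numpy as np

arr = np.arange(6).reshape(2, 3)
npz_file = os.path.join(tempfile.mkdtemp(), "example.npz")  # illustrative path
np.savez(npz_file, arr)                  # unnamed array -> stored as 'arr_0'

data = np.load(npz_file, encoding="latin1")
assert np.array_equal(data["arr_0"], arr)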
Test data object for the PlantCV transform submodule.
|
def transform_test_data():
return TransformTestData()
|
[
"def getTestingData(self):",
"def learn_test_data():\n return LearnTestData()",
"def load_test_dataset(self):\n\n self.xyzrph = test_xyzrph\n self.load_from_existing_xyzrph()",
"def test(self):\n\n reconstructed_data = torch.zeros(size=(1, self.dataset_dims))\n\n for i, data in enumerate(self.test_dataloader, 0):\n real, real_labels = data\n real = real.cuda().squeeze().view(-1, self.dataset_dims)\n\n with torch.no_grad():\n\n mean, logvar = self.model.encode(real)\n latent_vector = self.reparametrize(mean, logvar)\n\n generated = self.model.decode(latent_vector)\n reconstructed_data = torch.cat([reconstructed_data, generated], dim=0)\n\n return reconstructed_data[1:, :].cpu().detach().numpy()",
"def test_calibration_constructor(self) -> None:\n data = {\n \"sampling_size\": \"10, 50, 100, 200\",\n \"dataloader\": {\n \"last_batch\": \"rollover\",\n \"batch_size\": 2,\n \"dataset\": {\n \"TestDataset\": {\n \"dataset_param\": \"/some/path\",\n \"bool_param\": True,\n \"list_param\": [\"item1\", \"item2\"],\n },\n },\n \"transform\": {\n \"TestTransform\": {\"shape\": [1000, 224, 224, 3], \"some_op\": True},\n \"AnotherTestTransform\": {\n \"shape\": [10, 299, 299, 3],\n \"some_op\": False,\n },\n },\n \"filter\": {\n \"LabelBalance\": {\"size\": 1},\n },\n },\n }\n calibration = Calibration(data)\n\n self.assertEqual(calibration.sampling_size, \"10, 50, 100, 200\")\n self.assertEqual(calibration.dataloader.last_batch, \"rollover\")\n self.assertEqual(\n calibration.dataloader.batch_size,\n 2,\n ) # Calibration batch size should be always set to 1\n self.assertIsNotNone(calibration.dataloader.dataset)\n self.assertEqual(calibration.dataloader.dataset.name, \"TestDataset\")\n self.assertDictEqual(\n calibration.dataloader.dataset.params,\n {\n \"dataset_param\": \"/some/path\",\n \"bool_param\": True,\n \"list_param\": [\"item1\", \"item2\"],\n },\n )\n transform_name, transform = list(calibration.dataloader.transform.items())[0]\n self.assertEqual(transform_name, \"TestTransform\")\n self.assertEqual(transform.name, \"TestTransform\")\n self.assertDictEqual(\n transform.parameters,\n {\n \"shape\": [1000, 224, 224, 3],\n \"some_op\": True,\n },\n )\n transform_name, transform = list(calibration.dataloader.transform.items())[1]\n self.assertEqual(transform_name, \"AnotherTestTransform\")\n self.assertEqual(transform.name, \"AnotherTestTransform\")\n self.assertDictEqual(\n transform.parameters,\n {\"shape\": [10, 299, 299, 3], \"some_op\": False},\n )\n self.assertIsNotNone(calibration.dataloader.filter)\n self.assertIsNotNone(calibration.dataloader.filter.LabelBalance)\n self.assertEqual(calibration.dataloader.filter.LabelBalance.size, 1)",
"def test_kah_DATA_object():\n \n # Test that the object is returned.\n assert DATA\n\n # Test that only one subject remains.\n for dataset in DATASETS:\n assert np.all(getattr(DATA, dataset)['subject'] == TESTSUBJ)",
"def setUp(self):\n self.convert = Convert()\n self.create_csv_test_file(self.TESTS_DATA)",
"def _creat_testdata(file):\n # Creating testdata and visualizing\n filedir = \"__testfiles__/\" + file\n sol.run(filedir, \"__output__\")\n vis.run(\"__output__\")\n plt.pause(5)\n # Saving testdata if approved\n check = input(\"Should this data be used as testdata [y/n]? \")\n if check == \"y\":\n newdirwf = \"__unittestfiles__/\" + file.split(\".\")[0] + \"_wf.dat\"\n newdirenergy = \"__unittestfiles__/\" + file.split(\".\")[0]\\\n + \"_energy.dat\"\n testdatawf = np.loadtxt(\"__output__/wavefuncs.dat\")\n np.savetxt(newdirwf, testdatawf)\n testdataenergy = np.loadtxt(\"__output__/energies.dat\")\n np.savetxt(newdirenergy, testdataenergy)\n plt.close('all')\n if check == \"n\":\n plt.close('all')",
"def test_erp_data():\n\n # Check that ERPData returns properly.\n assert ERPData('test', ['test'])",
"def test_ParticleDataset_from_path():\n pass",
"def test_data(self):\n test_data = self.datasets.get(\"test\", None)\n if test_data is None:\n raise KeyError(f\"dataset '{self.NAME}' does not contain test data\")\n return test_data",
"def test_ParticleDataset_from_array():\n pass",
"def fixture_vm_data():\n return VmData(\n project_id=\"project_id_mock\", virtual_machine_id=\"virtual_machine_id_mock\"\n )",
"def testSKPCA():\n pass",
"def test_transform_simple(self, dataset, preprocessor, bert):\n (actual_processed_dataset, actual_encoded_mentions, actual_encoded_mentions_split_sizes,\n actual_targets, actual_targets_split_sizes) = \\\n preprocessor.transform(dataset, bert)\n\n # TODO 1 Example should include corefs\n expected_processed_dataset = {\n 'train': {\n 'WH_train_0': {\n 'mentions': [[]],\n 'query': \"participant_of juan rossell\",\n 'candidate_indices': {\n '1996 summer olympics': [],\n 'olympic games': [],\n 'sport': [],\n }\n },\n 'WH_train_1': {\n 'mentions': [\n [\n {'text': 'english', 'corefs': []},\n {'text': 'spanish', 'corefs': []},\n ],\n [\n {'text': 'nahuatl', 'corefs': []},\n {'text': 'spanish', 'corefs': []},\n ]\n ],\n 'query': \"languages_spoken_or_written john osteen\",\n 'candidate_indices': {\n 'english': [0],\n 'greek': [],\n 'koine greek': [],\n 'nahuatl': [2],\n 'spanish': [1, 3],\n }\n }\n }\n }\n expected_encoded_mentions_split_sizes = {'train': [0, 4]}\n expected_targets = torch.tensor([1, 0, 0, 1, 0, 0, 0, 0])\n expected_targets_split_sizes = {'train': [3, 5]}\n\n assert expected_processed_dataset == actual_processed_dataset\n # 4 because there are four mentions and 768 b/c it is the size of BERT encodings\n assert actual_encoded_mentions['train'].shape == (4, 768)\n assert expected_encoded_mentions_split_sizes == actual_encoded_mentions_split_sizes\n assert torch.equal(expected_targets, actual_targets['train'])\n assert expected_targets_split_sizes, actual_targets_split_sizes['train']",
"def test_init_with_engine_and_data_attributes(self):\n # when\n source = EngineSource(engine=self.engine,\n point_scalars=\"TEMPERATURE\")\n\n # then\n self.assertEqual(source.engine, self.engine)\n self.assertIn(source.dataset, self.datasets)\n self.assertEqual(source.point_scalars_name, \"TEMPERATURE\")",
"def test_case_for_example(test_data):\n\n # This class definition placed inside method to prevent discovery by test loader\n class TestExampleDate(unittest.TestCase):\n def testFormat(self):\n # verify initial conditions\n self.assertTrue(hasattr(self, 'test_data'), 'testdata field not set on test object')\n\n expected = self.test_data['format']\n actual = infer.infer(self.test_data['examples'])\n\n self.assertEqual(expected,\n actual,\n '{0}: Inferred `{1}`!=`{2}`'.format(self.test_data['name'], actual, expected))\n\n test_case = TestExampleDate(methodName='testFormat')\n test_case.test_data = test_data\n return test_case",
"def testOsteosarcomaDataset(self):\n if \"keiser\" in hostname:\n DATA_DIR = \"/srv/nas/mk1/users/dwong/tifs/\" #where the raw images are located\n else:\n DATA_DIR = \"/data1/wongd/tifs/\"\n csvName = \"csvs/cyclin_dataset.csv\"\n dataset = OsteosarcomaDataset(csvName, DATA_DIR)\n generator = data.DataLoader(dataset, sampler = SubsetRandomSampler(list(range(0, len(dataset)))))\n i = 0\n ## iterate over a random subset of our data to test \n for names, local_batch, local_labels in generator:\n ## make sure data range is bounded correctly\n self.assertTrue(0 <= torch.max(local_batch) <= 255)\n ## make sure inputs and labels are correctly shaped\n self.assertEqual(tuple(local_batch.shape), (1, 1, 1104, 1104))\n self.assertEqual(tuple(local_labels.shape), (1, 1104, 1104))\n i += 1\n if i > sample_size:\n break",
"def test_projection_logic(self):"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Make a new client instance. The client must specify the endpoint for wmgr (host and port number) and may optionally provide authentication parameters: the auth type and either secretFile or a (user, passwd) pair. If auth is 'none' then authentication is not used and all other parameters are ignored. If neither secretFile nor user/passwd is given, it is equivalent to auth='none'. For authentication to work, auth must be set to either 'basic' or 'digest' and either secretFile or user/passwd must be given.
|
def __init__(self, host, port, secretFile=None, user=None, passwd=None, auth="digest"):
if secretFile and (user or passwd):
raise ValueError('WmgrClient: cannot specify secretFile and user or passwd')
if auth not in ('none', 'basic', 'digest'):
raise ValueError('WmgrClient: auth is not one of none, basic or digest')
# read secret file
if secretFile:
user, passwd = self.readSecret(secretFile)
self.host = host
if self.host == 'localhost':
self.host = '127.0.0.1'
self.port = port
self.auth = None
if user is not None or passwd is not None:
if auth == 'basic':
self.auth = requests.auth.HTTPBasicAuth(user, passwd)
elif auth == 'digest':
self.auth = requests.auth.HTTPDigestAuth(user, passwd)
|
[
"def create_auth_client(self):\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION=self.auth_token)\n return client",
"def init_client(self):\n self._transport = RequestsHTTPTransport(url=self._url,\n use_json=True,\n headers={\n \"Content-type\":\n \"application/json\",\n \"Authorization\":\n \"bearer \" +\n str(self._token).strip()\n },\n verify=False)\n self._client = Client(retries=3,\n transport=self._transport,\n fetch_schema_from_transport=False)",
"def _init_client():\n return _Client(_ARM_WS_URL)",
"def _get_client(self):\n options = {\n 'webdav_hostname': 'https://'+self.stg_auth.get_credential('hostname'),\n 'webdav_login': self.stg_auth.get_credential('login'),\n 'webdav_password': self.stg_auth.get_credential('password')\n }\n return Client(options=options)",
"def init_client(self, conf):\n if conf.password is not None:\n account = MyPlexAccount(conf.user, conf.password)\n return account.resource(conf.host).connect()\n else:\n return PlexServer(conf.host, conf.token)",
"def build_client(self):\n pass",
"def _setup_client(self, create=False, container=None):\n\n if container is None:\n container = self.args.container\n\n try:\n values = self.conf.get_container(container)\n except ValueError as ex:\n self.log.error(ex)\n return (None, None)\n\n auth = dict(authurl = self.args.authurl,\n user = values['username'],\n key = values['password'],\n )\n\n if self.args.keystone:\n try:\n from keystoneclient.v2_0 import client as _check_for_ksclient\n except ImportError:\n sys.exit(\"auth 2.0 (keystone) requires python-keystoneclient\")\n else:\n self.log.debug(\"using auth 2.0 (keystone)\")\n\n if self.args.keystone_separator not in values['username']:\n self.log.error(\"%s: separator not found in %r\" % (container, values['username']))\n return (None, None)\n\n keystone_auth = values['username'].split(self.args.keystone_separator, 1)\n auth['tenant_name'], auth['user'] = keystone_auth\n auth['auth_version'] = '2.0'\n auth['os_options'] = dict(service_type = self.args.keystone_service,\n endpoint_type = self.args.keystone_endpoint,\n region_name = self.args.keystone_region,\n )\n self.log.debug(\"os_options: %r\" % auth['os_options'])\n\n self.auth = auth\n cli = client.Connection(**auth)\n\n try:\n headers, _ = cli.get_container(container)\n except (socket.error, client.ClientException) as ex:\n if getattr(ex, 'http_status', None) == 404:\n if create:\n self.log.warning(\"%s doesn't exist, will be created\" % container)\n return (cli, dict())\n else:\n self.log.error(\"%s doesn't exist\" % container)\n else:\n self.log.error(ex)\n return (None, None)\n\n self.log.debug(headers)\n\n meta = getMeta(headers)\n self.log.debug(\"Meta: %s\" % meta)\n\n if not meta:\n self.log.error(\"%s hasn't been setup to be used with swiftnbd\" % container)\n return (None, None)\n\n return (cli, meta)",
"def create_client(self, username=None, service=None, host=None):\n return create_client(username, service, host)",
"def make_client(instance):\n ceilometer_client = utils.get_client_class(\n API_NAME,\n instance._api_version[API_NAME],\n API_VERSIONS)\n LOG.debug('Instantiating ceilometer client: %s', ceilometer_client)\n\n # Remember interface only if interface is set\n kwargs = utils.build_kwargs_dict('endpoint_type', instance.interface)\n\n client = ceilometer_client(\n session=instance.session,\n region_name=instance.region_name,\n **kwargs\n )\n\n return client",
"def _makeClient(repos):\n cfg = conarycfg.ConaryConfiguration(False)\n cfg.name = 'rBuilder'\n cfg.contact = 'rbuilder'\n return conaryclient.ConaryClient(cfg=cfg, repos=repos)",
"def get_desktop_client():\n print(\"Emulating desktop app\")\n\n consumer_key = input('Please enter consumer key: > ')\n consumer_secret = input('Please enter key secret: > ')\n config = upwork.Config({'consumer_key': consumer_key, 'consumer_secret': consumer_secret})\n \"\"\"Assign access_token and access_token_secret if they are known\n config = upwork.Config({\\\n 'consumer_key': 'xxxxxxxxxxx',\\\n 'consumer_secret': 'xxxxxxxxxxx',\\\n 'access_token': 'xxxxxxxxxxx',\\\n 'access_token_secret': 'xxxxxxxxxxx'})\n \"\"\"\n\n client = upwork.Client(config)\n\n try:\n config.access_token\n config.access_token_secret\n except AttributeError:\n verifier = input(\n 'Please enter the verification code you get '\n 'following this link:\\n{0}\\n\\n> '.format(\n client.get_authorization_url()))\n\n print('Retrieving keys.... ')\n access_token, access_token_secret = client.get_access_token(verifier)\n print('OK')\n\n # For further use you can store ``access_toket`` and\n # ``access_token_secret`` somewhere\n\n return client",
"def _create_client(self):\r\n self.association_refresh_time = {}\r\n auth_plugin = k_loading.load_auth_from_conf_options(\r\n cfg.CONF, 'placement')\r\n client = k_loading.load_session_from_conf_options(\r\n cfg.CONF, 'placement', auth=auth_plugin)\r\n client.additional_headers = {'accept': 'application/json'}\r\n return client",
"def make_client(instance):\n application_catalog_client = utils.get_client_class(\n API_NAME,\n instance._api_version[API_NAME],\n API_VERSIONS)\n LOG.debug(\"Instantiating application-catalog client: {0}\".format(\n application_catalog_client))\n\n kwargs = {\n 'session': instance.session,\n 'service_type': 'application-catalog',\n 'region_name': instance._region_name\n }\n\n murano_packages_service = \\\n instance.get_configuration().get('murano_packages_service')\n\n if murano_packages_service == 'glare':\n glare_endpoint = instance.get_configuration().get('glare_url')\n if not glare_endpoint:\n try:\n # no glare_endpoint and we requested to store packages in glare\n # check keystone catalog\n glare_endpoint = \\\n instance.get_endpoint_for_service_type('artifact')\n except Exception:\n raise exc.CommandError(\n \"You set murano-packages-service to {}\"\n \" but there is not 'artifact' endpoint in keystone\"\n \" Either register one or specify endpoint \"\n \" via either --glare-url or env[GLARE_API]\".format(\n murano_packages_service))\n\n artifacts_client = art_client.Client(\n endpoint=glare_endpoint,\n type_name='murano',\n type_version=1,\n token=instance.auth_ref['token']['id'])\n kwargs['artifacts_client'] = artifacts_client\n\n client = application_catalog_client(\n instance.get_configuration().get('murano_url'), **kwargs)\n return client",
"def createClient(self, secure):\n props = self.getPropertyMap()\n if not secure:\n insecure = self.getSession().getConfigService().getConfigValue(\"omero.router.insecure\")\n if insecure is not None and insecure != \"\":\n props[\"Ice.Default.Router\"] = insecure\n else:\n self.__logger.warn(\"Could not retrieve \\\"omero.router.insecure\\\"\")\n\n nClient = omero.client(props)\n nClient.__insecure = not secure\n nClient.setAgent(\"%s;secure=%s\" % (self.__agent, secure))\n nClient.joinSession(self.getSessionId())\n return nClient",
"def _client(self) -> hvac.Client:\n if \"session\" not in self.kwargs:\n # If no session object provide one with retry as per hvac documentation:\n # https://hvac.readthedocs.io/en/stable/advanced_usage.html#retrying-failed-requests\n adapter = HTTPAdapter(\n max_retries=Retry(\n total=3,\n backoff_factor=0.1,\n status_forcelist=[412, 500, 502, 503],\n raise_on_status=False,\n )\n )\n session = Session()\n session.mount(\"http://\", adapter)\n session.mount(\"https://\", adapter)\n self.kwargs[\"session\"] = session\n\n _client = hvac.Client(url=self.url, **self.kwargs)\n if self.auth_type == \"approle\":\n self._auth_approle(_client)\n elif self.auth_type == \"aws_iam\":\n self._auth_aws_iam(_client)\n elif self.auth_type == \"azure\":\n self._auth_azure(_client)\n elif self.auth_type == \"gcp\":\n self._auth_gcp(_client)\n elif self.auth_type == \"github\":\n self._auth_github(_client)\n elif self.auth_type == \"kubernetes\":\n self._auth_kubernetes(_client)\n elif self.auth_type == \"ldap\":\n self._auth_ldap(_client)\n elif self.auth_type == \"radius\":\n self._auth_radius(_client)\n elif self.auth_type == \"token\":\n self._set_token(_client)\n elif self.auth_type == \"userpass\":\n self._auth_userpass(_client)\n else:\n raise VaultError(f\"Authentication type '{self.auth_type}' not supported\")\n\n if _client.is_authenticated():\n return _client\n else:\n raise VaultError(\"Vault Authentication Error!\")",
"def _create_suds_client(self):\n\n self.client = Client(const.WSDLLOCAL)\n self.client.set_options(service = ApiClient._sdict[self.service][0],\n headers = {'user-agent': const.USERAGENT})\n\n # put username (and password if necessary) into the headers.\n # note that another way to do this is to call betdaq.set_user,\n # so the username and password in const.py do not need to be\n # specified.\n self.set_headers(const.BDAQUSER, const.BDAQPASS)",
"def _connect(self):\n\n wrapper_headers, wrapper_body = self._create_wrapper_request()\n\n self.wrapper_user = self._get_wrapper_user(wrapper_headers)\n self.wrapper_key = self._get_wrapper_key(wrapper_body)\n\n self.websocket = self._get_websocket()\n\n return self.init()",
"def make_client(instance):\n\n # Defer client import until we actually need them\n from .monitoringclient import client as monitoring_client\n\n if _monitoring_api_version is not None:\n version = _monitoring_api_version\n else:\n version = instance._api_version[API_NAME]\n LOG.debug('Instantiating monitoring client for V%s', version)\n\n # Remember interface only if it is set\n kwargs = utils.build_kwargs_dict('endpoint_type', instance._interface)\n\n client = monitoring_client.Client(\n version,\n session=instance.session,\n timings=instance.timing,\n region_name=instance._region_name,\n **kwargs\n )\n return client",
"def registry_client(e2e_ws_scope: OperationScope, auth: ClientSecretCredential) -> MLClient:\n return MLClient(\n credential=auth,\n subscription_id=e2e_ws_scope.subscription_id,\n resource_group_name=e2e_ws_scope.resource_group_name,\n logging_enable=getenv(E2E_TEST_LOGGING_ENABLED),\n registry_name=\"testFeed\",\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
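A hedged sketch of how the requests auth objects built in the constructor above are typically attached to an HTTP call; the URL, port, and credentials below are placeholders, not values from the dataset.

import requests
import requests.auth

url = "http://127.0.0.1:25080/dbs"                       # placeholder endpoint
basic = requests.auth.HTTPBasicAuth("user", "passwd")    # auth='basic'
digest = requests.auth.HTTPDigestAuth("user", "passwd")  # auth='digest'

# requests takes the auth handler per call; auth=None disables authentication,
# which corresponds to the auth='none' case above.
# response = requests.get(url, auth=digest)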
Returns the list of database names.
|
def databases(self):
_log.debug('get database list')
result = self._requestJSON('dbs', '')
return self._getKey(result, 'name')
|
[
"def list_database(db=None):\n if db is None:\n return CONNECTION.get_connection().database_names()\n return CONNECTION.get_connection()[db].collection_names()",
"def get_glue_database_names(self):\n try:\n self.response = self.glue_client.get_databases()\n database_names = []\n for idx, i in enumerate(self.response['DatabaseList']):\n database_names.append(self.response['DatabaseList'][idx]['Name'])\n return database_names\n except Exception as e:\n print(e)",
"def listDB(self):\n # Responses: list of db names\n return self.get(\"/_all_dbs\", descr='listDB').addCallback(\n self.parseResult)",
"def list_databases(self):\n\n _conn = self.get_mongo_client()\n return [i for i in _conn.list_databases()]",
"def print_database_names(self) -> None:\n n = self._database_connection.database_names()\n print(n)",
"def _get_db_names(self, dbs, strict=True):\r\n dbs = utils.coerce_string_to_list(dbs)\r\n db_names = [utils.get_name(db) for db in dbs]\r\n if strict:\r\n good_dbs = self.instance.list_databases()\r\n good_names = [utils.get_name(good_db) for good_db in good_dbs]\r\n bad_names = [db_name for db_name in db_names\r\n if db_name not in good_names]\r\n if bad_names:\r\n bad = \", \".join(bad_names)\r\n raise exc.NoSuchDatabase(\"The following database(s) were not \"\r\n \"found: %s\" % bad)\r\n return db_names",
"def databases():\n\tg.db = mysqladm.core.db_connect()\n\n\t## Load servers\n\trows = mysqladm.databases.get_all_databases(cmd_line=True)\n\t\n\tfor row in rows:\n\t\tprint(row['shortserver'] + '/' + row['name'])",
"def cli_cosmosdb_database_list(client):\n return list(client.ReadDatabases())",
"def get_databases_list(self):\n databases_lst = {} # {instance: list of databases}\n for instance in self:\n db_names = [db_info.name\n for db_info in instance.instance_database_ids]\n databases_lst.update({\n instance.name: db_names\n })\n return databases_lst",
"def all_databases(self):\n return \"\"\"--all-databases\"\"\"",
"def list_databases(middcourses_only='true'):\n\n if middcourses_only == 'true':\n query = 'psql -t -c \"select datname from pg_database where datname like \\'middcourses%\\'\"'\n else:\n query = 'psql -t -c \"select datname from pg_database\"'\n\n databases = local(query, capture=True)\n databases = [db.strip(' ') for db in databases.split('\\n')]\n\n puts(blue('Databases:'))\n for db in databases:\n puts(indent(db, spaces=2))",
"def get_databases(verbose=True):\n from balsam import django_config\n from balsam.django_config.db_index import refresh_db_index\n from ipywidgets import interact\n import os\n databasepaths = []\n try:\n databasepaths.extend(refresh_db_index())\n if verbose:\n print(f'There are {len(databasepaths)} Balsam databases available:')\n for i,db in enumerate(databasepaths):\n print(f'{i}: {db}')\n except Excpetion as e:\n print('🛑 Exception caught during balsam.django_config.db_index.refresh_db_index:')\n print(e, '\\n')\n return databasepaths",
"def showDatabases(self):\n self.databasesList.addItems(self.dbMan.getListNamesDatabases())",
"def get_databases():\n try:\n database_probe = DatabaseProbe()\n result_set = database_probe.execute_query(\"SELECT * FROM SYS.DATABASES;\")\n click.echo(result_set)\n return result_set\n except Exception as e:\n click.secho(e, bold=True, fg=\"red\")\n finally:\n database_probe.dispose()",
"def ls_dbs(dbUsername=config[\"db_server_root_username\"], dbPassword=config[\"db_server_root_password\"], dbHost=config[\"db_server\"]):\n mysqlDbs = get_dbs(dbUsername, dbPassword, dbHost)\n print(\"\\n\".join(mysqlDbs))\n print(\"DONE\")",
"def showDatabases(self):\n self.databasesList.clear()\n self.databasesList.addItems(self.dbMan.getListNamesDatabases())",
"def GetDatabases(self, databases=[\"\"]):\n\n\t\t# Check\n\t\tif databases and not isinstance(databases, list): raise TypeError\n\n\t\t# Create return array\n\t\trtn=[]\n\n\t\t# Create cursor\n\t\tcursor=self.__dbConnection.cursor()\n\n\t\t# Retrieve all databases\n\t\tfor db in databases:\n\t\t\tquery=\"SHOW DATABASES%s\" % (\" LIKE '%s'\" % db)\n\t\t\tcursor.execute(query)\n\n\t\t\t# Gather rows of databases\n\t\t\tif cursor.rowcount==0:\n\t\t\t\tif len(databases) and databases[0]!=\"\":\n\t\t\t\t\traise NoDatabaseFound(db)\n\t\t\t\telse:\n\t\t\t\t\traise NoDatabasesFound\n\t\t\t\t# End if\n\t\t\telse:\n\t\t\t\t[ rtn.append(row[0]) for row in cursor.fetchall() ]\n\t\t\t# End if\n\t\t# End for\n\n\t\t# Close cursor\n\t\tcursor.close()\n\n\t\t# Return databases-array\n\t\treturn rtn",
"def get_all_databases(self):\n\n self.cmd = [self.pg_psql]\n self.cmd.append(\"-A\") # No align for output without separators\n self.cmd.append(\"-q\") # No welcome messages, row counters\n self.cmd.append(\"-t\") # No column names\n self.cmd.extend([\"-F\", \" \"]) # Field separator\n\n pg_query = \"\"\"\n SELECT datname FROM pg_database;\n \"\"\"\n\n self.cmd.extend([\"-h\", self.pg_host,\n \"-p\", self.pg_port,\n \"-U\", self.pg_user,\n \"-d\", self.pg_db,\n \"-c\", pg_query])\n\n proc = subprocess.Popen(self.cmd, env={\"PGPASSWORD\":self.postgres_password},\n stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n universal_newlines=True)\n\n out, err = proc.communicate()\n rc = proc.returncode\n\n if rc == 0:\n logging.info(\"Receive all databases from host '{0}:{1}'.\".format(self.pg_host, self.pg_port))\n return out\n else:\n raise Exception(err)",
"def databases(self) -> dict:\n return self.config[\"databases\"]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create a new table. The table schema ("CREATE TABLE ...") may be specified in the schema argument; if schema is None then the table schema will be loaded from CSS. If chunkColumns is True then delete the columns "_chunkId" and "_subChunkId" from the table (if they exist) and add the columns "chunkId" and "subChunkId" (if they don't exist).
|
def createTable(self, dbName, tableName, schema=None, chunkColumns=False):
_log.debug('create table: %s.%s', dbName, tableName)
data = dict(table=tableName, chunkColumns=str(int(chunkColumns)))
if schema:
data['schema'] = schema
else:
data['schemaSource'] = 'CSS'
self._requestJSON('dbs', dbName + '/tables', method='POST', data=data)
|
[
"def createTable(name, directory, schema, rz_id=None, column_wise=True):\n\n # Make sure the table is being put in an HBOOK file.\n if not isinstance(directory, Directory):\n raise TypeError, \"directory is not in an HBOOK file\"\n # Make sure the file is writable.\n if not directory.writable:\n raise hep.fs.AccessError, \"%s is not writable\" % directory\n\n if rz_id is None:\n rz_id = directory._make_rz_id()\n\n # Check that the names and types of columns are consistent with\n # the ntuple type.\n for column in schema.columns:\n if column_wise:\n if len(column.name) > 32:\n raise ValueError, \"column names in a column-wise \" \\\n \"ntuple may not be longer than 32 characters\"\n if column.type \\\n not in (\"int32\", \"int64\", \"float32\", \"float64\"):\n raise ValueError, \"unsupported type '%s' \" \\\n \"for column-wise ntuple\" % column.type\n else:\n if len(column.name) > 8:\n raise ValueError, \"column names in a row-wise \" \\\n \"ntuple may not be longer than 8 characters\"\n if column.type != \"float32\":\n raise ValueError, \"columns in a row-wise ntuple \" \\\n \"must be \\\"float32\\\" type\"\n\n rz_path = directory.rz_path\n if column_wise:\n table = ext.createColumnWiseNtuple(rz_id, name, rz_path, schema)\n if len(schema) != len(table.schema):\n # A problem occured with one or more columns.\n raise RuntimeError, \"error creating columns of %s\" % path\n else:\n table = ext.createRowWiseNtuple(rz_id, name, rz_path, schema)\n table.file = directory.file\n return table",
"def init_chunks(metadata):\n return Table(\"chunks\", metadata,\n Column(\"id\", Integer, primary_key=True),\n Column(cn.chunk_bx, Integer),\n Column(cn.chunk_by, Integer),\n Column(cn.chunk_bz, Integer),\n Column(cn.chunk_ex, Integer),\n Column(cn.chunk_ey, Integer),\n Column(cn.chunk_ez, Integer),\n Column(cn.chunk_tag, Text))",
"def createTable(conn, table, num_cols=10, engine=\"INNODB\"):\n cursor = conn.cursor()\n subquery = \",\".join([\"col\" + str(i) + \" INT\" for i in range(1,num_cols+1)])\n query = \"create table if not exists {} ({}) ENGINE={};\".format(table, subquery, engine)\n cursor.execute(query)\n conn.commit()",
"def createTable(self, tableName, columnFamilies):\n pass",
"def create_table(self):\n table_path = os.path.join(self.opts[\"data_dir\"], self.table_name())\n self.output_file = open_fw(table_path, encoding=self.encoding)\n self.output_file.write(u'<?xml version=\"1.0\" encoding=\"UTF-8\"?>')\n self.output_file.write(u'\\n<root>')\n self.table_names.append((self.output_file, table_path))\n self.auto_column_number = 1\n\n # Register all tables created to enable\n # testing python files having custom download function\n if self.script.name not in self.script_table_registry:\n self.script_table_registry[self.script.name] = []\n self.script_table_registry[self.script.name].append(\n (self.table_name(), self.table))",
"def createChunk(self, dbName, tableName, chunkId, overlap):\n\n _log.debug('create table: %s.%s', dbName, tableName)\n overlapFlag = 'yes' if overlap else 'no'\n data = dict(chunkId=chunkId, overlapFlag=overlapFlag)\n resource = dbName + '/tables/' + tableName + '/chunks'\n self._requestJSON('dbs', resource, method='POST', data=data)",
"def init_seg_table(metadata, tablename, segid_colname=cn.seg_id, chunked=True):\n columns = [Column(\"id\", BigInteger, primary_key=True),\n Column(cn.seg_id, Integer, index=True),\n Column(cn.size, Integer),\n # Centroid coordinates\n Column(cn.centroid_x, Float),\n Column(cn.centroid_y, Float),\n Column(cn.centroid_z, Float),\n # Bounding box\n Column(cn.bbox_bx, Integer),\n Column(cn.bbox_by, Integer),\n Column(cn.bbox_bz, Integer),\n Column(cn.bbox_ex, Integer),\n Column(cn.bbox_ey, Integer),\n Column(cn.bbox_ez, Integer)]\n\n if chunked:\n # Chunk id - None if merged across chunks\n columns.append(Column(cn.chunk_tag, Text, index=True))\n\n return Table(tablename, metadata, *columns)",
"def Create_table(self, tableName):\n \n return \"CREATE TABLE {} AS \\n\".format(tableName)",
"def create_table(self):\n file_df = pd.read_csv(self.file_path)\n columns = file_df.columns\n data_type = []\n for col in columns:\n if file_df[col].dtype == int:\n data_type.append((col, 'int(10)'))\n elif file_df[col].dtype == object:\n data_type.append((col, 'varchar(20)'))\n \n variables = ''.join(['{} {}, '.format(col, dtype) for col, dtype in data_type])[:-2]\n\n try:\n drop_query = \"\"\"DROP TABLE {table_name}\"\"\".format(table_name=self.file_name)\n result = self.db.exec_query(drop_query)\n except:\n pass\n\n query = \"\"\"CREATE TABLE {file_name} ({variables});\n \"\"\".format(file_name=self.file_name, variables=variables)\n print (query)\n\n # query to create file\n result = self.db.exec_query(query)",
"def create_tables(cur, conn):\n sql = sql_queries.Queries()\n for query in sql.create_table_queries:\n cur.execute(query)\n conn.commit()",
"def create_sql_table_from_create_table_stmt(self, parsed_stmt):\n if not self.is_create_table_sql(parsed_stmt):\n raise ValueError(\"parsed_stmt should be a create-table statement. \"\n \"Value: {0}\".format(parsed_stmt))\n table = sql_entities.SQLTable(self._get_table_name(parsed_stmt))\n table.columns.extend(self._get_columns(parsed_stmt))\n return table",
"def compile_create(self, blueprint, command, _):\n columns = ', '.join(self._get_columns(blueprint))\n\n sql = 'CREATE TABLE %s (%s' % (self.wrap_table(blueprint), columns)\n\n sql += self._add_foreign_keys(blueprint)\n\n sql += self._add_primary_keys(blueprint)\n\n return sql + ')'",
"def create_table_3(new_table_name):\n\n create_table = f\"CREATE TABLE IF NOT EXISTS {new_table_name} (\\\n ID BIGINT PRIMARY KEY,\\\n region_subregion_country_area TEXT,\\\n country_code BIGINT,\\\n '1950' FLOAT, '1955' FLOAT,\\\n '1960' FLOAT, '1965' FLOAT,\\\n '1970' FLOAT, '1975' FLOAT,\\\n '1980' FLOAT, '1985' FLOAT,\\\n '1990' FLOAT, '1995' FLOAT,\\\n '2000' FLOAT, '2005' FLOAT,\\\n '2010' FLOAT, '2015' FLOAT);\"\n return create_table",
"def create_table (self, tablename = 'motif'):\n\n c = self.connection.cursor()\n\n # create\n\n c.execute('''create table ? (matrix text, source text, factorName text, species\n text, pmid integer, domain text, structureCategory text )''', (tablename))\n\n self.connection.commit()\n\n c.close()",
"def _create_table(self, table_name):\n raise NotImplementedError()",
"def create_stage_table(db_table, columns):\n conn, cursor = common.database_connection()\n # drop table if it exists\n try:\n drop_table = 'DROP TABLE IF EXISTS ' + db_table\n cursor.execute(drop_table)\n logging.info(\"If exists, table \" + db_table + \" was deleted\")\n except Exception as e:\n logging.exception(\"Failed to delete staging table:: %s\", e)\n raise\n # creates the table using the columns provided in url_mapping.py file\n try:\n create_table = 'CREATE TABLE ' + db_table + ' ( ' + ' VARCHAR, '.join(columns) + ' VARCHAR)'\n cursor.execute(create_table)\n conn.commit()\n conn.close()\n logging.info(\"Table \" + db_table + \" was created\")\n except Exception as e:\n logging.exception(\"Failed to create staging table:: %s\", e)\n raise",
"def create_table(self, table_name, column_names, column_types):\n log.info(\"Creating table %s\",table_name)\n sql_command = \"CREATE TABLE %s (\" % (table_name)\n for name, data_type in zip(column_names, column_types):\n sql_command += \"{0} {1},\".format(name, self.type_to_name[data_type])\n # replace last comma for a parenthesis\n self.execute(sql_command[0:-1] + \")\")\n self.commit()",
"def create_table(table_name, column_names):\n meta_data = xml_metadata(table_name, column_names)\n file_name = str(table_name).strip() + \".csv\"\n with open(\"metadata.txt\", \"a\") as f:\n f.write(meta_data)\n os.system(\"touch \" + file_name)",
"def setup_table(cursor, table_name, data, **options):\n cursor.execute(\"DROP TABLE IF EXISTS \" + table_name)\n options = options.items()\n sql_statement = \"CREATE TABLE \" + table_name + \"(\"\n for index, columns in enumerate(options):\n if columns == options[-1]:\n sql_statement += columns[0] + \" \" + columns[1].upper()\n else:\n sql_statement += columns[0] + \" \" + columns[1] + \", \"\n sql_statement += \")\"\n print sql_statement\n cursor.execute(sql_statement)\n cursor.executemany(\n \"INSERT INTO \" + table_name + \" VALUES(?, ?, ?)\", data)\n import ipdb\n ipdb.set_trace()\n return cursor.lastrowid"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Delete an existing table. If dropChunks is True then delete all chunk tables as well. If mustExist is True and the table does not exist then an exception is raised.
|
def dropTable(self, dbName, tableName, dropChunks=True, mustExist=True):
_log.debug('drop table: %s.%s', dbName, tableName)
params = dict(dropChunks=str(int(dropChunks)))
try:
self._requestJSON('dbs', dbName + '/tables/' + tableName, method='DELETE', params=params)
except ServerError as exc:
# if db does not exist then it's OK
if exc.code != 404 or mustExist:
raise
|
[
"def delete_if_exists(self) -> None:\n\t\tif self.exists:\n\t\t\tself._database_api.execute_query('DROP TABLE ' + self.table_name + ';', True)\n\t\t\tself._exists = False",
"def _delete(self):\n self.db_engine.execute(\"drop table if exists {}\".format(self.distance_table))",
"def delete(self):\n self.connection.delete_table(self.table_name)\n return True",
"def delete_table(self, table):\n exp = \"\"\"\n DROP TABLE %s\n \"\"\" % (table,)\n\n try:\n curs = self.conn.cursor()\n curs.execute(exp)\n return True\n except Exception:\n return False",
"def delete_table(self) -> Callable[[metastore.DeleteTableRequest], metastore.Table]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"delete_table\" not in self._stubs:\n self._stubs[\"delete_table\"] = self.grpc_channel.unary_unary(\n \"/google.cloud.bigquery.biglake.v1.MetastoreService/DeleteTable\",\n request_serializer=metastore.DeleteTableRequest.serialize,\n response_deserializer=metastore.Table.deserialize,\n )\n return self._stubs[\"delete_table\"]",
"def DeleteTable(self, table_name):\n pass",
"async def _drop_table(self, message):\n import_pkt = message.get_packet(packets.ImportTablePacket)\n dest_pkt = message.get_packet(packets.DestinationPacket)\n\n table = import_pkt.table\n destination = dest_pkt.destination\n identifier = destination.get_identifier(table)\n\n self.logger.info(\"Dropping table %s\", table)\n\n try:\n await destination.execute(\"DROP TABLE {0}\".format(identifier))\n except exceptions.QueryException as ex:\n if not self.quiet:\n raise\n\n self.logger.debug(\" \".join([\n \"QueryException caught when dropping table %s,\",\n \"ignoring as it most likely means the table doesn't exist\"\n ]), table)\n\n self.logger.debug(ex.response)",
"def PBH_TABLE_delete(db, table_name):\n\n ctx = click.get_current_context()\n\n table_name_validator(ctx, db.cfgdb_pipe, table_name)\n\n table = str(PBH_TABLE_CDB)\n key = str(table_name)\n\n try:\n del_entry(db.cfgdb_pipe, table, key)\n except Exception as err:\n exit_with_error(\"Error: {}\".format(err), fg=\"red\")",
"def delete_data_table():",
"def genericDelete(table_name):\n\n db = connect()\n\n query = \"DELETE FROM %s\" % table_name\n\n c = db.cursor()\n \n c.execute(query)\n\n db.commit()\n db.close()",
"def test_drop_table_if_exists():\n RANDOM_TABLE_NAME = 'cids_{}'.format(str(uuid.uuid4()).replace('-', '_')[:30-len('cids_')]).upper()\n expected_result = 'Table {} does not exist, ignoring DROP'.format(RANDOM_TABLE_NAME)\n result = drop_table_if_exists(RANDOM_TABLE_NAME, fetchDBConnection())\n assert_equals(\n result,\n expected_result,\n 'Could not drop non-existing table'\n )",
"def drop_table(self):\n self.connect()\n try:\n sql = \"drop table if exists {0}\".format(self.tablename)\n self.cursor.execute(sql)\n except Exception as err:\n print(err)\n finally:\n self.disconnect()",
"def drop(self):\n\n if not self.enable_delete:\n raise Exception(\"Deleting not enabled\")\n\n # sorted by foreign key dependency\n for table in reversed(self.metadata.sorted_tables):\n\n # Leave spatial tables alone.\n if table.name not in ['spatial_ref_sys']:\n sql = 'DROP TABLE IF EXISTS \"{}\" CASCADE'.format(table.name)\n\n self.connection.execute(sql)",
"def delete(self, table, where, using=None, vars=None, _test=False):\n if vars is None: vars = {}\n where = self._where(where, vars)\n\n q = 'DELETE FROM ' + table\n if using: q += ' USING ' + sqllist(using)\n if where: q += ' WHERE ' + where\n\n if _test: return q\n\n db_cursor = self._db_cursor()\n self._db_execute(db_cursor, q)\n if not self.ctx.transactions:\n self.ctx.commit()\n return db_cursor.rowcount",
"def test_drop_table_10():\n print_test_separator(\"Starting test_drop_table_10\")\n cleanup()\n cat = CSVCatalog.CSVCatalog()\n\n cds = []\n cds.append(CSVCatalog.ColumnDefinition(\"playerID\", \"text\", True))\n cds.append(CSVCatalog.ColumnDefinition(\"nameLast\", \"text\", True))\n cds.append(CSVCatalog.ColumnDefinition(\"nameFirst\", column_type=\"text\"))\n\n t = cat.create_table(\"people\",\"../data/People.csv\", cds)\n\n t.define_index(\"full_name\", ['nameFirst', 'nameLast'])\n print(json.dumps(t.describe_table(),indent=2))\n \n print_test_separator(\"Drop the column:nameLast, the index should also be dropped\")\n t.drop_column_definition('nameLast')\n print(json.dumps(t.describe_table(),indent=2))\n\n print_test_separator(\"Completed test_drop_table_10\")",
"def drop_tables():\n commands = (\n \"\"\"\n DROP TABLE album CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE musica_ficheiro CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE compositor CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE autor CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE concerto CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE criticamusica CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE utilizador CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE playlist CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE grupomusical CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE artista CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE editora CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE periodoeditora CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE criticaalbum CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE musica_ficheiro_concerto CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE grupomusical_artista CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE musica_ficheiro_utilizador CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE musica_ficheiro_playlist CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE musica_ficheiro_genero CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE musica_ficheiro_album CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE genero CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE utilizador_musica_ficheiro CASCADE\n \"\"\")\n\n try:\n\n conn = psycopg2.connect(host=\"localhost\",database=\"dropmusic\", user=\"postgres\", password=\"postgres\")\n cur = conn.cursor()\n # DROP table one by one\n for command in commands:\n cur.execute(command)\n # close communication with the PostgreSQL database server\n cur.close()\n # commit the changes\n conn.commit()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n conn.close()",
"def delete(self,table,where=None,delete_all=False,status_check=True,constraint_check=True):\n\n if not where and not delete_all:\n raise ValueError(\"To delete all rows, you must specify delete_all=True\")\n \n if status_check:\n table.check_on_db(self)\n where_s = ' WHERE %s' % (where) if where is not None else ''\n query_template = 'DELETE FROM `%s`%s' % (table.name,where_s)\n res = self.query(query_template)\n if constraint_check:\n table.check_constraints_on_db(self)\n return res",
"def cleanup_temp_tables(self):\n\n def cleanup(table_name):\n logger.info(\n f\"removing table {table_name} in workgroup {self.workgroup} if it exists\"\n )\n _, future = self.cursor.execute(f\"DROP TABLE IF EXISTS {table_name};\")\n return future\n\n fs = [cleanup(table_name) for table_name in self.input_table_names()]\n fs.append(cleanup(self.export_table_name()))\n return self._validate_futures(fs)",
"def _do_final_cleanup(conn, logger, is_locked, tables_to_delete):\n if is_locked:\n with conn.cursor() as cursor:\n cursor.execute('SELECT pg_advisory_unlock(%s::BIGINT)', [hash_string_64bit('dirbs-classify')])\n\n with conn.cursor() as cursor:\n remaining_tables_to_delete = copy.copy(tables_to_delete)\n for t in tables_to_delete:\n try:\n cursor.execute(sql.SQL('DROP TABLE IF EXISTS {0} CASCADE').format(sql.Identifier(t)))\n conn.commit()\n remaining_tables_to_delete.remove(t)\n except: # noqa: E722\n for t_not_deleted in remaining_tables_to_delete:\n logger.warn('Failed to drop table {0} due to exception. Please issue '\n '\\'DROP TABLE IF EXISTS {0}\\' manually!'.format(t_not_deleted))\n raise"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the list of chunks in the given table.
|
def chunks(self, dbName, tableName):
_log.debug('get chunks, table: %s.%s', dbName, tableName)
resource = dbName + '/tables/' + tableName + '/chunks'
result = self._requestJSON('dbs', resource)
return self._getKey(result, 'chunkId')
|
[
"def get_chunks(self, n_chunks: int | None = None) -> Iterable[Column]:",
"def get_chunks_list(self):\n return self._chunkFiles",
"def get_partitions(self, table, db=\"default\"):\n partitions = []\n try:\n return self.get(\"ddl/database/%s/table/%s/partition\" % (db, table))['partitions']\n except Exception, ex:\n raise Exception(\"\"\"Templeton: error on getting partitions: %s\"\"\" % str(ex))\n return partitions",
"def _chunks(lst, n):\n for i in range(0, len(lst), n):\n yield lst[i:i + n]",
"def chunk(self, chunks, dim=0): # real signature unknown; restored from __doc__\n return []",
"def table_to_group_list(table):\n return [[x for x in table[i]] for i in range(len(table))]",
"def createListFromTable(self, table):\n\t\tlistOfData = []\n\n\t\tfor data in table:\n\t\t\tlistOfData.append(data)\n\n\t\treturn listOfData",
"def chunks(l, n):\n o = int(np.round(len(l)/n))\n out = []\n # For item i in a range that is a length of l,\n for i in range(0, n):\n # Create an index range for l of n items:\n if i == n-1:\n sub = l[i*o:]\n else:\n sub = l[i*o:i*o+o]\n \n if len(sub):\n out.append(sub)\n return out",
"def make_split(data: pd.DataFrame, size: int = 500) -> list:\n \n # determine number of batches by dividing rows of dataframe by size\n n = np.ceil(data.shape[0] / size) \n \n # split data into n sections\n splitted_data = np.array_split(data, n)\n \n return splitted_data",
"def chunks(l, n):\n \n if n<1:\n n=1\n return [l[i:i+n] for i in range(0, len(l), n)]",
"def query_for_lines(self, table_name):\n return []",
"def xrootdChunks(self, dbName):\n _log.debug('list chunks in xrootd: %s', dbName)\n result = self._requestJSON('xrootd', 'dbs/' + dbName, method='GET')\n return self._getKey(result, 'chunkId')",
"def chunks(lst, amount):\n return [lst[i:i + amount] for i in range(0, len(lst), amount)]",
"def partition_specs(self, table):\n self._tables_init()\n\n # Cache results for later.\n # If we don't know the partitions yet, get them now.\n if 'partitions' not in self.tables[table].keys():\n partition_descs = self.query('SET hive.cli.print.header=false; SHOW PARTITIONS {0};'.format(table)).splitlines()\n # Convert the desc format to spec format and return that\n self.tables[table]['partitions'] = [\n self.partition_spec_from_partition_desc(p)\n for p in partition_descs\n ]\n\n return self.tables[table]['partitions']",
"def init_chunks(metadata):\n return Table(\"chunks\", metadata,\n Column(\"id\", Integer, primary_key=True),\n Column(cn.chunk_bx, Integer),\n Column(cn.chunk_by, Integer),\n Column(cn.chunk_bz, Integer),\n Column(cn.chunk_ex, Integer),\n Column(cn.chunk_ey, Integer),\n Column(cn.chunk_ez, Integer),\n Column(cn.chunk_tag, Text))",
"def iter_chunks(self) -> Iterable[Hashable]:\n pass",
"def chunks(collection, chunkSize):\n \n for i in range(0, len(collection), chunkSize):\n yield collection[i:i + chunkSize]",
"def chunks(data_list, chunk_size):\n data_info, frequency, bits = data_list\n\n some_data_list = []\n for i in range(0, len(data_info), chunk_size):\n some_data_list.append(data_info[i:i+chunk_size])\n return some_data_list",
"def getTableRegions(self, tableName):\n self.send_getTableRegions(tableName)\n return self.recv_getTableRegions()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create a new chunk; this should work with both tables and views. If overlap is True then create an overlap table in addition to the chunk table.
|
def createChunk(self, dbName, tableName, chunkId, overlap):
_log.debug('create table: %s.%s', dbName, tableName)
overlapFlag = 'yes' if overlap else 'no'
data = dict(chunkId=chunkId, overlapFlag=overlapFlag)
resource = dbName + '/tables/' + tableName + '/chunks'
self._requestJSON('dbs', resource, method='POST', data=data)
|
[
"def init_overlap_tables(metadata):\n init_overlap_table(metadata, \"chunk_overlaps\", chunked=True)\n init_overlap_table(metadata, \"max_overlaps\", chunked=False)",
"def init_chunks(metadata):\n return Table(\"chunks\", metadata,\n Column(\"id\", Integer, primary_key=True),\n Column(cn.chunk_bx, Integer),\n Column(cn.chunk_by, Integer),\n Column(cn.chunk_bz, Integer),\n Column(cn.chunk_ex, Integer),\n Column(cn.chunk_ey, Integer),\n Column(cn.chunk_ez, Integer),\n Column(cn.chunk_tag, Text))",
"def init_seg_table(metadata, tablename, segid_colname=cn.seg_id, chunked=True):\n columns = [Column(\"id\", BigInteger, primary_key=True),\n Column(cn.seg_id, Integer, index=True),\n Column(cn.size, Integer),\n # Centroid coordinates\n Column(cn.centroid_x, Float),\n Column(cn.centroid_y, Float),\n Column(cn.centroid_z, Float),\n # Bounding box\n Column(cn.bbox_bx, Integer),\n Column(cn.bbox_by, Integer),\n Column(cn.bbox_bz, Integer),\n Column(cn.bbox_ex, Integer),\n Column(cn.bbox_ey, Integer),\n Column(cn.bbox_ez, Integer)]\n\n if chunked:\n # Chunk id - None if merged across chunks\n columns.append(Column(cn.chunk_tag, Text, index=True))\n\n return Table(tablename, metadata, *columns)",
"def _create_tablet(self, fn, cgroup):\n\t\tself._check_transaction()\n\n\t\t# Create a tablet at a given path, for cgroup 'cgroup'\n\t\tassert os.access(fn, os.R_OK) == False\n\n\t\t# Create the cell directory if it doesn't exist\n\t\tpath = fn[:fn.rfind('/')];\n\t\tif not os.path.exists(path):\n\t\t\tutils.mkdir_p(path)\n\n\t\t# Create the tablet\n\t\tlogger.debug(\"Creating tablet %s\" % (fn))\n\t\tfp = tables.open_file(fn, mode='w')\n\n\t\t# Force creation of the main subgroup\n\t\tself._get_row_group(fp, 'main', cgroup)\n\n\t\treturn fp",
"def create_as_merge(self, node=None):\n user_name = f\"user_{getuid()}\"\n table_name = f\"table_{getuid()}\"\n source_table_name = f\"source_table_{getuid()}\"\n exitcode, message = errors.not_enough_privileges(name=f\"{user_name}\")\n\n if node is None:\n node = self.context.node\n\n with table(node, f\"{source_table_name}\"):\n with user(node, f\"{user_name}\"):\n\n try:\n with When(\"I grant CREATE TABLE privilege to a user\"):\n node.query(f\"GRANT CREATE TABLE ON {table_name} TO {user_name}\")\n\n with And(\"I grant SELECT privilege on the source table\"):\n node.query(f\"GRANT SELECT ON {source_table_name} TO {user_name}\")\n\n with Then(\"I try to create a table as another table\"):\n node.query(f\"CREATE TABLE {table_name} AS merge(default,'{source_table_name}')\", settings = [(\"user\", f\"{user_name}\")])\n\n finally:\n with Finally(\"I drop the tables\"):\n node.query(f\"DROP TABLE IF EXISTS {table_name}\")",
"def create_table_from_query(self, table_name, query, ):\r\n query = 'CREATE MATERIALIZED VIEW %s AS ' %table_name + query\r\n self.engine.execute( query)",
"def create_block_file(self):\n # setup header block\n self.hdr_blk = HunkHeaderBlock()\n blks = [self.hdr_blk]\n sizes = []\n for seg in self.segments:\n size = seg.create(blks)\n sizes.append(size)\n # add HUNK_END\n blks.append(HunkEndBlock())\n # finally setup header\n self.hdr_blk.setup(sizes)\n # create HunkBlockFile\n return HunkBlockFile(blks)",
"def create_block_file(self):\n # setup header block\n self.hdr_blk = HunkHeaderBlock()\n blks = [self.hdr_blk]\n sizes = []\n for seg in self.segments:\n size = seg.create(blks)\n sizes.append(size)\n # add HUNK_END\n blks.append(HunkEndBlock())\n # finally setup header\n self.hdr_blk.setup(sizes)\n # create HunkBlockFile\n return HunkBlockFile(blks)",
"def create_table_3(new_table_name):\n\n create_table = f\"CREATE TABLE IF NOT EXISTS {new_table_name} (\\\n ID BIGINT PRIMARY KEY,\\\n region_subregion_country_area TEXT,\\\n country_code BIGINT,\\\n '1950' FLOAT, '1955' FLOAT,\\\n '1960' FLOAT, '1965' FLOAT,\\\n '1970' FLOAT, '1975' FLOAT,\\\n '1980' FLOAT, '1985' FLOAT,\\\n '1990' FLOAT, '1995' FLOAT,\\\n '2000' FLOAT, '2005' FLOAT,\\\n '2010' FLOAT, '2015' FLOAT);\"\n return create_table",
"def create_table_8(new_table_name):\n\n create_table = f\"CREATE TABLE IF NOT EXISTS {new_table_name} (\\\n ID BIGINT PRIMARY KEY,\\\n country TEXT,\\\n code TEXT,\\\n country_code BIGINT,\\\n continent TEXT,\\\n capital TEXT,\\\n latitude FLOAT,\\\n longitude FLOAT);\"\n return create_table",
"def createTable(self, dbName, tableName, schema=None, chunkColumns=False):\n\n _log.debug('create table: %s.%s', dbName, tableName)\n data = dict(table=tableName, chunkColumns=str(int(chunkColumns)))\n if schema:\n data['schema'] = schema\n else:\n data['schemaSource'] = 'CSS'\n self._requestJSON('dbs', dbName + '/tables', method='POST', data=data)",
"def init_seg_tables(metadata, segid_colname=cn.seg_id):\n init_seg_table(metadata, \"chunk_segs\", segid_colname=segid_colname)\n init_seg_table(metadata, \"merged_segs\", segid_colname=segid_colname,\n chunked=False)",
"def test_chunk_create(self):\n with self.mock_post('prediction_chunks') as m:\n client = self.client.prediction.chunk_create()\n\n self.assertIsNotNone(client)\n self.assertEqual(client.id, 5678)\n self.assertEqual(client.label, 'longview5678')\n\n self.assertEqual(m.call_url, '/prediction_chunks')\n self.assertEqual(m.call_data, {})",
"def _create_cell(args, cell_body):\n if args['command'] == 'dataset':\n try:\n datalab.bigquery.Dataset(args['name']).create(friendly_name=args['friendly'],\n description=cell_body)\n except Exception as e:\n print('Failed to create dataset %s: %s' % (args['name'], e))\n else:\n if cell_body is None:\n print('Failed to create %s: no schema specified' % args['name'])\n else:\n try:\n record = datalab.utils.commands.parse_config(cell_body,\n datalab.utils.commands.notebook_environment(),\n as_dict=False)\n schema = datalab.bigquery.Schema(record)\n datalab.bigquery.Table(args['name']).create(schema=schema, overwrite=args['overwrite'])\n except Exception as e:\n print('Failed to create table %s: %s' % (args['name'], e))",
"def create(persister=None):\n\n persister.exec_stmt(\n RangeShardingSpecification.CREATE_RANGE_SPECIFICATION)",
"def create_block(self):\n #\n # raise an exception if the block already exists\n site_sport = self.get_site_sport()\n start = self.get_start()\n end = self.get_end()\n cutoff_time = self.get_cutoff() # datetime.time() object\n logger.info('Attempting to create Multi Block sport: %s | start: %s | end: %s' % (\n site_sport, start, end\n ))\n\n try:\n #\n # set fields: dfsday_start (datetime), dfsday_end (datetime), cutoff_time (time object)\n Block.objects.get(site_sport=site_sport,\n dfsday_start=start,\n dfsday_end=end,\n cutoff_time=cutoff_time)\n err_msg = 'a %s scheduled block already exists' % site_sport.name\n logger.warning(err_msg)\n raise self.BlockExistsException(err_msg)\n except Block.DoesNotExist:\n pass\n\n # create it\n block = Block.objects.create(site_sport=site_sport,\n dfsday_start=start,\n dfsday_end=end,\n cutoff_time=cutoff_time)\n return block",
"def _create_table(self, table_name):\n raise NotImplementedError()",
"def create_body(self):\n for pos in STARTING_POS:\n self.add_fragment(pos)",
"def create_table(self, h5_file):\n if self.verbose:\n print \"Creating and populating\", self.file_prefix, self.type, \"table\"\n group_exists = False\n for x in h5_file:\n if x._v_name == self.file_prefix:\n group_exists = True\n group = x\n if not group_exists:\n group = h5_file.create_group(\"/\", self.file_prefix, self.file_prefix)\n table_def = {}\n for k in self.data_format:\n table_def[k] = self.data_format[k][1]\n table = h5_file.create_table(group, self.type, table_def)\n individual = table.row\n for x in self.read():\n for y in x:\n individual[y] = x[y]\n individual.append()\n table.flush()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return index data (an array of [objectId, chunkId, subChunkId] arrays). This only works on partitioned tables and is only supposed to be used with the director table. If chunkId is None then index data for all chunks is returned, otherwise only for the specified chunkId (must be an integer number). The optional parameter columns can be used to specify a sequence of three column names for objectId, chunkId, and subChunkId (in that order); by default ("objectId", "chunkId", "subChunkId") is used.
|
def getIndex(self, dbName, tableName, chunkId=None, columns=None):
if columns is None:
columns = "objectId,chunkId,subChunkId"
else:
columns = ','.join(columns)
if chunkId is None:
resource = dbName + '/tables/' + tableName + '/index'
else:
resource = dbName + '/tables/' + tableName + '/chunks/' + str(chunkId) + '/index'
result = self._requestJSON('dbs', resource, params=dict(columns=columns))
return self._getKey(result, 'rows')
|
[
"def _chunk_index_dataframe(data_path: str, amount_chunks: int, asset_id_col: str) -> Generator[\n Tuple[int, int], None, None]:\n\n # reading in the index to make ranges for reading in the dataframe\n asset_index = pd.read_csv(data_path, usecols=[asset_id_col], dtype={asset_id_col: str})\n asset_index['range_index'] = asset_index.index\n\n # seeing if index is sorted\n if (asset_index[asset_id_col] >= asset_index[asset_id_col].shift(1)).sum() != asset_index.shape[0] - 1:\n raise ValueError('The index column is not sorted. \\n Sort the index and rewrite to the file')\n\n # aggregating the min and max values of the index\n ranges = asset_index.groupby(asset_id_col).range_index.agg([min, max])\n del asset_index\n\n chunk_len = int(ranges.shape[0] / amount_chunks) + 1\n for i in range(0, ranges.shape[0], chunk_len):\n chunk = ranges.iloc[i:i + chunk_len]\n yield chunk['min'].min(), chunk['max'].max()",
"def get_chunks(self, n_chunks: int | None = None) -> Iterable[Column]:",
"def retrieve_samples_by_index():\n # grab sid, n and N\n sid = request.form['sid']\n i_start = int(request.form['i_start'])\n i_end = int(request.form['i_end'])\n # Retrieve cached dataframe and grab a chunk from it\n chunker = EegChunker()\n chunk_df = chunker.chunk_by_index(sid, i_start, i_end)\n\n response_data = {\n \"eeg_chunk\": chunk_df.to_json()\n }\n return make_response(jsonify(response_data))",
"def index(self):\n if self.query['queryType'] == 'scan':\n if not self.query.get('columns') or '__time' in self.query['columns']:\n return ['__time']\n return []\n if self.query['queryType'] in {'groupBy', 'topN', 'timeseries'}:\n index_fields = [] if self.query['granularity'] == 'all' else ['timestamp']\n if self.query['queryType'] == 'groupBy':\n return index_fields + self.query['dimensions']\n elif self.query['queryType'] == 'topN':\n return index_fields + [self.query['dimension']]\n elif self.query['queryType'] == 'timeseries':\n return index_fields",
"def build_indices(self, columns: Iterable[str]):\n if self.label is None:\n return self\n\n new_indices = {}\n for col in columns:\n possible_values: Set[str] = set()\n col_in_partition = False\n for df in self.data.values():\n\n if col in df:\n possible_values = possible_values | set(df[col].dropna().unique())\n col_in_partition = True\n\n if (self.label is not None) and (not col_in_partition):\n raise RuntimeError(\n \"Column `{corrupt_col}` could not be found in the partition `{partition_label}` \"\n \"with tables `{tables}`. Please check for any typos and validate your dataset.\".format(\n corrupt_col=col,\n partition_label=self.label,\n tables=sorted(self.data.keys()),\n )\n )\n\n # There is at least one table with this column (see check above), so we can get the dtype from there. Also,\n # shared dtypes are ensured to be compatible.\n dtype = list(\n meta.field(col).type\n for meta in self.table_meta.values()\n if col in meta.names\n )[0]\n new_index = ExplicitSecondaryIndex(\n column=col,\n index_dct={value: [self.label] for value in possible_values},\n dtype=dtype,\n )\n if (col in self.indices) and self.indices[col].loaded:\n new_indices[col] = self.indices[col].update(new_index)\n else:\n new_indices[col] = new_index\n\n return self.copy(indices=new_indices)",
"def find(self, **kwargs) -> list[tuple[int, int]]:\n result = []\n for field, value in kwargs.items():\n value = self._normalize_index(value)\n try:\n result.extend(self._index_data[field][value])\n except KeyError:\n pass\n return result",
"def split_dataset_by_indices():",
"def indexes(\n ctx,\n path,\n tables,\n aux,\n nl,\n arrays,\n csv,\n tsv,\n no_headers,\n table,\n fmt,\n json_cols,\n load_extension,\n):\n sql = \"\"\"\n select\n sqlite_master.name as \"table\",\n indexes.name as index_name,\n xinfo.*\n from sqlite_master\n join pragma_index_list(sqlite_master.name) indexes\n join pragma_index_xinfo(index_name) xinfo\n where\n sqlite_master.type = 'table'\n \"\"\"\n if tables:\n quote = sqlite_utils.Database(memory=True).quote\n sql += \" and sqlite_master.name in ({})\".format(\n \", \".join(quote(table) for table in tables)\n )\n if not aux:\n sql += \" and xinfo.key = 1\"\n ctx.invoke(\n query,\n path=path,\n sql=sql,\n nl=nl,\n arrays=arrays,\n csv=csv,\n tsv=tsv,\n no_headers=no_headers,\n table=table,\n fmt=fmt,\n json_cols=json_cols,\n load_extension=load_extension,\n )",
"def xrootdChunks(self, dbName):\n _log.debug('list chunks in xrootd: %s', dbName)\n result = self._requestJSON('xrootd', 'dbs/' + dbName, method='GET')\n return self._getKey(result, 'chunkId')",
"def __get_index_map(sample_ids: List[str], sample_block_count: int,\n sql_ctx: SQLContext) -> Dict[str, List[str]]:\n\n assert check_argument_types()\n\n sample_id_df = sql_ctx.createDataFrame([Row(values=sample_ids)])\n make_sample_blocks_fn = SparkContext._jvm.io.projectglow.transformers.blockvariantsandsamples.VariantSampleBlockMaker.makeSampleBlocks\n output_jdf = make_sample_blocks_fn(sample_id_df._jdf, sample_block_count)\n output_df = DataFrame(output_jdf, sql_ctx)\n output_df.printSchema()\n index_map = {r.sample_block: r.values for r in output_df.collect()}\n\n assert check_return_type(index_map)\n return index_map",
"def get_columns(self, columns, with_index=True):\n if self.index_name and with_index:\n columns = [self.index_name] + [c for c in columns if c != self.index_name]\n return self[:, columns]",
"def collect_indexes(df, columns):\n ### look for unused columns to pivot around\n data_used = columns\n data_columns = df.columns.values\n data_index = [x for x in data_columns if x not in data_used]\n\n return(data_index)",
"def _get_ds_indexes(ds_path):\n with xr.open_dataset(ds_path) as ds:\n return ds.indexes",
"def get_chunk_results(self, chunk_keys: List[str]) -> List:\n raise NotImplementedError",
"def init_chunks(metadata):\n return Table(\"chunks\", metadata,\n Column(\"id\", Integer, primary_key=True),\n Column(cn.chunk_bx, Integer),\n Column(cn.chunk_by, Integer),\n Column(cn.chunk_bz, Integer),\n Column(cn.chunk_ex, Integer),\n Column(cn.chunk_ey, Integer),\n Column(cn.chunk_ez, Integer),\n Column(cn.chunk_tag, Text))",
"def chunks(self, dbName, tableName):\n _log.debug('get chunks, table: %s.%s', dbName, tableName)\n resource = dbName + '/tables/' + tableName + '/chunks'\n result = self._requestJSON('dbs', resource)\n return self._getKey(result, 'chunkId')",
"def indexing_to_chunk_indices(output_chunk):\n input_indices = [] # index in the chunk of the mutable tensor\n value_indices = [] # index in the chunk of the assigned value\n for d, s in zip(output_chunk.op.indexes, output_chunk.op.inputs[0].shape):\n # expand the index (slice)\n idx = np.r_[slice(*d.indices(s)) if isinstance(d, slice) else d]\n input_indices.append(idx)\n if not isinstance(d, Integral):\n value_indices.append(np.arange(len(idx)))\n return input_indices, value_indices",
"def get_index_list(cls, column_header_string, volume_type):\n header_column_index_list = {}\n header_column_index_list[ID] = column_header_string.index(ID)\n header_column_index_list[USERNAME] = column_header_string.index(USERNAME)\n header_column_index_list[DATACENTER] = column_header_string.index(DATACENTER)\n header_column_index_list[STORAGE_TYPE] = column_header_string.index(STORAGE_TYPE)\n header_column_index_list[CAPACITY_GB] = column_header_string.index(CAPACITY_GB)\n header_column_index_list[BYTES_USED] = column_header_string.index(BYTES_USED)\n header_column_index_list[IP_ADDR] = column_header_string.index(IP_ADDR)\n if volume_type == FILE:\n header_column_index_list[MOUNT_ADDR] = column_header_string.index(MOUNT_ADDR)\n header_column_index_list[NOTES] = column_header_string.index(NOTES)\n return header_column_index_list",
"def getIndex(self, *args):\n return _coin.SoEngineOutputData_getIndex(self, *args)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Reset the chunk cache (i.e. empty the chunks list) for the specified database name.
|
def resetChunksCache(self, dbName):
_log.debug('reset chunk cache: %s', dbName)
# resource URL
resource = dbName + '/chunks/cache'
result = self._requestJSON('dbs', resource, method='PUT')
|
[
"def reset():\n global GROUPS, NODES, PIPES, JOBS, _ID\n GROUPS = {}\n NODES = {}\n PIPES = {}\n JOBS = {}\n _ID = count(1)\n logger.info(\"Cleared cache and reset counter.\")",
"def reset_mysql_db(self, db_name=None):\n self._mysql_db = self._get_mysql_connection(db_name=db_name)",
"def reset(self):\r\n self._cache_locks(self.obj.lock_storage)\r\n self.cache_lock_bypass(self.obj)",
"def mixed_reset(self):\n if self.repository.reset_head():\n self.index.reset(self.repository.head)",
"def clear_caches(self):",
"def _reset_database(self):\r\n self._delete_tables()\r\n self._create_tables()",
"def dropDatabase(self, name):\n # simple enough\n yield self.place(\"DROP DATABASE {};\".format(name))\n # all done\n return",
"def deterfresh_manager(root, db_name):\n\n if root:\n if db_name in [\"predicted_missions\", \"missions\"]:\n mission_manager.refresh_memebers()\n elif db_name == \"emotions\":\n emotion_manager.refresh_members()\n else:\n mission_manager.refresh_members()",
"def reset_grid_data(self, name: str = None):\n if name is None:\n for _, v in self._get_grids().items():\n _reset_to_odata(v)\n else:\n _reset_to_odata(self._get_grid(name))",
"def clear_design_cache():\n sqlcmd = 'delete from design_cache;'\n print sqlcmd\n cldb.execute(sqlcmd)\n cldb.commit()",
"def reset(self):\n self.current_shard = None\n self.current_shard_n = None\n self.current_offset = None",
"def reset_database():\n if os.path.exists(testinit.database_file):\n os.remove(testinit.database_file)\n shutil.copy(testinit.clean_db, testinit.database_file)",
"def xrootdChunks(self, dbName):\n _log.debug('list chunks in xrootd: %s', dbName)\n result = self._requestJSON('xrootd', 'dbs/' + dbName, method='GET')\n return self._getKey(result, 'chunkId')",
"def clear_data():\n from bempp.api.utils import pool\n\n pool.execute(_clear_data_worker)",
"def clear():\n\n print(\"Clearing redis cache that is specified in the config\")\n redis_conn.flushdb()",
"def reset_refresh():\n dcterms = rdflib.Namespace('http://purl.org/dc/terms/')\n # Reset the db\n reset_database()\n # Get db connection and cursor\n conn, c = connect_database()\n # Check we have rdf, else download\n if not os.isdir(RDF_CATALOG_PATH):\n download_index_file()\n # Go through all rdf files\n print(\"Parsing RDF files. If this process is stopped, the progress is\" +\n \" lost.\")\n for index, directory in \\\n tqdm(list(enumerate(os.listdir(RDF_CATALOG_PATH)))):\n rdf_file_name = RDF_CATALOG_PATH + '/' + directory + '/pg' +\\\n directory + '.rdf'\n g = rdflib.Graph()\n try:\n g.load(rdf_file_name)\n except Exception:\n continue\n # Get the title from rdf file\n if (None, dcterms.title, None) not in g:\n continue\n title = g.objects(None, dcterms.title).next()\n the_id = directory\n # Put title and id in db\n c.execute('''INSERT ON CONFLICT IGNORE\n INTO books (id, title, html_file_name, pdf_file_name, url)\n VALUES (?, ?, ?, ?, ?)''',\n (the_id, title.lower(), '', '', ''))\n if index > 5000 and index % 5000 == 0:\n c.commit()\n print(\"Processed \" + index)\n # Commit the query\n conn.commit()",
"def hard_reset(self):\n if self.repository.reset_head():\n self.index.reset(self.repository.head)\n self.working_directory.reset(self.index)",
"def reset_memory(self):\n global replay_states, replay_actions, replay_rewards, replay_next_states, replay_return_from_states\n del replay_states[:], replay_actions[:], replay_rewards[:], replay_next_states[:], replay_return_from_states[:]",
"def _safeReplace(self, controlDb, name):\n if self._dbExists(controlDb, name):\n temp = '_old_%s_%s' % (name, os.urandom(6).encode('hex'))\n bouncerDb = self._getBouncerConnection()\n ccu = controlDb.cursor()\n for x in range(5):\n self._doBounce(bouncerDb, \"KILL \" + name)\n try:\n controlDb.runAutoCommit(ccu.execute,\n 'ALTER DATABASE \"%s\" RENAME TO \"%s\"' % (name, temp))\n break\n except CursorError, err:\n if 'is being accessed by other users' in str(err):\n time.sleep(1)\n continue\n raise\n yield temp\n try:\n self._doBounce(bouncerDb, \"RESUME \" + name)\n bouncerDb.close()\n except:\n log.exception(\"Failed to resume database %s; continuing:\", name)\n else:\n yield None"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return the list of service names.
|
def services(self):
_log.debug('get service list')
result = self._requestJSON('services', '')
return self._getKey(result, 'name')
|
[
"def service_names(self):\n return self.services.keys()",
"def get_services_names(self):\n return self._speakers[0].get_services_names()",
"def getServiceNames(self):\n self.send_getServiceNames()\n return self.recv_getServiceNames()",
"def CustomServiceNames(self) -> ServiceNameCollection:",
"def list_services(ctx):\n\n ctx.respond(ctx._(\"I am running: {services}\").format(\n services=\", \".join(ctx.bot.services))\n )",
"def get_services(self):\n xpath = [\"Services\", \"Service\"]\n return self.find_anywhere(xpath)",
"def __get_pod_service_list(pod_items):\n out_names = set()\n for pod_item in pod_items:\n if pod_item.spec.service_account:\n out_names.add(pod_item.spec.service_account)\n else:\n out_names.add(pod_item.metadata.name)\n return out_names",
"def services(self):\n return self.__services",
"def get_services(self, names: typing.List[str] = None) -> typing.List[ServiceInfo]:\n query = None\n if names is not None:\n query = {'names': ','.join(names)}\n resp = self._request('GET', '/v1/services', query)\n return [ServiceInfo.from_dict(info) for info in resp['result']]",
"def service_completer(ctx, args, incomplete) -> List:\n result = [(str(svc.uid)) for svc in service_registry.values()]\n result.extend(svc.name for svc in service_registry.values())\n return result",
"def service_types(self):\n service_types = set()\n for e in self._delegate_tools.get_registry()['SERVICES']:\n service_types.add(e['service_type'])\n return list(service_types)",
"def _get_workload_service_names(cls):\n raise NotImplementedError(\"No wokload service names defined.\")",
"def get_all_local_services(self):\n return self._services",
"def get_availables_services(self):\r\n self._service_locator.get_availables_services()",
"def list_service_usernames(args):\n usernames = get_usernames_for_passwords()\n action_set({'usernames': usernames or []})",
"def getServicesList( self ):\n\n res = self.rsS.getServicesList()\n if not res[ 'OK' ]:\n raise RSSException, where( self, self.getServicesList ) + \" \" + res[ 'Message' ]\n\n return res",
"def get_services(self):\n\n # try to get services\n try:\n\n # get services\n command = str('kubectl get services')\n subprocess.call(command.split())\n\n # handle exception\n except:\n\n # raise Exception\n raise Exception('I could not get the list of services')",
"def _get_services_container_names(self):\n services = {}\n sellables = set(os.listdir(Two1Composer.SERVICES_DIR)).intersection(\n set(Two1Composer.GRID_SERVICES))\n for service in sellables:\n services[service] = \"sell_\" + service\n return services",
"def services_all(ctx):\n ctx.run(KUBERNETES_GET_SERVICES_ALL_CMD)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return service state. This method returns a string describing the current service state; currently defined states are "active" and "stopped".
|
def serviceState(self, service):
_log.debug('get service state: %s', service)
result = self._requestJSON('services', service)
return self._getKey(result, 'state')
|
[
"def status(self):\n if self.state == service_states.SHUTTING_DOWN or \\\n self.state == service_states.SHUT_DOWN or \\\n self.state == service_states.UNSTARTED or \\\n self.state == service_states.WAITING_FOR_USER_ACTION:\n pass\n elif self._check_daemon('slurmd'):\n self.state = service_states.RUNNING\n self.num_restarts = 0 # Reset the restart counter once we're running\n elif self.state != service_states.STARTING:\n self.state = service_states.ERROR\n log.error(\"Slurm error: slurmd not running; setting service state \"\n \"to {0}\".format(self.state))\n if self.max_restarts > self.num_restarts:\n self.num_restarts += 1\n log.debug(\"Automatically trying to restart slurmd (attempt {0}/{1}\"\n .format(self.num_restarts, self.max_restarts))\n self.start()\n return self.state",
"def _state_status(self, bot_model):\n if bot_model.state:\n prefix = \"State\"\n suffix = \":\"\n state = \"\\n%s\" % bot_model.state\n else:\n prefix = \"No state\"\n suffix = \".\"\n state = \"\"\n update = bot_model.last_state_update_time\n if update:\n last_update = \"(last state update %s)\" % update\n else:\n last_update = \"(state was never updated)\"\n return \"%s for %s %s%s%s\" % (\n prefix, bot_model.name, last_update, suffix, state\n )",
"def str_state(self):\n return self.IMAGE_STATES[int(self.state)]",
"def current_state():\n current_state = app_manager.current_status()\n click.echo(current_state)",
"def state(self):\n return self._state_env.state",
"def _get_systemd_svc_state(self, svc):\n cmd = 'sudo systemctl show ' + svc\n ret, stdout, stderr = self.cli_cmd_sync(cmd, shell=True)\n\n if ret != 0:\n logger.debug('Error executing systemctl show command, code: %d' % ret)\n return None, None\n\n load_state = None\n active_state = None\n\n lines = [x.strip() for x in stdout.split('\\n')]\n for line in lines:\n parts = line.split('=', 2)\n if len(parts) < 2:\n continue\n\n cmd, val = [x.strip().lower() for x in parts]\n if cmd == 'loadstate':\n load_state = val\n if cmd == 'activestate':\n active_state = val\n return load_state, active_state",
"def get_current_state(self):\n s = RobotState()\n c_str = self._g.get_current_state()\n conversions.msg_from_string(s, c_str)\n return s",
"def get_state(self):\n return self._skuld.cmd(SkuldCmd(name='get_state',\n args=None, block=True))",
"def get_raw_service_name(self):\n return self.state['service_name']",
"def state_string(self):\n return SupvisorsStates._to_string(self.state)",
"def GetStates(self):\n return self.system.States",
"def get_state(self):\n \n return self._instance.state",
"def state(self) -> str:\n return IssueStates[self.data['state'].upper()]",
"def state(self):\n if self.device.vacuum_status is not None:\n return STATE_CODE_TO_STATE[self.device.vacuum_status]",
"def state(self):\n if self.device.vacuum_status is not None and self.device.is_available == True:\n return STATE_CODE_TO_STATE[self.device.vacuum_status]",
"def state_description(self):\n return self._state_description",
"def state(self):\n return self._state.value",
"def rls_state(self) -> str:\n return self.run_device_command(\"rls-state\")[0]",
"def current_operation(self):\n if self.device.mode == 'cool':\n return STATE_COOL\n elif self.device.mode == 'heat':\n return STATE_HEAT\n elif self.device.mode == 'range':\n return STATE_AUTO\n elif self.device.mode == 'off':\n return STATE_OFF\n else:\n return STATE_UNKNOWN"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return the list of database names known to xrootd.
|
def xrootdDbs(self):
_log.debug('get xrd db list')
result = self._requestJSON('xrootd', 'dbs')
return self._getKey(result, 'name')
|
[
"def list_database(db=None):\n if db is None:\n return CONNECTION.get_connection().database_names()\n return CONNECTION.get_connection()[db].collection_names()",
"def databases(self):\n _log.debug('get database list')\n result = self._requestJSON('dbs', '')\n return self._getKey(result, 'name')",
"def databases():\n\tg.db = mysqladm.core.db_connect()\n\n\t## Load servers\n\trows = mysqladm.databases.get_all_databases(cmd_line=True)\n\t\n\tfor row in rows:\n\t\tprint(row['shortserver'] + '/' + row['name'])",
"def get_glue_database_names(self):\n try:\n self.response = self.glue_client.get_databases()\n database_names = []\n for idx, i in enumerate(self.response['DatabaseList']):\n database_names.append(self.response['DatabaseList'][idx]['Name'])\n return database_names\n except Exception as e:\n print(e)",
"def listDB(self):\n # Responses: list of db names\n return self.get(\"/_all_dbs\", descr='listDB').addCallback(\n self.parseResult)",
"def _get_db_names(self, dbs, strict=True):\r\n dbs = utils.coerce_string_to_list(dbs)\r\n db_names = [utils.get_name(db) for db in dbs]\r\n if strict:\r\n good_dbs = self.instance.list_databases()\r\n good_names = [utils.get_name(good_db) for good_db in good_dbs]\r\n bad_names = [db_name for db_name in db_names\r\n if db_name not in good_names]\r\n if bad_names:\r\n bad = \", \".join(bad_names)\r\n raise exc.NoSuchDatabase(\"The following database(s) were not \"\r\n \"found: %s\" % bad)\r\n return db_names",
"def list_databases(self):\n\n _conn = self.get_mongo_client()\n return [i for i in _conn.list_databases()]",
"def print_database_names(self) -> None:\n n = self._database_connection.database_names()\n print(n)",
"def get_databases(verbose=True):\n from balsam import django_config\n from balsam.django_config.db_index import refresh_db_index\n from ipywidgets import interact\n import os\n databasepaths = []\n try:\n databasepaths.extend(refresh_db_index())\n if verbose:\n print(f'There are {len(databasepaths)} Balsam databases available:')\n for i,db in enumerate(databasepaths):\n print(f'{i}: {db}')\n except Excpetion as e:\n print('🛑 Exception caught during balsam.django_config.db_index.refresh_db_index:')\n print(e, '\\n')\n return databasepaths",
"def get_databases():\n try:\n database_probe = DatabaseProbe()\n result_set = database_probe.execute_query(\"SELECT * FROM SYS.DATABASES;\")\n click.echo(result_set)\n return result_set\n except Exception as e:\n click.secho(e, bold=True, fg=\"red\")\n finally:\n database_probe.dispose()",
"def all_databases(self):\n return \"\"\"--all-databases\"\"\"",
"def get_databases_list(self):\n databases_lst = {} # {instance: list of databases}\n for instance in self:\n db_names = [db_info.name\n for db_info in instance.instance_database_ids]\n databases_lst.update({\n instance.name: db_names\n })\n return databases_lst",
"def ls_dbs(dbUsername=config[\"db_server_root_username\"], dbPassword=config[\"db_server_root_password\"], dbHost=config[\"db_server\"]):\n mysqlDbs = get_dbs(dbUsername, dbPassword, dbHost)\n print(\"\\n\".join(mysqlDbs))\n print(\"DONE\")",
"def list_databases(middcourses_only='true'):\n\n if middcourses_only == 'true':\n query = 'psql -t -c \"select datname from pg_database where datname like \\'middcourses%\\'\"'\n else:\n query = 'psql -t -c \"select datname from pg_database\"'\n\n databases = local(query, capture=True)\n databases = [db.strip(' ') for db in databases.split('\\n')]\n\n puts(blue('Databases:'))\n for db in databases:\n puts(indent(db, spaces=2))",
"def list_dbs_instances():\n from DAS.core.das_mapping_db import DASMapping\n dasconfig = das_readconfig()\n dasmapping = DASMapping(dasconfig)\n return dasmapping.dbs_instances()",
"def get_db_families(self):\n return [\"mysql5.1\", \"mysql5.5\", \"mysql5.6\",\n \"oracle-ee-11.2\", \"oracle-ee-12.1\",\n \"oracle-se-11.2\", \"oracle-se-12.1\",\n \"oracle-se1-11.2\", \"oracle-se1-12.1\",\n \"postgres9.3\", \"postgres9.4\",\n \"sqlserver-ee-10.50\", \"sqlserver-ee-11.00\",\n \"sqlserver-ex-10.50\", \"sqlserver-ex-11.00\",\n \"sqlserver-se-10.50\", \"sqlserver-se-11.00\",\n \"sqlserver-web-10.50\", \"sqlserver-web-11.00\"]",
"def cli_cosmosdb_database_list(client):\n return list(client.ReadDatabases())",
"def showDatabases(self):\n self.databasesList.addItems(self.dbMan.getListNamesDatabases())",
"def databases(self) -> dict:\n return self.config[\"databases\"]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the list of chunks for a given database which are known to xrootd.
|
def xrootdChunks(self, dbName):
_log.debug('list chunks in xrootd: %s', dbName)
result = self._requestJSON('xrootd', 'dbs/' + dbName, method='GET')
return self._getKey(result, 'chunkId')
|
[
"def chunks(self, dbName, tableName):\n _log.debug('get chunks, table: %s.%s', dbName, tableName)\n resource = dbName + '/tables/' + tableName + '/chunks'\n result = self._requestJSON('dbs', resource)\n return self._getKey(result, 'chunkId')",
"def xrootdDbs(self):\n _log.debug('get xrd db list')\n result = self._requestJSON('xrootd', 'dbs')\n return self._getKey(result, 'name')",
"def get_bin_chunks(self, index):\n\n log.debug(\"get_bin_chunks(%d)\" % index)\n ptm = self.ptm\n mstate = ptm.cache.mstate\n dbg = self.ptm.dbg\n\n #ptm.mutex_lock(mstate)\n\n b = ptm.bin_at(mstate, index+1)\n if b == 0: # Not initialized yet\n return []\n\n p = mc.malloc_chunk(\n ptm, \n b, \n inuse=False, \n debugger=dbg,\n tcache=False,\n fast=False,\n allow_invalid=True)\n\n addresses = []\n while p.fd != int(b):\n addresses.append(p.address)\n p = mc.malloc_chunk(\n ptm, \n ptm.first(p), \n inuse=False, \n debugger=dbg,\n tcache=False,\n fast=False,\n allow_invalid=True)\n\n #ptm.mutex_unlock(mstate)\n\n return addresses",
"def chunks(self):\n for name in self.chunk_names():\n yield self.storage.open(name).read()",
"def get_chunks_list(self):\n return self._chunkFiles",
"def fill_chunks(url, bboxes):\n dframe = pd.DataFrame([bbox.astuple() for bbox in bboxes])\n dframe.columns = [cn.chunk_bx, cn.chunk_by, cn.chunk_bz,\n cn.chunk_ex, cn.chunk_ey, cn.chunk_ez]\n\n tags = [io.fname_chunk_tag(bbox) for bbox in bboxes]\n dframe[cn.chunk_tag] = tags\n\n dframe.index.name = \"id\"\n\n io.write_db_dframe(dframe, url, \"chunks\")",
"def databases(self):\n _log.debug('get database list')\n result = self._requestJSON('dbs', '')\n return self._getKey(result, 'name')",
"def divide(self):\n divided = []\n for dbinfo in self.debuginfo:\n source = dbinfo['debuginfo']['filename']\n exists = False\n for src_infos in divided:\n if len(src_infos) > 0 and src_infos[0]['debuginfo']['filename'] == source:\n src_infos.append(dbinfo)\n exists = True\n break\n if not exists:\n divided.append([dbinfo])\n\n return divided",
"def list_databases(middcourses_only='true'):\n\n if middcourses_only == 'true':\n query = 'psql -t -c \"select datname from pg_database where datname like \\'middcourses%\\'\"'\n else:\n query = 'psql -t -c \"select datname from pg_database\"'\n\n databases = local(query, capture=True)\n databases = [db.strip(' ') for db in databases.split('\\n')]\n\n puts(blue('Databases:'))\n for db in databases:\n puts(indent(db, spaces=2))",
"def get_chunks(self, n_chunks: int | None = None) -> Iterable[Column]:",
"def get_golden_chunk_records(pattern, num_selfplays, iter_num, window_size, num_shard):\n if iter_num <= window_size:\n win_size=(iter_num)*num_selfplays + (window_size-iter_num)\n else:\n win_size=(window_size)*num_selfplays\n print('Train get_golden_chunks at iter = {} has win_size = {}'.format(iter_num, win_size))\n\n return sorted(tf.gfile.Glob(pattern), reverse=True)[:win_size*num_shard]",
"def get_tcache_bin_chunks(self, index):\n\n ptm = self.ptm\n tcache = ptm.cache.tcache\n dbg = self.ptm.dbg\n\n if tcache.entries[index] == 0:\n return []\n # I've seen uninitialized entries[] still holding old data i.e. non-null\n # even though the counts is 0\n if tcache.counts[index] == 0:\n return []\n\n addr = tcache.entries[index] - 2 * ptm.SIZE_SZ\n p = mc.malloc_chunk(ptm, addr, inuse=False, debugger=dbg, allow_invalid=True, tcache=True)\n if not p.initOK: # afaict should not happen in a normal scenario but better be safe\n return []\n\n addresses = []\n while True:\n addresses.append(p.address)\n if p.next == 0x0:\n break\n addr = p.next - 2 * ptm.SIZE_SZ\n p = mc.malloc_chunk(ptm, addr, inuse=False, debugger=dbg, allow_invalid=True, tcache=True)\n if not p.initOK: # same\n return addresses\n \n return addresses",
"def get_database_files():\n # list of rnacentral databases\n DATABASES_DIRECTORY = PROJECT_ROOT.parent / 'consumer' / 'databases'\n return [file for file in (DATABASES_DIRECTORY).glob('*.fasta')]",
"def get_sql_queries_by_database(database, limit=500000, period=3600):\n query = '@fields.datacenter: \"sjc\" AND @fields.environment: \"prod\" AND @context.db_name:\"{}\"'.format(database)\n\n entries = get_log_entries(query, period, limit, index_prefix='logstash-mediawiki-sql')\n\n return tuple(map(normalize_mediawiki_query_log_entry, entries))",
"def get_databases():\n try:\n database_probe = DatabaseProbe()\n result_set = database_probe.execute_query(\"SELECT * FROM SYS.DATABASES;\")\n click.echo(result_set)\n return result_set\n except Exception as e:\n click.secho(e, bold=True, fg=\"red\")\n finally:\n database_probe.dispose()",
"def list_databases(self, instance, limit=None, marker=None):\r\n return instance.list_databases(limit=limit, marker=marker)",
"def get_fast_bin_chunks(self, index):\n\n ptm = self.ptm\n mstate = ptm.cache.mstate\n dbg = self.ptm.dbg\n\n fb_base = int(mstate.address) + mstate.fastbins_offset\n\n p = mc.malloc_chunk(\n ptm,\n addr=fb_base - (2 * ptm.SIZE_SZ) + index * ptm.SIZE_SZ,\n fast=True,\n debugger=dbg,\n allow_invalid=True,\n )\n\n addresses = []\n while p.fd != 0:\n if p.fd is None:\n break\n addresses.append(p.fd)\n p = mc.malloc_chunk(\n ptm, \n p.fd, \n fast=True,\n debugger=dbg,\n allow_invalid=True,\n )\n \n return addresses",
"def listDatabases(path):\n\n\tprint 'Loading file...\\n'\n\tdatabases = []\n\tline_count = 0\n\tsql_file = open(path)\n\tlines = sql_file.readlines()\n\n\tfor line in lines:\n\t\tline_count += 1\n\t\tif line[0:12] == '-- Database:':\n\t\t\t# We've found the beginning of a database\n\t\t\tthe_db = (line_count, extractDbName(line)) # Tuple of first line of this db in the whole file and its name\n\t\t\tdatabases.append(the_db)\n\tsql_file.close()\n\t\n\t# Check to see if there were any databases in the file\n\tif len(databases) == 0:\n\t\tprint 'No databases found (or file contains a single database)'\n\t\texit(0)\n\n\t# Print out the findings and prompt for a number of database to extract\n\ti = 1\n\tfor db in databases:\n\t\tprint \"[%s] %s\" % (str(i), db[1])\n\t\ti += 1\n\t\n\tthe_db = -1\n\twhile the_db < 0 or the_db > len(databases):\n\t\t# Number entered is outside the range of the databases found\n\t\ttry:\n\t\t\tthe_db = int(raw_input('\\nExtract database: '))\n\t\texcept ValueError:\n\t\t\tprint 'Enter the number of a database to extract'\n\tthe_db -= 1\n\textractDatabase(lines, databases, the_db, databases[the_db][1], databases[the_db][0])",
"def get_databases(verbose=True):\n from balsam import django_config\n from balsam.django_config.db_index import refresh_db_index\n from ipywidgets import interact\n import os\n databasepaths = []\n try:\n databasepaths.extend(refresh_db_index())\n if verbose:\n print(f'There are {len(databasepaths)} Balsam databases available:')\n for i,db in enumerate(databasepaths):\n print(f'{i}: {db}')\n except Excpetion as e:\n print('🛑 Exception caught during balsam.django_config.db_index.refresh_db_index:')\n print(e, '\\n')\n return databasepaths"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the value for the given key in the result, throwing an exception if the key is not found. The result can be a dict or a list of dicts; for a list, the returned value is the list of values.
|
def _getKey(result, key):
try:
if isinstance(result, list):
return [obj[key] for obj in result]
else:
return result[key]
except KeyError:
raise ServerResponseError('Missing "%s" key' % key, result)
|
[
"def get_safe_result(result,key,default=None):\r\n\t\tif result is None:\r\n\t\t\treturn default\r\n\t\telif result.has_key(key):\r\n\t\t\treturn result[key]\r\n\t\telse:\r\n\t\t\treturn default",
"def __getitem__(self, key):\n query = select([self.store.c.value]).where(self.store.c.key == key)\n result = self.conn.execute(query).fetchone()\n if result:\n return result['value']\n raise KeyError",
"def get_value(self, key: str) -> Any:\r\n if self.get_index(key) is None:\r\n return None\r\n return self.hash_table[self.get_index(key)][1]",
"def get(self, key: str):\r\n\r\n index = self.hash(key)\r\n\r\n if self.array[index] is None:\r\n return None\r\n else:\r\n # Loop through all the key/value pairs at this index, and find if\r\n # our key exists. If it does, return the value.\r\n\r\n for kvp in self.array[index]:\r\n if kvp[0] == key:\r\n return kvp[1]\r\n\r\n return None",
"def __getitem__( self, key ):\n return self.read( key=key, default=None, raiseOnError=True )",
"def get_or_getattr(self, datasubset, key):\n try:\n if isinstance(datasubset, (list, set, tuple)):\n _, value = List(key, datasubset, self).execute()\n else:\n value = datasubset.get(key)\n except (AttributeError, ValueError):\n if hasattr(datasubset, key):\n value = getattr(datasubset, key)\n else:\n value = None\n return value",
"def get_item(self, key):\n\t\tif not key in self.items: return None\n\t\treturn self.items[ key ]",
"def __getitem__(self, key):\r\n try:\r\n result = super().__getitem__(key)\r\n except KeyError as ex:\r\n result = self.alias(key)\r\n if result is None:\r\n raise ex\r\n return result",
"def getitem(d:dict, k:list):\n # retrieve from a nested dictionary\n # possible to use dict.get() or operator.getitem()\n return functools.reduce(dict.__getitem__, k, d)",
"async def get_at(self, key, uid):\n log.debug(\"[%r] get_at key=%r uid=%r\", self._uid, key, uid)\n try:\n peer = await self._reach(uid)\n except KeyError as exc:\n raise KeyError(key) from exc\n\n out = await self._protocol.rpc(peer, \"value\", pack(key))\n if out[0] == b\"VALUE\":\n value = out[1]\n if hash(value) == key:\n # store it\n @h.transactional\n def add(tr, key, value):\n tr.add(\"QADOM:MAPPING\", key, \"value\", value)\n\n await self._run(add, self._hoply, key, value)\n # at last!\n return value\n else:\n log.warning(\"[%r] received bad value from %r\", peer)\n await self.blacklist(peer)\n return KeyError(key)\n else:\n raise KeyError(key)",
"def find_by_key(\n input_list: List[dict], key: str, value: str\n) -> Optional[Tuple[dict, int]]:\n for element in input_list:\n if element[key] == value:\n return element, input_list.index(element)\n raise ValueError(f\"No Element with {key}={value} found in the list\")",
"def __getitem__(self, key):\n # This is a degenerate case of wait_each(). Construct a tuple\n # containing only this 'key'. wait_each() will yield exactly one (key,\n # value) pair. Return just its value.\n for _, value in self.wait_each((key,)):\n return value",
"def get_value(self, keypath = ''):\n if not self._valid_key(keypath):\n return None\n elif keypath is '':\n return self._data\n key = self._parse_keypath(keypath)\n data = self.dict_for_key(key[:-1], False)\n if data is None:\n return None\n token = key[-1]\n if token in data:\n return data[token]\n return None",
"def cache_get(self, key=None, collection: str = None, target_value_name: str = None):\n try:\n return dict(getattr(self, f'{collection}').find_one({\"_id\": key}))[target_value_name]\n except ConnectionFailure:\n n = 0\n result = None\n while not result or n == 5:\n result = dict(getattr(self, f'{collection}').find_one({\"_id\": key}))[target_value_name]\n n += 1\n return result\n except TypeError:\n return None",
"def keyvalue(dictionary, key):\n\n return dictionary[key]",
"def get(self, key):\n #return none if the item isn't in the cache\n if key not in self.items:\n return None\n\n #retrieve the item from the dictionary\n item = self.items[key]\n\n #move it to the front of the list since it is the\n #most recently accessed item\n self._move_to_head(item)\n return item",
"def get(self, key):\n hash_ = self._hashing(key)\n for i, item in enumerate(self.hashtable[hash_]):\n if item[0] == key:\n return item[1]\n raise KeyError('Key not in hash table.')",
"def safe_get_value(maybe_dict, key: str):\n if isinstance(maybe_dict, dict):\n return maybe_dict.get(key, None)\n return maybe_dict",
"def __getitem__(self, key):\n hash_val = self._hash(key)\n if self.table[hash_val] != self.defVal and (isinstance(self.table[hash_val], tuple) and \n self.table[hash_val][0] == key and\n self.table[hash_val][2] == True):\n return self.table[hash_val][1]\n else:\n key_found = False\n iter_count = 0\n while not key_found:\n if hash_val >= self.capacity:\n hash_val = 0\n if self.table[hash_val] == self.defVal:\n \tbreak\n if self.table[hash_val][0] == key:\n if self.table[hash_val][2] == True:\n return self.table[hash_val][1]\n hash_val += 1\n iter_count += 1\n return self.defVal"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return expanded environment dictionary.
|
def expanded_env_dict():
return generate_expanded_env_dict()
|
[
"def _environment_variables() -> Dict[str, str]:\n return {key: value for key, value in os.environ.items() if _is_encodable(value)}",
"def user_env_as_dict(self):\r\n return {RESOURCE_REGISTRY: self.registry.as_dict(),\r\n PARAMETERS: self.params}",
"def environment(self):\r\n env = {}\r\n if self.PARAM_ENVIRONMENT in self.data:\r\n env_data = self.data[self.PARAM_ENVIRONMENT]\r\n if isinstance(env_data, dict):\r\n env = env_data\r\n else:\r\n env = self.format_parse(env_data,\r\n 'Environment')\r\n\r\n environment_format.default_for_missing(env)\r\n parameters = self.data.get(self.PARAM_USER_PARAMS, {})\r\n env[self.PARAM_USER_PARAMS].update(parameters)\r\n return env",
"def get_module_environment(env=None, function=None):\n result = {}\n if not env:\n env = {}\n for env_src in [env.get(\"__opts__\", {}), env.get(\"__pillar__\", {})]:\n fname = env.get(\"__file__\", \"\")\n physical_name = os.path.basename(fname).split(\".\")[0]\n section = os.path.basename(os.path.dirname(fname))\n m_names = [env.get(\"__virtualname__\")]\n if physical_name not in m_names:\n m_names.append(physical_name)\n for m_name in m_names:\n if not m_name:\n continue\n result.update(\n env_src.get(\"system-environment\", {})\n .get(section, {})\n .get(m_name, {})\n .get(\"_\", {})\n .copy()\n )\n if function is not None:\n result.update(\n env_src.get(\"system-environment\", {})\n .get(section, {})\n .get(m_name, {})\n .get(function, {})\n .copy()\n )\n\n return result",
"def environment_dict(\n self, *, python: PythonExecutable | PythonBuildStandaloneBinary | None = None\n ) -> Mapping[str, str]:\n d = dict(\n PATH=create_path_env_var(self._pex_environment.path),\n PEX_IGNORE_RCFILES=\"true\",\n PEX_ROOT=(\n os.path.relpath(self.pex_root, self._working_directory)\n if self._working_directory\n else str(self.pex_root)\n ),\n **self._pex_environment.subprocess_environment_dict,\n )\n if python:\n d[\"PEX_PYTHON\"] = python.path\n else:\n d[\"PEX_PYTHON_PATH\"] = create_path_env_var(self.interpreter_search_paths)\n return d",
"def _prepare_environment(self):\n env = {'HOME': self._make_mapping(HOME)}\n\n return env",
"def get_env():\n env.output_prefix = False\n run('export | sed -e \"s/declare -x/export/g\"')",
"def env_config():\n\n return {\n k[len(ENVPREFIX) :]: v for k, v in os.environ.items() if k.startswith(ENVPREFIX)\n }",
"def simple_environ(prefix='', env_value='value'):\n return {\n '{0}key'.format(prefix): env_value,\n 'a': 'b',\n }",
"def environment_info(self):\n\n return {\n \"application_environment\": {\n \"framework\": \"pylons\",\n \"env\": dict(os.environ),\n \"language\": \"python\",\n \"language_version\": sys.version.replace('\\n', ''),\n \"application_root_directory\": self.project_root()\n },\n \"client\": {\n \"name\": \"pylons-exceptional\",\n \"version\": __version__,\n \"protocol_version\": EXCEPTIONAL_PROTOCOL_VERSION\n }\n }",
"def inject_env():\n\n return dict(site.config, current_menu=current_menu)",
"def get_etl_env():\n return json.dumps({\n key: value for key, value in os.environ.items() if key.startswith('ETL_') or key.startswith('SQL_')})",
"def get_env_ax():\n value = {k: v for k, v in os.environ.items() if k.startswith(KEY_PRE)}\n value = {k: HIDDEN if k in KEYS_HIDDEN else v for k, v in value.items()}\n return value",
"def find_environment_info(extras: Optional[Dict[str, Any]]=None) -> Dict[str, str]:\n\tif extras is None: extras = {}\n\tmains = {\n\t\t\t'os_release': platform.platform(),\n\t\t\t'hostname': socket.gethostname(),\n\t\t\t'username': getpass.getuser(),\n\t\t\t'python_version': sys.version,\n\t\t\t'shell': os.environ['SHELL'],\n\t\t\t'disk_used': psutil.disk_usage('.').used,\n\t\t\t'disk_free': psutil.disk_usage('.').free,\n\t\t\t'memory_used': psutil.virtual_memory().used,\n\t\t\t'memory_available': psutil.virtual_memory().available,\n\t\t\t'sauronx_hash': GitTools.commit_hash(),\n\t\t\t'environment_info_capture_datetime': datetime.now().isoformat()\n\t}\n\treturn {k: str(v) for k, v in {**mains, **extras}.items()}",
"def format_env_dict(environ, human_readable=True):\n\n if human_readable:\n separator = '\\n'\n else:\n separator = '\\x00'\n return separator.join([\n '{}={}'.format(k, v)\n for k, v in environ.items()\n ])",
"def stringify_env(env):\n return dict(((str(key), str(val)) for key, val in env.items()))",
"def BuildEnv(self, unused_configuration=None):\n return os.environ.copy()",
"def extend_env(extra_env):\n env = os.environ.copy()\n env.update(extra_env)\n return env",
"def environ_parse(env: _Environ = environ) -> dict:\n\n _return = {}\n\n for var in env:\n try:\n _return[var] = json_parse(env[var])\n except JSONDecodeError:\n _return[var] = str(env[var])\n\n return _return"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return dictionary containing all default global options.
|
def default_global_options():
return copy.deepcopy(ASTRALITY_DEFAULT_GLOBAL_SETTINGS)
|
[
"def get_default_options(self):\n return {}",
"def show_default_options(self):\n return copy.deepcopy(dict(must=VW_MUST_OPTIONS, defaults=VW_DEFAULT_OPTIONS))",
"def _RegisteredEnvironmentOptions(self):\n return {}",
"def get_default_state(self) -> dict:\n return {\n k: {k_: v_[\"default\"] for k_, v_ in v.items()}\n for k, v in self.options.items()\n }",
"def _default_options(cls):\n pass",
"def get_global_config(self) -> collections.OrderedDict:\n return self.__global_config",
"def get_options_dict(self):\n d = self.synchronizer.options if self.synchronizer else {}\n d.update(self.extra_opts)\n return d",
"def get_option_cfg(self):\n cfg = {}\n for varname, opt in self.options.items():\n value = opt.get_value()\n default = opt.get_default_value()\n opt.validate(value)\n cfg[varname] = (value, value == default)\n return cfg",
"def config_defaults(self):\n return {\n \"ingredients\": [data_ingredient, builder_ingredient],\n \"run_config\": copy(cd.run_config),\n \"loader_config\": copy(cd.loader_config),\n \"builder_config\": copy(cd.builder_config),\n \"tb_config\": copy(cd.tb_config),\n \"lr_config\": copy(cd.lr_config),\n }",
"def get_defaults(self):\n parser = self.build_parser(options=self._options)\n parsed, _ = parser.parse_known_args([])\n return vars(parsed)",
"def get_default_param_values(cls):\n return dict(\n rename_dict={},\n show_pins=False,\n debug=True,\n power_width_ntr=None,\n )",
"def read_default_ovf_settings():\n return dict(get_config('openvz').getlist('ovf-defaults'))",
"def get_default_param_values(cls):\n return dict(\n rename_dict={},\n show_pins=False,\n debug=False,\n clock_track=1,\n rst_list=None,\n clk_rst_sp=2,\n out_list=None,\n power_width_ntr=None,\n )",
"def getGlobals():\n # type: () -> Dict[String, Any]\n return {}",
"def _load_defaults(self):\n module = self._do_import(self._defaults_module_path)\n self._defaults = {\n k: v for k, v in module.__dict__.items()\n if k.isupper() # ignore anything that doesn't look like a setting\n }",
"def get_default_setting(self) -> Dict[str, Any]:\n return self.default_setting",
"def get_default_param_values(cls):\n return dict(\n nduml=4,\n ndumr=4,\n nsep=0,\n num_track_sep=1,\n io_width=1,\n rename_dict={},\n guard_ring_nf=0,\n show_pins=False,\n )",
"def get_default_service_options(self) -> PrivXAPIResponse:\n response_status, data = self._http_get(\n UrlEnum.HOST_STORE.SETTINGS,\n )\n return PrivXAPIResponse(response_status, HTTPStatus.OK, data)",
"def _merge_configurations(self):\n m = dict()\n m.update(self._default)\n m.update(self._repo)\n m.update(self._user)\n return m"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return path to test config directory.
|
def test_config_directory():
return Path(__file__).parent / 'test_config'
|
[
"def get_test_configuration_path() -> Path:\n return get_project_root() / '.test_configuration'",
"def get_tests_dir_path(): \n fmod_path = ctbto.tests.__path__\n \n test_dir = \"%s/conf_tests\" % fmod_path[0]\n \n return test_dir",
"def get_config_path():\n return _folder + \"/config\"",
"def _get_config_directory():\n try:\n # Assume we are running in the source mmtracking repo\n repo_dpath = dirname(dirname(dirname(dirname(__file__))))\n except NameError:\n # For IPython development when this __file__ is not defined\n import mmtrack\n repo_dpath = dirname(dirname(dirname(mmtrack.__file__)))\n config_dpath = join(repo_dpath, 'configs')\n if not exists(config_dpath):\n raise Exception('Cannot find config path')\n return config_dpath",
"def config_path():\n path = os.path.join(_path, \"log_config.toml\")\n return path",
"def config_dir(self):\n return self.client.fldigi.config_dir()",
"def config_path(self):\n return self.get_config_path(self.base_dir)",
"def get_config_path():\n return os.path.join(\".deploy\", \"config\")",
"def get_config_path():\n return get_project_home() + '/config.json'",
"def get_config_path(base_dir):\n return os.path.join(base_dir, 'config.pickle')",
"def get_configuration_directory():\n basedir = os.environ.get(\"XDG_CONFIG_HOME\",\n os.path.join(os.path.expanduser('~'),\n \".config\"))\n return os.path.join(basedir, \"envprobe\")",
"def data_test_dir():\n return Path(__file__).absolute().parent.parent.parent / \"test_data\"\n # return os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))), \"test_data\")",
"def tests_dir():\n return Path(os.path.realpath(__file__)).parent",
"def configDir(self):\n p = os.path.dirname(self.cctDir())\n return p",
"def custom_config_path():\n return 'tests/test-config/valid-config.yaml'",
"def get_config_filepath():\n tmp = ABSOLUTE_HERE.split(\"/\")\n\n if SCRIPTS_PATH in tmp:\n tmp.remove(SCRIPTS_PATH)\n\n tmp.extend([\"config\", \"config-template.yaml\"])\n\n return \"/\".join(tmp)",
"def test_data_dir():\n # Test dir.\n test_data_dir_ = join(dirname(__file__), __TEST_DATA_SUBDIR)\n return test_data_dir_",
"def get_item_config_path():\n return os.getcwd() + '/config/'",
"def _get_test_template_dir():\n return os.path.join(os.path.dirname(\n os.path.abspath(__file__)), 'test_templates/')",
"def use_test_config():\n os.environ[\"TS_COLAB_CONFIG_DIR\"] = str(PROJECT_ROOT / \"tests\" / \".config\" / \"test\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return path to directory containing several context files.
|
def context_directory(test_config_directory):
return test_config_directory / 'context'
|
[
"def context_path(): # pragma: no cover",
"def get_template_dir(self) -> str:",
"def get_data_files_path():\n return _os.path.dirname(_inspect.getfile(_sys._getframe(1)))",
"def _find_java_web_context(self):\n globPath = os.path.join(self.workingDir, '**')\n results = glob.glob(globPath, recursive=True)\n webContextDir = None\n for r in results:\n if 'WEB-INF' in r:\n webContextDir = r\n if not webContextDir:\n return \"web/\"\n\n webContextDir = webContextDir.split('WEB-INF')[0].replace(self.workingDir, '').lstrip('/')\n\n return webContextDir",
"def get_templates_path():\n module_path = get_module_path()\n templates_path = os.path.join(module_path, TEMPLATES)\n return templates_path",
"def base_directory():\n return os.path.dirname(os.path.realpath(__file__)) + os.path.sep",
"def resources_directory():\n # type: () -> ct.FilePath\n this_directory = os.path.dirname(os.path.abspath(__file__))\n resources_dir = os.path.join(this_directory, '../../resources')\n\n return ct.FilePath(resources_dir)",
"def getSubuserDir():\r\n return os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))))) # BLEGH!\r",
"def configDir(self):\n p = os.path.dirname(self.cctDir())\n return p",
"def get_path() -> str:\n places = os.walk(os.path.abspath(os.path.join(__file__, \"../..\")))\n\n def condition(files):\n return all(file in files for file in needed_files)\n\n return next((path for path, dirs, files in places if condition(files)), None)",
"def get_resource_directory():\n here = os.path.abspath(os.path.dirname(__file__))\n return os.path.join(here, \"resources\")",
"def get_resources_path():\n\n file_dir = os.path.dirname(__file__)\n\n if \"\\\\\" in file_dir:\n file_dir = file_dir.replace(\"\\\\\", \"/\")\n\n return \"{}/resources\".format(file_dir)",
"def get_path(context):\n return context.paths_stack[-1]",
"def __root_directory__(config) :\n path_config = config.get('ContentPaths', {})\n return os.path.realpath(path_config.get('PService', os.path.join(os.environ['HOME'], '.toxaway')))",
"def cctDir(self):\n if self.isMaster:\n p = os.path.dirname(os.path.abspath(self.cctFilePath()))\n else:\n p = os.path.abspath(os.path.join(self.filePath(), \"..\", \"..\", \"..\", \"..\"))\n return p",
"def get_template_dir():\n return os.path.join(get_base_dir(), TEMPLATE_DIR)",
"def template_path(self):\n\n return super().template_path+[os.path.join(os.path.dirname(__file__), \"templates\")]",
"def _locater(app):\n return functools.partial(os.path.join, app.confdir)",
"def menpobench_dir():\n from pathlib import Path # to avoid cluttering the menpo.base namespace\n import os\n return Path(os.path.abspath(__file__)).parent",
"def get_templates_dirs(self):\n return []"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
inserting a campaign element in the queue
|
def insert(self, campaign):
self.queue.append(campaign)
|
[
"def add(self, element):\n\n self.queue.insert(0, element)",
"def push(queue, item):\n queue.append(item)",
"def enqueue(self, element):\n\t\tassert self.capacity < self.max_capacity, \"Capacity exceeded for queue\"\n\t\tself.queue[self.back] = element\n\t\tself.back = (self.back + 1) % self.max_capacity\n\t\tself.capacity += 1",
"def added_to_queue(self, link):",
"def _additem(self):\n\n self.queue.put(self._genitem())",
"def add_to_queue(self, sid, data):\n self.activation_queue.put((sid, data))",
"def test_add_to_queue(self):\n job = self.create_job()\n job.add_to_queue()\n self.verify_adds_to_queue(job.key, 'scheduled', '/api/tasks/send')",
"def _insert(self, entry):\n\n if entry[0] < self._latest_time:\n for i, e in enumerate(self._queue):\n if e[0] > entry[0]:\n #print(\"inserting\", entry)\n self._queue.insert(i, entry)\n return\n #print(\"insert: appending\", entry)\n self._queue.append(entry)\n self._latest_time = entry[0]",
"def enqueue(self, item):\r\n self.lock.acquire()\r\n self.queue.append(item)\r\n self.lock.release()",
"def __enqueue(self, lease):\n self.queue.enqueue(lease)",
"def record(self, item):\n self._queue.put(item)",
"def test_queues_add_item_to_queue_v1(self):\n pass",
"def enqueue_delay(self, actor, delay):\n\t\tself.turn_counter.enqueue_delay(actor, delay)",
"def __add_to_queue(self, _id, url):\n payload = dumps(dict(\n id=str(_id),\n url=url\n ))\n self.chan.basic_publish(\n exchange='',\n routing_key=cfg.settings.mq.queue_name,\n body=payload,\n properties=pika.BasicProperties(\n delivery_mode=2\n )\n )",
"def add_to_queue(self, person, time):\n self.queue.append(person)\n if len(self.queue) == 1:\n person.start_serving(self, time)",
"def add_to_queue(self, data):\n self.registration_queue.put(data)",
"def test_enqueue(self):\n self.fail()",
"def queue(position):\n global _playlist\n collection = get_collection()\n _playlist.append(collection[position])\n log.info(\"Adding : %s\" % collection[position])\n start_player()",
"def _put(self, item):\n if item not in self.queue:\n self.queue.append(item)\n return True\n return False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Loads the datasets, converts their images to the desired size and format, assembles them in one big dataset and saves it in a pickled file before returning it.
|
def load_and_pickle_datasets(augment=False):
subdirs = ['vehicles/GTI_Far',
'vehicles/GTI_Left',
'vehicles/GTI_MiddleClose',
'vehicles/GTI_Right',
'/object-dataset-select',
'non-vehicles/Extras',
'non-vehicles/GTI',
'non-vehicles-additional']
''' 1 if the corresponding element in `subdirs` is a directory with car images, 0 if it is a directory with non-car
images '''
subdirs_y = [1, 1, 1, 1, 1, 0, 0, 0]
dataset_x, dataset_y = [], []
for subdir, y in zip(subdirs, subdirs_y):
path_to_subdir = Params.dataset_base_dir + '/' + subdir
for fname in os.listdir(path_to_subdir):
if not fname.endswith('.png'):
continue
image = cv2.imread(path_to_subdir + '/' + fname)
assert image is not None
image = format_image(image)
dataset_x.append(image)
label = Params.car_label if y == 1 else Params.non_car_label
dataset_y.append(label)
if augment and label == Params.non_car_label:
flipped = np.fliplr(image)
dataset_x.append(flipped)
dataset_y.append(label)
dataset_x, dataset_y = shuffle(dataset_x, dataset_y, random_state=Params.random_seed)
''' Break down the dataset in several pickled files, so they are small enough to be allowed on GitHub;
generate n_intervals+1 pickled files '''
n_intervals = 5
entries_per_file = len(dataset_x) // n_intervals
    counter = 0
    for offset in range(0, len(dataset_y), entries_per_file):
        chunk_x = dataset_x[offset:offset + entries_per_file]
        chunk_y = dataset_y[offset:offset + entries_per_file]
        pickle_fname = Params.pickled_dataset_bname + '-' + str(counter) + '.p'
        pickle.dump((chunk_x, chunk_y), open(pickle_fname, "wb"))
        counter += 1
return dataset_x, dataset_y
|
[
"def _load_data(self, filename):\n\n # Load the pickled data-file.\n data = self._unpickle(filename)\n\n # Get the raw images.\n raw_images = data[b'data']\n\n # Get the class-numbers for each image. Convert to numpy-array.\n cls = np.array(data[b'labels'])\n #pdb.set_trace()\n\n # Convert the images.\n images = self._convert_images(raw_images)\n\n return images, cls",
"def generate_single_files_dataset():\n\toriginal_imgs_picked = None\n\tedgemaps_picked = None\n\tcount = 0\n\t\n\tfor batch_name in os.listdir(settings.PICKED_ORIGINALS_PATH):\n\n\t\toriginal_imgs = np.load(settings.PICKED_ORIGINALS_PATH+batch_name)\n\t\tedgemaps = np.load(settings.PICKED_EDGEMAPS_PATH+batch_name)\n\n\t\tif original_imgs_picked is None and edgemaps is None:\n\t\t\toriginal_imgs_picked = original_imgs\n\t\t\tedgemaps_picked = edgemaps\n\t\telse:\n\t\t\toriginal_imgs_picked = np.concatenate((original_imgs_picked, original_imgs), axis=0)\n\t\t\tedgemaps_picked = np.concatenate((edgemaps_picked, edgemaps), axis=0)\n\n\t\tcount += 1\n\t\tprint(\"{}/1000 is appended\".format(count))\n\n\tnp.save(settings.DATASET_PATH+\"original_images.npy\", original_imgs_picked)\n\tnp.save(settings.DATASET_PATH+\"edgemaps.npy\", edgemaps_picked)",
"def generate_picked_dataset():\n\tfor batch_name in os.listdir(ORIGINALS_PATH):\n\t\tif batch_name in os.listdir(settings.PICKED_ORIGINALS_PATH):\n\t\t\tprint(\"{} done already!\".format(batch_name))\n\t\t\tcontinue\n\t\n\t\toriginal_imgs = np.load(ORIGINALS_PATH + batch_name)\n\t\tedgemaps = np.load(EDGEMAPS_PATH + batch_name)\n\n\t\tassert original_imgs.shape[0] == edgemaps.shape[0]\n\n\t\tnum_imgs = original_imgs.shape[0]\n\t\tpicked_indices = np.random.choice(num_imgs, NUM_IMAGES_PICKED, replace=False)\n\t\toriginal_imgs_picked = original_imgs[picked_indices].transpose(0, 2, 3, 1)\n\t\tedgemaps_picked = edgemaps[picked_indices]\n\n\t\tnp.save(settings.PICKED_ORIGINALS_PATH+batch_name, original_imgs_picked)\n\t\tnp.save(settings.PICKED_EDGEMAPS_PATH+batch_name, edgemaps_picked)\n\n\t\tprint(\"{} done!\".format(batch_name))",
"def save_images(data_loaders, train_labels):\n training_images_rgb_folder = os.path.join(os.path.abspath(__file__), '..', 'training_images_rgb')\n training_images_grayscale_folder = os.path.join(os.path.abspath(__file__), '..', 'training_images_grayscale')\n test_images_rgb_folder = os.path.join(os.path.abspath(__file__), '..', 'test_images_rgb')\n test_images_grayscale_folder = os.path.join(os.path.abspath(__file__), '..', 'test_images_grayscale')\n\n if not os.listdir(training_images_rgb_folder):\n index = 0\n for sample in data_loaders[\"train\"].dataset.imgs:\n image_name = f\"Image_{index}_covid{train_labels[index].numpy()}.png\"\n plt.imsave(os.path.join(training_images_rgb_folder, image_name), sample[0])\n plt.imsave(os.path.join(training_images_grayscale_folder, image_name), sample[0], cmap='gray')\n print(f\"Saved {image_name}\")\n index += 1\n\n if not os.listdir(test_images_rgb_folder):\n index = 0\n for sample in data_loaders[\"test\"].dataset.imgs:\n image_name = f\"Image_{index}.png\"\n plt.imsave(os.path.join(test_images_rgb_folder, image_name), sample[0])\n plt.imsave(os.path.join(test_images_grayscale_folder, image_name), sample[0], cmap='gray')\n print(f\"Saved {image_name}\")\n index += 1",
"def load_animals(num_train_ex_per_class=300,\n num_test_ex_per_class=100,\n num_valid_ex_per_class=0,\n classes=None):\n\n num_channels = 3\n img_size = 299\n\n # The 2 if-else statements are just creating npz filename strings\n # that contains:\n # name of class\n # number of training instances\n # number of testing instances\n # number of validation instances\n if num_valid_ex_per_class == 0:\n valid_str = ''\n else:\n valid_str = '_valid-%s' % num_valid_ex_per_class\n\n if classes is None:\n classes = ['dog', 'cat', 'bird', 'fish', 'horse',\n 'monkey', 'zebra', 'panda', 'lemur', 'wombat']\n data_filename = os.path.join(BASE_DIR,\n 'dataset_train-%s_test-%s%s.npz'\n % (num_train_ex_per_class,\n num_test_ex_per_class,\n valid_str))\n else:\n data_filename = os.path.join(BASE_DIR,\n 'dataset_%s_train-%s_test-%s%s.npz'\n % ('-'.join(classes),\n num_train_ex_per_class,\n num_test_ex_per_class,\n valid_str))\n\n num_classes = len(classes)\n num_train_examples = num_train_ex_per_class * num_classes\n num_test_examples = num_test_ex_per_class * num_classes\n num_valid_examples = num_valid_ex_per_class * num_classes\n num_ex_per_class = num_train_ex_per_class+num_valid_ex_per_class+num_test_ex_per_class\n num_examples = num_train_examples + num_test_examples + num_valid_examples\n\n\n if os.path.exists(data_filename):\n print ('Loading data from pre-existed .npz file...')\n f = np.load(data_filename)\n X_train = f['X_train']\n X_test = f['X_test']\n Y_train = f['Y_train']\n Y_test = f['Y_test']\n X_valid = f['X_valid'] if 'X_valid' in f else None\n Y_valid = f['Y_valid'] if 'Y_valid' in f else None\n\n else:\n print('Creating .npz file from raw images...')\n # initialization\n X = np.zeros([num_examples, img_size, img_size, num_channels])\n Y = np.zeros([num_examples])\n\n for class_idx, class_string in enumerate(classes):\n print('class: %s' % class_string)\n i = 0\n num_filled = 0\n # no. of images that has been loaded in X_train\n while num_filled < num_ex_per_class:\n img_path = os.path.join(BASE_DIR,\n '%s/%s_%s.JPEG'%(class_string,\n class_string,\n i))\n if os.path.exists(img_path):\n fill(X,\n Y,\n num_filled+(num_ex_per_class*class_idx),\n class_idx,\n img_path,\n img_size)\n num_filled += 1\n i += 1\n\n X, Y = shuffle(X,Y)\n\n X_train = X[0:num_train_examples,...]\n Y_train = Y[0:num_train_examples, ...]\n X_test = X[num_train_examples:num_train_examples+num_test_examples,...]\n Y_test = Y[num_train_examples:num_train_examples+num_test_examples,...]\n X_valid = X[num_train_examples+num_test_examples:-1,...]\n Y_valid = Y[num_train_examples+num_test_examples:-1,...]\n\n # preprocess input with Inception V3 config\n X_train = preprocess_input(X_train)\n X_test = preprocess_input(X_test)\n X_valid = preprocess_input(X_valid)\n\n np.savez_compressed(data_filename,\n X_train=X_train,\n Y_train=Y_train,\n X_test=X_test,\n Y_test=Y_test,\n X_valid=X_valid,\n Y_valid=Y_valid)\n\n train = DataSet(X_train, Y_train) # see def of DataSet in influence/dataset\n test = DataSet(X_test, Y_test)\n validation = DataSet(X_valid, Y_valid) if X_valid and Y_valid else None\n\n # base: base utilities of tensorflow for loading datasets\n return base.Datasets(train=train, validation=validation, test=test)",
"def get_data_loader(image_type, image_dir='lrtohr', image_size=64, batch_size=8, num_workers=0):\n\n # resize and normalize the images\n transform1 = transforms.Compose([transforms.Resize((image_size, image_size)), # resize to 128x128\n transforms.ToTensor()])\n # get training and test directories\n # resize and normalize the images\n transform2 = transforms.Compose([transforms.Resize((256,256)), # resize to 128x128\n transforms.ToTensor()])\n\n image_path = './' + image_dir\n train_path = os.path.join(image_path, image_type)\n test_path = os.path.join(image_path, 'test_{}'.format(image_type))\n\n if image_type == 'lr':\n # define datasets using ImageFolder\n train_dataset = datasets.ImageFolder(train_path, transform1)\n test_dataset = datasets.ImageFolder(test_path, transform1)\n\n # create and return DataLoaders\n train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers, drop_last=True)\n test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)\n\n if image_type == 'hr':\n # define datasets using ImageFolder\n train_dataset = datasets.ImageFolder(train_path, transform2)\n test_dataset = datasets.ImageFolder(test_path, transform2)\n\n # create and return DataLoaders\n train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers, drop_last=True)\n test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)\n\n return train_loader, test_loader",
"def pack(name, f_name, img_size=(227,227),\n\t\tgreyscale=False, flatten=False, istest=False):\n\t \n\tdtype = \"Float64\" # Should be Float64\n\tdata_folder = \"DATA\"\n\thdfname = \"%s.hdf5\" % name\n\n\tf = h5py.File(\"%s/%s\" % (data_folder, hdfname), \"w\")\n\tif istest:\n\t\tX, paths = _load_testset(f_name, img_size=img_size,\n\t\t\tgreyscale=greyscale, flatten=flatten)\n\t\txfile = f.create_dataset(\"/data\", data=X, dtype=dtype)\n\telse:\n\t\tX, y = _load_dataset(f_name, img_size=img_size,\n\t\t\tgreyscale=greyscale, flatten=flatten)\n\t\tlookup, rev_lookup = _gen_lookup_table(y)\n\t\ty_n = np.array([rev_lookup[label] for label in y], dtype='uint8')\n\t\txfile = f.create_dataset(\"data\", data=X, dtype=dtype)\n\t\tyfile = f.create_dataset(\"label\", data=y_n, dtype=dtype)\n\t\tfor keys in lookup:\n\t\t\tyfile.attrs[str(keys)] = lookup[keys]\n\n\twith open(\"%s/%s.txt\" % (data_folder, name), \"w\") as ref:\n\t\tref.write(\"%s/%s\" % (data_folder, hdfname))\n\tprint(\"Created Datasets:\")\n\tfor name in f:\n\t\tprint(\" - %s\" % name)\n\tprint(\"Dimensions:\")\n\tprint(\" - %s\" % \", \".join(str(i) for i in X.shape))\n\tif not istest:\n\t\tprint(\" - %s\" % \", \".join(str(i) for i in y_n.shape))",
"def _load_dataset(f_name, img_size, greyscale, flatten):\n\n\timg_paths, labels = _read_file(f_name)\n\traw_imgs = [_get_img(path) for path in img_paths]\n\tprocessed_labels = [labels[i] for i in range(len(labels)) if raw_imgs[i]]\n\tprocessed_imgs = np.array([_adjust_img(raw_img, img_size,\n\t\tgreyscale=greyscale,\n\t\tflatten=flatten) for raw_img in raw_imgs if raw_img], dtype='Float64')\n\treturn processed_imgs, processed_labels",
"def process_train():\n\n train_entry = unpickle(train_file)\n train_dataset = train_entry[b'data']\n train_targets = train_entry[b'fine_labels'] # will need to edit for coarse\n train_dataset = np.vstack(train_dataset).reshape(-1, 3, 32, 32)\n train_dataset = train_dataset.transpose((0, 2, 3, 1)) \n\n meta_entry = unpickle(meta_file)\n meta_entry[b'fine_label_names']\n\n root_path = data_dir + '/cifar100/train/'\n for counter, item in enumerate(train_targets):\n make_dir_if_no_exist(root_path+str(item))\n # write data\n img = train_dataset[counter]\n #bgr_image = cv2.cvtColor(img.astype(np.uint8), cv2.COLOR_RGB2BGR)\n file_path = root_path+str(item)+'/'+\"train_img_{0}.jpg\".format(str(counter))\n #print(file_path)\n # something breaks here\n #cv2.imwrite(file_path, bgr_image)\n imageio.imwrite(file_path, img)",
"def load(self):\n if os.path.exists(self.loaded_data):\n with open(self.loaded_data, 'rb') as f:\n preloaded_data = pickle.load(f)\n # Train part\n self.class2imgid = preloaded_data['class2imgid']\n self.path2class_sketch = preloaded_data['path2class_sketch']\n self.class2path_sketch = preloaded_data['class2path_sketch']\n self.path2class_image = preloaded_data['path2class_image']\n self.class2path_image = preloaded_data['class2path_image']\n self.id2path = preloaded_data['id2path']\n # Test part\n self.class2id = preloaded_data['class2id']\n self.id2class = TEST_CLASS\n self.class2imgid_test = preloaded_data['class2imgid_test']\n self.class2path_sketch_test = preloaded_data['class2path_sketch_test']\n self.class2path_image_test = preloaded_data['class2path_image_test']\n self.path2class_sketch_test = preloaded_data['path2class_sketch_test']\n self.path2class_image_test = preloaded_data['path2class_image_test']\n # Shared part\n self.loaded_image = preloaded_data['loaded_image']\n return\n self.id2class = TEST_CLASS\n self.class2id = dict()\n for idx, cls in enumerate(self.id2class):\n self.class2id[cls] = idx\n\n self.class2imgid, self.path2class_sketch, self.class2path_sketch, self.path2class_image, self.class2path_image = \\\n self.load_stats(self.stats_file_train, TRAIN_CLASS, self.sketch_files_train, self.image_files_train)\n \n self.class2imgid_test, self.path2class_sketch_test, self.class2path_sketch_test, self.path2class_image_test, self.class2path_image_test = \\\n self.load_stats(self.stats_file_test, TEST_CLASS, self.sketch_files_test, self.image_files_test)\n\n for path in self.path2class_sketch.keys():\n self.loaded_image[path] = self.load_each_image(path)\n self.id2path.append(path)\n\n for path in self.path2class_image.keys():\n self.loaded_image[path] = self.load_each_image(path)\n \n for path in self.path2class_sketch_test.keys():\n self.loaded_image[path] = self.load_each_image(path)\n\n for path in self.path2class_image_test.keys():\n self.loaded_image[path] = self.load_each_image(path)\n \n assert len(self.id2path) == len(self.path2class_sketch.keys())\n preloaded_data = dict()\n # Train part\n preloaded_data['class2imgid'] = self.class2imgid\n preloaded_data['path2class_sketch'] = self.path2class_sketch\n preloaded_data['class2path_sketch'] = self.class2path_sketch\n preloaded_data['path2class_image'] = self.path2class_image\n preloaded_data['class2path_image'] = self.class2path_image\n preloaded_data['id2path'] = self.id2path\n # Test part\n preloaded_data['class2id'] = self.class2id\n preloaded_data['class2imgid_test'] = self.class2imgid_test\n preloaded_data['class2path_sketch_test'] = self.class2path_sketch_test\n preloaded_data['class2path_image_test'] = self.class2path_image_test\n preloaded_data['path2class_sketch_test'] = self.path2class_sketch_test\n preloaded_data['path2class_image_test'] = self.path2class_image_test\n # Shared part\n preloaded_data['loaded_image'] = self.loaded_image\n \n with open(self.loaded_data, 'wb') as f:\n pickle.dump(preloaded_data, f)\n return",
"def _get_data_loader_and_outfile(self):\n # TODO: unify with train.py\n assert self.method != 'maml' and self.method != 'maml_approx', 'maml do not support save_feature and run'\n\n # Defines image size\n if 'Conv' in self.backbone:\n image_size = 84\n else:\n image_size = 224\n\n path_to_data_file = get_path_to_json(self.dataset, self.split)\n\n # Defines output file for computed features\n #TODO no need for outfile anymore\n if self.save_iter != -1:\n outfile = os.path.join(self.checkpoint_dir,\n f'{self.split}_{self.save_iter}.hdf5')\n else:\n outfile = os.path.join(self.checkpoint_dir, self.split + \".hdf5\")\n\n # Return data loader TODO: why do we do batches here ?\n datamgr = SimpleDataManager(image_size, batch_size=64)\n data_loader = datamgr.get_data_loader(path_to_data_file, aug=False, shallow=self.shallow)\n\n dirname = os.path.dirname(outfile)\n if not os.path.isdir(dirname):\n os.makedirs(dirname)\n\n return (data_loader, outfile)",
"def extract_data_from_pickles(cfg, tubs):\n t_paths = gather_tub_paths(cfg, tubs)\n for tub_path in t_paths:\n file_paths = glob.glob(join(tub_path, '*.pickle'))\n print(\n '[AiTraining:extract_data_from_pickles] found {} pickles writing json records and images in tub {}'.format(\n len(file_paths), tub_path))\n for file_path in file_paths:\n # print('loading data from {}'.format(file_paths))\n with open(file_path, 'rb') as f:\n p = zlib.decompress(f.read())\n data = pickle.loads(p)\n\n base_path = dirname(file_path)\n filename = splitext(basename(file_path))[0]\n image_path = join(base_path, filename + '.jpg')\n img = Image.fromarray(np.uint8(data['val']['cam/image_array']))\n img.save(image_path)\n\n data['val']['cam/image_array'] = filename + '.jpg'\n\n with open(join(base_path, 'record_{}.json'.format(filename)), 'w') as f:\n json.dump(data['val'], f)",
"def create_dataset(name, img_folder, lmdb_save_path, H_dst, W_dst, C_dst):\n # configurations\n read_all_imgs = False # whether real all images to memory with multiprocessing\n # Set False for use limited memory\n BATCH = 5000 # After BATCH images, lmdb commits, if read_all_imgs = False\n n_thread = 40\n ########################################################\n if not lmdb_save_path.endswith(\".lmdb\"):\n raise ValueError(\"lmdb_save_path must end with 'lmdb'.\")\n if osp.exists(lmdb_save_path):\n print(\"Folder [{:s}] already exists. Exit...\".format(lmdb_save_path))\n sys.exit(1)\n\n # read all the image paths to a list\n print(\"Reading image path list ...\")\n all_img_list = data_util._get_paths_from_images(img_folder)\n keys = []\n for img_path in all_img_list:\n split_rlt = img_path.split(\"/\")\n folder = split_rlt[-2]\n img_name = split_rlt[-1].split(\".png\")[0]\n keys.append(folder + \"_\" + img_name)\n\n if read_all_imgs:\n # read all images to memory (multiprocessing)\n dataset = {} # store all image data. list cannot keep the order, use dict\n print(\"Read images with multiprocessing, #thread: {} ...\".format(n_thread))\n pbar = util.ProgressBar(len(all_img_list))\n\n def mycallback(arg):\n \"\"\"get the image data and update pbar\"\"\"\n key = arg[0]\n dataset[key] = arg[1]\n pbar.update(\"Reading {}\".format(key))\n\n pool = Pool(n_thread)\n for path, key in zip(all_img_list, keys):\n pool.apply_async(read_image_worker, args=(path, key), callback=mycallback)\n pool.close()\n pool.join()\n print(\"Finish reading {} images.\\nWrite lmdb...\".format(len(all_img_list)))\n\n # create lmdb environment\n data_size_per_img = cv2.imread(all_img_list[0], cv2.IMREAD_UNCHANGED).nbytes\n print(\"data size per image is: \", data_size_per_img)\n data_size = data_size_per_img * len(all_img_list)\n env = lmdb.open(lmdb_save_path, map_size=data_size * 10)\n\n # write data to lmdb\n pbar = util.ProgressBar(len(all_img_list))\n txn = env.begin(write=True)\n for idx, (path, key) in enumerate(zip(all_img_list, keys)):\n pbar.update(\"Write {}\".format(key))\n key_byte = key.encode(\"ascii\")\n data = dataset[key] if read_all_imgs else cv2.imread(path, cv2.IMREAD_UNCHANGED)\n\n assert len(data.shape) > 2 or C_dst == 1, \"different shape\"\n\n if C_dst == 1:\n H, W = data.shape\n assert H == H_dst and W == W_dst, \"different shape.\"\n else:\n H, W, C = data.shape\n assert H == H_dst and W == W_dst and C == 3, \"different shape.\"\n txn.put(key_byte, data)\n if not read_all_imgs and idx % BATCH == 0:\n txn.commit()\n txn = env.begin(write=True)\n txn.commit()\n env.close()\n print(\"Finish writing lmdb.\")\n\n # create meta information\n meta_info = {}\n meta_info[\"name\"] = name\n channel = C_dst\n meta_info[\"resolution\"] = \"{}_{}_{}\".format(channel, H_dst, W_dst)\n meta_info[\"keys\"] = keys\n pickle.dump(meta_info, open(osp.join(lmdb_save_path, \"meta_info.pkl\"), \"wb\"))\n print(\"Finish creating lmdb meta info.\")",
"def load_dataset(self):",
"def load_data(config):\n globals()[\"img_rows\"] = config['dataset']['img_rows']\n globals()[\"img_cols\"] = config['dataset']['img_cols']\n globals()[\"_mean_filename\"] = (\"caltech-101-{}-{}-mean.npy\"\n .format(img_rows, img_cols))\n # url of the binary data\n cache_dir = os.path.expanduser(os.path.join('~', '.keras/datasets'))\n path = os.path.join(cache_dir, 'kagglecatsanddogs_3367a')\n if not os.path.isdir(path):\n logging.info(\"Please download the Kaggle Cats and Dogs dataset from \"\n \"Microsoft to {} and extract it there.\"\n .format(path))\n sys.exit(-1)\n path = os.path.join(path, \"PetImages\")\n pickle_fpath = os.path.join(path,\n \"cat-dog-data-{}-{}.pickle\"\n .format(config['dataset']['img_rows'],\n config['dataset']['img_cols']))\n\n if not os.path.isfile(pickle_fpath):\n # Load data\n cat_path_glob = \"{}/Cat/*.jpg\".format(path)\n cats_fnames = glob.glob(cat_path_glob)\n dogs_path_glob = \"{}/Dog/*.jpg\".format(path)\n dogs_fnames = glob.glob(dogs_path_glob)\n print(\"{} in {}\".format(len(cats_fnames), cat_path_glob))\n print(\"{} in {}\".format(len(dogs_fnames), dogs_path_glob))\n\n # Make np arrays\n x = np.zeros((len(dogs_fnames) + len(cats_fnames),\n img_rows, img_cols, 3), dtype=np.uint8)\n y = np.zeros((len(dogs_fnames) + len(cats_fnames), 1), dtype=np.uint64)\n print(\"Start reading dogs\")\n for i, dog_fname in enumerate(dogs_fnames):\n x[i, :, :, :] = prepreprocess(dog_fname, img_cols, img_rows)\n y[i] = 1\n print(\"Start reading cats\")\n for i, cat_fname in enumerate(cats_fnames, start=len(dogs_fnames)):\n x[i, :, :, :] = prepreprocess(cat_fname, img_cols, img_rows)\n\n x_train, x_test, y_train, y_test = train_test_split(x, y,\n test_size=0.33,\n random_state=42,\n stratify=y)\n x_train, x_val, y_train, y_val = train_test_split(x_train, y_train,\n test_size=0.10,\n random_state=42,\n stratify=y_train)\n\n # both = cats_fnames + dogs_fnames\n # from random import shuffle\n # shuffle(both)\n # for el in both:\n # prepreprocess(el, img_cols, img_rows)\n\n data = {'x_train': x_train, 'y_train': y_train,\n 'x_val': x_val, 'y_val': y_val,\n 'x_test': x_test, 'y_test': y_test}\n\n with open(pickle_fpath, 'wb') as f:\n pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL)\n else:\n with open(pickle_fpath, 'rb') as f:\n data = pickle.load(f)\n\n return data",
"def load_dataset(dataset, train_size, valid_size, test_size):\n\n if dataset == 'mnist_012':\n root = './data/mnist'\n num_classes = 3\n\n trans = transforms.Compose([transforms.Grayscale(num_output_channels=1), transforms.ToTensor(), transforms.Normalize(mean=MNIST_MEAN, std=MNIST_STD)])\n train_valid_set = datasets.MNIST(root=root, train=True, transform=trans)\n test_set = datasets.MNIST(root=root, train=False, transform=trans)\n\n train_valid_set = MNIST_bis(dataset=train_valid_set, size=train_size+valid_size, digits_to_keep=[0,1,2])\n test_set = MNIST_bis(dataset=test_set, size=test_size, digits_to_keep=[0,1,2])\n\n train_sampler, valid_sampler = train_valid_split(dataset=train_valid_set, train_size=train_size)\n\n train_loader = DataLoader(dataset=train_valid_set, batch_size=BATCH_SIZE, sampler=train_sampler, num_workers=4, pin_memory=True, drop_last=True)\n valid_loader = DataLoader(dataset=train_valid_set, batch_size=BATCH_SIZE, sampler=valid_sampler, num_workers=4, pin_memory=True, drop_last=True)\n test_loader = DataLoader(dataset=test_set, batch_size=BATCH_SIZE, num_workers=4, pin_memory=True, drop_last=True)\n\n elif dataset == 'mnist_rot':\n root = './data/mnist'\n num_classes = 9\n\n train_trans = transforms.Compose([transforms.Grayscale(num_output_channels=1), transforms.Resize((26,26)), transforms.ToTensor(), transforms.Normalize(mean=MNIST_MEAN, std=MNIST_STD)])\n test_trans = transforms.Compose([transforms.Grayscale(num_output_channels=1), transforms.Resize((26,26)), transforms.RandomRotation((0,360)), transforms.ToTensor(), transforms.Normalize(mean=MNIST_MEAN, std=MNIST_STD)])\n train_valid_set = datasets.MNIST(root=root, train=True, transform=train_trans)\n test_set = datasets.MNIST(root=root, train=False, transform=test_trans)\n\n train_valid_set_bis = MNIST_bis(dataset=train_valid_set, size=train_size+valid_size, digits_to_keep=[0,1,2,3,4,5,6,7,8])\n test_set = MNIST_bis(dataset=test_set, size=test_size, digits_to_keep=[0,1,2,3,4,5,6,7,8])\n\n train_sampler, valid_sampler = train_valid_split(dataset=train_valid_set_bis, train_size=train_size)\n\n train_loader = DataLoader(dataset=train_valid_set_bis, batch_size=BATCH_SIZE, sampler=train_sampler, num_workers=4, pin_memory=True, drop_last=True)\n valid_loader = DataLoader(dataset=train_valid_set_bis, batch_size=BATCH_SIZE, sampler=valid_sampler, num_workers=4, pin_memory=True, drop_last=True)\n test_loader = DataLoader(dataset=test_set, batch_size=BATCH_SIZE, num_workers=4, pin_memory=True, drop_last=True)\n\n elif dataset == 'mnist_trans':\n root = './data/mnist'\n num_classes = 9\n\n train_trans = transforms.Compose([transforms.Grayscale(num_output_channels=1), transforms.Resize((26,26)), transforms.ToTensor(), transforms.Normalize(mean=MNIST_MEAN, std=MNIST_STD)])\n test_trans = transforms.Compose([transforms.Grayscale(num_output_channels=1), transforms.Resize((26,26)), RandomTranslation(horizontal=6, vertical=6), transforms.ToTensor(), transforms.Normalize(mean=MNIST_MEAN, std=MNIST_STD)])\n train_valid_set = datasets.MNIST(root=root, train=True, transform=train_trans)\n test_set = datasets.MNIST(root=root, train=False, transform=test_trans)\n \n train_valid_set_bis = MNIST_bis(dataset=train_valid_set, size=train_size+valid_size, digits_to_keep=[0,1,2,3,4,5,6,7,8])\n test_set = MNIST_bis(dataset=test_set, size=test_size, digits_to_keep=[0,1,2,3,4,5,6,7,8])\n\n train_sampler, valid_sampler = train_valid_split(dataset=train_valid_set_bis, train_size=train_size)\n\n train_loader = 
DataLoader(dataset=train_valid_set_bis, batch_size=BATCH_SIZE, sampler=train_sampler, num_workers=4, pin_memory=True, drop_last=True)\n valid_loader = DataLoader(dataset=train_valid_set_bis, batch_size=BATCH_SIZE, sampler=valid_sampler, num_workers=4, pin_memory=True, drop_last=True)\n test_loader = DataLoader(dataset=test_set, batch_size=BATCH_SIZE, num_workers=4, pin_memory=True, drop_last=True)\n\n elif dataset == 'eth80':\n root = './data/eth80'\n num_classes = 8\n\n trans = transforms.Compose([transforms.Grayscale(num_output_channels=1), transforms.Resize((50,50)), transforms.ToTensor(), transforms.Normalize(mean=ETH80_MEAN, std=ETH80_STD)])\n complete_set = datasets.ImageFolder(root=root, transform=trans)\n class_names = complete_set.classes\n\n train_sampler, valid_sampler, test_sampler = train_valid_test_split(dataset=complete_set, train_size=train_size, valid_size=valid_size)\n \n train_loader = DataLoader(dataset=complete_set, batch_size=BATCH_SIZE, sampler=train_sampler, num_workers=4, pin_memory=True, drop_last=True) \n valid_loader = DataLoader(dataset=complete_set, batch_size=BATCH_SIZE, sampler=valid_sampler, num_workers=4, pin_memory=True, drop_last=True) \n test_loader = DataLoader(dataset=complete_set, batch_size=BATCH_SIZE, sampler=test_sampler, num_workers=4, pin_memory=True, drop_last=True)\n\n else:\n raise ValueError('Specified dataset does not exist.')\n\n logger.debug('Class frequency train loader: {} validation loader: {} test loader: {}'.format(\n count_class_freq(train_loader, num_classes),count_class_freq(valid_loader, num_classes), count_class_freq(test_loader, num_classes))\n )\n logging.info('Loaded {} dataset with the split {}-{}-{} for the [train]-[valid]-[test] setup.'.format(dataset, len(train_loader)*BATCH_SIZE, len(valid_loader)*BATCH_SIZE, len(test_loader)*BATCH_SIZE))\n\n\n return train_loader, valid_loader, test_loader, get_dim(train_loader)",
"def load_data():\n\n # Get the data.\n train_data_filename = maybe_download('train-images-idx3-ubyte.gz')\n train_labels_filename = maybe_download('train-labels-idx1-ubyte.gz')\n test_data_filename = maybe_download('t10k-images-idx3-ubyte.gz')\n test_labels_filename = maybe_download('t10k-labels-idx1-ubyte.gz')\n\n # Extract it into numpy arrays.\n train_data = extract_data(train_data_filename, FLAGS.train_size + FLAGS.validation_size)\n train_labels = extract_labels(train_labels_filename, FLAGS.train_size + FLAGS.validation_size)\n test_data = extract_data(test_data_filename, FLAGS.test_size)\n test_labels = extract_labels(test_labels_filename, FLAGS.test_size)\n\n validation_data = train_data[:FLAGS.validation_size, ...]\n validation_labels = train_labels[:FLAGS.validation_size]\n train_data = train_data[FLAGS.validation_size:, ...]\n train_labels = train_labels[FLAGS.validation_size:]\n\n return train_data, train_labels, validation_data, validation_labels, test_data, test_labels",
"def createDataSets(smilePath, nonSmilePath, dataSetSize, testingSplit):\n\n trainingLabels = []\n trainingSetFiles = []\n testingLabels = []\n testingSetFiles = []\n\n # transform all smiling pictures\n for root, dirs, files in os.walk(smilePath, True):\n i=0\n #static for loop\n for name in files:\n #all images\n #for name in files:\n if name.endswith(\".jpg\") and (i<(dataSetSize/2) or dataSetSize == -1):\n if random.randint(1, 100) > testingSplit:\n trainingSetFiles.append(os.path.join(root, name))\n trainingLabels.append(np.array([1,0], np.int32))\n else:\n testingSetFiles.append(os.path.join(root, name))\n testingLabels.append(np.array([1,0], np.int32))\n i=i+1\n\n # transform all non-smiling pictures\n #the non smiling pictures are added to a random position in the trainingSet and labels and the testingSet and labels\n #the sets and labelled where already created in the above for loop.\n for root, dirs, files in os.walk(nonSmilePath, True):\n k=0\n #all images\n #for name in files:\n #static for loop\n for name in files:\n if name.endswith(\".jpg\") and (k<(dataSetSize/2) or dataSetSize == -1):\n if random.randint(1, 100) > testingSplit:\n # insert to a random position to avoid overfitting\n insertPosition = random.randint(0, len(trainingLabels))\n trainingSetFiles.insert(insertPosition, os.path.join(root, name))\n trainingLabels.insert(insertPosition, np.array([0, 1], np.int32))\n else:\n # insert to a random position to avoid overfitting\n insertPosition = random.randint(0, len(trainingLabels))\n testingSetFiles.insert(insertPosition, os.path.join(root, name))\n testingLabels.insert(insertPosition, np.array([0, 1], np.int32))\n k=k+1\n\n return trainingSetFiles,trainingLabels,testingSetFiles,testingLabels\n #TODO: Needs to be explained better Side note: Only the file names of the training images are provided to reduce memory consumption.",
"def load_data(self):\n\n data, label = self._generate_all_combinations_of_stripe_images();\n\n return data, label;"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Instantiates, trains and validates an SVM classifier on the given datasets, after optionally scaling them. The trained classifier and data scaler are saved in a pickled file. The method also prints validation statistics.
|
def fit_and_pickle_classifier(train_x, train_y, valid_x, valid_y, scale=False):
start = time()
train_feat_x = [compute_image_features(image) for image in train_x]
valid_feat_x = [compute_image_features(image) for image in valid_x]
if scale:
scaler = StandardScaler()
scaler.fit(train_feat_x)
train_feat_x = scaler.transform(train_feat_x)
valid_feat_x = scaler.transform(valid_feat_x)
else:
scaler = None
print('Computed features for training and validation set in', round(time() - start), 's')
start = time()
classifier = svm.LinearSVC(C=Params.SVM_C)
classifier = classifier.fit(train_feat_x, train_y)
print('Trained classifier in', round(time() - start), 's')
pickle_me = {'classifier': classifier, 'scaler': scaler}
pickle.dump(pickle_me, open(Params.pickled_classifier, "wb"))
valid_prediction = classifier.predict(valid_feat_x)
valid_accuracy = accuracy_score(valid_prediction, valid_y)
print('Accuracy on validation set', valid_accuracy)
precision, recall, fscore, support = precision_recall_fscore_support(y_true=valid_y, y_pred=valid_prediction)
print(' Table with stats on validation set.')
t = PrettyTable(['Class', 'Precision', 'Recall', 'F-score', 'Support'])
for item in zip(range(len(precision)), precision, recall, fscore, support):
t.add_row(['{}'.format(item[0]),
'{:.3f}'.format(item[1]),
'{:.3f}'.format(item[2]),
'{:.3f}'.format(item[3]),
'{}'.format(item[4])])
print(t)
return classifier, scaler
|
[
"def train_svm_classifier(dataset, labels):\n svm_classifier = SVC()\n return svm_classifier.fit(dataset, labels)",
"def train_svm():\n df = load_dataframe()\n X_train, X_test, y_train, y_test = get_train_test_split(df)\n\n classifier = svm.SVC()\n classifier.fit(X_train, y_train)\n y_pred = classifier.predict(X_test)\n\n print(\"Classification report for classifier %s:\\n%s\\n\"\n % (classifier, metrics.classification_report(y_test, y_pred)))\n\n # store trained SVM\n with open(config.SVM_PATH, 'wb') as file:\n pickle.dump(classifier, file)",
"def exec_classifiers(self, dataset):\n f = Features()\n pt = param_tuning.ParamTuning()\n\n start_time = time.time()\n Xtrain, Xtest, ytrain, ytest = self._load_and_split_data(dataset)\n print(\"Loaded train/test datasets in {} sec.\".format(time.time() - start_time))\n\n fX_train = f.build(Xtrain)\n fX_test = f.build(Xtest)\n print(\"Build features from train/test data in {} sec\".format(time.time() - start_time))\n\n for clf in config.MLConf.clf_custom_params:\n print('Method {}'.format(clf))\n print('=======', end='')\n print(len(clf) * '=')\n\n tot_time = time.time(); start_time = time.time()\n # 1st phase: train each classifier on the whole train dataset (no folds)\n # estimator = pt.clf_names[clf][0](**config.MLConf.clf_custom_params[clf])\n estimator = pt.clf_names[clf][0](random_state=config.seed_no)\n estimator.set_params(**config.MLConf.clf_custom_params[clf])\n estimator = pt.trainClassifier(fX_train, ytrain, estimator)\n\n print(\"Finished training model on dataset; {} sec.\".format(time.time() - start_time))\n\n start_time = time.time()\n # 2nd phase: test each classifier on the test dataset\n res = pt.testClassifier(fX_test, ytest, estimator)\n self._print_stats(clf, res['metrics'], res['feature_imp'], start_time)\n # if not os.path.exists('output'):\n # os.makedirs('output')\n # np.savetxt(f'output/{clf}_default_stats.csv', res['metrics']['stats'], fmt=\"%u\")\n\n print(\"The whole process took {} sec.\\n\".format(time.time() - tot_time))",
"def train(self):\n for dataclass in self.Data.get_class_names():\n print('Training for ', dataclass, '... ', end='')\n # train\n self.Data.set_class_of_interest(dataclass)\n self.SVMs[dataclass] = SVM(self.Data, self.Solver, self.Kernel)\n t = -clock()\n self.SVMs[dataclass].train()\n t += clock()\n self.iter_tracker.loc[dataclass, 'k'] = self.SVMs[dataclass].solver_iter_count\n self.iter_tracker.loc[dataclass, 'train time'] = t\n print('Complete!')",
"def train():\n # get data from files\n tourism_file = open('classifier/tourism.txt', 'r')\n nontourism_file = open('classifier/nontourism.txt', 'r')\n\n # retrieve features\n data_set = process_data(tourism_file, nontourism_file)\n training_set = data_set[0]\n test_set = data_set[1]\n datamixed = data_set[2]\n size = data_set[3]\n feature_set = data_set[4]\n\n # classifiers\n classifier_nb = NaiveBayesClassifier\n classifier_lr = SklearnClassifier(LogisticRegression())\n classifier_svm = SklearnClassifier(LinearSVC())\n\n # get best classifier from cross-validation\n classifier = cross_validate(classifier_svm, training_set, test_set)['classifier'] # set classifier\n return classifier",
"def train_and_evaluate_sentiment(train_data, train_labels, val_data, val_labels,\n test_data=None, test_labels=None,\n parser_output_path=None, perl_script_path=None,args=None):\n print('Training the SVM on %d examples...' % train_data.shape[0])\n clf = svm.SVC()\n clf.fit(train_data, train_labels)\n\n # validate the configuration on the validation and test set (if provided)\n val_predictions = clf.predict(val_data)\n val_accuracy = accuracy_score(val_labels, val_predictions)\n print('Val acc: %.5f' % val_accuracy)\n test_accuracy = None\n if test_data is not None and test_labels is not None:\n test_predictions = clf.predict(test_data)\n test_accuracy = accuracy_score(test_labels, test_predictions)\n print('Test acc: %.5f' % test_accuracy)\n return val_accuracy, test_accuracy",
"def train_and_save(self):\n self.naive_bag_of_words.prepare_simple_data()\n self.naive_bag_of_words.train()\n\n self.naive_best_words.prepare_simple_data()\n self.naive_best_words.train()\n\n self.svm.prepare_data()\n self.svm.train()\n\n with open('naive_bag_of_words.pickle', 'wb') as f:\n pickle.dump(self.naive_bag_of_words, f, -1)\n\n with open('naive_best_words.pickle', 'wb') as f:\n pickle.dump(self.naive_best_words, f, -1)\n\n with open('svm.pickle', 'wb') as f:\n pickle.dump(self.svm, f, -1)",
"def train(self, inputs, targets):\n\n # We train the SVM classifier by solving the dual problem.\n # Calculate the Lagrange multipliers, alphas.\n alphas = self.solve_dual(inputs, targets)\n # Use the Lagrange multipliers to find the support vectors.\n support_vector_indices = self.find_support_vectors(inputs, targets, alphas)\n \n # Keep only the alpha's, x's and y's that correspond to the support\n # vectors found above.\n self.support_multipliers = alphas[support_vector_indices]\n self.support_vectors = inputs[support_vector_indices, :]\n print self.support_vectors.shape[0]\n self.support_vector_labels = targets[support_vector_indices]\n\n # Calculate the bias.\n self.bias = self.compute_bias(inputs, targets, alphas,\n support_vector_indices, self.kernel_func)",
"def train_svm(X_train, y_train, X_test_vecs, X_test_strs, y_test):\n\tsvm_clf = SVC(gamma='scale')\n\tsvm_clf.fit(X_train, y_train)\n\tpredictions = predict(svm_clf, X_test_vecs, X_test_strs)\n\n\t# find_false_positives(X_test_strs, y_test, predictions)\n\treturn precision_recall_fscore_support(y_test, predictions, average='binary')",
"def main():\n\n # Load data\n student_data = load_data()\n\n # Explore the data\n X, y = explore_student_data(student_data)\n\n #Preprocess features\n X = preprocess_features(X) \n print \"Number of preprocessed columns: \" + str(len(X.columns)) \n print \"Processed feature columns : \" + str(list(X.columns))\n\n features_data = X\n target_data = y\n \n #Stratified shuffle split \n X_train, y_train, X_test, y_test = strat_shuffle_split(features_data, target_data)\n\n print \"Training set (X, y): \" + str(y_train.shape[0])\n print \"Test set (X, y): \" + str(y_test.shape[0]) \n\n #or \n #print \"Training set (X, y): \" + str(X_train.shape[0])\n #print \"Test set (X, y): \" + str(X_test.shape[0])\n\n\n #Model 1: Support Vector Classifier Linear Kernel\n #from sklearn.svm import SVC\n SVM_clf = SVC()\n\n ###Code for Predicting, Source: http://scikit-learn.org/stable/tutorial/basic/tutorial.html###\n # #refer to README.md file for identifying the meaning of the numbers\n # new_data = [1, 0, 1, 0, 18, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, \n # 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 2, 4, \n # 1, 1, 1, 1, 1, 1, 1, 1, 0, 5, 2, 1, 1, 1, 5, 0]\n # print \"lenght of new_data:\"\n # print len(new_data)\n\n # print \"Predicting: \"\n # print SVM_clf.predict(new_data)\n\n #Model 2: Randomized Forest\n #from sklearn.ensemble import RandomForestClassifier\n RF_clf = RandomForestClassifier(n_estimators=15)\n\n #Model 3: Bagging with K Nearest Neighbors\n #from sklearn.neighbors import KNeighborsClassifier\n #from sklearn.ensemble import BaggingClassifier\n bagging_clf = BaggingClassifier(KNeighborsClassifier(n_neighbors=3),max_samples=0.5, max_features=0.5)\n\n #With training sizes 100, 200, 300\n train_num = [100, 200, 300] \n \n #models \n models = {\"SVM classifier\": SVM_clf, \"Randomized Forest\": RF_clf, \"Bagging Classifier with KNN\": bagging_clf}\n\n #parameters\n parameters = {'kernel':('linear','rbf', 'poly','sigmoid'), 'C':[1, 50], 'degree':[3,6]}\n\n ##creates CHARTS##\n create_chart(train_num, models, X, y)\n\n #fine_tuning_SVM(parameters, SVM_clf, features_data, target_data)#original code\n fine_tuning_SVM(parameters, SVM_clf, features_data, target_data, X_train, y_train, X_test, y_test)#modified code after review\n\n #tuning RF_clf model\n #RF_clf = RandomForestClassifier(n_estimators=15)\n #RandomForestClassifier(n_estimators=10, criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features='auto', max_leaf_nodes=None, bootstrap=True, oob_score=False, n_jobs=1, random_state=None, verbose=0, warm_start=False, class_weight=None)\n\n #tuning bagging_clf\n #BaggingClassifier(base_estimator=None, n_estimators=10, max_samples=1.0, max_features=1.0, bootstrap=True, bootstrap_features=False, oob_score=False, warm_start=False, n_jobs=1, random_state=None, verbose=0)\n #BaggingClassifier(KNeighborsClassifier(n_neighbors=3),max_samples=0.5, max_features=0.5)\n\n all_tables(models, train_num, X, y)\n\n\n print \"Finished\"",
"def build_classifier():\n X = pd.read_csv(os.path.join(PROJECT_DIR, \"train_features.csv\"), skiprows=1, header=None).as_matrix()\n Y = pd.read_csv(os.path.join(PROJECT_DIR, \"train_labels.csv\"), header=None).as_matrix().ravel()\n\n # Split data into training and cross validation sets\n X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.3, random_state=3131)\n\n std_scale = preprocessing.StandardScaler().fit(X_train)\n X_train_std = std_scale.transform(X_train)\n # X_test_std = std_scale.transform(X_test)\n\n pca_std = PCA(n_components=13).fit(X_train_std)\n X_train_std = pca_std.transform(X_train_std)\n # X_test_std = pca_std.transform(X_test_std)\n\n clf = svm.SVC(C=5)\n clf.fit(X_train_std, y_train)\n\n # Compare predictions of classifier on cross-validation sets with ground-truths\n # print clf.score(X_test_std, y_test)\n return clf, std_scale, pca_std",
"def train_all(self, classifier, name: str, save=False) -> None:\n\n train = self.features[self.features_list]\n target = self.features['stressed']\n scaler = StandardScaler().fit(train)\n train_scaled = scaler.transform(train)\n print(f'Currently Training {name} on all data')\n clf = classifier.fit(train_scaled, target)\n\n self.scaler = scaler\n self.classifier = clf\n self.clf_name = name\n\n if save:\n joblib.dump(scaler, 'models/scaler.pkl')\n joblib.dump(clf, f'models/classifier_{name}.pkl')",
"def train_svm_classifier(features, labels):\n svm_classifier = LinearSVC(random_state=9)\n svm_classifier.fit(features, labels)\n return svm_classifier",
"def train(self, images, labels, load):\n \n PATH='./trained.pickle'\n\n if os.path.isfile(PATH) and load:\n print 'Loading already existing training values from ' + PATH\n with open('trained.pickle') as f:\n self.classes, self.prClass, self.prPixelGivenClass = pickle.load(f)\n else:\n self.prClass = [0 for i in range(10)]\n self.classes = [i for i in range(10)]\n self.prPixelGivenClass = [[0 for i in range(14*14)] for j in range(10)]\n \n for i in range(len(labels)):\n self.prClass[labels[i]] += 1 # Count how many times a class appears in the labels list.\n for j in range(len(images[i])):\n if images[i][j] < 100:\n self.prPixelGivenClass[labels[i]][j] += 1 # For every class, count how many times\n # a pixel is black.\n \n for i in range(len(self.prPixelGivenClass)):\n for j in range(len(self.prPixelGivenClass[i])):\n self.prPixelGivenClass[i][j] /= float(self.prClass[i]) # Divide the count of black pixels\n # by the number of times a class\n # appears, to get a percentage.\n self.prClass[i] /= float(len(images)) # Divide the number of times a class\n # appears, by the total number of classes\n # to get a percentage.\n \n print ''\n for i in range(len(self.prClass)): # some useful output that shows the probability of each class.\n print 'Pr(C=' + str(i) + ') = ' + str(self.prClass[i])[:5]\n # print 'Probabilites of the individual pixel in this class:' \"\"Commented because we now have\n # self.print_ascii_probabilities(self.prPixelGivenClass[i]) \"\"'heat-maps' for each image\n # print''\n print ''\n with open('trained.pickle', 'w') as f:\n pickle.dump([self.classes, self.prClass, self.prPixelGivenClass], f)",
"def test_pipeline_methods_scaler_svm():\n iris = load_iris()\n X = iris.data\n y = iris.target\n # Test with Scaler + SVC\n clf = SVC(probability=True)\n scaler = Scaler()\n pipe = Pipeline([('scaler', scaler), ('svc', clf)])\n pipe.fit(X, y)\n pipe.predict(X)\n pipe.predict_proba(X)\n pipe.predict_log_proba(X)\n pipe.score(X, y)",
"def build_SupportVectorMachine_classifier(X_training, y_training):\n # Split Training Dataset into, Train and Validate Datasets\n X_train, X_val, y_train, y_val = train_test_split(X_training, y_training, test_size=0.2, random_state=2)\n\n # Define parameters to be tuned by GridSearchCV\n tuned_parameters = [{'kernel': ['rbf', 'linear'], 'C': [1, 10, 100, 1000]}]\n\n print(\"# Tuning hyper-parameters for precision using SVM\")\n print()\n\n # Find best parameters to use based on tuned_parameters. Score on precision\n svm_cv = GridSearchCV(\n SVC(), tuned_parameters, scoring='precision', n_jobs=-1\n )\n\n # Fit model to train data\n svm_cv.fit(X_train, y_train)\n\n print(\"Best parameters set found on SVM development set:\")\n print()\n print(svm_cv.best_params_)\n print()\n print(\"Grid scores on SVM development set:\")\n print()\n # Print mean, standard deviation and parameters of each combination of parameters\n means = svm_cv.cv_results_['mean_test_score']\n stds = svm_cv.cv_results_['std_test_score']\n for mean, std, params in zip(means, stds, svm_cv.cv_results_['params']):\n print(\"%0.3f (+/-%0.03f) for %r\"\n % (mean, std * 2, params))\n print()\n print(\"Detailed classification report for SVM:\")\n print()\n print(\"The model is trained on the full development set.\")\n print(\"The scores are computed on the full evaluation set.\")\n print()\n # Print classification report using validation data\n y_true, y_pred = y_val, svm_cv.predict(X_val)\n print(classification_report(y_true, y_pred))\n print()\n\n # Set Support Vector Machine Classifier model with best parameters\n svm_classifier = SVC(kernel=svm_cv.best_params_['kernel'], C=svm_cv.best_params_['C'], random_state=1)\n\n # Train Support Vector Machine Classifier model with training dataset\n svm_classifier.fit(X_training, y_training)\n\n # Return Support Vector Machine Classifier model\n return svm_classifier",
"def ml_classification(x_train, y_train, x_test, y_test, cross_validation=False):\n from time import time\n from sklearn.naive_bayes import GaussianNB\n from sklearn.svm import SVC\n from sklearn.tree import DecisionTreeClassifier\n from sklearn.neighbors import KNeighborsClassifier\n from sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier\n from sklearn.metrics import accuracy_score\n\n from sklearn.model_selection import KFold\n from sklearn.base import clone\n\n classifiers = (GaussianNB(), SVC(\n kernel=\"rbf\", ), DecisionTreeClassifier(), KNeighborsClassifier(\n n_neighbors=10), AdaBoostClassifier(), RandomForestClassifier(100))\n\n names = [\n \"Naive Bayes\", \"SVM\", \"Decision Trees\", \"KNeighbors\", \"AdaBoost\",\n \"Random Forest\"\n ]\n\n for idx, clf in enumerate(classifiers):\n\n clf_cv = clone(clf)\n\n print(\"\\n\", names[idx], \"\\n\", \"-\" * 20)\n\n t0 = time()\n # Fitting the model without cross validation\n clf.fit(x_train, y_train[:, 0])\n train_time = time() - t0\n y_pred = clf.predict(x_test)\n accuracy = accuracy_score(y_pred, y_test[:, 0])\n\n if cross_validation:\n k_fold = KFold(n_splits=10)\n\n t0 = time()\n # Fitting the model with cross validation\n for id_train, id_test in k_fold.split(x_train):\n # print(y_train[id_train, 0].shape)\n clf_cv.fit(x_train[id_train], y_train[id_train, 0])\n train_time_cv = time() - t0\n\n y_pred_cv = clf_cv.predict(x_test)\n accuracy_cv = accuracy_score(y_pred_cv, y_test[:, 0])\n\n print(\"Test Accuracy: \\t {:.3f}\".format(accuracy))\n if cross_validation:\n print(\"Test Accuracy CV:\\t {:.3f}\".format(accuracy_cv))\n\n print(\"Training Time: \\t {:.1f} ms\".format(train_time * 1000))\n if cross_validation:\n print(\n \"Training Time CV: \\t {:.1f} ms\".format(train_time_cv * 1000))",
"def fit(self) -> None:\n\n self.model = OneClassSVM(nu=self.outlier_fraction, kernel=self.kernel, gamma=self.gamma)\n self.model.fit(self.scale_data)",
"def runSVM():\n X,y=preprocess()\n print(supportVectorRegressor(X,y))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Debugging function that displays the given image with overlaid detection windows, one window size at a time.
|
def display_image_with_windows(image):
    # Generate the grid of candidate detection windows for this frame size
    windows = Perspective_grid(image.shape[1], image.shape[0])
    plt.subplots()
    # Show one window size at a time: enlargement 2 -> 128 px squares, 3 -> 192 px squares
    for enlargement in range(2, 4):
        image_copy = np.copy(image)
        color = [0, 255, 0]
        for window in windows:
            # Only draw windows whose side matches the current enlargement
            if window[2] - window[0] + 1 == 64 * enlargement:
                draw_bounding_box(image_copy, *window, color)
                # Cycle the color so adjacent/overlapping windows stay distinguishable
                color[1] = (color[1] - 64) % 256
                color[2] = (color[2] + 64) % 256
        # Save the annotated frame to disk and also display it (BGR -> RGB for matplotlib)
        cv2.imwrite('windows-' + str(enlargement) + '.png', image_copy)
        plt.imshow(image_copy[:, :, ::-1])
        plt.show()
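For context, a minimal sketch of the two helpers this debugging function relies on, assuming Perspective_grid yields (x0, y0, x1, y1) window tuples at the two sizes checked above (128 and 192 pixels) and draw_bounding_box draws a simple rectangle; the real project helpers may well differ:

import cv2

def draw_bounding_box(image, x0, y0, x1, y1, color=(0, 0, 255), thickness=2):
    # Draw a rectangle outline on the image in place (color is BGR)
    cv2.rectangle(image, (x0, y0), (x1, y1), tuple(color), thickness)

def Perspective_grid(width, height):
    # Hypothetical stand-in: tile the lower half of the frame with square
    # windows of side 128 and 192 pixels, stepping by half a window side
    windows = []
    for side in (128, 192):
        step = side // 2
        for y0 in range(height // 2, height - side + 1, step):
            for x0 in range(0, width - side + 1, step):
                windows.append((x0, y0, x0 + side - 1, y0 + side - 1))
    return windows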
|
[
"def generate_debug_image(image, detections):\n result = image.copy()\n\n for detection in detections:\n draw_detection_in_image(result, detection)\n\n return result",
"def show_images_in_windows(imgs, win_names, win_size):\r\n x = y = 0\r\n for i, img in enumerate(imgs):\r\n w_compress = img.shape[1] / win_size[0]\r\n h_compress = img.shape[0] / win_size[1]\r\n if w_compress > h_compress:\r\n w = win_size[0]\r\n h = img.shape[0] / w_compress\r\n else:\r\n w = img.shape[1] / h_compress\r\n h = win_size[1]\r\n w = int(w)\r\n h = int(h)\r\n\r\n win_name = win_names[i]\r\n cv2.namedWindow(win_name, cv2.WINDOW_NORMAL)\r\n cv2.resizeWindow(win_name, w, h)\r\n cv2.moveWindow(win_name, x, y)\r\n cv2.imshow(win_name, img)\r\n x += w\r\n cv2.waitKey(0) & 0xFF # for 64-bit machine\r\n cv2.destroyAllWindows()",
"def show_debug_results_windows(actual_img, previous_img, yolo_detections_matched, yolo_names_list, sort_tracker,\n sort_matched_tracks, sort_unmatched_reliable_tracks, previous_tracks_showed,\n yolact_masks_dict, yolact_bbox_dict, angle_movements_axis_to_x_axis_dict,\n previous_p0_dict, currents_p0_dict, movements_to_show_dict,\n ids_to_debug: list, bbox_color, windows_proportion_vid: float):\n # Stream yolo detections on im0_yolo\n im0s_yolo = actual_img.copy()\n plot_yolo_detections(im0s_yolo, yolo_detections_matched, yolo_names_list, bbox_color)\n im0s_yolo = cv2.resize(im0s_yolo, (0, 0), fx=windows_proportion_vid, fy=windows_proportion_vid)\n cv2.imshow('Yolo detection', im0s_yolo)\n\n # Stream all tracks from SORT tracker on im0_all_tracks\n im0s_all_tracks = actual_img.copy()\n plot_all_tracks(im0s_all_tracks, sort_tracker.trackers, bbox_color)\n im0s_all_tracks = cv2.resize(im0s_all_tracks, (0, 0), fx=windows_proportion_vid, fy=windows_proportion_vid)\n cv2.imshow('All tracks', im0s_all_tracks)\n\n # Stream matched and reliable tracks on im0s_matched_and_reliable_tracks\n im0s_matched_and_reliable_tracks = actual_img.copy()\n plot_matched_and_reliable_tracks(im0s_matched_and_reliable_tracks, sort_matched_tracks, bbox_color,\n sort_unmatched_reliable_tracks, (0, 0, 0))\n im0s_matched_and_reliable_tracks = cv2.resize(im0s_matched_and_reliable_tracks, (0, 0),\n fx=windows_proportion_vid,\n fy=windows_proportion_vid)\n cv2.imshow('Matched and reliable tracks', im0s_matched_and_reliable_tracks)\n\n all_tracks_to_show = vstack([sort_matched_tracks, sort_unmatched_reliable_tracks]).astype(int)\n # Stream Optical Flow movement on im0s_movement\n for x1, y1, x2, y2, id_track in all_tracks_to_show:\n if id_track in ids_to_debug and id_track in previous_tracks_showed[:, -1]:\n # Create mask_roi\n previous_roi = previous_img.copy()[y1:y2, x1:x2]\n mask_roi = create_mask_roi_img(previous_roi, yolact_masks_dict[id_track],\n yolact_bbox_dict[id_track],\n angle_movements_axis_to_x_axis_dict[id_track])\n\n # Add movement point on im0s_roi_copy\n im0s_roi_copy = actual_img.copy()[y1:y2, x1:x2]\n plot_movements_points(im0s_roi_copy, previous_p0_dict[id_track], currents_p0_dict[id_track])\n\n # Create white_roi for movement results\n white_roi = create_white_roi_movement_img(im0s_roi_copy.shape,\n movements_to_show_dict[id_track],\n angle_movements_axis_to_x_axis_dict[id_track])\n\n # Concatenate mask_roi, im0s_roi_copy and white_roi to stream the results\n im0s_movement = concatenate((mask_roi, im0s_roi_copy, white_roi), axis=1)\n im0s_movement = cv2.resize(im0s_movement, (0, 0), fx=windows_proportion_vid * 2,\n fy=windows_proportion_vid * 2)\n cv2.imshow(f'Movement track n. {id_track}', im0s_movement)\n\n # Destroy dead windows 'Movement track n. {id_track}'\n for id_track in setdiff1d(previous_tracks_showed[:, -1], all_tracks_to_show[:, -1]):\n if id_track in ids_to_debug:\n cv2.destroyWindow(f'Movement track n. {id_track}')",
"def show_monitor_img(img):\n show_img = 'eog --fullscreen ' + img + ' &'\n #time.sleep(0.1)\n os.system(show_img)",
"def opening(img):\n kernel = numpy.ones((7, 7), numpy.uint8)\n opening_img = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)\n cv2.imshow('Opening', opening_img)\n cv2.waitKey()\n cv2.destroyAllWindows()",
"def show_image(i, height, j, width, image, sub_images):\n h_start = 300 * i\n h_end = min(300 * (i+1), height)\n w_start = 300 * j\n w_end = min(300 * (j+1), width)\n sub_image = np.copy(image[h_start:h_end,w_start:w_end,:])\n sub_image[:,:,3] = 255\n sub_images.append(sub_image)\n plt.imshow(sub_image)\n plt.draw()\n plt.pause(.001)",
"def imshow(*images):\r\n\r\n print('Showing %d image(s)...' % (len(images)))\r\n\r\n for i, img in enumerate(images):\r\n print('Image %d of shape %s, dtype %s.' % (i + 1, img.shape, img.dtype))\r\n print(img, '\\n')\r\n cv2.imshow('Image ' + str(i + 1), img)\r\n print('Press Q to close image(s)...')\r\n cv2.waitKey(0)",
"def split_image_bits_result_windows(self, parent: ApoProjectCore, img_1: list, img_2: list, img_3: list,\n img_4: list, img_5: list, img_6: list, img_7: list, img_8: list,\n from_x: int, from_y: int, to_x: int, to_y: int):\n size_x = to_x - from_x\n size_y = to_y - from_y\n for i in range(1, 9):\n # Dynamically create windows and name variables for all 8 images, reducing amount of repetitive code by 8x\n exec(f\"bit_img_{i} = tk.Toplevel()\")\n img_title = \"Obraz wynikowy - obraz powstały z konkretnych bitów\"\n exec(f\"img_title_{i} = 'Obraz wynikowy - obraz powstały z rozdzielenia bitów'\")\n while img_title in parent.all_open_image_data.keys():\n exec(f\"helper_index_{i} += 1\")\n helper_index_helper = f\"helper_index_{i}\"\n img_title = f\"Obraz wynikowy - obraz powstały z {i}-ych bitów(\" + exec(helper_index_helper) + \")\"\n title_var_name_helper = f\"bit_img_{i}\"\n exec(f\"{title_var_name_helper}.title(\\'{img_title} - {i} bit\\')\")\n exec(f\"parent.edited_image_data[1] = img_{i}\")\n\n parent.pil_image_data = Image.new(\"L\",\n (size_x, size_y))\n exec(f\"parent.pil_image_data.putdata(img_{i})\")\n exec(f\"picture_label_{i} = tk.Label(bit_img_{i})\")\n exec(f\"picture_label_{i}.pack()\")\n\n parent.histogram_image_data = [\"Image out of specific bit\", parent.pil_image_data.getdata()]\n exec(f\"self.selected_picture_{i} = ImageTk.PhotoImage(parent.pil_image_data)\")\n exec(f\"picture_label_{i}.configure(image=self.selected_picture_{i})\")",
"def display_img():\n global o_img, p_img\n if o_img is None or p_img is None:\n messagebox.showinfo('Error', 'No image to compare.')\n return\n o_img_first = decode_resize_img(o_img[0])\n p_img_first = decode_resize_img(p_img[0])\n disp_window = Toplevel()\n o_img_label = ttk.Label(disp_window, text='Original Image')\n o_img_label.grid(column=0, row=0)\n o_img_canv = Canvas(disp_window, bg='white', width=500, height=300)\n o_img_canv.grid(column=0, row=1)\n o_img_canv.create_image(250, 200, image=o_img_first)\n p_img_label = ttk.Label(disp_window, text='Processed Image')\n p_img_label.grid(column=1, row=0)\n p_img_canv = Canvas(disp_window, bg='white', width=500, height=300)\n p_img_canv.grid(column=1, row=1)\n p_img_canv.create_image(250, 200, image=p_img_first)\n disp_window.mainloop()\n return None",
"def _show(container, start=-1, stop=-1):\n if start == stop:\n cont = [container[start]]\n else:\n cont = container[start:stop]\n\n try:\n for i, img in enumerate(cont):\n DebugDisplay.show_resized(str(i), img)\n except IndexError:\n print(\"No such value\")",
"def visualize_annotated_data():\n print(\"Press ESC to close the window. Any other key to continue\")\n while True:\n anno_file = random.choice(os.listdir(annotations_pool))\n xml_path = os.path.join(annotations_pool, anno_file)\n image_file, gt_boxes = read_content(xml_path)\n image_path = os.path.join(images_pool, image_file)\n\n image = cv2.imread(image_path)\n\n for box in gt_boxes:\n image = cv2.rectangle(image, (box[0], box[1]), (box[2], box[3]), color=(0,255,0), thickness=2)\n\n cv2.imshow(\"Visualization\", image)\n\n k = cv2.waitKey(0) & 0xFF\n if k == 27: # Esc key to stop\n break\n elif k == -1: # normally -1 returned,so don't print it\n continue\n\n cv2.destroyAllWindows()\n return",
"def displayImage(image):\n # Create a window of the correct dimensions\n width = image.getWidth()\n height = image.getHeight()\n imageWindow = cImage.ImageWin(\"Image Viewer\", width, height)\n\n # Display the image in this window\n image.draw(imageWindow)\n\n # Wait for a mouse click to close the window\n imageWindow.exitOnClick()",
"def find_lane_pixels_byWin(binary_warped):\n # Take a histogram of the bottom half of the image\n histogram = np.sum(binary_warped[binary_warped.shape[0]//2:,:], axis=0)\n \n # Find the peak of the left and right halves of the histogram\n # These will be the starting point for the left and right lines\n midpoint = np.int(histogram.shape[0]//2)\n leftx_base = np.argmax(histogram[:midpoint])\n rightx_base = np.argmax(histogram[midpoint:]) + midpoint\n\n # HYPERPARAMETERS\n # Choose the number of sliding windows\n nwindows = 9\n # Set the width of the windows +/- margin\n margin = 100\n # Set minimum number of pixels found to recenter window\n minpix = 50\n\n # Set height of windows - based on nwindows above and image shape\n window_height = np.int(binary_warped.shape[0]//nwindows)\n \n # Identify the x and y positions of all nonzero pixels in the image\n nonzero = binary_warped.nonzero()\n nonzeroy = np.array(nonzero[0])\n nonzerox = np.array(nonzero[1])\n\n # Current positions to be updated later for each window in nwindows\n leftx_current = leftx_base\n rightx_current = rightx_base\n\n # Create empty lists to receive left and right lane pixel indices\n left_lane_inds = []\n right_lane_inds = []\n \n # Create an output image to draw on and visualize the result\n out_img = np.dstack((binary_warped, binary_warped, binary_warped))\n \n # Step through the windows one by one\n for window in range(nwindows):\n # Identify window boundaries in x and y (and right and left)\n win_y_low = binary_warped.shape[0] - (window+1)*window_height\n win_y_high = binary_warped.shape[0] - window*window_height\n \n ### Find the four below boundaries of the window ###\n win_xleft_low = leftx_current - margin # left side of window for left lane\n win_xleft_high = leftx_current + margin # right side of window for left lane\n win_xright_low = rightx_current - margin # left side of window for right lane\n win_xright_high = rightx_current + margin # right side of window for right lane\n # Draw the windows on the visualization image --- can be delted if no need\n cv2.rectangle(out_img,(win_xleft_low,win_y_low),\n (win_xleft_high,win_y_high),(0,255,0), 2) \n cv2.rectangle(out_img,(win_xright_low,win_y_low),\n (win_xright_high,win_y_high),(0,255,0), 2) \n \n ### Identify the nonzero pixels in x and y within the window ### 数组逻辑运算!!!\n good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &\n (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]\n good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &\n (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]\n \n \n # Append these indices to the lists\n left_lane_inds.append(good_left_inds)\n right_lane_inds.append(good_right_inds)\n\n ### If found > minpix pixels, recenter next window ###\n ### (`right` or `leftx_current`) on their mean position ### \n if len(good_left_inds) > minpix:\n leftx_current = np.int(np.mean(nonzerox[good_left_inds]))\n if len(good_right_inds) > minpix:\n rightx_current = np.int(np.mean(nonzerox[good_right_inds]))\n \n # Concatenate the arrays of indices (previously was a list of lists(arrays) of pixels)\n try:\n left_lane_inds = np.concatenate(left_lane_inds)\n right_lane_inds = np.concatenate(right_lane_inds)\n except ValueError:\n # Avoids an error if the above is not implemented fully\n pass\n\n # Extract left and right line pixel positions\n leftx = nonzerox[left_lane_inds]\n lefty = nonzeroy[left_lane_inds] \n rightx = nonzerox[right_lane_inds]\n righty = 
nonzeroy[right_lane_inds]\n \n return leftx, lefty, rightx, righty, out_img",
"def ShowSpots(image,spot_mask):\n fig, axes = plt.subplots(nrows = 1, ncols = 2, figsize = (20,10))\n axes[0].imshow(image, cmap = 'gray')\n axes[1].imshow(image, cmap = 'gray')\n axes[1].imshow(np.ma.array(spot_mask, mask = spot_mask==0), \n cmap = 'flag', alpha = 0.5)\n axes[0].title.set_text('original image')\n axes[1].title.set_text('overlay spots')\n plt.tight_layout()\n plt.show()\n return",
"def show_img(self, size, depth): # mostra imagem a partir do algoritimo de\n imagem_branco = self.generate_baseimage(size)\n print(\"imagem_branco\")\n list_triangles = self.create_depths_from_triangle(self.get_triangle_from_square(square.create_from_size(size)),\n depth)\n print(\"lista gerada\")\n imagem = self.draw_all_triangles_from_list( imagem_branco, list_triangles)\n print(\"imagem pronta\")\n cv2.imshow(\"test\", imagem) # testing\n cv2.waitKey()",
"def showImage(titleList,imageList):\n for title, image in zip(titleList, imageList):\n\n cv2.imshow(title,image)\n\n cv2.waitKey(5000)\n\n return 0",
"def plot(image, classified_boxes, window_size):\n fig1 = plt.figure(dpi=400)\n ax1 = fig1.add_subplot(1,1,1) \n ax1.imshow(image, cmap=plt.cm.gray)\n ax1.axis('off')\n for box in classified_boxes:\n x_min, y_min, x_max, y_max = box[0]-.5, box[1]-.5, box[0]+window_size[0]-.5, box[1]+window_size[1]-.5\n prediction, predict_score = box[2], box[3]\n ax1.text(x_min, y_min-3, \"%s %d%%\" % (prediction, predict_score*100), color=\"red\", fontsize=3)\n x = [x_max, x_max, x_min, x_min, x_max]\n y = [y_max, y_min, y_min, y_max, y_max]\n line, = ax1.plot(x,y,color=\"red\")\n line.set_linewidth(.5)\n fig1.savefig(\"classification.png\")\n plt.show()\n return",
"def show_frames(self, wait=0):\n n = 0\n for window, frame in zip(self.windows, self.get_frames()):\n cv2.imshow(window, frame)\n cv2.moveWindow(window, n * 660 + 20, 40)\n n += 1\n cv2.waitKey(wait)",
"def draw_debug_image(feature, filepath):\n img = Image.open(filepath)\n drawer = ImageDraw.Draw(img)\n r = 10\n pairs = [\n (0, 1),\n (0, 15),\n (0, 16),\n (1, 2),\n (1, 5),\n (1, 8),\n (2, 3),\n (3, 4),\n (5, 6),\n (6, 7),\n (8, 9),\n (8, 12),\n (9, 10),\n (10, 11),\n (11, 22),\n (11, 24),\n (12, 13),\n (13, 14),\n (14, 19),\n (14, 21),\n (15, 17),\n (16, 18),\n (19, 20),\n (22, 23)]\n # draw keypoints\n for point in feature:\n x, y = point\n drawer.ellipse((x-r, y-r, x+r, y+r), fill=(255, 0, 0))\n threads = []\n # draw lines\n for pair in pairs:\n start = pair[0]\n end = pair[1]\n drawer.line([feature[start][0], feature[start][1], feature[end][0],\n feature[end][1]], fill=(255, 0, 0), width=5)\n return img"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Find the bounding boxes of cars in the given camera frame.
|
def find_bounding_boxes(frame, classifier, scaler):
windows = Perspective_grid(frame.shape[1], frame.shape[0])
total_windows, positive_windows = 0, 0
bounding_boxes = [] # Windows where cars are detected will be appended here
# Iterate over detection windows
for window in windows:
total_windows += 1
x0, y0, x1, y1 = window
# resize the window content as necessary
width = x1 - x0 + 1
height = y1 - y0 + 1
image = frame[y0:y1 + 1, x0:x1 + 1, :] # (rows, columns)
if width != Params.image_width or height != Params.image_height:
size = width * height
desired_size = Params.image_width * Params.image_height
interpolation = cv2.INTER_AREA if desired_size < size else cv2.INTER_LINEAR
image = cv2.resize(image,
(Params.image_width, Params.image_height),
interpolation=interpolation)
# Get the features vector for the image, and scale it if requested
features = compute_image_features(image)
if scaler is not None:
features = scaler.transform([features])
features = np.squeeze(features)
# Classify the window content and update the list of bounding boxes for positive detections
classification = classifier.predict([features])
if classification[0] == Params.car_label:
bounding_boxes.append((x0, y0, x1, y1))
positive_windows += 1
return bounding_boxes, total_windows
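The function above leans on a project-level Params namespace for the classifier's training-image size and the positive class label; a minimal placeholder consistent with the calls made here (the real project values may differ):

class Params:
    # Hypothetical values: the classifier is assumed to be trained on 64x64 crops
    image_width = 64
    image_height = 64
    car_label = 1  # label predicted for the positive (car) class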
|
[
"def get_bounding_box(vehicle, camera):\n\n bb_cords = BBoxUtil._create_bb_points(vehicle)\n cords_x_y_z = BBoxUtil._vehicle_to_sensor(bb_cords, vehicle, camera)[:3, :]\n cords_y_minus_z_x = np.concatenate([cords_x_y_z[1, :], -cords_x_y_z[2, :], cords_x_y_z[0, :]])\n bbox = np.transpose(np.dot(camera.calibration, cords_y_minus_z_x))\n camera_bbox = np.concatenate([bbox[:, 0] / bbox[:, 2], bbox[:, 1] / bbox[:, 2], bbox[:, 2]], axis=1)\n return camera_bbox",
"def get_bounding_boxes(vehicles, camera):\n\n bounding_boxes = [BBoxUtil.get_bounding_box(vehicle, camera) for vehicle in vehicles]\n # filter objects behind camera\n bounding_boxes = [bb for bb in bounding_boxes if all(bb[:, 2] > 0)]\n return bounding_boxes",
"def get_bounding_box(vehicle, camera):\n\n bb_cords = ClientSideBoundingBoxes._create_bb_points(vehicle)\n cords_x_y_z = ClientSideBoundingBoxes._vehicle_to_sensor(bb_cords, vehicle, camera)[:3, :]\n cords_y_minus_z_x = np.concatenate([cords_x_y_z[1, :], -cords_x_y_z[2, :], cords_x_y_z[0, :]])\n bbox = np.transpose(np.dot(camera.calibration, cords_y_minus_z_x))\n camera_bbox = np.concatenate([bbox[:, 0] / bbox[:, 2], bbox[:, 1] / bbox[:, 2], bbox[:, 2]], axis=1)\n if all(camera_bbox[:, 2] > 0):\n x_min,y_min = np.min(camera_bbox,axis=0).tolist()[0][:2]\n x_max,y_max = np.max(camera_bbox,axis=0).tolist()[0][:2]\n if x_max < 0 or y_max < 0 or x_min > 1242 or y_min > 375 or x_max-x_min > 1242 or y_max-y_min > 375:\n return None\n else:\n # print(camera_bbox[:, 2])\n return [vehicle.id,x_min,y_min,x_max,y_max]\n else:\n return None",
"def get_bounding_boxes(vehicles, camera):\n\n bounding_boxes = [ClientSideBoundingBoxes.get_bounding_box(vehicle, camera) for vehicle in vehicles]\n # filter objects behind camera\n bounding_boxes = [bb for bb in bounding_boxes if bb is not None]\n return bounding_boxes",
"def get_bounding_box(self):\n\n\t\thalf_width = CAR_BOUNDING_BOX_WIDTH/2\n\t\thalf_height = CAR_BOUDNING_BOX_HEIGHT/2\n\t\tcar_rect = [\n\t\t\t(Vector(half_width, half_height).rotated(self.direction) + self.position).as_tuple(),\n\t\t\t(Vector(half_width, -half_height).rotated(self.direction) + self.position).as_tuple(),\n\t\t\t(Vector(-half_width, -half_height).rotated(self.direction) + self.position).as_tuple(),\n\t\t\t(Vector(-half_width, half_height).rotated(self.direction) + self.position).as_tuple()\n\t\t]\n\t\treturn car_rect",
"def request_bounding_box(frame, height=700):\n\tscale = frame.shape[0] / height\n\n\tbbox = cv2.selectROI(\"Select ROI\", imutils.resize(frame, height=height), False)\n\tcv2.destroyWindow(\"Select ROI\")\n\tcv2.waitKey(1)\n\n\tnew_bbox = (int(bbox[0] * scale), int(bbox[1] * scale),\n\t\t\t\tint(bbox[2] * scale), int(bbox[3] * scale))\n\treturn new_bbox",
"def get_bounding_boxes_from_segmented(frame, min_width=2, min_height=3):\n bboxes = []\n # Labels the connected segmented pixels.\n map_labeled = measure.label(frame, connectivity=1)\n # Extract the regions out of the labeled frames.\n for region in measure.regionprops(map_labeled):\n x_min = region.bbox[1]\n x_max = region.bbox[3]\n y_min = region.bbox[0]\n y_max = region.bbox[2]\n # Filter the bboxes that are extremely small.\n if x_max - x_min > min_width and y_max - y_min > min_height:\n bboxes.append((x_min, x_max, y_min, y_max))\n return bboxes",
"def get_bounding_boxes(actors, camera, calibration):\n\n bounding_boxes = [\n ClientSideBoundingBoxes.get_bounding_box(actor, camera, calibration)\n for actor in actors\n ]\n metadata = [ClientSideBoundingBoxes.get_metadata(actor) for actor in actors]\n # embed()\n # filter objects behind camera\n final_bboxes = []\n final_metadata = []\n for i in range(len(bounding_boxes)):\n if all(bounding_boxes[i][:, 2] > 0):\n final_bboxes.append(bounding_boxes[i])\n final_metadata.append(metadata[i])\n return final_bboxes, final_metadata",
"def _detect_face(self, frame):\n face_coords = list()\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n rects = self.detector(gray, 0)\n print(rects)\n # get bounding box for every face in the frame\n for i, d in enumerate(rects):\n x1 = d.left()-consts.PADDING\n y1 = d.top()-consts.PADDING\n x2 = d.right()+consts.PADDING\n y2 = d.bottom()+consts.PADDING\n face_coords.append((x1, y1, x2, y2))\n return face_coords",
"def bounding_boxes(frame, output, args):\n width = int(frame.shape[1]) \n height = int(frame.shape[0])\n op_count = 0 # Number of objects detected in the frame\n \n for box in output: # Output is squeezed here\n output_id = box[0]\n label = box[1]\n conf = box[2]\n \n # Break loop if first output in batch has id -1,\n # indicating no object further detected\n if output_id == -1:\n break\n \n # Draw box if object detected is person with conf>threshold\n elif (label == 1 and conf >= args.prob_threshold):\n x_min = int(box[3] * width)\n y_min = int(box[4] * height)\n x_max = int(box[5] * width)\n y_max = int(box[6] * height)\n cv2.rectangle(frame, (x_min, y_min), (x_max, y_max), (0, 0, 255), 1)\n op_count += 1\n \n return frame, op_count",
"def _computeCameraBounds(self):\n cameraBounds = self.camera.node().getLens().makeBounds()\n cameraBounds.xform(self.camera.getMat(self.showbase.render))\n return cameraBounds",
"def find_orbs(self, frame):\n features = cv2.goodFeaturesToTrack(\n image=np.mean(frame, axis=2).astype(np.uint8),\n maxCorners=3000,\n qualityLevel=0.01,\n minDistance=3,\n )\n keypoints = []\n\n for feature in features:\n u, v = map(lambda x: int(round(x)), feature[0])\n cv2.circle(img=frame, center=(u, v), color=(0, 255, 0), radius=2)\n keypoint = cv2.KeyPoint(x=feature[0][0], y=feature[0][1], _size=20)\n keypoints.append(keypoint)\n\n return self.orb.compute(frame, keypoints)",
"def find_cars(img,\n params,\n svc, X_scaler,\n search_params):\n\n# # Draw bounding boxes on a copy of the original image.\n# img_detect = np.copy(img)\n#\n# bbox_list = []\n# windows = slide_window(img,\n# y_start_stop=search_params.y_start_stop,\n# xy_window=search_params.xy_window,\n# xy_overlap=search_params.xy_overlap)\n# for bbox in windows:\n# img_window = cv2.resize(img[bbox[0][1]:bbox[1][1], bbox[0][0]:bbox[1][0]],\n# (64, 64)) # Training images are size 64x64\n# features = extract_features(img_window, params)\n#\n# scaled_features = X_scaler.transform(features.reshape(1, -1))\n# pred = svc.predict(scaled_features)\n#\n# if pred == 1:\n# bbox_list.append(bbox)\n# cv2.rectangle(img_detect, bbox[0], bbox[1], (0, 0, 255), 6)\n#\n# return bbox_list, img_detect\n\n\n # Draw bounding boxes on a copy of the original image.\n img_detect = np.copy(img)\n\n #\n # Image pre-processing.\n #\n\n img = img.astype(np.float32) / 255 # normalize\n img = img[search_params.y_start_stop[0]:search_params.y_start_stop[1], :, :] # clip\n\n # Apply color conversion if necessary.\n if params.color_space in ['HSV', 'LUV', 'HLS', 'YUV', 'YCrCb']:\n if params.color_space == 'HSV':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n elif params.color_space == 'LUV':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2LUV)\n elif params.color_space == 'HLS':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n elif params.color_space == 'YUV':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YUV)\n elif params.color_space == 'YCrCb':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)\n else:\n feature_image = np.copy(img)\n\n # Scale\n if search_params.scale != 1:\n imshape = feature_image.shape\n feature_image = cv2.resize(feature_image,\n (np.int(imshape[1] / search_params.scale),\n np.int(imshape[0] / search_params.scale)))\n\n #\n # Initialization\n #\n\n # Since we are using all three channels here for HOG features, we must\n # have set the MODEL_HOG_CHANNEL parameter to 'ALL' else we'll get an\n # error when trying to use the scaler below.\n if params.hog_channel == 'ALL':\n ch1 = feature_image[:, :, 0]\n ch2 = feature_image[:, :, 1]\n ch3 = feature_image[:, :, 2]\n else:\n ch1 = feature_image[:, :, params.hog_channel]\n\n # Define blocks and steps as above\n nxblocks = (ch1.shape[1] // params.pix_per_cell) - params.cells_per_block + 1\n nyblocks = (ch1.shape[0] // params.pix_per_cell) - params.cells_per_block + 1\n# nfeat_per_block = orient * cells_per_block**2\n\n # 64 was the orginal sampling rate, with 8 cells and 8 pix per cell\n window = 64\n nblocks_per_window = (window // params.pix_per_cell) - params.cells_per_block + 1\n cells_per_step = 2 # Instead of overlap, define how many cells to step\n nxsteps = (nxblocks - nblocks_per_window) // cells_per_step + 1\n nysteps = (nyblocks - nblocks_per_window) // cells_per_step + 1\n\n # Compute individual channel HOG features for the entire image here so\n # we need only do it once.\n hog1 = get_hog_features(ch1, params.orient, params.pix_per_cell, params.cells_per_block, feature_vec=False)\n if params.hog_channel == 'ALL':\n hog2 = get_hog_features(ch2, params.orient, params.pix_per_cell, params.cells_per_block, feature_vec=False)\n hog3 = get_hog_features(ch3, params.orient, params.pix_per_cell, params.cells_per_block, feature_vec=False)\n\n #\n # Find cars\n #\n\n bbox_list = []\n for xb in range(nxsteps):\n for yb in range(nysteps):\n ypos = yb * cells_per_step\n xpos = xb * cells_per_step\n\n # Extract HOG for this patch\n hog_feat1 = 
hog1[ypos:ypos + nblocks_per_window, xpos:xpos + nblocks_per_window].ravel()\n hog_feat2 = hog2[ypos:ypos + nblocks_per_window, xpos:xpos + nblocks_per_window].ravel()\n hog_feat3 = hog3[ypos:ypos + nblocks_per_window, xpos:xpos + nblocks_per_window].ravel()\n hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3))\n\n xleft = xpos * params.pix_per_cell\n ytop = ypos * params.pix_per_cell\n\n # Extract the image patch\n subimg = cv2.resize(feature_image[ytop:ytop + window, xleft:xleft + window], (64, 64))\n\n # Get color features\n spatial_features = bin_spatial(subimg, size=params.spatial_size, split_colors=True)\n hist_features = color_hist(subimg, nbins=params.hist_bins)\n\n # Scale features and make a prediction\n combined_features = np.hstack((spatial_features, hist_features, hog_features)).reshape(1, -1)\n test_features = X_scaler.transform(combined_features)\n test_prediction = svc.predict(test_features)\n\n # If the model indicates the presence of a car, add the bounding\n # box to our list and draw it on the return image.\n if test_prediction == 1:\n xbox_left = np.int(xleft * search_params.scale)\n ytop_draw = np.int(ytop * search_params.scale)\n win_draw = np.int(window * search_params.scale)\n bbox = ((xbox_left, ytop_draw + search_params.y_start_stop[0]),\n (xbox_left + win_draw, ytop_draw + win_draw + search_params.y_start_stop[0]))\n bbox_list.append(bbox)\n cv2.rectangle(img_detect, bbox[0], bbox[1], (0, 0, 255), 6)\n\n return bbox_list, img_detect",
"def boundingbox(self):\n\n # angle = radians(self.theta + (self.delta * pos))\n cosr = cos(radians(self.rotation))\n sinr = sin(radians(self.rotation))\n radius = self.radius * self.radius_scale\n\n x_a = -cosr * radius.real\n x_b = -sinr * radius.imag\n x_c = radians(self.theta)\n x_d = radians(self.delta)\n\n y_a = -sinr * radius.real\n y_b = +cosr * radius.imag\n y_c = radians(self.theta)\n y_d = radians(self.delta)\n\n x_pos = [0, 1.0] + _find_solutions_for_arc(x_a, x_b, x_c, x_d)\n y_pos = [0, 1.0] + _find_solutions_for_arc(y_a, y_b, y_c, y_d)\n\n x_coords = []\n y_coords = []\n for pos in x_pos:\n p = self.point(pos)\n x_coords.append(p.real)\n for pos in y_pos:\n p = self.point(pos)\n y_coords.append(p.imag)\n\n x_min, x_max = min(x_coords), max(x_coords)\n y_min, y_max = min(y_coords), max(y_coords)\n return [x_min, y_min, x_max, y_max]",
"def match(self, model_frame):\n # find the box that contained this obs in model_frame\n shape = self.images.shape\n yx0 = model_frame.get_pixel(self.frame.get_sky_coord((0, 0)))\n # channels of model that are represented in this observation\n if self.frame.channels is model_frame.channels:\n origin = (0, *yx0)\n else:\n assert self.frame.channels is not None and model_frame.channels is not None\n cmin = list(model_frame.channels).index(self.frame.channels[0])\n cmax = list(model_frame.channels).index(self.frame.channels[-1])\n origin = (cmin, *yx0)\n self.bbox = Box(shape, origin=origin)\n self.slices = self.bbox.slices_for(model_frame.shape)\n\n # check dtype consistency\n if self.frame.dtype != model_frame.dtype:\n self.frame.dtype = model_frame.dtype\n self.images = self.images.copy().astype(model_frame.dtype)\n if type(self.weights) is np.ndarray:\n self.weights = self.weights.copy().astype(model_frame.dtype)\n\n # constrcut diff kernels\n self._diff_kernels = None\n if self.frame.psf is not model_frame.psf:\n assert self.frame.psf is not None and model_frame.psf is not None\n psf = fft.Fourier(self.frame.psf.update_dtype(model_frame.dtype).image)\n model_psf = fft.Fourier(\n model_frame.psf.update_dtype(model_frame.dtype).image\n )\n self._diff_kernels = fft.match_psfs(psf, model_psf)\n\n return self",
"def _draw_boxes(\n frame: np.ndarray,\n detections: List[Dict],\n) -> np.ndarray:\n\n # for each detection draw a corresponding labeled bounding box\n for detection in detections:\n\n # draw the bounding box of the face\n frame = \\\n cv2.rectangle(\n frame,\n (detection[\"start_x\"], detection[\"start_y\"]),\n (detection[\"end_x\"], detection[\"end_y\"]),\n _BOX_COLOR,\n _BOX_LINE_WIDTH,\n )\n\n # draw the object's label and probability value\n label = f\"{detection['class']}: {int(float(detection['confidence']) * 100)}%\"\n if detection[\"start_y\"] - 10 > 10:\n y = detection[\"start_y\"] - 10\n else:\n y = detection[\"start_y\"] + 10\n frame = cv2.putText(frame,\n label,\n (detection[\"start_x\"], y),\n cv2.FONT_HERSHEY_SIMPLEX,\n 0.45,\n _LABEL_COLOR,\n _LABEL_WIDTH)\n\n return frame",
"def bounding_box(rect, bboxes):\n x1 = 10000\n y1 = 10000\n x2 = 0\n y2 = 0\n\n for b in bboxes:\n x1 = min(b.x1, x1)\n y1 = min(b.y1, y1)\n x2 = max(b.x2, x2)\n y2 = max(b.y2, y2)\n\n rect = Rect(x1, y1, x2 - x1, y2 - y1, rect.prob, rect.text)\n return rect",
"def test_valid_bounding_box(self):\n detection = TestFaceDetector.defaultDetector.detectOne(image=VLIMAGE_ONE_FACE)\n self.assertBoundingBox(detection.boundingBox)\n detection = TestFaceDetector.defaultDetector.detect(images=[VLIMAGE_ONE_FACE])[0][0]\n self.assertBoundingBox(detection.boundingBox)",
"def boundingbox(self):\n g0 = self.control1 - self.start\n g1 = self.control2 - self.control1\n g2 = self.end - self.control2\n\n c0 = 3 * g0\n c1 = -6 * g0 + 6 * g1\n c2 = 3 * g0 - 6 * g1 + 3 * g2\n\n x_c0, x_c1, x_c2 = [c.real for c in [c0, c1, c2]]\n y_c0, y_c1, y_c2 = [c.imag for c in [c0, c1, c2]]\n\n x_cand = [0, 1] + _find_solutions_for_bezier(x_c2, x_c1, x_c0)\n y_cand = [0, 1] + _find_solutions_for_bezier(y_c2, y_c1, y_c0)\n\n x_coords = []\n y_coords = []\n for t in x_cand:\n p = self.point(t)\n x_coords.append(p.real)\n for t in y_cand:\n p = self.point(t)\n y_coords.append(p.imag)\n\n x_min, x_max = min(x_coords), max(x_coords)\n y_min, y_max = min(y_coords), max(y_coords)\n return [x_min, y_min, x_max, y_max]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Detects cars in all the test images that come with the project and saves the resulting annotated images, using the given classifier and scaler.
|
def process_test_images(classifier, scaler):
    # Collect every .jpg/.png test image shipped with the project
    fnames = [name for name in glob.glob('test_images/*.jpg')] + [name for name in glob.glob('test_images/*.png')]
    for fname in fnames:
        frame = cv2.imread(fname)
        start = time()
        # Run the sliding-window detection and time it to estimate the frame rate
        bounding_boxes, total_windows = find_bounding_boxes(frame, classifier, scaler)
        print(fname, 'estimated fps {:.3f}'.format(1 / (time() - start)), 'Positive windows', len(bounding_boxes), '/',
              total_windows)
        # Draw every positive detection on the frame
        for bbox in bounding_boxes:
            draw_bounding_box(frame, *bbox)
        # Save the annotated frame; the test_images/out/ directory is assumed to exist
        base = os.path.basename(fname)
        out_fname = 'test_images/out/' + base
        cv2.imwrite(out_fname, frame)
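Note that cv2.imwrite returns False without raising an error when the target directory is missing, so a caller would typically create test_images/out/ first; a minimal usage sketch (classifier and scaler come from the project's training step):

import os

os.makedirs('test_images/out', exist_ok=True)
process_test_images(classifier, scaler)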
|
[
"def process_test():\n\n test_entry = unpickle(test_file)\n test_dataset = test_entry[b'data']\n test_targets = test_entry[b'fine_labels']\n test_dataset = np.vstack(test_dataset).reshape(-1, 3, 32, 32)\n test_dataset = test_dataset.transpose((0, 2, 3, 1)) \n\n root_path = data_dir + '/cifar100/test/'\n for counter, item in enumerate(test_targets):\n make_dir_if_no_exist(root_path+str(item))\n # write data\n img = test_dataset[counter]\n #bgr_image = cv2.cvtColor(img.astype(np.uint8), cv2.COLOR_RGB2BGR)\n file_path = root_path+str(item)+'/'+\"test_img_{0}.jpg\".format(str(counter))\n #print(file_path)\n # something breaks here\n #cv2.imwrite(file_path, bgr_image)\n imageio.imwrite(file_path, img)",
"def detect_vehicles(img, classifier):\n\n if isinstance(img, str): # this is a file\n img = utils.read_image(img)\n elif not isinstance(img, np.ndarray):\n raise ValueError('bad image: {}'.format(img))\n\n h, w = img.shape[:2]\n windows = utils.slide_window(\n img,\n y_start_stop=[h/2, h],\n min_size=config.min_window_size,\n max_size=config.max_window_size,\n step_size=config.step_size_for_window\n )\n print(\"searching for cars in {} windows in image\".format(len(windows)))\n\n hot_windows = utils.search_windows(\n img,\n windows,\n classifier.model,\n classifier.scaler,\n color_space=config.color_space,\n spatial_size=config.spatial_size,\n hist_bins=config.hist_bins,\n hist_range=config.hist_range,\n orient=config.orient,\n pix_per_cell=config.pix_per_cell,\n cell_per_block=config.cell_per_block,\n hog_channel=config.hog_channel,\n spatial_feat=config.spatial_feat,\n hist_feat=config.hist_feat,\n hog_feat=config.hog_feat\n )\n print(\"found car in {} windows\".format(len(hot_windows)))\n\n image = Image(img, hot_windows)\n\n return image",
"def ProcessTestImages(self, image_dir, output_dir):\n images = glob.glob(os.path.join(image_dir, '*.jpg'))\n for fname in images:\n print('Processing image {}'.format(fname))\n _, name = os.path.split(fname)\n name, ext = os.path.splitext(name)\n\n # Read the image.\n img = mpimg.imread(fname) # RGB\n\n # Find vehicles\n self.FindVehicles(img, output_dir=output_dir, img_name=(name, ext))",
"def get_classifier_test_images():\n images, labels = get_images_labels_path((test_path + \"Doors/*.jpg\", test_path + \"Indoors/*.jpg\"), (1, 2))\n \n x_train = numpy.array(images)\n y_train = numpy.array(labels)\n \n return x_train, y_train",
"def process_train():\n\n train_entry = unpickle(train_file)\n train_dataset = train_entry[b'data']\n train_targets = train_entry[b'fine_labels'] # will need to edit for coarse\n train_dataset = np.vstack(train_dataset).reshape(-1, 3, 32, 32)\n train_dataset = train_dataset.transpose((0, 2, 3, 1)) \n\n meta_entry = unpickle(meta_file)\n meta_entry[b'fine_label_names']\n\n root_path = data_dir + '/cifar100/train/'\n for counter, item in enumerate(train_targets):\n make_dir_if_no_exist(root_path+str(item))\n # write data\n img = train_dataset[counter]\n #bgr_image = cv2.cvtColor(img.astype(np.uint8), cv2.COLOR_RGB2BGR)\n file_path = root_path+str(item)+'/'+\"train_img_{0}.jpg\".format(str(counter))\n #print(file_path)\n # something breaks here\n #cv2.imwrite(file_path, bgr_image)\n imageio.imwrite(file_path, img)",
"def save_images(data_loaders, train_labels):\n training_images_rgb_folder = os.path.join(os.path.abspath(__file__), '..', 'training_images_rgb')\n training_images_grayscale_folder = os.path.join(os.path.abspath(__file__), '..', 'training_images_grayscale')\n test_images_rgb_folder = os.path.join(os.path.abspath(__file__), '..', 'test_images_rgb')\n test_images_grayscale_folder = os.path.join(os.path.abspath(__file__), '..', 'test_images_grayscale')\n\n if not os.listdir(training_images_rgb_folder):\n index = 0\n for sample in data_loaders[\"train\"].dataset.imgs:\n image_name = f\"Image_{index}_covid{train_labels[index].numpy()}.png\"\n plt.imsave(os.path.join(training_images_rgb_folder, image_name), sample[0])\n plt.imsave(os.path.join(training_images_grayscale_folder, image_name), sample[0], cmap='gray')\n print(f\"Saved {image_name}\")\n index += 1\n\n if not os.listdir(test_images_rgb_folder):\n index = 0\n for sample in data_loaders[\"test\"].dataset.imgs:\n image_name = f\"Image_{index}.png\"\n plt.imsave(os.path.join(test_images_rgb_folder, image_name), sample[0])\n plt.imsave(os.path.join(test_images_grayscale_folder, image_name), sample[0], cmap='gray')\n print(f\"Saved {image_name}\")\n index += 1",
"def test_model(imdb, model, detection_file = None):\n N_WORDS = len(model['vocab']['words'])\n sc = np.zeros((imdb.num_images, N_WORDS), dtype=np.float)\n mil_prob = np.zeros((imdb.num_images, N_WORDS), dtype=np.float)\n for i in xrange(len(imdb.image_index)):\n im = cv2.imread(imdb.image_path_at(i))\n sc[i,:], mil_prob[i,:] = test_img(im, model['net'], model['base_image_size'], model['means'])\n utils.tic_toc_print(60, 'test_img : {:6d}/{:6d}'.format(i, len(imdb.image_index)))\n\n if detection_file is not None:\n utils.save_variables(detection_file, [sc, mil_prob, model['vocab'], imdb],\n ['sc', 'mil_prob', 'vocab', 'imdb'], overwrite = True)",
"def find_cars(img,\n params,\n svc, X_scaler,\n search_params):\n\n# # Draw bounding boxes on a copy of the original image.\n# img_detect = np.copy(img)\n#\n# bbox_list = []\n# windows = slide_window(img,\n# y_start_stop=search_params.y_start_stop,\n# xy_window=search_params.xy_window,\n# xy_overlap=search_params.xy_overlap)\n# for bbox in windows:\n# img_window = cv2.resize(img[bbox[0][1]:bbox[1][1], bbox[0][0]:bbox[1][0]],\n# (64, 64)) # Training images are size 64x64\n# features = extract_features(img_window, params)\n#\n# scaled_features = X_scaler.transform(features.reshape(1, -1))\n# pred = svc.predict(scaled_features)\n#\n# if pred == 1:\n# bbox_list.append(bbox)\n# cv2.rectangle(img_detect, bbox[0], bbox[1], (0, 0, 255), 6)\n#\n# return bbox_list, img_detect\n\n\n # Draw bounding boxes on a copy of the original image.\n img_detect = np.copy(img)\n\n #\n # Image pre-processing.\n #\n\n img = img.astype(np.float32) / 255 # normalize\n img = img[search_params.y_start_stop[0]:search_params.y_start_stop[1], :, :] # clip\n\n # Apply color conversion if necessary.\n if params.color_space in ['HSV', 'LUV', 'HLS', 'YUV', 'YCrCb']:\n if params.color_space == 'HSV':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n elif params.color_space == 'LUV':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2LUV)\n elif params.color_space == 'HLS':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n elif params.color_space == 'YUV':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YUV)\n elif params.color_space == 'YCrCb':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)\n else:\n feature_image = np.copy(img)\n\n # Scale\n if search_params.scale != 1:\n imshape = feature_image.shape\n feature_image = cv2.resize(feature_image,\n (np.int(imshape[1] / search_params.scale),\n np.int(imshape[0] / search_params.scale)))\n\n #\n # Initialization\n #\n\n # Since we are using all three channels here for HOG features, we must\n # have set the MODEL_HOG_CHANNEL parameter to 'ALL' else we'll get an\n # error when trying to use the scaler below.\n if params.hog_channel == 'ALL':\n ch1 = feature_image[:, :, 0]\n ch2 = feature_image[:, :, 1]\n ch3 = feature_image[:, :, 2]\n else:\n ch1 = feature_image[:, :, params.hog_channel]\n\n # Define blocks and steps as above\n nxblocks = (ch1.shape[1] // params.pix_per_cell) - params.cells_per_block + 1\n nyblocks = (ch1.shape[0] // params.pix_per_cell) - params.cells_per_block + 1\n# nfeat_per_block = orient * cells_per_block**2\n\n # 64 was the orginal sampling rate, with 8 cells and 8 pix per cell\n window = 64\n nblocks_per_window = (window // params.pix_per_cell) - params.cells_per_block + 1\n cells_per_step = 2 # Instead of overlap, define how many cells to step\n nxsteps = (nxblocks - nblocks_per_window) // cells_per_step + 1\n nysteps = (nyblocks - nblocks_per_window) // cells_per_step + 1\n\n # Compute individual channel HOG features for the entire image here so\n # we need only do it once.\n hog1 = get_hog_features(ch1, params.orient, params.pix_per_cell, params.cells_per_block, feature_vec=False)\n if params.hog_channel == 'ALL':\n hog2 = get_hog_features(ch2, params.orient, params.pix_per_cell, params.cells_per_block, feature_vec=False)\n hog3 = get_hog_features(ch3, params.orient, params.pix_per_cell, params.cells_per_block, feature_vec=False)\n\n #\n # Find cars\n #\n\n bbox_list = []\n for xb in range(nxsteps):\n for yb in range(nysteps):\n ypos = yb * cells_per_step\n xpos = xb * cells_per_step\n\n # Extract HOG for this patch\n hog_feat1 = 
hog1[ypos:ypos + nblocks_per_window, xpos:xpos + nblocks_per_window].ravel()\n hog_feat2 = hog2[ypos:ypos + nblocks_per_window, xpos:xpos + nblocks_per_window].ravel()\n hog_feat3 = hog3[ypos:ypos + nblocks_per_window, xpos:xpos + nblocks_per_window].ravel()\n hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3))\n\n xleft = xpos * params.pix_per_cell\n ytop = ypos * params.pix_per_cell\n\n # Extract the image patch\n subimg = cv2.resize(feature_image[ytop:ytop + window, xleft:xleft + window], (64, 64))\n\n # Get color features\n spatial_features = bin_spatial(subimg, size=params.spatial_size, split_colors=True)\n hist_features = color_hist(subimg, nbins=params.hist_bins)\n\n # Scale features and make a prediction\n combined_features = np.hstack((spatial_features, hist_features, hog_features)).reshape(1, -1)\n test_features = X_scaler.transform(combined_features)\n test_prediction = svc.predict(test_features)\n\n # If the model indicates the presence of a car, add the bounding\n # box to our list and draw it on the return image.\n if test_prediction == 1:\n xbox_left = np.int(xleft * search_params.scale)\n ytop_draw = np.int(ytop * search_params.scale)\n win_draw = np.int(window * search_params.scale)\n bbox = ((xbox_left, ytop_draw + search_params.y_start_stop[0]),\n (xbox_left + win_draw, ytop_draw + win_draw + search_params.y_start_stop[0]))\n bbox_list.append(bbox)\n cv2.rectangle(img_detect, bbox[0], bbox[1], (0, 0, 255), 6)\n\n return bbox_list, img_detect",
"def run_detect(gpus=None, caffenet=None, caffemodel=None,\n exp=None, thresh=None, class_thresh=None, obj_thresh=None,\n test_data=None,\n logger=None, interval=1, iterations=0, input_range=None, root=None, expid=None):\n if gpus is None:\n gpus = list(gpu_indices())\n num_gpu = len(gpus)\n logging.info(\"Detecting {} for {} on {} GPUs\".format(caffemodel, test_data, num_gpu))\n\n if exp is not None:\n imdb = exp.imdb\n else:\n assert test_data\n assert caffenet and caffemodel and op.isfile(caffenet) and op.isfile(caffemodel)\n name = None\n if op.isabs(test_data) and (op.isdir(test_data) or op.isfile(test_data)):\n imdb = ImageDatabase(test_data)\n else:\n # when test_data is a subdirectory inside 'data', quickdetection generated\n for fname in ['testX.tsv', 'test.tsv']:\n intsv_file = op.join('data', test_data, fname)\n if op.isfile(intsv_file):\n break\n name = op.basename(caffemodel) + \".\" + test_data\n imdb = ImageDatabase(intsv_file, name=test_data)\n if expid:\n name += \".{}\".format(expid)\n\n caffemodel_clone = None\n if op.isdir(\"/tmp\"):\n caffemodel_clone = op.join(\"/tmp\", \"{}.caffemodel\".format(ompi_rank()))\n shutil.copy(caffemodel, caffemodel_clone)\n if os.stat(caffemodel_clone).st_size < 1:\n logging.error(\"caffemodel: {} is not ready yet\".format(caffemodel))\n return\n exp = Experiment(imdb, caffenet, caffemodel, caffemodel_clone=caffemodel_clone,\n input_range=input_range, name=name, root=root, expid=expid)\n\n outtsv_file = exp.predict_path\n if op.isfile(outtsv_file):\n logging.info(\"Ignore already computed prediction: {} Experiment: {}\".format(outtsv_file, exp))\n return exp\n\n # create one detector for each GPU\n detectors = [\n Detector(exp, num_gpu=num_gpu, gpu=gpu) for gpu in gpus\n ]\n\n logging.info(\"Detection Experiment {}\".format(exp))\n\n if input_range is None:\n input_range = six.moves.range(len(imdb))\n else:\n assert input_range[0] >= 0, \"Invalid range: {} in {}\".format(input_range, imdb)\n if input_range[-1] >= len(imdb):\n logging.info(\"Last range corrected: {} in {}\".format(input_range, imdb))\n input_range = range(input_range[0], len(imdb))\n if len(input_range) == 0:\n logging.warning(\"Empty range: {} Experiment: {}\".format(input_range, exp))\n return exp\n total_count = len(input_range)\n assert total_count, \"No data to evaluate in experiment: {}\".format(exp)\n assert total_count < 0xFFFFFFFF, \"Too many images to evaluate\"\n processed = 0\n in_queue = Queue(2000 * len(gpus))\n\n def result_done(res):\n in_queue.put(res.result())\n\n writer = None\n reader = None\n try:\n # noinspection PyBroadException\n try:\n writer = Process(name=\"writer\", target=write_predict, args=(outtsv_file, in_queue,))\n writer.daemon = True\n writer.start()\n\n out_queue = Queue(400 * len(gpus))\n reader = Process(name=\"reader\", target=read_image, args=(imdb, input_range, out_queue,))\n reader.daemon = True\n reader.start()\n\n idx = 0\n while True:\n idx += 1\n out = out_queue.get()\n if not out:\n break\n key, im = out\n det_idx = idx % len(detectors)\n detector = detectors[det_idx]\n result = detector.detect_async(\n key, im=im,\n thresh=thresh, class_thresh=class_thresh, obj_thresh=obj_thresh\n )\n result.add_done_callback(result_done) # call when future is done to averlap\n processed += 1\n if logger and processed % 100 == 0:\n logger.set_iterations(iterations + interval * float(processed) / total_count)\n except Exception as e:\n logging.error(\"Exception thrown: {}\".format(e))\n raise\n finally:\n logging.info(\"Joining reader\")\n if 
reader:\n reader.join()\n logging.info(\"Shutting down the detectors\")\n for detector in detectors:\n detector.shutdown()\n if writer:\n in_queue.put(None)\n writer.join()\n return exp",
"def classify_images():\n tasks = load_batch_tasks()\n\n if tasks is not None:\n daemo.publish(\n project_key=PROJECT_KEY,\n tasks=tasks,\n approve=approve_correct_response,\n completed=rate_workers\n )",
"def main():\n MODEL_URL = \"https://github.com/robmarkcole/object-detection-app/raw/master/model/MobileNetSSD_deploy.caffemodel\" # noqa: E501\n MODEL_LOCAL_PATH = HERE / \"./models/MobileNetSSD_deploy.caffemodel\"\n PROTOTXT_URL = \"https://github.com/robmarkcole/object-detection-app/raw/master/model/MobileNetSSD_deploy.prototxt.txt\" # noqa: E501\n PROTOTXT_LOCAL_PATH = HERE / \"./models/MobileNetSSD_deploy.prototxt.txt\"\n CLASSES_FILE = HERE / \"./info/classes.txt\" \n \n with open(CLASSES_FILE) as f:\n CLASSES = [line.rstrip() for line in f]\n \n COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))\n\n download_file(MODEL_URL, MODEL_LOCAL_PATH, expected_size=23147564)\n download_file(PROTOTXT_URL, PROTOTXT_LOCAL_PATH, expected_size=29353)\n\n DEFAULT_CONFIDENCE_THRESHOLD = 0.5\n\n class Detection(NamedTuple):\n name: str\n prob: float\n\n class MobileNetSSDVideoTransformer(VideoTransformerBase):\n confidence_threshold: float\n result_queue: \"queue.Queue[List[Detection]]\"\n\n def __init__(self) -> None:\n self._net = cv2.dnn.readNetFromCaffe(\n str(PROTOTXT_LOCAL_PATH), str(MODEL_LOCAL_PATH)\n )\n self.confidence_threshold = DEFAULT_CONFIDENCE_THRESHOLD\n self.result_queue = queue.Queue()\n\n def _annotate_image(self, image, detections):\n # loop over the detections\n (h, w) = image.shape[:2]\n result: List[Detection] = []\n for i in np.arange(0, detections.shape[2]):\n confidence = detections[0, 0, i, 2]\n\n if confidence > self.confidence_threshold:\n # extract the index of the class label from the `detections`,\n # then compute the (x, y)-coordinates of the bounding box for\n # the object\n idx = int(detections[0, 0, i, 1])\n box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n (startX, startY, endX, endY) = box.astype(\"int\")\n\n name = CLASSES[idx]\n result.append(Detection(name=name, prob=float(confidence)))\n\n # display the prediction\n label = f\"{name}: {round(confidence * 100, 2)}%\"\n cv2.rectangle(image, (startX, startY), (endX, endY), COLORS[idx], 2)\n y = startY - 15 if startY - 15 > 15 else startY + 15\n cv2.putText(\n image,\n label,\n (startX, y),\n cv2.FONT_HERSHEY_SIMPLEX,\n 0.5,\n COLORS[idx],\n 2,\n )\n return image, result\n\n def transform(self, frame: av.VideoFrame) -> np.ndarray:\n image = frame.to_ndarray(format=\"bgr24\")\n blob = cv2.dnn.blobFromImage(\n cv2.resize(image, (300, 300)), 0.007843, (300, 300), 127.5\n )\n self._net.setInput(blob)\n detections = self._net.forward()\n annotated_image, result = self._annotate_image(image, detections)\n\n # NOTE: This `transform` method is called in another thread,\n # so it must be thread-safe.\n self.result_queue.put(result)\n\n return annotated_image\n\n webrtc_ctx = webrtc_streamer(\n key=\"object-detection\",\n mode=WebRtcMode.SENDRECV,\n client_settings=WEBRTC_CLIENT_SETTINGS,\n video_transformer_factory=MobileNetSSDVideoTransformer,\n async_transform=True,\n )\n\n confidence_threshold = st.slider(\n \"Confidence threshold\", 0.0, 1.0, DEFAULT_CONFIDENCE_THRESHOLD, 0.05\n )\n if webrtc_ctx.video_transformer:\n webrtc_ctx.video_transformer.confidence_threshold = confidence_threshold\n\n if st.checkbox(\"Show the detected labels\", value=True):\n if webrtc_ctx.state.playing:\n labels_placeholder = st.empty()\n # NOTE: The video transformation with object detection and\n # this loop displaying the result labels are running\n # in different threads asynchronously.\n # Then the rendered video frames and the labels displayed here\n # are not strictly synchronized.\n while True:\n if 
webrtc_ctx.video_transformer:\n try:\n result = webrtc_ctx.video_transformer.result_queue.get(\n timeout=1.0\n )\n except queue.Empty:\n result = None\n labels_placeholder.table(result)\n else:\n break\n\n st.markdown(\n \"This Template uses a model and code from \"\n \"https://github.com/robmarkcole/object-detection-app. and https://github.com/whitphx/streamlit-webrtc-example \"\n \"Many thanks to these projects.\"\n )",
"def give_CARS196_datasets(opt):\n image_sourcepath = opt.source_path+'/images'\n image_classes = sorted([x for x in os.listdir(image_sourcepath)])\n conversion = {i:x for i,x in enumerate(image_classes)}\n image_list = {i:sorted([image_sourcepath+'/'+key+'/'+x for x in os.listdir(image_sourcepath+'/'+key)]) for i,key in enumerate(image_classes)}\n image_list = [[(key,img_path) for img_path in image_list[key]] for key in image_list.keys()]\n image_list = [x for y in image_list for x in y]\n\n image_dict = {}\n for key, img_path in image_list:\n if not key in image_dict.keys():\n image_dict[key] = []\n image_dict[key].append(img_path)\n\n\n keys = sorted(list(image_dict.keys()))\n # random.shuffle(keys)\n #Following \"Deep Metric Learning via Lifted Structured Feature Embedding\", we use the first half of classes for training.\n train,test = keys[:len(keys)//2], keys[len(keys)//2:]\n\n if opt.sampling=='learned':\n if opt.train_val_split_by_class:\n train_val_split = int(len(train)*opt.train_val_split)\n train, val = train[:train_val_split], train[train_val_split:]\n train_image_dict, val_image_dict, test_image_dict = {key:image_dict[key] for key in train}, {key:image_dict[key] for key in val}, {key:image_dict[key] for key in test}\n else:\n train_image_dict, val_image_dict = {},{}\n for key in train:\n train_ixs = np.random.choice(len(image_dict[key]), int(len(image_dict[key])*opt.train_val_split), replace=False)\n val_ixs = np.array([x for x in range(len(image_dict[key])) if x not in train_ixs])\n train_image_dict[key] = np.array(image_dict[key])[train_ixs]\n val_image_dict[key] = np.array(image_dict[key])[val_ixs]\n test_image_dict = {key:image_dict[key] for key in test}\n val_dataset = BaseTripletDataset(val_image_dict, opt, is_validation=True)\n val_dataset.conversion = conversion\n else:\n train_image_dict, test_image_dict = {key:image_dict[key] for key in train}, {key:image_dict[key] for key in test}\n val_dataset = None\n\n train_dataset = BaseTripletDataset(train_image_dict, opt, samples_per_class=opt.samples_per_class)\n test_dataset = BaseTripletDataset(test_image_dict, opt, is_validation=True)\n eval_dataset = BaseTripletDataset(train_image_dict, opt, is_validation=True)\n\n train_dataset.conversion = conversion\n test_dataset.conversion = conversion\n eval_dataset.conversion = conversion\n\n return {'training':train_dataset, 'validation':val_dataset, 'testing':test_dataset, 'evaluation':eval_dataset}",
"def load_test_images(self):\n self._load_images(\n self.image_info.test_img_files,\n self.image_info.num_classes,\n self.test_data, self.test_labels, disp='test')\n self.test_data = self.test_data.astype('float32') / 255\n self.test_labels = self._format_labels(self.test_labels,\n self.image_info.test_img_files,\n self.image_info.num_classes)",
"def _get_detections(dataset, retinanet, image_size, score_threshold=0.05, max_detections=100, save_path=None, use_gpu=True):\n all_detections = [[None for i in range(dataset.num_classes())] for j in range(len(dataset))]\n\n retinanet.eval()\n regressBoxes = BBoxTransform()\n clipBoxes = ClipBoxes()\n pred_version = 'v1'\n with torch.no_grad():\n for index, imgid in enumerate(tqdm(dataset.image_ids)):\n if pred_version == 'v1':\n iter = imgid\n else:\n iter = index\n scores, labels, boxes = predict(dataset, model, image_size, iter, regressBoxes, clipBoxes,\n score_threshold, 0.5, pred_version)\n\n # select indices which have a score above the threshold\n indices = np.where(scores > score_threshold)[0]\n if indices.shape[0] > 0:\n # select those scores\n scores = scores[indices]\n\n # find the order with which to sort the scores\n scores_sort = np.argsort(-scores)[:max_detections]\n\n # select detections\n image_boxes = boxes[indices[scores_sort], :]\n image_scores = scores[scores_sort]\n image_labels = labels[indices[scores_sort]]\n image_detections = np.concatenate(\n [image_boxes, np.expand_dims(image_scores, axis=1), np.expand_dims(image_labels, axis=1)], axis=1)\n\n # copy detections to all_detections\n for label in range(dataset.num_classes()):\n all_detections[index][label] = image_detections[image_detections[:, -1] == label, :-1]\n else:\n # copy detections to all_detections\n for label in range(dataset.num_classes()):\n all_detections[index][label] = np.zeros((0, 5))\n\n print('{}/{}'.format(index + 1, len(dataset)), end='\\r')\n\n return all_detections",
"def detect_dataset_faces(dirs_to_detect=None, save_images=False):\n # get dirs\n if not dirs_to_detect:\n images_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + '/images'\n cat_dirs = glob.glob(images_dir + '/*')\n else:\n cat_dirs = dirs_to_detect\n\n # detect faces for all cats\n with open('Data/inputs_file.txt', 'w') as file:\n for cat in cat_dirs:\n file.write(cat + '\\n')\n\n detect_faces('Data/Source_Images/Test_Images', 'Data/Source_Images/Test_Image_Detection_Results',\n multiple_inputs_flilepath='Data/inputs_file.txt', save_images=save_images)",
"def train_all(self, classifier, name: str, save=False) -> None:\n\n train = self.features[self.features_list]\n target = self.features['stressed']\n scaler = StandardScaler().fit(train)\n train_scaled = scaler.transform(train)\n print(f'Currently Training {name} on all data')\n clf = classifier.fit(train_scaled, target)\n\n self.scaler = scaler\n self.classifier = clf\n self.clf_name = name\n\n if save:\n joblib.dump(scaler, 'models/scaler.pkl')\n joblib.dump(clf, f'models/classifier_{name}.pkl')",
"def test_batch_detect_of_multiple_images(self):\n for detector in self.detectors:\n with self.subTest(detectorType=detector.detectorType):\n detection = detector.detect(images=[VLIMAGE_SEVERAL_FACE, VLIMAGE_ONE_FACE])\n self.assertFaceDetection(detection[0], VLIMAGE_SEVERAL_FACE)\n self.assertFaceDetection(detection[1], VLIMAGE_ONE_FACE)\n assert 2 == len(detection)\n assert 5 == len(detection[0])\n assert 1 == len(detection[1])",
"def create_adjusted_images_and_save(idx, data, sal_map, target, ks, percentages, num_classes, dataset, method, approach = \"zero\"):\n\n \n\n # Get unnormalized image and pre-process salience map\n image = UNNORMALIZE(data.squeeze()) \n sal_map = sal_map.squeeze().cpu().detach().numpy()\n \n for k, percentage in zip(ks, percentages): \n\n # Get k indices and replace within image\n indices = return_k_index_argsort(sal_map, k, method)\n new_image = replace_pixels(image, indices, approach = approach)\n\n # Save adjusted images\n data_dir = f'dataset/roar_{method}/cifar-{num_classes}-{percentage*100}%-removed'\n save_imagefolder_image(data_dir, target, new_image, idx, dataset)",
"def cifar_demo():\n mpi.mkdir(FLAGS.output_dir)\n logging.info('Loading cifar data...')\n cifar = visiondata.CifarDataset(FLAGS.root, is_training=True)\n cifar_test = visiondata.CifarDataset(FLAGS.root, is_training=False)\n\n # try: use sub images\n #cifar = datasets.SubImageSet(cifar, [28,28], 1)\n #cifar_test = datasets.CenterRegionSet(cifar_test, [28,28])\n\n conv = pipeline.ConvLayer([\n pipeline.PatchExtractor([6,6], 1), # extracts patches\n pipeline.MeanvarNormalizer({'reg': 10}), # normalizes the patches\n pipeline.LinearEncoder({},\n trainer = pipeline.ZcaTrainer({'reg': 0.1})), # Does whitening\n pipeline.ThresholdEncoder({'alpha': 0.25, 'twoside': True},\n trainer = pipeline.OMPTrainer(\n {'k': 1600, 'max_iter':100})), # does encoding\n pipeline.SpatialPooler({'grid': (4,4), 'method': 'max'}) # average pool\n ])\n logging.info('Training the pipeline...')\n conv.train(cifar, 400000)\n logging.info('Dumping the pipeline...')\n if mpi.is_root():\n with open(os.path.join(FLAGS.output_dir, FLAGS.model_file),'w') as fid:\n pickle.dump(conv, fid)\n fid.close()\n logging.info('Extracting features...')\n Xtrain = conv.process_dataset(cifar, as_2d = True)\n mpi.dump_matrix_multi(Xtrain,\n os.path.join(FLAGS.output_dir, \n FLAGS.feature_file+'_train'))\n Ytrain = cifar.labels().astype(np.int)\n Xtest = conv.process_dataset(cifar_test, as_2d = True)\n mpi.dump_matrix_multi(Xtest,\n os.path.join(FLAGS.output_dir, \n FLAGS.feature_file+'_test'))\n Ytest = cifar_test.labels().astype(np.int)\n # normalization\n m, std = classifier.feature_meanstd(Xtrain)\n Xtrain -= m\n Xtrain /= std\n Xtest -= m\n Xtest /= std\n \n w, b = classifier.l2svm_onevsall(Xtrain, Ytrain, 0.005)\n if mpi.is_root():\n with open(os.path.join(FLAGS.output_dir, FLAGS.svm_file), 'w') as fid:\n pickle.dump({'m': m, 'std': std, 'w': w, 'b': b}, fid)\n accu = np.sum(Ytrain == (np.dot(Xtrain,w)+b).argmax(axis=1)) \\\n / float(len(Ytrain))\n accu_test = np.sum(Ytest == (np.dot(Xtest,w)+b).argmax(axis=1)) \\\n / float(len(Ytest))\n \n logging.info('Training accuracy: %f' % accu)\n logging.info('Testing accuracy: %f' % accu_test)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Updates a heatmap with the given bounding boxes, and provides a thresholded copy of it. The heatmap passed as parameter is modified and also returned. For every pixel that is in a bounding box, the corresponding pixel in the heatmap is incremented by 100. If the pixel is in multiple bounding boxes, the corresponding heatmap pixel is incremented multiple times. After update, the heatmap is averaged with the 14 previous heatmaps, each equally weighted. The updated heatmap is copied, rounded to integers, and zeroed wherever it falls below the threshold; both the updated heatmap and the thresholded copy are returned.
|
def update_heat_map(heat_map, bounding_boxes):
    threshold = 56
    # Build this frame's contribution: +100 for every pixel inside each box,
    # accumulating when boxes overlap.
    new_heat = np.zeros_like(heat_map)
    for bbox in bounding_boxes:
        x0, y0, x1, y1 = bbox
        new_heat[y0:y1 + 1, x0:x1 + 1] += 100
    # Blend with the running heat map, weighted 14:1 toward history.
    heat_map = (14 * heat_map + new_heat) / 15
    # Rounded integer copy with sub-threshold pixels zeroed out.
    thresholded = np.rint(heat_map).astype(np.uint)
    thresholded[heat_map < threshold] = 0
    return heat_map, thresholded
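
A minimal usage sketch for the function above — the frame size, the hand-made boxes, and the driver loop are illustrative assumptions, not part of the original record:

import numpy as np

# Assumed 720x1280 frames; two frames' worth of hand-made detections.
heat_map = np.zeros((720, 1280), dtype=np.float64)
frames_of_boxes = [
    [(100, 200, 180, 260)],                        # frame 1: one detection
    [(105, 205, 185, 265), (400, 300, 470, 360)],  # frame 2: two detections
]
for boxes in frames_of_boxes:
    heat_map, thresholded = update_heat_map(heat_map, boxes)
print(thresholded.max())  # 0 here: with a 14:1 blend, roughly 12 consecutive hits are needed to cross the threshold of 56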
|
[
"def add_heat(heatmap, bounding_boxes_list):\n # Iterate through list of bounding boxes\n for box in bounding_boxes_list:\n # Add += 1 for all pixels inside each bbox\n # Assuming each \"box\" takes the form ((x1, y1), (x2, y2))\n heatmap[box[0][1]:box[1][1], box[0][0]:box[1][0]] += 1\n\n return heatmap",
"def add_heat(heatmap, bbox_list):\n\n # Iterate through list of bboxes\n for box in bbox_list:\n # Add += 1 for all pixels inside each bbox\n # Assuming each \"box\" takes the form ((x1, y1), (x2, y2))\n heatmap[box[0][1]:box[1][1], box[0][0]:box[1][0]] += 1\n\n # Return updated heatmap\n return heatmap",
"def add_heat(self, bbox_list):\n # Iterate through list of bboxes\n for box in bbox_list:\n # Add += 1 for all pixels inside each bbox\n # Assuming each \"box\" takes the form ((x1, y1), (x2, y2))\n self.current_map[box[0][1]:box[1][1], box[0][0]:box[1][0]] += self.HEAT\n\n # Return current heatmap\n return self.current_map",
"def create_heatmap(img, bbox_list):\n heatmap = np.zeros_like(img[:,:,0]).astype(np.float)\n for box in bbox_list:\n heatmap[box[0][1]:box[1][1], box[0][0]:box[1][0]] += 1\n \n heatmap = np.clip(heatmap, 0, 255)\n\n return heatmap",
"def compute_heatmap(wing_img):\n\n global NUM_CLASSES, BB_COL, BB_ROW, BB_STRIDE, CHECKPOINT_DIR\n\n #\n # CONSTRUCT THE PIXEL DOMAIN AND THE BOUNDING BOX DOMAIN\n #\n\n IMG_ROW, IMG_COL, IMG_CHN = wing_img.shape\n\n pixel_points_row = np.arange(IMG_ROW)\n pixel_points_col = np.arange(IMG_COL)\n pixel_points_col, pixel_points_row = np.meshgrid(pixel_points_col,\n pixel_points_row)\n pixel_points_dom = np.hstack((pixel_points_row.ravel().reshape(-1, 1),\n pixel_points_col.ravel().reshape(-1, 1)\n ))\n\n bb_points_row = np.arange(0, IMG_ROW - BB_ROW, BB_STRIDE)\n bb_points_col = np.arange(0, IMG_COL - BB_COL, BB_STRIDE)\n bb_points_row, bb_points_col = np.meshgrid(bb_points_row,\n bb_points_col)\n\n bb_points_dom = np.hstack(( bb_points_row.ravel().reshape(-1, 1),\n bb_points_col.ravel().reshape(-1, 1)))\n\n # load the image and allocate space for the probabilities array\n probs = np.zeros((bb_points_dom.shape[0], NUM_CLASSES))\n\n #\n # TENSORFLOW GRAPH\n #\n\n with tf.Graph().as_default() as g:\n # load and process image for classification\n x = tf.placeholder(np.float32, shape = [BB_ROW, BB_COL, IMG_CHN])\n sm_image = tf.image.rgb_to_grayscale(x)\n sm_image = tf.image.per_image_standardization(sm_image)\n fd_image = tf.expand_dims(sm_image, 0)\n\n # perform inference on the image and\n # get softmax probabilities for each class\n logits = fc.inference(fd_image)\n softmax_probs = tf.nn.softmax(logits)\n\n saver = tf.train.Saver()\n tf.logging.set_verbosity(tf.logging.ERROR)\n\n with tf.Session() as sess:\n ckpt = tf.train.get_checkpoint_state(os.path.dirname(__file__) + CHECKPOINT_DIR)\n print(ckpt)\n if ckpt and ckpt.model_checkpoint_path:\n # Restores from checkpoint\n saver.restore(sess, ckpt.model_checkpoint_path)\n\n for i, row in enumerate(bb_points_dom):\n temp_prob = sess.run(softmax_probs,\n feed_dict = {x: wing_img[row[0]:(row[0] + BB_ROW), \\\n row[1]:(row[1] + BB_COL), :]})\n\n probs[i, :] = temp_prob.reshape((1, NUM_CLASSES))\n\n # reweight the probabilities based off of distance to a central point\n heatmap = ch.construct_heatmap(bb_points_dom,\n probs,\n IMG_ROW,\n IMG_COL,\n bb_points_dom.shape[0])\n\n return(np.asarray(heatmap))",
"def _apply_threshold(heatmap, threshold):\n # Zero out pixels below the threshold\n heatmap[heatmap <= threshold] = 0\n # Return thresholded map\n return heatmap",
"def apply_threshold(heatmap, threshold):\n\n # Zero out pixels below the threshold\n heatmap[heatmap <= threshold] = 0\n\n # Return thresholded map\n return heatmap",
"def preprocess_map(map_grid):\n h = map_grid.info.height\n w = map_grid.info.width\n res = map_grid.info.resolution\n radius, box_size = get_influence_area_size(map_grid)\n half_max_map_size_in_cells = int(math.ceil(max_map_size / res / 2))\n min_i = max(0, h / 2 - half_max_map_size_in_cells)\n max_i = min(h - 1, h / 2 + half_max_map_size_in_cells + 1)\n min_j = max(0, w / 2 - half_max_map_size_in_cells)\n max_j = min(w - 1, w / 2 + half_max_map_size_in_cells + 1)\n augmented_occ = {}\n for i in range(min_i, max_i + 1):\n for j in range(min_j, max_j + 1):\n occ = map_grid.data[i * w + j]\n # for each unsafe point, spread the circular influence area by robot radius\n if occ != -1 and occ >= occ_threshold:\n for p in get_points_in_radius(j, i, radius, box_size, w, h):\n if p not in augmented_occ or augmented_occ[p] < occ:\n augmented_occ[p] = occ\n return augmented_occ",
"def _resize_bboxes(self, results):\n for key in ['gt_bbox'] if 'gt_bbox' in results else []:\n bboxes = results[key] * results['scale_factor']\n if self.bbox_clip_border:\n img_shape = results['im_shape']\n bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])\n bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])\n results[key] = bboxes",
"def to_apply_mask(img, bbox):\n x1, y1, x2, y2 = bbox\n img[:,y1:y2,x1:x2] = img[:,y1:y2,x1:x2].normal_(0.0, 0.1) \n return img",
"def _create_frame_heatmap(self, classified_objects):\n frame_heatmap = np.zeros((self.image_height, self.image_width))\n for search_window, _, confidence in classified_objects:\n frame_heatmap[search_window.top:search_window.bottom,\n search_window.left:search_window.right] += confidence\n\n self.heatmaps.append(frame_heatmap)",
"def threshold_bboxes(bboxes, img, threshold_func = filters.threshold_li, \n min_local_threshold = 0.5, border=10):\n thresh_img = np.zeros_like(img, dtype = np.bool)\n nrows, ncols = img.shape\n global_thresh = threshold_func(img)\n \n for bbox in bboxes:\n minr, minc, maxr, maxc = bbox\n minr, minc = max(0, minr - border), max(0, minc - border)\n maxr, maxc = min(maxr + border, nrows-1), min(maxc + border, ncols - 1) \n local_thresh = threshold_func(img[minr:maxr, minc:maxc])\n thresh = max(local_thresh, global_thresh * min_local_threshold)\n local_img = img[minr:maxr, minc:maxc] > thresh\n thresh_img[minr:maxr, minc:maxc] = np.logical_or(local_img, thresh_img[minr:maxr, minc:maxc])\n return thresh_img",
"def selective_crop_and_resize(features,\n boxes,\n box_levels,\n boundaries,\n output_size=7,\n sample_offset=0.5,\n use_einsum_gather=False):\n (batch_size, num_levels, max_feature_height, max_feature_width,\n num_filters) = features.get_shape().as_list()\n if batch_size is None:\n batch_size = tf.shape(features)[0]\n _, num_boxes, _ = boxes.get_shape().as_list()\n\n kernel_y, kernel_x, box_gridy0y1, box_gridx0x1 = compute_grid_positions(\n boxes, boundaries, output_size, sample_offset)\n x_indices = tf.cast(\n tf.reshape(box_gridx0x1, [batch_size, num_boxes, output_size * 2]),\n dtype=tf.int32)\n y_indices = tf.cast(\n tf.reshape(box_gridy0y1, [batch_size, num_boxes, output_size * 2]),\n dtype=tf.int32)\n\n if use_einsum_gather:\n # Blinear interpolation is done during the last two gathers:\n # f(y, x) = [hy, ly] * [[f00, f01], * [hx, lx]^T\n # [f10, f11]]\n # [[f00, f01],\n # [f10, f11]] = tf.einsum(tf.einsum(features, y_one_hot), x_one_hot)\n # where [hy, ly] and [hx, lx] are the bilinear interpolation kernel.\n\n # shape is [batch_size, boxes, output_size, 2, 1]\n grid_y_one_hot, grid_x_one_hot = get_grid_one_hot(box_gridy0y1,\n box_gridx0x1,\n max_feature_height,\n max_feature_width)\n\n # shape is [batch_size, num_boxes, output_size, height]\n grid_y_weight = tf.reduce_sum(\n tf.multiply(grid_y_one_hot, kernel_y), axis=-2)\n # shape is [batch_size, num_boxes, output_size, width]\n grid_x_weight = tf.reduce_sum(\n tf.multiply(grid_x_one_hot, kernel_x), axis=-2)\n\n # Gather for y_axis.\n # shape is [batch_size, num_boxes, output_size, width, features]\n features_per_box = tf.einsum('bmhwf,bmoh->bmowf', features,\n tf.cast(grid_y_weight, features.dtype))\n # Gather for x_axis.\n # shape is [batch_size, num_boxes, output_size, output_size, features]\n features_per_box = tf.einsum('bmhwf,bmow->bmhof', features_per_box,\n tf.cast(grid_x_weight, features.dtype))\n else:\n height_dim_offset = max_feature_width\n level_dim_offset = max_feature_height * height_dim_offset\n batch_dim_offset = num_levels * level_dim_offset\n\n batch_size_offset = tf.tile(\n tf.reshape(\n tf.range(batch_size) * batch_dim_offset, [batch_size, 1, 1, 1]),\n [1, num_boxes, output_size * 2, output_size * 2])\n box_levels_offset = tf.tile(\n tf.reshape(box_levels * level_dim_offset,\n [batch_size, num_boxes, 1, 1]),\n [1, 1, output_size * 2, output_size * 2])\n y_indices_offset = tf.tile(\n tf.reshape(y_indices * height_dim_offset,\n [batch_size, num_boxes, output_size * 2, 1]),\n [1, 1, 1, output_size * 2])\n x_indices_offset = tf.tile(\n tf.reshape(x_indices, [batch_size, num_boxes, 1, output_size * 2]),\n [1, 1, output_size * 2, 1])\n\n indices = tf.reshape(\n batch_size_offset + box_levels_offset + y_indices_offset +\n x_indices_offset, [-1])\n\n features = tf.reshape(features, [-1, num_filters])\n # TODO(wangtao): replace tf.gather with tf.gather_nd and try to get similar\n # performance.\n features_per_box = tf.reshape(\n tf.gather(features, indices),\n [batch_size, num_boxes, output_size * 2, output_size * 2, num_filters])\n features_per_box = feature_bilinear_interpolation(features_per_box,\n kernel_y, kernel_x)\n\n return features_per_box",
"def statadjust(source_image, dest_image, boundingbox, **kwargs):\n source_image = Image.from_any(source_image)\n dest_image = Image.from_any(dest_image)\n return StatAdjustOperation(source_image, dest_image, offset=(boundingbox[1], boundingbox[0]), **kwargs).run()",
"def make_weight_map(self, masks):\n nrows, ncols = masks.shape[1:]\n masks = (masks > 0).astype(int)\n distMap = np.zeros((nrows * ncols, masks.shape[0]))\n X1, Y1 = np.meshgrid(np.arange(nrows), np.arange(ncols))\n X1, Y1 = np.c_[X1.ravel(), Y1.ravel()].T\n for i, mask in enumerate(masks):\n # find the boundary of each mask,\n # compute the distance of each pixel from this boundary\n bounds = find_boundaries(mask, mode='inner')\n X2, Y2 = np.nonzero(bounds)\n xSum = (X2.reshape(-1, 1) - X1.reshape(1, -1)) ** 2\n ySum = (Y2.reshape(-1, 1) - Y1.reshape(1, -1)) ** 2\n distMap[:, i] = np.sqrt(xSum + ySum).min(axis=0)\n ix = np.arange(distMap.shape[0])\n if distMap.shape[1] == 1:\n d1 = distMap.ravel()\n border_loss_map = self.w0 * np.exp((-1 * (d1) ** 2) / (2 * (self.sigma ** 2)))\n else:\n if distMap.shape[1] == 2:\n d1_ix, d2_ix = np.argpartition(distMap, 1, axis=1)[:, :2].T\n else:\n d1_ix, d2_ix = np.argpartition(distMap, 2, axis=1)[:, :2].T\n d1 = distMap[ix, d1_ix]\n d2 = distMap[ix, d2_ix]\n border_loss_map = self.w0 * np.exp((-1 * (d1 + d2) ** 2) / (2 * (self.sigma ** 2)))\n xBLoss = np.zeros((nrows, ncols))\n xBLoss[X1, Y1] = border_loss_map\n # class weight map\n loss = np.zeros((nrows, ncols))\n w_1 = 1 - masks.sum() / loss.size\n w_0 = 1 - w_1\n loss[masks.sum(0) == 1] = w_1\n loss[masks.sum(0) == 0] = w_0\n ZZ = xBLoss + loss\n return ZZ",
"def scale_bbox(self, boxes, old_width, new_width):\n boxes = copy.deepcopy(boxes)\n scale_percent = new_width / old_width\n for b in boxes:\n b.xmin = int(b.xmin * scale_percent)\n b.ymin = int(b.ymin * scale_percent)\n b.xmax = int(b.xmax * scale_percent)\n b.ymax = int(b.ymax * scale_percent)\n return boxes",
"def custom_mask(mask):\n \n new_mask = np.zeros(mask.shape[0]*mask.shape[1])\n new_mask = new_mask.reshape(mask.shape[0], mask.shape[1])\n for i in range(1):\n for j in range(mask.shape[0]):\n for k in range(mask.shape[1]):\n new_mask[j][k] = mask[j][k]\n if new_mask[j][k] > 0.5 :\n new_mask[j][k] = 1 \n else:\n new_mask[j][k] = 0\n \n return new_mask",
"def binarize_image(tile, im_nuclei_stain, foreground_threshold, local_radius_ratio=3, minimum_radius = 3):\n\n ## Apply initial global threshold\n img = cv2.cvtColor((im_nuclei_stain),cv2.COLOR_GRAY2RGB)\n img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n img_gray_flat = img_gray.flatten()\n thresh = np.round(threshold_otsu(img_gray_flat[img_gray_flat<foreground_threshold]))\n img_bin = np.copy(img_gray)\n img_bin[img_gray<thresh] = 255\n img_bin[img_gray>=thresh] = 0\n\n ## Fill small holes in the image\n img_bin = binary_fill_holes(img_bin.astype(bool))\n img_bin = img_bin.astype(np.uint8)\n\n ## Remove small structures in the image based on minimum_radius\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(minimum_radius,minimum_radius))\n opening = cv2.morphologyEx(img_bin,cv2.MORPH_OPEN, kernel, iterations = 1)\n\n ## Identify connected regions(\"components\") in the image\n regions = cv2.connectedComponents(opening)[1]\n obj_props = regionprops(regions, intensity_image=im_nuclei_stain)\n\n ## Initialize mask\n im_fgnd_mask = np.zeros(im_nuclei_stain.shape).astype(np.uint8)\n\n ## Iterate through regions found via global thresholding\n for obj in obj_props:\n\n # Skip thresholding on background component\n if (obj.label == 0):\n continue\n\n # Expand bounding box based on local_radius_ratio\n # The idea is to include more background for local thresholding.\n bbox = obj.bbox\n equivalent_diameter = obj.equivalent_diameter\n min_row = np.max([0, np.round(bbox[0] - equivalent_diameter*local_radius_ratio)]).astype(np.int)\n max_row = np.min([tile.shape[0], np.round(bbox[2] + equivalent_diameter*local_radius_ratio)]).astype(np.int)\n min_col = np.max([0, np.round(bbox[1] - equivalent_diameter*local_radius_ratio)]).astype(np.int)\n max_col = np.min([tile.shape[1], np.round(bbox[3] + equivalent_diameter*local_radius_ratio)]).astype(np.int)\n region = im_nuclei_stain[min_row:max_row, min_col:max_col]\n region_flat = region.flatten()\n\n # If local threshold fail. Default to global threshold instead.\n try:\n thresh = np.round(threshold_otsu(region_flat[region_flat<foreground_threshold]))\n except:\n thresh = foreground_threshold\n\n # Copy local bbox mask to larger tile mask\n region_bin = np.copy(region)\n region_bin[region<thresh] = 1\n region_bin[region>=thresh] = 0\n im_fgnd_mask[min_row:max_row, min_col:max_col] = im_fgnd_mask[min_row:max_row, min_col:max_col] + region_bin.astype(np.uint8)\n im_fgnd_mask[im_fgnd_mask>0] = 1\n\n return(im_fgnd_mask)",
"def _update_block_mask(self, weights, threshold, mask):\n squeezed_weights = tf.squeeze(weights)\n if squeezed_weights.get_shape().ndims != 2 or self._block_dim == [1, 1]:\n if self._pruning_method == 'threshold':\n return self._update_mask(weights, threshold)\n # random_cumulative removes at random taking into account previous\n # random modification. random_indepent simply removes at random.\n elif self._pruning_method in ['random_independent', 'random_cumulative']:\n return self._update_random_mask(weights, mask)\n else:\n raise ValueError('Unknown pruning method: %s' % self._pruning_method)\n\n if self._block_pooling_function not in ['AVG', 'MAX']:\n raise ValueError('Unknown pooling function for block sparsity: %s' %\n self._block_pooling_function)\n\n with tf.name_scope(weights.op.name + '_pruning_ops'):\n abs_weights = tf.abs(squeezed_weights)\n\n pool_window = [self._block_dim[0], self._block_dim[1]]\n pool_fn = pruning_utils.factorized_pool\n\n if not self._use_tpu:\n pool_fn = tf.pool\n abs_weights = tf.reshape(\n abs_weights,\n [1, abs_weights.get_shape()[0],\n abs_weights.get_shape()[1], 1])\n\n pooled_weights = pool_fn(\n abs_weights,\n window_shape=pool_window,\n pooling_type=self._block_pooling_function,\n strides=pool_window,\n padding='SAME',\n name=weights.op.name + '_pooled')\n\n if pooled_weights.get_shape().ndims != 2:\n pooled_weights = tf.squeeze(pooled_weights)\n\n if self._pruning_method == 'threshold':\n smoothed_threshold, new_mask = self._update_mask(\n pooled_weights, threshold)\n elif self._pruning_method in ['random_independent', 'random_cumulative']:\n smoothed_threshold, new_mask = self._update_random_mask(\n pooled_weights, mask)\n else:\n raise ValueError('Unknown pruning method: %s' % self._pruning_method)\n\n ## this is the process that updates the mask.\n updated_mask = pruning_utils.kronecker_product(new_mask,\n tf.ones(self._block_dim))\n sliced_mask = tf.slice(\n updated_mask, [0, 0],\n [squeezed_weights.get_shape()[0],\n squeezed_weights.get_shape()[1]])\n\n return smoothed_threshold, tf.reshape(sliced_mask, tf.shape(weights))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Change a user's plan details. Does not touch Stripe fields.
|
def change_plan(email: str, plan: str) -> int:
mdb = MongoClient(environ["MONGO_URI"])
plan_data = mdb.account.plan.find_one({"key": plan}, {"_id": 0})
if not plan_data:
print(f"No plan found for {plan}")
return 1
resp = mdb.account.user.update_one({"email": email}, {"$set": {"plan": plan_data}})
if not resp.matched_count:
print(f"No user found for {email}")
elif not resp.modified_count:
print(f"{email} is already on {plan_data['name']}")
else:
print(f"{email} has been set to {plan_data['name']}")
return 0
return 2
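
The record above is only the function body; below is a sketch of the module context it appears to assume (pymongo's MongoClient and os.environ with a MONGO_URI variable) plus a hypothetical CLI wrapper — the wrapper and imports are inferred, not part of the original:

import sys
from os import environ           # the function reads environ["MONGO_URI"]
from pymongo import MongoClient  # mdb = MongoClient(...)

if __name__ == "__main__":
    # Hypothetical command-line entry point; exit codes mirror the returns above:
    # 0 = plan changed, 1 = plan not found, 2 = user not found or already on the plan.
    email_arg, plan_arg = sys.argv[1], sys.argv[2]
    sys.exit(change_plan(email_arg, plan_arg))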
|
[
"def change_subscription(plan: Plan) -> bool:\n if not current_user.stripe:\n return False\n sub_id = current_user.stripe.subscription_id\n if not sub_id or current_user.plan == plan:\n return False\n sub = stripe.Subscription.retrieve(sub_id)\n sub.modify(\n sub_id,\n cancel_at_period_end=False,\n items=[{\"id\": sub[\"items\"][\"data\"][0].id, \"plan\": plan.stripe_id}],\n )\n current_user.stripe.subscription_id = sub.id\n current_user.plan = plan\n current_user.save()\n return True",
"def put(self, user_id):\n self.conn = pecan.request.db_conn\n self.conn.change_billing_owner(request.context,\n project_id=self.project_id,\n user_id=user_id)",
"def test_update_plan(self):\n body = Plans()\n response = self.client.open(\n '/phuthien007/test/1.0.0/api/plans',\n method='PUT',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def edit_handler(request):\n\n # TODO prevent editing has_passed plans?\n\n plan = get_plan(request)\n\n # return errors, if any\n if isinstance(plan, dict):\n return JsonResponse(plan, status=400)\n\n form = PlanForm(get_post(request), instance=plan)\n if not form.is_valid():\n return JsonResponse(form.errors, status=400)\n\n form.save()\n return JsonResponse({'total': plan.total})",
"def sign_user_up_for_future_plan(conn):\n \n id, name = menu_selections('user id', 'subscription plan name')\n date = custom_select(\n \"Enter a date past the current date\", get_future_date)[1]\n\n with conn.cursor() as cur:\n try:\n cur.execute('SELECT id from users where id=%s;',(id,))\n if next(cur, None) is None:\n printc('r',f'user `{user_id}` does not exist; aborting')\n return\n cur.execute('SELECT name from plan where name=%s;',(name,))\n if next(cur, None) is None:\n printc('r',f'plan `{name}` does not exist; aborting')\n return\n \n cur.execute(\n \"\"\"\n SELECT *\n FROM subscription S JOIN plan P ON (S.plan_name = P.name)\n WHERE\n user_id = %s AND\n start_date <= %s AND\n start_date + month_length >= %s;\n \"\"\",\n (id, date, date)\n )\n if next(cur, None) is not None:\n printc('r',\n f'cannot sign up user {id} for new subscription plan starting on {date}; '\n 'that user has an overalpping subscription with that date'\n )\n return\n except Exception as e:\n print('sign_user_up_for_future_plan: error:', repr(e))\n return\n\n with conn.cursor() as cur:\n try:\n cur.execute(\n \"\"\"\n INSERT INTO subscription\n (user_id, plan_name, start_date, purchased_date)\n VALUES (%s, %s, %s, CURRENT_DATE);\n \"\"\",\n (id, name, date)\n )\n printc('g',f'signed user up for plan {name} starting on {date}')\n except Exception as e:\n print('sign_user_up_for_future_plan: error occcured:', repr(e))",
"def set_account_information(self, user_id, req):\n c = self.db.cursor()\n try:\n c.execute(\"\"\"\n UPDATE Users\n SET \n username = ?,\n email = ?,\n fName = ?,\n lName = ?,\n streetAddress = ?,\n city = ?,\n state = ?,\n postCode = ?\n WHERE\n id = ?\n \"\"\",\n (\n req['username'],\n req['email'],\n req['fName'],\n req['lName'],\n req['streetAddress'],\n req['city'],\n req['state'],\n req['postCode'],\n user_id\n )\n )\n self.db.commit()\n except sqlite3.Error as e:\n log.error(e)\n raise Exception",
"def plan(self, value):\r\n from .plan import Plan\r\n if isinstance(value, Plan):\r\n plan_obj = value\r\n elif isinstance(value, basestring):\r\n plan_obj = self._commcell_object.plans.get(value)\r\n else:\r\n raise SDKException('Backupset', '102', 'Input value is not of supported type')\r\n\r\n plans_obj = self._commcell_object.plans\r\n entity_dict = {\r\n 'clientId': int(self._client_object.client_id),\r\n 'appId': int(self._agent_object.agent_id),\r\n 'backupsetId': int(self.backupset_id)\r\n }\r\n if plan_obj.plan_name in plans_obj.get_eligible_plans(entity_dict):\r\n request_json = {\r\n 'backupsetProperties': {\r\n 'planEntity': {\r\n 'planSubtype': int(plan_obj.subtype),\r\n '_type_': 158,\r\n 'planType': int(plan_obj.plan_type),\r\n 'planName': plan_obj.plan_name,\r\n 'planId': int(plan_obj.plan_id)\r\n }\r\n }\r\n }\r\n\r\n response = self._process_update_reponse(\r\n request_json\r\n )\r\n\r\n if response[0]:\r\n return\r\n else:\r\n o_str = 'Failed to asspciate plan to the backupset\\nError: \"{0}\"'\r\n raise SDKException('Backupset', '102', o_str.format(response[2]))\r\n else:\r\n raise SDKException(\r\n 'Backupset',\r\n '102',\r\n 'Plan not eligible to be associated with the backupset'\r\n )",
"def update_user_info():\n\n email = session.get('email')\n\n new_buying_power = request.form.get('buying-power')\n\n this_user = User.query.filter_by(email=email).first()\n # print(\"before update\", this_user)\n this_user.buying_power = new_buying_power\n # print(\"After update\", this_user)\n db.session.commit()\n \n return 'New information updated.'",
"def migrate_to_plan(self, migrate_to_plan):\n\n self._migrate_to_plan = migrate_to_plan",
"def membership_update(request):\n\n if not Profile.objects.get(user=request.user).membership:\n return redirect(reverse('memberships'))\n\n stripe.api_key = settings.STRIPE_SECRET_KEY\n # user's chosen membership\n membership = request.session['membership']\n\n # Asign correct price keys to the paid memberships\n if membership == 'Ultimate':\n price = settings.STRIPE_PRICE_ID_ULTIMATE\n elif membership == 'Supreme':\n price = settings.STRIPE_PRICE_ID_SUPREME\n else:\n price = settings.STRIPE_PRICE_ID_BASIC\n\n # Check if the user already exists in stripe system and\n # our database\n try:\n stripe_customer = StripeCustomer.objects.get(user=request.user)\n subscription = stripe.Subscription.retrieve(\n stripe_customer.stripeSubscriptionId)\n # Update existing membership with a new one\n stripe.Subscription.modify(\n subscription.id,\n cancel_at_period_end=False,\n proration_behavior='create_prorations',\n items=[{\n 'id': subscription['items']['data'][0].id,\n 'price': price,\n }]\n )\n\n # Attach new membership to the user's profile\n membership_type = get_object_or_404(Membership, name=membership)\n profile = get_object_or_404(Profile, user=request.user)\n profile.membership = membership_type\n profile.save()\n\n messages.success(request, 'Congrats!! You successfully changed'\n ' your membership to the '\n f'{membership} membership!')\n # Redirect the user to profiles page\n return redirect(reverse('profile'))\n\n # If user doesn't exist, return error\n except StripeCustomer.DoesNotExist:\n return messages.error(request, 'User does not exist')",
"def get_context_data(self, *args, **kwargs):\n context = super(ConfirmFormView, self).get_context_data(**kwargs)\n context['plan'] = Plan.objects.get(pk=self.kwargs['plan_id'])\n return context",
"def Update(self, request):\n test_plan_key = mtt_messages.ConvertToKey(\n ndb_models.TestPlan, request.test_plan_id)\n if not test_plan_key.get():\n raise endpoints.NotFoundException()\n test_plan = mtt_messages.Convert(\n request, ndb_models.TestPlan, from_cls=mtt_messages.TestPlan)\n _ValidateTestPlan(test_plan)\n test_plan.key = test_plan_key\n test_plan.put()\n test_scheduler.ScheduleTestPlanCronJob(test_plan.key.id())\n return mtt_messages.Convert(test_plan, mtt_messages.TestPlan)",
"def plan_context(request: HttpRequest) -> Dict:\n return {\n \"plans\": settings.PLANS\n }",
"def Update(apig,usageplan_id: str,purpose: str,throttle_rate: float,throttle_burst: int,quota_limit: int,quota_period: str,quota_offset: int):\n\t\t\t\t\t\n\t\t\t\tresponse = apig.client.update_usage_plan(\n\t\t\t\t\tusagePlanId=usageplan_id,\n\t\t\t\t\tpatchOperations=[\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t'op': 'replace',\n\t\t\t\t\t\t\t'path': '/description',\n\t\t\t\t\t\t\t'value': purpose,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t'op': 'replace',\n\t\t\t\t\t\t\t'path': '/throttle/burstLimit',\n\t\t\t\t\t\t\t'value': str(throttle_burst),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t'op': 'replace',\n\t\t\t\t\t\t\t'path': '/throttle/rateLimit',\n\t\t\t\t\t\t\t'value': str(throttle_rate),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t'op': 'replace',\n\t\t\t\t\t\t\t'path': '/quota/limit',\n\t\t\t\t\t\t\t'value': str(quota_limit),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t'op': 'replace',\n\t\t\t\t\t\t\t'path': '/quota/offset',\n\t\t\t\t\t\t\t'value': str(quota_offset),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t'op': 'replace',\n\t\t\t\t\t\t\t'path': '/quota/period',\n\t\t\t\t\t\t\t'value': str(quota_period),\n\t\t\t\t\t\t},\n\t\t\t\t\t]\n\t\t\t\t)\n\t\t\t\treturn response",
"def downgrade_to_free():\n if not current_user.subscription.active:\n subscription = Subscription.query.get(current_user.subscription.id)\n free_plan = Plan.query.filter_by(name='Free').first()\n subscription.plan_id = free_plan.id\n db.session.commit()\n return redirect(url_for('home.dashboard'))",
"def setup_subscription(cls, request_user, plan_cost, group, active=False):\n current_date = timezone.now()\n\n # Add subscription plan to user\n subscription = cls.objects.create(\n user=request_user,\n subscription=plan_cost,\n date_billing_start=None,\n date_billing_end=None,\n date_billing_last=None,\n date_billing_next=None,\n active=active,\n cancelled=False,\n )\n\n # Add user to the proper group\n # try:\n # group.user_set.add(request_user)\n # except AttributeError:\n # No group available to add user to\n # pass\n\n return subscription",
"def apply_value_to_parameter(self, plan: Plan) -> None:\n parameter = plan.get_field_by_id(self.parameter_id)\n parameter.actual_value = self.value",
"def plan_details(request, plan_id):\n header = get_api_header()\n resp = r.get(_url_plans(_base_url(request)),\n headers=header, params={'id': plan_id},verify=False)\n if resp.status_code != 200:\n return {}\n else:\n result = json.loads(resp.content)\n if result[u'count'] == 0:\n return {}\n else:\n return result[u'results'][0]",
"def sync_plans():\n for plan in settings.DJSTRIPE_PLANS:\n stripe_plan = settings.DJSTRIPE_PLANS[plan]\n if stripe_plan.get(\"stripe_plan_id\"):\n try:\n # A few minor things are changed in the api-version of the create call\n api_kwargs = dict(stripe_plan)\n api_kwargs['id'] = api_kwargs['stripe_plan_id']\n api_kwargs['amount'] = api_kwargs['price']\n del(api_kwargs['stripe_plan_id'])\n del(api_kwargs['price'])\n del(api_kwargs['description'])\n\n Plan._api_create(**api_kwargs)\n print(\"Plan created for {0}\".format(plan))\n except Exception as e:\n print(\"ERROR: \" + str(e))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Versions up the given file based on other files in the same directory. The given filepath should not have a version at the end. For example, given "/tmp/file.txt" this function will return "/tmp/file0000.txt" unless there is already a file0000.txt in /tmp, in which case it will return "/tmp/file0001.txt".
|
def version_file(filepath):
zero_padding = 4
dirpath, filename = os.path.split(filepath)
base, ext = os.path.splitext(filename)
searchpath = os.path.join(dirpath, "*")
files = glob.glob(searchpath)
versions = []
for file in files:
filename_to_match = os.path.basename(file)
if re.match(base+"[0-9]{%d}"%zero_padding+ext, filename_to_match):
versions.append(filename_to_match)
versions.sort()
version_num = 0
if len(versions) > 0:
latest = versions[-1]
latest_name = os.path.splitext(latest)[0]
idx = len(latest_name) - zero_padding
num_str = latest_name[idx:]
version_num = int(num_str) + 1
return os.path.join(dirpath, base+str(version_num).zfill(zero_padding)+ext)
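
A quick check of the behaviour the query describes, run in a throwaway directory; note that the function itself relies on the os, glob, and re modules already being imported:

import os
import tempfile

tmp = tempfile.mkdtemp()
print(version_file(os.path.join(tmp, "file.txt")))    # .../file0000.txt (no versions yet)
open(os.path.join(tmp, "file0000.txt"), "w").close()
print(version_file(os.path.join(tmp, "file.txt")))    # .../file0001.txt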
|
[
"def manage_old_version_file(file_path):\n # Set old version file path\n file_path = Path(file_path)\n old_version_file = modify_filename_in_path(file_path,\n added='old_',\n prefix=True)\n\n # If old version exists, create a copy without prefix and return\n # that path. If not, create a copy with prefix and set it as the\n # new backup file.\n if old_version_file.exists():\n shutil.copy(str(old_version_file), str(file_path))\n output = file_path\n elif Path(file_path).exists():\n shutil.copy(file_path, old_version_file)\n output = file_path\n\n # Report if no file was found\n else:\n print(Path(file_path).name, 'FILE NOT FOUND IN', str(file_path.parent))\n output = None\n return output",
"def update_version(filepath, version):\n\n filepath = re_version_take.sub('_v{version:02}_t{take:02}'.format(\n **{\n 'version': version.version,\n 'take': version.take\n }\n ), filepath)\n\n return filepath",
"def gen_next_numeric_filename(full_path):\n highest_version_num = get_highest_version_num(full_path)\n path, ext = os.path.splitext(full_path)\n return_val = path + str(highest_version_num+1) + ext\n if os.path.isfile(return_val):\n raise ValueError(\"YOU CHEATED: \" + full_path)\n return return_val",
"def get_highest_version_filename(full_path):\n highest_version_num = get_highest_version_num(full_path)\n if highest_version_num > -1:\n return generate_version_path(full_path, highest_version_num)\n else:\n if os.path.isfile(full_path):\n return full_path\n else:\n return None",
"def _file_bamper(cur_version, new_version, file_path):\n _, copy_path = mkstemp()\n with open(copy_path, mode=\"w\", encoding=\"utf-8\") as cf:\n with open(file_path, encoding=\"utf-8\") as of:\n found = False\n for line in of.readlines():\n if _ver_is_found(cur_version, line):\n found = True\n line = line.replace(cur_version, new_version)\n cf.write(line)\n if not found:\n raise VersionNotFound()\n copystat(file_path, copy_path)\n\n return PathPair(file_path, copy_path)",
"def generate_version_path(generic_path, version_num):\n path, ext = os.path.splitext(generic_path)\n new_path = path + str(version_num)\n new_full_path = os.path.join(new_path, ext)\n return new_full_path",
"def get_current_version(file_path):\n\n return get_local_working_version(file_path)",
"def find_latest(filepath):\n\n search_pattern = re_version_take_user.sub('_v*', filepath)\n\n results = glob.glob(search_pattern)\n # print results\n if not results:\n return None\n\n path_by_version = {}\n for r in results:\n path_by_version[extract_version(r)] = r\n\n # sort by version and then by take\n sorted_versions = sorted(path_by_version.iterkeys(), key=attrgetter('version', 'take'))\n return path_by_version[sorted_versions[-1]]",
"def file_in_same_dir(ref_file, desired_file):\n return os.path.join(*(split_path(ref_file)[:-1] + [desired_file]))",
"def makeFilePath(self, file_path):\n return '%s/%s' % (os.path.dirname(__file__), file_path)",
"def update_filepath(self, filepath):\n raise NotImplementedError",
"def get_local_working_version(file_path):\n\n if not file_path or not os.path.exists(file_path):\n return -1\n\n client = get_artella_client()\n\n current_version = client.file_current_version(file_path=file_path)\n\n return current_version",
"def changeFileNoInFilePath(path: str, fileNo: int) -> str:\n\n separator = r\"[0-9]+\\.\"\n splitted_path = re.split(separator, path, 1)\n new_path = splitted_path[0] + str(fileNo) + \".\" + splitted_path[1]\n return new_path",
"def get_backup_filepath(filepath):\r\n filename = os.path.split(filepath)[1]\r\n return os.path.join(get_backup_path(filepath), timestamp_file(filename))",
"def sort_files(src_name, src_file_path, \r\n src_file_ext, directories, main_path, unsorted_dir_name\r\n ):\r\n \r\n # if file is a directory:\r\n if os.path.isdir(src_file_path):\r\n pass\r\n # if not a directory: \r\n else:\r\n\r\n while True:\r\n for key in directories:\r\n\r\n length = len(directories[key])\r\n\r\n for i in range(length):\r\n ext = (directories[key][i-1])\r\n\r\n if src_file_ext == ext:\r\n print (ext + ' - ' + src_file_ext)\r\n try:\r\n shutil.move(f'{src_file_path}{src_file_ext}', f'{main_path}\\\\{key}')\r\n except shutil.Error:\r\n # Loop and try all the version numbers until it breaks the loop\r\n i = 0\r\n while True:\r\n i += 1\r\n try: \r\n os.rename(f'{src_file_path}{src_file_ext}', f'{main_path}\\\\{key}\\\\{src_name}_{i}{src_file_ext}')\r\n break\r\n except FileExistsError:\r\n pass\r\n else:\r\n pass \r\n break\r\n\r\n\r\n print (ext + ' - ' + src_file_ext)\r\n unsorted_dir = f'{main_path}\\\\{unsorted_dir_name}'\r\n \r\n # make a directory for unsorted files\r\n if os.path.exists(unsorted_dir) != True:\r\n print(f'{unsorted_dir_name} does not exists')\r\n os.mkdir(unsorted_dir)\r\n else:\r\n print(f'{unsorted_dir_name} directory exists')\r\n\r\n try:\r\n shutil.move(f'{src_file_path}{src_file_ext}', unsorted_dir)\r\n except FileNotFoundError:\r\n print ('it exists')\r\n\r\n except shutil.Error: \r\n # Loop and try all the version numbers until it breaks the loop\r\n i = 0\r\n while True:\r\n i += 1\r\n try: \r\n os.rename(f'{src_file_path}{src_file_ext}', f'{unsorted_dir}\\\\{src_name}_{i}{src_file_ext}')\r\n break\r\n except FileExistsError:\r\n pass",
"def get_versioned_dir(repodir, version):\n return os.path.join(repodir, version)",
"def _get_version_path(value, version_prefix):\n filepath = _exists(value)\n if filepath:\n path, filename = os.path.split(filepath)\n filename, ext = os.path.splitext(filename)\n version_filename = u\"%s_%s%s\" % (filename, version_prefix, ext)\n return os.path.join(VERSIONS_BASEDIR, path, version_filename)\n else:\n return None",
"def get_version_num(generic_path, versioned_path):\n generic_base, generic_ext = os.path.splitext(generic_path)\n base_len = len(generic_base)\n versioned_base, versioned_ext = os.path.splitext(versioned_path)\n if (generic_ext == versioned_ext and \n generic_base == versioned_base[:base_len]):\n try:\n return int(versioned_base[base_len:])\n except ValueError:\n return -1\n else:\n return -1",
"def __get_version_from_version_txt(path):\n file = os.path.split(__file__)[0]\n paths = [file,\n os.path.join(file, \"..\"),\n os.path.join(file, \"..\", \"..\"),\n os.path.join(file, \"..\", \"..\", \"..\"),\n path]\n for p in paths:\n fp = os.path.join(p, \"version.txt\")\n if os.path.exists(fp):\n with open(fp, \"r\") as f:\n return int(f.read().strip(\" \\n\\r\\t\"))\n raise FileNotFoundError(\n \"unable to find version.txt in\\n\" + \"\\n\".join(paths))",
"def get_asdf_standard_version(filepath):\n file_obj = file_factory(filepath)\n return file_obj.get_asdf_standard_version()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Versions up the given directory based on other directories in the same parent directory. The given dirpath should not have a version at the end. For example, given "/tmp/v" this function will return "/tmp/v000" unless there is already a v000 dir in /tmp, in which case it will return "/tmp/v001". zero_padding specifies how many digits to include in the version number; the default is 3.
|
def version_dir(dirpath, zero_padding=3):
raise NotImplementedError() # TODO
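
Since the document above is only a stub, here is one possible implementation of the behaviour the query describes — a sketch mirroring the version_file logic earlier in this section, not the dataset's own answer:

import glob
import os
import re

def version_dir_sketch(dirpath, zero_padding=3):
    # Split "/tmp/v" into parent directory "/tmp" and version-less base "v".
    parent, base = os.path.split(dirpath)
    pattern = re.compile(re.escape(base) + r"[0-9]{%d}$" % zero_padding)
    # Collect existing versioned directories such as v000, v001, ...
    existing = sorted(os.path.basename(p)
                      for p in glob.glob(os.path.join(parent, "*"))
                      if os.path.isdir(p) and pattern.match(os.path.basename(p)))
    next_num = int(existing[-1][len(base):]) + 1 if existing else 0
    return os.path.join(parent, base + str(next_num).zfill(zero_padding))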
|
[
"def get_versioned_dir(repodir, version):\n return os.path.join(repodir, version)",
"def updir(path, num=1):\n for _ in range(num):\n path = os.path.dirname(path)\n return path",
"def get_version_dir(*, sha: str) -> pathlib.Path:\n\n return settings.VERSIONS_DIRECTORY / sha[: settings.VERSIONS_HASH_LENGTH]",
"def _norm_with_dir(path):\n normed = normpath(path)\n if path.endswith(os_sep):\n return normed + os_sep\n return normed",
"def get_next_version_path(save_dir: Union[Path, str], name: str):\n root_dir = Path(save_dir) / name\n\n if not root_dir.exists():\n root_dir.mkdir(parents=True, exist_ok=True)\n print(\"Created: \", root_dir)\n\n existing_versions = []\n for p in root_dir.iterdir():\n bn = p.stem\n if p.is_dir() and bn.startswith(\"version_\"):\n dir_ver = bn.split(\"_\")[1].replace('/', '')\n existing_versions.append(int(dir_ver))\n\n if len(existing_versions) == 0:\n next_version = 0\n else:\n next_version = max(existing_versions) + 1\n\n return root_dir / f\"version_{next_version}\"",
"def swap_dirs_if_needed(file_path:PATH_LIKE, alternate_directory_pairings:List[DIRECTORY_PAIRING], check_exists:bool=True) -> PATH_LIKE:\n get_path_part_count = lambda p: len(get_path_parts(p))\n\n if Path(file_path).exists():\n return file_path\n else:\n return_path = get_clean_path(file_path).as_posix()\n path_alternates = []\n # Only support 2 pairings, for now\n for dir_one, dir_two, *_ in alternate_directory_pairings:\n # We want to sort by the length of the paths, and only include existing paths\n dir_pairs = [Path(p) for p in sorted([dir_one, dir_two], key=get_path_part_count, reverse=True)]\n # dir_pairs = [get_path_from_path_parts(path_part) for path_part in sorted([path_parts for path_parts in get_path_parts([dir_one, dir_two])], key=len, reverse=True)]\n if any([p.exists() and return_path.startswith(p.as_posix()) for p in dir_pairs]):\n path_alternates.append((dir_pairs[0].as_posix(), dir_pairs[1].as_posix(), dir_pairs[0].exists()))\n path_alternates.append((dir_pairs[1].as_posix(), dir_pairs[0].as_posix(), dir_pairs[1].exists()))\n\n for this_dir, other_dir, this_dir_exists in path_alternates:\n if return_path.startswith(other_dir) and this_dir_exists:\n rest_of_path = return_path[len(str(other_dir)):]\n new_path = Path('{0}/{1}'.format(this_dir, rest_of_path))\n if check_exists is True and new_path.exists():\n return_path = new_path\n else:\n return_path = new_path\n break\n\n return return_path",
"def normalize_svn_path(path, allow_empty=False):\n\n norm_path = path_join(*path.split('/'))\n if not allow_empty and not norm_path:\n raise IllegalSVNPathError(\"Path is empty\")\n return norm_path",
"def _dir_revhash(gid: int) -> str:\n dir_hash = list(reversed(str(gid)))\n dir_hash.pop()\n return path.join(*dir_hash) if dir_hash else path.curdir",
"def replace_same_level_dir(reference_dir_path, target_dir_path):\n return os.path.join(os.path.dirname(reference_dir_path), target_dir_path)\n # generate_dir_if_not_exist(resulting__dir)",
"def correct(directory_name):\n add_zeros = lambda string: '{0:02d}'.format(int(string))\n elements = directory_name.split('_')\n return '{0}_{1}_{2}_{3}_{4}_{5}_{6}'.format(elements[0], elements[1], add_zeros(elements[2]), add_zeros(elements[3]), add_zeros(elements[4]), add_zeros(elements[5]), add_zeros(elements[6]))",
"def _remove_versionned_directories(self, dest):\n not_versionned = ['part']\n for filep in os.listdir(dest):\n if not filep in not_versionned:\n path = os.path.join(dest, filep)\n if os.path.isdir(path):\n shutil.rmtree(path)\n else:\n os.remove(path)",
"def get_vcs_root(path):\n previous_path = path\n while get_vcs_infos(path) is None:\n path = abspardir(path)\n if path == previous_path:\n return\n else:\n previous_path = path\n return osp.abspath(path)",
"def version(vCtrlFolder, current=False):\n\t# Check if directory exists\n\tif os.path.isdir(vCtrlFolder):\n\t\t# Try to figure out versioning based on existing contents\n\t\ttry:\n\t\t\tvCtrlFileLs = os.listdir(vCtrlFolder)\n\t\t\tvrsLs = []\n\t\t\t# Check all items in vCtrlFolder for 'v###' pattern\n\t\t\tfor vCtrlItem in vCtrlFileLs:\n\t\t\t\t# Get first item of split list if _ found to strip out 'approved' from version\n\t\t\t\tcontentVrs = vCtrlItem.split(\"_\")[0]\n\t\t\t\t# If found strip 'v'\n\t\t\t\tif contentVrs.startswith(\"v\"):\n\t\t\t\t\tcontentVrs = contentVrs.replace(\"v\", \"\")\n\t\t\t\telif contentVrs.startswith(\".v\"):\n\t\t\t\t\tcontentVrs = contentVrs.replace(\".v\", \"\")\n\t\t\t\t# Check for numeral(s) and add to vrsLs\n\t\t\t\tif contentVrs.isdigit():\n\t\t\t\t\tvrsLs.append(contentVrs)\n\t\t\t# Sort vrsLs and retrieve last item (highest digit)\n\t\t\tvrsLs.sort()\n\t\t\t# print(vrsLs)\n\t\t\tcurrentVersion = int(vrsLs[-1])\n\n\t\t# If no versioning detected in contents start new versioning\n\t\texcept IndexError:\n\t\t\tcurrentVersion = 0\n\telse:\n\t\tprint(\"vCtrl: directory doesn't exist.\")\n\t\tcurrentVersion = 0\n\n\t# Padding control and versioning increment\n\tif current:\n\t\tnewVersion = currentVersion\n\telse:\n\t\tnewVersion = currentVersion + 1\n\n\t# Prepend 'v' and padding to version\n\tnewVersion = \"v%03d\" %newVersion\n\n\treturn newVersion",
"def version_from_path(self):\n try:\n self.version_label = self.path.split(\"/\")[1]\n (self.major, self.minor, self.revision) = [\n int(s) for s in self.version_label.lstrip(\"v\").split(\".\")\n ]\n except (IndexError, ValueError):\n return \"\"",
"def make_timestamped_dir(path):\n date = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')\n\n newdir = os.path.join(os.path.expanduser(path), date)\n make_if_not_exists(newdir)\n\n return newdir",
"def fl_fix_dirname(dirname):\n _fl_fix_dirname = library.cfuncproto(\n library.load_so_libforms(), \"fl_fix_dirname\",\n xfdata.STRING, [xfdata.STRING],\n \"\"\"char * fl_fix_dirname(char * dir)\"\"\")\n library.check_if_flinitialized()\n s_dirname = library.convert_to_bytestrc(dirname)\n library.keep_elem_refs(dirname, s_dirname)\n retval = _fl_fix_dirname(s_dirname)\n if isinstance(retval, bytes):\n return retval.decode('utf-8')\n else: # str\n return retval",
"def generate_version_path(generic_path, version_num):\n path, ext = os.path.splitext(generic_path)\n new_path = path + str(version_num)\n new_full_path = os.path.join(new_path, ext)\n return new_full_path",
"def sort_by_directory(path):\n\treturn 1 - path.is_directory",
"def getDirectoryFromPath(path: str) -> str:\n path_temp = path.rpartition(\"/\")\n new_path = path_temp[0] + path_temp[1]\n return new_path",
"def create_dated_directory(parent_directory: str) -> str:\n today = datetime.today()\n dir_path = os.path.join(parent_directory, today.strftime('%Y%m%d'))\n FileUtils.ensure_dir(dir_path)\n return dir_path"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns a string of the same length as the given name, with all the non-alphanumeric characters replaced by underscores. name: the string to make alphanumeric.
|
def alphanumeric(name):
seq = []
for char in name:
if not char.isalnum():
seq.append('_')
else:
seq.append(char)
return ''.join(seq)
|
[
"def make_ident(name):\n ident = ''\n for c in name:\n if idaapi.is_ident_char(ord(c)):\n ident += c\n else:\n ident += '_'\n return ident",
"def mangle(self):\n wrk = self._title\n wrk = wrk.strip().lower()\n last_ch = None\n tmp = \"\"\n for ch in wrk:\n #log.debug(str(ord(ch)))\n if (ch == \"-\" or \n ch == \"_\" or \n (ord(ch) >= ord(\"a\") and ord(ch) <= ord(\"z\")) or\n (ord(ch) >= ord(\"0\") and ord(ch) <= ord(\"9\"))\n ): \n tmp += ch\n last_ch = ch \n else:\n if last_ch != \"_\":\n tmp += \"_\"\n last_ch = \"_\"\n wrk = tmp.strip(\"_\") \n return wrk",
"def get_reg_name(self, name):\n return name.lower().replace('-', '').replace('_', '').replace(' ', '')",
"def normalize_username(name):\n underscores = re.sub(r'\\s', '_', name)\n single_space = re.sub(r'_+', ' ', underscores)\n trimmed = single_space.strip()\n first = trimmed[0:1]\n rest = trimmed[1:]\n return first.upper() + rest",
"def create_random_name(self):\n name = ''\n for _ in range(self.NAME_LENGTH):\n name += choice(ascii_letters)\n return name",
"def camel_to_underscore(name):\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', name)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()",
"def get_urlname(cls, name):\n return '-'.join(''.join(ch for ch in word if ch.isalnum()) \\\n for word in name.split())",
"def _CreateLegalIdentifier(input_string):\n return re.sub(r'[\\W_]', '', input_string)",
"def camelcase_to_underscore(name):\n if not name:\n return name\n\n return CAMELCASE_RE.sub(r'\\1_\\2', name).lower()",
"def user_name_for(name):\n name = name.replace(\"_\", \" \")\n result = \"\"\n last_lower = False\n\n for c in name:\n if c.isupper() and last_lower:\n result += \" \"\n last_lower = c.islower()\n result += c\n\n return result.capitalize()",
"def underscore(word, lowercase=True):\n word = re.sub(r\"([A-Z]+)([A-Z][a-z])\", r'\\1_\\2', word)\n word = re.sub(r\"([a-z\\d])([A-Z])\", r'\\1_\\2', word)\n word = word.replace(\"-\", \"_\")\n if lowercase:\n word = word.lower()\n return word",
"def _enc_name(self, name):\n if name in self.__INVARIANT__:\n return name\n return name.replace('-', '_')",
"def underscore_to_camelcase(name):\n def replace_fn(match):\n \"\"\"\n Upercase first char after \"_\".\n\n Returns a char.\n\n \"\"\"\n return match.group(1).upper()\n\n if not name:\n return name\n\n name = UNDERSCORE_RE.sub(replace_fn, name)\n return name[0].lower() + name[1:]",
"def _make_public_name(name, suffix = \"\"):\n return name.lstrip(\"_\") + suffix",
"def _createMenuPathName(self, name):\n # hide anything between brackets\n name = re.sub(\"\\(.*\\)\", \"\", name)\n # replace invalid chars\n name = name.replace(\" \", \"_\")\n if name and name[0] in \"0123456789_\":\n name = \"_\" + name\n name = re.sub(\"[^a-zA-z_0-9]\", \"\", name)\n return name.lower()",
"def get_untransformed_name(name):\n if not is_transformed_name(name):\n raise ValueError(f\"{name} does not appear to be a transformed name\")\n return \"_\".join(name.split(\"_\")[:-3])",
"def format_resource_name(name):\n return (name\n # always replace underscores first, since other replacements\n # contain underscores as part of replacement\n .replace('_', '__')\n .replace(' ', '_s')\n .replace('\\'', '_a')\n .replace('/', '_f')\n .replace('[', '_l')\n .replace(']', '_r'))",
"def convert_name(name):\n get_length = len(name) # get length of name\n if get_length < 3: # if less than 3 letters in name\n return \"ERROR\"\n else:\n three_letters_beginning = name[0:3] # first 3 letters uppercase\n two_letters_end = name[-2:] # last 2 letters lowercase\n letter_count = str(get_length) # Convert int to string\n return \"{}-{}{}\".format(three_letters_beginning.upper(), letter_count, two_letters_end.lower())",
"def createFilename(self, name):\n validFilenameChars = \"-_.()%s%s\" % (string.ascii_letters, string.digits)\n cleanedFilename = unicodedata.normalize('NFKD', name)\n return ''.join(c for c in cleanedFilename if c in validFilenameChars)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Calculate the interval between w_time and s_time.
|
def _get_duration(self, w_time, s_time):
w_time_list = list(map(int, w_time.split(':')))
s_time_list = list(map(int, s_time.split(':')))
if w_time_list[1] < s_time_list[1]: # wake minute < sleep minute
w_time_list[1] += 60
w_time_list[0] -= 1
if w_time_list[0] < s_time_list[0]: # wake hour < sleep hour
w_time_list[0] += 24
dur_list = [(w_time_list[x] - s_time_list[x])
for x in range(len(w_time_list))]
duration = str(dur_list[0])
if len(duration) == 1: # change hour from '1' to '01', e.g.
duration = '0' + duration
duration += self._quarter_hour_to_decimal(dur_list[1])
return duration
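
The helper _quarter_hour_to_decimal is referenced but not included in the record. Given that `duration` is assembled as an hours string, one plausible reading is that it turns a minute count into a quarter-hour decimal suffix; the sketch below is a guess at that behaviour, not the original implementation:

def _quarter_hour_to_decimal_sketch(minutes):
    # Hypothetical stand-in: 0 -> ".00", 15 -> ".25", 30 -> ".50", 45 -> ".75",
    # rounding the minute count down to the nearest quarter hour.
    quarters = (minutes // 15) * 25
    return "." + str(quarters).zfill(2)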
|
[
"def interval(self):\n return str(self.time_interval) + self.time_unit",
"def scen_t_secs(self):\n hydro_datetimes=self.t_secs*self.scenario.scu + self.time0 \n start_i, stop_i=np.searchsorted(hydro_datetimes,\n [self.scenario.start_time,\n self.scenario.stop_time])\n if start_i>0:\n start_i-=1\n if stop_i <= len(self.t_secs):\n # careful to check <= so we don't drop the last time\n # step.\n stop_i+=1\n return self.t_secs[start_i:stop_i]",
"def findAllowableOcculterSlews(self, sInds, old_sInd, sd, slewTimes, obsTimeArray, intTimeArray, mode):\r\n TK = self.TimeKeeping\r\n Obs = self.Observatory\r\n TL = self.TargetList\r\n\r\n # 0. lambda function that linearly interpolates Integration Time between obsTimes\r\n linearInterp = lambda y,x,t: np.diff(y)/np.diff(x)*(t-np.array(x[:,0]).reshape(len(t),1))+np.array(y[:,0]).reshape(len(t),1)\r\n \r\n # allocate settling time + overhead time\r\n tmpCurrentTimeAbs = TK.currentTimeAbs.copy() + Obs.settlingTime + mode['syst']['ohTime']\r\n tmpCurrentTimeNorm = TK.currentTimeNorm.copy() + Obs.settlingTime + mode['syst']['ohTime']\r\n \r\n # 1. initializing arrays\r\n obsTimes = np.array([obsTimeArray[:,0],obsTimeArray[:,-1]]) # nx2 array with start and end times of obsTimes for each star\r\n intTimes_int = np.zeros(obsTimeArray.shape)*u.d # initializing intTimes of shape nx50 then interpolating\r\n intTimes_int = np.hstack([intTimeArray[:,0].reshape(len(sInds),1).value, \\\r\n linearInterp(intTimeArray.value,obsTimes.T,obsTimeArray[:,1:].value)])*u.d\r\n allowedSlewTimes = np.zeros(obsTimeArray.shape)*u.d\r\n allowedintTimes = np.zeros(obsTimeArray.shape)*u.d \r\n allowedCharTimes = np.zeros(obsTimeArray.shape)*u.d \r\n obsTimeArrayNorm = obsTimeArray.value - tmpCurrentTimeAbs.value\r\n \r\n # obsTimes -> relative to current Time\r\n minObsTimeNorm = obsTimes[0,:].T - tmpCurrentTimeAbs.value\r\n maxObsTimeNorm = obsTimes[1,:].T - tmpCurrentTimeAbs.value\r\n ObsTimeRange = maxObsTimeNorm - minObsTimeNorm\r\n \r\n # getting max possible intTime\r\n maxIntTimeOBendTime, maxIntTimeExoplanetObsTime, maxIntTimeMissionLife = TK.get_ObsDetectionMaxIntTime(Obs, mode, tmpCurrentTimeNorm,TK.OBnumber)\r\n maxIntTime = min(maxIntTimeOBendTime, maxIntTimeExoplanetObsTime, maxIntTimeMissionLife) # Maximum intTime allowed\r\n \r\n # 2. 
giant array of min and max slew times, starts at current time, ends when stars enter keepout (all same size)\r\n # each entry either has a slew time value if a slew is allowed at that date or 0 if slewing is not allowed\r\n \r\n # first filled in for the current OB\r\n minAllowedSlewTimes = np.array([minObsTimeNorm.T]*len(intTimes_int.T)).T #just to make it nx50 so it plays nice with the other arrays\r\n maxAllowedSlewTimes = maxIntTime.value - intTimes_int.value\r\n maxAllowedSlewTimes[maxAllowedSlewTimes > Obs.occ_dtmax.value] = Obs.occ_dtmax.value\r\n\r\n # conditions that must be met to define an allowable slew time\r\n cond1 = minAllowedSlewTimes >= Obs.occ_dtmin.value # minimum dt time in dV map interpolant\r\n cond2 = maxAllowedSlewTimes <= Obs.occ_dtmax.value # maximum dt time in dV map interpolant\r\n cond3 = maxAllowedSlewTimes > minAllowedSlewTimes\r\n cond4 = intTimes_int.value < ObsTimeRange.reshape(len(sInds),1)\r\n \r\n conds = cond1 & cond2 & cond3 & cond4\r\n minAllowedSlewTimes[np.invert(conds)] = np.Inf #these are filtered during the next filter\r\n maxAllowedSlewTimes[np.invert(conds)] = -np.Inf\r\n \r\n # one last condition to meet\r\n map_i,map_j = np.where((obsTimeArrayNorm > minAllowedSlewTimes) & (obsTimeArrayNorm < maxAllowedSlewTimes))\r\n\r\n # 2.5 if any stars are slew-able to within this OB block, populate \"allowedSlewTimes\", a running tally of possible slews\r\n # within the time range a star is observable (out of keepout)\r\n if map_i.shape[0] > 0 and map_j.shape[0] > 0:\r\n allowedSlewTimes[map_i,map_j] = obsTimeArrayNorm[map_i,map_j]*u.d\r\n allowedintTimes[map_i,map_j] = intTimes_int[map_i,map_j]\r\n allowedCharTimes[map_i,map_j] = maxIntTime - intTimes_int[map_i,map_j]\r\n \r\n # 3. search future OBs \r\n OB_withObsStars = TK.OBstartTimes.value - np.min(obsTimeArrayNorm) - tmpCurrentTimeNorm.value # OBs within which any star is observable\r\n \r\n\r\n if any(OB_withObsStars > 0):\r\n nOBstart = np.argmin( np.abs(OB_withObsStars) )\r\n nOBend = np.argmax( OB_withObsStars ) \r\n \r\n # loop through the next 5 OBs (or until mission is over if there are less than 5 OBs in the future)\r\n for i in np.arange(nOBstart,np.min([nOBend,nOBstart+5])):\r\n \r\n # max int Times for the next OB\r\n maxIntTimeOBendTime, maxIntTimeExoplanetObsTime, maxIntTimeMissionLife = TK.get_ObsDetectionMaxIntTime(Obs, mode, TK.OBstartTimes[i+1],i+1)\r\n maxIntTime_nOB = min(maxIntTimeOBendTime, maxIntTimeExoplanetObsTime, maxIntTimeMissionLife) # Maximum intTime allowed\r\n \r\n # min and max slew times rel to current Time (norm)\r\n nOBstartTimeNorm = np.array([TK.OBstartTimes[i+1].value - tmpCurrentTimeNorm.value]*len(sInds))\r\n \r\n # min slew times for stars start either whenever the star first leaves keepout or when next OB stars, whichever comes last\r\n minAllowedSlewTimes_nOB = np.array([np.max([minObsTimeNorm,nOBstartTimeNorm],axis=0).T]*len(maxAllowedSlewTimes.T)).T \r\n maxAllowedSlewTimes_nOB = nOBstartTimeNorm.reshape(len(sInds),1) + maxIntTime_nOB.value - intTimes_int.value\r\n maxAllowedSlewTimes_nOB[maxAllowedSlewTimes_nOB > Obs.occ_dtmax.value] = Obs.occ_dtmax.value\r\n \r\n # amount of time left when the stars are still out of keepout\r\n ObsTimeRange_nOB = maxObsTimeNorm - np.max([minObsTimeNorm,nOBstartTimeNorm],axis=0).T\r\n \r\n # condition to be met for an allowable slew time\r\n cond1 = minAllowedSlewTimes_nOB >= Obs.occ_dtmin.value\r\n cond2 = maxAllowedSlewTimes_nOB <= Obs.occ_dtmax.value\r\n cond3 = maxAllowedSlewTimes_nOB > minAllowedSlewTimes_nOB\r\n 
cond4 = intTimes_int.value < ObsTimeRange_nOB.reshape(len(sInds),1)\r\n cond5 = intTimes_int.value < maxIntTime_nOB.value\r\n conds = cond1 & cond2 & cond3 & cond4 & cond5\r\n \r\n minAllowedSlewTimes_nOB[np.invert(conds)] = np.Inf\r\n maxAllowedSlewTimes_nOB[np.invert(conds)] = -np.Inf\r\n \r\n # one last condition\r\n map_i,map_j = np.where((obsTimeArrayNorm > minAllowedSlewTimes_nOB) & (obsTimeArrayNorm < maxAllowedSlewTimes_nOB))\r\n \r\n # 3.33 populate the running tally of allowable slew times if it meets all conditions\r\n if map_i.shape[0] > 0 and map_j.shape[0] > 0:\r\n allowedSlewTimes[map_i,map_j] = obsTimeArrayNorm[map_i,map_j]*u.d\r\n allowedintTimes[map_i,map_j] = intTimes_int[map_i,map_j]\r\n allowedCharTimes[map_i,map_j] = maxIntTime_nOB - intTimes_int[map_i,map_j]\r\n \r\n # 3.67 filter out any stars that are not observable at all\r\n filterDuds = np.sum(allowedSlewTimes,axis=1) > 0.\r\n sInds = sInds[filterDuds]\r\n \r\n # 4. choose a slew time for each available star\r\n # calculate dVs for each possible slew time for each star\r\n allowed_dVs = Obs.calculate_dV(TL, old_sInd, sInds, sd[filterDuds], allowedSlewTimes[filterDuds,:], tmpCurrentTimeAbs)\r\n\r\n if len(sInds.tolist()) > 0:\r\n # select slew time for each star\r\n dV_inds = np.arange(0,len(sInds))\r\n sInds,intTime,slewTime,dV = self.chooseOcculterSlewTimes(sInds, allowedSlewTimes[filterDuds,:], \\\r\n allowed_dVs[dV_inds,:], allowedintTimes[filterDuds,:], allowedCharTimes[filterDuds,:])\r\n\r\n return sInds,intTime,slewTime,dV\r\n \r\n else:\r\n empty = np.asarray([],dtype=int)\r\n return empty,empty*u.d,empty*u.d,empty*u.m/u.s",
"def filterOcculterSlews(self, sInds, slewTimes, obsTimeArray, intTimeArray, mode):\r\n \r\n TK = self.TimeKeeping\r\n Obs = self.Observatory\r\n\r\n #allocate settling time + overhead time\r\n tmpCurrentTimeAbs = TK.currentTimeAbs.copy() + Obs.settlingTime + mode['syst']['ohTime']\r\n tmpCurrentTimeNorm = TK.currentTimeNorm.copy() + Obs.settlingTime + mode['syst']['ohTime']\r\n \r\n # 0. lambda function that linearly interpolates Integration Time between obsTimes\r\n linearInterp = lambda y,x,t: np.diff(y)/np.diff(x)*(t-np.array(x[:,0]).reshape(len(t),1))+np.array(y[:,0]).reshape(len(t),1)\r\n \r\n # 1. initializing arrays\r\n obsTimesRange = np.array([obsTimeArray[:,0],obsTimeArray[:,-1]]) # nx2 array with start and end times of obsTimes for each star\r\n intTimesRange = np.array([intTimeArray[:,0],intTimeArray[:,-1]]) \r\n \r\n OBnumbers = np.zeros([len(sInds),1]) #for each sInd, will show during which OB observations will take place\r\n maxIntTimes = np.zeros([len(sInds),1])*u.d\r\n \r\n intTimes = linearInterp( intTimesRange.T,obsTimesRange.T, \\\r\n (tmpCurrentTimeAbs + slewTimes).reshape(len(sInds),1).value)*u.d #calculate intTimes for each slew time\r\n \r\n minObsTimeNorm = (obsTimesRange[0,:] - tmpCurrentTimeAbs.value).reshape([len(sInds),1])\r\n maxObsTimeNorm = (obsTimesRange[1,:] - tmpCurrentTimeAbs.value).reshape([len(sInds),1])\r\n ObsTimeRange = maxObsTimeNorm - minObsTimeNorm\r\n \r\n # 2. find OBnumber for each sInd's slew time\r\n if len(TK.OBendTimes) > 1:\r\n for i in range(len(sInds)):\r\n S = np.where(TK.OBstartTimes.value - tmpCurrentTimeNorm.value < slewTimes[i].value)[0][-1]\r\n F = np.where(TK.OBendTimes.value - tmpCurrentTimeNorm.value < slewTimes[i].value)[0]\r\n \r\n # case when slews are in the first OB\r\n if F.shape[0] == 0:\r\n F = -1\r\n else:\r\n F = F[-1]\r\n \r\n # slew occurs within an OB (nth OB has started but hasn't ended)\r\n if S != F: \r\n OBnumbers[i] = S\r\n maxIntTimeOBendTime, maxIntTimeExoplanetObsTime, maxIntTimeMissionLife = TK.get_ObsDetectionMaxIntTime(Obs, mode, TK.OBstartTimes[S],S)\r\n maxIntTimes[i] = min(maxIntTimeOBendTime, maxIntTimeExoplanetObsTime, maxIntTimeMissionLife) # Maximum intTime allowed\r\n \r\n # slew occurs between OBs, badbadnotgood\r\n else: \r\n OBnumbers[i] = -1 \r\n maxIntTimes[i] = 0*u.d\r\n OBstartTimeNorm = (TK.OBstartTimes[np.array(OBnumbers,dtype=int)].value - tmpCurrentTimeNorm.value)\r\n else:\r\n maxIntTimeOBendTime, maxIntTimeExoplanetObsTime, maxIntTimeMissionLife = TK.get_ObsDetectionMaxIntTime(Obs, mode, tmpCurrentTimeNorm)\r\n maxIntTimes[:] = min(maxIntTimeOBendTime, maxIntTimeExoplanetObsTime, maxIntTimeMissionLife) # Maximum intTime allowed\r\n OBstartTimeNorm = np.zeros(OBnumbers.shape) #np.array([tmpCurrentTimeNorm.value]*len(OBnumbers)).reshape(OBnumbers.shape)\r\n \r\n #finding the minimum possible slew time (either OBstart or when star JUST leaves keepout)\r\n minAllowedSlewTime = np.max([OBstartTimeNorm,minObsTimeNorm],axis=0)\r\n \r\n # 3. 
start filtering process\r\n good_inds = np.where((OBnumbers >= 0) & (ObsTimeRange > intTimes.value))[0] \r\n # ^ star slew within OB -AND- can finish observing before it goes back into keepout\r\n if good_inds.shape[0] > 0:\r\n #the good ones\r\n sInds = sInds[good_inds]\r\n slewTimes = slewTimes[good_inds]\r\n intTimes = intTimes[good_inds]\r\n OBstartTimeNorm = OBstartTimeNorm[good_inds]\r\n minAllowedSlewTime = minAllowedSlewTime[good_inds]\r\n \r\n #maximum allowed slew time based on integration times\r\n maxAllowedSlewTime = maxIntTimes[good_inds].value - intTimes.value\r\n maxAllowedSlewTime[maxAllowedSlewTime < 0] = -np.Inf \r\n maxAllowedSlewTime += OBstartTimeNorm #calculated rel to currentTime norm\r\n \r\n #checking to see if slewTimes are allowed\r\n good_inds = np.where( (slewTimes.reshape([len(sInds),1]).value > minAllowedSlewTime) & \\\r\n (slewTimes.reshape([len(sInds),1]).value < maxAllowedSlewTime) )[0]\r\n \r\n slewTimes = slewTimes[good_inds]\r\n else:\r\n slewTimes = slewTimes[good_inds]\r\n \r\n return sInds[good_inds], intTimes[good_inds].flatten(), slewTimes",
"def time_param(S):\n # dt\n dt = datetime.strptime(S['Time_step'], S['Time_format']).time()\n if dt.hour != 0 and dt.minute == 0 and dt.second == 0:\n dt = dt.hour\n elif dt.hour == 0 and dt.minute != 0 and dt.second == 0:\n dt = dt.minute / 60\n else:\n print_error('Period_length')\n \n Datetime_format = S['Date_format'] + ' ' + S['Time_format']\n start = S['Period_start'] + ' ' + S['Period_start_time']\n dt_start = datetime.strptime(start, Datetime_format)\n end = S['Period_end'] + ' ' + S['Period_start_time']\n dt_end = datetime.strptime(end, Datetime_format)\n \n # Nbr_of_time_steps\n Nbr_of_time_steps = (((dt_end - dt_start).days + 1) * 24) / dt\n Nbr_of_time_steps_per_day = 24 / dt\n \n # Period index\n if (int(Nbr_of_time_steps) == Nbr_of_time_steps and \n int(Nbr_of_time_steps_per_day) == Nbr_of_time_steps_per_day):\n Periods = list(range(0, int(Nbr_of_time_steps)))\n else:\n print_error('time_step_int')\n \n # Day index\n Days = list(range((dt_end - dt_start).days))\n \n # Hour index\n Hours = list(range(0,24))\n \n # Date of each day\n Day_dates = [dt_end - timedelta(days=i) for i in range(len(Days))]\n\n Time = []\n for t in range(0, int(Nbr_of_time_steps_per_day)):\n Time.append(datetime.strftime(Day_dates[0] + timedelta(hours=t*dt), S['Time_format'])) \n \n return Periods, Nbr_of_time_steps, dt, Day_dates, Time, dt_end, Days, Hours",
"def getInterval(self) -> \"SbTime const &\":\n return _coin.SoTimerSensor_getInterval(self)",
"def _updateTime(self):\n # convert seconds to int and split fraction\n sAdj = 0\n if self.time['f'] != 0: # split float\n sAdj, f = divmod(self.time['f'], 1)\n self.time['f'] = f\n # check for floats in second's vales\n self.time['s'] = self.time['s'] + sAdj\n if self.time['s'] != 0: \n sAdj = 0\n s, f = divmod(self.time['s'], 1)\n if f != 0: # there is a fraction in the seconds\n self.time['f'] = self.time['f'] + f\n # check floats again\n sAdj, fAdj = divmod(self.time['f'], 1) # check if fract is > 1\n if sAdj != 0: # f, s, needs to be adjusted\n self.time['f'] = fAdj\n self.time['s'] = int(s + sAdj) # final s as int\n # s is now and int; split seconds\n mAdj = 0\n if self.time['s'] != 0:\n mAdj, s = divmod(self.time['s'], self.sPerMin)\n if mAdj != 0: # s, m need to be adjusted\n self.time['s'] = s\n self.time['m'] = self.time['m'] + mAdj\n # check minutes\n hAdj = 0\n if self.time['m'] != 0:\n hAdj, m = divmod(self.time['m'], self.mPerHour)\n if hAdj != 0: # m,h need to be adjusted\n self.time['m'] = m\n self.time['h'] = self.time['h'] + hAdj\n # check hours\n dAdj = 0\n if self.time['h'] != 0:\n dAdj, h = divmod(self.time['h'], self.hPerDay)\n if dAdj != 0: # d, h need to be adjusted\n self.time['h'] = h\n self.time['d'] = self.time['d'] + dAdj\n # check days",
"def get_time_interval(self):\r\n time_interval0 = self.getFixDate()[:-1] # get the list without last element.\r\n time_interval1 = self.getFixDate()[1:] # get the list without first element.\r\n time_interval = [y-x for x,y in zip(time_interval0, time_interval1)] # Use two list to get the difference of fix_time.\r\n return time_interval",
"def time_span(self):\n s = self['start'][0]\n e = self['end'].iloc[-1]\n return IntervalSet(s, e)",
"def interval_seconds(self):\n return self._interval_seconds",
"def duration_outside_nwh(\n self,\n starttime: datetime.time = datetime.time(NORMAL_DAY_START_H),\n endtime: datetime.time = datetime.time(NORMAL_DAY_END_H),\n ) -> datetime.timedelta:\n total = datetime.timedelta()\n for interval in self.intervals:\n total += interval.duration_outside_uk_normal_working_hours(\n starttime, endtime\n )\n return total",
"def get_timespan(self):\n # NOTE: Scopes seem to always have a grid of 10x10 divisions,\n # so we just multiply the time/div by 10.\n time_per_div = float(self.port.query(\"TIME_DIV?\"))\n timespan = 10. * time_per_div\n\n # End of get_timespan().\n return timespan",
"def _get_time_weights(self):\n ps_int = self.point_source.integrate(self.Emin, self.Emax)\n bg_int = self.background.integrate(self.Emin, self.Emax)\n tot_flux = ps_int + bg_int\n \n weights = [ps_int / tot_flux,\n bg_int / tot_flux]\n \n Nex_ps = self.N * weights[0]\n Nex_bg = self.N * weights[1]\n\n aeff = self.effective_area.to(u.cm ** 2)\n time = (np.random.exponential(((Nex_bg+Nex_ps) / ((tot_flux) * aeff)).value)*u.s\n ).to(u.yr)\n\n self.Nex_ps = Nex_ps.value\n self.Nex_bg = Nex_bg.value\n\n return time, weights",
"def getBookingIntervalInSeconds(self):\n return self.getBookingInterval() * 60",
"def time_update(self):\r\n self.time = []\r\n t = [0] + self.time_final_all_section()\r\n for i in range(self.number_of_section):\r\n self.time.append((t[i+1] - t[i]) / 2.0 * self.tau[i]\r\n + (t[i+1] + t[i]) / 2.0)\r\n return np.concatenate([i for i in self.time])",
"def intervalOfValidity( counterdict ):\n timetree = counterdict['RichHPDImageSummary/RICH_EventTime']\n min = 1E-6*timetree.attrib('min')\n max = 1E-6*timetree.attrib('max')\n return (ctime(minval),ctime(maxval))",
"def calc_Sww(p, t, z0, h, sw_disprel=False, rho=1024, fs=1.0, window='hanning', nperseg=1024, **kw):\n omega, Spp = welch(p, fs=fs, window=window, nperseg=nperseg, return_onesided=True, **kw)\n omega = 2*np.pi*omega # Radian frequency.\n\n if sw_disprel:\n g = 9.81 # [m2/s2].\n k = omega/np.sqrt(g*h)\n else:\n k = np.array([ksgw(om, h) for om in omega]) # Get k from the linear wave dispersion relation [rad/m].\n\n # Convert pressure spectrum to w spectrum at each depth using linear wave theory.\n cff = (k/rho/omega**2)**2\n tanhkh = np.tanh(k*(z0 + h))**2\n Sww = Spp*cff*tanhkh\n\n omega = omega/(2*np.pi) # Back to linear frequency.\n\n return omega, Sww",
"def __encode_time(self, time_lsw, time_msw):\n\n msw_word_len = self._config.get(time_msw).word_len\n msw_data = self.raw.get(time_msw)\n lsw_data = self.raw.get(time_lsw)\n double_word = ((msw_data << msw_word_len) | lsw_data)\n return double_word",
"def square_integral(self,time0,time1):\n return self.value**2*(time1-time0)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Coerce a number of minutes q to the nearest quarter hour.
|
def _get_closest_quarter(q: int):
if q < 8:
closest_quarter = 0
elif 8 <= q < 23:
closest_quarter = 15
elif 23 <= q < 37:
closest_quarter = 30
elif q < 60:
closest_quarter = 45
else:
raise ValueError(f'q must be < 60 in {__name__}')
return closest_quarter
|
[
"def _convert_time(self, quarter_length):\n # TODO(annahuang): Take tempo change into account.\n # Time is in quarter-note counts from the beginning of the score.\n return quarter_length * 60.0 / _DEFAULT_QPM",
"def steps_per_quarter_to_steps_per_second(steps_per_quarter=4, qpm=120):\n return steps_per_quarter * qpm / 60.0",
"def parse_time(q):\n year = q.split('_')[0]\n quarter = q.split('_')[-1]\n if quarter == '1q':\n month = 'January'\n elif quarter == '2q':\n month = 'April'\n elif quarter == '3q':\n month = 'July'\n else:\n month = 'October'\n return parser.parse('1 ' + month + ' ' + year)",
"def seconds_per_quarter(self, val: float):\n if(val is None):\n self._secs_per_q = self.DEFAULT_VAL\n elif(val <= 0):\n raise ValueError(\"Can't have a seconds per quarter value less then or equal to 0!\")\n\n self._secs_per_q = val",
"def seconds_per_quarter(self) -> float:\n return self._secs_per_q",
"def _adjust_time(time, quarter, end_of_quarter, league):\n new_time = re.split(':', time)\n minutes = int(new_time[0])\n seconds = int(new_time[1])\n if minutes is 0 and not end_of_quarter:\n end_of_quarter = True\n elif end_of_quarter and minutes > 1:\n quarter += 1\n end_of_quarter = False\n overall_time = _calc_overall_time(seconds, minutes, quarter, league)\n time_dict = {}\n time_dict['overall_time'] = overall_time\n time_dict['quarter_time'] = time\n time_dict['quarter'] = quarter\n return time_dict, quarter, end_of_quarter",
"def to_seconds(quarter, timestamp):\n seconds_in_quarter = 12 * 60\n total_seconds = seconds_in_quarter * (quarter - 1)\n\n minutes, seconds = timestamp.split(':')\n total_elapsed = total_seconds + seconds_in_quarter - (int(minutes) * 60) - int(seconds)\n return total_elapsed",
"def get_quarter(date):\n return 1+(date.month-1)//3",
"def __mul__(self, k):\n hour = self.hour * k\n minute = self.minute * k\n second = self.second * k\n res = Time(hour, minute, second)\n res.normalize()\n return res",
"def total_time(self):\n return self._convert_time(self._score.duration.quarterLength)",
"def _set_q(self, q):\n q = np.array(q)\n # if abs(np.sum(q**2) - 1.0) > 1e-6:\n # raise ValueError('Quaternion must be normalized so sum(q**2) == 1; use Quaternion.normalize')\n self._q = q\n # Erase internal values of other representations\n self._equatorial = None\n self._T = None",
"def getquarter(self):\n q = self.db['Quarters'].find_one(order_by=['-year','-quarter'])\n return int(q['year']), q['quarter']",
"def adapteHeure():\r\n minutes = int(m.get())\r\n heures = int(h.get())\r\n while minutes < 0:\r\n minutes += 60\r\n heures -= 1\r\n while minutes >= 60:\r\n minutes -= 60\r\n heures += 1\r\n heures += 24\r\n heures %= 24\r\n m.set(minutes)\r\n h.set(heures)",
"def get_minute_ceiling(self):\n\t\tminutes = self.seconds / 60.0 #get approx minute\n\t\tif self.half == 1:\n\t\t\tif minutes > 45.0:\n\t\t\t\tminutes = -1\n\t\t\telse:\n\t\t\t\tminutes = int(self.seconds) / 60\n\t\t\t\t# as long as we're a full second into next minute, round up\n\t\t\t\tif self.seconds - minutes*60 >= 1:\n\t\t\t\t\tminutes += 1 \n\t\t\t\tminutes = float(minutes)\n\t\telif self.half == 2:\n\t\t\tif minutes + 45 > 90.0:\n\t\t\t\tminutes = -2\n\t\t\telse:\n\t\t\t\tminutes = int(self.seconds) / 60\n\t\t\t\t# as long as we're a full second into next minute, round up\n\t\t\t\tif self.seconds - minutes*60 >= 1:\n\t\t\t\t\tminutes += 1\n\t\t\t\t# account for second half\n\t\t\t\tminutes += 45.0 \n\t\telse: # (self.half != 1) or (self.half != 2):\n\t\t\traise Exception(\"This Event has no half associated with it?\")\n\t\treturn minutes",
"def difficulty_of_quarter(credit_amount):\n \n if credit_amount <= 12:\n return 'Your quarter is going to be easy!'\n elif 12 < credit_amount <= 16:\n return 'Your quarter is going to be average.'\n else:\n return 'You are going to have a rough quarter.'",
"def phred33ToQ(quolity):\n\n return ord(quolity) - 33",
"def make_q(q_max, Rmax):\n from sas.sascalc.data_util.nxsunit import Converter\n\n q_min = dq = 0.1 * 2*pi / Rmax\n return np.arange(q_min,\n Converter(q_max[1])(q_max[0],\n units=\"1/A\"),\n dq)",
"def qrot(q, v):\n #TODO can I change this function to also work with constant v and changing quaternions?\n # if not just tile/stack v accordingly\n assert q.shape[-1] == 4\n assert v.shape[-1] == 3\n if not q.shape[:-1] == v.shape[:-1]:\n q_batch_size = list(q.shape)[1]\n size = int(q_batch_size/BATCH_SIZE)\n v = v.repeat([1, size, 1])\n\n original_shape = list(v.shape)\n q = q.view(-1, 4)\n v = v.view(-1, 3)\n\n qvec = q[:, 1:]\n uv = torch.cross(qvec, v, dim=1)\n uuv = torch.cross(qvec, uv, dim=1)\n return (v + 2 * (q[:, :1] * uv + uuv)).view(original_shape)",
"def get_quarter_start_end(quarter, year=None):\n if year is None:\n year = dt.datetime.now().year\n d = dt.date(year, 1+3*(quarter-1), 1)\n return d, d+relativedelta(months=3, days=-1)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Write sleep states onto row_out from current position to start of curr_triple.
|
def _insert_leading_sleep_states(self, curr_triple, row_out):
curr_posn = self.QS_IN_DAY - self.spaces_left
if curr_posn < curr_triple.start:
triple_to_insert = self.Triple(curr_posn,
curr_triple.start - curr_posn,
self.sleep_state)
row_out = self._insert_to_row_out(triple_to_insert, row_out)
elif curr_posn == curr_triple.start:
pass # insert no leading sleep states
else:
triple_to_insert = self.Triple(curr_posn,
self.QS_IN_DAY - curr_posn,
self.sleep_state)
row_out = self._insert_to_row_out(triple_to_insert, row_out)
if not row_out.count(self.NO_DATA) or \
curr_triple.symbol == self.NO_DATA: # row out is complete
self._write_output(row_out)
row_out = self.output_row[:]
self.spaces_left = self.QS_IN_DAY
if curr_triple.start > 0:
triple_to_insert = self.Triple(0, curr_triple.start,
self.sleep_state)
row_out = self._insert_to_row_out(triple_to_insert, row_out)
return row_out
|
[
"def _write_row(self, row):\n return",
"def write_rows(self, rows):\n for row in rows:\n self.write_row(row)",
"def led(self,pos,flash,quick):\n self.ledAllOff()\n #Get row\n aRow = pos[1]\n #Get column\n aCol = pos[0]\n #Set col to LOW\n if aCol == 'a':\n theLed = self.LED1\n if aCol == 'b':\n theLed = self.LED2\n if aCol == 'c':\n theLed = self.LED3\n if aCol == 'd':\n theLed = self.LED4\n if aCol == 'e':\n theLed = self.LED5\n if aCol == 'f':\n theLed = self.LED6\n if aCol == 'g':\n theLed = self.LED7\n if aCol == 'h':\n theLed = self.LED8\n\n #Set row to HIGH\n if aRow == '1':\n theRow = self.ROW1\n if aRow == '2':\n theRow = self.ROW2\n if aRow == '3':\n theRow = self.ROW3\n if aRow == '4':\n theRow = self.ROW4\n if aRow == '5':\n theRow = self.ROW5\n if aRow == '6':\n theRow = self.ROW6\n if aRow == '7':\n theRow = self.ROW7\n if aRow == '8':\n theRow = self.ROW8\n self.wiringpi.digitalWrite(theRow,self.HIGH)\n self.wiringpi.digitalWrite(theLed,self.LOW)\n if quick == True:\n flashTime = 0.2\n else:\n flashTime = 0.5\n if flash == True:\n sleep(flashTime)\n self.wiringpi.digitalWrite(theLed,self.HIGH)\n sleep(flashTime)\n self.wiringpi.digitalWrite(theLed,self.LOW)\n sleep(flashTime)\n self.wiringpi.digitalWrite(theLed,self.HIGH)",
"def record_history(self, row, col):\n self.last_two.pop(0)\n self.last_two.append((row, col))\n return",
"def assignRowsToTable(self):\n st = self.currentStage\n mem = self.mems[self.currentMem]\n tableIndex = self.tableIndex\n self.startRowDict[mem][tableIndex,self.slRange[0]] = self.rowRange[0]\n for sl in self.slRange:\n self.tablesInBlock[mem][sl][self.table] = 1\n pass\n for r in self.rowRange:\n self.lastBlockOfRow[mem][st][r] = self.slRange[-1]\n self.numberOfRowsDict[mem][tableIndex,self.slRange[0]] += 1\n for sl in self.slRange:\n self.dirty[mem][r,sl] = 1\n pass\n self.numWordsLeft -= 1\n pass\n self.logger.debug(\"Assigned \" + str(len(self.rowRange)) + \" rows from \"\\\n + str(self.rowRange[0]) + \" to \" + str(self.rowRange[-1]) +\\\n \"in blocks \" + str(self.slRange[0]) + \" to \" + str(self.slRange[-1])\\\n + \" of \" + mem)\n self.lastBlockOfTable[mem][st][tableIndex] = self.slRange[-1]\n pass",
"def output_row(output_db, table_name, row):\n row = clean_output_row(row, table_name)\n insert_row(output_db, table_name, row)",
"def write_row(self, row):\n r = [to_text(s) for s in row]\n self.writer.writerow(r)\n self.row_count += 1",
"def save_state(self):\n assert self.file is not None, 'set_up_save() has not been run'\n\n for p in self.particle_list:\n string = ''.join([str(v) + ','\n for v in (self.t_curr, p.id, p.x[0], p.x[1],\n p.v[0], p.v[1], p.P,\n p.rho, p.bound)]) + '\\n'\n self.file.write(string)",
"def write_strip(self, leds, start=0, end=None):\n # take the first color of the first \n self.set_color(leds[0])\n self.pixels[0:] = self.translate_brightness(leds)",
"def travel_row():\n while front_is_clear():\n move()",
"def pen_up_down(self):\n new_ni, degree = self._available_nodes\n degree = np.asarray(degree, dtype=np.float32)\n logwts = self.exp_wt_start * np.log(1/degree)\n logwts = logwts - logsumexp(logwts)\n wts = np.exp(logwts)\n rindx = np.random.choice(len(wts), p=wts)\n stroke = WalkerStroke(new_ni[rindx])\n self.list_ws.append(stroke)\n if not self.complete:\n self.pen_simple_step()",
"def savePressed(self):\n\t\t\n\t\t# identify idle and pressed bit\n\t\tprint(\"Don't press the startpad to identify the idle state\")\n\t\tself.serial.write(b'A')\n\t\ts = self.serial.readline().strip().decode('UTF-8')\n\t\tidlebit = s\n\t\tprint(\"touchpad idle bit = \" + s)\n\t\tif(idlebit==\"0\"):\n\t\t\tpressbit = \"1\"\n\t\telse:\n\t\t\tpressbit = \"0\"\n\n\t\tprint(\"Identifying Startpad Idle State is done!\")\n\n\n\t\t# monitor and save the press \n\t\ttimefields = ['pressed#', 'timestamp'] # header of the .csv storing timestamp file\n\t\twith open(self.TimestampFile, 'w', newline = '') as csvfile:\n\t\t\tfwriter = csv.writer(csvfile)\n\t\t\tfwriter.writerow(['all timestamp based on same time 0'])\n\t\t\tfwriter.writerow(timefields) # write the head of timestamp csv file\n\t\t\t\n\t\t\tpressi = 0\n\t\t\tprebit = \"\"\n\t\t\twhile self.started:\n\t\t\t\tself.serial.write(b'A')\n\t\t\t\ts = self.serial.readline().strip().decode('UTF-8')\n\t\t\t\tpressedtime = time.time() - t_start\n\t\t\t\tif(prebit == idlebit and s == pressbit):\n\t\t\t\t\tpressi += 1\n\t\t\t\t\tfwriter.writerow([str(pressi), pressedtime])\n\t\t\t\tprebit = s",
"def _row_swap(self, a, b):\n temp = self.x[a].copy()\n self.x[a] = self.x[b].copy()\n self.x[b] = temp\n\n temp = self.z[a].copy()\n self.z[a] = self.z[b].copy()\n self.z[b] = temp\n\n temp = self.r[a].copy()\n self.r[a] = self.r[b].copy()\n self.r[b] = temp",
"def move_sleepMem(self, window_size):\r\n to_sleep = np.where((self.t - np.array([d['t'] for d in self.model])) > window_size)[0]\r\n if len(to_sleep)>0:\r\n self.sleep_mem += list(self.model[i] for i in to_sleep)\r\n for i in reversed(to_sleep):\r\n self.model.pop(i)",
"def idle(self):\r\n self.schedule = []\r\n self.blockList = []\r\n print(\"System is now entering sleep mode\")",
"def write_final_table(out, chrm, snp_tab, obs_tab):\n for pos, ref, alt in snp_tab:\n # Skip this entry if it has not been observed\n if any([pos in obs for obs in obs_tab]):\n out.write('%s\\t%s\\t%s\\t%s' % (chrm, pos, ref, alt))\n for obs in obs_tab:\n out.write('\\t%s' % obs[pos])\n out.write('\\n')\n return",
"def flush(self):\n inp = np.array(self.inp)\n self.inp = []\n out = self.model(inp)\n for i,o in enumerate(out):\n self.out[0][i] = o\n self.out[1].set()\n self.out_reset()",
"def joint_states_cb(self, data):\n if self.start_recording:\n #self.last_joint_states_data = data\n #self.current_rosbag.write(DEFAULT_JOINT_STATES, data)\n self.joint_states_accumulator.append(data)\n #self.time_accumulator.append(rospy.Time.now())",
"def output_thread(out_q, params):\n none_count = 0\n X = []\n Y = []\n while True:\n res = out_q.get()\n if res is None:\n none_count += 1\n else:\n X.append(res[0])\n Y.append(res[1])\n if none_count == params['n_threads']:\n break\n X = np.array(X)\n Y = np.array(Y)\n\n ones = np.sum(Y)\n zeros = np.size(Y) - ones\n total = ones + zeros\n \n print(\"P-phases (zeros):\", zeros, \"(\", 100*zeros/total, \"%)\")\n print(\"S-phases (ones):\", ones, \"(\", 100*ones/total, \"%)\")\n\n np.save(params[\"training_dset_X\"], X)\n np.save(params[\"training_dset_Y\"], Y)\n\n print(\"Saved the synthetic training dataset.\")\n\n return"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Obtain, from a time string, its starting position in a line of output.
|
def _get_start_posn(self, time_str):
if time_str:
m = re.search(self.re_hr_min_time, time_str)
assert bool(m)
return (int(m.group(1)) * 4 + # 4 output chars per hour
int(m.group(2)) // 15) % self.QS_IN_DAY
return 0
|
[
"def _look_for_timestamp_and_index_of_last_timestamp_char(self, a_line):\n\n last_char_timestamp_index = a_line.find(\"]\")\n string_timestamp = a_line[a_line.find(\"[\") + 1:last_char_timestamp_index]\n\n return datetime.datetime.strptime(string_timestamp, _DATE_FORMAT), \\\n last_char_timestamp_index",
"def test_elapsed_at(line):\n return TESTCASE_TIME_RE.match(line)",
"def find_time_start_string_in_line(self, line, full_file_name):\n datetime_object_start_time = datetime.strptime(self.time_start, DATETIME_FORMAT)\n try:\n datetime_str_in_line = line.split(',')[0]\n datetime_object_in_line = datetime.strptime(datetime_str_in_line, DATETIME_FORMAT)\n\n # True if timestamp in line is between start time and start time + min\n # example: '2018-07-25 03:11:35' <= '2018-07-25 03:15:35' <= '2018-07-25 03:16:35'\n if datetime_object_start_time <= datetime_object_in_line <= \\\n datetime_object_start_time + timedelta(minutes=MINUTES_INTERVAL):\n logger.info(\"Found start time string within defined range of %s minutes: %s in file: %s Beginning to search for event string: %s\"\n % (MINUTES_INTERVAL, datetime_str_in_line, os.path.basename(full_file_name), self.event_string_to_find))\n return True\n return False\n\n except:\n return False",
"def time(line, xmlFile):\n time = re.match(\"(.*?)(\\d+:\\d\\d[pm|am]+)\", line)\n if time is None:\n pass\n else:\n other, timeFinal = time.groups()\n print(\"\\t<qTime>\" + timeFinal + \"</qTime>\", file = xmlFile)",
"def _parse_start(self, item):\n date_line = \" \".join(item.split(\"\\n\")[1:])\n date_match = re.search(r\"[A-Z][a-z]{2,8} \\d{1,2}\", date_line)\n if not date_match:\n return\n year_str = str(datetime.now().year)\n year_match = re.search(r\"20\\d{2}\", date_line)\n if year_match:\n year_str = year_match.group()\n\n date_str = \" \".join([date_match.group().replace(\",\", \"\"), year_str])\n time_match = re.search(r\"\\d{1,2}:\\d{2} ?[apm\\.]{2,4}\", item, flags=re.I)\n time_str = \"12:00am\"\n if time_match:\n time_str = re.sub(r\"[ \\.]\", \"\", time_match.group())\n try:\n return datetime.strptime(\" \".join([date_str, time_str]), \"%B %d %Y %I:%M%p\")\n except ValueError:\n return",
"def findLogOffsetForTimestamp(sLogContent, tsTimestamp, offStart = 0, fAfter = False):\n # Turn tsTimestamp into a string compatible with what we expect to find in the log.\n oTsZulu = db.dbTimestampToZuluDatetime(tsTimestamp);\n sWantedTs = oTsZulu.strftime('%H:%M:%S.%f');\n assert len(sWantedTs) == 15;\n\n # Now loop thru the string, line by line.\n offRet = offStart;\n off = offStart;\n while True:\n sThisTs = sLogContent[off : off + 15];\n if len(sThisTs) >= 15 \\\n and sThisTs[2] == ':' \\\n and sThisTs[5] == ':' \\\n and sThisTs[8] == '.' \\\n and sThisTs[14] in '0123456789':\n if sThisTs < sWantedTs:\n offRet = off;\n elif sThisTs == sWantedTs:\n if not fAfter:\n return off;\n offRet = off;\n else:\n if fAfter:\n offRet = off;\n break;\n\n # next line.\n off = sLogContent.find('\\n', off);\n if off < 0:\n if fAfter:\n offRet = len(sLogContent);\n break;\n off += 1;\n\n return offRet;",
"def find_time(string_list):\n x = r\"\\b[1-9][0-2]?:[0-5][0-9]\\s[ap]m\" \n s_list = []\n for lines in string_list:\n strings = re.findall(x, lines)\n for index in strings:\n s_list.append(index)\n\n return s_list",
"def _parse_start(self, date_str):\n return datetime.strptime(date_str.replace(\".\", \"\"), \"%B %d, %Y %H:%M %p\")",
"def time_start(self, section):\r\n if (section == 0):\r\n return self.t0\r\n else:\r\n time_start_index = range(-self.number_of_section - 1, 0)\r\n return self.p[time_start_index[section]] * self.unit_time",
"def get_time(str_time):\n return str_time.split(':')",
"def get_position(position, entry):\n\n return entry.split(' ')[position]",
"def time_install(some_str):\r\n time_final = ''\r\n time = re.findall(r\".*:time([\\S\\s]*):track\", some_str)\r\n if time:\r\n time_objects = re.findall(r\":Name \\((.*)\\)\\n.*:Table \\((.*)\\)\", ''.join(time))\r\n if time_objects:\r\n time_buf = StringIO()\r\n for time_item in time_objects:\r\n time_name = time_item[0]\r\n time_buf.write(time_name + '\\n')\r\n time_final = re.findall(r'([\\S\\s]*)\\n', time_buf.getvalue())\r\n\r\n return time_final",
"def get_monitor_start_time():\n \n # read the 8th of December data as a list of strings\n# f = open('../data_p_beam/2_second/20171208.csv')\n# lines = f.readlines()\n# f.close()\n \n # !!! temporarily changing this to a run closer to the start of where\n # proper data was first collected\n filename = 'T071217_0001.txt'\n f = open('../data_ucn/monitor_detector/' + filename)\n lines = f.readlines()\n f.close()\n \n date_time = filename[1:3].zfill(2) + \\\n '.12.2017 ' + \\\n lines[26][15:23]\n\n pattern = '%d.%m.%Y %H:%M:%S'\n start_time = int(time.mktime(time.strptime(date_time, pattern)))\n \n return start_time",
"def extract_timestring(self, header):\n if type(header) != str:\n raise TypeError\n\n header = cleanup_text(header)\n timestring = None\n\n split_by_semicolon = header.split(';')\n split_by_newline = header.split('\\n')\n split_by_id = re.split('\\s+id\\s+[^\\s]*\\s+', header)\n\n if len(split_by_semicolon) > 1:\n timestring = split_by_semicolon[-1]\n elif len(split_by_semicolon) == 1:\n if len(split_by_newline) > 1:\n # find it on the last line\n timestring = split_by_newline[-1]\n elif len(split_by_id) > 1:\n # find it after` id abc.xyz `\n timestring = split_by_id[-1]\n\n if timestring is None:\n return None\n\n timestring = cleanup_text(timestring)\n timestring = cleanup_text(self.remove_details(timestring))\n timestring = self.strip_timezone_name(timestring)\n timestring = re.sub('-0000', '+0000', timestring)\n\n return timestring",
"def parse( s ):\n sec, nsec = TimeFormat.parseTime( s )\n return Time( sec, nsec )",
"def split_timestamp(line):\n LENGTH = 26\n FORMAT = \"%Y-%m-%d %H:%M:%S.%f\"\n t = line[:LENGTH]\n return (datetime_to_seconds_since_epoch(datetime.datetime.strptime(t, FORMAT)),\n line[LENGTH + 1:])",
"def _get_timestamp(self):\n tmp = re.findall(\"\\nTimestamp:\\s+([0-9]+)\", self.msg)\n if not len(tmp) == 1:\n logger.error(tmp)\n logger.error(self.msg)\n raise Exception(\"Found \\\"Timestamp\\\" != 1 times!\")\n return(int(tmp[0]))",
"def _make_analysis_offsets(text):\n return tuple(timedelta(hours=int(i)) for i in text.split(\" \"))",
"def findTimeTagPos(self, varName):\n for i in xrange(1, len(varName)):\n if varName[-i] == \"_\":\n return i"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sync exploring ml tool to S3
|
def sync_s3():
subprocess.run(["aws", "s3", "sync", "./", "s3://{0}".format(BLOG_BUCKET_NAME)])
|
[
"def syncToS3(self):\n\t\t#return_code = subprocess.call(\"echo Hello World\", shell=True)\n\t\treturn_code = subprocess.call(\"aws s3 ls s3://prometheus-bucket-raspberry-pi\",shell=True)\n\t\treturn_code1 = subprocess.call(\"aws s3 sync /mnt/usb s3://prometheus-bucket-raspberry-pi\",shell=True)\n\t\treturn_code2 = subprocess.call(\"aws s3 ls s3://prometheus-bucket-raspberry-pi\",shell=True)\n\t\tprint(\"Return Code\", return_code,return_code1,return_code2)",
"def _s3_download(self):\n print(\"Info : Starting to download from s3 %s ...\" %\n (self._data_requirement_file[\"src\"]))\n try:\n subprocess.check_call(['aws', 's3', 'sync', '--no-sign-request',\n self._data_requirement_file[\"src\"], self._dst_path])\n except FileNotFoundError:\n print(\"Error: aws does not appear to be installed\")\n raise",
"def _deploy_to_s3():\n s3cmd = 's3cmd -P --add-header=Cache-Control:max-age=5 --guess-mime-type --recursive --exclude-from gzip_types.txt put gzip/ %s'\n s3cmd_gzip = 's3cmd -P --add-header=Cache-Control:max-age=5 --add-header=Content-encoding:gzip --guess-mime-type --recursive --exclude \"*\" --include-from gzip_types.txt put gzip/ %s'\n\n for bucket in env.s3_buckets:\n env.s3_bucket = bucket\n local(s3cmd % ('s3://%(s3_bucket)s/' % env))\n local(s3cmd_gzip % ('s3://%(s3_bucket)s/' % env))",
"def store_results(self):\n comp_data_path = os.path.join(self.artifacts_dir, \"comp_data\")\n if os.path.exists(comp_data_path):\n shutil.rmtree(comp_data_path)\n\n run_process(\"aws s3 cp {} s3://{}/{} --recursive\".format(self.artifacts_dir, S3_BUCKET,\n self.current_run_name))",
"def test_s3(self):\n s3_key = \"big_file_tests/PreDiag_000287_000128.092\"\n\n result = self.runner.invoke(\n cli,\n [\n \"--output-status-json\",\n self.status_json_path,\n \"structure\",\n \"--output-dir\",\n self.output_dir,\n \"--automatic\",\n \"--protocol-parameters-dir\",\n PROTOCOL_PARAMETERS_DIR,\n \"--no-raw\",\n \"--s3-bucket\",\n \"beep-sync-test-stage\",\n \"--s3-use-cache\",\n s3_key,\n ],\n catch_exceptions=False\n )\n self.assertEqual(result.exit_code, 0)\n self.assertIsNotNone(result.output)",
"def test_save_to_s3(self):\n\n # Set up mock S3 - make sure the bucket exists\n conn = boto.connect_s3()\n conn.create_bucket(\"mybucket\")\n\n with storelet.ZipBackup(\"test\") as b:\n b.save_to_s3(\"mybucket\", \"myaccesskey\", \"mysecret\")\n\n # There should be one result and it should be the one we expect\n for k in conn.get_bucket(\"mybucket\").list():\n expected_name = 'test_%s.zip' % datetime.now().strftime(\"%Y%m%d%H%M%S\")\n self.assertEqual(k.name, expected_name)",
"def sync_s3_storage_controller(self, request):\n try:\n logging.info(f\"Sync S3 storage with Label Studio project\")\n sync_storage_url = f\"{self.label_studio_config.get('s3_storage')}/{request.storage_id}/sync\"\n response, status_code = APIInterface.post(\n route=sync_storage_url, headers=self.header\n )\n return response\n except Exception as error:\n logging.error(f\"Error in sync_s3_storage_controller: {error}\")\n raise error",
"def download_from_s3():\n s3 = boto3.resource(\n service_name='s3',\n region_name=os.environ[\"AWS_DEFAULT_REGION\"],\n aws_access_key_id=os.environ[\"AWS_ACCESS_KEY_ID\"],\n aws_secret_access_key=os.environ[\"AWS_SECRET_ACCESS_KEY\"]\n )\n s3.Bucket('rossmann-mynt').download_file(Key='models/model.joblib', Filename='../model/model.joblib')",
"def move_files_to_s3():\n if settings.S3_USE:\n for filename in os.listdir(settings.STORAGE_ROOT_DIR):\n local_filepath = os.path.join(settings.STORAGE_ROOT_DIR, filename)\n try:\n transfer_obj = Transfer.objects.get(machine_file_path=local_filepath)\n except Transfer.DoesNotExist:\n print(f\"Could not find a database object for {local_filepath}\")\n continue\n update_bag_info(local_filepath, {\"Origin\": \"aurora\"})\n tar_filename = f\"{filename}.tar.gz\"\n local_tarpath = os.path.join(settings.STORAGE_ROOT_DIR, tar_filename)\n make_tarfile(local_filepath, local_tarpath)\n s3_client = boto3.client(\n 's3',\n aws_access_key_id=settings.S3_ACCESS_KEY,\n aws_secret_access_key=settings.S3_SECRET_KEY,\n region_name=settings.S3_REGION)\n s3_client.upload_file(local_tarpath, settings.STORAGE_BUCKET, tar_filename)\n transfer_obj.machine_file_path = tar_filename\n transfer_obj.save()\n remove_file_or_dir(local_tarpath)\n remove_file_or_dir(local_filepath)\n print(f\"{local_filepath} tarballed and uploaded to S3 as {tar_filename}\")",
"def s3bucket(ec2, env, source):\n\tmime_types = {\n\t\t\"eot\" : \"application/vnd.ms-fontobject\",\n\t\t\"ttf\" : \"font/truetype\",\n\t\t\"otf\" : \"font/opentype\",\n\t\t\"woff\": \"font/woff\",\n\t}\n\ts3b = boto.connect_s3(ec2.access_key,ec2.secret_key)\n\tfor machine in env:\n\t\tif 's3bucket' in machine.keys():\n\t\t\tprint 'Copying static media for %s' % machine['name']\n\t\t\ts3bucket = machine['s3bucket']\n\n\t\t\t# Get the expires\n\t\t\ttime_format = '%a, %d %b %Y %H:%M:%S'\n\t\t\tnow = datetime.datetime.now().strftime(time_format)\n\t\t\texpires = s3bucket.get('expires',datetime.datetime.utcnow().strftime(time_format))\n\t\t\ttry:\n\t\t\t\tdatetime.datetime.strptime(expires,time_format)\n\t\t\texcept:\n\t\t\t\terror('Improperly formatted datetime: %s' % expires)\n\n\t\t\t# Get or create bucket using the name\n\t\t\tname = s3bucket.get('name','s3%s'%machine['name'])\n\t\t\ttry: b = s3b.get_bucket(name)\n\t\t\texcept: b = s3b.create_bucket(name)\n\t\t\t\n\t\t\t# Set ACL Public for all items in the bucket\n\t\t\tb.set_acl('public-read')\n\n\t\t\tk = Key(b)\n\t\t\tstatic_dir = os.path.join(source,'project','static')\n\t\t\tfor root, dirs, files in os.walk(static_dir):\n\t\t\t\tif '.svn' in dirs: dirs.remove('.svn')\n\t\t\t\tkey_root = root.split('static')[1]\n\n\t\t\t\tfor file in files:\n\t\t\t\t\tfilename = os.path.join(root,file)\n\n\t\t\t\t\t# Set the headers\n\t\t\t\t\theaders = {'Expires':expires}\n\t\t\t\t\tif '.gz' in file:\n\t\t\t\t\t\theaders.update({'Content-Encoding':'gzip'})\n\n\t\t\t\t\tif os.path.isfile(filename):\n\t\t\t\t\t\t# Set the mime-type\n\t\t\t\t\t\text = file.split('.')[-1]\n\t\t\t\t\t\tif ext in mime_types.keys():\n\t\t\t\t\t\t\tk.content_type = mime_types[ext]\n\n\t\t\t\t\t\t# Send the file\n\t\t\t\t\t\tk.key = os.path.join(key_root,file)\n\t\t\t\t\t\tprint '\\nTransfering %s' % filename\n\t\t\t\t\t\tk.set_contents_from_filename(filename, headers=headers, cb=s3_percent_cb, num_cb=10)\n\t\t\tprint '\\nTransfer complete'\n\n\tinvalidate_cache(ec2, env, source)",
"def sync(pathname, bucketname):\n bucket_manager.sync(pathname, bucketname)\n print(bucket_manager.get_bucket_url(bucket_manager.s3.Bucket(bucketname)))",
"def deploy_assets_to_s3():\r\n# run('s3cmd del --recursive s3://%(s3_bucket)s/%(application)s/%(admin_media_prefix)s/' % env)\r\n# run('s3cmd -P --guess-mime-type sync %(venv_path)s/src/django/django/contrib/admin/media/ s3://%(s3_bucket)s/%(application)s/%(site_media_prefix)s/' % env)\r\n# run('s3cmd del --recursive s3://%(s3_bucket)s/%(application)s/%(newsapps_media_prefix)s/' % env)\r\n# run('s3cmd -P --guess-mime-type sync %(venv_path)s/src/newsapps/newsapps/na_media/ s3://%(s3_bucket)s/%(application)s/%(newsapps_media_prefix)s/' % env)\r\n pass",
"def syncToS3(self):\n\n if self.oCmdOptions.bInvalidCFOnly:\n return\n\n # Get all the build files\n aBuildFiles = getCwdFiles()\n # prettyPrint(aBuildFiles)\n\n # Get all files and sizes from S3\n sPrefix = 'deployments/%s/%s' % (self.oCmdOptions.sProduct, self.oCmdOptions.sDeployment)\n aS3FileInfo = self.getS3Files(self.S3_BUCKET, sPrefix)\n # prettyPrint(aS3FileInfo)\n\n # Get the list of new build files and old S3 files\n aNewBuildFiles, aOldS3Files = self.compareFiles(aBuildFiles, aS3FileInfo)\n # prettyPrint(aNewBuildFiles)\n # prettyPrint(aOldS3Files)\n\n # Avoid removing files that are part of older versions\n if self.oCmdOptions.iVersions and int(self.oCmdOptions.iVersions) > 0:\n aOldS3Files = self.maintainVersions(aS3FileInfo, aOldS3Files, self.oCmdOptions.iVersions,\n self.S3_BUCKET, sPrefix)\n\n # Transfer the new files\n self.transferFiles(self.S3_BUCKET, sPrefix, aNewBuildFiles)\n\n # Remove any old files\n self.removeS3Files(self.S3_BUCKET, sPrefix, aOldS3Files)",
"def s3cmd_path():\n return 's3cmd'",
"def To_S3(all_urls):\n files_list =[]\n client = boto3.client(\n 's3',\n aws_access_key_id='A******************N',\n aws_secret_access_key='y****************************************')\n \n try:\n for obj in client.list_objects_v2(Bucket= 'dataelection')['Contents']:\n FileName = obj['Key']\n files_list.append(FileName)\n except:\n pass \n now = datetime.datetime.now()\n current_year=now.year\n #current_year='2002'\n if len(files_list)==0:\n\n for yr_link in all_urls:\n req = urllib2.urlopen(yr_link)\n zip_file = zipfile.ZipFile(BytesIO(req.read()))\n zip_file.namelist()\n zip_file.extractall('files_to_upload') \n timestr = time.strftime(\"%Y%m%d-%H%M%S\")\n if 'files_to_upload/itcont.txt'in glob.glob(\"files_to_upload/*.txt\"):\n #os.rename('files_to_upload/itcont.txt','files_to_upload/itcont'+timestr+'.txt')\n os.rename('files_to_upload/itcont.txt','files_to_upload/itcont'+timestr+'.txt')\n files_list = os.listdir('files_to_upload')\n for file in files_list:\n if file.endswith(\".txt\"):\n print(os.path.join('files_to_upload', file))\n client.upload_file(os.path.join('files_to_upload', file),'dataelection',file)\n os.remove(os.path.join('files_to_upload', file))\n else:\n for url in all_urls:\n if current_year in url:\n req = urllib2.urlopen(url)\n zip_file = zipfile.ZipFile(BytesIO(req.read()))\n files_to_check=zip_file.namelist()\n files_to_extract=list(set(files_to_check)-set(files_list))\n if files_to_extract:\n for cnt in range(0,len(files_to_extract)):\n zip_file.extract(files_to_extract[cnt],'files_to_upload')\n if 'files_to_upload/itcont.txt'in glob.glob(\"files_to_upload/*.txt\"):\n #os.rename('files_to_upload/itcont.txt','files_to_upload/itcont'+timestr+'.txt')\n os.rename('files_to_upload/itcont.txt','files_to_upload/itcont'+timestr+'.txt')\n files_list = os.listdir('files_to_upload')\n for file in files_list:\n if file.endswith(\".txt\"):\n print(os.path.join('files_to_upload', file))\n client.upload_file(os.path.join('files_to_upload', file),'dataelection',file)\n os.remove(os.path.join('files_to_upload', file))",
"def run(date, out, dryrun):\n for feed_number in [1, 2, 11, 16, 21, 26, 31, 36]:\n # https://stackoverflow.com/q/48358992/1993206\n commands = [\"aws\", \"s3\", \"sync\", \"s3://mta-gtfs-{0}\".format(feed_number), \".\", \"--exclude\", '*',\n \"--include\", '*{0}*'.format(date)]\n if dryrun:\n commands.append(\"--dryrun\")\n\n subprocess.run(commands)\n\n try:\n os.mkdir(out)\n except FileExistsError:\n pass\n try:\n os.mkdir(\"{0}/mta-gtfs-{1}\".format(out, feed_number))\n except FileExistsError:\n pass\n try:\n os.mkdir(\"{0}/mta-gtfs-{1}/{2}\".format(out, feed_number, date))\n except FileExistsError:\n pass\n\n for f in [f for f in os.listdir(\".\") if 'pb' in f]:\n shutil.move(f, \"{0}/mta-gtfs-{1}/{2}/{3}\".format(out, feed_number, date, f))",
"def main():\n \n spark = create_spark_session()\n input_data = \"s3a://udacity-dend/\"\n output_data = f\"s3a://{OUTPUT_BUCKET}/\"\n \n song_df = read_song_data(spark, input_data)\n process_song_data(spark, song_df, input_data, output_data) \n process_log_data(spark, song_df, input_data, output_data)\n spark.stop()",
"def connect_s3(self):\n self.out('- Connecting to S3 and making bucket.\\n')\n self.s3 = boto.connect_s3()\n self.bucket = self.s3.create_bucket(self.bucket_name)\n self.bucket = self.s3.get_bucket(self.bucket_name)\n self.bucket.set_acl(self.default_acl)\n self.bucket.set_cors(self.default_cors)",
"def _set_s3(self):\n logger.info(\"Setting up s3 ...\")\n\n cluster_name_id = AXClusterId().get_cluster_name_id()\n\n self._bucket_name = AXClusterDataPath(cluster_name_id).bucket()\n self._bucket = Cloud().get_bucket(self._bucket_name)\n artifact_prefix = AXClusterDataPath(cluster_name_id).artifact()\n self._log_s3_prefix = artifact_prefix\n\n self._bucket_ax_is_external = AXLogPath(cluster_name_id).is_external()\n self._bucket_name_ax = AXLogPath(cluster_name_id).bucket()\n self._bucket_ax = Cloud().get_bucket(self._bucket_name_ax)\n artifact_prefix_ax = AXLogPath(cluster_name_id).artifact()\n\n self._log_s3_prefix_ax = artifact_prefix_ax\n\n assert self._bucket.exists(), \"S3 bucket {} DOES NOT exist\".format(self._bucket_name)\n assert self._bucket_ax.exists(), \"S3 bucket {} DOES NOT exist\".format(self._bucket_name_ax)\n logger.info(\"Using S3 bucket %s, with log prefix %s\", self._bucket.get_bucket_name(), self._log_s3_prefix)\n logger.info(\"Using S3 bucket %s, with log prefix %s for AX\", self._bucket_ax.get_bucket_name(), self._log_s3_prefix_ax)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
reads the actions from all yml files given in `filenames` and returns the objects specified in them
|
def get_action_objects(filenames):
all_actions = {'repeat every': RepeatEvery, 'fix wifi': WifiFixer, 'switch tabs': TabSwitcher}
if type(filenames) == str:
actions = get_actions(filenames, all_actions)
else:
actions = []
for filename in filenames:
actions += get_actions(filename, all_actions)
# create all objects
all_objs = [obj(content) for obj, content in actions]
return all_objs
|
[
"def read(self, filenames):\r\n if isinstance(filenames, str):\r\n filenames = [filenames]\r\n read_ok = []\r\n for filename in filenames:\r\n try:\r\n with open(filename) as fp:\r\n self._read(fp, filename)\r\n except IOError:\r\n continue\r\n read_ok.append(filename)\r\n return read_ok",
"def load_scenes(filenames):\n scenes = []\n for fn in filenames:\n with open(fn, 'r') as fid:\n scene = json.load(fid)\n scenes.append(scene)\n return scenes",
"def load_list(self,filenames):\n\t\treturn self.loadList(filenames)",
"def filenames_to_objects(filenames: List[str]):\n vm_files: List[VMFile] = []\n for file in filenames:\n vm_files.append(VMFile(file))\n\n # Read all File Contents\n for obj in vm_files:\n obj.read_file()\n\n return vm_files",
"def get_folder_actions(folder):\n import ConfigParser\n config = ConfigParser.SafeConfigParser()\n filename = os.path.join(folder, CONF_FILENAME)\n print 'reading:', filename\n config.read(filename)\n\n actions = []\n for section in config.sections():\n action = Action()\n action.name = section\n items = dict(config.items(section))\n\n if 'exec' in items:\n action.command = items['exec']\n if 'name' in items:\n action.name = items['name']\n if 'icon' in items:\n action.icon = items['icon']\n if 'comment' in items:\n action.comment = items['comment']\n\n actions.append(action)\n return actions",
"def collect_yaml(paths):\n # Find all paths\n file_paths = []\n for path in paths:\n if os.path.exists(path):\n if os.path.isdir(path):\n try:\n file_paths.extend(sorted([\n os.path.join(path, p)\n for p in os.listdir(path)\n if os.path.splitext(p)[1].lower() in ('.yaml', '.yml')\n ]))\n except OSError:\n # Ignore permission errors\n pass\n else:\n file_paths.append(path)\n\n configs = []\n\n # Parse yaml files\n for path in file_paths:\n try:\n with open(path) as f:\n data = yaml.safe_load(f.read()) or {}\n configs.append(data)\n except (OSError, IOError):\n # Ignore permission errors\n pass\n\n return configs",
"def read_actions(self, actions_file):\n file_extension = os.path.splitext(actions_file)[1]\n if file_extension:\n file_extension = file_extension[1:]\n try:\n reader_type = InputFileType[file_extension]\n reader = self._action_reader_factory.build_for_type(reader_type)\n return reader.read(actions_file)\n except KeyError:\n print(\"type not supported\")\n return None",
"def loadFiles(self, filenames):\n loadFiles(filenames, self.cache)",
"def read_args(self,filename,varnames):\n for name in varnames:\n self.args[name]=ebf.read(filename,'/'+name)",
"def import_kivy_rule(files):\n if Builder:\n if isinstance(files, str):\n files = (files,)\n for file in files:\n if os.path.basename(file) in (os.path.os.path.basename(f) for f in Builder.files):\n continue\n Builder.load_file(file)",
"def source_artifact_list(self, filenames):\n\n raise NotImplementedError",
"def config_armies(filename: str) -> None:\n game = Game()\n reader = Reader()\n armies = reader.read(filename)\n game.start_step = reader.start_from_step\n for army in armies:\n game.add_army(army)\n game.start()",
"def get_movie_data(files: list) -> list:\n movie_files = []\n for file in files:\n with open(file) as mf:\n data = json.load(mf)\n movie_files.append(data)\n return movie_files",
"def tiles_from_files(filenames, labels=None):\n if labels is not None:\n assert len(filenames) == len(labels)\n\n for idx, filen in enumerate(filenames):\n lbl = None if labels is None else labels[idx]\n try:\n tile = pv3.Image(filen)\n except AttributeError:\n print(\"Warning: Unable to load {}\".format(filen))\n tile = None\n yield (str(idx), tile, lbl)",
"def _read_files(self) -> None:\n for file in self.files:\n with open(file, 'r') as f:\n serialized = json.load(f)\n self.obj['avsc'].append(serialized)",
"def read(*filenames):\n camps_data = []\n variables_dict = {}\n for filename in filenames:\n nc = Dataset(filename, mode='r', format=\"NETCDF4\")\n variables_dict = nc.variables\n\n # Separates netCDF Variables into the metadata variables\n # and the predictor variables.\n procedures, variables_dict = separate_procedure_and_data(\n variables_dict)\n times, variables_dict = separate_time_and_data(variables_dict)\n coordinates, variables_dict = separate_coordinate_and_data(\n variables_dict)\n nc.close()\n # Initializes the Camps_data objects\n for varname,vardata in variables_dict.iteritems():\n logging.info(\"Reading \"+str(varname))\n w_obj = read_var(filename, varname)\n camps_data.append(w_obj)\n\n return camps_data",
"def load_files(self, file_list):\n self.filenames = file_list[:self.max_no_files]\n self.update_actions()",
"def process(self, files):\n self.track_versions(files)\n astrodriz_params = [\"-n\", \"1\"]\n assoc = self.assoc_files(files)\n if assoc:\n self.run_stage1(*assoc)\n if self.stage2:\n args = astrodriz_params + assoc\n self.run_stage2(*args)\n return\n unassoc = self.unassoc_files(files)\n if unassoc:\n self.run_stage1(*unassoc)\n if self.stage2:\n args = astrodriz_params + unassoc\n self.run_stage2(*args)\n return",
"def getFiles(fileNames):\n listBeatboxers = list()\n for fileName in fileNames:\n with open(fileName) as f:\n listBeatboxers.extend(f.readlines())\n return listBeatboxers"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
given a time unit, returns the current time in the specified unit
|
def get_current_time(unit):
now = datetime.now()
time = {"days": now.day, "hours": now.hour, "minutes": now.minute, "seconds": now.second}
return time[unit]
|
[
"def current_time_ns():\n return int(time.time() * (10 ** 9))",
"def localTime():\n\treturn convertTime(time.time())",
"def time_start(self, section):\r\n if (section == 0):\r\n return self.t0\r\n else:\r\n time_start_index = range(-self.number_of_section - 1, 0)\r\n return self.p[time_start_index[section]] * self.unit_time",
"def get_utc_time():\n current_utc_time = datetime.now().strftime(\"%H:%M\")\n return current_utc_time",
"def getCurrentTime():\n\n time = datetime.datetime.now().time().strftime('%I:%M %p')\n return time",
"def quota_time_unit(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"quota_time_unit\")",
"def get_current_time():\r\n return datetime.now().strftime(\"%B %d, %Y %H:%M\")",
"def current_time(self):\n\n method = \"global.getCurrentTime\"\n r = self.request(method=method)\n if r['result'] is False:\n raise RequestError(str(r))\n\n return r['params']['time']",
"def interval(self):\n return str(self.time_interval) + self.time_unit",
"def get_moscow_time():\n tz = pytz.timezone(\"Europe/Moscow\")\n moscow_time = datetime.now(tz)\n moscow_time = moscow_time.strftime(\"%H:%M\")\n return moscow_time",
"def _stringify_time_unit(value: int, unit: str) -> str:\n if unit == \"seconds\" and value == 0:\n return \"0 seconds\"\n if value == 1:\n return f\"{value} {unit[:-1]}\"\n if value == 0:\n return f\"less than a {unit[:-1]}\"\n return f\"{value} {unit}\"",
"def getCurrentTime():\n now = datetime.datetime.now()\n return '{}-{}-{}'.format(now.year, now.month, now.day), '{}:{}:{}'.format(now.hour, now.minute, now.second)",
"def time(self, t=None):\n \n if t == None:\n try:\n return self.t\n except:\n print \"NO TIME ASSOCIATED WITH THIS SIMULATION STATE\"\n else:\n self.t = t",
"def get_current_time(self):\n return datetime.datetime.now().strftime(\"%H:%M:%S\")",
"def quota_time_unit(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"quota_time_unit\")",
"def get_now():\n return localtime(now())",
"def unit(self):\n return self._get_unit()",
"def get_time(self):\r\n return float(self._cur_time)",
"def current_pacific_time():\n return dt.datetime.now().astimezone(pytz.timezone('US/Pacific'))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
function to mutate the current object's image with a new image.
|
def updateImage(self):
self.image = self.getImage(self.location, self.name, self.imageType)
|
[
"def edit_image(self):\n self.update()",
"def image(self, obj):",
"def update_image(self):\n self.image = self.capture_image()\n self.update_background()",
"def change_img(obj: pygame.sprite.Sprite, img):\r\n obj.image = img\r\n obj.image.set_colorkey(service.colors[\"BLACK\"])",
"def update(self, img, value_dict):\r\n return self._manager.update(img, value_dict)",
"def rescaled_image():",
"def copy_image(self): \r\n\r\n for i in range(0, self.width):\r\n for j in range(0, self.height): \r\n self.image_old[i, j] = self.image[i, j]\r\n \r\n return self.image_old",
"def update(self, img, boxes):",
"def __add__(self, image):\n if self == image:\n self.image += image.image\n else:\n raise Exception(\"Different image parameters\")\n return self",
"def set_image(self):\r\n self.sc.set_image()",
"def update_image(window: tk.Tk, img: Image):\r\n\r\n window.display_image(img)",
"def reloadImage():\n pass",
"def new_image(image):\n os.replace(image,PICTURES_IN + image)\n return",
"def __init__(self, img):\n if isinstance(img, LightningImage):\n # If the input was another lightning image object it gets copied, which means\n # the original also gets copied\n self.array = copy.deepcopy(img.array)\n #self.original = copy.deepcopy(img.original)\n else:\n self.array = copy.deepcopy(img)\n #self.original = copy.deepcopy(img)\n\n # 06.11.2018\n # Saving the height and the width of the image and thus the dimensions of the array as well\n self.width = self.array.shape[1]\n self.height = self.array.shape[0]",
"def reset_image(self, new_image: pygame.Surface):\n # We use `=` instead of `blit` because `=` does not save alpha.\n self.image = new_image\n self._borders_state.fix_borders()",
"def update_image(self):\n image_dict = {} # should be a dict in the form x heading, appropriate animation index basic idea is that it will find the the key with the least difference from the current x heading, and make that value self.image. Will complete when i get the sprite",
"def update_image_path(self, new_path):\n raise NotImplementedError(\"Updating image paths is not yet supported.\")",
"def img_update(self, image=None):\n self.camera_started = True\n try:\n if not self.request_new_image:\n self.sig_msg.emit(self.__class__.__name__ + \": no new image needed, frame dropped.\")\n else:\n self.img = image.copy()\n # convert to grayscale\n self.img = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)\n self.request_new_image = False\n except Exception as err:\n self.sig_msg.emit(self.__class__.__name__, \": exception in img_update \" + str(err))",
"def _update_changed_fields(self, image, image_obj):\n for field in objects.OSImage.fields:\n try:\n patch_val = getattr(image, field)\n except AttributeError:\n continue\n if patch_val == wtypes.Unset:\n patch_val = None\n if image_obj[field] != patch_val:\n image_obj[field] = patch_val"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
function that updates the LED display to the number provided
|
def updateLED(self, num):
    self.displayOff() # call to function to turn off any currently displayed number
    if num == 1:
        self.oneOn() # call to function for display of number 1
    elif num == 2:
        self.twoOn() # call to function for display of number 2
    elif num == 3:
        self.threeOn() # call to function for display of number 3
    elif num == 4:
        self.fourOn() # call to function for display of number 4
    else:
        self.singleOn() # call to function for display of the single bottom LED dot (if shown, it indicates an error)
|
[
"def flashLed(count):\r\n print(\"My LED\")\r\n for i in range(count):\r\n GPIO.output(21, True)\r\n time.sleep(0.25)\r\n GPIO.output(21, False)\r\n time.sleep(0.25)",
"def update():\n\n global ind\n global DNA\n\n #leds = [[\"blue\"] * 8 for _ in xrange(10)];\n #leds = [[\"blue\",\"blue\",\"red\",\"red\",\"blue\",\"blue\",\"blue\",\"blue\"] for _ in xrange(10)];\n #leds[0][ind] = \"green\"\n #ind = (ind + 1) % 8\n DNA = HKY85(.1,0.3,0.1,.35,.3,.25,.1,DNA)\n color = baseToColor(DNA)\n leds = [[color[0],color[1],color[2],color[3]] for _ in xrange(1)];\n plot_leds(canvas, leds)\n root.after(100, update)",
"def update_led(self):\n if self.pwm < 300:\n self.set_led_function([255, 0, 0], \"legs\", \"\", \"\")\n else:\n percentage = self.pwm / 4095\n blue = 255 * percentage\n self.set_led_function([0, 0, blue], \"legs\", \"\", \"all\")",
"async def update_led(led):\n\n global flash_count\n flash_count = 0\n\n while True:\n if flash_count > 0:\n await flash_led(led, 100)\n flash_count -= 1\n else:\n await fade_led(led)",
"def updateLCD(self):\n if self.tick != 0:\n self.tick -= 1\n\n hour = self.tick / 3600\n minute = (self.tick % 3600) / 60\n second = (self.tick % 3600) % 60\n\n self.lcd.display(\"%02d:%02d:%02d\" % (hour, minute, second))\n else:\n self.timer.stop()\n self.btnSet.setEnabled(True)\n self.btnStart.setEnabled(False)\n self.btnReset.setEnabled(True)\n self.btnStart.setText(\"Start\")\n QMessageBox.warning(self,\"Aviso\",\"### ALARM ###\")",
"def updateLCD(self):\n # Check status of GPS unit if it has a lock, otherwise change color of background on LCD to red.\n if time.time() - self.lastScreenTime > self.screens[self.currentScreen][1]: # Time to switch display\n self.currentScreen = self.currentScreen +1\n self.lastScreenTime = time.time() # reset screen timer\n if self.currentScreen > self.nrofscreens - 1:\n self.currentScreen = 0\n self.screens[self.currentScreen][0]()",
"def lamps(val):\n GPIO.output(37, val)\n GPIO.output(38,val)",
"def update_display(value): \n #dog1 = value % 10 #Last digit\n #dog2 = (value // 10) % 10 #second to last one\n #dog3 = (value // 100) % 10 #third to last\n #dog4 = (value // 1000) % 10 #first digit\n \n display_set_digit(3, value[3], double_point=False)\n display_set_digit(2, value[2], double_point=False)\n display_set_digit(1, value[1], double_point=False)\n display_set_digit(0, value[0], double_point=False)\n #raise ValueError(\"Function not implemented.\")",
"def toggle_led():\n global led\n if led == True:\n led = False\n wiimote.led = 0\n else:\n led = True\n wiimote.led = cwiid.LED1_ON",
"def set_led(v):\n fpga_leds_addr = 0x18\n vl = v & 0x0f\n fpga_write(fpga_leds_addr, vl)",
"def setBrightness(self, value = 0):\n\t\tgrovepi.fourDigit_brightness(self.display, value)",
"def Blink(t):\n\tGPIO.output(24,True) #Turn LED on\n\ttime.sleep(t) # Wait t seconds\n\tGPIO.output(24,False) # Turn LED off",
"def light_led(self, led_number, seconds):\n self.turn_on_led(led_number)\n sleep(seconds)\n self.turn_off_leds()",
"def update_display(self, temp):\n # If same temperature, it's unnecesary to refresh the display\n if self.temp != temp:\n # Update last temperature\n self.temp = temp\n # Update colors to fit the new temperature\n X, O = colour_by_temp(temp)\n\n # Since the display is only big enough for two digits, an exception is made for anything over 99\n # If the temp does hit +/- 100 then it will blank out the display given that it still works\n if abs(temp) >= 100:\n for i in range(64):\n self.pixels_matrix[i] = X\n else:\n # Start building the display array (pixels_matrix)\n index = 0\n digitIndex = 0\n digits_representation = digits(X, O)\n left_digit = int(abs(temp) / 10)\n right_digit = int(abs(temp) % 10)\n\n # Iterates each digit across the row and then down the column and sets pixels_matrix\n for _ in range(8): # rows\n for _ in range(4): # columns\n # Update pixels_matrix image (pixels) from pixels model of each digit\n self.pixels_matrix[index] = digits_representation[left_digit][digitIndex] # Left digit\n self.pixels_matrix[index+4] = digits_representation[right_digit][digitIndex] # Right digit\n index = index + 1 # Move to the next colum of the pixels_matrix\n digitIndex = digitIndex + 1 # Move to the next pixel of the digit\n index = index + 4 # Move to the next row of the pixels_matrix\n\n # If temperature < zero, add a minus before the digits\n if temp < 0:\n self.pixels_matrix[24] = X\n else:\n self.pixels_matrix[24] = O\n # Refresh the display\n if temp >= BLINK_TRESHOLD:\n if not self.blinking:\n self.blinking = True\n thread = Thread(target=self.blink)\n thread.start()\n else:\n self.blinking = False\n self.sense.set_pixels(self.pixels_matrix)",
"async def trigger_led(self):\n self.led_on()\n await asyncio.sleep(self.relay_output_duration)\n self.led_off()",
"def led_onoff(onoff):\r\n global led\r\n \r\n if(onoff==1):\r\n led.value(0)\r\n elif(onoff==-1):\r\n led.value(not led.value())\r\n else:\r\n led.value(1)",
"def changeTargetClick():\n global curr_ip\n global ips\n curr_ip = (curr_ip + 1) % len(ips)\n lcdPrint(ips[curr_ip], 1)",
"def update(self, state):\n if state == 1:\n self.on()\n elif state == 0:\n self.off()\n else:\n raise Exception('Invalid light state')",
"def set_backlight(val):\n val = max(0, min(1.0, val))\n board.DISPLAY.auto_brightness = False\n board.DISPLAY.brightness = val"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Function that captures and stores an image using the camera and the LED flash
|
def captureImage(self, location, name, type):
self.camLightOn() #turn flash on
time.sleep(.25)
self.cam.capture(location+name+type) # call to camera image capture function
time.sleep(.25)
self.camLightOff() # flash off
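# Hedged usage sketch (illustrative values only). The three arguments are simply
# concatenated into the output path, so the directory should end with a slash and
# the extension should start with a dot:
#
#   cam.captureImage("/home/pi/captures/", "snapshot_001", ".jpg")
#   # flash on, saves /home/pi/captures/snapshot_001.jpg, flash off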
|
[
"def capture(self):\n self.camera = self.ids['camera']\n timestr = time.strftime(\"%Y%m%d_%H%M%S\")\n self.camera.export_to_png(\"IMG_{}.png\".format(timestr))\n print(\"Captured\")",
"def take_picture(self):\n imgpath = \"\"\n # print(\"Take pic from device %d\" % (self.cv2_cam_dev1))\n try:\n self.lights.headlights(True)\n time.sleep(self.light_wakeup_t)\n cap = cv2.VideoCapture(self.cv2_cam_dev1)\n ret, frame = cap.read()\n self.lights.headlights(False)\n # print(\"Returned %d\" % (ret))\n imgname = \"roboimg\" + str(int(time.time())) + \".png\"\n imgpath = os.path.join(self.imgdir, imgname)\n # print(\"Pic name \" + imgpath)\n cv2.imwrite(imgpath, frame)\n self.logger.warning(\"Captured weed %s\" % (imgpath))\n # When everything done, release the capture\n except:\n print(\"take_picture failed\")\n self.logger.error(\"Take picture failed %s\" % (imgpath))\n raise\n finally:\n cap.release()\n # cv2.destroyAllWindows()\n return imgpath",
"def snapshot(camera):\n #lamps(GPIO.HIGH)\n #reference to camera capture\n with PiRGBArray(camera) as raw:\n #raw = PiRGBArray(camera) \n #get image from camera\n lamps(GPIO.HIGH)\n camera.capture(raw, format='bgr')\n lamps(GPIO.LOW)\n #print('Captured')\n imRef = raw.array\n \n return imRef",
"def capture():\r\n (x, y) = global_camera.get_coordinates()\r\n # Label snapshot image with the x- and y-coordinates:\r\n path = \"/capture/X{}Y{}.jpeg\".format(x,y)\r\n return redirect(path)",
"def take_image_virtualcam(filename, pwi4):\r\n\r\n pwi4.virtualcamera_take_image_and_save(filename)",
"def setup_camera():\n requests.post(API_URL, json={\n \t\"method\": \"startRecMode\",\n \t\"params\": [],\n \t\"id\": 1,\n \t\"version\": \"1.0\"\n })\n requests.post(API_URL, json={\n\t\"method\": \"setPostviewImageSize\",\n\t\"params\": [\"Original\"],\n\t\"id\": 1,\n\t\"version\": \"1.0\"\n })",
"def OnImageEvent(self, image):\n # update all buffers in the camera\n status = True\n #status = True\n #print(status)\n\n if status:\n image_converted = PySpin.Image.Create(image)\n \n \n \n if self.cam.recording:\n image_recorded = PySpin.Image.Create(image)\n self.cam.write_record_frame(image_recorded)\n \n try:\n self.cam.write(image_converted)\n except Exception as ex:\n print('In callback, Error as %s' % ex)\n \n image.Release()\n\n del image\n else:\n print('status is %i' % status)\n \n \n #run user_defined functions\n self.run_func()",
"def snapshot(self):\n ts = datetime.datetime.now() # grab the current timestamp\n filename = \"{}.png\".format(ts.strftime(\n \"%Y-%m-%d_%H-%M-%S\")) # construct filename\n\n ok, frame = self.cap.read()\n image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n image = Image.fromarray(image)\n\n # save image as jpeg file\n image.save('exports/snapshots/' + filename, \"PNG\")\n print(\"[INFO] saved {}\".format(filename))",
"def fnImageCaptureAndTransform():\r\n try:\r\n camera_port = 0\r\n camera = cv2.VideoCapture(camera_port)\r\n\r\n # camera settings\r\n ARcamera.set(3, 640) # width\r\n ARcamera.set(4, 480) # height\r\n ARcamera.set(12, 0) # saturation\r\n ARcamera.set(11, 1) # contrast\r\n ARcamera.set(10, 0) # brightness\r\n \r\n time.sleep(0.1) # wait for camera to stabilize itself\r\n isOk, capturedImage = ARcamera.read()\r\n del(ARcamera)\r\n\r\n \"\"\" Transform image to grayscale and give high contrast \"\"\"\r\n # transformation to grayscale image\r\n grayImage = cv2.cvtColor(capturedImage, cv2.COLOR_BGR2GRAY)\r\n # high contrast image - set with treshold number to treshold hot and stuck pixels\r\n grayImage[grayImage<5] = 0\r\n grayImage[grayImage>=5] = 255\r\n return grayImage\r\n except:\r\n fnException(\"Unexpected error!\\nPlese check Alpha Random Camera and perform calibration of system if problem persists.\", 0)",
"def capture_robot_camera(IP_PEPPER, PORT):\n SubID = \"Pepper\"\n videoDevice = ALProxy('ALVideoDevice', PI_PEPPER, PORT)\n\n # subscribe top camera, get an image with the size of 640x480\n AL_kTopCamera, AL_kQVGA, Frame_Rates = 0, 2, 10 \n AL_kBGRColorSpace = 13 # Buffer contains triplet on the format 0xRRGGBB, equivalent to three unsigned char\n captureDevice = videoDevice.subscribeCamera(SubID, AL_kTopCamera, AL_kQVGA, AL_kBGRColorSpace, Frame_Rates)\n\n width, height = 640, 480\n image = np.zeros((height, width, 3), np.uint8)\n result = videoDevice.getImageRemote(captureDevice)\n\n if result == None:\n print \"Camera problem.\"\n elif result[6] == None:\n print \"No image was captured. \"\n else:\n # translate value to mat\n values = map(ord, list(result[6]))\n i = 0\n for y in range(0, height):\n for x in range(0, width):\n image.itemset((y, x, 0), values[i + 0])\n image.itemset((y, x, 1), values[i + 1])\n image.itemset((y, x, 2), values[i + 2])\n i += 3\n\n # uncomment below lines to see the camera image\n #cv2.imwrite(\"assets/monitor/robocam.png\", image)\n #cv2.imshow(\"Camera image\", image)\n #cv2.waitKey(1)\n\n # unsubscribe from the camera.Otherwise, the camera image\n # might be corrupted. To be absoulutely sure, perform \n # a null check on result[6]\n videoDevice.unsubscribe(captureDevice)\n\n return result[6], image",
"def take_nav_picture(self):\n \n # print(\"Take pic from device %d\" % (self.cv2_cam_dev2))\n imgpath = \"\"\n try:\n self.lights.headlights(True)\n time.sleep(self.light_wakeup_t)\n cap = cv2.VideoCapture(self.cv2_cam_dev2)\n ret, frame = cap.read()\n self.lights.headlights(False)\n # print(\"Returned %d\" % (ret))\n imgname = \"navimg\" + str(int(time.time())) + \".png\"\n imgpath = os.path.join(self.navdir, imgname)\n # print(\"Pic name \" + imgpath)\n cv2.imwrite(imgpath, frame)\n self.logger.warning(\"Captured navi %s\" % (imgpath))\n # When everything done, release the capture\n except:\n print(\"take_picture failed\")\n self.logger.error(\"Take picture failed %s\" % (imgpath))\n raise\n finally:\n cap.release()\n # cv2.destroyAllWindows()\n return imgpath",
"def storeCamera(self):\n self.camera.store()",
"def camera_thread(self):\n camera = picamera.PiCamera()\n camera.resolution = (640, 480)\n #camera.sensor_mode = 7\n #camera.shutter_speed = 10000\n camera.framerate = 30\n camera.rotation = 180\n\n cam_stream = io.BytesIO()\n for foo in camera.capture_continuous(output=cam_stream,\n format='jpeg',\n use_video_port=True,\n quality=15,\n thumbnail=None):\n cam_stream.seek(0)\n with self.image_lock:\n self.image = cam_stream.read()\n self.has_image = True\n cam_stream.seek(0)\n cam_stream.truncate()\n\n # if no clients are connected, just chill ad wait to save power.\n\n while(threading.active_count() < 3):\n pass",
"def captureVideoFrame(self):\n if(self.kinectConnected):\n self.VideoFrame = freenect.sync_get_video_with_res(resolution=freenect.RESOLUTION_HIGH)[0]\n else:\n self.loadVideoFrame()\n self.processVideoFrame()",
"def webcameCapture():\r\n retval, frame = cap.read()\r\n cv2.imwrite(\"webcam.png\",frame)\r\n img=cv2.imread(\"webcam.png\")\r\n return(img)",
"def capture(self):\n # insert the canvas\n self.fitsimage.add(self.canvas, tag='mycanvas')",
"def capture_camera(mirror=True):\n # カメラをキャプチャする\n cap = cv2.VideoCapture(0) # 0はカメラのデバイス番号\n\n ### 下記はMac Bookの設定\n #cap.set(cv2.CAP_PROP_FPS, 60) # カメラFPSを60FPSに設定\n #cap.set(cv2.CAP_PROP_FRAME_WIDTH, FRAME_HEIGHT) # カメラ画像の横幅を1280に設定\n #cap.set(cv2.CAP_PROP_FRAME_HEIGHT, FRAME_WIDTH) # カメラ画像の縦幅を720に設定\n \n ### 手持ちのLogicool Webカメラだと毎秒10フレーム程度が限界っぽい\n cap.set(cv2.CAP_PROP_FPS, 10)\n\n while True:\n # retは画像を取得成功フラグ\n ret, frame = cap.read()\n\n # 鏡のように映るか否か\n if mirror is True:\n frame = frame[:,::-1]\n\n processed_frame = detect(frame.copy())\n\n # フレームを表示する\n cv2.imshow('camera capture', frame)\n cv2.imshow('object recognition', processed_frame)\n\n k = cv2.waitKey(1) # 1msec待つ\n if k == 27: # ESCキーで終了\n break\n\n # キャプチャを解放する\n cap.release()\n cv2.destroyAllWindows()",
"def takePicture(self, mode=None):\n return myro.takePicture(mode)",
"def callback(self, data):\n try:\n # Conversion to cv2 image using bgr8 encoding \n frame = self.bridge.imgmsg_to_cv2(data, \"bgr8\")\n except CvBridgeError as e:\n print(e)\n # Showing frame \n cv.imshow(\"Camera_Stream\", frame)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Repeat a melody x times
|
def repeat_melody(self, melody, n, offset=0):
# parent_melody = Melody([melody] * n)
parent_melody = Melody([melody.clone().step(offset*j) for j in range(n)], key=self.key)
self.play_melody(parent_melody)
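# Hedged usage sketch. Melody, the phrase object and self.key come from the
# surrounding (not shown) module, so this only illustrates the intended call
# pattern rather than runnable code:
#
#   # play the same phrase four times, transposed up by a further 2 steps
#   # on each repetition (offset * j for j = 0..3)
#   composer.repeat_melody(phrase, 4, offset=2)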
|
[
"def at_repeat(self):\r\n pass",
"def repeat(self, repeat, number, domain, *args, **kwargs):\n return PlotTimings(\n self.timer.repeat(domain, repeat, number, *args, **kwargs),\n {\"functions\": self.timer.functions, \"domain\": domain},\n )",
"def repeat(c, cmd, repeat=5, verbose=False, sub='|N|'):\n def log(s):\n if verbose:\n print(s)\n\n for i in range(repeat):\n cmd_cp = cmd.replace(sub, str(i+1))\n log(f'[cmd = {cmd_cp}]')\n\n c.run(cmd_cp)",
"def repeat(s, n):\n return s * n",
"def press_x_times(input_key:str, repetitions:int, interval=.2):\n for _ in range(repetitions):\n bot.press(input_key)\n time.sleep(interval)\n\n return None",
"def repeat(x, n):\n\treturn [x for _ in range(n)]",
"def repeat_once(self):\n self.repeat = True",
"def repeat(a, repeats, axis):\n return cpp.repeat(a, repeats, axis)",
"def _repeat(self, cnt, contents):\n if cnt == 1:\n return \"{ %s }\" % contents;\n else:\n return \"{ int c = %s; while (c--) { %s } }\" % (cnt.codegen(), contents)",
"def test_repeat(self):\n pattern = b'this is a pattern'\n count = 5\n provider = payload_provider.Repeat(pattern, count)\n for payload in provider:\n self.assertEqual(payload, pattern, 'Payload does not reflect the pattern')\n count -= 1\n self.assertEqual(count, 0, 'Generated a wrong number of payloads')",
"def repeat_seq(seq, num=1):\r\n return (it.repeat(x, num) for x in seq)",
"def repeat(s: str, n: int) -> str:\n return s*n",
"def _set_n_repeat_single(self):\n self._n_repeats = 1\n self._n_repeats_finished = 1\n self._k_per_n_repeat = [1]\n self._bagged_mode = False",
"def simulate(self, animation, times=100):\n for time in range(times):\n animation.update(.01)",
"def repeat(value, num) -> SmartExpr:\n return concat(*(value for _ in range(num)))",
"def __init__(self, env: py_environment.PyEnvironment, times: types.Int):\n super(ActionRepeat, self).__init__(env)\n if times <= 1:\n raise ValueError(\n 'Times parameter ({}) should be greater than 1'.format(times)\n )\n self._times = times",
"def repeat(rpx, axiom, rules): \n production = axiom\n for i in range(0, rpx):\n production = produce(production, rules)\n return production",
"def repeat(self, value: int):\n if self.log_value:\n print(f\"[DataType] {self._debug_retriever_name} 'repeat' set to {value} (was: {self._repeat})\")\n self._repeat = value",
"def repeat_elem(values, index, num_times):\n values = values[0:index] + values[index:index+1]*num_times + values[index+1:]\n return values"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Job to start spiders. Returns a Deferred that fires once the crawl has completed.
|
def crawl_job():
settings = get_project_settings()
runner = CrawlerRunner(settings)
return runner.crawl(GamesSpider)
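# Hedged usage sketch: with CrawlerRunner the caller is responsible for the
# Twisted reactor, so a common way to consume the returned Deferred is to stop
# the reactor once the crawl has finished (the standard pattern from the Scrapy
# documentation; "run_crawl_once" is an illustrative name).
from twisted.internet import reactor

def run_crawl_once():
    deferred = crawl_job()
    deferred.addBoth(lambda _: reactor.stop())
    reactor.run()  # blocks until the crawl completes and the reactor is stopped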
|
[
"def schedule_crawler(self) :\n\t\tself.create_new_workspace()\n\t\t#self.add_query_keywords()\n\n\t\treq = urllib2.Request(self.url, json.dumps(self.search_terms), {\"Content-type\" : \"application/json\"})\n\n\t\ttry:\n\t\t\tresponse = urllib2.urlopen(req)\n\t\texcept IOError, e:\n\t\t print \"It looks like something went wrong in scheduling the crawl. Exiting...\"\n\t\t sys.exit(1)\n\n\t\tout = json.loads(response.read())\n\t\t\n\t\tself.job_id = out.keys()[0]\n\n\t\tprint \"Crawling in progress ...\";",
"def RUN_CRAWLER(crawler_):\n crawler_.crawl()",
"def run_spiders(spiders_to_run, settings, kwargs):\n print(\"starting crawl task with arguments %s\" % str(kwargs))\n runner = CrawlerRunner(settings)\n for spider in spiders_to_run:\n runner.crawl(spider, stop_after_crawl=False, **kwargs)\n # what to do once crawling is over\n d = runner.join()\n d.addBoth(lambda _: check_for_task())",
"def crawl():\n # blog crawler\n runner = CrawlerRunner(\n {\n 'FEED_FORMAT': 'json',\n 'FEED_URI': DATA_FILE,\n }\n )\n runner.crawl(GoogleBlog)\n runner.crawl(OpenAI)\n runner.crawl(DeepMind)\n runner.crawl(Uber)\n\n d = runner.join()\n d.addBoth(lambda _: reactor.stop())\n\n reactor.run()",
"def start_crawl():\n self_configuration = get_self_configuration(exception_class=RuntimeError)\n self_node_identifier = self_configuration.node_identifier\n primary_validator = self_configuration.primary_validator\n\n primary_validator_address = format_address(\n ip_address=primary_validator.ip_address,\n port=primary_validator.port,\n protocol=primary_validator.protocol\n )\n\n crawl_banks(primary_validator_address=primary_validator_address, self_node_identifier=self_node_identifier)\n crawl_validators(primary_validator_address=primary_validator_address)\n\n send_connection_requests(node_class=Bank, self_configuration=self_configuration)\n send_connection_requests(node_class=Validator, self_configuration=self_configuration)\n\n cache.set(CRAWL_LAST_COMPLETED, str(timezone.now()), None)\n cache.set(CRAWL_STATUS, CRAWL_STATUS_NOT_CRAWLING, None)\n\n send_crawl_status_notification()",
"def crawl(self):\r\n #beging analyzer and controller thread(actually called their run())\r\n self.__analyzer.start()\r\n self.__controller.start()\r\n #block until controller thread terminate\r\n self.__controller.join(3600)\r\n self.__analyzer.setStopCondition(True)\r\n self.__siteQueueAndCond[1].acquire()\r\n self.__siteQueueAndCond[1].notifyAll()\r\n self.__siteQueueAndCond[1].release()\r\n #block until analyzer thread terminate\r\n self.__analyzer.join()\r\n print \"%d fetchers were useful\" % self.__controller.getNumFetchersUsed()\r\n print(\"%d out of %d sites were succesfully crawles\" %\r\n (len(self.__dbAndLock[0]['pages']),self.__maxPagesToCrawl))\r\n print \"The pages that were succesfully crawled:\"\r\n for s in self.__dbAndLock[0]['pages']:\r\n print self.__dbAndLock[0]['pages'][s].stringUrl\r\n\r\n self.__analyzer.report()\r\n\r\n self.__exporter.export(self.__dbAndLock[0])",
"def startjobs(jobs, concurrency=4, timeout=2, handler=None):\n if handler and isinstance(handler, BaseHandler):\n handler = handler\n elif handler:\n handler = SimpleHandler(preprocess=handler)\n else:\n handler = BaseHandler()\n handler.jobs = jobs\n crawler = Crawler(handler, concurrency=concurrency, timeout=timeout)\n crawler.start()",
"def run(self):\n while True:\n for crawler in self.crawlers:\n crawler.crawl()\n\n print 'Sleeping for %s seconds' % self.crawl_wait\n sleep(self.crawl_wait)",
"def wait_for_crawl(self) :\n\t\t\n\t\tdata = {'job_id' : self.job_id}\n\t\turl_values = urllib.urlencode(data)\n\t\treq_url = \"http://localhost/search-job-state/?\" + url_values\n\n\t\twhile(1) :\n\t\t\ttry:\n\t\t\t\tresponse = urllib2.urlopen(req_url)\n\t\t\texcept IOError, e:\n\t\t\t\tprint \"It looks like something went wrong\"\n\t\t\t\tsys.exit(1)\n\t\t\n\t\t\tstate = response.read()\n\n\t\t\tif(state == 'Done') :\n\t\t\t\tprint \"Crawl complete !\"\n\t\t\t\treturn",
"def go(self):\n \n self.setprop('crawl', crol.Crawl({\n 'seed_url' : self.registration.site,\n 'crawl_report' : crol.CrawlReport({'seed_url':self.registration.site}),\n 'log' : self.log,\n 'nofollow_patterns' : self.registration.nofollow_patterns,\n 'ignore_patterns' : self.registration.ignore_patterns\n }))\n \n self.log.filename = self.registration.department.name\n self.crawl.start(self.crawl.crawl_report.reportnode)\n self.log.reporttofile(self.crawl.crawl_report)\n if self.crawl.crawl_report.statistics['broken_count'] > 0: self.applyactions()",
"def run_spider(sender, instance, created, **kwargs):\n if created:\n scrapyd = ScrapydAPI('http://scrapyd:6800')\n job_id = scrapyd.schedule(BOT_NAME, PARSER_NAME)\n if job_id:\n instance.job_id = job_id\n instance.save(update_fields=['job_id'])",
"def scrape(self):\n log_info(f'jobfunnel glassdoor to pickle running @ {self.date_string}')\n\n # get the search url and data\n search, data = self.get_search_url(method='post')\n\n # get the html data, initialize bs4 with lxml\n request_html = self.s.post(search, data=data)\n\n # create the soup base\n soup_base = BeautifulSoup(request_html.text, self.bs4_parser)\n\n # scrape total number of results, and calculate the # pages needed\n num_res = soup_base.find(\n 'p', attrs={'class', 'jobsCount'}).text.strip()\n num_res = int(re.findall(r'(\\d+)', num_res.replace(',', ''))[0])\n log_info(\n f'Found {num_res} glassdoor results for query=' f'{self.query}')\n\n pages = int(ceil(num_res / self.max_results_per_page))\n\n # init list of job soups\n job_soup_list = []\n # init threads\n threads = ThreadPoolExecutor(max_workers=8)\n # init futures list\n fts = []\n\n # search the pages to extract the list of job soups\n for page in range(1, pages + 1):\n if page == 1:\n fts.append( # append thread job future to futures list\n threads.submit(\n self.search_page_for_job_soups,\n page,\n request_html.url,\n job_soup_list,\n )\n )\n else:\n # gets partial url for next page\n part_url = (\n soup_base.find('li', attrs={'class', 'next'}).find(\n 'a').get('href')\n )\n # uses partial url to construct next page url\n page_url = re.sub(\n r'_IP\\d+\\.',\n '_IP' + str(page) + '.',\n f'https://www.glassdoor.'\n f\"{self.search_terms['region']['domain']}\"\n f'{part_url}',\n )\n\n fts.append( # append thread job future to futures list\n threads.submit(\n self.search_page_for_job_soups,\n page,\n page_url,\n job_soup_list,\n )\n )\n wait(fts) # wait for all scrape jobs to finish\n\n # make a dict of job postings from the listing briefs\n for s in job_soup_list:\n # init dict to store scraped data\n job = dict([(k, '') for k in MASTERLIST_HEADER])\n\n # scrape the post data\n job['status'] = 'new'\n try:\n # jobs should at minimum have a title, company and location\n job['title'] = (\n s.find('div', attrs={'class', 'jobContainer'})\n .find(\n 'a',\n attrs={'class', 'jobLink jobInfoItem jobTitle'},\n recursive=False,\n )\n .text.strip()\n )\n job['company'] = s.find(\n 'div', attrs={'class', 'jobInfoItem jobEmpolyerName'}\n ).text.strip()\n job['location'] = s.get('data-job-loc')\n except AttributeError:\n continue\n\n # set blurb to none for now\n job['blurb'] = ''\n\n try:\n labels = s.find_all('div', attrs={'class', 'jobLabel'})\n job['tags'] = '\\n'.join(\n [l.text.strip() for l in labels if l.text.strip() != 'New']\n )\n except AttributeError:\n job['tags'] = ''\n\n try:\n job['date'] = (\n s.find('div', attrs={'class', 'jobLabels'})\n .find('span', attrs={'class', 'jobLabel nowrap'})\n .text.strip()\n )\n except AttributeError:\n job['date'] = ''\n\n try:\n part_url = (\n s.find('div', attrs={'class', 'logoWrap'}).find(\n 'a').get('href')\n )\n job['id'] = s.get('data-id')\n job['link'] = (\n f'https://www.glassdoor.'\n f\"{self.search_terms['region']['domain']}\"\n f'{part_url}'\n )\n\n except (AttributeError, IndexError):\n job['id'] = ''\n job['link'] = ''\n\n job['query'] = self.query\n job['provider'] = self.provider\n\n # key by id\n self.scrape_data[str(job['id'])] = job\n\n # Do not change the order of the next three statements if you want date_filter to work\n\n # stores references to jobs in list to be used in blurb retrieval\n scrape_list = [i for i in self.scrape_data.values()]\n # converts job date formats into a standard date format\n post_date_from_relative_post_age(scrape_list)\n # apply 
job pre-filter before scraping blurbs\n super().pre_filter(self.scrape_data, self.provider)\n\n # checks if delay is set or not, then extracts blurbs from job links\n if self.delay_config is not None:\n # calls super class to run delay specific threading logic\n super().delay_threader(\n scrape_list, self.get_blurb_with_delay, self.parse_blurb, threads\n )\n\n else: # maps jobs to threads and cleans them up when done\n # start time recording\n start = time()\n\n # maps jobs to threads and cleans them up when done\n threads.map(self.search_joblink_for_blurb, scrape_list)\n threads.shutdown()\n\n # end and print recorded time\n end = time()\n print(f'{self.provider} scrape job took {(end - start):.3f}s')",
"def run(self):\n\n # Start atleast 1 non trivial indexing. The hope is, it\n while not self.create_new_indexer():\n pass\n\n # Start listening for commands.\n while self.number_of_non_trivial_indexes <= self.max_links_to_crawl:\n write_cmd = self.main_thread_cmd_queue.pop(timeout=Crawler.POP_TIMEOUT_IN_SECONDS)\n if isinstance(write_cmd, RunOnMainThread):\n write_cmd.run()\n else:\n logger.warn(\"Main thread received a command it couldn't parse: \", write_cmd)\n\n # Crawling complete. Hola the team!\n logger.info(\n \"Crawling complete. Logged: {n_urls}\".format(\n n_urls=len(\n self.finished_indexers_list)))",
"def open_spider(self, spider):\n logging.info('open spider')",
"def run():\n setup()\n args = read_args()\n\n process = CrawlerProcess(get_project_settings())\n process.crawl('store_item_spider',\n item_num=args.item_num, file_name=args.file_name)\n process.start()\n\n name_base = os.path.splitext(args.file_name)[0]\n write(f'{name_base}.xhtml', convert(f'{name_base}.xml', transform_path() + '/transform.xsl'))",
"def parse(self, response):\n # Grab all the job posting urls\n for sel in response.xpath('//h2[@class=\"jobtitle\"]'):\n posting_url, job_location = self.get_selection_info(sel)\n try:\n self.jentries.append(scrape_job_posting(posting_url, loc=job_location))\n except Exception:\n logging.error(\"Unexpected error with website:\" + posting_url)\n traceback.print_exc()\n # Goto next page up to the end of the pagination div\n try:\n url, url_text = self.get_pagination_info(sel, response)\n if url_text == self.pagination_finish_text:\n self.search_page_index += 1\n logging.log(21, self.name + 'Processing page ' + str(self.search_page_index+1))\n yield scrapy.Request(url)\n except IndexError:\n pass",
"def run_generic_spider( user_id\t\t\t\t= None, \n\t\t\t\t\t\tspider_id\t\t\t= None, \n\t\t\t\t\t\tdatamodel\t\t\t= None, \n\t\t\t\t\t\trun_spider_config\t= None \n\t\t\t\t\t\t):\n\n\tprint \"\\n--- run_generic_spider / spider_id : \", spider_id\n\t\n\t# !!! spider is launched from main.py level !!! \n\t# all relative routes referring to this...\n\tprint \"\\n--- run_generic_spider / os.getcwd() : \", os.getcwd() \n\n\t### flattening run_spider_config : from nested to flat dict \n\t# print \"--- run_generic_spider / run_spider_config : \"\n\t# pprint.pprint(run_spider_config)\n\tprint \"--- run_generic_spider / flattening run_spider_config\"\n\tspider_config_flat = flattenSpiderConfig(run_spider_config)\n\t# print \"--- run_generic_spider / spider_config_flat : \"\n\t# pprint.pprint(spider_config_flat)\n\n\t### instantiate settings and provide a custom configuration\n\t# settings = Settings()\n\t# settings.set('ITEM_PIPELINES', {\n\t# \t'__main__.JsonWriterPipeline': 100\n\t# })\n\n\t### initiating crawler process\n\t# process = CrawlerRunner() \n\t# process = CrawlerProcess()\n\t# process = CrawlerRunner({\n\t# \t'USER_AGENT'\t\t: 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)',\n\t# \t# 'ITEM_PIPELINES' \t: {'scraper.pipelines.MongodbPipeline' : 100 },\n\t# })\n\n\n\n\tprint \"--- run_generic_spider / BOT_NAME : \"\t\n\tprint settings.get('BOT_NAME')\n\tprint \"--- run_generic_spider / USER_AGENT : \"\t\n\tprint settings.get('USER_AGENT')\n\tprint \"--- run_generic_spider / ITEM_PIPELINES : \" \t\n\tprint settings.get('ITEM_PIPELINES').__dict__\n\n\tprint \"\\n--- run_generic_spider / instance process ...\" \t\n\tprocess = CrawlerRunner( settings = settings )\n\n\t### adding crawler.runner as deferred\n\tdef f(q):\n\t\ttry:\n\t\t\t### send custom spider config from run_spider_config\n\t\t\t### cf : https://stackoverflow.com/questions/35662146/dynamic-spider-generation-with-scrapy-subclass-init-error\n\t\t\t\n\t\t\tdeferred = process.crawl(GenericSpider, \n\t\t\t\t\t\t\t\t\t\tuser_id\t\t\t\t= user_id,\n\t\t\t\t\t\t\t\t\t\tdatamodel \t\t\t= datamodel , \n\t\t\t\t\t\t\t\t\t\tspider_id \t\t\t= spider_id ,\n\t\t\t\t\t\t\t\t\t\tspider_config_flat\t= spider_config_flat \n\t\t\t\t\t\t\t\t\t)\n\t\t\t# deferred = process.crawl(ToScrapeSpiderXPath )\n\t\t\t\n\t\t\tdeferred.addBoth(lambda _: reactor.stop())\n\t\t\treactor.run()\n\t\t\tq.put(None)\n\t\texcept Exception as e:\n\t\t\tq.put(e)\n\n\t### putting task in queue and start\n\tq = Queue() \n\tp = Process(target=f, args=(q,))\n\tp.start()\n\tresult = q.get()\n\tp.join()\n\n\tif result is not None:\n\t\traise result",
"def scheduleAllSpiders():\n\n logger.debug(__name__ + \" Scheduling Jobs (PID: \" + str(os.getpid()))\n\n # Clear any previous schedules\n schedule.clear(\"daily-task\")\n schedule.clear(\"recheck-task\")\n\n # Get current job status from scrapyd server\n r = requests.get(api_url + 'daemonstatus.json')\n if r.status_code != 200:\n # Schedule Rechecking incase of NON-200 Response\n logger.error(__name__ + \" Recieved Status Code (deamonstatus.json): \" + str(r.status_code))\n scheduleRechecking()\n return\n try:\n # Parse Response\n response = r.json()\n logger.debug(__name__ + \" Received Response: \" + str(response))\n \n # Check for running or pending jobs\n if response['running'] == 0 and response['pending'] == 0:\n scheduleScheduler()\n\n # Get Projects Deployed \n p = requests.get(api_url + \"listprojects.json\")\n \n if p.status_code !=200:\n logger.error(__name__ + \" Received Status Code (listprojects.json): \"+ str(p.status_code))\n scheduleRechecking()\n return\n\n # Parse Response\n data = p.json()\n\n # Fetch Deployed Spiders for each project\n for project in data['projects']:\n # Get Spiders for project\n s = requests.get(api_url + \"listspiders.json\", params={\"project\":project})\n if s.status_code != 200:\n logger.error(__name__ + \" Received Status Code (listspiders.json?project=\"+project+\") :\" + str(s.status_code))\n return\n\n # Parse Response\n spiders = s.json()\n\n # Schedule Each Spider for project\n for spider in spiders['spiders']:\n # Create a payload\n payload = {\"project\":project, \"spider\":spider}\n\n # Send The Request\n sch = requests.post(api_url + \"schedule.json\", data=payload)\n \n if sch.status_code == 200:\n # Parse Response\n job = sch.json()\n logger.info(__name__ + \" Successfully Scheduled Spider \" + spider + \" JOBID: \" + job['jobid'])\n else:\n logger.error(__name__ + \" Received Status Code (schedule.json <payload> \" + str(payload) + \"): \" + str(sch.status_code))\n logger.error(__name__ + \" Unable to Schedule Spider \" + spider)\n else:\n logger.info(__name__ + \" There are jobs pending! Rescheduling Check!\")\n scheduleRechecking()\n except Exception as e:\n logger.error(__name__ + \" [UNHANDLED] : \" + str(e))\n logger.info(__name__ + \" Recheck Scheduled\")\n scheduleRechecking()",
"def jobs(request):\n query_builder = SOLRJobSearchQueryBuilder(ITEMS_PER_PAGE)\n query = query_builder.build_query(request.GET)\n\n conn = Solr('http://127.0.0.1:8983/solr/')\n results = SearchResults(conn.search(**query))\n\n sponsored_listings = None\n if not isrobot(request):\n linkup = LinkUp()\n\n q = request.GET.get('q', None) or request.GET.get('title', None)\n l = request.GET.get('loc', None)\n\n if l is None:\n if request.GET.get('state', None):\n if not request.GET.get('city', None):\n l = state_abbrev_to_name(request.GET.get('state'))\n else:\n l = request.GET.get('city').title() + ', ' + request.GET.get('state').upper()\n elif request.GET.get('country', None):\n if not request.GET.get('city', None):\n l = country_abbrev_to_name(request.GET.get('country')).title()\n else:\n l = request.GET.get('city').title() + ', ' + country_abbrev_to_name(request.GET.get('country')).title()\n \n c = request.GET.get('company', None)\n\n try:\n response = linkup.search(get_client_ip(request), q, l, c)\n except:\n sponsored_listings = None\n else:\n sponsored_listings = LinkUpResults(response).sponsored_listings\n\n #\n # The pagination is a hack. The django paginator expects to get\n # the entire list of results and then carves out a chunk of those\n # results based on the page requested. SOLR doesn't return the \n # entire list of results though. So we fake it to make it look\n # like it does by generating a list of size 'num_hits', filling\n # the entries for the current page with our results, and filling\n # the other entries with \"don't care\" values.\n #\n jobs = [ None for i in range(results.hits) ]\n page_number = int(request.GET.get('page', '1'))\n\n # XXX start should be in results but pysolr doesn't included it!\n start = int(ITEMS_PER_PAGE) * (page_number - 1)\n jobs[start:start+ITEMS_PER_PAGE] = results.docs\n\n active_filters_vars = active_filters_context(request.GET)\n page_vars = paginate(jobs, request.GET, 'jobs')\n latlng_vars = latlng_context(results)\n\n vars = RequestContext(request, { 'facet_counts': results.facets['facet_fields'] })\n vars.update(page_vars)\n vars.update(active_filters_vars)\n vars.update(latlng_vars)\n vars.update({'sponsored_listings': sponsored_listings})\n\n return render_to_response('jobs/jobs.html', vars)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Loads bars for a given instrument from a CSV-formatted file. The instrument gets registered in the bar feed.
|
def addBarsFromCSV(self, instrument, path, timezone=None):
if timezone is None:
timezone = self.__timezone
rowParser = RowParser(self.getDailyBarTime(), self.getFrequency(), timezone, self.__sanitizeBars)
super().addBarsFromCSV(instrument, path, rowParser)
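# Hedged usage sketch: the enclosing Feed class and its constructor are not shown
# here, so the names below are illustrative only.
#
#   feed = Feed(frequency=bar.Frequency.DAY)
#   feed.addBarsFromCSV("orcl", "orcl-2000.csv")   # "orcl" is now registered in the feed
#   # an explicit pytz timezone would override the feed-level default:
#   # feed.addBarsFromCSV("orcl", "orcl-2000.csv", timezone=pytz.timezone("US/Eastern"))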
|
[
"def addBarsFromCSV(self, instrument, path, timezone = None):\n\n csvfeed.YahooFeed.addBarsFromCSV(self, instrument, path, timezone)",
"def addBarsFromFile(self, instrument, date, path, frequency, timezone=None):\n\n if timezone is not None:\n raise Exception('timezone is not supported, sorry.')\n\n ticks = self.loadTicksFromFile(path)\n bars = []\n\n if frequency == bar.Frequency.TRADE:\n for tick in ticks:\n bars.append(self.__barClass(\n datetime.datetime.strptime(date, '%Y.%m.%d') + \\\n datetime.timedelta(milliseconds = tick['time']),\n tick['bid'],\n tick['bid'],\n tick['bid'],\n tick['bid'],\n 10000,\n None,\n frequency))\n else:\n lastBarNum = None\n lastBar = {}\n\n # convert frequency to msecs\n frequency *= 1000\n\n for tick in ticks:\n if lastBarNum is None or tick['time'] / frequency != lastBarNum:\n if lastBarNum is not None:\n bars.append(self.__barClass(\n lastBar['dateTime'],\n lastBar['open_'],\n lastBar['high'],\n lastBar['low'],\n lastBar['curr'], # close price\n lastBar['volume'],\n lastBar['adjClose'],\n frequency))\n\n lastBar['dateTime'] = datetime.datetime.strptime(date, '%Y.%m.%d') + \\\n datetime.timedelta(milliseconds = tick['time'] - tick['time'] % frequency)\n lastBar['open_'] = tick['bid'] if lastBarNum is None else lastBar['curr']\n lastBar['high'] = lastBar['open_']\n lastBar['low'] = lastBar['open_']\n lastBar['curr'] = lastBar['open_']\n lastBar['volume'] = 10000\n lastBar['adjClose'] = None\n lastBarNum = tick['time'] / frequency\n\n lastBar['curr'] = tick['bid']\n lastBar['low'] = min(lastBar['low'], lastBar['curr'])\n lastBar['high'] = max(lastBar['high'], lastBar['curr'])\n\n super(Feed, self).addBarsFromSequence(instrument, bars)",
"def import_from_csv(self, csv_file):\n data = []\n\n reader = csv.reader(csv_file)\n self.x_labels = next(reader, None)[1:]\n\n for row in reader:\n self.y_labels.append(row[0])\n d = []\n for cell in row[1:]:\n try:\n d.append(float(cell))\n except ValueError:\n d.append(0.0)\n data.append(d)\n self.data = numpy.array(data)\n self.fill()",
"def load_batteries(self, batteries_file): \n batteries = [] \n with open(batteries_file, \"r\") as in_file:\n reader = csv.DictReader(in_file)\n all_batteries = list(reader)\n for battery in all_batteries:\n batteries.append(Battery(battery['x'], battery['y'], battery['capaciteit'])) \n return batteries",
"def load_bars(self, data):\r\n\t\tif type(data) is Bar or type(data) is Bar_1d:\r\n\t\t\tself._bars = data\r\n\t\t\tself._bar_loaded = True\r\n\t\telse:\r\n\t\t\traise TypeError('[RS]: Data should be of Bar frame type.')\r\n\t\tif self._bars.empty:\r\n\t\t\tprint '[RS]: Warning: Empty bar data loaded.'",
"def import_from_csv(self, csv_file):\n reader = csv.reader(csv_file)\n\n self.variable_labels = next(reader, None)[1:]\n self.element_labels = []\n self.data = []\n\n data_mode = True\n for row in reader:\n if not any(row):\n if data_mode:\n data_mode = False\n continue\n else:\n if data_mode:\n self.element_labels.append(row[0])\n self.data.append([int(i) for i in row[1:]])\n else:\n self.weights = [int(i) for i in row[1:]]\n self.neg_min = [int(i) for i in next(reader, None)[1:]]\n self.pos_max = [int(i) for i in next(reader, None)[1:]]\n break",
"def from_csv(self, csv_file):\n try:\n with open(csv_file, 'r') as csvf:\n csv_reader = csv.reader(csvf)\n for item in csv_reader:\n if len(item) != 0:\n self.add_score(Score(\n str(item[0]),\n int(item[1]),\n str(item[2])\n ))\n except:\n print(\"Error: datacsv.csv does not exist\")",
"def load_labels(csv_file):\n with open(csv_file, \"r\") as f:\n labels = f.read()\n labels = labels.split(\",\")\n labels = [int(label) for label in labels]\n return labels",
"def read(self, csv_file):\n f = csv.reader(open(csv_file))\n for row in f:\n self.raw_data.append(row)",
"def load_dollar_bar_sample() -> pd.DataFrame:\r\n\r\n devadarsh.track('load_dollar_bar_sample')\r\n\r\n project_path = os.path.dirname(__file__)\r\n bars_df = pd.read_csv(os.path.join(project_path, 'data/dollar_bar_sample.csv'), index_col=0, parse_dates=[0])\r\n\r\n return bars_df",
"def load_csv(self, name: str, location: str, index: str = None): \n self.basemap[name] = pd.read_csv(location,sep=';')\n if index is not None:\n self.basemap[name].set_index(index, inplace=True)",
"def read_from_csv(self, input_file, delimiter):\n\n # read CSV as UTF-8 encoded file (see also http://stackoverflow.com/a/844443)\n with codecs.open(input_file, encoding='utf8') as fp:\n logger.info(\"Reading venues from \" + input_file + \"...\")\n\n reader = csv.reader(fp, delimiter=delimiter)\n\n # read header\n header = next(reader, None)\n if not header:\n raise IllegalArgumentError(\"Missing header in CSV file.\")\n\n venue_index = header.index(\"venue\")\n year_index = header.index(\"year\")\n identifier_index = header.index(\"identifier\")\n\n # read CSV file\n for row in reader:\n if row:\n self.venues.append(\n Venue(row[venue_index], row[year_index], row[identifier_index])\n )\n else:\n raise IllegalArgumentError(\"Wrong CSV format.\")\n\n self.filename = os.path.basename(input_file)\n logger.info(str(len(self.venues)) + \" venues have been imported.\")",
"def import_books(csv_path):\n f = open(csv_path)\n reader = csv.reader(f)\n header_line = True\n for isbn,title,author,year in reader:\n if header_line:\n header_line = False\n continue\n print(f\"Adding a book titled \\\"{title}\\\" by {author}.\")\n book = Book(isbn=isbn, title=title, author=author, year=year)\n db.session.add(book)\n db.session.commit()",
"def load_unit_info(filename):\n \n hf.verify_directory(filename+ '.csv')\n unitinfo = []\n \n # open the csv\n with open(filename+'.csv') as csvfile:\n csvreader = csv.reader(csvfile)\n for row in csvreader:\n unitinfo.append(map(int, row)) # unit info is list of [tetrode, unit] pairs\n \n return unitinfo",
"def readTruBlu(csvfile):\n sep = ','\n header = 0\n skiprows = 16 #this is somewhat weak, number of lines could change over time??\n\t# Definitely weak. Probably an automated read to csv header would be better\n index_col = 3\n #names = ['ID','Name','Address','Time of Acquisition','Elapsed(Sec)','Level(PSI)','Temperature (\\'C)','Battery Voltage(Volt)','Supply Voltage(Volt)','Scan No','blank']\n parse_dates = True\n #skip_footer = 1\n #print(csvfile)\n #df = read_csv(csvfile, sep=sep, names=names, skiprows=skiprows, index_col=index_col, parse_dates=parse_dates)\n \n try:\n if os.stat(csvfile).st_size > 0:\n df = read_csv(csvfile, sep=sep, skiprows=skiprows, header=header, index_col=index_col, parse_dates=parse_dates)\n return df\n else:\n print((csvfile + \" is empty\"))\n except OSError:\n print((csvfile + \" does not exist\"))",
"def load_and_plot_data(filename):\n df = pd.load_csv(filename, index_col=0)\n df.hist()\n return df",
"def load():\n with open('../../data/Figures_Dict.csv', mode='r') as infile:\n reader = csv.reader(infile)\n FiguresDictionary.figures_dict = dict((rows[0], '# ') for rows in reader)\n FiguresDictionary.figures_re = re.compile(r'\\b(%s)\\b' % '|'.join(FiguresDictionary.figures_dict.keys()))\n FiguresDictionary.numbers_re = re.compile(r'\\w*\\d\\w*')\n FiguresDictionary.replace_text = '# '",
"def load_from_file_csv(cls):\n\n file = \"{}.csv\".format(cls.__name__)\n\n list_objs = []\n Headers = []\n with open(file, 'r', newline='') as file:\n from_csv = csv.reader(file)\n count = 0\n for row in from_csv:\n if count == 0:\n Headers = row\n count += 1\n else:\n dict = {}\n for i in range(len(row)):\n dict[Headers[i]] = int(row[i])\n list_objs.append(cls.create(**dict))\n return list_objs",
"def from_csv(self, csv_path, has_headers = True, delim = ',', spec_format = False):\n\n self.infilepath = os.path.abspath(csv_path)\n\n if not self.discretized:\n self.row_data, self.headers = read_csv_rows(csv_path, has_headers, delim, spec_format)\n self.build_col_data()\n\n else:\n raise Warning(\"You may not import new data into \\\n an already discretized time series\")\n return"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Builds wait in line for more slaves. This method runs in the background on another thread, watching for idle slaves and handing them out to the waiting builds.
|
def _slave_allocation_loop(self):
while True:
# This is a blocking call that will block until there is a prepared build.
build_scheduler = self._scheduler_pool.next_prepared_build_scheduler()
while build_scheduler.needs_more_slaves():
claimed_slave = self._idle_slaves.get()
# Remove dead and shutdown slaves from the idle queue
if claimed_slave.is_shutdown() or not claimed_slave.is_alive(use_cached=False):
continue
# The build may have completed while we were waiting for an idle slave, so check one more time.
if build_scheduler.needs_more_slaves():
# Potential race condition here! If the build completes after the if statement is checked,
# a slave will be allocated needlessly (and run slave.setup(), which can be significant work).
self._logger.info('Allocating {} to build {}.', claimed_slave, build_scheduler.build_id)
build_scheduler.allocate_slave(claimed_slave)
else:
self.add_idle_slave(claimed_slave)
self._logger.info('Done allocating slaves for build {}.', build_scheduler.build_id)
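# Hedged sketch of how this loop is typically started. The docstring says it runs
# in the background on another thread; the exact thread wrapper used by the real
# service is not shown, so threading.Thread is only an illustration:
#
#   allocation_thread = threading.Thread(
#       target=self._slave_allocation_loop, name='SlaveAllocationLoop', daemon=True)
#   allocation_thread.start()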
|
[
"def wait(self, num_slaves = 0):\n\n\t\t# wait for one to finish\n\t\twhile len(self.slaves) > num_slaves:\n\n\t\t\ttime.sleep(.1)\n\n\t\t\tfor pid in self.slaves.keys():\n\n\t\t\t\tself.slaves[pid].update()\n\t\t\t\tif self.incremental_output:\n\t\t\t\t\tself.slaves[pid].print_new_output()\n\t\t\t\t\tsys.stdout.flush()\n\n\t\t\t\tif self.slaves[pid].status != None:\n\t\t\t\t\tif not self.incremental_output and (not self.only_output_bad or self.slaves[pid].status):\n\t\t\t\t\t\tself.slaves[pid].print_all_output()\n\t\t\t\t\t\tsys.stdout.flush()\n\t\t\t\t\tprint self.slaves[pid].name + \" finished with status \" + str(self.slaves[pid].status) + \" duration \" + str(datetime.timedelta(seconds = self.slaves[pid].time_end - self.slaves[pid].time_start))\n\t\t\t\t\tsys.stdout.flush()\n\t\t\t\t\tif self.slaves[pid].status != 0:\n\t\t\t\t\t\tself.bad.append(self.slaves[pid])\n\t\t\t\t\tdel self.slaves[pid]\n\t\t\t\t\tif len(self.slaves) > num_slaves:\n\t\t\t\t\t\tbreak\n\t\t\t\t\tcontinue\n\n\t\t\t\tif self.slaves[pid].killed:\n\t\t\t\t\tcontinue\n\n\t\t\t\tif self.time_limit:\n\t\t\t\t\tif time.time() - self.slaves[pid].time_start > self.time_limit:\n\t\t\t\t\t\tprint self.slaves[pid].name + \" exceeded time limit\"\n\t\t\t\t\t\tsys.stdout.flush()\n\t\t\t\t\t\tself.slaves[pid].kill()\n\t\t\t\t\t\tcontinue\n\n\t\t\t\tif self.slaves[pid].new_output:\n\t\t\t\t\tif self.errorre.search(self.slaves[pid].new_output):\n\t\t\t\t\t\tprint self.slaves[pid].name + \" output an error\"\n\t\t\t\t\t\tsys.stdout.flush()\n\t\t\t\t\t\tself.slaves[pid].kill()\n\t\t\t\t\t\tcontinue",
"def poll():\n global master_thread, slave_threads\n master_thread.start()\n for i in slave_threads:\n slave_threads[i].start()",
"def wait(self, num_slaves, timeout=0):\n command = [\n b'WAIT',\n ascii(num_slaves).encode('ascii'),\n ascii(timeout).encode('ascii')\n ]\n return self._execute(command)",
"def wait(self):\n \n # Wait for some time\n time.sleep(10)\n\n count = 0\n while self.work_pending():\n time.sleep(5)\n \n # Every 2 minutes raise heartbeat event\n count += 5\n if count == 120:\n self.eventr.publish(self, 'heartbeat')\n count = 0 \n\n # Push empty values\n [w.stop() for w in self.workers]\n # [w.join() for w in self.workers]\n \n self.eventr.publish(self, 'crawl_ended') \n log.info('Crawl done.')\n\n # Wait a bit\n time.sleep(2)\n \n # print self.url_graph\n self.stats.publish_stats()\n log.info(\"Log file for this crawl can be found at\", os.path.abspath(self.task_logfile))",
"def _wait_workers(self):\n self.client = get_client(self.master_address)\n logging.debug(\"client scheduler info: {}\".format(self.client.scheduler_info()))\n if int(self.world_size) <= 1:\n self.worker_portion = 1\n worker_count_min = int(self.world_size * self.worker_portion)\n\n for _ in range(100):\n time.sleep(1)\n n_workers = len(self.client.scheduler_info()[\"workers\"])\n logging.info(\"Accessed Workers: {}\".format(n_workers))\n if n_workers >= worker_count_min:\n workers = self.client.scheduler_info()[\"workers\"]\n workers_list = []\n workers_port = {}\n for k, _ in workers.items():\n workers_list.append(k)\n (ip, port) = k.replace(\"//\", \"\").split(\":\")[1:]\n if ip in workers_port:\n workers_port[ip].append(port)\n else:\n workers_port[ip] = [port]\n os.environ[\"vega_workers_list\"] = json.dumps(workers_port)\n logging.info(\"worker list: {}\".format(workers_list))\n slave_ips = list(set([item[6:].split(\":\")[0] for item in workers_list]))\n slave_ips.remove(General.cluster.master_ip)\n General.cluster.salves = slave_ips\n return 1\n return 0",
"def wait(self) -> None:\n L.get_logger().log(f\"SlurmBaseRunner::run: Waiting for added commands to finish.\",\n level=\"debug\")\n self.batch_interface.wait()",
"def wait(self):\n logging.info(\"waiting for {} jobs to complete\".format(len(self.submissions)))\n while not self.shutdown:\n time.sleep(1)",
"def any_builds_running(self):",
"def deploy_slaves():\n # Time for our slaves\n _, master_ip = get_master_dns_ip()\n if master_ip:\n # Test and see if we can find existing slaves\n slave_list = get_slave_dns_list()\n if NO_OF_SLAVES - len(slave_list) > 0:\n print 'Found {0} existing slaves creating {1} new slaves'.format(len(slave_list),\n NO_OF_SLAVES - len(slave_list))\n create_slaves(NO_OF_SLAVES - len(slave_list))\n host_list = [slave.public_dns_name for slave in SLAVE_INSTANCES.itervalues()] + slave_list\n else:\n print 'No more slaves needed'\n host_list = slave_list\n\n execute(run_slave_tasks, hosts=host_list)\n else:\n print 'Setup a Master first'",
"def on_wait(self, wait):\n # FIXME: Use 'gen_loop_times'.\n start = label(self.lineno, 'wait_%i' % wait.time)\n self.insert(\"lda %s\" % absarg(wait.time))\n self.insert(\"get d0\")\n counter = mem_counter(self.lineno)\n self.insert(\"store d0 %s\" % counter)\n self.insert(\"%s :\" % start)\n\n # Inner loop.\n inner = label(self.lineno, 'inner_wait')\n self.insert(\"lda %s\" % absarg(200))\n self.insert(\"%s :\" % inner)\n [self.insert(\"nop\") for _ in range(14)]\n self.insert(\"sub %s\" % absarg(1))\n inner_end = label(self.lineno + 2, 'inner_end')\n self.insert(\"jmpz %s\" % inner_end)\n self.insert(\"jmp %s\" % inner)\n self.insert(\"%s :\" % label(self.lineno, 'inner_end'))\n\n # Outer check.\n self.insert(\"load d0 %s\" % counter)\n self.insert(\"put d0\")\n self.insert(\"sub %s\" % absarg(1))\n self.insert(\"get d0\")\n self.insert(\"store d0 %s\" % counter)\n outer_end = label(self.lineno + 2, 'wait_end')\n self.insert(\"jmpz %s\" % outer_end)\n self.insert(\"jmp %s\" % start)\n self.insert(\"%s :\" % outer_end)",
"def wait_everybody():\n log.info('Waiting for SSH on all nodes')\n for i in seeds + nodes:\n i.wait_ready()",
"def put_robots_to_work(robots, num_robots):\r\n print('%s New Robots were created' % num_robots)\r\n print('Putting the robots to work. This may take some time depending on how many robots have been created.\\n')\r\n robot_threads = []\r\n for robot in robots:\r\n try:\r\n r_thread = threading.Thread(target=robot.complete_chores)\r\n robot_threads.append(r_thread)\r\n r_thread.start()\r\n except:\r\n print('Error starting thread.')\r\n\r\n for thread in robot_threads:\r\n thread.join()",
"def _nextSlave(self, builder, slaves):\n request = builder.current_builder_request\n target_name = request.properties.getProperty('target-slave')\n\n if target_name:\n # See if we have the requested slave.\n for slave_builder in slaves:\n if slave_builder.slave.slavename == target_name:\n return slave_builder\n\n for slave_builder in slaves:\n if slave_builder.slave.canStartBuild():\n return slave_builder\n\n return random.choice(slaves)",
"def start_ready_jobs(self):\n for case in self.cases:\n for job in case['jobs']:\n if job.status != JobStatus.VALID:\n continue\n if len(self.running_jobs) >= self.max_running_jobs:\n msg = 'running {} of {} jobs, waiting for queue to shrink'.format(\n len(self.running_jobs), self.max_running_jobs)\n if self.debug:\n print_line(msg)\n return\n deps_ready = True\n for depjobid in job.depends_on:\n depjob = self.get_job_by_id(depjobid)\n if depjob.status != JobStatus.COMPLETED:\n deps_ready = False\n break\n\n # if 'ilamb' in job.msg_prefix():\n # import ipdb; ipdb.set_trace()\n job.check_data_ready(self.filemanager)\n if deps_ready and job.data_ready:\n\n # if the job was finished by a previous run of the processflow\n\n if job.postvalidate(self.config):\n job.status = JobStatus.COMPLETED\n self._job_complete += 1\n job.handle_completion(\n filemanager=self.filemanager,\n config=self.config)\n self.report_completed_job()\n continue\n\n # set to pending before data setup so we dont double submit\n job.status = JobStatus.PENDING\n\n # setup the data needed for the job\n job.setup_data(\n config=self.config,\n filemanager=self.filemanager,\n case=job.case)\n # if this job needs data from another case, set that up too\n if isinstance(job, Diag):\n if job.comparison != 'obs':\n job.setup_data(\n config=self.config,\n filemanager=self.filemanager,\n case=job.comparison)\n\n # get the instances of jobs this job is dependent on\n dep_jobs = [self.get_job_by_id(\n job_id) for job_id in job._depends_on]\n run_id = job.execute(\n config=self.config,\n dryrun=self.dryrun,\n depends_jobs=dep_jobs)\n self.running_jobs.append({\n 'manager_id': run_id,\n 'job_id': job.id\n })\n if run_id == 0:\n job.status = JobStatus.COMPLETED\n self.monitor_running_jobs()",
"def setup_node(self):\n config = self._settings\n\n while not self.is_master_ready(config.jar_download_url):\n self._logger.info('Master not ready yet, sleeping for 10sec!')\n time.sleep(10)\n\n self._logger.info('Master %s is now ready.' % config.master_url)\n signal.signal(signal.SIGINT, self.signal_handler)\n signal.signal(signal.SIGTERM, self.signal_handler)\n\n if (self.download_jar_file(config.slave_jar_file)):\n self._logger.info(\n 'Downloaded slave jar file from %s to %s.' %\n (config.jar_download_url, config.slave_jar_file)\n )\n else:\n self._logger.info(\n 'Could not download slave jar file from %s to %s.' %\n (config.jar_download_url, config.slave_jar_file)\n )\n\n os.chdir(config.slave_working_dir)\n self._logger.info('Current cwd is %s.' % os.getcwd())\n\n if config.clean_working_dir:\n self.clean_node(config.slave_working_dir)\n self._logger.info('Cleaned up working directory.')\n\n self.create_node(config.slave_working_dir)\n self._logger.info(\n 'Created temporary Jenkins slave %s.' %\n config.slave_name\n )\n self._process = self.run_process()\n self._logger.info(\n 'Started Jenkins slave with name \"%s\" and labels [%s].' %\n (config.slave_name, config.slave_labels)\n )\n self._process.wait()\n self._logger.info('Jenkins slave stopped.')\n if config.slave_name:\n self.delete_node(config.slave_name)\n self._logger.info('Removed temporary Jenkins slave.')",
"def wait(build_id):\n api = Heroku()\n\n while True:\n if api.check_build_status(build_id):\n break\n sleep(3)",
"def wait_for_slave_thread(server, timeout=None, wait_for_running=True,\n threads=None):\n while (timeout is None or timeout > 0) and \\\n not _check_condition(server, threads, wait_for_running):\n time.sleep(1)\n timeout = timeout - 1 if timeout is not None else None\n if not _check_condition(server, threads, wait_for_running):\n raise _errors.TimeoutError(\n \"Error waiting for slave's thread(s) to either start or stop.\"\n )",
"def set_up_channels(self):\n\n yield self.reg.cd(['Servers','software_laser_lock'])\n lasers_to_lock = yield self.reg.get('lasers')\n for chan in lasers_to_lock:\n self.lasers[chan] = yield self.reg.get(chan)\n self.lasers[chan] = list(self.lasers[chan])\n \n #self.lc.start(self.timer)\n self.loop_server()",
"def run_on_local_slaves(cmd):\n slave_host = slave_hosts_cfg.get_slave_host_config(socket.gethostname())\n slaves = slave_host.slaves\n results = {}\n procs = []\n for (slave, _) in slaves:\n try:\n proc = _launch_cmd(cmd, cwd=os.path.join(buildbot_path, slave,\n 'buildbot'))\n procs.append((slave, proc))\n except OSError as e:\n results[slave] = SingleCommandResults.make(stderr=str(e))\n\n for slavename, proc in procs:\n results[slavename] = _get_result(proc)\n\n return MultiCommandResults(results)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Add a slave to the idle queue.
|
def add_idle_slave(self, slave):
try:
slave.mark_as_idle()
self._idle_slaves.put(slave)
except SlaveMarkedForShutdownError:
pass
|
[
"def add(self, slave):\n\n\t\tself.slaves[slave.pid] = slave",
"def add_to_master(self):\n ordered_queue = self.queueorderer.get_queue()\n if (len(ordered_queue) == 0):\n pass\n else:\n for line in ordered_queue:\n self.master.write(line)",
"def multiroom_add(self, slave_ip: str) -> str:\n self._logger.info(\"Slaving '\"+str(slave_ip)+\"' to this device...\")\n info = self._device_info()\n secure = info.get('securemode')\n args = [info.get('ssid'), info.get('WifiChannel'), info.get('auth') if secure else \"OPEN\",\n info.get('encry') if secure else \"\", info.get('psk') if secure else \"\"]\n self._logger.debug(\"Opening client connection to slave device '\"+str(slave_ip)+\"'...\")\n slave = linkplayctl.Client(slave_ip)\n return slave.multiroom_master(*args)",
"def test_add_out_of_sync_slave_node(self):\n mock_slave = Mock()\n mock_slave.is_slave = True\n mock_slave.master_name = 'a:1'\n mock_slave.cluster_id = 0\n mock_slave.timestamp = 700\n self._cluster.add_node(mock_slave)\n assert_items_equal(self._cluster.slaves, [])\n assert_items_equal(self._cluster.lost, [mock_slave])",
"def test_add_up_to_date_slave_node(self):\n mock_slave = Mock()\n mock_slave.is_slave = True\n mock_slave.master_name = 'a:1'\n mock_slave.cluster_id = 0\n mock_slave.timestamp = 1000\n self._cluster.add_node(mock_slave)\n assert_items_equal(self._cluster.slaves, [mock_slave])\n assert_items_equal(self._cluster.lost, [])",
"def add_master_node(self, host_ip):\n\t\tself.swarm_manager.add_master_node(host_ip)",
"def add_slaves(no_of_slaves=''):\n _, master_ip = get_master_dns_ip()\n if master_ip and no_of_slaves:\n # Test and see if we can find existing slaves\n create_slaves(int(no_of_slaves))\n host_list = [slave.public_dns_name for slave in SLAVE_INSTANCES.itervalues()]\n execute(run_slave_tasks, hosts=host_list)\n else:\n print 'Setup a Master first'",
"def test_add_slave_node_with_wrong_master(self):\n mock_slave = Mock()\n mock_slave.is_slave = True\n mock_slave.master_name = 'b:1'\n mock_slave.cluster_id = 0\n mock_slave.timestamp = 1000\n self._cluster.add_node(mock_slave)\n assert_items_equal(self._cluster.slaves, [])\n assert_items_equal(self._cluster.lost, [mock_slave])",
"def add_to_queue(self, sid, data):\n self.activation_queue.put((sid, data))",
"def _add_queue(self, name, register_event):\n q = queue.Queue()\n register_event(q.put)\n self._queues[name] = q",
"def add_slave_group_id(self, slave_group_id, persister=None):\n persister.exec_stmt(Group.INSERT_MASTER_SLAVE_GROUP_MAPPING,\n {\"params\": (self.__group_id, slave_group_id)})",
"def add_sensor(self, sensor_thread):\n i = len(self.sensors)\n sensor_thread.master = self\n self.sensors.append(sensor_thread)\n self.sensors[i].daemon = True",
"def addque(self, qkey, queue, update=False):\n if update or (qkey not in self.kqmap):\n self.kqmap[qkey] = queue",
"def add_bot(self, bot):\n self.bots.append(bot)",
"def _slave_allocation_loop(self):\n while True:\n # This is a blocking call that will block until there is a prepared build.\n build_scheduler = self._scheduler_pool.next_prepared_build_scheduler()\n\n while build_scheduler.needs_more_slaves():\n claimed_slave = self._idle_slaves.get()\n\n # Remove dead and shutdown slaves from the idle queue\n if claimed_slave.is_shutdown() or not claimed_slave.is_alive(use_cached=False):\n continue\n\n # The build may have completed while we were waiting for an idle slave, so check one more time.\n if build_scheduler.needs_more_slaves():\n # Potential race condition here! If the build completes after the if statement is checked,\n # a slave will be allocated needlessly (and run slave.setup(), which can be significant work).\n self._logger.info('Allocating {} to build {}.', claimed_slave, build_scheduler.build_id)\n build_scheduler.allocate_slave(claimed_slave)\n else:\n self.add_idle_slave(claimed_slave)\n\n self._logger.info('Done allocating slaves for build {}.', build_scheduler.build_id)",
"def _additem(self):\n\n self.queue.put(self._genitem())",
"def queue_command(self, cmd):\r\n \r\n self.queue.append(cmd)\r\n with self.condition:\r\n self.status = READY\r\n self.condition.notify()",
"def add_to_queue(self, video_id):\n self.start_session_if_none()\n self._session.add_to_queue(video_id)",
"def push(self, actor):\n busy_actors = []\n if self._future_to_actor.values():\n _, busy_actors = zip(*self._future_to_actor.values())\n if actor in self._idle_actors or actor in busy_actors:\n raise ValueError(\"Actor already belongs to current ActorPool\")\n else:\n self._return_actor(actor)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set up the Tuya switch.
|
def setup_platform(hass, config, add_devices, discovery_info=None):
from . import pytuya
devices = config.get(CONF_SWITCHES)
switches = []
pytuyadevice = pytuya.OutletDevice(config.get(CONF_DEVICE_ID), config.get(CONF_HOST), config.get(CONF_LOCAL_KEY))
pytuyadevice.set_version(float(config.get(CONF_PROTOCOL_VERSION)))
if len(devices) > 0:
for object_id, device_config in devices.items():
outlet_device = TuyaCache(pytuyadevice)
switches.append(
TuyaDevice(
outlet_device,
device_config.get(CONF_NAME),
device_config.get(CONF_FRIENDLY_NAME, object_id),
device_config.get(CONF_ICON),
device_config.get(CONF_ID),
device_config.get(CONF_CURRENT),
device_config.get(CONF_CURRENT_CONSUMPTION),
device_config.get(CONF_VOLTAGE)
)
)
print('Setup localtuya subswitch [{}] with device ID [{}] '.format(device_config.get(CONF_FRIENDLY_NAME, object_id), device_config.get(CONF_ID)))
_LOGGER.info("Setup localtuya subswitch %s with device ID %s ", device_config.get(CONF_FRIENDLY_NAME, object_id), config.get(CONF_ID) )
else:
outlet_device = TuyaCache(pytuyadevice)
switches.append(
TuyaDevice(
outlet_device,
config.get(CONF_NAME),
config.get(CONF_FRIENDLY_NAME),
config.get(CONF_ICON),
config.get(CONF_ID),
config.get(CONF_CURRENT),
config.get(CONF_CURRENT_CONSUMPTION),
config.get(CONF_VOLTAGE)
)
)
print('Setup localtuya switch [{}] with device ID [{}] '.format(config.get(CONF_FRIENDLY_NAME), config.get(CONF_ID)))
_LOGGER.info("Setup localtuya switch %s with device ID %s ", config.get(CONF_FRIENDLY_NAME), config.get(CONF_ID) )
add_devices(switches)
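
For illustration, a minimal sketch of the config dict this setup expects; the CONF_* key names below are placeholder stand-ins (assumptions, not the component's real constants), and every value is fake:

# Hypothetical stand-ins for the CONF_* constants used above (assumed to be plain strings).
CONF_HOST, CONF_DEVICE_ID, CONF_LOCAL_KEY = "host", "device_id", "local_key"
CONF_PROTOCOL_VERSION, CONF_SWITCHES = "protocol_version", "switches"
CONF_FRIENDLY_NAME, CONF_ID = "friendly_name", "id"

config = {
    CONF_HOST: "192.168.1.50",           # placeholder LAN address of the Tuya device
    CONF_DEVICE_ID: "aabbccddeeff0011",  # placeholder device id
    CONF_LOCAL_KEY: "0123456789abcdef",  # placeholder local key
    CONF_PROTOCOL_VERSION: "3.3",
    CONF_SWITCHES: {                     # one entry per sub-switch on a multi-gang device
        "switch_1": {CONF_FRIENDLY_NAME: "Desk Lamp", CONF_ID: "1"},
        "switch_2": {CONF_FRIENDLY_NAME: "Fan", CONF_ID: "2"},
    },
}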
|
[
"def init_tetanus_lib(self) -> None:\n\n # Can't set instance attributes in fixture with scope='class', only class attributes.\n cls = type(self)\n cls._tetanus_lib = Tetanus() # pylint: disable=protected-access\n cls._echo_port = 1337 # pylint: disable=protected-access",
"def initControllerSetup(self):\r\n # Set the front motors to be the followers of the rear motors\r\n self.frontLeft.set(WPI_TalonSRX.ControlMode.Follower, DRIVETRAIN_REAR_LEFT_MOTOR)\r\n self.frontRight.set(WPI_TalonSRX.ControlMode.Follower, DRIVETRAIN_REAR_RIGHT_MOTOR)\r\n\r\n # Set the neutral output mode to Brake/Coast/\r\n self.leftTalon.setNeutralMode(WPI_TalonSRX.NeutralMode.Brake)\r\n self.rightTalon.setNeutralMode(WPI_TalonSRX.NeutralMode.Brake)\r\n\r\n # Diable the motor-safety\r\n self.diffDrive.setSafetyEnabled(False)\r\n\r\n # Set the feedback sensor phases\r\n self.leftTalon.setSensorPhase(True)\r\n self.rightTalon.setSensorPhase(True)\r\n\r\n # Setup the Pigeon IMU and Talon Mag Encoders\r\n self.initPigeonIMU()\r\n self.initQuadratureEncoder()\r\n\r\n # Set the voltage compensation to 12V and disable it for now\r\n self.leftTalon.configVoltageCompSaturation(12.0, 10)\r\n self.leftTalon.enableVoltageCompensation(False)\r\n self.rightTalon.configVoltageCompSaturation(12.0, 10)\r\n self.rightTalon.enableVoltageCompensation(False)\r\n\r\n # PIDF slot index 0 is for autonomous wheel postion\r\n self.leftTalon.config_kP(0, 0.8, 10)\r\n self.leftTalon.config_kI(0, 0.0, 10)\r\n self.leftTalon.config_kD(0, 0.0, 10)\r\n self.leftTalon.config_kF(0, 1023 / 12, 10) # 10-bit ADC units / 12 V\r\n self.rightTalon.config_kP(0, 0.8, 10)\r\n self.rightTalon.config_kI(0, 0.0, 10)\r\n self.rightTalon.config_kD(0, 0.0, 10)\r\n self.rightTalon.config_kF(0, 1023 / 12, 10) # 10-bit ADC units / 12 V\r\n\r\n # PIDF slot index 1 is for autonomous heading postion\r\n self.leftTalon.config_kP(1, 1.0, 10)\r\n self.leftTalon.config_kI(1, 0, 10)\r\n self.leftTalon.config_kD(1, 0, 10)\r\n self.leftTalon.config_kF(1, 0, 10)\r\n self.rightTalon.config_kP(1, 1.0, 10)\r\n self.rightTalon.config_kI(1, 0, 10)\r\n self.rightTalon.config_kD(1, 0, 10)\r\n self.rightTalon.config_kF(1, 0, 10)",
"def __init__(self):\n self.wlbt = WalabotAPI\n self.wlbt.Init()\n self.wlbt.SetSettingsFolder()",
"def __init__(self):\n\n self.config = {\n 'debug': False,\n 'enable': False,\n 'secret': '',\n 'timeout': 120,\n 'delay': 3,\n 'drift_backward': 1,\n 'drift_forward': 1,\n }\n self.config_path = os.path.join(os.environ['HOME'], '.ssh', 'otp')\n self.load()",
"def setUp(self):\n self.CLI = TestBTCPBNBcmdCreate.cli\n self.obj = TestBTCPBNBcmdCreate.obj",
"def setup(self):\n # Create an underlying uwsgi app to handle the setup and execution.\n if \"nginx\" in self.config:\n if self.config[\"nginx\"].get(\"enabled\", False) is True:\n self.nginx = nginx(self.config[\"nginx\"])\n self.nginx.run()\n\n # Create an underlying uwsgi app to handle the setup and execution.\n self = uwsgi.createObject(self)\n\n # We call this last here because we are going to update variables if we use ``uwsgi`` for execution.\n super().setup()",
"def setUp(self):\n self.CLI = BTCPBNBCommand()",
"def setup(self, bt):\n self.beamtransfer = io.get_beamtransfer(bt)",
"def setup_class(self):\n self.dut = self.android_devices[0]\n required_params = dir(VPN_PARAMS)\n required_params = [x for x in required_params if not x.startswith('__')]\n self.unpack_userparams(required_params)\n wifi_test_utils.wifi_test_device_init(self.dut)\n wifi_test_utils.wifi_connect(self.dut, self.wifi_network)\n time.sleep(3)",
"def setUp(self):\n # Run general bot test setup\n super(TestAngle, self).setUp()\n self.turret_conf = lib.get_config()['turret']\n\n # Build turret in testing mode\n self.turret = t_mod.Turret()",
"def setup(self):\n # Listen for all updates\n self._init_webhooks()",
"def mainSetup():\n setupGlobals()\n setupCallbacks()",
"async def hue_setup(self, ctx, ip):\n await self.config.ip.set(ip)\n self.bridge = Bridge(await self.config.ip())\n self.lights = self.bridge.lights",
"def setup(self, abstraction) :\n pass",
"def sixteen_ch_setup(self):\n\n print \"Setting up for generic 16 channel readout...\\n\"\n # initialize edt interface\n InitFile = eolib.getCfgVal(self.CfgFile,\"INIT_FILE\")\n if not self.CheckIfFileExists(InitFile):\n print \"Init File not found. Exiting sixteen channel setup\"\n return\n #self.runcmd([\"initrcx0\"]) # This script just does the following:\n self.runcmd([self.EDTdir+\"/initcam\", \"-u\", \"0\", \"-c\", \"0\", \"-f\", InitFile]) \n\n self.runcmd([self.edtsaodir+\"/crst\"]) # Camera reset\n # Turn off the greyscale generator\n print \"Turning greyscale generator off\\n\"\n self.runcmd([self.edtsaodir+\"/edtwriten\", \"-c\", \"30400000\"]) # ad board #1 gray scale off\n self.runcmd([self.edtsaodir+\"/edtwriten\", \"-c\", \"31400000\"]) # ad board #2 gray scale off\n self.runcmd([self.edtsaodir+\"/edtwriten\", \"-c\", \"32400000\"]) # ad board #3 gray scale off\n self.runcmd([self.edtsaodir+\"/edtwriten\", \"-c\", \"33400000\"]) # ad board #4 gray scale off\n\n # Set the system gain to high\n # Note that this gets over-ridden in ccd_setup.\n self.gain(\"HIGH\")\n\n # Set unidirectional mode\n print \"Setting unidirectional CCD serial shift mode\\n\"\n self.runcmd([self.edtsaodir+\"/edtwriten\", \"-c\", \"43000001\"]) # uni on\n\n # Set split mode on. \"Why on?\" you ask. Beats me.\n print \"Setting CCD serial register shifts to split mode\\n\"\n self.runcmd([self.edtsaodir+\"/edtwriten\", \"-c\", \"41000001\"]) # split on \n\n self.ccd_channels()\n\n print \"Setting default ADC offsets\\n\"\n\n self.ccd_offsets()\n self.Check_Communications()\n print \"16ch_setup Done.\\n\"\n self.master.update()\n return",
"def setup_twitter(self):\n # Setup Twitter connection.\n #self.logprint(\"consumer key/secret:\", self.cfg.get('twitter_consumer_key'), self.cfg.get('twitter_consumer_secret'))\n #self.logprint(\"ouath token/secret:\", self.cfg.get('twitter_oauth_token'), self.cfg.get('twitter_oauth_token_secret'))\n try:\n self.auth = tweepy.OAuthHandler(self.cfg.get('twitter_consumer_key'), self.cfg.get('twitter_consumer_secret'))\n self.auth.set_access_token(self.cfg.get('twitter_oauth_token'), self.cfg.get('twitter_oauth_token_secret'))\n streamtwitter = self.cfg.get_bool('twitter_stream')\n #username = self.cfg.get('twitter_username')\n #password = self.cfg.get('twitter_password')\n except KeyError, ke:\n print \"Couldn't find twitter authentication information in config file:\", ke\n sys.exit(1)\n self.twit = tweepy.API(self.auth)\n\n # Listen to Twitter stream.\n try:\n if streamtwitter:\n self.stream_twitter()\n else:\n self.twitter_loop()\n except KeyboardInterrupt:\n print \"Quitting...\"\n sys.exit(0)",
"def OnosEnvSetup(self, handle):\n self.Gensshkey(handle)\n self.home = self.GetEnvValue(handle, 'HOME')\n self.AddKnownHost(handle, self.OC1, \"karaf\", \"karaf\")\n self.AddKnownHost(handle, self.OC2, \"karaf\", \"karaf\")\n self.AddKnownHost(handle, self.OC3, \"karaf\", \"karaf\")\n self.DownLoadCode(handle,\n 'https://github.com/wuwenbin2/OnosSystemTest.git')\n # self.DownLoadCode(handle, 'https://gerrit.onosproject.org/onos')\n if self.masterusername == 'root':\n filepath = '/root/'\n else:\n filepath = '/home/' + self.masterusername + '/'\n self.OnosRootPathChange(filepath)\n self.CopyOnostoTestbin()\n self.ChangeOnosName(self.agentusername, self.agentpassword)\n self.InstallDefaultSoftware(handle)\n self.SetOnosEnvVar(handle, self.masterpassword, self.agentpassword)",
"async def async_setup(self):\n hass = self.hass\n\n try:\n self.api = await get_controller(\n self.hass, **self.config_entry.data[CONF_CONTROLLER])\n await self.api.initialize()\n\n except CannotConnect:\n raise ConfigEntryNotReady\n\n except Exception: # pylint: disable=broad-except\n LOGGER.error(\n 'Unknown error connecting with UniFi controller.')\n return False\n\n hass.async_create_task(\n hass.config_entries.async_forward_entry_setup(\n self.config_entry, 'switch'))\n\n return True",
"def tetanus(self, request): # type: ignore\n\n tetanus_version = request.param\n # Install Tetanus at the start of each test.\n self._tetanus_lib.install(self._giraffe, tetanus_version, self._echo_port)\n yield\n # Uninstall Tetanus at the end of each test.\n self._tetanus_lib.uninstall(self._giraffe)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Turn Tuya switch on.
|
def turn_on(self, **kwargs):
self._device.set_status(True, self._switch_id)
|
[
"def turn_on(self, **kwargs):\n self.data.switch_on()",
"def turbo_on(self):\n command = 'SET:DEV:TURB' + self._turbo_channel + ':PUMP:SIG:STATE:ON\\r\\n'\n response = self.query_and_receive(command)\n\n if not response:\n raise RuntimeError(\"Enabling of turbo pump unsuccessful.\")",
"def uvSwitch(on=False):\n if type(on) == bool:\n lightswitch = 1 if on == False else 0\n sock = openSocket()\n message = \"ob[2]={};\".format(lightswitch)\n print message\n address = axisAddress(\"X\")\n print address\n sendCommand(message, address, sock)\n sock.close()\n else:\n raise ValueError(\"Invalid 'on' value recieved: {}. Should be 'True' (on) or 'False' (off)\".format(on))",
"def turn_on(self, label):\n self.change_relay_state(self.relay_labels[label], TruckerBoardCommands.ON)",
"def on(\n context: typer.Context,\n relay: str = typer.Argument(..., help='The label or index of the relay to turn on.'),\n):\n control(context, relay=relay, command='turn_on')",
"def turn_on(self, time_s):\n if not self.on:\n self.on = True\n self.ontime_s = time_s",
"async def _wled_turn_on(self) -> None:\n await self.wled.nightlight(on=True)",
"def switch_on_led_talking(rpi_settings, on):\n if rpi_settings:\n if rpi_settings.pin_led_talking:\n if on:\n RpiUtils.switch_pin_to_on(rpi_settings.pin_led_talking)\n else:\n RpiUtils.switch_pin_to_off(rpi_settings.pin_led_talking)",
"def turn_on(self) -> None:\n raise NotImplementedError(\"Device subclass needs to implement this.\")",
"async def _wled_turn_on(self) -> None:\n await self.wled.sync(send=True)",
"def joystickOnOff(self, on):\r\n if on:\r\n self._command(\"J\")\r\n else:\r\n self._command(\"H\")",
"def toggle_led():\n global led\n if led == True:\n led = False\n wiimote.led = 0\n else:\n led = True\n wiimote.led = cwiid.LED1_ON",
"def on(self):\n self.light.turnOn()",
"def toggle_relay():\n global relay\n if relay.value():\n set_relay(\"off\")\n else:\n set_relay(\"on\")\n print(\"Toggled relay\")",
"def turnOnHeat(self,frame,db):\r\n self.heat.actuators[0].turnOn()\r\n frame.heatStateDisplayLabel.config(text=\"On\")\r\n frame.update()\r\n db.commit()",
"async def turn_on(self, ctx, name=None):\n if not await self.get_bridge():\n await ctx.send(\"No IP has been set.\")\n return\n for light in self.lights:\n if name is None or light.name.lower() == name.lower():\n light.on = True",
"def toggle_wifi():\n config.set_wifi(not config.get_wifi())\n config.save_state()",
"def on(self):\n self.transite_light_state(on_off=1)",
"def turnOnSink(self,frame,db):\r\n self.sink.actuators[0].turnOn()\r\n frame.laundryRoomSinkStateDisplayLabel.config(text=\"On\")\r\n frame.update()\r\n db.commit()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Called when the other bot returns false for recieve_move. This is sent to the bot that made the move, telling it to undo the last move it made. If the last move had several jumps, all of them are undone, so the board is in the same state it was in before the move was made.
|
def undo_last_move(self):
raise NotImplementedError()
|
[
"def redo_last_move(self):\n # If there's no undone moves, simply return without doing\n # anything. Otherwise, get the last undone move.\n try:\n last_move = self.undone_moves.pop()\n except IndexError:\n return\n if last_move.is_drop:\n # Drop the piece, if it was a drop\n self.to_add = last_move.piece\n self.drop_piece(last_move.end)\n else:\n # Otherwise, make the move from and to\n self.make_moves(last_move.start, last_move.end, clear_undone=False)\n # Again, explicitly set the turn, just in case\n self.board.current_player = last_move.player_color.other",
"def reverse_move(self, show=False):\n\n last_move = self.moves.pop()\n self.state[last_move[1]] = 0 # Removes last move from board\n self.turn = next(self.player_iterator) # TODO: Only works for 2 player games!\n self.check_if_game_over()",
"def revert_move(self):\n assert self.board[self.pointer] == 0\n\n previous_move = self.history[-1]\n self.history = self.history[:-1]\n\n # Reverse pointer to spot of previous move\n previous_pointer = (previous_move['row'], previous_move['col'])\n current_value = self.board[previous_pointer]\n\n if current_value < 9:\n self.board[previous_pointer] += 1\n self.history.append({\n 'row': previous_move['row'],\n 'col': previous_move['col'],\n 'val': self.board[previous_pointer],\n })\n else:\n self.pointer = previous_pointer\n self.board[self.pointer] = 0\n self.revert_move()",
"def undo(self) -> None:\n if not self.onitama_stack.empty():\n # The pop call here returns a board and a list of styles that we use\n # to revert to the previous state of the game\n board, styles = self.onitama_stack.pop()\n self._board.set_board(board)\n self._board.styles = styles\n # Switch to the previous player's turn\n self.whose_turn = self.other_player(self.whose_turn)",
"def undo_move(self) -> None:\n if self.move_stack_right:\n self.move_stack_right = self.move_stack_right[:-2]\n else:\n self.move_stack_left = self.move_stack_left[:-2]\n self.reset_terminal()",
"def make_move(self, request):\n game = get_by_urlsafe(request.urlsafe_game_key, Game)\n\n if game.game_over:\n return game.to_form('Game already over!')\n guess = request.guess.lower()\n # Check for invalid inputs and raise an exception as necessary\n made_illegal_move = False\n if not guess.isalpha():\n msg = 'Please enter a letter or word.'\n made_illegal_move = True\n elif game.previous_guesses is not None and guess in game.previous_guesses.split(\",\") :\n msg = 'You have already guessed that. Choose again.'\n made_illegal_move = True\n elif len(guess) > len(game.target):\n msg = 'Your guess had too many letters in it.'\n made_illegal_move = True\n\n if made_illegal_move:\n game.history.append({'guess': guess, 'result': msg})\n game.put()\n raise endpoints.BadRequestException(msg)\n\n if guess == game.target:\n msg = 'You guessed correctly! You win!'\n game.current_word_state = game.target\n game.history.append({'guess': guess, 'result': msg})\n game.put()\n game.end_game(True)\n return game.to_form(msg)\n\n if guess in game.target:\n indices = find(game.target, guess)\n new_state = \"\"\n for idx, char in enumerate(game.current_word_state):\n if idx in indices:\n new_state += guess\n else:\n new_state += char\n game.current_word_state = new_state\n msg = \"You guessed correctly!\"\n if game.current_word_state == game.target:\n msg += ' You win!'\n game.history.append({'guess': guess, 'result': msg})\n game.put()\n game.end_game(True)\n return game.to_form(msg)\n else:\n msg = \"You guessed incorrectly.\"\n game.attempts_remaining -= 1\n\n if not game.previous_guesses or len(game.previous_guesses) == 0:\n game.previous_guesses = guess\n else:\n game.previous_guesses += \",\" + guess\n msg += \" Here's the current state of the word: %s\" % game.current_word_state\n if game.attempts_remaining < 1:\n msg += ' Game over!'\n game.history.append({'guess': guess, 'result': msg})\n game.put()\n game.end_game(False)\n return game.to_form(msg)\n else:\n game.history.append({'guess': guess, 'result': msg})\n game.put()\n return game.to_form(msg)",
"def opposite_move(move):\n if move not in opposite_move.move_map:\n return None\n return opposite_move.move_map[move]",
"def undo(self):\n oldstate = self.pop_state()\n if oldstate:\n self.world.mutate(oldstate)\n Publisher().sendMessage(\"broadcast.updated_world\")",
"def undo_move(self, original_from_pos, original_to_pos, captured_piece_id):\n # update the piece's position attribute and its position on the board\n board = self.get_board()\n piece_id = board.get_occupation(original_to_pos)\n current_player = self.get_current_player()\n piece = current_player.get_pieces()[piece_id]\n piece.set_position(original_from_pos)\n\n # if the general was moved, set the board's general_position\n color = current_player.get_color()\n if original_to_pos == board.get_general_position(color):\n board.set_general_position(color, original_from_pos)\n\n # update the moved piece's position on the board\n board.move_piece(original_to_pos, original_from_pos)\n\n # add a captured piece back\n opponent = self.get_opponent()\n if captured_piece_id is not None:\n\n # add it back to the board\n board.set_occupation(captured_piece_id, original_to_pos)\n\n # initialize the piece and add it back to the player\n opponent.add_piece(captured_piece_id, original_to_pos)\n\n # update the pieces and players based on the state of the board after\n # the move is undone\n opponent.update_pieces()\n current_player.update_pieces()\n self.update_generals()",
"def can_player_undo(self):\n return self.player_type == 'human' and self.game.sub_turn == 'move'",
"def unmove(self):\n self.insert(None, self.moves.pop())\n self.legal_moves = self.generate_legal_moves()\n self.x_turn = not self.x_turn",
"def test_undo_resets_game_to_previous(self):\n self.game.history = [' ', ' X ', ' OX ']\n self.game.turn = 'X'\n self.game.undo()\n self.assertEqual(\n self.game.history, [' ', ' X ']\n )\n self.assertEqual(self.game.turn, 'O')",
"def move(roundsAlive, repertoire, historyRounds, historyMoves, historyActs, historyPayoffs, historyDemes, currentDeme,\n canChooseModel, canPlayRefine, multipleDemes):\n\n return 0",
"def oneMoveRep(history, myBoard):\n if myBoard in history:\n return True\n return False",
"def opponentMove(self, move):\n\t\tpass",
"def make_a_move(self, game):\n raise NotImplementedError(\"not implemented in CheckersBot abstract class\")",
"def does_move_violate_ko(self, player, move):\n if not move.is_play:\n return False\n\n next_board = copy.deepcopy(self.board)\n next_board.place_stone(player, move.point)\n next_situation = (player.other, next_board.zobrist_hash())\n return next_situation in self.previous_states",
"def move_neutral(self, valid_moves: list[list], board: list[list], del_coords: [tuple]) -> list[\n list]:\n run = True\n clock = pygame.time.Clock()\n new_board = board\n while run:\n clock.tick(FPS)\n\n for event in pygame.event.get():\n\n if event.type == pygame.QUIT:\n pygame.quit()\n exit()\n\n elif event.type == pygame.MOUSEBUTTONDOWN:\n position = pygame.mouse.get_pos()\n coords = self.calc_row_col(position)\n new_board = self.add_piece(del_coords, coords, board)\n\n if new_board in valid_moves:\n run = False\n else:\n new_board = board\n print('This is not a valid move.')\n\n return new_board",
"def update_move(self, game_data, move_index, move):\n current_data = self.get_data_at_move(game_data, move_index)\n current_data[\"last_move\"] = move\n\n # active piece\n active_piece = current_data[\"board\"][move[\"pos\"][\"to\"]]\n\n # last pawn move\n if active_piece.description == \"pawn\":\n active_piece.first_move = False\n current_data[\"last_pawn_move\"] = move_index[\"move_number\"]\n\n # castling rights\n elif active_piece.description == \"king\":\n current_data[\"castling\"][move_index[\"player_id\"]] = {0: False, 1: False}\n\n elif active_piece.description == \"rook\":\n if (self.dimensions[1] + 1 - active_piece.pos[1]) >= (self.dimensions[1]//2):\n current_data[\"castling\"][move_index[\"player_id\"]][0] = False\n else:\n current_data[\"castling\"][move_index[\"player_id\"]][1] = False\n\n\n # check / checkmate / stalemate\n for player_id in range(self.number_of_players): # for every player\n if self.is_in_check(game_data, move_index, player_id): # check\n current_data[\"check\"][player_id] = True\n else:\n current_data[\"check\"][player_id] = False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Iterates through query args and transforms any one-element lists to single items.
|
def _flatten_query_args(args):
def _make_flat(item):
if not item:
return None
if not isinstance(item, list):
return item
# item -> list
if len(item) == 1:
return item[0] if item[0] else None # Empty string -> None
return [x if x else None for x in item]
Validator.is_instance(dict, args=args)
res = dict()
for key, val in args.items():
res[key] = _make_flat(val)
return res
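
A small usage sketch of the flattening rules above (the Validator call is assumed to be nothing more than a dict type check): empty lists and empty strings collapse to None, one-element lists collapse to their single item, and longer lists are kept with empty strings mapped to None.

# Hypothetical query args as a web framework might hand them over, e.g. from "?q=foo&tag=a&tag=b&empty=":
args = {"q": ["foo"], "tag": ["a", "b"], "empty": [""], "unused": []}
flat = _flatten_query_args(args)
# Expected result per the rules in _make_flat():
# {"q": "foo", "tag": ["a", "b"], "empty": None, "unused": None}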
|
[
"def process_some(self, items):\n process_one = self.process_one\n ret = process_one(self.null)\n for item in items:\n ret.extend(process_one(item))\n return ret",
"def _normalizeargs(sequence, output = None):\n if output is None:\n output = []\n\n cls = sequence.__class__\n if InterfaceClass in cls.__mro__ or Implements in cls.__mro__:\n output.append(sequence)\n else:\n for v in sequence:\n _normalizeargs(v, output)\n\n return output",
"def list2args(args_list, delimiters=(\"{\", \"}\"), **kwargs):\n\n args = []\n for arg in args_list: # Make each element a one-elem list\n get_promotion = promoteSelection(arg, **kwargs)\n if get_promotion is not None:\n args += get_promotion\n else:\n args.append(parseElem(arg, **kwargs))\n\n str_args = \"\"\n if args and args_list != [None]: # Not empty and is not the list [None]\n for arg in args:\n str_args += delimiters[0] + arg + delimiters[1]\n\n return str_args",
"def getAllQueryItemValues(*args, **kwargs):\n \n pass",
"def convert_args(self):\n for elem in self.xml_tree_root.findall(\"arg\"):\n launch_action = self.convert_arg_elem(elem)\n self.launch_actions.append(launch_action)",
"def get_args(self, argset):\n args = []\n kwargs = {}\n for element in argset or []:\n if isinstance(element, dict):\n kwargs.update(element)\n else:\n args.append(element)\n return args, kwargs",
"def multiple(*args: Ex_Inv) -> MultipleResults[Ex_Inv]:\n return MultipleResults(args)",
"def _convert_args( argtypes ):\n\tif not argtypes:\n\t\treturn None\n\t\n\targs = [x.strip() for x in argtypes.split(\",\")]\n\t\n\ttmp = []\n\tfor a in args:\n\t\targ = a.split(\" \")\n\t\tif arg[-1][0] == \"*\":\n\t\t\targ[-1] = \"*\"\t\n\t\telse:\n\t\t\tdel arg[-1]\n\n\t\targ = \" \".join(arg)\t\t\n\t\ttmp.append( _convert_type( arg ) )\n\treturn tmp",
"def xs(name, parser_args, list_args):\n for args, kwargs in list_args:\n if len(set(args) & parser_args) > 0:\n yield args, kwargs\n\n else:\n if 'dest' in kwargs:\n if kwargs['dest'] == name:\n yield args, kwargs",
"def prepare_args_for_datatransfer_list(args: Dict[str, str]) -> Dict[str, str]:\n return GSuiteClient.remove_empty_entities({\n 'customerId': args.get('customer_id'),\n 'maxResults': GSuiteClient.validate_get_int(args.get('max_results'),\n MESSAGES['INTEGER_ERROR'].format('max_results')),\n 'newOwnerUserId': args.get('new_owner_user_id'),\n 'oldOwnerUserId': args.get('old_owner_user_id'),\n 'pageToken': args.get('page_token'),\n 'status': args.get('status'),\n })",
"def arg_to_iter(arg):\n if arg is None:\n return []\n elif (\n hasattr(arg, \"__iter__\")\n and not isinstance(arg, _ITERABLE_SINGLE_VALUES)\n and not is_item(arg)\n ):\n return arg\n else:\n return [arg]",
"def listify(arg):\n return [arg] if not hasattr(arg, '__iter__') else arg",
"def quoteArgs(self, args):\n if type(args[0]) is tuple or type(args[0]) is list:\n args = args[0]\n nargs = []\n for a in args:\n nargs.append(self.escape(a))\n return tuple(nargs)",
"def gaia_multi_query_run(args):\r\n\r\n return gaia_query(*args)",
"def listify(arg):\n if isinstance(arg, (set, tuple)):\n # if it is a set or tuple make it a list\n return list(arg)\n if not isinstance(arg, list):\n return [arg]\n return arg",
"def _prepare_args(self, args, build):\n\n if isinstance(args, list):\n return [self._prepare_args(x, build) for x in args]\n\n if isinstance(args, tuple):\n return tuple(self._prepare_args(x, build) for x in args)\n\n if isinstance(args, dict):\n return dict((k, self._prepare_args(v, build))\n for k, v in args.iteritems())\n\n if isinstance(args, Retval):\n # Get return value for the *pinned* build of that\n # job for the currently running build.\n current_build = execution_context.current_build\n dep_build = current_build.get_dependency_build(args.job_id)\n return dep_build['retval']\n\n return args",
"def batch_item_converter(self) -> list:\n items_list = []\n for item in self.items:\n items_list.append({\n \"id\": item.ID,\n \"name\": item.name,\n })\n\n return items_list",
"def add_entries(self, *args: Iterator, user_objects: bool = False) -> None:\n\n\t\tfor a in args:\n\t\t\tself.add_entry(a, user_object= user_objects)\n\t\treturn None",
"def converting_args(self, args: Tuple[Any, ...]) -> Tuple[_Type, ...]:\n return tuple(map(self.extract_type, args))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
<root> <pkgtype> <pkgspec> [<key>]+ Returns metadata values for the specified package.
|
def metadata(argv):
if (len(argv) < 4):
print >> sys.stderr, "ERROR: insufficient parameters!"
sys.exit(2)
root, pkgtype, pkgspec = argv[0:3]
metakeys = argv[3:]
type_map = {
"ebuild":"porttree",
"binary":"bintree",
"installed":"vartree"}
if pkgtype not in type_map:
print >> sys.stderr, "Unrecognized package type: '%s'" % pkgtype
sys.exit(1)
trees = portage.db
if os.path.realpath(root) == os.path.realpath(portage.settings["ROOT"]):
root = portage.settings["ROOT"] # contains the normalized $ROOT
try:
values = trees[root][type_map[pkgtype]].dbapi.aux_get(
pkgspec, metakeys)
for value in values:
print value
except KeyError:
print >> sys.stderr, "Package not found: '%s'" % pkgspec
sys.exit(1)
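
An illustrative call, assuming a Gentoo host where the portage API is importable; the package atom and keys are examples only, and the atom needs a version because aux_get() expects a full cpv:

# Roughly equivalent to: portageq metadata / installed sys-apps/portage-3.0.30 DESCRIPTION HOMEPAGE
metadata(["/", "installed", "sys-apps/portage-3.0.30", "DESCRIPTION", "HOMEPAGE"])
# Prints one value per requested key, or exits with status 1 if the cpv is not installed.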
|
[
"def _fetch(self, package_name=str):\n package_metadata = self._from_npm_registry(package_name)\n\n # If key words are not found in repository, get it from github.\n if package_metadata and len(package_metadata.get(\"keywords\", [])) == 0 and \\\n len(package_metadata.get(\"repositoryurl\", \"\")) > 0:\n package_metadata[\"keywords\"] = self._from_github(package_metadata[\"repositoryurl\"])\n\n return package_metadata",
"def get_package(self, package_name):\n return package_key(package_name).get()",
"def get_package_data(self) -> dict:\n return self.pack_data",
"def get_package_info(package_name):\n log_helper = logging_helper.logging_helper.Logger()\n log_helper.logger.debug(\"Getting additional package info for %s\" % package_name)\n command = \"smart info \" + package_name\n output = shell_ops.run_command(command)\n description = ''\n version = ''\n if output.count('Name:') > 1:\n # Multiple versions available. Narrow down smart info scope to get accurate info for the current version\n response = shell_ops.run_command(\"smart query --installed \" + package_name + \" --show-format=$version\")\n version = response[response.index('[100%]') + 6:response.index('@')].replace('\\n', '')\n if 'not' in version: # Workaround for \"(not installed)\" case\n version = 'Unknown'\n\n output = output[output.rindex(version):]\n\n if 'Name' in output:\n if output.index('Name') > output.index('Description'):\n # Additional entry after description\n description = output[output.rindex(\"Description:\") + 14: output.index(\"Name\")].replace('\\n', '').strip()\n else:\n description = output[output.rindex(\"Description:\") + 14:].replace('\\n', '').strip()\n else:\n version = output[output.index(\"Version:\") + 9: output.index(\"Priority:\")].replace('\\n', '')\n version = version[:version.index('@')]\n if 'not' in version: # Workaround for \"(not installed)\" case\n version = 'Unknown'\n description = output[output.rindex(\"Description:\") + 14:].replace('\\n', '').strip()\n\n url = output[output.index(\"Reference URLs:\") + 16: output.index(\"Flags:\")].replace('\\n', '')\n my_license = output[output.index(\"License:\") + 9: output.index(\"Installed Size:\")].replace('\\n', '')\n size = output[output.index(\"Installed Size:\") + 16: output.index(\"Reference URLs:\")].replace('\\n', '')\n group = output[output.index(\"Group:\") + 7: output.index(\"License:\")].replace('\\n', '')\n summary = output[output.index(\"Summary:\") + 9: output.index(\"Description:\")].replace('\\r\\n', '')\n\n # escape special JSON charater (\") if any in description and summary\n summary = summary.replace('\"', '\\\\\"')\n description = description.replace('\"', '\\\\\"')\n\n package = {\n 'url': url,\n 'license': my_license,\n 'size': size,\n 'description': description,\n 'summary': summary,\n 'group': group,\n 'version': version\n }\n log_helper.logger.debug(\"Returning package info: \" + str(package))\n return json.dumps(package)",
"def test_metadata(self):\n with open('tests/PackageXml/test.xml', 'r') as test_file:\n test_xml = test_file.read()\n ret = PackageMetadata(test_xml)\n self.assertEqual(ret.upstream_email, 'someone@example.com')\n self.assertEqual(ret.upstream_name, 'Someone')\n self.assertEqual(ret.description, 'This is my package\\'s description.')\n self.assertEqual(ret.longdescription, 'This is my package\\'s description.')\n self.assertEqual(ret.homepage, 'http://wiki.ros.org/my_package')\n self.assertEqual(ret.build_type, 'my_builder')",
"def get_package_info(pkg_name):\n global package_info\n if pkg_name in package_info:\n return package_info.get(pkg_name)\n else:\n try:\n yaml_stream = check_output(['apt-cache','show',pkg_name])\n except:\n print \"Unable to find info for package: '%s'\" % pkg_name\n package_info[pkg_name] = {}\n return {}\n d = Deb822(yaml_stream)\n package_info[pkg_name] = d\n return d",
"def metadata(self) -> dict[str, Any]:",
"def get_packages(self):\n #messy\n return [self.data[package]['name'] for package in range(len(self.data))]",
"def get_package_metadata(dependency):\n\n version_symbol_index = dependency.rfind('@')\n name_index = dependency.find('/') + 1\n dependency_name = dependency[name_index:version_symbol_index]\n\n entry = dict()\n\n entry['name'] = dependency\n\n result = json.loads(pypistats.recent(dependency_name, \"month\", format=\"json\"))\n print(result)\n entry['downloads_last_month'] = result['data']['last_month']\n request_url = f'{PYPI_DEPENDENCY_META_URL}{dependency_name}/{dependency[version_symbol_index+1:]}/json'\n json_result = requests.get(request_url)\n print(request_url)\n print(json_result)\n return entry",
"def get_metadata(self):\n return meta.get_metadata(self.ast)",
"def getPackageInfo(package_pattern, package_index):\n\n # Parse for package info\n matchs = re.search(package_pattern, package_index)\n package_info = matchs.group(0)\n\n return package_info",
"def get_metadata(self, filename):\n f, metadata = self.api_client.get_file_and_metadata(\n self.current_path + \"/\" + filename)\n return metadata",
"def get_metadata(self):\r\n return self.manager.get_metadata(self, node=self)",
"def metadata(self):\r\n metadataurlpath = 'content/items/' + self.itemid + '/info/metadata/metadata.xml'\r\n try:\r\n return self._portal.con.get(metadataurlpath, try_json=False)\r\n\r\n # If the get operation returns a 400 HTTP Error then the metadata simply\r\n # doesn't exist, let's just return None in this case\r\n except HTTPError as e:\r\n if e.code == 400 or e.code == 500:\r\n return None\r\n else:\r\n raise e",
"def get_metadata(C_ROOT, GGD_INFO_DIR, METADATA_FILE):\n\n try:\n metadata_dict = load_json(os.path.join(C_ROOT, GGD_INFO_DIR, METADATA_FILE))\n except IOError as e:\n print(str(e))\n sys.exit(\"\\n:ggd:list: !!ERROR!! Unable to load the local metadata\")\n\n return metadata_dict",
"def package_view(self):\n package_name = self.request.matchdict.get('package_name', None)\n package_id = self.request.matchdict.get('id', None)\n\n packages = Package.get_packages_by_name(package_name)\n requires = None\n other_versions = False\n\n if package_id:\n package = packages.filter(Package.id == package_id).first()\n if package and package.requires:\n requires = package.requires\n else:\n package = None\n\n if packages.count() > 1:\n other_versions = True\n\n return {'packages': packages.all(), 'package': package,\n 'package_name': package_name, 'main': self.main,\n 'other_versions': other_versions,\n 'requires': requires}",
"def get_all_metadata(self):\n metadata = {}\n for key in self.METADATA_KEYS:\n try:\n val = self.get_metadata(key)\n except MissingMetadataError:\n pass\n else:\n metadata[key] = val\n\n return metadata",
"def metadata(self):\n return metadata_for_forecasts()",
"def package_names(self):\n return {package.header.name for package in self.packages}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
<root> [<filename>]+ Given a list of files, print the packages that own the files and which files belong to each package. Files owned by a package are listed on the lines below it, indented by a single tab character (\\t). All file paths must start with <root>. Returns 1 if no owners could be found, and 0 otherwise.
|
def owners(argv):
if len(argv) < 2:
sys.stderr.write("ERROR: insufficient parameters!\n")
sys.stderr.flush()
return 2
from portage import catsplit, dblink
settings = portage.settings
root = settings["ROOT"]
vardb = portage.db[root]["vartree"].dbapi
cwd = None
try:
cwd = os.getcwd()
except OSError:
pass
files = []
for f in argv[1:]:
f = portage.normalize_path(f)
if not f.startswith(os.path.sep):
if cwd is None:
sys.stderr.write("ERROR: cwd does not exist!\n")
sys.stderr.flush()
return 2
f = os.path.join(cwd, f)
f = portage.normalize_path(f)
if not f.startswith(root):
sys.stderr.write("ERROR: file paths must begin with <root>!\n")
sys.stderr.flush()
return 2
files.append(f[len(root):])
found_owner = False
for cpv in vardb.cpv_all():
cat, pkg = catsplit(cpv)
mylink = dblink(cat, pkg, root, settings, vartree=vardb.vartree)
myfiles = []
for f in files:
if mylink.isowner(f, root):
myfiles.append(f)
if myfiles:
found_owner = True
sys.stdout.write("%s\n" % cpv)
for f in myfiles:
sys.stdout.write("\t%s\n" % \
os.path.join(root, f.lstrip(os.path.sep)))
sys.stdout.flush()
if not found_owner:
sys.stderr.write("None of the installed packages claim the file(s).\n")
sys.stderr.flush()
return 1
return 0
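
A sketch of how owners() would be driven, again assuming portage is importable on a Gentoo host; the path is an example and must resolve under the configured ROOT:

# Ask which installed package owns /bin/bash; on success prints something like:
#   app-shells/bash-5.1_p16
#   <tab>/bin/bash
# and returns 0; returns 1 with a message on stderr if no installed package claims the file.
exit_code = owners(["/", "/bin/bash"])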
|
[
"def display_result(file_paths):\n for paths in file_paths:\n print(paths, stat.filemode(os.stat(paths).st_mode))\n print(f'Found {len(file_paths)} file(s).')",
"def check_owners(self, dir_name, owned_directories, error_messages):\n found = False\n for owned in owned_directories:\n if owned.startswith(dir_name) or dir_name.startswith(owned):\n found = True\n break\n if not found:\n error_messages.append(\n \"New directory %s appears to not have owners in CODEOWNERS\" % dir_name)",
"def cmd_installed_files(pkgname, execs_only=False, short=False):\n status = noop if short else print_status\n\n try:\n package = cache_main[pkgname]\n except KeyError:\n print_missing_pkg(pkgname)\n return 1\n\n if not pkg_install_state(package):\n print_err(\n '\\nThis package is not installed: {}'.format(\n C(package.name, 'blue')\n ),\n '\\nCan\\'t get installed files for ',\n 'uninstalled packages.',\n sep=''\n )\n return 1\n\n if not hasattr(package, 'installed_files'):\n print_err(''.join((\n '\\nUnable to get installed files for {}',\n ', apt/apt_pkg module may be out of date.'\n )).format(package.name))\n return 1\n\n files = sorted(fname for fname in package.installed_files if fname)\n if execs_only:\n # Show executables only (/bin directory files.)\n # Returns true for a path if it looks like an executable.\n # is_exec = lambda s: ('/bin' in s) and (not s.endswith('/bin'))\n files = [fname for fname in files if is_executable(fname)]\n label = 'executable' if len(files) == 1 else 'executables'\n else:\n # Show installed files.\n label = 'installed file' if len(files) == 1 else 'installed files'\n\n if files:\n status('Found {} {} for {}:'.format(len(files), label, package.name))\n if short:\n print('\\n'.join(sorted(files)))\n else:\n print(' {}\\n'.format('\\n '.join(sorted(files))))\n return 0\n\n # No files found (possibly after trimming to only executables)\n print_status_err('Found 0 {} for: {}'.format(label, package.name))\n return 1",
"def list_owners(self, changed_files):\n email_map = collections.defaultdict(set)\n for relpath in changed_files:\n absolute_path = self.finder.path_from_chromium_base(relpath)\n if not absolute_path.startswith(self.finder.layout_tests_dir()):\n continue\n owners_file, owners = self.find_and_extract_owners(self.filesystem.dirname(relpath))\n if not owners_file:\n continue\n owned_directory = self.filesystem.dirname(owners_file)\n owned_directory_relpath = self.filesystem.relpath(owned_directory, self.finder.layout_tests_dir())\n email_map[tuple(owners)].add(owned_directory_relpath)\n return {owners: sorted(owned_directories) for owners, owned_directories in email_map.iteritems()}",
"def _package_in_repo(self, package_name, version):\n matches = self._find_file(package_name + '_' + version + '.dsc',\n self.repository)\n return len(matches)",
"def num_27(): \n def get_dirlist(path):\n \"\"\"\n Return a sorted list of all entries in path.\n This returns just the names, not the full path to the names.\n \"\"\"\n dirlist = os.listdir(path)\n dirlist.sort()\n return dirlist\n\n def print_files(path, prefix = \"\"):\n \"\"\" Print recursive listing of contents of path \"\"\"\n if prefix == \"\": # Detect outermost call, print a heading\n print(\"Folder listing for\", path)\n prefix = \"| \"\n dirlist = get_dirlist(path)\n for f in dirlist:\n print(prefix + \"- \" + f) # Print the line\n fullname = os.path.join(path, f) # Turn name into full pathname\n if os.path.isdir(fullname): # If a directory, recurse.\n print_files(fullname, prefix + \"| \")\n return None\n \"\"\"dir check\"\"\"\n #path = os.getcwd()\n path = '/private/var/mobile/Containers/Shared/AppGroup/A9DDA80F-9432-45DA-B931-2E9386579AE6/Pythonista3/Documents'\n #path = '/private/var/mobile/Containers/Shared/AppGroup/A9DDA80F-9432-45DA-B931-2E9386579AE6'\n\n print_files(path)\n return None #dirlist",
"def cmd_contains_file(name, shortnamesonly=False):\n\n try:\n repat = re.compile(name)\n except Exception as ex:\n print_err('\\nInvalid search term!: {}\\n{}'.format(name, ex))\n return 1\n\n print_status(\n 'Looking for packages by file pattern',\n value=repat.pattern,\n )\n\n # Setup filename methods (long or short, removes an 'if' from the loop.)\n def getfilenameshort(s):\n return os.path.split(s)[-1]\n # Pick filename retrieval function..\n filenamefunc = getfilenameshort if shortnamesonly else str\n\n # Iterate all packages...\n totalpkgs = 0\n totalfiles = 0\n\n for pkgname in cache_main.keys():\n pkg = cache_main[pkgname]\n matchingfiles = []\n if not pkg_install_state(pkg):\n continue\n if not hasattr(pkg, 'installed_files'):\n print_err(\n '\\n'.join((\n '\\nUnable to retrieve installed files for {},',\n 'apt/apt_pkg may be out of date!'\n )).format(pkgname)\n )\n return 1\n\n for installedfile in (pkg.installed_files or []):\n shortname = filenamefunc(installedfile)\n rematch = repat.search(shortname)\n if rematch:\n # Save match for report,\n # (report when we're finished with this package.)\n matchingfiles.append(installedfile)\n\n # Report any matches.\n if matchingfiles:\n totalpkgs += 1\n totalfiles += len(matchingfiles)\n print(pkg_format(pkg, no_desc=True, no_marker=True))\n print(' {}'.format('\\n '.join(matchingfiles)))\n\n pluralfiles = 'file' if totalfiles == 1 else 'files'\n pluralpkgs = 'package.' if totalpkgs == 1 else 'packages.'\n print_status(\n '\\nFound',\n C(totalfiles, fore='blue', style='bright'),\n pluralfiles,\n 'in',\n C(totalpkgs, fore='blue', style='bright'),\n pluralpkgs,\n )\n return 0",
"def package_report(root_packages: List[str]):\n root_packages.sort(reverse=True)\n root_packages_list = []\n for m in pkg_resources.working_set:\n if m.project_name.lower() in root_packages:\n root_packages_list.append([m.project_name, m.version])\n \n display(pd.DataFrame(\n root_packages_list,\n columns=[\"package\", \"version\"]\n ).set_index(\"package\").transpose())",
"def _count_files(p, *preds):\n return sum(1 for f in os.listdir(p)\n if os.path.isfile(f) and all(map(lambda p: p(f), preds)))",
"def check_file_paths(self):\n if self.version != OUTDATED_WACZ:\n package_files = [item[\"path\"] for item in self.datapackage[\"resources\"]]\n for filepath in pathlib.Path(self.dir.name).glob(\"**/*.*\"):\n filename = os.path.basename(filepath)\n if (\n filename != \"datapackage.json\"\n and filename != \"datapackage-digest.json\"\n ):\n file = str(filepath).split(\"/\")[-2:]\n file = \"/\".join(file)\n if file not in package_files:\n print(\"file %s is not listed in the datapackage\" % file)\n return False\n return True",
"def countCodebookEntries(self):\n\n cb_dir = self.getCurrentCodebook()[1]\n\n possible_entries = os.listdir(cb_dir)\n\n total_entries = 0\n for path in possible_entries:\n if os.path.exists('/'.join([cb_dir, path, 'description.txt'])):\n total_entries += 1\n\n return total_entries",
"def cmd_locate(pkgnames, only_existing=False, short=False):\n existing = 0\n checked = 0\n for pname in pkgnames:\n pname = pname.lower().strip()\n # Use Package for existing, packagename for missing.\n pkg = cache_main.get(pname, pname)\n if pkg != pname:\n existing += 1\n elif only_existing:\n continue\n print(pkg_format(\n pkg,\n color_missing=True,\n no_marker=short,\n no_desc=short\n ))\n\n checked += 1\n\n plural = 'package' if existing == 1 else 'packages'\n print_status('\\nFound {} of {} {}.'.format(existing, checked, plural))\n return 0 if (checked > 0) and (existing == checked) else 1",
"def find_duplicates(src):\n files = [files for _, _, files in os.walk(src)]\n iter_files = flat_list(files)\n dupes = [j for i, j in enumerate(iter_files) if j in iter_files[:i]]\n print(dupes)\n return dupes",
"def find_and_extract_owners(self, start_directory):\n # Absolute paths do not work with path_from_chromium_base (os.path.join).\n assert not self.filesystem.isabs(start_directory)\n directory = self.finder.path_from_chromium_base(start_directory)\n external_root = self.finder.path_from_layout_tests('external')\n # Changes to both LayoutTests/TestExpectations and the entire\n # LayoutTests/FlagExpectations/ directory should be skipped and not\n # raise an assertion.\n if directory == self.finder.layout_tests_dir() or \\\n directory.startswith(self.finder.path_from_layout_tests('FlagExpectations')):\n return None, None\n assert directory.startswith(external_root), '%s must start with %s' % (\n directory, external_root)\n while directory != external_root:\n owners_file = self.filesystem.join(directory, 'OWNERS')\n if self.filesystem.isfile(self.finder.path_from_chromium_base(owners_file)):\n owners = self.extract_owners(owners_file)\n if owners:\n return owners_file, owners\n directory = self.filesystem.dirname(directory)\n return None, None",
"def check(self):\n table = []\n package = \"Package\"\n installed = \"Installed\"\n released = \"Released\"\n match = \"Match\"\n s = f'{package:>12} | {installed:>15} | {released:>15} | {match:>5}'\n table.append(s)\n table.append(\"-\"*len(s))\n for package in self.installed:\n installed = self.installed[package]\n released = self.released[package]\n match = installed == released\n s = f'{package:>12} | {installed:>15} | {released:>15} | {match:>5}'\n table.append(s)\n print(\"\\n\".join(table))",
"def is_file_in_list(paths: list, file_name: str, prompt: str) -> bool:\n for path in paths:\n result = os.path.commonpath([path, file_name]).replace(os.sep, \"/\")\n if result == path:\n logger.debug(\n '\".%s%s\" is %s as specified in the domain \".%s%s\"',\n os.sep,\n file_name,\n prompt,\n os.sep,\n path,\n )\n return True\n return False",
"def check_pool_files(log, hosts, uuid):\n status = True\n log.info(\"Checking for pool data on %s\", hosts)\n pool_files = [uuid, \"superblock\"]\n for filename in [\"/mnt/daos/{}\".format(item) for item in pool_files]:\n result = check_file_exists(hosts, filename, sudo=True)\n if not result[0]:\n log.error(\"%s: %s not found\", result[1], filename)\n status = False\n return status",
"def test_local_file_listing(capsys):\n\t\n\t# Display the local files; we'll test the output against known values.\n\tdisplay.display_local_files()\n\n\t# Save the captured stdout output so we can check against it\n\t# multiple times.\n\toutput = capsys.readouterr().out\n\n\t# List of files to search for. We'll use one file and one folder\n\t# that should definitely be there.\n\tfiles_to_find = [\"README.md\", \"app\"]\n\n\t# Check for each file in the output.\n\tfor file in files_to_find:\n\t\tassert file in output",
"def count_authors(file):\n\tauthor_list = []\n\n\tfor mss in file[\"single_sale\"]:\n\t\tauthor = mss[\"author\"]\n\t\tif author is not None:\n\t\t\tauthor = author.lower().capitalize()\n\t\t\tauthor_list.append(author)\n\n\tfor mss in file[\"multiple_sales\"]:\n\t\tauthor = mss[\"mss\"][0][\"author\"]\n\t\tif author is not None:\n\t\t\tauthor = author.lower().capitalize()\n\t\t\tauthor_list.append(author)\n\n\tcount = Counter(author_list)\n\n\treturn count"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns all best_visible packages (without .ebuild).
|
def all_best_visible(argv):
if (len(argv) < 1):
print "ERROR: insufficient parameters!"
#print portage.db[argv[0]]["porttree"].dbapi.cp_all()
for pkg in portage.db[argv[0]]["porttree"].dbapi.cp_all():
mybest=portage.best(portage.db[argv[0]]["porttree"].dbapi.match(pkg))
if mybest:
print mybest
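
For completeness, a sketch of how this one would be invoked (it walks every package in the tree, so expect it to be slow on a real portage install):

all_best_visible(["/"])   # prints one best-visible cpv per line, e.g. sys-apps/portage-3.0.30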
|
[
"def get_installedpackages():\n\n # TODO finish me\n\n return []",
"def get_local_packages():\n sys.stdout = mystdout = StringIO()\n pip.main(['freeze', '-l'])\n sys.stdout = sys.__stdout__\n \n pkgs = mystdout.getvalue().split('\\n')\n return [p.split('==') for p in pkgs]",
"def all_packages(self):\n return self.packages | self.depends | self.makedepends",
"def available_packages() -> List[str]:\n return list(algorithm_functions.keys())",
"def get_packages(self) -> list:\r\n return os.listdir(f\"{self.path}/uniflash-packages\")",
"def list_pkgbuilds():\n return glob('*/PKGBUILD')",
"def list_packages(self):\n return list(self.iter_packages())",
"def package_nevras(self):\n return {package.nevra for package in self.packages}",
"def find_packages():\n excludes = ['deathrow', 'quarantine']\n packages = []\n for directory, subdirs, files in os.walk(\"IPython\"):\n package = directory.replace(os.path.sep, \".\")\n if any(package.startswith(\"IPython.\" + exc) for exc in excludes):\n # package is to be excluded (e.g. deathrow)\n continue\n if '__init__.py' not in files:\n # not a package\n continue\n packages.append(package)\n return packages",
"def runtime_packages(self):\n return self.packages | self.depends",
"def get_modules(self):\n test_repo = import_module(self.test_repo_name)\n prefix = \"{0}.\".format(test_repo.__name__)\n product_path = \"{0}{1}\".format(prefix, self.product)\n modnames = []\n for importer, modname, is_pkg in pkgutil.walk_packages(\n path=test_repo.__path__, prefix=prefix,\n onerror=lambda x: None):\n if not is_pkg and modname.startswith(product_path):\n if (not self.module_regex or\n self.module_regex in modname.rsplit(\".\", 1)[1]):\n modnames.append(modname)\n\n filter_mods = []\n for modname in modnames:\n add_package = not bool(self.packages)\n for package in self.packages:\n if package in modname.rsplit(\".\", 1)[0]:\n add_package = True\n break\n if add_package:\n filter_mods.append(modname)\n filter_mods.sort()\n return filter_mods",
"def vulnerable_to(self):\n return self.vulnerable_packages.all()",
"def get_versions():\n return [version for version in get_version_list() if has_package(version)]",
"def get_available_software():\n return Config.package_list",
"def find_packages(where='.', exclude=(), invalidate_cache=False):\n\n if exclude:\n warnings.warn(\n \"Use of the exclude parameter is no longer supported since it does \"\n \"not work as expected. Use add_exclude_packages instead. Note that \"\n \"it must be called prior to any other calls from setup helpers.\",\n AstropyDeprecationWarning)\n\n # Calling add_exclude_packages after this point will have no effect\n _module_state['excludes_too_late'] = True\n\n if not invalidate_cache and _module_state['package_cache'] is not None:\n return _module_state['package_cache']\n\n packages = _find_packages(\n where=where, exclude=list(_module_state['exclude_packages']))\n _module_state['package_cache'] = packages\n\n return packages",
"def resolved_packages(self):\n return self.resolved_packages_",
"def _get_pydrake_modules():\n result = []\n worklist = [\"pydrake\"]\n while worklist:\n current = worklist.pop(0)\n result.append(current)\n for sub in _get_submodules(current):\n worklist.append(sub)\n return sorted(result)",
"def _get_standard_modules():\n\n # the frozen application is not meant to create GUIs or to add\n # support for building and installing Python modules\n ignore_list = ['__main__', 'distutils', 'ensurepip', 'idlelib', 'lib2to3'\n 'test', 'tkinter', 'turtle']\n\n # some modules are platform specific and got a\n # RecursionError: maximum recursion depth exceeded\n # when running this script with PyInstaller 3.3 installed\n if loadlib.IS_WINDOWS:\n os_ignore_list = ['(Unix)', '(Linux)', '(Linux, FreeBSD)']\n elif loadlib.IS_LINUX:\n os_ignore_list = ['(Windows)']\n elif loadlib.IS_MAC:\n os_ignore_list = ['(Windows)', '(Linux)', '(Linux, FreeBSD)']\n else:\n os_ignore_list = []\n\n modules = []\n url = 'https://docs.python.org/{0}.{1}/py-modindex.html'.format(*sys.version_info)\n for s in urlopen(url).read().decode().split('#module-')[1:]:\n m = s.split('\"><code')\n add_module = True\n for x in os_ignore_list:\n if x in m[1]:\n ignore_list.append(m[0])\n add_module = False\n break\n if add_module:\n modules.append(m[0])\n\n included_modules, excluded_modules = [], []\n for module in modules:\n include_module = True\n for mod in ignore_list:\n if module.startswith(mod):\n excluded_modules.extend(['--exclude-module', module])\n include_module = False\n break\n if include_module:\n included_modules.extend(['--hidden-import', module])\n return included_modules + excluded_modules",
"def findSubmodules(package) :\n ret = []\n for importer, modname, ispkg in pkgutil.iter_modules(package.__path__):\n ret.append(modname)\n return ret"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the mirrors set to use in the portage configuration.
|
def gentoo_mirrors(argv):
print portage.settings["GENTOO_MIRRORS"]
|
[
"def get_mirrors(self):\r\n return [mirror if isinstance(mirror, type) else mirror\r\n for mirror in self._mirrors]",
"def ListMirrorPlanes(self):\n lde = self.TheSystem.LDE\n nSurf = lde.NumberOfSurfaces\n surfList = []\n for n in range(0,nSurf):\n surf = lde.GetSurfaceAt(n)\n if surf.Material == 'MIRROR':\n surfList.append(n)\n return(surfList)",
"def read(self):\n p = compile('Server = {url}\\n')\n with open(self.path) as f:\n for line in f:\n r = p.parse(line)\n if r:\n self.mirrors.append(r.named['url'])\n return self.mirrors",
"def get_all_remotes():\n if not in_repo:\n return None\n\n config = configparser.ConfigParser()\n config.read(find_repo_toplevel(\".\") / \".git/config\")\n\n remotes = {\n x.removeprefix('remote \"').removesuffix('\"'): {\n \"url\": config[x][\"url\"],\n \"name\": x.removeprefix('remote \"').removesuffix('\"'),\n }\n for x in config.sections()\n if x.startswith(\"remote \")\n }\n\n return remotes",
"def get_mirrors_pinged(mirrors, processes=8):\n pool = multiprocessing.Pool(processes=processes)\n return pool.map(get_ping, mirrors)",
"def dp_qtree_snapmirror_destinations(self):\n return self._dp_qtree_snapmirror_destinations",
"def get_remotes(self):\n remotes = set()\n for line in self._do(['remote', '-v'], as_lines=True):\n parts = line.split('\\t')\n remotes.add(Remote(parts[0], parts[1]))\n return remotes",
"def get_mirror_destinations(self):\n for rule in self.rules:\n if \"actions\" not in rule or \"mirror\" not in rule[\"actions\"]:\n continue\n yield rule[\"actions\"][\"mirror\"]",
"def get_proxies(self):\n ins = []\n outs = []\n for iname in self._config_items:\n val = self._config_settings[iname]\n if isinstance(val, ProxyInputFile):\n ins.append(val)\n elif isinstance(val, ProxyOutputFile):\n outs.append(val)\n return ins, outs",
"def repositories(self):\n return {repo for repo in self.app.repositories.get_all() if repo.host == self}",
"def _GetRemotes(self):\n if not self._remotes:\n exit_code, output, _ = self.RunCommand('git remote -v')\n if exit_code == 0:\n self._remotes = list(filter(None, output.split('\\n')))\n\n return self._remotes",
"def __get_configs():\n configs = {}\n for entry_point in pkg_resources.iter_entry_points(\"matlab_desktop_proxy_configs\"):\n configs[entry_point.name] = entry_point.load()\n\n return configs",
"def NETRemoteList(self):\n Rclone.list_remotes_in_vim_buffer()",
"def get_datacenters_list():\n return util.get(abs_link=False)",
"def _configs(self):\n return self.dm.configs",
"def urls():\n projects = ccmenu.preferences.read().get(\"Projects\",[])\n return list(sorted(map(lambda p:p[\"serverUrl\"],projects)))",
"def networks(self) -> pulumi.Input[List[pulumi.Input['ManagedZonePrivateVisibilityConfigNetworkArgs']]]:\n return pulumi.get(self, \"networks\")",
"def get_pool(self, **kwargs) -> type(set):\n anonymity = kwargs.get('anonymity', 'elite proxy').upper()\n https = kwargs.get('https', 'yes')\n proxy_pool = set()\n # Filter proxy pool as per anonymity or https requirements\n filtered = self.data_frame[\n (self.data_frame['anonymity'] == anonymity)\n & (self.data_frame['https'] == https)\n ]\n for ip, port in zip(filtered['ip'], filtered['port']):\n proxy_pool.add(f\"{ip}:{port}\")\n return proxy_pool",
"def all_hosts():\n return set(itertools.chain(*settings.ROLES.values()))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the PORTDIR path.
|
def portdir(argv):
print portage.settings["PORTDIR"]
|
[
"def configDir(self):\n p = os.path.dirname(self.cctDir())\n return p",
"def make_host_port_path(uds_path, port):\n return \"{}_{}\".format(uds_path, port)",
"def get_port(self) -> str:\n return self.__serial.port",
"def get_file_name(self, port):\n \n port_file_name = \"%s_%s_%d\" %(self.file_prefix, self.system_manager.cur_user, port )\n return os.path.join(self.working_dir, port_file_name)",
"def get_datadir_from_port(port: int) -> str:\n section = get_section_from_port(port)\n if section is None:\n return \"/srv/sqldata\"\n else:\n return \"/srv/sqldata.\" + section",
"def get_url_directory(self):\n \n # get the directory path of the url\n fulldom = self.get_full_domain()\n urldir = fulldom\n\n if self.dirpath:\n newpath = \"\".join((self.URLSEP, \"\".join([ x+'/' for x in self.dirpath])))\n urldir = \"\".join((fulldom, newpath))\n\n return urldir",
"def cipDir(self):\n if self.isMaster:\n p = os.path.join(self.configDir(), \"cip\")\n return p\n else:\n p = os.path.join(os.path.abspath(os.path.join(self.filePath(), \"..\", \"..\", \"..\", \"..\", \"..\")), \"cip\")\n return p",
"def getBuildDir(self):\n default = 'build'\n pathstr = self.getCustom('Build', 'builddir', default)\n pathstr = self._getAbsPath(pathstr)\n\n return pathstr",
"def host_dir(self):\n\n return self._sysroot.host_dir",
"def output_dir(self, typename):\n setting = \"{}_DIR\".format(typename.upper())\n if setting in os.environ:\n return os.environ[setting]\n else:\n return \".\"",
"def config_dir(self):\n return self.client.fldigi.config_dir()",
"def dir_name(path):\n return os.path.dirname(path)",
"def base_dir(self):\n return self.get_base_dir(self.out_dir, self.experiment_id, self.run_id)",
"def get_project_dir():\n current_dir = os.path.dirname(os.path.realpath(__file__))\n current_dir = os.path.dirname(current_dir)\n current_dir = os.path.dirname(current_dir)\n # current_dir = os.path.dirname(current_dir)\n # current_dir = os.path.dirname(current_dir)\n return current_dir",
"def get_deploy_dir() -> str:\n deploydir = local(\"pwd\", capture=True)\n return deploydir",
"def port_id(self) -> str:\n return self._port_id",
"def dir_logs(self):\n d = self.dir_dettype()\n return os.path.join(d, self.dirname_log)",
"def get_supervisor_conf_dir(self):\n base_path = app.config.get(\"SUPERVISOR_CONFIG_DIR\")\n base_path = abspath(base_path)\n\n return join_path(base_path, \"programs\", self.username)",
"def database_path(self):\n return DIRS['MOTOR_DATA_DIR']"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the CONFIG_PROTECT_MASK paths.
|
def config_protect_mask(argv):
print portage.settings["CONFIG_PROTECT_MASK"]
|
[
"def ignored_paths_patterns(self) -> ConfigNodePropertyArray:\n return self._ignored_paths_patterns",
"def _get_preserved_paths(self, root_dir=None):\n root_dir = root_dir if root_dir else self.processed_dataset_dir\n preserved_paths = _glob_multiple(_list_of_strings(self.config.preserve_paths), root_dir=root_dir)\n return [os.path.relpath(p, start=root_dir) for p in preserved_paths]",
"def get_masks(target):\n path = STYLE_MASK_PATH if target else CONTENT_MASK_PATH\n masks = [f for f in os.listdir(path) if is_jpg_mask(f)]\n return masks",
"def GetRequiredSysPaths(self):\n reqSysPaths = []\n for mod in [p3d]:\n modPath = os.path.dirname(mod.__file__)\n modLoc = os.path.dirname(modPath).replace('\\\\', '/')\n reqSysPaths.append(modLoc)\n \n return reqSysPaths",
"def get_config_files(self):\n if package.backend.FORMAT == \"rpm\":\n return [\"sysconfig/clamd.amavisd\", \"tmpfiles.d/clamd.amavisd.conf\"]\n return []",
"def get_data_paths_list(image_folder, mask_folder):\n \n print(\"**** in helper function ***\")\n print(\"Image folder: \",image_folder)\n print(\"Mask folder: \",mask_folder)\n \n image_paths = [os.path.join(image_folder, x) for x in sorted(os.listdir(\n image_folder)) if x.endswith(\".jpg\")]\n mask_paths = [os.path.join(mask_folder, x) for x in sorted(os.listdir(\n mask_folder)) if x.endswith(\".png\")]\n \n \n return image_paths, mask_paths",
"def getPolicyMask(self):\n return self.policy_mask",
"def _windows_roots() -> typing.List[str]:\n from ctypes import windll\n import string\n drives = []\n bitmask = windll.kernel32.GetLogicalDrives()\n for letter in string.ascii_lowercase:\n if bitmask & 1:\n drives.append(letter)\n bitmask >>= 1\n return drives",
"def get_paths(self, name):\n info = self.get_module_info(name)\n if info:\n return info.get(constants.MODULE_PATH, [])\n return []",
"def getSystemDirs(permissions):\r\n return permissions.get(\"system-dirs\",[])",
"def object_storage_access_patterns(self):\n return list(self._unit.received[\"object-storage-access-patterns\"] or [])",
"def delete_path_regexps(self) -> ConfigNodePropertyArray:\n return self._delete_path_regexps",
"def _default_masks_root(self) -> str:\n concept_hash = '{}_rad{:.3}' \\\n .format('-'.join(sorted(self.keypoint_names)), self.pt_radius)\n masks_basename = \"{img_base}_{concept_hash}\".format(\n img_base=os.path.basename(self.dataset_root),\n concept_hash=concept_hash)\n masks_dirname = os.path.join(\n os.path.dirname(os.path.dirname(self.dataset_root)),\n \"masks\"\n )\n return os.path.join(masks_dirname, masks_basename)",
"def get_paths(spec: dict) -> typing.List[str]:\n return [str(k) for k in spec['paths'].keys()]",
"def _get_files(self):\n\n glob_path = os.path.join(self.path, self.mask)\n return glob.glob(glob_path)",
"def available_managed_paths_for_site_creation(self):\n return self.properties.get(\"availableManagedPathsForSiteCreation\", StringCollection())",
"def _get_path_sect_keys(mapping, keys=[PATH_KEY]):\n return [k for k, v in mapping.items() if bool(set(keys) & set(mapping[k]))]",
"def getFiles(path, mask):\n return [x.name for x in pathlib.Path(path).glob(mask)]",
"def enabled_in_paths(self):\n names_or_paths = self.value or []\n in_paths = set()\n for name_or_path in names_or_paths:\n in_path = self._controller.compose_input_file_path(name_or_path)\n if os.path.exists(in_path):\n in_paths.add(in_path)\n else:\n in_paths.add(name_or_path)\n return in_paths"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the PORTDIR_OVERLAY path.
|
def portdir_overlay(argv):
print portage.settings["PORTDIR_OVERLAY"]
|
[
"def portal_path(): # pragma: no cover",
"def get_overlay_template_dir():\n return DEFAULT_OVERLAY_TEMPLATE_DIR",
"def portdir(argv):\n\tprint portage.settings[\"PORTDIR\"]",
"def get_dashboard_path(self):\n db_file = pkg_resources.resource_filename(\n 'emuvim.dashboard', \"index.html\")\n db_dir = path.dirname(db_file)\n logging.info(\"[DB] Serving emulator dashboard from: {} and {}\"\n .format(db_dir, db_file))\n return db_dir, db_file",
"def get_jsonrpc_socket_path(data_dir: Path) -> Path:\n return Path(os.environ.get(\n 'HELIOS_JSONRPC_IPC',\n data_dir / JSONRPC_SOCKET_FILENAME,\n ))",
"def dir_panel(self, panel_id):\n return os.path.join(self.dir_dettype(), panel_id)",
"def stubDir(self):\n p = os.path.join(self.cctParent(), \"Stub\")\n return p",
"def pc_path(self):\r\n\t\treturn self.__pathstub + \".pc\"",
"def feFrontendSrcOverlayDir(self) -> bool:\n default = os.path.join(self._homePath, 'frontendSrcOverlayDir')\n with self._cfg as c:\n return self._chkDir(c.frontend.frontendSrcOverlayDir(default, require_string))",
"def path_on_server(self):\n\n # change dev_base if necessary\n if ConfigHandler.cfg.wb_new == \"True\":\n oPB.DEV_BASE = oPB.DEV_BASE_OPSI41\n else:\n oPB.DEV_BASE = oPB.DEV_BASE_OPSI40\n\n # if on Linux, we have to subtract local share base from development folder\n # -> the local share base acts like the drive letter on windows\n if platform.system() == 'Linux':\n tmp = self.projectfolder.replace(ConfigHandler.cfg.local_share_base, \"\")\n else:\n tmp = self.projectfolder\n\n if platform.system() == \"Windows\":\n # remove drive letter\n return oPB.DEV_BASE + tmp[2:].replace(\"\\\\\", \"/\")\n else:\n # replace possible double '/' with single '/'\n return (oPB.DEV_BASE + \"/\" + tmp).replace(\"//\", \"/\")\n\n \"\"\"\n if tmp.startswith(repo_base):\n return tmp\n else:\n if tmp.strip() != \"\":\n ret = (repo_base + \"/\" + tmp + \"/\" + self.id).replace(\"//\", \"/\")\n print(\"a\", ret)\n return ret\n else:\n ret = (repo_base + \"/\" + self.id).replace(\"//\", \"/\")\n print(\"b\", ret)\n return ret\n \"\"\"",
"def get_path() -> str:\n config_dir: str = appdirs.user_config_dir(\"plotman\")\n return config_dir + \"/plotman.yaml\"",
"def ping_path(self):\n return self.cfg.cache.full_path(\"%s.ping\" % self.dashed)",
"def cipDir(self):\n if self.isMaster:\n p = os.path.join(self.configDir(), \"cip\")\n return p\n else:\n p = os.path.join(os.path.abspath(os.path.join(self.filePath(), \"..\", \"..\", \"..\", \"..\", \"..\")), \"cip\")\n return p",
"def get_config_path():\n return _folder + \"/config\"",
"def getShaderDir():\n\n if fslgl.GL_COMPATIBILITY == '3.3': subdir = 'gl33'\n if fslgl.GL_COMPATIBILITY == '2.1': subdir = 'gl21'\n elif fslgl.GL_COMPATIBILITY == '1.4': subdir = 'gl14'\n\n return op.join(fsleyes.assetDir, 'gl', subdir)",
"def _get_board_data_dir():\n x = op.dirname(op.abspath(__file__))\n x = op.abspath(op.join(x, \"data\"))\n return x",
"def services_path(self):\n return os.path.join(self.dotci3_path, 'services')",
"def configDir(self):\n p = os.path.dirname(self.cctDir())\n return p",
"def socket_path(self):\n return self._shell._socket_path"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the DISTDIR path.
|
def distdir(argv):
print portage.settings["DISTDIR"]
|
[
"def getBuildDir(self):\n default = 'build'\n pathstr = self.getCustom('Build', 'builddir', default)\n pathstr = self._getAbsPath(pathstr)\n\n return pathstr",
"def get_build_dir_path(rel_path=''):\n build_root = os.environ['BUILD_ROOT']\n return os.path.join(build_root, rel_path)",
"def build_dir(self) -> str:\n assert self.build_arch is not None\n return os.path.join(self.out_dir, self.name, self.build_arch)",
"def configDir(self):\n p = os.path.dirname(self.cctDir())\n return p",
"def getDocDir():\n\treturn os.path.dirname(os.path.realpath(__file__)) + os.sep",
"def output_dir(self, typename):\n setting = \"{}_DIR\".format(typename.upper())\n if setting in os.environ:\n return os.environ[setting]\n else:\n return \".\"",
"def get_output_dir(self) -> Path:\n return self.output_dir",
"def get_install_path():\n\n return os.path.dirname(__file__)",
"def get_deploy_dir() -> str:\n deploydir = local(\"pwd\", capture=True)\n return deploydir",
"def get_dist_dir(self, dist_name):\n\n # If the expected dist name does exist, simply use that\n expected_dist_dir = join(self._build_dir, 'dists', dist_name)\n if exists(expected_dist_dir):\n return expected_dist_dir\n\n # If no directory has been found yet, our dist probably\n # doesn't exist yet, so use the expected name\n return expected_dist_dir",
"def get_output_file_path(self):\n zip_filename = \"%s.%s_%s.wotmod\" % (\n self.author_id, self.mod_id, self.mod_version)\n return os.path.abspath(os.path.join(self.dist_dir, zip_filename))",
"def output_dir(self):\n return self.options.output_dir",
"def cctDir(self):\n if self.isMaster:\n p = os.path.dirname(os.path.abspath(self.cctFilePath()))\n else:\n p = os.path.abspath(os.path.join(self.filePath(), \"..\", \"..\", \"..\", \"..\"))\n return p",
"def BuildCWD(self):\n return ROOT_DIR",
"def getDNSInstallDir(self):\n key = 'DNSInstallDir'\n P = self.userregnl.get(key, '')\n if P:\n os.path.normpath(P)\n if os.path.isdir(P):\n return P\n \n pf = natlinkcorefunctions.getExtendedEnv('PROGRAMFILES')\n if not os.path.isdir(pf):\n raise IOError(\"no valid folder for program files: %s\"% pf)\n for dnsdir in DNSPaths:\n cand = os.path.join(pf, dnsdir)\n if os.path.isdir(cand):\n programfolder = os.path.join(cand, 'Program')\n if os.path.isdir(programfolder):\n return os.path.normpath(cand)\n print 'no valid DNS Install Dir found, please provide one in natlinkconfigfunctions (option \"d\") or in natlinkconfig GUI (info panel)'",
"def BuildOutputRootDir(self):\n return os.path.join(GYP_OUT_DIR, self.GYP_OUT_SUBDIR)",
"def get_script_dir():\n return os.path.dirname(os.path.realpath(__file__)) + '/'",
"def demopath():\n\tp = os.path.abspath(__file__)\n\t\n\t# traverse up to the directory probably containing doc/\n\tfor i in range(0,3):\n\t\tp = os.path.split(p)[0]\n\t\t\n\t# check wheter doc/ really exists here\n\tres = os.path.join(p, \"doc\") # package demo should be inside doc/\n\tassert os.path.exists(res)\n\tassert os.path.isdir(res)\t\n\treturn res",
"def get_assets_dir():\n profile = getattr(BUILD_CONSTANTS, 'VISTAS_PROFILE', 'dev')\n\n return '' if profile == 'deploy' else '..'"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Generates x users.
|
def generate_users(self, x):
for i in range(x):
user = id_generator()
self.create_user(user)
|
[
"def create_n_users(size):\n users = []\n for i in range(size):\n users.append({\n \"first_name\": \"First%d\" % i,\n \"last_name\": \"First%d\" % i,\n \"credit_card\": i,\n \"email\": \"%dgmai.com\" % i,\n \"username\": \"username%d\" % i,\n \"driver\": False,\n \"password\": \"%d\" % i\n })\n return users",
"def _generate_users(self):\n success_counter = 0\n hunter_attempts = 0\n hunter_max_attempts = 3\n\n while success_counter < self.number_of_users:\n try:\n users = self._get_some_users()\n except HunterError:\n hunter_attempts += 1\n if hunter_attempts >= hunter_max_attempts:\n logger.error(\"reached max retries to connect to hunterio\\\n will stop\")\n raise AutoBotError(\"TERMINTATING\")\n users = []\n\n for user in users:\n new_user, created = user.create_me(self.api)\n\n if created:\n self.append(new_user)\n success_counter += 1\n if len(self) >= self.number_of_users:\n break\n logger.info(f\"successfully created {self.number_of_users} users\")",
"def add_fake_data(number_users):\n User.generate_fake(count=number_users)",
"def generate_username():\n n = random.randint(1, 999999)\n new_username = 'user%d' % (n,)\n\n while User.objects.filter(username=new_username).exists():\n n = random.randint(1, 999999)\n new_username = 'user%d' % (n,)\n\n return new_username",
"def seed_users(num_entries=10, overwrite=False, overwrite_superusers=False):\n if overwrite:\n if overwrite_superusers:\n print(\"Overwriting all Users\")\n User.objects.all().delete()\n else:\n print(\"Overwriting all Users except superusers\")\n User.objects.filter(is_staff=False).delete()\n\n count = 0\n\n password = make_password('wy3MW5') # use this password to login as the created users\n\n # first create a test user and then create the other users as necessary\n u = User(\n email=\"testuser@sportsdictionary.com\",\n username=\"testuser\",\n password=password\n )\n u.save()\n\n for _ in range(num_entries - 1):\n retry = True\n while retry:\n first_name = fake.first_name()\n last_name = fake.last_name()\n username = first_name + last_name\n try:\n User.objects.get(username=username)\n except ObjectDoesNotExist:\n retry = False\n\n u = User(\n first_name=first_name,\n last_name=last_name,\n email=first_name + \".\" + last_name + \"@faker.com\",\n username=username,\n password=password\n )\n u.save()\n count += 1\n percent_complete = count / num_entries * 100\n print(\n \"Adding {} new Users: {:.2f}%\".format(num_entries, percent_complete),\n end='\\r',\n flush=True\n )\n print()",
"def users(request, test_db):\n user_data = request.node.get_closest_marker(\"users\")\n\n if not user_data:\n return\n # We must work on a copy of the data or else rerunfailures/flaky fails\n user_data = tuple(user_data.args)\n for each in user_data[0]:\n _create_user(request, test_db, each)",
"def create_random_user_tsv(num, users):\n\n # user_ids = user_artist_df['user_id'].unique()\n # create_random_user_tsv(10000, user_ids)\n\n random_users = random.sample(list(users), num)\n random_users_df = pd.DataFrame(random_users, columns=['user_id'])\n random_users_df.to_csv('../../data/random_users.tsv', sep='\\t', index=False)",
"def create_user_names(num_users):\n\n num_per_type = num_users / len(USER_TYPES)\n user_names = []\n\n for user_type in USER_TYPES:\n for i in range(num_per_type):\n user_names.append('{}_{}'.format(user_type, i))\n\n return user_names",
"async def generate_everything(count: int = 200,\n user_creation_weight: int = 1, item_creation_weight: int = 1,\n order_creation_weight: int = 1, top_up_user_weight: int = 1,\n pay_order_weight: int = 1, return_order_weigth: int = 1):\n actions = [generate_user] * user_creation_weight + \\\n [generate_item] * item_creation_weight + \\\n [generate_order] * order_creation_weight + \\\n [top_up_user] * top_up_user_weight + \\\n [pay_order] * pay_order_weight + \\\n [return_order] * return_order_weigth\n for _ in range(count):\n try:\n await random.choice(actions)()\n except IndexError:\n pass\n return {\"message\": \"OK\"}",
"def create_people(self):\n emails = [\n \"user1@ggrc.com\",\n \"miha@policy.com\",\n \"someone.else@ggrc.com\",\n \"another@user.com\",\n ]\n for email in emails:\n self.generator.generate_person({\n \"name\": email.split(\"@\")[0].title(),\n \"email\": email,\n }, \"Administrator\")",
"def generate_random_people(self, n, cls=Person):\n return [self.generate_person(cls) for i in range(n)]",
"def generate_k_ranodm_users(*, init: int, k: int, shuffle: bool) -> Optional[List[Dict]]:\n res = []\n if init <= k:\n for i in range(k):\n if init < k:\n res.append(generate_random_user(is_init=True))\n else:\n res.append(generate_random_user(is_init=False))\n else:\n log_msg(\"Please ensure that init <= k for user generation.\")\n return None\n\n if shuffle:\n random.shuffle(res)\n return res",
"def iid(dataset, num_users, seed):\n\tnp.random.seed(seed)\n\t\n\tnum_items = int(len(dataset) / num_users)\n\trem_items = len(dataset) % num_users\n\tif rem_items == 0:\n\t\tprint(\"Each user will get %d samples from the training set.\"%(num_items))\n\telse:\n\t\tprint(\"Each user will get %d samples from the training set. %d samples are discarded.\"%(num_items, rem_items))\n\n\tuser_groups = {} \n\tall_idxs = list(range(len(dataset)))\n\t\n\tfor i in range(num_users):\n\t\tuser_groups[i] = list(np.random.choice(all_idxs, num_items, replace=False))\n\t\tall_idxs = list(set(all_idxs) - set(user_groups[i]))\n\t\n\treturn user_groups",
"def generate_user_name():\n connection = mysql.get_db()\n cursor = connection.cursor()\n cursor.execute(\"SELECT * FROM users WHERE username IS NULL\")\n users_obj = convert_objects(cursor.fetchall(), cursor.description)\n cursor.close()\n counter = random.randint(1, 101)\n for user in users_obj:\n # Set username. It will be\n # [first letter of firstname][lastname without spaces/special charcters][a number to differentiate]\n user_name = \"\"\n if 'first_name' in user and user['first_name'] is not None:\n user_name += user[\"first_name\"][:1]\n if 'last_name' in user and user['last_name'] is not None:\n # https://stackoverflow.com/questions/5843518/remove-all-special-characters-punctuation-and-spaces-from-string\n user_name += ''.join(e for e in user[\"last_name\"] if e.isalnum())\n user_name += str(counter)\n counter += 1\n put_cursor = connection.cursor()\n put_cursor.execute(\"UPDATE users SET username=%s WHERE id=%s\", (user_name, user['id']))\n connection.commit()\n return make_response(\"OK\", HTTPStatus.OK)",
"def get_users_by_n_tests():\n cursor = connection.cursor()\n cursor.execute(\"\"\"\n SELECT n_tests, COUNT(*) AS n_users\n FROM (\n SELECT t.user_id, COUNT(*) AS n_tests\n FROM (\n SELECT ts.user_id, COUNT(*) AS n_responses\n FROM drill_testset AS ts\n INNER JOIN drill_testset_responses AS tsr\n ON ts.id = tsr.testset_id\n GROUP BY ts.id\n ) AS t\n WHERE t.n_responses > 0\n GROUP BY t.user_id\n ) AS tests_per_user\n GROUP BY n_tests\n ORDER BY n_tests ASC\n \"\"\")\n data = list(cursor.fetchall())\n\n # Make cumulative\n for i in xrange(len(data) - 1, 0, -1):\n label, value = data[i-1]\n data[i-1] = (label, value + data[i][1])\n\n return data",
"def generateRandomParticipants(count: int) -> list:\n participants = []\n for i in range(0, count):\n p1 = randint(0, 10)\n p2 = randint(0, 10)\n p3 = randint(0, 10)\n participant = createParticipant(p1, p2, p3)\n participants.append(participant)\n return participants",
"def get_prousers(self, count: int = 5):\n resp = sess.get(api.pro_users % (self.symbol, count))\n dt = resp.ok and resp.json()\n self.prousers = [User(i) for i in dt]",
"def generate_user_id(num_char):\n letters = string.ascii_lowercase + string.ascii_uppercase + string.digits\n return ''.join(random.choice(letters) for i in range(num_char))",
"def load_random_users():\r\n \r\n data_load_state = st.text('Loading user data...')\r\n \r\n try:\r\n response = requests.get(RANDOM_API_URL)\r\n response.raise_for_status()\r\n except requests.exceptions.RequestException as e:\r\n raise SystemExit(e)\r\n\r\n data_load_state.text('Encrypting sensitive fields...')\r\n \r\n # Get random user data from API\r\n users = response.json()\r\n user_list = users['results']\r\n \r\n hashed_user_list = []\r\n \r\n # Create a list of User objects with hashed password\r\n for user_data in user_list:\r\n hashed_user_list.append(User(user_data))\r\n \r\n data_load_state.empty()\r\n \r\n return hashed_user_list"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return the subject name from a test, function, or component file.
|
def _subject_name(path):
subject = os.path.basename(path)
subject = subject.replace('-test', '')
subject = subject.replace('-spec', '')
subject = subject.replace('-unit', '')
subject = subject.replace('.test', '')
subject = subject.replace('.spec', '')
subject = subject.replace('.unit', '')
subject = subject.replace('.acceptance', '')
subject = subject.split('.')[0]
if subject == "index":
# use the parent directory's name
subject = _directory(path)
return subject
|
[
"def test_get_subject(self):\n pass",
"def get_subject_common_name(self):\n return self.subject_info.get_common_name()",
"def getSubject(self):\n subject_st = urllib.unquote(self.path[1:].split('?', 1)[0]).decode('utf8')\n return article.Subject.fromString(subject_st)",
"def show_subject(self):\n showme(\"%s %s\" % (SUBJECT, self.cert.get_subject()))",
"def show_subject_common_name(self):\n showme(\"%s %s : %s\" % \\\n (SUBJECT, self.subject_info.reference[REF_COMMON_NAME][REF_NAME]\\\n .lower(), self.get_subject_common_name()))",
"def test_name(self) -> str:\n tc = self.testcontainer\n test_name = \"%s@%s#%s\" % (tc.__module__, tc.__name__, self.testmeth.__name__)\n return test_name",
"def get_message_subject(self):\n subject = loader.render_to_string(self.subject_template, self.get_rendering_context())\n subject = ''.join(subject.splitlines())\n return subject",
"def get_subject_info(self, lproject, lsubject):\n project = self.session.projects[lproject]\n try:\n subject = project.subjects[lsubject]\n return vars(subject)\n except:\n return \"The subject was not found in the project.\"",
"def pytest_logfest_log_file_name_full_module(filename_components):",
"def certificate_subject_name(self) -> Optional[str]:\n return pulumi.get(self, \"certificate_subject_name\")",
"def subject(self):\n return self._messageRow['subject']",
"def subject_text(self):\n return self._subject_text",
"def get_email_subject(confirmation):\n return get_email_file_name(confirmation) + '.pdf'",
"def get() -> Subject:\n return self._subject",
"def get_subject(self):\n if self.subject is not None:\n return self.subject\n raise NotImplementedError(\"Notifier requires a subject\")",
"def test_discover_subjects(self):\n pass",
"def test_get_subjects(self):\n pass",
"def test_name(self) -> None:\n return self._test_name",
"def pytest_logfest_log_file_name_full_session(filename_components):"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return the subject's relative path from the current file. There is no magic; for now it only replaces __tests__ with ../
|
def subject_relative_path(path):
directory = path
subject = component_name(path)
filename = os.path.basename(path)
directory = os.path.dirname(path)
parent = os.path.basename(directory)
if re.match(r"index(?:[-._](?:spec|unit|test|acceptance))?\.jsx?$", filename):
if re.match(r"__tests?__/?", parent):
return '..' + os.sep
return '.' + os.sep
if re.match(r"__tests?__/?", parent):
return '..' + os.sep
return os.path.join('.', subject)
|
[
"def tests_dir():\n return Path(os.path.realpath(__file__)).parent",
"def __get_testfile_path(self, path):\n path = os.path.relpath(\n path, os.path.join(self.__data_path, os.pardir))\n return path",
"def getAbsPath() -> str:\n thisFile:str = os.path.realpath(__file__)\n absPath:str = thisFile.replace(\"/srcTemplates.py\",\"\")\n return absPath",
"def get_full_path(): \n path = Path(QtCore.__file__).parent.as_posix()\n path += \"/examples/\"\n #print(path)\n return path",
"def get_tests_dir_path(): \n fmod_path = ctbto.tests.__path__\n \n test_dir = \"%s/conf_tests\" % fmod_path[0]\n \n return test_dir",
"def get_real_path(self):\n return os.path.join(self.root.path, self.path, self.filename)",
"def transform_path():\n return str(pathlib.Path(__file__).parent.absolute())",
"def get_full_filepath(test_filename):\n file_path = os.path.dirname(os.path.abspath(__file__))\n return_filepath = os.path.abspath(file_path + \"/responses/\" + test_filename)\n return return_filepath",
"def menpo_src_dir_path():\n from pathlib import Path # to avoid cluttering the menpo.base namespace\n return Path(os.path.abspath(__file__)).parent",
"def menpobench_dir():\n from pathlib import Path # to avoid cluttering the menpo.base namespace\n import os\n return Path(os.path.abspath(__file__)).parent",
"def _get_test_template_dir():\n return os.path.join(os.path.dirname(\n os.path.abspath(__file__)), 'test_templates/')",
"def script_path(script, test_name=__name__):\n return '{test_path}.{script}'.format(test_path=test_name, script=script)",
"def _subject_name(path):\n subject = os.path.basename(path)\n subject = subject.replace('-test', '')\n subject = subject.replace('-spec', '')\n subject = subject.replace('-unit', '')\n subject = subject.replace('.test', '')\n subject = subject.replace('.spec', '')\n subject = subject.replace('.unit', '')\n subject = subject.replace('.acceptance', '')\n subject = subject.split('.')[0]\n\n if subject == \"index\":\n # use the parent directory's name\n subject = _directory(path)\n\n return subject",
"def get_project_path():\n return Path(__file__).absolute().parents[1]",
"def _GetTestPath(self, configuration):\n return os.path.join(\n self._build_dir, configuration, '%s.exe' % self._raw_name)",
"def _GetSrcRelativePath(path):\n assert path.startswith(_GetToolsParentDir())\n return expand_owners.SRC + path[len(_GetToolsParentDir()) + 1:]",
"def data_test_dir():\n return Path(__file__).absolute().parent.parent.parent / \"test_data\"\n # return os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))), \"test_data\")",
"def makeFilePath(self, file_path):\n return '%s/%s' % (os.path.dirname(__file__), file_path)",
"def get_test_configuration_path() -> Path:\n return get_project_root() / '.test_configuration'"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return all initialization methods for the comparison algorithm. Initialization methods must start with 'initialize_' and take no parameters.
|
def get_initialization_functions(self):
initialization_methods = [
(
method,
getattr(self, method),
) for method in dir(self) if method.startswith('initialize_')
]
return {
key: value for (key, value) in initialization_methods
}
|
[
"def get_init_ops(self):\n return self.get_train_init_op(), self.get_test_init_op()",
"def _slot_initializers(self) -> List[init_ops_v2.Initializer]:\n raise NotImplementedError",
"def init_make_method_static(self):\n refactoring_main = make_method_static_2.main\n params = {\"udb_path\": self.udb_path}\n candidates = self._methods\n params.update(random.choice(candidates))\n return refactoring_main, params",
"def init_make_method_non_static(self):\n refactoring_main = make_method_non_static_2.main\n params = {\"udb_path\": self.udb_path}\n candidates = self._static_methods\n params.update(random.choice(candidates))\n return refactoring_main, params",
"def initiators(self):\n return self._initiators",
"def __init__(self):\n self._factory_methods = {\n Noise.SALT_AND_PEPPER: PreprocessorFactory._create_salt_and_pepper,\n Noise.MISSING_BLOCK: PreprocessorFactory._create_missing_block,\n Noise.UNIFORM: PreprocessorFactory._create_uniform,\n Noise.GAUSSIAN: PreprocessorFactory._create_gaussian,\n }",
"def Initialize():\r\n pass",
"def __init__(self, methods , verbose = True ):\n self.methods = methods\n self.verbose = verbose",
"def get_available_compression_methods():\r\n # type: () -> List[Text]\r\n return _nassl_legacy.SSL.get_available_compression_methods()",
"def verification_methods(self) -> List[VerificationMethod]:\n pass",
"def test_default_class_initialization(self):\n\n wf = WordFilter.create_default_filter()\n\n self.assertEqual(len(wf.bloom_filter1), 1000)\n self.assertEqual(len(wf.bloom_filter2), 1000)\n self.assertEqual(len(wf.hash_table), 30)",
"def __shared_initialize__(self, **kwargs):",
"def test_method_signatures(self):\n errors = {}\n cls = self.driver\n # Create fictional driver instance (py3 needs bound methods)\n tmp_obj = cls(hostname=\"test\", username=\"admin\", password=\"pwd\")\n attrs = [m for m, v in inspect.getmembers(tmp_obj)]\n for attr in attrs:\n func = getattr(tmp_obj, attr)\n if attr.startswith(\"_\") or not inspect.ismethod(func):\n continue\n try:\n orig = getattr(NetworkDriver, attr)\n orig_spec = inspect.getfullargspec(orig)[:4]\n except AttributeError:\n orig_spec = \"Method does not exist in napalm.base\"\n func_spec = inspect.getfullargspec(func)[:4]\n if orig_spec != func_spec:\n errors[attr] = (orig_spec, func_spec)\n\n EXTRA_METHODS = [\"__init__\"]\n for method in EXTRA_METHODS:\n orig_spec = inspect.getfullargspec(getattr(NetworkDriver, method))[:4]\n func_spec = inspect.getfullargspec(getattr(cls, method))[:4]\n if orig_spec != func_spec:\n errors[attr] = (orig_spec, func_spec)\n\n assert not errors, \"Some methods vary. \\n{}\".format(errors.keys())",
"def get_all_algorithm_names():\n alg_lookup = get_algorithm_to_version_lookup()\n return alg_lookup.keys()",
"def load_comparators(plugin_dir):\n\n sys.path.insert(0, plugin_dir)\n\n comparators = dict()\n\n for modname in os.listdir(plugin_dir):\n if modname[0] == '_' or modname[-4:] == '.pyc':\n continue\n if modname[-3:] == '.py':\n modname = modname[:-3]\n try:\n mod = __import__(\"{}\".format(modname))\n new_comps = mod.comparators()\n except AttributeError:\n raise error(\n \"Plugin {} does not have a comparators() function.\".format(mod))\n comparators.update(new_comps)\n\n return comparators",
"def test_get_standard_methods():\n out = _get_standard_methods()\n assert len(out) == 5",
"def init_pylab_methods(self):\r\n\r\n methods = ALL_PYLAB_METHODS\r\n for f in methods:\r\n try:\r\n func = PylabFuncWrapper(f)\r\n #func.useKeywords = True\r\n #setattr(self, 'soap_'+f, func)\r\n setattr(self, 'xmlrpc_'+f, PylabFuncWrapper(f))\r\n except KeyboardInterrupt:\r\n raise\r\n except:\r\n # I guess this version of mpl doesn't know this one...\r\n print \"Matplotlib function '%s' not found.\" % f\r\n pass",
"def GetAlgorithmNames():\n return _ALGORITHMS.keys()",
"def get_all_methods(self, entry_point=ALL, protocol=ALL, sort_methods=False):\n\n if sort_methods:\n return [\n method for (_, method) in sorted(self._registry.items()) if method.is_valid_for(entry_point, protocol)\n ]\n\n return self._registry.values()",
"def similarity_functions(self):\n def default_similarity_function(r1, r2):\n v1 = self.value_merge_func(r1)\n v2 = self.value_merge_func(r2)\n return 1 if v1 == v2 else 0\n\n return [default_similarity_function]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Download the NLTK vader lexicon for sentiment analysis, which is required for this algorithm to run.
|
def initialize_nltk_vader_lexicon(self):
from .utils import nltk_download_corpus
nltk_download_corpus('sentiment/vader_lexicon')
|
[
"def __init__(self):\n nltk.download(\"vader_lexicon\", quiet=True)\n self.vader = SentimentIntensityAnalyzer()",
"def _open_lexicon(self):\n # Positive Lexicon\n with open(opinion_lexicon_path + '/positive-words.txt', encoding='iso-8859-1') as f:\n positive_words = np.loadtxt(f, comments=';', dtype='bytes')\n positive_words = [x.decode('us-ascii') for x in positive_words]\n self.positive_words = set(positive_words)\n\n # Negative Lexicon\n with open(opinion_lexicon_path + '/negative-words.txt', encoding='iso-8859-1') as f:\n negative_words = np.loadtxt(f, comments=';', dtype='bytes')\n negative_words = [x.decode('iso-8859-1') for x in negative_words.tolist()]\n self.negative_words = set(negative_words)",
"def inception_v3_download():",
"def sentences_vectorisation(file, lexicon, sentiment, num_of_lines=0):\n\n\tlist_of_vectors = []\n\twith open(file, \"r\") as file:\n\n\t\tif not num_of_lines:\n\t\t\tto_read = file\n\t\telse:\n\t\t\tto_read = file.readlines()[:num_of_lines]\n\n\t\tfor line in to_read:\n\t\t\twords = word_tokenize(line.lower())\n\t\t\twords = [WordNetLemmatizer().lemmatize(word) for word in words]\n\t\t\tvector = np.zeros(len(lexicon))\n\n\t\t\tfor word in words:\n\t\t\t\tif word.lower() in lexicon:\n\t\t\t\t\tword_index = lexicon.index(word.lower())\n\t\t\t\t\tvector[word_index] += 1\n\n\t\t\tlist_of_vectors.append((vector,sentiment))\n\n\t\treturn list_of_vectors",
"def learn_domain_sentiment_words(articles, _filename):\n # Invers freq = log(Anzahl docs / Anzahl docs mit Term)\n sentiment_words = pd.read_csv(articleAnalysis.lex_folder + 'sentiment_lexicon.csv', skiprows=[1], header=0)\n pos_sentiment = [w for w in sentiment_words['Positive sentiment'].fillna('').tolist() if w != '']\n neg_sentiment = [w for w in sentiment_words['Negative sentiment'].fillna('').tolist() if w != '']\n doc_freq_pos = dict.fromkeys(pos_sentiment, 1)\n doc_freq_neg = dict.fromkeys(neg_sentiment, 1)\n for a in articles:\n new_pos, new_neg = find_conjunctions(a, pos_sentiment)\n new_neg_2, new_pos_2 = find_conjunctions(a, neg_sentiment)\n new_pos.update(new_pos_2)\n new_neg.update(new_neg_2)\n for w in new_pos:\n if w in doc_freq_pos:\n doc_freq_pos[w] += 1\n else:\n doc_freq_pos[w] = 1\n for w in new_neg:\n if w in doc_freq_neg:\n doc_freq_neg[w] += 1\n else:\n doc_freq_pos[w] = 1\n no_art = len(articles)\n print(\"Save dictionary at \", articleAnalysis.lex_folder)\n # write newly learned lexicon to file\n if not (\".csv\") in _filename:\n _filename = _filename + \".csv\"\n f = open(articleAnalysis.lex_folder + \"sentiment_lexicon_\" + _filename, \"w+\", newline='', encoding=\"UTF-8\"\n )\n file_writer = csv.writer(f, delimiter=',', quoting=csv.QUOTE_NONNUMERIC)\n file_writer.writerow([\"positive\", \"pos_iwf\", \"negative\", \"neg_iwf\"])\n for i in range(max(len(doc_freq_neg), len(doc_freq_pos))):\n if i < min(len(doc_freq_neg), len(doc_freq_pos)):\n file_writer.writerow(\n [list(doc_freq_pos)[i],\n np.log10(no_art / list(doc_freq_pos.values())[i]),\n list(doc_freq_neg)[i],\n np.log10(no_art / list(doc_freq_neg.values())[i])])\n else:\n if len(doc_freq_neg) == max(len(doc_freq_neg), len(doc_freq_pos)):\n file_writer.writerow(\n [\"\",\n \"\",\n list(doc_freq_neg)[i],\n np.log10(no_art / list(doc_freq_neg.values())[i])])\n else:\n file_writer.writerow(\n [list(doc_freq_pos)[i],\n np.log10(no_art / list(doc_freq_pos.values())[i]),\n \"\",\n \"\"])\n f.close()",
"def sentiment_analysis(text):\n\n # pass text into sentiment url\n if True:\n ret = get_sentiment_from_url(text, sentimentURL)\n if ret is None:\n sentiment_url = None\n else:\n sentiment_url, neg_url, pos_url, neu_url = ret\n else:\n sentiment_url = None\n\n # pass text into TextBlob\n text_tb = TextBlob(text)\n\n # pass text into VADER Sentiment\n analyzer = SentimentIntensityAnalyzer()\n text_vs = analyzer.polarity_scores(text)\n\n # determine sentiment from our sources\n if sentiment_url is None:\n #threshold values\n if text_tb.sentiment.polarity < 0 and text_vs['compound'] <= -0.05:\n sentiment = \"negative\"\n elif text_tb.sentiment.polarity > 0 and text_vs['compound'] >= 0.05:\n sentiment = \"positive\"\n else:\n sentiment = \"neutral\"\n else:\n # this works if the above function executes properly\n if text_tb.sentiment.polarity < 0 and text_vs['compound'] <= -0.05 and sentiment_url == \"negative\":\n sentiment = \"negative\"\n elif text_tb.sentiment.polarity > 0 and text_vs['compound'] >= 0.05 and sentiment_url == \"positive\":\n sentiment = \"positive\"\n else:\n sentiment = \"neutral\"\n\n polarity = (text_tb.sentiment.polarity + text_vs['compound']) / 2\n\n # output sentiment polarity\n print(\"************\")\n print(\"Sentiment Polarity: \" + str(round(polarity, 3)))\n\n # output sentiment subjectivity (TextBlob)\n print(\"Sentiment Subjectivity: \" + str(round(text_tb.sentiment.subjectivity, 3)))\n\n # output sentiment\n print(\"Sentiment (url): \" + str(sentiment_url))\n print(\"Sentiment (algorithm): \" + str(sentiment))\n print(\"Overall sentiment (textblob): \", text_tb.sentiment)\n print(\"Overall sentiment (vader): \", text_vs)\n print(\"sentence was rated as \", round(text_vs['neg']*100, 3), \"% Negative\")\n print(\"sentence was rated as \", round(text_vs['neu']*100, 3), \"% Neutral\")\n print(\"sentence was rated as \", round(text_vs['pos']*100, 3), \"% Positive\")\n print(\"************\")\n\n return polarity, text_tb.sentiment.subjectivity, sentiment",
"def _post_install():\n import nltk\n\n nltk.download(\"words\")\n nltk.download(\"stopwords\")",
"def analyze(movie_review_filename):\n client = language.LanguageServiceClient()\n\n with open(movie_review_filename, 'r') as review_file:\n # Instantiates a plain text document.\n content = review_file.read()\n print(content)\n\n document = types.Document(\n content=content,\n type=enums.Document.Type.PLAIN_TEXT) \n annotations = client.analyze_sentiment(document=document)\n \n print_result(annotations)",
"def main():\n\n # command line parsing\n parser = buildParser()\n args = parser.parse_args()\n\n\n # construct the tweet pro-processing object\n tweetTokenizer = TweetTokenizer()\n lPunct = list(string.punctuation)\n lStopwords = stopwords.words('english') + lPunct + ['rt', 'via', '...', '…', '\"', \"'\", '`']\n\n tweetProcessor = TwitterProcessing(tweetTokenizer, lStopwords)\n\n\n # load set of positive words\n lPosWords = []\n with open(args.posWordFile, 'r', encoding='utf-8', errors='ignore') as fPos:\n for sLine in fPos:\n lPosWords.append(sLine.strip())\n\n setPosWords = set(lPosWords)\n\n\n # load set of negative words\n lNegWords = []\n with codecs.open(args.negWordFile, 'r', encoding='utf-8', errors='ignore') as fNeg:\n for sLine in fNeg:\n lNegWords.append(sLine.strip())\n\n setNegWords = set(lNegWords)\n\n # compute the sentiment\n lSentiment = []\n if args.approach == 'count':\n lSentiment = countWordSentimentAnalysis(setPosWords, setNegWords, args.tweetsFile, args.print, tweetProcessor)\n elif args.approach == 'vader':\n lSentiment = vaderSentimentAnalysis(args.tweetsFile, args.print, tweetProcessor)\n\n\n # determine if we should output a time series of sentiment scores across time\n if args.ts:\n # TODO: write code to display the time series\n # we are using pandas for this, but first we need to get it into a pandas data frame structure\n series = pd.DataFrame(lSentiment, columns=['date', 'sentiment'])\n # tell pandas that the date column is the one we use for indexing (or x-axis)\n series.set_index('date', inplace=True)\n # pandas makes a guess at the type of the columns, but to make sure it doesn't get it wrong, we set the sentiment\n # column to floats\n series[['sentiment']] = series[['sentiment']].apply(pd.to_numeric)\n\n # This step is not necessary, but pandas has a neat function that allows us to group the series at different\n # resultion. The 'how=' part tells it how to group the instances. In this example, it sames we want to group\n # by day, and add up all the sentiment scores for the same day and create a new time series called 'newSeries'\n # with this day resolution\n # TODO: play with this for different resolution, '1H' is by hour, '1M' is by minute etc\n sentimentSeries = series.resample('1H').sum()\n tweetCountSeries = series.resample('1H').count()\n \n # this plots and shows the time series\n plt.figure(figsize=(6,3), dpi = 100)\n plt.plot(sentimentSeries)\n plt.plot(tweetCountSeries)\n plt.legend(['Sentiment', 'Tweet Count'], loc='upper left')\n plt.savefig('fig6.png')\n plt.show()\n plt.close()",
"def extract_lexicon(dirname, volume, page_str, out_file):\n indices = load(INDICES_FILE)\n tei_files = files(dirname, int(volume), page_range(indices, int(volume), page_str))\n with open(out_file, 'w') as fo:\n fo.write(\"# volume: {}, pages: {}\\n\".format(volume, page_str))\n for f in tei_files:\n vol, page = vol_page(f)\n for item in gazetteer_items(f):\n fo.write(format_entry(item, ne_label(indices, vol, page)))",
"def get_analyzer(lexicon: Union[Lexicon, DefaultLexicon]) -> SentimentIntensityAnalyzer:\n\n if isinstance(lexicon, DefaultLexicon):\n return CustomSentimentIntensityAnalyzer(VADER_LEXICON)\n\n # turn the custom lexicon into a TSV and pass it to VADER\n lexicon_file_contents = \"\\n\".join(get_lexicon_file_lines(lexicon))\n return CustomSentimentIntensityAnalyzer(lexicon_file_contents)",
"def main():\n st.title(\"NLPiffy with Streamlit\")\n st.subheader(\"Natural Language Processing on the GO\")\n\n # Tokanzaition\n\n # Tokenization\n if st.checkbox(\"Show Tokens and Lemma\"):\n st.subheader(\"Tokenize Your Text\")\n message = st.text_area(\"Enter You Text\", \"Type Here\")\n if st.button(\"Analyze\"):\n nlp_result = text_analyzer(message)\n st.json(nlp_result)\n\n # Named Entity\n if st.checkbox(\"Show Named Entities\"):\n st.subheader(\"Show Named Entities\")\n message = st.text_area(\"Enter You Text\", \"Python was created by Guido. He Works in UK\")\n if st.button(\"Extract\"):\n nlp_result = entity_analyzer(message)\n st.json(nlp_result)\n\n # Sentiment Analysis\n if st.checkbox(\"Show Sentiment Analysis\"):\n st.subheader(\"Show Named Entities\")\n message = st.text_area(\"Enter You Text\", \"He loves reading and cooking.\")\n if st.button(\"Analyze\"):\n blob = TextBlob(message)\n result_sentiment = blob.sentiment\n st.success(result_sentiment)\n\n\n # Text Summalization\n if st.checkbox(\"Show Text Summarization\"):\n st.subheader(\"Show Named Entities\")\n message = st.text_area(\"Enter You Text\", \"He loves reading and cooking.\\nHe is a programmer.\")\n summary_options = st.selectbox(\"Choice Your Summarize\", (\"gensim\", \"sumy\")) \n if st.button(\"Summarize\"):\n if summary_options == 'gensim':\n st.text(\"Using Gensim..\")\n summary_result = summarize(message)\n elif summary_options == 'sumy':\n import nltk; nltk.download('punkt')\n st.text(\"Using Sumy..\")\n summary_result = sumy_summarizer(message)\n else:\n st.warning(\"Using Defult Summarizer\")\n st.text(\"Using Gensim\")\n summary_result = summarize(message)\n st.success(summary_result)\n \n\n \n\n # Sentiment Analysis\n\n\n # Text Summarization",
"def prepare_lexicons():\n path_to_lexicons = \"./data/sentiment_datasets\"\n lexicons = [lex[:-4] for lex in os.listdir(path_to_lexicons) if lex.endswith(\".csv\")]\n lexicons_dict = {}\n for lexicon in lexicons:\n lex_df = pd.read_csv(\"./data/sentiment_datasets/{}.csv\".format(lexicon), sep=\";\", encoding=\"utf-8\")\n lexicons_dict[lexicon] = lex_df\n return lexicons_dict",
"def _extract_sentiment_from_text(self, corpus_list, doc_name_to_id_dict):\n vader = SentimentIntensityAnalyzer()\n '''\n Go through the documents and rate their sentiment\n '''\n doc_count=0\n sentiment_feature_dict=defaultdict(list)\n for doc_name, row_id in doc_name_to_id_dict.iteritems():\n logger.debug(\"Extracting sentiment from: \" + doc_name)\n doc=corpus_list[row_id]\n ''' \n doc is one document from our corpus\n '''\n sentences=doc.split(\".\")\n pos_count=0\n neg_count=0\n prev_word_was_positive=False\n prev_word_was_negative=False\n pos_neg_count=0\n count=0\n longest_run_of_positives=0\n longest_run_of_negatives=0\n run_of_positives_count=0\n run_of_negatives_count=0\n score=vader.polarity_scores(' '.join(sentences))\n compound_polarity=score['compound']\n '''\n Rate the overall polarity of the document (1 positive, 0 negative)\n '''\n if compound_polarity>0:\n compound_polarity=1\n else:\n compound_polarity=0\n\n '''\n Rate each word in the corpus for sentiment and construct the word-based\n features\n '''\n for sentence in sentences:\n words=sentence.split(\" \")\n for word in words:\n score=vader.polarity_scores(word)\n '''\n If the negative sentiment of a word is greater than the positive sentiment\n '''\n if score['pos']>abs(score['neg']):\n pos_count+=1\n if prev_word_was_negative:\n pos_neg_count+=1\n prev_word_was_negative=False\n if run_of_negatives_count>longest_run_of_negatives:\n longest_run_of_negatives=run_of_negatives_count\n run_of_negatives_count=0\n else:\n run_of_positives_count+=1\n prev_word_was_positive=True\n\n '''\n If the positive sentiment of a word is greater than the negative sentiment\n '''\n if score['pos']<abs(score['neg']):\n neg_count+=1\n if prev_word_was_positive:\n prev_word_was_positive=False\n pos_neg_count+=1\n if run_of_positives_count>longest_run_of_positives:\n longest_run_of_positives=run_of_positives_count\n run_of_negatives_count=0\n else:\n run_of_negatives_count+=1\n prev_word_was_negative=True\n count+=1\n\n sentiment_feature_dict[doc_name].append([pos_count,neg_count,pos_neg_count,longest_run_of_negatives,longest_run_of_positives,compound_polarity])\n \n return sentiment_feature_dict",
"def download_word_embeddings_nl() -> None:\n print('--- Beginning word embedding file download ---')\n url = 'https://www.clips.uantwerpen.be/dutchembeddings/combined-320.tar.gz'\n with TqdmUpTo(unit='B', unit_scale=True, unit_divisor=1024, miniters=1,\n desc=url.split('/')[-1]) as t:\n file_tmp = urlretrieve(url, filename=None, reporthook=t.update_to)[0]\n t.total = t.n\n\n base_name = os.path.basename(url)\n file_name, file_extension = os.path.splitext(base_name)\n tar = tarfile.open(file_tmp)\n tar.extractall(ROOT_DIR+'/resources/word_embeddings/'+file_name)\n return None",
"def getTextVectors():\n raw_text_file = open(utilites.getAbsPath(setup.corpus_file_path))\n raw_text = raw_text_file.readlines()\n print(\"Corpus file \" + raw_text_file.name + \" was loaded.\")\n # use re to split the raw text string and replace the original text\n # After this all the sentence are split into such format:\n # [0]filename, [1]order of annotation, [2]annotation text\n raw_text = [re.split('\\t|#', singleLine.replace('\\n', '')) for singleLine in raw_text]\n\n # now we only need the annotations\n annotations = [line[2] for line in raw_text]\n\n # Prepare the sentences\n sentences = annotation_to_wordlists(annotations)\n\n # Set values for Word2Vec\n num_features = 300 # Use a 300-dimension vector to represent a word\n min_word_count = 5 # Word appears less than 5 times will be ignored\n num_workers = 4 # Number of threads to run in parallel\n context = 5 # Sample 5 words as input for each iteration\n\n # initialize a model using parameters above\n word_model = gensim.models.Word2Vec(workers=num_workers,\n size=num_features, min_count=min_word_count, window=context)\n\n word_model.build_vocab(sentences) # build vocabulary on split sentenced\n print(\"Language model established.\")\n print(\"Loading pre-trained language model...\")\n # initialize the network weights using pre-trained model\n word_model.intersect_word2vec_format(utilites.getAbsPath(setup.lmodel_file_path), binary=True)\n print(\"Loaded weights from pre-trained Google News language model.\")\n print(\"Training models...\")\n # train the model to get word vectors\n word_model.train(sentences)\n print(\"Training completed.\")\n\n return extractVecs(word_model)",
"def _download( self ):\n self._system.download_file(\"https://github.com/mastbaum/avalanche/tarball/\" + self._tar_name)",
"def demo_liu_hu_lexicon(sentence):\n from nltk.corpus import opinion_lexicon\n\n pos_words = 0\n neg_words = 0\n\n y = []\n\n for word in sentence:\n if word in opinion_lexicon.positive():\n pos_words += 1\n y.append(1) # positive\n elif word in opinion_lexicon.negative():\n neg_words += 1\n y.append(-1) # negative\n else:\n y.append(0) # neutral\n\n if pos_words > neg_words:\n return 'Positive'\n elif pos_words < neg_words:\n return 'Negative'\n elif pos_words == neg_words:\n return 'Neutral'",
"def market_sentiment(raw_data):\n # TODO\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Download the NLTK wordnet corpus that is required for this algorithm to run, but only if it has not already been downloaded.
|
def initialize_nltk_wordnet(self):
from .utils import nltk_download_corpus
nltk_download_corpus('corpora/wordnet')
|
[
"def _maybe_download_corpora(tmp_dir):\n cnn_filename = \"cnn_stories.tgz\"\n dailymail_filename = \"dailymail_stories.tgz\"\n cnn_finalpath = os.path.join(tmp_dir, \"cnn/stories/\")\n dailymail_finalpath = os.path.join(tmp_dir, \"dailymail/stories/\")\n if not tf.gfile.Exists(cnn_finalpath):\n cnn_file = generator_utils.maybe_download_from_drive(\n tmp_dir, cnn_filename, _CNN_STORIES_DRIVE_URL)\n with tarfile.open(cnn_file, \"r:gz\") as cnn_tar:\n cnn_tar.extractall(tmp_dir)\n if not tf.gfile.Exists(dailymail_finalpath):\n dailymail_file = generator_utils.maybe_download_from_drive(\n tmp_dir, dailymail_filename, _CNN_STORIES_DRIVE_URL)\n with tarfile.open(dailymail_file, \"r:gz\") as dailymail_tar:\n dailymail_tar.extractall(tmp_dir)\n return [cnn_finalpath, dailymail_finalpath]",
"def _post_install():\n import nltk\n\n nltk.download(\"words\")\n nltk.download(\"stopwords\")",
"def download_word_embeddings_nl() -> None:\n print('--- Beginning word embedding file download ---')\n url = 'https://www.clips.uantwerpen.be/dutchembeddings/combined-320.tar.gz'\n with TqdmUpTo(unit='B', unit_scale=True, unit_divisor=1024, miniters=1,\n desc=url.split('/')[-1]) as t:\n file_tmp = urlretrieve(url, filename=None, reporthook=t.update_to)[0]\n t.total = t.n\n\n base_name = os.path.basename(url)\n file_name, file_extension = os.path.splitext(base_name)\n tar = tarfile.open(file_tmp)\n tar.extractall(ROOT_DIR+'/resources/word_embeddings/'+file_name)\n return None",
"def _maybe_download_corpus(tmp_dir):\n corpus_url = (\"https://dumps.wikimedia.org/simplewiki/20171201/\"\n \"simplewiki-20171201-pages-articles-multistream.xml.bz2\")\n corpus_filename = os.path.basename(corpus_url)\n corpus_filepath = os.path.join(tmp_dir, corpus_filename)\n if not tf.gfile.Exists(corpus_filepath):\n maybe_download(tmp_dir, corpus_filename, corpus_url)\n return corpus_filepath",
"def download_corpora_steps(project_name, test_image_suffix):\n fuzz_targets = _get_targets_list(project_name)\n if not fuzz_targets:\n sys.stderr.write('No fuzz targets found for project \"%s\".\\n' % project_name)\n return None\n\n steps = []\n # Split fuzz targets into batches of CORPUS_DOWNLOAD_BATCH_SIZE.\n for i in range(0, len(fuzz_targets), CORPUS_DOWNLOAD_BATCH_SIZE):\n download_corpus_args = []\n for binary_name in fuzz_targets[i:i + CORPUS_DOWNLOAD_BATCH_SIZE]:\n qualified_name = binary_name\n qualified_name_prefix = '%s_' % project_name\n if not binary_name.startswith(qualified_name_prefix):\n qualified_name = qualified_name_prefix + binary_name\n\n # Normalize qualified_name name.\n qualified_name = _normalized_name(qualified_name)\n\n url = get_signed_url(CORPUS_BACKUP_URL.format(project=project_name,\n fuzzer=qualified_name),\n method='GET')\n\n corpus_archive_path = os.path.join('/corpus', binary_name + '.zip')\n download_corpus_args.append('%s %s' % (corpus_archive_path, url))\n\n steps.append({\n 'name': get_runner_image_name(test_image_suffix),\n 'entrypoint': 'download_corpus',\n 'args': download_corpus_args,\n 'volumes': [{\n 'name': 'corpus',\n 'path': '/corpus'\n }],\n })\n\n return steps",
"def fetch_neurosynth_dataset(data_dir, return_pkl=True):\n if not os.path.isdir(data_dir):\n os.mkdir(data_dir)\n\n dataset_file = os.path.join(data_dir, \"database.txt\")\n if not os.path.isfile(dataset_file):\n logging.info(\"Downloading the Neurosynth dataset.\")\n download(data_dir, unpack=True)\n feature_file = os.path.join(data_dir, \"features.txt\")\n\n if return_pkl:\n pkl_file = os.path.join(data_dir, \"dataset.pkl\")\n if not os.path.isfile(pkl_file):\n logging.info(\n \"Converting Neurosynth data to a .pkl file. This may take a while.\"\n )\n dataset = Dataset(dataset_file, feature_file)\n dataset.save(pkl_file)\n return pkl_file\n\n return (dataset_file, feature_file)",
"def load(full=False):\n\n\n filename = small_file\n conll_folder = folder + \"/conll-small/\"\n\n if full:\n filename = big_file\n conll_folder = folder + \"/conll-full/\"\n\n path = folder +\"/cetem/\"+filename\n\n if not os.path.exists(path):\n download(full=full)\n\n if not os.path.exists(conll_folder):\n cetem_to_conll(path, conll_folder)\n\n corpus = load_to_nltk(conll_folder)\n return corpus",
"def gather_all_corpora(corpora_dirs):\n\n corpora = {\n data_dir: get_corpus(corpus_dir + \"/\" + data_dir)\n for corpus_dir in corpora_dirs\n for data_dir in data_dirs\n }\n\n corpora[\"unsorted\"] = corpus()\n for unsorted_corpus in list(map(get_corpus, corpora_dirs)):\n corpora[\"unsorted\"] += unsorted_corpus\n return corpora",
"def parse_corpus(self, **kwargs) -> None:\n self._load_resource(\"nlp_base\")\n logger.info(\"Parsing external corpus\")\n (\n self._entities,\n self._failed_entity_lookups,\n self._annotations,\n ) = self._parse_corpus(**kwargs)\n\n # Serialize entity information.\n for to_serialize in (\n (self._paths[\"entities\"], self._entities),\n (self._paths[\"failed_entity_lookups\"], self._failed_entity_lookups),\n (self._paths[\"annotations\"], self._annotations),\n ):\n with open(to_serialize[0], \"wb\") as fp:\n pickle.dump(to_serialize[1], fp)\n logger.info(\"Successfully parsed corpus.\")",
"def download_wordlist_http(filedown):\n\n mkdir_if_not_exists(\"dictionaries\")\n\n # List of files to download:\n arguments = {\n 1: (\n \"Moby\",\n (\n \"mhyph.tar.gz\",\n \"mlang.tar.gz\",\n \"moby.tar.gz\",\n \"mpos.tar.gz\",\n \"mpron.tar.gz\",\n \"mthes.tar.gz\",\n \"mwords.tar.gz\",\n ),\n ),\n 2: (\"afrikaans\", (\"afr_dbf.zip\",)),\n 3: (\"american\", (\"dic-0294.tar.gz\",)),\n 4: (\"aussie\", (\"oz.gz\",)),\n 5: (\"chinese\", (\"chinese.gz\",)),\n 6: (\n \"computer\",\n (\n \"Domains.gz\",\n \"Dosref.gz\",\n \"Ftpsites.gz\",\n \"Jargon.gz\",\n \"common-passwords.txt.gz\",\n \"etc-hosts.gz\",\n \"foldoc.gz\",\n \"language-list.gz\",\n \"unix.gz\",\n ),\n ),\n 7: (\"croatian\", (\"croatian.gz\",)),\n 8: (\"czech\", (\"czech-wordlist-ascii-cstug-novak.gz\",)),\n 9: (\"danish\", (\"danish.words.gz\", \"dansk.zip\")),\n 10: (\n \"databases\",\n (\"acronyms.gz\", \"att800.gz\", \"computer-companies.gz\", \"world_heritage.gz\"),\n ),\n 11: (\n \"dictionaries\",\n (\n \"Antworth.gz\",\n \"CRL.words.gz\",\n \"Roget.words.gz\",\n \"Unabr.dict.gz\",\n \"Unix.dict.gz\",\n \"englex-dict.gz\",\n \"knuth_britsh.gz\",\n \"knuth_words.gz\",\n \"pocket-dic.gz\",\n \"shakesp-glossary.gz\",\n \"special.eng.gz\",\n \"words-english.gz\",\n ),\n ),\n 12: (\"dutch\", (\"words.dutch.gz\",)),\n 13: (\n \"finnish\",\n (\"finnish.gz\", \"firstnames.finnish.gz\", \"words.finnish.FAQ.gz\"),\n ),\n 14: (\"french\", (\"dico.gz\",)),\n 15: (\"german\", (\"deutsch.dic.gz\", \"germanl.gz\", \"words.german.gz\")),\n 16: (\"hindi\", (\"hindu-names.gz\",)),\n 17: (\"hungarian\", (\"hungarian.gz\",)),\n 18: (\"italian\", (\"words.italian.gz\",)),\n 19: (\"japanese\", (\"words.japanese.gz\",)),\n 20: (\"latin\", (\"wordlist.aug.gz\",)),\n 21: (\n \"literature\",\n (\n \"LCarrol.gz\",\n \"Paradise.Lost.gz\",\n \"aeneid.gz\",\n \"arthur.gz\",\n \"cartoon.gz\",\n \"cartoons-olivier.gz\",\n \"charlemagne.gz\",\n \"fable.gz\",\n \"iliad.gz\",\n \"myths-legends.gz\",\n \"odyssey.gz\",\n \"sf.gz\",\n \"shakespeare.gz\",\n \"tolkien.words.gz\",\n ),\n ),\n 22: (\"movieTV\", (\"Movies.gz\", \"Python.gz\", \"Trek.gz\")),\n 23: (\n \"music\",\n (\n \"music-classical.gz\",\n \"music-country.gz\",\n \"music-jazz.gz\",\n \"music-other.gz\",\n \"music-rock.gz\",\n \"music-shows.gz\",\n \"rock-groups.gz\",\n ),\n ),\n 24: (\n \"names\",\n (\n \"ASSurnames.gz\",\n \"Congress.gz\",\n \"Family-Names.gz\",\n \"Given-Names.gz\",\n \"actor-givenname.gz\",\n \"actor-surname.gz\",\n \"cis-givenname.gz\",\n \"cis-surname.gz\",\n \"crl-names.gz\",\n \"famous.gz\",\n \"fast-names.gz\",\n \"female-names-kantr.gz\",\n \"female-names.gz\",\n \"givennames-ol.gz\",\n \"male-names-kantr.gz\",\n \"male-names.gz\",\n \"movie-characters.gz\",\n \"names.french.gz\",\n \"names.hp.gz\",\n \"other-names.gz\",\n \"shakesp-names.gz\",\n \"surnames-ol.gz\",\n \"surnames.finnish.gz\",\n \"usenet-names.gz\",\n ),\n ),\n 25: (\n \"net\",\n (\n \"hosts-txt.gz\",\n \"inet-machines.gz\",\n \"usenet-loginids.gz\",\n \"usenet-machines.gz\",\n \"uunet-sites.gz\",\n ),\n ),\n 26: (\"norwegian\", (\"words.norwegian.gz\",)),\n 27: (\n \"places\",\n (\n \"Colleges.gz\",\n \"US-counties.gz\",\n \"World.factbook.gz\",\n \"Zipcodes.gz\",\n \"places.gz\",\n ),\n ),\n 28: (\"polish\", (\"words.polish.gz\",)),\n 29: (\n \"random\",\n (\n \"Ethnologue.gz\",\n \"abbr.gz\",\n \"chars.gz\",\n \"dogs.gz\",\n \"drugs.gz\",\n \"junk.gz\",\n \"numbers.gz\",\n \"phrases.gz\",\n \"sports.gz\",\n \"statistics.gz\",\n ),\n ),\n 30: (\"religion\", (\"Koran.gz\", \"kjbible.gz\", 
\"norse.gz\")),\n 31: (\"russian\", (\"russian.lst.gz\", \"russian_words.koi8.gz\")),\n 32: (\n \"science\",\n (\n \"Acr-diagnosis.gz\",\n \"Algae.gz\",\n \"Bacteria.gz\",\n \"Fungi.gz\",\n \"Microalgae.gz\",\n \"Viruses.gz\",\n \"asteroids.gz\",\n \"biology.gz\",\n \"tech.gz\",\n ),\n ),\n 33: (\"spanish\", (\"words.spanish.gz\",)),\n 34: (\"swahili\", (\"swahili.gz\",)),\n 35: (\"swedish\", (\"words.swedish.gz\",)),\n 36: (\"turkish\", (\"turkish.dict.gz\",)),\n 37: (\"yiddish\", (\"yiddish.gz\",)),\n }\n\n # download the files\n\n intfiledown = int(filedown)\n\n if intfiledown in arguments:\n\n dire = \"dictionaries/\" + arguments[intfiledown][0] + \"/\"\n mkdir_if_not_exists(dire)\n files_to_download = arguments[intfiledown][1]\n\n for fi in files_to_download:\n url = CONFIG[\"global\"][\"dicturl\"] + arguments[intfiledown][0] + \"/\" + fi\n tgt = dire + fi\n download_http(url, tgt)\n\n print(\"[+] files saved to \" + dire)\n\n else:\n print(\"[-] leaving.\")",
"def fetch_query_corpus(arg_tuple): \n \n # Destructure the tuple (needed for multiprocessing)\n path, query_text, key = arg_tuple\n\n # Open file and fetch all lines of URLs\n with open(BASE_PATH + path) as url_file: \n lines = url_file.read().split('\\n')\n \n results = []\n\n print(\"Processing \" + query_text)\n\n for line in lines:\n result = fetch_article_text(line, key)\n if result != None: \n results.append(result)\n\n # Print results to file\n filename = \"CORPUS/%s.json\" % (query_text)\n with open(filename, 'w') as outfile:\n json.dump(results, outfile, indent=4)",
"def _load_corpora(corpora_file):\n with open(corpora_file) as fo:\n data = json.load(fo)\n for name, meta in data.items():\n modelled = Corpus.from_json(meta, name)\n modelled.save()",
"def _load_default_texts():\n dataset = Dataset()\n dataset.fetch_dataset(\"20NewsGroup\")\n return dataset.get_corpus()",
"def _clear_all_target_corpora(self):\n logs.log('Clearing corpora on target')\n self.fuzzer.device.ssh(['rm', '-rf', self._corpus_directories_target()])",
"def extract_english(conceptnet_path, output_jsonl_path, output_vocab_path=None,\n conceptnet_core=False):\n relation_mapping = load_merge_relation()\n\n cpnet_vocab = dict() # store word and the pos tags\n with open(conceptnet_path, 'r', encoding=\"utf8\") as fin, \\\n open(output_jsonl_path, 'w', encoding=\"utf8\") as fout:\n for line in tqdm(fin):\n toks = line.strip().split('\\t')\n head_str = toks[2]\n tail_str = toks[3]\n rel_str = toks[1]\n source = json.loads(toks[4])[\"dataset\"]\n\n if head_str.startswith('/c/en/') and tail_str.startswith('/c/en/'):\n triple = dict()\n rel = rel_str.split(\"/\")[-1].lower()\n head_pos = get_part_of_speech(head_str)\n tail_pos = get_part_of_speech(tail_str)\n head = get_entity_name(head_str).lower()\n tail = get_entity_name(tail_str).lower()\n\n if not head.replace(\"_\", \"\").replace(\"-\", \"\").isalpha():\n continue\n if not tail.replace(\"_\", \"\").replace(\"-\", \"\").isalpha():\n continue\n if rel not in relation_mapping:\n continue\n\n # ignore triples not from conceptnet core\n if conceptnet_core and \"conceptnet\" not in source:\n continue\n\n rel = relation_mapping[rel]\n if rel.startswith(\"*\"):\n head, tail, rel = tail, head, rel[1:]\n\n triple[\"head\"] = head\n triple[\"tail\"] = tail\n triple[\"rel\"] = rel\n triple[\"head_pos\"] = head_pos\n triple[\"tail_pos\"] = tail_pos\n fout.write(json.dumps(triple) + '\\n')\n\n for w, pos in zip([head, tail], [head_pos, tail_pos]):\n if w not in cpnet_vocab:\n cpnet_vocab[w] = set()\n if pos != \"\":\n cpnet_vocab[w].add(pos)\n print(f'Saved ConceptNet json file to {output_jsonl_path}')\n\n if output_vocab_path:\n with open(output_vocab_path, 'w') as f:\n for word in sorted(list(cpnet_vocab.keys())):\n f.write(word + '\\t' + \",\".join(sorted(list(cpnet_vocab[word]))) + \"\\n\")\n print(f'Saved concept vocabulary to {output_vocab_path}')",
"def combine_corpora(corpora, mode=\"normal\"):\n assert all(corpus in CORPORA for corpus in corpora)\n\n if mode == \"normal\":\n gc = GraphCorpus()\n for corpus in corpora:\n gc.load(CORPORA[corpus][\"path\"])\n folds = list(gc.create_folds())\n\n elif mode == \"cross\":\n gc = GraphCorpus()\n assert len(corpora) > 1\n last = corpora[-1]\n gc.load(CORPORA[last][\"path\"])\n last_folds = list(gc.create_folds())\n first_corpora_text_ids = []\n for corpus in corpora[:-1]:\n ids = gc.load(CORPORA[corpus][\"path\"])\n first_corpora_text_ids.extend(ids)\n folds = [\n (first_corpora_text_ids, test, n) for _, test, n in last_folds\n ]\n\n elif mode == \"add\":\n gc = GraphCorpus()\n assert len(corpora) > 1\n last = corpora[-1]\n all_corpora_text_ids = []\n ids = gc.load(CORPORA[last][\"path\"])\n all_corpora_text_ids.extend(ids)\n last_folds = list(gc.create_folds())\n for corpus in corpora[:-1]:\n ids = gc.load(CORPORA[corpus][\"path\"])\n all_corpora_text_ids.extend(ids)\n folds = [\n ([i for i in all_corpora_text_ids if i not in test], test, n)\n for _, test, n in last_folds\n ]\n\n return gc, folds",
"def test_load_corpus(self):\r\n corpus_data = corpus.load_corpus('chatterbot.corpus')\r\n\r\n self.assertTrue(len(corpus_data))",
"def download_http():\n path = \"MASTER\"\n if not os.path.exists(path):\n os.makedirs(path)\n now = datetime.datetime.now()\n year = now.year % 100\n masters = []\n # masters = [(os.path.join(path, \"master{:02d}-int-SI.txt\".format(year)),\n # \"https://www.vlbi.at/wp-content/uploads/2020/06/master20-int-SI.txt\")]\n\n for cat in masters:\n url_response(cat)\n\n path = \"CATALOGS\"\n if not os.path.exists(path):\n os.makedirs(path)\n\n # catalogs = [(os.path.join(path, \"antenna.cat\"), \"https://ivscc.gsfc.nasa.gov/IVS_AC/sked_cat/antenna.cat\"),\n # (os.path.join(path, \"equip.cat\"), \"https://ivscc.gsfc.nasa.gov/IVS_AC/sked_cat/equip.cat\"),\n # (os.path.join(path, \"flux.cat\"), \"https://ivscc.gsfc.nasa.gov/IVS_AC/sked_cat/flux.cat\"),\n # (os.path.join(path, \"freq.cat\"), \"https://ivscc.gsfc.nasa.gov/IVS_AC/sked_cat/freq.cat\"),\n # (os.path.join(path, \"hdpos.cat\"), \"https://ivscc.gsfc.nasa.gov/IVS_AC/sked_cat/hdpos.cat\"),\n # (os.path.join(path, \"loif.cat\"), \"https://ivscc.gsfc.nasa.gov/IVS_AC/sked_cat/loif.cat\"),\n # (os.path.join(path, \"mask.cat\"), \"https://ivscc.gsfc.nasa.gov/IVS_AC/sked_cat/mask.cat\"),\n # (os.path.join(path, \"modes.cat\"), \"https://ivscc.gsfc.nasa.gov/IVS_AC/sked_cat/modes.cat\"),\n # (os.path.join(path, \"position.cat\"), \"https://ivscc.gsfc.nasa.gov/IVS_AC/sked_cat/position.cat\"),\n # (os.path.join(path, \"rec.cat\"), \"https://ivscc.gsfc.nasa.gov/IVS_AC/sked_cat/rec.cat\"),\n # (os.path.join(path, \"rx.cat\"), \"https://ivscc.gsfc.nasa.gov/IVS_AC/sked_cat/rx.cat\"),\n # (os.path.join(path, \"source.cat.geodetic.good\"),\n # \"https://ivscc.gsfc.nasa.gov/IVS_AC/sked_cat/source.cat.geodetic.good\"),\n # (os.path.join(path, \"tracks.cat\"), \"https://ivscc.gsfc.nasa.gov/IVS_AC/sked_cat/tracks.cat\")]\n\n catalogs = []\n\n # ThreadPool(13).imap_unordered(url_response, catalogs)\n\n for cat in catalogs:\n url_response(cat)",
"def __init__(self, corpora, tokenizer=0, stemmer=0, lemmatization_enabled=True, stopwords_removal=True): \n self.corpora = corpora\n self.cleaned_corpora_set = []\n self.tokenized_corpora = []\n self.all_words = []\n\n if tokenizer <= 2:\n if tokenizer == 0:\n self.tokenizer = RegexpTokenizer(r'\\w+')\n elif tokenizer == 1:\n self.tokenizer = TreebankWordTokenizer()\n else:\n self.tokenizer = TweetTokenizer()\n else:\n assert tokenizer <= 2,\"you used the wrong tokenizer value\"\n\n if stemmer <= 3:\n if stemmer == 0:\n self.stemmer = PorterStemmer()\n elif stemmer == 1:\n self.stemmer = LancasterStemmer()\n elif stemmer == 2:\n self.stemmer = RegexpStemmer('ing$|s$|e$|able$', min=4) #manually modifiable stemmer\n else:\n self.stemmer = Cistem(case_insensitive=False) #favorite german stemmer\n else:\n assert stemmer <= 3,\"you used the wrong stemmer value\"\n\n if lemmatization_enabled:\n self.lemmatization_enabled = True\n self.stemmer = WordNetLemmatizer()\n else:\n self.lemmatization_enabled = False\n #print(\"no lemmatization was selected\") \n\n if stopwords_removal:\n self.stopwords_removal = True\n self.stop_words = set(stopwords.words('english'))\n else:\n self.stopwords_removal = False\n #print(\"no stopword removal was selected\") "
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}