Dataset columns:

  query            stringlengths    9 - 3.4k
  document         stringlengths    9 - 87.4k
  metadata         dict
  negatives        sequencelengths  4 - 101
  negative_scores  sequencelengths  4 - 101
  document_score   stringlengths    3 - 10
  document_rank    stringclasses    102 values
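A minimal loading sketch for the schema above, assuming the rows are published as a Hugging Face dataset (the repository id below is a placeholder, not the real path):

from datasets import load_dataset

# Placeholder repository id -- substitute the actual dataset path.
ds = load_dataset("org/code-retrieval-triplets", split="train")

for row in ds:
    query = row["query"]                      # natural-language description, 9 - 3.4k chars
    document = row["document"]                # matching code snippet, 9 - 87.4k chars
    negatives = row["negatives"]              # 4 - 101 non-matching code snippets
    negative_scores = row["negative_scores"]  # one similarity score per negative
    document_score = float(row["document_score"])  # stored as a string in the dump
    document_rank = int(row["document_rank"])      # rank of the positive among candidates
    assert len(negatives) == len(negative_scores)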
Method tests the request to delete a bucketlist item
def test_delete_bucketlist_item(self):
    email = "test@test.com"
    _pword = "test"
    user = User.query.filter_by(email=email).first()
    bucketlist = BucketList.query.filter_by(user_id=user.id, name="test bucketlist").first()
    item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=1).first()
    self.assertTrue(item)

    response = self.delete_bucketlist_item(email, _pword, bucketlist.id, item.id)
    result = json.loads(response.data.decode('utf-8'))
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(
        result['message'],
        'Bucketlist Item with ID {} deleted'.format(item.id)
    )
    item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=1).first()
    self.assertFalse(item)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_delete_bucketlist_item(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n\n # create a bucketlist by making a POST request\n res = self.client().post(\n '/api/v1/bucketlists/',\n headers=dict(Authorization=\"Bearer \" + access_token),\n data=self.bucketlist)\n self.assertEqual(res.status_code, 201)\n # get the json with the bucketlist\n results = json.loads(res.data.decode())\n\n # create a bucketlist item by making a POST request and add it to the created bucketlist\n res = self.client().post(\n '/api/v1/bucketlists/{}/items/'.format(results['id']),\n headers=dict(Authorization=\"Bearer \" + access_token),\n data={\n \"name\": \"Eat fried crabs\"\n })\n self.assertEqual(res.status_code, 201)\n # get the json containing the created bucketlist item\n res_item = json.loads(res.data.decode())\n\n # delete the bucketlist item we just created\n res = self.client().delete(\n '/api/v1/bucketlists/{}/items/{}'.format(results['id'], res_item['id']),\n headers=dict(Authorization=\"Bearer \" + access_token), )\n self.assertEqual(res.status_code, 200)\n\n # Test to see if it exists, should return a 404\n result = self.client().get(\n '/api/v1/bucketlists/{}/items/1'.format(results['id']),\n headers=dict(Authorization=\"Bearer \" + access_token))\n self.assertEqual(result.status_code, 404)", "def test_delete_bucketlistitem_by_id(self):\n resp = self.client.post('/bucketlists',\n data=json.dumps(self.bucketlist),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n resp_item = self.client.post('/bucketlistitems/1/items',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n delete_item = self.client.delete('/bucketlistitems/1/items/1',\n headers={\n \"Authorization\": self.token\n })\n self.assertEqual(delete_item.status_code, 204)", "def test_delete_bucketlist(self):\n post_data = self.post_a_bucket()\n self.assertEqual(post_data.status_code, 201)\n result_of_delete_method = self.client().delete('/bucketlists/1',\n headers=dict(Authorization='Bearer '\n + self.token())\n )\n self.assertEqual(result_of_delete_method.status_code, 200)\n response_after_removal = self.client().get('/bucketlists/1',\n headers=dict(Authorization='Bearer '\n + self.token())\n )\n self.assertEqual(response_after_removal.status_code, 400)", "def test_api_delete_bucketlist(self):\n\n res_post = self.client().post('/bucketlist', data={'name': \"Don't forget to exercise\"})\n self.assertEqual(res_post.status_code, 201)\n res_post_in_json = json.loads(res_post.data.decode('UTF-8'))\n id = res_post_in_json['id']\n res_delete = self.client().delete(f\"/bucketlist/{id}\")\n self.assertEqual(res_delete.status_code, 200)\n\n # should return 404 after delete the data\n res = self.client().get(f'/bucketlist/{id}')\n self.assertEqual(res.status_code, 404)", "def test_single_bucketlist_item_delete_with_no_auth_header(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n\n res = self.client().post(\n '/api/v1/bucketlists/',\n headers=dict(Authorization=\"Bearer \" + access_token),\n data={'name': 'Visit the Grand Canyon!'})\n self.assertEqual(res.status_code, 201)\n # get the bucketlist in json\n results = json.loads(res.data.decode())\n\n # create a bucketlist item by 
making a POST request and add it to the created bucketlist\n res = self.client().post(\n '/api/v1/bucketlists/{}/items/'.format(results['id']),\n headers=dict(Authorization=\"Bearer \" + access_token),\n data={\n \"name\": \"Eat fried crabs\"\n })\n self.assertEqual(res.status_code, 201)\n # get the json containing the created bucketlist item\n res_item = json.loads(res.data.decode())\n\n # delete the bucketlist item we just created\n res = self.client().delete(\n '/api/v1/bucketlists/{}/items/{}'.format(results['id'], res_item['id']),\n headers=dict(), )\n self.assertEqual(res.status_code, 401)\n self.assertIn('Header with key Authorization missing.', str(res.data))", "def test_single_bucketlist_item_delete_with_empty_token(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n\n res = self.client().post(\n '/api/v1/bucketlists/',\n headers=dict(Authorization=\"Bearer \" + access_token),\n data=self.bucketlist)\n\n # assert that the bucketlist is created\n self.assertEqual(res.status_code, 201)\n # get the response data in json format\n results = json.loads(res.data.decode())\n\n # create a bucketlist item by making a POST request and add it to the created bucketlist\n res = self.client().post(\n '/api/v1/bucketlists/{}/items/'.format(results['id']),\n headers=dict(Authorization=\"Bearer \" + access_token),\n data={\n \"name\": \"Eat fried crabs\"\n })\n self.assertEqual(res.status_code, 201)\n # get the json containing the created bucketlist item\n res_item = json.loads(res.data.decode())\n\n # delete the bucketlist item we just created\n res = self.client().delete(\n '/api/v1/bucketlists/{}/items/{}'.format(results['id'], res_item['id']),\n headers=dict(Authorization=\"\"), )\n self.assertEqual(res.status_code, 401)\n self.assertIn('Token not provided in the header with key Authorization.', str(res.data))", "def test_single_bucketlist_item_delete_with_invalid_token(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n\n res = self.client().post(\n '/api/v1/bucketlists/',\n headers=dict(Authorization=\"Bearer \" + access_token),\n data=self.bucketlist)\n\n # assert that the bucketlist is created\n self.assertEqual(res.status_code, 201)\n # get the response data in json format\n results = json.loads(res.data.decode())\n\n # create a bucketlist item by making a POST request and add it to the created bucketlist\n res = self.client().post(\n '/api/v1/bucketlists/{}/items/'.format(results['id']),\n headers=dict(Authorization=\"Bearer \" + access_token),\n data={\n \"name\": \"Eat fried crabs\"\n })\n self.assertEqual(res.status_code, 201)\n # get the json containing the created bucketlist item\n res_item = json.loads(res.data.decode())\n\n # delete the bucketlist item we just created\n res = self.client().delete(\n '/api/v1/bucketlists/{}/items/{}'.format(results['id'], res_item['id']),\n headers=dict(Authorization=access_token), )\n self.assertEqual(res.status_code, 401)\n self.assertIn('Invalid token format.', str(res.data))", "def test_bucket_is_deleted(self):\n with self.client:\n # Get an auth token\n token = self.get_user_token()\n response = self.client.post(\n '/bucketlists',\n data=json.dumps(dict(name='Travel')),\n headers=dict(Authorization='Bearer ' + token),\n content_type='application/json'\n )\n # Test Bucket creation\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 201)\n 
self.assertTrue(data['status'], 'success')\n self.assertTrue(data['name'], 'Travel')\n # Delete the created Bucket\n res = self.client.delete(\n '/bucketlists/1',\n headers=dict(Authorization='Bearer ' + token)\n )\n data = json.loads(res.data.decode())\n self.assertEqual(res.status_code, 200)\n self.assertTrue(data['status'] == 'success')\n self.assertTrue(data['message'] == 'Bucket Deleted successfully')\n self.assertTrue(res.content_type == 'application/json')", "def test_delete_item_wrong_id(self):\r\n email = \"test@test.com\"\r\n _pword = \"test\"\r\n user = User.query.filter_by(email=email).first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=0).first()\r\n self.assertFalse(item)\r\n\r\n response = self.delete_bucketlist_item(email, _pword, bucketlist.id, 0)\r\n result = json.loads(response.data.decode('utf-8'))\r\n self.assertEqual(response.status, '404 NOT FOUND')\r\n self.assertEqual(\r\n result['message'],\r\n 'Bucketlist Item with ID {} not found in the database. You have requested this URI '\\\r\n '[/api/v1/bucketlist/1/items/0] but did you mean /api/v1/bucketlist/<int:bucketlist_id>/items/'\\\r\n ' or /api/v1/bucketlist/<int:bucketlist_id> or /api/v1/bucketlist ?'.format(0)\r\n )", "def test_delete_item_successfully(self):\n self.client.post('/buckets',\n content_type='application/json',\n data=self.bucket, headers=self.header)\n self.client.post('/buckets/1/items',\n content_type='application/json',\n data=self.item, headers=self.header)\n response = self.client.delete('/buckets/1/items/1',\n headers=self.header)\n self.assertEquals(response.status_code, 200)\n self.assertIn('Item successfully deleted', response.data.decode())", "def test_delete_item(test_client):\n\n response = test_client.delete(GOOD_ITEM_URL)\n\n assert response.status_code == 204\n assert response.get_data() == b''", "def test_deletion_handles_no_bucket_found_by_id(self):\n with self.client:\n response = self.client.delete(\n '/bucketlists/1',\n headers=dict(Authorization='Bearer ' + self.get_user_token())\n )\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 404)\n self.assertTrue(data['status'] == 'failed')\n self.assertTrue(data['message'] == 'Bucket resource cannot be found')\n self.assertTrue(response.content_type == 'application/json')", "def test_request_for_deleting_bucket_has_integer_id(self):\n with self.client:\n response = self.client.delete(\n '/bucketlists/dsfgsdsg',\n headers=dict(Authorization='Bearer ' + self.get_user_token())\n )\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 400)\n self.assertTrue(data['status'] == 'failed')\n self.assertTrue(data['message'] == 'Please provide a valid Bucket Id')", "def test_delete(self):\n responses.add(\n responses.Response(\n method='DELETE',\n url='https://connection.keboola.com/v2/storage/buckets/1?force=False&async=False',\n json={}\n )\n )\n bucket_id = '1'\n deleted_detail = self.buckets.delete(bucket_id, asynchronous=False)\n assert deleted_detail is None", "def delete_bucketlist():\n pass", "def test_delete_item_using_delete(self):\n pass", "def test_delete_bucket(self):\n pass", "def test_delete_item_with_non_existing_bucket(self):\n self.client.post('/buckets',\n content_type='application/json',\n data=self.bucket, headers=self.header)\n self.client.post('/buckets/1/items',\n content_type='application/json',\n data=self.item, 
headers=self.header)\n response = self.client.delete('/buckets/2/items/1'\n , headers=self.header)\n self.assertEquals(response.status_code, 400)\n self.assertIn('Attempting to delete item on non existing bucket',\n response.data.decode())", "def delete_bucketlist_item(self, email, password, bucketlist_id, item_id):\r\n headers = self.authentication_headers(email=email, password=password)\r\n return self.client.delete(\r\n '/api/v1/bucketlist/{}/items/{}'.format(bucketlist_id, item_id),\r\n content_type=\"application/json\",\r\n headers=headers,\r\n follow_redirects=True\r\n )", "def delete(self, user, id):\n # Search for bucketlist\n print (id)\n bucketlist = Bucketlist.query.filter_by(\n id=id, created_by=user.email).first()\n\n # return 400 if bucketlist non exixtant or not belongs to this user\n if bucketlist is None:\n return 'Bucketlist not found', 202\n\n bucketlist.delete()\n\n return \"Successfully deleted bucketlist\", 200", "def test_vault_delete_vault_item(self):\n pass", "def test_delete_buckets(self):\n pass", "def __delitem__(self, key):\r\n self.client.delete(id=key, ignore=[404], **self.kwargs)", "def test_app_can_delete_list(self):\n delete_list=self.client.delete('/shoppinglists/nikes',\n headers={\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n self.assertEqual(delete_list.status_code,200)", "def test_delitem(self):\n with self.assertRaises(QiitaDBNotImplementedError):\n del self.tester['1.SKM7.640188']", "def test_shoppingitem_deletion(self):\n # register and login a user\n self.app.post('/register', data=self.user_reg_details)\n self.app.post('/login', data=self.user_login_details)\n # create a shopping list\n self.shopping_class_obj.create_list(\n 'Christmass', 'maina@gmail.com')\n # create an item\n self.item_class_obj.add_item(\n 'Christmass', 'Bread', 'maina@gmail.com')\n # make a post request with the delete name\n res = self.app.post(\n '/delete-item', data={'list_name': 'Christmass', 'item_name': 'Bread'})\n self.assertEqual(res.status_code, 200)\n self.item_class_obj.delete_item(\n 'Bread', 'maina@gmail.com', 'Christmass')\n # check if delete was successful\n self.assertIn(\"Successfuly deleted item \", str(res.data))", "def test_shoppinglist_deletion(self):\n # register and login a user\n self.app.post('/register', data=self.user_reg_details)\n self.app.post('/login', data=self.user_login_details)\n # create a shopping list\n self.shopping_class_obj.create_list(\n 'Christmass', 'maina@gmail.com')\n # make a post request with the delete name\n res = self.app.post(\n '/delete-list', data={'list_name': 'Christmass'})\n self.assertEqual(res.status_code, 200)\n self.shopping_class_obj.delete_list(\n 'Christmass', 'maina@gmail.com')\n # check if delete was successful by looking for the deleted name\n self.assertIn(\"Christmass\", str(res.data))", "def test_delete(client):\n rv = delete(client, 'Michael')\n assert json.loads(rv.data.decode())['code'] == 0\n assert json.loads(rv.data.decode())['owner'] == 'Michael'", "def test_DELETE3(self):\n r = requests.delete(self.address + \"/cars/42\")\n self.assertEqual(r.status_code, 400)", "def test_delete_single(single_bucket): # pylint: disable=redefined-outer-name\n single_bucket.delete(\"key 1\")\n\n assert single_bucket.is_empty() is True", "def _bucket_delitem(self, j, k):\n pass", "def test_deleteorganizations_item(self):\n pass", "def delete_item(id):\n return '', 201", "def test_delete_list(self):\n r = main.List.connection()\n main.List.add(r, \"ToDo\", 1, \"Buy apples\", 2, \"20.05.2015\")\n 
main.List.delete(r, \"ToDo\", \"all\")\n task = r.get(\"ToDo\")\n self.assertFalse(task, \"Deleting task list failed.\")", "def deleteItem(list,item):\n print \"I deleted this item:\", item\n list.remove(item)", "def test_delete(self):\n package = make_package()\n self.storage.upload(package, StringIO())\n self.storage.delete(package)\n keys = list(self.bucket.list())\n self.assertEqual(len(keys), 0)", "def test_put_bucketlist_item(self):\r\n data = {\"name\": \"bucketlist item name\", \"completed\": \"true\"}\r\n email = \"test@test.com\"\r\n _pword = \"test\"\r\n user = User.query.filter_by(email=email).first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=1).first()\r\n self.assertNotEqual(item.name, \"bucketlist item name\")\r\n self.assertFalse(item.completed)\r\n\r\n response = self.put_bucketlist_item(email, _pword, bucketlist.id, 1, data)\r\n result = json.loads(response.data.decode('utf-8'))\r\n item2 = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=1).first()\r\n self.assertEqual(response.status, '201 CREATED')\r\n self.assertEqual(item2.name, \"bucketlist item name\")\r\n self.assertTrue(item2.completed)", "def test_delete_item_incorrect_id(test_client):\n\n response = test_client.delete(GOOD_ITEM_URL)\n\n data = json.loads(response.get_data())\n\n assert response.status_code == 404\n assert data['error'] == app.NOT_FOUND", "def test_deletehardwares_item(self):\n pass", "def _delete(self, item):\n self.cv.delete(item)", "def test_get_bucketlist_items(self):\n resp = self.client.post('/bucketlists',\n data=json.dumps(self.bucketlist),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n\n resp_item = self.client.post('/bucketlistitems/1/items',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n\n self.assertEqual(resp_item.status_code, 200)\n resp_item = self.client.get('/bucketlistitems/1/items', headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp_item.status_code, 200)", "def test_get_bucketlist_item_id(self):\n resp = self.client.post('/bucketlists',\n data=json.dumps(self.bucketlist),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n resp_item = self.client.post('/bucketlistitems/1/items',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n get_item = self.client.get('/bucketlistitems/1/items/1', headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)", "def test_delete(self):\n\n\t\titem_id = mock_item()[0]\n\t\tmodels.delete(item_id)\n\n\t\titem = models.item(item_id)\n\t\tself.assertIsNone(item)", "def test_delete_tag(self):\r\n\r\n with app.test_client() as client:\r\n resp = client.post(f\"/tags/{self.tag.id}/delete\", follow_redirects=True)\r\n html = resp.get_data(as_text=True)\r\n\r\n self.assertEqual(resp.status_code, 200)\r\n self.assertNotIn(\"Marvel\", html)", "def do_del_item(self, arg):\n try:\n del_item = arg[\"<list_name>\"]\n choice = arg[\"--choice\"]\n if choice == \"name\":\n del_item_str = \" \".join(del_item)\n print(del_item_str)\n elif choice == \"id\":\n del_item_str = int(\" 
\".join(del_item))\n print (del_item_str)\n app.ToDoApp.to_delete_item(del_item_str)\n print (\"Item deleted\")\n\n\n \n except ValueError as e:\n cprint((e), 'red')", "def delete(self, item):\n self._createAction(item, \"delete\")", "def test_07_datastore_delete(self, Mock):\r\n html_request = FakeRequest(json.dumps({}), 200,\r\n {'content-type': 'application/json'})\r\n\r\n Mock.return_value = html_request\r\n with self.flask_app.test_request_context('/'):\r\n out = self.ckan.datastore_delete(name='task',\r\n resource_id=self.task_resource_id)\r\n err_msg = \"It should return True\"\r\n assert out is True, err_msg\r\n # Check the error\r\n Mock.return_value = self.server_error\r\n try:\r\n self.ckan.datastore_delete(name='task',\r\n resource_id=self.task_resource_id)\r\n except Exception as out:\r\n type, msg, status_code = out.args\r\n assert \"Server Error\" in msg, msg\r\n assert 500 == status_code, status_code\r\n assert \"CKAN: the remote site failed! datastore_delete failed\" == type, type", "def test_creating_and_getting_a_bucketlist_for_authenticated_user(self):\n\n # test all bucketlists\n response = self.client.post(\n \"/bucketlists/\",\n data=dict(name='test_bucketlist'),\n headers={'Authorization': self.user_token}\n )\n bucketlist = json.loads(response.data)\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(bucketlist[\"name\"], 'test_bucketlist')\n\n # test single bucketlist\n self.bucketlist_id = bucketlist[\"bucketlist_id\"]\n single_bucketlist = self.client.get(\n \"/bucketlists/\" + str(self.bucketlist_id) + \"\",\n headers={'Authorization': self.user_token}\n )\n\n one_bucketlist = json.loads(single_bucketlist.data)\n\n self.assertEqual(single_bucketlist.status_code, 200)\n self.assertEqual(one_bucketlist[\"name\"], 'test_bucketlist')\n\n # test all items in bucketlist\n item = self.client.post(\n \"/bucketlists/\" + str(self.bucketlist_id) + \"/items/\",\n data=dict(name=\"test_item\"),\n headers={'Authorization': self.user_token}\n )\n\n one_item = json.loads(item.data)\n\n self.assertEqual(item.status_code, 200)\n self.assertEqual(one_item[\"name\"], 'test_item')\n\n # test single item in bucketlist\n self.item_id = one_item[\"item_id\"]\n single_item = self.client.get(\n \"/bucketlists/\" + str(self.bucketlist_id) + \"/items/\" + str(self.item_id) + \"\",\n headers={'Authorization': self.user_token}\n )\n\n created_item = json.loads(single_item.data)\n\n self.assertEqual(single_item.status_code, 200)\n self.assertEqual(created_item[\"name\"], 'test_item')\n\n # test for deletion of bucketlist\n second_bucketlist = self.client.post(\n \"/bucketlists/\",\n data=dict(name='second_bucketlist'),\n headers={'Authorization': self.user_token}\n )\n\n bucketlist_two = json.loads(second_bucketlist.data)\n\n self.assertEqual(second_bucketlist.status_code, 200)\n self.assertEqual(bucketlist_two[\"name\"], 'second_bucketlist')\n\n delete_response = self.client.delete(\n \"/bucketlists/\" + str(bucketlist_two[\"bucketlist_id\"]) + \"\",\n headers={'Authorization': self.user_token}\n )\n\n deletion = json.loads(delete_response.data)\n\n self.assertEqual(delete_response.status_code, 200)\n self.assertEqual(deletion[\"message\"], \"Deleted\")\n\n # test for deletion of an item in bucketlist\n delete_item = self.client.delete(\n \"/bucketlists/\" + str(bucketlist[\"bucketlist_id\"]) + \"/items/\" + str(one_item[\"item_id\"]) + \"\",\n headers={'Authorization': self.user_token}\n )\n\n item_deletion = json.loads(delete_item.data)\n\n 
self.assertEqual(delete_item.status_code, 200)\n self.assertEqual(item_deletion[\"message\"], \"Deleted\")\n\n # test for updating of bucketlist\n self.bucketlist_id = bucketlist[\"bucketlist_id\"]\n bucketlist_update = self.client.put(\n \"/bucketlists/\" + str(self.bucketlist_id) + \"\",\n data=dict(name='bucketlist_test'),\n headers={'Authorization': self.user_token}\n )\n\n updated_bucketlist = json.loads(bucketlist_update.data)\n\n self.assertEqual(bucketlist_update.status_code, 200)\n self.assertEqual(updated_bucketlist[\"name\"], 'bucketlist_test')\n\n # test update of item in bucketlist\n item = self.client.post(\n \"/bucketlists/\" + str(self.bucketlist_id) + \"/items/\",\n data=dict(name=\"test_item\"),\n headers={'Authorization': self.user_token}\n )\n\n one_item = json.loads(item.data)\n\n item_update = self.client.put(\n \"/bucketlists/\" + str(self.bucketlist_id) + \"/items/\"+ str(one_item[\"item_id\"]) + \"\",\n data=dict(name=\"item_test\"),\n headers={'Authorization': self.user_token}\n )\n\n updated_item = json.loads(item_update.data)\n\n self.assertEqual(item_update.status_code, 200)\n self.assertEqual(updated_item[\"name\"], 'item_test')", "def delete_item(request):\n if request.json_body[u'type'] == u'post':\n if DBSession.query(Post).filter(Post.name==request.json_body[u'name']).delete() == 1:\n return {\"deletion_status\":\"success\"}\n import ipdb; impdb.set_trace()\n return {\"deletion_status\":\"error\"}", "def delete_item(id: str):\n db.delete(id, kind=endpoint_model)\n return {\"result\": \"ok\"}", "def test_api_edit_bucketlist(self):\n res_post = self.client().post('/bucketlist', data={'name': 'Wake up, Eat, Code, Sleep & Repeat'})\n self.assertEqual(res_post.status_code, 201)\n res_post_in_json = json.loads(res_post.data.decode('UTF-8').replace(\"'\", \"\\\"\"))\n id = res_post_in_json['id']\n res_put = self.client().put(\n f'bucketlist/{id}',\n data={\n 'name': \"Don't forget to exercise\"\n }\n )\n self.assertEqual(res_put.status_code, 200)\n res = self.client().get(f'/bucketlist/{id}')\n self.assertIn(\"exercise\", str(res.data))", "def cfDel(self, key, item):\n params = [key, item]\n\n return self.execute_command(self.CF_DEL, *params)", "def test_delete(self):\n client = RestClient(host=self.host, username='')\n rest_url = 'some/url/'\n \n # Mock good response\n with responses.RequestsMock() as rsps:\n rsps.add(responses.DELETE, f'{self.host}/{rest_url}', status=200,\n json={'value':\"good!\"})\n r = client.delete(rest_url)", "def test_delitem(self):\n with self.assertRaises(QiitaDBNotImplementedError):\n del self.tester['pcr_primers']", "def test_delete(self):\n pass", "def test_delete_nveto_pmt_item(self):\n pass", "def POST(self, req):\n def object_key_iter(elem):\n for obj in elem.iterchildren('Object'):\n key = obj.find('./Key').text\n if not key:\n raise UserKeyMustBeSpecified()\n version = obj.find('./VersionId')\n if version is not None:\n version = version.text\n\n yield key, version\n\n max_body_size = min(\n # FWIW, AWS limits multideletes to 1000 keys, and swift limits\n # object names to 1024 bytes (by default). 
Add a factor of two to\n # allow some slop.\n 2 * self.conf.max_multi_delete_objects * MAX_OBJECT_NAME_LENGTH,\n # But, don't let operators shoot themselves in the foot\n 10 * 1024 * 1024)\n\n try:\n xml = req.xml(max_body_size)\n if not xml:\n raise MissingRequestBodyError()\n\n req.check_md5(xml)\n elem = fromstring(xml, 'Delete', self.logger)\n\n quiet = elem.find('./Quiet')\n self.quiet = quiet is not None and quiet.text.lower() == 'true'\n\n delete_list = list(object_key_iter(elem))\n if len(delete_list) > self.conf.max_multi_delete_objects:\n raise MalformedXML()\n except (XMLSyntaxError, DocumentInvalid):\n raise MalformedXML()\n except ErrorResponse:\n raise\n except Exception as e:\n self.logger.error(e)\n raise\n\n elem = Element('DeleteResult')\n\n # check bucket existence\n try:\n req.get_response(self.app, 'HEAD')\n except AccessDenied as error:\n body = self._gen_error_body(error, elem, delete_list)\n return HTTPOk(body=body)\n\n if 'object_versioning' not in get_swift_info() and any(\n version not in ('null', None)\n for _key, version in delete_list):\n raise S3NotImplemented()\n\n def do_delete(base_req, key, version):\n req = copy.copy(base_req)\n req.environ = copy.copy(base_req.environ)\n req.object_name = str_to_wsgi(key)\n if version:\n req.params = {'version-id': version, 'symlink': 'get'}\n\n try:\n try:\n query = req.gen_multipart_manifest_delete_query(\n self.app, version=version)\n except NoSuchKey:\n query = {}\n if version:\n query['version-id'] = version\n query['symlink'] = 'get'\n\n resp = req.get_response(self.app, method='DELETE', query=query,\n headers={'Accept': 'application/json'})\n # If async segment cleanup is available, we expect to get\n # back a 204; otherwise, the delete is synchronous and we\n # have to read the response to actually do the SLO delete\n if query.get('multipart-manifest') and \\\n resp.status_int != HTTP_NO_CONTENT:\n try:\n delete_result = json.loads(resp.body)\n if delete_result['Errors']:\n # NB: bulk includes 404s in \"Number Not Found\",\n # not \"Errors\"\n msg_parts = [delete_result['Response Status']]\n msg_parts.extend(\n '%s: %s' % (obj, status)\n for obj, status in delete_result['Errors'])\n return key, {'code': 'SLODeleteError',\n 'message': '\\n'.join(msg_parts)}\n # else, all good\n except (ValueError, TypeError, KeyError):\n # Logs get all the gory details\n self.logger.exception(\n 'Could not parse SLO delete response (%s): %s',\n resp.status, resp.body)\n # Client gets something more generic\n return key, {'code': 'SLODeleteError',\n 'message': 'Unexpected swift response'}\n except NoSuchKey:\n pass\n except ErrorResponse as e:\n return key, {'code': e.__class__.__name__, 'message': e._msg}\n except Exception:\n self.logger.exception(\n 'Unexpected Error handling DELETE of %r %r' % (\n req.container_name, key))\n return key, {'code': 'Server Error', 'message': 'Server Error'}\n\n return key, None\n\n with StreamingPile(self.conf.multi_delete_concurrency) as pile:\n for key, err in pile.asyncstarmap(do_delete, (\n (req, key, version) for key, version in delete_list)):\n if err:\n error = SubElement(elem, 'Error')\n SubElement(error, 'Key').text = key\n SubElement(error, 'Code').text = err['code']\n SubElement(error, 'Message').text = err['message']\n elif not self.quiet:\n deleted = SubElement(elem, 'Deleted')\n SubElement(deleted, 'Key').text = key\n\n body = tostring(elem)\n\n return HTTPOk(body=body)", "def __chore_delete(self, db):\n delete_chore = {\"id\": 1,\n \"worker_id\": 2}\n\n response = None\n while 
response is None:\n response = self.remove_api(body=json.dumps(delete_chore)).body\n\n # Test that response is success\n expected = {\"success\": True}\n self.assertEqual(response, json.dumps(expected))\n\n # Test that database contains updated chore info\n # chore_in_db = db.query(Chore).filter(Chore.id == delete_chore[\"id\"]).one()\n # self.assertIsNone(chore_in_db)", "def test_s3_delete(self):\n keys = [self.bucket + '/another_directory/text2.txt',\n self.bucket + '/text1.txt',\n self.bucket + '/another_directory/']\n tasks = []\n for key in keys:\n tasks.append(FileInfo(\n src=key, src_type='s3',\n dest_type='local', operation_name='delete',\n size=0, client=self.client,\n source_client=self.source_client\n ))\n response = self.client.list_objects(Bucket=self.bucket)\n self.assertEqual(len(response.get('Contents', [])), 3)\n self.s3_handler.call(tasks)\n response = self.client.list_objects(Bucket=self.bucket)\n self.assertEqual(len(response.get('Contents', [])), 0)", "def test_delete(self):\n self.request.access.allow_delete = [\"everyone\"]\n pkg = make_package()\n key = self.db.redis_key(pkg.filename)\n self.redis[key] = \"foobar\"\n self.db.delete(pkg)\n val = self.redis.get(key)\n self.assertIsNone(val)\n count = self.redis.scard(self.db.redis_set)\n self.assertEqual(count, 0)\n self.storage.delete.assert_called_with(pkg)", "def delete_bucket(Bucket=None):\n pass", "def test_delete():\n sample_uuid = get_sample_id()\n response = requests.delete(f'http://localhost:5000/api/persons/{sample_uuid}')\n\n assert response.status_code == 200", "def test_DELETE4(self):\n r = requests.delete(self.address + \"/car/\")\n self.assertEqual(r.status_code, 400)", "def test_items_delete(patch_mongo):\n # create a user, first\n response = client.put(\"/user\",\n json={\"name\": \"John\"})\n assert response.status_code == status.HTTP_201_CREATED\n\n response = client.get(\"/users\")\n assert response.status_code == status.HTTP_200_OK\n assert len(response.json()) == 1\n\n item = {\n \"content\": \"lorem ipsum\",\n \"priority\": \"high\",\n \"status\": \"backlog\",\n \"users\": [\"John\"],\n }\n response = client.post(\"/item\", json=item)\n assert response.status_code == status.HTTP_200_OK\n\n # get the items back enriched with an ID\n response = client.get(\"/items\")\n assert response.status_code == status.HTTP_200_OK\n item_list = response.json()\n\n # delete the first item\n response = client.delete(\"/items/\" + item_list[0][\"item_id\"])\n assert response.status_code == status.HTTP_200_OK\n\n # check if the item has been deleted\n response = client.get(\"/items\")\n assert response.status_code == status.HTTP_200_OK\n item_list = response.json()\n assert not item_list", "def testDelete(self):\n response = requests.delete(url=self.url)\n headers = response.headers\n json_data = response.json()\n\n self.assertTrue(self.place == storage.get(Place, self.place_id))\n self.assertTrue(self.user == storage.get(User, self.user_id))\n self.assertTrue(self.city == storage.get(City, self.city_id))\n self.assertTrue(self.state == storage.get(State, self.state_id))\n self.assertEqual(response.status_code, 200, WRONG_STATUS_CODE_MSG)\n self.assertEqual(\n headers['Content-Type'], 'application/json', WRONG_TYPE_RETURN_MSG)\n self.assertEqual(len(json_data), 0)\n storage.reload()\n self.assertIsNone(storage.get(Place, self.place_id))", "def test_delete(self):\n self.request.access.allow_delete = [\"everyone\"]\n pkg = make_package(factory=DynamoPackage)\n self._save_pkgs(pkg)\n self.db.delete(pkg)\n count = 
self.engine.scan(DynamoPackage).count()\n self.assertEqual(count, 0)\n count = self.engine.scan(PackageSummary).count()\n self.assertEqual(count, 0)\n self.storage.delete.assert_called_with(pkg)", "def test_put_item_wrong_id(self):\r\n data = {\"name\": \"bucketlist item name\", \"completed\": \"true\"}\r\n email = \"test@test.com\"\r\n _pword = \"test\"\r\n user = User.query.filter_by(email=email).first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=0).first()\r\n self.assertFalse(item)\r\n\r\n response = self.put_bucketlist_item(email, _pword, bucketlist.id, 0, data)\r\n result = json.loads(response.data.decode('utf-8'))\r\n self.assertEqual(response.status, '404 NOT FOUND')\r\n self.assertEqual(\r\n result['message'],\r\n 'Bucketlist Item with ID {} not found in the database. You have requested this URI '\\\r\n '[/api/v1/bucketlist/1/items/0] but did you mean /api/v1/bucketlist/<int:bucketlist_id>/items/'\\\r\n ' or /api/v1/bucketlist/<int:bucketlist_id> or /api/v1/bucketlist ?'.format(0)\r\n )", "def _delete(self, table, _id, return_item=False):\n data = {\"Key\": _id, \"ReturnValues\": \"ALL_OLD\" if return_item else \"NONE\"}\n\n return self._response_handler(table, \"delete_item\", data)", "def test_delete_entity(self):\n\n storage = StringIO.StringIO()\n c = pycurl.Curl()\n c.setopt(c.URL,\"http://127.0.0.1:8090/compute/this_is_bilel\")\n c.setopt(c.HTTPHEADER, ['Content-Type: application/occi+json', 'Accept: application/occi+json'])\n c.setopt(c.CUSTOMREQUEST, 'DELETE')\n c.setopt(c.WRITEFUNCTION, storage.write)\n c.perform()\n content = storage.getvalue()\n print \" ===== Body content =====\\n \" + content + \" ==========\\n\"", "def test_edit_bucketlist(self):\n post_data = self.post_a_bucket()\n self.assertEqual(post_data.status_code, 201)\n result_of_put_method = self.client().put(\n '/bucketlists/1',\n headers=dict(Authorization='Bearer '\n + self.token()\n ),\n data={\n \"name\": \"The seasons will be, summer winter and autumn\"\n })\n self.assertEqual(result_of_put_method.status_code, 201)\n result_of_get_method = self.client().get('/bucketlists/1',\n headers=dict(Authorization='Bearer '\n + self.token())\n )\n self.assertIn('The seasons will b', str(result_of_get_method.data))", "def delete(self, bucket: str, object_name: str):\n raise NotImplementedError()", "def delete():", "def deleteItem(request, itemid):\n try:\n item = ItemSerializer(Item.objects.get(id=itemid))\n Item.objects.get(id=itemid).delete()\n return Response(item.data)\n\n except Item.DoesNotExist:\n fail = {\n \"item\":\"item does not exist\"\n }\n return JsonResponse(fail)", "def test_delete(self):\n\n value = self.instance.delete()\n self.client.delete_instance.assert_called_once_with('nginx')\n self.assertEqual(value, self.client.delete_instance.return_value)", "def __delitem__(self, key):\n\n bucket_key = self.key_for_bucket(key)\n del self.buckets[bucket_key][key]\n\n if not self.buckets[bucket_key]:\n del self.buckets[bucket_key]", "def test_add_bucketlist_items(self):\r\n email = \"test@test.com\"\r\n _pword = \"test\"\r\n user = User.query.filter_by(email=email).first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item_no = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id).count()\r\n response = self.add_bucketlist_item(email, _pword, bucketlist.id, \"bucketlist item name\")\r\n result = 
json.loads(response.data.decode('utf-8'))\r\n self.assertEqual(response.status, '201 CREATED')\r\n self.assertEqual(result['message'], 'Bucket list item added')\r\n new_item_no = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id).count()\r\n self.assertLess(item_no, new_item_no)", "def test_client_can_do_delete_request(self):\n response = self.httpbin_4.test_requests_delete_method()\n self.assertEqual(response.request.method, 'DELETE')\n self.assertEqual(response.status_code, 200)", "def POST(self, req):\n def object_key_iter(elem):\n for obj in elem.iterchildren('Object'):\n key = obj.find('./Key').text\n if not key:\n raise UserKeyMustBeSpecified()\n version = obj.find('./VersionId')\n if version is not None:\n version = version.text\n\n yield key, version\n\n try:\n xml = req.xml(MAX_MULTI_DELETE_BODY_SIZE, check_md5=True)\n elem = fromstring(xml, 'Delete')\n\n quiet = elem.find('./Quiet')\n if quiet is not None and quiet.text.lower() == 'true':\n self.quiet = True\n else:\n self.quiet = False\n\n delete_list = list(object_key_iter(elem))\n if len(delete_list) > CONF.max_multi_delete_objects:\n raise MalformedXML()\n except (XMLSyntaxError, DocumentInvalid):\n raise MalformedXML()\n except ErrorResponse:\n raise\n except Exception as e:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n LOGGER.error(e)\n raise exc_type, exc_value, exc_traceback\n\n elem = Element('DeleteResult')\n\n # check bucket existence\n try:\n req.get_response(self.app, 'HEAD')\n except AccessDenied as error:\n body = self._gen_error_body(error, elem, delete_list)\n return HTTPOk(body=body)\n\n for key, version in delete_list:\n if version is not None:\n # TODO: delete the specific version of the object\n raise OssNotImplemented()\n\n req.object_name = key\n\n try:\n query = req.gen_multipart_manifest_delete_query(self.app)\n req.get_response(self.app, method='DELETE', query=query)\n except NoSuchKey:\n pass\n except ErrorResponse as e:\n error = SubElement(elem, 'Error')\n SubElement(error, 'Key').text = key\n SubElement(error, 'Code').text = e.__class__.__name__\n SubElement(error, 'Message').text = e._msg\n continue\n\n if not self.quiet:\n deleted = SubElement(elem, 'Deleted')\n SubElement(deleted, 'Key').text = key\n\n body = tostring(elem)\n\n return HTTPOk(body=body)", "def delete(self, item):\r\n self.fetch()\r\n t = self.make_item_tuple(item)\r\n changed = False\r\n while t in self.data:\r\n self.data.remove(t)\r\n changed = True\r\n \r\n if changed:\r\n query_cache.set(self.iden, self.data)", "def test_delete(self):\n query = {\"id\":0}\n result = self.app.delete('/testParaDelete', query_string=query)\n self.assertEqual(result.status_code, 200)\n self.assertEqual(result.data, 'ok')", "def test_delete_request_by_owner(self):\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION=self.test_user2_token)\n response = client.post('/api/places/', self.restaurant_data, format='json')\n url = f\"/api/places/{response.data['id']}/\"\n\n response = client.delete(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)", "def remove_bucket_list_item(self, id, collection, item):\n if type(id) is not ObjectId:\n id = ObjectId(id)\n obj = getattr(self.db, collection)\n result = obj.update(\n {'_id': id},\n {'$pull': {'bucket_list': item}}\n )\n return result", "def delete(self, name):\n global items\n items = _Helper.all_item_except_searching_for(name)\n return {\"message\": f\"Item {name} deleted successfully\"}, 204", "def _delete(self, *args, **kwargs):\n return 
self._request('delete', *args, **kwargs)", "def test_delete_method(self):\n self.getPage('/blah', method='PUT')\n self.getPage('/', method='DELETE')\n self.assertStatus('204 No Content')\n self.assertHeader('Content-Type', 'application/json')", "def delete_item(self, id: str, user: User) -> bool:", "def testDeletingItem(self):\n\n data = \"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\"\n memcache.set('data', data)\n assert memcache.get('data') == data\n memcache.delete('data')\n assert memcache.get('data') == None", "def delete_object(Bucket=None, Key=None, MFA=None, VersionId=None, RequestPayer=None, BypassGovernanceRetention=None):\n pass", "def delete_item(category, name):\r\n item_key = course_key.make_usage_key(category, name)\r\n resp = self.client.delete(get_url('xblock_handler', item_key))\r\n self.assertEqual(resp.status_code, 204)\r\n _test_no_locations(self, resp, status_code=204, html=False)", "def delete(self, path):\n params = request.args.to_dict()\n if params.get(\"instances\"):\n int_list = params.get(\"instances\")\n return items_delete_response(path, int_list)\n abort(405)", "def test_create_bucketlist_item(self):\n resp = self.client.post('/bucketlists',\n data=json.dumps(self.bucketlist),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n response = self.client.get(\n \"/bucketlists/1\", headers={\n \"Authorization\": self.token})\n self.assertEqual(response.status_code, 200)\n resp_item = self.client.post('/bucketlistitems/1/items',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n result = json.loads(resp_item.data)\n self.assertEqual(result[\"message\"],\n \"Bucket list item added successfully.\")\n self.assertEqual(resp.status_code, 201)", "def do_delete(self, arg):\n \treturn False", "def delete(self):\r\n self.domain.delete_item(self)", "def test_DELETE(self):\n if not self.url:\n return\n response = self.client.delete(self.url, {}, format='json')\n self.assertIn(response.status_code, [status.HTTP_405_METHOD_NOT_ALLOWED,\n status.HTTP_401_UNAUTHORIZED])", "def test_delete_movie(self): # executive can delete movies\r\n res = self.client().delete('/movies/3/delete', headers=executive_producer)\r\n data = json.loads(res.data)\r\n\r\n #self.assertEqual(res.status_code, 200)\r\n #self.assertTrue(data[\"success\"])\r\n #self.assertTrue(data[\"deleted\"])\r", "def delete(self, *args, **kwargs):\n self.request(\"delete\", *args, **kwargs)", "def test_delete_user_list(mocker, list_type):\n patched_delete_task = mocker.patch(\"search.search_index_helpers.deindex_document\")\n user_list = UserListFactory.create(list_type=list_type)\n deindex_user_list(user_list)\n assert patched_delete_task.called is True\n assert patched_delete_task.call_args[0] == (\n gen_user_list_id(user_list),\n USER_LIST_TYPE,\n )", "def test_delete_cloud(self):\n pass", "def delete(self):\n ...", "def test_delete_item_all_fails(self):\n\n web.app.config['READONLY'] = False\n\n # Delete all items\n response = self.client.delete('/item/')\n self.assertEqual(response.status_code, 405)\n\n # Note: if this fails, all items have gone and rest of\n # tests will fail!" ]
[ "0.8505305", "0.83253217", "0.82858104", "0.8246607", "0.82087", "0.81654054", "0.8043401", "0.78209937", "0.77620775", "0.76105493", "0.7587545", "0.75202996", "0.7477461", "0.7458294", "0.73910975", "0.7342991", "0.73347974", "0.7244719", "0.7221748", "0.7206562", "0.7108436", "0.6908336", "0.68953", "0.6842576", "0.68179005", "0.67890906", "0.6782164", "0.6727874", "0.67037225", "0.6703348", "0.6695637", "0.66561705", "0.6655323", "0.6637443", "0.65918684", "0.65884095", "0.65827113", "0.6581214", "0.6578166", "0.6542448", "0.6509191", "0.6507228", "0.64767754", "0.64599663", "0.6424958", "0.6408491", "0.64072585", "0.63904834", "0.63745606", "0.63736194", "0.6358819", "0.6357077", "0.63569325", "0.63462436", "0.6343029", "0.63420016", "0.63363296", "0.6333739", "0.6327949", "0.6322139", "0.6321246", "0.63176274", "0.6309317", "0.63084537", "0.6298866", "0.6295812", "0.6293705", "0.62837654", "0.6270381", "0.62682", "0.6266848", "0.62493104", "0.62473094", "0.62469554", "0.6243917", "0.62068397", "0.6203928", "0.6201714", "0.62000775", "0.6199257", "0.61989725", "0.6195158", "0.61896", "0.61769193", "0.6174211", "0.61696595", "0.6165069", "0.61576056", "0.6155391", "0.61527", "0.61490893", "0.61489236", "0.6146255", "0.6145401", "0.61417437", "0.6141718", "0.6141116", "0.61350316", "0.61310565", "0.6126819" ]
0.8602903
0
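The document_score (0.8602903) and document_rank (0) above place the positive snippet at the top of the candidate list, while negative_scores aligns one-to-one with negatives. A small sketch of how these fields might be used to keep only hard negatives; the threshold is an arbitrary assumption:

def hard_negatives(row, floor=0.3):
    """Keep negatives that score above `floor` but below the positive's score."""
    pos_score = float(row["document_score"])
    return [
        neg
        for neg, score in zip(row["negatives"], row["negative_scores"])
        if floor < float(score) < pos_score
    ]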
Method tests the error raised when the endpoint for deleting a bucketlist item is given a wrong ID
def test_delete_item_wrong_id(self):
    email = "test@test.com"
    _pword = "test"
    user = User.query.filter_by(email=email).first()
    bucketlist = BucketList.query.filter_by(user_id=user.id, name="test bucketlist").first()
    item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=0).first()
    self.assertFalse(item)

    response = self.delete_bucketlist_item(email, _pword, bucketlist.id, 0)
    result = json.loads(response.data.decode('utf-8'))
    self.assertEqual(response.status, '404 NOT FOUND')
    self.assertEqual(
        result['message'],
        'Bucketlist Item with ID {} not found in the database. You have requested this URI '
        '[/api/v1/bucketlist/1/items/0] but did you mean /api/v1/bucketlist/<int:bucketlist_id>/items/'
        ' or /api/v1/bucketlist/<int:bucketlist_id> or /api/v1/bucketlist ?'.format(0)
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_deletion_handles_no_bucket_found_by_id(self):\n with self.client:\n response = self.client.delete(\n '/bucketlists/1',\n headers=dict(Authorization='Bearer ' + self.get_user_token())\n )\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 404)\n self.assertTrue(data['status'] == 'failed')\n self.assertTrue(data['message'] == 'Bucket resource cannot be found')\n self.assertTrue(response.content_type == 'application/json')", "def test_request_for_deleting_bucket_has_integer_id(self):\n with self.client:\n response = self.client.delete(\n '/bucketlists/dsfgsdsg',\n headers=dict(Authorization='Bearer ' + self.get_user_token())\n )\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 400)\n self.assertTrue(data['status'] == 'failed')\n self.assertTrue(data['message'] == 'Please provide a valid Bucket Id')", "def test_delete_item_incorrect_id(test_client):\n\n response = test_client.delete(GOOD_ITEM_URL)\n\n data = json.loads(response.get_data())\n\n assert response.status_code == 404\n assert data['error'] == app.NOT_FOUND", "def test_api_delete_bucketlist(self):\n\n res_post = self.client().post('/bucketlist', data={'name': \"Don't forget to exercise\"})\n self.assertEqual(res_post.status_code, 201)\n res_post_in_json = json.loads(res_post.data.decode('UTF-8'))\n id = res_post_in_json['id']\n res_delete = self.client().delete(f\"/bucketlist/{id}\")\n self.assertEqual(res_delete.status_code, 200)\n\n # should return 404 after delete the data\n res = self.client().get(f'/bucketlist/{id}')\n self.assertEqual(res.status_code, 404)", "def test_delete_bucketlistitem_by_id(self):\n resp = self.client.post('/bucketlists',\n data=json.dumps(self.bucketlist),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n resp_item = self.client.post('/bucketlistitems/1/items',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n delete_item = self.client.delete('/bucketlistitems/1/items/1',\n headers={\n \"Authorization\": self.token\n })\n self.assertEqual(delete_item.status_code, 204)", "def test_delete_bucketlist_item(self):\r\n email = \"test@test.com\"\r\n _pword = \"test\"\r\n user = User.query.filter_by(email=email).first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=1).first()\r\n self.assertTrue(item)\r\n\r\n response = self.delete_bucketlist_item(email, _pword, bucketlist.id, item.id)\r\n result = json.loads(response.data.decode('utf-8'))\r\n self.assertEqual(response.status, '200 OK')\r\n self.assertEqual(\r\n result['message'],\r\n 'Bucketlist Item with ID {} deleted'.format(item.id)\r\n )\r\n item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=1).first()\r\n self.assertFalse(item)", "def test_single_bucketlist_item_delete_with_invalid_token(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n\n res = self.client().post(\n '/api/v1/bucketlists/',\n headers=dict(Authorization=\"Bearer \" + access_token),\n data=self.bucketlist)\n\n # assert that the bucketlist is created\n self.assertEqual(res.status_code, 201)\n # get the response data in json format\n results = json.loads(res.data.decode())\n\n # 
create a bucketlist item by making a POST request and add it to the created bucketlist\n res = self.client().post(\n '/api/v1/bucketlists/{}/items/'.format(results['id']),\n headers=dict(Authorization=\"Bearer \" + access_token),\n data={\n \"name\": \"Eat fried crabs\"\n })\n self.assertEqual(res.status_code, 201)\n # get the json containing the created bucketlist item\n res_item = json.loads(res.data.decode())\n\n # delete the bucketlist item we just created\n res = self.client().delete(\n '/api/v1/bucketlists/{}/items/{}'.format(results['id'], res_item['id']),\n headers=dict(Authorization=access_token), )\n self.assertEqual(res.status_code, 401)\n self.assertIn('Invalid token format.', str(res.data))", "def test_delete_bucketlist_item(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n\n # create a bucketlist by making a POST request\n res = self.client().post(\n '/api/v1/bucketlists/',\n headers=dict(Authorization=\"Bearer \" + access_token),\n data=self.bucketlist)\n self.assertEqual(res.status_code, 201)\n # get the json with the bucketlist\n results = json.loads(res.data.decode())\n\n # create a bucketlist item by making a POST request and add it to the created bucketlist\n res = self.client().post(\n '/api/v1/bucketlists/{}/items/'.format(results['id']),\n headers=dict(Authorization=\"Bearer \" + access_token),\n data={\n \"name\": \"Eat fried crabs\"\n })\n self.assertEqual(res.status_code, 201)\n # get the json containing the created bucketlist item\n res_item = json.loads(res.data.decode())\n\n # delete the bucketlist item we just created\n res = self.client().delete(\n '/api/v1/bucketlists/{}/items/{}'.format(results['id'], res_item['id']),\n headers=dict(Authorization=\"Bearer \" + access_token), )\n self.assertEqual(res.status_code, 200)\n\n # Test to see if it exists, should return a 404\n result = self.client().get(\n '/api/v1/bucketlists/{}/items/1'.format(results['id']),\n headers=dict(Authorization=\"Bearer \" + access_token))\n self.assertEqual(result.status_code, 404)", "def test_delete_item_with_non_existing_bucket(self):\n self.client.post('/buckets',\n content_type='application/json',\n data=self.bucket, headers=self.header)\n self.client.post('/buckets/1/items',\n content_type='application/json',\n data=self.item, headers=self.header)\n response = self.client.delete('/buckets/2/items/1'\n , headers=self.header)\n self.assertEquals(response.status_code, 400)\n self.assertIn('Attempting to delete item on non existing bucket',\n response.data.decode())", "def test_delete_data_wrong_id(self):\n # get current ids\n list_current = [item['id'] for item in self.current_data]\n self.assertNotIn(10, list_current)\n\n response = self.client.delete(self.url + '10/')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=response.content.decode())", "def test_delete_car_invalid_id():\n response = client.delete(\"/11111\")\n assert response.status_code == STATUS_NOT_FOUND", "def test_put_item_wrong_id(self):\r\n data = {\"name\": \"bucketlist item name\", \"completed\": \"true\"}\r\n email = \"test@test.com\"\r\n _pword = \"test\"\r\n user = User.query.filter_by(email=email).first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=0).first()\r\n self.assertFalse(item)\r\n\r\n response = self.put_bucketlist_item(email, _pword, bucketlist.id, 0, data)\r\n result = 
json.loads(response.data.decode('utf-8'))\r\n self.assertEqual(response.status, '404 NOT FOUND')\r\n self.assertEqual(\r\n result['message'],\r\n 'Bucketlist Item with ID {} not found in the database. You have requested this URI '\\\r\n '[/api/v1/bucketlist/1/items/0] but did you mean /api/v1/bucketlist/<int:bucketlist_id>/items/'\\\r\n ' or /api/v1/bucketlist/<int:bucketlist_id> or /api/v1/bucketlist ?'.format(0)\r\n )", "def test_AlgorithmsIdHandler_DELETE_MalformedRequest(self):\n searchedId='xyz' + ' ' + '1'\n response = self.testapp.delete('/algorithms/' + searchedId, expect_errors=True)\n self.assertEqual(400, response.status_int, msg='Wrong answer code')\n self.assertEqual('application/json', response.content_type)\n self.assertIn('Malformed Data', response.normal_body.decode(encoding='UTF-8'))", "def test_delete(self):\n responses.add(\n responses.Response(\n method='DELETE',\n url='https://connection.keboola.com/v2/storage/buckets/1?force=False&async=False',\n json={}\n )\n )\n bucket_id = '1'\n deleted_detail = self.buckets.delete(bucket_id, asynchronous=False)\n assert deleted_detail is None", "def test_delete_bucketlist(self):\n post_data = self.post_a_bucket()\n self.assertEqual(post_data.status_code, 201)\n result_of_delete_method = self.client().delete('/bucketlists/1',\n headers=dict(Authorization='Bearer '\n + self.token())\n )\n self.assertEqual(result_of_delete_method.status_code, 200)\n response_after_removal = self.client().get('/bucketlists/1',\n headers=dict(Authorization='Bearer '\n + self.token())\n )\n self.assertEqual(response_after_removal.status_code, 400)", "def test_single_bucketlist_item_delete_with_no_auth_header(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n\n res = self.client().post(\n '/api/v1/bucketlists/',\n headers=dict(Authorization=\"Bearer \" + access_token),\n data={'name': 'Visit the Grand Canyon!'})\n self.assertEqual(res.status_code, 201)\n # get the bucketlist in json\n results = json.loads(res.data.decode())\n\n # create a bucketlist item by making a POST request and add it to the created bucketlist\n res = self.client().post(\n '/api/v1/bucketlists/{}/items/'.format(results['id']),\n headers=dict(Authorization=\"Bearer \" + access_token),\n data={\n \"name\": \"Eat fried crabs\"\n })\n self.assertEqual(res.status_code, 201)\n # get the json containing the created bucketlist item\n res_item = json.loads(res.data.decode())\n\n # delete the bucketlist item we just created\n res = self.client().delete(\n '/api/v1/bucketlists/{}/items/{}'.format(results['id'], res_item['id']),\n headers=dict(), )\n self.assertEqual(res.status_code, 401)\n self.assertIn('Header with key Authorization missing.', str(res.data))", "def test_delete_item_using_delete(self):\n pass", "def test_single_bucketlist_item_delete_with_empty_token(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n\n res = self.client().post(\n '/api/v1/bucketlists/',\n headers=dict(Authorization=\"Bearer \" + access_token),\n data=self.bucketlist)\n\n # assert that the bucketlist is created\n self.assertEqual(res.status_code, 201)\n # get the response data in json format\n results = json.loads(res.data.decode())\n\n # create a bucketlist item by making a POST request and add it to the created bucketlist\n res = self.client().post(\n '/api/v1/bucketlists/{}/items/'.format(results['id']),\n headers=dict(Authorization=\"Bearer \" + 
access_token),\n data={\n \"name\": \"Eat fried crabs\"\n })\n self.assertEqual(res.status_code, 201)\n # get the json containing the created bucketlist item\n res_item = json.loads(res.data.decode())\n\n # delete the bucketlist item we just created\n res = self.client().delete(\n '/api/v1/bucketlists/{}/items/{}'.format(results['id'], res_item['id']),\n headers=dict(Authorization=\"\"), )\n self.assertEqual(res.status_code, 401)\n self.assertIn('Token not provided in the header with key Authorization.', str(res.data))", "def test_delete_reusableitem_api_fails(self):\n self.client.force_authenticate(user=self.user_1)\n\n response = self.client.delete(get_reusable_item_1_url(self))\n\n self.assertEqual(response.status_code, status.HTTP_500_INTERNAL_SERVER_ERROR)", "def test_delete_item_successfully(self):\n self.client.post('/buckets',\n content_type='application/json',\n data=self.bucket, headers=self.header)\n self.client.post('/buckets/1/items',\n content_type='application/json',\n data=self.item, headers=self.header)\n response = self.client.delete('/buckets/1/items/1',\n headers=self.header)\n self.assertEquals(response.status_code, 200)\n self.assertIn('Item successfully deleted', response.data.decode())", "def test_delete_unkonwn_id_error(self):\n with self.assertRaises(QiitaDBUnknownIDError):\n SampleTemplate.delete(5)", "def test_bucket_is_deleted(self):\n with self.client:\n # Get an auth token\n token = self.get_user_token()\n response = self.client.post(\n '/bucketlists',\n data=json.dumps(dict(name='Travel')),\n headers=dict(Authorization='Bearer ' + token),\n content_type='application/json'\n )\n # Test Bucket creation\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 201)\n self.assertTrue(data['status'], 'success')\n self.assertTrue(data['name'], 'Travel')\n # Delete the created Bucket\n res = self.client.delete(\n '/bucketlists/1',\n headers=dict(Authorization='Bearer ' + token)\n )\n data = json.loads(res.data.decode())\n self.assertEqual(res.status_code, 200)\n self.assertTrue(data['status'] == 'success')\n self.assertTrue(data['message'] == 'Bucket Deleted successfully')\n self.assertTrue(res.content_type == 'application/json')", "def test_delitem(self):\n with self.assertRaises(QiitaDBNotImplementedError):\n del self.tester['1.SKM7.640188']", "def test_delete_bucket(self):\n pass", "def test_delete_with_bad_id(self):\n resp = self.api_client.delete('/api/metadata/tracks/100000/')\n data = json.loads(resp.content)\n\n # Ensure the request filed with a 404, and an error message is returned\n self.assertEqual(resp.status_code, 404)\n self.assertEqual(data['detail'], u'The record could not be found.')", "def test_delete_item(test_client):\n\n response = test_client.delete(GOOD_ITEM_URL)\n\n assert response.status_code == 204\n assert response.get_data() == b''", "def test_DELETE3(self):\n r = requests.delete(self.address + \"/cars/42\")\n self.assertEqual(r.status_code, 400)", "def test_delete_car_valid_id():\n initial_number_of_cars = len(client.get(\"/\").json())\n\n response = client.delete(\"/1\")\n assert response.status_code == STATUS_NO_CONTENT\n\n assert len(client.get(\"/\").json()) == initial_number_of_cars - 1", "def test_vault_delete_vault_item(self):\n pass", "def test_id_of_bucket_to_be_edited_does_not_exist(self):\n with self.client:\n # Get an auth token\n token = self.get_user_token()\n # Update the bucket name\n res = self.client.put(\n '/bucketlists/1',\n headers=dict(Authorization='Bearer ' + token),\n 
data=json.dumps(dict(name='Adventure')),\n content_type='application/json'\n )\n data = json.loads(res.data.decode())\n self.assertEqual(res.status_code, 404)\n self.assertTrue(res.content_type == 'application/json')\n self.assertTrue(data['status'] == 'failed')\n self.assertTrue(data['message'] == 'The Bucket with Id 1 does not exist')", "def delete(self, user, id):\n # Search for bucketlist\n print (id)\n bucketlist = Bucketlist.query.filter_by(\n id=id, created_by=user.email).first()\n\n # return 400 if bucketlist non exixtant or not belongs to this user\n if bucketlist is None:\n return 'Bucketlist not found', 202\n\n bucketlist.delete()\n\n return \"Successfully deleted bucketlist\", 200", "def test_id_of_bucket_to_be_edited_is_invalid(self):\n with self.client:\n # Get an auth token\n token = self.get_user_token()\n # Update the bucket name\n res = self.client.put(\n '/bucketlists/bucketid',\n headers=dict(Authorization='Bearer ' + token),\n data=json.dumps(dict(name='Adventure')),\n content_type='application/json'\n )\n data = json.loads(res.data.decode())\n self.assertEqual(res.status_code, 400)\n self.assertTrue(res.content_type == 'application/json')\n self.assertTrue(data['status'] == 'failed')\n self.assertTrue(data['message'] == 'Please provide a valid Bucket Id')", "def delete_item(id):\n return '', 201", "def test_delete_unexisting_product(self):\n response=self.delete_unexisting_products()\n result = json.loads(response.data.decode('utf-8'))\n self.assertEqual(result['Error'],'Product Not found')\n self.assertEqual(response.status_code, 400)", "def test_delete_unkonwn_id_error(self):\n with self.assertRaises(QiitaDBUnknownIDError):\n PrepTemplate.delete(5)", "def test_request_for_a_bucket_has_integer_id(self):\n with self.client:\n response = self.client.get(\n '/bucketlists/dsfgsdsg',\n headers=dict(Authorization='Bearer ' + self.get_user_token())\n )\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 400)\n self.assertTrue(data['status'] == 'failed')\n self.assertTrue(data['message'] == 'Please provide a valid Bucket Id')", "def test_handle_delete_lookup_error(self):\n self.db.query.return_value = []\n self.assertTupleEqual(self.testcommand.handle(\"team delete brs\", user),\n (self.testcommand.lookup_error, 200))\n self.db.delete.assert_not_called()\n self.gh.org_delete_team.assert_not_called()", "def test_delete(client):\n rv = delete(client, 'Michael')\n assert json.loads(rv.data.decode())['code'] == 0\n assert json.loads(rv.data.decode())['owner'] == 'Michael'", "def test_api_get_bucketlist_by_id_not_exist(self):\n res = self.client().get(f\"/bucketlist/99\")\n self.assertEqual(res.status_code, 404)", "def test_delete_buckets(self):\n pass", "def test_delete_not_found(self):\n resp = self.client.delete(\n \"/tracking?repo=not_found1&branch=not_found1\", content_type=\"application/json\", headers=self.auth\n )\n resp_dict = json.loads(resp.data)\n self.assertIn(\"code\", resp_dict, msg=\"Error in data format return\")\n self.assertEqual(ResponseCode.DELETE_DB_NOT_FOUND, resp_dict.get(\"code\"), msg=\"Error in status code return\")", "def test_DELETE4(self):\n r = requests.delete(self.address + \"/car/\")\n self.assertEqual(r.status_code, 400)", "def test_delete(self):\n\n\t\titem_id = mock_item()[0]\n\t\tmodels.delete(item_id)\n\n\t\titem = models.item(item_id)\n\t\tself.assertIsNone(item)", "def test_invalid_token_delete(self):\n with self.client:\n id = self.get_id()\n response = self.client.delete(\n 'api/v1/meals/{}'.format(id), 
headers=({\"token\": \"12345\"}))\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 401)\n self.assertEqual(data.get('message'), \"Invalid token.Please login\")", "def test_delete_error(self):\n with self.assertRaises(QiitaDBExecutionError):\n PrepTemplate.delete(1)", "def test_delete_error(self):\n r = mock.Mock(spec=requests.Response)\n r.status_code = 201\n r.content = '{\"it\\'s all\": \"ok\"}'\n\n f = Fitbit(**self.client_kwargs)\n f.client._request = lambda *args, **kwargs: r\n self.assertRaises(exceptions.DeleteError, f.delete_activities, 12345)", "def test_no_bucket_returned_by_given_id(self):\n with self.client:\n token = self.get_user_token()\n\n response = self.client.get(\n '/bucketlists/1',\n headers=dict(Authorization='Bearer ' + token)\n )\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 200)\n self.assertTrue(data['status'] == 'success')\n self.assertIsInstance(data['bucket'], list)\n self.assertTrue(response.content_type == 'application/json')", "def test_wrong_admin_delete(self):\n\n with self.client:\n self.get_meals()\n id = 100\n token = self.get_token()\n response = self.client.delete(\n 'api/v1/meals/{}'.format(id), headers=({\"token\": token}))\n data = json.loads(response.data.decode())\n self.assertEqual(data.get('message'), \"Meal not found\")\n self.assertEqual(response.status_code, 400)", "def test_AlgorithmsIdHandler_DELETE_NotFound(self):\n searched_id = 'xyz1'\n right_list = []\n create_test_algorithm_list(right_list, 101)\n documents = []\n create_test_documents_list(right_list, documents, 101)\n index = search.Index(name=search_algorithm._INDEX_STRING)\n index.put(documents)\n # end of preparing data\n self.assertIsNone(index.get(searched_id), msg='Algorithm is there but should not be')\n response = self.testapp.delete('/algorithms/' + searched_id)\n self.assertEqual(200, response.status_int, msg='Wrong return code')\n self.assertIsNone(index.get(searched_id), msg='Algorithm is still there')", "def test_delete(self):\n pass", "def test_delete_actor_404(self):\r\n res = self.client().delete('/actors/2/delete', headers=executive_producer)\r\n data = json.loads(res.data)\r\n\r\n #self.assertEqual(res.status_code, 404)\r\n #self.assertFalse(data[\"success\"])\r\n #self.assertEqual(data[\"message\"], \"resource not found\")\r", "def test_404_delete_question(self):\n\n res = self.client().get('/qustions/100')\n\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['error'], 404)\n self.assertEqual(data['message'], \"resource not found\")", "def test_delete_failure_http_error(self, acme_id):\n\n api_url = self.get_acme_account_url(acme_id)\n\n # Setup the mocked response\n responses.add(responses.DELETE, api_url, status=400)\n\n acme = ACMEAccount(client=self.client)\n\n self.assertRaises(HTTPError, acme.delete, acme_id)", "def test_delete_idea_invalid_id(self):\n self.newidea()\n rv = self.app.post('/delete/idea/35',\n data=dict(confirm=True),\n follow_redirects=True)\n self.assertIn(b'Idea does not exist', rv.data)", "def __delitem__(self, key):\r\n self.client.delete(id=key, ignore=[404], **self.kwargs)", "def test_employee_deletion(self):\n res = self.client().delete(service_url_emp, json={\"id_emp\": 1})\n self.assertEqual(res.status_code, 204)\n # Test to see if it exists, should return a 400\n result = self.client().get(service_url_emp+'/1')\n self.assertEqual(result.status_code, 400)", "def 
test_delete_empty(empty_bucket): # pylint: disable=redefined-outer-name\n with pytest.raises(KeyError):\n empty_bucket.delete(\"key 1\")", "def test_validate_delete(client):\n response = client.delete('/user/1')\n assert response.status_code == 400\n assert response.json['message'] == INVALID_ACTION_MESSAGE", "def test_update_item_incorrect_id(test_client, item):\n\n response = test_client.put(BAD_ITEM_URL,\n data=json.dumps(item),\n content_type='application/json')\n\n data = json.loads(response.get_data())\n\n assert response.status_code == 404\n assert data['error'] == app.NOT_FOUND", "def test_delitem(self):\n with self.assertRaises(QiitaDBNotImplementedError):\n del self.tester['pcr_primers']", "def test_delete_a_todo(self):\n # hit the API endpoint\n response = self.delete_a_todo(1)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n # test with invalid data\n response = self.delete_a_todo(100)\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_delete_issue_by_unauthenticated_user_fails(self):\n response = self.client.delete(\n self.url,\n )\n response_json = response.get_json()\n self.assertEqual(response.status_code, 401)\n self.assertEqual(response_json[\"SubCode\"], \"InvalidToken\")", "def delete_fail(self, id_, message):\n rv = self.post((id_, self.delete_url), dict(post='yes'))\n assert in_response(rv, message)\n assert self.verify_object({self.id_field: id_})\n return rv", "def test_delete_without_partition_key(self):\r\n with self.assertRaises(query.QueryException):\r\n TestModel.objects(attempt_id=0).delete()", "def deleteItem(request, itemid):\n try:\n item = ItemSerializer(Item.objects.get(id=itemid))\n Item.objects.get(id=itemid).delete()\n return Response(item.data)\n\n except Item.DoesNotExist:\n fail = {\n \"item\":\"item does not exist\"\n }\n return JsonResponse(fail)", "def test_delete_wrong_way_id(self):\n\n url = reverse('notification',\n kwargs={'way_id': 38987, 'notification_id': self.notification.id})\n response = self.client.delete(url)\n\n self.assertEqual(response.status_code, 400)", "def test_deleteorganizations_item(self):\n pass", "def delete_item(id: str):\n db.delete(id, kind=endpoint_model)\n return {\"result\": \"ok\"}", "def test_delete_delete_and_delete_id_not_equal(self):\n doc = TestDoc(\"1\", \"test\")\n self.assertNotEqual(\n BulkActionItem.delete(doc),\n BulkActionItem.delete_id(doc.id),\n )", "def test_error_db_deleting(self):\n\n url = reverse('notification',\n kwargs={'way_id': self.notification.way_id, 'notification_id': self.notification.id})\n with mock.patch('notification.views.Notification.delete_by_id') as notification_delete:\n notification_delete.return_value = False\n response = self.client.delete(url)\n self.assertEqual(response.status_code, 400)", "def test_user_delete_procedure_failure(self):\n p1 = models.Procedure.objects.create(\n name='temp',\n overview='bla bla bla'\n )\n p1.speciality.set([self.speciality.pk])\n p1.save()\n\n res = self.client.get(PROCEDURE_URL)\n\n url = get_item_url(res.data[0]['id'])\n del_procedure = self.client.delete(url)\n\n self.assertEqual(del_procedure.status_code,\n status.HTTP_401_UNAUTHORIZED)", "def test_delete_movie_404(self):\r\n res = self.client().delete('/movies/1/delete', headers=executive_producer)\r\n data = json.loads(res.data)\r\n\r\n #self.assertEqual(res.status_code, 404)\r\n #self.assertFalse(data[\"success\"])\r\n #self.assertEqual(data[\"message\"], \"resource not found\")\r", "def delete(self, _id):", "def 
test_500_delete_nonexistent_question(self):\n res = self.client().delete('/api/questions/5000')\n self.assertEqual(res.status_code, 500)\n data = json.loads(res.data)\n self.assertFalse(data[\"success\"])", "def test_before_delete_for_linked_resource(self):\n resource = factories.Resource()\n helpers.call_action(\"resource_delete\", id=resource[\"id\"])\n with pytest.raises(p.toolkit.ObjectNotFound):\n helpers.call_action(\"resource_show\", id=resource[\"id\"])", "def test_user_delete_procedure_failure(self):\n p1 = models.Procedure.objects.create(\n name='temp',\n overview='bla bla bla'\n )\n p1.speciality.set([self.speciality.pk])\n p1.save()\n\n res = self.client.get(PROCEDURE_URL)\n\n url = get_item_url(res.data[0]['id'])\n del_procedure = self.client.delete(url)\n\n self.assertEqual(del_procedure.status_code,\n status.HTTP_403_FORBIDDEN)", "def test_fail_repeated_buckelist_item(self):\r\n user = User.query.filter_by(email=\"test@test.com\").first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item_no = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id).count()\r\n response = self.add_bucketlist_item(\"test@test.com\", \"test\", bucketlist.id, \"test item\")\r\n result = json.loads(response.data.decode('utf-8'))\r\n self.assertEqual(response.status, '409 CONFLICT')\r\n self.assertEqual(result['message'], 'Bucketlist Item Exists')\r\n new_item_no = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id).count()\r\n self.assertEqual(item_no, new_item_no)", "def taco_test_put_error_requires_id(self):\n body = '{ \"id\": 400, \"name\": \"item_new\", \"content\": \"after test update\" }'\n env = self.get_env('PUT', '/item', body=body)\n webapi_start(env, lambda status, response_headers: self.assertEqual(status, '501'))", "def test_security_on_delete(self):\n # test the delete product url\n product = Product.objects.all()[0]\n url = '/product/xml/%s/' % product.item_number\n response = self.client.delete(url)\n self.failUnlessEqual(response.status_code, 401)", "def test_delete_run(self):\n pass", "def test_delete_product_non_valid_pk(self):\n product_pk = 9999\n product_count_before = models.Product.objects.count()\n url = reverse('products:detail', kwargs={'pk': product_pk})\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_delete_data(self):\n response = self.client.delete(self.url + str(self.current_data[-1]['id']) + '/')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n list_response_new = sorted(response.json(), key=operator.itemgetter('id'))\n self.assertLess(len(list_response_new), len(self.current_data))\n self.assertListEqual(list_response_new, self.current_data[:-1])", "def test_delete_cart_item_unauthorized(self):\n user_id = '111'\n cart_id = self.cart_item_manager.create_cart(user_id, 'test cart', False)\n item_id1 = self.cart_item_manager.add_cart_item(self.catalog, user_id, cart_id, '1', 'entity_type',\n 'entity_version')\n with self.assertRaises(ResourceAccessError):\n self.cart_item_manager.delete_cart_item('112', cart_id, item_id1)", "def test_delete__invalid(self):\n testing_config.sign_in('admin@example.com', 123567890)\n\n with register.app.test_request_context(self.request_path):\n with self.assertRaises(werkzeug.exceptions.BadRequest):\n 
self.handler.do_delete(None)\n\n revised_feature = models.Feature.get_by_id(self.feature_id)\n self.assertFalse(revised_feature.deleted)", "def test_delete_non_notification_id(self):\n\n url = reverse('notification', kwargs={'way_id': self.notification.way_id})\n response = self.client.delete(url)\n self.assertEqual(response.status_code, 400)", "def test_delete_non_existing_resource(self):\n CommonTestCases.admin_token_assert_in(\n self,\n delete_assigned_resource_from_non_existing_resource,\n \"Resource does not exist\"\n )", "def test_get_bucketlist_item_id(self):\n resp = self.client.post('/bucketlists',\n data=json.dumps(self.bucketlist),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n resp_item = self.client.post('/bucketlistitems/1/items',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n get_item = self.client.get('/bucketlistitems/1/items/1', headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)", "def test_delete_single(single_bucket): # pylint: disable=redefined-outer-name\n single_bucket.delete(\"key 1\")\n\n assert single_bucket.is_empty() is True", "def test_delete(self):\n client = RestClient(host=self.host, username='')\n rest_url = 'some/url/'\n \n # Mock good response\n with responses.RequestsMock() as rsps:\n rsps.add(responses.DELETE, f'{self.host}/{rest_url}', status=200,\n json={'value':\"good!\"})\n r = client.delete(rest_url)", "def test_client_verification_document_delete(self):\n pass", "def test_delete():\n sample_uuid = get_sample_id()\n response = requests.delete(f'http://localhost:5000/api/persons/{sample_uuid}')\n\n assert response.status_code == 200", "def test_delete_another_way_id(self):\n\n url = reverse('notification',\n kwargs={'way_id': 101, 'notification_id': self.notification.id})\n response = self.client.delete(url)\n\n self.assertEqual(response.status_code, 403)", "def test_delete_nonexist(self):\n promotion = PromotionFactory()\n promotion.id = '1cak41-nonexist'\n try:\n promotion.delete()\n except KeyError:\n self.assertRaises(KeyError)", "def test_delete_non_existent_issue_fails(self):\n response = self.client.delete(\n self.non_existent_url, headers={\"Authorization\": self.test_user_token}\n )\n response_json = response.get_json()\n error_details = response_json[\"error\"]\n self.assertEqual(response.status_code, 404)\n self.assertEqual(error_details[\"message\"], ISSUE_NOT_FOUND_MESSAGE)\n self.assertEqual(error_details[\"sub_code\"], ISSUE_NOT_FOUND_SUB_CODE)", "def test_delete_wrong_notification_id(self):\n\n url = reverse('notification',\n kwargs={'way_id': self.notification.way_id, 'notification_id': 87876})\n response = self.client.delete(url)\n\n self.assertEqual(response.status_code, 400)", "def test_delitem(self):\n with self.assertRaises(QiitaDBNotImplementedError):\n del self.tester['DEPTH']", "def test_items_delete(patch_mongo):\n # create a user, first\n response = client.put(\"/user\",\n json={\"name\": \"John\"})\n assert response.status_code == status.HTTP_201_CREATED\n\n response = client.get(\"/users\")\n assert response.status_code == status.HTTP_200_OK\n assert len(response.json()) == 1\n\n item = {\n \"content\": \"lorem ipsum\",\n \"priority\": \"high\",\n \"status\": \"backlog\",\n \"users\": [\"John\"],\n }\n response = client.post(\"/item\", json=item)\n assert 
response.status_code == status.HTTP_200_OK\n\n # get the items back enriched with an ID\n response = client.get(\"/items\")\n assert response.status_code == status.HTTP_200_OK\n item_list = response.json()\n\n # delete the first item\n response = client.delete(\"/items/\" + item_list[0][\"item_id\"])\n assert response.status_code == status.HTTP_200_OK\n\n # check if the item has been deleted\n response = client.get(\"/items\")\n assert response.status_code == status.HTTP_200_OK\n item_list = response.json()\n assert not item_list", "def test_delete_fail(self):\n self.user_api()\n self.base.metadata.create_all(self.engine)\n people = self.provision_users()\n p = {'id': people[2].id}\n self.delete('user', 403, params=p)", "def test_07_datastore_delete(self, Mock):\r\n html_request = FakeRequest(json.dumps({}), 200,\r\n {'content-type': 'application/json'})\r\n\r\n Mock.return_value = html_request\r\n with self.flask_app.test_request_context('/'):\r\n out = self.ckan.datastore_delete(name='task',\r\n resource_id=self.task_resource_id)\r\n err_msg = \"It should return True\"\r\n assert out is True, err_msg\r\n # Check the error\r\n Mock.return_value = self.server_error\r\n try:\r\n self.ckan.datastore_delete(name='task',\r\n resource_id=self.task_resource_id)\r\n except Exception as out:\r\n type, msg, status_code = out.args\r\n assert \"Server Error\" in msg, msg\r\n assert 500 == status_code, status_code\r\n assert \"CKAN: the remote site failed! datastore_delete failed\" == type, type", "def test_deletehardwares_item(self):\n pass" ]
[ "0.8280715", "0.81965166", "0.8153652", "0.80517626", "0.7886369", "0.7813242", "0.7694013", "0.76679814", "0.76200867", "0.74761516", "0.7440213", "0.74288243", "0.74263364", "0.7391026", "0.73868865", "0.7326834", "0.7310586", "0.72898954", "0.72570956", "0.7218091", "0.7213452", "0.71546626", "0.71132094", "0.71117187", "0.70680153", "0.70378864", "0.7037814", "0.7031019", "0.7004663", "0.69480836", "0.6923664", "0.6923496", "0.69065434", "0.689108", "0.6886447", "0.68661094", "0.684115", "0.6828757", "0.68203354", "0.6797767", "0.6752703", "0.6745996", "0.6729974", "0.67284566", "0.6716485", "0.67143375", "0.6705093", "0.6701196", "0.6680632", "0.66655797", "0.6665219", "0.6662589", "0.66602784", "0.6634745", "0.66331375", "0.66243595", "0.66231394", "0.6620437", "0.6608814", "0.6605166", "0.6604894", "0.66048074", "0.66014516", "0.65988004", "0.6595715", "0.65859884", "0.6584737", "0.6579533", "0.65634286", "0.6561741", "0.6557319", "0.6534181", "0.6527287", "0.65196854", "0.65181977", "0.6510874", "0.651058", "0.6503149", "0.6501931", "0.6468134", "0.64618427", "0.64584714", "0.6456925", "0.6442706", "0.64402056", "0.643594", "0.64348084", "0.64317816", "0.6425554", "0.6423847", "0.6415603", "0.6414125", "0.6413216", "0.6391658", "0.63906425", "0.63884354", "0.6383032", "0.63798285", "0.6373957", "0.6370947" ]
0.864614
0
Method is used to send a request to the API to add a bucketlist for testing
def add_bucketlist_item(self, email, password, buckelist_id, item_name): test_date = str(date(2020, 9, 22)) headers = self.authentication_headers(email=email, password=password) return self.client.post( '/api/v1/bucketlist/{}/items/'.format(buckelist_id), data=json.dumps({"name": item_name, "finished_by": test_date}), content_type="application/json", headers=headers, follow_redirects=True )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_bucketlist_item(self):\n resp = self.client.post('/bucketlists',\n data=json.dumps(self.bucketlist),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n response = self.client.get(\n \"/bucketlists/1\", headers={\n \"Authorization\": self.token})\n self.assertEqual(response.status_code, 200)\n resp_item = self.client.post('/bucketlistitems/1/items',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n result = json.loads(resp_item.data)\n self.assertEqual(result[\"message\"],\n \"Bucket list item added successfully.\")\n self.assertEqual(resp.status_code, 201)", "def test_bucketlist_create(self):\n res = self.client().post('/bucketlist', data=self.bucketlist)\n self.assertEqual(res.status_code, 201)\n self.assertIn('Go to vacation', str(res.data))", "def post_bucketlist():\n pass", "def test_add_bucketlist_items(self):\r\n email = \"test@test.com\"\r\n _pword = \"test\"\r\n user = User.query.filter_by(email=email).first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item_no = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id).count()\r\n response = self.add_bucketlist_item(email, _pword, bucketlist.id, \"bucketlist item name\")\r\n result = json.loads(response.data.decode('utf-8'))\r\n self.assertEqual(response.status, '201 CREATED')\r\n self.assertEqual(result['message'], 'Bucket list item added')\r\n new_item_no = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id).count()\r\n self.assertLess(item_no, new_item_no)", "def test_creating_and_getting_a_bucketlist_for_authenticated_user(self):\n\n # test all bucketlists\n response = self.client.post(\n \"/bucketlists/\",\n data=dict(name='test_bucketlist'),\n headers={'Authorization': self.user_token}\n )\n bucketlist = json.loads(response.data)\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(bucketlist[\"name\"], 'test_bucketlist')\n\n # test single bucketlist\n self.bucketlist_id = bucketlist[\"bucketlist_id\"]\n single_bucketlist = self.client.get(\n \"/bucketlists/\" + str(self.bucketlist_id) + \"\",\n headers={'Authorization': self.user_token}\n )\n\n one_bucketlist = json.loads(single_bucketlist.data)\n\n self.assertEqual(single_bucketlist.status_code, 200)\n self.assertEqual(one_bucketlist[\"name\"], 'test_bucketlist')\n\n # test all items in bucketlist\n item = self.client.post(\n \"/bucketlists/\" + str(self.bucketlist_id) + \"/items/\",\n data=dict(name=\"test_item\"),\n headers={'Authorization': self.user_token}\n )\n\n one_item = json.loads(item.data)\n\n self.assertEqual(item.status_code, 200)\n self.assertEqual(one_item[\"name\"], 'test_item')\n\n # test single item in bucketlist\n self.item_id = one_item[\"item_id\"]\n single_item = self.client.get(\n \"/bucketlists/\" + str(self.bucketlist_id) + \"/items/\" + str(self.item_id) + \"\",\n headers={'Authorization': self.user_token}\n )\n\n created_item = json.loads(single_item.data)\n\n self.assertEqual(single_item.status_code, 200)\n self.assertEqual(created_item[\"name\"], 'test_item')\n\n # test for deletion of bucketlist\n second_bucketlist = self.client.post(\n \"/bucketlists/\",\n data=dict(name='second_bucketlist'),\n headers={'Authorization': self.user_token}\n )\n\n bucketlist_two = json.loads(second_bucketlist.data)\n\n self.assertEqual(second_bucketlist.status_code, 200)\n 
self.assertEqual(bucketlist_two[\"name\"], 'second_bucketlist')\n\n delete_response = self.client.delete(\n \"/bucketlists/\" + str(bucketlist_two[\"bucketlist_id\"]) + \"\",\n headers={'Authorization': self.user_token}\n )\n\n deletion = json.loads(delete_response.data)\n\n self.assertEqual(delete_response.status_code, 200)\n self.assertEqual(deletion[\"message\"], \"Deleted\")\n\n # test for deletion of an item in bucketlist\n delete_item = self.client.delete(\n \"/bucketlists/\" + str(bucketlist[\"bucketlist_id\"]) + \"/items/\" + str(one_item[\"item_id\"]) + \"\",\n headers={'Authorization': self.user_token}\n )\n\n item_deletion = json.loads(delete_item.data)\n\n self.assertEqual(delete_item.status_code, 200)\n self.assertEqual(item_deletion[\"message\"], \"Deleted\")\n\n # test for updating of bucketlist\n self.bucketlist_id = bucketlist[\"bucketlist_id\"]\n bucketlist_update = self.client.put(\n \"/bucketlists/\" + str(self.bucketlist_id) + \"\",\n data=dict(name='bucketlist_test'),\n headers={'Authorization': self.user_token}\n )\n\n updated_bucketlist = json.loads(bucketlist_update.data)\n\n self.assertEqual(bucketlist_update.status_code, 200)\n self.assertEqual(updated_bucketlist[\"name\"], 'bucketlist_test')\n\n # test update of item in bucketlist\n item = self.client.post(\n \"/bucketlists/\" + str(self.bucketlist_id) + \"/items/\",\n data=dict(name=\"test_item\"),\n headers={'Authorization': self.user_token}\n )\n\n one_item = json.loads(item.data)\n\n item_update = self.client.put(\n \"/bucketlists/\" + str(self.bucketlist_id) + \"/items/\"+ str(one_item[\"item_id\"]) + \"\",\n data=dict(name=\"item_test\"),\n headers={'Authorization': self.user_token}\n )\n\n updated_item = json.loads(item_update.data)\n\n self.assertEqual(item_update.status_code, 200)\n self.assertEqual(updated_item[\"name\"], 'item_test')", "def test_create_bucket_list_return(self):\n bucket = BucketList(\"\", \"\")\n bucket = bucket.create_bucket_list(\"Name\", \"Completed\")\n self.assertIsInstance(bucket, BucketList)", "def test_bucketlist_creation(self):\n post_data = self.post_a_bucket()\n self.assertEqual(post_data.status_code, 201)\n self.assertIn('Climb the Himalayas', str(post_data.data))", "def test_edit_bucketlist(self):\n post_data = self.post_a_bucket()\n self.assertEqual(post_data.status_code, 201)\n result_of_put_method = self.client().put(\n '/bucketlists/1',\n headers=dict(Authorization='Bearer '\n + self.token()\n ),\n data={\n \"name\": \"The seasons will be, summer winter and autumn\"\n })\n self.assertEqual(result_of_put_method.status_code, 201)\n result_of_get_method = self.client().get('/bucketlists/1',\n headers=dict(Authorization='Bearer '\n + self.token())\n )\n self.assertIn('The seasons will b', str(result_of_get_method.data))", "def update_bucketlist():\n pass", "def test_get_bucketlist_items(self):\n resp = self.client.post('/bucketlists',\n data=json.dumps(self.bucketlist),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n\n resp_item = self.client.post('/bucketlistitems/1/items',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n\n self.assertEqual(resp_item.status_code, 200)\n resp_item = self.client.get('/bucketlistitems/1/items', headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp_item.status_code, 200)", "def test_create_bucket_list_name(self):\n bucket = BucketList(\"\", 
\"\")\n bucket = bucket.create_bucket_list(\"\")\n self.assertEqual(bucket, \"Please provide a name for your bucket list\", )", "def test_api_get_all_bucketlists(self):\n res = self.client().post('/bucketlist', data=self.bucketlist)\n self.assertEqual(res.status_code, 201)\n res = self.client().get('/bucketlist')\n self.assertEqual(res.status_code, 200)\n self.assertIn('Go to vacation', str(res.data))", "def test_api_edit_bucketlist(self):\n res_post = self.client().post('/bucketlist', data={'name': 'Wake up, Eat, Code, Sleep & Repeat'})\n self.assertEqual(res_post.status_code, 201)\n res_post_in_json = json.loads(res_post.data.decode('UTF-8').replace(\"'\", \"\\\"\"))\n id = res_post_in_json['id']\n res_put = self.client().put(\n f'bucketlist/{id}',\n data={\n 'name': \"Don't forget to exercise\"\n }\n )\n self.assertEqual(res_put.status_code, 200)\n res = self.client().get(f'/bucketlist/{id}')\n self.assertIn(\"exercise\", str(res.data))", "def post(self, user):\n # parse request data\n bucketlist_name = self.request.form['name']\n\n # validate bucketlist\n if not bucketlist_name:\n return \"Name cannot be empty\", 401\n\n # create bucketlist and save bucketlist\n bucketlist = Bucketlist(name=bucketlist_name, date_created=datetime.utcnow(\n ), created_by=user.username, author=user)\n bucketlist.save()\n\n return \"Successfully created bucketlist\", 201", "def test_list(self):\n responses.add(\n responses.Response(\n method='GET',\n url='https://connection.keboola.com/v2/storage/buckets',\n json=list_response\n )\n )\n buckets_list = self.buckets.list()\n assert isinstance(buckets_list, list)", "def test_put_bucketlist_item(self):\r\n data = {\"name\": \"bucketlist item name\", \"completed\": \"true\"}\r\n email = \"test@test.com\"\r\n _pword = \"test\"\r\n user = User.query.filter_by(email=email).first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=1).first()\r\n self.assertNotEqual(item.name, \"bucketlist item name\")\r\n self.assertFalse(item.completed)\r\n\r\n response = self.put_bucketlist_item(email, _pword, bucketlist.id, 1, data)\r\n result = json.loads(response.data.decode('utf-8'))\r\n item2 = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=1).first()\r\n self.assertEqual(response.status, '201 CREATED')\r\n self.assertEqual(item2.name, \"bucketlist item name\")\r\n self.assertTrue(item2.completed)", "def test_delete_bucketlist(self):\n post_data = self.post_a_bucket()\n self.assertEqual(post_data.status_code, 201)\n result_of_delete_method = self.client().delete('/bucketlists/1',\n headers=dict(Authorization='Bearer '\n + self.token())\n )\n self.assertEqual(result_of_delete_method.status_code, 200)\n response_after_removal = self.client().get('/bucketlists/1',\n headers=dict(Authorization='Bearer '\n + self.token())\n )\n self.assertEqual(response_after_removal.status_code, 400)", "def test_bucket_is_updated(self):\n with self.client:\n # Get an auth token\n token = self.get_user_token()\n # Create a Bucket\n response = self.client.post(\n '/bucketlists',\n data=json.dumps(dict(name='Travel')),\n headers=dict(Authorization='Bearer ' + token),\n content_type='application/json'\n )\n # Test Bucket creation\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 201)\n self.assertTrue(data['status'], 'success')\n self.assertTrue(data['name'], 'Travel')\n # Update the bucket name\n res = self.client.put(\n 
'/bucketlists/1',\n headers=dict(Authorization='Bearer ' + token),\n data=json.dumps(dict(name='Adventure')),\n content_type='application/json'\n )\n data = json.loads(res.data.decode())\n self.assertEqual(res.status_code, 201)\n self.assertTrue(res.content_type == 'application/json')\n self.assertTrue(data['status'] == 'success')\n self.assertTrue(data['name'] == 'Adventure')\n self.assertEqual(data['id'], 1)", "def manipulate_bucketlist():\n pass", "def test_user_can_get_list_of_buckets(self):\n with self.client:\n response = self.client.get(\n '/bucketlists/',\n headers=dict(Authorization='Bearer ' + self.get_user_token())\n )\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 200)\n self.assertTrue(data['status'] == 'success')\n self.assertIsInstance(data['buckets'], list)\n self.assertEqual(len(data['buckets']), 0)\n self.assertEqual(data['count'], 0)\n self.assertIsInstance(data['count'], int)\n self.assertEqual(data['previous'], None)\n self.assertEqual(data['next'], None)", "def test_get_request_on_bucketlist_resource(self):\n\n response = self.client.get(\"/bucketlists/\")\n self.assertEqual(response.status_code, 401)", "def test_bucket_by_id_is_returned_on_get_request(self):\n with self.client:\n token = self.get_user_token()\n # Create a Bucket\n response = self.client.post(\n '/bucketlists',\n data=json.dumps(dict(name='Travel')),\n headers=dict(Authorization='Bearer ' + token),\n content_type='application/json'\n )\n # Test Bucket creation\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 201)\n self.assertTrue(data['status'], 'success')\n self.assertTrue(data['name'], 'Travel')\n response = self.client.get(\n '/bucketlists/1',\n headers=dict(Authorization='Bearer ' + token)\n )\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 200)\n self.assertTrue(data['status'] == 'success')\n self.assertTrue(data['bucket']['name'] == 'travel')\n self.assertIsInstance(data['bucket'], dict)\n self.assertTrue(response.content_type == 'application/json')", "def test_api_delete_bucketlist(self):\n\n res_post = self.client().post('/bucketlist', data={'name': \"Don't forget to exercise\"})\n self.assertEqual(res_post.status_code, 201)\n res_post_in_json = json.loads(res_post.data.decode('UTF-8'))\n id = res_post_in_json['id']\n res_delete = self.client().delete(f\"/bucketlist/{id}\")\n self.assertEqual(res_delete.status_code, 200)\n\n # should return 404 after delete the data\n res = self.client().get(f'/bucketlist/{id}')\n self.assertEqual(res.status_code, 404)", "def test_api_get_bucketlist_by_id(self):\n res_post = self.client().post('/bucketlist', data=self.bucketlist)\n self.assertEqual(res_post.status_code, 201)\n res_in_json = json.loads(res_post.data.decode('UTF-8').replace(\"'\", \"\\\"\"))\n res = self.client().get(f\"/bucketlist/{res_in_json['id']}\")\n self.assertEqual(res.status_code, 200)\n self.assertIn('Go to vacation', str(res.data))", "def test_list_bucket(self):\n\n if self.bos.does_bucket_exist(\"aaaaaaxzr1\"):\n self.bos.delete_bucket(\"aaaaaaxzr1\")\n if self.bos.does_bucket_exist(\"aaaaaaxzr2\"):\n self.bos.delete_bucket(\"aaaaaaxzr2\")\n\n time1 = utils.get_canonical_time()\n self.bos.create_bucket(\"aaaaaaxzr1\")\n\n time2 = utils.get_canonical_time()\n self.bos.create_bucket(\"aaaaaaxzr2\")\n\n response = self.bos.list_buckets()\n self.check_headers(response)\n\n self.assertEqual(response.owner.id, bos_test_config.OWNER_ID)\n 
self.assertEqual(response.owner.display_name, bos_test_config.DISPLAY_NAME)\n for bucket in response.buckets:\n if bucket.name == \"aaaaaaxzr1\":\n self.assertEqual(\n compat.convert_to_bytes(bucket.creation_date)[0:19], \n compat.convert_to_bytes(time1)[0:19])\n elif bucket.name == \"aaaaaaxzr2\":\n self.assertEqual(\n compat.convert_to_bytes(bucket.creation_date)[0:19], \n compat.convert_to_bytes(time2)[0:19])\n self.bos.delete_bucket(\"aaaaaaxzr1\")\n self.bos.delete_bucket(\"aaaaaaxzr2\")", "def create_bucketlist(self, title, intro):\n bucketlist_ = Bucketlist(owner_id=self._id,\n title=title,\n intro=intro,\n owner=self.username)\n bucketlist_.save_to_bucketlists()", "def test_delete_bucketlist_item(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n\n # create a bucketlist by making a POST request\n res = self.client().post(\n '/api/v1/bucketlists/',\n headers=dict(Authorization=\"Bearer \" + access_token),\n data=self.bucketlist)\n self.assertEqual(res.status_code, 201)\n # get the json with the bucketlist\n results = json.loads(res.data.decode())\n\n # create a bucketlist item by making a POST request and add it to the created bucketlist\n res = self.client().post(\n '/api/v1/bucketlists/{}/items/'.format(results['id']),\n headers=dict(Authorization=\"Bearer \" + access_token),\n data={\n \"name\": \"Eat fried crabs\"\n })\n self.assertEqual(res.status_code, 201)\n # get the json containing the created bucketlist item\n res_item = json.loads(res.data.decode())\n\n # delete the bucketlist item we just created\n res = self.client().delete(\n '/api/v1/bucketlists/{}/items/{}'.format(results['id'], res_item['id']),\n headers=dict(Authorization=\"Bearer \" + access_token), )\n self.assertEqual(res.status_code, 200)\n\n # Test to see if it exists, should return a 404\n result = self.client().get(\n '/api/v1/bucketlists/{}/items/1'.format(results['id']),\n headers=dict(Authorization=\"Bearer \" + access_token))\n self.assertEqual(result.status_code, 404)", "def test_create(self):\n responses.add(\n responses.Response(\n method='POST',\n url='https://connection.keboola.com/v2/storage/buckets',\n json=create_response\n )\n )\n name = 'my-new-bucket'\n description = 'Some Description'\n backend = 'snowflake'\n created_detail = self.buckets.create(name=name,\n description=description,\n backend=backend)\n assert created_detail['id'] == 'in.c-{}'.format(name)", "def test_name_attribute_is_set_in_bucket_creation_request(self):\n with self.client:\n response = self.client.post(\n '/bucketlists',\n headers=dict(Authorization='Bearer ' + self.get_user_token()),\n data=json.dumps({}),\n content_type='application/json'\n )\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 400)\n self.assertTrue(data['status'], 'failed')\n self.assertTrue(data['message'], 'Missing name attribute')", "def test_read_bucket(self):\n post_data = self.post_a_bucket()\n self.assertEqual(post_data.status_code, 201)\n result_of_get_method = self.client().get('/bucketlists/',\n headers=dict(Authorization='Bearer '\n + self.token())\n )\n self.assertEqual(result_of_get_method.status_code, 200)\n self.assertIn('Climb the Himalayas', str(result_of_get_method.data))", "def test_update_busketlistitem_by_id(self):\n resp = self.client.post('/bucketlists',\n data=json.dumps(self.bucketlist),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n 
resp_item = self.client.post('/bucketlistitems/1/items',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n update_item = self.client.put('/bucketlistitems/1/items/1',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs and museums too.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(update_item.status_code, 201)", "def test_app_can_add_list(self):\n add_list=self.client.post('/addshoppinglists/?user='+self.user['user'], \n data=self.shopllist, \n headers={\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n self.assertEqual(add_list.status_code,200)", "def test_create_bucketlist_view_returns_200_status_code(self):\n response = self.app.get('/create-bucketlist')\n self.assertEqual(response.status_code, 200, \n \"should return a status code of 200\")", "def test_get_bucketlist_items(self):\r\n email = \"test@test.com\"\r\n _pword = \"test\"\r\n user = User.query.filter_by(email=email).first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, id=1).first()\r\n items_no = len(bucketlist.bucketlist_items)\r\n headers = self.authentication_headers(email=email, password=_pword)\r\n response = self.client.get(\r\n '/api/v1/bucketlist/1/items/',\r\n content_type=\"application/json\",\r\n headers=headers,\r\n follow_redirects=True\r\n )\r\n result = json.loads(response.data.decode('utf-8'))\r\n self.assertEqual(len(result), items_no)", "def get_bucketlist():\n pass", "def test_duplicate_bucketlist_item(self):\n resp = self.client.post('/bucketlists',\n data=json.dumps(self.bucketlist),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n resp_item = self.client.post('/bucketlistitems/1/items',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n resp_item2 = self.client.post('/bucketlistitems/1/items',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n result = json.loads(resp_item2.data)\n self.assertEqual(result[\"message\"], \"Item with the given name exists.\")\n self.assertEqual(resp_item2.status_code, 409)", "def put_bucketlist_item(self, email, password, bucketlist_id, item_id, data):\r\n headers = self.authentication_headers(email=email, password=password)\r\n return self.client.put(\r\n '/api/v1/bucketlist/{}/items/{}'.format(bucketlist_id, item_id),\r\n content_type=\"application/json\",\r\n data=json.dumps(data),\r\n headers=headers,\r\n follow_redirects=True\r\n )", "def test_delete_bucketlistitem_by_id(self):\n resp = self.client.post('/bucketlists',\n data=json.dumps(self.bucketlist),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n resp_item = self.client.post('/bucketlistitems/1/items',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n delete_item = self.client.delete('/bucketlistitems/1/items/1',\n headers={\n \"Authorization\": self.token\n })\n self.assertEqual(delete_item.status_code, 204)", "def 
test_get_bucketlist_item_id(self):\n resp = self.client.post('/bucketlists',\n data=json.dumps(self.bucketlist),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n resp_item = self.client.post('/bucketlistitems/1/items',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n get_item = self.client.get('/bucketlistitems/1/items/1', headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)", "def test_single_bucketlist_item_delete_with_no_auth_header(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n\n res = self.client().post(\n '/api/v1/bucketlists/',\n headers=dict(Authorization=\"Bearer \" + access_token),\n data={'name': 'Visit the Grand Canyon!'})\n self.assertEqual(res.status_code, 201)\n # get the bucketlist in json\n results = json.loads(res.data.decode())\n\n # create a bucketlist item by making a POST request and add it to the created bucketlist\n res = self.client().post(\n '/api/v1/bucketlists/{}/items/'.format(results['id']),\n headers=dict(Authorization=\"Bearer \" + access_token),\n data={\n \"name\": \"Eat fried crabs\"\n })\n self.assertEqual(res.status_code, 201)\n # get the json containing the created bucketlist item\n res_item = json.loads(res.data.decode())\n\n # delete the bucketlist item we just created\n res = self.client().delete(\n '/api/v1/bucketlists/{}/items/{}'.format(results['id'], res_item['id']),\n headers=dict(), )\n self.assertEqual(res.status_code, 401)\n self.assertIn('Header with key Authorization missing.', str(res.data))", "def add_bucket(bucket_name):\n pass", "def test_bucket_post_content_type_is_json(self):\n with self.client:\n response = self.client.post(\n '/bucketlists',\n headers=dict(Authorization='Bearer ' + self.get_user_token()),\n data=json.dumps(dict(name='Travel'))\n )\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 202)\n self.assertTrue(data['status'], 'failed')\n self.assertTrue(data['message'], 'Content-type must be json')", "def test_single_bucketlist_item_delete_with_empty_token(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n\n res = self.client().post(\n '/api/v1/bucketlists/',\n headers=dict(Authorization=\"Bearer \" + access_token),\n data=self.bucketlist)\n\n # assert that the bucketlist is created\n self.assertEqual(res.status_code, 201)\n # get the response data in json format\n results = json.loads(res.data.decode())\n\n # create a bucketlist item by making a POST request and add it to the created bucketlist\n res = self.client().post(\n '/api/v1/bucketlists/{}/items/'.format(results['id']),\n headers=dict(Authorization=\"Bearer \" + access_token),\n data={\n \"name\": \"Eat fried crabs\"\n })\n self.assertEqual(res.status_code, 201)\n # get the json containing the created bucketlist item\n res_item = json.loads(res.data.decode())\n\n # delete the bucketlist item we just created\n res = self.client().delete(\n '/api/v1/bucketlists/{}/items/{}'.format(results['id'], res_item['id']),\n headers=dict(Authorization=\"\"), )\n self.assertEqual(res.status_code, 401)\n self.assertIn('Token not provided in the header with key Authorization.', str(res.data))", "def 
test_fail_repeated_buckelist_item(self):\r\n user = User.query.filter_by(email=\"test@test.com\").first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item_no = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id).count()\r\n response = self.add_bucketlist_item(\"test@test.com\", \"test\", bucketlist.id, \"test item\")\r\n result = json.loads(response.data.decode('utf-8'))\r\n self.assertEqual(response.status, '409 CONFLICT')\r\n self.assertEqual(result['message'], 'Bucketlist Item Exists')\r\n new_item_no = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id).count()\r\n self.assertEqual(item_no, new_item_no)", "def test_no_bucket_returned_by_given_id(self):\n with self.client:\n token = self.get_user_token()\n\n response = self.client.get(\n '/bucketlists/1',\n headers=dict(Authorization='Bearer ' + token)\n )\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 200)\n self.assertTrue(data['status'] == 'success')\n self.assertIsInstance(data['bucket'], list)\n self.assertTrue(response.content_type == 'application/json')", "def test_bucket_is_deleted(self):\n with self.client:\n # Get an auth token\n token = self.get_user_token()\n response = self.client.post(\n '/bucketlists',\n data=json.dumps(dict(name='Travel')),\n headers=dict(Authorization='Bearer ' + token),\n content_type='application/json'\n )\n # Test Bucket creation\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 201)\n self.assertTrue(data['status'], 'success')\n self.assertTrue(data['name'], 'Travel')\n # Delete the created Bucket\n res = self.client.delete(\n '/bucketlists/1',\n headers=dict(Authorization='Bearer ' + token)\n )\n data = json.loads(res.data.decode())\n self.assertEqual(res.status_code, 200)\n self.assertTrue(data['status'] == 'success')\n self.assertTrue(data['message'] == 'Bucket Deleted successfully')\n self.assertTrue(res.content_type == 'application/json')", "def test_create_bucket(self):\n pass", "def create_bucket(request: Dict) -> Dict:\n global config\n\n body = {\n \"user_name\": request.get(\"user_name\"),\n \"prefix\": request.get(\"bucket_name\")[0:5],\n \"bucket_name\": request.get(\"bucket_name\"),\n \"region\": request.get(\"region\")\n }\n\n response = requests.post(url=config.api_url('bucket'),\n data=json.dumps(body),\n headers={'content-type': 'application/json'})\n\n if response.status_code == HTTPStatus.OK:\n return response.json()", "def test_buckets_returned_when_searched(self):\n with self.client:\n token = self.get_user_token()\n self.create_buckets(token)\n response = self.client.get(\n '/bucketlists/?q=T',\n headers=dict(Authorization='Bearer ' + token)\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'success')\n self.assertIsInstance(data['buckets'], list, 'Items must be a list')\n self.assertEqual(len(data['buckets']), 3)\n self.assertEqual(data['buckets'][0]['id'], 1)\n self.assertEqual(data['count'], 6)\n self.assertEqual(data['next'], 'http://localhost/bucketlists/?page=2')\n self.assertEqual(data['previous'], None)\n self.assertEqual(response.status_code, 200)", "def put(self, user, id):\n # parse request data\n if 'name' not in self.request.form:\n return \"Bucketlist not Update\", 202\n\n bucketlist_name = self.request.form['name']\n\n # validate bucketlist\n if not bucketlist_name:\n return \"Name cannot be empty\", 401\n\n # search for the bucketlist_id\n bucketlist = Bucketlist.query.filter_by(\n 
id=id, created_by=user.email).first()\n\n # return 400 if bucketlist non exixtant or not belongs to this user\n if bucketlist is None:\n return 'Bucketlist not found', 202\n\n # Update bucketlist and save changes\n bucketlist.name = bucketlist_name\n bucketlist.save()\n\n return \"Successfully updated bucketlist\", 201", "def test_list_all_bucektlists_for_authenticated_user(self):\n\n response = self.client.get(\n \"/bucketlists/\",\n headers={'Authorization': self.user_token}\n )\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.data, '[]\\n')", "def delete_bucketlist():\n pass", "def test_creating_a_bucket(self):\n with self.client:\n self.create_bucket(self.get_user_token())", "def put_list(self, request, **kwargs):\r\n response = super(BaseCorsResource, self).put_list(request, **kwargs)\r\n return self.add_cors_headers(response, True)", "def test_s3_csv_file_list(\n self,\n mock_init_storage_function,\n mock_get_list_of_bucket_files\n ): # pylint: disable=unused-argument\n\n with self.app.app_context():\n url = '/donation/s3/csv/files?bucket={}&path={}'\n\n bucket = self.app.config[ 'AWS_CSV_FILES_BUCKET' ]\n path = self.app.config[ 'AWS_CSV_FILES_PATH' ]\n\n response = self.test_client.get( url.format( bucket, path ), headers=self.headers )\n self.assertEqual( response.status_code, status.HTTP_200_OK )", "def test_id_of_bucket_to_be_edited_does_not_exist(self):\n with self.client:\n # Get an auth token\n token = self.get_user_token()\n # Update the bucket name\n res = self.client.put(\n '/bucketlists/1',\n headers=dict(Authorization='Bearer ' + token),\n data=json.dumps(dict(name='Adventure')),\n content_type='application/json'\n )\n data = json.loads(res.data.decode())\n self.assertEqual(res.status_code, 404)\n self.assertTrue(res.content_type == 'application/json')\n self.assertTrue(data['status'] == 'failed')\n self.assertTrue(data['message'] == 'The Bucket with Id 1 does not exist')", "def test_buckets_returned_when_searched_2(self):\n with self.client:\n token = self.get_user_token()\n self.create_buckets(token)\n response = self.client.get(\n '/bucketlists/?q=T&page=2',\n headers=dict(Authorization='Bearer ' + token)\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'success')\n self.assertIsInstance(data['buckets'], list, 'Items must be a list')\n self.assertEqual(len(data['buckets']), 3)\n self.assertEqual(data['buckets'][0]['id'], 4)\n self.assertEqual(data['count'], 6)\n self.assertEqual(data['next'], None)\n self.assertEqual(data['previous'], 'http://localhost/bucketlists/?page=1')\n self.assertEqual(response.status_code, 200)", "def test_app_can_update_a_list(self):\n self.ne=json.dumps({\"newName\":\"pants\"})\n list_update=self.client.put('/shoppinglists/trou',\n data=self.ne,\n headers={\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n self.assertIn(\"list doesnt exist\",str(list_update.data)) \n self.assertEqual(list_update.status_code,200)", "def add_bucket_list_item(self, id, collection, item):\n if type(id) is not ObjectId:\n id = ObjectId(id)\n obj = getattr(self.db, collection)\n result = obj.update(\n {'_id': id},\n {'$addToSet': {'bucket_list': item}}\n )\n return result", "def test_400_bad_requests(self):\n with self.client:\n token = self.get_user_token()\n res = self.client.put(\n '/bucketlists/1',\n headers=dict(Authorization='Bearer ' + token),\n content_type='application/json'\n )\n data = json.loads(res.data.decode())\n self.assertEqual(res.status_code, 400)\n 
self.assertTrue(data['status'] == 'failed')\n self.assertTrue(data['message'] == 'Bad Request')", "def test_single_bucketlist_item_delete_with_invalid_token(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n\n res = self.client().post(\n '/api/v1/bucketlists/',\n headers=dict(Authorization=\"Bearer \" + access_token),\n data=self.bucketlist)\n\n # assert that the bucketlist is created\n self.assertEqual(res.status_code, 201)\n # get the response data in json format\n results = json.loads(res.data.decode())\n\n # create a bucketlist item by making a POST request and add it to the created bucketlist\n res = self.client().post(\n '/api/v1/bucketlists/{}/items/'.format(results['id']),\n headers=dict(Authorization=\"Bearer \" + access_token),\n data={\n \"name\": \"Eat fried crabs\"\n })\n self.assertEqual(res.status_code, 201)\n # get the json containing the created bucketlist item\n res_item = json.loads(res.data.decode())\n\n # delete the bucketlist item we just created\n res = self.client().delete(\n '/api/v1/bucketlists/{}/items/{}'.format(results['id'], res_item['id']),\n headers=dict(Authorization=access_token), )\n self.assertEqual(res.status_code, 401)\n self.assertIn('Invalid token format.', str(res.data))", "def test_id_of_bucket_to_be_edited_is_invalid(self):\n with self.client:\n # Get an auth token\n token = self.get_user_token()\n # Update the bucket name\n res = self.client.put(\n '/bucketlists/bucketid',\n headers=dict(Authorization='Bearer ' + token),\n data=json.dumps(dict(name='Adventure')),\n content_type='application/json'\n )\n data = json.loads(res.data.decode())\n self.assertEqual(res.status_code, 400)\n self.assertTrue(res.content_type == 'application/json')\n self.assertTrue(data['status'] == 'failed')\n self.assertTrue(data['message'] == 'Please provide a valid Bucket Id')", "def test_list(self):\n self.add_to_queue('not-image.txt')\n self.add_to_queue('riker.gif')\n\n rv = self.get('/queue/', token=self.user_token)\n\n expected = {\"filelist\": [{\"filename\": \"riker.gif\",\n \"url\": \"/queue/riker.gif\"\n }\n ]}\n self.assertJSONOk(rv, **expected)\n return", "def test_add_item_at_using_put(self):\n pass", "def test_api_get_bucketlist_by_id_not_exist(self):\n res = self.client().get(f\"/bucketlist/99\")\n self.assertEqual(res.status_code, 404)", "def create_item(_id, item_name, description):\n data_ = Data.get_the_data(_id, Data.bucketlists)\n for data in data_:\n bucketlist = Bucketlist(data['title'],\n data['owner'],\n data['intro'],\n data['owner_id'],\n data['_id'])\n bucketlist.new_item(item_name=item_name,\n description=description)", "def test_put_item_wrong_id(self):\r\n data = {\"name\": \"bucketlist item name\", \"completed\": \"true\"}\r\n email = \"test@test.com\"\r\n _pword = \"test\"\r\n user = User.query.filter_by(email=email).first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=0).first()\r\n self.assertFalse(item)\r\n\r\n response = self.put_bucketlist_item(email, _pword, bucketlist.id, 0, data)\r\n result = json.loads(response.data.decode('utf-8'))\r\n self.assertEqual(response.status, '404 NOT FOUND')\r\n self.assertEqual(\r\n result['message'],\r\n 'Bucketlist Item with ID {} not found in the database. 
You have requested this URI '\\\r\n '[/api/v1/bucketlist/1/items/0] but did you mean /api/v1/bucketlist/<int:bucketlist_id>/items/'\\\r\n ' or /api/v1/bucketlist/<int:bucketlist_id> or /api/v1/bucketlist ?'.format(0)\r\n )", "def test_shelflistitem_putpatch_requires_auth(api_settings,\n assemble_custom_shelflist,\n get_shelflist_urls, api_client):\n test_lcode, test_id = '1test', 99999999\n _, _, trecs = assemble_custom_shelflist(test_lcode, [(test_id, {})])\n url = '{}{}'.format(get_shelflist_urls(trecs)[test_lcode], test_id)\n before = api_client.get(url)\n put_resp = api_client.put(url, {})\n patch_resp = api_client.patch(url, {})\n after = api_client.get(url) \n assert put_resp.status_code == 403\n assert patch_resp.status_code == 403\n assert before.data == after.data", "def test_content_type_for_editing_bucket_is_json(self):\n with self.client:\n token = self.get_user_token()\n res = self.client.put(\n '/bucketlists/1',\n headers=dict(Authorization='Bearer ' + token),\n data=json.dumps(dict(name='Adventure'))\n )\n data = json.loads(res.data.decode())\n self.assertEqual(res.status_code, 202)\n self.assertTrue(data['status'] == 'failed')\n self.assertTrue(data['message'] == 'Content-type must be json')", "def test_get_bucket(self):\n pass", "def test_get_buckets(self):\n pass", "def test_update_bucket(self):\n pass", "def test_deletion_handles_no_bucket_found_by_id(self):\n with self.client:\n response = self.client.delete(\n '/bucketlists/1',\n headers=dict(Authorization='Bearer ' + self.get_user_token())\n )\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 404)\n self.assertTrue(data['status'] == 'failed')\n self.assertTrue(data['message'] == 'Bucket resource cannot be found')\n self.assertTrue(response.content_type == 'application/json')", "def add_bucket_to_json(args):\n\n if does_bucket_exist(args) == 0:\n print(\"Error: bucket already exists, use update_bucket_permissions if you want to update it.\")\n return 1\n\n if does_group_exist(args) != 0:\n add_group_to_json(args)\n\n new_bucket = {\n \"name\": args.bucket,\n \"propogate_permissions\": True,\n \"allowed_attributes\": [],\n }\n\n # read_groups and write_groups are lists to separate users with read and write permissions on buckets\n\n if args.read_groups:\n read_groups_config = {\n \"attribute_requirements\": {\n \"or\": [],\n },\n \"permissions\": \"rl\"\n }\n\n for read_group in args.read_groups:\n attribute = {\n \"attribute\": \"http.OIDC_CLAIM_groups\",\n \"value\": read_group\n }\n read_groups_config[\"attribute_requirements\"][\"or\"].append(attribute)\n\n new_bucket[\"allowed_attributes\"].append(read_groups_config)\n\n sanitised_group = args.group.replace('/', '-')\n\n if not args.write_groups:\n args.write_groups = [args.group]\n\n write_groups_config = {\n \"attribute_requirements\": {\n \"or\": [],\n },\n \"permissions\": \"rlwdc\"\n }\n\n for write_group in args.write_groups:\n attribute = {\n \"attribute\": \"http.OIDC_CLAIM_groups\",\n \"value\": write_group\n }\n write_groups_config[\"attribute_requirements\"][\"or\"].append(attribute)\n\n new_bucket[\"allowed_attributes\"].append(write_groups_config)\n\n try:\n with open(args.file, \"r\") as f:\n config = json.load(f)\n except FileNotFoundError:\n print(\"Error: could not find given auth JSON file\")\n return 1\n \n for group in config[\"groups\"]:\n if group[\"name\"] == sanitised_group:\n config[\"groups\"].remove(group)\n bucket_list = group[\"buckets\"]\n bucket_list.append(new_bucket)\n group[\"buckets\"] = 
bucket_list\n config[\"groups\"].append(group)\n break\n\n with open(args.file, \"w\") as f:\n json.dump(config, f, indent=4)\n\n return 0", "def test_patch_bucket(self):\n pass", "def test_authorization_is_enforced(self):\n new_client = APIClient()\n res = new_client.get('/bucketlists/', kwargs={'pk': 2}, format=\"json\")\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def add_list(self):\n the_list = models.List(user_id=1,\n list_name=self.test_list,\n description=self.test_list_desc)\n the_list.add()", "def get(self, user):\n search = True if self.request.args.get('q') else False\n limit = int(self.request.args.get('limit')) if self.request.args.get('limit') else 20\n page = int(self.request.args.get('page')) if self.request.args.get('page') else 1\n bucketlists = user.bucketlists.paginate(page, limit, True).items\n bucketlists = user.bucketlists.filter(Bucketlist.name.contains(self.request.args.get('q'))) if self.request.args.get('q') else bucketlists\n\n bucketlists = [\n {'id': bucketlist.id,\n 'name': bucketlist.name,\n 'items': [\n {'id': item.id,\n 'name': item.description,\n 'date_created': str(item.date_created),\n 'date_modified': str(item.date_modified),\n 'done': str(item.is_done)\n } for item in bucketlist.items\n ],\n 'date_created': str(bucketlist.date_created),\n 'date_modified': str(bucketlist.date_modified),\n 'created_by': bucketlist.created_by\n } for bucketlist in bucketlists\n ]\n\n # if empty retutn no bucketlists added\n if not bucketlists:\n return \"You have no avialable bucketlists\", 200\n\n return bucketlists, 200", "def test_POST_send_list(self):\n\t\tself.POST_list()\n\t\tlist = self.GET_data('/api/list/' + self.list_id)\n\t\tself.POST_data('/api/list/' + self.list_id + '/send', data=list)", "async def test_list_fleet(client):\n group_param = {}\n params = [('access_token', 'access_token_example'),\n ('starting_after', 'starting_after_example'),\n ('ending_before', 'ending_before_example'),\n ('limit', 56)]\n headers = { \n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n }\n response = await client.request(\n method='POST',\n path='/v1/fleet/list',\n headers=headers,\n json=group_param,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "def test_delete_bucketlist_item(self):\r\n email = \"test@test.com\"\r\n _pword = \"test\"\r\n user = User.query.filter_by(email=email).first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=1).first()\r\n self.assertTrue(item)\r\n\r\n response = self.delete_bucketlist_item(email, _pword, bucketlist.id, item.id)\r\n result = json.loads(response.data.decode('utf-8'))\r\n self.assertEqual(response.status, '200 OK')\r\n self.assertEqual(\r\n result['message'],\r\n 'Bucketlist Item with ID {} deleted'.format(item.id)\r\n )\r\n item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=1).first()\r\n self.assertFalse(item)", "def test_put_list_new(self):\n self.story.save()\n self.assertEqual(self.story.places.count(), 0)\n put_data = [place.place_id for place in\n Place.objects.filter(name=\"Logan Square\")]\n self.api_client.client.login(username=self.username, password=self.password)\n uri = '/api/0.1/stories/%s/places/' % (self.story.story_id)\n response = self.api_client.put(uri, format='json', data=put_data)\n self.assertHttpAccepted(response)\n self.story = 
Story.objects.get(story_id=self.story.story_id)\n self.assertEqual(self.story.places.count(), 1)\n ids = [place.place_id for place in self.story.places.all()]\n self.assertEqual(ids, put_data)", "def test_list(self):\n key = Key(self.bucket)\n name, version, filename = 'mypkg', '1.2', 'pkg.tar.gz'\n key.key = name + '/' + filename\n key.set_metadata('name', name)\n key.set_metadata('version', version)\n key.set_contents_from_string('foobar')\n package = list(self.storage.list(Package))[0]\n self.assertEquals(package.name, name)\n self.assertEquals(package.version, version)\n self.assertEquals(package.filename, filename)", "def test_create_interest_list_auth(self):\n url = reverse('xds_api:interest-lists')\n interest_list = {\n \"name\": \"Devops\",\n \"description\": \"Devops Desc\",\n \"courses\": []\n }\n _, token = AuthToken.objects.create(self.user_1)\n response = \\\n self.client.post(url,\n interest_list,\n HTTP_AUTHORIZATION='Token {}'.format(token))\n responseDict = json.loads(response.content)\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED),\n self.assertEqual(responseDict[\"name\"], \"Devops\")", "def test_add_url(self):\n url = 'http://test.com/'\n info = self.api.add_url(url, tags=['asd'])\n self.assertEqual(info['value'], url)\n tags = [t['name'] for t in info['tags']]\n self.assertEqual(tags, ['asd'])", "def post_list(self, request, **kwargs):\r\n #logger.debug(\"post list %s\\n%s\" % (request, kwargs));\r\n response = super(BaseCorsResource, self).post_list(request, **kwargs)\r\n return self.add_cors_headers(response, True)", "def validate_bucket_list_data(func):\n @wraps(func)\n def validate_bucket_list(*args, **kwargs):\n bucket_list_data = request.get_json()\n if not bucket_list_data:\n return {\"message\": \"You have to provide the required data\"}, 400\n elif \"bucket_list_id\" not in bucket_list_data or \"bucket_list_name\" not in bucket_list_data:\n return {\"message\": \"You have to provide the required data\"}, 400\n elif bucket_list_data[\"bucket_list_id\"] == \"\"or bucket_list_data[\"bucket_list_name\"] == \"\":\n return {\"message\": \"You have to provide all the required data\"}, 400\n return func(*args, **kwargs)\n return validate_bucket_list", "def test_list(self):\n bust_fragments(self.resp, ['/foo/bar', '/zip/zap'])\n self.assert_header_set('[\"/foo/bar\", \"/zip/zap\"]')", "def test_addList(self):\n lili = []\n lili.append(['term', 'tags', 'value'])\n lili.append(['foo', 'a', '1'])\n lili.append(['bar', 'a, b', '2'])\n lili.append(['gnark', 'a, c', '3'])\n self.g.add_list(lili)", "def test_shoppinglist_creation(self):\n self.app.post('/register', data=self.user_reg_details)\n self.app.post('/login', data=self.user_login_details)\n res = self.app.post(\n '/shoppinglist', data={'list-name': 'Easter'})\n self.assertEqual(res.status_code, 200)\n response = self.shopping_class_obj.create_list(\n 'Easter', 'maina@gmail.com')\n self.assertIsInstance(response, list)\n self.assertIn(\"Easter\", str(res.data))", "def create_object(self,object_data_list):\n for object_data in object_data_list:\n if self.valid_object(object_data):\n self.populate_names(object_data.get(\"planId\"))\n\n if self.unique_name(name=object_data.get(\"name\"),plan_id=object_data.get(\"planId\")):\n self.post_object(object_data)\n else:\n new_name = self.try_create_uniqe_name(object_data.get(\"name\"),object_data.get(\"planId\"))\n if new_name:\n object_data[\"name\"]= new_name\n self.post_object(object_data)\n else:\n logging.error(f'no unique name for bucket')\n 
self.append_response(f'no unique name for bucket')\n return self.response", "def test_csc_authorization_request_list_authlist_user(self):\n # Arrange:\n self.client.credentials(\n HTTP_AUTHORIZATION=\"Token \" + self.token_user_authlist.key\n )\n\n # Act:\n url = reverse(\"authlistrequest-list\")\n response = self.client.get(url, format=\"json\")\n\n # Assert\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.data), 3)", "def list_bucket(self, bucket):\n self.response.write('Creating more files for listbucket...\\n')\n self.create_file(bucket + '/foo1')\n self.create_file(bucket + '/foo2')\n self.response.write('\\nListbucket result:\\n')\n\n page_size = 1\n stats = gcs.listbucket(bucket, max_keys=page_size)\n while True:\n count = 0\n for stat in stats:\n count += 1\n self.response.write(repr(stat))\n self.response.write('\\n')\n\n if count != page_size or count == 0:\n break\n last_filename = stat.filename[len(bucket) + 1:]\n stats = gcs.listbucket(bucket, max_keys=page_size, marker=last_filename)", "def test_list(self):\n response = self.app.get(self.url('tags.list'))\n # Test response...", "def setUp(self):\n self.user = User('lornatumuhairwe@gmail.com')\n self.bucketlists = bucketlists", "def add(bucket_name, permissions=None, region_name=Location.EU):\n conn = connect_s3()\n conn.create_bucket(bucket_name, location=region_name)", "def test_buckets(self):\n objectstore.bucket.Bucket.create('new_bucket', self.context)\n bucket = objectstore.bucket.Bucket('new_bucket')\n\n # creator is authorized to use bucket\n self.assert_(bucket.is_authorized(self.context))\n\n # another user is not authorized\n context2 = context.RequestContext('user2', 'proj2')\n self.assertFalse(bucket.is_authorized(context2))\n\n # admin is authorized to use bucket\n admin_context = context.RequestContext('admin_user', None)\n self.assertTrue(bucket.is_authorized(admin_context))\n\n # new buckets are empty\n self.assertTrue(bucket.list_keys()['Contents'] == [])\n\n # storing keys works\n bucket['foo'] = \"bar\"\n\n self.assertEquals(len(bucket.list_keys()['Contents']), 1)\n\n self.assertEquals(bucket['foo'].read(), 'bar')\n\n # md5 of key works\n self.assertEquals(bucket['foo'].md5, hashlib.md5('bar').hexdigest())\n\n # deleting non-empty bucket should throw a NotEmpty exception\n self.assertRaises(NotEmpty, bucket.delete)\n\n # deleting key\n del bucket['foo']\n\n # deleting empty bucket\n bucket.delete()\n\n # accessing deleted bucket throws exception\n self.assertRaises(NotFound, objectstore.bucket.Bucket, 'new_bucket')", "def test_addEntryByList(self):\n self.g.entryFormat = ['term', 'tags', 'value']\n b = self.g.add_entry(['foo', 'a', '1'])\n self.assertTrue(b)", "def create_bucket() -> None:\n try:\n client.make_bucket(DATASETS_BUCKET)\n except BucketAlreadyOwnedByYou:\n logger.debug(f\"Not creating bucket {DATASETS_BUCKET}: Bucket already exists\")\n pass\n else:\n logger.debug(f\"Successfully created bucket {DATASETS_BUCKET}\")", "def get(self, user, id):\n # Search for bucketlist\n bucketlist = Bucketlist.query.filter_by(\n id=id, created_by=user.email).first()\n\n # return 400 if bucketlist non exixtant or not belongs to this user\n if bucketlist is None:\n return 'Bucketlist not found', 202\n\n # serialize items if ann\n bucketlists_items = [\n {'id': item.id,\n 'name': item.description,\n 'date_created': str(item.date_created),\n 'date_modified': str(item.date_modified),\n 'done': str(item.is_done)\n } for item in bucketlist.items\n ]\n\n # serialize bucketlist\n 
response_bucketlist = [\n {'id': bucketlist.id,\n 'name': bucketlist.name,\n 'items': bucketlists_items,\n 'date_created': str(bucketlist.date_created),\n 'date_modified': str(bucketlist.date_modified),\n 'created_by': bucketlist.created_by\n }\n ]\n\n return response_bucketlist, 200" ]
[ "0.7808094", "0.7646647", "0.76416534", "0.7634788", "0.73911667", "0.7332509", "0.7283901", "0.7266109", "0.72469544", "0.7237784", "0.7166871", "0.7161714", "0.7063952", "0.70266515", "0.7024281", "0.7019768", "0.69053954", "0.6877802", "0.6849187", "0.67823607", "0.67349434", "0.6705708", "0.6699396", "0.6599365", "0.6567896", "0.65521157", "0.65269536", "0.6501914", "0.6428159", "0.6401108", "0.6388704", "0.6384686", "0.6322986", "0.6282633", "0.6278758", "0.62545955", "0.6247495", "0.6237146", "0.62323403", "0.6191845", "0.6169163", "0.6158529", "0.6153481", "0.61333936", "0.6096719", "0.6083624", "0.6075869", "0.60613865", "0.6057949", "0.6057598", "0.60405266", "0.60255736", "0.6025457", "0.5880173", "0.58579046", "0.5854113", "0.5844896", "0.5816608", "0.57863575", "0.57751566", "0.5768101", "0.5730621", "0.57282746", "0.56878316", "0.5667684", "0.56566435", "0.5645506", "0.5611349", "0.56036514", "0.5589336", "0.5585305", "0.5572459", "0.55687857", "0.55683166", "0.5565519", "0.5538105", "0.55355257", "0.5534576", "0.5528747", "0.552294", "0.55157954", "0.5502681", "0.54976755", "0.54861766", "0.54787046", "0.54644847", "0.54485583", "0.5402842", "0.5393391", "0.53754395", "0.53666896", "0.5355328", "0.5345206", "0.5342864", "0.53179526", "0.5317494", "0.53094625", "0.5304682", "0.5299802", "0.5298378" ]
0.7050521
13
Method is used to send a PUT request for the bucketlist item to the API
def put_bucketlist_item(self, email, password, bucketlist_id, item_id, data): headers = self.authentication_headers(email=email, password=password) return self.client.put( '/api/v1/bucketlist/{}/items/{}'.format(bucketlist_id, item_id), content_type="application/json", data=json.dumps(data), headers=headers, follow_redirects=True )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_put_bucketlist_item(self):\r\n data = {\"name\": \"bucketlist item name\", \"completed\": \"true\"}\r\n email = \"test@test.com\"\r\n _pword = \"test\"\r\n user = User.query.filter_by(email=email).first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=1).first()\r\n self.assertNotEqual(item.name, \"bucketlist item name\")\r\n self.assertFalse(item.completed)\r\n\r\n response = self.put_bucketlist_item(email, _pword, bucketlist.id, 1, data)\r\n result = json.loads(response.data.decode('utf-8'))\r\n item2 = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=1).first()\r\n self.assertEqual(response.status, '201 CREATED')\r\n self.assertEqual(item2.name, \"bucketlist item name\")\r\n self.assertTrue(item2.completed)", "def put(self, user, id):\n # parse request data\n if 'name' not in self.request.form:\n return \"Bucketlist not Update\", 202\n\n bucketlist_name = self.request.form['name']\n\n # validate bucketlist\n if not bucketlist_name:\n return \"Name cannot be empty\", 401\n\n # search for the bucketlist_id\n bucketlist = Bucketlist.query.filter_by(\n id=id, created_by=user.email).first()\n\n # return 400 if bucketlist non exixtant or not belongs to this user\n if bucketlist is None:\n return 'Bucketlist not found', 202\n\n # Update bucketlist and save changes\n bucketlist.name = bucketlist_name\n bucketlist.save()\n\n return \"Successfully updated bucketlist\", 201", "def test_edit_bucketlist(self):\n post_data = self.post_a_bucket()\n self.assertEqual(post_data.status_code, 201)\n result_of_put_method = self.client().put(\n '/bucketlists/1',\n headers=dict(Authorization='Bearer '\n + self.token()\n ),\n data={\n \"name\": \"The seasons will be, summer winter and autumn\"\n })\n self.assertEqual(result_of_put_method.status_code, 201)\n result_of_get_method = self.client().get('/bucketlists/1',\n headers=dict(Authorization='Bearer '\n + self.token())\n )\n self.assertIn('The seasons will b', str(result_of_get_method.data))", "def test_update_busketlistitem_by_id(self):\n resp = self.client.post('/bucketlists',\n data=json.dumps(self.bucketlist),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n resp_item = self.client.post('/bucketlistitems/1/items',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n update_item = self.client.put('/bucketlistitems/1/items/1',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs and museums too.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(update_item.status_code, 201)", "def update_bucketlist():\n pass", "def test_api_edit_bucketlist(self):\n res_post = self.client().post('/bucketlist', data={'name': 'Wake up, Eat, Code, Sleep & Repeat'})\n self.assertEqual(res_post.status_code, 201)\n res_post_in_json = json.loads(res_post.data.decode('UTF-8').replace(\"'\", \"\\\"\"))\n id = res_post_in_json['id']\n res_put = self.client().put(\n f'bucketlist/{id}',\n data={\n 'name': \"Don't forget to exercise\"\n }\n )\n self.assertEqual(res_put.status_code, 200)\n res = self.client().get(f'/bucketlist/{id}')\n self.assertIn(\"exercise\", str(res.data))", "def test_bucket_is_updated(self):\n with self.client:\n # Get an auth token\n token = 
self.get_user_token()\n # Create a Bucket\n response = self.client.post(\n '/bucketlists',\n data=json.dumps(dict(name='Travel')),\n headers=dict(Authorization='Bearer ' + token),\n content_type='application/json'\n )\n # Test Bucket creation\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 201)\n self.assertTrue(data['status'], 'success')\n self.assertTrue(data['name'], 'Travel')\n # Update the bucket name\n res = self.client.put(\n '/bucketlists/1',\n headers=dict(Authorization='Bearer ' + token),\n data=json.dumps(dict(name='Adventure')),\n content_type='application/json'\n )\n data = json.loads(res.data.decode())\n self.assertEqual(res.status_code, 201)\n self.assertTrue(res.content_type == 'application/json')\n self.assertTrue(data['status'] == 'success')\n self.assertTrue(data['name'] == 'Adventure')\n self.assertEqual(data['id'], 1)", "def test_create_bucketlist_item(self):\n resp = self.client.post('/bucketlists',\n data=json.dumps(self.bucketlist),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n response = self.client.get(\n \"/bucketlists/1\", headers={\n \"Authorization\": self.token})\n self.assertEqual(response.status_code, 200)\n resp_item = self.client.post('/bucketlistitems/1/items',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n result = json.loads(resp_item.data)\n self.assertEqual(result[\"message\"],\n \"Bucket list item added successfully.\")\n self.assertEqual(resp.status_code, 201)", "def post_bucketlist():\n pass", "def test_put_item_wrong_id(self):\r\n data = {\"name\": \"bucketlist item name\", \"completed\": \"true\"}\r\n email = \"test@test.com\"\r\n _pword = \"test\"\r\n user = User.query.filter_by(email=email).first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=0).first()\r\n self.assertFalse(item)\r\n\r\n response = self.put_bucketlist_item(email, _pword, bucketlist.id, 0, data)\r\n result = json.loads(response.data.decode('utf-8'))\r\n self.assertEqual(response.status, '404 NOT FOUND')\r\n self.assertEqual(\r\n result['message'],\r\n 'Bucketlist Item with ID {} not found in the database. 
You have requested this URI '\\\r\n '[/api/v1/bucketlist/1/items/0] but did you mean /api/v1/bucketlist/<int:bucketlist_id>/items/'\\\r\n ' or /api/v1/bucketlist/<int:bucketlist_id> or /api/v1/bucketlist ?'.format(0)\r\n )", "def _put(self, *args, **kwargs):\n return self._request('put', *args, **kwargs)", "def test_add_bucketlist_items(self):\r\n email = \"test@test.com\"\r\n _pword = \"test\"\r\n user = User.query.filter_by(email=email).first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item_no = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id).count()\r\n response = self.add_bucketlist_item(email, _pword, bucketlist.id, \"bucketlist item name\")\r\n result = json.loads(response.data.decode('utf-8'))\r\n self.assertEqual(response.status, '201 CREATED')\r\n self.assertEqual(result['message'], 'Bucket list item added')\r\n new_item_no = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id).count()\r\n self.assertLess(item_no, new_item_no)", "def manipulate_bucketlist():\n pass", "def test_get_bucketlist_items(self):\n resp = self.client.post('/bucketlists',\n data=json.dumps(self.bucketlist),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n\n resp_item = self.client.post('/bucketlistitems/1/items',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n\n self.assertEqual(resp_item.status_code, 200)\n resp_item = self.client.get('/bucketlistitems/1/items', headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp_item.status_code, 200)", "def put(handler, *args, **kwargs):\n bucket_id = args[0]\n user_id = args[1]\n content = tornado.escape.json_decode(handler.request.body)\n BucketClasss = type(\"Bucket_%s\" % (bucket_id), (Bucket,), {})\n bucket = yield BucketClasss.get(user_id)\n if not bucket:\n bucket = BucketClasss()\n bucket.pkey = user_id \n if bucket_id == \"userData\":\n save_ver = int(content.get(\"data\", {}).get(\"saveVer\",0))\n current_ver = int(bucket.data.get(\"data\", {}).get(\"saveVer\",0))\n if save_ver < current_ver:\n raise Return((405, None))\n bucket.data = content \n yield bucket.put()\n raise Return((204, None))", "def put_req(self, item):\n self.export.put_req(item)", "def put_req(self, item):\n self.req_q.put(item)", "def put(self, *args, **kwargs):\n self.request(\"put\", *args, **kwargs)", "def http_put(self, **kwargs):\n return self.rabjcallable.put(**kwargs)", "def add_bucketlist_item(self, email, password, buckelist_id, item_name):\r\n test_date = str(date(2020, 9, 22))\r\n headers = self.authentication_headers(email=email, password=password)\r\n return self.client.post(\r\n '/api/v1/bucketlist/{}/items/'.format(buckelist_id),\r\n data=json.dumps({\"name\": item_name, \"finished_by\": test_date}),\r\n content_type=\"application/json\",\r\n headers=headers,\r\n follow_redirects=True\r\n )", "def test_get_bucketlist_item_id(self):\n resp = self.client.post('/bucketlists',\n data=json.dumps(self.bucketlist),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n resp_item = self.client.post('/bucketlistitems/1/items',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n get_item = 
self.client.get('/bucketlistitems/1/items/1', headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)", "def put(self, *args, **kwargs):\n return self.handle_put_request()", "def test_delete_bucketlistitem_by_id(self):\n resp = self.client.post('/bucketlists',\n data=json.dumps(self.bucketlist),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n resp_item = self.client.post('/bucketlistitems/1/items',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n delete_item = self.client.delete('/bucketlistitems/1/items/1',\n headers={\n \"Authorization\": self.token\n })\n self.assertEqual(delete_item.status_code, 204)", "def put_list(self, request, **kwargs):\r\n response = super(BaseCorsResource, self).put_list(request, **kwargs)\r\n return self.add_cors_headers(response, True)", "def test_modify_item_successfully(self):\n self.client.post('/buckets',\n content_type='application/json',\n data=self.bucket, headers=self.header)\n self.client.post('/buckets/1/items',\n content_type='application/json',\n data=self.item,\n headers=self.header)\n response = self.client.put('/buckets/1/items/1',\n content_type='application/json',\n data=self.item_edit,\n headers=self.header)\n self.assertEquals(response.status_code, 200)\n self.assertIn('Item successfully updated',\n response.data.decode())", "def put_object(self, bucket_name, key, data):\n url = self.__key_url(bucket_name, key)\n resp = self.infinispan_client.put(url, data=data,\n auth=self.basicAuth,\n headers=self.headers)\n logger.debug(resp)", "def test_add_item_at_using_put(self):\n pass", "def test_id_of_bucket_to_be_edited_is_invalid(self):\n with self.client:\n # Get an auth token\n token = self.get_user_token()\n # Update the bucket name\n res = self.client.put(\n '/bucketlists/bucketid',\n headers=dict(Authorization='Bearer ' + token),\n data=json.dumps(dict(name='Adventure')),\n content_type='application/json'\n )\n data = json.loads(res.data.decode())\n self.assertEqual(res.status_code, 400)\n self.assertTrue(res.content_type == 'application/json')\n self.assertTrue(data['status'] == 'failed')\n self.assertTrue(data['message'] == 'Please provide a valid Bucket Id')", "def add_bucket_list_item(self, id, collection, item):\n if type(id) is not ObjectId:\n id = ObjectId(id)\n obj = getattr(self.db, collection)\n result = obj.update(\n {'_id': id},\n {'$addToSet': {'bucket_list': item}}\n )\n return result", "def test_app_can_update_a_list(self):\n self.ne=json.dumps({\"newName\":\"pants\"})\n list_update=self.client.put('/shoppinglists/trou',\n data=self.ne,\n headers={\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n self.assertIn(\"list doesnt exist\",str(list_update.data)) \n self.assertEqual(list_update.status_code,200)", "def PUT(self, req):\n xml = req.xml(MAX_PUT_BUCKET_REFERER_SIZE)\n if xml:\n # check referer\n try:\n elem = fromstring(xml, 'RefererConfiguration')\n allow_empyt_referer=elem.find('AllowEmptyReferer').text\n if allow_empyt_referer not in ['true','false']:\n raise InvalidArgument()\n referer_list=elem.find('RefererList')\n\t\tswift_referers=[]\n for referer in referer_list.findall('Referer'):\n\t swift_referers.append(referer.text)\n\t\tif len(swift_referers)==0 :\n\t\t req.headers['X-Container-Read']=' '\n\t\telse:\n req.headers['X-Container-Read'] = 
'.r:'+','.join(get_real_url(swift_referers))\n except (XMLSyntaxError, DocumentInvalid):\n raise MalformedXML()\n except Exception as e:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n LOGGER.error(e)\n raise exc_type, exc_value, exc_traceback\n resp = req.get_response(self.app)\n resp.status = HTTP_OK\n return resp", "def post(self, user):\n # parse request data\n bucketlist_name = self.request.form['name']\n\n # validate bucketlist\n if not bucketlist_name:\n return \"Name cannot be empty\", 401\n\n # create bucketlist and save bucketlist\n bucketlist = Bucketlist(name=bucketlist_name, date_created=datetime.utcnow(\n ), created_by=user.username, author=user)\n bucketlist.save()\n\n return \"Successfully created bucketlist\", 201", "def test_delete_bucketlist_item(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n\n # create a bucketlist by making a POST request\n res = self.client().post(\n '/api/v1/bucketlists/',\n headers=dict(Authorization=\"Bearer \" + access_token),\n data=self.bucketlist)\n self.assertEqual(res.status_code, 201)\n # get the json with the bucketlist\n results = json.loads(res.data.decode())\n\n # create a bucketlist item by making a POST request and add it to the created bucketlist\n res = self.client().post(\n '/api/v1/bucketlists/{}/items/'.format(results['id']),\n headers=dict(Authorization=\"Bearer \" + access_token),\n data={\n \"name\": \"Eat fried crabs\"\n })\n self.assertEqual(res.status_code, 201)\n # get the json containing the created bucketlist item\n res_item = json.loads(res.data.decode())\n\n # delete the bucketlist item we just created\n res = self.client().delete(\n '/api/v1/bucketlists/{}/items/{}'.format(results['id'], res_item['id']),\n headers=dict(Authorization=\"Bearer \" + access_token), )\n self.assertEqual(res.status_code, 200)\n\n # Test to see if it exists, should return a 404\n result = self.client().get(\n '/api/v1/bucketlists/{}/items/1'.format(results['id']),\n headers=dict(Authorization=\"Bearer \" + access_token))\n self.assertEqual(result.status_code, 404)", "def test_duplicate_bucketlist_item(self):\n resp = self.client.post('/bucketlists',\n data=json.dumps(self.bucketlist),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n resp_item = self.client.post('/bucketlistitems/1/items',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n resp_item2 = self.client.post('/bucketlistitems/1/items',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n result = json.loads(resp_item2.data)\n self.assertEqual(result[\"message\"], \"Item with the given name exists.\")\n self.assertEqual(resp_item2.status_code, 409)", "def test_id_of_bucket_to_be_edited_does_not_exist(self):\n with self.client:\n # Get an auth token\n token = self.get_user_token()\n # Update the bucket name\n res = self.client.put(\n '/bucketlists/1',\n headers=dict(Authorization='Bearer ' + token),\n data=json.dumps(dict(name='Adventure')),\n content_type='application/json'\n )\n data = json.loads(res.data.decode())\n self.assertEqual(res.status_code, 404)\n self.assertTrue(res.content_type == 'application/json')\n self.assertTrue(data['status'] == 'failed')\n 
self.assertTrue(data['message'] == 'The Bucket with Id 1 does not exist')", "def test_update_bucket(self):\n pass", "def do_PUT(self,):\n self.http_method = 'PUT'\n # Nothing to do for now.\n pass", "def _put(self, item: T) -> None:\n ...", "def put(self, item):\n self.url_queue.put(item)", "def test_bucketlist_create(self):\n res = self.client().post('/bucketlist', data=self.bucketlist)\n self.assertEqual(res.status_code, 201)\n self.assertIn('Go to vacation', str(res.data))", "def put(self, key, headers, value, metadata=None):", "def test_bucketlist_creation(self):\n post_data = self.post_a_bucket()\n self.assertEqual(post_data.status_code, 201)\n self.assertIn('Climb the Himalayas', str(post_data.data))", "def test_bucket_by_id_is_returned_on_get_request(self):\n with self.client:\n token = self.get_user_token()\n # Create a Bucket\n response = self.client.post(\n '/bucketlists',\n data=json.dumps(dict(name='Travel')),\n headers=dict(Authorization='Bearer ' + token),\n content_type='application/json'\n )\n # Test Bucket creation\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 201)\n self.assertTrue(data['status'], 'success')\n self.assertTrue(data['name'], 'Travel')\n response = self.client.get(\n '/bucketlists/1',\n headers=dict(Authorization='Bearer ' + token)\n )\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 200)\n self.assertTrue(data['status'] == 'success')\n self.assertTrue(data['bucket']['name'] == 'travel')\n self.assertIsInstance(data['bucket'], dict)\n self.assertTrue(response.content_type == 'application/json')", "def put(self):\n pass", "def put(self):\n pass", "def put(self, path: str) -> Response:\n endpoint_ = checkEndpoint(\"PUT\", path)\n if not endpoint_[\"method\"]:\n # If endpoint and PUT method is not supported in the API\n abort(endpoint_[\"status\"])\n # If 'instances' is available in request\n params = request.args.to_dict()\n object_ = json.loads(request.data.decode(\"utf-8\"))\n if params.get(\"instances\") or object_.get(\"data\"):\n int_list = params.get(\"instances\")\n return items_put_response(path, int_list)\n return item_collection_put_response(path)", "def vaultPutBulk(url, params, body, sessionId):\n headers = {'Authorization': sessionId,'Content-Type': 'application/json','Accept': 'application/json'}\n responseStr = requests.put(url, params=params, json=body, headers=headers)\n response = json.loads(responseStr.content)\n if response['responseStatus'] != 'SUCCESS':\n print('Bulk update failed\\nHTTP PUT request to Vault\\n' +\n 'ERROR while putting to URL \"' + url + '\": ' +\n response['errors'][0]['message'])\n \n return responseStr", "def put(self ,request, pk = None):\r\n\r\n return Response({'method ': 'put'})", "def test_content_type_for_editing_bucket_is_json(self):\n with self.client:\n token = self.get_user_token()\n res = self.client.put(\n '/bucketlists/1',\n headers=dict(Authorization='Bearer ' + token),\n data=json.dumps(dict(name='Adventure'))\n )\n data = json.loads(res.data.decode())\n self.assertEqual(res.status_code, 202)\n self.assertTrue(data['status'] == 'failed')\n self.assertTrue(data['message'] == 'Content-type must be json')", "def test_fail_repeated_buckelist_item(self):\r\n user = User.query.filter_by(email=\"test@test.com\").first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item_no = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id).count()\r\n response = 
self.add_bucketlist_item(\"test@test.com\", \"test\", bucketlist.id, \"test item\")\r\n result = json.loads(response.data.decode('utf-8'))\r\n self.assertEqual(response.status, '409 CONFLICT')\r\n self.assertEqual(result['message'], 'Bucketlist Item Exists')\r\n new_item_no = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id).count()\r\n self.assertEqual(item_no, new_item_no)", "def handle_put(self, api, command):\n return self._make_request_from_command('PUT', command)", "def put(self, *args, **kw):\n kw['method'] = 'PUT'\n return self.open(*args, **kw)", "def put(self,item,index=None):\n self.inbound.put((item,index))", "def put(self):\n return", "def test_creating_and_getting_a_bucketlist_for_authenticated_user(self):\n\n # test all bucketlists\n response = self.client.post(\n \"/bucketlists/\",\n data=dict(name='test_bucketlist'),\n headers={'Authorization': self.user_token}\n )\n bucketlist = json.loads(response.data)\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(bucketlist[\"name\"], 'test_bucketlist')\n\n # test single bucketlist\n self.bucketlist_id = bucketlist[\"bucketlist_id\"]\n single_bucketlist = self.client.get(\n \"/bucketlists/\" + str(self.bucketlist_id) + \"\",\n headers={'Authorization': self.user_token}\n )\n\n one_bucketlist = json.loads(single_bucketlist.data)\n\n self.assertEqual(single_bucketlist.status_code, 200)\n self.assertEqual(one_bucketlist[\"name\"], 'test_bucketlist')\n\n # test all items in bucketlist\n item = self.client.post(\n \"/bucketlists/\" + str(self.bucketlist_id) + \"/items/\",\n data=dict(name=\"test_item\"),\n headers={'Authorization': self.user_token}\n )\n\n one_item = json.loads(item.data)\n\n self.assertEqual(item.status_code, 200)\n self.assertEqual(one_item[\"name\"], 'test_item')\n\n # test single item in bucketlist\n self.item_id = one_item[\"item_id\"]\n single_item = self.client.get(\n \"/bucketlists/\" + str(self.bucketlist_id) + \"/items/\" + str(self.item_id) + \"\",\n headers={'Authorization': self.user_token}\n )\n\n created_item = json.loads(single_item.data)\n\n self.assertEqual(single_item.status_code, 200)\n self.assertEqual(created_item[\"name\"], 'test_item')\n\n # test for deletion of bucketlist\n second_bucketlist = self.client.post(\n \"/bucketlists/\",\n data=dict(name='second_bucketlist'),\n headers={'Authorization': self.user_token}\n )\n\n bucketlist_two = json.loads(second_bucketlist.data)\n\n self.assertEqual(second_bucketlist.status_code, 200)\n self.assertEqual(bucketlist_two[\"name\"], 'second_bucketlist')\n\n delete_response = self.client.delete(\n \"/bucketlists/\" + str(bucketlist_two[\"bucketlist_id\"]) + \"\",\n headers={'Authorization': self.user_token}\n )\n\n deletion = json.loads(delete_response.data)\n\n self.assertEqual(delete_response.status_code, 200)\n self.assertEqual(deletion[\"message\"], \"Deleted\")\n\n # test for deletion of an item in bucketlist\n delete_item = self.client.delete(\n \"/bucketlists/\" + str(bucketlist[\"bucketlist_id\"]) + \"/items/\" + str(one_item[\"item_id\"]) + \"\",\n headers={'Authorization': self.user_token}\n )\n\n item_deletion = json.loads(delete_item.data)\n\n self.assertEqual(delete_item.status_code, 200)\n self.assertEqual(item_deletion[\"message\"], \"Deleted\")\n\n # test for updating of bucketlist\n self.bucketlist_id = bucketlist[\"bucketlist_id\"]\n bucketlist_update = self.client.put(\n \"/bucketlists/\" + str(self.bucketlist_id) + \"\",\n data=dict(name='bucketlist_test'),\n headers={'Authorization': self.user_token}\n )\n\n 
updated_bucketlist = json.loads(bucketlist_update.data)\n\n self.assertEqual(bucketlist_update.status_code, 200)\n self.assertEqual(updated_bucketlist[\"name\"], 'bucketlist_test')\n\n # test update of item in bucketlist\n item = self.client.post(\n \"/bucketlists/\" + str(self.bucketlist_id) + \"/items/\",\n data=dict(name=\"test_item\"),\n headers={'Authorization': self.user_token}\n )\n\n one_item = json.loads(item.data)\n\n item_update = self.client.put(\n \"/bucketlists/\" + str(self.bucketlist_id) + \"/items/\"+ str(one_item[\"item_id\"]) + \"\",\n data=dict(name=\"item_test\"),\n headers={'Authorization': self.user_token}\n )\n\n updated_item = json.loads(item_update.data)\n\n self.assertEqual(item_update.status_code, 200)\n self.assertEqual(updated_item[\"name\"], 'item_test')", "def put(self, *args, **kwargs):\n return super(APIFolderView, self).put(*args, **kwargs)", "def _put(self, item, queue):", "def do_update(url,indexHeaders,update_file):\n updateUrl=url.replace(\"buckets\",\"riak\")\n indexHeaders['content-type'] = 'application/json'\n r=requests.post(url, data=json.dumps(update_file), headers=indexHeaders)", "def test_api_delete_bucketlist(self):\n\n res_post = self.client().post('/bucketlist', data={'name': \"Don't forget to exercise\"})\n self.assertEqual(res_post.status_code, 201)\n res_post_in_json = json.loads(res_post.data.decode('UTF-8'))\n id = res_post_in_json['id']\n res_delete = self.client().delete(f\"/bucketlist/{id}\")\n self.assertEqual(res_delete.status_code, 200)\n\n # should return 404 after delete the data\n res = self.client().get(f'/bucketlist/{id}')\n self.assertEqual(res.status_code, 404)", "def test_api_get_bucketlist_by_id(self):\n res_post = self.client().post('/bucketlist', data=self.bucketlist)\n self.assertEqual(res_post.status_code, 201)\n res_in_json = json.loads(res_post.data.decode('UTF-8').replace(\"'\", \"\\\"\"))\n res = self.client().get(f\"/bucketlist/{res_in_json['id']}\")\n self.assertEqual(res.status_code, 200)\n self.assertIn('Go to vacation', str(res.data))", "def put(self, call, params={}): \n # Build an endpoint using the parameters...\n endpoint = self._calls[call](params)\n url = '{}/{}'.format(str(self), str(endpoint))\n return self.putter.respond(url)", "def _put(self, url, **kwargs):\n return self._call('PUT', url, kwargs)", "def put(self, request, pk=None): #pk of id of objects to be updated (DB)\n return Response({'method':'PUT'})", "def put(self, item): \n self.__db.rpush(self.key, item)", "def put(data):", "def aput(url, **kwargs):\n return requests.put(url, **kwargs)", "def test_delete_bucketlist_item(self):\r\n email = \"test@test.com\"\r\n _pword = \"test\"\r\n user = User.query.filter_by(email=email).first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=1).first()\r\n self.assertTrue(item)\r\n\r\n response = self.delete_bucketlist_item(email, _pword, bucketlist.id, item.id)\r\n result = json.loads(response.data.decode('utf-8'))\r\n self.assertEqual(response.status, '200 OK')\r\n self.assertEqual(\r\n result['message'],\r\n 'Bucketlist Item with ID {} deleted'.format(item.id)\r\n )\r\n item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=1).first()\r\n self.assertFalse(item)", "def update_bucket_acl(self, bucket, user_list):\n msg = \"update_bucket_acl not implemented\"\n raise NotImplementedError(msg)", "def put(self, url, body, headers={}):\n return self.request(url, \"PUT\", 
body, headers)", "def test_shelflistitem_putpatch_requires_auth(api_settings,\n assemble_custom_shelflist,\n get_shelflist_urls, api_client):\n test_lcode, test_id = '1test', 99999999\n _, _, trecs = assemble_custom_shelflist(test_lcode, [(test_id, {})])\n url = '{}{}'.format(get_shelflist_urls(trecs)[test_lcode], test_id)\n before = api_client.get(url)\n put_resp = api_client.put(url, {})\n patch_resp = api_client.patch(url, {})\n after = api_client.get(url) \n assert put_resp.status_code == 403\n assert patch_resp.status_code == 403\n assert before.data == after.data", "def test_single_bucketlist_item_delete_with_empty_token(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n\n res = self.client().post(\n '/api/v1/bucketlists/',\n headers=dict(Authorization=\"Bearer \" + access_token),\n data=self.bucketlist)\n\n # assert that the bucketlist is created\n self.assertEqual(res.status_code, 201)\n # get the response data in json format\n results = json.loads(res.data.decode())\n\n # create a bucketlist item by making a POST request and add it to the created bucketlist\n res = self.client().post(\n '/api/v1/bucketlists/{}/items/'.format(results['id']),\n headers=dict(Authorization=\"Bearer \" + access_token),\n data={\n \"name\": \"Eat fried crabs\"\n })\n self.assertEqual(res.status_code, 201)\n # get the json containing the created bucketlist item\n res_item = json.loads(res.data.decode())\n\n # delete the bucketlist item we just created\n res = self.client().delete(\n '/api/v1/bucketlists/{}/items/{}'.format(results['id'], res_item['id']),\n headers=dict(Authorization=\"\"), )\n self.assertEqual(res.status_code, 401)\n self.assertIn('Token not provided in the header with key Authorization.', str(res.data))", "def put(self,request, pk =None):\n return Response({'method': 'PUT'})", "def api_put(self, *args, **kwargs):\n return self.api_put_with_response(*args, **kwargs)[0]", "def http_method_put():\n return 'PUT'", "def PUT(self, req):\r\n xml = req.xml(MAX_PUT_BUCKET_CORERULE_SIZE)\r\n if xml:\r\n # check location\r\n try:\r\n\t\ttry:\r\n\r\n elem = fromstring(xml, 'CORSConfiguration')\r\n\t\texcept (XMLSyntaxError, DocumentInvalid):\r\n raise InvalidArgument()\r\n for core_rule in elem.findall('CORSRule'):\r\n allowed_origins = _find_all_tags(core_rule,'AllowedOrigin')\r\n allowed_methods = _find_all_tags(core_rule,'AllowedMethod')\r\n allowed_headers= _find_all_tags(core_rule,'AllowedHeader')\r\n expose_headers = _find_all_tags(core_rule,'ExposeHeader')\r\n if core_rule.find('MaxAgeSeconds') is not None:\r\n max_age_seconds = core_rule.find('MaxAgeSeconds').text\r\n req.headers['X-Container-Meta-Access-Control-Allow-Origin'] = _list_str(allowed_origins)\r\n req.headers['X-Container-Meta-Access-Control-Allow-Methods']=_list_str(allowed_methods)\r\n req.headers['X-Container-Meta-Access-Control-Allow-Headers'] = _list_str(allowed_headers)\r\n req.headers['X-Container-Meta-Access-Control-Expose-Headers'] = _list_str(expose_headers)\r\n req.headers['X-Container-Meta-Access-Control-Max-Age'] = max_age_seconds\r\n except (XMLSyntaxError, DocumentInvalid):\r\n raise MalformedXML()\r\n except Exception as e:\r\n exc_type, exc_value, exc_traceback = sys.exc_info()\r\n LOGGER.error(e)\r\n raise exc_type, exc_value, exc_traceback\r\n resp = req.get_response(self.app)\r\n\r\n resp.status = HTTP_OK\r\n\r\n return resp", "def _put(self, path=\"\", **kwargs):\n uri = force_json(self.uri + path)\n return self.client.request(uri, 
method=\"PUT\", **kwargs)", "def put(self):\n status = ErrorCode.SUCCESS\n try:\n data = DotDict(json_decode(self.request.body))\n cid = self.current_user.cid\n tid = self.current_user.tid\n gid = data.gid\n name = data.name\n logging.info(\"[UWEB] Modify group request: %s, cid: %s\",\n data, self.current_user.cid)\n except Exception as e:\n status = ErrorCode.ILLEGAL_DATA_FORMAT\n logging.exception(\"[UWEB] Invalid data format. body:%s, Exception: %s\",\n self.request.body, e.args)\n self.write_ret(status)\n return\n\n try: \n group = self.get_group_by_cid(cid, name)\n if group:\n status = ErrorCode.GROUP_EXIST\n self.write_ret(status)\n return\n\n self.db.execute(\"UPDATE T_GROUP\"\n \" SET name = %s\"\n \" WHERE id = %s\",\n name, gid)\n\n # NOTE: wspush to client \n if status == ErrorCode.SUCCESS:\n WSPushHelper.pushS3(tid, self.db, self.redis)\n\n self.write_ret(status)\n except Exception as e:\n logging.exception(\"[UWEB] Modify group failed. cid: %s, Exception: %s\",\n self.current_user.cid, e.args)\n status = ErrorCode.SERVER_BUSY\n self.write_ret(status)", "def put_object(self, bucket_name, body, key):\n return self._client.put_object(Bucket=bucket_name, Body=body, Key=key)", "def taco_test_put_update(self):\n body = '{ \"id\": 400, \"name\": \"item4\", \"content\": \"after test update\" }'\n env = self.get_env('PUT', '/item/4', body=body)\n webapi_start(env, lambda status, response_headers: self.assertEqual(status, '204'))", "def test_delete_bucketlist(self):\n post_data = self.post_a_bucket()\n self.assertEqual(post_data.status_code, 201)\n result_of_delete_method = self.client().delete('/bucketlists/1',\n headers=dict(Authorization='Bearer '\n + self.token())\n )\n self.assertEqual(result_of_delete_method.status_code, 200)\n response_after_removal = self.client().get('/bucketlists/1',\n headers=dict(Authorization='Bearer '\n + self.token())\n )\n self.assertEqual(response_after_removal.status_code, 400)", "def __setitem__(self, key, value):\n\n bucket_key = self.key_for_bucket(key)\n self.buckets[bucket_key][key] = value", "def put(self,request,pk=None):\n return Response({'method':'Put'})", "def put_object(self, key, data):\n self.s3client.put_object(Bucket=self.s3_bucket, Key=key, Body=data)", "def test_putorganizations_item(self):\n pass", "def update_items(self, request, *a, **kw):\n item_def = request.data\n cpdoc = self.get_object()\n item_def['id'] = cpdoc.id\n\n item_ser = self.get_serializer(instance=obj_cp, data=item_def)\n item_ser.is_valid(raise_exception=True)\n item_obj = item_ser.save()\n headers = self.get_success_headers(item_ser.data)\n return response.Response(item_ser.data, headers=headers)", "def test_update_item_using_post(self):\n pass", "def put(self, key, value):\r\n\r\n\r\n\t\tindex = self.get_index(key) # get the index\r\n\t\tcur_list = self._buckets[index] # this is the linked list\r\n\r\n\t\t# remove the key and assign the returned boolean in removed\r\n\t\tremoved = cur_list.remove(key)\r\n\t\tcur_list.add_front(key, value) # re-add the key with updated value\r\n\r\n\t\t# if removed is false, then a new key was added so increase size by 1\r\n\t\tif not removed:\r\n\t\t\tself.size += 1", "def test_read_bucket(self):\n post_data = self.post_a_bucket()\n self.assertEqual(post_data.status_code, 201)\n result_of_get_method = self.client().get('/bucketlists/',\n headers=dict(Authorization='Bearer '\n + self.token())\n )\n self.assertEqual(result_of_get_method.status_code, 200)\n self.assertIn('Climb the Himalayas', str(result_of_get_method.data))", "def 
_put(self, path, data):\n return self._api.put_status(path, data, headers={\"Hawkular-Tenant\": self.tenant_id,\n \"Content-Type\": \"application/json\"})", "def test_single_bucketlist_item_delete_with_no_auth_header(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n\n res = self.client().post(\n '/api/v1/bucketlists/',\n headers=dict(Authorization=\"Bearer \" + access_token),\n data={'name': 'Visit the Grand Canyon!'})\n self.assertEqual(res.status_code, 201)\n # get the bucketlist in json\n results = json.loads(res.data.decode())\n\n # create a bucketlist item by making a POST request and add it to the created bucketlist\n res = self.client().post(\n '/api/v1/bucketlists/{}/items/'.format(results['id']),\n headers=dict(Authorization=\"Bearer \" + access_token),\n data={\n \"name\": \"Eat fried crabs\"\n })\n self.assertEqual(res.status_code, 201)\n # get the json containing the created bucketlist item\n res_item = json.loads(res.data.decode())\n\n # delete the bucketlist item we just created\n res = self.client().delete(\n '/api/v1/bucketlists/{}/items/{}'.format(results['id'], res_item['id']),\n headers=dict(), )\n self.assertEqual(res.status_code, 401)\n self.assertIn('Header with key Authorization missing.', str(res.data))", "async def put(self, request: Request) -> Response:\n try:\n request_json = await request.json()\n await self._client.add_to_cart(upc=request_json.get('upc'),\n quantity=request_json.get('quantity', 1))\n return Response(\n content_type=\"application/json\",\n status=200\n )\n except Exception as ex:\n return Response(\n content_type=\"application/json\",\n body={\"error\": f\"Unexpected error occurred: {ex}\"},\n status=500\n )", "def put(self, *_, **__): # pylint: disable=arguments-differ\n pass", "def put(self, *_, **__): # pylint: disable=arguments-differ\n pass", "def test_update_item_good(test_client, item):\n\n response = test_client.put(GOOD_ITEM_URL,\n data=json.dumps(item),\n content_type='application/json')\n\n data = json.loads(response.get_data())\n\n assert response.status_code == 200\n assert data['item']['name'] == item['name']\n assert data['item']['value'] == item['value']", "def update():\n return 'update api in put'", "def _put(self, url, data, extra_headers=None):\n headers = {'X-Requested-By': 'Unit Tests'}\n headers.update(extra_headers)\n return self.client.put(\n url,\n content_type='application/json',\n data=utils.as_json(data),\n headers=headers,\n )", "def _put(self, url, data=None):\n url = urljoin(self.base_url, url)\n try:\n r = self._make_request(**dict(\n method='PUT',\n url=url,\n json=data,\n auth=self.auth,\n timeout=self.timeout,\n hooks=self.request_hooks,\n headers=self.request_headers\n ))\n except requests.exceptions.RequestException as e:\n raise e\n else:\n if r.status_code >= 400:\n _raise_response_error(r)\n return r.json()", "def put(self,item):\n\t\ttry:\n\t\t\tself.logger.debug('Im trying to put new item to queue %s'%(item))\n\t\t\tself.queue.put(item)\n\t\t\tself.logger.debug('Successfull put new item to queue')\n\t\t\treturn True\n\t\texcept Exception, e:\n\t\t\tself.logger.error('Error method put, item: %s, error: %s'%(item,e),exc_info=True)\n\t\t\treturn False", "def put(self, item, block=True, timeout=None):\n self.q.put(item, block, timeout)", "def set(self, key, value):\n return s3.Object(self.bucket.name, key).put(Body=value)" ]
[ "0.77104014", "0.75837624", "0.73698086", "0.7226279", "0.7216704", "0.71507794", "0.702163", "0.68962586", "0.6804519", "0.67802167", "0.6777862", "0.66020447", "0.6598506", "0.65621024", "0.64808017", "0.64796245", "0.64737403", "0.64593357", "0.6454596", "0.645011", "0.640735", "0.6369721", "0.6360353", "0.633439", "0.631797", "0.63168114", "0.6312306", "0.6288249", "0.62331", "0.62205523", "0.6208311", "0.61869574", "0.61823636", "0.6181108", "0.61732215", "0.6169253", "0.6150731", "0.6142746", "0.6123812", "0.6087711", "0.60789526", "0.6015056", "0.60130376", "0.59979075", "0.59979075", "0.59953094", "0.59952354", "0.5972286", "0.59686154", "0.5951951", "0.59486055", "0.5947415", "0.5942291", "0.5934871", "0.59307045", "0.5926492", "0.5909083", "0.5899151", "0.5899069", "0.588861", "0.5879145", "0.5864819", "0.5852653", "0.5843172", "0.5827515", "0.5820333", "0.5811342", "0.5810124", "0.5805293", "0.5797877", "0.5797125", "0.57948506", "0.5790679", "0.57885236", "0.57855433", "0.5785346", "0.57512695", "0.5750978", "0.5740579", "0.57326734", "0.57251245", "0.57227194", "0.5716407", "0.56927043", "0.56897223", "0.5681632", "0.5680627", "0.5671169", "0.56699926", "0.5669801", "0.566621", "0.5661525", "0.5661525", "0.5655044", "0.5653809", "0.56527835", "0.56503797", "0.56261766", "0.56246364", "0.5620619" ]
0.7592196
1
Method is used to send a request to delete a bucketlist item
def delete_bucketlist_item(self, email, password, bucketlist_id, item_id): headers = self.authentication_headers(email=email, password=password) return self.client.delete( '/api/v1/bucketlist/{}/items/{}'.format(bucketlist_id, item_id), content_type="application/json", headers=headers, follow_redirects=True )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_delete_bucketlistitem_by_id(self):\n resp = self.client.post('/bucketlists',\n data=json.dumps(self.bucketlist),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n resp_item = self.client.post('/bucketlistitems/1/items',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n delete_item = self.client.delete('/bucketlistitems/1/items/1',\n headers={\n \"Authorization\": self.token\n })\n self.assertEqual(delete_item.status_code, 204)", "def delete_bucketlist():\n pass", "def test_delete_bucketlist(self):\n post_data = self.post_a_bucket()\n self.assertEqual(post_data.status_code, 201)\n result_of_delete_method = self.client().delete('/bucketlists/1',\n headers=dict(Authorization='Bearer '\n + self.token())\n )\n self.assertEqual(result_of_delete_method.status_code, 200)\n response_after_removal = self.client().get('/bucketlists/1',\n headers=dict(Authorization='Bearer '\n + self.token())\n )\n self.assertEqual(response_after_removal.status_code, 400)", "def test_delete_bucketlist_item(self):\r\n email = \"test@test.com\"\r\n _pword = \"test\"\r\n user = User.query.filter_by(email=email).first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=1).first()\r\n self.assertTrue(item)\r\n\r\n response = self.delete_bucketlist_item(email, _pword, bucketlist.id, item.id)\r\n result = json.loads(response.data.decode('utf-8'))\r\n self.assertEqual(response.status, '200 OK')\r\n self.assertEqual(\r\n result['message'],\r\n 'Bucketlist Item with ID {} deleted'.format(item.id)\r\n )\r\n item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=1).first()\r\n self.assertFalse(item)", "def test_delete_bucketlist_item(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n\n # create a bucketlist by making a POST request\n res = self.client().post(\n '/api/v1/bucketlists/',\n headers=dict(Authorization=\"Bearer \" + access_token),\n data=self.bucketlist)\n self.assertEqual(res.status_code, 201)\n # get the json with the bucketlist\n results = json.loads(res.data.decode())\n\n # create a bucketlist item by making a POST request and add it to the created bucketlist\n res = self.client().post(\n '/api/v1/bucketlists/{}/items/'.format(results['id']),\n headers=dict(Authorization=\"Bearer \" + access_token),\n data={\n \"name\": \"Eat fried crabs\"\n })\n self.assertEqual(res.status_code, 201)\n # get the json containing the created bucketlist item\n res_item = json.loads(res.data.decode())\n\n # delete the bucketlist item we just created\n res = self.client().delete(\n '/api/v1/bucketlists/{}/items/{}'.format(results['id'], res_item['id']),\n headers=dict(Authorization=\"Bearer \" + access_token), )\n self.assertEqual(res.status_code, 200)\n\n # Test to see if it exists, should return a 404\n result = self.client().get(\n '/api/v1/bucketlists/{}/items/1'.format(results['id']),\n headers=dict(Authorization=\"Bearer \" + access_token))\n self.assertEqual(result.status_code, 404)", "def delete(self, user, id):\n # Search for bucketlist\n print (id)\n bucketlist = Bucketlist.query.filter_by(\n id=id, created_by=user.email).first()\n\n # return 400 if bucketlist non exixtant or not belongs to 
this user\n if bucketlist is None:\n return 'Bucketlist not found', 202\n\n bucketlist.delete()\n\n return \"Successfully deleted bucketlist\", 200", "def test_api_delete_bucketlist(self):\n\n res_post = self.client().post('/bucketlist', data={'name': \"Don't forget to exercise\"})\n self.assertEqual(res_post.status_code, 201)\n res_post_in_json = json.loads(res_post.data.decode('UTF-8'))\n id = res_post_in_json['id']\n res_delete = self.client().delete(f\"/bucketlist/{id}\")\n self.assertEqual(res_delete.status_code, 200)\n\n # should return 404 after delete the data\n res = self.client().get(f'/bucketlist/{id}')\n self.assertEqual(res.status_code, 404)", "def test_single_bucketlist_item_delete_with_no_auth_header(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n\n res = self.client().post(\n '/api/v1/bucketlists/',\n headers=dict(Authorization=\"Bearer \" + access_token),\n data={'name': 'Visit the Grand Canyon!'})\n self.assertEqual(res.status_code, 201)\n # get the bucketlist in json\n results = json.loads(res.data.decode())\n\n # create a bucketlist item by making a POST request and add it to the created bucketlist\n res = self.client().post(\n '/api/v1/bucketlists/{}/items/'.format(results['id']),\n headers=dict(Authorization=\"Bearer \" + access_token),\n data={\n \"name\": \"Eat fried crabs\"\n })\n self.assertEqual(res.status_code, 201)\n # get the json containing the created bucketlist item\n res_item = json.loads(res.data.decode())\n\n # delete the bucketlist item we just created\n res = self.client().delete(\n '/api/v1/bucketlists/{}/items/{}'.format(results['id'], res_item['id']),\n headers=dict(), )\n self.assertEqual(res.status_code, 401)\n self.assertIn('Header with key Authorization missing.', str(res.data))", "def test_single_bucketlist_item_delete_with_empty_token(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n\n res = self.client().post(\n '/api/v1/bucketlists/',\n headers=dict(Authorization=\"Bearer \" + access_token),\n data=self.bucketlist)\n\n # assert that the bucketlist is created\n self.assertEqual(res.status_code, 201)\n # get the response data in json format\n results = json.loads(res.data.decode())\n\n # create a bucketlist item by making a POST request and add it to the created bucketlist\n res = self.client().post(\n '/api/v1/bucketlists/{}/items/'.format(results['id']),\n headers=dict(Authorization=\"Bearer \" + access_token),\n data={\n \"name\": \"Eat fried crabs\"\n })\n self.assertEqual(res.status_code, 201)\n # get the json containing the created bucketlist item\n res_item = json.loads(res.data.decode())\n\n # delete the bucketlist item we just created\n res = self.client().delete(\n '/api/v1/bucketlists/{}/items/{}'.format(results['id'], res_item['id']),\n headers=dict(Authorization=\"\"), )\n self.assertEqual(res.status_code, 401)\n self.assertIn('Token not provided in the header with key Authorization.', str(res.data))", "def test_single_bucketlist_item_delete_with_invalid_token(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n\n res = self.client().post(\n '/api/v1/bucketlists/',\n headers=dict(Authorization=\"Bearer \" + access_token),\n data=self.bucketlist)\n\n # assert that the bucketlist is created\n self.assertEqual(res.status_code, 201)\n # get the response data in json format\n results = 
json.loads(res.data.decode())\n\n # create a bucketlist item by making a POST request and add it to the created bucketlist\n res = self.client().post(\n '/api/v1/bucketlists/{}/items/'.format(results['id']),\n headers=dict(Authorization=\"Bearer \" + access_token),\n data={\n \"name\": \"Eat fried crabs\"\n })\n self.assertEqual(res.status_code, 201)\n # get the json containing the created bucketlist item\n res_item = json.loads(res.data.decode())\n\n # delete the bucketlist item we just created\n res = self.client().delete(\n '/api/v1/bucketlists/{}/items/{}'.format(results['id'], res_item['id']),\n headers=dict(Authorization=access_token), )\n self.assertEqual(res.status_code, 401)\n self.assertIn('Invalid token format.', str(res.data))", "def test_bucket_is_deleted(self):\n with self.client:\n # Get an auth token\n token = self.get_user_token()\n response = self.client.post(\n '/bucketlists',\n data=json.dumps(dict(name='Travel')),\n headers=dict(Authorization='Bearer ' + token),\n content_type='application/json'\n )\n # Test Bucket creation\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 201)\n self.assertTrue(data['status'], 'success')\n self.assertTrue(data['name'], 'Travel')\n # Delete the created Bucket\n res = self.client.delete(\n '/bucketlists/1',\n headers=dict(Authorization='Bearer ' + token)\n )\n data = json.loads(res.data.decode())\n self.assertEqual(res.status_code, 200)\n self.assertTrue(data['status'] == 'success')\n self.assertTrue(data['message'] == 'Bucket Deleted successfully')\n self.assertTrue(res.content_type == 'application/json')", "def __delitem__(self, key):\r\n self.client.delete(id=key, ignore=[404], **self.kwargs)", "def test_deletion_handles_no_bucket_found_by_id(self):\n with self.client:\n response = self.client.delete(\n '/bucketlists/1',\n headers=dict(Authorization='Bearer ' + self.get_user_token())\n )\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 404)\n self.assertTrue(data['status'] == 'failed')\n self.assertTrue(data['message'] == 'Bucket resource cannot be found')\n self.assertTrue(response.content_type == 'application/json')", "def test_delete_item_wrong_id(self):\r\n email = \"test@test.com\"\r\n _pword = \"test\"\r\n user = User.query.filter_by(email=email).first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=0).first()\r\n self.assertFalse(item)\r\n\r\n response = self.delete_bucketlist_item(email, _pword, bucketlist.id, 0)\r\n result = json.loads(response.data.decode('utf-8'))\r\n self.assertEqual(response.status, '404 NOT FOUND')\r\n self.assertEqual(\r\n result['message'],\r\n 'Bucketlist Item with ID {} not found in the database. 
You have requested this URI '\\\r\n '[/api/v1/bucketlist/1/items/0] but did you mean /api/v1/bucketlist/<int:bucketlist_id>/items/'\\\r\n ' or /api/v1/bucketlist/<int:bucketlist_id> or /api/v1/bucketlist ?'.format(0)\r\n )", "def test_delete(self):\n responses.add(\n responses.Response(\n method='DELETE',\n url='https://connection.keboola.com/v2/storage/buckets/1?force=False&async=False',\n json={}\n )\n )\n bucket_id = '1'\n deleted_detail = self.buckets.delete(bucket_id, asynchronous=False)\n assert deleted_detail is None", "def test_request_for_deleting_bucket_has_integer_id(self):\n with self.client:\n response = self.client.delete(\n '/bucketlists/dsfgsdsg',\n headers=dict(Authorization='Bearer ' + self.get_user_token())\n )\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 400)\n self.assertTrue(data['status'] == 'failed')\n self.assertTrue(data['message'] == 'Please provide a valid Bucket Id')", "def delete_item(id):\n return '', 201", "def POST(self, req):\n def object_key_iter(elem):\n for obj in elem.iterchildren('Object'):\n key = obj.find('./Key').text\n if not key:\n raise UserKeyMustBeSpecified()\n version = obj.find('./VersionId')\n if version is not None:\n version = version.text\n\n yield key, version\n\n max_body_size = min(\n # FWIW, AWS limits multideletes to 1000 keys, and swift limits\n # object names to 1024 bytes (by default). Add a factor of two to\n # allow some slop.\n 2 * self.conf.max_multi_delete_objects * MAX_OBJECT_NAME_LENGTH,\n # But, don't let operators shoot themselves in the foot\n 10 * 1024 * 1024)\n\n try:\n xml = req.xml(max_body_size)\n if not xml:\n raise MissingRequestBodyError()\n\n req.check_md5(xml)\n elem = fromstring(xml, 'Delete', self.logger)\n\n quiet = elem.find('./Quiet')\n self.quiet = quiet is not None and quiet.text.lower() == 'true'\n\n delete_list = list(object_key_iter(elem))\n if len(delete_list) > self.conf.max_multi_delete_objects:\n raise MalformedXML()\n except (XMLSyntaxError, DocumentInvalid):\n raise MalformedXML()\n except ErrorResponse:\n raise\n except Exception as e:\n self.logger.error(e)\n raise\n\n elem = Element('DeleteResult')\n\n # check bucket existence\n try:\n req.get_response(self.app, 'HEAD')\n except AccessDenied as error:\n body = self._gen_error_body(error, elem, delete_list)\n return HTTPOk(body=body)\n\n if 'object_versioning' not in get_swift_info() and any(\n version not in ('null', None)\n for _key, version in delete_list):\n raise S3NotImplemented()\n\n def do_delete(base_req, key, version):\n req = copy.copy(base_req)\n req.environ = copy.copy(base_req.environ)\n req.object_name = str_to_wsgi(key)\n if version:\n req.params = {'version-id': version, 'symlink': 'get'}\n\n try:\n try:\n query = req.gen_multipart_manifest_delete_query(\n self.app, version=version)\n except NoSuchKey:\n query = {}\n if version:\n query['version-id'] = version\n query['symlink'] = 'get'\n\n resp = req.get_response(self.app, method='DELETE', query=query,\n headers={'Accept': 'application/json'})\n # If async segment cleanup is available, we expect to get\n # back a 204; otherwise, the delete is synchronous and we\n # have to read the response to actually do the SLO delete\n if query.get('multipart-manifest') and \\\n resp.status_int != HTTP_NO_CONTENT:\n try:\n delete_result = json.loads(resp.body)\n if delete_result['Errors']:\n # NB: bulk includes 404s in \"Number Not Found\",\n # not \"Errors\"\n msg_parts = [delete_result['Response Status']]\n msg_parts.extend(\n '%s: %s' % (obj, 
status)\n for obj, status in delete_result['Errors'])\n return key, {'code': 'SLODeleteError',\n 'message': '\\n'.join(msg_parts)}\n # else, all good\n except (ValueError, TypeError, KeyError):\n # Logs get all the gory details\n self.logger.exception(\n 'Could not parse SLO delete response (%s): %s',\n resp.status, resp.body)\n # Client gets something more generic\n return key, {'code': 'SLODeleteError',\n 'message': 'Unexpected swift response'}\n except NoSuchKey:\n pass\n except ErrorResponse as e:\n return key, {'code': e.__class__.__name__, 'message': e._msg}\n except Exception:\n self.logger.exception(\n 'Unexpected Error handling DELETE of %r %r' % (\n req.container_name, key))\n return key, {'code': 'Server Error', 'message': 'Server Error'}\n\n return key, None\n\n with StreamingPile(self.conf.multi_delete_concurrency) as pile:\n for key, err in pile.asyncstarmap(do_delete, (\n (req, key, version) for key, version in delete_list)):\n if err:\n error = SubElement(elem, 'Error')\n SubElement(error, 'Key').text = key\n SubElement(error, 'Code').text = err['code']\n SubElement(error, 'Message').text = err['message']\n elif not self.quiet:\n deleted = SubElement(elem, 'Deleted')\n SubElement(deleted, 'Key').text = key\n\n body = tostring(elem)\n\n return HTTPOk(body=body)", "def POST(self, req):\n def object_key_iter(elem):\n for obj in elem.iterchildren('Object'):\n key = obj.find('./Key').text\n if not key:\n raise UserKeyMustBeSpecified()\n version = obj.find('./VersionId')\n if version is not None:\n version = version.text\n\n yield key, version\n\n try:\n xml = req.xml(MAX_MULTI_DELETE_BODY_SIZE, check_md5=True)\n elem = fromstring(xml, 'Delete')\n\n quiet = elem.find('./Quiet')\n if quiet is not None and quiet.text.lower() == 'true':\n self.quiet = True\n else:\n self.quiet = False\n\n delete_list = list(object_key_iter(elem))\n if len(delete_list) > CONF.max_multi_delete_objects:\n raise MalformedXML()\n except (XMLSyntaxError, DocumentInvalid):\n raise MalformedXML()\n except ErrorResponse:\n raise\n except Exception as e:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n LOGGER.error(e)\n raise exc_type, exc_value, exc_traceback\n\n elem = Element('DeleteResult')\n\n # check bucket existence\n try:\n req.get_response(self.app, 'HEAD')\n except AccessDenied as error:\n body = self._gen_error_body(error, elem, delete_list)\n return HTTPOk(body=body)\n\n for key, version in delete_list:\n if version is not None:\n # TODO: delete the specific version of the object\n raise OssNotImplemented()\n\n req.object_name = key\n\n try:\n query = req.gen_multipart_manifest_delete_query(self.app)\n req.get_response(self.app, method='DELETE', query=query)\n except NoSuchKey:\n pass\n except ErrorResponse as e:\n error = SubElement(elem, 'Error')\n SubElement(error, 'Key').text = key\n SubElement(error, 'Code').text = e.__class__.__name__\n SubElement(error, 'Message').text = e._msg\n continue\n\n if not self.quiet:\n deleted = SubElement(elem, 'Deleted')\n SubElement(deleted, 'Key').text = key\n\n body = tostring(elem)\n\n return HTTPOk(body=body)", "def test_delete_item_successfully(self):\n self.client.post('/buckets',\n content_type='application/json',\n data=self.bucket, headers=self.header)\n self.client.post('/buckets/1/items',\n content_type='application/json',\n data=self.item, headers=self.header)\n response = self.client.delete('/buckets/1/items/1',\n headers=self.header)\n self.assertEquals(response.status_code, 200)\n self.assertIn('Item successfully deleted', 
response.data.decode())", "def _bucket_delitem(self, j, k):\n pass", "def cfDel(self, key, item):\n params = [key, item]\n\n return self.execute_command(self.CF_DEL, *params)", "def remove_bucket_list_item(self, id, collection, item):\n if type(id) is not ObjectId:\n id = ObjectId(id)\n obj = getattr(self.db, collection)\n result = obj.update(\n {'_id': id},\n {'$pull': {'bucket_list': item}}\n )\n return result", "def test_delete_item(test_client):\n\n response = test_client.delete(GOOD_ITEM_URL)\n\n assert response.status_code == 204\n assert response.get_data() == b''", "def cloudflare_waf_ip_list_item_delete_command(client: Client, args: Dict[str, Any]) -> CommandResults:\n\n list_id = args['list_id']\n items = [{'id': item} for item in argToList(args.get('items_id'))]\n\n response = client.cloudflare_waf_ip_list_item_delete_request(list_id, items)\n\n output = response['result']\n\n return CommandResults(\n readable_output=f'Delete items to the ip-list {list_id} is executing',\n raw_response=output)", "def delete(self, path):\n params = request.args.to_dict()\n if params.get(\"instances\"):\n int_list = params.get(\"instances\")\n return items_delete_response(path, int_list)\n abort(405)", "def deleteItem(list,item):\n print \"I deleted this item:\", item\n list.remove(item)", "def delete_bucket(Bucket=None):\n pass", "def do_del_item(self, arg):\n try:\n del_item = arg[\"<list_name>\"]\n choice = arg[\"--choice\"]\n if choice == \"name\":\n del_item_str = \" \".join(del_item)\n print(del_item_str)\n elif choice == \"id\":\n del_item_str = int(\" \".join(del_item))\n print (del_item_str)\n app.ToDoApp.to_delete_item(del_item_str)\n print (\"Item deleted\")\n\n\n \n except ValueError as e:\n cprint((e), 'red')", "def s3_delete_data(self):\n\n self.k.delete()", "def _delete(self, *args, **kwargs):\n return self._request('delete', *args, **kwargs)", "def delete(self, bucket: str, object_name: str):\n raise NotImplementedError()", "def test_delete_item_with_non_existing_bucket(self):\n self.client.post('/buckets',\n content_type='application/json',\n data=self.bucket, headers=self.header)\n self.client.post('/buckets/1/items',\n content_type='application/json',\n data=self.item, headers=self.header)\n response = self.client.delete('/buckets/2/items/1'\n , headers=self.header)\n self.assertEquals(response.status_code, 400)\n self.assertIn('Attempting to delete item on non existing bucket',\n response.data.decode())", "def delete(self, item):\n self._createAction(item, \"delete\")", "def delete(self, *args, **kwargs):\n self.request(\"delete\", *args, **kwargs)", "def delete_item(id: str):\n db.delete(id, kind=endpoint_model)\n return {\"result\": \"ok\"}", "def delete(self, item_id, **params):\n\n self.queue('delete', item_id=item_id, **params)", "def delete(self):\r\n return http.Request('DELETE', '{0}'.format(\r\n self.get_url())), parsers.parse_json", "def _delete(self, item):\n self.cv.delete(item)", "def delete(self, name):\n global items\n items = _Helper.all_item_except_searching_for(name)\n return {\"message\": f\"Item {name} deleted successfully\"}, 204", "def delete(self):\n self.request().delete()", "def test_delete_bucket(self):\n pass", "def delete(self):\r\n self.domain.delete_item(self)", "def delete(self, call, params={}): \n # Build an endpoint using the parameters...\n endpoint = self._calls[call](params)\n url = '{}/{}'.format(str(self), str(endpoint))\n return self.deleter.respond(url)", "def delete_whole_bucket(bucket):\n bucket = s3r.Bucket(bucket)\n for key in 
bucket.objects.all():\n key.delete()\n bucket.delete()\n print(bucket, \" : is deletd \")", "def delete_bucket_cors(Bucket=None):\n pass", "async def delete(self, key: str):", "def delete_object(self, bucket_name, key):\n url = self.__key_url(bucket_name, key)\n return self.infinispan_client.delete(url, headers=self.headers, auth=self.basicAuth)", "def _delete_tag_request():\n key = helpers.get('Tag.1.Key')\n resource_id = helpers.get('ResourceId.1')\n\n if resource_id in current_app.config['RESOURCE_TYPE_MAP']:\n resource_type = current_app.config['RESOURCE_TYPE_MAP'][resource_id]\n else:\n errors.invalid_request(\n str(resource_id) + \" not found in configuration\")\n\n args = {\n 'command': 'deleteTags',\n 'resourceids': resource_id,\n 'resourcetype': resource_type,\n 'tags[0].key': key\n }\n\n response = requester.make_request_async(args)\n\n return response", "def delete():", "def obj_delete_list(self, request=None, **kwargs):\n self.get_collection(request).remove()", "def delete(self, request, *args, **kwargs):\n self.object = self.get_object()\n self.object.delete()\n return JsonResponse({'status': 'ok'})", "def delete(self):\r\n self.require_collection()\r\n request = http.Request('DELETE', self.get_url())\r\n\r\n return request, parsers.parse_empty", "async def delete(self, delete: TPayload) -> None:", "def _delete(self, table, _id, return_item=False):\n data = {\"Key\": _id, \"ReturnValues\": \"ALL_OLD\" if return_item else \"NONE\"}\n\n return self._response_handler(table, \"delete_item\", data)", "def __delitem__(self, key):\n\n bucket_key = self.key_for_bucket(key)\n del self.buckets[bucket_key][key]\n\n if not self.buckets[bucket_key]:\n del self.buckets[bucket_key]", "def delete_object(Bucket=None, Key=None, MFA=None, VersionId=None, RequestPayer=None, BypassGovernanceRetention=None):\n pass", "def delete_bucket(self, name):\n return", "def delete(self, *args, **kwargs):\n return self.handle_delete_request()", "def delete(self):\n ...", "def delete_item(request):\n if request.json_body[u'type'] == u'post':\n if DBSession.query(Post).filter(Post.name==request.json_body[u'name']).delete() == 1:\n return {\"deletion_status\":\"success\"}\n import ipdb; impdb.set_trace()\n return {\"deletion_status\":\"error\"}", "def delete(self, key):\n self.resource.Object(self.bucketname, key).delete()", "def deleteItem(request, itemid):\n try:\n item = ItemSerializer(Item.objects.get(id=itemid))\n Item.objects.get(id=itemid).delete()\n return Response(item.data)\n\n except Item.DoesNotExist:\n fail = {\n \"item\":\"item does not exist\"\n }\n return JsonResponse(fail)", "def delete(self, url):\n return self.request(url, \"DELETE\")", "def delete(self):\n return api.delete([self._key])", "def http_delete(self, **kwargs):\n return self.rabjcallable.delete(**kwargs)", "def test_app_can_delete_list(self):\n delete_list=self.client.delete('/shoppinglists/nikes',\n headers={\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n self.assertEqual(delete_list.status_code,200)", "def delete(self, *args, **kwargs):\n pass", "def delete(self, *args, **kwargs):\n pass", "def _delete(self, url):\n return self._request(url, method=\"DELETE\")", "def bulkDelete(self, objList: List[PermissionContext], tokenData: TokenData):", "def DELETE(self, env, start_response):\n key_args = set(['cors','lifecycle','policy','tagging','website'])\n\n qs = env.get('QUERY_STRING', '')\n args = urlparse.parse_qs(qs, 1)\n\n if not key_args & set(args):\n # DELETE a Bucket\n version = args.get('versionId')\n if 
version:\n vid = version[0]\n if vid.lower() == 'lastest':\n pass\n else:\n env['PATH_INFO'] = '/v1/AUTH_%s/%s/%s' % (quote(self.account_name),\n quote(self.version_name(self.container_name)),\n vid)\n\n body_iter = self._app_call(env)\n status = self._get_status_int()\n\n if status != HTTP_NO_CONTENT:\n if status in (HTTP_UNAUTHORIZED, HTTP_FORBIDDEN):\n return self.get_err_response('AccessDenied')\n elif status == HTTP_NOT_FOUND:\n return self.get_err_response('NoSuchBucket')\n elif status == HTTP_CONFLICT:\n return self.get_err_response('BucketNotEmpty')\n else:\n return self.get_err_response('InvalidURI')\n\n resp = Response()\n resp.status = HTTP_NO_CONTENT\n return resp\n else:\n # DELETE specified data\n action = args.keys().pop()\n if action == 'cors':\n # delete cors\n env['HTTP_X_CONTAINER_META_ACCESS_CONTROL_ALLOW_ORIGIN'] = ''\n env['HTTP_X_CONTAINER_META_ACCESS_CONTROL_MAX_AGE'] = ''\n env['HTTP_X_CONTAINER_META_ACCESS_CONTROL_EXPOSE_HEADERS'] = ''\n env['HTTP_X_CONTAINER_META_ACCESS_CONTROL_ALLOW_METHOD'] = ''\n env['QUERY_STRING'] = ''\n env['REQUEST_METHOD'] = 'POST'\n\n body_iter = self._app_call(env)\n status = self._get_status_int()\n\n if is_success(status):\n resp = Response()\n resp.status = HTTP_NO_CONTENT\n return resp\n elif status in (HTTP_UNAUTHORIZED, HTTP_FORBIDDEN):\n return self.get_err_response('AccessDenied')\n else:\n return self.get_err_response('InvalidURI')\n elif action == 'lifecycle':\n # delete lifecycle\n env['HTTP_X_CONTAINER_META_TRANS_AT'] = ''\n env['HTTP_X_CONTAINER_META_TRANS_AFTER'] = ''\n env['HTTP_X_CONTAINER_META_TRANS_CLASS'] = ''\n\n env['HTTP_X_CONTAINER_META_EXPIRATION_AT'] = ''\n env['HTTP_X_CONTAINER_META_EXPIRATION_AFTER'] = ''\n env['HTTP_X_CONTAINER_META_EXPIRATION_PREFIX'] = ''\n env['HTTP_X_CONTAINER_META_EXPIRATION_STATUS'] = ''\n env['REQUEST_METHOD'] = 'POST'\n env['QUERY_STRING'] = ''\n body_iter = self._app_call(env)\n status = self._get_status_int()\n if is_success(status):\n resp = Response()\n resp.status = HTTP_NO_CONTENT\n return resp\n elif status in (HTTP_UNAUTHORIZED, HTTP_FORBIDDEN):\n return self.get_err_response('AccessDenied')\n else:\n return self.get_err_response('InvalidURI')\n elif action == 'policy':\n # delete policy\n env['REQUEST_METHOD'] = 'POST'\n env['QUERY_STRING'] = ''\n env['HTTP_X_CONTAINER_META_POLICY'] = ''\n body_iter = self._app_call(env)\n status = self._get_status_int()\n if is_success(status):\n resp = Response()\n resp.status = HTTP_NO_CONTENT\n return resp\n elif status in (HTTP_UNAUTHORIZED, HTTP_FORBIDDEN):\n return self.get_err_response('AccessDenied')\n else:\n return self.get_err_response('InvalidURI')\n elif action == 'tagging':\n # delete tagging\n env2 = copy(env)\n container_info = get_container_info(env2, self.app)\n meta_keys = container_info['meta'].keys()\n for key in meta_keys:\n env['HTTP_X_CONTAINER_META_' + key.replace('-', '_').upper()] = ''\n env['QUERY_STRING'] = ''\n env['REQUEST_METHOD'] = 'POST'\n\n body_iter = self._app_call(env)\n status = self._get_status_int()\n\n if is_success(status):\n resp = Response()\n resp.status = HTTP_NO_CONTENT\n return resp\n elif status in (HTTP_UNAUTHORIZED, HTTP_FORBIDDEN):\n return self.get_err_response('AccessDenied')\n else:\n return self.get_err_response('InvalidURI')\n elif action == 'website':\n # delete website\n body = env['wsgi.input'].read()\n env['REQUEST_METHOD'] = 'POST'\n env['QUERY_STRING'] = ''\n env['HTTP_X_CONTAINER_META_WEBSITE'] = quote(body)\n\n body_iter = self._app_call(env)\n status = 
self._get_status_int()\n\n if is_success(status):\n resp = Response()\n resp.status = HTTP_OK\n return resp\n elif status in (HTTP_UNAUTHORIZED, HTTP_FORBIDDEN):\n return self.get_err_response('AccessDenied')\n else:\n return self.get_err_response('InvalidURI')\n else:\n return self.get_err_response('InvalidURI')", "def delete_bucket_from_project(projectname, bucketname):\n return jsonify(\n admin.delete_bucket_on_project(\n current_app.scoped_session(), projectname, bucketname\n )\n )", "def delete(self, obj):", "def delete(self):\n return self.request('', pylastica.request.Request.DELETE)", "def DeleteKey(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def delete(self, data):\r\n pass", "def do_del(self, arg):\n try:\n del_list = arg[\"<list_name>\"]\n choice = arg[\"--choice\"]\n if choice == \"name\":\n del_list_str = \" \".join(del_list)\n print(del_list_str)\n elif choice == \"id\":\n del_list_str = int(\" \".join(del_list))\n print (del_list_str)\n app.ToDoApp.to_delete_todo(del_list_str)\n print (\"List deleted\")\n\n\n \n except ValueError as e:\n cprint((e), 'red')", "def test_delete_item_using_delete(self):\n pass", "def post_delete():\n req_data = request.get_json()\n print('This is the request itself \\n', req_data)\n print(req_data['name'])\n flask_wms.delete_entry(req_data['name'])\n return 'Request recieved, delete method'", "def removeItem(*args):", "def removeItem(*args):", "def __delitem__(self, key):\n bucket = self._buckets[self._index(key)]\n for node in bucket.linked_list:\n bucket_object_key, bucket_object_value = node.value\n if bucket_object_key.load_value() == key:\n # remove objects from object -> list_node dict\n key_list_node = self._object_to_list_node.pop(bucket_object_key)\n value_list_node = self._object_to_list_node.pop(bucket_object_value)\n # remove list_node from in_memory and disk objects\n self._in_memory_objects.remove(key_list_node)\n self._in_memory_objects.remove(value_list_node)\n self._disk_objects.remove(key_list_node)\n self._disk_objects.remove(value_list_node)\n # remove node from bucket linked list\n assert bucket.linked_list.remove(node) == True\n self._balance()\n return\n raise KeyError(\"Key `{}` is not exists\".format(key))", "def Delete(self, request, global_params=None):\n config = self.GetMethodConfig('Delete')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Delete(self, request, global_params=None):\n config = self.GetMethodConfig('Delete')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Delete(self, request, global_params=None):\n config = self.GetMethodConfig('Delete')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Delete(self, request, global_params=None):\n config = self.GetMethodConfig('Delete')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Delete(self, request, global_params=None):\n config = self.GetMethodConfig('Delete')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Delete(self, request, global_params=None):\n config = self.GetMethodConfig('Delete')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Delete(self, request, global_params=None):\n config = self.GetMethodConfig('Delete')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Delete(self, request, 
global_params=None):\n config = self.GetMethodConfig('Delete')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Delete(self, request, global_params=None):\n config = self.GetMethodConfig('Delete')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Delete(self, request, global_params=None):\n config = self.GetMethodConfig('Delete')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Delete(self, request, global_params=None):\n config = self.GetMethodConfig('Delete')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Delete(self, request, global_params=None):\n config = self.GetMethodConfig('Delete')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Delete(self, request, global_params=None):\n config = self.GetMethodConfig('Delete')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Delete(self, request, global_params=None):\n config = self.GetMethodConfig('Delete')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Delete(self, request, global_params=None):\n config = self.GetMethodConfig('Delete')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Delete(self, request, global_params=None):\n config = self.GetMethodConfig('Delete')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Delete(self, request, global_params=None):\n config = self.GetMethodConfig('Delete')\n return self._RunMethod(\n config, request, global_params=global_params)" ]
[ "0.77946883", "0.7778827", "0.7758909", "0.7664357", "0.76476765", "0.7614669", "0.7598389", "0.74553555", "0.74454176", "0.714708", "0.69668", "0.6871372", "0.6836843", "0.68232435", "0.67902356", "0.6745601", "0.66664237", "0.66347444", "0.6566819", "0.6558374", "0.6537086", "0.6535251", "0.6528435", "0.6517352", "0.64569455", "0.64415103", "0.6431554", "0.64219093", "0.63871896", "0.6374976", "0.63636684", "0.635899", "0.6356526", "0.635561", "0.6352624", "0.63341576", "0.631475", "0.6271315", "0.6268941", "0.6260085", "0.62529993", "0.6242043", "0.6239868", "0.6215472", "0.62099266", "0.6199433", "0.6197168", "0.6191902", "0.6179021", "0.61705536", "0.61690676", "0.6167929", "0.6167138", "0.6163527", "0.6147709", "0.61434287", "0.61386", "0.61278194", "0.61256254", "0.61223054", "0.6112558", "0.6089634", "0.6070179", "0.60687524", "0.6067129", "0.6066399", "0.60620314", "0.6058488", "0.6058488", "0.60432804", "0.60421556", "0.6037692", "0.60358834", "0.60285556", "0.6026349", "0.60133606", "0.6008991", "0.6007998", "0.6003695", "0.59999686", "0.5978244", "0.5978244", "0.5973401", "0.5968755", "0.5968755", "0.5968755", "0.5968755", "0.5968755", "0.5968755", "0.5968755", "0.5968755", "0.5968755", "0.5968755", "0.5968755", "0.5968755", "0.5968755", "0.5968755", "0.5968755", "0.5968755", "0.5968755" ]
0.7630652
5
Spawn a new thread which continually updates things.
def run(self): while True: buf = "" while len(buf) == 0 or buf[-1] != '\n': if self.ser.available(): buf += self.ser.read() else: delay(1) # Avoid pegging CPU tokens = buf.split(' ') s = tokens[0] self.lock.acquire() try: if s == "PPM": self.ppm = [int(i) for i in tokens[1:]] elif s == "Wind": self.wind = int(tokens[1]) elif s == "roll": self.roll = float(tokens[1]) elif s == "yaw": self.yaw = float(tokens[1]) elif s == "Wpot": self.winch = int(tokens[1]) elif s == "Rpot": self.rudder = int(tokens[1]) except: pass # A cast likely failed self.lock.release()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def start(self):\n Thread(target=self.update, args=()).start()\n return self", "def start(self):\n Thread(target=self.update, args=()).start()\n return self", "def start(self):\r\n t = Thread(target=self.update, args=())\r\n t.daemon = True\r\n t.start()\r\n return self", "def start(self):\r\n t = Thread(target=self.update, args=())\r\n t.daemon = True\r\n t.start()\r\n return self", "def _make_thread(self):\r\n pass", "def run(self):\n self.spawn()\n while self.is_alive:\n self.move()\n time.sleep(.2)", "def run(self):\n self.thread = threading.Thread(target=self._main)\n self.thread.start()\n self.running = True", "def run(self):\n self.submit()\n self.start()", "def spawn_thread(func, *args, **kwargs):\n thread = threading.Thread(target=func, args=args, kwargs=kwargs)\n thread.daemon = True\n thread.start()\n return thread", "def run(self): # pragma: no cover\n while True:\n self.update()", "def __init__(self):\n Thread.__init__(self)\n self.start()", "def __init__(self):\n Thread.__init__(self)\n self.start()", "def run(self):\n self.log.info(\"Starting thread: \" + self.name)\n self.object__ = self.run_process(self.object__, self.args)", "def __init__(self):\n Thread.__init__(self)\n self.start() # start the thread", "def update_thread(thread, thread_num):\n pass# TODO", "def run(self):\n\t\t\n\t\twhile self.update():\n\t\t\tpass", "def _StartStatusUpdateThread(self):\n self._status_update_active = True\n self._status_update_thread = threading.Thread(\n name='Status update', target=self._StatusUpdateThreadMain)\n self._status_update_thread.start()", "def start(self):\r\n threading.Thread(target=self.update_frame, args=()).start()\r\n return self", "def ticker(self):\n\n gevent.spawn(self.tick)\n self.greenlet = gevent.spawn_later(self.cluster.config.get('tick'), self.ticker)", "def _start_loop(self):\n self.p = tread.Thread(target=self._loop)\n self.p.start()", "def start(self):\n self.thread.start()", "def start(self):\n \n self.thread.start()\n self.state = \"running\"", "def run(self):\n last_time = time.time()\n while self.running:\n now_time = time.time()\n interval = now_time - last_time\n last_time = now_time\n self.update(interval)\n time.sleep(Options['update interval'])", "def start(self):\n threading.Thread(target=self.serve_forever).start()", "def start(self):\n self._setup_thread()\n self.thread.start()", "def run(self):\n while True: # make sure to run at least once before exiting\n with self._lock:\n self._update(self._data)\n if self._done:\n break\n time.sleep(1)", "def launch_thread(self, daemon=1):\r\n assert self._thread is None\r\n t = threading.Thread(target=self._loop, name=\"TorLoop\")\r\n if daemon:\r\n t.setDaemon(daemon)\r\n t.start()\r\n self._thread = t\r\n t = threading.Thread(target=self._eventLoop, name=\"EventLoop\")\r\n if daemon:\r\n t.setDaemon(daemon)\r\n t.start()\r\n self._eventThread = t\r\n # eventThread provides a more reliable indication of when we are done.\r\n # The _loop thread won't always die when self.close() is called.\r\n return self._eventThread", "def start(self):\n gevent.spawn_later(self._period, self._run)", "def _ensure_thread(self) -> None:\n\n if not self._thread:\n thread = self._thread_factory(self.run)\n self._thread = thread\n thread.start()", "def background(self):\n self.thread = threading.Thread(target=self.run)\n self.thread.setDaemon(True)\n self.thread.start()", "def spawn(cls, *args, **kwargs):\n g = cls(*args, **kwargs)\n g.start()\n return g", "def start(self):\n self._thread.start()", "def __init__(self):\n 
sleep(10)", "def fake_spawn(time_from_now_in_seconds, func, *args, **kw):\n def thread_start():\n # fake_sleep(time_from_now_in_seconds)\n return func(*args, **kw)\n\n cr = Coroutine(thread_start)\n fake_threads.append({'sleep': time_from_now_in_seconds,\n 'greenlet': cr,\n 'name': str(func)})", "def __init__(self, update_func):\n QtCore.QThread.__init__(self)\n self._update_func = update_func", "def run(self):\n\n self._daemon_thread.start()\n\n while True:\n time.sleep(5)", "def main_loop():\n while len(fake_threads) > 0:\n pulse(0.1)", "def start(self):\n self.open()\n #t = Thread(target=self._cache_update, args=())\n #t.daemon = True\n #t.start()", "def run(self):\n self.thread_send.start()\n self.thread_receive.start()", "def start(self):\n def f():\n if (self.started): return\n self.started = True\n with client.ServerProxy(self.host) as proxy:\n while (not self.req_shutdown):\n self.update_speed(proxy)\n time.sleep(self.com_freq)\n self.started = False\n self.req_shutdwon = False\n\n Thread(target=f).start()", "def run(self):\n\n while not self.done:\n\n self.event_loop()\n\n self.update()", "def serve(self):\n\t\timport thread\n\t\tthread.start_new_thread(self._server_thread, tuple())", "def create_thread(target):\n thread = threading.Thread(target=target)\n thread.daemon = True\n thread.start()", "def _StatusUpdateThreadMain(self):\n while self._status_update_active:\n self._UpdateStatus()\n time.sleep(self._status_update_interval)", "def start(self) -> None:\n start_thread(super().start, self.__class__.__name__)", "def start_thread(self):\n self.stop_thread()\n self.running = True\n self.run_thread = threading.Thread(target=self.run, daemon=True)\n self.run_thread.start()", "def run(self):\n self.speed_test.start()", "def create_and_start_threads(self):\r\n self.create_threads()\r\n self.start_threads()", "def start_thread(self):\n self.thread = Thread(target=self.put_lines_into_queue)\n self.thread.daemon = True\n self.thread.start()", "def start(self):\r\n monitor_thread = Thread(target = self.monitor)\r\n monitor_thread.setDaemon(True)\r\n monitor_thread.start()\r\n\r\n main_thread = Thread(target = self.run)\r\n main_thread.setDaemon(True)\r\n main_thread.start()", "def run_one_timestep(self):\n\n print(\"=\"*50)\n print()\n\n forks = []\n\n # Update sensors\n for sensor in self.sensors:\n if isinstance(sensor, Camera):\n thread = threading.Thread(target=sensor.update)\n forks.append(thread)\n thread.start()\n else:\n sensor.update()\n\n for fork in forks:\n fork.join()\n\n forks = []\n # Update sensobs\n for sensob in self.sensobs:\n thread = threading.Thread(target=sensob.update)\n forks.append(thread)\n thread.start()\n\n for fork in forks:\n fork.join()\n\n # Update behaviours\n for behav in self.active_behavs:\n behav.update()\n\n # Invoke arbitrator\n motor_rec = self.arbit.choose_action() # Returns a tuple(list(motor_recommendations), halt)\n print(\"Arbitrator chose: \"+str(motor_rec))\n\n if motor_rec[1]: # Check halt recommendation\n return False # Halt and exit program\n\n\n # Update motobs\n print(self.motobs)\n i = 0\n for motob in self.motobs: # Updates each motob with it's respective motor recommendation\n print(\"Bbcon: Updating motob \" + str(i))\n\n motob.update(motor_rec[0][i])\n i += 1\n\n # Wait\n time.sleep(0.5) #waits half a second\n\n # Reset sensors\n for sensor in self.sensors:\n sensor.reset()\n\n return True", "def start_non_blocking(self):\n self._start_thread(self.start, daemon=True)", "def start(self) -> None:\n self.should_exit = False\n 
self._main_thread = threading.Thread(target=self._wrap_start, daemon=True)\n self._main_thread.start()", "def run(self):\n try:\n while self._running:\n time.sleep(1)\n finally:\n self._exit()", "def run_threaded(self):\n \n t = threading.Thread(target=self.run, args=())\n t.daemon = True\n t.start()\n return t # Return the thread, but don't join it (the caller can join if they want to)", "def start_pull_thread(self):\r\n threading.Thread(target=self._pull_thread).start()", "def run_in_background(self):\n threading.Thread(target=self._run_loop).start()", "def startLoop():\n patchAsyncio()", "def make_updates(self, x):\n global inc\n for stone in self.players[x].stones_reference:\n temp = Thread(target=stone.move_stone)\n temp.daemon = True\n temp.start()\n if not stone.is_alive:\n self.players[x].stones_reference.remove(stone)\n if self.num_players == 1:\n self.maps[0].control_music(self.players[0].min_x)\n\n rand_x = randrange(1, 100)\n rand_x_2 = randrange(1, 150)\n rand_x_3 = randrange(1, 75)\n if inc % rand_x == 0:\n self.maps[x].clouds[0].move_cloud()\n if inc % rand_x_2 == 0:\n self.maps[x].clouds[1].move_cloud()\n if inc % rand_x_3 == 0:\n self.maps[x].clouds[2].move_cloud()\n inc += 1", "def run(self):\n\t\tinterval_in_ticks = self.session.timer.get_ticks(GAME.INGAME_TICK_INTERVAL)\n\t\tScheduler().add_new_object(self._tick, self, runin=interval_in_ticks, loops=-1)", "def start_threading(self):\n\n # For Demo only commented\n self.progress_label.config(text='Running...')\n # self.progress_label.config(text='Estimating Time Reamining')\n self.prev_prog = progressbar.current\n self.submit_thread = threading.Thread(target=self.run)\n self.start_time = time.time()\n self.submit_thread.daemon = True\n self.submit_thread.start()\n self.after(1000, self.check_thread)", "async def _main(self):\n while True:\n time.sleep(1)", "def run(self):\n self.started()", "def start(self):\n\t\tself.__thread = Thread(target=self.__update, name='CamGear', args=())\n\t\tself.__thread.daemon = True\n\t\tself.__thread.start()\n\t\treturn self", "def start(self):\n \n self.keep_running = True\n self.th = threading.Thread(target=self.sendData)\n self.th.daemon = True # Thread will terminate with the main\n self.th.start()\n self.th.join(0)", "def spawn( self, event=None ):\n #print \"a\", self.msgVar.get(), \"\\n\"\n self.visible = 1\n self.after( int( self.delay * 1000 ), self.show ) # The after function takes a time argument in miliseconds\n #self.show() #ohnheiser hack", "def create_gps_thread(\n configuration: Configuration, value: AtomicValue, continue_running: AtomicValue\n):\n return Thread(\n target=gps_reception_loop,\n args=(\n init_gps(configuration),\n value,\n continue_running,\n ),\n daemon=True,\n )", "def start(self) -> None:\n self.stopping.clear()\n self.thread = threading.Thread(target=self._run, daemon=True, name=self.thread_name)\n self.thread.start()", "def run( self ):\r\n \r\n # Execute the per-cycle work specifed by the user\r\n for f in self.updateFuncList:\r\n f() # Please make these lightweight and pertain to UI drawing!\r\n \r\n # Update window\r\n self.rootWin.update_idletasks() # idk , draw or something!\r\n \r\n # Wait remainder of period\r\n elapsed = time.time() * 1000 - self.last\r\n if elapsed < self.stepTime:\r\n sleepTime = int( self.stepTime - elapsed ) \r\n else:\r\n sleepTime = 0\r\n # 4.e. 
Mark beginning of next loop\r\n self.last = time.time() * 1000 \r\n self.rootWin.after( sleepTime , self.run )", "def run(self):\n\t\tt = threading.Thread(target=self.__temperature_thread)\n\t\tt.daemon = True\n\t\tt.start()", "def run(self):\n t = threading.Thread(target=self._thread_action,\n args=(self._sensor_queue,))\n t.daemon = True\n t.start()", "def launchFunc():\n th = threading.Thread(target=progressScreen)\n th.start()", "def _worker(self, robot_id):\n robot = Robot(self, rid=robot_id, scroll_times=3)\n self.robots.update({robot_id: robot})\n d('Starting ROBO_%s' % str(robot_id))\n robot.start()\n d('End of robot_thread %s ' % str(robot_id))\n return", "def start(self):\n if not self._Thread__initialized:\n raise RuntimeError('thread.__init__() not called')\n if self._Thread__started.is_set():\n raise RuntimeError('threads can only be started once')\n with threading._active_limbo_lock:\n threading._limbo[self] = self\n try:\n start_new_background_thread(self.__bootstrap, ())\n except Exception:\n with threading._active_limbo_lock:\n del threading._limbo[self]\n raise\n self._Thread__started.wait()", "def run_forever(self, *args, **kwargs):\n try:\n self.logger.debug('Begin account update')\n\n # get account-updater server ownership\n self.get_ownership_obj = threading.Thread(target = self.msg.get_my_ownership)\n self.get_ownership_obj.setDaemon(True)\n self.get_ownership_obj.start()\n\n self.walker_obj = Walker(self.walker_map, self.__param, self.logger)\n self.walker_obj.setDaemon(True)\n self.walker_obj.start()\n self.logger.info(\"Walker Started\")\n self.reader_obj = Reader(self.walker_map, self.reader_map, \\\n self.__param, self.logger)\n self.reader_obj.setDaemon(True)\n self.reader_obj.start() \n self.logger.info(\"Reader Started\")\n self.account_sweeper = AccountSweep(self.__param, self.logger)\n self.account_sweeper.setDaemon(True)\n self.account_sweeper.start()\n self.logger.info(\"Account Sweeper Started\") \n self.updater_obj = Updater(self.walker_map, self.reader_map, \\\n self.__param, self.logger)\n self.updater_obj.setDaemon(True)\n self.updater_obj.start() \n self.logger.info(\"Updater Started\") \n self.container_sweeper = ContainerSweeper(self.walker_map, \\\n self.reader_map, self.__param, self.logger)\n self.container_sweeper.setDaemon(True)\n self.container_sweeper.start()\n self.logger.info(\"Container Sweeper Started\") \n\n account_updater_server = ThreadedAccountUpdaterServer(\\\n (self.__get_node_ip(gethostname()), \\\n self.__account_updater_port), HttpListener)\n account_updater_server.serve_forever()\n except Exception as ex:\n self.logger.error(\"Exception occured: %s\" % ex)", "def do_fakethread(self,args):\n def ft(firstarg,fakethread_stop,btc,usd):\n while(not fakethread_stop.is_set()):\n print \"THIS IS A FAKE THREAD!!!!!!\"\n fakethread_stop.wait(3)\n\n global fakethread_stop\n btc,usd = bal()\n args = stripoffensive(args)\n args = args.split()\n if 'exit' in args:\n print \"Shutting down background thread...\"\n fakethread_stop.set()\n else: \n fakethread_stop = threading.Event()\n threadlist[\"fakethread\"] = fakethread_stop\n fake_thread = threading.Thread(target = ft, args=(None,fakethread_stop,btc,usd))\n fake_thread.daemon = True\n fake_thread.start()", "def process_thread(self):", "def run(self):\n self.ident = threading.current_thread().ident\n self.ready.set()\n self.exec_()", "def run(self):\n self.monitor.start()", "def run(self):\r\n self.rpc_server.serve_forever(0.5)", "def __init__(self, name='KillableThread'):\n 
self._stopevent = threading.Event( )\n self._sleepperiod = 1.0\n threading.Thread.__init__(self, name=name)", "def run(self):\n server = TCPServer((self.host, self.port), TCPHandler)\n server.lymphocytes_getter = self.lymphocytes_getter\n\n #runs forever - so make this thread daemon\n server.serve_forever()", "def __enter__(self):\n self.start()\n return self", "def __enter__(self):\n self.start()\n return self", "def run(self):\n self._run = True\n caching_thread = Thread(target=self._cache_data)\n caching_thread.start()\n sonification_threads = list()\n for channel, parameter in enumerate(self._parameter_names):\n sonifying_thread = Thread(target=self._sonify_param, args=[channel, parameter])\n sonifying_thread.start()\n sonification_threads.append(sonifying_thread)", "def __init__(self):\r\n threading.Thread.__init__(self)\r\n self.wait_to_start = True\r\n self.id = 0\r\n self.players = None\r\n self.socket = None", "def start(self):\n if not self._is_running:\n self._pos_thread = Timer(self.draw_intvl, self._run)\n self._pos_thread.start()\n self._is_running = True", "def run(self):\r\n\r\n # t=0 is singular point\r\n\r\n print 'Time of laboratory clock Tw =', self.tick\r\n tt = self.tmp\r\n ll = self.lst\r\n car = self.interaction(self.carr)\r\n ll.item_run(tt, self.tick, car)\r\n tt = tt.next\r\n\r\n # run of local time\r\n\r\n while not tt is None:\r\n\r\n if tt.dedicated_node:\r\n self.tick = self.tick + 1\r\n print 'Time of laboratory clock Tw =', self.tick\r\n\r\n # self.move() # It is classical motion of particle (example).\r\n\r\n self.move_reset()\r\n car = self.interaction(self.carr)\r\n\r\n ll = self.lst\r\n while not ll is None:\r\n ll.item_run(tt, self.tick, car)\r\n ll = ll.right\r\n\r\n tt = tt.next", "def block(self):\n while self.running:\n time.sleep( 1 )", "def master():\n init = Initializer.create_init()\n while True:\n # current frame\n ret, frame = 'unknown', np.random.rand(224, 224, 3) * 255\n frame = frame.astype(dtype=np.uint8)\n Thread(target=send_request, args=(frame.tobytes(), 'block12345', 'initial')).start()\n time.sleep(1)", "def _create_thread(self, target=None, **kwargs):\n return PyonThread(target=target, **kwargs)", "def run(self):\n # Start the thread to receive commands\n self.command_input.start()\n\n # The next time to execute a state\n self.next_time = time.time() + self.control_period\n\n # Run forever!\n logger.debug(\"Starting\")\n while True:\n old_state = self.current_state\n\n # Wait for the next control cycle\n if time.time() < self.next_time:\n time.sleep(0.001)\n continue\n else: # Set the next execution time\n self.next_time += self.control_period\n\n # Execute the state!\n if self.current_state == LightSystem.IDLE_STATE:\n self.idle_state()\n elif self.current_state == LightSystem.MANUAL_STATE:\n self.manual_state()\n elif self.current_state == LightSystem.EXIT_STATE:\n self.pi.stop()\n return\n\n if old_state != self.current_state:\n logger.debug(\"%s -> %s\"%(old_state, self.current_state))", "def trigger_update(self):\n update_thread = Thread(target=self.process_queued_msg)\n update_thread.setDaemon(True)\n update_thread.start()", "def run(self):\n while True:\n self.sm.run()\n time.sleep(0.05)", "def spawn( self, event=None ):\n self.visible = 1\n self.after( int( self.delay * 1000 ), self.show ) # The after function takes a time argument in miliseconds", "def main():\n print('creating balloons')\n a = ThreadBalloon('a',0,0,2,1,3)\n b = ThreadBalloon('b',100,100, 4, 2, 2)\n a.start()\n b.start()\n a.join()\n b.join()", "def 
minorloop(self, delay = None): # in milliseconds\n if delay != None:\n self.delay = delay\n self.running = 1\n lastUpdated = 0\n lastData = []\n while self.running:\n self.update()\n time.sleep(self.delay)", "def main():\n channel_watcher = ChannelWatcher()\n channel_watcher.create_threads()\n for thread in channel_watcher.threads:\n thread.join()\n return", "def Run(self):\n self.RunAsync().join()", "def start(self):\n\n self.__thread = Thread(target=self.run, name=self.name)\n self.__thread.daemon = True\n self.__thread.start()\n\n return self.__thread, None", "def run_in_thread(self, fn, *args, **kwargs):\r\n thread = threading.Thread(target=fn, args=args, kwargs=kwargs)\r\n thread.start()\r\n \r\n return thread" ]
[ "0.67790216", "0.67790216", "0.6629717", "0.6629717", "0.6471286", "0.63497496", "0.6247109", "0.60593855", "0.6020881", "0.60143054", "0.6010006", "0.6010006", "0.59761846", "0.5964021", "0.59637165", "0.59495205", "0.59432256", "0.59210443", "0.5894969", "0.5848531", "0.58363295", "0.5826473", "0.5817213", "0.58072317", "0.5793432", "0.5767581", "0.57619834", "0.57618016", "0.57454485", "0.5744589", "0.57346314", "0.57332695", "0.5707711", "0.56953764", "0.56776804", "0.56543094", "0.56440526", "0.5640235", "0.56362325", "0.56237084", "0.5621009", "0.56187946", "0.561584", "0.5610331", "0.5603305", "0.5601889", "0.55923563", "0.55733293", "0.55694747", "0.55559313", "0.55381954", "0.5537282", "0.55259955", "0.5525911", "0.5524902", "0.5521367", "0.5511084", "0.55106187", "0.5510548", "0.55036736", "0.5499646", "0.5488041", "0.5487454", "0.5484375", "0.54839456", "0.5478354", "0.5461926", "0.5455243", "0.5439717", "0.5435245", "0.54332143", "0.54273045", "0.5426858", "0.54251647", "0.53914154", "0.53898597", "0.53884906", "0.53881145", "0.5385941", "0.538116", "0.53757924", "0.53728634", "0.5372681", "0.5372681", "0.53712565", "0.5368569", "0.53668064", "0.5352943", "0.5346155", "0.53431875", "0.5337543", "0.5319271", "0.5308058", "0.5305749", "0.52922076", "0.52883303", "0.52837694", "0.52836394", "0.5278915", "0.52762425", "0.52744323" ]
0.0
-1
Apply type padding in a convolution operation.
def padding_type(spatial, config): ret = None if 'padding' not in config: return 0 elif isinstance(config['padding'], list): ret = torch.tensor(config['padding']) elif config['padding'] == 'same': k = torch.tensor(config['kernel_size']) s = torch.tensor(config['stride']) ret = (spatial*(s-1)-1+k)//2 elif config['padding'] == 'valid': ret = torch.zeros(spatial.shape).long() else: raise ValueError('Pad type is invalid') return list(ret.numpy())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _make_same_padder(conv_op: nn.Conv1d | nn.Conv2d | nn.Conv3d, image_size: list[int]):\n # calculate padding required\n padding: list[int] = _get_same_padding_conv_nd(image_size, conv_op.kernel_size, conv_op.dilation, conv_op.stride)\n\n # initialize and return padder\n padder = Pad[\"constantpad\", len(padding) // 2]\n if sum(padding) > 0:\n return padder(padding=padding, value=0.0)\n return nn.Identity()", "def conv2d_config(input_shape, output_shape, filter_shape):\n input_shape = tf.TensorShape(input_shape).as_list()\n if len(input_shape) == 4:\n batch_size = input_shape[0]\n else:\n batch_size = None\n\n input_shape = np.array(input_shape[-3:])\n output_shape = np.array(tf.TensorShape(output_shape).as_list()[-3:])\n\n # Determine what kind of convolution to use\n if np.all(input_shape[-3:-1] >= output_shape[-3:-1]):\n conv_type = \"NORMAL\"\n elif np.all(input_shape[-3:-1] <= output_shape[-3:-1]):\n conv_type = 'FULL'\n # swap input and output shape\n input_shape, output_shape = output_shape, input_shape\n else:\n raise ValueError('Input shape dimensions must be both bigger than or both smaller than output shape dimensions')\n\n filter_shape = np.array(tf.TensorShape(filter_shape).as_list()[:2] + [input_shape[-1], output_shape[-1]])\n stride = np.ceil((input_shape[:2] - filter_shape[:2] + 1) / output_shape[:2]).astype(np.int)\n padding = output_shape[:2] * stride - input_shape[:2] + filter_shape[:2] - 1\n\n # Determine what type of padding can be used\n if np.all(np.ceil(input_shape[:2] / stride) == output_shape[:2]):\n padding_type = 'SAME'\n else:\n padding_type = 'VALID'\n\n # get padded input shape\n input_shape[:2] = input_shape[:2] + padding.astype(np.int)\n padded_shape = [batch_size] + input_shape.tolist()\n\n left_padding = np.ceil(padding / 2).astype(np.int)\n right_padding = np.floor(padding / 2).astype(np.int)\n\n padding = [[0, 0], [left_padding[0], right_padding[0]], [left_padding[1], right_padding[1]], [0, 0]]\n stride = [1, stride[0], stride[1], 1]\n\n return filter_shape.tolist(), stride, padding, padded_shape, conv_type, padding_type", "def _get_same_padding_conv_nd(\n image_size: list[int], kernel_size: tuple[int, ...], dilation: tuple[int, ...], stride: tuple[int, ...]\n) -> list[int]:\n # get number of spatial dimensions, corresponds to kernel size length\n num_dims = len(kernel_size)\n\n # additional checks to populate dilation and stride (in case they are single entry tuples)\n if len(dilation) == 1:\n dilation = dilation * num_dims\n\n if len(stride) == 1:\n stride = stride * num_dims\n\n # equation to calculate (pad^+ + pad^-) size\n _pad_size: list[int] = [\n max((math.ceil(_i_s / _s) - 1) * _s + (_k_s - 1) * _d + 1 - _i_s, 0)\n for _i_s, _k_s, _d, _s in zip(image_size, kernel_size, dilation, stride)\n ]\n # distribute paddings into pad^+ and pad^- following Tensorflow's same padding strategy\n _paddings: list[tuple[int, int]] = [(_p // 2, _p - _p // 2) for _p in _pad_size]\n\n # unroll list of tuples to tuples, and then to list\n # reversed as nn.ConstantPadNd expects paddings starting with last dimension\n _paddings_ret: list[int] = [outer for inner in reversed(_paddings) for outer in inner]\n return _paddings_ret", "def img_conv_2D(img,kernel,stride=1,pad_type='None'):\n\n m,n = img.shape\n r,c = kernel.shape\n\n pad_m = m * (stride-1) + r -1\n pad_n = n * (stride-1) + r -1\n\n img_pad = np.zeros((m+pad_m,n+pad_n),dtype=float)\n img_pad[:m,:n] = np.copy(img)\n\n if pad_type == 'zero_pad':\n return conv_2D(img_pad,kernel,stride) # define\n\n elif 
pad_type == 'replicate_pad':\n\n for i in range(m,m+pad_m):\n img_pad[i,:] = np.copy(img_pad[m-1,:])\n\n for j in range(n,n+pad_n):\n img_pad[:,j] = np.copy(img_pad[:,n-1])\n\n return conv_2D(img_pad,kernel,stride) # define\n\n elif pad_type == 'wrap_pad':\n\n for i in range(m,m+pad_m):\n img_pad[i,:] = np.copy(img_pad[i-m,:])\n\n for j in range(n,n+pad_n):\n img_pad[:,j] = np.copy(img_pad[:,j-n])\n\n return conv_2D(img_pad,kernel,stride) # define\n\n else:\n\n return conv_2D(np.copy(img),kernel,stride) # define", "def _conv(\n conv_type,\n nd_util,\n input,\n weight,\n bias=None,\n stride=1,\n padding=0,\n dilation=1,\n groups=None,\n):\n weight_shape = list(weight.shape)\n return FunctionLib.apply(\n conv_type,\n input.device,\n [input, weight] + ([bias] if bias else []),\n in_channels=weight_shape[1],\n out_channels=weight_shape[0],\n kernel_shape=weight_shape[2:],\n strides=nd_util(stride),\n pads=nd_util(padding),\n dilations=nd_util(dilation),\n group=groups,\n bias=bias is not None,\n dtype=weight.dtype,\n input_shape=list(input.shape),\n )", "def compute_conv(in_size, kernel, stride, padding):\n return (in_size + 2 * padding - kernel) // stride + 1", "def convolution(img, kernel, padding=True):\n result = np.zeros_like(img)\n p_size_i = kernel.shape[0] // 2\n p_size_j = kernel.shape[1] // 2\n\n if padding:\n padded_img = np.zeros((img.shape[0] + 2 * p_size_i, img.shape[1] + 2 * p_size_j))\n i_first = p_size_i\n i_last = padded_img.shape[0] - p_size_i - 1\n j_first = p_size_j\n j_last = padded_img.shape[1] - p_size_j - 1\n padded_img[i_first: i_last + 1, j_first: j_last + 1] = img\n else:\n padded_img = img.copy()\n i_first = p_size_i\n i_last = padded_img.shape[0] - p_size_i - 1\n j_first = p_size_j\n j_last = padded_img.shape[1] - p_size_j - 1\n \n for i in range(i_first, i_last):\n for j in range(j_first, j_last):\n window = padded_img[i - p_size_i: i + p_size_i + 1, j - p_size_j: j + p_size_j + 1]\n res_pix = np.sum(window * kernel)\n result[i - p_size_i, j - p_size_j] = res_pix\n return result", "def hook(module, input):\n image_dimensions = input[0].size()[-2:]\n module.padding = _determine_padding_from_tf_same(image_dimensions, kernel_size, stride)", "def pad_conv_pattern():\n pattern = is_op(\"nn.pad\")(wildcard(), is_constant())\n pattern = is_op(\"nn.conv2d\")(pattern, is_constant())\n pattern = pattern.optional(lambda x: is_op(\"nn.bias_add\")(x, is_constant()))\n pattern = pattern.optional(lambda x: is_op(\"add\")(x, is_constant()))\n pattern = pattern.optional(\n lambda x: is_tuple_get_item(\n is_op(\"nn.batch_norm\")(\n x, is_constant(), is_constant(), is_constant(), is_constant()\n )\n )\n )\n pattern = pattern.optional(is_op(\"nn.relu\"))\n pattern = pattern.optional(is_op(\"clip\"))\n return pattern", "def make_padding(kernel_size, stride, dilation):\n return -((-kernel_size - (kernel_size - 1) * (dilation - 1)) // stride + 1) // 2", "def convolution(img, kernel, padding='fill'):\n kernel = np.rot90(kernel, 2)\n h,w = kernel.shape[:2]\n t,b,l,r = (h-1)//2, h//2, (w-1)//2, w//2 # Use numpy padding because it works for >2d\n padshape = [(t,b),(l,r)]+[(0,0)]*(len(img.shape[2:]))\n padded_img = np.pad(img, padshape, mode={'fill':'constant','replicate':'edge'}[padding])\n conved_img = np.zeros_like(img)\n for i in 1+np.arange(-h//2,h//2):\n for j in 1+np.arange(-w//2,w//2):\n if kernel[t+i,l+j]==0: continue\n conved_img += kernel[t+i,l+j]*padded_img[t+i:-b+i or None,l+j:-r+j or None]\n return conved_img", "def conv2d_fixed_padding(inputs,\n filters,\n kernel_size,\n 
strides,\n data_format='channels_first'):\n if strides > 1:\n inputs = fixed_padding(inputs, kernel_size, data_format=data_format)\n\n outputs = tf.layers.conv2d(\n inputs=inputs,\n filters=filters,\n kernel_size=kernel_size,\n strides=strides,\n padding=('SAME' if strides == 1 else 'VALID'),\n use_bias=False,\n kernel_initializer=tf.variance_scaling_initializer(),\n data_format=data_format)\n\n return outputs", "def pad1D(X, pad, kernel_width=None, stride=None, dilation=0):\n\tX_pad = None\n\tp = pad\n\tif isinstance(p, int):\n\t\tp = (p, p)\n\tif isinstance(p, tuple):\n\t\tX_pad = np.pad(\n\t\t\tX, \n\t\t\tpad_width=((0, 0), (p[0], p[1]), (0, 0)),\n\t\t\tmode='constant',\n\t\t\t# constant_value = 0,\n\t\t\t)\n\n\t# compute the correct padding dims for a 'same' or 'causal' convolution\n\tif p in ['same', 'causal'] and kernel_width and stride:\n\t\tcausal = p == 'causal'\n\t\tp = calc_pad_dims_1D(\n\t\t\tX.shape, X.shape[1], kernel_width, stride, causal=causal, dilation=dilation\n\t\t\t)\n\t\tX_pad, p = pad1D(X, p)\n\n\treturn X_pad, p", "def conv2d(args):\n inp_ = args[0]\n kernel = args[1]\n stride = args[2]\n padding = args[3]\n (batch_size, in_channels, H, W) = inp_.shape\n (out_channels, in_channels_t, Hk, Wk) = kernel.shape\n Hc = int((H - Hk)/stride)+1\n Wc = int((W - Wk)/stride)+1\n conv_layer = np.zeros((batch_size, out_channels, Hc, Wc))\n for batch_i in range(batch_size):\n for o_chann_i in range(out_channels):\n for in_chann_i in range(in_channels):\n curr_ker = kernel[o_chann_i, in_chann_i, :, :]\n curr_inp = inp_[batch_i, in_chann_i, :, :]\n h_ind = 0\n while h_ind + Hk <= H:\n w_ind = 0\n while w_ind + Wk <= W:\n inp_patch = curr_inp[h_ind:h_ind+Hk, w_ind:w_ind+Wk]\n # Sum the conv_value of all the inp_channels\n conv_layer[batch_i, o_chann_i, h_ind//stride, w_ind//stride] += np.sum(inp_patch*curr_ker)\n w_ind+=stride\n h_ind+=stride\n return conv_layer", "def conv2d_fixed_padding(inputs, filters, kernel_size, strides, data_format):\n # The padding is consistent and is based only on `kernel_size`, not on the\n # dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).\n if strides > 1:\n inputs = fixed_padding(inputs, kernel_size, data_format)\n\n return tf.layers.conv2d(\n inputs=inputs, filters=filters, kernel_size=kernel_size, strides=strides,\n padding=('SAME' if strides == 1 else 'VALID'), use_bias=False,\n kernel_initializer=tf.variance_scaling_initializer(),\n data_format=data_format)", "def padding(src, min_size):\n # pad before put into convolutional layer\n src_dim = src.dim()\n if src_dim[0][1] >= min_size:\n return src\n pad_size = min_size - src_dim[0][1]\n channels = src_dim[0][2] if len(src_dim[0]) >= 3 else 1\n if pad_size == 1:\n return dy.concatenate([src, dy.zeroes((src_dim[0][0], 1, channels))], d=1)\n else:\n left_border = int(pad_size) / 2\n right_border = (int(pad_size)+1) / 2\n return dy.concatenate([dy.zeroes((src_dim[0][0], left_border, channels)), src, dy.zeroes((src_dim[0][0], right_border, channels))], d=1) # do concatenate along cols", "def conv2d_fixed_padding(\n self,\n inputs,\n filters,\n kernel_size,\n strides,\n data_format):\n # The padding is consistent and is based only on `kernel_size`, not\n # on the dimensions of `inputs` (as opposed to using\n # `tf.layers.conv2d` alone).\n if strides > 1:\n inputs = self.fixed_padding(inputs, kernel_size, data_format)\n\n return tf.layers.conv2d(\n inputs=inputs,\n filters=filters,\n kernel_size=kernel_size,\n strides=strides,\n padding=('SAME' if strides == 1 else 'VALID'),\n 
use_bias=False,\n kernel_initializer=tf.variance_scaling_initializer(),\n data_format=data_format)", "def clConvolution(self, size, mask):", "def conv2d_fixed_padding(inputs, filters, kernel_size, strides, data_format):\n # The padding is consistent and is based only on `kernel_size`, not on the\n # dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).\n if strides > 1:\n inputs = fixed_padding(inputs, kernel_size, data_format)\n\n return tf.compat.v1.layers.conv2d(\n inputs=inputs, filters=filters, kernel_size=kernel_size, strides=strides,\n padding=('SAME' if strides == 1 else 'VALID'), use_bias=False,\n kernel_initializer=tf.compat.v1.variance_scaling_initializer(),\n kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-5),\n data_format=data_format)", "def convolve(images, kernels, padding='same', stride=(1, 1)):\n m = images.shape[0]\n h = images.shape[1]\n w = images.shape[2]\n c = images.shape[3]\n kh = kernels.shape[0]\n kw = kernels.shape[1]\n nc = kernels.shape[3]\n sh = stride[0]\n sw = stride[1]\n\n if padding == 'same':\n ph = max((h - 1) * sh + kh - h, 0)\n pt = int(np.ceil(ph / 2))\n pb = pt\n pw = max((w - 1) * sw + kw - w, 0)\n pl = int(np.ceil(pw / 2))\n pr = pl\n elif padding == 'valid':\n pt, pb, pl, pr = 0, 0, 0, 0\n else:\n pt, pb = padding[0], padding[0]\n pl, pr = padding[1], padding[1]\n\n oh = ((h - kh + pt + pb) // sh) + 1\n ow = ((w - kw + pl + pr) // sw) + 1\n\n images = np.pad(images, pad_width=((0, 0), (pt, pb), (pl, pr), (0, 0)),\n mode='constant', constant_values=0)\n\n conv = np.zeros((m, oh, ow, nc))\n for k in range(nc):\n for i in range(oh):\n for j in range(ow):\n aux = images[:, i * sh:i * sh + kh, j * sw:j * sw + kw] \\\n * kernels[:, :, :, k]\n conv[:, i, j, k] = np.sum(aux, axis=(1, 2, 3))\n return conv", "def convolve_channels(images, kernel, padding='same', stride=(1, 1)):\n m = images.shape[0]\n image_h = images.shape[1]\n image_w = images.shape[2]\n filter_h = kernel.shape[0]\n filter_w = kernel.shape[1]\n s1 = stride[0]\n s2 = stride[1]\n\n if padding == 'valid':\n pad_h = 0\n pad_w = 0\n\n if padding == 'same':\n pad_h = int(((image_h - 1) * s1 + filter_h - image_h) / 2) + 1\n pad_w = int(((image_w - 1) * s2 + filter_w - image_w) / 2) + 1\n\n if type(padding) == tuple:\n pad_h = padding[0]\n pad_w = padding[1]\n\n n_dim1 = int((image_h + 2 * pad_h - filter_h) / stride[0]) + 1\n n_dim2 = int((image_w + 2 * pad_w - filter_w) / stride[1]) + 1\n convolve = np.zeros((m, n_dim1, n_dim2))\n new_images = np.pad(images, ((0, 0), (pad_h, pad_h), (pad_w, pad_w),\n (0, 0)), mode='constant')\n for x in range(n_dim1):\n for y in range(n_dim2):\n mini_matrix = new_images[:, x * s1: x * s1 + filter_h,\n y * s2: y * s2 + filter_w, :]\n values = np.sum(mini_matrix * kernel,\n axis=1).sum(axis=1).sum(axis=1)\n convolve[:, x, y] = values\n return (convolve)", "def conv2D(null,channels,X,stride,kernel_shape,padding = False,initialize_weights = True,*args):\n # filters = dimensionality of output space\n # If padding is enabled, we pad the input with zeros such that the input size\n # remains the same if weights with stride 1 are applied to the input\n if initialize_weights:\n kernel = np.random.normal(size = (kernel_shape[0],kernel_shape[1],kernel_shape[2]))*math.sqrt(1/(kernel_shape[0]*kernel_shape[1]*kernel_shape[2])) # Our input\n kernel = torch.FloatTensor(kernel)\n kernel.requires_grad = False\n else:\n kernel = args[0] # weights and bias must be given if initialise weights is disabled\n bias = args[1]\n kernel_shape = kernel.shape\n \n X = 
X.detach().numpy()\n if padding: # Can only pad during initialization -> weights and input shapes cannot change during feedforward and backpropagation\n if kernel_shape[1] % 2 == 0 and kernel_shape[2] % 2 == 0:\n X = np.pad(X,((0,0),(math.floor(kernel_shape[1]/2)-1,math.floor(kernel_shape[1]/2)),(math.floor(kernel_shape[2]/2),math.floor(kernel_shape[2]/2)-1)), 'symmetric')\n elif kernel_shape[1] % 2 != 0 and kernel_shape[2] % 2 == 0:\n X = np.pad(X,((0,0),(math.floor(kernel_shape[1]/2),math.floor(kernel_shape[1]/2)),(math.floor(kernel_shape[2]/2),math.floor(kernel_shape[2]/2)-1)), 'symmetric')\n elif kernel_shape[1] % 2 == 0 and kernel_shape[2] % 2 != 0:\n X = np.pad(X,((0,0),(math.floor(kernel_shape[1]/2)-1,math.floor(kernel_shape[1]/2)),(math.floor(kernel_shape[2]/2),math.floor(kernel_shape[2]/2))), 'symmetric')\n else:\n X = np.pad(X,((0,0),(math.floor(kernel_shape[1]/2),math.floor(kernel_shape[1]/2)),(math.floor(kernel_shape[2]/2),math.floor(kernel_shape[2]/2))), 'symmetric')\n \n X = torch.FloatTensor(X)\n \n img_shape = X.shape\n \n output_size1 = math.floor((img_shape[1] - kernel_shape[1])/(stride)) + 1\n output_size2 = math.floor((img_shape[2] - kernel_shape[2])/(stride)) + 1\n output_shape = [channels,output_size1,output_size2]\n \n X_im2col,im = im2col(X,kernel,stride)\n \n \n if initialize_weights:\n weight = torch.reshape(kernel,(kernel_shape[0]*kernel_shape[1]*kernel_shape[2],1))\n # weight consists of only one weight vector. But the dimensionality of output space has to be\n # num_filters. So we need to stack weight vectors horizontally and create num_filters number of\n # feature maps\n for i in range(channels-1):\n weight2 = np.random.normal(size = (kernel_shape[0]*kernel_shape[1]*kernel_shape[2],1))*math.sqrt(1/(kernel_shape[0]*kernel_shape[1]*kernel_shape[2])) # Our input\n weight2 = torch.FloatTensor(weight2)\n weight2.requires_grad = False\n weight = torch.cat((weight2, weight),1) # do this num_filters - 1 number of times\n conv_output = torch.t(X_im2col).mm(weight)\n bias = torch.Tensor(np.random.normal(size = conv_output.shape))\n conv_output += bias\n conv_output = torch.reshape(conv_output,(output_shape))\n return torch.nn.Parameter(conv_output), torch.nn.Parameter(weight),X_im2col,im, output_shape,bias\n else:\n # Since weights are already initialised, the relevant channels are already dictated in the architecture.\n # Therefore, conv output is just a matmul\n conv_output = torch.t(X_im2col).mm(kernel) + bias\n return torch.nn.Parameter(conv_output),X_im2col", "def forward(self, stride, padding, *args):\n #TODO\n parents = list(args)\n inp_ = parents[0].value\n kernel = parents[1].value\n \n (batch_size, in_channels, H, W) = inp_.shape\n (out_channels, in_channels_t, Hk, Wk) = kernel.shape\n assert in_channels == in_channels_t\n \n return conv2d((inp_, kernel, stride, padding))\n # return conv2d_mul(inp_, kernel, stride, padding)", "def conv2d_fixed_padding(inputs, filters, kernel_size, strides, data_format):\n # The padding is consistent and is based only on `kernel_size`, not on the\n # dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).\n if strides > 1:\n inputs = fixed_padding(inputs, kernel_size, data_format)\n\n #TODO try orthogonal parameter matrix initialization\n return tf.layers.conv2d(\n inputs=inputs, filters=filters, kernel_size=kernel_size, strides=strides,\n padding=('SAME' if strides == 1 else 'VALID'), use_bias=False,\n kernel_initializer=tf.variance_scaling_initializer(), data_format=data_format)", "def conv2d(x, W, strides=(1, 
1, 1, 1), padding='SAME', dilation_rate=(1, 1), name='conv2d'):\n x_shape = x.get_shape().as_list()\n x_shape = [s if isinstance(s, int) else -1 for s in x_shape]\n W_shape = W.get_shape().as_list()\n padding_x = None\n padding_y = None\n \n if padding == \"ZEROPAD\":\n if len(x_shape) == 5:\n s = strides[1:3]\n i = (int(x_shape[2] / s[0]), int(x_shape[3] / s[1]))\n elif len(x_shape) == 4:\n s = strides[1:3]\n i = (int(x_shape[1] / s[0]), int(x_shape[2] / s[1]))\n else:\n raise ValueError(\"invalid input shape\")\n # --\n kernel_x = W_shape[0]\n kernel_y = W_shape[1]\n padding_x = int(np.ceil((i[0] - s[0] - i[0] + kernel_x + (kernel_x - 1) * (dilation_rate[0] - 1)) / (s[0] * 2)))\n padding_y = int(np.ceil((i[1] - s[1] - i[1] + kernel_y + (kernel_y - 1) * (dilation_rate[1] - 1)) / (s[1] * 2)))\n elif (isinstance(padding, list) or isinstance(padding, tuple)) and len(padding) == 2:\n padding_x = padding[0]\n padding_y = padding[1]\n \n if padding_x is not None and padding_y is not None:\n if len(x_shape) == 5:\n pad = [[0, 0], [0, 0], [padding_x, padding_x], [padding_y, padding_y], [0, 0]]\n elif len(x_shape) == 4:\n pad = [[0, 0], [padding_x, padding_x], [padding_y, padding_y], [0, 0]]\n \n # pad input with zeros\n x = tf.pad(x, pad, \"CONSTANT\")\n # set padding method for convolutions to valid to not add additional padding\n padding = \"VALID\"\n elif padding not in (\"SAME\", \"VALID\"):\n raise ValueError(\"unsupported padding type\")\n \n if dilation_rate == (1, 1):\n def conv_fct(inp):\n return tf.nn.conv2d(input=inp, filter=W, padding=padding, strides=strides, name=name)\n else:\n if (strides[0] != 1) or (strides[-1] != 1):\n raise AttributeError(\"Striding in combination with dilation is only possible along the spatial dimensions,\"\n \"i.e. strides[0] and strides[-1] have to be 1.\")\n \n def conv_fct(inp):\n return tf.nn.convolution(input=inp, filter=W, dilation_rate=dilation_rate,\n padding=padding, strides=strides[1:3], name=name)\n \n # Flatten matrix in first dimensions if necessary (join samples and sequence positions)\n with tf.variable_scope(name):\n if len(x_shape) > 4:\n x_shape = [s if isinstance(s, int) else -1 for s in x.get_shape().as_list()]\n if x_shape[0] == -1 or x_shape[1] == -1:\n x_flat = tf.reshape(x, [-1] + x_shape[2:])\n else:\n x_flat = tf.reshape(x, [x_shape[0] * x_shape[1]] + x_shape[2:])\n conv = conv_fct(x_flat)\n conv = tf.reshape(conv, x_shape[:2] + conv.get_shape().as_list()[1:])\n else:\n conv = conv_fct(x)\n return conv", "def convolution(\n input, # pylint: disable=redefined-builtin\n filter, # pylint: disable=redefined-builtin\n padding,\n strides=None,\n dilation_rate=None,\n name=None,\n data_format=None,\n filters=None,\n dilations=None): # pylint: disable=g-doc-args\n filter = deprecated_argument_lookup(\"filters\", filters, \"filter\", filter)\n dilation_rate = deprecated_argument_lookup(\n \"dilations\", dilations, \"dilation_rate\", dilation_rate)\n return convolution_internal(\n input,\n filter,\n strides=strides,\n padding=padding,\n data_format=data_format,\n dilations=dilation_rate,\n name=name)", "def _fixed_padding(inputs, kernel_size, *args, mode='CONSTANT', **kwargs):\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n\n if kwargs['data_format'] == 'NCHW':\n padded_inputs = tf.pad(inputs, [[0, 0], [0, 0],\n [pad_beg, pad_end],\n [pad_beg, pad_end]],\n mode=mode)\n else:\n padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],\n [pad_beg, pad_end], [0, 0]], mode=mode)\n return 
padded_inputs", "def hook(module, input):\n image_dimensions = input[0].size()[-2:]\n module.padding = _determine_inverse_padding_from_tf_same(image_dimensions, kernel_size, stride)", "def _padding(inputs, paddings, data_format):\n if data_format == 'channels_first':\n padded_inputs = tf.pad(\n inputs, [[0, 0], [0, 0], paddings, paddings])\n else:\n padded_inputs = tf.pad(\n inputs, [[0, 0], paddings, paddings, [0, 0]])\n return padded_inputs", "def conv_internal(conv_fn, inputs, filters, kernel_size, **kwargs):\n static_shape = inputs.get_shape()\n if not static_shape or len(static_shape) != 4:\n raise ValueError(\"Inputs to conv must have statically known rank 4. \"\n \"Shape: \" + str(static_shape))\n # Add support for left padding.\n if kwargs.get(\"padding\") == \"LEFT\":\n dilation_rate = (1, 1)\n if \"dilation_rate\" in kwargs:\n dilation_rate = kwargs[\"dilation_rate\"]\n assert kernel_size[0] % 2 == 1 and kernel_size[1] % 2 == 1\n height_padding = 2 * (kernel_size[0] // 2) * dilation_rate[0]\n cond_padding = tf.cond(\n tf.equal(shape_list(inputs)[2], 1), lambda: tf.constant(0),\n lambda: tf.constant(2 * (kernel_size[1] // 2) * dilation_rate[1]))\n width_padding = 0 if static_shape[2] == 1 else cond_padding\n padding = [[0, 0], [height_padding, 0], [width_padding, 0], [0, 0]]\n inputs = tf.pad(inputs, padding)\n # Set middle two dimensions to None to prevent convolution from complaining\n inputs.set_shape([static_shape[0], None, None, static_shape[3]])\n kwargs[\"padding\"] = \"VALID\"\n\n def conv2d_kernel(kernel_size_arg, name_suffix):\n \"\"\"Call conv2d but add suffix to name.\"\"\"\n name = \"{}_{}\".format(kwargs.get(\"name\", \"conv\"), name_suffix)\n original_name = kwargs.pop(\"name\", None)\n original_force2d = kwargs.pop(\"force2d\", None)\n result = conv_fn(inputs, filters, kernel_size_arg, name=name, **kwargs)\n if original_name is not None:\n kwargs[\"name\"] = original_name # Restore for other calls.\n if original_force2d is not None:\n kwargs[\"force2d\"] = original_force2d\n return result\n\n return conv2d_kernel(kernel_size, \"single\")", "def pad2D(X, pad, kernel_shape=None, stride=None, dilation=0):\n\tX_pad = None\n\tp = pad\n\tif isinstance(p, int):\n\t\tp = (p, p, p, p)\n\n\tif isinstance(p, tuple):\n\t\tif len(p) == 2:\n\t\t\tp = (p[0], p[0], p[1], p[1])\n\n\t\t\tX_pad = np.pad(\n\t\t\t\tX, \n\t\t\t\tpad_width=((0, 0), (p[0], p[1]), (p[2], p[3]), (0, 0)),\n\t\t\t\tmode=\"constant\",\n\t\t\t\t# constant_values=0,\n\t\t\t\t)\n\n\t# compute the crrect padding dims for a 'same' convolution\n\tif p == 'same' and kernel_shape and stride is not None:\n\t\tp = calc_pad_dims_2D(\n\t\t\tX.shape, X.shape[1:3], kernel_shape, sride, dilation=dilation\n\t\t\t)\n\t\tX_pad, p = pad2D(X, p)\n\treturn X_pad, p", "def get_convolution_op(input_shape, output_shape, kernel_shape):\n filter_shape, strides, padding, padded_shape, conv_type, padding_type = conv2d_config(input_shape, output_shape, kernel_shape)\n if conv_type == 'NORMAL':\n def conv_op(inputs, weight, name='generic_convolution'):\n with tf.name_scope(name):\n if padding_type=='VALID' and np.sum(padding) > 0:\n inputs = tf.pad(inputs, padding, name='padding')\n return tf.nn.conv2d(inputs, weight, strides, padding_type, name='convolution')\n\n else:\n def conv_op(inputs, weight, name='generic_convolution'):\n if padding_type=='SAME':\n padded_output = [padded_shape[0]] + output_shape[-3:]\n else:\n padded_output = padded_shape\n with tf.name_scope(name):\n if padded_output[0] is None:\n batch_size = 
tf.shape(inputs)[0]\n padded_output = [batch_size] + padded_output[1:]\n\n output = tf.nn.conv2d_transpose(inputs, weight, padded_output, strides, padding_type, name='transpose_convolution')\n if padding_type=='VALID' and np.sum(padding) > 0:\n output = tf.slice(output, [0, padding[1][0], padding[2][0], 0],\n [-1] + output_shape[-3:], name='cropping')\n return output\n\n return filter_shape, conv_op", "def convert_conv2d(g, op, block):\n\n dilations = op.attr(\"dilations\")\n groups = op.attr(\"groups\")\n paddings = op.attr(\"paddings\")\n padding_algorithm = op.attr(\"padding_algorithm\")\n strides = op.attr(\"strides\")\n\n kernel = g.get_node(op.input(\"Filter\")[0])\n input_x = g.get_node(op.input(\"Input\")[0])\n data_layout = op.attr(\"data_format\")\n out_channels, _, k_h, k_w = infer_shape(kernel)\n if padding_algorithm == \"VALID\":\n paddings = [0, 0]\n elif padding_algorithm == \"SAME\":\n # Handle history issue of PaddlePaddle\n # while padding_algorithm == \"SAME\"\n # dilations will be set to [1, 1]\n dilations = [1, 1]\n input_x = autopad(input_x, strides, [k_h, k_w], dilations)\n paddings = [0, 0]\n elif padding_algorithm == \"EXPLICIT\":\n if len(paddings) == 2:\n paddings = [paddings[0], paddings[1], paddings[0], paddings[1]]\n elif len(paddings) == 4:\n paddings = [paddings[0], paddings[2], paddings[1], paddings[3]]\n else:\n msg = f'Value {padding_algorithm} in attribute \"padding\" of operator Conv is not \"valid.\"'\n raise tvm.error.OpAttributeInvalid(msg)\n\n out = _op.nn.conv2d(\n input_x,\n kernel,\n strides=strides,\n padding=paddings,\n dilation=dilations,\n groups=groups,\n channels=out_channels,\n kernel_size=[k_h, k_w],\n data_layout=data_layout,\n )\n g.add_node(op.output(\"Output\")[0], out)", "def channel_padding(x):\n #keras.backend.concatenate([x, tf.zeros_like(x)], axis=-1)\n x0=keras.layers.Activation('sigmoid')(x)\n return keras.backend.concatenate([x, x0], axis=-1)", "def conv(f, kerneltype, kernelwidth, boundary='extend'):\n if kerneltype == 'box': kernel = Box1DKernel(kernelwidth)\n elif kerneltype == 'gaussian': kernel = Gaussian1DKernel(kernelwidth)\n fconv = convolve(f, kernel, boundary=boundary)\n return fconv", "def convolve_channels(images, kernel, padding='same', stride=(1, 1)):\n m, h, w, c = images.shape\n KernelHeight, kernelWidth, c = kernel.shape\n StrideHeight, StrideWidth = stride\n\n if padding == 'valid':\n PaddingHeight = 0\n PaddingWidth = 0\n elif padding == 'same':\n PaddingHeight = int(\n (((h - 1) * StrideHeight + KernelHeight - h) / 2) + 1)\n PaddingWidth = int((((w - 1) * StrideWidth + kernelWidth - w) / 2) + 1)\n else:\n PaddingHeight, PaddingWidth = padding\n\n OutputH = int(((h + 2 * PaddingHeight - KernelHeight) / StrideHeight) + 1)\n OutputW = int(((w + 2 * PaddingWidth - kernelWidth) / StrideWidth) + 1)\n\n ImagePadded = np.pad(\n images,\n ((0, 0), (PaddingHeight, PaddingHeight),\n (PaddingWidth, PaddingWidth), (0, 0)),\n 'constant'\n )\n\n output = np.zeros((m, OutputH, OutputW))\n ImageRange = np.arange(m)\n\n for i_OutputH in range(OutputH):\n for i_OutputW in range(OutputW):\n s_i_OutputH = i_OutputH * StrideHeight\n s_i_OutputW = i_OutputW * StrideWidth\n flt = ImagePadded[ImageRange,\n s_i_OutputH:KernelHeight + s_i_OutputH,\n s_i_OutputW:kernelWidth + s_i_OutputW,\n :]\n output[ImageRange, i_OutputH, i_OutputW] = np.sum(\n flt * kernel, axis=(1, 2, 3))\n return output", "def convert_padding(g, op, block):\n\n input_x = g.get_node(op.input(\"X\")[0])\n input_padding = op.input(\"Paddings\")\n if 
input_padding:\n padding = g.get_node(input_padding[0])\n padding = infer_value(padding, g.get_params()).numpy().tolist()\n else:\n padding = op.attr(\"paddings\")\n padding = op.attr(\"paddings\")\n value = op.attr(\"value\")\n data_format = op.attr(\"data_format\")\n mode = op.attr(\"mode\")\n assert mode != \"circular\", \"Don't support mod='circular' for PaddlePaddle's padding\"\n if mode == \"replicate\":\n mode = \"edge\"\n\n pad_len = len(padding)\n new_paddings = [0] * (pad_len + 4)\n for i in range(0, pad_len, 2):\n index = -1 - i\n if data_format[:2] != \"NC\":\n index = -3 - i\n new_paddings[index] = padding[i + 1]\n new_paddings[index - 1] = padding[i]\n\n new_paddings = [new_paddings[i : i + 2] for i in range(0, len(new_paddings), 2)]\n\n out = _op.nn.pad(input_x, new_paddings, pad_value=value, pad_mode=mode)\n g.add_node(op.output(\"Out\")[0], out)", "def test_on_conv_transpose_2d_dilation_padding_same(self):\n\n # Channels/Colors, #filters, filter_size (square)\n conv_filter = objax.nn.ConvTranspose2D(1, 1, 2, dilations=2, padding=objax.ConvPadding.SAME)\n weights = objax.TrainVar(jn.array([[[[1., 2.], [3., 4.]]]]).transpose((2, 3, 1, 0)))\n conv_filter.w = weights\n image = jn.array([[[[2., 1., 3., 4.], [5., 6., 7., 8.],\n [9., 10., 11., 12.], [13., 14., 15., 16.]]]])\n # NCHW: Batch, Channels/Colors, Height, Width\n features = conv_filter(image)\n expected_features = jn.array([[[[6., 17., 20., 14.],\n [13., 46., 48., 34.],\n [32., 82., 92., 58.],\n [30., 69., 76., 44.]]]])\n self.assertEqual(features.shape, (1, 1, 4, 4))\n self.assertTrue(jn.array_equal(features, expected_features))", "def get_same_padding_conv2d(image_size=None):\n if image_size is None:\n return Conv2dDynamicSamePadding\n else:\n return partial(Conv2dStaticSamePadding, image_size=image_size)", "def get_same_padding_conv2d(image_size=None):\n if image_size is None:\n return Conv2dDynamicSamePadding\n else:\n return partial(Conv2dStaticSamePadding, image_size=image_size)", "def fixed_padding(inputs, kernel_size, data_format='channels_last'):\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n return _padding(inputs, (pad_beg, pad_end), data_format)", "def fixed_padding(inputs, kernel_size, data_format='channels_first'):\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n if data_format == 'channels_first':\n padded_inputs = tf.pad(inputs, [[0, 0], [0, 0],\n [pad_beg, pad_end], [pad_beg, pad_end]])\n else:\n padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],\n [pad_beg, pad_end], [0, 0]])\n\n return padded_inputs", "def conv1d_broadcast(x, kernel_length, padding, stride):\n batch_size = tf.shape(x)[0]\n input_channels = x.shape[2].value\n\n # Temporarily combine the (batch_size, input_channels) dims while\n # applying the convolution. Introduce a dummy channels dimension instead.\n squeezed = tf.transpose(x, perm=[0, 2, 1])\n squeezed = tf.reshape(squeezed, shape=(\n [batch_size * input_channels] +\n x.shape[1:2].as_list() +\n [1]))\n\n # Convolve each elementary (i.e. 
one-hot) filter with x.\n diagonal_kernel = tf.reshape(\n tf.eye(kernel_length, dtype=x.dtype),\n shape=[kernel_length, 1, kernel_length])\n conv = tf.nn.conv1d(\n squeezed, diagonal_kernel,\n padding=padding, stride=stride)\n\n # The resulting convolution has shape (batch_size*input_channels,\n # output_length, kernel_length).\n # Move input_channels back to the last dimension.\n result = tf.reshape(conv, shape=(\n [batch_size, input_channels] +\n conv.shape[1:2].as_list() +\n [kernel_length]))\n result = tf.transpose(result, perm=[0, 2, 3, 1])\n\n # Insert output_channels dimension.\n return tf.expand_dims(result, 2)", "def conv2d_output_shape(input_shape, filter_shape, stride, padding):\n filter_shape = tf.TensorShape(filter_shape).as_list()\n filter_out = filter_shape[-1]\n filter_patch_shape = np.array(filter_shape[0:2])\n input_shape_list = tf.TensorShape(input_shape).as_list()\n batch = input_shape_list[:-3]\n input_shape = np.array(input_shape_list[-3:])\n stride = np.array(stride)\n if padding == 'VALID':\n shift = -filter_patch_shape + 1\n elif padding == 'SAME':\n shift = 0\n else:\n raise ValueError('padding must be either \"VALID\" or \"SAME\", but \"%s\" was given' % padding)\n output_shape = np.ceil((input_shape[:2] + shift) / stride[1:3])\n return batch + output_shape.astype(np.int).tolist() + [filter_out]", "def convolution(x, kernel, padding, strides):\n # Temporarily combine the classes/batch dimensions while convolving.\n num_classes = x.shape[0].value\n batch_size = tf.shape(x)[1]\n x_squeezed = tf.reshape(x, shape=([num_classes * batch_size] +\n x.shape[2:].as_list()))\n if len(kernel.shape) == 4:\n y = tf.nn.convolution(x_squeezed, kernel, padding=padding, strides=strides)\n elif len(kernel.shape) == 3:\n y = tf.nn.conv1d(x_squeezed, kernel, padding=padding, stride=strides[0])\n else:\n raise ValueError()\n return tf.reshape(y, shape=([num_classes, batch_size] +\n y.shape[1:].as_list()))", "def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):\n conv_block = []\n p = 0\n if padding_type == 'reflect':\n conv_block += [nn.ReflectionPad2d(1)]\n elif padding_type == 'replicate':\n conv_block += [nn.ReplicationPad2d(1)]\n elif padding_type == 'zero':\n p = 1\n else:\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\n\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]\n if use_dropout:\n conv_block += [nn.Dropout(0.5)]\n\n p = 0\n if padding_type == 'reflect':\n conv_block += [nn.ReflectionPad2d(1)]\n elif padding_type == 'replicate':\n conv_block += [nn.ReplicationPad2d(1)]\n elif padding_type == 'zero':\n p = 1\n else:\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]\n\n return nn.Sequential(*conv_block)", "def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):\n conv_block = []\n p = 0\n if padding_type == 'reflect':\n conv_block += [nn.ReflectionPad2d(1)]\n elif padding_type == 'replicate':\n conv_block += [nn.ReplicationPad2d(1)]\n elif padding_type == 'zero':\n p = 1\n else:\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\n\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]\n if use_dropout:\n conv_block += [nn.Dropout(0.5)]\n\n p = 0\n if padding_type == 'reflect':\n conv_block += [nn.ReflectionPad2d(1)]\n elif 
padding_type == 'replicate':\n conv_block += [nn.ReplicationPad2d(1)]\n elif padding_type == 'zero':\n p = 1\n else:\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]\n\n return nn.Sequential(*conv_block)", "def pad_model():\n\n inputs = tf.keras.Input(shape=(10, 10, 3,))\n x = tf.keras.layers.Conv2D(16, (1, 1))(inputs)\n x = tf.pad(x, tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]))\n x = tf.keras.layers.Conv2D(8, (2, 2))(x)\n x = tf.pad(x, tf.constant([[0, 0], [1, 1], [1, 1], [1, 1]]))\n x = tf.keras.layers.Conv2D(8, (2, 2))(x)\n x = tf.pad(x, tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]), constant_values=2)\n x = tf.keras.layers.Conv2D(8, (2, 2))(x)\n x = tf.pad(x, tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]), mode='SYMMETRIC')\n x = tf.keras.layers.Conv2D(8, (2, 2))(x)\n x = tf.keras.layers.Flatten()(x)\n outputs = tf.keras.layers.Dense(10, activation=tf.nn.softmax, name=\"pad_model\")(x)\n return outputs", "def fixed_padding(inputs, kernel_size, data_format):\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n\n if data_format == 'channels_first':\n padded_inputs = tf.pad(inputs, [[0, 0], [0, 0], [pad_beg, pad_end], [pad_beg, pad_end]])\n else:\n padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])\n\n return padded_inputs", "def conv(self, inputs, filters, kernel_size, strides, padding='SAME', name='conv_layer'):\n input_channels = inputs[-1]\n kernel = tf.Variable(tf.random.truncated_normal(shape=[kernel_size, kernel_size, input_channels, filters]),\n dtype=tf.float32, name='kernel')\n bias = tf.Variable(tf.zeros(shape=[filters]), name='bias')\n conv = tf.nn.conv2d(inputs, filter=kernel,\n strides=[1, strides, strides, 1],\n padding=padding, name='conv')\n out = tf.nn.relu(conv + bias, name='relu')\n return out", "def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):\r\n conv_block = []\r\n p = 0\r\n if padding_type == 'reflect':\r\n conv_block += [nn.ReflectionPad2d(1)]\r\n elif padding_type == 'replicate':\r\n conv_block += [nn.ReplicationPad2d(1)]\r\n elif padding_type == 'zero':\r\n p = 1\r\n else:\r\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\r\n\r\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]\r\n if use_dropout:\r\n conv_block += [nn.Dropout(0.5)]\r\n\r\n p = 0\r\n if padding_type == 'reflect':\r\n conv_block += [nn.ReflectionPad2d(1)]\r\n elif padding_type == 'replicate':\r\n conv_block += [nn.ReplicationPad2d(1)]\r\n elif padding_type == 'zero':\r\n p = 1\r\n else:\r\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\r\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]\r\n\r\n return nn.Sequential(*conv_block)", "def convolution(image: np.array, kernel: np.array) -> np.array:\n\n # default condition: apply SAME padding, and keep stride at 1\n stride_x = 1\n stride_y = 1\n padding_y = int(len(kernel - 1) / 2)\n padding_x = int(len((kernel[0]) - 1) / 2)\n # create the return array with with the same dimensions as <image>,\n # and then create a padded image\n convolved_image = np.zeros((len(image), len(image[0])))\n padded_image = np.zeros((len(image) + 2 * padding_y,\n len(image[0]) + 2 * padding_x))\n padded_image[padding_x: -padding_x, padding_y: -padding_y] = image\n\n for py in 
range(0, len(padded_image) - len(kernel), stride_y):\n for px in range(0, len(padded_image[0]) - len(kernel[0]), stride_x):\n # scan the matrix over columns in image array, then shift the matrix\n # down, and repeat\n padded_image_section = padded_image[py: py + len(kernel[0]),\n px: px + len(kernel)]\n # print(padded_image_section)\n convolved_image[py, px] = int(np.tensordot(padded_image_section,\n kernel))\n\n return convolved_image", "def fixed_padding(inputs, kernel_size, data_format):\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n\n if data_format == 'channels_first':\n padded_inputs = tf.pad(tensor=inputs,\n paddings=[[0, 0], [0, 0], [pad_beg, pad_end],\n [pad_beg, pad_end]])\n else:\n padded_inputs = tf.pad(tensor=inputs,\n paddings=[[0, 0], [pad_beg, pad_end],\n [pad_beg, pad_end], [0, 0]])\n return padded_inputs", "def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias, last=False):\n conv_block = []\n p = 0\n if padding_type == 'reflect':\n conv_block += [nn.ReflectionPad2d(1)]\n elif padding_type == 'replicate':\n conv_block += [nn.ReplicationPad2d(1)]\n elif padding_type == 'zero':\n p = 1\n else:\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\n\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]\n if use_dropout:\n conv_block += [nn.Dropout(0.5)]\n\n p = 0\n if padding_type == 'reflect':\n conv_block += [nn.ReflectionPad2d(1)]\n elif padding_type == 'replicate':\n conv_block += [nn.ReplicationPad2d(1)]\n elif padding_type == 'zero':\n p = 1\n else:\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\n if last:\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias)]\n else:\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]\n\n return nn.Sequential(*conv_block)", "def fixed_padding(inputs, kernel_size, data_format):\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n\n if data_format == 'channels_first':\n padded_inputs = tf.pad(inputs, [[0, 0], [0, 0],\n [pad_beg, pad_end], [pad_beg, pad_end]])\n else:\n padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],\n [pad_beg, pad_end], [0, 0]])\n return padded_inputs", "def add_conv_type2(model, depth):\n model.add(Convolution2D(depth, 3, 3, subsample=(1, 1)))", "def fixed_padding(inputs, kernel_size, data_format):\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n\n if data_format == 'channels_first':\n padded_inputs = tf.pad(inputs, [[0, 0], [0, 0],\n [pad_beg, pad_end], [pad_beg, pad_end]])\n else:\n padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],\n [pad_beg, pad_end], [0, 0]])\n return padded_inputs", "def conv2d_fixed_padding(inputs,\n filters,\n kernel_size,\n strides,\n pruning_method='baseline',\n init_method='baseline',\n data_format='channels_first',\n end_sparsity=0.,\n weight_decay=0.,\n init_scale=1.0,\n name=None):\n if strides > 1:\n inputs = fixed_padding(\n inputs, kernel_size, data_format=data_format)\n padding = 'SAME' if strides == 1 else 'VALID'\n\n kernel_initializer = tf.variance_scaling_initializer(scale=init_scale)\n kernel_initializer = _pick_initializer(kernel_initializer, init_method,\n pruning_method, end_sparsity)\n\n kernel_regularizer = contrib_layers.l2_regularizer(weight_decay)\n return sparse_conv2d(\n x=inputs,\n units=filters,\n activation=None,\n 
kernel_size=[kernel_size, kernel_size],\n use_bias=False,\n kernel_initializer=kernel_initializer,\n kernel_regularizer=kernel_regularizer,\n bias_initializer=None,\n biases_regularizer=None,\n sparsity_technique=pruning_method,\n normalizer_fn=None,\n strides=[strides, strides],\n padding=padding,\n data_format=data_format,\n name=name)", "def conv(self, input_layer, num_filters, filter_size,\n filter_strides=(1,1), padding='SAME',\n activation=None, use_batch_norm=None):\n num_inputs = input_layer.get_shape().as_list()[1]\n kernel_shape = [filter_size[0], filter_size[1],\n num_inputs, num_filters]\n strides = [1, 1, filter_strides[0], filter_strides[1]]\n with tf.variable_scope(self._count_layer('conv')) as scope:\n kernel = self._get_variable('weights', kernel_shape,\n input_layer.dtype)\n if padding == 'SAME_RESNET': # ResNet models require custom padding\n kh, kw = filter_size\n rate = 1\n kernel_size_effective = kh + (kw - 1) * (rate - 1)\n pad_total = kernel_size_effective - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n padding = [[0, 0], [0, 0],\n [pad_beg, pad_end], [pad_beg, pad_end]]\n input_layer = tf.pad(input_layer, padding)\n padding = 'VALID'\n x = tf.nn.conv2d(input_layer, kernel, strides,\n padding=padding, data_format='NCHW')\n x = self._bias_or_batch_norm(x, scope, use_batch_norm)\n x = self.activate(x, activation)\n return x", "def convolve_channels(images, kernel, padding='same', stride=(1, 1)):\n m, image_h, image_w, image_c = images.shape\n kernel_h, kernel_w, kernel_c = kernel.shape\n stride_h, stride_w = stride\n\n if isinstance(padding, tuple):\n padding_h, padding_w = padding\n if padding is 'same':\n padding_h = int(((stride_h * image_h)\n - stride_h + kernel_h - image_h) / 2) + 1\n padding_w = int(((stride_w * image_w)\n - stride_w + kernel_w - image_w) / 2) + 1\n if padding is 'valid':\n padding_h, padding_w = 0, 0\n\n output_h = int(((image_h + (2 * padding_h) - kernel_h) / stride_h) + 1)\n output_w = int(((image_w + (2 * padding_w) - kernel_w) / stride_w) + 1)\n conv_output = np.zeros((m, output_h, output_w))\n\n img_m = np.arange(0, m)\n\n images = np.pad(\n images,\n [(0, 0), (padding_h, padding_h), (padding_w, padding_w), (0, 0)],\n mode='constant',\n constant_values=0)\n\n for i in range(output_h):\n for j in range(output_w):\n s_h = (stride_h)\n s_w = (stride_w)\n multiply = images[\n img_m,\n i*s_h:kernel_h+i*s_h,\n j*s_w:kernel_w+j*s_w]\n conv_output[img_m, i, j] = np.sum(\n np.multiply(multiply, kernel), axis=(1, 2, 3))\n return conv_output", "def conv2d(X,W,b,strides=1):\n \"\"\"\n If the padding = 'SAME', the input and output images are of the same size by implementing\n zero padding on the input. 
(TF will compute using the padding equation from notes 4-12-2018) \n If the padding = 'VALID', the input is not padded and the output image size will be less \n than the input image.\n \"\"\"\n net = tf.nn.conv2d(X,W,strides=[1,strides,strides,1],padding='SAME')\n net = tf.nn.bias_add(net,b) #add bias to each convolved value, but all get the same bias value\n return tf.nn.relu(net) #return the output of the detection layer", "def calc_conv_out_dims(X_shape, W_shape, stride=1, pad=0, dilation=0):\n\tdummy = np.zeros(X_shape)\n\ts, p, d = stride, pad, dilation\n\tif len(X_shape) == 3:\n\t\t_, p = pad1D(dummy, p)\n\t\tpw1, pw2 = p\n\t\tfw, in_ch, out_ch = W_shape\n\t\tn_ex, in_length, in_ch = X_shape\n\n\t\t_fw = fw * (d+1) - d\n\t\tout_length = (in_length + pw1 + pw2 - _fw) // s + 1\n\t\tout_dim = (n_ex, out_length, out_ch)\n\n\telif len(X_shape) == 4:\n\t\t_, p = pad2D(dummy, p)\n\t\tpr1, pr2, pc1, pc2 = p\n\t\tfr, fc, in_ch, out_ch = W_shape\n\t\tn_ex, in_rows, in_cols, in_ch = X_shape\n\n\t\t# adjust effective filter size to account for dilation\n\t\t_fr, _fc = fr * (d+1) - d, fc * (d+1) - d\n\t\tout_rows = (in_rows + pr1 + pr2 - _fr) // s + 1\n\t\tout_cols = (in_cols + pc1 + pc2 - _fc) // s + 1\n\t\tout_dims = (n_ex, out_rows, out_cols, out_ch)\n\telse:\n\t\traise ValueError(\"unrecognized number of the input dims: {}\".format(len(X_shape)))", "def convolution_internal(\n input, # pylint: disable=redefined-builtin\n filters,\n strides=None,\n padding=\"VALID\",\n data_format=None,\n dilations=None,\n name=None,\n call_from_convolution=True,\n num_spatial_dims=None):\n if (not isinstance(filters, variables_lib.Variable) and\n not tensor_util.is_tf_type(filters)):\n with ops.name_scope(\"convolution_internal\", None, [filters, input]):\n filters = ops.convert_to_tensor(filters, name='filters')\n if (not isinstance(input, tensor_lib.Tensor) and not tensor_util.is_tf_type(\n input)):\n with ops.name_scope(\"convolution_internal\", None, [filters, input]):\n input = ops.convert_to_tensor(input, name=\"input\")\n\n filters_rank = filters.shape.rank\n inputs_rank = input.shape.rank\n if num_spatial_dims is None:\n if filters_rank:\n num_spatial_dims = filters_rank - 2\n elif inputs_rank:\n num_spatial_dims = inputs_rank - 2\n else:\n raise ValueError(\n \"When `num_spatial_dims` is not set, one of `input.shape.rank` or \"\n \"`filters.shape.rank` must be known. \"\n f\"Received: input.shape={input.shape} of rank {inputs_rank} and \"\n f\"filters.shape={filters.shape} of rank {filters_rank}\")\n elif filters_rank and filters_rank - 2 != num_spatial_dims:\n raise ValueError(\n \"`filters.shape.rank - 2` should equal `num_spatial_dims`. Received: \"\n f\"filters.shape={filters.shape} of rank {filters_rank} and \"\n f\"num_spatial_dims={num_spatial_dims}\")\n\n if inputs_rank:\n num_batch_dims = inputs_rank - num_spatial_dims - 1 # Channel dimension.\n else:\n num_batch_dims = 1 # By default, assume single batch dimension.\n\n if num_spatial_dims not in {1, 2, 3}:\n raise ValueError(\n \"`num_spatial_dims` must be 1, 2, or 3. 
\"\n f\"Received: num_spatial_dims={num_spatial_dims}.\")\n\n if data_format is None or data_format in _CHANNELS_LAST_FORMATS:\n channel_index = num_batch_dims + num_spatial_dims\n else:\n channel_index = num_batch_dims\n\n if dilations is None:\n dilations = _get_sequence(dilations, num_spatial_dims, channel_index,\n \"dilations\")\n is_dilated_conv = False\n else:\n dilations = _get_sequence(dilations, num_spatial_dims, channel_index,\n \"dilations\")\n is_dilated_conv = any(i != 1 for i in dilations)\n\n strides = _get_sequence(strides, num_spatial_dims, channel_index, \"strides\")\n has_tpu_context = device_context.enclosing_tpu_context() is not None\n\n if name:\n default_name = None\n elif not has_tpu_context or call_from_convolution:\n default_name = \"convolution\"\n elif num_spatial_dims == 2: # Most common case.\n default_name = \"Conv2D\"\n elif num_spatial_dims == 3:\n default_name = \"Conv3D\"\n else:\n default_name = \"conv1d\"\n\n with ops.name_scope(name, default_name, [input, filters]) as name:\n # Fast path for TPU or if no dilation, as gradient only supported on TPU\n # for dilations.\n if not is_dilated_conv or has_tpu_context:\n if num_spatial_dims == 2: # Most common case.\n op = _conv2d_expanded_batch\n elif num_spatial_dims == 3:\n op = _conv3d_expanded_batch\n else:\n op = conv1d\n\n return op(\n input,\n filters,\n strides,\n padding=padding,\n data_format=data_format,\n dilations=dilations,\n name=name)\n else:\n if channel_index == 1:\n strides = strides[2:]\n dilations = dilations[2:]\n else:\n strides = strides[1:-1]\n dilations = dilations[1:-1]\n\n op = Convolution(\n tensor_shape.as_shape(input.shape),\n tensor_shape.as_shape(filters.shape),\n padding,\n strides=strides,\n dilation_rate=dilations,\n name=name,\n data_format=data_format,\n num_spatial_dims=num_spatial_dims)\n return op(input, filters)", "def deconv_output_length(input_length,\n filter_size,\n padding,\n output_padding=None,\n stride=0,\n dilation=1):\n assert padding in {'same', 'valid', 'full'}\n if input_length is None:\n return None\n\n # Get the dilated kernel size\n filter_size = filter_size + (filter_size - 1) * (dilation - 1)\n\n # Infer length if output padding is None, else compute the exact length\n if output_padding is None:\n if padding == 'valid':\n length = input_length * stride + max(filter_size - stride, 0)\n elif padding == 'full':\n length = input_length * stride - (stride + filter_size - 2)\n elif padding == 'same':\n length = input_length * stride\n\n else:\n if padding == 'same':\n pad = filter_size // 2\n elif padding == 'valid':\n pad = 0\n elif padding == 'full':\n pad = filter_size - 1\n\n length = ((input_length - 1) * stride + filter_size - 2 * pad +\n output_padding)\n return length", "def __calc_padding(self, input_shape, kernel_size, stride=1):\n # default of pytorch for input_size = (C_in, H_in, W_in)\n if len(input_shape) == 3:\n if stride != (1,1):\n raise ValueError(\"calc padding only works for stride=(1,1)\")\n padding = (0,0)\n if kernel_size[0]%2 == 0 or kernel_size[1]%2 == 0:\n raise ValueError(\"the kernel size: {} is incompatible with CnnHighway. 
With this kernel, the conv output shape will not equal the input shape\".format(kernel_size))\n padding_height = int((kernel_size[0] - 1)/2)\n padding_width = int((kernel_size[1] - 1)/2)\n return (padding_height, padding_width)\n if len(input_shape) == 2:\n if stride != 1:\n raise ValueError(\"calc padding only works for stride=(1)\")\n padding = int((kernel_size -1)/2)\n return padding", "def conv_broadcast(x, kernel_shape, padding, strides):\n if len(kernel_shape) == 2:\n return conv2d_broadcast(x, kernel_shape[0], kernel_shape[1],\n padding, strides)\n elif len(kernel_shape) == 1:\n return conv1d_broadcast(x, kernel_shape[0], padding, strides[0])\n else:\n raise ValueError()", "def calc_pad_dims_1D(X_shape, l_out, kernel_width, stride, dilation=0, causal=False):\n\tif not isinstance(X_shape, tuple):\n\t\traise ValueError(\"X_shape must be type tuple\")\n\n\tif not isinstance(l_out, int):\n\t\traise ValueError(\"l_out must be type int\")\n\n\tif not isinstance(kernel_width, int):\n\t\traise ValueError(\"kernel_width must be type int\")\n\n\tif not isinstance(stride, int):\n\t\traise ValueError(\"stride must be type int\")\n\n\td = dilation\n\tfw = kernel_width\n\tn_ex, l_in, in_ch = X_shape\n\n\t# update effective filter shape based on dilation factor\n\t_fw = fw * (d + 1) - d\n\ttotal_pad = int((stride * (l_out - 1) + _fw - l_in))\n\n\tif not causal:\n\t\tpw = total_pad // 2\n\t\tl_out1 = int(1 + (l_in + 2 * pw - _fw) / stride)\n\n\t\t# add asymmetric padding pixels to right / bottom\n\t\tpw1, pw2 = pw, pw\n\t\tif l_out1 == l_out - 1:\n\t\t\tpw1, pw2 = pw, pw + 1\n\t\telif l_out1 != l_out:\n\t\t\traise AssertionError\n\n\tif causal:\n\t\t# if this is a causal convolution, only pad the left side of \n\t\t# the sequence\n\t\tpw1, pw2 = total_pad, 0\n\t\tl_out1 = int(1 + (l_in + total_pad - _fw) / stride)\n\t\tassert l_out1 == l_out\n\n\tif any(np.array([pw1, pw2]) < 0):\n\t\traise ValueError(\"padding cannot be less than 0, Got: {}\".\\\n\t\t\tformat((pw1, pw2)))\n\treturn (pw1, pw2)", "def conv(dims, inplanes, outplanes, kernel_size, stride, dilation, bias):\n padding = math.floor((dilation * (kernel_size - 1) + 2 - stride) / 2)\n if dims == 2:\n return nn.Conv2d(inplanes, outplanes, kernel_size, stride,\n padding, dilation, bias=bias)\n elif dims == 3:\n return nn.Conv3d(inplanes, outplanes, kernel_size, stride,\n padding, dilation, bias=bias)\n else:\n raise ValueError('dimension of conv must be 2 or 3')", "def conv_output_shape(h_w, kernel_size=1, stride=1, pad=0, dilation=1):\n \n if type(h_w) is not tuple:\n h_w = (h_w, h_w)\n \n if type(kernel_size) is not tuple:\n kernel_size = (kernel_size, kernel_size)\n \n if type(stride) is not tuple:\n stride = (stride, stride)\n \n if type(pad) is not tuple:\n pad = (pad, pad)\n \n h = (h_w[0] + (2 * pad[0]) - (dilation * (kernel_size[0] - 1)) - 1)// stride[0] + 1\n w = (h_w[1] + (2 * pad[1]) - (dilation * (kernel_size[1] - 1)) - 1)// stride[1] + 1\n \n return h, w", "def convolution_shape(input_shape, n_filters, filter_shape, stride, padding):\n img_height, img_width, _ = input_shape\n height = (img_height + 2 * padding[0] - filter_shape[0]) / float(stride) + 1\n width = (img_width + 2 * padding[1] - filter_shape[1]) / float(stride) + 1\n\n return int(height), int(width), n_filters", "def test_on_conv_transpose_2d_dilation_padding_valid(self):\n\n # Channels/Colors, #filters, filter_size (square)\n conv_filter = objax.nn.ConvTranspose2D(1, 1, 2, dilations=2, padding=objax.ConvPadding.VALID)\n weights = objax.TrainVar(jn.array([[[[1., 2.], 
[3., 4.]]]]).transpose((2, 3, 1, 0)))\n conv_filter.w = weights\n image = jn.array([[[[2., 1., 3., 4.], [5., 6., 7., 8.],\n [9., 10., 11., 12.], [13., 14., 15., 16.]]]])\n # NCHW: Batch, Channels/Colors, Height, Width\n features = conv_filter(image)\n expected_features = jn.array([[[[2., 1., 7., 6., 6., 8.],\n [5., 6., 17., 20., 14., 16.],\n [15., 13., 46., 48., 34., 40.],\n [28., 32., 82., 92., 58., 64.],\n [27., 30., 69., 76., 44., 48.],\n [39., 42., 97., 104., 60., 64.]]]])\n self.assertEqual(features.shape, (1, 1, 6, 6))\n self.assertTrue(jn.array_equal(features, expected_features))", "def spatial_reflection_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None):\n assert len(padding) == 2\n assert len(padding[0]) == 2\n assert len(padding[1]) == 2\n if data_format is None:\n data_format = image_data_format()\n if data_format not in {'channels_first', 'channels_last'}:\n raise ValueError('Unknown data_format ' + str(data_format))\n\n if data_format == 'channels_first':\n pattern = [[0, 0],\n [0, 0],\n list(padding[0]),\n list(padding[1])]\n else:\n pattern = [[0, 0],\n list(padding[0]), list(padding[1]),\n [0, 0]]\n return tf.pad(x, pattern, \"REFLECT\")", "def forward(self, input, padding=0):\n return self.conv(input, weight=self.weight, groups=self.groups, padding=padding)", "def reflection_pad(images, filter_size):\n num = filter_size // 2\n return tf.pad(images, [[0, 0], [num, num], [num, num], [0, 0]], mode='REFLECT')", "def spatial_reflection_2d_padding(x, padding=((1, 1), (1, 1)),\n data_format=None):\n assert len(padding) == 2\n assert len(padding[0]) == 2\n assert len(padding[1]) == 2\n if data_format is None:\n data_format = image_data_format()\n if data_format not in {'channels_first', 'channels_last'}:\n raise ValueError('Unknown data_format ' + str(data_format))\n\n if data_format == 'channels_first':\n pattern = [[0, 0],\n [0, 0],\n list(padding[0]),\n list(padding[1])]\n else:\n pattern = [[0, 0],\n list(padding[0]), list(padding[1]),\n [0, 0]]\n return tf.pad(x, pattern, \"REFLECT\")", "def conv2d_fixed_padding(inputs,\n num_output_channels,\n kernel_size,\n scope,\n stride=1,\n use_xavier=True,\n use_bias=True,\n data_format=None):\n # The padding is consistent and is based only on `kernel_size`, not on the\n # dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).\n # ex) In case kernel_size=3, stride=[2,2] , it is impossible to keep same size as input neither padding='VALID', 'SAME'\n if data_format is None:\n data_format = _DATA_FORMAT\n if stride > 1:\n inputs = fixed_padding(inputs, kernel_size, data_format)\n\n padding= 'SAME' if stride == 1 else 'VALID'\n return conv2d(inputs, num_output_channels,\n kernel_size, scope,\n stride=stride,\n padding=padding,\n use_xavier=use_xavier,\n use_bias=use_bias,\n data_format=data_format\n )", "def Get_Convolution(label, radius, feature_dict, pad = True, convert_length = 0.2204315, verbose = False, \n path = '', filename = '', meta = None):\n ## Make convolution at specified radius\n r = round(radius / convert_length)\n num_class = len(feature_dict)\n ## Create circular filter window\n x = np.arange(0, 2*r)\n y = np.arange(0, 2*r)\n mask = (x[np.newaxis,:]-r)**2 + (y[:,np.newaxis]-r)**2 < r**2 \n mask = mask[:,:,np.newaxis, np.newaxis]\n mask_tensor = tf.constant(mask, tf.float32)\n\n expanded_label = Expand_Mask(label, feature_dict)\n lab_shape = expanded_label.shape\n all_lab = np.zeros((lab_shape[0] - mask.shape[0] + 1, lab_shape[1] - mask.shape[1] + 1, num_class))\n for val in range(num_class): \n 
ohe_layer = expanded_label[:,:,val]\n ohe_tensor = tf.constant(ohe_layer[np.newaxis, :, :, np.newaxis], tf.float32)\n tensor_res = tf.nn.convolution(ohe_tensor, mask_tensor, padding='VALID') \n all_lab[:,:,val] = tensor_res.numpy()[0,:,:,0]\n if verbose:\n print('Finished: ' + str(val))\n \n if pad:\n array_shape = label.shape\n # up-down padding\n tot_pw_ud = (array_shape[0] - all_lab.shape[0])/2\n pw_up = int(np.ceil(tot_pw_ud))\n pw_down = int(np.floor(tot_pw_ud))\n # left-right padding\n tot_pw_lr = (array_shape[1] - all_lab.shape[1])/2\n pw_left = int(np.ceil(tot_pw_lr))\n pw_right = int(np.floor(tot_pw_lr))\n all_lab_pad = np.pad(all_lab, pad_width = ((pw_down, pw_up), (pw_left, pw_right), (0,0)), \n mode = 'constant', constant_values = 255)\n \n if filename !='':\n try:\n if path == '':\n path = 'Predictions'\n os.makedirs(path)\n except OSError as error: \n print('') \n \n meta.update(count = num_class, nodata = 255, compress = 'deflate', predictor = 2)\n \n # Write raster label to file\n tif_lab_pad = np.moveaxis(all_lab_pad,-1,0)\n with rasterio.open(path + '/' + filename + '.tif', 'w', **meta) as src:\n src.write(tif_lab_pad) \n return all_lab_pad", "def _fixed_padding(inputs, kernel_size, *args, mode='CONSTANT', **kwargs):\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n\n padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],\n [pad_beg, pad_end], [0, 0]], mode=mode)\n return padded_inputs", "def pad_image(input_img, window_size, padding_mode='symmetric'):\n assert np.isscalar(window_size)\n assert window_size % 2 == 1\n\n # Padding width must be window_size-1 and divided by 2. So that we can check every pixels\n pad_width = int((window_size-1)/2)\n # For each padding_mode, pad differently\n\n # But in result, I chose symmetric cause it seems to have smallest aepe\n if padding_mode == 'symmetric':\n padded_img = np.pad(input_img, pad_width, padding_mode)\n elif padding_mode == 'reflect':\n padded_img = np.pad(input_img, pad_width, padding_mode)\n elif padding_mode == 'constant':\n padded_img = np.pad(input_img, pad_width, padding_mode)\n\n return padded_img", "def conv_pad(x, ks, mode):\n\tpad = (int(np.floor((ks-1)/2)), int(np.ceil((ks-1)/2)))\n\treturn F.pad(x, (*pad, *pad), mode=mode)", "def fold(input_f, input_shape, kernel_size=(3,3), stride=(1,1), padding=(0,0)):\n batch_size, in_chan, in_height, in_width = input_shape\n \n height_padded, width_padded = in_height + 2*padding[0], in_width + 2*padding[1]\n input_padded = np.zeros((batch_size, in_chan, height_padded, width_padded), dtype=input_f.dtype)\n \n k, i, j = get_indices(input_shape, kernel_size, stride, padding)\n \n input = input_f.reshape(in_chan*kernel_size[0]*kernel_size[1], -1, batch_size)\n input = input.transpose((2, 0, 1))\n np.add.at(input_padded, (slice(None), k, i, j), input)\n if padding == (0,0):\n return input_padded\n return input_padded[:, :, padding[0]: -padding[0], padding[1]: -padding[1]]", "def add_conv2D(\n self, input_name, filter_name, strides, padding=\"SAME\", data_format=\"NHWC\", name=None):\n attr = {}\n attr['strides'] = strides\n attr['padding'] = padding\n attr['data_format'] = data_format\n attr['dilations'] = [1, 1, 1, 1]\n\n return self._build_op('Conv2D', [input_name, filter_name], attr=attr, name=name)", "def hook_factory_tf_padding_same(kernel_size, stride):\n def hook(module, input):\n \"\"\"The hook overwrites the padding attribute of the padding layer.\"\"\"\n image_dimensions = input[0].size()[-2:]\n module.padding = 
_determine_padding_from_tf_same(image_dimensions, kernel_size, stride)\n return hook", "def temporal_padding(x, padding=(1, 1)):\n assert len(padding) == 2\n pattern = [[0, 0], [padding[0], padding[1]], [0, 0]]\n return tf.pad(x, pattern)", "def conv_output_shape(h_w, kernel_size=1, stride=1, pad=0, dilation=1):\n from math import floor\n if type(kernel_size) is not tuple:\n kernel_size = (kernel_size, kernel_size)\n\n if type(pad) is not tuple:\n pad = (pad, pad)\n\n h = floor(((h_w[0] + (2 * pad[0]) - (dilation * (kernel_size[0] - 1)) - 1) / stride) + 1)\n w = floor(((h_w[1] + (2 * pad[1]) - (dilation * (kernel_size[1] - 1)) - 1) / stride) + 1)\n return h, w", "def fixed_padding_2d3d(self, inputs, kernel_size):\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n if self.data_format == 'channels_first':\n if len(inputs.shape)==4:\n padded_inputs = tf.pad(inputs, [[0, 0], [0, 0],\n [pad_beg, pad_end], [pad_beg, pad_end]])\n elif len(inputs.shape)==5:\n padded_inputs = tf.pad(inputs, [[0, 0], [0, 0],\n [pad_beg, pad_end], [pad_beg, pad_end], [pad_beg, pad_end]])\n else:\n if len(inputs.shape)==4:\n padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],\n [pad_beg, pad_end], [0, 0]])\n elif len(inputs.shape)==5:\n padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],\n [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])\n return padded_inputs", "def _conv2d_same(x, filters, prefix, stride=1, kernel_size=3, rate=1):\n if stride == 1:\n return Conv2D(filters,\n (kernel_size, kernel_size),\n strides=(stride, stride),\n padding='same', use_bias=False,\n dilation_rate=(rate, rate)\n )(x)\n else:\n kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)\n pad_total = kernel_size_effective - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n x = ZeroPadding2D((pad_beg, pad_end))(x)\n return Conv2D(filters,\n (kernel_size, kernel_size),\n strides=(stride, stride),\n padding='valid', use_bias=False,\n dilation_rate=(rate, rate)\n )(x)", "def conv_block(input_tensor, kernel_size, filters, strides=(2, 2)):\n\n filters1, filters2, filters3 = filters\n\n if backend.image_data_format() == 'channels_last':\n bn_axis = 3\n else:\n bn_axis = 1\n\n x = layers.Conv2D(filters1, (1, 1), use_bias=False,\n kernel_initializer='he_normal',\n kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY))(input_tensor)\n x = layers.BatchNormalization(axis=bn_axis,\n momentum=BATCH_NORM_DECAY,\n epsilon=BATCH_NORM_EPSILON)(x)\n x = layers.Activation('relu')(x)\n\n x = layers.Conv2D(filters2, kernel_size, strides=strides, padding='same',\n use_bias=False, kernel_initializer='he_normal',\n kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY))(x)\n x = layers.BatchNormalization(axis=bn_axis,\n momentum=BATCH_NORM_DECAY,\n epsilon=BATCH_NORM_EPSILON)(x)\n x = layers.Activation('relu')(x)\n\n x = layers.Conv2D(filters3, (1, 1), use_bias=False,\n kernel_initializer='he_normal',\n kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY))(x)\n x = layers.BatchNormalization(axis=bn_axis,\n momentum=BATCH_NORM_DECAY,\n epsilon=BATCH_NORM_EPSILON)(x)\n\n shortcut = layers.Conv2D(filters3, (1, 1), strides=strides, use_bias=False,\n kernel_initializer='he_normal',\n kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY))(input_tensor)\n shortcut = layers.BatchNormalization(axis=bn_axis,\n momentum=BATCH_NORM_DECAY,\n epsilon=BATCH_NORM_EPSILON)(shortcut)\n\n x = layers.add([x, shortcut])\n x = layers.Activation('relu')(x)\n return x", "def output_shape_conv_and_pool_layer(rows: 
int,\n columns: int,\n kernel: int,\n stride: int = 1,\n padding: int = 0,\n dilatation: float = 1.) -> Tuple[int, int]:\n return (\n int((rows + 2 * padding - dilatation * (kernel - 1) - 1) / stride + 1),\n int((columns + 2 * padding - dilatation * (kernel - 1) - 1) / stride + 1),\n )", "def convolve(images, kernels, padding='same', stride=(1, 1)):\n m, h, w = images.shape[:3]\n kh, kw, c, nc = kernels.shape\n sh, sw = stride\n if type(padding) is tuple:\n ph, pw = padding\n elif padding == 'valid':\n ph, pw = 0, 0\n else:\n ph = (((h - 1) * sh + kh - h) // 2) + 1\n pw = (((w - 1) * sw + kw - w) // 2) + 1\n out_images = np.zeros((m, (h - kh + (2 * ph))//sh + 1,\n (w - kw + (2 * pw))//sw + 1, nc))\n images = np.pad(images, ((0, 0), (ph, ph), (pw, pw), (0, 0)), 'constant')\n for i in range((h - kh + (2 * ph))//sh + 1):\n for j in range((w - kw + (2 * pw))//sw + 1):\n for n in range(nc):\n out_images[:, i, j, n] = np.sum(kernels[:, :, :, n] * images[\n :, i*sh: i*sh + kh, j*sw: j*sw + kw, :], axis=(1, 2, 3))\n return out_images", "def conv2d(input, filters, image_shape=None, filter_shape=None,\r\n border_mode='valid', subsample=(1, 1), **kargs):\r\n\r\n #accept Constant value for image_shape and filter_shape.\r\n if image_shape is not None:\r\n image_shape = list(image_shape)\r\n for i in xrange(len(image_shape)):\r\n if image_shape[i] is not None:\r\n try:\r\n image_shape[i] = get_scalar_constant_value(\r\n as_tensor_variable(image_shape[i]))\r\n except NotScalarConstantError, e:\r\n raise NotScalarConstantError(\r\n \"The convolution need that the shape\"\r\n \" information are constant values. We got\"\r\n \" %s for the image_shape parameter\" %\r\n image_shape[i])\r\n assert str(image_shape[i].dtype).startswith('int')\r\n image_shape[i] = int(image_shape[i])\r\n if filter_shape is not None:\r\n filter_shape = list(filter_shape)\r\n for i in xrange(len(filter_shape)):\r\n if filter_shape[i] is not None:\r\n try:\r\n filter_shape[i] = get_scalar_constant_value(\r\n as_tensor_variable(filter_shape[i]))\r\n except NotScalarConstantError, e:\r\n raise NotScalarConstantError(\r\n \"The convolution need that the shape\"\r\n \" information are constant values. 
We got\"\r\n \" %s for the filter_shape \"\r\n \"parameter\" % filter_shape[i])\r\n assert str(filter_shape[i].dtype).startswith('int')\r\n filter_shape[i] = int(filter_shape[i])\r\n\r\n if image_shape and filter_shape:\r\n try:\r\n assert image_shape[1] == filter_shape[1]\r\n except Exception:\r\n print 'image ', image_shape, ' filters ', filter_shape\r\n raise\r\n\r\n if filter_shape is not None:\r\n nkern = filter_shape[0]\r\n kshp = filter_shape[2:]\r\n else:\r\n nkern, kshp = None, None\r\n\r\n if image_shape is not None:\r\n bsize = image_shape[0]\r\n imshp = image_shape[1:]\r\n else:\r\n bsize, imshp = None, None\r\n\r\n op = ConvOp(output_mode=border_mode, dx=subsample[0], dy=subsample[1],\r\n imshp=imshp, kshp=kshp, nkern=nkern, bsize=bsize, **kargs)\r\n\r\n return op(input, filters)", "def add_conv_type1(model, depth, input_shape=None):\n if input_shape is not None:\n model.add(Convolution2D(depth, 5, 5, subsample=(2, 2), \\\n input_shape=input_shape))\n else:\n model.add(Convolution2D(depth, 5, 5, subsample=(2, 2), \\\n activation='relu', W_regularizer=l2(0.05)))", "def fixed_padding(inputs, kernel_size, data_format):\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n\n if is_NHWC(data_format):\n padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],\n [pad_beg, pad_end], [0, 0]])\n else:\n padded_inputs = tf.pad(inputs, [[0, 0], [0, 0],\n [pad_beg, pad_end], [pad_beg, pad_end]])\n return padded_inputs", "def calc_pad_dims_2D(X_shape, out_dim, kernel_shape, stride, dilation=0):\n\tif not isinstance(X_shape, tuple):\n\t\traise ValueError(\"X_shape must be of type tuple\")\n\n\tif not isinstance(out_dim, tuple):\n\t\traise ValueError(\"out_dim must be of type tuple\")\n\n\tif not isinstance(kernel_shape, tuple):\n\t\traise ValueError(\"kernel_shape must be of type tuple\")\n\n\tif not isinstance(stride, int):\n\t\traise ValueError(\"stride must be of type int\")\n\n\td = dilation\n\tfr, fc = kernel_shape\n\tout_rows, out_cols = out_dim\n\tn_ex, in_rows, in_cols, in_ch = X_shape\n\n\t# update effective filter shape based on dilaltion factor\n\t_fr, _fc = fr * (d + 1) - d, fc * (d + 1) - d\n\tpr = int((stride * (out_rows - 1) + _fr - in_rows) / 2)\n\tpc = int((stride * (out_cols - 1) + _fc - in_cols) / 2)\n\tout_rows1 = int(1 + (in_rows + 2 * pr - _fr) / stride)\n\tout_cols1 = int(1 + (in_cols + 2 * pc - _fc) / stride)\n\n\t# add asymmetric padding pixels to right/bottom\n\tpr1, pr2 = pr, pr\n\tif out_rows1 == out_rows - 1:\n\t\tpr1, pr2 = pr, pr+1\n\telif out_rows1 != out_rows:\n\t\traise AssertionError\n\n\tif any(np.array([pr1, pr2, pc1, pc2]) < 0):\n\t\traise ValueError(\n\t\t\t\"padding cannot be less than 0. 
Get: {}\".format((pr1, pr2, pc1, pc2))\n\t\t\t)\n\treturn (pr1, pr2, pc1, pc2)", "def calc_pad_dims_2D(X_shape, out_dim, kernel_shape, stride, dilation=0):\n if not isinstance(X_shape, tuple):\n raise ValueError(\"`X_shape` must be of type tuple\")\n\n if not isinstance(out_dim, tuple):\n raise ValueError(\"`out_dim` must be of type tuple\")\n\n if not isinstance(kernel_shape, tuple):\n raise ValueError(\"`kernel_shape` must be of type tuple\")\n\n if not isinstance(stride, int):\n raise ValueError(\"`stride` must be of type int\")\n\n d = dilation\n fr, fc = kernel_shape\n out_rows, out_cols = out_dim\n n_ex, in_rows, in_cols, in_ch = X_shape\n\n # update effective filter shape based on dilation factor\n _fr, _fc = fr * (d + 1) - d, fc * (d + 1) - d\n\n pr = int((stride * (out_rows - 1) + _fr - in_rows) / 2)\n pc = int((stride * (out_cols - 1) + _fc - in_cols) / 2)\n\n out_rows1 = int(1 + (in_rows + 2 * pr - _fr) / stride)\n out_cols1 = int(1 + (in_cols + 2 * pc - _fc) / stride)\n\n # add asymmetric padding pixels to right / bottom\n pr1, pr2 = pr, pr\n if out_rows1 == out_rows - 1:\n pr1, pr2 = pr, pr + 1\n elif out_rows1 != out_rows:\n raise AssertionError\n\n pc1, pc2 = pc, pc\n if out_cols1 == out_cols - 1:\n pc1, pc2 = pc, pc + 1\n elif out_cols1 != out_cols:\n raise AssertionError\n\n if any(np.array([pr1, pr2, pc1, pc2]) < 0):\n raise ValueError(\n \"Padding cannot be less than 0. Got: {}\".format((pr1, pr2, pc1, pc2))\n )\n return (pr1, pr2, pc1, pc2)", "def test_on_conv_transpose_2d_padding(self):\n x = jn.array([[[[2., 1., 3., 4.], [5., 6., 7., 8.], [9., 10., 11., 12.], [13., 14., 15., 16.]]]])\n y = jn.array([[[[2., 5., 5., 10.], [11., 27., 32., 46.], [24., 66., 76., 86.], [40., 106., 116., 126.]]]])\n w_init = lambda s: jn.array([[[[1., 2.], [3., 4.]]]]).transpose((2, 3, 0, 1))\n conv = objax.nn.ConvTranspose2D(1, 1, 2, padding=objax.ConvPadding.SAME, w_init=w_init)\n self.assertEqual(conv(x).tolist(), y.tolist())\n conv = objax.nn.ConvTranspose2D(1, 1, 2, padding='same', w_init=w_init)\n self.assertEqual(conv(x).tolist(), y.tolist())\n conv = objax.nn.ConvTranspose2D(1, 1, 2, padding='Same', w_init=w_init)\n self.assertEqual(conv(x).tolist(), y.tolist())\n conv = objax.nn.ConvTranspose2D(1, 1, 2, padding='SAME', w_init=w_init)\n self.assertEqual(conv(x).tolist(), y.tolist())\n conv = objax.nn.ConvTranspose2D(1, 1, 2, padding=(1, 0), w_init=w_init)\n self.assertEqual(conv(x).tolist(), y.tolist())\n conv = objax.nn.ConvTranspose2D(1, 1, 2, padding=[(1, 0), (1, 0)], w_init=w_init)\n self.assertEqual(conv(x).tolist(), y.tolist())\n y = [[[[2., 5., 5., 10., 8.], [11., 27., 32., 46., 32.], [24., 66., 76., 86., 56.],\n [40., 106., 116., 126., 80.], [39., 94., 101., 108., 64.]]]]\n conv = objax.nn.ConvTranspose2D(1, 1, 2, padding=1, w_init=w_init)\n self.assertEqual(conv(x).tolist(), y)\n conv = objax.nn.ConvTranspose2D(1, 1, 2, padding=(1, 1), w_init=w_init)\n self.assertEqual(conv(x).tolist(), y)\n conv = objax.nn.ConvTranspose2D(1, 1, 2, padding=[(1, 1), (1, 1)], w_init=w_init)\n self.assertEqual(conv(x).tolist(), y)", "def test_same_convolution(conv1d_placeholder, spatial_onehot, output_size, width, stride):\n conv_layer = Convolution((3, output_size), lambda x: 1, strides=stride, padding=\"same\")\n output = conv_layer(conv1d_placeholder)\n output_width = output.axes.find_by_name(\"W\")[0].length\n assert output_width == np.ceil(width / float(stride)), (\"Same convolution output width != \"\n \"ceil(input_width / stride): {} != \"\n \"ceil({} / {})\").format(output_width,\n 
width,\n stride)", "def out_conv(spatial, config):\n p, k, s = [config[k] \n for k in ['padding', 'kernel_size', 'stride']]\n p2 = p if isinstance(p, int) else p[0] + p[1]\n\n return (spatial + p2 - k)//s + 1", "def schedule_conv2d_NCHWc(num_filter, kernel_size, stride, padding, outs):\n s = tvm.create_schedule([x.op for x in outs])\n\n def traverse(op):\n \"\"\"Traverse operators from computation graph\"\"\"\n # inline all one-to-one-mapping operators except the last stage (output)\n if tag.is_broadcast(op.tag):\n if op not in s.outputs:\n s[op].compute_inline()\n for tensor in op.input_tensors:\n if tensor.op.input_tensors:\n traverse(tensor.op)\n\n if 'conv2d_NCHWc' in op.tag:\n conv_out = op.output(0)\n kernel = conv_out.op.input_tensors[1]\n data_vec = conv_out.op.input_tensors[0]\n data = data_vec.op.input_tensors[0] \\\n if isinstance(data_vec.op, tvm.tensor.ComputeOp) and \"pad\" not in data_vec.op.tag \\\n else data_vec\n if isinstance(data.op, tvm.tensor.ComputeOp) and \"pad\" in data.op.tag:\n data_pad = data\n data = data_pad.op.input_tensors[0]\n\n n, ic_chunk, h, w, ic_block = [x.value for x in data.shape]\n ic = ic_chunk * ic_block\n original_data = tvm.placeholder((n, ic, h, w), dtype=conv_out.dtype)\n\n kh, kw = kernel_size\n original_kernel = tvm.placeholder((num_filter, ic, kh, kw), dtype=conv_out.dtype)\n\n wkl = _get_workload(original_data, original_kernel, stride, padding, conv_out.dtype)\n sch = _get_schedule(wkl)\n _SCH_TO_SCH_FUNC[type(sch)](s, wkl, data_vec,\n kernel, conv_out, outs[0])\n\n traverse(outs[0].op)\n return s", "def Conv2d(X, size, stride, init, name, padding, activation):\n print(name, size, size[-1])\n w = get_weights(shape=size, name='W_' + name, init=init)\n b = get_weights(shape=[size[-1]], name='b_' + name, init=init)\n \n conv = tf.nn.conv2d(X, w, strides=[1, stride, stride, 1], \n padding=padding) + b\n \n ## Applying activation\n\n if activation == 'relu':\n h_conv = tf.nn.relu(conv)\n elif activation == 'sigmoid':\n h_conv = tf.nn.sigmoid(conv)\n elif activation == 'leaky_relu':\n h_conv = tf.nn.leaky_relu(conv)\n \n return h_conv" ]
[ "0.6828457", "0.67641896", "0.67054737", "0.6690218", "0.6687156", "0.659067", "0.6519699", "0.6487844", "0.6447366", "0.6444962", "0.6419147", "0.64094675", "0.63942194", "0.63535935", "0.631613", "0.62766844", "0.6266804", "0.6255546", "0.6248054", "0.62391937", "0.6237672", "0.623349", "0.62235785", "0.6223345", "0.6218419", "0.62148196", "0.6202379", "0.61973995", "0.6191305", "0.61822826", "0.61793965", "0.616857", "0.6158484", "0.6154959", "0.6154595", "0.61354375", "0.6130778", "0.60992086", "0.6074383", "0.6074383", "0.6073008", "0.60661066", "0.6062866", "0.6056274", "0.6045058", "0.60408694", "0.60408694", "0.60390556", "0.60334975", "0.60077035", "0.5997675", "0.5995301", "0.59763736", "0.59679997", "0.5958819", "0.5956379", "0.5952508", "0.59514815", "0.5948652", "0.5948612", "0.59385955", "0.5937551", "0.59291434", "0.59157014", "0.59080154", "0.59078664", "0.590588", "0.5887573", "0.58781683", "0.587679", "0.58754665", "0.5870557", "0.586943", "0.5866345", "0.5838039", "0.58362687", "0.58169734", "0.58138996", "0.5811781", "0.57960474", "0.5788932", "0.57842714", "0.5773422", "0.5767027", "0.5765463", "0.5764751", "0.57618725", "0.5761819", "0.57554764", "0.5755144", "0.5754906", "0.5732188", "0.57274604", "0.5720008", "0.5716325", "0.5714832", "0.571348", "0.5713423", "0.57016337", "0.5697558" ]
0.6141132
35
Safely convert parameter value from .cfg file
def safe_conversion(value):
    try:
        value = ast.literal_eval(value)
        value = list(value) if isinstance(value, tuple) else value
        return value
    except ValueError:
        return value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _loadParamFromFile(config, section, paramName):\n\n # Get paramName from answer file\n value = config.get(section, paramName)\n\n # Validate param value using its validation func\n param = controller.getParamByName(paramName)\n _validateParamValue(param, value)\n\n # Keep param value in our never ending global conf\n controller.CONF[param.getKey(\"CONF_NAME\")] = value\n\n return value", "def parameter(name,default_value=None):\n settings = file(settings_file()).read()\n for line in settings.split(\"\\n\"):\n line = line.strip(\" \\n\\r\")\n if len(line.split(\"=\")) != 2: continue\n keyword,value = line.split(\" = \")\n keyword = keyword.strip(\" \")\n if keyword == name: return eval(value)\n return default_value", "def ReadParameterFile(pf):\n f = open(pf, \"r\")\n pf_dict = SetDefaultParameterValues()\n for line in f:\n if not line.split(): \n continue\n if line.split()[0][0] == \"#\": \n continue\n \n # This will prevent crashes if there is not a blank line at the end of the parameter file\n if line[-1] != '\\n': \n line += '\\n'\n \n # Cleave off end-of-line comments.\n line = line[:line.rfind(\"#\")].strip()\n \n # Read in the parameter name and the parameter value(s).\n parname, eq, parval = line.partition(\"=\")\n \n # Else, actually read in the parameter \n try: \n parval = float(parval)\n except ValueError:\n if re.search('/', parval): # For directory with more than one level\n parval = str(parval.strip())\n elif parval.strip().isalnum(): \n parval = str(parval.strip())\n elif parval.replace('_', '').strip().isalnum():\n parval = parval.strip()\n elif parval.partition('.')[-1] in ['dat', 'hdf5', 'h5', 'txt']:\n parval = str(parval.strip())\n else:\n parval = parval.strip().split(\",\")\n tmp = [] \n if parval[0][0] == '(':\n for element in parval: \n if element.strip(\" (,)\").isdigit(): \n tmp.append(float(element.strip(\"(,)\")))\n else: \n tmp.append(element.strip(\" (,)\"))\n parval = tuple(tmp) \n elif parval[0][0] == '[':\n for element in parval: \n tmp.append(float(element.strip(\"[,]\")))\n parval = list(tmp)\n else:\n print(parname, parval)\n raise ValueError('The format of this parameter is not understood.')\n \n pf_dict[parname.strip()] = parval\n \n return pf_dict", "def _load_parameter(self):", "def getconfig(filepath, param, default=None):\n with open(filepath, 'rb') as f:\n for line in f:\n if line.strip().startswith('#') or '=' not in line:\n continue\n k, v = line.split('=', 1)\n if k.strip() == param:\n return v.strip()\n return default", "def get_parameter(par_name):\r\n config_file = open('./config.txt', 'r')\r\n lines = config_file.readlines()\r\n for line in lines:\r\n line = line.rstrip('\\n\\r')\r\n if line.startswith(par_name):\r\n return line.split('=')[1]", "def _config_file_callback(ctx, param, value):\n ctx.default_map = ctx.default_map or {}\n section = ctx.info_name\n\n if value:\n config = anyconfig.load(value)[section]\n ctx.default_map.update(config)\n\n return value", "def check_convert_parameter(name, value, parameter):\n\n try:\n # Try to cast the value to the correct type if int or float\n if parameter.type_ is int:\n new_value = parameter.type_(float(value))\n elif parameter.type_ is float:\n new_value = parameter.type_(value)\n elif parameter.type_ is bool:\n new_value = strtobool(value)\n else:\n new_value = value\n except ValueError:\n new_value = parameter.default\n console.print(\n f\"[red]'{name}' format should be '{parameter.type_.__name__}' type[/red]\",\n f\"[red]and could not be converted. 
Setting default '{new_value}'.\\n[/red]\",\n )\n\n return new_value", "def config_gettype(function_name,\n config_name,\n param):\n config = configparser.ConfigParser()\n config.read(path_creator(config_name))\n #config.read(path_creator(config_name))\n if config[function_name][param].split(' ## ')[1] == 'str':\n return str(config.get(function_name,param).split(' ## ')[0])\n if config[function_name][param].split(' ## ')[1] == 'int':\n return int(config.get(function_name,param).split(' ## ')[0])\n if config[function_name][param].split(' ## ')[1] == 'float':\n return float(config.get(function_name,param).split(' ## ')[0])\n if config[function_name][param].split(' ## ')[1] == 'bool':\n return bool(config.get(function_name,param).split(' ## ')[0])\n if config[function_name][param].split(' ## ')[1] == 'path':\n return path_creator(str(config.get(function_name,param).split(' ## ')[0]))\n if config[function_name][param].split(' ## ')[1] == 'NoneType':\n return None", "def _adaptConfigurationValue (cls, value : String) -> Object:\n\n Logging.trace(\">>: %r\", value)\n uppercasedValue = value.upper()\n\n if uppercasedValue in cls._validBooleanValueNames:\n result = (uppercasedValue in cls._trueBooleanValueNames)\n elif (cls._integerRegExp.match(value)\n or cls._hexIntegerRegExp.match(value)): \n result = int(value)\n elif cls._realRegExp.match(value):\n result = float(value)\n else:\n result = value\n \n Logging.trace(\"<<: %r\", result)\n return result", "def to_python(self, name, value):\r\n \r\n if len( str(value).strip() ) == 0:\r\n raise admin.ArgValidationException(\"The value for the '%s' parameter cannot be empty\" % (name))\r\n \r\n return value", "def readParameterfile(self, filename):\n return None", "def _str_to_val(self, value):\n kind, value = value.split(': ', 1)\n\n # Lists and dictionaries are special case\n if kind in ('L', 'D'):\n return eval(value)\n\n if kind in TYPE_MAPPING.keys():\n if kind == 'B':\n if value != 'True':\n return False\n\n value = TYPE_MAPPING[kind](value)\n\n return value\n else:\n raise ValueError(\"An Unknown type of setting was found!\")", "def test_bool_param_from_str():\n\n @dataclass\n class TestConfig(ConfigParserToDataclassMapper):\n param_1: bool\n param_2: bool\n mapping = {'param_1': ParamInfo('section', 'param_1'),\n 'param_2': ParamInfo('section', 'param_2')}\n\n cfg = configparser.ConfigParser()\n cfg['section'] = {'param_1': 'yes', 'param_2': 'no'}\n\n conf = TestConfig.create_from_cfg(cfg)\n assert conf.param_1 == True\n assert conf.param_2 == False", "def check_config(cfg):", "def valid_cfg(cfg):\n\t\tif not isinstance(cfg, dict):\n\t\t\traise TypeError('Config should be a python dictionary')\n\t\treturn cfg", "def sanitize_config(config):\n\tif not 'dbfile' in config.keys():\n\t\tconfig['dbfile'] = DEFAULTCONFIG['dbfile']\n\tfor path in ['dbfile']:\n\t\tconfig[path]=os.path.expanduser(config[path])\n\treturn config", "def parse_value(self, value_name, default=None):\n\t\treturn self.cfg_root.find(value_name).text", "def _trans_format(self):\n config_dict = vars(self._config)\n for item, value in config_dict.items():\n if value == 'None':\n config_dict[item] = None\n elif isinstance(value, str) and is_number(value):\n if value.isdigit():\n value = int(value)\n else:\n value = float(value)\n config_dict[item] = value", "def config_value(name):\n def get():\n try:\n return config.get('yourls', name)\n except (NoOptionError, NoSectionError):\n return None\n return get", "def _coerceOption(self, configDict, key, value):\n if key in configDict:\n 
if isinstance(configDict[key], bool):\n value = value == \"True\"\n\n elif isinstance(configDict[key], (int, float, long)):\n value = type(configDict[key])(value)\n\n elif isinstance(configDict[key], (list, tuple)):\n value = value.split(',')\n\n elif isinstance(configDict[key], dict):\n raise UsageError(\n \"Dict options not supported on the command line\"\n )\n\n elif value == 'None':\n value = None\n\n return value", "def _parse_env_value(val):\n if val.lower() == \"false\":\n return False\n elif val.lower() == \"true\":\n return True\n try:\n return int(val)\n except ValueError:\n pass\n try:\n return float(val)\n except ValueError:\n pass\n return val", "def log_value_from_config(arg_name: str, config_value: Any):\n logger.info(\n f\"The {arg_name} argument is being taken from configuration file, i.e., {config_value}.\"\n )", "def decode_conf(value):\n if not value:\n return None\n\n if \"@\" in value:\n try:\n return \"\".join(chr(int(c)) for c in value.rstrip()[:-1].split(\"@\"))\n\n except Exception as ex:\n print \"Failed to decode value %s: %s\" % ex\n return None\n\n return value", "def reloadConfig():\n print \"---=== load cfg ===---\"\n cfgFile = open(\"../cfg/config.cfg\", \"r\")\n lines = cfgFile.readlines()\n \n for l in lines:\n l = l.replace(\"\\n\", \"\")\n if (l.count(\"#\") and l.index(\"#\") == 0) or l.count(\":\") == 0:\n continue\n tmp = l.split(\":\")\n tmp[0] = tmp[0].replace(\"trim\", \"\")\n tmp[0] = tmp[0].replace(\"max\", \"\")\n tmp[0] = tmp[0].replace(\"slew\", \"\")\n tmp[0] = tmp[0].replace(\"min\", \"\")\n \n if tmp[0] in var.min:\n var.min[tmp[0]] = float(tmp[1])\n print tmp[0], \" = \", tmp[1]\n if tmp[0] in var.trim:\n var.trim[tmp[0]] = int(tmp[1])\n print tmp[0], \" = \", tmp[1]\n if tmp[0] in var.max:\n var.max[tmp[0]] = int(tmp[1])\n print tmp[0], \" = \", tmp[1]\n if tmp[0] in var.slew:\n var.slew[tmp[0]] = int(tmp[1])\n print tmp[0], \" = \", tmp[1]\n \n print \"---=== loaded cfg ===---\"", "def from_config_string(self, v: str) -> Any:\n try:\n v = eval(v)\n except Exception:\n pass\n return self.instance(v)", "def _parse_param_as_bool(\n enodeb: EnodebAcsStateMachine,\n param_name: ParameterName\n) -> str:\n try:\n param = enodeb.get_parameter(param_name)\n pval = param.lower().strip()\n if pval in {'true', '1'}:\n return '1'\n elif pval in {'false', '0'}:\n return '0'\n else:\n logging.warning(\n '%s parameter not understood (%s)', param_name, param)\n return '0'\n except (KeyError, ConfigurationError):\n return '0'", "def changeParameterFile(speciesfolder, species):\n\twith open(\"{}/{}_parameters.cfg\".format(speciesfolder, species), \"r+\") as inFile:\n\t\tfor line in inFile:\n\t\t\tif \"generic\" in line:\n\t\t\t\tinFile.write(line.replace(\"generic\", species))\n\t\t\telse:\n\n\t\t\t\tinFile.write(line)", "def test_config_from_text_wellformed_content():\n config_text = \"\"\"\n [resources]\n metadata_cache_uri = https://another-aqua.url\n \"\"\"\n config = Config(text=config_text)\n assert config.metadata_cache_uri == \"https://another-aqua.url\"", "def get_config(args):\n load_args={}\n with open(args.config, 'r') as f:\n for line in f:\n key, value = line.strip().split('=')\n try:\n value = int(value)\n except ValueError:\n try:\n value = float(value)\n except ValueError:\n value = value\n load_args[key] = value\n args.__dict__.update(load_args)", "def ini_get(interp, vname):\n w_value = interp.config.get_ini_w(vname)\n if w_value is None:\n return interp.space.w_False\n return interp.space.as_string(w_value)", "def 
_parse_option_value(line, option_name):\n try:\n option_value = line.split('=')[1].strip()\n except IndexError:\n option_value = ''\n if not option_value:\n raise ValueError(\"No value specified for {} option.\".format(option_name))\n return option_value", "def get_cfg_var(interp, var):\n w_value = interp.config.get_ini_w(var)\n if w_value is None:\n return interp.space.w_False\n return w_value", "def test_get_config_file_value(self):\n parser = GbpOptionParser('cmd4')\n self.assertEqual(parser.get_config_file_value('new_overrides_git_option1'),\n 'new_overrides_git_value1')\n self.assertEqual(parser.get_config_file_value('doesnotexist'), None)", "def read_parameter_file( param_file_name ):\n return json.loads( open( param_file_name).read() )", "def testConfigC(self):\n assert type(self.config['debug']) == bool, \"Not parsing string to boolean correctly\"", "def getPropValue(nm, encrypted=False, cfg=None, dflt=None, skipComplaining=False):\n if cfg is None:\n return None\n global getPropDict\n if getPropDict.get(cfg):\n savedDate = getPropDict[cfg]\n # trace(\"getPropValue: savedDate[\" + cfg + \"]=\" + str(savedDate))\n cfgDate = os.path.getmtime(cfg)\n # trace(\"getPropValue: cfgDate=\" + str(cfgDate))\n if float(savedDate) >= float(cfgDate): # cfg has not changed\n val = getPropDict.get(cfg + \":\" + nm)\n # trace(\"getPropValue: val=\" + val)\n if val is not None:\n # trace(\"getPropValue: getPropValue(saved) => '%s'\" % str(val))\n return val\n else: # clear out any previously saved keys\n cfgcolon = cfg + \":\"\n for k in list(getPropDict.keys()):\n if re.match(cfgcolon, k):\n del getPropDict[k]\n getPropValueProgram = '/opt/app/cdf/bin/getpropvalue'\n if encrypted:\n cmd = [getPropValueProgram, \"-f\", cfg, \"-x\", \"-n\", nm]\n else:\n cmd = [getPropValueProgram, \"-f\", cfg, \"-n\", nm]\n # trace(\"getPgaasPropValue: cmd=\" + str(cmd))\n\n try:\n with subprocess.Popen(cmd,shell=False,stdout=subprocess.PIPE,stderr=subprocess.PIPE) as p:\n (origString, stderrString) = p.communicate()\n except Exception as e:\n traceback.print_exc()\n print(\"Error decoding string because {0}\".format(e), file=errorOutput)\n return None\n else:\n if stderrString:\n if not re.search(\"Configuration property .* must be defined\", stderrString.decode('utf-8')) and not skipComplaining:\n print(\"Error decoding string because: {0} \".format(stderr), file=errorOutput)\n return dflt\n else:\n trace(\"getPgaasPropValue() => \" + str(origString), minLevel=2)\n return origString.decode('utf-8').rstrip('\\n')", "def test_process_classic_config_conv_field(self):\n self.country_config[\"fields\"].append(\n {\"dest\": \"d_val\", \"source\": \"s_val\",\n \"conv\": \"generateRegistrantUrl\"})\n expected_output = (\"the_table|primkey|\"\n \" `d_val` varchar(255) NOT NULL DEFAULT '',|\")\n result = monument_tables.process_classic_config(self.country_config)\n self.assertEqual(result, expected_output)", "def _read_parameter_file():\n if not os.path.exists(\"./config.txt\"):\n raise FileNotFoundError(\"Can't find the configuration file \"\n \"./config.txt\")\n\n required = ['compiler_suite', 'project_name', 'scratch_path',\n 'specfem_root', 'lasif_path', 'iteration_name']\n\n # Read parameters into dictionary.\n parameters = {}\n file = open(\"./config.txt\", \"r\")\n for line in file:\n if line.startswith(\"#\"):\n continue\n fields = line.split()\n parameters.update({fields[0]: fields[1]})\n\n # Ensure all parameters are there.\n for param in required:\n if param not in parameters.keys():\n raise 
ParameterError(\"Parameter \" + param + \" not in parameter \"\n \"file\")\n\n # Build full paths.\n parameters['scratch_path'] = os.path.abspath(parameters['scratch_path'])\n parameters['specfem_root'] = os.path.abspath(parameters['specfem_root'])\n parameters['lasif_path'] = os.path.abspath(parameters['lasif_path'])\n\n # Derived parameters.\n forward_stage_dir = os.path.join(\n parameters['scratch_path'],\n parameters['project_name'])\n forward_run_dir = os.path.join(\n forward_stage_dir,\n parameters['iteration_name'])\n lasif_scratch_path = os.path.join(\n parameters['scratch_path'],\n os.path.basename(\n parameters['lasif_path']))\n\n # Get list of all event names.\n try:\n iteration_xml_path = os.path.join(\n parameters['lasif_path'],\n 'ITERATIONS',\n 'ITERATION_%s.xml' %\n (parameters['iteration_name']))\n tree = ET.parse(iteration_xml_path)\n except:\n iteration_xml_path = os.path.join(\n lasif_scratch_path,\n 'ITERATIONS',\n 'ITERATION_%s.xml' %\n (parameters['iteration_name']))\n tree = ET.parse(iteration_xml_path) \n \n root = tree.getroot()\n event_list = []\n for name in root.findall('event'):\n for event in name.findall('event_name'):\n event_list.append(event.text)\n\n parameters.update({'forward_stage_dir': forward_stage_dir})\n parameters.update({'forward_run_dir': forward_run_dir})\n parameters.update({'iteration_xml_path': iteration_xml_path})\n parameters.update({'event_list': sorted(event_list)})\n parameters.update({'lasif_scratch_path': lasif_scratch_path})\n\n return parameters", "def set_param(self):\n with open(\"settings.txt\", \"r\") as f:\n filedata = f.read()\n settings = [_.split(\"=\") for _ in filedata.split(\"\\n\")]\n for setting in settings:\n if len(setting) < 2: # if blank line\n continue\n if setting[0] == self.param:\n setting[1] = str(self.param_value)\n\n with open(\"settings.txt\", \"w\") as f:\n for setting in settings:\n if len(setting) < 2: # if blank line\n continue\n f.write(setting[0] + \"=\" + setting[1] + \"\\n\")", "def set_config(param, value):\n _config = loadConfig()\n _paramField = rgetattr(_config, param)\n # define types that can be cast from command line input\n primitive = (int, str, bool)\n\n def is_primitiveType(_type):\n return _type in primitive\n\n # cast type\n if type(_paramField) == type(Union) and is_primitiveType(type(_paramField).__args__[0]):\n value = type(_paramField).__args__[0](value)\n elif is_primitiveType(type(_paramField)):\n value = type(_paramField)(value)\n\n try:\n rsetattr(_config, param, value)\n except TypeError as err:\n click.echo(err)\n saveConfig(_config)", "def makeConfig (self):\n for line in self.lines :\n ll = line.split ('=', 1)\n if len(ll) < 2 :\n print \"Error in parsing cfg label line: \" , line\n return None\n self.config[(ll[0]).strip()] = ((ll[1]).strip())", "def get_config(item: str) -> Union[str, int]:\n file = load_config_file(\"config.json\")\n\n value = file.get(item)\n\n if value is None:\n raise Exception(f\"Your config is out of date! 
Missing a value for {item}\")\n return value", "def prepare(k,v):\n self._settings[k]=v.strip()\n if '#' in v:\n self._settings[k]=v[:v.find('#')]", "def cb_config(data, option, value):\n option_name = option.split(\".\")[-1]\n if option_name in vimode_settings:\n vimode_settings[option_name] = value\n return weechat.WEECHAT_RC_OK", "def testConfigD(self):\n assert type(self.config['icon_img_path']) == str, \"Not parsing string to correct type\"", "def coerceOption(configDict, key, value):\n if key in configDict:\n if isinstance(configDict[key], bool):\n value = value == \"True\"\n\n elif isinstance(configDict[key], (int, float, long)):\n value = type(configDict[key])(value)\n\n elif isinstance(configDict[key], (list, tuple)):\n value = value.split(\",\")\n\n elif isinstance(configDict[key], dict):\n raise UsageError(\n \"Dict options not supported on the command line\"\n )\n\n elif value == \"None\":\n value = None\n\n return value", "def _get_config_info_from_file(file, args):\n config_info = _load_config_info(file)\n\n if args.url:\n config_info = config_info._replace(url=args.url)\n\n if args.force:\n config_info = config_info._replace(version=None)\n elif args.local_version:\n config_info = config_info._replace(version=args.local_version)\n\n return config_info", "def conf_load_parameter(fin):\n err_msg = \"Unknown parameter definition. Excpected $par_name=(list|range|linspace).\"\n spec = fin.readline().strip().split('=')\n if len(spec) != 2:\n raise EnvironmentError(err_msg)\n par_name, par_def = [s.strip() for s in spec]\n if len(par_def) > 1 and par_def[0] == '[' and par_def[-1] == ']':\n return par_name, conf_load_par_list(par_def)\n elif len(par_def) > 3 and par_def.count(':') == 2 and par_def[-1] == 'l':\n return par_name, conf_load_par_linspace(par_def)\n elif par_def.count(':') == 2:\n return par_name, conf_load_par_range(par_def)\n else:\n raise EnvironmentError(err_msg + \" Found {0} for {1}\".format(par_def,par_name))", "def parse_parameter(code, param):\n if (\n param != \"null\"\n and param[0] != \"'\"\n and not is_number(param)\n ):\n return find_value(code, param).replace(\"'\", \"\")\n\n return param.replace(\"'\", \"\")", "def validation_param_detail(yaml_file_name, file_header):\n validation_param = ast.literal_eval(\n json.dumps(\n yaml.load(\n open(f\"{Common.config_path}/{yaml_file_name}\"),\n Loader=yaml.FullLoader,\n )[file_header]\n )\n )\n return validation_param", "def get(self,section,option):\n \n value = ConfigParser.SafeConfigParser.get(self,section,option)\n value=value.strip('\"')\n return value", "def testConfigF(self):\n assert type(self.config['game_height']) == int, \"Not parsing the game width correctly\"", "def LoadParams(file):\n global globalParams\n global globalSections\n\n # check to see whether the file exists\n try: f = open(file, 'r')\n except IOError:\n fail('ERROR: parameter file does not exist: ', file)\n else:\n f.close()\n\n\n cp = ConfigParser.ConfigParser()\n cp.optionxform = str\n cp.read(file)\n\n globalSections = cp.sections()\n\n for sec in cp.sections():\n\n for opt in cp.options(sec):\n\n value = cp.get(sec, opt)\n \n # check in turn whether this is an interger, float, or string\n if (isInt(value)):\n globalParams[sec + \".\" + opt] = int(value)\n elif (isFloat(value)):\n globalParams[sec + \".\" + opt] = float(value)\n else:\n globalParams[sec + \".\" + opt] = value.strip()", "def _fake_safe_get(self, value):\n try:\n val = getattr(self.configuration, value)\n except AttributeError:\n val = None\n return val", "def 
test_from_yaml_path(tmpdir):\n with tmpdir.as_cwd():\n with open(\"params.yaml\", \"w\") as fp:\n fp.write(YAML_PARAMS_STR)\n params = load_params(\"./params.yaml\")\n assert params == YAML_PARAMS\n assert isinstance(params[\"x\"], float)\n assert isinstance(params[\"y\"], int)", "def _getConfigParam(self, name, default=None):\n return self.config.get(self._configPrefix + name.lower(), default)", "def add_fixed_parameters_from_config_file(self, config_file):\n pass", "def testInitConfFromString():\n\n testString = \"x=5;b='hallo'\"\n conf = naiveConf.NaiveConf(testString)\n\n assert conf.x == 5\n assert conf.b == 'hallo'", "def _cv_input_number(cfg):\n minimum = cfg.get(CONF_MIN)\n maximum = cfg.get(CONF_MAX)\n if minimum >= maximum:\n raise vol.Invalid(\n f\"Maximum ({minimum}) is not greater than minimum ({maximum})\"\n )\n state = cfg.get(CONF_INITIAL)\n if state is not None and (state < minimum or state > maximum):\n raise vol.Invalid(f\"Initial value {state} not in range {minimum}-{maximum}\")\n return cfg", "def read_config(config, section, item):\n value = config.get(section, item)\n if value == \"None\":\n return None\n return value", "def _sanitize_param(self, param):\n if param:\n # Can't send unicode.\n param = str(param)\n return param", "def testConfigB(self):\n assert type(self.config['sensitivity']) == int, \"Not parsing and converting digit correctly\"", "def get_loss_config(config):\n if type(config) != str:\n return config\n else:\n\n return loss_str2obj[config.lower()]", "def copy_config(cfg):\n res= dict(cfg)\n #model_param = dict(cfg['model_param'])\n model_param = dict(cfg.get('model_param', {}))\n res['model_param'] = model_param\n return res", "def convert_spec(spec):\n config = configobj.ConfigObj(configspec=spec)\n\n return config.configspec", "def load_config(self):\n\n try:\n file = open(self.cfg_file_name, 'r')\n str_data = file.read()\n except OSError as err:\n print(\"can't load property: {0}\".format(err))\n return None #wx.DefaultPosition\n else:\n file.close()\n #print(\"position loaded\")\n data = json.loads(str_data)\n return data #wx.Point(*position)", "def setconfig(filepath, param, value):\n\n with open(filepath, 'rb') as f:\n lines = f.readlines()\n with open(filepath, 'wb') as f:\n updated = False\n for line in lines:\n if line.strip().startswith('#') or '=' not in line:\n # keep comments and other non informative lines unchanged\n f.write(line)\n continue\n k, v = line.split('=', 1)\n if k.strip() == param:\n # update with new value\n f.write('%s=%s\\n' % (param, value))\n updated = True\n else:\n # keep line unchanged\n f.write(line)\n if not updated:\n # append the new param at the end of the file\n f.write('%s=%s\\n' % (param, value))", "def set_param(self, param_value):\n with open(\"settings.txt\", \"r\") as f:\n filedata = f.read()\n settings = [_.split(\"=\") for _ in filedata.split(\"\\n\")]\n for setting in settings:\n if len(setting) < 2: # if blank line\n continue\n if setting[0] == self.param:\n\n setting[1] = param_value\n\n with open(\"settings.txt\", \"w\") as f:\n for setting in settings:\n if len(setting) < 2: # if blank line\n continue\n f.write(setting[0] + \"=\" + setting[1] + \"\\n\")", "def getstr(self, sec, name, default=None, badtypeok=False, morevars=None,\n taskvars=None):\n if sec in self.OLD_SECTIONS:\n sec = 'config'\n\n try:\n return super().getstr(sec, name, default=None,\n badtypeok=badtypeok, morevars=morevars,\n taskvars=taskvars).replace('//', '/')\n except NoOptionError:\n # if config variable is not set\n 
self.check_default(sec, name, default)\n return default.replace('//', '/')", "def update_settings(self, param):\n if param.name() == '':\n pass", "def get(self,section,key):\n value = ConfigParser.get(self,section,key)\n if value.startswith('\"') or value.startswith(\"'\"):\n return value\n if re.search(r\":\",value):\n out_dict = {}\n pieces = valuesplit(\",\")\n for piece in pieces:\n key,v = piece.split(\":\")\n out_dict[key] = translate(v)\n return out_dict\n elif re.search(\",\",value):\n values = value.split(\",\")\n return [translate(v) for v in values]\n return translate(value)", "def validate_settings(_cfg, _ctx):\n pass", "def string_to_param(self,string):\n\n if (string.startswith(\"log_\")): return math.log10(self.string_to_param(string[4:]))\n if (string.startswith(\"ln_\")): return math.log(self.string_to_param(string[3:]))\n if (string.startswith(\"exp_\")): return math.exp(self.string_to_param(string[4:]))\n if (string == \"Mass\"): return self.glb[imass]/constants.solar_mass\n if (string == \"Radius\"): return self.glb[iradius]/constants.solar_radius\n if (string == \"Luminosity\"): return self.glb[iluminosity]/constants.solar_luminosity\n if (string == \"Z\"): return self.glb[iz0]\n if (string == \"Y\"): return 1.0-self.glb[iz0]-self.glb[ix0]\n if (string == \"X\"): return self.glb[ix0]\n if (string == \"Ys\"): return 1.0-self.glb[user_params_index[\"Zs\"]]-self.glb[user_params_index[\"Xs\"]]\n if (string == \"zsx_s\"): return self.zsx_s\n if (string == \"zsx_0\"): return self.zsx_0\n if (string == \"Fe_H\"): return self.FeH\n if (string == \"M_H\"): return self.MH\n if (string == \"Age\"): return self.glb[iage]\n if (string == \"Teff\"): return self.glb[itemperature]\n if (string == \"Dnu\"): return self.find_large_separation()*self.glb[ifreq_ref]\n if (string == \"numax\"): return self.numax\n if (string == \"Rho\"): return 3.0*self.glb[imass]/(4.0*math.pi*self.glb[iradius]**3)\n if (string == \"g\"): return constants.G*self.glb[imass]/self.glb[iradius]**2\n if (string == \"beta_Sonoi2015\"): return self.beta_Sonoi2015\n if (string == \"b_Kjeldsen2008\"): return self.b_Kjeldsen2008\n\n try:\n return self.glb[user_params_index[string]]\n except KeyError:\n sys.exit(\"ERROR: unrecognised model quantity: \"+string)", "def config_debug():\n config_str = request.args.get('config')\n config = pickle.loads(b64decode(config_str))\n return str(config)", "def set_param_raw(param, value, verbose=False):\n if type(value) == dict:\n # #1098 changing dictionary behavior to be an update, rather\n # than replace behavior.\n for k, v in value.items():\n # dictionary keys must be non-unicode strings\n if isinstance(k, str):\n set_param_raw(ns_join(param, k), v, verbose=verbose)\n else:\n raise RosParamException(\"YAML dictionaries must have string keys. 
Invalid dictionary is:\\n%s\"%value)\n else:\n try:\n expected_type = long\n except NameError :\n expected_type = int\n \n if type(value) == expected_type:\n if value > sys.maxsize:\n raise RosParamException(\"Overflow: Parameter Server integers must be 32-bit signed integers:\\n\\t-%s <= value <= %s\"%(maxint - 1, maxint))\n \n try:\n get_param_server().setParam(param, value)\n except socket.error:\n raise RosParamIOException(\"Unable to communicate with master!\")\n if verbose:\n print(\"set parameter [%s] to [%s]\"%(param, value))", "def get_config_params(config_file_name=\"config\\import.ini\"):\n\n configParser=configparser.ConfigParser()\n configfile=config_file_name\n configfile_path=os.path.abspath(configfile)\n configParser.read(configfile_path)\n # READING INI FILE\n #db params\n db_params=dict()\n db_params['user']=configParser.get('DB','pangaea_db_user')\n db_params['pwd']=configParser.get('DB','pangaea_db_pwd')\n db_params['db']=configParser.get('DB','pangaea_db_db')\n db_params['host']=configParser.get('DB','pangaea_db_host')\n db_params['port']=configParser.get('DB','pangaea_db_port')\n #terminologies\n terminologies_params=configParser.get('INPUT','terminologies') # parameters for each terminology as JSON str\n terminologies_params_parsed=json.loads(terminologies_params)\n\n return db_params,terminologies_params_parsed", "def extract_paramval(self, injparams, systkey, fhkey=None,\n paramlabel=None, smalllabel=False):\n paramval = float(injparams[systkey].split(' ')[0])\n if (fhkey is None) or (paramlabel is None):\n if not ((fhkey is None) and (paramlabel is None)):\n raise ValueError(\n \"Either both fhkey and paramlabel must be\"\n \" None or they must both be specified.\"\n )\n return paramval\n else:\n hypo = self.get_hypo_from_fiducial_hypo_key(fhkey=fhkey)\n hypo_label = self.labels.dict['%s_name'%hypo]\n if systkey == 'deltam31':\n if 'no' in hypo_label:\n if np.sign(paramval) != 1:\n paramval = -1*float(injparams[systkey].split(' ')[0])\n if not smalllabel:\n paramlabel += r' ($\\times-1$)'\n elif 'io' in hypo_label:\n if np.sign(paramval) != -1:\n paramval = -1*float(injparams[systkey].split(' ')[0])\n if not smalllabel:\n paramlabel += r' ($\\times-1$)'\n\n if (np.abs(paramval) < 1e-2) and (paramval != 0.0):\n paramlabel += ' = %.2e'%paramval\n else:\n paramlabel += ' = %.3g'%paramval\n\n return paramval, paramlabel", "def read_config(self, config_filename):", "def do_config(self, line: str):\n if self._real_module is None:\n print(\"'config' command depends on using a module. 
See 'use' for help.\")\n return\n\n settings = self._real_module.params()\n if line != '':\n value = settings.get(line, None)\n if value is not None:\n print(value[\"Value\"])\n else:\n print(settings)", "def load_parameter(fname, col_name):\r\n \r\n wb = load_workbook(fname, data_only=True)\r\n ws = wb['Data'] # Load Data sheet of excel file\r\n\r\n # read keys and values from Excel sheet\r\n keys = (c.value for c in ws['E'][1:])\r\n values = (c.value if c.value != 'ns' else None for c in ws[col_name][1:])\r\n \r\n parameter = dict(zip(keys, values))\r\n\r\n # deletes entries where key is None\r\n del parameter[None]\r\n\r\n # Assign specific parameters\r\n parameter['P_PV2AC_out_PVINV'] = ws[col_name][15].value\r\n parameter['P_PV2AC_out'] = ws[col_name][24].value\r\n parameter['P_AC2BAT_in_DCC'] = ws[col_name][25].value\r\n parameter['P_AC2BAT_in'] = ws[col_name][26].value\r\n parameter['P_BAT2AC_out'] = ws[col_name][27].value\r\n parameter['P_BAT2AC_out_DCC'] = ws[col_name][28].value\r\n\r\n # Set refrence case values to boolean\r\n if parameter['ref_1'] == 'yes':\r\n parameter['ref_1'] = True\r\n elif parameter['ref_1'] == 'no':\r\n parameter['ref_1'] = False\r\n \r\n if parameter['ref_2'] == 'yes':\r\n parameter['ref_2'] = True\r\n elif parameter['ref_2'] == 'no':\r\n parameter['ref_2'] = False\r\n\r\n # Specific parameters of DC-coupled systems\r\n if parameter['Top'] == 'DC':\r\n parameter['P_AC2BAT_in'] = parameter['P_AC2BAT_in_DCC'] # Nominal charging power (AC) in kW\r\n parameter['P_BAT2AC_out'] = parameter['P_BAT2AC_out_DCC']\r\n \r\n # Specific parameters of PV inverters and AC-coupled systems\r\n if parameter['Top'] == 'PVINV' or parameter['Top'] == 'AC' and parameter['P_PV2AC_out_PVINV'] is not None:\r\n parameter['P_PV2AC_out'] = parameter['P_PV2AC_out_PVINV']\r\n \r\n # Specific parameters of PV-coupled systems\r\n if parameter['Top'] == 'PV':\r\n parameter['P_BAT2PV_in'] = parameter['P_BAT2AC_in']\r\n parameter['P_BAT2AC_out'] = parameter['P_BAT2AC_out_DCC']\r\n\r\n # replace 'ns', 'o' and 'c' entries to None\r\n for key, value in parameter.items():\r\n if value == 'ns' or value == 'o' or value == 'c' or value == ' ':\r\n parameter[key] = None\r\n\r\n # Convert to kW\r\n convert_to_kw = ['P_PV2AC_in', 'P_PV2AC_out_PVINV','P_PV2AC_out','P_AC2BAT_in_DCC','P_AC2BAT_in','P_BAT2AC_out',\r\n 'P_BAT2AC_out_DCC','P_PV2BAT_in','P_BAT2PV_out','P_PV2BAT_out','P_BAT2AC_in']\r\n\r\n for par in convert_to_kw:\r\n if parameter[par]:\r\n parameter[par] /= 1000\r\n \r\n return parameter", "def get_config_setting(setting: str) -> Any:\n try:\n with open('config.json') as config_file:\n data = json.load(config_file)\n return data[setting]\n except Exception as e:\n print(f'Failed get json setting. 
Error: {e}')", "def read_var(strvar, strl):\n\n if strvar + ' = ' in strl:\n var = strl.split('= ', 1)[1]\n\n if (var == 'None') or (var == 'default') or (var == 'no'):\n var = None\n\n elif ((strvar == 'year') or (strvar == 'Nsteps') or (strvar == 'fopt')\n or (strvar == 'window')):\n var = int(var)\n\n elif (strvar == 'doy') or (strvar == 'Ndays'):\n var = float(var)\n\n elif (strvar == 'models'):\n if (',' in var):\n var2 = var.split(', ')\n\n if len(var2) < 2: # needed on some systems\n var2 = var.split(',')\n\n var = var2\n\n else:\n var = [var]\n\n return var\n\n else:\n return", "def _readConfigOption(self, section, option, returnBool=False, returnList=False):\n value = self.configParser.get(section,option)\n if(value != None and value):\n if(returnBool):\n return self._str2bool(value)\n elif(returnList):\n return value.split(\",\")\n else:\n return value\n \n return getattr(self, option)", "def _check_config(self):", "def test_from_yaml_file_like():\n params = load_params(StringIO(YAML_PARAMS_STR))\n assert params == YAML_PARAMS\n assert isinstance(params[\"x\"], float)\n assert isinstance(params[\"y\"], int)", "def test_from_yaml_string():\n params = load_params(YAML_PARAMS_STR)\n assert params == YAML_PARAMS\n assert isinstance(params[\"x\"], float)\n assert isinstance(params[\"y\"], int)", "def test_unsafe(self):\n\n conf = configuration(\n category(\n 'test',\n Parameter('test', svalue='=int')\n )\n )\n\n configurable = Configurable(conf=conf, safe=False)\n\n configurable.applyconfiguration(targets=[configurable], paths='test')\n\n self.assertIs(configurable.test, int)", "def test_configurations_create_invalid_value_type(self):\n values = '{\"key_buffer_size\": \"this is a string not int\"}'\n assert_unprocessable(instance_info.dbaas.configurations.create,\n CONFIG_NAME, values, CONFIG_DESC)", "def __set_special_config_values(cfg: __Config, config: dict) -> \"__Config\":\n cfg.file_name_plane_masks = lambda i: str(i) + config['file_name_plane_mask_suf']\n cfg.file_name_planercnn_image = lambda i: str(i) + config['file_name_planercnn_image_suf']\n cfg.dir_results = f\"{cfg.edge_detection_type}\" # will be the output folder, create in data dir\n cfg.image_size = tuple(int(x) for x in config['image_size'].split(\" \"))\n return cfg", "def check_info(self):\n #define convertissor\n def pass_in_integer(value):\n return int(value)\n def pass_in_logical(value):\n if value in ['1','t','T','.true.']:\n return 1\n else:\n return 0\n def pass_in_float(value):\n return float(value)\n\n for line in open('./Source/MadWeight_File/Python/MW_param_default.inc'):\n line=line.split('#')[0] #remove comment\n splitline=line.split() #split the data\n if len(splitline)!=4:\n continue\n #assign element\n block=splitline[0].lower()\n tag=splitline[1].lower()\n type=splitline[2].lower()\n value=splitline[3]\n #check if exist -> default\n try:\n self[block][tag]\n except:\n try:\n self[block][tag]=value\n except:\n self[block]={tag:value}\n #change type\n if type in ['integer','logical','float']:\n self[block][tag]=eval('pass_in_'+type+'(self[block][tag])')", "def _GetConfigValue(self, config_parser, section_name, value_name):\n try:\n return config_parser.get(section_name, value_name).decode('utf-8')\n except configparser.NoOptionError:\n return", "def _parse_and_validate(self, val):\n if self._is_parameter_type:\n val = self._parse(val) if isinstance(val, str) else val\n self._validate_or_throw(val)\n return val", "def load_from_conf(self):\r\n raise NotImplementedError", "def _conversion(self, val):\n 
if (self.__set_type == \"str\"):\n return val\n else:\n try:\n return ast.literal_eval(val)\n except ValueError:\n return None", "def value_from_str(self, s):\n if s == 'sys.stderr':\n ### print(\"DecoSettingFile.value_from_str, s=%s, returning %r (sys.stderr?)\" % (s, sys.stderr))\n return sys.stderr\n # 'sys.stdout' ultimately becomes None via this:\n return super().value_from_str(s)", "def testConfigE(self):\n assert type(self.config['game_width']) == int, \"Not parsing the game width correctly\"", "def load_params():\n with open('params.p', mode='rb') as in_file:\n return pickle.load(in_file)", "def readParamFromFile(self, file, sect):\r\n f = configparser.ConfigParser()\r\n f.read(file)\r\n # s = f.sections()\r\n # print(s)\r\n\r\n self.m_param = dict(f.items(sect))\r\n print(self.m_param)\r\n # print(len(self.m_param))\r", "def parseConfig(f):\n config = {\"formats\":{}}\n \n for line in f:\n if line.startswith(\"//\"): \n continue\n \n sline = re.split(\"[=\\s]\", line)\n if sline[0] is \"\":\n continue\n \n if sline[0]==\"format\":\n #Puts the format as a key in the dict pointed to by \"formats\"\n config[\"formats\"][sline[1]] = sline[3] \n else:\n config[sline[0]] = sline[1]\n \n return config", "def read_param_phil(self):\n\n if self.params.analysis.run_clustering:\n self.clustering.toggle_boxes()\n self.clustering.threshold.SetValue(\n str(self.params.analysis.cluster_threshold))\n self.clustering.limit.SetValue(\n str(self.params.analysis.cluster_limit))\n self.clustering.n_images.SetValue(str(\n self.params.analysis.cluster_n_images))\n\n viz_idx = self.visualization.ctr.FindString(str(self.params.analysis.viz))\n if str(self.params.analysis.viz).lower() == 'none':\n viz_idx = 0\n self.visualization.ctr.SetSelection(viz_idx)\n\n self.proc_charts.SetValue(self.params.analysis.charts)\n self.summary_graphs.SetValue(self.params.analysis.summary_graphs)" ]
[ "0.62456924", "0.5969067", "0.58489376", "0.5828713", "0.57924104", "0.57802683", "0.57431704", "0.56006944", "0.5588103", "0.55810744", "0.55671716", "0.5529748", "0.5526934", "0.55040497", "0.55028737", "0.54622763", "0.5404378", "0.5393546", "0.53748494", "0.5366472", "0.53552395", "0.5332203", "0.5316083", "0.5260408", "0.5257586", "0.52502704", "0.52240396", "0.5187189", "0.5182608", "0.51808715", "0.51643515", "0.516152", "0.5157124", "0.5157085", "0.5156823", "0.5154911", "0.5145349", "0.5140534", "0.51292455", "0.5121403", "0.51037484", "0.5098172", "0.5093826", "0.5091383", "0.50907356", "0.5090299", "0.50797623", "0.50761914", "0.5075372", "0.50739706", "0.5073521", "0.5067388", "0.5063683", "0.50575775", "0.505322", "0.5037901", "0.50333196", "0.50319266", "0.5016635", "0.50091803", "0.4975007", "0.49715978", "0.49672452", "0.49594992", "0.49572873", "0.49562585", "0.49560934", "0.4954515", "0.49534252", "0.49527845", "0.49361038", "0.49269208", "0.49090698", "0.49063176", "0.4904253", "0.490392", "0.48980308", "0.48920232", "0.48894036", "0.48883626", "0.48875195", "0.48864833", "0.48804536", "0.4870336", "0.4865577", "0.48636854", "0.48635706", "0.4860835", "0.48600143", "0.4857866", "0.48548144", "0.48521543", "0.4847155", "0.4845128", "0.4839482", "0.48324236", "0.4830157", "0.48294732", "0.48286813", "0.4827078", "0.48256263" ]
0.0
-1
Calculate spatial output shape after convolution.
def out_conv(spatial, config):
    p, k, s = [config[k]
               for k in ['padding', 'kernel_size', 'stride']]
    p2 = p if isinstance(p, int) else p[0] + p[1]
    return (spatial + p2 - k)//s + 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_output_shape(self, input_shape):\r\n return input_shape", "def output_shape(self):\r\n return self.detector.output_shape", "def get_output_shape(self):\n weights = self.W.get_shape().as_list()\n input_size = np.asarray(self.incoming_shape[-3:-1])\n strides = np.asarray(self.strides[-3:-1])\n kernels = np.asarray(weights[0:2])\n num_output = weights[-1]\n dilations = np.asarray(self.dilation_rate)\n if (isinstance(self.padding, list) or isinstance(self.padding, tuple)) and len(self.padding) == 2:\n output_size = np.asarray(\n np.ceil((input_size + 2 * np.asarray(self.padding) - kernels - (kernels - 1) * (\n dilations - 1)) / strides + 1),\n dtype=np.int)\n else:\n output_size = np.asarray(\n np.ceil(input_size / strides) if self.padding == \"SAME\" or self.padding == \"ZEROPAD\" else np.ceil(\n (input_size - (kernels - 1) * dilations) / strides), dtype=np.int)\n \n output_shape = self.incoming_shape[:]\n output_shape[-3:-1] = output_size.tolist()\n output_shape[-1] = num_output\n return output_shape", "def get_output_shape(self):\n return self.output_shape", "def get_output_shape(self):\n return self.output_shape", "def get_output_shape(self):\n return self.output_shape", "def get_output_shape(self):\n return self.output_shape", "def get_output_shape(self):\n return self.output_shape", "def output_shape(self):\n raise NotImplementedError", "def get_output_shape(self):\n return self.shape", "def compute_output_shape(self,input_shape):\n return (input_shape[0][0])", "def get_output_shape(self):\n return self.incoming_shape", "def get_output_shape(self):\n return self.incoming_shape", "def get_output_shape(self):\n return self.incoming_shape", "def get_output_shape(self):\n return self.out.shape.as_list()", "def output_shape(self) ->torch.Size:\n return self._computed_output_shape()", "def compute_output_shape(self, s):\n return s[0], s[1] + 2 * self.padding[0], s[2] + 2 * self.padding[1], s[3]", "def compute_output_shape(self, input_shape):\n return (input_shape[0][0], input_shape[0][1] + 2)", "def set_output_shape(self):\n self.output_shape = ((self.input_shape[0] // self.stride[0],\n self.input_shape[1] // self.stride[1],\n self.input_shape[2]\n ))", "def compute_output_shape(self, input_shape):\n \n assert input_shape and len(input_shape) == 2\n return input_shape[0], self.n_clusters", "def set_output_shape(self):\n self.output_shape = (reduce(mul, self.input_shape),)", "def get_output_shape(self):\n return self.incoming_shapes[0][:-1] + [sum([s[-1] for s in self.incoming_shapes])]", "def _get_conv_out(self, shape) -> int:\n conv_out = self.conv(torch.zeros(1, *shape))\n return int(np.prod(conv_out.size()))", "def _get_conv_out(self, shape) -> int:\n conv_out = self.conv(torch.zeros(1, *shape))\n return int(np.prod(conv_out.size()))", "def _get_conv_out(self, shape) -> int:\n conv_out = self.conv(torch.zeros(1, *shape))\n return int(np.prod(conv_out.size()))", "def get_output_shape(self):\n return []", "def compute_output_shape(self, input_shape):\n return [(input_shape[0], self.output_dim), (input_shape[0], self.output_dim)]", "def get_output_shape(self):\n # TODO: return shape without construction of graph\n return self.get_output(comp_next_seq_pos=False).get_shape().as_list()", "def get_output_shape(self):\n # TODO: return shape without construction of graph\n return self.get_output(comp_next_seq_pos=False).get_shape().as_list()", "def compute_output_shape(self, input_shape):\n return (input_shape[0][0], input_shape[0][1] + 1)", "def compute_output_shape(self, 
input_shape):\n return (input_shape[0][0], input_shape[0][1] + 1)", "def output_shape_param(self, param):\n\t\tindex = self.variables['output_format'].index(param)\n\t\treturn self.variables['output'].shape[index]", "def output_shape(self) ->torch.Size:\n input_shape = self.input_shape\n if self._reduce_mode in {None, 'none', 'None'}:\n return input_shape\n elif self._reduce_mode == 'concat':\n if len(input_shape) > 1:\n return input_shape[:-2] + (input_shape[-1] * input_shape[-2],)\n return input_shape\n else:\n return input_shape[1:]", "def compute_output_shape(self, input_shape):\n if tf.keras.backend.image_data_format() == 'channels_first':\n return (input_shape[0][0], input_shape[0][1]) + input_shape[1][2:4]\n\n return (input_shape[0][0],) + input_shape[1][1:3] + (input_shape[0][-1],)", "def compute_output_shape(self, input_shape):\n output_shape = [0] * self.rank\n for d in range(self.rank):\n output_shape[d] = sum(self.paddings[d]) + input_shape[d]\n return tf.TensorShape(output_shape)", "def get_output_shape(self):\n return self.incoming_shapes[0]", "def get_output_shape(self):\n return self.incoming_shapes[0]", "def get_output_shape(self):\n return [s if isinstance(s, int) and s >= 0 else -1 for s in self.incoming_shape[:-1]] + [self.n_units]", "def output_width(self):\n\t\treturn self.output_shape_param('W')", "def conv_output_shape(h_w, kernel_size=1, stride=1, pad=0, dilation=1):\n if type(kernel_size) is not tuple:\n kernel_size = (kernel_size, kernel_size)\n h = math.floor( ((h_w[0] + (2 * pad) - ( dilation * (kernel_size[0] - 1)\n ) - 1 )/ stride) + 1)\n w = math.floor( ((h_w[1] + (2 * pad) - ( dilation * (kernel_size[1] - 1)\n ) - 1 )/ stride) + 1)\n return h, w", "def get_output_shape(self) -> List[int]:\n if -1 not in self.output_shape:\n return self.output_shape\n\n total_input_dims = np.prod(self.input_shape)\n\n dim = 1\n for i in self.output_shape:\n if i != -1:\n dim *= i\n missing_dim = int(total_input_dims / dim)\n\n output_shape = self.output_shape\n for ix, dim in enumerate(output_shape):\n if dim == -1:\n output_shape[ix] = missing_dim\n\n return output_shape", "def get_output2_shape(self):\n return self.__y2.shape", "def conv_output_shape(\n h_w: Tuple[int, int],\n kernel_size: int = 1,\n stride: int = 1,\n pad: int = 0,\n dilation: int = 1,\n ):\n h = floor(\n ((h_w[0] + (2 * pad) - (dilation * (kernel_size - 1)) - 1) / stride) + 1\n )\n w = floor(\n ((h_w[1] + (2 * pad) - (dilation * (kernel_size - 1)) - 1) / stride) + 1\n )\n return h, w", "def conv_output_shape(h_w, kernel_size=1, stride=1, pad=0, dilation=1):\n from math import floor\n if type(kernel_size) is not tuple:\n kernel_size = (kernel_size, kernel_size)\n\n if type(pad) is not tuple:\n pad = (pad, pad)\n\n h = floor(((h_w[0] + (2 * pad[0]) - (dilation * (kernel_size[0] - 1)) - 1) / stride) + 1)\n w = floor(((h_w[1] + (2 * pad[1]) - (dilation * (kernel_size[1] - 1)) - 1) / stride) + 1)\n return h, w", "def conv2d_output_shape(height, width, filter_height, filter_width, out_channels, stride):\n return (out_channels, ((height - filter_height) / stride + 1), ((width - filter_width) / stride + 1))", "def compute_output_shape(self, input_shape):\n return [\n tf.TensorShape((input_shape[1][0], input_shape[1][1], input_shape[1][2])),\n tf.TensorShape((input_shape[1][0], input_shape[1][1], input_shape[0][1]))\n ]", "def conv_output_shape(h_w, kernel_size=1, stride=1, pad=0, dilation=1):\n from math import floor\n if type(kernel_size) is not tuple:\n kernel_size = (kernel_size, kernel_size)\n h = 
floor(((h_w[0] + (2 * pad) - (dilation *\n (kernel_size[0] - 1)) - 1) / stride) + 1)\n w = floor(((h_w[1] + (2 * pad) - (dilation *\n (kernel_size[1] - 1)) - 1) / stride) + 1)\n return h, w", "def getOutShapes(self):\n\t\treturn self.output_shape", "def get_output_shape(self):\n # Get shape of output tensor(s), which will be [samples, n_seq_pos+n_tickersteps, x, y, n_units]\n return [s if isinstance(s, int) and s >= 0 else -1\n for s in self.incoming_shape[0:2] + self.incoming_shape[2:-1] + [self.n_units]]", "def conv_output_shape(h_w, kernel_size=1, stride=1, pad=0, dilation=1):\n \n if type(h_w) is not tuple:\n h_w = (h_w, h_w)\n \n if type(kernel_size) is not tuple:\n kernel_size = (kernel_size, kernel_size)\n \n if type(stride) is not tuple:\n stride = (stride, stride)\n \n if type(pad) is not tuple:\n pad = (pad, pad)\n \n h = (h_w[0] + (2 * pad[0]) - (dilation * (kernel_size[0] - 1)) - 1)// stride[0] + 1\n w = (h_w[1] + (2 * pad[1]) - (dilation * (kernel_size[1] - 1)) - 1)// stride[1] + 1\n \n return h, w", "def shape(self,squeeze=True):\n return np.shape(self.getData(squeeze=squeeze))", "def get_output1_shape(self):\n return self.__y1.shape", "def kernel_output(self):\n\t\treturn self.kernel_shape_param('O')", "def get_output_shape(self):\n # Get shape of output tensor(s), which will be [samples, n_seq_pos+n_tickersteps, n_units]\n return [s if isinstance(s, int) and s >= 0 else -1 for s in self.incoming_shape[0:2] + [self.n_units]]", "def get_output_shape(self):\n # Get shape of output tensor(s), which will be [samples, n_seq_pos+n_tickersteps, n_units]\n return [s if isinstance(s, int) and s >= 0 else -1 for s in self.incoming_shape[0:2] + [self.n_units]]", "def get_output_shape(self):\n # Get shape of output tensor(s), which will be [samples, n_seq_pos+n_tickersteps, n_units]\n return [s if isinstance(s, int) and s >= 0 else -1 for s in self.incoming_shape[0:2] + [self.n_units]]", "def num_conv_locations(input_shape, filter_shape, strides, padding):\n if len(input_shape) != 4 and len(input_shape) != 3:\n raise ValueError(\"input_shape must be length 4, corresponding to a Conv2D,\"\n \" or length 3, corresponding to a Conv1D.\")\n if len(input_shape) != len(filter_shape):\n raise ValueError(\"Inconsistent number of dimensions between input and \"\n \"filter for convolution\")\n\n if strides is None:\n if len(input_shape) == 4:\n strides = [1, 1, 1, 1]\n else:\n strides = [1, 1, 1]\n\n # Use negative integer division to implement 'rounding up'.\n # Formula for convolution shape taken from:\n # http://machinelearninguru.com/computer_vision/basics/convolution/convolution_layer.html\n if len(input_shape) == 3:\n if padding is not None and padding.lower() == \"valid\":\n out_width = -(-(input_shape[1] - filter_shape[0] + 1) // strides[1])\n else:\n out_width = -(-input_shape[1] // strides[1])\n\n return out_width\n else:\n if padding is not None and padding.lower() == \"valid\":\n out_height = -(-(input_shape[1] - filter_shape[0] + 1) // strides[1])\n out_width = -(-(input_shape[2] - filter_shape[1] + 1) // strides[2])\n else:\n out_height = -(-input_shape[1] // strides[1])\n out_width = -(-input_shape[2] // strides[2])\n\n return out_height * out_width", "def get_output_shape(self, name):\n return self._shape_dict.get(name)", "def get_shape(self):\n if not self.channels_first:\n return [None] + self.w + [self.numoffeatures]\n else:\n return [None] + [self.numoffeatures] + self.w", "def compute_output_shape(self, input_shape):\n logging.warning(\n 'All custom layers should implement 
the '\n '`compute_output_shape` method. This layer (' + self.name + ') '\n 'is relying on the base `Layer.compute_output_shape` implementation, '\n 'which will start raising a `NotImplementedError` '\n 'as of July 1st, 2018.')\n return input_shape", "def out_tconv(spatial, config):\n p, k, s = [config[k] \n for k in ['padding', 'kernel_size', 'stride']]\n p2 = p if isinstance(p, int) else p[0] + p[1]\n p_out = config.get('output_padding', 0)\n return (spatial-1)*s - p2 + k + p_out", "def get_output_shape_for(self, input_shape):\n # Extract nodes and membership\n atom_features_shape = input_shape[0]\n # membership_shape = input_shape[2]\n\n # assert (len(atom_features_shape) == 2,\n # \"GraphGather only takes 2 dimensional tensors\")\n n_feat = atom_features_shape[1]\n return self.batch_size, n_feat", "def shape(self):\n return np.array([self.w, self.h])", "def compute_level_output_shape(n_filters, depth, pool_size, image_shape):\n output_image_shape = np.asarray(np.divide(image_shape, np.power(pool_size, depth)), dtype=np.int32).tolist()\n return tuple([None] + output_image_shape + [n_filters])", "def kernel_shape(self):\n\t\treturn self.weights_shape()", "def add_input_and_output_shape(self, input_shape, output_shape):", "def compute_level_output_shape(n_filters, depth, pool_size, image_shape):\n output_image_shape = np.asarray(np.divide(image_shape, np.power(pool_size, depth)), dtype=np.int32).tolist()\n return tuple([None, n_filters] + output_image_shape)", "def compute_level_output_shape(n_filters, depth, pool_size, image_shape):\n output_image_shape = np.asarray(np.divide(image_shape, np.power(pool_size, depth)), dtype=np.int32).tolist()\n return tuple([None, n_filters] + output_image_shape)", "def inputShape(self):\n return self.input_shape", "def __extract_graph_shape(self):\n circuit = UbqcClient.pb_to_circuit(self.program)\n bw_pattern = transpile_to_brickwork(circuit)\n\n # Get shape\n input_ = bw_pattern.input_\n c_out, q_out = bw_pattern.output_\n output_ = c_out + q_out\n width = len(input_)\n depth = output_[0][1] - input_[0][1] + 1\n\n return width, depth", "def get_output_dim(self) -> int:\n raise NotImplementedError", "def shape(self):\n return self.X.shape", "def input_shape(self):\n return self._ipt_shape", "def shape(self):\n return self._img.shape", "def _output_size_conv2d(conv, size):\n o_size = np.array(size) + 2 * np.array(conv.padding)\n o_size -= np.array(conv.dilation) * (np.array(conv.kernel_size) - 1)\n o_size -= 1\n o_size = o_size / np.array(conv.stride) + 1\n return np.floor(o_size)", "def __len__(self):\n num_x, num_y = self.conv_dims()\n return num_x * num_y", "def calculate_flatten_output_shapes(operator):\n check_input_and_output_numbers(operator, input_count_range=1, output_count_range=1)\n check_input_and_output_types(operator, good_input_types=[FloatTensorType])\n\n input = operator.inputs[0]\n output = operator.outputs[0]\n\n if len(input.type.shape) not in [2, 4]:\n raise RuntimeError(\"Input must be 2-D or 4-D float tensor\")\n\n input_shape = input.type.shape\n output_shape = [input_shape[0], 1]\n\n # Calculate the multiplication of C, H, and W.\n for i in input_shape[1:]:\n if i != \"None\":\n output_shape[1] *= i\n else:\n # If any of C, H, W-dimensions is unknown, the flatten C-dimension is unknown\n output_shape[1] = \"None\"\n break\n\n output.type.shape = output_shape", "def get_output_dim(self) -> int:\n raise NotImplementedError", "def infer_shape(self, node, input_shapes):\r\n \"\"\"TODO: Your code here\"\"\"\r\n assert len(input_shapes) 
== 1\r\n N, C, H, W = input_shapes[0]\r\n p_H = (H + 2 * self.padding - self.kernel_H) / self.stride + 1\r\n p_W = (W + 2 * self.padding - self.kernel_W) / self.stride + 1\r\n return (N, C, p_H, p_W)", "def get_conv1d_output_size(input_size, kernel_size, stride):\n return ((input_size - kernel_size)//stride) + 1", "def get_conv1d_output_size(input_size, kernel_size, stride):\n return ((input_size - kernel_size)//stride) + 1", "def shape(self):\n\t\treturn self.variable.shape\n\t\t#return tuple([s.stop-s.start-1 for s in self._subset])", "def get_shape(self):\n return shape(self._data)", "def clConvolution(self, size, mask):", "def compute_output_shape(self, input_shape):\n batch_size = input_shape[0]\n sequence_length = input_shape[1]\n return (batch_size, sequence_length)", "def calc_conv_out_dims(X_shape, W_shape, stride=1, pad=0, dilation=0):\n\tdummy = np.zeros(X_shape)\n\ts, p, d = stride, pad, dilation\n\tif len(X_shape) == 3:\n\t\t_, p = pad1D(dummy, p)\n\t\tpw1, pw2 = p\n\t\tfw, in_ch, out_ch = W_shape\n\t\tn_ex, in_length, in_ch = X_shape\n\n\t\t_fw = fw * (d+1) - d\n\t\tout_length = (in_length + pw1 + pw2 - _fw) // s + 1\n\t\tout_dim = (n_ex, out_length, out_ch)\n\n\telif len(X_shape) == 4:\n\t\t_, p = pad2D(dummy, p)\n\t\tpr1, pr2, pc1, pc2 = p\n\t\tfr, fc, in_ch, out_ch = W_shape\n\t\tn_ex, in_rows, in_cols, in_ch = X_shape\n\n\t\t# adjust effective filter size to account for dilation\n\t\t_fr, _fc = fr * (d+1) - d, fc * (d+1) - d\n\t\tout_rows = (in_rows + pr1 + pr2 - _fr) // s + 1\n\t\tout_cols = (in_cols + pc1 + pc2 - _fc) // s + 1\n\t\tout_dims = (n_ex, out_rows, out_cols, out_ch)\n\telse:\n\t\traise ValueError(\"unrecognized number of the input dims: {}\".format(len(X_shape)))", "def output_size(self) -> int:\n return self.output_dim", "def get_output_shape(node: Node, out_port: int):\n if out_port not in node.out_ports():\n raise Exception('Can\\'t get shape for {} port of {} node. No such port in node'.format(out_port, node.name))\n out_port = node.out_port(out_port)\n return out_port.data.get_shape()", "def conv2d_nchw_spatial_pack(cfg, data, kernel, strides, padding, dilation, out_dtype):\n return conv2d_spatial_pack_nchw(\n cfg, data, kernel, strides, padding, dilation, out_dtype, num_tile=3\n )", "def shape(self):\n return self._shape", "def shape(self):\n return self.image.shape", "def get_input_shape(self):\n return self.__x.shape", "def output_shape_conv_and_pool_layer(rows: int,\n columns: int,\n kernel: int,\n stride: int = 1,\n padding: int = 0,\n dilatation: float = 1.) 
-> Tuple[int, int]:\n return (\n int((rows + 2 * padding - dilatation * (kernel - 1) - 1) / stride + 1),\n int((columns + 2 * padding - dilatation * (kernel - 1) - 1) / stride + 1),\n )", "def output(self, input, in_features, out_features,reuse=False):\n # with tf.variable_scope(self.name):\n # print('f'*20,input.get_shape().as_list(),in_features,out_features)\n w=self._create_weight([self.cnn_size,self.cnn_size,in_features,out_features],name='Wfn')\n out=self._conv2d(input,w,[1, self.cnn_stride, self.cnn_stride, 1],pre_name='convfn')\n return out", "def shape_in(self):\n return [c.size for c in self.coords]", "def shape(self):\n return self.__shape", "def shape(self):\n return self.__shape", "def reshape_output_shape(input_shape):\n shape_1 = input_shape[0]\n shape_2 = 384\n return(shape_1, shape_2)", "def convolution_shape(input_shape, n_filters, filter_shape, stride, padding):\n img_height, img_width, _ = input_shape\n height = (img_height + 2 * padding[0] - filter_shape[0]) / float(stride) + 1\n width = (img_width + 2 * padding[1] - filter_shape[1]) / float(stride) + 1\n\n return int(height), int(width), n_filters", "def _pred_shape(self, X):\n return X.shape[:-1] # X has Euler angles, while output is scalar" ]
[ "0.7263656", "0.718158", "0.7174481", "0.7144437", "0.7144437", "0.7144437", "0.7144437", "0.7144437", "0.71191394", "0.711027", "0.70433396", "0.695608", "0.695608", "0.695608", "0.67904943", "0.67573696", "0.6704987", "0.66797125", "0.66329926", "0.65868956", "0.6567974", "0.6564348", "0.65425193", "0.65425193", "0.65425193", "0.65335256", "0.6516564", "0.6515341", "0.6515341", "0.6514138", "0.6514138", "0.6512595", "0.6494228", "0.64284277", "0.6348743", "0.632009", "0.632009", "0.63128924", "0.6287846", "0.6183831", "0.6182475", "0.6178434", "0.61690116", "0.6164086", "0.6164019", "0.6162655", "0.615817", "0.6151343", "0.6101507", "0.60990715", "0.6095664", "0.608943", "0.607772", "0.6068423", "0.6068423", "0.6068423", "0.60479724", "0.6041097", "0.6038472", "0.60080385", "0.60005975", "0.5962999", "0.5959054", "0.5941858", "0.5910867", "0.5897509", "0.58965594", "0.58965594", "0.5884481", "0.5873863", "0.5835222", "0.58272666", "0.5820347", "0.581841", "0.5817615", "0.58150464", "0.58138555", "0.5813617", "0.5792931", "0.57543266", "0.57543266", "0.5738767", "0.57385886", "0.57348007", "0.57262576", "0.5719175", "0.57133675", "0.5704294", "0.5693447", "0.56853354", "0.5681791", "0.56757253", "0.5674636", "0.5671596", "0.5653112", "0.5649798", "0.5649798", "0.56468177", "0.56280214", "0.5618925" ]
0.67702305
15
Calculate spatial output shape after transpose convolution.
def out_tconv(spatial, config): p, k, s = [config[k] for k in ['padding', 'kernel_size', 'stride']] p2 = p if isinstance(p, int) else p[0] + p[1] p_out = config.get('output_padding', 0) return (spatial-1)*s - p2 + k + p_out
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_output_shape(self, input_shape):\r\n return input_shape", "def get_output_shape(self):\n weights = self.W.get_shape().as_list()\n input_size = np.asarray(self.incoming_shape[-3:-1])\n strides = np.asarray(self.strides[-3:-1])\n kernels = np.asarray(weights[0:2])\n num_output = weights[-1]\n dilations = np.asarray(self.dilation_rate)\n if (isinstance(self.padding, list) or isinstance(self.padding, tuple)) and len(self.padding) == 2:\n output_size = np.asarray(\n np.ceil((input_size + 2 * np.asarray(self.padding) - kernels - (kernels - 1) * (\n dilations - 1)) / strides + 1),\n dtype=np.int)\n else:\n output_size = np.asarray(\n np.ceil(input_size / strides) if self.padding == \"SAME\" or self.padding == \"ZEROPAD\" else np.ceil(\n (input_size - (kernels - 1) * dilations) / strides), dtype=np.int)\n \n output_shape = self.incoming_shape[:]\n output_shape[-3:-1] = output_size.tolist()\n output_shape[-1] = num_output\n return output_shape", "def compute_output_shape(self,input_shape):\n return (input_shape[0][0])", "def get_output_shape(self):\n return self.output_shape", "def get_output_shape(self):\n return self.output_shape", "def get_output_shape(self):\n return self.output_shape", "def get_output_shape(self):\n return self.output_shape", "def get_output_shape(self):\n return self.output_shape", "def get_output_shape(self):\n return self.shape", "def output_shape(self):\n raise NotImplementedError", "def set_output_shape(self):\n self.output_shape = ((self.input_shape[0] // self.stride[0],\n self.input_shape[1] // self.stride[1],\n self.input_shape[2]\n ))", "def get_output_shape(self):\n return self.out.shape.as_list()", "def compute_output_shape(self, s):\n return s[0], s[1] + 2 * self.padding[0], s[2] + 2 * self.padding[1], s[3]", "def compute_output_shape(self, input_shape):\n return [(input_shape[0], self.output_dim), (input_shape[0], self.output_dim)]", "def get_output_shape(self):\n return self.incoming_shape", "def get_output_shape(self):\n return self.incoming_shape", "def get_output_shape(self):\n return self.incoming_shape", "def output_shape(self):\r\n return self.detector.output_shape", "def compute_output_shape(self, input_shape):\n if tf.keras.backend.image_data_format() == 'channels_first':\n return (input_shape[0][0], input_shape[0][1]) + input_shape[1][2:4]\n\n return (input_shape[0][0],) + input_shape[1][1:3] + (input_shape[0][-1],)", "def output_shape(self) ->torch.Size:\n return self._computed_output_shape()", "def compute_output_shape(self, input_shape):\n return (input_shape[0][0], input_shape[0][1] + 2)", "def compute_output_shape(self, input_shape):\n return (input_shape[0][0], input_shape[0][1] + 1)", "def compute_output_shape(self, input_shape):\n return (input_shape[0][0], input_shape[0][1] + 1)", "def set_output_shape(self):\n self.output_shape = (reduce(mul, self.input_shape),)", "def conv_transpose_output_shape(h_w, kernel_size=1, stride=1, pad=0, output_padding=0):\n if type(kernel_size) is not tuple:\n kernel_size = (kernel_size, kernel_size)\n h = (h_w[0] - 1) * stride - (2 * pad) + kernel_size[0] + output_padding\n w = (h_w[1] - 1) * stride - (2 * pad) + kernel_size[1] + output_padding\n return h, w", "def compute_output_shape(self, input_shape):\n \n assert input_shape and len(input_shape) == 2\n return input_shape[0], self.n_clusters", "def compute_output_shape(self, input_shape):\n return [\n tf.TensorShape((input_shape[1][0], input_shape[1][1], input_shape[1][2])),\n tf.TensorShape((input_shape[1][0], input_shape[1][1], 
input_shape[0][1]))\n ]", "def compute_output_shape(self, input_shape):\n output_shape = [0] * self.rank\n for d in range(self.rank):\n output_shape[d] = sum(self.paddings[d]) + input_shape[d]\n return tf.TensorShape(output_shape)", "def get_output_shape(self):\n # Get shape of output tensor(s), which will be [samples, n_seq_pos+n_tickersteps, x, y, n_units]\n return [s if isinstance(s, int) and s >= 0 else -1\n for s in self.incoming_shape[0:2] + self.incoming_shape[2:-1] + [self.n_units]]", "def get_output_shape(self):\n # Get shape of output tensor(s), which will be [samples, n_seq_pos+n_tickersteps, n_units]\n return [s if isinstance(s, int) and s >= 0 else -1 for s in self.incoming_shape[0:2] + [self.n_units]]", "def get_output_shape(self):\n # Get shape of output tensor(s), which will be [samples, n_seq_pos+n_tickersteps, n_units]\n return [s if isinstance(s, int) and s >= 0 else -1 for s in self.incoming_shape[0:2] + [self.n_units]]", "def get_output_shape(self):\n # Get shape of output tensor(s), which will be [samples, n_seq_pos+n_tickersteps, n_units]\n return [s if isinstance(s, int) and s >= 0 else -1 for s in self.incoming_shape[0:2] + [self.n_units]]", "def output_shape(self) ->torch.Size:\n input_shape = self.input_shape\n if self._reduce_mode in {None, 'none', 'None'}:\n return input_shape\n elif self._reduce_mode == 'concat':\n if len(input_shape) > 1:\n return input_shape[:-2] + (input_shape[-1] * input_shape[-2],)\n return input_shape\n else:\n return input_shape[1:]", "def reshape_output_shape(input_shape):\n shape_1 = input_shape[0]\n shape_2 = 384\n return(shape_1, shape_2)", "def get_output_shape(self):\n return self.incoming_shapes[0][:-1] + [sum([s[-1] for s in self.incoming_shapes])]", "def get_output_shape(self):\n # TODO: return shape without construction of graph\n return self.get_output(comp_next_seq_pos=False).get_shape().as_list()", "def get_output_shape(self):\n # TODO: return shape without construction of graph\n return self.get_output(comp_next_seq_pos=False).get_shape().as_list()", "def out_conv(spatial, config):\n p, k, s = [config[k] \n for k in ['padding', 'kernel_size', 'stride']]\n p2 = p if isinstance(p, int) else p[0] + p[1]\n\n return (spatial + p2 - k)//s + 1", "def _get_conv_out(self, shape) -> int:\n conv_out = self.conv(torch.zeros(1, *shape))\n return int(np.prod(conv_out.size()))", "def _get_conv_out(self, shape) -> int:\n conv_out = self.conv(torch.zeros(1, *shape))\n return int(np.prod(conv_out.size()))", "def _get_conv_out(self, shape) -> int:\n conv_out = self.conv(torch.zeros(1, *shape))\n return int(np.prod(conv_out.size()))", "def conv2d_output_shape(height, width, filter_height, filter_width, out_channels, stride):\n return (out_channels, ((height - filter_height) / stride + 1), ((width - filter_width) / stride + 1))", "def conv2d_transpose(self, output_shape, filter_):\n return self.add_layer(conv2d_transpose, output_shape, filter_)", "def get_output_shape(self):\n return [s if isinstance(s, int) and s >= 0 else -1 for s in self.incoming_shape[:-1]] + [self.n_units]", "def output_shape_param(self, param):\n\t\tindex = self.variables['output_format'].index(param)\n\t\treturn self.variables['output'].shape[index]", "def get_output_shape(self):\n return []", "def conv_output_shape(\n h_w: Tuple[int, int],\n kernel_size: int = 1,\n stride: int = 1,\n pad: int = 0,\n dilation: int = 1,\n ):\n h = floor(\n ((h_w[0] + (2 * pad) - (dilation * (kernel_size - 1)) - 1) / stride) + 1\n )\n w = floor(\n ((h_w[1] + (2 * pad) - (dilation * 
(kernel_size - 1)) - 1) / stride) + 1\n )\n return h, w", "def conv_output_shape(h_w, kernel_size=1, stride=1, pad=0, dilation=1):\n if type(kernel_size) is not tuple:\n kernel_size = (kernel_size, kernel_size)\n h = math.floor( ((h_w[0] + (2 * pad) - ( dilation * (kernel_size[0] - 1)\n ) - 1 )/ stride) + 1)\n w = math.floor( ((h_w[1] + (2 * pad) - ( dilation * (kernel_size[1] - 1)\n ) - 1 )/ stride) + 1)\n return h, w", "def reshape_output_shape_0(input_shape): \n shape_1 = input_shape[0]\n shape_2 = input_shape[1]\n shape_3 = input_shape[2]\n return(shape_1, shape_2, shape_3, 1)", "def calculate_flatten_output_shapes(operator):\n check_input_and_output_numbers(operator, input_count_range=1, output_count_range=1)\n check_input_and_output_types(operator, good_input_types=[FloatTensorType])\n\n input = operator.inputs[0]\n output = operator.outputs[0]\n\n if len(input.type.shape) not in [2, 4]:\n raise RuntimeError(\"Input must be 2-D or 4-D float tensor\")\n\n input_shape = input.type.shape\n output_shape = [input_shape[0], 1]\n\n # Calculate the multiplication of C, H, and W.\n for i in input_shape[1:]:\n if i != \"None\":\n output_shape[1] *= i\n else:\n # If any of C, H, W-dimensions is unknown, the flatten C-dimension is unknown\n output_shape[1] = \"None\"\n break\n\n output.type.shape = output_shape", "def conv_output_shape(h_w, kernel_size=1, stride=1, pad=0, dilation=1):\n from math import floor\n if type(kernel_size) is not tuple:\n kernel_size = (kernel_size, kernel_size)\n h = floor(((h_w[0] + (2 * pad) - (dilation *\n (kernel_size[0] - 1)) - 1) / stride) + 1)\n w = floor(((h_w[1] + (2 * pad) - (dilation *\n (kernel_size[1] - 1)) - 1) / stride) + 1)\n return h, w", "def conv_out_shape(dims, conv):\n kernel_size, stride, pad, dilation = conv.kernel_size, conv.stride, conv.padding, conv.dilation\n return tuple(int(((dims[i] + (2 * pad[i]) - (dilation[i]*(kernel_size[i]-1))-1)/stride[i])+1) for i in range(len(dims)))", "def get_output_shape(self):\n return self.incoming_shapes[0]", "def get_output_shape(self):\n return self.incoming_shapes[0]", "def get_output_shape(self) -> List[int]:\n if -1 not in self.output_shape:\n return self.output_shape\n\n total_input_dims = np.prod(self.input_shape)\n\n dim = 1\n for i in self.output_shape:\n if i != -1:\n dim *= i\n missing_dim = int(total_input_dims / dim)\n\n output_shape = self.output_shape\n for ix, dim in enumerate(output_shape):\n if dim == -1:\n output_shape[ix] = missing_dim\n\n return output_shape", "def conv_output_shape(h_w, kernel_size=1, stride=1, pad=0, dilation=1):\n \n if type(h_w) is not tuple:\n h_w = (h_w, h_w)\n \n if type(kernel_size) is not tuple:\n kernel_size = (kernel_size, kernel_size)\n \n if type(stride) is not tuple:\n stride = (stride, stride)\n \n if type(pad) is not tuple:\n pad = (pad, pad)\n \n h = (h_w[0] + (2 * pad[0]) - (dilation * (kernel_size[0] - 1)) - 1)// stride[0] + 1\n w = (h_w[1] + (2 * pad[1]) - (dilation * (kernel_size[1] - 1)) - 1)// stride[1] + 1\n \n return h, w", "def conv_output_shape(h_w, kernel_size=1, stride=1, pad=0, dilation=1):\n from math import floor\n if type(kernel_size) is not tuple:\n kernel_size = (kernel_size, kernel_size)\n\n if type(pad) is not tuple:\n pad = (pad, pad)\n\n h = floor(((h_w[0] + (2 * pad[0]) - (dilation * (kernel_size[0] - 1)) - 1) / stride) + 1)\n w = floor(((h_w[1] + (2 * pad[1]) - (dilation * (kernel_size[1] - 1)) - 1) / stride) + 1)\n return h, w", "def output_width(self):\n\t\treturn self.output_shape_param('W')", "def output_reshape(ct):\n return 
np.moveaxis(ct, 1, -1)", "def get_output1_shape(self):\n return self.__y1.shape", "def get_output2_shape(self):\n return self.__y2.shape", "def get_output_shape_for(self, input_shape):\n # Extract nodes and membership\n atom_features_shape = input_shape[0]\n # membership_shape = input_shape[2]\n\n # assert (len(atom_features_shape) == 2,\n # \"GraphGather only takes 2 dimensional tensors\")\n n_feat = atom_features_shape[1]\n return self.batch_size, n_feat", "def getOutShapes(self):\n\t\treturn self.output_shape", "def get_output_shape(node: Node, out_port: int):\n if out_port not in node.out_ports():\n raise Exception('Can\\'t get shape for {} port of {} node. No such port in node'.format(out_port, node.name))\n out_port = node.out_port(out_port)\n return out_port.data.get_shape()", "def get_output_shape(self, name):\n return self._shape_dict.get(name)", "def infer_shape(self, node, input_shapes):\r\n \"\"\"TODO: Your code here\"\"\"\r\n assert len(input_shapes) == 1\r\n N, C, H, W = input_shapes[0]\r\n p_H = (H + 2 * self.padding - self.kernel_H) / self.stride + 1\r\n p_W = (W + 2 * self.padding - self.kernel_W) / self.stride + 1\r\n return (N, C, p_H, p_W)", "def shape(self,squeeze=True):\n return np.shape(self.getData(squeeze=squeeze))", "def compute_level_output_shape(n_filters, depth, pool_size, image_shape):\n output_image_shape = np.asarray(np.divide(image_shape, np.power(pool_size, depth)), dtype=np.int32).tolist()\n return tuple([None] + output_image_shape + [n_filters])", "def declaration_conv2d_transpose_impl(data, kernel, strides, padding, out_dtype, output_padding):\n data_pad, kernel_transform = conv2d_transpose_nchw_preprocess(\n data, kernel, strides, padding, out_dtype, output_padding\n )\n batch, in_c, in_h, in_w = data_pad.shape\n out_c, _, filter_h, filter_w = kernel_transform.shape\n\n # convolution stage\n out_c = simplify(out_c)\n\n out_h = simplify(in_h - filter_h + 1)\n out_w = simplify(in_w - filter_w + 1)\n dc = te.reduce_axis((0, in_c), name=\"dc\")\n dh = te.reduce_axis((0, filter_h), name=\"dh\")\n dw = te.reduce_axis((0, filter_w), name=\"dw\")\n\n Output = te.compute(\n (batch, out_c, out_h, out_w),\n lambda b, c, h, w: te.sum(\n data_pad[b, dc, h + dh, w + dw].astype(out_dtype)\n * kernel_transform[c, dc, dh, dw].astype(out_dtype),\n axis=[dc, dh, dw],\n ),\n tag=\"conv2d_transpose_nchw\",\n )\n\n return Output", "def input_shape(self):\n return self._ipt_shape", "def _pred_shape(self, X):\n return X.shape[:-1] # X has Euler angles, while output is scalar", "def compute_output_shape(self, input_shape):\n batch_size = input_shape[0]\n sequence_length = input_shape[1]\n return (batch_size, sequence_length)", "def _transpose_by_1_vnchwconv():\n\n pass", "def get_output_dim(self) -> int:\n raise NotImplementedError", "def shape(self):\n return np.array([self.w, self.h])", "def get_conv1d_output_size(input_size, kernel_size, stride):\n return ((input_size - kernel_size)//stride) + 1", "def get_conv1d_output_size(input_size, kernel_size, stride):\n return ((input_size - kernel_size)//stride) + 1", "def declaration_conv2d_transpose_impl(data, kernel, strides, padding, out_dtype):\n data_pad, kernel_transform = \\\n conv2d_transpose_nchw_preprocess(data, kernel, strides, padding, out_dtype)\n batch, in_c, in_h, in_w = data_pad.shape\n out_c, _, filter_h, filter_w = kernel_transform.shape\n stride_h, stride_w = strides\n\n # convolution stage\n out_c = simplify(out_c)\n out_h = simplify(in_h - filter_h + 1)\n out_w = simplify(in_w - filter_w + 1)\n dc = 
tvm.reduce_axis((0, in_c), name='dc')\n dh = tvm.reduce_axis((0, filter_h), name='dh')\n dw = tvm.reduce_axis((0, filter_w), name='dw')\n\n Output = tvm.compute(\n (batch, out_c, out_h, out_w),\n lambda b, c, h, w: tvm.sum(\n data_pad[b, dc, h+dh, w+dw].astype(out_dtype) *\n kernel_transform[c, dc, dh, dw].astype(out_dtype),\n axis=[dc, dh, dw]), tag=\"conv2d_transpose_nchw\")\n\n return Output", "def compute_level_output_shape(n_filters, depth, pool_size, image_shape):\n output_image_shape = np.asarray(np.divide(image_shape, np.power(pool_size, depth)), dtype=np.int32).tolist()\n return tuple([None, n_filters] + output_image_shape)", "def compute_level_output_shape(n_filters, depth, pool_size, image_shape):\n output_image_shape = np.asarray(np.divide(image_shape, np.power(pool_size, depth)), dtype=np.int32).tolist()\n return tuple([None, n_filters] + output_image_shape)", "def testTransposeNCHW(self, use_bias, use_output_shape):\n output_shape = tf.TensorShape((4, 5))\n\n conv2_transpose = snt.Conv2DTranspose(\n output_channels=5,\n output_shape=output_shape if use_output_shape else None,\n kernel_shape=3,\n padding=snt.VALID,\n stride=1,\n name=\"conv2_transpose\",\n use_bias=use_bias,\n data_format=conv.DATA_FORMAT_NCHW)\n conv2 = conv2_transpose.transpose()\n\n # Check kernel shapes, strides and padding match.\n self.assertEqual(conv2_transpose.kernel_shape, conv2.kernel_shape)\n self.assertEqual((1,) + conv2_transpose.stride[1:3] + (1,), conv2.stride)\n self.assertEqual(conv2_transpose.padding, conv2.padding)\n\n # Before conv2_transpose is connected, we cannot know how many\n # `output_channels` conv1 should have.\n err = \"Variables in conv2_transpose not instantiated yet\"\n with self.assertRaisesRegexp(snt.NotConnectedError, err):\n _ = conv2.output_channels\n\n # After connection the number of `output_channels` is known.\n batch_size = 32\n in_height = 2\n in_width = 3\n in_channels = 4\n x = tf.constant(np.random.randn(batch_size, in_channels, in_height,\n in_width),\n dtype=np.float32)\n conv2_transpose(x)\n self.assertEqual(in_channels, conv2.output_channels)\n\n # As is `output_channels`.\n self.assertEqual(output_shape, conv2_transpose.output_shape)\n\n # However, even after connection, the `input_shape` of the forward\n # convolution is not known until it is itself connected (i.e. 
it can be\n # connected to a different shape input from the `output_shape` of the\n # transpose convolution!)\n err = \"Variables in conv2_transpose_transpose not instantiated yet\"\n with self.assertRaisesRegexp(snt.NotConnectedError, err):\n self.assertEqual(conv2_transpose.output_shape, conv2.input_shape)", "def output_tensor(self, type_shape):\n return self._output[self._type_shape_to_idx[type_shape]]", "def testOutputShapeInference(self, use_bias):\n inputs = tf.zeros(shape=[3, 5, 5, 5, 2], dtype=tf.float32)\n\n conv1 = snt.Conv3DTranspose(name=\"conv3d_1\",\n output_channels=10,\n output_shape=None,\n kernel_shape=5,\n padding=snt.SAME,\n stride=2,\n use_bias=use_bias)\n\n outputs = conv1(inputs)\n\n self.assertTrue(outputs.get_shape().is_compatible_with((3, 10, 10, 10, 10)))", "def get_output_dim(self) -> int:\n raise NotImplementedError", "def _output_size_conv2d(conv, size):\n o_size = np.array(size) + 2 * np.array(conv.padding)\n o_size -= np.array(conv.dilation) * (np.array(conv.kernel_size) - 1)\n o_size -= 1\n o_size = o_size / np.array(conv.stride) + 1\n return np.floor(o_size)", "def _reshape_output(self, output):\n output = np.transpose(output, [0, 2, 3, 1])\n _, height, width, _ = output.shape\n dim1, dim2 = height, width\n dim3 = 3\n # There are CATEGORY_NUM=80 object categories:\n dim4 = (4 + 1 + CATEGORY_NUM)\n return np.reshape(output, (dim1, dim2, dim3, dim4))", "def conv2d_nhwc_spatial_pack(cfg, data, kernel, strides, padding, dilation, out_dtype):\n return conv2d_spatial_pack_nhwc(\n cfg, data, kernel, strides, padding, dilation, out_dtype, num_tile=3\n )", "def shape(self):\n return self.X.shape", "def _squeeze_output(out):\r\n out = out.squeeze()\r\n if out.ndim == 0:\r\n out = out[()]\r\n return out", "def get_model_shape(self):\n return self.nlay, self.nrow, self.ncol", "def inputShape(self):\n return self.input_shape", "def make_conv_output_axes(input, filter, conv_params):\n # type: (TensorOp, TensorOp, Dict) -> Axes\n number_output_features = filter.axes[-1].length\n mini_batch_size = input.axes[-1].length\n\n input_d, input_h, input_w = input.axes.lengths[1:4] # axes order C, D, H, W, N\n filter_d, filter_h, filter_w = filter.axes.lengths[1:4] # axes order J, T(d), R(h), S(w), K\n\n def output_dim(input_x, filter_x, pad_x, str_x, dil_x):\n return floor((input_x + 2 * pad_x - filter_x - (filter_x - 1) * (dil_x - 1)) / str_x) + 1\n\n convp = conv_params\n output_d = output_dim(input_d, filter_d, convp['pad_d'], convp['str_d'], convp['dil_d'])\n output_h = output_dim(input_h, filter_h, convp['pad_h'], convp['str_h'], convp['dil_h'])\n output_w = output_dim(input_w, filter_w, convp['pad_w'], convp['str_w'], convp['dil_w'])\n\n output_axes = ng.make_axes(axes=(\n ng.make_axis(name='C', docstring='output features', length=int(number_output_features)),\n ng.make_axis(name='D', docstring='depth', length=int(output_d)),\n ng.make_axis(name='H', docstring='height', length=int(output_h)),\n ng.make_axis(name='W', docstring='width', length=int(output_w)),\n ng.make_axis(name='N', docstring='mini-batch size', length=int(mini_batch_size)),\n ))\n return output_axes", "def get_target_shape(self):\n if not self.channels_first:\n return [None] + self.w + [self.nclasses]\n else:\n return [None] + [self.nclasses] + self.w", "def get_shape(self):\n if not self.channels_first:\n return [None] + self.w + [self.numoffeatures]\n else:\n return [None] + [self.numoffeatures] + self.w", "def conv2d_transpose_nchw(Input, Filter, strides, padding, out_dtype):\n return 
declaration_conv2d_transpose_impl(Input, Filter, strides, padding, out_dtype)", "def add_input_and_output_shape(self, input_shape, output_shape):", "def convert_shape(g, op, block):\n\n x = g.get_node(op.input(\"Input\")[0])\n out = shape_of(x, dtype=\"int32\")\n g.add_node(op.output(\"Out\")[0], out)", "def compute_output_shape(self, input_shape):\n logging.warning(\n 'All custom layers should implement the '\n '`compute_output_shape` method. This layer (' + self.name + ') '\n 'is relying on the base `Layer.compute_output_shape` implementation, '\n 'which will start raising a `NotImplementedError` '\n 'as of July 1st, 2018.')\n return input_shape", "def out_shape(imgshape, ds, ignore_border=False):\r\n if len(imgshape) < 2:\r\n raise TypeError('imgshape must have at least two elements '\r\n '(rows, cols)')\r\n r, c = imgshape[-2:]\r\n rval = list(imgshape[:-2]) + [r // ds[0], c // ds[1]]\r\n\r\n if not ignore_border:\r\n if isinstance(r, theano.Variable):\r\n rval[-2] = tensor.switch(r % ds[0], rval[-2] + 1, rval[-2])\r\n elif r % ds[0]:\r\n rval[-2] += 1\r\n if isinstance(c, theano.Variable):\r\n rval[-1] = tensor.switch(c % ds[1], rval[-1] + 1, rval[-1])\r\n elif c % ds[1]:\r\n rval[-1] += 1\r\n return rval", "def out_shape(imgshape, ds, ignore_border=False):\r\n if len(imgshape) < 2:\r\n raise TypeError('imgshape must have at least two elements '\r\n '(rows, cols)')\r\n r, c = imgshape[-2:]\r\n rval = list(imgshape[:-2]) + [r // ds[0], c // ds[1]]\r\n\r\n if not ignore_border:\r\n if isinstance(r, theano.Variable):\r\n rval[-2] = tensor.switch(r % ds[0], rval[-2] + 1, rval[-2])\r\n elif r % ds[0]:\r\n rval[-2] += 1\r\n if isinstance(c, theano.Variable):\r\n rval[-1] = tensor.switch(c % ds[1], rval[-1] + 1, rval[-1])\r\n elif c % ds[1]:\r\n rval[-1] += 1\r\n return rval" ]
[ "0.69743323", "0.68945485", "0.6871195", "0.68555003", "0.68555003", "0.68555003", "0.68555003", "0.68555003", "0.6844321", "0.6745843", "0.6660508", "0.6644264", "0.6641586", "0.66338116", "0.6628068", "0.6628068", "0.6628068", "0.65929323", "0.65625435", "0.65463483", "0.6537348", "0.6398154", "0.6398154", "0.63795304", "0.6363818", "0.6358981", "0.63487", "0.6340277", "0.63380766", "0.6325984", "0.6325984", "0.6325984", "0.6292003", "0.62846327", "0.62711614", "0.62535316", "0.62535316", "0.62302065", "0.6197289", "0.6197289", "0.6197289", "0.6179993", "0.61779034", "0.61643964", "0.6159617", "0.6125169", "0.61199486", "0.60661364", "0.6060953", "0.60406834", "0.602698", "0.60246575", "0.6020757", "0.6020757", "0.59931064", "0.59877974", "0.59751177", "0.5926418", "0.59231585", "0.58930916", "0.5851123", "0.580212", "0.5798324", "0.57979727", "0.5797496", "0.5767246", "0.57477194", "0.56770396", "0.5648693", "0.5638983", "0.5637205", "0.56334877", "0.56334704", "0.5628336", "0.5622245", "0.56171983", "0.56171983", "0.5608733", "0.5603731", "0.5603731", "0.5602743", "0.5593774", "0.55401856", "0.5518424", "0.5518134", "0.55034465", "0.5503023", "0.5488971", "0.5487654", "0.5486718", "0.5484978", "0.5476948", "0.54703856", "0.54651916", "0.5462972", "0.5459562", "0.5454699", "0.5441337", "0.54294354", "0.54294354" ]
0.6426456
21
Handle repeating blocks in the model by collecting the enclosed rows and repeating them the specified number of times before appending.
def format_repeats(file): ret = [] while True: try: l = next(file).lstrip().replace('\n','') except StopIteration: break if l.lower().startswith('repeat'): times = int(l.split('x')[1]) repeats = [] while True: l = next(file).lstrip().replace('\n','') if l.lower() == 'end': break repeats.append(l) ret += repeats*times else: if not l.startswith('#'): ret += [l] return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rows(self, row):\n self.row += row", "def addInnerBreaks(self):\n for item in self:\n item.addInnerBreaks()", "def process_duplicate_rows(self):\n pass", "def addBreaks(self):\n for item in self:\n item.addBreaks()", "def block_replicate(data, block_size, conserve_sum=True):\n data, block_size = _process_block_inputs(data, block_size)\n for i in range(data.ndim):\n data = np.repeat(data, block_size[i], axis=i)\n\n if conserve_sum:\n # in-place division can fail due to dtype casting rule\n data = data / np.prod(block_size)\n\n return data", "def _rebuild(self):\n for shape, record in iter(self):\n self.write_row(shape, record)\n self.__isBuilt = True", "def build_A_block(self):\n N = self.N # number of MPC steps\n A = self.A\n \n row_list = [A] # reocrd the every row in B_hat\n \n for i in range(1, N):\n A = A.mm(self.A)\n row_list.append(A)\n return torch.cat(row_list,0)", "def adjust_body_records_to_samples(self):\r\n if len(self.list_of_samples_to_be_combined) == 0:\r\n self.determinate_samples_to_be_combined()\r\n Body_header_line.list_of_samples_to_be_combined = self.list_of_samples_to_be_combined\r\n Body_record.list_of_samples_to_be_combined = self.list_of_samples_to_be_combined\r\n for body_object in self.list_of_body_objects:\r\n body_object.update_line()", "def grow(self):\n while self.splittable_nodes:\n self.split_next()", "def _generate_rows(self):\n logger.debug(\"Generating pre-genealogical coherence data for %s\", self.w1)\n if not self.rows:\n for w2 in self.all_mss:\n if self.w1 == w2:\n continue\n self._add_row(w2)\n\n self._sort()\n logger.debug(\"Generated pre-genealogical coherence data for %s\", self.w1)", "def introduceData(self, kmerCounts):\n for block in self.blocks:\n if len(block.kmers) > 0:\n count = sum(kmerCounts[k] * 2.0 for k in block.getKmers())\n #count = sum(kmerCounts[k] * self.G.weights[k] for k in block.getKmers())\n adjustedCount = (1.0 * count) / (len(block.kmers) * self.normalizing)\n block.adjustedCount = adjustedCount\n self.constrain_approximately_equal(adjustedCount, sum(block.getVariables() + [block.getTrash()]), \n penalty=self.dataPenalty)", "def new_iteration(self):\n if (\n self.inner_solutions is not None\n and self.inner_solutions.size(0) > self.raw_samples\n ):\n indices = torch.randperm(n=self.inner_solutions.size(0))[: self.raw_samples]\n self.inner_solutions = self.inner_solutions[indices]\n self.inner_values = self.inner_values[indices]", "def grow(self):\r\n # Double the physical size if no more room for items\r\n # and add the fillValue to the new cells in the underlying list\r\n for count in range(len(self)):\r\n self._items.append(self._fillValue)", "def build_augmented_matrix(self):\r\n for row in range(self.SIZE):\r\n self.matrix[row].append(self.result[row])", "def raw_processing(self):\n well_dilution_code = {'e': 5, 'f': 6, 'g': 7, 'h': 8}\n\n for well in self.data_labels:\n x = 10 ** well_dilution_code[well[-1]]\n y = self.film_count[self.data_labels.index(well)] * 5 * x * (20 / self.plated_volume)\n z = self.plank_count[self.data_labels.index(well)] * 5 * x * (20 / self.plated_volume)\n\n self.film_conc.append(y)\n self.plank_conc.append(z)", "def _batcher(self, rows):\n row_count = 0\n batch = []\n batch_count = 1\n\n total_rows_modified = 0\n throttle_count = 0\n\n i = 0\n for row in rows:\n if row_count > self.batch_size - 1:\n logger.debug(f\"row_count={row_count} batch_size={self.batch_size} and batch={len(batch)}\")\n # Yield the previous batch\n yield batch\n\n # Start the new batch\n batch = []\n 
batch.append(row)\n row_count = 1\n\n batch_count += 1\n # break # toggle to load one batch only\n else:\n row_count += 1\n batch.append(row)\n\n # Put in a sleep timer to throttle how hard we hit the database\n if self.throttle_time and self.throttle_size and (throttle_count > self.throttle_size - 1):\n logger.info(f\"Sleeping for {self.throttle_time} seconds... row: {i}\")\n time.sleep(int(self.throttle_time))\n throttle_count = 0\n elif self.throttle_time and self.throttle_size:\n throttle_count += 1\n i += 1\n\n yield batch", "def run(self):\n first_index, last_index = \\\n self.get_initial_blocks()\n while last_index - first_index > self.block_size:\n first_index, last_index = \\\n self.join_blocks(first_index, last_index)\n self.merge_blocks(self.output_file_name, first_index, last_index)", "def mergeGroup(self):\n if len(self) < 2:\n return\n mainItem = self[0]\n for item in self[1:]:\n mainItem.textLines.extend(item.textLines)\n mainItem.height = reduce(lambda x,y: x+y, [item.height for item in\n self])", "def interpolate_next(self):\n\n # Get valid ensembles\n valid_ens = self.valid_data[0, :]\n\n # Process ensembles\n n_ens = len(valid_ens)\n\n for n in np.arange(0, n_ens-1)[::-1]:\n if not valid_ens[n]:\n self.u_processed_mps[n] = self.u_processed_mps[n+1]\n self.v_processed_mps[n] = self.v_processed_mps[n+1]", "def _merge(self, box_list):\n if isinstance(box_list, self.__class__):\n box_list = [box_list]\n for box in box_list:\n for row in box:\n row[IND] = len(self)\n self.append(row)\n self._combine(row)", "def calculate_pyramid_rows_loop(number_of_rows):\n total_blocks = 0\n for x in range(1, number_of_rows + 1):\n total_blocks += x\n return total_blocks", "def test_block_extra_batch(self):\n pass", "def createInnerRepresentation(self):\n\n for idx, single_block in enumerate(self._block_list):\n del self._to_be_processed[:]\n del self._metastring_rest[:]\n self._metastring_rest.append(self._metastring[idx])\n self.addMetastringPointer(single_block)", "def fill_rows(self) -> bool:\n recurrence = 5\n offset = 0\n while recurrence != 0 and offset != 5:\n for x in range(0, recurrence):\n position_y = 9 - x\n tuple_y = 9 - offset\n self.board[offset][x] = 1\n self.board[tuple_y][position_y] = 2\n recurrence = recurrence - 1\n offset = offset + 1\n\n return True", "def _compute_amount(self):\n for line in self:\n line.update({\n 'price_subtotal': line.price_unit * line.quantity,\n })", "def addTechnicalReplicate(self, technical_replicate):\n technicalReplicate = ExperimentalReplicate(technical_replicate)\n\n for i in range(len(self.cellData)):\n self.cellData[i] = pd.merge(\n self.cellData[i],\n technicalReplicate.cellData[i],\n how=\"outer\",\n on=[\"peptide-phosphosite\", \"Master Protein Descriptions\", \"Overflow Protein Descriptions\"])\n\n for each in self.timePoints:\n cellLine = self.cellLines[i]\n replicate_x = str(cellLine) + \"-\" + str(each) + \"_x\"\n replicate_y = str(cellLine) + \"-\" + str(each) + \"_y\"\n\n self.cellData[i][str(cellLine) + \"-\" + str(each)] = self.cellData[i][[replicate_x, replicate_y]].mean(axis=1)\n\n self.cellData[i] = self.cellData[i].drop(columns=[replicate_x, replicate_y])\n\n self.fullIntersection()\n self.referenceIntersections()", "def row_reduce(self):\n res = self.row_echelon()\n for i in range(1, res.m):\n for j in range(res.n):\n if res[i, j] == 1:\n for k in range(i):\n constant = res[k, j]\n res.data[k] = [elem_k - elem_i * constant\n for elem_i, elem_k in\n zip(res.data[i], res.data[k])]\n break\n return res", "def 
grids_augmented_with_number(part_grid, val, curr_row=0):\n if curr_row == len(part_grid):\n return [part_grid]\n else:\n res = []\n for option in grids_augmented_in_row(part_grid, val, curr_row):\n res += grids_augmented_with_number(option, val, curr_row + 1)\n return res", "def increment(self):\r\n self.add_output()\r\n for i in range(self.length-1, 0, -1):\r\n self.slots[i].item = self.slots[i-1].item\r\n self.slots[0].item = Item.random()", "def populate_blocks_with_blockheights(self):\n for (height, block) in enumerate(self.blocks):\n block[\"height\"] = height", "def populate_dyn(self, table):\n myrow = table.row\n myrow[\"sample_time\"] = int(time.time() - glob.base_time)\n myrow[\"available_bike_stands\"] = self.available_bike_stands\n myrow[\"available_bikes\"] = self.available_bikes\n myrow[\"last_update\"] = self.last_update\n myrow[\"status\"] = self.status\n myrow.append()\n table.flush()", "def repeated_iteration(self) -> global___Statement.Iteration.RepeatedIteration:", "def add_cell(self, cell):\r\n if cell not in self.cells:\r\n self.cells.add(cell)\r\n if cell.block == \"A\":\r\n self.blockA += 1\r\n self.blockA_free += 1\r\n self.blockA_cells.append(cell)\r\n else:\r\n assert cell.block == \"B\"\r\n self.blockB += 1\r\n self.blockB_free += 1\r\n self.blockB_cells.append(cell)", "def block_repeat(self, board, n):\r\n this_block = []\r\n row_start = 3 * (n // 4)\r\n col_start = 3 * (n % 4)\r\n for r in range(3):\r\n this_row = board[(12 * (row_start + r) + col_start):(12 * (row_start + r) + col_start + 3)]\r\n for x in this_row:\r\n this_block.append(x)\r\n # println(n)\r\n # println(this_block)\r\n for letter in 'abcdef':\r\n if this_block.count(letter) > 1:\r\n # println(this_block)\r\n return True\r\n return False", "def newrow(self, rowlist):\n if len(rowlist) > 0:\n if islist(rowlist[0]):\n for row in rowlist:\n self.newrow(row)\n elif len(rowlist) == self.x:\n for x in xrange(0, len(rowlist)):\n rowlist[x] = self.prepare(rowlist[x])\n self.a.append(rowlist)\n self.y += 1\n else:\n raise IndexError(\"Unequal matrix row lengths for newrow of \"+str(self.x)+\" and \"+str(len(rowlist)))", "def insert(self, e):\n elementsintable = 1\n for numberset in self.table:\n for number in numberset:\n elementsintable += 1\n if (elementsintable / len(self.table) > 0.75):\n self.rehash(len(self.table) * 2)\n self.table[hash(e) % len(self.table)].add(e)", "def build(self, block_size):", "def grow(self):\n \n self.body.append(self.body[-1])", "def _body(self, x, ensembled_batch, non_ensembled_batch, idx):\n i, current_representations = x\n del x\n feats = self._slice_batch(i, ensembled_batch, non_ensembled_batch)\n representations_update = self.evoformer(*self.batch_expand(feats))\n new_representations = {}\n for k in current_representations:\n new_representations[k] = (\n current_representations[k] + representations_update[k])\n del representations_update\n return i+1, new_representations", "def _hide_numbers(self):\n global counter\n\n # num of attempts allow for more blocks to be removed\n attempts = self._difficulty\n\n while attempts > 0:\n # selecting random cell and rotational counterpart\n row = randint(0, 8)\n col = randint(0, 8)\n while self._grid_init[row][col] == 0:\n row = randint(0, 8)\n col = randint(0, 8)\n\n # backing up in case removal is gives multiple solutions\n backupone = self._grid_init[row][col]\n backuptwo = self._grid_init[8 - row][8 - col]\n self._grid_init[row][col] = 0\n self._grid_init[8 - row][8 - col] = 0\n\n # cloning grid to test number of solutions\n 
test_puzzle = []\n for r in range(0, 9):\n test_puzzle.append(self._grid_init[r][:])\n\n # counter for num solutions is set to 0\n counter = 0\n\n # check num of solutions\n self._solve_puzzle(test_puzzle)\n\n # if num of solutions is not one, replace the two blocks\n if counter != 1:\n self._grid_init[row][col] = backupone\n self._grid_init[8 - row][8 - col] = backuptwo\n attempts -= 1", "def grow(self, seed_density):\r\n # - first we need to decide how to grow\r\n choice = rand.choice([0, 1, 2, 3])\r\n # - now do it\r\n if (choice == 0):\r\n # add a new row before the first row\r\n self.cells = np.vstack([np.zeros(self.yspan, dtype=np.int), self.cells])\r\n # initialize the new row with a density of approximately seed_density\r\n for s_y in range(self.yspan):\r\n if (rand.uniform(0, 1) < seed_density):\r\n self.cells[0][s_y] = 1\r\n #\r\n elif (choice == 1):\r\n # add a new row after the last row\r\n self.cells = np.vstack([self.cells, np.zeros(self.yspan, dtype=np.int)])\r\n # initialize the new row with a density of approximately seed_density\r\n for s_y in range(self.yspan):\r\n if (rand.uniform(0, 1) < seed_density):\r\n self.cells[-1][s_y] = 1\r\n #\r\n elif (choice == 2):\r\n # add a new column before the first column\r\n self.cells = np.hstack([np.zeros((self.xspan, 1), dtype=np.int), self.cells])\r\n # initialize the new column with a density of approximately seed_density\r\n for s_x in range(self.xspan):\r\n if (rand.uniform(0, 1) < seed_density):\r\n self.cells[s_x][0] = 1\r\n #\r\n elif (choice == 3):\r\n # add a new column after the last column\r\n self.cells = np.hstack([self.cells, np.zeros((self.xspan, 1), dtype=np.int)])\r\n # initialize the new column with a density of approximately seed_density\r\n for s_x in range(self.xspan):\r\n if (rand.uniform(0, 1) < seed_density):\r\n self.cells[s_x][-1] = 1\r\n #\r\n #\r\n # now let's update xspan and yspan to the new size\r\n self.xspan = self.cells.shape[0]\r\n self.yspan = self.cells.shape[1]\r\n #\r", "def repeat(self, count):\n x = HSeq()\n for i in range(count):\n x = x.concatenate(self)\n return x", "def _make_weighting_blocks(self, num_blocks, reduce_ratio, stride=1):\n layers = []\n for i in range(num_blocks):\n layers.append(ConditionalChannelWeighting(self.in_channels, stride=stride, reduce_ratio=reduce_ratio, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, with_cp=self.with_cp))\n return nn.Sequential(*layers)", "def process(self):\n # Process value 1, 2, ..., 9 in order\n for val in range(1, 10):\n # For each row\n for x in range(0, 9):\n exist = False\n can_enter = []\n for y in range(0, 9):\n if self.field[x][y] == val:\n exist = True\n if self.field[x][y] == -1 and val in self.choices[x][y]:\n can_enter.append(y)\n\n # Put val if only one cell can do\n if not exist and len(can_enter) == 1:\n y = can_enter[0]\n self.put(x, y, val)\n \n # For each column\n for y in range(0, 9):\n exist = False\n can_enter = []\n for x in range(0, 9):\n if self.field[x][y] == val:\n exist = True\n if self.field[x][y] == -1 and val in self.choices[x][y]:\n can_enter.append(x)\n\n # Put val in only one cell can do\n if not exist and len(can_enter) == 1:\n x = can_enter[0]\n self.put(x, y, val)\n \n # For each block\n for bx in range(0, 3):\n for by in range(0, 3):\n exist = False\n can_enter = []\n for x in range(bx * 3, (bx + 1) * 3):\n for y in range(by * 3, (by + 1) * 3):\n if self.field[x][y] == val:\n exist = True\n if self.field[x][y] == -1 and val in self.choices[x][y]:\n can_enter.append((x, y))\n \n # Put val if only one cell 
can do\n if not exist and len(can_enter) == 1:\n x = can_enter[0][0]\n y = can_enter[0][1]\n self.put(x, y, val)", "def __treeview_append(self, newbuffer):\n\t\tnew_iter = None\r\n\t\tfor data in newbuffer:\r\n\t\t\tdev = self.dev_dict.get(data[1], self.dev_dict.get(data[2], \"?\"))\t# try to get device/adapter\r\n\t\t\turl = data[0].replace('\\\\','')\t\t\t\t\t\t# remove Backslashes from link/url (filter 1)\n\t\t\turlinfo = urlparse.urlparse(url)\t\t\t\t\t# get protocol\n\t\t\tproto, port = urlinfo.scheme, urlinfo.port\t\t\t\t# \n\t\t\tdata = (self.capture_index, url, proto, dev)\t\t\t\t# create enhanced data\n\t\t\tself.capture_index += 1\t\t\t\t\t\t\t# increase capture index\n\t\t\tdup = (self.capture_last == data[1:])\t\t\t\t\t# is this a duplicate of last entry?\n\t\t\tself.capture_last = data[1:]\t\t\t\t\t\t# store this entry for next duplicate check\n\t\t\tif self.settings[\"del_dups\"] and dup: continue\t\t\t\t# if option set; skip adding of duplicates\n\t\t\tnew_iter = self.model1.append( (data,'#888888',dup,False,) + data[0:] )\t# add data (hidden + columns) as new row", "def _update_cell_list(self):\n self._cell_list = []\n for (line_index,element_list) in enumerate(self.elements): \n line = self._line_coordinates(line_index)\n if self.orientation == Guides.HORIZONTAL:\n next_x = line[0].x - self.font_size//2\n next_y = line[0].y - (2*self.font_size)//3\n elif self.orientation == Guides.VERTICAL:\n next_x = line[0].x - self.font_size//2\n next_y = line[0].y - self.font_size//2\n total_length = 0\n #self.line_length = max_numbers * (self._number_height + self.font_size // 2) + 5\n for element in reversed(element_list):\n text = str(element.value)\n if self.orientation == Guides.HORIZONTAL:\n width = self._number_width * len(text)\n height = self._number_height\n rectangle = Rectangle(Point(next_x-width,next_y),\n width,\n -height)\n wide_rectangle = Rectangle(Point(line[0].x,next_y),\n -self.cell_size,\n -height)\n next_y = next_y - height - self.font_size//2\n total_length += height + self.font_size // 2\n elif self.orientation == Guides.VERTICAL:\n width = self._number_width * (len(text) + 1)\n height = self._number_height\n rectangle = Rectangle(Point(next_x - width,next_y),\n width,\n -height)\n wide_rectangle = Rectangle(Point(next_x - width,line[0].y),\n width,\n -self.cell_size)\n next_x -= width + self.font_size//2\n total_length += width + self.font_size // 2\n element.cell = rectangle\n element.wide_cell = wide_rectangle\n self.line_length = max(total_length + 5, self.line_length)\n self._update_clip()", "def disaggregate_chunk(self, test_mains):\n raise NotImplementedError()", "def _output_add(block_x, orig_x):\n stride = orig_x.shape[-2] // block_x.shape[-2]\n strides = (stride, stride)\n if block_x.shape[-1] != orig_x.shape[-1]:\n orig_x = nn.avg_pool(orig_x, strides, strides)\n channels_to_add = block_x.shape[-1] - orig_x.shape[-1]\n orig_x = jnp.pad(orig_x, [(0, 0), (0, 0), (0, 0), (0, channels_to_add)])\n return block_x + orig_x", "def group_balance_items(self) -> None:\n counts = Counter(self.balance_items)\n self.clear_balance()\n for item in counts:\n quantity_str = f\" (x{counts[item]} @ {format_money(item.amount)} each)\" if counts[item] > 1 else \"\"\n self.add_balance(item.amount * counts[item], category=item.category, description=f\"{item.description}{quantity_str}\")\n self.dirty = False # No need to recalculate; total is the same", "def mutate(self):\n \n # Mutate each weight\n self.w1 = self.w1 + np.random.normal(0, 1, 8).reshape((2,4))\n self.b1 = 
self.b1 + np.random.normal(0, 1, 2).reshape((2,1))\n self.w2 = self.w2 + np.random.normal(0, 1, 4).reshape((2,2))\n self.b2 = self.b2 + np.random.normal(0, 1, 2).reshape((2,1))\n self.w3 = self.w3 + np.random.normal(0, 1, 2).reshape((1,2))\n self.b3 = self.b3 + np.random.normal(0, 1, 1)\n \n # Return thyself\n return self", "def newrow(self):\n maxlen = 0\n for colbuf in self.colbufs:\n maxlen = max(maxlen, len(colbuf))\n\n for i in range(maxlen):\n first = True\n for colbuf in self.colbufs:\n if first:\n first = False\n else:\n sys.stdout.write(self.sepstr)\n if i < len(colbuf):\n sys.stdout.write(colbuf[i])\n else:\n sys.stdout.write(\" \"*self.colwidth)\n sys.stdout.write(\"\\n\")\n\n self.colbufs = []\n for i in range(self.ncolumns):\n self.colbufs.append([])", "def _fract_whole_data(self) :\n if self._fract_data == -1 :\n pass\n else :\n rows = self._df.shape[0]\n fract_rows = int(rows*self._fract_data)\n self._df = self._df.sample(fract_rows).copy()", "def add_accumulated(self) -> None:\n if len(self.line_parts):\n for word in self.next_line.line_parts[0].words:\n self.line_parts[-1].add_word(word)\n self.next_line.line_parts = self.next_line.line_parts[1:]\n\n self.line_parts.extend(self.next_line.line_parts)\n last_part = self.line_parts[-1]\n last_part.add_word(' ')\n self.next_line.line_parts = [\n PDFTextLinePart(last_part.style, self.fonts, last_part.ids)\n ]", "def next_minibatch_feed_dict(self, placeholders):\n while True:\n if self.iter % 4 == 0:\n # gene-gene relation\n self.current_edge_type_idx = self.edge_type2idx[0, 0, 0]\n elif self.iter % 4 == 1:\n # gene-drug relation\n self.current_edge_type_idx = self.edge_type2idx[0, 1, 0]\n elif self.iter % 4 == 2:\n # drug-gene relation\n self.current_edge_type_idx = self.edge_type2idx[1, 0, 0]\n else:\n # random side effect relation\n if len(self.freebatch_edge_types) > 0:\n self.current_edge_type_idx = np.random.choice(self.freebatch_edge_types)\n else:\n self.current_edge_type_idx = self.edge_type2idx[0, 0, 0]\n self.iter = 0\n\n i, j, k = self.idx2edge_type[self.current_edge_type_idx]\n if self.batch_num[self.current_edge_type_idx] * self.batch_size \\\n <= len(self.train_edges[i,j][k]) - self.batch_size + 1:\n break\n else:\n if self.iter % 4 in [0, 1, 2]:\n self.batch_num[self.current_edge_type_idx] = 0\n else:\n self.freebatch_edge_types.remove(self.current_edge_type_idx)\n\n self.iter += 1\n start = self.batch_num[self.current_edge_type_idx] * self.batch_size\n self.batch_num[self.current_edge_type_idx] += 1\n batch_edges = self.train_edges[i,j][k][start: start + self.batch_size]\n return self.batch_feed_dict(batch_edges, self.current_edge_type_idx, placeholders)", "def __addRowConstraints(self):\n for y in range(self.height):\n plusTarget = self.rowPlusCounts[y]\n minusTarget = self.rowMinusCounts[y]\n plusTotal = 0\n minusTotal = 0\n for x in range(self.width):\n g = self.grid[(x, y)]\n plusTotal = plusTotal + If(g == Magnets.PLUS, 1, 0)\n minusTotal = minusTotal + If(g == Magnets.MINUS, 1, 0)\n if plusTarget != None:\n self.solver.add(plusTotal == plusTarget)\n if minusTarget != None:\n self.solver.add(minusTotal == minusTarget)", "def generate_sub_blocks(rdd):\n ### BEGIN SOLUTION ###\n return rdd", "def clear_rows(self):\n ### Previous version had a bug, in that it assumed the set of ###\n ### indices of full rows had to be a contiguous sequence! 
###\n full_rows = [j for j in range(ROWS) if all(\n (i, j) in self.locked_squares for i in range(COLS))]\n if not full_rows: return\n ### Calculate how for to drop each other row, and do it ###\n drop = {j: len([k for k in full_rows if k > j]) for j in range(ROWS)}\n self.locked_squares = {(i, j+drop[j]): color for (i, j), color in\n self.locked_squares.items() if j not in full_rows}\n ### Now just update score, etc. ###\n d = len(full_rows)\n self.increment_lines(d)\n self.increment_score(self.level*{1: 40, 2: 100, 3: 300, 4: 1200}[d])\n if self.level < self.lines // 10 + 1:\n self.increment_level()", "def add(self, mp):\n \n self.tile_contents.append(mp)\n if(self.tile_contents[-1].raised == False):\n self.paint_blocks += 1.00", "def append_rows(self, rows):\n for row in rows:\n self.append_row(row)", "def propagate(self):\n\n N = self.Npackets\n\n for j in tqdm.tqdm(range(N)):\n z = np.random.rand(1)[0]\n i = np.argwhere((self.npackets_cell_cum_frac - z) > 0)[0, 0]\n packet = self.init_packet(i)\n packet.propagate()\n if packet.is_escaped:\n self.esc_packets_x.append(packet.x)\n self.esc_packets_mu.append(packet.mu)\n self.esc_packets_L.append(packet.L)", "def grow(self):\n self.capacity = self.capacity * 2\n self.rehash()", "def rows(self, rows):\n self.grid.items = rows[:]", "def addRowEvent(self):\n\t\tif self.stretch: self.setRowStretch(self.row, 0)\n\t\tself.row += 1\n\t\tif self.stretch: self.setRowStretch(self.row, 1)", "def augment_data(lines, rep, random_idx):\n\tout = lines.copy()\n\n\tout = np.concatenate([out] * rep, axis=0)\n\t\n\tif not random_idx:\n\t\trem = [lines[idx] for idx in random_idx] # add remainder\n\t\tnp.concatenate((out, rem), axis=0)\n\treturn out", "def reduce_recipe(self):\n\n self.recipe.reduce(self.crafting, self.crafting_stride)", "def refine(self, ijk):\n if self.cbc is None or not self.sub_block_count:\n raise ValueError(\n \"Cannot refine sub block model without specifying number \"\n \"of parent and sub blocks\"\n )\n try:\n inds = self.ijk_array_to_indices(ijk)\n except ValueError:\n inds = self.ijk_to_index(ijk)\n self.cbc.array[inds] = np.prod(self.sub_block_count) # pylint: disable=E1137", "def mixup_data(self, data_ratio_produce=2, alpha=0.2):\n real_samples_idx = np.argwhere(self.data['real']).ravel()\n n_training_samples = real_samples_idx.shape[0]\n # Make random mixup samples\n n_samples = int(n_training_samples * data_ratio_produce)\n data_new = dict()\n for key in self.data:\n data_new[key] = []\n for i in range(n_samples):\n # Mixup ratio\n lam = np.random.beta(alpha, alpha)\n # Should not happen, but just in case to detect bugs\n if lam < 0 or lam > 1:\n raise ValueError('Lam not between 0 and 1')\n # Images to choose for mixup, choose only from real samples\n idxs = np.random.choice(real_samples_idx, 2, replace=False)\n idx0 = idxs[0]\n idx1 = idxs[1]\n\n # Make mixup data\n data_new['greyscale'].append(\n self.data['greyscale'][idx0] * lam + self.data['greyscale'][idx1] * (1 - lam))\n data_new['sample'].append(\n '_'.join([str(self.data['sample'][idx0]), str(lam), str(str(self.data['sample'][idx1])), str(1 - lam)]))\n data_new['lifetime'].append(\n self.data['lifetime'][idx0] * lam + self.data['lifetime'][idx1] * (1 - lam))\n data_new['magnification'].append(\n self.data['magnification'][idx0] * lam + self.data['magnification'][idx1] * (1 - lam))\n data_new['uncertainty'].append(\n self.data['uncertainty'][idx0] * lam + self.data['uncertainty'][idx1] * (1 - lam))\n data_new['image_id'].append(\n '_'.join(\n 
[str(self.data['image_id'][idx0]), str(lam), str(self.data['image_id'][idx1]), str(1 - lam)]))\n data_new['real'].append(0)\n\n # Add mixup to data\n for key in self.data.keys():\n if len(data_new[key]) != n_samples:\n raise ValueError('Mixup data for %s not of corect length' % key)\n # Do not use np concat as it is slow - filling an array is quicker\n # data_temp = np.empty((self.data[key].shape[0] + len(data_new[key]), *self.data[key].shape[1:]),\n # dtype=self.data[key].dtype)\n # for i in range(self.data[key].shape[0]):\n # data_temp[i] = self.data[key][i]\n # # Add new data after old one (array positions starting after positions of original data)\n # for i in range(len(data_new[key])):\n # data_temp[i+self.data[key].shape[0]] = data_new[key][i]\n # self.data[key] = data_temp\n self.data[key] = np.concatenate([self.data[key], data_new[key]])", "def replicate(self, nx, ny, nz):\n contents_list = []\n numreplicate = 0\n for i in range(nx):\n for j in range(ny):\n for k in range(nz):\n new_df = self.contents.copy()\n new_df['X'] += i * self.lengthx\n new_df['Y'] += j * self.lengthy\n new_df['Z'] += k * self.lengthz\n contents_list.append(new_df)\n numreplicate += 1\n self.numatom *= numreplicate\n self.contents = pd.concat(contents_list)", "def grow(self):\n self.mass *= 1.1", "def setUpBombs(self, event):\n pos = (event.widget.row * self.cols) + event.widget.col\n size = self.rows * self.cols\n \n #get a list random indexes in range to be mines\n mines = random.sample(range(size), self.numMines)\n if pos in mines:\n mines.remove(pos)\n temp = random.sample(range(size), 1)[0]\n while (temp == pos): temp = random.sample(range(size), 1)[0]\n mines.append(temp)\n \n #mark all mine squares as mines\n for mine in mines:\n targetRow = int(mine/self.cols)\n targetCol = mine % self.cols\n self.tiles[targetRow][targetCol].setMine()\n\n #calculate the number in each Square of the current game\n for row in self.tiles:\n for tile in row:\n if not tile.isMine():\n counter = 0\n for adjTile in self.getAdjacentTiles(tile.row,tile.col):\n if adjTile.isMine(): counter += 1\n tile.setCount(counter)\n \n self.minesArmed = True\n self.startTime = time.time()\n return 1", "def _divide_pattern(self, page_size):\n\n tmp_chart = self.floss_num_chart.copy()\n floss_size = (float(len(tmp_chart[0])), float(len(tmp_chart)))\n chart_size = (int(math.ceil(floss_size[0]/page_size[0])), int(math.ceil(floss_size[1]/page_size[1])))\n num_patterns = chart_size[0] * chart_size[1]\n divided_patterns = []\n\n #TEST CODE\n print(\"chart_size = \", chart_size)\n print(\"num_patterns = \", num_patterns)\n\n while len(tmp_chart):\n divided_rows = tmp_chart[:60]\n while len(divided_rows[0]):\n templist = []\n for row in divided_rows:\n templist.append(row[:60])\n del row[:60] # Note, decrease this number compared to number above to have repeated rows in table break\n divided_patterns.append(templist)\n del tmp_chart[:60]\n\n return divided_patterns", "def record_all(self):\n for i in self.recorders:\n t = i[0]\n r = i[1]\n self.add_row(t, r())", "def calc_multi_special(self):\r\n\r\n for special_type in self.specials_data_list:\r\n for num in range(special_type.get('num_to_apply', 0)):\r\n discounted_amount = self.basket_item['product_price'] - (self.basket_item['product_price'] *\r\n (1 - special_type['special_discount_rate']))\r\n\r\n try:\r\n self.processed_basket_item_list[num]['specials'].append({'special_code': special_type['special_code'],\r\n 'special_discount': '{0:.2f}'.format(discounted_amount)})\r\n except 
IndexError:\r\n self.processed_basket_item_list.append({'product_code': self.basket_item['product_code'],\r\n 'product_price': self.basket_item['product_price'],\r\n 'specials': [{'special_code': special_type['special_code'],\r\n 'special_discount': '{0:.2f}'.format(discounted_amount)}]})\r\n\r\n for item in self.processed_basket_item_list:\r\n yield item", "def sum_across_rows(grid):\n pass", "def redraw_row(cls, row):\n # TODO: Draw blocks in row", "def finalize(self):\n self.total_priors = np.sum(list(self.priors.values()))\n self.total_blocks = np.sum(list(self.nblocks.values()))\n self.total_fitness = np.sum(list(self.fitness.values()))\n self.blocks = BedTool.from_dataframe(self.df)", "def condense_blocks_to_aoi_series(subject_blocks, cumulative, difference, add_data):\n cumulative_series = initialize_feature_series(add_data)\n sum_keys = [e for e in list(cumulative_series.index) if e not in list(add_data.index)]\n examples_dataframe = pd.DataFrame()\n labels = []\n aoi_keys = list(DURATION_TARGET_DICT.keys())\n for block in subject_blocks:\n if block.shape[0] > 0:\n # Condense the block into a series of (summation) features\n feature_series = initialize_feature_series(add_data) # Depends on globally defined variables\n feature_series.loc[\"Game Time\"] = (parse(block[\"TimeStamp\"].iloc[-1]) - parse(block[\"TimeStamp\"].iloc[0])).total_seconds()\n for i,row in block.iterrows():\n if row[\"Event\"] == \"AOI\":\n aoi_key = _convert_target_category(row[\"Target\"])\n if aoi_key != \"UNIDENTIFIED AOI_TARGET\":\n feature_series[aoi_key] += row[\"Duration\"]\n feature_series[\"Fixations/Sec\"] += 1\n cumulative_series.loc[sum_keys] += feature_series.loc[sum_keys]\n cumulative_series.loc[\"Game Time\"] += feature_series.loc[\"Game Time\"]\n\n # Get the game score label for the block with either difference or last\n try:\n if difference:\n label = block.loc[:, \"CumulativeGameScore\"].iloc[-1] - block.loc[:, \"CumulativeGameScore\"].iloc[0]\n else:\n label = block.loc[:, \"CumulativeGameScore\"].iloc[-1]\n except IndexError:\n print(block)\n label = 0.0\n labels.append(label)\n\n # Calculate proportions and append to dataframe using either cumulative or block specific features\n fixation_sum = np.sum(feature_series.loc[aoi_keys])\n if cumulative and fixation_sum > 0:\n proportion_cumulative_series = cumulative_series.copy()\n proportion_cumulative_series.loc[aoi_keys] = cumulative_series.loc[aoi_keys] / np.sum(cumulative_series.loc[aoi_keys])\n proportion_cumulative_series.loc[\"Fixations/Sec\"] = proportion_cumulative_series.loc[\"Fixations/Sec\"] / proportion_cumulative_series.loc[\"Game Time\"]\n examples_dataframe = pd.concat([examples_dataframe, proportion_cumulative_series.copy()], axis=1)\n elif fixation_sum > 0:\n proportion_feature_series = feature_series.copy()\n proportion_feature_series.loc[aoi_keys] = feature_series.loc[aoi_keys] / np.sum(feature_series.loc[aoi_keys])\n proportion_feature_series.loc[\"Fixations/Sec\"] = proportion_feature_series.loc[\"Fixations/Sec\"] / proportion_feature_series.loc[\"Game Time\"]\n examples_dataframe = pd.concat([examples_dataframe, proportion_feature_series.copy()], axis=1)\n\n # Adding labels as a series in the dataframe\n examples_dataframe = examples_dataframe.T\n examples_dataframe.index = list(range(examples_dataframe.shape[0]))\n examples_dataframe[\"Label\"] = pd.Series(labels)\n return examples_dataframe", "def main():\n for i in range(5):\n check_row()\n put_beeeper_if_not()\n go_next_row()", "def collapse_wavefunction(self):\r\n 
#check normalised:\r\n n = sum(self.block_weights)\r\n if n != 1:\r\n #normalise here if required\r\n self.block_weights = [x/n for x in self.block_weights]\r\n #make choice\r\n choice = np.random.choice(self.block_opts, p = self.block_weights)\r\n #update self accordingly\r\n self.block_opts = [choice]\r\n self.block_weights = [1]\r\n self.collapsed = True\r\n self.propogate()\r\n self.arr = self.superposition()\r\n return", "def _create_artificial_doublets(self):\n\n # set numpy seed\n np.random.seed(RANDOM_STATE)\n\n # generate list of ints for each cell column idx\n cells = [i for i in range(self.num_cells)]\n\n # randomly sample artificial_fraction% of the cells to be used in doublet generation\n cells_for_artifical_doublets = np.random.choice(\n cells, self.num_cells_for_artifial_doublets, replace=False\n )\n\n # TODO: convert this data structure into a tuple of tuples of len 2 representing the two cell\n\n # verify selection was done without replacement\n assert len(cells_for_artifical_doublets) == len(set(cells_for_artifical_doublets))\n\n # fill in artificial doublet matrix\n extension_mtx = np.zeros(shape=(self.num_genes, self.num_artifial_doublets))\n for i in range(0, len(cells_for_artifical_doublets), 2):\n arr = (\n self.mtx.getcol(cells_for_artifical_doublets[i]).toarray()\n + self.mtx.getcol(cells_for_artifical_doublets[i + 1]).toarray()\n )\n arr.shape = (arr.shape[0],)\n extension_mtx[:, int(i / 2)] = arr\n\n # make the numpy array into sparse scipay matrix\n extension_mtx = scipy.sparse.csc_matrix(extension_mtx)\n\n # append extension_mtx to existing sparse matrix\n self.mtx = scipy.sparse.hstack((self.mtx, extension_mtx))", "def add_end_caps(self):\n\n # Far top dummy row (first row above array is NOT flipped if even number of rows)\n flip_dummy = (self.row_size + self.rbl[1]) % 2\n dummy_row_offset = self.bitcell_offset.scale(0, self.rbl[1] + flip_dummy) + self.bitcell_array_inst.ul()\n self.dummy_row_insts[1].place(offset=dummy_row_offset,\n mirror=\"MX\" if flip_dummy else \"R0\")\n\n # Far bottom dummy row (first row below array IS flipped)\n flip_dummy = (self.rbl[0] + 1) % 2\n dummy_row_offset = self.bitcell_offset.scale(0, -self.rbl[0] - 1 + flip_dummy) + self.unused_offset\n self.dummy_row_insts[0].place(offset=dummy_row_offset,\n mirror=\"MX\" if flip_dummy else \"R0\")\n # Far left dummy col\n # Shifted down by the number of left RBLs even if we aren't adding replica column to this bitcell array\n dummy_col_offset = self.bitcell_offset.scale(-len(self.left_rbl) - 1, -self.rbl[0] - 1) + self.unused_offset\n self.dummy_col_insts[0].place(offset=dummy_col_offset)\n\n # Far right dummy col\n # Shifted down by the number of left RBLs even if we aren't adding replica column to this bitcell array\n dummy_col_offset = self.bitcell_offset.scale(len(self.right_rbl), -self.rbl[0] - 1) + self.bitcell_array_inst.lr()\n self.dummy_col_insts[1].place(offset=dummy_col_offset)", "def reduce_possibilities_by_row(self):\n x = self.targetCell.x\n for i in range(1,10): #content\n for n in range(9): #y-coord adjacent cells\n neighbour_cell = self.puzzleGrid.grid[x][n]\n if self.targetCell != neighbour_cell:\n self.targetCell.row_neighbour_possibilities.append( neighbour_cell.possibilities)\n if str(i) == neighbour_cell.finalNumber:\n self.RemovePossiblityFromTargetCell(i)\n self.targetCell.row_neighbour_possibilities = flatten_list(self.targetCell.row_neighbour_possibilities)", "def extend_rows(self, matrix):\n row_count, column_count = matrix.size[:2]\n if column_count != 
self.column_count:\n raise ValueError\n self.row_count += row_count\n self.size = (self.row_count, self.column_count)\n base_row_count = self.row_count\n for key, value in matrix.data.items():\n row, column = key[:2]\n self.set((base_row_count + row, column), value)\n return self", "def __write_out_row__(self):\n column_pointer = spacing\n\n row_height = np.max([b.shape[0] for b in self.row_bitmaps])\n\n with open(\"active_weather.basic.exp\"+str(self.box_count)+\".box\",\"a\") as f:\n for char,b in zip(self.row_characters,self.row_bitmaps):\n assert isinstance(b, np.ndarray)\n height, width = b.shape\n\n # row first and then column\n additional_height = row_height-height\n\n self.training_page[self.row_pointer+additional_height:self.row_pointer + height+additional_height, column_pointer:column_pointer + width] = b\n a, b, c, d, e = char, column_pointer, self.height - (self.row_pointer + height + additional_height), column_pointer + width, self.height - (self.row_pointer+additional_height)\n f.write(str(a) + \" \" + str(b) + \" \" + str(c+1) + \" \" + str(d-1) + \" \" + str(e) + \" 0\\n\")\n\n column_pointer += width + spacing\n\n\n self.row_pointer += spacing + row_height\n self.column_pointer = spacing\n\n self.row_bitmaps = []\n self.row_characters = []", "def _fillBatches(self):\n\n batchRE = r\"\"\"\n B\n (?P<observebatch>\\d+?)\n (?P<startend>[SE])\n (?P<sequence>\\d+?)\n _SR\n (?:_(?P<extraInjections>\\d+?|\\w+?))?\n $\n \"\"\"\n batchRE = re.compile(batchRE, re.VERBOSE)\n # We canot infer batches unless we have runorder\n if 'Run Order' in self.sampleMetadata.keys():\n currentBatch = 0\n # Loop over samples in run order\n for index, row in self.sampleMetadata.sort_values(by='Run Order').iterrows():\n nameComponents = batchRE.search(row['Sample File Name'])\n if nameComponents:\n # Batch start\n if nameComponents.group('startend') == 'S':\n # New batch - increment batch no\n if nameComponents.group('sequence') == '1':\n currentBatch = currentBatch + 1\n\n # Don't include the dilution series or blanks\n if not ((row['AssayRole'] == AssayRole.LinearityReference) or (row['SampleType'] == SampleType.ProceduralBlank)):\n self.sampleMetadata.loc[index, 'Batch'] = currentBatch\n self.sampleMetadata.loc[index, 'Correction Batch'] = currentBatch\n\n else:\n warnings.warn('Unable to infer batches without run order, skipping.')\n return", "def DrawGrid(self, count):\n for i in range(0, self.width, self.incr):\n self.canvas.create_line(i, 100, i, 700, fill = \"#696969\", width = 1)\n for i in range(100, 800, 100):\n self.canvas.create_line(0, i, self.width, i, fill = \"#696969\", width = 1)\n self.canvas.create_rectangle(self.incr * 4, self.height - self.incr * 3.5,\n self.width - self.incr * 4, self.height, fill = \"black\", width = 3)\n for i in range(int(self.height - self.incr * 3.5), self.height, int(self.incr / 4)):\n self.canvas.create_line(self.incr * 4, i, self.width - self.incr * 4,\n i, fill = \"#696969\", width = 1)\n for i in range(self.incr * 4, self.width - self.incr * 4 + 1, int(self.incr / 4)):\n self.canvas.create_line(i, self.height - self.incr * 3.5, i, self.height,\n fill = \"#696969\", width = 1)", "def _grow_main(self, amt):\n self.ratio += amt\n self.ratio = min(self.max_ratio, self.ratio)", "def build_B_block(self):\n\n N = self.N # number of MPC steps\n row_list = [] # reocrd the every row in B_hat\n \n first_block = self.B\n zero = Variable(torch.zeros(self.num_input, self.num_output*(N-1)))\n zero = self.vari_gpu(zero)\n row= torch.cat([first_block, zero],1)\n 
row_list.append(row)\n \n for i in range(1, N):\n first_block = self.A.mm(first_block)\n row = torch.cat([first_block, row[:,:self.num_output*(N-1)]],1)\n row_list.append(row) \n \n return torch.cat(row_list,0)", "def createTrainingSet(id_row, full_id_row, usedUri, r, amount):\n \n nbIdRequired = amount * len(id_row)\n \n result = []\n while len(result) < nbIdRequired/2:\n newRow = dict()\n row = getRandomRow(id_row, r) \n \n if row[\"uri\"] in usedUri:\n continue\n #id_row.pop(row[\"uri\"])\n \n usedUri.add(row[\"uri\"])\n newRow[\"uri\"] = row[\"uri\"]\n newRow[\"question\"] = row[\"subject\"]+\" \"+row[\"content\"]\n newRow[\"bestanswer\"] = row[\"bestanswer\"]\n newRow[\"label\"] = \"0\"\n result.append(newRow)\n \n print str(len(result))+\"\\r\",\n \n while len(result) < nbIdRequired:\n \n newRow = dict()\n row = getRandomRow(id_row, r)\n\n if row[\"uri\"] in usedUri:\n continue\n id_row.pop(row[\"uri\"])\n \n usedUri.add(row[\"uri\"])\n newRow[\"uri\"] = row[\"uri\"]\n newRow[\"question\"] = row[\"subject\"]+\" \"+row[\"content\"]\n rRow = getRandomRow(full_id_row, r)\n while rRow == row:\n rRow = getRandomRow(full_id_row, r)\n \n newRow[\"bestanswer\"] = rRow[\"bestanswer\"]\n newRow[\"label\"] = \"1\" \n result.append(newRow)\n \n print str(len(result))+\"\\r\",\n \n header = [\"uri\", \"question\", \"bestanswer\", \"label\"]\n return (header,result)", "def add_new_block(self):\n old_block = self.curr_block\n self.curr_block = self.gen_new_block()\n add_edge(old_block, self.curr_block)", "def advance(self):\n count = [[0 for col in range(self.width+2)] for row in range(self.height+2)]\n for y in range(1, self.height+1):\n for x in range(1, self.width+1):\n if self.array[y][x]:\n count[y][x-1] += 1\n count[y][x+1] += 1\n count[y-1][x-1] += 1\n count[y-1][x] += 1\n count[y-1][x+1] += 1\n count[y+1][x-1] += 1\n count[y+1][x] += 1\n count[y+1][x+1] += 1\n for y in range(1, self.height+1):\n for x in range(1, self.width+1):\n if count[y][x] == 3:\n self.array[y][x] = 1\n elif count[y][x] == 2 and self.array[y][x]:\n self.array[y][x] = 1\n else:\n self.array[y][x] = 0\n self.array[1][1] = 1\n self.array[1][self.width] = 1\n self.array[self.height][self.width] = 1\n self.array[self.height][1] = 1", "def makeImage(self):\n\n for row in range(self.height):\n self.makeRow(row)\n self.window.update() # display a row of pixels", "def augment(self):\n for n in self.notes:\n n.augment()", "def _process_row(self, fields: Dict[str, Any], subrow=None) -> None:\n name = fields[\"benchmark\"]\n benchmark_group = fields[\"benchmark_group\"]\n stakeholders = fields[\"stakeholders\"]\n b = self._glb_idx.get(Benchmark.partial_key(name=name))\n if len(b) == 1:\n b = b[0]\n elif len(b) == 0:\n b = Benchmark(name, benchmark_group, stakeholders.split(\",\") if stakeholders else [])\n self._glb_idx.put(b.key(), b)\n else:\n self._add_issue(IType.ERROR,\n f\"There are {len(b)} instances of the Benchmark '{name}'\"+subrow_issue_message(subrow))\n return\n\n # Add range, if not repeated\n category = fields[\"category\"]\n if category not in b.ranges:\n b.ranges[category] = create_dictionary(\n data=dict(range=fields[\"range\"],\n unit=fields[\"unit\"],\n category=category,\n label=fields[\"label\"],\n description=fields[\"description\"])\n )\n else:\n self._add_issue(IType.WARNING,\n f\"Range with category '{category}' repeated\"+subrow_issue_message(subrow))", "def reallocate(self) -> None:\n self._pair_lines.clear()\n for add_pair in self._buffer:\n insert = False\n for line in self._pair_lines:\n pairs = 
line.get(add_pair[\"time\"].get_number())\n if pairs is not None and pairs[0][\"time\"].duration() == \\\n add_pair[\"time\"].duration() and self.is_merge(add_pair, pairs):\n pairs.append(add_pair)\n insert = True\n break\n else:\n free = True\n for pairs in line.values():\n if add_pair[\"time\"].intersect(pairs[0][\"time\"]):\n free = False\n break\n\n if free:\n line[add_pair[\"time\"].get_number()] = [add_pair]\n insert = True\n break\n\n if not insert:\n self._pair_lines.append({add_pair[\"time\"].get_number(): [add_pair]})", "def make_grid(self):\n for k in range(0, NUM + 1):\n self.create_line(k * UNIT, 0, k * UNIT, SIZE, width=THICKNESS)\n self.create_line(0, k * UNIT, SIZE, k * UNIT, width=THICKNESS)", "def update_nb_rows(self, nb_rows):\n nb_rows = int(nb_rows)\n self.game.n_row = nb_rows\n self.game.board.update_shape(n_row=nb_rows)\n self.game.draw_board()", "def __init__(\n self, channel_count: int, input_height_width: tuple, no_add: bool = False\n ):\n super().__init__()\n\n self.no_add: bool = no_add\n\n self.rm_block_1 = RMBlock(\n channel_count // 2,\n channel_count,\n input_height_width,\n 1,\n \"weight\",\n 1,\n )\n\n self.rm_block_2 = RMBlock(\n channel_count // 2,\n channel_count // 2,\n input_height_width,\n 3,\n \"weight\",\n 32,\n )\n\n self.rm_block_3 = RMBlock(\n channel_count,\n channel_count // 2,\n input_height_width,\n 1,\n \"weight\",\n 1,\n )", "def add_noise_batch(self,fingerprints):\r\n\t\tif self.trparas.complex:\r\n\t\t\tn,l,r = fingerprints.shape\r\n\t\t\tnp.random.seed()\r\n\r\n\t\t\t# noise_level = 0.002\r\n\t\t\tnoise_level = self.noise_level\r\n\t\t\t# add noise to real and imag part seperately with different noise level\r\n\t\t\tnoise_real = np.random.normal(0, noise_level, (n, l))\r\n\t\t\tnoise_imag = np.random.normal(0, noise_level, (n, l))\r\n\t\t\tnoise = np.stack([noise_real,noise_imag],axis=2) # n,l,r 4000,666,2\r\n\t\t\tfingerprints += noise\r\n\t\t\treturn fingerprints\r\n\r\n\t\telse:\t\r\n\t\t\tn,l = fingerprints.shape\r\n\t\t\tnp.random.seed()\r\n\t\t\tif self.noise_type == 'SNR':\r\n\t\t\t\tnoise = np.random.normal(0, 1, (n,l))\r\n\t\t\t\tsignal_Power = np.linalg.norm(fingerprints, axis=1)\r\n\t\t\t\tnoise_Power = np.linalg.norm(noise,axis=1)\r\n\t\t\t\tcst = signal_Power / (noise_Power * self.noise_level)\r\n\t\t\t\tnoise = noise * np.tile(cst.reshape(-1,1),(1,l))\r\n\t\t\telif self.noise_type == 'Standard':\r\n\t\t\t\tnoise = np.random.normal(0, self.noise_level, (n,l))\r\n\t\t\tfingerprints += noise\r\n\t\t\tif self.trparas.normalization == 'Noisy_input':\r\n\t\t\t\treturn fingerprints / np.tile(np.linalg.norm(fingerprints,axis=1).reshape(-1,1), (1,l))\r\n\t\t\telse:\r\n\t\t\t\treturn fingerprints", "def random_blocks():\n cells = []\n while len(cells) != 43:\n cell_to_add = (random.randint(0, 11), random.randint(0, 9))\n if cell_to_add not in cells:\n cells.append(cell_to_add)\n return cells", "def _reduce_cells(self):\n\n def reduce_cell(result, cell):\n # We assume only _sum aggergation\n # All measures should be prepared so we can to this\n for aggregate in self.aggregate_names:\n result[aggregate] = result.get(aggregate, 0) + \\\n cell.get(aggregate, 0)\n return result\n\n # 1. 
Map cells to reduced time path\n #\n reduced_map = defaultdict(list)\n reduced_len = len(self.time_levels)\n\n for key, cell in self.time_cells.items():\n time_path = key[0]\n reduced_path = time_path[0:reduced_len]\n\n reduced_key = (reduced_path, key[1])\n\n # self.logger.debug(\"reducing %s -> %s\" % (key, reduced_key))\n reduced_map[reduced_key].append(cell)\n\n self.browser.logger.debug(\"response cell count: %s reduced to: %s\" %\n (len(self.time_cells), len(reduced_map)))\n\n # 2. Reduce the cells\n #\n # See the function reduce_cell() above for aggregation:\n #\n reduced_cells = {}\n for key, cells in reduced_map.items():\n # self.browser.logger.debug(\"Reducing: %s -> %s\" % (key, cells))\n cell = reduce(reduce_cell, cells, {})\n\n reduced_cells[key] = cell\n\n self.time_cells = reduced_cells" ]
[ "0.5476102", "0.5351711", "0.53062737", "0.51498103", "0.5123177", "0.5094556", "0.5087477", "0.5058288", "0.5041181", "0.49976635", "0.4990649", "0.497327", "0.49467406", "0.4940926", "0.49374586", "0.4928902", "0.48958787", "0.48951703", "0.4883801", "0.48619694", "0.48530278", "0.48488024", "0.48431575", "0.4842288", "0.4820817", "0.48148495", "0.4809325", "0.47869337", "0.47810864", "0.47757396", "0.47749025", "0.47723415", "0.47626776", "0.47593075", "0.4746422", "0.47283474", "0.47164908", "0.47094098", "0.47091782", "0.47061798", "0.47053885", "0.47050083", "0.47008014", "0.46990317", "0.4698987", "0.46980563", "0.46891186", "0.46836433", "0.46782225", "0.46781242", "0.4657243", "0.46510133", "0.4647312", "0.46413362", "0.46403182", "0.4638945", "0.4630073", "0.46284273", "0.4618753", "0.46179974", "0.46168643", "0.4616297", "0.46119583", "0.46079284", "0.46020624", "0.4599375", "0.45938295", "0.45887658", "0.45874378", "0.4587162", "0.4579207", "0.45783296", "0.45712405", "0.45698938", "0.45686355", "0.45662302", "0.4564554", "0.45610183", "0.45609546", "0.4560771", "0.4555397", "0.45463955", "0.45454237", "0.453436", "0.4534008", "0.45288625", "0.4527576", "0.45239812", "0.45211178", "0.45203364", "0.4519867", "0.45174563", "0.4516819", "0.45042935", "0.45037252", "0.45022026", "0.4498406", "0.44964635", "0.44954857", "0.449309", "0.44860396" ]
0.0
-1
Keep track of how many times a type of layer has appeared and append _counter to their name to maintain module name uniqueness.
def add_counters(dic, arr):
    ret = []
    for el in arr:
        name = el[1:-1]
        num = dic.get(name, None)
        if num is not None:
            ret.append('[%s_%s]'%(name, dic[name]))
            dic[name] += 1
        else:
            ret.append(el)
    return ret
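A minimal usage sketch of the add_counters snippet above, assuming a tally dict keyed by layer-type names and a list of bracketed layer tokens; the 'conv'/'relu' names and their seed counts are made-up placeholders, not values taken from this row. Types already present in the tally get a numeric suffix and bump the counter, while unknown tokens pass through unchanged.

# Illustrative call only: 'conv' and 'relu' are hypothetical layer types
# whose counters start at 0; '[custom]' is not tracked and is left as-is.
counts = {'conv': 0, 'relu': 0}
layers = ['[conv]', '[relu]', '[conv]', '[custom]']
print(add_counters(counts, layers))  # ['[conv_0]', '[relu_0]', '[conv_1]', '[custom]']
print(counts)                        # {'conv': 2, 'relu': 1}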
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_layer_count(layer_type=''):\n _LAYER_TYPES[layer_type] += 1\n return _LAYER_TYPES[layer_type]", "def _auto_name(name, parent):\n if not is_ready(parent):\n parent._pywarm_auto_name_dict = {}\n def _hook(model, x):\n model._pywarm_auto_name_dict = {}\n parent._pywarm_forward_pre_hook = parent.register_forward_pre_hook(_hook)\n track = parent._pywarm_auto_name_dict\n if name not in track:\n track[name] = 0\n track[name] += 1\n return f'{name}_{track[name]}'", "def get_layer_uid(layer_name=''):\r\n if layer_name not in _LAYER_UIDS:\r\n _LAYER_UIDS[layer_name] = 1\r\n return 1\r\n else:\r\n _LAYER_UIDS[layer_name] += 1\r\n return _LAYER_UIDS[layer_name]", "def get_layer_uid(layer_name=''):\n if layer_name not in _LAYER_UIDS:\n _LAYER_UIDS[layer_name] = 1\n return 1\n else:\n _LAYER_UIDS[layer_name] += 1\n return _LAYER_UIDS[layer_name]", "def get_layer_uid(layer_name=''):\n if layer_name not in _LAYER_UIDS:\n _LAYER_UIDS[layer_name] = 1\n return 1\n else:\n _LAYER_UIDS[layer_name] += 1\n return _LAYER_UIDS[layer_name]", "def get_layer_uid(layer_name=''):\n if layer_name not in _LAYER_UIDS:\n _LAYER_UIDS[layer_name] = 1\n return 1\n else:\n _LAYER_UIDS[layer_name] += 1\n return _LAYER_UIDS[layer_name]", "def increment_counter(self) -> None:", "def counter(self) -> int:", "def counter(self) -> int:", "def get_gensym_counter():\n global counter\n return counter", "def get_unique_name(self, prefix):\n\t\tident = sum(t.startswith(prefix) for t, _ in self.layers.items()) + 1\n\t\treturn '%s_%d' % (prefix, ident)", "def add_count(self):\n self.count += 1", "def __disambiguate_layer_names(raw_layers: List) -> List:\n names_cnt = {}\n for raw_layer in raw_layers:\n name = raw_layer['Name']\n if name in names_cnt:\n names_cnt[name] += 1\n name += \"_\" + str(names_cnt[name])\n raw_layer['Name'] = name\n else:\n names_cnt[name] = 1\n return raw_layers", "def incInstCount(self):\n self.instCount += 1", "def _increase_counter(self, response):\n response_id = response.meta['__id']\n spot = self._request_registry[response_id]\n spot['counter'] = spot.get('counter', 0) + 1", "def make_advanced_counter_maker():\n\n global_count = 0\n\n def make_counter():\n count = 0\n def counts(action):\n nonlocal global_count, count\n if action == \"global-count\":\n global_count += 1\n return global_count\n elif action == \"count\":\n count += 1\n return count\n elif action == \"reset\":\n count = 0\n elif action == \"global-reset\":\n global_count = 0\n return counts\n\n return make_counter", "def counter(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n wrapper.count = wrapper.count + 1\n res = func(*args, **kwargs)\n print(\"{0} has been used: {1}x\".format(func.__name__, wrapper.count))\n return res\n wrapper.count = 0\n return wrapper", "def gensym(name = 'G'):\n global counter\n g = '{0}{1}'.format(name, counter)\n counter += 1\n return g", "def _offer_counter(self):\n self._offer_count += 1\n return hash((self.name, self._offer_count))", "def _get_layer_name(self, layer):\n label = '{}-{}'.format(layer.label, layer.rank)\n if label not in self.naming_map:\n self.naming_map[label] = {}\n\n if layer not in self.naming_map[label].keys():\n self.naming_map[label][layer] = len(self.naming_map[label]) + 1\n return '{}-{}'.format(label, self.naming_map[label][layer])", "def increment_count(self, word):\n pass", "def increment_requests_count(self, type):\n if type not in self._requests_count:\n self._requests_count[type] = 0\n self._requests_count[type] += 1", "def debug_counter(name, every=1):\r\n 
setattr(debug_counter, name, getattr(debug_counter, name, 0) + 1)\r\n n = getattr(debug_counter, name)\r\n if n % every == 0:\r\n print >>sys.stderr, \"debug_counter [%s]: %s\" % (name, n)", "def updateName(g):\n try:\n n = int(g.group(2))\n except TypeError:\n n = 0\n\n return \"%s-%d\" % (g.group(1), n + 1)", "def _create_counter(self):\n iter_cls_total = tf.get_variable(\n name='iter_cls_total',\n initializer=tf.constant(0, name='iter_cls_total'),\n use_resource=True,\n trainable=False)\n iter_cls_total_update = iter_cls_total.assign_add(1)\n return iter_cls_total, iter_cls_total_update", "def get_and_increment(name, counter=defaultdict(int)):\n n = counter[name]\n counter[name] = n + 1\n return n", "def _unique_key(self):\n key = f'factor_{self.counter}'\n self.counter += 1\n return key", "def name(self) -> str:\n return f\"{self._obj_name} count\"", "def increment_name(base, existing):\r\n if not base in existing:\r\n return base\r\n n = 1\r\n make_name = lambda: base + str(n)\r\n while make_name() in existing:\r\n n += 1\r\n return make_name()", "def increment(self, count_name):\n prop_name = 'count_' + count_name\n setattr(self, prop_name, getattr(self, prop_name, 0) + 1)", "def increment(cls):\n index = random.randint(0, SimpleCounterShard.NUM_SHARDS - 1)\n shard_name = 'shard' + str(index)\n counter = SimpleCounterShard.objects.get_or_create(pk=shard_name)[0]\n counter.count += 1\n counter.save()", "def module_count(self):\n return self._module_count", "def example_six():\n stats = defaultdict(int)\n stats['my_counter'] += 1", "def _generateUID(self):\n self._GlobalShadowIndex += 1\n return self._GlobalShadowIndex", "def get_counter(self, name: str, attributes: Attributes = None):\n key = _generate_key_name(name, attributes)\n if key not in self.map:\n self.map[key] = self._create_counter(name)\n return self.map[key]", "def _create_counter(self, name):\n otel_safe_name = _get_otel_safe_name(name)\n\n if _is_up_down_counter(name):\n counter = self.meter.create_up_down_counter(name=otel_safe_name)\n else:\n counter = self.meter.create_counter(name=otel_safe_name)\n\n logging.debug(\"Created %s as type: %s\", otel_safe_name, _type_as_str(counter))\n return counter", "def global_counter(fn):\n cntr = 0 \n\n def inner(*args, **kwargs):\n nonlocal cntr\n cntr = cntr + 1\n g_counters[fn.__name__] = cntr # counters is global\n return fn(*args, **kwargs)\n\n return inner", "def _increment_counter(metric: str):\n if metric not in db:\n db[metric] = 0\n db[metric] += 1", "def _fuse_counter(\n self, gridded_perms: Iterable[GriddedPerm]\n ) -> Counter[GriddedPerm]:\n fuse_counter: Counter[GriddedPerm] = collections.Counter()\n for gp in gridded_perms:\n fused_perm = self.fuse_gridded_perm(gp)\n fuse_counter[fused_perm] += 1\n return fuse_counter", "def inc_counter(self, *_, **__): # pylint: disable=arguments-differ\n pass", "def increment(self,counterName,step=1):\n if not self.counters.has_key(counterName): \n self.addCounter(counterName)\n # 026 was logged too often.\n # self.debug.mainLogger.debug(\"New counter created: %s\"%(counterName))\n self.counters[counterName]+=step", "def _setcounter():\n fname = os.path.basename(camera.status.lastfile)\n tname = fname.split('.')[0]\n i = len(tname)-1\n if i > -1:\n while tname[i].isdigit() and i>-1:\n i = i - 1\n nname = fname[:-4]\n bname = tname[:i+1]\n for file in glob.glob('/data/counters/'+bname+'[0-9][0-9][0-9].cntr'):\n os.remove(file)\n for file in glob.glob('/data/counters/'+bname+'[0-9][0-9][0-9][0-9].cntr'):\n os.remove(file)\n f = 
open('/data/counters/'+nname+'cntr','w')\n f.close()", "def updateCounter(self):\n self.counter = self.counter + 1\n self.syncDataStructure[\"+\"][str(self.instanceID)] = self.counter", "def increment_library_count(self, purchased_ebooks):\n\t\tself.library_count += purchased_ebooks", "def incr_counter(cls, cname):\n if not cname in cls.__counters: cls.__counters[cname] = -1\n cls.__counters[cname] += 1\n return cls.__counters[cname]", "def reset_gensym_counter(val = 0, verbose = False):\n global counter\n if isinstance(val, int):\n counter = val\n else:\n counter = 0\n if verbose:\n print \"counter = {0}\".format(counter)", "def set_count(c):\n global count\n count = c", "def id_counter(self):\n self._id_counter += 1\n return self._id_counter", "def add_state_count(tweet, tweet_counter):\n if tweet.rows[0].value[1] in tweet_counter:\n tweet_counter[tweet.rows[0].value[1]] += 1\n else:\n tweet_counter[tweet.rows[0].value[1]] = 1\n\n return tweet_counter", "def updateGACount(self):\n self.ga_count += 1", "def inc( self ):\n self.count += 1", "def increase_count(self, number=1):\n self.count += number", "def name(prefix = 'tmp'):\n nameidx = context.curr().setdefault('NAME_INDEX', {})\n idx = nameidx.setdefault(prefix, 0)\n name = '_%s_%d' % (prefix, idx)\n nameidx[prefix] = idx + 1\n return name", "def __init__(self):\n self.num_counts = {}", "def obstruction_fuse_counter(self) -> Counter[GriddedPerm]:\n if self._obstruction_fuse_counter is not None:\n return self._obstruction_fuse_counter\n fuse_counter = self._fuse_counter(self._tiling.obstructions)\n self._obstruction_fuse_counter = fuse_counter\n return self._obstruction_fuse_counter", "def _inc_counter(self) -> None:\n self._state_storage.increment_counter()", "def _increment_count(self, key):\n\n if not self._count.has_key(key):\n self._count[key] = 0\n\n self._count[key] += 1", "def _increment_count(self, key):\n\n if not self._count.has_key(key):\n self._count[key] = 0\n\n self._count[key] += 1", "def pycount(self):\n\n self.count += 1\n return self.count", "def update_count(self):\n pass", "def generate_name(self, name):\n return \"{}/{}.{}\".format(self.name, self._layer_counter, name)", "def update_count(self):\n pass # Do nothing", "def counter(name):\n count_name = list(name)\n counter = 0\n for letter in count_name:\n counter += 1\n\n print(f\"There are {counter} letter in the name {name}.\")\n print(f\"\\tAnd btw... 
{name} backwards is {name[::-1].lower()}.\")", "def increment_count(self, data, suffix=''):\n # Just to show data coming in...\n assert data['hello'] == 'world'\n\n self.count += 1\n return {\"count\": self.count}", "def inc(self):\n \n self.count += 1", "def generate_group_id(self):\n if not hasattr(self.space, '_group_ctr'):\n self.space._group_ctr = 999\n self.space._group_ctr += 1\n return self.space._group_ctr", "def get_name(self, op_type):\n\n def _gen(t):\n t = t.lower()\n if t not in self.local_op_namespace:\n self.local_op_namespace[t] = START_IDX\n suffix = \"\"\n else:\n self.local_op_namespace[t] += 1\n suffix = f\"{self.local_op_namespace[t] - 1}\"\n\n return f\"{self._get_name(t)}{suffix}\"\n\n new_name = _gen(op_type)\n while new_name in self.local_var_namespace:\n new_name = _gen(op_type)\n\n self.local_var_namespace.add(new_name)\n return new_name", "def make_count_change():\n \"*** YOUR CODE HERE ***\"", "def name_layer_factory(num=0, name_prefix=\"\", name_suffix=\"\"):\n def name_layer_fn(layer):\n return '{}{}{}-{}'.format(name_prefix, layer, name_suffix, num)\n \n return name_layer_fn", "def __init__(self):\n self.counter = 0", "def new_id(self):\n self._tmp_id_counter += 1\n return '%.' + str(self._tmp_id_counter)", "def count():", "def increment(name, count=1):\n # check the counter is tracked\n if name not in _counter_cache:\n track_counter(name)\n _counter_cache.add(name)\n print 'increment: %s' % name\n memcache.incr(name, delta=count, initial_value=0, namespace=NAMESPACE)", "def model_number_layers(model):\n for idx, layer in enumerate(model.layers):\n print(idx, layer.name)", "def counter(fn, counters):\r\n\r\n cnt = 0\r\n def inner(*args, **kwargs):\r\n nonlocal cnt\r\n cnt += 1\r\n counters[fn.__name__] = cnt\r\n return fn(*args, **kwargs)\r\n return inner", "def next_num():\r\n CHModuleFactory.num += 1\r\n return CHModuleFactory.num", "def next_num():\r\n CHModuleFactory.num += 1\r\n return CHModuleFactory.num", "def create_collection_counter(self):\n c = collections.Counter()\n i=1\n print type(self.read_history_file)\n for element in self.read_history_file():\n print i\n print element\n c.update(element.split())\n i+=1\n return c", "def __init__(self):\n self.cnt = {}", "def count_me(fnc):\n def increment(self, *args, **kwargs):\n type(self)._count += 1\n return fnc(self, *args, **kwargs)\n return increment", "def get_sig_count_name(self, orig):\n return orig", "def obstruction_fuse_counter(self) -> Counter[GriddedPerm]:\n if self._obstruction_fuse_counter is not None:\n return self._obstruction_fuse_counter\n obs = (ob for ob in self._tiling.obstructions if not self.is_crossing_len2(ob))\n fuse_counter = self._fuse_counter(obs)\n self._obstruction_fuse_counter = fuse_counter\n return self._obstruction_fuse_counter", "def incr_no_of_attacks(self):\n\t\tself.__anom += 1\n\t\tself.__anom_lbl.setText(str(self.__anom))", "def generate_id(cls):\n cls._index += 1\n return 'fp_%s' % cls._index", "def _create_counter(self):\n iter_cotrain = tf.get_variable(\n name='iter_cotrain',\n initializer=tf.constant(0, name='iter_cotrain'),\n use_resource=True,\n trainable=False)\n iter_cotrain_update = iter_cotrain.assign_add(1)\n return iter_cotrain, iter_cotrain_update", "def collect_type_freqs(self):\n tf.logging.info('collecting type_freq, mapping type_id -> # instances')\n self.type_freq = collections.defaultdict(int)\n for f in self.frames():\n if self.instance_of in f:\n self.type_freq[f[self.instance_of].id] += 1\n # not sure why this is needed - do some things 
have Null id's?\n del self.type_freq[None]", "def get_name(self, op_type):\n\n def _gen(t):\n t = t.lower()\n if t not in global_op_namespace:\n global_op_namespace[t] = START_IDX\n suffix = \"\"\n else:\n global_op_namespace[t] += 1\n suffix = f\"{global_op_namespace[t] - 1}\"\n\n return f\"{self._get_name(t)}{suffix}\"\n\n new_name = _gen(op_type)\n while new_name in global_var_namespace:\n new_name = _gen(op_type)\n\n global_var_namespace.add(new_name)\n return new_name", "def CountGlobal():\r\n return _hiew.HiewGate_Names_CountGlobal()", "def gen_counters(vv,names,basenm,sites,is_prefix):\n counter_names=[]\n counter_lists=[]\n for k in range(len(sites)):\n for kk in range(len(sites[k])):\n if is_prefix[k]:\n tname=\"%s_%s_tot\" % (sites[k][kk], basenm)\n else:\n tname=\"%s_%s_tot\" % (basenm, sites[k][kk])\n tlist=[]\n for (vec,vname) in zip(vv,names):\n if (vec[k] == kk ): tlist.append(vname) \n counter_names.append(tname)\n counter_lists.append(tlist)\n # and a last one for all \n allname=\"%s_all\" % basenm\n tlist=[]\n for kk in range(len(sites[-1])):\n if is_prefix[-1]:\n tname=\"%s_%s_tot\" % (sites[-1][kk], basenm)\n else:\n tname=\"%s_%s_tot\" % (basenm, sites[-1][kk])\n tlist.append(tname)\n counter_names.append(allname)\n counter_lists.append(tlist) \n return counter_names, counter_lists", "def _increment_file_counter(self):\n self._add_to_file_counter(1)", "def example_five():\n stats = {}\n key = 'my_customer'\n if key not in stats:\n stats[key] = 0\n stats[key] += 1", "def increment_name(name: str) -> str:\n\n match = _number_suffix.search(name)\n if match:\n number_str = match.group(1)\n next_number = int(number_str) + 1\n return f'{name[:-len(number_str)]}{next_number}'\n else:\n return f'{name}-1'", "def update_global_identifiers(self, universe_test):\n self.cellNum += 1\n self.surfaceNum += 1\n self.materialNum += 1\n if universe_test:\n self.universe += 1", "def remove_duplicates(self):\n names: Dict[str, int] = dict()\n for step in self.Sequence:\n if isinstance(step, Repeater):\n continue\n name = step.Name\n if name != '':\n if name not in names:\n names[name] = 1\n else:\n names[name] += 1\n for step in reversed(self.Sequence):\n if isinstance(step, Repeater):\n continue\n name = step.Name\n if name and (names[name] > 1):\n names[name] -= 1\n step.Name = name + \"_%i\" % names[name]", "def _collect_counts(self):\n for t in self.system.keys():\n if t in self.gold:\n self.tp += 1\n else:\n self.fp += 1\n for t in self.gold.keys():\n if t not in self.system:\n self.fn += 1", "def _name_increment_revision(name):\n revre = r\"^(.*?)([0-9]+)$\"\n m = re.search(revre, name)\n if m:\n name = m.group(1) + str(int(m.group(2)) + 1)\n else:\n name = name + \" (copy)\"\n return name", "def loop_counter(self, name = \"i\", start_val = syntax_helpers.zero_i64):\n\n start_val = syntax_helpers.wrap_if_constant(start_val)\n counter_type = start_val.type\n\n counter_before = self.assign_temp(start_val, name + \"_before\")\n counter = self.fresh_var(counter_type, name)\n counter_after = self.fresh_var(counter_type, name + \"_after\")\n merge = {counter.name:(counter_before, counter_after)}\n return counter, counter_after, merge", "def count_naked_names(graph: BELGraph) -> typing.Counter[str]:\n return Counter(_naked_names_iter(graph))", "def __init__(self):\n self.counts = Counter()", "def generate_name(obj):\n global NAME_COUNTERS\n\n calling_name = obj.__name__\n\n NAME_COUNTERS[calling_name] += 1\n return '{0}_{1}'.format(calling_name, NAME_COUNTERS[calling_name])" ]
[ "0.6607571", "0.6507959", "0.6308612", "0.630668", "0.630668", "0.630668", "0.6284107", "0.6157651", "0.6157651", "0.6146809", "0.61214304", "0.6117625", "0.60797423", "0.59838635", "0.5966383", "0.5896614", "0.58724344", "0.58569306", "0.5815706", "0.581127", "0.5774243", "0.5753227", "0.5743522", "0.5706571", "0.56692827", "0.56536114", "0.56333566", "0.56115603", "0.56084603", "0.55867773", "0.5583976", "0.55698866", "0.5568681", "0.5553162", "0.5539926", "0.5536917", "0.5530819", "0.55102557", "0.5487364", "0.547339", "0.5463076", "0.54611516", "0.5455352", "0.54521084", "0.5446217", "0.54052883", "0.5400268", "0.5390028", "0.5387824", "0.53775746", "0.5369565", "0.5369103", "0.5368043", "0.53666335", "0.5360936", "0.5353266", "0.53489786", "0.53489786", "0.5345362", "0.5344465", "0.53426343", "0.5331912", "0.5324114", "0.53225297", "0.5314907", "0.53056055", "0.5305438", "0.52986073", "0.5291747", "0.5290932", "0.5282995", "0.52826375", "0.52787495", "0.5276372", "0.52744067", "0.526355", "0.526355", "0.5261767", "0.5261352", "0.52586806", "0.5256971", "0.5256939", "0.52566504", "0.52504116", "0.5250206", "0.52461874", "0.524174", "0.52407575", "0.52304006", "0.5227422", "0.5227405", "0.5226757", "0.52219737", "0.521744", "0.5210953", "0.52059823", "0.5200876", "0.51981354", "0.51967055", "0.5193351" ]
0.5392121
47
Check if model uses submodules
def defined_submodule(arr):
    return any([el.endswith('_module]') for el in arr])
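For context on the one-line check above, a small illustrative call; the bracketed layer tokens are hypothetical stand-ins, assuming the same '[name]' / '[name_module]' naming convention implied by the endswith test.

# Hypothetical token lists; only the first contains a '_module]' entry.
with_sub = ['[conv]', '[encoder_module]', '[relu]']
without_sub = ['[conv]', '[relu]']
print(defined_submodule(with_sub))     # True
print(defined_submodule(without_sub))  # False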
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_submodule(submodule):\n is_correct_subclass = issubclass(submodule, AnalysisModule)\n # Ensure submodule is defined within the package we are inspecting (and not 'base')\n is_correct_module = package.__name__ in submodule.__module__\n return is_correct_subclass and is_correct_module", "def is_root_mod(self):\n return all([not b.parents for b in self.input_bindings.bindings.values()])", "def check_submodules():\n if not os.path.exists('.git'):\n return\n with open('.gitmodules') as f:\n for l in f:\n if 'path' in l:\n p = l.split('=')[-1].strip()\n if not os.path.exists(p):\n raise ValueError('Submodule %s missing' % p)\n\n\n proc = subprocess.Popen(['git', 'submodule', 'status'],\n stdout=subprocess.PIPE)\n status, _ = proc.communicate()\n status = status.decode(\"ascii\", \"replace\")\n for line in status.splitlines():\n if line.startswith('-') or line.startswith('+'):\n raise ValueError('Submodule not clean: %s' % line)", "def is_model(self):\n return self.model_name() is not None", "def __contains__(self, name):\n return name in self._modules", "def is_submodule(self, submodule_basename):\n\n return submodule_basename in self._submodule_basename_to_node", "def use_ppmodel(self):\n return hasattr(self, \"ppmodel\")", "def test_model_can_import():\n assert hasattr(model, \"SEIR_model_publish_w_risk\")\n assert hasattr(model, \"compute_R0\")", "def hasModel(self, model):\n if model in self.models:\n return S_OK()\n else:\n return S_ERROR(\"Model %s is not defined, use any of %s\" % (model, self.models.keys()))", "def isModellingFramework(*args):\n return _libsbml.SBO_isModellingFramework(*args)", "def requires_model_loading(self):\n return self.requires_loaded_models", "def models_compatible(model_a: ModuleModel, model_b: ModuleModel) -> bool:\n if model_a == model_b:\n return True\n return model_b.value in _load_v2_module_def(model_a)['compatibleWith']", "def __contains__(self, name):\n return (self.model_dir / (str(name) + '.pkl')).exists()", "def is_module(obj):\n return type(obj) is types.ModuleType", "def SBO_isModellingFramework(*args):\n return _libsbml.SBO_isModellingFramework(*args)", "def source_repo_is_related():\n return True", "def test_get_leaf_modules(request):\n filename = request.module.__file__\n qalgebra_dir = os.path.join(\n os.path.split(filename)[0], '..', 'src', 'qalgebra'\n )\n modules = get_leaf_modules(qalgebra_dir)\n assert \"qalgebra.core.abstract_algebra\" in modules", "def isModule(self, name):\n return os.path.isfile(self.modulePath(name))", "def supports_ordinary_make_module_imports(self):\n return True", "def is_surjective(self):\n # Testing equality of free modules over PIDs is unreliable\n # see Trac #11579 for explanation and status\n # We test if image equals codomain with two inclusions\n # reverse inclusion of below is trivially true\n return self.codomain().is_submodule(self.image())", "def has_module(self, name: str) -> bool:\n return name in self.modules", "def check_model_exists(class_name):\n if path.exists(settings.get('FALAFEL_DIR') + settings.get('MODELS_DIR') + '/' + class_name + '.py'):\n return True\n else:\n return False", "def __is_model_permission(self, name: str) -> bool:\n permission_name, model_name = name.split('_')\n\n return permission_name in PERMISSIONS_PREFIXES and model_name in self.models_names", "def hasMetaModel(self, metaModel):\r\n if self.getClass() == metaModel: return 1\t\t\t\t# if the meta model is the actual class\r\n for mmodels in self.mergedASG:\t\t\t\t\t# else check the merged meta-models\r\n if 
mmodels.getClass() == metaModel: return 1\r\n return 0", "def _validate_mod(self, mod: Modifier):\r\n return not mod.name in self.mods", "def is_distributed_model(model):\n try:\n get_tenant_field(model)\n return True\n except ValueError:\n return False", "def is_model(model: Model) -> bool:\n for key in model:\n if not is_variable(key):\n return False\n return True", "def has_module_perms(self, app_label):\n\t\treturn True", "def is_module_object(self, obj):\n if not isinstance(obj, BaseException):\n try:\n c = obj.__class__\n source_file = inspect.getsourcefile(c)\n except (TypeError, AttributeError):\n pass\n else:\n if source_file and source_file.startswith(self.path):\n return True\n\n return False", "def _sufficient_deps(cls, deps):\n if cls.MODEL_PACKAGE is None:\n return True\n else:\n for d in deps.conda:\n if cls.MODEL_PACKAGE in d:\n return True\n for d in deps.pip:\n if cls.MODEL_PACKAGE in d:\n return True\n return False", "def _validate_submodels(self, type_promax, type_ms):\n return type_promax in self._submodels and \\\n type_ms in self._submodels and \\\n len(self._submodels[type_promax]) > 0 and \\\n len(self._submodels[type_promax]) == len(self._submodels[type_ms])", "def model_has_permissions(obj: models.Model) -> bool:\n additional_labels = [\"flow.Storage\"]\n return hasattr(obj, \"permission_group\") or obj._meta.label in additional_labels", "def has_module_perms(self, app_label):\n return True", "def has_module_perms(self, app_label):\n return True", "def is_object_identification_module(self):\n return True", "def __bool__(self):\n for root, products in self.rel_paths():\n if products:\n return True\n return False", "def has_module_perms(self, app_label) -> bool:\n return True", "def test_inherit(self):\n self.assertTrue(issubclass(User, BaseModel))", "def __contains__(self, module):\n\n for enabled_module in self.modules:\n if enabled_module.ID == module:\n return True\n return False", "def __gitSubmodulesStatus(self):\n self.vcs.gitSubmoduleStatus(self.project.getProjectPath())", "def has_module_perms(self, users):\r\n return True", "def __validate_model(loaded_model, dummy_model):\n loaded_modules = [loaded_model]\n dummy_modules = [dummy_model]\n\n valid = torch.tensor(1, dtype=torch.long)\n try:\n while len(loaded_modules) > 0:\n loaded_module = loaded_modules.pop(0)\n dummy_module = dummy_modules.pop(0)\n\n # Assert modules have the same number of parameters\n loaded_params = [param for param in loaded_module.parameters()]\n dummy_params = [param for param in dummy_module.parameters()]\n assert len(loaded_params) == len(dummy_params)\n\n for i, param in enumerate(loaded_params):\n assert param.size() == dummy_params[i].size()\n\n # Assert that modules have the same number of sub-modules\n loaded_module_modules = [mod for mod in loaded_module.modules()][1:]\n dummy_module_modules = [mod for mod in dummy_module.modules()][1:]\n\n loaded_modules.extend(loaded_module_modules)\n dummy_modules.extend(dummy_module_modules)\n assert len(loaded_modules) == len(dummy_modules)\n except AssertionError:\n valid = torch.tensor(0, dtype=torch.long)\n return valid", "def checkMetamodelLevel(cls):\n for mmd in cls.metamodelDependencies():\n mmd.check()", "def test_inheritence(self):\n self.assertTrue(issubclass(City, BaseModel))", "def test_issubclass(self):\n self.assertTrue(issubclass(self.rev.__class__, BaseModel), True)", "def is_model_type(obj: Any) -> bool: # pragma: no cover\n pass", "def checkModel(self, model):\n # TODO", "def is_model(thing):\n return 
(isinstance(thing, sqlalchemy.ext.declarative.api.DeclarativeMeta)\n and hasattr(thing, '__table__')) # disard sqlalchemy.ext.declarative.declarative_base()", "def is_package(self, fullname):\n return hasattr(self.__get_module(fullname), \"__path__\")", "def has_module_perms(app_label):\n # Simplest possible answer: Yes, always\n return True", "def is_for(self, model_type: str, version: Version):\n return model_type == self.model_type and version in self.version_spec", "def has_module_perms(self, user_obj, app_label):\n for perm in self.get_all_permissions(user_obj):\n if perm[:perm.index('.')] == app_label:\n return True\n return False", "def test_import_allows_multiple_modules_failure(self):\n # Deliberately using modules that will already be imported to avoid side effects.\n feature = LazyImportTester([\"site\", \"sys\", \"_qiskit_module_does_not_exist_\"])\n with mock_availability_test(feature) as check:\n check.assert_not_called()\n self.assertFalse(feature)\n check.assert_called_once()", "def test_non_leaf_module_names(self):\n class Net(torch.nn.Module):\n \"\"\"\n Model using multiply as functional and module at different depths\n \"\"\"\n def __init__(self):\n super().__init__()\n self.layer = HierarchicalMultiplyModule()\n\n def forward(self, x):\n return self.layer(x)\n\n model = Net()\n dummy_input = torch.randn(10, 1, 3)\n onnx_path = './data/MyModel.onnx'\n\n torch.onnx.export(model, dummy_input, onnx_path)\n onnx_utils.OnnxSaver.set_node_names(onnx_path, model, dummy_input)\n\n onnx_model = onnx.load(onnx_path)\n onnx.checker.check_model(onnx_model)\n self.check_onnx_node_name_uniqueness(onnx_model)\n\n expected_names = [\n # names compatible with torch 1.9.1 version (should be removed in the future)\n 'layer.mul1.mul',\n 'layer.mul1.Mul_7',\n 'layer.mul2.mul',\n 'layer.mul2.Mul_15',\n 'layer.Mul_18',\n \n # names compatible with torch 1.13.1 version \n '/layer/mul1/Mul',\n '/layer/mul2/Mul',\n '/layer/Mul'\n ]\n for node in onnx_model.graph.node:\n assert 'Constant' in node.name or node.name in expected_names\n\n if os.path.exists(onnx_path):\n os.remove(onnx_path)", "def try_models(self):\n result = os.system(\"python try_models.py\")\n return result == 0", "def has_module_permission(self, request):\n if request.user.groups.filter(name__in=['Sellers', 'Supporters']).exists():\n return True\n return False", "def is_mocking():\n from . 
import core\n return len(core.PATCHERS.targets) > 0", "def has_module_perms(self, user_obj, app_label):\n if not user_obj.is_active:\n return False\n for perm in self.get_all_permissions(user_obj):\n if perm[:perm.index('.')] == app_label:\n return True\n return False", "def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_PROJECT", "def test_inheritance(self):\n self.assertTrue(issubclass(type(self.user_1), BaseModel))", "def test_import_allows_multiple_modules_successful(self):\n # Deliberately using modules that will already be imported to avoid side effects.\n feature = LazyImportTester([\"site\", \"sys\"])\n with mock_availability_test(feature) as check:\n check.assert_not_called()\n self.assertTrue(feature)\n check.assert_called_once()", "def allow_relation(self, obj1, obj2, **hints):\n\n result = False\n if not (obj1._meta.model_name in GeoSpatialRouting.includedModels and \n obj2._meta.model_name in GeoSpatialRouting.includedModels) :\n result = None\n return result", "def test_valid_model(self):\n model_cls = ModelContainer(APP_LABEL, TestModel2._meta.db_table).model_cls\n self.assertTrue(model_cls.__class__.__name__ is models.Model.__class__.__name__)", "def ismodule(object):\r\n return isinstance(object, types.ModuleType)", "def is_package(self, fullmodname):\n submodname, is_package, relpath = self._get_info(fullmodname)\n return is_package", "def check_module(name):\n return importlib.util.find_spec(name) is not None", "def test_issubclass(self):\n self.assertTrue(issubclass(User()), BaseModel)", "def supports(self, model: str) -> bool:\n model = model.lower().replace(\"#slash#\", \"/\")\n\n if self._model.lower() == model:\n return True\n\n # @todo implement Regex/Json path\n for alias in self.aliases:\n if alias.lower() == model:\n return True\n\n # Also try to match model ID between parentheses.\n if match := re.search(r\"\\(([^\\(\\)]+)\\)$\", model):\n return self.supports(match.group(1))\n\n return False", "def is_model(model: Model) -> bool:\r\n for key in model:\r\n if not (is_variable(key) and type(model[key]) is bool):\r\n return False\r\n return True", "def _should_ignore_module(cls, module_name):\n # exclude test modules for now to avoid spurious failures\n # TODO(jelle): enable for test modules too\n return module_name.split(\".\")[-1].startswith(\"test\")", "def user_deletable(self):\n source_module_id = getattr(self, 'source_module_id', False)\n if not source_module_id:\n return True\n\n root_module_id = getattr(self, 'root_module_id', False)\n if not root_module_id:\n return True\n\n app = self.get_app()\n parent_module = app.get_module_by_unique_id(root_module_id)\n\n if parent_module.module_type == 'shadow':\n return False\n\n return True", "def has_module(name):\n _refresh_cache()\n return name in _modules", "def has_module_perms(self, app_label):\n # Simplest possible answer: Yes, always\n return True", "def has_module_perms(self, app_label):\n # Simplest possible answer: Yes, always\n return True", "def has_module_perms(self, app_label):\n # Simplest possible answer: Yes, always\n return True", "def test_module_doc(self):\n self.assertTrue(len(base_model.__doc__) > 0)", "def has_module_perms(self, user_obj, app_label):\n if not user_obj.is_active:\n return False\n for perm in self.get_all_permissions(user_obj):\n if perm.partition(':')[0] == app_label:\n return True\n return False", "def test_multiple_base_models(engine):\n with pytest.raises(ValueError):\n bloop.model.BaseModel(engine)", "def _user_has_module_perms(user, 
app_label):\n for backend in auth.get_backends():\n if not hasattr(backend, 'has_module_perms'):\n continue\n try:\n if backend.has_module_perms(user, app_label):\n return True\n except PermissionDenied:\n return False\n return False", "def _user_has_module_perms(user, app_label):\n for backend in auth.get_backends():\n if not hasattr(backend, 'has_module_perms'):\n continue\n try:\n if backend.has_module_perms(user, app_label):\n return True\n except PermissionDenied:\n return False\n return False", "def is_core(self):\n #core_stems = (\n # 'Algebra','Geometry','Precalculus','Calculus',\n # 'Biology','Chemistry','Physics','Living Environment','Global Environment','Scientific Literacy',\n # 'History','Economics',\n # 'Literature','Language','Writing','AP','Sem',\n # 'Korean',\n # )\n #core = False\n #for stem in core_stems:\n # if stem in self.title:\n # core = True\n \n return self.level>0", "def load_all(hub: pop.hub.Hub, subname: str) -> bool:\n if hasattr(hub, subname):\n sub = getattr(hub, subname)\n sub._load_all()\n return True\n else:\n return False", "def _is_opinion_mod(token: tokens.Token) -> bool:\n is_mod = token.dep_ in {\"amod\", \"advmod\"}\n is_op = token.text.lower() in _OPINION_WORDS\n return is_mod and is_op", "def need_ovo(model_name):\n return (model_name == 'logistic') or (model_name == 'sgd')", "def _find_module(model, mod_name):\n for name, module in model.named_modules():\n if name == mod_name:\n return module\n return None", "def _calculate_nested_inclusion(self, method: str, mode: str | None, model: Self) -> bool:\n # EMPTY RULE: If there is no value in the model it should NOT be INCLUDED.\n if not model:\n return False\n\n # METHOD RULE: When HTTP method is DELETE or GET nested objects should NOT be INCLUDED.\n if method in ['DELETE', 'GET']:\n return False\n\n # STAGED RULE: Any nested object provided by developer should be INCLUDED.\n if model._staged is True:\n return True\n\n # POST RULE: When method is POST all nested object should be INCLUDED.\n if method == 'POST':\n return True\n\n # Current Object Restrictions:\n # * The method is PUT\n # * The nested object was NOT added via the stage_xxx method\n # * The nested object contains a ID or Name Field\n # * The nested object could either have been set during initializing the object or fetched.\n\n # CM and TI endpoint behaves differently. Start with rules based on the parent type,\n # then add more specific rules.\n\n if self._cm_type is True:\n #\n # CM PARENT TYPES\n #\n\n # Nested Types:\n # * CM Types (Artifact, Artifact Type, Case, Note, Task, Workflow Event/Template)\n # * Attributes\n # * Group (currently read-only)\n # * Tags\n # * Users\n\n # Coverage:\n # * Downloaded from API (e.g., case.get(id=123))\n # * Added on instantiation\n # * Added with stage_xxx() method\n\n if model._cm_type is True:\n # RULE: Short-Circuit Nested CM Types\n # Nested CM types are updated through their direct endpoints and should\n # never be INCLUDED when updating the parent. For new nested CM types\n # added with the stage_xxx() method, the STAGED RULE would trigger\n # before this rule.\n return False\n\n if model._shared_type is True:\n # RULE: Nested Tags\n # Nested tags on a parent CM type behave as REPLACE mode and need to be\n # INCLUDED to prevent being removed.\n return True\n\n # RULE: Nested Attributes w/ APPEND mode\n # Nested attributes on a parent CM type use the mode feature. 
When the mode\n # is APPEND and has been UPDATED, then the attributes should be INCLUDED.\n # For new nested objects added with the stage_xxx() method, the STAGED\n # RULE would trigger first.\n # A secondary PATTERN consideration is that attributes can be immediately\n # updated using the attribute.updated() method. While this isn't as\n # efficient as updating them all in one request, it's is a simpler\n # development design pattern.\n\n if mode == 'replace':\n # RULE: Nested Attributes w/ REPLACE mode\n # Nested attributes on a parent CM type use the mode feature. When the mode\n # is REPLACE the attributes should be INCLUDED.\n return True\n\n # RULE: Nested Attributes w/ DELETE mode\n # Nested attributes on a parent CM type use the mode feature. When the mode\n # is DELETE the attribute should NOT be INCLUDED. Any attribute that was\n # added by the developer using the stage_xxx() method would have hit the\n # STAGED RULE above and would be INCLUDED.\n # A secondary PATTERN consideration is that attributes can be immediately\n # deleted using the attribute.delete() method. While this isn't as\n # efficient as deleting them all in one request, it's is a simpler\n # development design pattern.\n\n # All non-matching nested object that did not match a rule above will NOT be INCLUDED.\n return False\n\n #\n # TI PARENT TYPES (Groups, Indicators, Victim, and Victims Assets)\n #\n\n # Nested Types:\n # * Associations (Groups, Indicators, Victim Assets)\n # * Attributes\n # * Security Labels\n # * Tags\n\n # Coverage:\n # * Downloaded from API\n # * Added on instantiation\n # * Added with stage_xxx() method\n\n if mode == 'append' and self._associated_type:\n # RULE: Nested Object w/ APPEND mode\n # Nested object on a parent CM type use the mode feature. When the mode\n # is APPEND and not STAGED the object should NOT be INCLUDED.\n return True\n\n if mode == 'replace':\n # RULE: Nested Object w/ REPLACE mode\n # Nested object on a parent TI type use the mode feature. When the mode\n # is REPLACE the object should be INCLUDED.\n return True\n\n # * security_label -> delete (support id or name only)\n # * tag -> delete (support id or name only)\n if (\n mode == 'delete'\n and (model._shared_type is True or self._associated_type is True)\n and (model.id is not None or model.name is not None) # type: ignore\n ):\n # RULE: Nested Shared Object w/ DELETE mode (TAGS, SECURITY LABELS)\n # Nested shared object on a parent TI type use the mode feature. When the mode\n # is DELETE the shard object should not be INCLUDED. Any object that was\n # added by the developer would have hit the STAGED RULE above and would\n # be INCLUDED.\n return True\n\n # * associated -> delete (support id only)\n # * attribute -> delete (support id only)\n # RULE: Nested Object w/ DELETE mode\n # Nested object on a parent TI type use the mode feature. When the mode\n # is DELETE the object should not be INCLUDED. 
Any object that was\n # added by the developer would have hit the STAGED RULE above and would\n # be INCLUDED.\n\n # All non-matching nested object that did not match a rule above will NOT be INCLUDED.\n return False", "def is_imported():\n return len(inspect.stack()) > 3", "def getStandard(self):\n\n app = self.app\n loadData = app.loadData\n\n if not loadData or loadData == \"core\":\n return\n\n aContext = app.context\n moduleSpecs = aContext.moduleSpecs\n seen = self.seen\n checkout = self.checkout\n backend = self.backend\n\n for m in moduleSpecs or []:\n org = m[\"org\"]\n repo = m[\"repo\"]\n relative = m[\"relative\"]\n theCheckout = m.get(\"checkout\", checkout)\n theBackend = m.get(\"backend\", backend)\n bRep = backendRep(theBackend, \"spec\", default=backend)\n\n ref = f\"{bRep}{org}/{repo}{relative}\"\n if ref in seen:\n continue\n\n if not self.getModule(\n org,\n repo,\n relative,\n theCheckout,\n backend=theBackend,\n specs=m,\n ):\n self.good = False", "def getModule(\n self, org, repo, relative, checkout, backend=None, isBase=False, specs=None\n ):\n\n backend = self.backend if backend is None else backendRep(backend, \"norm\")\n bRep = backendRep(backend, \"spec\", default=self.backend)\n version = self.version\n silent = self.silent\n mLocations = self.mLocations\n provenance = self.provenance\n seen = self.seen\n app = self.app\n _browse = app._browse\n aContext = app.context\n branch = aContext.provenanceSpec[\"branch\"]\n\n relative = prefixSlash(normpath(relative))\n\n moduleRef = f\"{bRep}{org}/{repo}{relative}\"\n if moduleRef in self.seen:\n return True\n\n if org is None or repo is None:\n relativeBare = relative.removeprefix(\"/\")\n repoLocation = relativeBare\n mLocations.append(relativeBare)\n (commit, local, release) = (None, None, None)\n else:\n (commit, release, local, localBase, localDir) = checkoutRepo(\n backend,\n _browse=_browse,\n org=org,\n repo=repo,\n folder=relative,\n version=version,\n checkout=checkout,\n withPaths=False,\n keep=False,\n silent=silent,\n )\n if not localBase:\n return False\n\n repoLocation = f\"{localBase}/{org}/{repo}\"\n mLocations.append(f\"{localBase}/{localDir}\")\n\n seen.add(moduleRef)\n if isBase:\n app.repoLocation = repoLocation\n\n info = {}\n for item in (\n (\"doi\", None),\n (\"corpus\", f\"{org}/{repo}{relative}\"),\n ):\n (key, default) = item\n info[key] = (\n getattr(aContext, key)\n if isBase\n else specs[key]\n if specs and key in specs\n else default\n )\n provenance.append(\n (\n (\"corpus\", info[\"corpus\"]),\n (\"version\", version),\n (\"commit\", commit or \"??\"),\n (\"release\", release or \"none\"),\n (\n \"live\",\n provenanceLink(\n backend, org, repo, version, branch, commit, local, release, relative\n ),\n ),\n (\"doi\", info[\"doi\"]),\n )\n )\n return True", "def has_module_perms(self, app_label):\n # Active superusers have all permissions.\n if self.is_active and self.is_superuser:\n return True\n\n return _user_has_module_perms(self, app_label)", "def has_module_perms(self, app_label):\n # Active superusers have all permissions.\n if self.is_active and self.is_superuser:\n return True\n\n return _user_has_module_perms(self, app_label)", "def has_module_perms(self, app_label):\n # Active superusers have all permissions.\n if self.is_active and self.is_superuser:\n return True\n\n return _user_has_module_perms(self, app_label)", "def test_same_models(self):\n\t\t\n\t\t# TODO: finish\n\t\tpass", "def test_father(self):\n user1 = User()\n self.assertTrue(issubclass(user1.__class__, 
BaseModel))", "def check_models_loaded_or_error(self, rerank):\n if not self._parser_model_loaded:\n raise ValueError(\"Parser model has not been loaded.\")\n if rerank is True and not self.reranker_model:\n raise ValueError(\"Reranker model has not been loaded.\")\n if rerank == 'auto':\n return bool(self.reranker_model)\n else:\n return rerank", "def is_peewee_model(obj) -> bool:\n return (inspect.isclass(obj) and\n issubclass(obj, peewee.Model) and\n not obj == peewee.Model and\n not obj.__name__.startswith('_'))", "def is_installed(cls):\n return find_spec_or_loader(cls.module) is not None", "def is_management_or_admin(user):\n if user.id:\n if in_projects_admin_group(user) or \\\n shared_models.Section.objects.filter(head=user).count() > 0 or \\\n shared_models.Division.objects.filter(head=user).count() > 0 or \\\n shared_models.Branch.objects.filter(head=user).count() > 0:\n return True", "def has_db_objects(self) -> bool:\n\n def has_db_models(entity: t.Union[EntityList, Model]) -> bool:\n # pylint: disable=protected-access\n return len(entity._db_models) > 0\n\n def has_db_scripts(entity: t.Union[EntityList, Model]) -> bool:\n # pylint: disable=protected-access\n return len(entity._db_scripts) > 0\n\n has_db_objects = False\n for model in self.models:\n has_db_objects |= hasattr(model, \"_db_models\")\n\n # Check if any model has either a DBModel or a DBScript\n # we update has_db_objects so that as soon as one check\n # returns True, we can exit\n has_db_objects |= any(\n has_db_models(model) | has_db_scripts(model) for model in self.models\n )\n if has_db_objects:\n return True\n\n # If there are no ensembles, there can be no outstanding model\n # to check for DBObjects, return current value of DBObjects, which\n # should be False\n ensembles = self.ensembles\n if not ensembles:\n return has_db_objects\n\n # First check if there is any ensemble DBObject, if so, return True\n has_db_objects |= any(\n has_db_models(ensemble) | has_db_scripts(ensemble) for ensemble in ensembles\n )\n if has_db_objects:\n return True\n for ensemble in ensembles:\n # Last case, check if any model within an ensemble has DBObjects attached\n has_db_objects |= any(\n has_db_models(model) | has_db_scripts(model)\n for model in ensemble.models\n )\n if has_db_objects:\n return True\n\n # `has_db_objects` should be False here\n return has_db_objects", "def allow_relation(self, obj1, obj2, **hints):\n\n result = (obj1._meta.model_name in DefaultRouting.defaultModels and \n obj2._meta.model_name in DefaultRouting.defaultModels)\n return result" ]
[ "0.66678244", "0.6193898", "0.6127741", "0.6090915", "0.60585386", "0.5981495", "0.5975656", "0.59688056", "0.58849907", "0.5884931", "0.5833496", "0.5831226", "0.58079565", "0.5742593", "0.5681119", "0.5659924", "0.56587225", "0.56485283", "0.56155103", "0.5606545", "0.5603672", "0.5603305", "0.559993", "0.5596409", "0.5594016", "0.5591113", "0.5575471", "0.5573548", "0.5569031", "0.5562536", "0.5543583", "0.55399793", "0.55325776", "0.55325776", "0.5519819", "0.54907817", "0.54877794", "0.54863393", "0.54833674", "0.545854", "0.54516953", "0.5442931", "0.5436814", "0.5417743", "0.54169154", "0.54032785", "0.53914726", "0.53901577", "0.5362201", "0.53587854", "0.53571844", "0.5352305", "0.53409696", "0.5339727", "0.53351116", "0.5334859", "0.5313142", "0.5311856", "0.53105617", "0.52933854", "0.528284", "0.52812076", "0.5265676", "0.5260746", "0.52551514", "0.52542496", "0.52539617", "0.5251077", "0.5246189", "0.52330077", "0.5228621", "0.5224862", "0.52217245", "0.52217245", "0.52217245", "0.52139425", "0.5209416", "0.51947296", "0.5194413", "0.5194413", "0.51799726", "0.51557213", "0.51505065", "0.51442194", "0.5136908", "0.51283383", "0.51235485", "0.5105901", "0.51003873", "0.50886035", "0.50886035", "0.50886035", "0.5087551", "0.5083551", "0.50735354", "0.50721824", "0.50692534", "0.5068657", "0.50679433", "0.5063325" ]
0.6265166
1
Initialize a MAD4PG network.
def __init__(self, env, args):
        self.framework = "MAD4PG"
        self.t_step = 0
        self.episode = 1
        self.avg_score = 0
        self.C = args.C
        self._e = args.e
        self.e_min = args.e_min
        self.e_decay = args.e_decay
        self.anneal_max = args.anneal_max
        self.update_type = args.update_type
        self.tau = args.tau
        self.state_size = env.state_size
        self.action_size = env.action_size
        # Create all the agents to be trained in the environment
        self.agent_count = env.agent_count
        self.agents = [D4PG_Agent(self.state_size, self.action_size, args,
                                  self.agent_count)
                       for _ in range(self.agent_count)]
        self.batch_size = args.batch_size
        # Set up memory buffers, currently only standard replay is implemented
        self.memory = ReplayBuffer(args.device, args.buffer_size, args.gamma,
                                   args.rollout, self.agent_count)
        self.memory.init_n_step()
        for agent in self.agents:
            self.update_networks(agent, force_hard=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialise_network(self):\n raise NotImplementedError", "def _init_graph(self):\n self.G = nx.Graph()\n self.G.add_nodes_from([1,2,3,4,5])\n self.G.add_edges_from([(1,2),(2,3),(2,4)\\\n ,(2,5),(3,4),(4,5)])", "def __init__(self, network=None):\n\n if network is None:\n self.graph = nx.Graph()\n self.graph.graph['graph_type'] = 'generic'\n # extent is the extent defined by pores surfaces\n self.graph.graph['extent'] = None\n self.graph.graph['bbox'] = None\n self.geom_complete = False\n self.pores_volume = 0\n self.throats_volume = 0\n else:\n self.graph = network\n self.compute_geometry()", "def initialize_ai(self):\n\n self.gid, self.genome = constants.genomes_to_run[self.identifier]\n self.genome.fitness = -1\n self.net = neat.nn.FeedForwardNetwork.create(self.genome, constants.conf)\n # self.net = neat.nn.RecurrentNetwork\n # .create(self.genome, constants.conf)", "def init_net(self):\r\n # initialize the generator network\r\n g_net = Net(\r\n self.architecture['generator'], net_name='gen',\r\n data_format=FLAGS.IMAGE_FORMAT, num_class=self.num_class)\r\n # define layer connections in generator\r\n self.Gen = Routine(g_net)\r\n self.Gen.add_input_layers([64, self.code_size], [0])\r\n self.Gen.seq_links(list(range(g_net.num_layers)))\r\n self.Gen.add_output_layers([g_net.num_layers - 1])\r\n\r\n # initialize the generator network\r\n d_net = Net(\r\n self.architecture['discriminator'], net_name='dis',\r\n data_format=FLAGS.IMAGE_FORMAT, num_class=self.num_class)\r\n # define layer connections in generator\r\n self.Dis = Routine(d_net)\r\n self.Dis.add_input_layers([64] + list(self.architecture['input'][0]), [0])\r\n self.Dis.seq_links(list(range(d_net.num_layers)))\r\n self.Dis.add_output_layers([d_net.num_layers - 1])", "def initialize_gateway(self, network_ref):\n raise NotImplementedError()", "def __init__(self, n_gmm=2, z_dim=1):\n super(DAGMM, self).__init__()\n #Encoder network\n self.fc1 = nn.Linear(118, 60)\n self.fc2 = nn.Linear(60, 30)\n self.fc3 = nn.Linear(30, 10)\n self.fc4 = nn.Linear(10, z_dim)\n\n #Decoder network\n self.fc5 = nn.Linear(z_dim, 10)\n self.fc6 = nn.Linear(10, 30)\n self.fc7 = nn.Linear(30, 60)\n self.fc8 = nn.Linear(60, 118)\n\n #Estimation network\n self.fc9 = nn.Linear(z_dim+2, 10)\n self.fc10 = nn.Linear(10, n_gmm)", "def __init__(self, *args):\n _snap.TMMNet_swiginit(self, _snap.new_TMMNet(*args))", "def __init__(self, network: Network):\n self.graph = network.graph", "def _init_networks(self, state_dict: OrderedDict):\n self.dqn = Brain(self.backbone_cfg, self.head_cfg).to(self.device)\n self.dqn.load_state_dict(state_dict)\n self.dqn.eval()", "def test_init(self):\n network = PerceptronNetwork(\n [\n PerceptronLayer.blank(4, 2, 'layer1', ['a', 'b', 'c', 'd']),\n PerceptronLayer.blank(2, 2, 'layer2', ['a', 'b', 'c', 'd'])\n ]\n )\n self.assertIsNotNone(network)", "def setUp(self):\n self.G = nx.DiGraph()", "def initialize_networkHandler(self):\n\t\tself.networkHandler = NetworkHandler(\n\t\t\tself.callbackQueue,\n\t\t\tself.received_order,\n\t\t\tself.set_light_callback,\n\t\t\tself.newOrderQueue,\n\t\t\tself.startedOrderQueue,\n\t\t\tself.lost_connection\n\t\t\t)", "def __init__(self, network: Network):\n if LOG[\"ExperimentAI\"]:\n print(\"[ExperimentAI] Initializing AI\")\n self.network = network", "def start_network(self):\n try:\n self.topo.build_topo()\n except:\n error('Cannot build the topology.')\n try:\n self.net = IPNet(topo=self.topo, use_v4=False, use_v6=True)\n self.net.start()\n except:\n self.stop_network()\n error('Cannot start 
the network.')", "def initialize(self):\n LOGGER.info('Set %d initializing...', self.port_set)\n # There is a race condition here with ovs assigning ports, so wait a bit.\n time.sleep(2)\n shutil.rmtree(self.tmpdir, ignore_errors=True)\n networking_name = 'gw%02d' % self.port_set\n networking_port = self.pri_base + self.NETWORKING_OFFSET\n LOGGER.debug(\"Adding networking host on port %d\", networking_port)\n cls = docker_host.make_docker_host('daq/networking', prefix='daq', network='bridge')\n try:\n self.networking = self.runner.add_host(networking_name, port=networking_port,\n cls=cls, tmpdir=self.tmpdir)\n self._create_config(self.networking.tmpdir)\n self.record_result('startup')\n except Exception as e:\n self._state_transition(_STATE.ERROR)\n self.record_result('startup', exception=e)", "def initialize_network(self):\n self.sess = tf.InteractiveSession()\n sys.stderr.write(\"------\\n\")\n self.model.create_model()\n self._initialize_trainer()\n self.sess.run(tf.initialize_all_variables())\n self.saver = tf.train.Saver()", "def init_network() -> dict:\n network = {}\n network['W1'] = np.array([[0.1, 0.3, 0.5], [0.2, 0.4, 0.6]])\n network['b1'] = np.array([0.1, 0.2, 0.3])\n network['W2'] = np.array([[0.1, 0.4], [0.2, 0.5], [0.3, 0.6]])\n network['b2'] = np.array([0.1, 0.2])\n network['W3'] = np.array([[0.1, 0.3], [0.2, 0.4]])\n network['b3'] = np.array([0.1, 0.2])\n return network", "def set_network(self, pair_blocks=1, base_channels=512, layers=5):\n\n # store architecture\n self.pair_blocks = pair_blocks\n self.base_channels = base_channels\n self.layers = layers\n\n self.net = Network(pair_blocks, base_channels, layers, self.device)\n self.train_loader.index = 0\n\n self._loaded = False\n self.time_stamp_path = None", "def create_network(self):\n\n print ('Creating network, changing data will have no effect beyond this point.')\n n = IMNN.IMNN(parameters=self.parameters)\n\n if self.load_network:\n n.restore_network()\n else:\n n.setup(network = self.network, load_data = self.data)\n\n return n", "def setup_net(self):\n pass", "def initialize_network(self, cidr, is_external):\n raise NotImplementedError()", "def __init__(self, mode, cfg):\n super(DMCM, self).__init__()\n\n self.conv_net = cfg.get_image_net(mode)\n self.sparse_net = cfg.get_genes_net(mode)\n\n # Matrix network does not need weight initialization because there can\n # be no vanishing gradients.\n self.conv_net.apply(_init_weights_xavier)", "def initialize(self):\n self.initialize_edges()\n self.initialize_prob()\n self.initialize_total_input_dict()\n\n self.initialize_fpmusigv_dict()", "def __init__(__self__, *,\n network_id: Optional[pulumi.Input[str]] = None):\n if network_id is not None:\n pulumi.set(__self__, \"network_id\", network_id)", "def init_network(session: \"Session\", new_network_name: str) -> None:\n url_tail = f\"/{CoordConstsV2.RSC_NETWORKS}\"\n _post(session, url_tail, None, params={CoordConstsV2.QP_NAME: new_network_name})", "def __init__(self, *args):\n _snap.TUNGraphMtx_swiginit(self, _snap.new_TUNGraphMtx(*args))", "def load_network(self):\t\t\r\n\t\tself.dqn.load_network(self.path)", "def __init__(self):\r\n self._empty = EmptyNetworkGroup()\r\n self._groups = {}\r\n self._uid = set()\r\n self._machines = set()\r\n self._iaas = None", "def __init__(self):\n self.raw_wires = PyWires.WireNetwork();\n self.__initialize_wires();", "def Setup(self):\n self.Peers = [] # active nodes that we're connected to\n self.KNOWN_ADDRS = [] # node addresses that we've learned about from other nodes\n 
self.DEAD_ADDRS = [] # addresses that were performing poorly or we could not establish a connection to\n self.MissionsGlobal = []\n self.NodeId = random.randint(1294967200, 4294967200)", "def _generate_network_initialization(self, graph, memory_manager):\n\n # TODO: To be changed if we want to support multiple outputs\n output_buffer_name = graph.outputs[0].name\n\n ops_to_ignore = ['Reshape', 'Mul']\n\n buffers_allocated = []\n\n buffer_declaration = \"\"\n buffer_declaration += \" pico_cnn::naive::Tensor **kernels;\\n\"\n buffer_declaration += \" pico_cnn::naive::Tensor **biases;\\n\"\n\n constructor_code = \"\"\n #constructor_code += \"Network::Network() {\\n\\n\"\n\n num_layers = 0\n num_kernels = 0\n num_biases = 0\n\n for node in graph.nodes:\n \"\"\"Do not count the reshape layers as the input tensor will only define the dimensions\"\"\"\n if len(node.input_tensors) > 0 and node.op_type not in ops_to_ignore:\n num_layers += 1\n for num, input in enumerate(node.input_tensors):\n if input in buffers_allocated:\n continue\n else:\n tensor = node.input_tensors[input]\n buffers_allocated.append(input)\n if len(tensor.shape) == 1:\n num_biases += 1\n else:\n num_kernels += 1\n\n \"\"\"The arrays kernels and biases will be used to pass only two variables to read_binary_weights\"\"\"\n constructor_code += \" kernels = new pico_cnn::naive::Tensor*[{}]();\\n\".format(num_kernels)\n constructor_code += \" biases = new pico_cnn::naive::Tensor*[{}]();\\n\\n\".format(num_biases)\n\n pos = -1\n pos_kernel = -1\n pos_bias = -1\n\n buffers_allocated.clear()\n\n \"\"\"Iterate over all nodes in the graph and generate the corresponding allocation code.\"\"\"\n for node_id, node in enumerate(graph.nodes):\n\n if len(node.input_tensors) > 0 and node.op_type not in ops_to_ignore:\n pos += 1\n\n buffer_declaration += \" // Layer: \" + node.name + \", Operation: \" + node.op_type + \"\\n\"\n constructor_code += \" // Layer: \" + node.name + \", Operation: \" + node.op_type + \"\\n\"\n\n # Allocate memory for kernels and biases\n buffer_declaration += \" // Inputs\\n\"\n constructor_code += \" // Inputs\\n\"\n for num, input in enumerate(node.input_tensors):\n\n if node.op_type in ops_to_ignore:\n continue\n\n if input in buffers_allocated:\n continue\n else:\n buffers_allocated.append(input)\n\n tensor = node.input_tensors[input]\n if len(tensor.shape) == 1:\n pos_bias += 1\n else:\n pos_kernel += 1\n\n buffer = memory_manager.get_buffer(graph, input)\n\n buffer_declaration += \" // \" + str(buffer.shape) + \"\\n\"\n\n pico_cnn_tensor = \" pico_cnn::naive::Tensor *\"\n\n buffer_declaration += pico_cnn_tensor + buffer.name + \";\\n\"\n\n constructor_code += \" // \" + str(buffer.shape) + \"\" # TODO maybe we sometimes need \\n\n\n functionality = CodeRegistry.get_funct(\"KernelAllocation\")\n impl = functionality[0].create(buffer, pos, pos_kernel, pos_bias)\n\n if impl:\n constructor_code += impl.generate_code()\n constructor_code += \"\\n\"\n\n buffer_declaration += \" // Outputs\\n\"\n constructor_code += \" // Outputs\\n\"\n for num, output in enumerate(node.outputs):\n\n buffer = memory_manager.get_buffer(graph, output)\n\n if output == output_buffer_name:\n buffer_declaration += \" // Output tensor {} with shape {} of network provided as argument of Network::run()\".format(buffer.name, str(buffer.shape))\n constructor_code += \" // Output tensor {} with shape {} of network provided as argument of Network::run()\".format(buffer.name, str(buffer.shape))\n continue\n\n buffer_declaration += \" // \" + 
str(buffer.shape) + \"\\n\"\n\n pico_cnn_tensor = \" pico_cnn::naive::Tensor *\"\n\n buffer_declaration += pico_cnn_tensor + buffer.name + \";\\n\"\n\n constructor_code += \" // \" + str(buffer.shape) + \"\" # TODO maybe we sometimes need \\n\n\n functionality = CodeRegistry.get_funct(\"OutputAllocation\")\n impl = functionality[0].create(buffer)\n\n if impl:\n constructor_code += impl.generate_code()\n constructor_code += \"\\n\"\n\n buffer_declaration += \"\\n\\n\"\n constructor_code += \"\\n\\n\"\n\n #constructor_code += \"}\\n\"\n\n self.buffer_declaration = buffer_declaration\n self.constructor_code = constructor_code", "def __init__(__self__, *,\n network_tags: Optional[pulumi.Input['NetworkTagsArgs']] = None):\n if network_tags is not None:\n pulumi.set(__self__, \"network_tags\", network_tags)", "def test_create_network():\n _network = Network()", "def __init__(self, *args):\n _snap.TMMNetModeNetI_swiginit(self, _snap.new_TMMNetModeNetI(*args))", "def _build_network(self):\n pass", "def __init__(self, functions=None, variables=None, global_resource=None):\n self.ssa = NetworkEnsemble()\n if functions is None:\n self.ssa.functions = dict()\n else:\n self.ssa.functions = functions\n if variables is None:\n self.ssa.variables = dict()\n else:\n self.ssa.variables = variables\n if global_resource is None:\n self.ssa.global_resource = dict()\n else:\n self.ssa.global_resource = global_resource", "def Create(self):\n\n gateway = None\n netmask = None\n\n self._AcquireNetworkDetails()\n\n if self.is_vpc:\n # Create a VPC first\n\n cidr = '10.0.0.0/16'\n vpc = self.cs.create_vpc(self.vpc_name,\n self.zone_id,\n cidr,\n self.vpc_offering_id,\n self.project_id)\n self.vpc_id = vpc['id']\n gateway = '10.0.0.1'\n netmask = '255.255.255.0'\n\n acl = self.cs.get_network_acl('default_allow', self.project_id)\n assert acl, \"Default allow ACL not found\"\n\n\n # Create the network\n network = self.cs.create_network(self.network_name,\n self.network_offering_id,\n self.zone_id,\n self.project_id,\n self.vpc_id,\n gateway,\n netmask,\n acl['id'])\n\n\n\n assert network, \"No network could be created\"\n\n self.network_id = network['id']\n self.id = self.network_id", "def empty_network(network_id=NETWORK_ID):\n return make_net_model({\"id\": network_id,\n \"subnets\": [],\n \"ports\": [],\n \"tenant_id\": \"calico\",\n \"mtu\": neutron_constants.DEFAULT_NETWORK_MTU})", "def __init__(self, network=None, additional_info=None): # noqa: E501 # noqa: E501\n self._network = None\n self._additional_info = None\n self.discriminator = None\n self.network = network\n self.additional_info = additional_info", "def __init__(self, client, network_id):\n super(NetworksMixin, self).__init__(client)\n self._network_id = network_id", "def __init__(self, *args):\n _snap.TModeNet_swiginit(self, _snap.new_TModeNet(*args))", "def initialisation(x, dim, n_iterations):\r\n net_dim = x\r\n init_radius = int(x / 4)\r\n times = n_iterations / np.log(init_radius)\r\n network = np.random.random((x,dim))\r\n \r\n return net_dim, init_radius, times, network", "def create_network(layers):\r\n return NeuronNetwork(layers)", "def model_setup(self):\n self.DNN = DgganMLP(self.settings.hidden_size)\n self.D = DgganMLP(self.settings.hidden_size)\n self.G = Generator(self.settings.hidden_size)", "def __init__(self, networkFile=\"\", demandFile=\"\"):\n self.numNodes = 0\n self.numLinks = 0\n self.numZones = 0\n self.firstThroughNode = 0\n \n self.node = dict()\n self.link = dict()\n self.ODpair = dict()\n self.path = dict()\n\n if 
len(networkFile) > 0 and len(demandFile) > 0:\n self.readFromFiles(networkFile, demandFile)", "def __init__(self, *args):\n _snap.TUNGraph_swiginit(self, _snap.new_TUNGraph(*args))", "def network_initial(request, SPIC_group, SPIC_id):\n SPIC_obj = get_object_or_404(SPIC, group=SPIC_group, local_id=SPIC_id)\n network_obj, created = Network.objects.get_or_create(user_id=request.user.pk, SPIC=SPIC_obj, local_id=0, deleted=False)\n\n if created is True:\n # Check if prototype exists\n prototype = get_object_or_404(Network, user_id=0, SPIC=SPIC_obj)\n network_obj.nodes_json = prototype.nodes_json\n network_obj.links_json = prototype.links_json\n network_obj.save()\n\n return network(request, SPIC_group, SPIC_id, 0)", "def __init__(self, latent_network, z0, noise=0.1, burnin=0, stride=1, nwalkers=1, xmapper=None):\n self.network = latent_network\n self.model = latent_network.energy_model\n self.noise = noise\n self.burnin = burnin\n self.stride = stride\n self.nwalkers = nwalkers\n if xmapper is None:\n class DummyMapper(object):\n def map(self, X):\n return X\n xmapper = DummyMapper()\n self.xmapper = xmapper\n self.reset(z0)", "def __init__(self, data, net, train_rat=0.8, force_cpu=False):\n # Check if CUDA is available and use it if we selected to\n self.device = 'cpu'\n if torch.cuda.is_available() and not force_cpu:\n self.device = 'cuda'\n # Original data\n self._data = data\n\n # First network\n self.net = net.double().to(self.device)\n\n # First partitioned data\n self.Xtrain, self.Ttrain, self.Xtest, self.Ttest = NetworkBGP.partition(self._data, self.device, train_rat)", "def setup_networks(self, configs):\n self.__networks = self.setup_components(configs, 'scale_client.networks')", "def __init__(self, netlist_file):\n with open(netlist_file, 'r') as f:\n self.netlist = _parse_netlist(f)\n self.G = _create_graph(self.netlist)", "def initialize_network(self):\n # intermediate layer size\n ils = int((self.specbinnum + self.numfilters) / 2)\n\n network = lasagne.layers.InputLayer((None, 1, self.specbinnum, self.numtimebins), self.input_var)\n\n network = NormalisationLayer(network, self.specbinnum)\n self.normlayer = network\n\n network, _ = custom_convlayer_2(network, in_num_chans=self.specbinnum, out_num_chans=ils)\n network = batch_norm(network)\n network, _ = custom_convlayer_2(network, in_num_chans=ils, out_num_chans=self.numfilters)\n network = batch_norm(network)\n\n network = lasagne.layers.NonlinearityLayer(network, nonlinearity=elu)\n self.latents = network\n network = ZeroOutBackgroundLatentsLayer(self.latents,\n mp_down_factor=self.mp_down_factor,\n numfilters=self.numfilters,\n numtimebins=self.numtimebins,\n background_latents_factor=self.background_latents_factor,\n use_maxpool=self.use_maxpool)\n network, _ = custom_convlayer_2(network, in_num_chans=self.numfilters, out_num_chans=ils)\n network = batch_norm(network)\n network, _ = custom_convlayer_2(network, in_num_chans=ils, out_num_chans=self.specbinnum)\n network = batch_norm(network)\n\n # output_size\n num_time_samples = int(audioframe_len/2 * (self.numtimebins + 1))\n # network = batch_norm(DenseLayer(network, num_time_samples)) # MemoryError\n network, _ = custom_convlayer_2(network, in_num_chans=self.specbinnum, out_num_chans=num_time_samples)\n network, _ = batch_norm(network)\n network, _ = custom_convlayer_2(network, in_num_chans=num_time_samples, out_num_chans=1)\n network, _ = batch_norm(network)\n\n self.network = network", "def __init__(self, nodes=[], edges=[], connections=[], directed=False, 
isNetwork=False):\n Node.count=0\n Edge.count=0\n self.nodes = [n for n in nodes]\n self.edges = [e for e in edges]\n self.connections = [(a, b) for (a, b) in connections]\n self.isDirected = directed\n self.isNetwork = isNetwork", "def initialize(ip_config, num_servers=1, num_workers=0,\n max_queue_size=MAX_QUEUE_SIZE, net_type='socket',\n num_worker_threads=1):\n if os.environ.get('DGL_ROLE', 'client') == 'server':\n from .dist_graph import DistGraphServer\n assert os.environ.get('DGL_SERVER_ID') is not None, \\\n 'Please define DGL_SERVER_ID to run DistGraph server'\n assert os.environ.get('DGL_IP_CONFIG') is not None, \\\n 'Please define DGL_IP_CONFIG to run DistGraph server'\n assert os.environ.get('DGL_NUM_SERVER') is not None, \\\n 'Please define DGL_NUM_SERVER to run DistGraph server'\n assert os.environ.get('DGL_NUM_CLIENT') is not None, \\\n 'Please define DGL_NUM_CLIENT to run DistGraph server'\n assert os.environ.get('DGL_CONF_PATH') is not None, \\\n 'Please define DGL_CONF_PATH to run DistGraph server'\n serv = DistGraphServer(int(os.environ.get('DGL_SERVER_ID')),\n os.environ.get('DGL_IP_CONFIG'),\n int(os.environ.get('DGL_NUM_SERVER')),\n int(os.environ.get('DGL_NUM_CLIENT')),\n os.environ.get('DGL_CONF_PATH'))\n serv.start()\n sys.exit()\n else:\n if os.environ.get('DGL_NUM_SAMPLER') is not None:\n num_workers = int(os.environ.get('DGL_NUM_SAMPLER'))\n else:\n num_workers = 0\n if os.environ.get('DGL_NUM_SERVER') is not None:\n num_servers = int(os.environ.get('DGL_NUM_SERVER'))\n else:\n num_servers = 1\n\n rpc.reset()\n ctx = mp.get_context(\"spawn\")\n global SAMPLER_POOL\n global NUM_SAMPLER_WORKERS\n is_standalone = os.environ.get('DGL_DIST_MODE', 'standalone') == 'standalone'\n if num_workers > 0 and not is_standalone:\n SAMPLER_POOL = ctx.Pool(num_workers, initializer=_init_rpc,\n initargs=(ip_config, num_servers, max_queue_size,\n net_type, 'sampler', num_worker_threads))\n else:\n SAMPLER_POOL = None\n NUM_SAMPLER_WORKERS = num_workers\n if not is_standalone:\n assert num_servers is not None and num_servers > 0, \\\n 'The number of servers per machine must be specified with a positive number.'\n connect_to_server(ip_config, num_servers, max_queue_size, net_type)\n init_role('default')\n init_kvstore(ip_config, num_servers, 'default')", "def __init__(self):\n load_dotenv()\n mnemonic_phrase = os.getenv(\n \"MNEMONIC\", \"soccer cousin badge snow chicken lamp soft note ugly crouch unfair biology symbol control heavy\")\n\n # initialize w3\n self.w3 = Web3(Web3.HTTPProvider(\"http://127.0.0.1:8545\"))\n # support PoA algorithm\n self.w3.middleware_onion.inject(geth_poa_middleware, layer=0)\n\n self.coins = {}\n for coin in COINS:\n self.coins[coin] = self.derive_wallets(mnemonic_phrase, coin)", "def __init__(self, *args):\n _snap.TNEGraph_swiginit(self, _snap.new_TNEGraph(*args))", "def __init__(self, device, state_space, action_space, num_actions):\n # initialize all parameters\n super(DQN_MLP_model, self).__init__(device, state_space, action_space,\n num_actions)\n # architecture\n self.layer_sizes = [(768, 768), (768, 768), (768, 512)]\n\n self.build_model()", "def new_network():\n new_names = Names()\n new_devices = Devices(new_names)\n return Network(new_names, new_devices)", "def __init__(self, config) -> None:\n super(Global_MP, self).__init__()\n self.dim = config.dim\n\n self.h_mlp = MLP([self.dim, self.dim])\n\n self.res1 = Res(self.dim)\n self.res2 = Res(self.dim)\n self.res3 = Res(self.dim)\n self.mlp = MLP([self.dim, self.dim])\n\n self.x_edge_mlp = 
MLP([self.dim * 3, self.dim])\n self.linear = nn.Linear(self.dim, self.dim, bias=False)", "def initialize(self,t0=0.0):\n \n # An connection_distribution_list (store unique connection(defined by weight,syn,prob))\n self.connection_distribution_collection = ConnectionDistributionCollection() # this is \n self.t = t0\n \n # put all subpopulation and all connections into the same platform\n for subpop in self.population_list:\n subpop.simulation = self\n for connpair in self.connection_list:\n connpair.simulation = self\n \n \n \n # initialize population_list, calculate \n \n \n for p in self.population_list:\n p.initialize() # 2 \n \n for c in self.connection_list:\n print 'initialize population'\n c.initialize() # 1", "def setup_mininet(log, spread, depth, bandwidth, delay, loss, fpga, fpga_bandwidth, fpga_delay,\n fpga_loss, poisson):\n Cleanup.cleanup()\n\n setLogLevel(log)\n\n # Create network\n topo = TreeTopoGeneric(spread, depth, bandwidth, delay, loss, fpga, fpga_bandwidth, fpga_delay,\n fpga_loss, poisson)\n net = Mininet(topo=topo, host=CPULimitedHost, link=TCLink, autoStaticArp=True)\n net.start()\n\n return net", "def set_network(self, network: str = \"d\", pretrained=False,\n px_coordinates=True):\n # Set up the different networks\n if network == \"d\":\n network = CurbNetD(pretrained=pretrained,\n px_coordinates=px_coordinates)\n elif network == \"e\":\n network = CurbNetE()\n elif network == \"f\":\n network = CurbNetF()\n elif network == \"g\":\n network = CurbNetG()\n\n # Initialize the network as a parallelized network\n self.network = Network(network)\n\n self.network = self.network.to(device=self.device)\n\n # Set the network to train or to validation\n self.network.train(not self.validation)\n\n if not self.validation:\n # Set the optimizer according to the arguments if not validating\n if self.optimizer == \"adam\":\n self.optimizer = torch.optim.Adam(self.network.parameters(),\n lr=self.lr, eps=0.1)\n elif self.optimizer == \"sgd\":\n self.optimizer = torch.optim.SGD(self.network.parameters(),\n lr=self.lr)\n else:\n raise ValueError(\"Illegal optimizer value: only SGD and Adam \"\n \"optimizers are currently supported.\")", "def __init__(self, network, subnetSize=24):\n self.network = ipaddress.ip_network(unicode(network), strict=False)\n if subnetSize < self.network.prefixlen:\n raise Exception(\"Invalid subnetSize {} for network {}\".format(\n subnetSize, network))\n\n subnets = self.network.subnets(new_prefix=subnetSize)\n numSubnets = 2 ** (subnetSize - self.network.prefixlen)\n\n super(NetworkPool, self).__init__(subnets, numSubnets)", "def _create_networks_and_optimizer(self):\n self.policy_net = DeepQNetwork(self.num_inputs,\n self.hidden_layers, \n self.num_actions).to(device)\n self.target_net = DeepQNetwork(self.num_inputs,\n self.hidden_layers, \n self.num_actions).to(device)\n self._update_target_net()\n \n self.optimizer = optim.Adam(self.policy_net.parameters(), \n lr=self.lr, eps=1e-7)", "def test_enlarge_4_create_pdep_network(self):\n self.assertEqual(len(self.rmg.reaction_model.network_list), 1)\n self.assertEqual(len(self.rmg.reaction_model.network_list[0].source), 1)\n self.assertEqual(self.rmg.reaction_model.network_list[0].source[0].label, 'C2H4')\n\n self.assertEqual(len(self.rmg.reaction_model.network_dict), 1)\n self.assertEqual(len(list(self.rmg.reaction_model.network_dict.keys())[0]), 1)\n self.assertEqual(list(self.rmg.reaction_model.network_dict.keys())[0][0].label, 'C2H4')", "def __init__(self, *args):\n _snap.TDirNet_swiginit(self, 
_snap.new_TDirNet(*args))", "def build_network(self, dimList, actType=\"Tanh\", verbose=True):\n self.Q_network = Model(dimList, actType, verbose=verbose)\n self.target_network = Model(dimList, actType)\n\n if self.device == torch.device(\"cuda\"):\n self.Q_network.cuda()\n self.target_network.cuda()\n\n self.build_optimizer()", "def __init__(self, *args, **kwargs):\n self.driver = importutils.import_module(CONF.network_driver)\n\n self.q_conn = quantum_connection.QuantumClientConnection()\n self.m_conn = melange_connection.MelangeConnection()\n self.a_conn = aiclib_connection.AICLibConnection()\n\n # NOTE(tr3buchet): map for global uuids\n # if these should change, restart this service\n # self._nw_map will look like:\n # self._nw_map = {'0000000000-0000-0000-0000-000000000000': pub_uuid,\n # '1111111111-1111-1111-1111-111111111111': priv_uuid,\n # pub_uuid: '0000000000-0000-0000-0000-000000000000',\n # priv_uuid: '1111111111-1111-1111-1111-111111111111'}\n # there will be only one (each way) entry per label\n self._nw_map = {}\n self._rackconnect_servicenet = None\n\n if CONF.network_global_uuid_label_map:\n self._nw_map = self._get_nw_map()\n LOG.debug('the self._nw_map is |%s|' % self._nw_map)\n else:\n self._nw_map = {}\n\n self._rackconnect_roles = set(CONF.rackconnect_roles)\n rc_public_gateway_roles = CONF.rackconnect_public_gateway_roles\n self._rc_public_gateway_roles = set(rc_public_gateway_roles)\n\n super(QuantumManager, self).__init__(service_name='network',\n *args, **kwargs)", "def init_host(self, host):\n self._precreate_network()\n LOG.info(_LI(\"Create/Update Ntwork and Subnet, Done.\"))", "def basic_network(cm=False):\n # fmt: off\n tpm = np.array([\n [0, 0, 0],\n [0, 0, 1],\n [1, 0, 1],\n [1, 0, 0],\n [1, 1, 0],\n [1, 1, 1],\n [1, 1, 1],\n [1, 1, 0],\n ])\n if cm is False:\n cm = np.array([\n [0, 0, 1],\n [1, 0, 1],\n [1, 1, 0],\n ])\n # fmt: on\n else:\n cm = None\n return Network(tpm, cm=cm, node_labels=LABELS[:tpm.shape[1]])", "def model_setup(self):\n self.DNN = SganMLP(self.settings.number_of_bins)\n self.D = SganMLP(self.settings.number_of_bins)\n self.G = Generator()", "def run(self, network_create_args=None):\n self.neutron.create_network(**(network_create_args or {}))\n self.neutron.list_networks()", "def __init__(self, name: str, *args, size: int = 1024, network: 'base_network.Network' = None):\n self.name = name\n self._network = network if network is not None else defaults.network\n self._network.add_subnet(self)\n self._max_size = size\n self._ip_range = self._network.get_subnet_range(self._max_size)\n self._hosts = list(self._ip_range.hosts())\n\n self._nodes_dict = {}\n self.started = False\n self.loaded = False\n\n for node in utils.args.list_from_args(args):\n self.add_node(node)", "def initialize(N0,L,Nt,pflag):\n qmax,qnet,enet = net.generate(N0,L,Nt)\n N = (N0+Nt)\n #generate initial conditions. 
all nodes have S,E,C=(1,0,0)\n #except the infected node which has S,E,C=(0.1,0.05,0.05)\n init = np.zeros(3*N)\n for i in range(N):\n init[i] = 1\n #the highest degree node is infected\n infnode = qnet.argmax(axis=0)+1\n init[infnode-1] = 0.1\n init[N+infnode-1] = 0.05\n init[2*N+infnode-1] = 0.05\n if (pflag==True):\n #compute the transport matrix from the degree vector\n #and the adjacency matrix\n A = net.adjacency_matrix(N,enet)\n P = np.zeros([N,N])\n for j in range(N):\n for i in range(N):\n P[i,j] = qnet[i]*A[i,j]\n P[:,j] = P[:,j]/sum(P[:,j])\n return init,infnode,P\n return init,infnode", "def __init__(self, graph=None):\n\n self.graph = graph if graph else nx.Graph()", "def init(self):\n self._service_store = ServiceStore(self.driver, self.network)\n self._emulator = NetworkEmulator(self.store, self.driver)", "def __init__(self, address=None, group=None, network=None, connection=None):\n \n # Address\n self.address = address\n if address is None:\n self.address = Address()\n \n # Group\n self.group = group\n if group is None:\n self.group = Group(None, address, [])\n \n # Network\n self.network = network\n if network is None:\n self.network = Network()\n \n # Connection\n self.connection = connection\n if connection is None:\n self.connection = Connection()\n \n # Dictionary of sent messages waiting for an ack\n # Key: Message identifier\n # Value: \n self.waiting_for_ack = dict()", "def __init__(self, *args):\n _snap.TUNGraphNodeI_swiginit(self, _snap.new_TUNGraphNodeI(*args))", "def __init__(self) -> None:\n self.network: list = list()\n self.arcs = 0", "def initialize( self, layout, numGhostAgents=1000 ):\n self.data.initialize(layout, numGhostAgents) ##self.data is defined in the Grid() class of game.py REF112.It creates an initial game state from a layout array (see layout.py).", "def __init__(self, *args):\n _snap.TModeNetNodeI_swiginit(self, _snap.new_TModeNetNodeI(*args))", "def initialize_network(self, model, num_init=None, **net_args):\n\n self.net_args = net_args\n\n if num_init is None:\n self.num_init = 1\n else:\n self.num_init = num_init\n\n nets = []\n for i in range(self.num_init):\n nets.append( model(dim_inp=self.dim_inp, \n dim_out=self.dim_out, **net_args) )\n\n return nets", "def init():\n global neural_network\n global labels\n\n # load objects required by run() for inferencing\n model_dir = Model.get_model_path(\"mnist-fashion\")\n # neural model\n neural_network = keras.models.load_model(f\"{model_dir}/neural-network.h5\")\n # labels\n with open(f\"{model_dir}/labels.jsonpickle\", \"r\") as labels_file:\n labels = jsonpickle.decode(labels_file.read())", "def run(self, network_create_args=None):\n network = self.neutron.create_network(**(network_create_args or {}))\n self.neutron.get_network(network[\"id\"])", "def __init__(self, *args):\n _snap.TNEANet_swiginit(self, _snap.new_TNEANet(*args))", "def __init__(self, v_reg):\n nn.Module.__init__(self)\n self.mdls = [GaoNet(reg) for reg in v_reg]", "def __init__(self, name, config):\n super(RelationalNetwork, self).__init__(name, RelationalNetwork, config)\n\n # Get key mappings.\n self.key_feature_maps = self.stream_keys[\"feature_maps\"]\n self.key_question_encodings = self.stream_keys[\"question_encodings\"]\n self.key_outputs = self.stream_keys[\"outputs\"]\n\n # Retrieve input sizes from globals.\n self.feature_maps_height = self.globals[\"feature_maps_height\"]\n self.feature_maps_width = self.globals[\"feature_maps_width\"]\n self.feature_maps_depth = self.globals[\"feature_maps_depth\"]\n 
self.question_encoding_size = self.globals[\"question_encoding_size\"]\n \n # Create \"object\" coordinates.\n self.obj_coords = []\n for h in range(self.feature_maps_height):\n for w in range(self.feature_maps_width):\n self.obj_coords.append((h,w))\n\n # Calculate input size to the g_theta: two \"objects\" + question (+ optionally: image size)\n input_size = 2 * self.feature_maps_depth + self.question_encoding_size\n\n # Create the module list.\n modules = []\n\n # Retrieve dropout rate value - if set, will put dropout between every layer.\n dropout_rate = self.config[\"dropout_rate\"]\n\n # Create the model, i.e. the \"relational\" g_theta network.\n g_theta_sizes = self.config[\"g_theta_sizes\"]\n if type(g_theta_sizes) == list and len(g_theta_sizes) > 1:\n # First input dim.\n input_dim = input_size\n for hidden_dim in g_theta_sizes:\n # Add linear layer.\n modules.append( torch.nn.Linear(input_dim, hidden_dim) )\n # Add activation and dropout.\n modules.append( torch.nn.ReLU() )\n if (dropout_rate > 0):\n modules.append( torch.nn.Dropout(dropout_rate) )\n # Remember input dim of next layer.\n input_dim = hidden_dim\n\n # Add output layer.\n modules.append( torch.nn.Linear(input_dim, hidden_dim) )\n\n self.logger.info(\"Created g_theta network with {} layers\".format(len(g_theta_sizes)+1))\n\n else:\n raise ConfigurationError(\"'g_theta_sizes' must contain a list with numbers of neurons in g_theta layers (currently {})\".format(self.hidden_sizes))\n\n # Export output_size to globals.\n self.output_size = g_theta_sizes[-1]\n self.globals[\"output_size\"] = self.output_size\n\n # Finally create the sequential model out of those modules.\n self.g_theta = torch.nn.Sequential(*modules)", "def __init__(self, *args):\n _snap.TNGraphMtx_swiginit(self, _snap.new_TNGraphMtx(*args))", "def __init__(self, game, net_args: DotDict, architecture: str) -> None:\n super().__init__(game, net_args, Agents.MuZeroNetworks[architecture])\n self.action_size = game.getActionSize()\n self.architecture = architecture", "def __init__(self, \n xml_path=cfg.PERSON_REID_XML,\n bin_path=cfg.PERSON_REID_BIN):\n self.__net = cv.dnn.readNet(xml_path, bin_path)\n self.__net.setPreferableTarget(cv.dnn.DNN_TARGET_MYRIAD)", "def __init__(self, lemmas_info):\n self._lemmas_info = lemmas_info\n self._graph = self._create_nx_graph()", "def __init__(self):\n self.networks = [\n ipaddress.ip_network(address)\n for address in self.addresses\n ]", "def construct_network(self, n_units, n_samples=1, noise_dim=0,\n keep_p=1., nonlinearity=True, init_params=None, name=\"\"):\n print \"constructing network, n_units: \",n_units\n # TODO use kwargs for more elagant solutions to being called by this \n # base class\n assert keep_p ==1. 
and nonlinearity and noise_dim == 0\n\n assert init_params is None # this is implemented only in the Bayesian flow version of this function\n\n ### Define parameters of the network\n self.weights, self.biases, KL = {}, {}, 0.\n self.layers = []\n # Establish paramters of appromiate posterior over weights and\n # biases.\n for l in range(1, len(n_units)):\n with tf.variable_scope(name+'Layer_%d'%l):\n n_in, n_out = n_units[l-1], n_units[l]\n\n # use non neglidgible uncertainty if we are doing VI\n sigma_init = self.init_sigma_params\n\n w_prior_sigma, b_prior_sigma = self.w_prior_sigma, self.w_prior_sigma\n mu_init_sigma_w, mu_init_sigma_b = np.sqrt(1./(n_in)), 1.\n\n (w_mu, w_logstd), _, w_KL = utils.set_q(name+\"w_%d\"%l,\n sigma_prior=w_prior_sigma, mu_init_sigma=mu_init_sigma_w,\n sigma_init=sigma_init, n_samples=0,\n size=[n_in, n_out], save_summary=True)\n\n # We use same init_sigma for weights and biases.\n (b_mu, b_logstd), _, b_KL = utils.set_q(name+\"b_%d\"%l,\n sigma_prior=b_prior_sigma, mu_init_sigma=mu_init_sigma_b,\n sigma_init=sigma_init, n_samples=0,\n size=[n_out], save_summary=True)\n self.weights['w_%d_mu'%l], self.weights['w_%d_std'%l] = w_mu, tf.nn.softplus(w_logstd)\n self.biases['b_%d_mu'%l], self.biases['b_%d_std'%l] = b_mu, tf.nn.softplus(b_logstd)\n\n self.params += [w_mu, b_mu, w_logstd, b_logstd]\n KL += w_KL + b_KL\n\n # Add an extra dimension to correspond to samples.\n prev_layer = tf.stack([self.x]*n_samples)\n self.layers.append(prev_layer)\n # shape is [n_samples, ?, dim(x)]\n\n ### Define activations in each layer\n for l in range(1,len(n_units)):\n print \"defining activations in layer %d\"%l\n # Multiply with weight matrix and add bias\n prev_layer = tf.reshape(prev_layer, [-1, n_units[l-1]])\n layer_pre_bias = tf.matmul(prev_layer, self.weights['w_%d_mu'%l])\n layer_pre_bias = tf.reshape(layer_pre_bias, [n_samples, -1, n_units[l]])\n # Shape of layer_pre_bias is [n_samples, ?, n_units[l]]\n\n # add mean bias term\n layer = tf.add(layer_pre_bias, self.biases['b_%d_mu'%l][None, None, :])\n\n # Calculate the noise in each hidden unit.\n # must use absolute value of activation because final layer may\n # have negative values.\n layer_var = tf.matmul(tf.reshape(prev_layer**2,[-1,\n n_units[l-1]]), self.weights['w_%d_std'%l]**2)\n layer_var = tf.reshape(layer_var, [n_samples, -1, n_units[l]])\n layer_var += self.biases['b_%d_std'%l]**2\n\n # Now sample noise and add scaled noise.\n # This constitutes the local reparameterization trick.\n eps = tf.random_normal(name='eps_%d'%l, mean=0.,\n stddev=1.0, shape=[n_samples, 1, n_units[l]])\n layer_sigma = tf.sqrt(layer_var)\n layer += layer_sigma*eps\n with tf.name_scope(name+\"Neural_Network_Activations_%d\"%l):\n tf.summary.histogram(name+\"Layer_%d_sigmas\"%l, layer_sigma)\n tf.summary.histogram(name+\"Layer_%d_activations_pre_tanh\"%l, layer)\n\n # Add tanh nonlinearity\n if l != (len(n_units) - 1): layer = tf.nn.tanh(layer)\n\n with tf.name_scope(name+\"Neural_Network_Activations_%d\"%l):\n tf.summary.histogram(name+\"Layer_%d_activations_post_tanh\"%l,layer)\n\n prev_layer = layer\n self.layers.append(prev_layer)\n self.KL_BNN = KL\n return prev_layer", "def __init__(self, address, netmask=None):\n\n if netmask:\n ip = Ipv4Address(address)\n address = \"%s/%s\" % (ip,netmask)\n\n google.ipaddr.IPv4Network.__init__(self, address, strict=False)", "def _init_network_operation(self, operation, parameters):\n message_id = self.operator.async_remote_call(None, operation, parameters, True)", "def 
initialize_graph(compound_relations, relation_types):\n graph = nx.DiGraph()\n for compound, targets in compound_relations.items():\n for target, relation in targets.items():\n if relation in relation_types:\n graph.add_edge(compound, target)\n return graph", "def __init__(self, *args):\n _snap.TCrossNet_swiginit(self, _snap.new_TCrossNet(*args))", "def __createNetwork__(self, amount_nodes, amount_links):\n random.seed()\n numOfNodes = 0\n linksPerIteration = (amount_links-3)/(amount_nodes-3) if amount_nodes > 3 else 1\n #generate n nodes\n while numOfNodes < amount_nodes:\n node = Node(numOfNodes)\n self.appendNode(node)\n numOfNodes += 1\n #make first three nodes fully connected\n if numOfNodes == 2:\n self.__connectNode__(numOfNodes, 1)\n if numOfNodes == 3:\n self.__connectNode__(numOfNodes, 2)\n #link following nodes\n if numOfNodes > 3:\n self.__connectNode__(numOfNodes, linksPerIteration)", "def __init__(self, input_dimensions=2, number_of_nodes=4, transfer_function=\"Hard_limit\"):\n self.input_dimensions = input_dimensions\n self.number_of_nodes = number_of_nodes\n self.transfer_function = transfer_function\n self.initialize_weights()", "def network_create(request, **kwargs):\n LOG.debug(\"network_create(): kwargs = %s\", kwargs)\n if 'tenant_id' not in kwargs:\n kwargs['tenant_id'] = request.user.project_id\n body = {'network': kwargs}\n network = neutronclient(request).create_network(body=body).get('network')\n return Network(network)" ]
[ "0.6553901", "0.650665", "0.6231836", "0.607061", "0.60383505", "0.5979345", "0.5966271", "0.5948596", "0.58692914", "0.586917", "0.5827446", "0.58020544", "0.5797991", "0.5792459", "0.5786038", "0.5722977", "0.5710139", "0.5696189", "0.5653719", "0.5616278", "0.56124663", "0.5577533", "0.5551598", "0.55327106", "0.5512008", "0.54674405", "0.54612964", "0.546098", "0.5460471", "0.5431485", "0.54195315", "0.5414129", "0.5409739", "0.540784", "0.538869", "0.5384533", "0.5338012", "0.5331442", "0.5323723", "0.5319797", "0.5300818", "0.53001076", "0.5286131", "0.52847964", "0.5268696", "0.5268068", "0.5264546", "0.525836", "0.5248461", "0.52441055", "0.52439606", "0.52418804", "0.5241463", "0.5221782", "0.52146584", "0.52102983", "0.52083814", "0.5201637", "0.51988786", "0.51960063", "0.51934785", "0.51882577", "0.51818943", "0.5179211", "0.51667774", "0.5165402", "0.51594806", "0.5156593", "0.51541847", "0.5152796", "0.5149127", "0.51433414", "0.5133144", "0.51294893", "0.512481", "0.512281", "0.51210994", "0.5116056", "0.51144904", "0.5101766", "0.50958276", "0.5093157", "0.50915396", "0.5082967", "0.507859", "0.5070257", "0.5068652", "0.50622207", "0.50581914", "0.5056527", "0.50548404", "0.5033218", "0.5030215", "0.5020872", "0.5018324", "0.501568", "0.50116926", "0.5011455", "0.5010617", "0.50052226", "0.50015557" ]
0.0
-1
For each agent in the MAD4PG network, choose an action from the ACTOR
def act(self, obs, training=True):
        assert len(obs) == len(self.agents), "Num OBSERVATIONS does not match \
            num AGENTS."
        with torch.no_grad():
            actions = np.array([agent.act(o) for agent, o in zip(self.agents, obs)])
        if training:
            actions += self._gauss_noise(actions.shape)
        return np.clip(actions, -1, 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _advance_by_action(game, agents, action):\n getLogger(__name__).debug(\"Agent {} action {}\".format(game.current_agent_id, action))\n agent_id_for_action = game.current_agent_id\n\n game.take_action(action)\n for agent in agents:\n agent.take_action(action, agent.agent_id == agent_id_for_action)", "def target_act(self, obs_all_agents, noise=0.0):\n target_actions = [ddpg_agent.target_act(obs, noise) for ddpg_agent, obs in zip(self.maddpg_agent, obs_all_agents)]\n return target_actions", "def target_act(self, obs_all_agents, noise=0.0):\n target_actions = [ddpg_agent.target_act(obs, noise) for ddpg_agent, obs in zip(self.maddpg_agent, obs_all_agents)]\n return target_actions", "def target_act(self, obs_all_agents, noise=0.0):\n target_actions_next = [ddpg_agent.target_act(obs, noise) for ddpg_agent, obs in\n zip(self.maddpg_agent, obs_all_agents)]\n return target_actions_next", "def onActionChosen(self, agent, action):\n\n pass", "def step(self, actions): # actions is a list,\n\n assert len(actions) == len(self.agents), \"Number of actions (\" + str(\n len(actions)) + \") does not match number of agents (\" + str(self.n_agents) + \")\"\n\n # Process movement based on real states (not belief)\n\n\n rewards = [0.] * self.n_agents\n\n reward = 0.\n\n\n nextcells = [None] * self.n_agents\n rand_nums = self.rng.uniform(size=self.n_agents)\n\n for i in range(self.n_agents):\n\n currcell = self.tocellcoord[self.agents[i].state]\n if isinstance(actions,int):\n act = actions\n else:\n act = actions[i]\n direction = self.directions[act]\n\n if rand_nums[i] > 1/3: # pick action as intended\n if self.occupancy[tuple(currcell + direction)] == 0:\n nextcells[i] = self.tocellnum[tuple(currcell+direction)]\n else:\n nextcells[i] = self.tocellnum[tuple(currcell)] # wall collision\n # rewards[i] += self.collision_penalty\n\n else: # pick random action, except one initially intended\n adj_cells = self.adjacent_to(currcell) # returns list of tuples\n adj_cells.remove(tuple(currcell+direction))\n\n index = self.rng.choice(range(len(adj_cells)))\n new_cell = adj_cells[i]\n\n if self.occupancy[new_cell] == 0:\n nextcells[i] = self.tocellnum[new_cell]\n else:\n nextcells[i] = self.tocellnum[tuple(currcell)] # wall collision\n # rewards[i] += self.collision_penalty\n\n\n # check for inter-agent collisions:\n collisions = [c for c, count in Counter(nextcells).items() if count > 1]\n while(len(collisions) != 0): # While loop needed to handle edge cases\n for i in range(len(nextcells)):\n if nextcells[i] in collisions:\n nextcells[i] = self.agents[i].state # agent collided with another, so no movement\n\n\n collisions = [c for c, count in Counter(nextcells).items() if count > 1]\n\n\n for i in range(self.n_agents):\n if nextcells[i] == self.agents[i].state: # A collision happened for this agent\n rewards[i] += self.collision_penalty\n else:\n s = nextcells[i] # movement is valid\n self.agents[i].state = s\n if s in self.goals and s not in self.discovered_goals:\n rewards[i] += self.goal_reward\n self.discovered_goals.append(s)\n #rewards[i] += broadcasts[i]*self.broadcast_penalty\n\n\n self.currstate = tuple(nextcells)\n\n\n\n reward = np.sum(rewards)\n\n self.step_count += 1\n\n\n # If all goals were discovered, end episode\n done = len(self.discovered_goals) == len(self.goals)\n\n \n return reward, self.currstate, done, None", "def traverse(self, action_details: Dict):\n agent = action_details[\"agent_id\"]\n self.agents[agent-1].traversing = True\n # distanation node\n dest_node = action_details[\"to\"]\n\n 
# TODO add checks for from and to nodes\n\n node1, node2, distance = self.agents_location[agent]\n # people_collected = 0\n \n # If the agent is in node ( not on the edge ) check if the distination node is its neighbor\n if node1 == node2 and self.graph.is_neighbours(node1, dest_node) and not (node2,dest_node) in self.blocked_edges :\n # Get (node1,dest_node) edge weight\n\n edge_weight = self.graph.get_weight(node1, dest_node)\n\n # Move the agent into the edge (node1,dest_node)\n distance = edge_weight - 1\n self.agents_location[agent] = [node1, dest_node, distance]\n action_succeed = True\n\n # If the agent is already inside the edge , check whether destination node is correct\n elif node1 != node2 and node2 == dest_node:\n\n # Move the agent one step on the edge\n distance -= 1\n self.agents_location[agent][2] = distance\n\n action_succeed = True\n else:\n # If the destination node is wrong\n action_succeed = False\n # TODO write warning\n\n # If the agent arrived to some node , collect all the people there and change the location from [node1,node2,X]\n # to [dest_node,dest_node,0]\n if distance == 0 and action_succeed:\n self.agents_location[agent] = [dest_node, dest_node, 0]\n self.agents[agent-1].traversing = False\n self.agents[agent-1].location = dest_node\n action_succeed = True\n\n self.agents_last_action[agent] = action_succeed\n\n new_observation = self.get_observation({})\n\n return new_observation", "def actions(self, agent_state):\n raise NotImplementedError(\"Don't know what actions are available\")", "def execute(self):\n # set observations for all agents\n observation = self.environment.get_observation_for_agent()\n for ag in self.environment.agents:\n ag.observation = observation\n # main loop\n while not self.environment.end_episode:\n # each agent choose its action\n self.environment.choose_action()\n # next state\n self.environment.calculate_next_state()\n # is the end of the episode\n self.environment.calculate_end_episode()\n # set observations for all agents\n observation = self.environment.get_observation_for_agent()\n for ag in self.environment.agents:\n ag.observation = observation", "def take_one_step(self):\n\t\tfor i in range(len(self.agents)):\n\t\t\tself.agents[i].action(0)", "def act(self, obs_all_agents, noise=0.0):\n actions = [agent.act(obs, noise) for agent, obs in zip(self.maddpg_agent, obs_all_agents)]\n return actions", "def act(self, obs_all_agents, noise=0.0):\n actions = [agent.act(obs, noise) for agent, obs in zip(self.maddpg_agent, obs_all_agents)]\n return actions", "def gen_action(self, agent_list, observation, frame_idx, train, free_map=None):\n #TODO add multiple agent functionality with a for loop \n \n if train == True:\n epsilon = self.epsilon_by_frame(frame_idx)\n if random.random() > epsilon:\n state = observation\n state = torch.FloatTensor(np.float32(state))\n state = state.to(self.device).unsqueeze(0).unsqueeze(0)\n q_value = self.current_model.forward(state)\n max_q, action = q_value[0].max(0)\n max_q = float(max_q)\n action = int(action)\n \n else:\n action = random.randrange(self.num_actions)\n \n # for evaluation\n elif train == False:\n #TODO fix the CNN input dimensions\n state = observation.flatten()\n state = torch.FloatTensor(np.float32(state))\n state = state.to(self.device)\n \n q_value = self.current_model.forward(state)\n max_q, action = q_value.max(0)\n\n #TODO get all agent actions for one team here\n action_out = []\n action_out.append(action)\n return action_out", "def act(self, obs_all_agents, noise=0.0):\n 
actions_next = [agent.act(obs, noise) for agent, obs in zip(self.maddpg_agent, obs_all_agents)]\n return actions_next", "def target_act(self, obs, noise=0.0):\n #return target_actions\n target_actions = torch.zeros(obs.shape[:2] + (self.action_size,), dtype=torch.float, device=device)\n for i in range(self.num_agents):\n target_actions[:, i, :] = self.maddpg_agent[i].target_act(obs[:, i])\n \n return target_actions", "def get_next_action(self, epsilon, learning_params):\n\n T = learning_params.T\n\n if random.random() < epsilon:\n # With probability epsilon, randomly select an action for each agent.\n a_selected = np.full(self.num_agents, -1, dtype=int)\n for i in range(self.num_agents):\n a_selected[i] = random.choice(self.actions[i])\n else:\n partial_index = [] # Don't include action indexes. As a result, in pr_sum, we are summing over actions.\n for i in range(self.num_agents):\n partial_index.append(self.s[i])\n partial_index.append(self.u)\n partial_index = tuple(partial_index)\n\n # Sum over all possible actions for fixed team state and reward machine state.\n pr_sum = np.sum(np.exp(self.q[partial_index] * T))\n\n # pr[i] is an array representing the probability values that agent i will take various actions.\n pr = np.exp(self.q[partial_index] * T)/pr_sum\n\n shp = pr.shape\n pr = pr.flatten()\n\n pr_select = np.zeros([len(pr) + 1, 1])\n pr_select[0] = 0\n for i in range(len(pr)):\n pr_select[i+1] = pr_select[i] + pr[i]\n\n randn = random.random()\n for i in range(len(pr)):\n if randn >= pr_select[i] and randn <= pr_select[i+1]:\n a_selected = np.unravel_index(i, shp)\n a_selected = np.array(a_selected, dtype=int)\n break\n\n a = a_selected\n\n return self.s, a", "def process_action(self, attacking_agent, action_dict, **kwargs):\n if self._get_action_from_dict(action_dict):\n for attacked_agent in self.agents.values():\n if attacked_agent.id == attacking_agent.id:\n # Cannot attack yourself\n continue\n elif not attacked_agent.is_alive:\n # Cannot attack a dead agent\n continue\n elif np.linalg.norm(attacking_agent.position - attacked_agent.position,\n self.attack_norm) > attacking_agent.attack_range:\n # Agent is too far away\n continue\n elif not self.team_attack_matrix[attacking_agent.team, attacked_agent.team]:\n # Attacking agent cannot attack this agent\n continue\n elif np.random.uniform() > attacking_agent.attack_accuracy:\n # Attempted attack, but it failed\n continue\n else:\n # The agent was successfully attacked!\n return attacked_agent", "def apply_action(self, action):\n agent = action['action_details']['agent_id']\n current_node = self.agents_location[agent][0]\n people_collected = self.people_location.get(current_node, 0)\n self.people_location[current_node] = 0\n self.people_collected[agent] += people_collected\n self.agents[agent-1].score += people_collected\n self.agents[agent-1].location = current_node\n self.agents[agent-1].t += 1\n self.agents[agent%2].t += 1\n # self.agents_location[agent%2+1][2] = max(self.agents_location[agent%2+1][2]-1,0)\n # if self.agents_location[agent%2+1][2] == 0:\n # self.agents_location[agent%2+1][0] = self.agents_location[agent%2+1][1]\n # self.agents[agent%2].traversing = False\n # self.agents[agent%2].location = self.agents_location[agent%2+1][1]\n\n if 'expansions' in action[\"action_details\"]:\n self.agents_expansions[action[\"action_details\"]['agent_id']] += action[\"action_details\"]['expansions']\n resulting_observ = self.actions_reactions[action[\"action_tag\"]](action[\"action_details\"])\n resulting_observ['collected'] = 
people_collected\n return resulting_observ", "def action(self, gstate, actions):\n self.log.debug(\"Picking among actions %s\" % actions)\n return actions[0]", "def agent(obs):\n # dictionary for Memory Patterns data\n obs[\"memory_patterns\"] = {}\n # We always control left team (observations and actions\n # are mirrored appropriately by the environment).\n controlled_player_pos = obs[\"left_team\"][obs[\"active\"]]\n # get action of appropriate pattern in agent's memory\n action = get_action_of_agent(obs, controlled_player_pos[0], controlled_player_pos[1])\n # return action\n return action", "def choose_action(self):\n\n # Set the agent state and default action\n action=None\n if len(self.action_sequence) >=1:\n action = self.action_sequence[0] \n if len(self.action_sequence) >=2:\n self.action_sequence=self.action_sequence[1:]\n else:\n self.action_sequence=[]\n return action", "def transl_action_agent2env(acts):\n act_arr = action_id2arr(acts)\n return act_arr[0]", "def action_callback(agent, self):\n obs = self.obs_callback(agent, self)\n action = self.s_agents.select_action(torch.Tensor([obs]), action_noise=True, param_noise=None).squeeze().numpy()\n return _get_action(action, agent, self)", "def RandomAgentProgram(actions):\n return lambda percept: random.choice(actions)", "def RandomAgentProgram(actions):\n return lambda percept: random.choice(actions)", "def transl_action_env2agent(acts):\n act_ids = action_arr2id(acts)\n return one_hot(act_ids)", "def step(self):\n if not self.is_done():\n actions = []\n for agent in self.agents:\n if agent.alive:\n actions.append(agent.program(self.percept(agent)))\n else:\n actions.append(\"\")\n for (agent, action) in zip(self.agents, actions):\n self.execute_action(agent, action)\n self.exogenous_change()", "def step(self):\n if not self.is_done():\n actions = []\n for agent in self.agents:\n if agent.alive:\n actions.append(agent.program(self.percept(agent)))\n else:\n actions.append(\"\")\n for (agent, action) in zip(self.agents, actions):\n self.execute_action(agent, action)\n self.exogenous_change()", "def action(self):\n # --- Ruled Based Test Policy ---\n # Stay still just send communication event\n if self.uid == 0:\n if random.choice(list(range(50))) == 1 and self.comm_count < self.comm_limit:\n action = 3\n action_param = {}\n self.comm_count += 1\n else:\n action = 1\n action_param = {\"ang_accel\": (0 * math.pi / 180), \"accel\": 0}\n return action, action_param\n\n # Others\n # If wall in vision, rotate\n vision_array = self.vision[1]\n if 1 in vision_array[0]:\n accel = -1 if self.speed > 0 else 0\n action = 1\n action_param = {\"ang_accel\": (random.randint(20, 45) * math.pi / 180), \"accel\": accel}\n\n # If hider in front, tag\n elif self.agt_class == 3 and 2 in vision_array[0] and vision_array[1][list(vision_array[0]).index(2)] < 60:\n action = 2\n action_param = {}\n\n # Randomly invoked communication event\n # elif random.choice(list(range(50))) == 1 and self.comm_count < self.comm_limit:\n # action = 3\n # action_param = {}\n # self.comm_count += 1\n\n # If communication received head towards nearest comm. 
agent for three steps\n elif len(self.comm) > 0:\n closest_agent = min(self.comm, key=lambda x: x[0])\n\n # Calculate target angle to the event sender\n target_angle = closest_agent[1] + self.angle\n target_angle = 2*math.pi + target_angle if target_angle < 0 else target_angle\n target_angle = target_angle - 2*math.pi if target_angle > 2*math.pi else target_angle\n\n # Add target angle to history such that the agent moves until it finds the target angle\n self.history.append(target_angle)\n direction = closest_agent[1]/abs(closest_agent[1])\n action = 1\n action_param = {\"ang_accel\": direction*math.pi/18, \"accel\": -1 if self.speed > 0 else 0}\n\n # If target angle not found, continue searching\n elif len(self.history) > 0:\n direction = self.history[-1]/abs(self.history[-1])\n action = 1\n action_param = {\"ang_accel\": direction*math.pi/18, \"accel\": -1 if self.speed > 0 else 0}\n if self.history[-1] - math.pi/9 < self.angle < self.history[-1] + math.pi/9:\n self.history.pop(-1)\n\n # When there isn't a special event, just move forward\n else:\n st_rate = self.stamina/self.max_stamina\n if st_rate > 0.75:\n accel = np.random.normal(3, 1, 1)\n elif st_rate > 0.4:\n accel = np.random.randint(-1, 3)\n else:\n accel = -1\n action = 1\n action_param = {\"ang_accel\": (0 * math.pi / 180), \"accel\": accel}\n\n return action, action_param", "def act(self, obs_all_agents, noise=0.0):\n actions = [agent.act(obs, noise) for agent, obs in zip(self.maddpg_agent, obs_all_agents)]\n return np.array(actions)", "def choose_action(data_map, connection, data_ia):\n player = 'player' + str((data_map['main_turn'] % 2) + 1)\n enemy = 'player' + str(2 - (data_map['main_turn'] % 2))\n if data_map['remote']:\n player = 'player' + str(data_map['ia_id'])\n enemy = 'player' + str(data_map['enemy_id'])\n\n # Tells whether IA or player's turn.\n if data_map['main_turn'] % 2 == data_map['ia_id'] or data_map[str(player + 'info')][1] == 'IA':\n game_instruction = ia_action(data_map, data_ia, player)\n if data_map['remote']:\n notify_remote_orders(connection, game_instruction)\n else:\n if data_map['remote']:\n game_instruction = get_remote_orders(connection)\n else:\n game_instruction = raw_input('Enter your commands in format xx_xx -a-> xx_xx or xx_xx -m-> xx_xx')\n\n # Split commands string by string.\n list_action = game_instruction.split()\n\n # grouper instruction par instructions\n list_action2 = []\n for instruction in range(0, len(list_action), 3):\n list_action2.append((list_action[instruction], list_action[instruction + 1], list_action[instruction + 2]))\n\n # Call attack_unit or move_unit in function of instruction.\n attack_counter = 0\n for i in range(len(list_action2)):\n if '-a->' in list_action2[i]:\n data_map, attacked, data_ia = attack_unit(data_map, (int(list_action2[i][0][:2]), int(list_action2[i][0][3:])),\n (int(list_action2[i][2][:2]), int(list_action2[i][2][3:])), player, enemy, data_ia)\n attack_counter += attacked\n elif '-m->' in list_action2[i]:\n data_map, data_ia = move_unit(data_map, (int(list_action2[i][0][:2]), int(list_action2[i][0][3:])),\n (int(list_action2[i][2][:2]), int(list_action2[i][2][3:])), player, enemy, data_ia)\n\n # Save if a player have attacked.\n if attack_counter:\n data_map['attack_turn'] = 0\n else:\n data_map['attack_turn'] += 1\n data_map['main_turn'] += 1\n\n return data_map", "def step(self, action, update=True):\n\n if self.centralized_planning:\n agent_states = [human.get_full_state() for human in self.humans]\n if self.robot.visible:\n 
agent_states.append(self.robot.get_full_state())\n human_actions = self.centralized_planner.predict(\n agent_states, self.group_membership, self.obstacles\n )[:-1]\n else:\n human_actions = self.centralized_planner.predict(\n agent_states, self.group_membership, self.obstacles\n )\n else:\n human_actions = []\n for human in self.humans:\n # Choose new target if human has reached goal and in perpetual mode:\n if human.reached_destination() and self.perpetual:\n if self.train_val_sim == \"square_crossing\":\n gx = (\n np.random.random() * self.square_width * 0.5 * np.random.choice([-1, 1])\n )\n gy = (np.random.random() - 0.5) * self.square_width\n human.set(human.px, human.py, gx, gy, 0, 0, 0)\n elif self.train_val_sim == \"circle_crossing\":\n human.set(human.px, human.py, -human.px, -human.py, 0, 0, 0)\n else:\n if np.random.rand(1) > 0.5:\n gx = (\n np.random.random()\n * self.square_width\n * 0.5\n * np.random.choice([-1, 1])\n )\n gy = (np.random.random() - 0.5) * self.square_width\n human.set(human.px, human.py, gx, gy, 0, 0, 0)\n else:\n human.set(human.px, human.py, -human.px, -human.py, 0, 0, 0)\n # observation for humans is always coordinates\n human_ob = [\n other_human.get_observable_state()\n for other_human in self.humans\n if other_human != human\n ]\n if self.robot.visible:\n human_ob += [self.robot.get_observable_state()]\n human_actions.append(human.act(human_ob, self.group_membership))\n # collision detection\n dmin = float(\"inf\")\n collisions = 0\n human_distances = list()\n for i, human in enumerate(self.humans):\n px = human.px - self.robot.px\n py = human.py - self.robot.py\n if self.robot.kinematics == \"holonomic\":\n vx = human.vx - action.vx\n vy = human.vy - action.vy\n else:\n vx = human.vx - action.v * np.cos(action.r + self.robot.theta)\n vy = human.vy - action.v * np.sin(action.r + self.robot.theta)\n ex = px + vx * self.time_step\n ey = py + vy * self.time_step\n # closest distance between boundaries of two agents\n human_dist = (\n point_to_segment_dist(px, py, ex, ey, 0, 0) - human.radius - self.robot.radius\n )\n if human_dist < 0:\n collisions += 1\n self.episode_info[\"collisions\"] -= self.collision_penalty\n # logging.debug(\"Collision: distance between robot and p{} is {:.2E}\".format(i, human_dist))\n break\n elif human_dist < dmin:\n dmin = human_dist\n human_distances.append(human_dist)\n\n # collision detection between robot and static obstacle\n static_obstacle_dmin = float(\"inf\")\n static_obstacle_collision = 0\n obstacle_distances = list()\n min_dist = self.robot.radius\n px = self.robot.px\n py = self.robot.py\n\n if self.robot.kinematics == \"holonomic\":\n vx = action.vx\n vy = action.vy\n else:\n vx = action.v * np.cos(action.r + self.robot.theta)\n vy = action.v * np.sin(action.r + self.robot.theta)\n ex = px + vx * self.time_step\n ey = py + vy * self.time_step\n for i, obstacle in enumerate(self.obstacles):\n robot_position = ex, ey\n obst_dist = line_distance(obstacle, robot_position)\n if obst_dist < min_dist:\n static_obstacle_collision += 1\n self.episode_info[\n \"static_obstacle_collisions\"\n ] -= self.static_obstacle_collision_penalty\n break\n\n # collision detection between humans\n human_num = len(self.humans)\n for i in range(human_num):\n for j in range(i + 1, human_num):\n dx = self.humans[i].px - self.humans[j].px\n dy = self.humans[i].py - self.humans[j].py\n dist = (\n (dx ** 2 + dy ** 2) ** (1 / 2) - self.humans[i].radius - self.humans[j].radius\n )\n if dist < 0:\n # detect collision but don't take humans' 
collision into account\n logging.debug(\"Collision happens between humans in step()\")\n # check if reaching the goal\n end_position = np.array(self.robot.compute_position(action, self.time_step, self.closed))\n reaching_goal = (\n norm(end_position - np.array(self.robot.get_goal_position()))\n < self.robot.radius + self.goal_radius\n )\n done = False\n info = Nothing()\n reward = -self.time_penalty\n goal_distance = np.linalg.norm(\n [\n (end_position[0] - self.robot.get_goal_position()[0]),\n (end_position[1] - self.robot.get_goal_position()[1]),\n ]\n )\n progress = self.previous_distance - goal_distance\n self.previous_distance = goal_distance\n reward += self.progress_reward * progress\n self.episode_info[\"progress\"] += self.progress_reward * progress\n if self.global_time >= self.time_limit:\n done = True\n info = Timeout()\n self.episode_info[\"did_succeed\"] = 0.0\n self.episode_info[\"did_collide\"] = 0.0\n self.episode_info[\"did_collide_static_obstacle\"] = 0.0\n self.episode_info[\"did_timeout\"] = 1.0\n if collisions > 0:\n reward -= self.collision_penalty * collisions\n if self.end_on_collision:\n done = True\n info = Collision()\n self.episode_info[\"did_succeed\"] = 0.0\n self.episode_info[\"did_collide\"] = 1.0\n self.episode_info[\"did_collide_static_obstacle\"] = 0.0\n self.episode_info[\"did_timeout\"] = 0.0\n\n if static_obstacle_collision > 0:\n reward -= self.static_obstacle_collision_penalty * static_obstacle_collision\n if self.end_on_collision:\n done = True\n info = Collision()\n self.episode_info[\"did_succeed\"] = 0.0\n self.episode_info[\"did_collide\"] = 0.0\n self.episode_info[\"did_collide_static_obstacle\"] = 1.0\n self.episode_info[\"did_timeout\"] = 0.0\n if reaching_goal:\n reward += self.success_reward\n done = True\n info = ReachGoal()\n self.episode_info[\"goal\"] = self.success_reward\n self.episode_info[\"did_succeed\"] = 1.0\n self.episode_info[\"did_collide\"] = 0.0\n self.episode_info[\"did_collide_static_obstacle\"] = 0.0\n self.episode_info[\"did_timeout\"] = 0.0\n for human_dist in human_distances:\n if 0 <= human_dist < self.discomfort_dist * self.discomfort_scale:\n discomfort = (\n (human_dist - self.discomfort_dist * self.discomfort_scale)\n * self.discomfort_penalty_factor\n * self.time_step\n )\n reward += discomfort\n self.episode_info[\"discomfort\"] += discomfort\n\n forces = self.centralized_planner.get_force_vectors(coeff=[1] * 6)\n\n if forces is not None:\n # separate human and robot forces\n robot_forces = forces[-1]\n human_forces = forces[:-1]\n # calculate average of human forces and append them to the log\n for i, force in enumerate(self.force_list):\n self.episode_info.get(\"avg_\" + force).append(\n np.average(np.hypot(*human_forces[:, i, :].transpose()))\n )\n # add robot social force\n self.episode_info.get(\"robot_social_force\").append(np.hypot(*robot_forces[1]))\n\n human_num = len(self.humans)\n for i in range(human_num):\n px = self.humans[i].px\n py = self.humans[i].py\n gx = self.humans[i].gx\n gy = self.humans[i].gy\n\n self.episode_info[\"pedestrian_distance_traversed\"][i].append([px,py])\n self.episode_info[\"pedestrian_goal\"][i].append([gx,gy])\n\n self.episode_info[\"pedestrian_velocity\"][i].append([vx,vy])\n\n\n\n # penalize group intersection\n robot_pos = [self.robot.px, self.robot.py]\n robot_vel = [self.robot.vx, self.robot.vy]\n\n self.episode_info[\"robot_distance_traversed\"].append(robot_pos)\n self.episode_info[\"robot_velocity\"].append(robot_vel)\n\n\n\n convex = 1\n\n for idx, group in 
enumerate(self.group_membership):\n # get the members of the group\n points = []\n for human_id in group:\n ind_points = [\n point_along_circle(\n self.humans[human_id].px,\n self.humans[human_id].py,\n self.humans[human_id].radius,\n )\n for _ in range(10)\n ]\n points.extend(ind_points)\n\n if convex == 1:\n\n # compute the convex hull\n hull = ConvexHull(points)\n\n group_col = point_in_hull(robot_pos, hull)\n\n # min spanning circle\n else:\n circle_def = minimum_enclosing_circle(points)\n\n group_col = is_collision_with_circle(\n circle_def[0][0], circle_def[0][1], circle_def[1], robot_pos[0], robot_pos[1]\n )\n\n if group_col:\n group_discomfort = -self.group_discomfort_penalty\n reward += group_discomfort\n self.episode_info[\"group_discomfort\"] += group_discomfort\n\n # we only want to track number of violations once per group per episode\n self.episode_info[\"group_intersection_violations\"][idx] = 1.0\n\n if (\n len(human_distances) > 0\n and 0 <= min(human_distances) < self.discomfort_dist * self.discomfort_scale\n ):\n info = Danger(min(human_distances))\n if update:\n # update all agents\n self.robot.step(action, self.closed)\n for i, human_action in enumerate(human_actions):\n self.humans[i].step(human_action, self.closed)\n self.global_time += self.time_step\n for i, human in enumerate(self.humans):\n # only record the first time the human reaches the goal\n if self.human_times[i] == 0 and human.reached_destination():\n self.human_times[i] = self.global_time\n # compute the observation\n if self.robot.sensor == \"coordinates\":\n ob = [human.get_observable_state() for human in self.humans]\n\n if self.enable_intent:\n if self.intent_type == \"individual\":\n target_maps = np.array([human.get_target_map() for human in self.humans])\n elif self.intent_type == \"group\":\n target_maps = np.array([human.get_target_map() for human in self.humans])\n\n # average intent map across group members\n for group in self.group_membership:\n # get the members of the group\n avg = np.average([target_maps[human_id] for human_id in group], axis=0)\n for human_id in group:\n target_maps[human_id] = avg\n\n # add target_map to observation\n for i in range(len(ob)):\n ob[i].update_target_map(target_maps[i])\n else:\n print(\n \"unrecognized intent type, only valid options are individual or group, received: \",\n self.intent_type,\n )\n\n elif self.robot.sensor.lower() == \"rgb\" or self.robot.sensor.lower() == \"gray\":\n snapshot = self.get_pixel_obs()\n prior_planes = snapshot.shape[1] * (self.num_frames - 1)\n self.obs_history = np.concatenate(\n (self.obs_history[:, -prior_planes:, :, :], snapshot), axis=1\n )\n ob = self.obs_history\n else:\n raise ValueError(\"Unknown robot sensor type\")\n # store state, action value and attention weights\n self.states.append(\n [\n self.robot.get_full_state(),\n [human.get_full_state() for human in self.humans],\n self.centralized_planner.get_force_vectors(),\n ]\n )\n if hasattr(self.robot.policy, \"action_values\"):\n self.action_values.append(self.robot.policy.action_values)\n if hasattr(self.robot.policy, \"get_attention_weights\"):\n self.attention_weights.append(self.robot.policy.get_attention_weights())\n else:\n if self.robot.sensor == \"coordinates\":\n ob = [\n human.get_next_observable_state(action, self.closed)\n for human, action in zip(self.humans, human_actions)\n ]\n elif self.robot.sensor.lower() == \"rgb\" or self.robot.sensor.lower() == \"gray\":\n snapshot = self.get_pixel_obs()\n prior_planes = snapshot.shape[1] * (self.num_frames - 
1)\n self.obs_history = np.concatenate(\n (self.obs_history[:, -prior_planes:, :, :], snapshot), axis=1\n )\n ob = self.obs_history\n else:\n raise ValueError(\"Unknown robot sensor type\")\n if done:\n self.episode_info[\"time\"] = -self.global_time * self.time_penalty / self.time_step\n self.episode_info[\"global_time\"] = self.global_time\n info = self.episode_info # Return full episode information at the end\n return ob, reward, done, info", "def parseAction(self, action):\n action = self.AGENT_TYPES[action]\n\n\n full_action = {}\n full_action[\"action\"] = action\n if action == \"eli-kw\":\n keywords = self.dataset.getSuggestedKeywords()\n full_action[\"keywords\"] = keywords[:self.N]\n elif action == \"info\" or action == \"info-all\":\n full_action[\"function\"] = self.current_function\n\n elif action == \"sugg\" or action == \"sugg-info-all\":\n top_hit = self.dataset.getTopHits(1)\n if not top_hit:\n full_action[\"action\"] = \"eli-query\"\n else:\n functions = self.dataset.getTopHits(1, self.result_index)\n if functions:\n full_action[\"function\"] = functions[0]\n else:\n full_action[\"function\"] = \"\"\n\n self.result_index += 1\n\n elif action == \"sugg-all\":\n full_action[\"list\"] = self.dataset.getTopHits(self.K, self.result_index)\n\n elif action == \"change-page\":\n self.result_index += self.K\n full_action[\"list\"] = self.dataset.getTopHits(self.K, self.result_index)\n return full_action", "def run_agent(self):\n\n #-- Load and init mission --#\n print('Generate and load the ' + self.mission_type + ' mission with seed ' + str(self.mission_seed) + ' allowing ' + self.AGENT_MOVEMENT_TYPE + ' movements')\n mission_xml,reward_goal,reward_intermediate,n_intermediate_rewards,reward_timeout,reward_sendcommand, timeout = init_mission(self.agent_host, self.agent_port, self.AGENT_NAME, self.mission_type, self.mission_seed, self.AGENT_MOVEMENT_TYPE)\n self.solution_report.setMissionXML(mission_xml)\n\n #-- Define local capabilities of the agent (sensors)--#\n self.agent_host.setObservationsPolicy(MalmoPython.ObservationsPolicy.LATEST_OBSERVATION_ONLY)\n self.agent_host.setVideoPolicy(MalmoPython.VideoPolicy.LATEST_FRAME_ONLY)\n self.agent_host.setRewardsPolicy(MalmoPython.RewardsPolicy.KEEP_ALL_REWARDS)\n\n # Initialise cumulative reward\n reward_cumulative = 0.0\n\n state_t = self.agent_host.getWorldState()\n\n while state_t.is_mission_running:\n # Wait 0.5 sec\n time.sleep(0.5)\n\n if state_t.is_mission_running:\n actionIdx = random.randint(0, 3)\n print(\"Requested Action:\",self.AGENT_ALLOWED_ACTIONS[actionIdx])\n\n # Now try to execute the action givne a noisy transition model\n actual_action = self.__ExecuteActionForRandomAgentWithNoisyTransitionModel__(actionIdx, 0.05);\n print(\"Actual Action:\",actual_action)\n\n # Collect the number of rewards and add to reward_cumulative\n # Note: Since we only observe the sensors and environment every a number of rewards may have accumulated in the buffer\n for reward_t in state_t.rewards:\n reward_cumulative += reward_t.getValue()\n self.solution_report.addReward(reward_t.getValue(), datetime.datetime.now())\n print(\"Reward_t:\",reward_t.getValue())\n print(\"Cumulative reward so far:\",reward_cumulative)\n\n # Check if anything went wrong along the way\n for error in state_t.errors:\n print(\"Error:\",error.text)\n\n # Handle the sensor input\n xpos = None\n ypos = None\n zpos = None\n yaw = None\n pitch = None\n if state_t.number_of_observations_since_last_state > 0: # Has any Oracle-like and/or internal sensor observations 
come in?\n msg = state_t.observations[-1].text # Get the detailed for the last observed state\n oracle = json.loads(msg) # Parse the Oracle JSON\n\n # Oracle\n grid = oracle.get(u'grid', 0) #\n\n # GPS-like sensor\n xpos = oracle.get(u'XPos', 0) # Position in 2D plane, 1st axis\n zpos = oracle.get(u'ZPos', 0) # Position in 2D plane, 2nd axis (yes Z!)\n ypos = oracle.get(u'YPos', 0) # Height as measured from surface! (yes Y!)\n\n # Standard \"internal\" sensory inputs\n yaw = oracle.get(u'Yaw', 0) # Yaw\n pitch = oracle.get(u'Pitch', 0) # Pitch\n\n # Vision\n if state_t.number_of_video_frames_since_last_state > 0: # Have any Vision percepts been registred ?\n frame = state_t.video_frames[0]\n\n #-- Print some of the state information --#\n print(\"Percept: video,observations,rewards received:\",state_t.number_of_video_frames_since_last_state,state_t.number_of_observations_since_last_state,state_t.number_of_rewards_since_last_state)\n print(\"\\tcoordinates (x,y,z,yaw,pitch):\" + str(xpos) + \" \" + str(ypos) + \" \" + str(zpos)+ \" \" + str(yaw) + \" \" + str(pitch))\n\n # Get the new world state\n state_t = self.agent_host.getWorldState()\n\n # --------------------------------------------------------------------------------------------\n # Summary\n print(\"Summary:\")\n print(\"Cumulative reward = \" + str(reward_cumulative) )\n\n return", "def gen_action(self, agent_list, observation, free_map=None):\n action_out = []\n # for i in agent_list:\n # action_out.append(self.random.randint(0, 5)) # choose random action\n #\n # return action_out\n if free_map is not None: self.free_map = free_map\n\n for idx, agent in enumerate(agent_list):\n # First choose a random direction to go into\n starting_action = self.random.randint(0, 5)\n # Initializing the direction based on the starting_action\n if starting_action == 1:\n self.heading_up[idx] = True\n elif starting_action == 2:\n self.heading_right[idx] = True\n # elif starting_action == 3:\n # self.heading_down[idx] = True\n # elif starting_action == 4:\n # self.heading_left[idx] = True\n\n a = self.roomba(agent, idx, observation)\n\n # if starting_action < 3:\n # a = self.roomba(agent, idx, observation)\n # else:\n # a = starting_action\n action_out.append(a)\n\n return action_out", "def choose(self):\n # pick agent A\n keys = list(self._agents.keys())\n keyA = random.choice(keys)\n agentA = self.model.schedule.agents[keyA]\n\n # pick pick agent B\n keyB = random.choice(agentA.neighbors)\n agentB = self.model.schedule.agents[keyB]\n\n return agentA, agentB", "def execute_actions(self, actions):\n execute_actions(self.board, self.agent_locs, actions)", "def execute_action(self, agent, action):\n raise NotImplementedError", "def execute_action(self, agent, action):\n raise NotImplementedError", "def step(self, action):\n obs = self.gym.get_observations()\n all_actions = self.gym.act(obs)\n all_actions.insert(self.gym.training_agent, action)\n state, reward, terminal, info = self.gym.step(all_actions)\n agent_state = self.featurize(state[self.gym.training_agent])\n\n # agent_state_history = self.make_observation(agent_state, self.step)\n agent_reward = reward[self.gym.training_agent]\n\n # self.step += 1\n return agent_state, agent_reward, terminal, info", "def step(self, action):\n self.steps += 1\n self.robots[0].setAction(action)\n for i in range(self.num_agents):\n if i != 0 and self.policies[i:i+1]: # self.policies[0] is dummy\n self.robots[i].setAction(self.policies[i](self.robots[i].getObservation()))\n # rewards = [ -1.0 * self.num_foods / 
self.max_steps for _ in range(self.num_agents) ] # so agent needs to eat foods quickly\n rewards = [ 0.0 for _ in range(self.num_agents) ]\n for i in range(self.BULLET_STEPS):\n p.stepSimulation()\n rewards = [ rewards[i]+self._getReward(self.robots[i]) for i in range(self.num_agents) ]\n self.episode_rewards = [ self.episode_rewards[i]+rewards[i] for i in range(self.num_agents) ]\n obs = self.robots[0].getObservation()\n done = self._isDone()\n info = { 'steps': self.steps }\n if done:\n # TODO\n info['episode'] = { 'r': self.episode_rewards[0], 'l': self.steps, 'r_all': self.episode_rewards }\n # print(self.episode_rewards, self.steps)\n return obs, rewards[0], done, info", "def choose_action(self, agent_data):\r\n action_value_estimates = agent_data[\"action_value_estimates\"]\r\n roll = random.uniform(0,1)\r\n if roll <= self.epsilon:\r\n action = random.choice( list( range(0,len(action_value_estimates))))\r\n else:\r\n action = self.argmax_with_random_tiebreaker(action_value_estimates)\r\n return action", "def execute_action(self, agent, action):\n abstract", "def step(self):\n\t\tnumpy.random.shuffle(self.agents_list)\n\t\tfor agent in self.agents_list:\n\t\t\tagent.produce()\n\t\tfor agent in self.agents_list:\n\t\t\tagent.charge()\n\t\tfor agent in self.agents_list:\n\t\t\tif agent.strategy == 0: \n\t\t\t\tagent.retribute()\n\t\tfor agent in self.agents_list:\n\t\t\tif agent.strategy == 0: \n\t\t\t\tagent.give()\n\t\tfor agent in self.agents_list:\n\t\t\tagent.consume()\n\t\tfor agent in self.agents_list:\n\t\t\tagent.solve_consumption_deficit()\n\t\tfor site in self.sites:\n\t\t\tsite.recovery()\n\t\tfor agent in self.agents_list:\n\t\t\tagent.sprout()", "def actionSelector(self): \n if self.Temp!=0:\n if len(self.lessons) > 60 and self.var_T: \n # if the agent haven't already gotten food since a certain time \n # we increase the temperature by 0.001 \n if self.count_without_food>12:\n self.Temp += 0.01 \n if self.Temp>=(self.var_T[0]): \n self.Temp = self.var_T[0] \n # otherwise we decrease the temperatur by 0.001 \n else: \n self.Temp -= 0.001\n if self.Temp <= (self.var_T[-1]):\n self.Temp = self.var_T[-1]\n \n s = np.sum([np.exp(float(k)/self.Temp) for k in self.U_list])\n\n self.action_proba =[np.exp(float(m)/self.Temp)/s for m in self.U_list]\n action = np.random.choice(np.arange(4),p=self.action_proba) # choice a random choice relating to the probability distribution given by the softmax algorith \n else:\n action = np.argmax(self.U_list)\n return action", "def execute_action(self, agent, action):\n agent.bump = False\n agent.performance_measure -= 1\n \n if action == 'TurnRight':\n agent.heading = self.turn_heading(agent.heading, -1)\n elif action == 'TurnLeft':\n agent.heading = self.turn_heading(agent.heading, +1)\n elif action == 'Forward':\n self.move_to(agent, vector_add(self.heading_to_vector(agent.heading),\n agent.location))\n elif action == 'Grab':\n if self.some_things_at(agent.location, tclass=Gold):\n try:\n gold = self.list_things_at(agent.location, tclass=Gold)[0]\n agent.has_gold = True\n self.delete_thing(gold)\n except:\n print \"Error: Gold should be here, but couldn't find it!\"\n print 'All things:', self.list_things_at(agent.location)\n print 'Gold?:', self.list_things_at(agent.location, tclass=Gold)\n sys.exit(-1)\n\n elif action == 'Release':\n if agent.location == self.entrance:\n if agent.has_gold:\n agent.performance_measure += 1000\n self.done = True\n elif action == 'Shoot':\n if agent.has_arrow:\n agent.has_arrow = False\n 
agent.performance_measure -= 10\n self.shoot_arrow(agent)\n elif action == 'Stop':\n self.done = True\n \n print '\\nCurrent Location: ', agent.location\n print 'Heading: ', self.heading_to_str(agent.heading)\n print 'Reminder- Start Location:', self.entrance\n print ''\n print 'Percepts:'", "def agent_start(self,thisObs): \n action={'vol':0,'price':0}\n \n \"\"\"Changes for Boltzman Exploration\"\"\"\n #choice=self.pick_action_from_dist()\n #action_bin=self.prob_dist_action[choice]\n #action=self.unbin_action(action_bin,thisObs)\n \n \"\"\"Changes for epsilon greedy method\"\"\"\n action= self.return_random_action(thisObs)\n \n self.lastAction=action\n self.lastObs=thisObs\n return action", "def get_agent(env) -> DDPGAgent:\n assert len(env.action_space.shape) == 1\n nb_actions = env.action_space.shape[0]\n action_input = Input(shape=(nb_actions,), name='action_input')\n observation_input = Input(shape=(1,) + env.observation_space.shape, name='observation_input')\n\n range_action_input = 0.5 * (env.action_space.high - env.action_space.low)\n constantBias = 1\n lowb = env.action_space.low\n\n # actor = Flatten(input_shape=(1,) + env.observation_space.shape)(observation_input)\n y = Flatten()(observation_input)\n y = Dense(16)(y)\n y = BatchNormalization()(y)\n y = Activation('relu')(y)\n y = Dense(16)(y)\n y = BatchNormalization()(y)\n y = Activation('relu')(y)\n pht = Dense(1)(y)\n pht = BatchNormalization()(pht)\n pht = Activation('tanh')(pht)\n pht = Lambda(lambda a: (a + K.constant(constantBias)) * K.constant(range_action_input[0])\n + K.constant(lowb[0]))(pht)\n rht = Dense(1)(y)\n rht = BatchNormalization()(rht)\n rht = Activation('tanh')(rht)\n rht = Lambda(lambda a: (a + K.constant(constantBias)) * K.constant(range_action_input[1])\n + K.constant(lowb[1]))(rht)\n axn = Concatenate()([pht, rht])\n actor = Model(inputs=observation_input, outputs=axn)\n\n flattened_observation = Flatten()(observation_input)\n x = Concatenate()([action_input, flattened_observation])\n x = Dense(32)(x)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n x = Dense(32)(x)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n x = Dense(32)(x)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n x = Dense(1)(x)\n x = Activation('linear')(x)\n critic = Model(inputs=[action_input, observation_input], outputs=x)\n\n memory = SequentialMemory(limit=1000, window_length=1)\n\n random_process = OrnsteinUhlenbeckProcess(theta=.15, mu=0., sigma=.5, size=nb_actions)\n agent = DDPGAgent(nb_actions=nb_actions, actor=actor, critic=critic, critic_action_input=action_input,\n memory=memory, nb_steps_warmup_critic=100, nb_steps_warmup_actor=100,\n gamma=.99, target_model_update=1e-3, random_process=random_process)\n agent.compile(Adam(lr=.001, clipnorm=1.), metrics=['mae'])\n return agent", "def decide(self, state: OthelloState, actions: list):\n # -------- TASK 2 ------------------------------------------------------\n # Your task is to implement an algorithm to choose an action form the\n # given `actions` list. You can implement any algorithm you want.\n # However, you should keep in mind that the execution time of this\n # function is limited. So, instead of choosing just one action, you can\n # generate a sequence of increasing good action.\n # This function is a generator. So, you should use `yield` statement\n # rather than `return` statement. 
To find more information about\n # generator functions, you can take a look at:\n # https://www.geeksforgeeks.org/generators-in-python/\n #\n # If you generate multiple actions, the last action will be used in the\n # game.\n #\n # Tips\n # ====\n # 1. During development of your algorithm, you may want to find the next\n # state after applying an action to the current state; in this case,\n # you can use the following patterns:\n # `next_state = current_state.successor(action)`\n #\n # 2. If you need to simulate a game from a specific state to find the\n # the winner, you can use the following pattern:\n # ```\n # simulator = Game(FirstAgent(), SecondAgent())\n # winner = simulator.play(starting_state=specified_state)\n # ```\n # The `MarkovAgent` has illustrated a concrete example of this\n # pattern.\n #\n # 3. You are free to choose what kind of game-playing agent you\n # implement. Some of the obvious approaches are the following:\n # 3.1 Implement alpha-beta (and investigate its potential for searching deeper\n # than what is possible with Minimax). Also, the order in which the actions\n # are tried in a given node impacts the effectiveness of alpha-beta: you could\n # investigate different ways of ordering the actions/successor states.\n # 3.2 Try out better heuristics, e.g. ones that take into account the higher\n # importance of edge and corner cells. Find material on this in the Internet.\n # 3.3 You could try out more advanced Monte Carlo search methods (however, we do\n # not know whether MCTS is competitive because of the high cost of the full\n # gameplays.)\n # 3.4 You could of course try something completely different if you are willing to\n # invest more time.\n #\n # GL HF :)\n # ----------------------------------------------------------------------\n\n # Replace the following lines with your algorithm\n best_action = actions[0]\n yield best_action", "def step(self, action):\n actions = action.reshape(2, 2)\n action = {'GoalieBrain': actions[0], 'StrikerBrain': actions[1]}\n #action = {'GoalieBrain': [5, 5], 'StrikerBrain': [2, 2]}\n #print(action)\n info = self.env.step(action)\n\n state0 = info[self.brain_names[0]].vector_observations\n state1 = info[self.brain_names[1]].vector_observations\n state = np.vstack((state0, state1))\n reward0 = info[self.brain_names[0]].rewards\n reward1 = info[self.brain_names[1]].rewards\n reward = reward0 + reward1\n done0 = info[self.brain_names[0]].local_done\n done1 = info[self.brain_names[1]].local_done\n done = done0 + done1\n return state, reward, done", "def GetTargetAcc(MCMoves):", "def choose_action(self, features_all_arms) -> Tuple[torch.Tensor, torch.Tensor]:\n actor_output = self.policy.act(obs=features_all_arms)\n chosen_action = torch.argmax(actor_output.action, dim=1)\n log_prob = actor_output.log_prob\n return torch.unsqueeze(chosen_action, 1), log_prob", "def chooseAction(self, gameState):\n actions = gameState.getLegalActions(self.index)\n\n ''' \n You should change this in your own agent.\n '''\n\n return random.choice(actions)", "def chooseAction(self, gameState):\n\n actions = gameState.getLegalActions(self.index)\n obs = gameState.getAgentDistances()\n for o in self.opponents:\n self.observe(o, obs[o], gameState)\n self.displayDistributionsOverPositions(self.distributions)\n\n # You can profile your evaluation time by uncommenting these lines\n start = time.time()\n values = [self.evaluate(gameState, a) for a in actions]\n print 'eval time for agent %d: %.4f' % (self.index, time.time() - start)\n\n maxValue = 
max(values)\n bestActions = [a for a, v in zip(actions, values) if v == maxValue]\n\n #self.elapseTime(gameState)\n\n return random.choice(bestActions)", "def get_action_of_agent(obs, player_x, player_y):\n memory_patterns = find_patterns(obs, player_x, player_y)\n # find appropriate pattern in list of memory patterns\n for get_pattern in memory_patterns:\n pattern = get_pattern(obs, player_x, player_y)\n if pattern[\"environment_fits\"](obs, player_x, player_y):\n return pattern[\"get_action\"](obs, player_x, player_y)", "def actions(self, states, agent_indices):\n return NotImplementedError()", "def step(self, actions):\n \n lastidx = 0\n for _i in range(self.nbehavior):\n action_tuple = ActionTuple()\n action_tuple.add_discrete(actions[lastidx:lastidx + self.n_each_agent[_i], :])\n self.env.set_actions(behavior_name=self.behavior_names[_i], action=action_tuple)\n lastidx = self.n_each_agent[_i]\n\n self.env.step()\n self.decision_steps = []\n self.terminal_steps = []\n\n for _i in range(self.nbehavior):\n d_s, t_s = self.env.get_steps(self.behavior_names[_i])\n self.decision_steps.append(d_s)\n self.terminal_steps.append(t_s)\n\n obs = []\n reward = []\n done = []\n info = {}\n\n for _i in range(self.nbehavior):\n _j = 0\n for o in self.reshape_obs(self.decision_steps[_i]):\n obs.append(o)\n reward.append(self.decision_steps[_i].reward[_j])\n done.append(False)\n _j += 1\n\n return obs, reward, done, info", "def act(self):\n self.features = self.next_features\n self.choose_random = np.random.choice(2,p=(1-self.epsilon,self.epsilon)) # Chooses whether to explore or exploit with probability 1-self.epsilon\n # Selects the best action index in current state\n if self.choose_random:\n self.chosenA = np.random.choice(4)\n else:\n self.chosenA = self.argmaxQsa(self.features)\n # Records reward for printing and performs action\n self.action = self.idx2act[self.chosenA]\n # Execute the action and get the received reward signal\n self.reward = self.move(self.action)\n self.total_reward += self.reward\n # IMPORTANT NOTE:\n # 'action' must be one of the values in the actions set,\n # i.e. 
Action.LEFT, Action.RIGHT, Action.ACCELERATE or Action.BRAKE\n # Do not use plain integers between 0 - 3 as it will not work", "def choose_target(self, agents):\n\n number_of_suspects = [0]*(len(agents))\n number_of_suspects_per_agent = []\n\n index = 0\n for a1 in agents:\n if not a1.is_impostor():\n for a2 in agents:\n if self.km.suspects(a1.agent_id, a2.agent_id):\n number_of_suspects[index] = number_of_suspects[index] + 1\n else:\n number_of_suspects[index] = 999999\n number_of_suspects_per_agent.append((a1.agent_id,number_of_suspects[index]))\n index = index + 1\n\n self.target = min(number_of_suspects_per_agent, key = lambda t: t[1])[0]", "def select_action(images, n_actions, device, eps_threshold=-1):\n actions = []\n\n for i in images:\n if eps_threshold == -1:\n actions.append(torch.tensor([[random.randrange(n_actions)]], device=device, dtype=torch.long))\n else:\n sample = random.random()\n if sample > eps_threshold:\n with torch.no_grad():\n # t.min(1) will return smallest column value of each row.\n # second column on min result is index of where min element was\n # found, so we pick action with the lower expected reward.\n actions.append(policy_net(i.unsqueeze(0)).min(1)[1].view(1, 1))\n else:\n actions.append(torch.tensor([[random.randrange(n_actions)]], device=device, dtype=torch.long))\n\n return torch.tensor(actions, device=device)", "def test_mmp_active_inference(self):\n\n num_obs = [3, 2]\n num_states = [4, 3]\n num_control = [1, 3]\n A = random_A_matrix(num_obs, num_states)\n B = random_B_matrix(num_states, num_control)\n\n C = obj_array_zeros(num_obs)\n C[1][0] = 1.0 \n C[1][1] = -2.0 \n\n agent = Agent(A=A, B=B, C=C, control_fac_idx=[1], inference_algo=\"MMP\", policy_len=2, inference_horizon=3)\n\n T = 10\n\n for t in range(T):\n\n o = [np.random.randint(num_ob) for num_ob in num_obs] # just randomly generate observations at each timestep, no generative process\n qx = agent.infer_states(o)\n agent.infer_policies()\n action = agent.sample_action()\n \n print(agent.prev_actions)\n print(agent.prev_obs)", "def step(self):\n if not self.is_done():\n actions = [ agent.program(self.percept(agent)) for agent in self.agents ]\n for agent, action in zip(self.agents, actions):\n self.execute_action(agent, action)\n\n self.exogenous_change()", "def step(self, actions):\r\n # Run actions\r\n actions = [np.argmax((action_scores+.0001) * mask) for action_scores, mask in zip(actions, self.get_avail_actions())]\r\n reward, terminated, info = self.env.step(actions)\r\n\r\n # Get updated state\r\n self.state = self.env.get_state()\r\n\r\n # Return arrays for each agent\r\n reward_n = [reward / self.n for _ in range(self.n)]\r\n terminated_n = [terminated for _ in range(self.n)]\r\n info_n = [info for _ in range(self.n)]\r\n observation_n = self.env.get_obs()\r\n\r\n return observation_n, reward_n, terminated_n, info_n", "def find_next_action(self, obs, agents, i):\n return None", "def step(self, action):\n # THIS WILL BE CALLED FROM ALL STEP DRIVERS\n self._world = self._action_wrapper.action_to_behavior(world=self._world,\n action=action)\n # 1. move the agent we set the action for\n controlled_agent_id = self._scenario._eval_agent_ids[self._action_wrapper._input_count-1]\n self._world.stepAgent(self._step_time, controlled_agent_id)\n\n # length of agents\n if self._action_wrapper._input_count >= len(self._scenario._eval_agent_ids):\n # CANNOT STEP WORLD IF NOT ALL ACTIONS ARE SET\n self._action_wrapper._input_count = 0\n \n # 2. 
move all other agent\n self._world.step(self._step_time)\n if self._render:\n self.render()\n\n # TODO needs to know the agents id\n return self.snapshot(\n world=self._world,\n controlled_agents=controlled_agent_id,\n action=action)", "def step(self, action):\n # print(action)\n distances = self.agent.return_distances(self.agent.corners, self.agent.line_pos)\n\n left = distances[0]\n right = distances[1]\n self.agent.distances.append({\n 'left': left,\n 'right': right\n })\n reward = 0\n if action == 1:\n self.agent.angle -= 90\n if self.agent.angle < 0:\n self.agent.angle = 0\n self.agent.direction_history.append('left')\n self.reset_raycasts(self.agent.angle)\n self.render()\n if left > right:\n reward += 5\n else:\n reward -= 5\n\n elif action == 2:\n self.agent.angle += 90\n if self.agent.angle >= 360:\n self.agent.angle = 0\n\n self.reset_raycasts(self.agent.angle)\n self.render()\n self.agent.direction_history.append('right')\n if left < right:\n reward += 5\n else:\n reward -= 5\n\n elif action == 0:\n self.agent.direction_history.append('forward')\n if self.agent.angle >= 360: self.agent.angle == 0\n if self.agent.angle == 0 or self.agent.angle == 360:\n self.agent.agent_position['y'] -= 10\n self.reset_raycasts(self.agent.angle)\n elif self.agent.angle == 90: \n self.agent.agent_position['x'] += 10\n self.reset_raycasts(self.agent.angle)\n elif self.agent.angle == 180: \n self.agent.agent_position['y'] += 10\n self.reset_raycasts(self.agent.angle)\n elif self.agent.angle == 270:\n self.agent.agent_position['x'] -= 10\n self.reset_raycasts(self.agent.angle)\n \n if left + right >= 50:\n reward += 5\n\n self.render()\n\n elif action == 3:\n self.agent.direction_history.append('reverse')\n if self.agent.angle == 0:\n self.agent.agent_position['y'] += 10\n self.reset_raycasts(self.agent.angle)\n self.render()\n elif self.agent.angle == 90: \n self.agent.agent_position['x'] -= 10\n self.reset_raycasts(self.agent.angle)\n self.render()\n elif self.agent.angle == 180: \n self.agent.agent_position['y'] -= 10\n self.reset_raycasts(self.agent.angle)\n self.render()\n elif self.agent.angle == 270:\n self.agent.agent_position['x'] += 10\n self.reset_raycasts(self.agent.angle)\n self.render()\n \n if left + right <= 50:\n reward += 5\n\n \n else:\n reward -= 5\n\n if \"forward\" not in self.agent.direction_history[len(self.agent.direction_history)-6:len(self.agent.direction_history)-1]:\n reward -= 10\n\n \n info = {}\n if self.agent.check_collision():\n reward -= 10\n self.reset() \n self.agent.rewards.append({\n 'leftDistance': left,\n 'rightDistance': right,\n 'reward': reward,\n })\n self.render()\n print(f\"REWARD: {reward}\")\n # self.render()\n # print(self.agent.direction_history[-1])\n self.agent.rewards.append(reward)\n return np.array([left, right]), reward, False, info", "def act(self, s, exploration, game):\n agent_p = self.compute_marginal_pi(s, one_hot=False)\n if self.exploration and random.random() < self.episilon:\n agent_action = random.randint(0, self.action_num - 1)\n else:\n if self.verbose:\n for s in self.Q.keys():\n print('{}--------------'.format(self.id_))\n print('Q of agent {}: state {}: {}'.format(self.id_, s, str(self.Q[s])))\n # print('QAof agent {}: state {}: {}'.format(self.id_, s, str(self.Q_A[s])))\n # self.Q_A\n print('pi of agent {}: state {}: {}'.format(self.id_, s, self.pi[s]))\n # print('pi of opponent agent {}: state{}: {}'.format(self.id_, s, self.opponent_best_pi[s]))\n print('{}--------------'.format(self.id_))\n agent_action = np.argmax(agent_p)\n 
return agent_action", "def actions(self, state):\n \"*** YOUR CODE HERE ***\"\n if state[2] == 0: # When agent is facing North\n state_fw = (state[0], state[1] + 1, 0)\n state_tr = (state[0], state[1], 3)\n state_tl = (state[0], state[1], 1)\n elif state[2] == 1: # When agent is facing West\n state_fw = (state[0] - 1, state[1], 1)\n state_tr = (state[0], state[1], 0)\n state_tl = (state[0], state[1], 2)\n elif state[2] == 2: # When agent is facing South\n state_fw = (state[0], state[1] - 1, 2)\n state_tr = (state[0], state[1], 1)\n state_tl = (state[0], state[1], 3)\n elif state[2] == 3: # When agent is facing East\n state_fw = (state[0] + 1, state[1], 3)\n state_tr = (state[0], state[1], 2)\n state_tl = (state[0], state[1], 0)\n else:\n raise Exception(\"This shouldn't be happening. Can't find heading\")\n \n shoot_loc_arr = [] # Initialize Array\n for allowed_state in self.allowed: # Iterate through all allowed states\n for goal_state in self.goals: # Iterate through all goal states\n if allowed_state[0] == goal_state[0] and allowed_state[1] < goal_state[1]: shoot_loc_arr.append((allowed_state[0], allowed_state[1], 0)) # X Matches, Head North\n if allowed_state[0] > goal_state[0] and allowed_state[1] == goal_state[1]: shoot_loc_arr.append((allowed_state[0], allowed_state[1], 1)) # Y Matches, Head West\n if allowed_state[0] == goal_state[0] and allowed_state[1] > goal_state[1]: shoot_loc_arr.append((allowed_state[0], allowed_state[1], 2)) # X Matches, Head South\n if allowed_state[0] < goal_state[0] and allowed_state[1] == goal_state[1]: shoot_loc_arr.append((allowed_state[0], allowed_state[1], 3)) # Y Matches, Head East \n\n dist_fw_arr, dist_tr_arr, dist_tl_arr = ([9999999] for i in range(3)) # Initialize to large values\n for goal in shoot_loc_arr: # Iterate through arrays\n if (state_fw[0],state_fw[1]) in self.allowed:\n dist_fw_arr.append(manhattan_distance_with_heading(state_fw, goal))\n dist_tr_arr.append(manhattan_distance_with_heading(state_tr, goal))\n dist_tl_arr.append(manhattan_distance_with_heading(state_tl, goal))\n\n if (min(dist_fw_arr) <= min(min(dist_tr_arr),min(dist_tl_arr))) and (state_fw[0],state_fw[1]) in self.allowed: return ['Forward']\n if min(dist_tr_arr) <= min(min(dist_fw_arr),min(dist_tl_arr)): return ['TurnRight']\n if min(dist_tl_arr) <= min(min(dist_tr_arr),min(dist_tr_arr)): return ['TurnLeft']\n raise Exception(\"This shouldn't be happening. Can't determine action\")", "def act(self, action:Action) -> None:\r\n if self.terminated:\r\n raise ValueError # must restart mdp first, as agent already took Terminate action on terminal\r\n\r\n if action == Action.TERMINAL:\r\n self.terminated = True\r\n return\r\n\r\n if uniform(0, 1) < self.failure:\r\n action = action.turn(uniform(0, 1) <= 0.5) # clockwise or counter-clockwise with equal chance for both\r\n print(\"FAIL:\", action)\r\n\r\n dx, dy = action.delta()\r\n x, y = self.agent_x+dx, self.agent_y+dy\r\n\r\n if x < 0 or x >= len(self.fields) or y < 0 or y >= len(self.fields[0]) or self.fields[x][y] == Field.OBSTACLE:\r\n return\r\n\r\n self.agent_x = x\r\n self.agent_y = y", "def get_action(self, state):\n max_v = -INFINITY\n next_a = Directions.STOP\n alpha = -INFINITY\n beta = INFINITY\n\n # We initialize the visited list. 
This is a list\n # containing a list by agent.\n num = state.getNumAgents()\n visited = [None] * num\n\n for i in range(num):\n visited[i] = []\n\n for child, action in state.generatePacmanSuccessors():\n next_p = self.player(state, PACMAN)\n\n value = self.minimax(child, next_p, visited, alpha, beta)\n\n if value >= max_v:\n max_v, next_a = value, action\n\n return next_a", "def ChooseAction(self):\n self.lastAction = None\n self.lastState = None\n if(self.attention is None or self.attention == \"\"): return\n # find best action for the currently attended node\n actions = list(self.vi.Q[self.states.index(self.attention)])\n actionIndex = actions.index(max(actions))\n actionName = self.actions[actionIndex]\n # execute the best action for the currently attended node\n self.nodes[actionName].Activate()\n self.lastAction = actionName\n self.lastState = self.attention", "def get_all_valid_actions(self):\r\n\r\n # Select, for each agent, the valid actions based on its position (state).\r\n agent_actions = self.searchenv.valid_actions[self.searchstate.positions[0]]\r\n\r\n #print(\"Agent Action: \",agent_actions)\r\n\r\n # Mask the rail transition actions for idle agents.\r\n if self.searchstate.actives == 0:\r\n agent_actions = [0, 0, 1, 0, 1] # STOP_MOVING, or MOVE_FORWARD.\r\n\r\n # Mask the rail transition actions for done agents.\r\n if self.agents_at_goal() == True:\r\n agent_actions = [1, 0, 0, 0, 0] # DO_NOTHING only.\r\n\r\n # Identify for each agent the IDs of the valid actions (i.e., [0, 1, 1, 0, 0] --> [1, 2])\r\n agent_action_list =[]\r\n for i in range(len(agent_actions)):\r\n if agent_actions[i] == 1:\r\n agent_action_list.append(i)\r\n\r\n # Return list containing for each agent, the IDs of the actions available to it.\r\n return agent_action_list", "def multi_agent_example():\n env = holodeck.make(\"CyberPunkCity-FollowSight\")\n\n cmd0 = np.array([0, 0, -2, 10])\n cmd1 = np.array([0, 0, 0])\n for i in range(10):\n env.reset()\n env.tick()\n env.act(\"uav0\", cmd0)\n env.act(\"nav0\", cmd1)\n for _ in range(1000):\n states = env.tick()\n pixels = states[\"uav0\"][\"RGBCamera\"]", "def move(self, agent, action):\n\t\tpass", "def chooseAction(self, gameState):\n\n '''\n You should change this in your own agent.\n '''\n problem = foodsearchproblem(gameState,self)\n return self.astarsearch(problem,gameState,self.foodhuristic)[0]", "def actions_to_accel(self, actions_list): #动作处理函数\n a_container = [[] for _ in range(self.agent_num)]\n for agent_idx in range(self.agent_num):\n action = actions_list[agent_idx]\n if action is None:\n accel = [0, 0]\n else:\n if self.agent_list[agent_idx].is_fatigue: #if agent is out of energy, no driving force applies\n accel = [0,0]\n else:\n mass = self.agent_list[agent_idx].mass\n\n assert self.action_f[0] <= action[0] <= self.action_f[1], print('Continuous driving force needs '\n 'to be within the range [-100,200]')\n force = action[0] / mass\n\n assert self.action_theta[0] <= action[1] <= self.action_theta[1], print(\n 'Continuous turing angle needs to be within the range [-30deg, 30deg]')\n theta = action[1]\n\n theta_old = self.agent_theta[agent_idx][0]\n theta_new = theta_old + theta\n self.agent_theta[agent_idx][0] = theta_new\n\n accel_x = force * math.cos(theta_new / 180 * math.pi)\n accel_y = force * math.sin(theta_new / 180 * math.pi)\n accel = [accel_x, accel_y]\n #self.agent_accel[agent_idx] = accel # update the agent acceleration\n\n a_container[agent_idx] = accel\n return a_container", "def step(self, action):\n \"\"\" Action is a 
motion command \"\"\"\n rich_obs, reward, done, info = super(ColoredEgoCostmapRandomAisleTurnEnv, self).step(action)\n obs = self._extract_egocentric_observation(rich_obs)\n return obs, reward, done, info", "def onActionTaken(self, agent):\n\n pass", "def level1AI(self, values):\n AI_server = AgentServer.get()\n values['e']['agent'] = AI_server.newAgent(2)\n #values['r']['agent'] = AI_server.newAgent(2)\n values['r']['agent'] = AI_server.newFakeAgent()\n values['j']['agent'] = AI_server.newFakeAgent()", "def act(self, states: np.ndarray, eps: float = 0.0) -> List[np.ndarray]:\n actions = [\n agent.act(state.reshape(-1, 1).T, eps)\n for agent, state in zip(self.agents, states)\n ]\n return actions", "def move(self):\n for agent in self.agents:\n if not agent.fidelity:\n options = agent.get_move_options(agent.hex, self.kernel_size, None, extend=True)\n target = random36.choices(population=options,weights=[x.quality**2 for x in options])\n agent.move(target[0])", "def work(self, agentInput, type_=\"selectAction\"):\n agentInput = from_numpy(np.array(agentInput)).float().unsqueeze(0) # Add batch dimension with unsqueeze\n\n if self.use_cuda:\n agentInput = agentInput.cuda()\n\n with no_grad():\n action_prob = self.actor_net(agentInput)\n\n if type_ == \"selectAction\":\n c = Categorical(action_prob)\n action = c.sample()\n return action.item(), action_prob[:, action.item()].item()\n elif type_ == \"selectActionMax\":\n return np.argmax(action_prob).item(), 1.0", "def act(self, states, add_noise=True):\n actions = np.zeros([self.num_agents, self.action_size])\n for index, agent in enumerate(self.agents):\n actions[index, :] = agent.act(states[index], add_noise)\n return actions", "def step(self):\n\n \"\"\" First updates the variables values of the current time form the environment \"\"\"\n self.update_crispval(self.env.context)\n\n \"\"\"\n here the decision making of the agent\n to determine which activity to suggest to the patient\n i apply the creative controller to the current context\n \"\"\"\n curr_input = sample_inputs(False, 0, self.curr_interaction, self.variables_default_val, self.action_var,\n self.fuzzysets_values, self.variables_universe)\n c_out, rules_activations, is_cc_exception = self.creative_controller.computeOutput(curr_input, False)\n\n \"\"\" i obtain a number of ouput crisp values.\n i determine which one achieves the max expected output w.r.t. 
the a-rules \"\"\"\n best_a = None\n best_a_val = -1000\n best_a_exphapp = 5\n if self.verbose > Constants.VERBOSE_BASIC:\n print(\"rules activations\")\n for a in rules_activations:\n if rules_activations[a] > 0:\n print(str(a) + \"\\n\\t\\t\\t-> \" + str(rules_activations[a]))\n for item in c_out.items(): # for each pair <activity, crisp output>\n if self.verbose > Constants.VERBOSE_BASIC:\n print(item)\n if not item[\n 0] in self.curr_iter_suggestions: # if i didn't suggest the same activity already in the same interaction\n inputs = dict(curr_input) # I create a copy fo the dict\n inputs[item[0]] = item[1]\n assessor_id = self.actions_to_ti[item[0]]\n self.assessors[assessor_id].feed_inputs(inputs)\n is_ac_exception = False\n assout = []\n try:\n a_out, a_rules_activations, is_ac_exception = self.assessors[assessor_id].compute(verbose=False)\n assout = [a_out[ao] for ao in a_out]\n except:\n is_ac_exception = True\n traceback.print_exc()\n # todo the following assumes that every assessor controller has same eval var\n for v in self.eval_var:\n assout.append(self.variables_default_val[v])\n if len(assout) == 0:\n for v in self.eval_var:\n assout.append(self.variables_default_val[v])\n w_ta = self.weights_therapeutic_interventions[self.actions_to_ti[item[0]]]\n\n avg_credit_rules_that_suggested_action = 1.0\n nr_rules_that_suggested_action = 0\n for r in rules_activations:\n if (rules_activations[r] > 0) and (str(item[0]) in str(r)):\n avg_credit_rules_that_suggested_action = avg_credit_rules_that_suggested_action + \\\n self.rules_credits[str(r)]\n nr_rules_that_suggested_action = nr_rules_that_suggested_action + 1\n if nr_rules_that_suggested_action > 0:\n avg_credit_rules_that_suggested_action = (\n avg_credit_rules_that_suggested_action - 1.0) / nr_rules_that_suggested_action\n repetition_cost = 1.0\n a_val = (mean(assout) * w_ta * avg_credit_rules_that_suggested_action) / repetition_cost\n if (a_val > best_a_val) and (\n item[1] >= (self.variables_default_val[item[0]] + self.range_step[item[0]])):\n best_a = item\n best_a_val = a_val\n best_a_exphapp = mean(assout)\n\n \"\"\"I suggest the activity with best expected outcome and store the information to populate the interactions \n memory \"\"\"\n self.proposeActivity(best_a)\n if not best_a is None:\n if (self.verbose > Constants.VERBOSE_FALSE) and (self.verbose <= Constants.VERBOSE_BASIC):\n print(\"proposing activity\" + str(best_a) + \" which has expected feedback: \" + str(\n best_a_exphapp) + \", which weighted is \" + str(best_a_val))\n self.curr_iter_suggestions.append(best_a[0])\n self.last_suggestion = best_a\n else:\n if (self.verbose > Constants.VERBOSE_FALSE) and (self.verbose <= Constants.VERBOSE_BASIC):\n print(\"the activity proposed is \" + str(\n best_a) + \" so I don't suggest anything. 
I will ask a question instead\")\n self.last_suggestion = []\n self.expected_feedback = best_a_exphapp\n self.last_context = self.env.context.copy()\n self.last_rules_activations = rules_activations", "def choose_action( self):\n \"\"\"greedy, random, e-greedy, boltzmann, bayesian\"\"\"\n\tif self.exploration == \"greedy\":\n #Choose an action with the maximum expected value.\n a,allQ = sess.run([q_net.predict,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.keep_per:1.0})\n a = a[0]\n return a\n if self.exploration == \"random\":\n #Choose an action randomly.\n a = env.action_space.sample()\n if self.exploration == \"e-greedy\":\n #Choose an action by greedily (with e chance of random action) from the Q-network\n if np.random.rand(1) < e or total_steps < pre_train_steps:\n a = env.action_space.sample()\n else:\n a,allQ = sess.run([q_net.predict,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.keep_per:1.0})\n a = a[0]\n return a\n if self.exploration == \"boltzmann\":\n #Choose an action probabilistically, with weights relative to the Q-values.\n Q_d,allQ = sess.run([q_net.Q_dist,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.Temp:e,q_net.keep_per:1.0})\n a = np.random.choice(Q_d[0],p=Q_d[0])\n a = np.argmax(Q_d[0] == a)\n return a\n if self.exploration == \"bayesian\":\n #Choose an action using a sample from a dropout approximation of a bayesian q-network.\n a,allQ = sess.run([q_net.predict,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.keep_per:(1-e)+0.1})\n a = a[0]\n return a", "def learn(self):\n for a in self.agents:\n a.learn()", "def getAction1(self, state):\n action = QLearningAgent.getAction1(self,state)\n################################################################################################################################ Eric Changed Stuff Below\n #print \"ACTION IN PACMANQAGENT: \", action\n self.doAction(state,action)\n return action", "def _init_agents(self):\n self.agents = [Agent(e=0.1, a=0.1, row=self.row, col=self.col) for i in range(2)]", "def step(self, action, update=True):\n if self.centralized_planning:\n agent_states = [human.get_full_state() for human in self.humans]\n if self.robot.visible:\n agent_states.append(self.robot.get_full_state())\n human_actions = self.centralized_planner.predict(agent_states)[:-1]\n else:\n human_actions = self.centralized_planner.predict(agent_states)\n else:\n human_actions = []\n for human in self.humans:\n ob = self.compute_observation_for(human)\n human_actions.append(human.act(ob))\n\n # collision detection\n dmin = float('inf')\n collision = False\n for i, human in enumerate(self.humans):\n px = human.px - self.robot.px\n py = human.py - self.robot.py\n if self.robot.kinematics == 'holonomic':\n vx = human.vx - action.vx\n vy = human.vy - action.vy\n else:\n vx = human.vx - action.v * np.cos(action.r + self.robot.theta)\n vy = human.vy - action.v * np.sin(action.r + self.robot.theta)\n ex = px + vx * self.time_step\n ey = py + vy * self.time_step\n # closest distance between boundaries of two agents\n closest_dist = point_to_segment_dist(px, py, ex, ey, 0, 0) - human.radius - self.robot.radius\n if closest_dist < 0:\n collision = True\n logging.debug(\"Collision: distance between robot and p{} is {:.2E} at time {:.2E}\".format(human.id, closest_dist, self.global_time))\n break\n elif closest_dist < dmin:\n dmin = closest_dist\n\n # collision detection between humans\n human_num = len(self.humans)\n for i in range(human_num):\n for j in range(i + 1, human_num):\n dx = self.humans[i].px - self.humans[j].px\n dy = 
self.humans[i].py - self.humans[j].py\n dist = (dx ** 2 + dy ** 2) ** (1 / 2) - self.humans[i].radius - self.humans[j].radius\n if dist < 0:\n # detect collision but don't take humans' collision into account\n logging.debug('Collision happens between humans in step()')\n\n # check if reaching the goal\n end_position = np.array(self.robot.compute_position(action, self.time_step))\n reaching_goal = norm(end_position - np.array(self.robot.get_goal_position())) < self.robot.radius\n\n if self.global_time >= self.time_limit - 1:\n reward = 0\n done = True\n info = Timeout()\n elif collision:\n reward = self.collision_penalty\n done = True\n info = Collision()\n elif reaching_goal:\n reward = self.success_reward\n done = True\n info = ReachGoal()\n elif dmin < self.discomfort_dist:\n # adjust the reward based on FPS\n reward = (dmin - self.discomfort_dist) * self.discomfort_penalty_factor * self.time_step\n done = False\n info = Discomfort(dmin)\n else:\n reward = 0\n done = False\n info = Nothing()\n\n if update:\n # store state, action value and attention weights\n if hasattr(self.robot.policy, 'action_values'):\n self.action_values.append(self.robot.policy.action_values)\n if hasattr(self.robot.policy, 'get_attention_weights'):\n self.attention_weights.append(self.robot.policy.get_attention_weights())\n if hasattr(self.robot.policy, 'get_matrix_A'):\n self.As.append(self.robot.policy.get_matrix_A())\n if hasattr(self.robot.policy, 'get_feat'):\n self.feats.append(self.robot.policy.get_feat())\n if hasattr(self.robot.policy, 'get_X'):\n self.Xs.append(self.robot.policy.get_X())\n if hasattr(self.robot.policy, 'traj'):\n self.trajs.append(self.robot.policy.get_traj())\n\n # update all agents\n self.robot.step(action)\n for human, action in zip(self.humans, human_actions):\n human.step(action)\n if self.nonstop_human and human.reached_destination():\n self.generate_human(human)\n\n self.global_time += self.time_step\n self.states.append([self.robot.get_full_state(), [human.get_full_state() for human in self.humans],\n [human.id for human in self.humans]])\n self.robot_actions.append(action)\n self.rewards.append(reward)\n\n # compute the observation\n if self.robot.sensor == 'coordinates':\n ob = self.compute_observation_for(self.robot)\n elif self.robot.sensor == 'RGB':\n raise NotImplementedError\n else:\n if self.robot.sensor == 'coordinates':\n ob = [human.get_next_observable_state(action) for human, action in zip(self.humans, human_actions)]\n elif self.robot.sensor == 'RGB':\n raise NotImplementedError\n\n return ob, reward, done, info", "def run(self, agent_host):\n\n # start conditions\n total_reward = 0\n self.prev_s = None\n self.prev_a = None\n is_first_action = True\n # main loop:\n #grab world state and continue if mission is running\n world_state = agent_host.getWorldState()\n while world_state.is_mission_running:\n current_r = 0\n if is_first_action:\n # wait until have received a valid observation\n while True:\n time.sleep(0.1)\n world_state = agent_host.getWorldState()\n for error in world_state.errors:\n self.logger.error(\"Error: %s\" % error.text)\n for reward in world_state.rewards:\n current_r += reward.getValue()\n if world_state.is_mission_running and len(world_state.observations)>0 and not world_state.observations[-1].text==\"{}\":\n total_reward += self.act(world_state, agent_host, current_r)\n break\n if not world_state.is_mission_running:\n break\n is_first_action = False\n else:\n # wait for non-zero reward\n while world_state.is_mission_running and current_r == 
0:\n time.sleep(0.1)\n world_state = agent_host.getWorldState()\n for error in world_state.errors:\n self.logger.error(\"Error: %s\" % error.text)\n for reward in world_state.rewards:\n current_r += reward.getValue()\n # allow time to stabilise after action\n while True:\n time.sleep(0.1)\n world_state = agent_host.getWorldState()\n for error in world_state.errors:\n self.logger.error(\"Error: %s\" % error.text)\n for reward in world_state.rewards:\n current_r += reward.getValue()\n if world_state.is_mission_running and len(world_state.observations)>0 and not world_state.observations[-1].text==\"{}\":\n total_reward += self.act(world_state, agent_host, current_r)\n break\n if not world_state.is_mission_running:\n break\n\n # process final reward\n self.logger.debug(\"Final reward: %d\" % current_r)\n total_reward += current_r\n\n # update Q values\n if self.prev_s is not None and self.prev_a is not None:\n self.updateQTableFromTerminatingState( current_r )\n\n self.drawQ()\n\n return total_reward", "def step(self, actions, agent_id=0):\n self._last_state = self._current_state\n\n # TODO\n # action = actions.discrete_actions[0]-1\n action = actions.argmax()\n\n done = 0\n if self._stage == 0: # is fixation\n if action == 0:\n reward = 0.\n else:\n reward = -1.\n self._current_state = 1\n self._stage = 1\n elif self._stage == 1: # is first stage, use prob_transition\n if action == 1 or action == 2:\n if np.random.random() < self._prob_transition[0][action-1]:\n self._current_state = 2\n else:\n self._current_state = 3\n reward = 0.\n else: # pick a next state at random\n reward = -1.\n self._current_state = np.random.random() < 0.5 and 2 or 3\n self._stage = 2\n else: # is second stage, use prob_reward\n # Given an action (arm pulled), sample reward, return\n if action == 1 or action == 2:\n current_prob_rewards = self._prob_reward[self._current_state-2]\n self._best_reward = self._max_reward*np.max(current_prob_rewards)\n thisProb = current_prob_rewards[action-1]\n if np.random.random() < thisProb:\n # print(\"give reward\")\n reward = self._max_reward\n else:\n reward = 0.0\n else:\n reward = -1.\n\n self._total_reward += reward\n self._best_total_reward += self._best_reward\n self._stage = 0\n self._current_state = 0\n self._trial += 1\n self._since_flipped += 1\n # if more than self._min_stable trials since flipping, certain chance of flipping prob rews\n if (self._since_flipped >= self._min_stable) and (np.random.random() <= self._flip_prob):\n self._randomize()\n self._since_flipped = 0\n\n\n self._last_action = np.zeros(self._num_arms)\n self._last_action[action] = 1\n # conditions to end episode\n if self._step >= self._steps_per_ep-1:\n self._state = READY_TO_END_EPISODE\n done = 1\n\n self._step += 1\n self._prev_reward = reward\n\n obs = self._current_state\n reset = done == 1. 
or self._step == MAX_FRAMES\n\n # print(np.array([[obs]]).shape)\n\n # print(reward, self._stage)\n return np.array([obs]), reward, done, reset", "def learn(self, experiences, gamma):\n \n next_actions = []\n actions = []\n for i, agent in enumerate(self.agents):\n \n # collect current agent's ID, current and next states\n states, _, _, next_states, _ = experiences[i]\n agent_id = torch.tensor([i]).to(device)\n \n # extract the state of an agent and get action provided by actor \n state = states.reshape(-1, self.n_agents, self.state_size).index_select(1, agent_id).squeeze(1)\n action = agent.actor_local(state)\n actions.append(action)\n \n # extract the next state of an agent and get action provided by target actor\n next_state = next_states.reshape(-1, self.n_agents, self.state_size).index_select(1, agent_id).squeeze(1)\n next_action = agent.actor_target(next_state)\n next_actions.append(next_action)\n \n # perform learning for each agent, from its own sampled experience\n for i, agent in enumerate(self.agents):\n agent.learn(i, experiences[i], gamma, next_actions, actions)", "def run_actor(self, local_others, local_v, goals, epsilon, sess):\n # convert to batch\n obs_others = np.array(local_others)\n v_obs = np.array(local_v)\n\n feed = {self.obs_others:obs_others, self.v_obs:v_obs,\n self.v_goal:goals}\n actions_argmax = sess.run(self.argmax_Q, feed_dict=feed)\n\n actions = np.zeros(self.n_agents, dtype=int)\n for idx in range(self.n_agents):\n if np.random.rand(1) < epsilon:\n actions[idx] = np.random.randint(0, self.l_action)\n else:\n actions[idx] = actions_argmax[idx]\n\n return actions", "def transfer_actions(action, act_space):\n #print(action)\n action_spaces = []\n res = []\n for act in act_space.spaces:\n if act_space[act].__class__.__name__ == 'Discrete':\n action_spaces.append(act_space[act].n)\n res.append(action[act])\n elif act_space[act].__class__.__name__ == 'Enum':\n action_spaces.append(len(act_space[act].values))\n res.append(action[act])\n elif act == 'camera':\n res.append(camera_transform(action[act][0]))\n res.append(camera_transform(action[act][1]))\n action_spaces.append(36)\n action_spaces.append(36)\n\n return res", "def act(self, game_state: dict) -> str:\n #print(\"act\")\n self.logger.debug(\"Querying model for action.\")\n if self.train and self.steps_done < self.imitationSteps:\n self.steps_done += 1\n act = self.imitationAgent.act(game_state)\n return act\n else:\n act = ACTIONS[select_action(self, state_to_features(self, game_state))]\n return act", "def choose_action(self, agent_data):\r\n action_value_estimates = agent_data[\"action_value_estimates\"]\r\n action_counts = agent_data[\"action_counts\"]\r\n time_step = np.sum(action_counts)\r\n ucb_value_estimates = np.zeros(len(action_counts))\r\n for i in np.arange(len(action_counts)):\r\n if action_counts[i]!=0:\r\n ucb_value_estimates[i] = action_value_estimates[i] + self.C * np.sqrt(np.log(time_step) / action_counts[i])\r\n else:\r\n ucb_value_estimates[i] = sys.float_info.max\r\n action = self.argmax_with_random_tiebreaker(ucb_value_estimates)\r\n return action", "def choose_action(self, board, possible_actions):\r\n pass", "def get_possible_actions(self, world_state,agent_host):\n action_list = []\n possibilities = {'movenorth 1': -3,'movesouth 1': 3,'moveeast 1': 1,'movewest 1': -1}\n #check walls to see whether can move left,right,back,forward\n #check floor beneath to see whether should do anything at all, or just nothing and sink\n obs_text = world_state.observations[-1].text\n obs = 
json.loads(obs_text)\n grid = load_grid(world_state)\n print 'GRID SIZE: ', len(grid)\n for k,v in possibilities.items():\n #with current grid, index 31 will always be our agent's current location\n #check walls to see whether can move left,right,back,forward\n if grid[31+v+9] == 'water' or grid[31+v+9] == 'wooden_door': #+9 because we want to check\n action_list.append(k) #where our feet are located\n #check if you can teleport down a level\n if grid[31-27] == 'water' or grid[31-27] == 'wooden_door':\n action_list.append(self.teleport(agent_host,False))\n #check if you can teleport up a level\n if grid[31+45] == 'water' or grid[31+45] == 'wooden_door':\n action_list.append(self.teleport(agent_host,True))\n\n print(\"ACTION LIST: {}\".format(action_list))\n return action_list", "def run(self, agent_host):\n S, A, R = deque(), deque(), deque()\n global shot\n shot = 0\n while shot < 5:\n ##update total arrow shot\n self.totalCount+=1\n\n ##update accuracy number for every 10000 arrows\n if(self.totalCount%10000==0):\n self.phasesOnTarget.append(self.phasesTemp)\n self.phasesTemp=0\n\n\n s0 = self.get_zombie_state(agent_host)\n a0= self.choose_action(s0)\n if a0[2] == 'shoot':\n shot += 1\n self.shootCount+=1\n r0 = self.act(agent_host, a0)\n\n ##update arrow numbers for different angles\n if(a0[2]=='shoot'):\n self.arrowAngleCount[tuple((a0[0],a0[1]))]+=1\n\n ##update arrow on target quantity\n if(r0>0):\n self.totalOnTarget+=1\n self.phasesTemp+=1\n\n ## update arrow hit the target on different angles\n if(a0[2]=='shoot'):\n self.arrowAngleOn[tuple((a0[0],a0[1]))]+=1\n\n\n ##update reward\n S.append(s0)\n A.append(a0)\n R.append(r0)\n\n ##update reward list\n self.reward.append(r0)\n print(s0,a0,r0)\n while len(S) >= 1:\n self.update_q_table(S, A, R)\n S.popleft()\n A.popleft()\n R.popleft()\n agent_host.sendCommand('quit')", "def scan_and_act(self):\n\n self.scan()\n\n if self.has_payload and self.has_home:\n self.strategy = self._move_home_strategy\n elif self.has_payload:\n self.strategy = self._find_home_strategy\n elif self.has_home:\n self.strategy = self._find_payload_strategy\n else:\n if self.visible_payloads:\n self.strategy = self._fetch_payload_strategy\n else:\n self.strategy = self._wander_strategy\n\n action = self.strategy()\n\n response = self.island_of_agents.agent_action(self.identity, action, 1)\n assert response.status_code == 200\n\n return action", "def select_action(self, state):\n # print(\"agent.select_action() - state: {}\".format(state))\n\n self.step_counter += 1\n # self.epsilon = max(0.1, 1.0-self.step_counter/self.epsilon_decay_steps)\n epsilon_min = .01\n epsilon_max = .8\n epsilon_step = epsilon_max - (epsilon_max - epsilon_min) * self.step_counter / self.epsilon_decay_steps\n self.epsilon = max(epsilon_min, epsilon_step)\n # self.epsilon = max(0.1, 1.0/self.step_counter)\n\n rand = random.uniform(0, 1)\n if rand < self.epsilon:\n # choose random action\n return np.random.choice(self.nA)\n else:\n # choose greedy action\n return np.argmax(self.Q[state])" ]
[ "0.70322484", "0.65847504", "0.65847504", "0.65610033", "0.65087974", "0.64800406", "0.64696527", "0.64690334", "0.6465", "0.64002377", "0.6380364", "0.6380364", "0.63314396", "0.6323784", "0.6322388", "0.6242467", "0.6234373", "0.6211339", "0.6201912", "0.6196667", "0.61705744", "0.6138628", "0.6126332", "0.6120129", "0.6120129", "0.60982144", "0.60855055", "0.60855055", "0.6066904", "0.60591686", "0.6049478", "0.60483754", "0.603781", "0.6024748", "0.6008022", "0.5987855", "0.59807193", "0.5979814", "0.5979814", "0.5975031", "0.59743345", "0.59739894", "0.5968738", "0.5965516", "0.59610796", "0.5953583", "0.59482914", "0.5926765", "0.5908953", "0.5908302", "0.59041667", "0.5902035", "0.58885384", "0.58695084", "0.5862256", "0.58600336", "0.5855518", "0.5854448", "0.58447444", "0.5835068", "0.5822633", "0.5817086", "0.5812684", "0.58126235", "0.5805076", "0.57832515", "0.577743", "0.57759297", "0.5766479", "0.5764765", "0.57590854", "0.5752014", "0.5741512", "0.5736663", "0.573217", "0.5722569", "0.5720356", "0.57135165", "0.57123536", "0.5710109", "0.570475", "0.56998694", "0.56829405", "0.56824493", "0.56693304", "0.5666538", "0.56625944", "0.5660521", "0.56596416", "0.565754", "0.5653676", "0.56535655", "0.5644431", "0.5635568", "0.5618702", "0.5617763", "0.56111103", "0.5609248", "0.56081784", "0.5603949", "0.56029904" ]
0.0
-1
Store an experience tuple in the ReplayBuffer
def store(self, experience):
        self.memory.store(experience)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def store_experience(self, s, a, r, t, s2):\n self.replay_buffer.add(s, a, r, t, s2)", "def add(self, experience):\n self.buffer.append(experience)", "def add_experience(self, action, state, reward, terminal):\n self.replay_buffer.add_experience(action, state, reward, terminal)", "def append(self, experience: Experience) -> None:\n self.buffer.append(experience)", "def append(self, experience):\n self.buffer.append(experience)\n self.number += 1", "def remember(self, experience):\n self.memory.append(experience)", "def add(self, experience: []):\n if len(self.buffer) + len(experience) >= self.buffer_size:\n self.buffer[0:1] = []\n self.buffer.append(experience)", "def experience_replay(self):\n return", "def add_game_to_replay_buffer(self, reward: float):\n game_length = len(self.action_log)\n\n if reward == self.win_value:\n buffer = self.replay_buffer_win\n elif reward == self.loss_value:\n buffer = self.replay_buffer_loss\n else:\n buffer = self.replay_buffer_draw\n\n for i in range(game_length - 1):\n buffer.add([self.board_position_log[i], self.action_log[i],\n self.board_position_log[i + 1], 0])\n\n buffer.add([self.board_position_log[game_length - 1], self.action_log[game_length - 1], None, reward])", "def add(self, state, action, reward, new_state, done):\n experience = (state, action, reward, new_state, done)\n self.buffer.append(experience)", "def add_experience(self, state, action, reward, next_state, done):\n experience = (state, action, reward, next_state, done)\n for i, k in enumerate(self.data_keys):\n getattr(self, k).append(experience[i])\n self.size += 1", "def store(self, state, action, reward, next_state, done):\n self.replay_memory.append((state, action, reward, next_state, done))", "def add(self, state, action, reward, next_state, done):\n self.replay_buffer.append([state, action, reward, next_state, done])", "def store_replay_memory(self, history, action, reward, next_history, dead):\n self.memory.append((history, action, reward, next_history, dead))", "def append(self, state, action, reward, next_state, is_terminal):\n idx, old_ele = self.experiences.append(self.Experience(\n state, action, reward, next_state, not is_terminal, 0))\n self.unplayed_experiences.append(idx)\n return idx, old_ele", "def __init__(self, game='pong', obs_type='ram', frameskip=(2, 5), repeat_action_probability=0.):\n\n utils.EzPickle.__init__(self, game, obs_type)\n assert obs_type in ('ram', 'image')\n\n self.game_path = atari_py.get_game_path(game)\n if not os.path.exists(self.game_path):\n raise IOError('You asked for game %s but path %s does not exist'%(game, self.game_path))\n self._obs_type = obs_type\n self.frameskip = frameskip\n self.ale = ALEInterface()\n self.viewer = None\n\n # Tune (or disable) ALE's action repeat:\n # https://github.com/openai/gym/issues/349\n assert isinstance(repeat_action_probability, (float, int)), \"Invalid repeat_action_probability: {!r}\".format(repeat_action_probability)\n self.ale.setFloat('repeat_action_probability'.encode('utf-8'), repeat_action_probability)\n\n self._seed()\n\n (screen_width, screen_height) = self.ale.getScreenDims()\n self._buffer = np.empty((screen_height, screen_width, 3), dtype=np.uint8)\n\n self._action_set = self.ale.getMinimalActionSet()\n self.action_space = spaces.Discrete(len(self._action_set))\n\n (screen_width,screen_height) = self.ale.getScreenDims()\n if self._obs_type == 'ram':\n self.observation_space = spaces.Box(low=np.zeros(128), high=np.zeros(128)+255)\n elif self._obs_type == 'image':\n self.observation_space 
= spaces.Box(low=0, high=255, shape=(screen_height, screen_width, 3))\n else:\n raise error.Error('Unrecognized observation type: {}'.format(self._obs_type))", "def learn(self, action, reward, is_terminal):\n\n\t\t# Add the experience to the replay memory\n\t\tself.replay_memory.record(self.state_history[:,:,3], action, reward, is_terminal)", "def accumulate_experience(teacher, exp_replay: Supervised_ExperienceReplay, config=student_config):\n\n env = gym.make(\"PongNoFrameskip-v4\")\n env = wrap_deepmind(env, frame_stack=True)\n steps = 0\n while 1:\n state = env.reset()\n state = np.asarray(state)\n done = False\n while not done:\n steps += 1\n teacher_q_value = teacher.get_q(state=np.reshape(state, (1, state.shape[0], state.shape[1], state.shape[2])))\n action = teacher.select_action(teacher_q_value)\n next_state, reward, done, _ = env.step(action + 1)\n next_state = np.asarray(next_state)\n exp_replay.add_memory(state, teacher_q_value, action) # feeding the experience replay\n state = next_state\n if steps > config.OBSERVE: # we have OBSERVE number of exp in exp_replay\n try:\n del env\n except ImportError:\n pass\n break", "def store(self, experience, priority):\n self._experience.append(experience)\n insert_index = self.fix_index(priority)\n if insert_index >= 0:\n self.exp_idx.insert(insert_index, len(self._experience) - 1)\n if(self.record_size > self.size):\n #self._experience.pop(0)\n sys.stderr.write(\"Experience overflow!\")\n return True\n\n elif insert_index == -10:\n sys.stderr.write('Insert failed\\n')\n return False", "def _store_episode(self):\n # For each transition in the last episode,\n # create a set of artificial transitions\n for transition_idx, transition in enumerate(self.episode_transitions):\n\n obs_t, action, reward, obs_tp1, done, info = transition\n\n # Add to the replay buffer\n self.replay_buffer.add(obs_t, action, reward, obs_tp1, done)\n\n # We cannot sample a goal from the future in the last step of an episode\n if (transition_idx == len(self.episode_transitions) - 1 and\n self.goal_selection_strategy == GoalSelectionStrategy.FUTURE):\n break\n\n # Sampled n goals per transition, where n is `n_sampled_goal`\n # this is called k in the paper\n sampled_goals = self._sample_achieved_goals(self.episode_transitions, transition_idx)\n # For each sampled goals, store a new transition\n for goal in sampled_goals:\n # Copy transition to avoid modifying the original one\n obs, action, reward, next_obs, done, info = copy.deepcopy(transition)\n\n # Convert concatenated obs to dict, so we can update the goals\n obs_dict, next_obs_dict = map(self.env.convert_obs_to_dict, (obs, next_obs))\n\n # Update the desired goal in the transition\n obs_dict['desired_goal'] = goal\n next_obs_dict['desired_goal'] = goal\n\n # Update the reward according to the new desired goal\n reward = self.env.compute_reward(next_obs_dict['achieved_goal'], goal, info)\n # Can we use achieved_goal == desired_goal?\n done = False\n\n # Transform back to ndarrays\n obs, next_obs = map(self.env.convert_dict_to_obs, (obs_dict, next_obs_dict))\n\n # Add artificial transition to the replay buffer\n self.replay_buffer.add(obs, action, reward, next_obs, done)", "def store_outcome(self, log_action_prob, reward):\n\n self.log_action_prob_list.append(log_action_prob)\n self.tensor_rewards_list.append(torch.Tensor([reward]))", "def remember_play(self, state, action, reward, next_state, done):\n # Use tuple to represent one play\n self.memory.append((state, action, reward, next_state, done))\n\n # Decrease 
epsilon (exploration rate)\n if self.epsilon > self.epsilon_min:\n self.epsilon *= self.epsilon_decay", "def store_effect(self, idx, action, reward, done):\n self.action[idx] = action\n self.reward[idx] = reward\n self.done[idx] = done", "def __init__(self, buffer_size, batch_size):\n self.memory = deque(maxlen=buffer_size)\n self.batch_size = batch_size\n self.experience = namedtuple(\"Experience\", field_names=[\"state\", \"action\", \"reward\", \"next_state\"])", "def add(self, state, action, reward, next_state, done):\n experience = Experience(state, action, reward, next_state, done)\n self.memory.append(experience)", "def add(self, obs_t, action, reward, obs_tp1, done, info):\n assert self.replay_buffer is not None\n # Update current episode buffer\n self.episode_transitions.append((obs_t, action, reward, obs_tp1, done, info))\n if done:\n # Add transitions (and imagined ones) to buffer only when an episode is over\n self._store_episode()\n # Reset episode buffer\n self.episode_transitions = []", "def __init__(self, buffer_size, batch_size):\n # Internal memory\n self.memory = deque(maxlen=buffer_size)\n self.batch_size = batch_size\n self.experience = namedtuple(\"Experience\",\n field_names=[\"state\", \"action\", \"reward\", \"next_state\", \"done\"])", "def play_and_record(initial_state, agent, env, exp_replay, n_steps=1):\n s = initial_state\n sum_rewards = 0\n\n # Play the game for n_steps as per instructions above\n <YOUR CODE>\n\n return sum_rewards, s", "def remember(self, *args):\n state, action, reward, next_state, done = args\n self.memory.append((state, action, reward, next_state, done))", "def observe(self, observation, reward, action, terminal):\n reward = max(self.min_r, min(self.max_r, reward))\n # Add observed data to history and experience.\n self.history.add(observation)\n self.experience.add(observation, reward, action, terminal)", "def remember(self, state, action, reward, next_state, done):\n item = (state, action, reward, next_state, done)\n self.memory_buffer.append(item)", "def store(self, obs, act, rew, val, logp):\n assert self.ptr < self.max_size\n self.obs_buf[self.ptr] = obs\n self.act_buf[self.ptr] = act\n self.rew_buf[self.ptr] = rew\n self.val_buf[self.ptr] = val\n self.logp_buf[self.ptr] = logp\n self.ptr += 1", "def append(self, state, action, reward, next_state, is_terminal):\n exp = self.Experience(\n state, action, reward, next_state, not is_terminal)\n idx, old_ele = self.experiences.append(exp)\n return idx, old_ele", "def memorize(self, state, action, reward, done, new_state, error=None):\n\n experience = (state, action, reward, done, new_state)\n if(self.with_per):\n priority = self.priority(error[0])\n self.buffer.add(priority, experience)\n self.count += 1\n else:\n # Check if buffer is already full\n if self.count < self.buffer_size:\n self.buffer.append(experience)\n self.count += 1\n else:\n self.buffer.popleft()\n self.buffer.append(experience)", "def experience_replay(batch_size):\n memory = []\n while True:\n experience = yield rsample(memory, batch_size) if batch_size <= len(memory) else None\n memory.append(experience)", "def __init__(self, buffer_size, batch_size):\n self.memory = deque(maxlen=buffer_size) # internal memory (deque)\n self.batch_size = batch_size\n self.experience = namedtuple(\"Experience\", field_names=[\"state\", \"action\", \"reward\", \"next_state\", \"done\"])\n self.episode = 0", "def __init__(self,buffer_size,state_dim,action_dim,random_seed=123):\n print(\"Creating Replay Buffer object\")\n 
self.buffer_size=buffer_size\n self.state_dim=state_dim\n self.action_dim=action_dim\n self.pointer=0\n self.states=np.zeros(shape=[buffer_size,state_dim])\n self.actions=np.zeros(shape=[buffer_size,action_dim])\n self.rewards=np.zeros(shape=[buffer_size,1])\n self.dones=np.zeros(shape=[buffer_size,1])\n self.next_states=np.zeros(shape=[buffer_size,state_dim])\n self.filled=False\n \n random.seed(random_seed)", "def store_frame(self, frame):\n if self.obs is None:\n self.obs = np.empty([self.size] + list(frame.shape), dtype=np.uint8)\n self.action = np.empty([self.size], dtype=np.int32)\n self.reward = np.empty([self.size], dtype=np.float32)\n self.done = np.empty([self.size], dtype=np.bool)\n self.obs[self.next_idx] = frame\n\n ret = self.next_idx\n self.next_idx = (self.next_idx + 1) % self.size\n self.num_in_buffer = min(self.size, self.num_in_buffer + 1)\n\n return ret", "def add_new_experience(self, state, action, reward, next_state, done):\n e = self.experience(state, action, reward, next_state, done, self.max_priority)\n self.memory.append(e)", "def self_play(self, n_episodes): \n eps = self.eps(self.agent.learning_iters)\n experiences = self_play_episodes(self.mdp, self.agent, n_episodes, eps) \n for state, action, reward, next_state, done in experiences:\n self.agent.replay_buffer.push(state, action, reward, next_state, done)", "def push(self, transition_tuple):\n if len(self.replay_memory) < self.state.replay_size:\n self.replay_memory.append(None)\n self.replay_memory[self.state.position] = transition_tuple\n self.state.position = (self.state.position + 1) % self.state.replay_size", "def __init__(self, action_size, buffer_size, batch_size, seed):\n self.action_size = action_size\n self.memory = deque(maxlen=buffer_size) \n self.batch_size = batch_size\n self.experience = namedtuple(\"Experience\", field_names=[\"state\", \"action\", \"reward\", \"next_state\", \"done\"])\n self.seed = random.seed(seed)", "def update_step(self, replay_buffer_iter):\n\n transition = next(replay_buffer_iter)\n states = transition.observation[:, 0]\n actions = transition.action[:, 0]\n rewards = transition.reward[:, 0]\n next_states = transition.observation[:, 1]\n discounts = transition.discount[:, 0]\n\n next_actions, _ = self.actor(next_states, sample=True, with_log_probs=True)\n\n # entropy_rewards = self.discount * discounts * self.alpha * next_log_probs\n # rewards -= entropy_rewards\n critic_dict = self.fit_critic(states, actions, next_states, next_actions,\n rewards, discounts)\n actor_dict = self.fit_actor(states)\n\n return {**actor_dict, **critic_dict}", "def __init__(self, buffer_size, batch_size, seed):\n self.memory = deque(maxlen=buffer_size)\n self.batch_size = batch_size\n self.experience = namedtuple(\"Experiences\", field_names=[\"state\", \"state_full\", \"action\", \"reward\",\n \"next_state\", \"next_state_full\", \"done\"])\n self.seed = random.seed(seed)", "def update_history(self, play, coplay):\r\n self.history.append(play, coplay)\r\n self.base.history.append(play,coplay)\r\n self.trust.history.append(play,coplay)\r\n self.conviction.history.append(play,coplay)", "def get_single_experience(self, time_step):\r\n assert self.n_experience - 1 > time_step, \"Sample time step must be less than number of experience minus one.\"\r\n return self.buffer_experience[time_step]", "def _experience_replay(self, batch_size, discount=0.9, epochs=1):\r\n minibatch = random.sample(self.experience, batch_size)\r\n\r\n # TODO: The batch_size might not bee needed as an argument here if the 
reshape things can be resolved.\r\n states, actions, rewards, next_states, terminated = self._extract_data(batch_size, minibatch)\r\n targets = self._build_targets(batch_size, states, next_states, rewards, actions, terminated, discount)\r\n\r\n history = self.q_network.fit(states, targets, epochs=epochs, verbose=0, batch_size=1)\r\n #print(history.history['loss'])\r\n self.episode_loss.append(history.history['loss'][0])", "def play_and_record(initial_obs, agent, env, exp_replay, prev_memories, n_steps=1):\r\n s = initial_obs\r\n sum_rewards = 0\r\n\r\n # Play the game for n_steps as per instructions above\r\n for _ in range(n_steps):\r\n new_memories, qvalues = agent.get_qvalues(prev_memories, s[None, ...])\r\n a = agent.sample_actions(qvalues)[0]\r\n next_s, r, done, _ = env.step(a)\r\n exp_replay.add(s, a, r, done)\r\n\r\n s = next_s\r\n prev_memories = new_memories\r\n sum_rewards += r\r\n if done:\r\n s = env.reset()\r\n prev_memories = agent.get_initial_state(1)\r\n\r\n return sum_rewards, s, prev_memories", "def record(self, game):\n\n\t\topp = game.opponents[self]\n\t\tself.memory[opp.tag] = game.get_last_move(opp)\n\t\tself.movesThisGen.append(game.get_last_move(self))\n\t\tself.games_played += 1", "def populate_buffer(self, num_transitions):\n while len(self.replay_buffer) < self.buffer_sample_size:\n self.play(num_transitions)", "def sample(self):\r\n \r\n # the experiences of the minibatch are choosed randomly (the minibatch has the size batch_size)\r\n indices = np.random.randint(0, len(self.data), self.batch_size)\r\n states, actions, rewards, next_states, finishs = [], [], [], [], []\r\n \r\n # we add the experience in the minibatch\r\n for i in indices:\r\n states.append(self.data[i][0])\r\n actions.append(self.data[i][1])\r\n rewards.append(self.data[i][2])\r\n next_states.append(self.data[i][3])\r\n finishs.append(self.data[i][4])\r\n \r\n # converting numpy arrays to float tensors (pytorch can't work with numpy array)\r\n return states, torch.FloatTensor(actions), torch.FloatTensor(rewards), \\\r\n next_states, torch.FloatTensor(finishs)", "def record(self, game):\n\n\t\topp = game.opponents[self]\n\t\tself.memory[opp] = game.get_last_move(opp)\n\t\tself.movesThisGen.append(game.get_last_move(self))\n\t\tif len(self.memory.keys()) > 10:\n\t\t\tself.memory.popitem()\n\t\tself.games_played += 1", "def _store_transition(self,\n last_observation,\n action,\n reward,\n is_terminal,\n priority=None):\n if priority is None:\n if self._replay_scheme == 'uniform':\n priority = 1.\n else:\n priority = self._replay.sum_tree.max_recorded_priority\n\n if not self.eval_mode:\n self._replay.add(last_observation, action, reward, is_terminal, priority)", "def __init__(self, buffer_size, batch_size, seed):\n self.memory = deque(maxlen=buffer_size) # internal memory (deque)\n self.batch_size = batch_size\n self.experience = namedtuple(\"Experience\", field_names=[\"state\", \"action\", \"reward\", \"next_state\", \"done\"])\n self.seed = random.seed(seed)", "async def on_experience(self, payload):\n\n self.keep = False\n self.stop()", "def store(self, obs, act, rew, next_obs, done, mask):\n self.obs1_buf[self.ptr] = obs\n self.obs2_buf[self.ptr] = next_obs\n self.acts_buf[self.ptr] = act\n self.rews_buf[self.ptr] = rew\n self.done_buf[self.ptr] = done\n self.mask_buf[self.ptr] = mask\n self.ptr = (self.ptr + 1) % self.max_size\n self.size = min(self.size + 1, self.max_size)", "def train_experience_replay(self, epochs, batch_size, iterations_per_epoch, capacity, n_obs, **kwargs):\n\n # 
Initialize losses dictionary and memory replay buffer\n losses = dict()\n mem = MemoryReplayBuffer(capacity)\n\n for ep in range(1, epochs+1):\n losses[ep] = []\n with tqdm(total=iterations_per_epoch, desc='Training epoch {}'.format(ep)) as p_bar:\n\n for it in range(1, iterations_per_epoch+1):\n \n # Determine n_obs and generate data on-the-fly\n if type(n_obs) is int:\n n_obs_it = n_obs\n else:\n n_obs_it = n_obs()\n # Simulate and add to buffer\n params, sim_data = self._forward_inference(batch_size, n_obs_it, **kwargs)\n mem.store(params, sim_data)\n\n # Sample from buffer\n params, sim_data = mem.sample()\n\n # One step backprop\n loss = self._train_step(params, sim_data)\n \n # Store loss into dictionary\n losses[ep].append(loss)\n\n # Update progress bar\n p_bar.set_postfix_str(\"Epoch {0},Iteration {1},Loss: {2:.3f},Running Loss: {3:.3f}\"\n .format(ep, it, loss, np.mean(losses[ep])))\n p_bar.update(1)\n\n # Store after each epoch, if specified\n if self.manager is not None:\n self.manager.save()\n return losses", "def __init__(self, action_size, buffer_size, batch_size, seed):\n self.action_size = action_size\n self.memory = deque(maxlen=buffer_size)\n self.batch_size = batch_size\n self.experience = namedtuple(\"Experience\", field_names=[\"state_and_prev_recurrent\", \"action\", \"recurrent\", \"reward\", \"next_state\", \"done\"])\n self.seed = random.seed(seed)", "def update_step(self, replay_buffer_iter):\n\n states, actions, rewards, discounts, next_states = next(\n replay_buffer_iter)\n\n critic_dict = self.fit(states, actions, next_states, rewards, discounts)\n actor_dict = self.fit_alpha(states)\n\n return {**actor_dict, **critic_dict}", "def store_frames(self):\n self.stored_frames = (self.active_call, self.active_frame)", "def add(self, state, action, reward, next_state, done):\n e = self.experience(state, action, reward, next_state, done)\n self.memory.append(e)\n self.priorities.append(max(self.priorities, default=1)) # Save all new experiences as maximum priority", "def __init__(self, buffer_size, batch_size):\n\n self.memory = deque(maxlen=buffer_size)\n self.batch_size = batch_size\n # set.experience = namedtuple(\"Experience\", field_names=['state', 'action', 'reward', 'next_state', 'done'])", "def store_episode(self):\n episode_buffer = self._convert_episode_to_batch_major()\n episode_batch_size = len(episode_buffer['observation'])\n idx = self._get_storage_idx(episode_batch_size)\n\n for key in self._buffer:\n self._buffer[key][idx] = episode_buffer[key]\n self._n_transitions_stored = min(\n self._size_in_transitions, self._n_transitions_stored +\n self._time_horizon * episode_batch_size)", "def store(self, obs, act, rew):\n assert self.ptr < self.max_size\n\n for i in range(len(self.obs_dim)):\n if self.obs_dim[i][0] == None:\n list_obs = []\n for y in len(obs[i]):\n list_obs.append(obs[i][y])\n self.obs_buf[i].append(list_obs)\n else :\n self.obs_buf[i][self.ptr] = obs[i]\n\n for i in range(len(self.act_dim)):\n self.act_buf[i][self.ptr] = act[i]\n\n self.rew_buf[self.ptr] = rew\n self.ptr += 1", "def _store_transition(self,\n last_observation,\n action,\n reward,\n is_terminal,\n priority=None,\n episode_end=False):\n if priority is None:\n if self._replay_scheme == 'uniform':\n priority = 1.\n else:\n priority = self._replay.sum_tree.max_recorded_priority\n\n if not self.eval_mode:\n self._replay.add(last_observation, action, reward, is_terminal, priority)", "def learn(self, observation, action, reward, next_observation):\n\n # log transition in replay 
buffer\n transition = self.buffer_transition(\n observation, action, reward, next_observation\n )\n\n # choose a random amount of transitions from buffer\n random_amount = np.random.randint(0, len(self.replay_buffer))\n transitions = [choice(self.replay_buffer) for _ in range(random_amount)] + [\n transition\n ]\n\n # learn from transition and random transitions from the replay buffer\n self.update_q_table(transitions)", "def save_state(self):\n # add (turn number, active player, player 1, player 2) to game history\n # player 1 and player 2 contain data about active mods\n turn_number = self.turn_number\n player_1 = Class.copy_monster(self.player1)\n player_2 = Class.copy_monster(self.player2)\n # save which player's turn it is\n if self.current_player == self.player1:\n active_player = 'player 1'\n else:\n active_player = 'player 2'\n\n # add this information to history list\n self.history.append((turn_number, active_player, player_1, player_2))", "def __init__(self, buffer_size):\n self.num_experiences = 0\n self.buffer = deque(maxlen=buffer_size)", "def remember(self, r_state, r_action, r_reward, r_new_state, r_done):\n self.memory.append((r_state, r_action, r_reward, r_new_state, r_done))", "def experience_replay(self):\n s,a,r,sp,done = self.memory.sample(self.batch_size)\n # TODO: 5 lines missing.\n raise NotImplementedError(\"\")\n self.Q.fit(s, target=target)", "def __init__(self, action_size, buffer_size, batch_size, seed):\n self.action_size = action_size\n self.memory = deque(maxlen=buffer_size) # internal memory (deque)\n self.batch_size = batch_size\n self.experience = namedtuple(\"Experience\", field_names=[\"state\", \"action\", \"reward\", \"next_state\", \"done\"])\n self.seed = random.seed(seed)", "def __init__(self, action_size, buffer_size, batch_size, seed):\n self.action_size = action_size\n self.memory = deque(maxlen=buffer_size) # internal memory (deque)\n self.batch_size = batch_size\n self.experience = namedtuple(\"Experience\", field_names=[\"state\", \"action\", \"reward\", \"next_state\", \"done\"])\n self.seed = random.seed(seed)", "def __init__(self, action_size, buffer_size, batch_size, seed):\n self.action_size = action_size\n self.memory = deque(maxlen=buffer_size) # internal memory (deque)\n self.batch_size = batch_size\n self.experience = namedtuple(\"Experience\", field_names=[\"state\", \"action\", \"reward\", \"next_state\", \"done\"])\n self.seed = random.seed(seed)", "def __init__(self, action_size, buffer_size, batch_size, seed):\n self.action_size = action_size\n self.memory = deque(maxlen=buffer_size) # internal memory (deque)\n self.batch_size = batch_size\n self.experience = namedtuple(\"Experience\", field_names=[\"state\", \"action\", \"reward\", \"next_state\", \"done\"])\n self.seed = random.seed(seed)", "def __init__(self, action_size, buffer_size, batch_size, seed):\n self.action_size = action_size\n self.memory = deque(maxlen=buffer_size) # internal memory (deque)\n self.batch_size = batch_size\n self.experience = namedtuple(\"Experience\", field_names=[\"state\", \"action\", \"reward\", \"next_state\", \"done\"])\n self.seed = random.seed(seed)", "def __init__(self, action_size, buffer_size, batch_size, seed):\n self.action_size = action_size\n self.memory = deque(maxlen=buffer_size) # internal memory (deque)\n self.batch_size = batch_size\n self.experience = namedtuple(\"Experience\", field_names=[\"state\", \"action\", \"reward\", \"next_state\", \"done\"])\n self.seed = random.seed(seed)", "def add_experiences(self, curr_all_info: 
AllBrainInfo, next_all_info: AllBrainInfo, take_action_outputs):\n curr_info = curr_all_info[self.brain_name]\n next_info = next_all_info[self.brain_name]\n\n for agent_id in curr_info.agents:\n self.training_buffer[agent_id].last_brain_info = curr_info\n self.training_buffer[agent_id].last_take_action_outputs = take_action_outputs\n\n if curr_info.agents != next_info.agents:\n curr_to_use = self.construct_curr_info(next_info)\n else:\n curr_to_use = curr_info\n\n intrinsic_rewards = self.policy.get_intrinsic_rewards(curr_to_use, next_info)\n\n for agent_id in next_info.agents:\n stored_info = self.training_buffer[agent_id].last_brain_info\n stored_take_action_outputs = self.training_buffer[agent_id].last_take_action_outputs\n if stored_info is not None:\n idx = stored_info.agents.index(agent_id)\n next_idx = next_info.agents.index(agent_id)\n if not stored_info.local_done[idx]:\n for i, _ in enumerate(stored_info.visual_observations):\n self.training_buffer[agent_id]['visual_obs%d' % i].append(\n stored_info.visual_observations[i][idx])\n self.training_buffer[agent_id]['next_visual_obs%d' % i].append(\n next_info.visual_observations[i][next_idx])\n if self.policy.use_vec_obs:\n self.training_buffer[agent_id]['vector_obs'].append(stored_info.vector_observations[idx])\n self.training_buffer[agent_id]['next_vector_in'].append(\n next_info.vector_observations[next_idx])\n if self.policy.use_recurrent:\n if stored_info.memories.shape[1] == 0:\n stored_info.memories = np.zeros((len(stored_info.agents), self.policy.m_size))\n self.training_buffer[agent_id]['memory'].append(stored_info.memories[idx])\n actions = stored_take_action_outputs['action']\n if self.policy.use_continuous_act:\n actions_pre = stored_take_action_outputs['pre_action']\n self.training_buffer[agent_id]['actions_pre'].append(actions_pre[idx])\n else:\n self.training_buffer[agent_id]['action_mask'].append(\n stored_info.action_masks[idx])\n a_dist = stored_take_action_outputs['log_probs']\n value = stored_take_action_outputs['value']\n self.training_buffer[agent_id]['actions'].append(actions[idx])\n self.training_buffer[agent_id]['prev_action'].append(stored_info.previous_vector_actions[idx])\n self.training_buffer[agent_id]['masks'].append(1.0)\n if self.use_curiosity:\n self.training_buffer[agent_id]['rewards'].append(next_info.rewards[next_idx] +\n intrinsic_rewards[next_idx])\n else:\n self.training_buffer[agent_id]['rewards'].append(next_info.rewards[next_idx])\n self.training_buffer[agent_id]['action_probs'].append(a_dist[idx])\n self.training_buffer[agent_id]['value_estimates'].append(value[idx][0])\n if agent_id not in self.cumulative_rewards:\n self.cumulative_rewards[agent_id] = 0\n self.cumulative_rewards[agent_id] += next_info.rewards[next_idx]\n if self.use_curiosity:\n if agent_id not in self.intrinsic_rewards:\n self.intrinsic_rewards[agent_id] = 0\n self.intrinsic_rewards[agent_id] += intrinsic_rewards[next_idx]\n if not next_info.local_done[next_idx]:\n if agent_id not in self.episode_steps:\n self.episode_steps[agent_id] = 0\n self.episode_steps[agent_id] += 1", "def collect_experience(env_, agent_, size):\n env_.reset()\n state, reward, done, _ = env_.step(env_.action_space.sample())\n for data in range(size):\n action = env_.action_space.sample()\n next_state, reward, done, _ = env_.step(action)\n # penalize reward based on the position of the cart\n reward = max(0, reward * (1 - abs(next_state[0]/2.4)))\n if done:\n next_state = np.zeros(state.shape)\n # save experience in agent's memory\n 
agent_.remember((state, action, reward, next_state))\n env_.reset()\n state, reward, done, _ = env_.step(env.action_space.sample())\n else:\n # save experience in agent's memory\n agent_.remember((state, action, reward, next_state))\n state = next_state", "def feedback(self, X, plays):\n return X[plays]", "def __init__(self, context_dim, num_actions, buffer_s=-1, intercept=False):\n\n self._context_dim = context_dim\n self._num_actions = num_actions\n self._contexts = None\n self._rewards = None\n self.actions = []\n self.buffer_s = buffer_s\n self.intercept = intercept", "def store_data(self, obs, actions, reward, discount, new_obs, active_games):\n\n new_data = {'old_obs': obs[active_games],\n 'actions': actions[active_games],\n 'rewards': reward[active_games],\n 'discount': discount[active_games],\n 'new_obs': new_obs[active_games]}\n\n for a in range(self._nmbr_agents):\n self._replay.add(new_data)\n new_data['old_obs'] = np.roll(new_data['old_obs'], -1, axis=1)\n new_data['new_obs'] = np.roll(new_data['new_obs'], -1, axis=1)\n new_data['actions'] = np.roll(new_data['actions'], -1, axis=1)", "def giveReward(self, r): \n assert self.lastobs != None\n assert self.lastaction != None\n\n # store state, action, r, loglh in dataset\n if self.remember:\n self.history.appendLinked(self.lastobs, self.lastaction, r, self.loglh)\n\n self.lastobs = None\n self.lastaction = None", "def storeState(self):\n\n self.action_history[self.trial] = self.action\n self.ball_history[self.trial] = self.ballcolor", "def _build_replay_buffer(self):\n if self._replay_scheme not in ['uniform', 'prioritized']:\n raise ValueError('Invalid replay scheme: {}'.format(self._replay_scheme))\n # Both replay schemes use the same data structure, but the 'uniform' scheme\n # sets all priorities to the same value (which yields uniform sampling).\n return prioritized_replay_buffer.OutOfGraphPrioritizedReplayBuffer(\n observation_shape=self.observation_shape,\n stack_size=self.stack_size,\n update_horizon=self.update_horizon,\n gamma=self.gamma,\n observation_dtype=self.observation_dtype)", "def remember(self, state, action, reward, next_state, done):\n self.memory.append((state, action, reward, next_state, done))", "def __init__(self, action_size, buffer_size, batch_size, device, seed=0):\n self.action_size = action_size\n self.memory = deque(maxlen=buffer_size) \n self.batch_size = batch_size\n self.experience = namedtuple(\"Experience\", field_names=[\"state\", \"action\", \"reward\", \"next_state\", \"done\"])\n self.seed = random.seed(seed)\n self.device = device", "def __init__(self, capacity):\n self.experiences = RingBuf(capacity)", "def AddPlays(self, plays):\n self.persistant['plays'] += plays", "def store(self, state, act, rew, next_state):\n # buffer has to have room so you can store\n if self.ptr == self.max_size:\n self.state_buf.pop(0)\n self.act_buf.pop(0)\n self.rew_buf.pop(0)\n self.next_state_buf.pop(0)\n self.ptr -= 1\n\n # Environment related, subject to change\n # Old version\n #self.state_buf.append(np.expand_dims(state, axis = 0))\n #self.act_buf.append(np.expand_dims(act, axis = 0))\n #self.rew_buf.append(np.array(rew, ndmin = 1))\n #self.next_state_buf.append(np.expand_dims(next_state, axis = 0))\n\n # New version (best suited for decentralized)\n self.state_buf.append(state)\n self.act_buf.append(act)\n self.rew_buf.append(rew)\n self.next_state_buf.append(next_state)\n self.ptr += 1", "def add(self, state, action, reward, next_state, done):\r\n e = self.experience(state, action, reward, next_state, 
done)\r\n self.memory.append(e)", "def parse_replaydata(self):\n pass", "def add(self, state, action, reward, next_state, done):\n e = self.experience(state, action, reward, next_state, done)\n self.memory.append(e)\n \n if done and self.episode % 50 == 0 and self.episode > 50:\n self.episode += 1\n self.remove_bad_experience()\n elif done:\n self.episode += 1", "def add_experiences(\n self,\n curr_all_info: AllBrainInfo,\n next_all_info: AllBrainInfo,\n take_action_outputs: ActionInfoOutputs,\n ) -> None:\n self.trainer_metrics.start_experience_collection_timer()\n if take_action_outputs:\n self.stats[\"Policy/Entropy\"].append(take_action_outputs[\"entropy\"].mean())\n self.stats[\"Policy/Learning Rate\"].append(\n take_action_outputs[\"learning_rate\"]\n )\n for name, signal in self.policy.reward_signals.items():\n self.stats[signal.value_name].append(\n np.mean(take_action_outputs[\"value\"][name])\n )\n\n curr_info = curr_all_info[self.brain_name]\n next_info = next_all_info[self.brain_name]\n\n for agent_id in curr_info.agents:\n self.training_buffer[agent_id].last_brain_info = curr_info\n self.training_buffer[\n agent_id\n ].last_take_action_outputs = take_action_outputs\n\n if curr_info.agents != next_info.agents:\n curr_to_use = self.construct_curr_info(next_info)\n else:\n curr_to_use = curr_info\n\n tmp_rewards_dict = {}\n for name, signal in self.policy.reward_signals.items():\n tmp_rewards_dict[name] = signal.evaluate(curr_to_use, next_info)\n\n for agent_id in next_info.agents:\n stored_info = self.training_buffer[agent_id].last_brain_info\n stored_take_action_outputs = self.training_buffer[\n agent_id\n ].last_take_action_outputs\n if stored_info is not None:\n idx = stored_info.agents.index(agent_id)\n next_idx = next_info.agents.index(agent_id)\n if not stored_info.local_done[idx]:\n for i, _ in enumerate(stored_info.visual_observations):\n self.training_buffer[agent_id][\"visual_obs%d\" % i].append(\n stored_info.visual_observations[i][idx]\n )\n self.training_buffer[agent_id][\"next_visual_obs%d\" % i].append(\n next_info.visual_observations[i][next_idx]\n )\n if self.policy.use_vec_obs:\n self.training_buffer[agent_id][\"vector_obs\"].append(\n stored_info.vector_observations[idx]\n )\n self.training_buffer[agent_id][\"next_vector_in\"].append(\n next_info.vector_observations[next_idx]\n )\n if self.policy.use_recurrent:\n if stored_info.memories.shape[1] == 0:\n stored_info.memories = np.zeros(\n (len(stored_info.agents), self.policy.m_size)\n )\n self.training_buffer[agent_id][\"memory\"].append(\n stored_info.memories[idx]\n )\n actions = stored_take_action_outputs[\"action\"]\n if self.policy.use_continuous_act:\n actions_pre = stored_take_action_outputs[\"pre_action\"]\n self.training_buffer[agent_id][\"actions_pre\"].append(\n actions_pre[idx]\n )\n epsilons = stored_take_action_outputs[\"random_normal_epsilon\"]\n self.training_buffer[agent_id][\"random_normal_epsilon\"].append(\n epsilons[idx]\n )\n else:\n self.training_buffer[agent_id][\"action_mask\"].append(\n stored_info.action_masks[idx], padding_value=1\n )\n a_dist = stored_take_action_outputs[\"log_probs\"]\n # value is a dictionary from name of reward to value estimate of the value head\n value = stored_take_action_outputs[\"value\"]\n self.training_buffer[agent_id][\"actions\"].append(actions[idx])\n self.training_buffer[agent_id][\"prev_action\"].append(\n stored_info.previous_vector_actions[idx]\n )\n self.training_buffer[agent_id][\"masks\"].append(1.0)\n 
self.training_buffer[agent_id][\"done\"].append(\n next_info.local_done[next_idx]\n )\n\n for name, reward_result in tmp_rewards_dict.items():\n # 0 because we use the scaled reward to train the agent\n self.training_buffer[agent_id][\n \"{}_rewards\".format(name)\n ].append(reward_result.scaled_reward[next_idx])\n self.training_buffer[agent_id][\n \"{}_value_estimates\".format(name)\n ].append(value[name][idx][0])\n\n self.training_buffer[agent_id][\"action_probs\"].append(a_dist[idx])\n\n for name, rewards in self.collected_rewards.items():\n if agent_id not in rewards:\n rewards[agent_id] = 0\n if name == \"environment\":\n # Report the reward from the environment\n rewards[agent_id] += np.array(next_info.rewards)[next_idx]\n else:\n # Report the reward signals\n rewards[agent_id] += tmp_rewards_dict[name].scaled_reward[\n next_idx\n ]\n\n if not next_info.local_done[next_idx]:\n if agent_id not in self.episode_steps:\n self.episode_steps[agent_id] = 0\n self.episode_steps[agent_id] += 1\n self.trainer_metrics.end_experience_collection_timer()", "def main():\r\n\r\n if os.path.exists(FILEPATH):\r\n filetools.deldir(FILEPATH)\r\n\r\n buf = rb.FileWritableReplayBuffer(os.path.join(FILEPATH, '1'), exist_ok=False)\r\n\r\n sbuf = []\r\n\r\n for _ in range(5):\r\n exp = make_exp()\r\n buf.add(exp)\r\n sbuf.append(exp)\r\n\r\n buf2 = rb.FileWritableReplayBuffer(os.path.join(FILEPATH, '2'), exist_ok=False)\r\n\r\n for _ in range(5):\r\n exp = make_exp()\r\n buf2.add(exp)\r\n sbuf.append(exp)\r\n\r\n buf.close()\r\n buf2.close()\r\n\r\n rb.merge_buffers([os.path.join(FILEPATH, '2'), os.path.join(FILEPATH, '1')],\r\n os.path.join(FILEPATH, '3'))\r\n\r\n buf.close()\r\n buf = rb.FileReadableReplayBuffer(os.path.join(FILEPATH, '3'))\r\n\r\n for _ in range(3):\r\n missing = [exp for exp in sbuf]\r\n for _ in range(10):\r\n got = buf.sample(1)[0]\r\n for i in range(len(missing)): #pylint: disable=consider-using-enumerate\r\n if got == missing[i]:\r\n missing.pop(i)\r\n break\r\n else:\r\n raise ValueError(f'got bad value: {got} expected one of \\n'\r\n + '\\n'.join(repr(exp) for exp in missing))\r\n\r\n buf.mark()\r\n got = buf.sample(1)[0]\r\n buf.reset()\r\n got2 = buf.sample(1)[0]\r\n if got != got2:\r\n raise ValueError(f'mark did not retrieve same experience: {got} vs {got2}')\r\n\r\n buf.close()\r\n\r\n buf = rb.MemoryPrioritizedReplayBuffer(os.path.join(FILEPATH, '3'))\r\n\r\n saw = []\r\n buf.mark()\r\n for _ in range(15):\r\n got = buf.sample(1)[0]\r\n saw.append(got)\r\n buf.reset()\r\n for _ in range(15):\r\n got = buf.sample(1)[0]\r\n if got != saw[0]:\r\n raise ValueError(f'got bad value: {got}, expected {saw[-1]}')\r\n saw.pop(0)\r\n\r\n for _ in range(15):\r\n got = buf.pop()[2]\r\n found = False\r\n for exp in sbuf:\r\n if got == exp:\r\n found = True\r\n got.last_td_error = random.random()\r\n exp.last_td_error = got.last_td_error\r\n buf.add(got)\r\n break\r\n if not found:\r\n raise ValueError(f'got {got}, expected one of '\r\n + '\\n'.join(repr(exp) for exp in sbuf))\r\n\r\n buf.close()", "def __init__(self, action_size, buffer_size, batch_size, seed, device = None):\n self.action_size = action_size\n self.memory = deque(maxlen=buffer_size)\n self.batch_size = batch_size\n self.experience = recordclass(\"Experience\", field_names=[\"state\", \"action\", \"reward\", \"next_state\", \"done\", \"priority\"])\n self.seed = random.seed(seed)\n self.max_priority = 1.\n self.device = MinorityResampledReplayBuffer.device if device is None else device", "def 
save_experience_data(experience, my_experience, user, user_created):\n for exp in experience:\n action = Action()\n action.date_performed = exp['date']\n action.experience = my_experience\n if exp.get('type', '') == 'scan' and exp.get('id'):\n try:\n item = Item.objects.get(id=exp['id'])\n except Item.DoesNotExist:\n my_experience.delete()\n if user_created:\n user.delete()\n return {\n \"status\": \"error\",\n \"error\": \"Item does not exist\"\n }, 404\n scan = Scan.objects.create(content=item)\n action.scan = scan\n if not exp.get('photo', '') == '':\n name, content = save_image(exp['photo'])\n photo = Photo()\n photo.content.save(name, content)\n action.photo = photo\n if not exp.get('text', '') == '':\n comment = Comment.objects.create(content=exp['text'])\n action.comment = comment\n action.save()\n return {\n \"status\": \"saved\",\n \"error\": \"\"\n }, 200", "def store_memory(memory, history, action, reward, next_history, dead):\n memory.append((history, action, reward, next_history, dead))", "def append(self, observation, action, reward, next_observation, done):\n self.buffer.append([observation, action, reward, next_observation, done])", "def update(experience_buffer, returns):\n rewards = np.array(experience_buffer[2])\n discount_rewards = rewards * (FLAGS.GAMMA ** np.arange(len(rewards)))\n current_return = discount_rewards.sum()\n returns.append(current_return)\n returns = returns[-100:] # Get recent 100 returns.\n baseline = sum(returns) / len(returns) # Baseline is the average of 100 returns.\n sess.run(train_op, {observation_: experience_buffer[0],\n action_: experience_buffer[1],\n advantage_: current_return - baseline}) \n return returns", "def reward_buffer(self):\n return self._reward_buffer" ]
[ "0.7511635", "0.6776789", "0.67517585", "0.6497582", "0.6471509", "0.6421251", "0.6248629", "0.6102861", "0.5914454", "0.58810997", "0.58605915", "0.5817004", "0.5794464", "0.57156426", "0.5708091", "0.5649744", "0.5612071", "0.5603931", "0.55979264", "0.55325574", "0.5514577", "0.5488867", "0.5481748", "0.54210675", "0.54201883", "0.5394401", "0.5370759", "0.53680605", "0.536278", "0.53622204", "0.53593886", "0.53521425", "0.5344278", "0.5341629", "0.53371", "0.5309304", "0.5303401", "0.52969146", "0.5285514", "0.5281178", "0.5269273", "0.5258839", "0.52548987", "0.5254146", "0.5247279", "0.5234185", "0.5204123", "0.5177278", "0.5173017", "0.5172391", "0.5136883", "0.5130774", "0.51240677", "0.5106707", "0.5102386", "0.5102027", "0.51001394", "0.50955814", "0.5084047", "0.508133", "0.50781107", "0.5075633", "0.5066097", "0.506521", "0.50536203", "0.50534564", "0.5052085", "0.50351334", "0.50347006", "0.50173664", "0.50053316", "0.50053316", "0.50053316", "0.50053316", "0.50053316", "0.50053316", "0.5003046", "0.49893025", "0.49883062", "0.4986588", "0.49832413", "0.4982227", "0.4979567", "0.4967696", "0.49572968", "0.49568167", "0.49560758", "0.49538916", "0.49440095", "0.49409854", "0.49387154", "0.4931175", "0.49298432", "0.49263954", "0.49240562", "0.490705", "0.49063134", "0.4904415", "0.48998702", "0.4892013" ]
0.62331957
7
Perform a learning step on all agents in the network.
def learn(self):
    self.t_step += 1
    # Sample from replay buffer, which already has nstep rollout calculated.
    batch = self.memory.sample(self.batch_size)
    obs, next_obs, actions, rewards, dones = batch
    # Gather and concatenate actions because critic networks need ALL
    # actions as input, the stored actions were concatenated before storing
    # in the buffer
    target_actions = [agent.actor_target(next_obs[i]) for i, agent in enumerate(self.agents)]
    predicted_actions = [agent.actor(obs[i]) for i, agent in enumerate(self.agents)]
    target_actions = torch.cat(target_actions, dim=-1)
    predicted_actions = torch.cat(predicted_actions, dim=-1)
    # Change state data from [agent_count, batch_size]
    # to [batchsize, state_size * agent_count]
    # because critic networks need to ALL observations as input
    obs = obs.transpose(1,0).contiguous().view(self.batch_size, -1)
    next_obs = next_obs.transpose(1,0).contiguous().view(self.batch_size,-1)
    # Perform a learning step for each agent using concatenated data as well
    # as unique-perspective data where algorithmically called for
    for i, agent in enumerate(self.agents):
        agent.learn(obs, next_obs, actions, target_actions, predicted_actions, rewards[i], dones[i])
        self.update_networks(agent)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def learn(self):\n for a in self.agents:\n a.learn()", "def learn(self, num_episodes=10000):\n for i in range(num_episodes):\n self.actor()\n self.learner()", "def learn(self, experiences, gamma):\n \n next_actions = []\n actions = []\n for i, agent in enumerate(self.agents):\n \n # collect current agent's ID, current and next states\n states, _, _, next_states, _ = experiences[i]\n agent_id = torch.tensor([i]).to(device)\n \n # extract the state of an agent and get action provided by actor \n state = states.reshape(-1, self.n_agents, self.state_size).index_select(1, agent_id).squeeze(1)\n action = agent.actor_local(state)\n actions.append(action)\n \n # extract the next state of an agent and get action provided by target actor\n next_state = next_states.reshape(-1, self.n_agents, self.state_size).index_select(1, agent_id).squeeze(1)\n next_action = agent.actor_target(next_state)\n next_actions.append(next_action)\n \n # perform learning for each agent, from its own sampled experience\n for i, agent in enumerate(self.agents):\n agent.learn(i, experiences[i], gamma, next_actions, actions)", "def trainAgent(self):\r\n\t\tfor episode in range(self.TOT_EPISODES):\r\n\t\t\t#reset environment, stacked frames every episode.\r\n\t\t\tstate = self.env.reset()\r\n\t\t\trewards = 0\r\n\t\t\t#preprocess and stack the frame/state.\r\n\t\t\tstate, self.stacked_frames = stack_frames(self.stack_size,\r\n\t\t\t\t\t\t\t\t\tself.stacked_frames, state, True)\r\n\t\t\t\r\n\t\t\tfor step in range(self.MAX_STEPS):\r\n\t\t\t#for every step in episode:\r\n\t\t\t\r\n\t\t\t\tif (step%100==0):\r\n\t\t\t\t\tprint(\"Episode No.: \", episode, \"Step No.: \", step)\r\n\t\t\t\t\r\n\t\t\t\t#agent acts - explores or exploitation of the model\r\n\t\t\t\taction = self.dqn.predictAction(state)\r\n\t\t\t\t#reduce epsilon for more exploitation later.\r\n\t\t\t\tself.dqn.decayEpsilon()\r\n\t\t\t\t#Perform the action and get the next_state, reward, and done vals.\r\n\t\t\t\tnext_state, reward, done, _ = self.env.step(action)\r\n\t\t\t\t#append this state to the frame. 
Pass the previous stacked frame.\r\n\t\t\t\tnext_state, self.stacked_frames = stack_frames(self.stack_size,\r\n\t\t\t\t\t\t\t\t\t\tself.stacked_frames, next_state, False)\r\n\t\t\t\trewards+=reward\r\n\t\t\t\t\r\n\t\t\t\t#add this experience into memory (experience buffer)\r\n\t\t\t\tself.dqn.remember(state, action, reward, next_state, done)\r\n\t\t\t\t\r\n\t\t\t\tstate = next_state\r\n\t\t\t\t\r\n\t\t\t\tif done:\r\n\t\t\t\t\tprint(\"took %d steps\" %step)\r\n\t\t\t\t\tprint(\"Earned a total of reward equal to \", rewards)\r\n\t\t\t\t\tbreak\r\n\t\t\t\r\n\t\t\t\t# TRAIN\r\n\t\t\t\tself.dqn.replay()\r\n\t\t\t\t#sync target_model and model weights every 10k steps.\r\n\t\t\t\tif step % 10000 == 9999:\r\n\t\t\t\t\tself.dqn.target_train()\r\n\t\t\t\r\n\t\t\t# Save the network every 1000 iterations\r\n\t\t\tif episode % 5 == 4:\r\n\t\t\t\tprint(\"Saving Network\")\r\n\t\t\t\tself.dqn.save_network(self.path)", "def step(self, num_agent):\n # Save experience / reward\n # memory.add(state, action, reward, next_state, done)\n\n self.n_steps = (self.n_steps + 1) % UPDATE_EVERY ###\n # Learn, if enough samples are available in memory\n if len(memory) > BATCH_SIZE and self.n_steps == 0: ###\n experiences = memory.sample()\n self.learn(experiences, GAMMA, num_agent)\n \n self.n_steps += 1", "def step(self, states, actions, rewards, next_states, dones):\n for a in range(self.agents_count):\n # save for each agent\n self.memory.add(states[a], actions[a], rewards[a], next_states[a], dones[a])\n\n # Learn, if enough samples are available in memory\n if len(self.memory) > self.batch_size:\n experiences = self.memory.sample()\n self.learn(experiences)", "def step(self, states, actions, rewards, next_states, dones, timestep, num_agents):\n \n for i in range(num_agents):\n self.memory.add(states[i], actions[i], rewards[i], next_states[i], dones[i])\n\n # Learn, if enough samples are available in memory\n beta = beta_by_frame(timestep)\n\n if len(self.memory) > BATCH_SIZE and timestep % LEARN_EVERY == 0:\n for _ in range(LEARN_NUM):\n experiences = self.memory.sample(beta)\n self.learn(experiences, GAMMA)", "def agents_train(self, game_step, episode_now, args):\n # update all trainers, if not in display or benchmark mode\n if episode_now < args.learning_start_episode: return \n if self.update_cnt > 0 and self.var >= self.min_var: self.var *= args.var_discount\n #if episode_now > self.last_update_episode and (episode_now - args.learning_start_episode) % args.learning_fre == 0:\n if game_step % args.learning_fre_step == 0:\n if self.update_cnt == 0: print('\\r=start training ...'+' '*100)\n self.last_update_episode = episode_now\n self.update_cnt += 1\n\n # update every agent in different memory batch\n for agent_idx, (actor_c, actor_t, critic_c, critic_t, opt_a, opt_c) in \\\n enumerate(zip(self.actors_cur, self.actors_tar, self.critics_cur, \\\n self.critics_tar, self.optimizers_a, self.optimizers_c)):\n # del if opt_c == None: continue # jump to the next model update\n\n # sample the experience\n _obs_n_o, _action_n, _rew_n, _obs_n_n, _done_n = self.memory.sample( \\\n args.batch_size, agent_idx) # Note_The func is not the same as others\n \n # --use the date to update the CRITIC\n rew = torch.tensor(_rew_n, device=args.device, dtype=torch.float) # set the rew to gpu\n done_n = torch.tensor(~_done_n, dtype=torch.float, device=args.device) # set the rew to gpu\n action_cur_o = torch.from_numpy(_action_n).to(args.device, torch.float)\n obs_n_o = torch.from_numpy(_obs_n_o).to(args.device, torch.float)\n obs_n_n = 
torch.from_numpy(_obs_n_n).to(args.device, torch.float)\n\n action_tar = torch.cat([a_t(obs_n_n[:, self.obs_size[idx][0]:self.obs_size[idx][1]]).detach() \\\n for idx, a_t in enumerate(self.actors_tar)], dim=1)\n q = critic_c(obs_n_o, action_cur_o).reshape(-1) # q \n q_ = critic_t(obs_n_n, action_tar).reshape(-1) # q_ \n q_ = q_*args.gamma*done_n + rew*torch.tensor(args.reward_scale_par, device=args.device) # q_*gamma*done + reward\n loss_c = torch.nn.MSELoss()(q, q_.detach()) # bellman equation\n opt_c.zero_grad()\n loss_c.backward()\n nn.utils.clip_grad_norm_(critic_c.parameters(), args.max_grad_norm)\n opt_c.step()\n\n # --use the data to update the ACTOR\n # There is no need to cal other agent's action\n opt_c.zero_grad()\n model_out, policy_c_new = actor_c( \\\n obs_n_o[:, self.obs_size[agent_idx][0]:self.obs_size[agent_idx][1]], model_original_out=True)\n # update the aciton of this agent\n action_cur_o[:, self.action_size[agent_idx][0]:self.action_size[agent_idx][1]] = policy_c_new \n loss_pse = torch.mean(torch.pow(model_out, 2))\n loss_a = torch.mul(torch.tensor(-1.0, device=args.device), torch.mean(critic_c(obs_n_o, action_cur_o)))\n\n opt_a.zero_grad()\n (2e-3*loss_pse+loss_a).backward()\n #loss_a.backward()\n nn.utils.clip_grad_norm_(actor_c.parameters(), args.max_grad_norm)\n opt_a.step()\n\n # save the model to the path_dir ---cnt by update number\n #if self.update_cnt > args.start_save_model and self.update_cnt % args.fre4save_model == 0:\n if self.update_cnt > args.start_save_model and self.update_cnt % args.fre4save_model_step == 0:\n time_now = time.strftime('%y%m_%d%H%M')\n print('=time:{} step:{} save'.format(time_now, game_step))\n model_file_dir = os.path.join(args.save_dir, '{}_{}_{}'.format( \\\n args.scenario_name, time_now, game_step))\n if not os.path.exists(model_file_dir): # make the path\n os.mkdir(model_file_dir)\n for agent_idx, (a_c, a_t, c_c, c_t) in \\\n enumerate(zip(self.actors_cur, self.actors_tar, self.critics_cur, self.critics_tar)):\n torch.save(a_c, os.path.join(model_file_dir, 'a_c_{}.pt'.format(agent_idx)))\n torch.save(a_t, os.path.join(model_file_dir, 'a_t_{}.pt'.format(agent_idx)))\n torch.save(c_c, os.path.join(model_file_dir, 'c_c_{}.pt'.format(agent_idx)))\n torch.save(c_t, os.path.join(model_file_dir, 'c_t_{}.pt'.format(agent_idx)))\n\n # update the tar par\n self.actors_tar = self.update_trainers(self.actors_cur, self.actors_tar, args.tao) \n self.critics_tar = self.update_trainers(self.critics_cur, self.critics_tar, args.tao)", "def learner(idx, agent) :\n\n\t# Each worker nodes start at different time.\n\ttime.sleep(2*np.random.rand())\n\n\t# Status of current game\n\ttotal_score = total_step = 0\n\n\t# Send processor name\n\tcomm.send(my_name, dest=MASTER, tag=2)\n\n\t# Receive initail weights from master.\n\tglobal_weights = comm.recv(source=MASTER, tag=0)\n\tagent.set_weights(global_weights)\n\n\t# Request lastest weights from master every $t_sync training iterations.\n\t# t : current training iteration.\n\tt, t_sync = 0, 1\n\n\twhile True :\n\t\tt = (t+1)%t_sync\n\t\tsync = (t == 0) # request lastest weights ?\n\n\t\t# Train the model for some game steps.\n\t\tn_step = np.random.randint(128, 256)\n\t\t(score, n_step, done), loss, raw_grads = agent.train(n_step)\n\t\t\n\t\t# Update game status.\n\t\ttotal_score+= score\n\t\ttotal_step+= n_step\n\n\t\t# Clipped gradients.\n\t\tgrads = [np.clip(x, -100, 100) for x in raw_grads]\n\t\tgrads = raw_grads\n\n\t\t# Game status.\n\t\tstats = {\"done\":done, \"sync\":sync}\n\t\tif done : # 
Game is finished.\n\t\t\t# Number of 4-frame steps. How long does he survive ?\n\t\t\ttotal_step = (total_step + early_skipping)*nb_frames/4.\n\t\t\tstats.update({\"score\":total_score, \"steps\": total_step, \"loss\":loss})\n\n\t\t\t# Make a new game. Reset game status.\n\t\t\ttotal_score = total_step = 0\n\n\t\t# Send game status and gradients to master.\n\t\tcomm.send(stats, dest=MASTER, tag=1)\n\t\tsendFLOAT(grads, dest=MASTER, tag=1)\n\n\t\t# Receive lastest weights from master.\n\t\tif sync :\n\t\t\t# global_weights = comm.recv(source=MASTER, tag=0)\n\t\t\trecvFLOAT(global_weights, src=MASTER, tag=0)\n\t\t\tagent.set_weights(global_weights)", "def learn(self, agent_id, experiences, gamma, all_next_actions, all_actions):\n\n states, actions, rewards, next_states, dones = experiences\n\n # ---------------------------- update critic ---------------------------- #\n self.critic_optimizer.zero_grad()\n agent_id = torch.tensor([agent_id]).to(device)\n actions_next = torch.cat(all_next_actions, dim=1).to(device)\n with torch.no_grad():\n q_targets_next = self.critic_target(next_states, actions_next)\n q_expected = self.critic_local(states, actions)\n # q_targets = reward of this timestep + discount * Q(st+1,at+1) from target network\n q_targets = rewards.index_select(1, agent_id) + (gamma * q_targets_next * (1 - dones.index_select(1, agent_id)))\n critic_loss = F.mse_loss(q_expected, q_targets.detach())\n critic_loss.backward()\n self.critic_optimizer.step()\n\n # ---------------------------- update actor ---------------------------- #\n self.actor_optimizer.zero_grad()\n actions_pred = [actions if i == self.id else actions.detach() for i, actions in enumerate(all_actions)]\n actions_pred = torch.cat(actions_pred, dim=1).to(device)\n actor_loss = -self.critic_local(states, actions_pred).mean()\n actor_loss.backward()\n self.actor_optimizer.step()\n # ----------------------- update target networks ----------------------- #\n self.soft_update(self.critic_local, self.critic_target, self.tau)\n self.soft_update(self.actor_local, self.actor_target, self.tau)", "def step(self, states, actions, rewards, next_states, dones):\n \n states = states.reshape(1, -1)\n next_states = next_states.reshape(1, -1)\n self.memory.add(states, actions, rewards, next_states, dones)\n\n # for each agent, sample experiences from the shared buffer and learn\n if len(self.memory) > self.batch_size:\n experiences = [self.memory.sample() for _ in range(self.n_agents)]\n self.learn(experiences, self.gamma)", "def step(self):\n\t\tnumpy.random.shuffle(self.agents_list)\n\t\tfor agent in self.agents_list:\n\t\t\tagent.produce()\n\t\tfor agent in self.agents_list:\n\t\t\tagent.charge()\n\t\tfor agent in self.agents_list:\n\t\t\tif agent.strategy == 0: \n\t\t\t\tagent.retribute()\n\t\tfor agent in self.agents_list:\n\t\t\tif agent.strategy == 0: \n\t\t\t\tagent.give()\n\t\tfor agent in self.agents_list:\n\t\t\tagent.consume()\n\t\tfor agent in self.agents_list:\n\t\t\tagent.solve_consumption_deficit()\n\t\tfor site in self.sites:\n\t\t\tsite.recovery()\n\t\tfor agent in self.agents_list:\n\t\t\tagent.sprout()", "def learn(self, *args):\n\n # unpack args\n steps, episode_step, _, state_, reward, _, done, _ = args \n\n # we check if we should do a learning step,\n # otherwise, just store the reward and go on\n\n if done or episode_step == self.episode_length - 1:\n \n # get the last value\n if done: # terminal state\n last_value = 0 \n else: # predict the last value from the critic\n last_value, _ = 
self.actor_critic.predict(state_)\n\n # calculate the discounted reward\n self.rewards.append(reward)\n discounted_rewards = np.zeros(len(self.values))\n for i in reversed(range(len(self.rewards))):\n last_value = self.rewards[i] + self.discount_factor * last_value\n discounted_rewards[i] = last_value\n \n # update weights by calculating the loss and performing backward\n self.actor_critic.calc_loss(discounted_r=discounted_rewards,\n values=self.values,\n log_probs=self.log_probs,\n entropy=self.entropy,\n entropy_factor=self.entropy_factor)\n # clean up\n self.values.clear()\n self.rewards.clear()\n self.log_probs.clear()\n self.entropy.clear()\n else:\n self.rewards.append(reward)", "def train_agent(self):\n # Retrieve collected experiences from memory\n experiences = np.array(self.replay.get_all())\n # rewards = np.array([h['reward'] for h in experiences])\n #rewards = experiences[:,2]\n rewards = np.array([r[2] for r in experiences])\n\n # Discount and normalize rewards\n norm_rewards = self.discount_rewards_and_normalize(rewards)\n\n # Shuffle for better learning\n shuffled_experiences = np.random.shuffle(experiences)\n\n # Feed the experiences through the network with rewards to compute and\n # minimize the loss.\n\n feed={\n self.X: [r[0] for r in experiences],\n self.rewards:norm_rewards,\n self.ep_actions:experiences[:,1]\n }\n self.tf_sess.run(self.train,feed_dict=feed)\n\n pass", "def train(self):\n\n agent_step = self._num_actions_taken\n\n if agent_step >= self._train_after:\n if (agent_step % self._train_interval) == 0:\n pre_states, actions, post_states, rewards, terminals = self._memory.minibatch(self._minibatch_size)\n\n self._trainer.train_minibatch(\n self._trainer.loss_function.argument_map(\n pre_states=pre_states,\n actions=Value.one_hot(actions.reshape(-1, 1).tolist(), self.nb_actions),\n post_states=post_states,\n rewards=rewards,\n terminals=terminals\n )\n )\n\n # Update the Target Network if needed\n if (agent_step % self._target_update_interval) == 0:\n self._target_net = self._action_value_net.clone(CloneMethod.freeze)\n filename = \"models\\model%d\" % agent_step\n self._trainer.save_checkpoint(filename)", "def update_all_agent(self):\n for a in self.agents:\n soft_update(a.target_actor, a.actor, self.tau)\n soft_update(a.target_critic, a.critic, self.tau)\n self.num_iteration += 1", "def training(self) -> None:\n self.compile_model()\n self.train_epoch()\n self.agent.save()", "def learner(self):\n for N in range(self.N_learn):\n trajectories = self.sample_trajectories()\n\n # TODO: Both these methods take the full trajectories at the moment, a speedup could be achieved here\n self.qmodel.train(trajectories)\n self.amodel.train(trajectories)", "def learn(self):\n if self.step_count < self.learn_start_step or self.step_count % self.learn_interval != 0:\n return\n\n s, a, r, s_, t = self.sample()\n self.update_critics(s, a, r, t, s_)\n self.update_actor_alpha(s)\n self.update_target()\n self.learn_cur += 1", "def train(args):\n # prepare environment\n brain_name = env.brain_names[0]\n brain = env.brains[brain_name]\n env_info = env.reset(train_mode=True)[brain_name]\n\n num_agents = len(env_info.agents)\n print('Number of agents:', num_agents)\n\n # size of each action\n action_size = brain.vector_action_space_size\n print('Size of each action:', action_size)\n\n # examine the state space\n states = env_info.vector_observations\n state_size = states.shape[1]\n print('There are {} agents. 
Each observes a state with length: {}'.format(\n states.shape[0], state_size))\n print('The state for the first agent looks like:', states[0])\n\n # Crate instance of MADDPG Class, mainly possible to control the model dimensions, learnrates and batch sizes\n agent = MADDPG(state_size,\n action_size,\n lr_actor=args.lr_actor,\n lr_critic=args.lr_critic,\n lr_decay=args.lr_decay,\n replay_buff_size=args.replay_buff_size,\n gamma=args.gamma,\n batch_size=args.batch_size,\n random_seed=args.random_seed,\n soft_update_tau=args.soft_update_tau,\n actor_layer_dim_1=args.actor_layer_dim_1,\n actor_layer_dim_2=args.actor_layer_dim_2,\n actor_layer_dim_3=args.actor_layer_dim_3,\n critic_layer_dim_1=args.critic_layer_dim_1,\n critic_layer_dim_2=args.critic_layer_dim_2,\n critic_layer_dim_3=args.critic_layer_dim_3\n\n )\n\n total_rewards = []\n avg_scores = []\n max_avg_score = -1\n max_score = -1\n threshold_init = 20\n noise_t = args.epsilon\n noise_decay = args.epsilon_decay\n latest_avg_score = -1\n # for early-stopping training if consistently worsen for # episodes\n worsen_tolerance = threshold_init\n for i_episode in range(1, 1+args.num_episodes):\n\n env_inst = env.reset(train_mode=True)[\n brain_name] # reset the environment\n states = env_inst.vector_observations # get the current state\n # initialize score array\n scores = np.zeros(num_agents)\n dones = [False]*num_agents\n while not np.any(dones):\n # select an action\n actions = agent.act(states, noise_t)\n # send the action to the environment\n env_inst = env.step(actions)[brain_name]\n next_states = env_inst.vector_observations # get the next state\n rewards = env_inst.rewards # get the reward\n dones = env_inst.local_done # see if episode has finished\n agent.update(states, actions, rewards, next_states, dones)\n\n noise_t *= noise_decay\n scores += rewards # update scores\n states = next_states\n\n episode_score = np.max(scores)\n total_rewards.append(episode_score)\n print(\"\\rEpisodic {} Score: {:.4f}\\t Avg Score: {:.4f}\".format(\n i_episode, episode_score, latest_avg_score), end=' ')\n\n if max_score <= episode_score:\n max_score = episode_score\n # save best model so far\n agent.save(\n \"chkpts/{}/{:02d}_best_model.checkpoint\".format(args.model_path, args.loop_counter))\n\n # record avg score for the latest 100 steps\n if len(total_rewards) >= args.test_n_run:\n latest_avg_score = sum(\n total_rewards[(len(total_rewards)-args.test_n_run):]) / args.test_n_run\n avg_scores.append(latest_avg_score)\n\n if max_avg_score <= latest_avg_score: # record better results\n worsen_tolerance = threshold_init # re-count tolerance\n max_avg_score = latest_avg_score\n else:\n if max_avg_score > 0.5:\n worsen_tolerance -= 1 # count worsening counts\n print(\"Loaded from last best model.\")\n # continue from last best-model\n agent.load(\n \"chkpts/{}/{:02d}_best_model.checkpoint\".format(args.model_path, args.loop_counter))\n if worsen_tolerance <= 0: # earliy stop training\n print(\"Early Stop Training.\")\n break\n del agent\n return total_rewards", "def update_model(self, states, actions):\n self.learner.add_to_data(states, actions)\n self.learner.train_learner()\n self.iterations += 1", "def train(self, iterations=1):\n for _ in range(iterations):\n self.trainer.train()\n self.test_network()", "def main():\n agent = Agent()\n env = init_env()\n for i in range(1000):\n agent.start()\n state, reward = env.reset()\n while not env.terminal:\n action = agent.step(state, reward)\n state, reward = env.update(action)\n agent.end(reward)", "def 
step(self, i_episode, states, actions, rewards, next_states, dones):\n #for stepping maddpg\n # index 0 is for agent 0 and index 1 is for agent 1\n full_states = np.reshape(states, newshape=(-1))\n full_next_states = np.reshape(next_states, newshape=(-1))\n \n # Save experience / reward\n self.memory.add(full_states, states, actions, rewards, full_next_states, next_states, dones)\n \n # Learn, if enough samples are available in memory\n if len(self.memory) > BATCH_SIZE and i_episode > self.episodes_before_training:\n for _ in range(NUM_LEARN_STEPS_PER_ENV_STEP): #learn multiple times at every step\n for agent_no in range(self.num_agents):\n samples = self.memory.sample()\n self.learn(samples, agent_no, GAMMA)\n self.soft_update_all()", "def train_agent(iterations, modeldir, logdir, policydir):\n\n # TODO: add code to instantiate the training and evaluation environments\n\n\n # TODO: add code to create a reinforcement learning agent that is going to be trained\n\n\n tf_agent.initialize()\n\n eval_policy = tf_agent.policy\n collect_policy = tf_agent.collect_policy\n\n tf_policy_saver = policy_saver.PolicySaver(collect_policy)\n\n # Use reverb as replay buffer\n replay_buffer_signature = tensor_spec.from_spec(tf_agent.collect_data_spec)\n replay_buffer_signature = tensor_spec.add_outer_dim(replay_buffer_signature)\n table = reverb.Table(\n REPLAY_BUFFER_TABLE_NAME,\n max_size=REPLAY_BUFFER_CAPACITY,\n sampler=reverb.selectors.Uniform(),\n remover=reverb.selectors.Fifo(),\n rate_limiter=reverb.rate_limiters.MinSize(1),\n signature=replay_buffer_signature,\n ) # specify signature here for validation at insertion time\n\n reverb_server = reverb.Server([table])\n\n replay_buffer = reverb_replay_buffer.ReverbReplayBuffer(\n tf_agent.collect_data_spec,\n sequence_length=None,\n table_name=REPLAY_BUFFER_TABLE_NAME,\n local_server=reverb_server,\n )\n\n replay_buffer_observer = reverb_utils.ReverbAddEpisodeObserver(\n replay_buffer.py_client, REPLAY_BUFFER_TABLE_NAME, REPLAY_BUFFER_CAPACITY\n )\n\n # Optimize by wrapping some of the code in a graph using TF function.\n tf_agent.train = common.function(tf_agent.train)\n\n # Evaluate the agent's policy once before training.\n avg_return = compute_avg_return_and_steps(\n eval_env, tf_agent.policy, NUM_EVAL_EPISODES\n )\n\n summary_writer = tf.summary.create_file_writer(logdir)\n\n for i in range(iterations):\n # TODO: add code to collect game episodes and train the agent\n\n\n logger = tf.get_logger()\n if i % EVAL_INTERVAL == 0:\n avg_return, avg_episode_length = compute_avg_return_and_steps(\n eval_env, eval_policy, NUM_EVAL_EPISODES\n )\n with summary_writer.as_default():\n tf.summary.scalar(\"Average return\", avg_return, step=i)\n tf.summary.scalar(\"Average episode length\", avg_episode_length, step=i)\n summary_writer.flush()\n logger.info(\n \"iteration = {0}: Average Return = {1}, Average Episode Length = {2}\".format(\n i, avg_return, avg_episode_length\n )\n )\n\n summary_writer.close()\n\n tf_policy_saver.save(policydir)", "def learn(self):\n event_batch = self.memory.sample(self.batch_size)\n \n if event_batch is None:\n return\n\n event_batch = self.memory.deserialize(event_batch)\n self.update_critic(event_batch)\n self.update_actor(event_batch)\n self.update_target(self.local_actor, self.target_actor)\n self.update_target(self.local_critic, self.target_critic)", "def onTrainLoopTaken(self, agent):\n pass", "def train(self):\n self.ae_train(self.net0, self.ae0_optimizer, self.train0_loader, self.val_loader, name='Net0')\n 
self.ae_train(self.net1, self.ae1_optimizer, self.train1_loader, self.val_loader, name='Net1')\n self.ae_train(self.net2, self.ae2_optimizer, self.train2_loader, self.val_loader, name='Net2')\n\n self.classifier_train(self.net0, self.optimizer0, self.train0_loader, self.val_loader, name='Net0')\n self.classifier_train(self.net1, self.optimizer1, self.train1_loader, self.val_loader, name='Net1')\n self.classifier_train(self.net2, self.optimizer2, self.train2_loader, self.val_loader, name='Net2')", "def learn(self, trajectories: MutableSequence[Trajectory]) -> None:\n for trajectory in trajectories:\n batch = batch_transitions(trajectory)\n policies = self.act(batch.states)\n neglog = -policies.log_prob(batch.actions)\n\n advantage = self.critic.advantage(trajectory)\n\n loss = (neglog * advantage).sum()\n\n self.settings.optimizer.zero_grad()\n loss.backward()\n self.settings.optimizer.step()", "def train(\n env: DiscreteEnvironment[TState, TAction],\n agent: DiscreteAgent[TState, TAction],\n n_episodes: int,\n on_action: Callable[[TState, TAction, float, int], None] = None,\n on_episode_end: Callable[[int], None] = None,\n) -> None:\n for ep in range(n_episodes):\n t = 0\n while not env.terminated:\n s, a, r = agent.act_and_train(t) # returns (S_t, A_t, R_t)\n if on_action:\n on_action(s, a, r, t)\n t += 1\n agent.episode_end()\n if on_episode_end:\n on_episode_end(t)\n env.reset()", "def learn(self,n):\n for i in range(n):\n self.class_counts,self.feature_counts = self.em_step(self.class_counts,\n self.feature_counts)", "def training(self, dataset, repeat=1, gamma=1.0, learning_rate=0.1, model='3yo'):\n for _ in range(repeat):\n for episode in dataset:\n # 1- Get the data stored inside the dataset\n image_index = episode[0] # image of the object\n label_index = episode[1] # label given by the informant\n informant_index = episode[2] # a integer representing the informant\n informant_action = episode[3] # 0=reject, 1=accept\n\n # 2- The agent take an action (with softmax) considering is current state-action table\n # [0=cup, 1=book, 2=ball]\n col = (image_index * self.tot_images) + label_index\n action_array = self.actor_matrix[:, col]\n action_distribution = self._softmax(action_array)\n child_action = np.random.choice(self.tot_actions,\n 1,\n p=action_distribution) # select the action through softmax\n\n # 3- (External) New state and reward obtained from the environment\n # u_t = self.critic_vector[0, col] # previous state\n # New state is estimated, in this simple case nothing happen\n # because the next state is terminal\n # u_t1 = u_t # Only in this example they are the same\n\n # 4- (Intrinsic) The informant_reputation is updated:\n # agent_action, agent_confidence, informant_action, reward\n # informant_vector: 0=unreliable, 1=reliable\n # do_actions_agree: False, True\n # Estimating child_confidence\n distance = np.absolute(action_distribution[0] - action_distribution[1])\n child_confidence_distribution = [1 - distance, distance] # non-knowledgeable, knowledgeable\n child_confidence = np.random.choice(2, 1, p=child_confidence_distribution)\n # Check if child and informant agree\n if (child_action == informant_action):\n do_actions_agree = True\n else:\n do_actions_agree = False\n # Increment the counter in the informant_vector.\n # Here we update the counter distribtuion only if\n # the child is confident, because it is only in that\n # case that the child can say if the informant is\n # reliable or not.\n if (do_actions_agree == False and child_confidence == 1):\n 
self.informant_vector[informant_index][0] += 1 # unreliable\n elif (do_actions_agree == True and child_confidence == 1):\n self.informant_vector[informant_index][1] += 1 # reliable\n elif (do_actions_agree == False and child_confidence == 0):\n self.informant_vector[informant_index][1] += 0 # reliable\n self.informant_vector[informant_index][0] += 0 # unreliable\n elif (do_actions_agree == True and child_confidence == 0):\n self.informant_vector[informant_index][1] += 0 # reliable\n self.informant_vector[informant_index][0] += 0 # unreliable\n else:\n raise ValueError(\"ERROR: anomaly in the IF condition for informant_vector update\")\n # Using the informant_vector given as input it estimates the reputation of the informant\n informant_reputation_distribution = np.true_divide(self.informant_vector[informant_index],\n np.sum(self.informant_vector[informant_index]))\n informant_reputation = np.random.choice(2, 1, p=informant_reputation_distribution)\n\n # 5- (Intrinsic) The Cost is estimated:\n # current_state, agent_action, agent_confidence, informant_action, informant_reputation\n # child_confidence: 0=non-knowledgeable, 1=knowledgeable\n # informant_reputation: 0=non-knowledgeable, 1=knowledgeable\n # action: 0=reject, 1=accept\n # informant_action: 0=reject, 1=accept\n cost = self._return_cost(child_confidence,\n informant_reputation,\n child_action,\n informant_action,\n value=model)\n\n # 6- The utility table is updated using: previous_state, current_state, cost, reward\n # Updating the critic using Temporal Differencing Learning\n # In this simple case there is not a u_t1 state.\n # The current state is considered terminal.\n # We can delete the term (gamma*u_t1)-u_t and considering\n # only (reward-cost) as utility of the state (see Russel Norvig).\n reward = 0 # only for intrinsic learning reward=0\n delta = (reward - cost) # + (gamma*u_t1) - u_t\n self.critic_vector[0, col] += learning_rate * delta\n\n # 7- The actor table is updated using the delta from the critic\n # Update the ACTOR using the delta\n self.actor_matrix[child_action, col] += learning_rate * delta # the current action\n self.actor_matrix[1 - child_action, col] -= learning_rate * delta # the opposite action", "def execute(self):\n # set observations for all agents\n observation = self.environment.get_observation_for_agent()\n for ag in self.environment.agents:\n ag.observation = observation\n # main loop\n while not self.environment.end_episode:\n # each agent choose its action\n self.environment.choose_action()\n # next state\n self.environment.calculate_next_state()\n # is the end of the episode\n self.environment.calculate_end_episode()\n # set observations for all agents\n observation = self.environment.get_observation_for_agent()\n for ag in self.environment.agents:\n ag.observation = observation", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.01) # reduce update_delay to speed up simulation\n sim.run(n_trials=100) # press Esc or close pygame window to quit\n return [a.state_action_table, a.reward_hist]", "def _run_agent(self):\n while not self.state.terminate:\n self._transition_q_learning()\n\n return self.state_list", "def learning_Utility(self):\n # Shape the input that we give to the neural network with the value of sensors, the previous actions the life of 
the agent \n # Get the results from the sensors according the different movement executed by the agent \n sensors_result_N = self.agent.sensors(self, direction=3) + self.agent.get_energy_coarsed()+\\\n self.agent.rotate_previousAction(3)+[self.agent.get_previous_collision()]\n sensors_result_O = self.agent.sensors(self, direction=2) + self.agent.get_energy_coarsed()+\\\n self.agent.rotate_previousAction(2) + [self.agent.get_previous_collision()]\n sensors_result_S = self.agent.sensors(self, direction=1) + self.agent.get_energy_coarsed()+\\\n self.agent.rotate_previousAction(1) + [self.agent.get_previous_collision()]\n sensors_result_E = self.agent.sensors(self, direction=0) + self.agent.get_energy_coarsed()+\\\n self.agent.rotate_previousAction(0) + [self.agent.get_previous_collision()]\n\n input_nn_N = np.asarray(sensors_result_N).astype(int) # input when the Nord action is performed \n input_nn_O = np.asarray(sensors_result_O).astype(int) # input when the West action is performed\n input_nn_S = np.asarray(sensors_result_S).astype(int) # input when the South action is performed\n input_nn_E = np.asarray(sensors_result_E).astype(int) # input when the West action is performed\n\n self.input_list = [input_nn_E.reshape(1,145),\n input_nn_S.reshape(1,145),\n input_nn_O.reshape(1,145),\n input_nn_N.reshape(1,145)]\n self.U_list = [self.nn.predict(i) for i in self.input_list ] #The utility according the different acts performed \n return self.actionSelector() #Select the action acording a propbabilitics distribution given in the paper", "def learn(self):\n pass", "def learn(self):\n pass", "def main(model_path, n_rounds):\n\n env = gym.make('LunarLander-v2')\n\n agent = Agent()\n\n state_dict = torch.load(model_path)\n agent.network.load_state_dict(state_dict)\n agent.network.eval()\n\n for i in range(n_rounds):\n\n state = env.reset()\n total_reward, total_step = 0, 0\n\n while True:\n env.render()\n action = agent.sample(state)\n state, reward, done, _ = env.step(action)\n total_reward += reward\n total_step += 1\n\n if done:\n print(f\"episode {i+1:3d}, \"\n f\"total_reward = {total_reward:6.1f}, \"\n f\"total step: {total_step:4d}\")\n break", "def train(self) -> None:\n for module in self.modules.values():\n module.train()\n return", "def run():\n trials = 100\n\n multipliers = [0.25, 0.3, 0.35, 0.5, 0.75, 1, 1.25, 1.45, 1.5, 1.55, 1.6] # Coefficients for learning rate\n\n mean_penalty = []\n median_penalty = []\n std_penalty = []\n\n mean_trial_time = []\n median_trial_time = []\n std_trial_time = []\n\n mean_success_rate = []\n median_success_rate = []\n std_success_rate = []\n\n for m in multipliers:\n all_penalties = [] # All penalties from trail sets\n all_average_trial_time = []\n all_success_rates = []\n\n for i in range(0, 20):\n # print \"Trial set:\", i\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n agent = e.create_agent(LearnerAgent) # create agent\n agent.mult = m\n e.set_primary_agent(agent, enforce_deadline=True) # specify agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.0, display=False) # create simulator (uses pygame when display=True, if available)\n\n sim.run(n_trials=trials) # run for a specified number of trials\n\n all_penalties.append(agent.all_trails_penalties)\n all_average_trial_time.append(agent.time/float(trials))\n all_success_rates.append(float(trials-agent.aborted_trials)/trials)\n\n mean_penalty.append(np.mean(all_penalties))\n 
median_penalty.append(np.median(all_penalties))\n std_penalty.append(np.std(all_penalties))\n\n mean_trial_time.append(np.mean(all_average_trial_time))\n median_trial_time.append(np.median(all_average_trial_time))\n std_trial_time.append(np.std(all_average_trial_time))\n\n mean_success_rate.append(np.mean(all_success_rates))\n median_success_rate.append(np.median(all_success_rates))\n std_success_rate.append(np.std(all_success_rates))\n\n for i in range(0, len(multipliers)):\n print \"\"\n print \"Multiplier:\", multipliers[i]\n print \"\"\n print \"Mean penalty per {} trials:\".format(trials), mean_penalty[i]\n print \"Median penalty per {} trials:\".format(trials), median_penalty[i]\n print \"Std.Dev. penalty per {} trials:\".format(trials), std_penalty[i]\n\n print \"\"\n print \"Mean trial time:\", mean_trial_time[i]\n print \"Median trial time:\", median_trial_time[i]\n print \"Std.Dev. trial time:\", std_trial_time[i]\n\n print \"\"\n print \"Mean success rate per {} trials:\".format(trials), mean_success_rate[i]\n print \"Median success rate per {} trials:\".format(trials), median_success_rate[i]\n print \"Std.Dev. success rate per {} trials:\".format(trials), std_success_rate[i]", "def run(self):\n self.log.overall('Starting run')\n run_start = time()\n for epoch in xrange(self.n_epochs):\n self.agent.reset()\n self.n_epoch = epoch\n self._run_epoch()\n self.log.overall('End of run ({:.2f} s)'.format(time() - run_start))", "def train(self, iters, n_episodes):\n for i in range(iters):\n self.self_play(n_episodes)\n self.learn()", "def initiate_agent(self, nb_actions):\n\n self.model = Sequential()\n self.model.add(Dense(512, activation='relu', input_shape=env.observation_space)) # pylint: disable=no-member\n self.model.add(Dropout(0.2))\n self.model.add(Dense(512, activation='relu'))\n self.model.add(Dropout(0.2))\n self.model.add(Dense(512, activation='relu'))\n self.model.add(Dropout(0.2))\n self.model.add(Dense(nb_actions, activation='linear'))\n\n # Finally, we configure and compile our agent. 
You can use every built-in Keras optimizer and\n # even the metrics!\n memory = SequentialMemory(limit=memory_limit, window_length=window_length) # pylint: disable=unused-variable\n policy = TrumpPolicy() # pylint: disable=unused-variable", "def train(self, env):\n\n\t\tmin_average_reward_for_stopping = 195\n\t\tconsecutive_successful_episodes_to_stop = 10\n\t\tlast_10_rewards = deque(maxlen=consecutive_successful_episodes_to_stop)\n\n\t\tnum_Episodes = []\n\t\tEpisode_Rewards = []\n\n\t\tfor episode in range(self.episodes):\n\t\t\tstate = env.reset()\n\t\t\tstate = np.reshape(state, [1, self.state_size])\n\t\t\tdone = False\n\t\t\ttotal_reward = 0\n\n\t\t\twhile not done:\n\t\t\t\taction = self.act(state)\n\t\t\t\tnext_state, reward, done, _ = env.step(action)\n\t\t\t\tnext_state = np.reshape(next_state, [1, self.state_size])\n\t\t\t\tself.remember(state, action, reward, next_state, done)\n\t\t\t\tstate = next_state\n\t\t\t\ttotal_reward += reward\n\n\t\t\tnum_Episodes.append(episode)\n\t\t\tEpisode_Rewards.append(total_reward)\n\t\t\tlast_10_rewards.append(total_reward)\n\t\t\tlast_10_avg_reward = np.mean(last_10_rewards)\n\t\t\tprint(episode, last_10_avg_reward)\n\n\t\t\t# call experience relay\n\t\t\tif len(self.memory) >= self.batch_size:\n\t\t\t\tself.replay(self.batch_size)\n\t\t\t# Stopping criteria\n\t\t\tif len(\n\t\t\t\t\tlast_10_rewards) == consecutive_successful_episodes_to_stop \\\n\t\t\t\t\tand last_10_avg_reward > min_average_reward_for_stopping:\n\t\t\t\tprint(\"Solved after {} epsiodes\".format(episode))\n\t\t\t\tbreak", "def train(self):\n tic = time.time()\n means = []\n stds = []\n steps = 0\n scores_window = deque(maxlen=100)\n for e in range(1,self.episodes):\n\n self.noise.step()\n episode_scores = []\n obs = self.env.reset()\n for t in range(self.tmax):\n actions = self.act(obs)\n next_obs,rewards,dones = self.env.step(actions)\n\n # Store experience\n if np.max(rewards) > 0:\n print('hit the ball over the net',rewards)\n self.R.add(obs.reshape(1,48),obs,actions,rewards,next_obs.reshape(1,48),next_obs,dones)\n obs = next_obs\n # Score tracking\n episode_scores.append(np.max(rewards))\n \n # Learn\n if len(self.R) > self.min_buffer_size:\n for _ in range(self.SGD_epoch):\n # Update each agent\n for i in range(self.num_agents):\n self.learn(i)\n # update target networks\n self.update_targets_all()\n \n steps += int(t)\n means.append(np.mean(episode_scores))\n stds.append(np.std(episode_scores))\n scores_window.append(np.sum(episode_scores))\n if e % 4 == 0:\n toc = time.time()\n r_mean = np.mean(scores_window)\n r_max = max(scores_window)\n r_min = min(scores_window)\n r_std = np.std(scores_window)\n plot(self.name,means,stds)\n print(\"\\rEpisode: {} out of {}, Steps {}, Rewards: mean {:.2f}, min {:.2f}, max {:.2f}, std {:.2f}, Elapsed {:.2f}\".format(e,self.episodes,steps,r_mean,r_min,r_max,r_std,(toc-tic)/60))\n if np.mean(scores_window) > self.winning_condition:\n print('Env solved!')\n # save scores\n pickle.dump([means,stds], open(str(self.name)+'_scores.p', 'wb'))\n # save policy\n self.save_weights(self.critic_path,self.actor_path)\n break", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=False) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.0000001, display=False) # create 
simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=100) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line\n\n print 'alpha, gamma:', a.alpha, a.gamma\n print 'penalties:', a.total_penalties\n print 'total rewards:', a.total_rewards", "def _Train(self, limit):\n if len(self.Memory)>BATCH_SIZE: \n # Limit of Agents to Train\n for i in range(limit): \n # 'n' number of rounds to train \n for _ in range(50):\n # Get Batch Data\n experiances = self.Memory.sample()\n # Train Models\n self._Learn(self.Actor[i], self.ActorTarget, self.actorOpt[i], experiances)", "def learn(self, purge_memory=True):\n observed_inputs, observed_reward, predicted_outputs, distance_from_reward = self._preprocess_experience()\n # now train. DataFeeder automatically reshuffles data.\n self.dataset_feeder = DataFeeder(\n [observed_inputs, predicted_outputs, observed_reward],\n batch_size=self.batch_size)\n # determine number of iterations:\n self.iterations = int(self.epochs * len(observed_inputs) / self.batch_size)\n for _ in range(self.iterations):\n self._batch()\n # TODO: write a method that computes and prints training stats\n # if _ % 1000:\n # self._train_stats(_)\n if purge_memory:\n self.purge_memory()", "def do_make_(self):\n global g_list_of_classifier\n\n for ite_clf in g_list_of_classifier:\n ite_clf.learn()\n return ''", "def learn(self):\n self._eval_td_online()", "def learn(self):\n self._eval_td_online()", "def learn(self,experiences, gamma):\n states, actions, rewards, next_states, dones = experiences\n\n # --------- Update Critic network -------#\n actions_next = self.actor_target(next_states)\n Q_targets_next = self.critic_target(next_states,actions_next)\n Q_targets = rewards + (gamma * Q_targets_next * (1- dones))\n\n #--------- Compute critic loss using MSE--#\n Q_expected = self.critic_local(states,actions)\n critic_loss = F.mse_loss(Q_expected,Q_targets)\n\n #----------Minimize the loss -----------#\n self.critic_optimizer.zero_grad()\n critic_loss.backward()\n nn.utils.clip_grad_norm(self.critic_local.parameters(),1)\n self.critic_optimizer.step()\n\n #-------- Update Actor network---------#\n # get mu(s)\n actions_pred = self.actor_local(states)\n #get V(s,a)\n actor_loss = -self.critic_local(states,actions_pred).mean()\n #-------- Minize the loss -----------#\n self.actor_optimizer.zero_grad()\n actor_loss.backward()\n self.actor_optimizer.step()\n\n #------- Update Target Networks ----#\n self.soft_update(self.critic_local,self.critic_target,TAU)\n self.soft_update(self.actor_local,self.actor_target,TAU)", "def _train_model(self):\n self.experiment = EpisodicExperiment(self.task, self.agent)\n n_epochs = int(self.rl_params.n_training_episodes / self.rl_params.n_episodes_per_epoch)\n logger.debug(\"Fitting user model over {} epochs, each {} episodes, total {} episodes.\"\n .format(n_epochs, self.rl_params.n_episodes_per_epoch, n_epochs*self.rl_params.n_episodes_per_epoch))\n for i in range(n_epochs):\n logger.debug(\"RL epoch {}\".format(i))\n self.experiment.doEpisodes(self.rl_params.n_episodes_per_epoch)\n self.agent.learn()\n self.agent.reset() # reset buffers", "def learn(self, experience):\n\n # Convert experiences to separate list\n states = np.vstack([e.state for e in experience if e is not None])\n actions = np.array([e.action for e in experience if e is not None]).astype(np.float32).reshape(-1, 
self.action_size)\n rewards = np.array([e.reward for e in experience if e is not None]).astype(np.float32).reshape(-1, 1)\n dones = np.array([e.done for e in experience if e is not None]).astype(np.uint8).reshape(-1, 1)\n next_states = np.vstack([e.next_state for e in experience if e is not None])\n\n # Get predicted next-state actions and Q values from target models\n # Q_targets_next = critic_target(next_state, actor_target(next_state))\n actions_next = self.actor_target.model.predict_on_batch(next_states)\n Q_targets_next = self.critic_target.model.predict_on_batch([next_states, actions_next])\n\n # Compute Q targets for current states and train critic model (local)\n Q_targets = rewards + self.gamma * Q_targets_next * (1 - dones)\n self.critic_local.model.train_on_batch(x=[states, actions], y=Q_targets)\n\n # Train actor model (local)\n action_gradients = np.reshape(self.critic_local.get_action_gradients([states, actions, 0]), (-1, self.action_size))\n self.actor_local.train_fn([states, action_gradients, 1])\n\n # soft-update target models\n self.soft_update(self.critic_local.model, self.critic_target.model)\n self.soft_update(self.actor_local.model, self.actor_target.model)", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.5) # reduce update_delay to speed up simulation\n sim.run(n_trials=100) # press Esc or close pygame window to quit", "def learn(self):\n batch = self.agent.replay_buffer.sample(self.batch_size)\n states = torch.tensor([x.state for x in batch], dtype=torch.float32).to(self.agent.device) # shape == (batch_size, 3, 6, 7)\n actions = [x.action for x in batch]\n rewards = torch.tensor([x.reward for x in batch], dtype=torch.float32).to(self.agent.device)\n next_states = torch.tensor([x.next_state for x in batch], dtype=torch.float32).to(self.agent.device)\n dones = [x.done for x in batch]\n\n self.optimizer.zero_grad()\n\n\n q_vals = self.agent.policy_net(states)[range(len(actions)), actions] # Q vals for actions taken\n q_next_vals = self.agent.target_net(next_states).detach() # we don't care about grad wrt target net\n q_next_vals[dones] = 0.0 # terminal states have no future expected value\n q_targets = rewards + self.gamma * torch.max(q_next_vals, dim=1)[0]\n\n # all_q_vals = self.agent.policy_net(states)\n # print()\n # print('actions')\n # print(actions)\n # print()\n # print('original all q vals')\n # print(self.agent.policy_net(states)) \n # print(self.agent.policy_net(states).shape)\n # print()\n # print('QVALS:', q_vals)\n # print(q_vals.shape)\n # print('\\n\\n')\n # print('QTARGETS:', q_targets)\n # print(q_targets.shape)\n\n # breakpoint()\n\n loss = self.loss_fn(q_targets, q_vals).to(self.agent.device)\n loss.backward()\n \n # for layer in self.agent.policy_net.named_parameters():\n \n # # print(f'layer: {layer[0]}')\n # # print(f'grad:', layer[1].grad)\n\n # # print('loss', loss)\n # # print('q_vals grad:', q_vals.grad)\n # # print('states:', )\n\n self.optimizer.step()\n\n self.agent.learning_iters += 1\n if self.agent.learning_iters % self.target_update_freq == 0:\n self.agent.update_target_net()\n # logger.info('Updated target net')", "def learn(\n flags,\n actor_model,\n model,\n batch,\n initial_agent_state,\n optimizer,\n scheduler,\n lock=threading.Lock(), # noqa: B008\n):\n with lock:\n learner_outputs, 
unused_state = model(batch, initial_agent_state)\n\n # Take final value function slice for bootstrapping.\n bootstrap_value = learner_outputs[\"baseline\"][-1]\n\n # Move from obs[t] -> action[t] to action[t] -> obs[t].\n batch = {key: tensor[1:] for key, tensor in batch.items()}\n learner_outputs = {key: tensor[:-1] for key, tensor in learner_outputs.items()}\n\n rewards = batch[\"reward\"]\n if flags.reward_clipping == \"abs_one\":\n clipped_rewards = torch.clamp(rewards, -1, 1)\n elif flags.reward_clipping == \"none\":\n clipped_rewards = rewards\n\n discounts = (~batch[\"done\"]).float() * flags.discounting\n\n vtrace_returns = vtrace.from_logits(\n behavior_policy_logits=batch[\"policy_logits\"],\n target_policy_logits=learner_outputs[\"policy_logits\"],\n actions=batch[\"action\"],\n discounts=discounts,\n rewards=clipped_rewards,\n values=learner_outputs[\"baseline\"],\n bootstrap_value=bootstrap_value,\n )\n\n pg_loss = compute_policy_gradient_loss(\n learner_outputs[\"policy_logits\"],\n batch[\"action\"],\n vtrace_returns.pg_advantages,\n )\n baseline_loss = flags.baseline_cost * compute_baseline_loss(\n vtrace_returns.vs - learner_outputs[\"baseline\"]\n )\n entropy_loss = flags.entropy_cost * compute_entropy_loss(\n learner_outputs[\"policy_logits\"]\n )\n\n total_loss = pg_loss + baseline_loss + entropy_loss\n\n episode_returns = batch[\"episode_return\"][batch[\"done\"]]\n stats = {\n \"episode_returns\": tuple(episode_returns.cpu().numpy()),\n \"mean_episode_return\": torch.mean(episode_returns).item(),\n \"total_loss\": total_loss.item(),\n \"pg_loss\": pg_loss.item(),\n \"baseline_loss\": baseline_loss.item(),\n \"entropy_loss\": entropy_loss.item(),\n }\n\n optimizer.zero_grad()\n total_loss.backward()\n nn.utils.clip_grad_norm_(model.parameters(), flags.grad_norm_clipping)\n optimizer.step()\n scheduler.step()\n\n actor_model.load_state_dict(model.state_dict())\n return stats", "def train(self):\n\n if(self.net.killAll):\n self._kill()\n\n empty = False\n state = []\n actions = []\n rewards = []\n while(not empty):\n example = self.globalQueue.get()\n \n for prevState, action, reward in zip(example['prevStates'], example['actions'],example['rewards']):\n state.append(np.array(prevState).reshape(-1,84,84,4))\n actions.append(np.eye(self.actionSpace)[np.array(action)].reshape(-1,self.actionSpace).astype(np.float32))\n rewards.append(np.array(reward).reshape(-1))\n empty = self.globalQueue.empty()\n \n if(len(rewards) != 0 ):\n states = np.array(state).reshape(-1, 84,84,4)\n actions = np.array(actions).reshape(-1,self.actionSpace)\n rewards = np.array(rewards).reshape(-1)\n self.net.train(states, rewards, actions)", "def ddpg_learning(env, agent, brain_name, cfg,\n n_episodes=2000, max_t=100000,\n avg_score_cutoff=15,\n model_save_path=None):\n print(\"Training an agent with DDPG.\")\n\n env_info = env.reset(train_mode=True)[brain_name]\n action_size = env.brains[brain_name].vector_action_space_size\n # state_size = env_info.vector_observations.shape[1]\n num_agents = len(env_info.agents)\n\n if not os.path.exists(model_save_path):\n print(\"Creating directory {:s} to save model weights into!\".format(model_save_path))\n os.mkdir(model_save_path)\n\n all_scores = [] # list containing scores from each episode\n\n for i_episode in range(1, n_episodes + 1):\n\n env_info = env.reset(train_mode=True)[brain_name]\n states = env_info.vector_observations\n\n scores = np.zeros(num_agents)\n\n for t in range(max_t):\n\n if cfg.maddpg:\n actions = agent.act(states)\n env_info = 
env.step(actions)[brain_name]\n else:\n actions = agent.act(states.reshape(-1))\n env_info = env.step(actions.reshape(num_agents, action_size))\n\n next_states = env_info.vector_observations\n rewards = env_info.rewards\n dones = env_info.local_done\n\n if cfg.maddpg:\n agent.step(states, actions, rewards, next_states, dones)\n else:\n # single agent with states and actions stacked together\n agent.step(states.reshape(-1), actions.reshape(-1),\n np.max(rewards), next_states.reshape(-1),\n np.any(dones))\n\n states = next_states\n scores += rewards\n if np.any(dones):\n break\n\n all_scores.append(scores) # save most recent score\n\n last100mean = np.mean(np.max(np.atleast_2d(all_scores), axis=1)[-100:])\n if i_episode % 100 == 0:\n print('\\rEpisode {}\\tAverage Score: {:.4f}'.format(\n i_episode, last100mean))\n\n if model_save_path is not None:\n agent.save_weights(model_save_path)\n\n if cfg.save_scores:\n pd.DataFrame(scores).to_hdf(cfg.save_scores, \"scores\")\n\n if last100mean >= avg_score_cutoff:\n print('\\nEnvironment solved in {:d} episodes!\\tAverage Score: {:.4f}'.format(\n i_episode, last100mean))\n\n break\n\n # save trained models a final time\n if model_save_path is not None:\n agent.save_weights(model_save_path)\n\n return pd.DataFrame(all_scores)", "def step(self, states, actions, rewards, next_states, dones):\n # Save experience / reward\n for state, action, reward, next_state, done in zip(states, actions, rewards, next_states, dones):\n self.memory.add(state, action, reward, next_state, done)\n\n # Learn, if enough samples are available in memory\n if len(self.memory) > BATCH_SIZE:\n self.learn(GAMMA)", "def learn(self, experiences, gamma):\r\n states, actions, rewards, next_states, dones = experiences\r\n\r\n # UPDATE CRITIC #\r\n actions_next = self.actor_target(next_states.to(device))\r\n Q_targets_next = self.critic_target(next_states.to(device), actions_next.to(device))\r\n Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))\r\n Q_expected = self.critic_local(states, actions)\r\n critic_loss = F.mse_loss(Q_expected, Q_targets)\r\n\r\n self.critic_optimizer.zero_grad()\r\n critic_loss.backward()\r\n clip_grad_norm_(self.critic_local.parameters(), 1) # Clip the gradient when update critic network\r\n self.critic_optimizer.step()\r\n\r\n # UPDATE ACTOR #\r\n actions_pred = self.actor_local(states)\r\n actor_loss = -self.critic_local(states, actions_pred).mean()\r\n\r\n self.actor_optimizer.zero_grad()\r\n actor_loss.backward()\r\n self.actor_optimizer.step()\r\n\r\n # UPDATE TARGET NETWORKS #\r\n self.soft_update(self.critic_local, self.critic_target, RHO)\r\n self.soft_update(self.actor_local, self.actor_target, RHO) \r\n\r\n # UPDATE EPSILON AND NOISE # \r\n self.epsilon *= EPSILON_DECAY\r\n self.noise.reset()", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline= True ) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.5) # reduce update_delay to speed up simulation\n sim.run(n_trials=100) # press Esc or close pygame window to quit", "def step(self, action):\n obs = self.gym.get_observations()\n all_actions = self.gym.act(obs)\n all_actions.insert(self.gym.training_agent, action)\n state, reward, terminal, info = self.gym.step(all_actions)\n agent_state = self.featurize(state[self.gym.training_agent])\n\n # agent_state_history = self.make_observation(agent_state, 
self.step)\n agent_reward = reward[self.gym.training_agent]\n\n # self.step += 1\n return agent_state, agent_reward, terminal, info", "def learn(self, experiences, gamma):\n states, actions, rewards, next_states, dones = experiences\n # print('learn states.shape', states.shape)\n # print('learn next_states.shape', next_states.shape)\n \n q_expected, q_targets = self.get_target_and_expected(states, \n actions, \n rewards, \n next_states, \n dones, \n gamma)\n\n\n # Compute loss\n loss = F.mse_loss(q_expected, q_targets)\n # Minimize the loss\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # ------------------- update target network ------------------- #\n self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)", "def on_train_start(self, agent, **kwargs):\n self.train_start = timeit.default_timer()\n self.nb_steps = kwargs['nb_steps']\n print('Training for {} steps ...'.format(self.nb_steps))", "def run(): \n learning_rate = 0.42\n discount_rate = 0.15\n initial_q_hat = 4\n \n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent, learning_rate, discount_rate, initial_q_hat) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0, display=False) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=100) # run for a specified number of trials\n print \"Failed trials: \"\n print a.get_failed_trials()\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line", "def step(self, itr, agent_state_dict):\n super().step(itr, agent_state_dict)\n self.agent.load_state_dict(agent_state_dict)\n\n self.agent.sample_mode(itr)\n samples, self.traj_infos = self.sampler.obtain_samples(itr)\n self.agent.train_mode(itr)\n self.opt_info = self.algo.optimize_agent(itr, samples)\n\n self.grad = self.algo.pass_gradients()", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=False) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.0) # reduce update_delay to speed up simulation\n sim.run(n_trials=num_of_experiments) # press Esc or close pygame window to quit\n \n pd.Series(a.success).to_pickle('success_' + exp_id + '.pickle')\n a.Q_table.to_pickle('qtable_' + exp_id + '.pickle')\n pd.Series(a.q_delta_avg).to_pickle('convergence_' + exp_id + '.pickle')\n pd.Series(a.t_total).to_pickle('steps_' + exp_id + '.pickle')", "def train_models(self, states, actions, rewards, done):\n # Compute discounted rewards and Advantage (TD. 
Error)\n discounted_rewards = self.discount(rewards, done, states[-1])\n state_values = self.critic.predict(np.array(states))\n advantages = discounted_rewards - np.reshape(state_values, len(state_values))\n # Networks optimization\n self.a_opt([states, actions, advantages])\n self.c_opt([states, discounted_rewards])", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.00000001, display=False) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=100) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line", "def train(self):\n total_steps = 0\n scores_history = [deque(maxlen=self.run_settings.averaging_window)\n for a in range(len(self.agents))]\n averages_history = [[] for a in range(len(self.agents))]\n\n for e in range(self.run_settings.num_episodes):\n # Initialize episode\n try:\n env_states, rewards, done, metainfo = self.custom_env.reset()\n except EpisodeCrashException:\n for a in range(len(self.agents)):\n if hasattr(self.agents[a], 'notify_episode_crashed'):\n self.agents[a].notify_episode_crashed(self.run_settings)\n continue\n\n # Initialize scores to starting reward (probably 0)\n scores = rewards\n step = 0\n\n while not done:\n states = [self.agents[a].state_space_converter(env_states[a])\n for a in range(len(self.agents))]\n\n # Train agents\n if total_steps > 0 and total_steps % self.run_settings.train_every == 0:\n for agent in self.agents:\n agent.train(self.run_settings)\n\n # Save agent model\n if total_steps > 0 and total_steps % self.run_settings.save_every == 0:\n for agent in self.agents:\n agent.save()\n\n # Get actions\n actions = [self.agents[a].sample(states[a])\n for a in range(len(self.agents))]\n env_actions = [self.agents[a].action_space_converter(actions[a])\n for a in range(len(self.agents))]\n # Take environment step\n try:\n env_states, rewards, done, metainfo = self.custom_env.step(env_actions)\n except EpisodeCrashException:\n for a in range(len(self.agents)):\n if hasattr(self.agents[a], 'notify_episode_crashed'):\n self.agents[a].notify_episode_crashed(self.run_settings)\n break\n step += 1\n total_steps += 1\n\n # Update scores\n scores = [scores[a] + rewards[a] for a in range(len(self.agents))]\n # Push to agent Memories\n for a in range(len(self.agents)):\n self.agents[a].push_memory(states[a], actions[a], rewards[a], done)\n\n if done:\n averages = []\n for a in range(len(scores_history)):\n scores_history[a].append(scores[a])\n averages.append(np.mean(scores_history[a]))\n averages_history[a].append(averages[a])\n\n if len(scores) == 1:\n scores = scores[0]\n averages = averages[0]\n if self.run_settings.verbose:\n print(\"Game {} ended after {} steps. Game score: {}. 
Averages: {}\"\n .format(e+1, step, scores, averages))\n if (self.run_settings.graph_every > 0 and e > 0\n and e % self.run_settings.graph_every == 0):\n self.plot_results(averages_history)", "def take_one_step(self):\n\t\tfor i in range(len(self.agents)):\n\t\t\tself.agents[i].action(0)", "def learn(self, experiences, gamma):\n states, actions, rewards, next_states, dones = experiences\n\n # ------------------- update critic ------------------- #\n next_actions = self.actor_target(next_states)\n # Get Q targets (for next states) from target model (on CPU)\n Q_targets_next = self.critic_target(next_states, next_actions).detach().max(1)[0].unsqueeze(1)\n # Compute Q targets for current states \n Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))\n\n # Get expected Q values from local model\n Q_expected = self.critic_local(states, actions)\n\n # Compute critic loss\n critic_loss = F.mse_loss(Q_expected, Q_targets)\n # Minimize the critic loss\n self.critic_optimizer.zero_grad()\n critic_loss.backward()\n torch.nn.utils.clip_grad_norm_(self.critic_local.parameters(), 1) # As mentioned on project page\n self.critic_optimizer.step()\n\n # ------------------- update actor ------------------- #\n actions_expected = self.actor_local(states)\n # Compute actor loss based on expectation from actions_expected\n actor_loss = -self.critic_local(states, actions_expected).mean()\n # Minimize the actor loss\n self.actor_optimizer.zero_grad()\n actor_loss.backward()\n self.actor_optimizer.step()\n\n\n # ------------------- update target network ------------------- #\n self.soft_update(self.critic_local, self.critic_target, TAU) \n self.soft_update(self.actor_local, self.actor_target, TAU)", "def run(self):\n count = self.neuron_count\n for i in range(0, count):\n self.run(i)", "def retrain(self):\n thread = Thread(target=self.trainer.train_classifier)\n thread.start()", "def start_training(self):\n i = 0\n for _ in range(self.train_steps):\n print(f\"Start Training Step {i + 1}\")\n self.model.learn(total_timesteps=self.total_time_steps)\n self.model.save(self.save_path)\n print(f\"Finished Training Step {i + 1}\")\n i += 1", "def learn(self, experiences):\n rewards = to_tensor(experiences['reward']).float().to(self.device).unsqueeze(1)\n dones = to_tensor(experiences['done']).type(torch.int).to(self.device).unsqueeze(1)\n states = to_tensor(experiences['state']).float().to(self.device)\n actions = to_tensor(experiences['action']).to(self.device)\n next_states = to_tensor(experiences['next_state']).float().to(self.device)\n\n if (self.iteration % self.update_freq) == 0:\n self._update_value_function(states, actions, rewards, next_states, dones)\n\n if (self.iteration % self.update_policy_freq) == 0:\n self._update_policy(states)\n\n soft_update(self.target_actor, self.actor, self.tau)\n soft_update(self.target_critic, self.critic, self.tau)", "def _train(self):\n training_environment = self._training_environment\n evaluation_environment = self._evaluation_environment\n policy = self._policy\n pool = self._pool\n\n if not self._training_started:\n self._init_training()\n\n self._initial_exploration_hook(\n training_environment, self._initial_exploration_policy, pool)\n\n self.sampler.initialize(training_environment, policy, pool)\n\n gt.reset_root()\n gt.rename_root('RLAlgorithm')\n gt.set_def_unique(False)\n\n self._training_before_hook()\n\n for self._epoch in gt.timed_for(range(self._epoch, self._n_epochs)):\n self._epoch_before_hook()\n gt.stamp('epoch_before_hook')\n\n start_samples = 
self.sampler._total_samples\n for i in count():\n samples_now = self.sampler._total_samples\n self._timestep = samples_now - start_samples\n\n if (samples_now >= start_samples + self._epoch_length\n and self.ready_to_train):\n break\n\n self._timestep_before_hook()\n gt.stamp('timestep_before_hook')\n\n self._do_sampling(timestep=self._total_timestep)\n gt.stamp('sample')\n\n if self.ready_to_train:\n self._do_training_repeats(timestep=self._total_timestep)\n gt.stamp('train')\n\n self._timestep_after_hook()\n gt.stamp('timestep_after_hook')\n\n training_paths = self.sampler.get_last_n_paths(math.ceil(self._epoch_length / self.sampler._max_path_length))\n gt.stamp('training_paths')\n evaluation_paths = self._evaluation_paths(policy, evaluation_environment)\n gt.stamp('evaluation_paths')\n\n training_metrics = self._evaluate_rollouts(training_paths, training_environment)\n gt.stamp('training_metrics')\n if evaluation_paths:\n evaluation_metrics = self._evaluate_rollouts(\n evaluation_paths, evaluation_environment)\n gt.stamp('evaluation_metrics')\n else:\n evaluation_metrics = {}\n\n self._epoch_after_hook(training_paths)\n gt.stamp('epoch_after_hook')\n\n sampler_diagnostics = self.sampler.get_diagnostics()\n\n diagnostics = self.get_diagnostics(\n iteration=self._total_timestep,\n batch=self._evaluation_batch(),\n training_paths=training_paths,\n evaluation_paths=evaluation_paths)\n\n time_diagnostics = gt.get_times().stamps.itrs\n\n diagnostics.update(OrderedDict((\n *(\n (f'evaluation/{key}', evaluation_metrics[key])\n for key in sorted(evaluation_metrics.keys())\n ),\n *(\n (f'training/{key}', training_metrics[key])\n for key in sorted(training_metrics.keys())\n ),\n *(\n (f'times/{key}', time_diagnostics[key][-1])\n for key in sorted(time_diagnostics.keys())\n ),\n *(\n (f'sampler/{key}', sampler_diagnostics[key])\n for key in sorted(sampler_diagnostics.keys())\n ),\n ('epoch', self._epoch),\n ('timestep', self._timestep),\n ('timesteps_total', self._total_timestep),\n ('train-steps', self._num_train_steps),\n )))\n\n if self._eval_render_kwargs and hasattr(\n evaluation_environment, 'render_rollouts'):\n # TODO(hartikainen): Make this consistent such that there's no\n # need for the hasattr check.\n training_environment.render_rollouts(evaluation_paths)\n\n yield diagnostics\n\n self.sampler.terminate()\n\n self._training_after_hook()\n\n yield {'done': True, **diagnostics}", "def trainNet():", "def learn(self, experiences, gamma):\n\n states, all_state, action, all_actions, rewards, next_state, all_next_state, dones = experiences\n batch_size = all_next_state.shape[0]\n\n all_next_actions = self.actor_target(all_next_state.view(batch_size * 2, -1)).view(batch_size, -1)\n\n critic_target_input = torch.cat((all_next_state, all_next_actions.view(batch_size * 2, -1)[1::2]), dim=1).to(\n device)\n with torch.no_grad():\n Q_target_next = self.critic_target(critic_target_input, all_next_actions.view(batch_size * 2, -1)[::2])\n Q_targets = rewards + (gamma * Q_target_next * (1 - dones))\n\n critic_local_input = torch.cat((all_state, all_actions.view(batch_size * 2, -1)[1::2]), dim=1).to(device)\n Q_expected = self.critic_local(critic_local_input, action)\n\n # critic loss\n huber_loss = torch.nn.SmoothL1Loss()\n\n loss = huber_loss(Q_expected, Q_targets.detach())\n\n self.optimizer_critic.zero_grad()\n loss.backward()\n torch.nn.utils.clip_grad_norm_(self.critic_local.parameters(), 1)\n self.optimizer_critic.step()\n\n # actor loss\n\n action_pr_self = self.actor_local(states)\n 
action_pr_other = self.actor_local(all_next_state.view(batch_size * 2, -1)[1::2]).detach()\n\n # critic_local_input2=torch.cat((all_state,torch.cat((action_pr_self,action_pr_other),dim=1)),dim=1)\n critic_local_input2 = torch.cat((all_state, action_pr_other), dim=1)\n p_loss = -self.critic_local(critic_local_input2, action_pr_self).mean()\n\n self.optimizer_actor.zero_grad()\n p_loss.backward()\n\n self.optimizer_actor.step()\n\n # ------------------- update target network ------------------- #\n self.TAU = min(5e-1, self.TAU * 1.001)\n self.soft_update(self.critic_local, self.critic_target, self.TAU)\n self.soft_update(self.actor_local, self.actor_target, self.TAU)", "def train_agent(\n self,\n *,\n env,\n test_env,\n save_name,\n train_every=1,\n max_episodes=1000,\n center_returns=True,\n render=True,\n ):\n\n agent = self.create_agent(env)\n\n for episode in range(1, max_episodes + 1):\n obs = env.reset()\n done = False\n\n episode_return = 0.0\n while not done:\n action = agent.act(obs, deterministic=False)\n next_obs, reward, done, _ = env.step(action)\n episode_return += reward\n agent.store_step(obs, action, reward, next_obs, done)\n obs = next_obs\n\n if render:\n env.render()\n\n if episode % train_every == 0:\n agent.perform_training(\n gamma=self.gamma, center_returns=center_returns\n )\n torch.save(agent, f\"saved_agents/{save_name}\")\n\n print(\"Episode {} -- return={}\".format(episode, episode_return))\n return agent", "def train_agent(\n self,\n *,\n env,\n test_env,\n save_name,\n train_every=32,\n eval_every=1000,\n max_steps=100000,\n start_epsilon=0.9,\n end_epsilon=0.001,\n epsilon_decay_steps=1000,\n render=True,\n ):\n\n agent = self.create_agent(env)\n curr_epsilon = start_epsilon\n epsilon_decay = self.get_decay_value(\n start_epsilon, end_epsilon, epsilon_decay_steps\n )\n\n obs = env.reset()\n action = agent.act(obs, epsilon=curr_epsilon)\n\n for step in range(1, max_steps + 1):\n next_obs, reward, done, _ = env.step(action)\n next_action = agent.act(next_obs, epsilon=curr_epsilon)\n agent.store_step(obs, action, reward, next_obs, next_action, done)\n obs = next_obs\n\n if render:\n env.render()\n\n if self.time_to(train_every, step):\n agent.perform_training(gamma=self.gamma)\n curr_epsilon = max(end_epsilon, curr_epsilon - epsilon_decay)\n\n if self.time_to(eval_every, step):\n self.evaluate_agent(agent, test_env, end_epsilon)\n torch.save(agent, f\"saved_agents/{save_name}\")\n\n if done:\n obs = env.reset()\n action = agent.act(obs, epsilon=curr_epsilon)\n\n print(\"At step {}\".format(step), end=\"\\r\")\n print(\"\\nDone!\")\n\n return agent", "def train(self):\n if len(self.memory) > self.batch_size:\n selecting_time_start = time.time()\n experiences = self.memory.sample()\n self.selecting_time += time.time() - selecting_time_start\n self.learn(experiences)", "def on_train_forward(self, runner):\n self.on_iter_forward(runner)", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.001, display=True) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=100) # run for a specified number of trials\n # NOTE: To quit midway, 
press Esc or close pygame window, or hit Ctrl+C on the command-line", "def test(args):\n brain_name = env.brain_names[0]\n brain = env.brains[brain_name]\n env_info = env.reset(train_mode=True)[brain_name]\n\n num_agents = len(env_info.agents)\n print('Number of agents:', num_agents)\n\n # dim of each action\n action_size = brain.vector_action_space_size\n print('Size of each action:', action_size)\n\n # dim of the state space\n states = env_info.vector_observations\n state_size = states.shape[1]\n\n agent = MADDPG(state_size, action_size, actor_layer_dim_1=args.actor_layer_dim_1,\n actor_layer_dim_2=args.actor_layer_dim_2,\n actor_layer_dim_3=args.actor_layer_dim_3,\n critic_layer_dim_1=args.critic_layer_dim_1,\n critic_layer_dim_2=args.critic_layer_dim_2,\n critic_layer_dim_3=args.critic_layer_dim_3)\n\n agent.load(\n \"chkpts/{}/{:02d}_best_model.checkpoint\".format(args.model_path, args.loop_counter))\n\n test_scores = []\n for i_episode in tqdm(range(1, 1+args.test_n_run)):\n # initialize the scores\n scores = np.zeros(num_agents)\n env_info = env.reset(train_mode=True)[\n brain_name] # reset the environment\n states = env_info.vector_observations # get the current states\n dones = [False]*num_agents\n while not np.any(dones):\n actions = agent.act(states) # select actions\n # send the actions to the environment\n env_info = env.step(actions)[brain_name]\n next_states = env_info.vector_observations # get the next states\n rewards = env_info.rewards # get the rewards\n dones = env_info.local_done # see if episode has finished\n scores += rewards # update the scores\n # roll over the states to next time step\n states = next_states\n\n test_scores.append(np.max(scores))\n\n avg_score = sum(test_scores)/len(test_scores)\n print(\"Test Score: {}\".format(avg_score))\n\n return avg_score", "def train(self) -> None:\n\n self.envs = construct_envs(self.config, get_env_class(self.config.ENV_NAME))\n\n ppo_cfg = self.config.RL.PPO\n self.device = (\n torch.device(\"cuda\", self.config.TORCH_GPU_ID)\n if torch.cuda.is_available()\n else torch.device(\"cpu\")\n )\n if not os.path.isdir(self.config.CHECKPOINT_FOLDER):\n os.makedirs(self.config.CHECKPOINT_FOLDER)\n self._setup_actor_critic_agent(ppo_cfg)\n logger.info(\n \"agent number of parameters: {}\".format(\n sum(param.numel() for param in self.agent.parameters())\n )\n )\n\n rollouts = RolloutStorage(\n ppo_cfg.num_steps,\n self.envs.num_envs,\n self.envs.observation_spaces[0],\n self.envs.action_spaces[0],\n ppo_cfg.hidden_size,\n )\n rollouts.to(self.device)\n\n observations = self.envs.reset()\n batch = batch_obs(observations)\n\n for sensor in rollouts.observations:\n rollouts.observations[sensor][0].copy_(batch[sensor])\n\n # batch and observations may contain shared PyTorch CUDA\n # tensors. 
We must explicitly clear them here otherwise\n # they will be kept in memory for the entire duration of training!\n batch = None\n observations = None\n\n running_episode_stats = dict(\n count=torch.zeros(self.envs.num_envs, 1),\n reward=torch.zeros(self.envs.num_envs, 1),\n )\n window_episode_stats = defaultdict(\n lambda: deque(maxlen=ppo_cfg.reward_window_size)\n )\n\n t_start = time.time()\n env_time = 0\n pth_time = 0\n forward_time = 0\n agent_update_time = 0\n count_steps = 0\n count_checkpoints = 0\n\n lr_scheduler = LambdaLR(\n optimizer=self.agent.optimizer,\n lr_lambda=lambda x: linear_decay(x, self.config.NUM_UPDATES),\n )\n\n with TensorboardWriter(\n self.config.TENSORBOARD_DIR, flush_secs=self.flush_secs\n ) as writer:\n for update in range(self.config.NUM_UPDATES):\n if ppo_cfg.use_linear_lr_decay:\n lr_scheduler.step()\n\n if ppo_cfg.use_linear_clip_decay:\n self.agent.clip_param = ppo_cfg.clip_param * linear_decay(\n update, self.config.NUM_UPDATES\n )\n\n for step in range(ppo_cfg.num_steps):\n (\n delta_pth_time,\n delta_forward_time,\n delta_env_time,\n delta_steps,\n ) = self._collect_rollout_step(\n rollouts, current_episode_reward, running_episode_stats,\n )\n pth_time += delta_pth_time\n forward_time += delta_forward_time\n env_time += delta_env_time\n count_steps += delta_steps\n\n (\n delta_pth_time,\n value_loss,\n action_loss,\n dist_entropy,\n ) = self._update_agent(ppo_cfg, rollouts)\n pth_time += delta_pth_time\n agent_update_time += delta_pth_time\n\n for k, v in running_episode_stats.items():\n window_episode_stats[k].append(v.clone())\n\n deltas = {\n k: (\n (v[-1] - v[0]).sum().item() if len(v) > 1 else v[0].sum().item()\n )\n for k, v in window_episode_stats.items()\n }\n deltas[\"count\"] = max(deltas[\"count\"], 1.0)\n\n writer.add_scalar(\n \"reward\", deltas[\"reward\"] / deltas[\"count\"], count_steps\n )\n\n # Check to see if there are any metrics\n # that haven't been logged yet\n metrics = {\n k: v / deltas[\"count\"]\n for k, v in deltas.items()\n if k not in {\"reward\", \"count\"}\n }\n if len(metrics) > 0:\n writer.add_scalars(\"metrics\", metrics, count_steps)\n\n losses = [value_loss, action_loss]\n\n writer.add_scalars(\n \"losses\",\n {k: l for l, k in zip(losses, [\"value\", \"policy\"])},\n count_steps,\n )\n\n # log stats\n if update > 0 and update % self.config.LOG_INTERVAL == 0:\n logger.info(\n \"update: {}\\tfps: {:.3f}\\t\".format(\n update, count_steps / (time.time() - t_start)\n )\n )\n logger.info(\n \"update: {}\\tenv-time: {:.3f}s\\tpth-time: {:.3f}s\\t\"\n \"forward-time: {:.3f}\\tagnet-update-time: {:.3f}\\t\"\n \"frames: {}\".format(\n update,\n env_time,\n pth_time,\n forward_time,\n agent_update_time,\n count_steps,\n )\n )\n\n logger.info(\n \"Average window size: {} {}\".format(\n len(window_episode_stats[\"count\"]),\n \" \".join(\n \"{}: {:.3f}\".format(k, v / deltas[\"count\"])\n for k, v in deltas.items()\n if k != \"count\"\n ),\n )\n )\n\n # checkpoint model\n if update % self.config.CHECKPOINT_INTERVAL == 0:\n self.save_checkpoint(\n f\"ckpt.{count_checkpoints}.pth\", dict(step=count_steps)\n )\n count_checkpoints += 1\n\n self.envs.close()", "def train(self):\n for name in self.network_names:\n if isinstance(name, str):\n net = getattr(self, 'net')\n net.train()", "def step(self, state, action, reward, next_state, done, timestamp):\r\n # Save experience\r\n self.memory.add(state, action, reward, next_state, done)\r\n # Learn (if there are enough samples in memory)\r\n if len(self.memory) > BATCH_SIZE and 
timestamp % LEARN_EVERY == 0:\r\n for _ in range(LEARN_NUMBER):\r\n experiences = self.memory.sample()\r\n self.learn(experiences, GAMMA)", "def train(self):\n self.emission_model(self.train_data)\n self.transition_model(self.train_data)", "def run(self):\n # Observe the game by randomly sampling actions from the environment\n # and performing those actions\n self.__observe__()\n for i in xrange(self.num_epochs):\n self.environment.resetStatistics()\n time_now = time.time()\n for j in xrange(self.train_steps_per_epoch):\n # Get action using epsilon-greedy strategy\n action = self.__sample_epsilon_action__()\n # Perform action based on epsilon-greedy search and store the transitions\n # in experience replay\n self.__supply_action_to_environment__(action)\n # If the environment is in the terminal state, reset the environment, and\n # perform self.stack_num actions to reset the environment\n self.isGameOver()\n if j % self.train_frequency == 0:\n # print \"Started training\"\n # Sample minibatch of size self.minibatch_size from experience replay\n minibatch = self.experience_replay.sample()\n minibatch_states, minibatch_action, minibatch_reward, minibatch_next_states, \\\n minibatch_terminals = minibatch\n cost = self.network.train_network(minibatch_states,\n minibatch_action,\n minibatch_reward,\n minibatch_terminals,\n minibatch_next_states)\n if j % self.record_frequency == 0:\n total_score, num_games = self.environment.getStatistics()\n avg_score = total_score / num_games\n self.network.record_average_qvalue(\n self.experience_replay.getCurrentState(),\n i * self.train_steps_per_epoch + j,\n self.epsilon, avg_score)\n # Epsilon annealing\n self.__anneal_epsilon__()\n # if self.time_step % 1000 == 0:\n # print \"Cost at iteration\", self.time_step, \" is\", cost\n # print \"Value of epsilon is\", self.epsilon\n self.steps += 1\n if j % self.copy_steps == 0:\n self.network.copy_weights()\n total_score, num_games = self.environment.getStatistics()\n time_taken = (time.time() - time_now)\n logger.info(\"Finished epoch %d: Steps=%d; Time taken=%.2f\",\n i, j, time_taken)\n logger.info(\"\\tNumber of games: %d; Average reward: %.2f\", num_games, (total_score / num_games))\n logger.info(\"\\tFinal epsilon value for epoch: %f\", self.epsilon)\n self.network.create_checkpoint()", "def step(self, prev_states, states, actions, rewards, next_states, dones):\n # Save experience / reward\n for prev_state, state, action, reward, next_state, done in zip(prev_states, states, actions, rewards,\n next_states, dones):\n self.memory.add(prev_state, state, action, reward, next_state, done)\n\n # Learn, if enough samples are available in memory\n self.update_count+=1\n if len(self.memory) > self.p.BATCH_SIZE and self.update_count>self.p.STEPS_BEFORE_LEARN:\n self.update_count=0\n for _ in range(self.p.NUM_LEARN_STEPS):\n experiences = self.memory.sample()\n self.learn(experiences, self.p.GAMMA)", "def learn(self, s, a, reward, sprime, done):\n pass", "def learn(self, s, a, reward, sprime, done):\n pass", "def _run(self):\n if not self.is_train:\n return self.test() \n\n logger.debug(\"Actor {} resuming at Step {}, {}\".format(self.actor_id, \n self.global_step.value(), time.ctime()))\n\n s = self.emulator.get_initial_state()\n \n s_batch = []\n a_batch = []\n y_batch = []\n bonuses = deque(maxlen=100)\n\n exec_update_target = False\n total_episode_reward = 0\n episode_ave_max_q = 0\n episode_over = False\n qmax_down = 0\n qmax_up = 0\n prev_qmax = -10*6\n low_qmax = 0\n ep_t = 0\n \n while 
(self.global_step.value() < self.max_global_steps):\n # Sync local learning net with shared mem\n self.sync_net_with_shared_memory(self.local_network, self.learning_vars)\n self.save_vars()\n\n rewards = []\n states = []\n actions = []\n local_step_start = self.local_step\n \n while not episode_over:\n logger.debug('steps: {} / {}'.format(self.global_step.value(), self.max_global_steps))\n # Choose next action and execute it\n a, readout_t = self.choose_next_action(s)\n\n new_s, reward, episode_over = self.emulator.next(a)\n total_episode_reward += reward\n\n current_frame = new_s[...,-1]\n bonus = self.density_model.update(current_frame)\n bonuses.append(bonus)\n\n if (self.actor_id == 0) and (self.local_step % 200 == 0):\n bonus_array = np.array(bonuses)\n logger.debug('Mean Bonus={:.4f} / Max Bonus={:.4f}'.format(\n bonus_array.mean(), bonus_array.max()))\n\n # Rescale or clip immediate reward\n # reward = self.rescale_reward(reward + bonus)\n reward = self.rescale_reward(reward)\n ep_t += 1\n \n rewards.append(reward)\n states.append(s)\n actions.append(a)\n \n s = new_s\n self.local_step += 1\n episode_ave_max_q += np.max(readout_t)\n \n global_step, update_target = self.global_step.increment(\n self.q_target_update_steps)\n\n if update_target:\n update_target = False\n exec_update_target = True\n\n if self.local_step % 4 == 0:\n self.batch_update()\n \n self.local_network.global_step = global_step\n\n else:\n mc_returns = list()\n running_total = 0.0\n for r in reversed(rewards):\n running_total = r + self.gamma*running_total\n mc_returns.insert(0, running_total)\n\n mixed_returns = self.cts_eta*np.array(rewards) + (1-self.cts_eta)*np.array(mc_returns)\n\n states.append(new_s)\n episode_length = len(rewards)\n for i in range(episode_length):\n self.replay_memory.append((\n states[i],\n actions[i],\n mixed_returns[i],\n states[i+1],\n i+1 == episode_length))\n\n \n if exec_update_target:\n self.update_target()\n exec_update_target = False\n # Sync local tensorflow target network params with shared target network params\n if self.target_update_flags.updated[self.actor_id] == 1:\n self.sync_net_with_shared_memory(self.target_network, self.target_vars)\n self.target_update_flags.updated[self.actor_id] = 0\n\n s, total_episode_reward, _, ep_t, episode_ave_max_q, episode_over = \\\n self.prepare_state(s, total_episode_reward, self.local_step, ep_t, episode_ave_max_q, episode_over)", "def learn(\n flags,\n actor_model, # single actor model with shared memory? 
Confirm that?\n model,\n batch,\n initial_agent_state,\n optimizer,\n scheduler,\n lock=threading.Lock(), # noqa: B008\n):\n with lock:\n \n learner_outputs = model.learner_step(batch, initial_agent_state) \n \n # Take final value function slice for bootstrapping.\n bootstrap_value = learner_outputs[\"baseline_trg\"][-1] # V_learner(s_T)\n entropy = learner_outputs['entropy']\n \n #rearranged_batch = {}\n #rearranged_batch['done'] = batch['done'][:-1] # done_{0}, ..., done_{T-1}\n #rearranged_batch['done'] = batch['done'][1:]\n #rearranged_batch['bootstrap'] = batch['bootstrap'][1:]\n #rearranged_batch['reward'] = batch['reward'][1:] # reward_{0}, ..., reward_{T-1}\n #rearranged_batch['log_prob'] = batch['log_prob'][:-1] # log_prob_{0}, ..., log_prob_{T-1}\n \n # gets [log_prob_{0}, ..., log_prob_{T-1}] and [V_{0},...,V_{T-1}]\n learner_outputs = {key: tensor[:-1] for key, tensor in learner_outputs.items() if key != 'entropy'}\n\n rewards = batch['reward'][1:]\n if flags.reward_clipping == \"abs_one\":\n clipped_rewards = torch.clamp(rewards, -1, 1)\n elif flags.reward_clipping == \"none\":\n clipped_rewards = rewards\n\n #discounts = (~rearranged_batch[\"done\"]).float() * flags.discounting # 0 if done, gamma otherwise\n\n vtrace_returns = vtrace.from_logits(\n behavior_action_log_probs=batch['log_prob'][:-1], # actor\n target_action_log_probs=learner_outputs[\"log_prob\"], # learner\n not_done=(~batch['done'][1:]).float(),\n bootstrap=batch['bootstrap'][1:],\n gamma=flags.discounting,\n rewards=clipped_rewards,\n values=learner_outputs[\"baseline\"],\n values_trg=learner_outputs[\"baseline_trg\"],\n bootstrap_value=bootstrap_value, # coming from the learner too\n )\n\n pg_loss = compute_policy_gradient_loss(\n learner_outputs[\"log_prob\"],\n vtrace_returns.pg_advantages,\n )\n \n baseline_loss = flags.baseline_cost * compute_baseline_loss(\n vtrace_returns.vs - learner_outputs[\"baseline\"]\n )\n\n entropy_loss = flags.entropy_cost * entropy\n total_loss = pg_loss + baseline_loss + entropy_loss\n # not every time we get an episode return because the unroll length is shorter than the episode length, \n # so not every time batch['done'] contains some True entries\n episode_returns = batch[\"episode_return\"][batch[\"done\"]] # still to check, might be okay\n stats = {\n \"episode_returns\": tuple(episode_returns.cpu().numpy()),\n \"mean_episode_return\": torch.mean(episode_returns).item(),\n \"total_loss\": total_loss.item(),\n \"pg_loss\": pg_loss.item(),\n \"baseline_loss\": baseline_loss.item(),\n \"entropy_loss\": entropy_loss.item(),\n }\n\n optimizer.zero_grad()\n total_loss.backward()\n nn.utils.clip_grad_norm_(model.parameters(), flags.grad_norm_clipping)\n optimizer.step()\n if flags.optim == \"RMSprop\":\n scheduler.step()\n actor_model.load_state_dict(model.state_dict())\n return stats", "def _rollout_an_episode(self):\n self._steps = 0\n me_id = self._learning_agent_id # short name\n oppo_id = self._oppo_agent_id # short name\n logger.log('episode begins with the task: {}'.format(str(self.task)))\n\n # passing me and oppo hyperparams to the arena interface\n assert self.task.hyperparam is not None\n logger.log('pulling oppo hyperparam of model key {}'.format(\n self.task.model_key2))\n oppo_hyperparam = self._model_pool_apis.pull_attr(attr='hyperparam',\n key=self.task.model_key2)\n logger.log('Done pulling oppo hyperparam')\n oppo_inter_kwargs = ({} if oppo_hyperparam is None\n else oppo_hyperparam.__dict__)\n inter_kwargs = ([self.task.hyperparam.__dict__]\n + 
[oppo_inter_kwargs] * (self.n_agents - 1))\n\n # agent, env reset\n obs = self.env.reset(inter_kwargs=inter_kwargs)\n for agt, ob in zip(self.agents, obs):\n agt.reset(ob)\n self._update_agents_model(self.task) # for agent Neural Net parameters\n\n me_reward_sum = 0.0\n self.time_beg = time.time()\n self._update_hyperparam(self.task)\n self._changed_task = False\n while True:\n self._steps += 1\n # predictions for each agent\n predictions = self._parallel.run((self._agent_pred, ob, i)\n for i, ob in enumerate(obs))\n me_prediction = predictions[me_id]\n me_action, extra_vars = me_prediction[me_id], me_prediction[oppo_id:]\n # predicted actions for each agent\n actions = [me_action] + [other_action\n for other_action in predictions[oppo_id:]]\n # book-keep obs in previous step\n last_obs = obs\n\n # agent-env interaction\n obs, reward, done, info = self.env.step(actions)\n\n me_rwd_scalar = self._reward_shape(reward[me_id])\n me_reward_sum += me_rwd_scalar\n\n if self._enable_push:\n # put the interested data (obs, rwd, act, ... for each agent) into the\n # _data_queue, which is watched in another Thread (the _push_data_to_learner()\n # method) that the data are dequeued and sent to remote Learner\n if self._data_queue.full():\n logger.log(\"Actor's queue is full.\", level=logger.WARN)\n rwd_to_push = (me_rwd_scalar if self.rwd_shape\n else np.asarray(reward[me_id], np.float32))\n if self.use_oppo_obs:\n if isinstance(extra_vars, tuple):\n extra_vars += (self.agents[self._oppo_agent_id]._last_state,)\n else:\n extra_vars.append(self.agents[self._oppo_agent_id]._last_state)\n data_tuple = (last_obs, tuple(actions), rwd_to_push, info, done, extra_vars)\n self._data_queue.put(data_tuple)\n logger.log('successfully put one tuple.', level=logger.DEBUG)\n\n if self._steps % self._log_interval_steps == 0:\n logger.log('_rollout_an_episode,', 'steps: {},'.format(self._steps),\n 'data qsize: {}'.format(self._data_queue.qsize()))\n\n if done:\n # an episode ends\n if self._replay_dir:\n self._save_replay()\n self.log_kvs(me_reward_sum, info)\n if self._changed_task:\n return None, info\n else:\n return self.log_outcome(info), info\n\n if self._update_model_freq and self._steps % self._update_model_freq == 0:\n # time to update the model for each agent\n if (self._enable_push and\n self._model_pool_apis.pull_attr(\n 'freezetime', self.task.model_key1) is not None):\n # Current task (learning period) finishes, start a new task or continue\n self._finish_task(self.task, None) # notify early abort\n last_task = self.task\n self.task = self._request_task() # try to continue\n if not is_inherit(last_task.model_key1, self.task.model_key1):\n self.log_kvs(me_reward_sum, info)\n return None, info\n if last_task.model_key2 != self.task.model_key2:\n self._changed_task = True\n self._update_agents_model(self.task)", "def run(self) -> None:\n self.model = self.trainer.train_model(self.model, self.data)", "def finish_learning(self):\n pass" ]
[ "0.81697196", "0.7225745", "0.70513433", "0.67381495", "0.6667428", "0.6628232", "0.65659773", "0.6510419", "0.64319664", "0.6419689", "0.6391004", "0.63875294", "0.6357127", "0.63503945", "0.6346165", "0.6324643", "0.62902844", "0.62848055", "0.6270551", "0.6256862", "0.62476563", "0.62450373", "0.62204736", "0.6190963", "0.6169547", "0.61635685", "0.61489296", "0.6119205", "0.61173826", "0.60972905", "0.6096169", "0.60863876", "0.60792977", "0.6072403", "0.6064492", "0.6062814", "0.6056369", "0.6056369", "0.6044813", "0.6025875", "0.60253143", "0.60248655", "0.60137206", "0.6004286", "0.5997148", "0.5993468", "0.59895855", "0.5978242", "0.5967791", "0.5962376", "0.5948296", "0.5948296", "0.5944163", "0.59352356", "0.5919318", "0.59039336", "0.58955306", "0.58929795", "0.58904994", "0.58902955", "0.5882618", "0.5882505", "0.58817226", "0.5880649", "0.5874567", "0.5871902", "0.5869915", "0.5869689", "0.5869368", "0.5848119", "0.5845205", "0.58421093", "0.58418775", "0.5841847", "0.58411324", "0.58407855", "0.58350956", "0.5831381", "0.58265513", "0.582638", "0.5823876", "0.58180267", "0.58176106", "0.581588", "0.58128726", "0.5811501", "0.5808226", "0.5801693", "0.57983685", "0.5797442", "0.57953656", "0.57900316", "0.57888275", "0.5785801", "0.5785801", "0.5777524", "0.5777191", "0.57662153", "0.5753927", "0.57439435" ]
0.68872505
3
Fills up the ReplayBuffer memory with PRETRAIN_LENGTH number of experiences before training begins.
def initialize_memory(self, pretrain_length, env): if self.memlen >= pretrain_length: print("Memory already filled, length: {}".format(len(self.memory))) return interval = max(10, int(pretrain_length/25)) print("Initializing memory buffer.") obs = env.states while self.memlen < pretrain_length: actions = np.random.uniform(-1, 1, (self.agent_count, self.action_size)) next_obs, rewards, dones = env.step(actions) self.store((obs, next_obs, actions, rewards, dones)) obs = next_obs if np.any(dones): env.reset() obs = env.states self.memory.init_n_step() if self.memlen % interval == 1 or self.memlen >= pretrain_length: print("...memory filled: {}/{}".format(self.memlen, pretrain_length)) print("Done!")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def populate_buffer(self, num_transitions):\n while len(self.replay_buffer) < self.buffer_sample_size:\n self.play(num_transitions)", "def collect_samples(self):\n self.replay_buffer = self.collect_initial_batch(\n self.replay_buffer, self.acm_pre_train_samples\n )", "def prepare_data(self, train_data, **kwargs):\n data_len = len(train_data[\"done\"])\n for index in range(data_len):\n if self.multi_step == 1:\n self.buff.add(train_data[\"cur_state\"][index],\n train_data[\"action\"][index],\n train_data[\"reward\"][index],\n train_data[\"next_state\"][index],\n float(train_data[\"done\"][index])) # Add replay buffer", "def _preallocate_samples(self):\n self.prealloc_samples_ = []\n for _ in range(self.num_prealloc_samples_):\n self.prealloc_samples_.append(self.sample())", "def __init__(self,buffer_size,state_dim,action_dim,random_seed=123):\n print(\"Creating Replay Buffer object\")\n self.buffer_size=buffer_size\n self.state_dim=state_dim\n self.action_dim=action_dim\n self.pointer=0\n self.states=np.zeros(shape=[buffer_size,state_dim])\n self.actions=np.zeros(shape=[buffer_size,action_dim])\n self.rewards=np.zeros(shape=[buffer_size,1])\n self.dones=np.zeros(shape=[buffer_size,1])\n self.next_states=np.zeros(shape=[buffer_size,state_dim])\n self.filled=False\n \n random.seed(random_seed)", "def __init__(self, buffer_size):\n self.num_experiences = 0\n self.buffer = deque(maxlen=buffer_size)", "def setup_training(self):\n print('setup training called')\n self.steps_done = 0\n self.current_episode_num = 1\n self.total_reward = 0\n\n # self.optimizer = optim.RMSprop(policy_net.parameters())\n self.memory = ReplayMemory(300000)\n self.total_reward_history = []\n # self.loss_history = []\n self.positions = []\n self.n_destroyed_crates = 0\n self.is_in_bomb_range = False", "def initBuffer(self, env):\n cnt = 0\n while len(self.memory) < self.memory.capacity:\n cnt += 1\n print(\"\\rWarmup Buffer [{:d}]\".format(cnt), end=\"\")\n s = env.reset()\n actionIdx, actionIdxTuple = self.select_action(s, explore=True)\n s_, r, done, info = env.step(actionIdxTuple)\n self.store_transition(s, actionIdx, r, s_, info)\n print(\"\\n => Warmup Buffer Ends\")", "def train_experience_replay(self, epochs, batch_size, iterations_per_epoch, capacity, n_obs, **kwargs):\n\n # Initialize losses dictionary and memory replay buffer\n losses = dict()\n mem = MemoryReplayBuffer(capacity)\n\n for ep in range(1, epochs+1):\n losses[ep] = []\n with tqdm(total=iterations_per_epoch, desc='Training epoch {}'.format(ep)) as p_bar:\n\n for it in range(1, iterations_per_epoch+1):\n \n # Determine n_obs and generate data on-the-fly\n if type(n_obs) is int:\n n_obs_it = n_obs\n else:\n n_obs_it = n_obs()\n # Simulate and add to buffer\n params, sim_data = self._forward_inference(batch_size, n_obs_it, **kwargs)\n mem.store(params, sim_data)\n\n # Sample from buffer\n params, sim_data = mem.sample()\n\n # One step backprop\n loss = self._train_step(params, sim_data)\n \n # Store loss into dictionary\n losses[ep].append(loss)\n\n # Update progress bar\n p_bar.set_postfix_str(\"Epoch {0},Iteration {1},Loss: {2:.3f},Running Loss: {3:.3f}\"\n .format(ep, it, loss, np.mean(losses[ep])))\n p_bar.update(1)\n\n # Store after each epoch, if specified\n if self.manager is not None:\n self.manager.save()\n return losses", "def collect_initial_batch(\n self, buffer: ReplayBufferAcM, samples_no: int\n ) -> ReplayBufferAcM:\n collected = 0\n while collected < samples_no:\n obs = self.env.reset()\n end = False\n obs = torch.tensor(obs, 
dtype=torch.float32, device=self.device)\n obs = obs.unsqueeze(0)\n\n prev_idx = buffer.add_obs(obs)\n ep_len = 0\n\n while not end:\n action = AcMTrainer.initial_act(self, obs)\n action_tensor = torch.tensor(action).unsqueeze(0)\n obs, rew, end, _ = self.env.step(action)\n ep_len += 1\n\n obs = torch.tensor(obs, dtype=torch.float32, device=self.device)\n obs = obs.unsqueeze(0)\n\n next_idx = buffer.add_obs(obs)\n buffer.add_timestep(prev_idx, next_idx, action_tensor)\n prev_idx = next_idx\n collected += 1\n return buffer", "def _build_replay_buffer(self):\n return prioritized_replay_buffer.OutOfGraphPrioritizedReplayBuffer(\n observation_shape=self.observation_shape,\n stack_size=self.stack_size,\n update_horizon=self.update_horizon,\n gamma=self.gamma,\n observation_dtype=self.observation_dtype)", "def replay(self):\n # Start only have enough memories\n if len(self.memory) < self.train_start:\n return\n\n batch_size = min(self.batch_size, len(self.memory))\n\n # Use mini_batch, sampling form the memory\n mini_batch = random.sample(self.memory, batch_size)\n\n # Since we are suing batch, we need to collect input and target\n input_update = np.zeros((batch_size, self.input_shape[0]))\n target_update = np.zeros((batch_size, self.output_num))\n\n for i in range(batch_size):\n state, action, reward, next_state, done = mini_batch[i]\n target = self.model.predict(state)[0]\n\n # Add future discounted reward\n if not done:\n # Use target_model here, because we want to keep the weights\n # not changing in one complete game\n target[action] = (1 - ALPHA) * reward + ALPHA * \\\n (self.gamma * np.amax(self.target_model.\n predict(next_state)[0]))\n else:\n target[action] = reward\n\n # Record the info into batch collection\n input_update[i] = state\n target_update[i] = target\n\n # Update model (also use a batch)\n self.model.fit(input_update, target_update, batch_size=batch_size,\n epochs=1, verbose=0)", "def initialize_replay_memory(n_steps: int, env: gym.Env, replay_memory: ReplayMemory) -> None:\n obs = env.reset()\n for _ in range(n_steps):\n a = env.action_space.sample()\n obs2, r, d, _ = env.step(a)\n\n replay_memory.store(obs, a, r, obs2, d)\n\n # For next iteration\n obs = env.reset() if d else obs2", "def _train_step(self):\n if self._replay.add_count > self.min_replay_history:\n if self.training_steps % self.update_period == 0:\n self._sample_from_replay_buffer()\n\n if self._replay_scheme == 'prioritized':\n # The original prioritized experience replay uses a linear exponent\n # schedule 0.4 -> 1.0. 
Comparing the schedule to a fixed exponent of\n # 0.5 on 5 games (Asterix, Pong, Q*Bert, Seaquest, Space Invaders)\n # suggested a fixed exponent actually performs better, except on Pong.\n probs = self.replay_elements['sampling_probabilities']\n loss_weights = 1.0 / jnp.sqrt(probs + 1e-10)\n loss_weights /= jnp.max(loss_weights)\n else:\n loss_weights = jnp.ones(self.replay_elements['state'].shape[0])\n\n self._rng, self.optimizer, loss, mean_loss= train(\n self.network_def,\n self.target_network_params,\n self.optimizer,\n self.replay_elements['state'],\n self.replay_elements['action'],\n self.replay_elements['next_state'],\n self.replay_elements['reward'],\n self.replay_elements['terminal'],\n loss_weights,\n self._target_opt,\n self.num_tau_samples,\n self.num_tau_prime_samples,\n self.num_quantile_samples,\n self.cumulative_gamma,\n self.double_dqn,\n self.kappa,\n self._tau,\n self._alpha,\n self._clip_value_min,\n self._num_actions,\n self._rng)\n\n if self._replay_scheme == 'prioritized':\n # Rainbow and prioritized replay are parametrized by an exponent\n # alpha, but in both cases it is set to 0.5 - for simplicity's sake we\n # leave it as is here, using the more direct sqrt(). Taking the square\n # root \"makes sense\", as we are dealing with a squared loss. Add a\n # small nonzero value to the loss to avoid 0 priority items. While\n # technically this may be okay, setting all items to 0 priority will\n # cause troubles, and also result in 1.0 / 0.0 = NaN correction terms.\n self._replay.set_priority(self.replay_elements['indices'],\n jnp.sqrt(loss + 1e-10))\n\n\n if (self.summary_writer is not None and\n self.training_steps > 0 and\n self.training_steps % self.summary_writing_frequency == 0):\n summary = tf.compat.v1.Summary(value=[\n tf.compat.v1.Summary.Value(tag='ImplicitLoss',\n simple_value=mean_loss)])\n self.summary_writer.add_summary(summary, self.training_steps)\n if self.training_steps % self.target_update_period == 0:\n self._sync_weights()\n\n self.training_steps += 1", "def _build_replay_buffer(self):\n if self._replay_scheme not in ['uniform', 'prioritized']:\n raise ValueError('Invalid replay scheme: {}'.format(self._replay_scheme))\n # Both replay schemes use the same data structure, but the 'uniform' scheme\n # sets all priorities to the same value (which yields uniform sampling).\n return prioritized_replay_buffer.OutOfGraphPrioritizedReplayBuffer(\n observation_shape=self.observation_shape,\n stack_size=self.stack_size,\n update_horizon=self.update_horizon,\n gamma=self.gamma,\n observation_dtype=self.observation_dtype)", "def initialize_replay_memory(self, size):\n if self.replay_memory:\n self.train_logger.info('INFO: Replay memory already initialized')\n return\n\n assert size >= self.batch_size, \"Initialize with size >= batch size\"\n\n old_state = self.env.reset()\n for i in range(size):\n action = random.choice(self.valid_actions)\n new_state, reward, done, _ = self.env.step(action)\n reward = np.clip(self.scale * reward, -self.clip, self.clip)\n self.replay_memory.append(transition(old_state, action,\n reward, new_state, done))\n\n if done:\n old_state = self.env.reset()\n else:\n old_state = new_state\n\n self.train_logger.info(f'INFO: Replay memory initialized with {size} experiences')", "def experience_replay(self):\n s,a,r,sp,done = self.memory.sample(self.batch_size)\n # TODO: 5 lines missing.\n raise NotImplementedError(\"\")\n self.Q.fit(s, target=target)", "def learn(self):\n\n if (len(self._replay_buffer) < self._batch_size or\n 
len(self._replay_buffer) < self._min_buffer_size_to_learn):\n return None\n # print(len(self._replay_buffer), self._min_buffer_size_to_learn)\n\n cum_v_loss = 0.\n cum_q_loss = 0.\n\n # ---------------- compute all targets (not necessary)---------------#\n # vectorize may have problem.\n self._replay_buffer.vectorize(frame_buffer=0, n_step_size=N_STEP_SIZE, gamma=GAMMA)\n # print(self._replay_buffer.idcs, self._replay_buffer.n_step)\n # change to getting all target values first\n all_tar_v, all_tar_q, q_plus = self.__compute_targets(self._sampled_indices)\n # if self._replay_buffer.n_step.shape == (0,):\n # return\n all_v_mb, all_q_mb = self._session.run([all_tar_v, all_tar_q],\n feed_dict={self._info_state_ph: self._replay_buffer.obs,\n self._n_step_ph: self._replay_buffer.n_step[self._replay_buffer.idcs],\n self._action_ph: self._replay_buffer.actions})\n\n # ----------------every iteration, sample and compute loss-------------#\n for i_iter in range(self.iteration):\n # Sample (get states & ...)\n mb_idcs = np.random.choice(len(self._replay_buffer), self._batch_size)\n mb_info_state, _, mb_actions, *_ = self._replay_buffer[mb_idcs] # problem\n\n mb_est_rew_w = self._replay_buffer.est_rew_weights[self._replay_buffer.idcs[mb_idcs]] # problem\n mb_est_non_zero = np.nonzero(mb_est_rew_w)\n n_step = self._replay_buffer.n_step[mb_idcs]\n\n if len(mb_est_non_zero[0]): # the array is not empty\n mb_est_non_zero = np.squeeze(mb_est_non_zero)\n mb_est_rew_idcs = (self._replay_buffer.idcs[mb_idcs][mb_est_non_zero] +\n self._replay_buffer.n_step_size).reshape(-1)\n\n # mb_v_prime_obs = next_info_state_ph\n mb_v_prime_obs, _, _, *_ = self._replay_buffer[mb_est_rew_idcs]\n else:\n mb_v_prime_obs = np.zeros((32, mb_info_state[0].size))\n\n # if self.player_id == 0:\n # print(\"In Learn Test Start: {}#################\".format(i_iter))\n # print(self.player_id)\n # print(mb_info_state.shape)\n # print(mb_actions.shape)\n # print(mb_v_prime_obs.shape)\n # print(n_step.shape)\n # print(mb_est_rew_w)\n # print(mb_est_non_zero)\n # print(\"In\n # Test End: {}###################\".format(i_iter))\n tar_v_mb = all_v_mb[mb_idcs]\n tar_q_mb = all_q_mb[mb_idcs]\n # print(\"In iteration: {}\".format(i_iter))\n # print(tar_v_mb.shape, tar_q_mb.shape)\n\n loss, _, v_loss, q_loss = self._session.run(\n [self._loss, self._learn_step, self.v_loss, self.q_loss],\n feed_dict={\n self._info_state_ph: mb_info_state,\n self._action_ph: mb_actions,\n self._next_info_state_ph: mb_v_prime_obs,\n self._mb_est_rew_ph: mb_est_rew_w,\n self._tar_q_ph: tar_q_mb,\n self._tar_v_ph: tar_v_mb\n })\n\n self._session.run(self._update_target_network)\n\n cum_q_loss += q_loss\n cum_v_loss += v_loss\n\n if (i_iter + 1) % (self.iteration / 10) == 0.:\n # print loss\n mean_v_loss = (cum_v_loss / int(self.iteration / 10))\n mean_q_loss = (cum_q_loss / int(self.iteration / 10))\n print(\"interation: {}, v_loss: {:.6f}, q_loss: {:.6f}\".format(\n i_iter + 1, mean_v_loss, mean_q_loss), end='\\r')\n if (i_iter + 1) == self.iteration:\n cum_v_loss = mean_v_loss\n cum_q_loss = mean_q_loss\n else:\n cum_v_loss = 0.0\n cum_q_loss = 0.0\n\n # transitions = self._replay_buffer.sample(self._batch_size)\n # info_states = [t.info_state for t in transitions]\n # actions = [t.action for t in transitions]\n # rewards = [t.reward for t in transitions]\n # next_info_states = [t.next_info_state for t in transitions]\n # are_final_steps = [t.is_final_step for t in transitions]\n # legal_actions_mask = [t.legal_actions_mask for t in transitions]\n # loss, _ = 
self._session.run(\n # [self._loss, self._learn_step],\n # feed_dict={\n # self._info_state_ph: info_states,\n # self._action_ph: actions,\n # self._reward_ph: rewards,\n # self._is_final_step_ph: are_final_steps,\n # self._next_info_state_ph: next_info_states,\n # self._legal_actions_mask_ph: legal_actions_mask,\n # })\n self._last_loss_value = [cum_v_loss, cum_q_loss]\n self.replay_buffer.clear()\n return [cum_v_loss, cum_q_loss]", "def __init__(self, size, train_frequency, avoid_episode_crossing=False,\n **kwargs):\n super().__init__(**kwargs)\n self.avoid_episode_crossing = avoid_episode_crossing\n self.size = size\n # A 'linear' history of all samples, in the order they were added.\n # Note these are just references to the same sample in the main\n # 'buffer' in the parent class, there is no RAM duplication here.\n # This defines the order samples are removed from the replay buffer\n self.linear_history = deque()\n self.train_frequency = train_frequency\n # This signifies how much train quota we have, for each sample\n # received this increases by 'train_frequency' and decreases on every\n # sample trained\n self.train_quota = 0", "def _build_replay_buffer(self, use_staging):\n return circular_replay_buffer.WrappedReplayBuffer(\n observation_shape=self.observation_shape,\n stack_size=self.stack_size,\n use_staging=use_staging,\n update_horizon=self.update_horizon,\n observation_dtype=self.observation_dtype.as_numpy_dtype)", "def __init__(self, num_prealloc_samples=0):\n self.num_prealloc_samples_ = num_prealloc_samples\n if self.num_prealloc_samples_ > 0:\n self._preallocate_samples()", "def __init__(self):\n self.replay_buffer = deque()", "def __init__(\n self, brain, reward_buff_cap, trainer_parameters, training, load, seed, run_id\n ):\n super().__init__(brain, trainer_parameters, training, run_id, reward_buff_cap)\n self.param_keys = [\n \"batch_size\",\n \"beta\",\n \"buffer_size\",\n \"epsilon\",\n \"hidden_units\",\n \"lambd\",\n \"learning_rate\",\n \"max_steps\",\n \"normalize\",\n \"num_epoch\",\n \"num_layers\",\n \"time_horizon\",\n \"sequence_length\",\n \"summary_freq\",\n \"use_recurrent\",\n \"summary_path\",\n \"memory_size\",\n \"model_path\",\n \"reward_signals\",\n ]\n self.check_param_keys()\n\n # Make sure we have at least one reward_signal\n if not self.trainer_parameters[\"reward_signals\"]:\n raise UnityTrainerException(\n \"No reward signals were defined. At least one must be used with {}.\".format(\n self.__class__.__name__\n )\n )\n\n self.step = 0\n self.policy = PPOPolicy(seed, brain, trainer_parameters, self.is_training, load)\n\n stats = defaultdict(list)\n # collected_rewards is a dictionary from name of reward signal to a dictionary of agent_id to cumulative reward\n # used for reporting only. 
We always want to report the environment reward to Tensorboard, regardless\n # of what reward signals are actually present.\n self.collected_rewards = {\"environment\": {}}\n for _reward_signal in self.policy.reward_signals.keys():\n self.collected_rewards[_reward_signal] = {}\n\n self.stats = stats\n\n self.training_buffer = Buffer()\n self.episode_steps = {}", "def _initialize_buffers(self) -> None:", "def setup_training(self):\n self.transitions = deque(maxlen=TRANSITION_HISTORY_SIZE)\n self.total_rewards = []\n self.rewards = []\n self.steps = []\n self.average_rewards = []\n self.average_steps = []\n self.model = initialize_model()\n self.invalid_actions = 0\n self.average_invalid_actions = []\n self.total_invalid_actions = []", "def build_replay_buffer(agent, batch_size, steps_per_loop):\n buf = tf_uniform_replay_buffer.TFUniformReplayBuffer(\n data_spec=agent.policy.trajectory_spec,\n batch_size=batch_size,\n max_length=steps_per_loop)\n return buf", "def training_start(self, dataloader):\n self.datasize = len(dataloader)", "def fillBuffer():\n buff[bufferCounter].next = dataIn", "def fit(self, env, num_iterations, max_episode_length=None):\n print ('initializing replay memory...')\n sys.stdout.flush()\n self.mode = 'init'\n self.memory.clear()\n self.preprocessor.reset()\n self.num_steps = 0\n num_updates = 0\n num_episodes = 0\n while num_updates < num_iterations:\n state = env.reset()\n self.preprocessor.reset()\n num_episodes += 1\n t = 0\n total_reward = 0\n while True:\n self.num_steps +=1\n t += 1\n action, _ = self.select_action(state)\n next_state, reward, is_terminal, debug_info = env.step(action)\n\n reward = self.preprocessor.process_reward(reward)\n total_reward += reward\n\n preprocessed_state = self.preprocessor.process_state_for_memory(state)\n\n self.memory.append(preprocessed_state, action, reward, is_terminal)\n\n if self.num_steps > self.num_burn_in:\n if self.mode != 'train':\n print('Finish Burn-in, Start Training!')\n\n self.mode = 'train'\n if self.num_steps % self.train_freq == 0:\n self.update_predict_network()\n num_updates += 1\n if num_updates % 10000 == 0:\n self.q_network.save_weights('%s/model_weights_%d.h5' % (self.save_path, num_updates // 10000))\n \n if is_terminal or (max_episode_length is not None and t > max_episode_length):\n break\n \n state = next_state\n #print ('episode %d ends, lasts for %d steps (total steps:%d), gets $d reward. 
(%d/%d updates.)' % (num_episodes, t, self.))", "def main():\n timer_total_start = time.time()\n rospy.init_node(\"ReplayBufferFiller\")\n rospy.loginfo(\"----- Replay Buffer Filler -----\")\n\n ground_list = [\"water1\",\n \"water2\",\n \"water3\",\n \"water4\",\n \"water5\",\n \"water6\",\n \"water7\",\n \"water8\",\n \"water9\",\n \"water10\"]\n\n replay_memory_size = 400000\n replay_buffer_path = \"./replay_buffer.pickle\"\n # replay_buffer_path_positive = \"./replay_buffer_positive.pickle\"\n # replay_buffer_path_negative = \"./replay_buffer_negative.pickle\"\n replay_buffer = ExperienceReplayBuffer(capacity=replay_memory_size)\n # replay_buffer_positive = ExperienceReplayBuffer(capacity=replay_memory_size)\n # replay_buffer_negative = ExperienceReplayBuffer(capacity=replay_memory_size)\n # Load the Replay buffer from file or accumulate experiences\n if(os.path.isfile(replay_buffer_path) == True): \n print(\"Replay buffer loading from file: \" +\n str(replay_buffer_path))\n replay_buffer.load(replay_buffer_path)\n else:\n\t print('No buffer_1 found')\n\n # if(os.path.isfile(replay_buffer_path_positive) == True): \n # print(\"Replay buffer loading from file: \" +\n # str(replay_buffer_path_positive))\n # replay_buffer_positive.load(replay_buffer_path_positive)\n # else:\n\t# print('No buffer_2 found') \n\n # if(os.path.isfile(replay_buffer_path_negative) == True): \n # print(\"Replay buffer loading from file: \" +\n # str(replay_buffer_path_negative))\n # replay_buffer_negative.load(replay_buffer_path_negative)\n # else:\n\t# print('No buffer_2 found') \n \n \n # Create a subscriber fot the greyscale image\n rospy.Subscriber(\"/quadrotor/ardrone/bottom/ardrone/bottom/image_raw\", ROSImage, image_callback)\n\n images_stack_size = 4\n tot_steps = 3000000 # finite-horizont simulation\n frame_preliminary = 0\n\n saving_every_tot_experiences = 2500\n is_buffer_saved = True\n\n noop_time = 2.0 # pause in seconds between actions\n steps_per_episodes = 30\n #saving_every_tot_experiences = 450 #TODO SET TO 250 JUST FOR TEST\n #r = rospy.Rate(10) # 10hz\n num_ground_plane = 15\n frame_per_ground_plane = int(replay_memory_size / num_ground_plane)\n frame_per_ground_plane = 3125 #!M positive / 4 classes / 10 grounds / 8 transformations\n actual_ground_index = 0\n episode_per_ground = 50\n #ground_counter = replay_buffer_positive.return_size() / frame_per_ground_plane\n ground_counter = 1\n positive_experience_counter = 0\n positive_experience_print_episode = 50\n old_positive_experience_counter = 0\n total_experience_counter = 0.0\n old_total_experience_counter = 0.0001\n episode = 1\n wrong_altitude = False\n quadrotor_pose = ModelState()\n quadrotor_pose.model_name = \"quadrotor\"\n quadrotor_pose.reference_frame = \"world\"\n while True:\n # if replay_buffer_positive.return_size() >= replay_memory_size:\n # break\n\n # if replay_buffer_positive.return_size() <= ground_counter * frame_per_ground_plane and episode != 1:\n # pass\n # else:\n # print ground_counter\n # generate_new_world(ground_list, ground_counter)\n # ground_counter = ground_counter + 1\n if(ground_counter < episode_per_ground) and episode != 1:\n ground_counter = ground_counter + 1\n else:\n ground = choose_random_ground(ground_list)\n generate_new_world(ground, ground_list)\n ground_counter = 1\n\n cumulated_reward = 0\n print \"\"\n print \"Preliminary Episode: \" + str(episode)\n print \"Ground counter value: \" + str(ground_counter)\n # Reset UAV at random pose\n reset_pose()\n send_action('stop')\n rospy.sleep(3.0)\n 
#get_image()\n image_t = _last_image\n # When the replay buffer is empty, fill it with the same picture 4\n # times\n image_t = np.stack([image_t] * images_stack_size, axis=2) # create a stack of X images\n timer_start = time.time()\n actual_time = rospy.get_rostime()\n \trospy_start_time = actual_time.secs + actual_time.nsecs / 1000000000.0\n frame_episode = 0\n \n done_reward = get_done_reward()\n update_quadrotor_pose(quadrotor_pose, done_reward)\n \n for step in range(tot_steps):\n # Execute a random action in the world and observe the reward and\n # state_t1.\n action = get_random_action()\n send_action(action)\n if action == \"descend\":\n # setpoint = round( quadrotor_pose.pose.position.z ) - 0.8\n # while True:\n # done_reward = get_done_reward()\n # update_quadrotor_pose(quadrotor_pose, done_reward)\n # if quadrotor_pose.pose.position.z < setpoint + 0.05 and quadrotor_pose.pose.position.z > setpoint - 0.05:\n # print \"Setpoint: \" + str(setpoint)\n # send_action(\"stop\")\n # rospy.sleep(2.0)\n # break\n rospy.sleep(5.0)\n send_action(\"stop\")\n rospy.sleep(1.0)\n #quadrotor_pose.pose.position.z = adjust_altitude(quadrotor_pose.pose.position.z)\n #set_pose(quadrotor_pose)\n else:\n #print \"Action taken: \" + action\n #send_action(action)\n rospy.sleep(noop_time)\n # Acquire a new frame and convert it in a numpy array\n image_t1 = _last_image\n done_reward = get_done_reward()\n send_action(\"stop\") #NOTE: moved here to fix problem with baricenter (partially reduced)\n\n # Get the reward and done status\n\n reward = done_reward.reward\n done = done_reward.done\n print \"Step(\" + str(step) + \"), Action: \" + action + \", Altitude: \" + str(done_reward.z) + \", Reward: \" + str(reward)\n wrong_altitude = done_reward.wrong_altitude\n if wrong_altitude == True:\n rospy.logerr(\"[ERROR] Wrong altitude!\")\n # Calculate the new cumulated_reward\n cumulated_reward += reward\n # state_t1, reward, done, info = env.step(action)\n image_t1 = np.expand_dims(image_t1, 2)\n # stack the images\n image_t1 = np.append(image_t[:, :, 1:], image_t1, axis=2)\n # Store the experience in the replay buffer\n if reward > 0:\n if action == \"descend\":\n # replay_buffer_positive.add_experience(image_t, action, reward, image_t1, done)\n # is_buffer_saved = False\n pass\n else:\n rospy.logerr(\"[POSITIVE]Wrong action for positive reward: %s\", action)\n elif reward == -1.0:\n if action == \"descend\":\n # replay_buffer_negative.add_experience(image_t, action, reward, image_t1, done)\n pass\n else:\n rospy.logerr(\"[NEGATIVE]Wrong action for negative reward: %s\", action)\n else:\n # pass\n replay_buffer.add_experience(image_t, action, reward, image_t1, done)\n frame_preliminary += 1 # To call every time a frame is obtained\n total_experience_counter += 1\n image_t = image_t1\n timer_episode_stop = time.time()\n frame_episode +=1\n update_quadrotor_pose(quadrotor_pose, done_reward)\n \n #rospy.sleep(2.0) #NOTE: fix the descend bug affecting the altitude\n if frame_episode >= steps_per_episodes:\n\t done = True\n # Save the buffer every 25000 experiences\n # if replay_buffer_positive.return_size() % saving_every_tot_experiences == 0 and is_buffer_saved == False:\n if replay_buffer.return_size() % saving_every_tot_experiences == 0 :\n timer_start = time.time()\n print(\"\")\n print(\"Saving the replay buffer in: \" + replay_buffer_path)\n print(\"Sit back and relax, it may take a while...\")\n replay_buffer.save(replay_buffer_path)\n timer_stop = time.time()\n print \"Time episode: \" + str(timer_stop - 
timer_start) + \" seconds\"\n print \"Time episode: \" + str((timer_stop - timer_start) / 60) + \" minutes\"\n print(\"Done!\")\n # timer_start = time.time()\n # print(\"\")\n # print(\"Saving the replay buffer in: \" + replay_buffer_path_positive)\n # print(\"Sit back and relax, it may take a while...\")\n # replay_buffer_positive.save(replay_buffer_path_positive)\n # timer_stop = time.time()\n # print \"Time episode: \" + str(timer_stop - timer_start) + \" seconds\"\n # print \"Time episode: \" + str((timer_stop - timer_start) / 60) + \" minutes\"\n # print(\"Done!\")\n # print(\"\")\n # print(\"Saving the replay buffer in: \" + replay_buffer_path_negative)\n # print(\"Sit back and relax, it may take a while...\")\n # replay_buffer_negative.save(replay_buffer_path_negative)\n # timer_stop = time.time()\n # print \"Time episode: \" + str(timer_stop - timer_start) + \" seconds\"\n # print \"Time episode: \" + str((timer_stop - timer_start) / 60) + \" minutes\"\n # print(\"Done!\")\n # print(\"\")\n # is_buffer_saved = True\n if done:\n episode += 1\n timer_stop = time.time()\n actual_time = rospy.get_rostime()\n rospy_stop_time = actual_time.secs + actual_time.nsecs / 1000000000.0\n rospy_time_elapsed = rospy_stop_time - rospy_start_time\n print \"Replay Buffer Size: \" + str(replay_buffer.return_size()) + \" out of \" + str(replay_memory_size)\n # print \"Replay Buffer Positive Size: \" + str(replay_buffer_positive.return_size()) + \" out of \" + str(replay_memory_size)\n # print \"Replay Buffer Negative Size: \" + str(replay_buffer_negative.return_size()) + \" out of \" + str(replay_memory_size)\n print \"Frame counter: \" + str(frame_preliminary)\n print \"Time episode: \" + str(timer_stop - timer_start) + \" seconds\"\n print( \"Ros time episode: \" + str(rospy_time_elapsed) + \" seconds\")\n if cumulated_reward >= 0:\n rospy.logwarn(\"Positive reward obtained!\")\n print \"Cumulated reward: \" + str(cumulated_reward)\n print \"Episode finished after {} timesteps\".format(step + 1)\n break\n\n # timer_total_stop = time.time()\n # print \"Total time simulation: \" + str((timer_total_stop - timer_total_start) / 60.0) + \" minutes\"\n # print \"Total time simulation: \" + str((timer_total_stop - timer_total_start) / 3600.0) + \" hours\"\n # # Once the buffer is filled, save it to disk\n # timer_saving_start = time.time()\n # print \"Saving the replay buffer in: \" + replay_buffer_positive_path\n # print \"Sit back and relax, it may take a while...\"\n # replay_buffer_positive.save(replay_buffer_positive_path)\n # print \"Done!\"\n # timer_saving_stop = time.time()\n # print \"Time to save the buffer: \" + str(timer_saving_stop - timer_saving_start) + \" seconds\"\n # print \"Time to save the buffer: \" + str((timer_saving_stop - timer_saving_start) / 60) + \" minutes\"\n # timer_saving_start = time.time()\n # print \"Saving the replay buffer in: \" + replay_buffer_negative_path\n # print \"Sit back and relax, it may take a while...\"\n # replay_buffer_negative.save(replay_buffer_negative_path)\n # print \"Done!\"\n # timer_saving_stop = time.time()\n # print \"Time to save the buffer: \" + str(timer_saving_stop - timer_saving_start) + \" seconds\"\n # print \"Time to save the buffer: \" + str((timer_saving_stop - timer_saving_start) / 60) + \" minutes\"\n # Shutdown the node\n rospy.signal_shutdown(\"Rospy Shutdown!\")", "def augment_train_data(self):\n # do not augment on evaluation dataset\n original_len = len(self.data_train)\n for i in range(len(self.data_train)):\n if i % 100 == 0:\n 
print(f\"Augmenting train data, progress: {i} / {original_len}\")\n title = self.data_train[i][\"Title\"]\n abstract = self.data_train[i][\"Abstract\"]\n label = self.data_train[i][\"Label\"]\n\n title = self.augmenter.augment(title)\n abstract = self.augmenter.augment(abstract)\n\n self.data_train.append({\"Title\": title, \"Abstract\": abstract, \"Label\": label})\n print(f\"Train data amount after augmenting: {len(self.data_train)}\")", "def re_init_buffer(self):\n #~ print(self.verts)\n #~ print(self.texcoords)\n #~ print(self.inds)\n self.shape.buf[0].re_init(pts=np.array(self.verts, 'f'),texcoords=np.array(self.texcoords, 'f'))", "def _fill_buffer(self, in_data, frame_count, time_info, status_flags):\n frames = in_data\n self._data_frame.append(frames)\n self._buff.put(in_data)\n return None, pyaudio.paContinue", "def replay(self,nb_lessons):\n minibatch = []\n # Sample minibatch from the memory\n for i in range(nb_lessons): \n k = self.chooseLessons(nb_lessons) \n # We go over the pexperiences into lessons \n replayed_experiences = self.lessons[k]\n replayed_experiences.reverse()\n for experience in replayed_experiences: \n \n # Extract informations from each memory\n #print(experience)\n state = experience[0]\n action = experience[1]\n next_states = experience[2]\n reward = experience[3]\n \n # if done, make our target reward\n target = reward\n if not(list(next_states)==None):\n # predict the future discounted reward\n #print(next_states)\n options = [self.nn.predict(i) for i in next_states]\n pmax = options[0]\n for i in range(len(options)):\n if options[i]>pmax:\n pmax = options[i]\n target = reward + self.gamma * pmax\n \n # Train the Neural Net with the state and next state input\n #self.nn.train_one_step_other(state,target)\n self.nn.train(state,tf.convert_to_tensor([[target]]))", "def __init__(self, action_size, buffer_size, batch_size, seed, device = None):\n self.action_size = action_size\n self.memory = deque(maxlen=buffer_size)\n self.batch_size = batch_size\n self.experience = recordclass(\"Experience\", field_names=[\"state\", \"action\", \"reward\", \"next_state\", \"done\", \"priority\"])\n self.seed = random.seed(seed)\n self.max_priority = 1.\n self.device = MinorityResampledReplayBuffer.device if device is None else device", "def collect_samples(self):\n # TODO refactor this to not to duplicate collect from DDPG\n # - not so easy due to logger :(\n collected = 0\n while collected < self.acm_pre_train_samples:\n obs = self.env.reset()\n end = False\n obs = torch.tensor(obs, dtype=torch.float32, device=self.device)\n obs = obs.unsqueeze(0)\n\n prev_idx = self.replay_buffer.add_obs(obs)\n ep_len = 0\n\n while not end:\n acm_action = AcMTrainer.initial_act(self, obs)\n self.replay_buffer.add_acm_action(acm_action)\n obs, rew, done, _ = self.env.step(acm_action)\n ep_len += 1\n\n obs = torch.tensor(obs, dtype=torch.float32, device=self.device)\n obs = obs.unsqueeze(0)\n\n end = done\n done = False if ep_len == self.max_ep_len else done\n\n next_idx = self.replay_buffer.add_obs(obs)\n self.replay_buffer.add_timestep(prev_idx, next_idx, obs, rew, done, end)\n prev_idx = next_idx\n collected += 1", "def __init__(self, env, skip=4):\n gym.Wrapper.__init__(self, env)\n \n # create the buffer of two frame sizes\n self._obs_buffer = np.zeros((2,) + env.observation_space.shape, dtype=np.uint8)\n self._skip = skip", "def finalize(self) :\n if not self.nTrain:\n self.nTrain = max(self.retrain_interval * 2, self.season * 3)\n if self.max_verbosity > 0:\n print \"Automatically set 
nTrain to\", self.nTrain\n if self.batch_size is None:\n #No batch learning\n self.batch_size = self.nTrain\n if self.nTrain % self.batch_size != 0:\n if self.max_verbosity > 0:\n print \"Adding\", self.batch_size - (self.nTrain % self.batch_size), \"to nTrain\", self.nTrain\n self.nTrain += self.batch_size - (self.nTrain % self.batch_size)\n self.numLags = 0.25 * self.nTrain #Don't immediately retrain\n\n #The first time at which we can actually predict: need enough headroom for both MASE calculation\n #and filling the lookback window\n self.front_buffer = max(self.season - self.predictionStep, self.lookback)\n if self.ignore_for_error: #offset for values not predicted\n self.ignore_for_error-= (self.front_buffer + self.predictionStep - 1)", "def train(self):\n if len(self.memory) > self.batch_size:\n selecting_time_start = time.time()\n experiences = self.memory.sample()\n self.selecting_time += time.time() - selecting_time_start\n self.learn(experiences)", "def __init__(self, buffer_size, batch_size):\n self.memory = deque(maxlen=buffer_size) # internal memory (deque)\n self.batch_size = batch_size\n self.experience = namedtuple(\"Experience\", field_names=[\"state\", \"action\", \"reward\", \"next_state\", \"done\"])\n self.episode = 0", "def train_replay(self):\n\n if len(self.memory) < self.train_start:\n return\n\n if self.epsilon > self.epsilon_end:\n self.epsilon -= self.epsilon_decay_step\n\n mini_batch = random.sample(self.memory, self.batch_size)\n\n history = np.zeros((self.batch_size, self.state_size[0],\n self.state_size[1], self.state_size[2]))\n next_history = np.zeros((self.batch_size, self.state_size[0],\n self.state_size[1], self.state_size[2]))\n\n # Initialize the Value targets to optimize\n v_target = np.zeros((self.batch_size,))\n\n action, reward, dead = [], [], []\n\n for i in range(self.batch_size):\n history[i] = np.float32(mini_batch[i][0] / 255.)\n next_history[i] = np.float32(mini_batch[i][3] / 255.)\n action.append(mini_batch[i][1])\n reward.append(mini_batch[i][2])\n dead.append(mini_batch[i][4])\n\n # current state-action values Q(st, at)\n q_outputs = self.q_duelling_part.predict(history)\n\n # TD-values for updating the networks coming from the target model\n if self.target_model is True:\n v_target_value = self.target_v_duelling_part.predict(next_history)\n elif self.target_model is False:\n v_target_value = self.v_duelling_part.predict(next_history)\n\n q_targets = []\n\n for i in range(self.batch_size):\n if dead[i]:\n v_target[i] = reward[i]\n q_outputs[i][action[i]] = reward[i]\n\n else:\n v_target[i] = reward[i] + \\\n self.discount_factor * v_target_value[i]\n q_outputs[i][action[i]] = reward[i] + \\\n self.discount_factor * v_target_value[i]\n\n q_targets.append(q_outputs[i][action[i]])\n\n self.optimizer([history, action, q_targets]) # optimize the state-action-value head\n self.v_duelling_part.fit(history, v_target, epochs=1, verbose=0) # optimize the state-value head", "def _train_step(self):\n # Run a train op at the rate of self.update_period if enough training steps\n # have been run. This matches the Nature DQN behaviour.\n if self._replay.add_count > self.min_replay_history:\n if self.training_steps % self.update_period == 0:\n self._sample_from_replay_buffer()\n\n if self._replay_scheme == 'prioritized':\n # The original prioritized experience replay uses a linear exponent\n # schedule 0.4 -> 1.0. 
Comparing the schedule to a fixed exponent of\n # 0.5 on 5 games (Asterix, Pong, Q*Bert, Seaquest, Space Invaders)\n # suggested a fixed exponent actually performs better, except on Pong.\n probs = self.replay_elements['sampling_probabilities']\n # Weight the loss by the inverse priorities.\n loss_weights = 1.0 / jnp.sqrt(probs + 1e-10)\n loss_weights /= jnp.max(loss_weights)\n else:\n loss_weights = jnp.ones(self.replay_elements['state'].shape[0])\n\n\n self.optimizer, loss, mean_loss = train(self.network_def,\n self.target_network_params,\n self.optimizer,\n self.replay_elements['state'],\n self.replay_elements['action'],\n self.replay_elements['next_state'],\n self.replay_elements['reward'],\n self.replay_elements['terminal'],\n loss_weights,\n self.cumulative_gamma,\n self._target_opt,\n self._mse_inf,\n self._tau,\n self._alpha,\n self._clip_value_min,\n self._rng)\n\n if self._replay_scheme == 'prioritized':\n # Rainbow and prioritized replay are parametrized by an exponent\n # alpha, but in both cases it is set to 0.5 - for simplicity's sake we\n # leave it as is here, using the more direct sqrt(). Taking the square\n # root \"makes sense\", as we are dealing with a squared loss. Add a\n # small nonzero value to the loss to avoid 0 priority items. While\n # technically this may be okay, setting all items to 0 priority will\n # cause troubles, and also result in 1.0 / 0.0 = NaN correction terms.\n self._replay.set_priority(self.replay_elements['indices'],\n jnp.sqrt(loss + 1e-10))\n \n if (self.summary_writer is not None and\n self.training_steps > 0 and\n self.training_steps % self.summary_writing_frequency == 0):\n summary = tf.compat.v1.Summary(value=[\n tf.compat.v1.Summary.Value(tag='HuberLoss', simple_value=mean_loss)])\n self.summary_writer.add_summary(summary, self.training_steps)\n if self.training_steps % self.target_update_period == 0:\n self._sync_weights()\n\n self.training_steps += 1", "def _train_step(self):\n if self._replay.add_count > self.min_replay_history:\n if self.training_steps % self.update_period == 0:\n self._sample_from_replay_buffer()\n (self._rng, self.optimizer_state, self.online_params,\n loss, quantile_loss, coherence_loss, orthogonality_loss) = train(\n self.network_def,\n self.online_params,\n self.target_network_params,\n self.optimizer,\n self.optimizer_state,\n self.replay_elements['state'],\n self.replay_elements['action'],\n self.replay_elements['next_state'],\n self.replay_elements['reward'],\n self.replay_elements['terminal'],\n self.num_tau_samples,\n self.num_tau_prime_samples,\n self.num_quantile_samples,\n self.cumulative_gamma,\n self.double_dqn,\n self.kappa,\n self._rng,\n self._coherence_weight,\n self._option,\n self._use_ortho_loss,\n self._use_cohe_loss,\n self._tau,\n self._alpha,\n self._clip_value_min)\n if (self.summary_writer is not None and\n self.training_steps > 0 and\n self.training_steps % self.summary_writing_frequency == 0):\n if self._use_ortho_loss and self._use_cohe_loss:\n summary = tf.compat.v1.Summary(value=[\n tf.compat.v1.Summary.Value(\n tag='Losses/Combined', simple_value=loss),\n tf.compat.v1.Summary.Value(\n tag='Losses/Quantile', simple_value=quantile_loss),\n tf.compat.v1.Summary.Value(\n tag='Losses/Incoherence', simple_value=coherence_loss),\n tf.compat.v1.Summary.Value(\n tag='Losses/Orthogonality',\n simple_value=orthogonality_loss),\n ])\n elif self._use_ortho_loss and not self._use_cohe_loss:\n summary = tf.compat.v1.Summary(value=[\n tf.compat.v1.Summary.Value(\n tag='Losses/Combined', 
simple_value=loss),\n tf.compat.v1.Summary.Value(\n tag='Losses/Quantile', simple_value=quantile_loss),\n tf.compat.v1.Summary.Value(\n tag='Losses/Orthogonality', simple_value=orthogonality_loss),\n ])\n elif self._use_cohe_loss and not self._use_ortho_loss:\n summary = tf.compat.v1.Summary(value=[\n tf.compat.v1.Summary.Value(\n tag='Losses/Combined', simple_value=loss),\n tf.compat.v1.Summary.Value(\n tag='Losses/Quantile', simple_value=quantile_loss),\n tf.compat.v1.Summary.Value(\n tag='Losses/Incoherence', simple_value=coherence_loss),\n ])\n self.summary_writer.add_summary(summary, self.training_steps)\n if self.training_steps % self.target_update_period == 0:\n self._sync_weights()\n\n self.training_steps += 1", "def _gen_mem_pre(self):\n params = self._get_model_params()\n\n # because we cant easily get intermediate values from ONNX runtime\n # use the pytorch value\n mem_0 = np.zeros(2_850)\n mem_1 = np.zeros(2_925)\n\n flat_input = np.ndarray.flatten(list(self.input_.values())[0])\n\n mem_0[: len(flat_input)] = flat_input\n\n tensors = params + [mem_0, mem_1]\n flat_tensors = [np.ndarray.flatten(tensor) for tensor in tensors]\n\n self.mem_pre = [f\"{x}\\n\" for tensor in flat_tensors for x in tensor.tolist()]", "def __init__(self, buffer_size, batch_size, seed):\n self.memory = deque(maxlen=buffer_size) # internal memory (deque)\n self.batch_size = batch_size\n self.experience = namedtuple(\"Experience\", field_names=[\"state\", \"action\", \"reward\", \"next_state\", \"done\"])\n self.seed = random.seed(seed)", "def before_epoch(self):\n\n # Prepare prediction container in every epoch, set/reset here as new predictions are obtained after each epoch as NN learns\n self.y_pred = []", "def __init__(self, buffer_size, batch_size, seed):\n self.memory = deque(maxlen=buffer_size)\n self.batch_size = batch_size\n self.experience = namedtuple(\"Experiences\", field_names=[\"state\", \"state_full\", \"action\", \"reward\",\n \"next_state\", \"next_state_full\", \"done\"])\n self.seed = random.seed(seed)", "def __init__(self, buffer_size, random_seed=None):\n self.buffer_size = buffer_size\n self.count = 0\n self.oldPos = 0\n self.currPos = 0\n self.full = False\n self.buffer = []\n self.featCount = 3\n random.seed(random_seed)\n self.useSubBuffer = False", "def __init__(self, size, frame_history_len):\n self.size = size\n self.frame_history_len = frame_history_len\n\n self.next_idx = 0\n self.num_in_buffer = 0\n\n self.obs = None\n self.action = None\n self.reward = None\n self.done = None", "def bufferCnt():\n if(reset == 1):\n bufferCounter.next = 0\n else:\n if(decimationRatio > 0):\n if(bufferCounter == (decimationRatio-1)):\n bufferCounter.next = 0\n else:\n bufferCounter.next = bufferCounter + 1", "def _experience_replay(self, batch_size, discount=0.9, epochs=1):\r\n minibatch = random.sample(self.experience, batch_size)\r\n\r\n # TODO: The batch_size might not bee needed as an argument here if the reshape things can be resolved.\r\n states, actions, rewards, next_states, terminated = self._extract_data(batch_size, minibatch)\r\n targets = self._build_targets(batch_size, states, next_states, rewards, actions, terminated, discount)\r\n\r\n history = self.q_network.fit(states, targets, epochs=epochs, verbose=0, batch_size=1)\r\n #print(history.history['loss'])\r\n self.episode_loss.append(history.history['loss'][0])", "def __init__(self, buffer_size=3000):\n self.buffer = []\n self.buffer_size = buffer_size", "def __init__(self, buffer_size=3000):\n self.buffer = []\n 
self.buffer_size = buffer_size", "def add(self, experience: []):\n if len(self.buffer) + len(experience) >= self.buffer_size:\n self.buffer[0:1] = []\n self.buffer.append(experience)", "def __init__(self, capacity: int, storage_unit: str = \"timesteps\", **kwargs):\n super().__init__(capacity=capacity, storage_unit=\"timesteps\", **kwargs)\n self.replay_batches = []\n self.replay_index = 0", "def reset(self):\n self.head_pos = 0\n self.left_expands = 0\n self.memory = np.zeros(\n (\n self.max_memory if self.fixed_size else 1,\n self.memory_unit_size\n )\n )\n self.previous_read = np.zeros(self.memory_unit_size)\n if self.history is not None:\n self.history = defaultdict(list)", "def __init__(self, env, skip=4, blend=4):\n gym.Wrapper.__init__(self, env)\n # most recent raw observations (for max pooling across time steps)\n self._obs_buffer = np.zeros((blend,) + env.observation_space.shape, dtype=env.observation_space.dtype)\n self._skip = skip\n self._blend = blend", "def _fill_buffer(self, in_data, frame_count, time_info, status_flags):\n self._buff.put(in_data)\n return None, pyaudio.paContinue", "def __init__(self, buffer_size, batch_size):\n self.memory = deque(maxlen=buffer_size)\n self.batch_size = batch_size\n self.experience = namedtuple(\"Experience\", field_names=[\"state\", \"action\", \"reward\", \"next_state\"])", "def _fill_buffer(buff, in_data, frame_count, time_info, status_flags):\n buff.put(in_data)\n return None, pyaudio.paContinue", "def training_step(self):\n self.iteration += 1\n # if not enough replay memories\n if self.iteration < self.params.min_replays:\n # skip training\n return\n # for each batch\n for _ in range(self.params.num_batches):\n # sample memories\n mem_states, mem_controls, mem_rewards, mem_next_states, mem_continues = \\\n (rl.tools.sample_memories(self.memory, self.params.batch_size))\n # train the critic\n max_q = self.sess.run(self.graph.target_critic_outputs, feed_dict={self.graph.states: mem_next_states})\n td_target = mem_rewards + mem_continues * self.params.discount_factor * max_q\n self.reg_loss_val, self.critic_loss_val, _ = self.sess.run(\n [self.graph.critic_reg_loss, self.graph.critic_loss, self.graph.critic_training_op],\n feed_dict={self.graph.states: mem_states, self.graph.actor_outputs: mem_controls,\n self.graph.td_target: td_target})\n # train the actor\n neg_mean_q_val, _ = self.sess.run([self.graph.neg_mean_q, self.graph.actor_training_op],\n feed_dict={self.graph.states: mem_states})\n self.mean_q_val = -1.0 * neg_mean_q_val\n # copy to target\n self.sess.run(self.graph.copy_online_to_target)", "def _set_and_fill_buffer(self):\n # Set the buffer size for the nodal solution steps data. Existing nodal\n # solution step data may be lost.\n required_buffer_size = self.settings[\"buffer_size\"].GetInt()\n if required_buffer_size < self.GetMinimumBufferSize():\n required_buffer_size = self.GetMinimumBufferSize()\n current_buffer_size = self.main_model_part.GetBufferSize()\n buffer_size = max(current_buffer_size, required_buffer_size)\n self.main_model_part.SetBufferSize(buffer_size)\n # Cycle the buffer. 
This sets all historical nodal solution step data to\n # the current value and initializes the time stepping in the process info.\n delta_time = self.main_model_part.ProcessInfo[KratosMultiphysics.DELTA_TIME]\n time = self.main_model_part.ProcessInfo[KratosMultiphysics.TIME]\n step =-buffer_size\n time = time - delta_time * buffer_size\n self.main_model_part.ProcessInfo.SetValue(KratosMultiphysics.TIME, time)\n for i in range(0, buffer_size):\n step = step + 1\n time = time + delta_time\n self.main_model_part.ProcessInfo.SetValue(KratosMultiphysics.STEP, step)\n self.main_model_part.CloneTimeStep(time)", "def __init__(self, capacity):\n self.experiences = RingBuf(capacity)", "def replay(self, batch_size):\n batch = random.sample(self.replay_memory, batch_size)\n for state, action, reward, next_state, done in batch:\n target = self.model.predict(state)\n if done:\n target[0][action] = reward\n else:\n t = self.target_model.predict(next_state)[0]\n target[0][action] = reward + self.gamma * np.amax(t)\n self.loss += self.model.train_on_batch(state, target)\n self.n_batches += 1\n if self.epsilon > self.epsilon_min:\n self.epsilon *= self.epsilon_decay", "def new_data(self, data):\n if self.tbuffer is None:\n self.tbuffer = torch.FloatTensor(data).to(self.device)\n else:\n self.tbuffer = torch.cat((self.tbuffer, torch.FloatTensor(data).to(self.device)))\n if len(self.tbuffer) > self.look_back:\n l = len(self.tbuffer) - self.look_back + 1\n self.cbuffer = self.tbuffer[-l:].cpu()\n X_tests = self.pytorch_rolling_window(self.tbuffer, self.look_back, 1)\n self.tbuffer = X_tests[-1][1:]\n p_preds = torch.empty(1, self.max_ele+2).to(self.device)\n for X_test in X_tests:\n with torch.no_grad():\n y_pred = self.model(X_test)\n p_preds = torch.cat([p_preds, y_pred.reshape(1, self.max_ele+2)])\n p_preds = p_preds[1:] # drop first garbage sample\n p_preds_t = torch.transpose(p_preds, 0, 1).cpu()\n if self.lp:\n p_preds_t = np.apply_along_axis(lambda m: np.convolve(m, self.lp_win, mode='full'), axis=1, arr=p_preds_t)\n self.p_preds_t = p_preds_t[:,:-self.lp_len+1]\n else:\n self.p_preds_t = p_preds_t\n else:\n self.p_preds_t = None\n self.cbuffer = None", "def reset_train_pointer(self):\n self.train_pointer = 0\n\n if self.shuffle:\n self.shuffle_data()", "def init_buffer(self):\n \n self.shape.buf = [pi3d.Buffer(self.shape, self.verts, self.texcoords, self.inds, self.norms)]\n self.shape.set_draw_details(self.shader, [self.spritesheet.img])", "def replay(self):\n \n #grab random batch\n if len(self.memory) < self.batchsize:\n minibatch = self.memory\n else:\n minibatch = random.sample(self.memory,self.batchsize)\n \n #instantiate\n states = []\n Q_wants = []\n \n #Find updates\n for event in minibatch:\n state,action,reward,next_state,done = event\n states.append(state)\n \n #Find Q_target\n state_tensor = np.reshape(state,(1,len(state))) # keras takes 2d arrays\n Q_want = self.model.predict(state_tensor)[0] # all elements of this, except the action chosen, stay\n # the same \n \n #If state is terminal, Q_target(action) = reward\n if done == True:\n Q_want[action] = reward\n \n # Q_want(action) = reward + gamma*Q_target(next_state) -- note I sample from the target network\n else:\n next_state_tensor = np.reshape(next_state,(1,len(next_state))) \n\n \n Q_target_next_state_vec = self.target_model.predict(next_state_tensor)[0]\n Q_target_next_state_max = max(Q_target_next_state_vec)\n \n Q_want[action] = reward + self.gamma*Q_target_next_state_max\n Q_want_tensor = np.reshape(Q_want,(1,len(Q_want)))\n 
#self.model.fit(state_tensor,Q_want_tensor,verbose=False,epochs=1)\n \n Q_wants.append(Q_want)\n \n \n #Here I fit on the whole batch. Others seem to fit line-by-line\n #Dont' think (hope) it makes much difference\n states = np.array(states)\n Q_wants = np.array(Q_wants)\n self.model.fit(states,Q_wants,verbose=False, epochs=1)", "def __init__(self, buffer_size, batch_size):\n # Internal memory\n self.memory = deque(maxlen=buffer_size)\n self.batch_size = batch_size\n self.experience = namedtuple(\"Experience\",\n field_names=[\"state\", \"action\", \"reward\", \"next_state\", \"done\"])", "def before_fit(self):\n self.run = not hasattr(self.learn, 'lr_finder') and not hasattr(self, \"gather_preds\")\n if not self.run:\n return\n\n # Prepare ground truth container, set here as y_true's always stay the same\n self.y_true = []", "def experience_replay(batch_size):\n memory = []\n while True:\n experience = yield rsample(memory, batch_size) if batch_size <= len(memory) else None\n memory.append(experience)", "def __init__(self):\n self.num_mini_batches = 0", "def train(self):\n if len(self.experience) < self.minibatch_size:\n return\n\n # sample a minibatch_size of random episode with a number of transitions >= unrollings_num\n random_episodes_indecies = np.random.choice(len(self.experience), self.minibatch_size)\n random_episodes = []\n for index in random_episodes_indecies:\n episode = self.experience[index]\n\n # 0:random_transitions_space is the range from which a random transition\n # can be picked up while having unrollings_num - 1 transitions after it\n random_transitions_space = len(episode) - self.unrollings_num\n random_start = np.random.choice(random_transitions_space, 1)\n\n random_episodes.append(episode[random_start:random_start + self.unrollings_num])\n\n state_shape = tuple([self.minibatch_size, self.unrollings_num] + self.state_shape)\n\n # prepare the training data\n states = np.empty(state_shape, dtype=np.float32)\n next_states = np.empty(state_shape, dtype=np.float32)\n rewards = np.empty((self.minibatch_size, self.unrollings_num, ), dtype=np.float32)\n transition_action_filters = np.zeros((self.minibatch_size, self.unrollings_num, self.actions_count), dtype=np.float32)\n next_legal_actions_filters = np.zeros((self.minibatch_size, self.unrollings_num, self.actions_count), dtype=np.float32)\n\n for i, episode in enumerate(random_episodes):\n for j, transition in enumerate(episode):\n state, action, reward, nextstate, next_legal_actions = transition\n\n states[i,j], rewards[i,j], next_states[i,j] = state, reward, nextstate\n transition_action_filters[i,j][action] = 1.0\n next_legal_actions_filters[i,j][next_legal_actions] = 1.0\n\n self.prediction_nn.clearLSTMS(self.session)\n self.target_nn.clearLSTMS(self.session)\n\n loss,_ = self.session.run([self.loss, self.finalize], {\n self.states: states,\n self.next_states: next_states,\n self.rewards: np.reshape(rewards, (self.minibatch_size * self.unrollings_num, )),\n self.transition_action_filters: np.reshape(transition_action_filters, (self.minibatch_size * self.unrollings_num, self.actions_count)),\n self.next_legal_actions_filters: np.reshape(next_legal_actions_filters, (self.minibatch_size * self.unrollings_num, self.actions_count))\n })\n\n if self.iteration != 0 and self.iteration % self.freeze_period == 0:\n self.target_nn.assign_to(self.prediction_nn, self.session)\n\n self.iteration += 1\n\n return loss, self.iteration", "def __init__(self, buffer_size, random_seed=123):\n self.buffer_size = buffer_size\n self.count = 
0\n self.buffer = deque()\n random.seed(random_seed)\n self.last_recent_batch = 0", "def reset(self):\n self.epochs = 0\n # Shuffle the training data\n perm = np.arange(self.num_train)\n np.random.shuffle(perm)\n assert self.num_train == self.train_images.shape[\n 0], 'Error incorrect shuffling mask'\n self.train_images = self.train_images[perm]\n self.train_labels = self.train_labels[perm]\n self.curr_train_index = 0", "def fillGameMemoryWithRandomTransitions(gameMemory):\n print(\"Preparing Dataset\")\n progressBar = myProgressBar(START_REPLAY_MEMORY)\n while len(gameMemory) < START_REPLAY_MEMORY:\n ENVIRONMENT.reset()\n isDone = False\n currentLifes = 5\n while not isDone:\n action = ENVIRONMENT.action_space.sample()\n screen, reward, isDone, info = ENVIRONMENT.step(action)\n ENVIRONMENT.render(mode = 'rgb_array')\n reward = calculateRewardWithInfoGiven(reward, info, isDone)\n if info['ale.lives'] < currentLifes:\n currentLifes -= 1\n gameMemory.pushScreenActionReward(screen, action, reward, True)\n else:\n gameMemory.pushScreenActionReward(screen, action, reward, isDone)\n progressBar.update(len(gameMemory))\n print(\"dataset finished\")", "def train_agent(self):\n # Retrieve collected experiences from memory\n experiences = np.array(self.replay.get_all())\n # rewards = np.array([h['reward'] for h in experiences])\n #rewards = experiences[:,2]\n rewards = np.array([r[2] for r in experiences])\n\n # Discount and normalize rewards\n norm_rewards = self.discount_rewards_and_normalize(rewards)\n\n # Shuffle for better learning\n shuffled_experiences = np.random.shuffle(experiences)\n\n # Feed the experiences through the network with rewards to compute and\n # minimize the loss.\n\n feed={\n self.X: [r[0] for r in experiences],\n self.rewards:norm_rewards,\n self.ep_actions:experiences[:,1]\n }\n self.tf_sess.run(self.train,feed_dict=feed)\n\n pass", "def _allocate_buffer_memory(self):\n for channel in self._channels_dict.values():\n if channel.enabled:\n channel.allocate(self._num_captures, self._num_samples)", "def trainStep(self, batchSize=None):\n # Default behaviour waits for buffer to collect at least one batch_size of transitions\n if batchSize is None:\n if len(self.buffer) < self.batch_size:\n return\n batchSize = self.batch_size\n\n # Extract states, actions, rewards and action probabilities from transitions in buffer\n state = tensor([t.state for t in self.buffer], dtype=torch_float)\n action = tensor([t.action for t in self.buffer], dtype=torch_long).view(-1, 1)\n reward = [t.reward for t in self.buffer]\n old_action_log_prob = tensor([t.a_log_prob for t in self.buffer], dtype=torch_float).view(-1, 1)\n\n # Unroll rewards\n R = 0\n Gt = []\n for r in reward[::-1]:\n R = r + self.gamma * R\n Gt.insert(0, R)\n Gt = tensor(Gt, dtype=torch_float)\n\n # Send everything to cuda if used\n if self.use_cuda:\n state, action, old_action_log_prob = state.cuda(), action.cuda(), old_action_log_prob.cuda()\n Gt = Gt.cuda()\n\n # Repeat the update procedure for ppo_update_iters\n for i in range(self.ppo_update_iters):\n # Create randomly ordered batches of size batchSize from buffer\n for index in BatchSampler(SubsetRandomSampler(range(len(self.buffer))), batchSize, False):\n # Calculate the advantage at each step\n Gt_index = Gt[index].view(-1, 1)\n V = self.critic_net(state[index])\n delta = Gt_index - V\n advantage = delta.detach()\n\n # Get the current probabilities\n # Apply past actions with .gather()\n action_prob = self.actor_net(state[index]).gather(1, action[index]) # new policy\n\n 
# PPO\n ratio = (action_prob / old_action_log_prob[index]) # Ratio between current and old policy probabilities\n surr1 = ratio * advantage\n surr2 = clamp(ratio, 1 - self.clip_param, 1 + self.clip_param) * advantage\n\n # update actor network\n action_loss = -torch_min(surr1, surr2).mean() # MAX->MIN descent\n self.actor_optimizer.zero_grad() # Delete old gradients\n action_loss.backward() # Perform backward step to compute new gradients\n nn.utils.clip_grad_norm_(self.actor_net.parameters(), self.max_grad_norm) # Clip gradients\n self.actor_optimizer.step() # Perform training step based on gradients\n\n # update critic network\n value_loss = F.mse_loss(Gt_index, V)\n self.critic_net_optimizer.zero_grad()\n value_loss.backward()\n nn.utils.clip_grad_norm_(self.critic_net.parameters(), self.max_grad_norm)\n self.critic_net_optimizer.step()\n\n # After each training step, the buffer is cleared\n del self.buffer[:]", "def __init__(self, env, skip=4):\r\n gym.Wrapper.__init__(self, env)\r\n # most recent raw observations (for max pooling across time steps)\r\n self._obs_buffer = np.zeros(\r\n (2,) + env.observation_space.shape, dtype=np.uint8)\r\n self._skip = skip", "def stored_reset(self):\r\n\t\tself.stored_reward = np.zeros((self.num_timesteps - self.first_considered_reward_step,))\r\n\t\tself.stored_optimum = np.zeros_like(self.stored_reward)", "def __init__(self, buffer_size, batch_size):\n\n self.memory = deque(maxlen=buffer_size)\n self.batch_size = batch_size\n # set.experience = namedtuple(\"Experience\", field_names=['state', 'action', 'reward', 'next_state', 'done'])", "def __init__(self, env, skip=4):\n gym.Wrapper.__init__(self, env)\n # most recent raw observations (for max pooling across time steps)\n self._obs_buffer = np.zeros((2,) + env.observation_space.shape, dtype='uint8')\n self._skip = skip", "def __init__(self, sess, brain, reward_buff_cap, trainer_parameters, training, seed, run_id):\n super(PPOTrainer, self).__init__(sess, brain.brain_name, trainer_parameters, training, run_id)\n\n self.param_keys = ['batch_size', 'beta', 'buffer_size', 'epsilon', 'gamma', 'hidden_units', 'lambd',\n 'learning_rate', 'max_steps', 'normalize', 'num_epoch', 'num_layers',\n 'time_horizon', 'sequence_length', 'summary_freq', 'use_recurrent',\n 'graph_scope', 'summary_path', 'memory_size', 'use_curiosity', 'curiosity_strength',\n 'curiosity_enc_size']\n\n for k in self.param_keys:\n if k not in trainer_parameters:\n raise UnityTrainerException(\"The hyperparameter {0} could not be found for the PPO trainer of \"\n \"brain {1}.\".format(k, brain.brain_name))\n\n self.use_curiosity = bool(trainer_parameters['use_curiosity'])\n\n self.step = 0\n\n self.policy = PPOPolicy(seed, brain, trainer_parameters,\n sess, self.is_training)\n\n stats = {'cumulative_reward': [], 'episode_length': [], 'value_estimate': [],\n 'entropy': [], 'value_loss': [], 'policy_loss': [], 'learning_rate': []}\n if self.use_curiosity:\n stats['forward_loss'] = []\n stats['inverse_loss'] = []\n stats['intrinsic_reward'] = []\n self.intrinsic_rewards = {}\n self.stats = stats\n\n self.training_buffer = Buffer()\n self.cumulative_rewards = {}\n self._reward_buffer = deque(maxlen=reward_buff_cap)\n self.episode_steps = {}\n self.summary_path = trainer_parameters['summary_path']\n if not os.path.exists(self.summary_path):\n os.makedirs(self.summary_path)\n\n self.summary_writer = tf.summary.FileWriter(self.summary_path)", "def __init__(self, env, skip=4):\n gym.Wrapper.__init__(self, env)\n # most recent raw observations 
(for max pooling across time steps)\n self._obs_buffer = np.zeros((2,) + env.observation_space.shape, dtype=np.uint8)\n self._skip = skip", "def __init__(self, env, skip=4):\n gym.Wrapper.__init__(self, env)\n # most recent raw observations (for max pooling across time steps)\n self._obs_buffer = np.zeros((2,)+env.observation_space.shape, dtype=np.uint8)\n self._skip = skip", "def __init__(self, env, skip=4):\n gym.Wrapper.__init__(self, env)\n # most recent raw observations (for max pooling across time steps)\n self._obs_buffer = np.zeros((2,)+env.observation_space.shape, dtype=np.uint8)\n self._skip = skip", "def __init__(self, buffer_size, ep_length, random_seed=123):\n self.buffer_size = buffer_size\n self.count = 0\n self.buffer = deque()\n self.episode_length = ep_length\n random.seed(random_seed)", "def train_epoch(self):\n # We can't validate a winner for submissions generated by the learner,\n # so we will use a winner-less match when getting rewards for such states\n blank_match = {\"winner\":None}\n\n learner_submitted_actions = 0\n null_actions = 0\n\n # Shuffle match presentation order\n if(self.N_TEMP_TRAIN_MATCHES):\n path_to_db = \"../data/competitiveMatchData.db\"\n sources = {\"patches\":self.TEMP_TRAIN_PATCHES, \"tournaments\":[]}\n print(\"Adding {} matches to training pool from {}.\".format(self.N_TEMP_TRAIN_MATCHES, path_to_db))\n temp_matches = pool.match_pool(self.N_TEMP_TRAIN_MATCHES, path_to_db, randomize=True, match_sources=sources)[\"matches\"]\n else:\n temp_matches = []\n data = self.training_data + temp_matches\n\n shuffled_matches = random.sample(data, len(data))\n for match in shuffled_matches:\n for team in self.teams:\n # Process match into individual experiences\n experiences = mp.process_match(match, team)\n for pick_id, experience in enumerate(experiences):\n # Some experiences include NULL submissions (usually missing bans)\n # The learner isn't allowed to submit NULL picks so skip adding these\n # to the buffer.\n state,actual,_,_ = experience\n (cid,pos) = actual\n if cid is None:\n null_actions += 1\n continue\n # Store original experience\n self.replay.store([experience])\n self.step_count += 1\n\n # Give model feedback on current estimations\n if(self.step_count > self.observations):\n # Let the network predict the next action\n feed_dict = {self.ddq_net.online_ops[\"input\"]:[state.format_state()],\n self.ddq_net.online_ops[\"valid_actions\"]:[state.get_valid_actions()]}\n q_vals = self.ddq_net.sess.run(self.ddq_net.online_ops[\"valid_outQ\"], feed_dict=feed_dict)\n sorted_actions = q_vals[0,:].argsort()[::-1]\n top_actions = sorted_actions[0:4]\n\n if(random.random() < self.epsilon):\n pred_act = random.sample(list(top_actions), 1)\n else:\n # Use model's top prediction\n pred_act = [sorted_actions[0]]\n\n for action in pred_act:\n (cid,pos) = state.format_action(action)\n if((cid,pos)!=actual):\n pred_state = deepcopy(state)\n pred_state.update(cid,pos)\n r = get_reward(pred_state, blank_match, (cid,pos), actual)\n new_experience = (state, (cid,pos), r, pred_state)\n\n self.replay.store([new_experience])\n learner_submitted_actions += 1\n\n if(self.epsilon > 0.1):\n # Reduce epsilon over time\n self.epsilon -= self.eps_decay_rate\n\n # Use minibatch sample to update online network\n if(self.step_count > self.pre_training_steps):\n self.train_step()\n\n if(self.step_count % self.target_update_frequency == 0):\n # After the online network has been updated, update target network\n _ = 
self.ddq_net.sess.run(self.ddq_net.target_ops[\"target_update\"])\n\n # Get training loss, training_acc, and val_acc to return\n loss, train_acc = self.validate_model(self.training_data)\n _, val_acc = self.validate_model(self.validation_data)\n return (loss, train_acc, val_acc)", "def __init__(self, env, skip=4):\n gym.Wrapper.__init__(self, env)\n # most recent raw observations (for max pooling across time steps)\n self._obs_buffer = np.zeros((2,) + env.observation_space.shape,\n dtype=np.uint8)\n self._skip = skip", "def q_update(self):\n\n # exit if the experience buffer is not yet large enough\n if self.experience_buffer.size < self.batch_size:\n return\n \n # get the random batch\n states, action_indices, rewards, not_terminals, succ_states, succ_players, succ_legal_moves = self.experience_buffer.random_batch(self.batch_size)\n states = states.to(Globals.device)\n action_indices = action_indices.to(Globals.device)\n rewards = rewards.to(Globals.device)\n not_terminals = not_terminals.to(Globals.device)\n succ_states = succ_states.to(Globals.device)\n succ_players = succ_players.to(Globals.device)\n\n # prepare the training data\n q_values = self.target_network(succ_states)\n target = torch.empty(1, self.batch_size)\n for i in range(self.batch_size):\n if not_terminals[i] == 0:\n target[0, i] = rewards[i]\n continue\n\n if succ_players[i] == CONST.WHITE_MOVE:\n legal_q_values = q_values[0, 0:9][succ_legal_moves[i]]\n q_value, _ = legal_q_values.max(0)\n else:\n legal_q_values = q_values[0, 9:18][succ_legal_moves[i]]\n q_value, _ = legal_q_values.min(0)\n\n target[0, i] = rewards[i] + self.disc*not_terminals[i]*q_value\n\n # execute the training step of the network\n self.training_network.train_step(states, target, action_indices) # the eligibility trace is used as td target", "def reset_records(self):\n self.score_record = []\n self.score_window = deque(maxlen=100)", "def training_step(self, only_tensorboard=False):\n # if self.number_of_times_train_called % self.train_every_nth == 0:\n if len(self.experience) < self.minibatch_size:\n return\n if len(self.experience) \\\n < self.replay_start_size:\n # print 'returning'\n only_tensorboard = True\n # if len(self.experience)< self.minibatch_size:\n # return\n\n\n # sample experience.\n samples = random.sample(range(len(self.experience)), self.minibatch_size)\n samples = [self.experience[i] for i in samples]\n\n # bach states\n states = np.empty((len(samples), self.observation_size))\n newstates = np.empty((len(samples), self.observation_size))\n action_mask = np.zeros((len(samples), self.num_actions))\n\n newstates_mask = np.empty((len(samples),))\n rewards = np.empty((len(samples),))\n exp_sum_rewards = np.empty((len(samples),))\n\n for i, (state, action, reward, newstate, exp_sum_reward) in enumerate(samples):\n # Clip the reward to be in the range of -1 and 1 as suggested in the paper\n if self.clip_reward:\n reward = np.clip(reward, -1.0, 1.0)\n states[i] = state\n action_mask[i] = 0\n action_mask[i][action] = 1\n rewards[i] = reward\n exp_sum_rewards[i] = exp_sum_reward\n if newstate is not None:\n newstates[i] = newstate\n newstates_mask[i] = 1\n else:\n newstates[i] = 0\n newstates_mask[i] = 0\n\n monitor_interval = 1000\n calculate_summaries = self.iteration % monitor_interval == 0 and \\\n self.summary_writer is not None\n\n experiences = [0]\n if calculate_summaries:\n length_of_experience = len(self.experience)\n sample_size = 500\n\n if length_of_experience > sample_size:\n experiences = [self.experience[i][2] for i in 
range(-sample_size, -1)]\n\n\n feed_dict = {\n self.observation: states,\n self.next_observation: newstates,\n self.next_observation_mask: newstates_mask,\n self.action_mask: action_mask,\n self.rewards: rewards,\n self.calculated_future_rewards: exp_sum_rewards,\n self.prediction_error_collection_tf: self.collected_prediction_errors[-monitor_interval:],\n self.last_rewards: np.array(experiences)\n }\n\n if self.game_watcher is not None:\n game_watcher = self.game_watcher\n number_of_total_games = float(game_watcher.number_of_games)\n if number_of_total_games == 0:\n number_of_total_games = 1\n feed_dict[self.winning_games] = game_watcher.number_of_reached_goals/number_of_total_games\n feed_dict[self.got_lost] = game_watcher.number_of_lost_games/number_of_total_games\n feed_dict[self.stepped_outside] = game_watcher.number_of_outside_steps/number_of_total_games\n # print game_watcher.number_of_reached_goals/number_of_total_games\n # Number encodes whether game was lost or won ...\n number_of_games_monitor_interval = 100\n if len(game_watcher.collected_game_identity) >= number_of_games_monitor_interval:\n if len(game_watcher.collected_game_identity) % number_of_games_monitor_interval == 0:\n game_identities = game_watcher.collected_game_identity[-number_of_games_monitor_interval:]\n number_of_reached_goals =game_identities.count(1)\n number_of_lost_games = game_identities.count(3)\n number_of_outside_steps = game_identities.count(2)\n\n win_perc = number_of_reached_goals/float(number_of_games_monitor_interval)\n get_lost_perc = number_of_lost_games/float(number_of_games_monitor_interval)\n step_out_perc = number_of_outside_steps/float(number_of_games_monitor_interval)\n feed_dict[self.winning_games_last] = win_perc\n feed_dict[self.got_lost_last] = get_lost_perc\n feed_dict[self.stepped_outside_last] = step_out_perc\n self.game_watcher.add_memory_of_last_interval(win_perc, get_lost_perc, step_out_perc)\n summary_str = self.s.run(self.game_watch_summaries, feed_dict)\n for element in summary_str:\n self.summary_writer.add_summary(element, self.game_watcher.number_of_games)\n else:\n feed_dict[self.winning_games_last] = self.game_watcher.cur_winning_percentage\n feed_dict[self.got_lost_last] = self.game_watcher.cur_stepped_outside_percentage\n feed_dict[self.stepped_outside_last] = self.game_watcher.cur_getting_lost_percentage\n\n else:\n feed_dict[self.winning_games_last] = 0.\n feed_dict[self.got_lost_last] = 0.\n feed_dict[self.stepped_outside_last] = 0.\n\n\n\n if only_tensorboard:\n summary_str = self.s.run(self.summarize, feed_dict)\n # summary_str = self.s.run(self.game_watch_summaries[0], feed_dict)\n if calculate_summaries:\n self.summary_writer.add_summary(summary_str, self.iteration)\n else:\n cost, _, summary_str = self.s.run([\n self.prediction_error,\n self.train_op,\n self.summarize if calculate_summaries else self.no_op1,\n ], feed_dict)\n if not self.perfect_actions_known:\n if self.target_network_update_discrete:\n if self.iteration%int((1/(self.s.run(self.target_network_update_rate)))) == 0:\n self.s.run(self.target_network_update)\n else:\n self.s.run(self.target_network_update)\n\n if calculate_summaries:\n self.summary_writer.add_summary(summary_str, self.iteration)\n print 'cost', cost\n self.number_of_times_train_called += 1\n self.collected_prediction_errors.append(cost)\n\n\n self.iteration += 1", "def __init__(self, buffer_size: int, batch_size: int):\n self.buffer: list = list()\n self.buffer_size = buffer_size\n self.batch_size = batch_size\n self.idx = 0", "def 
init_prey(n):\n \n prey = np.zeros(n, dtype='uint32, 2float64, uint8')\n prey.dtype.names = ('index', 'position', 'alive')\n prey['index'] = range(n)\n prey['position'] = np.random.rand(n, 2)\n prey['alive'] = np.ones(n)\n return prey", "def reset(self):\n\n self.rotation = 0\n self.iteration = 0\n self.predictions = []\n self.prediction = 0\n self.current_position = 0\n self.rotation_list = [0]\n self.prediction = 0\n self.initial_adjust = False", "def _train(self):\n epoch_training_time = 0\n epoch_metrics_time = 0\n self.epoch_ += 1\n for i_batch, sample_batched in enumerate(self.dataloader):\n self.global_step_ += 1\n batch_start_time = time.time()\n data_sample = sample_batched[0].to(self.device)\n\n # Get model samples, either from replay buffer or noise.\n if self.model_samples_ is None:\n self.model_samples_ = deque(\n [\n self.net_.sample_from_prior(\n data_sample.shape[0], device=self.device\n ).detach()\n ]\n )\n elif len(self.model_samples_) > self.max_replay:\n self.model_samples_.popleft()\n replay_sample = random.choices(\n self.model_samples_,\n # favor more recent samples:\n weights=list(range(1, len(self.model_samples_) + 1)),\n )[0]\n noise_sample = self.net_.sample_from_prior(\n replay_sample.shape[0], device=self.device\n )\n mask = torch.rand(replay_sample.shape[0]) < self.replay_prob\n while len(mask.shape) < len(replay_sample.shape):\n # Add extra feature-dims\n mask.unsqueeze_(dim=-1)\n\n model_sample = torch.where(\n mask.to(self.device), replay_sample, noise_sample\n )\n\n self.net_.eval()\n # Run at least one iteration\n model_sample = self.net_.sample_fantasy(\n model_sample,\n num_mc_steps=self.num_mc_steps,\n mc_dynamics=self.sampler,\n ).detach()\n\n self.model_samples_.append(model_sample)\n\n # Sanity checks:\n assert (\n data_sample.shape[1:] == self.net_.input_shape\n ), \"Data is incompatible with network.\"\n assert (\n model_sample.shape[1:] == data_sample.shape[1:]\n ), \"Model and data samples are incompatible.\"\n\n # Forward gradient:\n self.net_.train()\n self.net_.zero_grad()\n data_energy_mean = self.net_(data_sample).mean()\n model_energy = self.net_(model_sample)\n model_energy_mean = model_energy.mean()\n\n # Estimate the odds of the data's energy based on a normal fitted to\n # model samples:\n data_erf = torch.erf(\n (data_energy_mean - model_energy_mean) / model_energy.std()\n )\n\n objective = data_energy_mean - model_energy_mean\n objective.backward()\n torch.nn.utils.clip_grad.clip_grad_value_(self.net_.parameters(), 1e2)\n self.optimizer_.step()\n\n batch_training_time = time.time() - batch_start_time\n epoch_training_time += batch_training_time\n self.logger_(energy_diff=float(objective))\n self.logger_(data_erf=float(data_erf))\n\n tr_metrics_start_time = time.time()\n for callback in self.step_callbacks:\n callback(\n net=self.net_,\n data_sample=data_sample,\n model_sample=model_sample,\n epoch=self.epoch_,\n global_step=self.global_step_,\n validation=False,\n )\n tr_metrics_time = time.time() - tr_metrics_start_time\n epoch_metrics_time += tr_metrics_time\n if self.verbose:\n print(\n f\"on epoch {self.epoch_}, batch {i_batch}, data erf: {data_erf}, objective: {objective}\"\n )\n print(f\"model energy: {model_energy_mean} +- {model_energy.std()}\")\n print(f\"data energy: {data_energy_mean}\")\n print(\n f\"training time: {batch_training_time:0.3f}s, metrics time: {tr_metrics_time:0.3f}s\"\n )\n means = self.logger_.means()\n if self.verbose:\n print(f\"on epoch {self.epoch_}\")\n for k, v in means.items():\n print(f\"{k}: 
{v}\")\n self.logger_.flush()\n means[\"loss\"] = energy_model.utils.constraints.add_soft_constraint(\n means[\"loss_ais\"], means[\"data_erf\"], lower_bound=-1\n )\n return means", "def __init__(self,\n state_space_size,\n action_space_size,\n target_update_freq=100, #1000, #cada n steps se actualiza la target network\n discount=0.99,\n batch_size=32,\n max_explore=1,\n min_explore=0.05,\n anneal_rate=(1/5000), #100000),\n replay_memory_size=100000,\n replay_start_size= 500): #500): #10000): #despues de n steps comienza el replay\n self.action_space_size = action_space_size\n\n self.online_network = Network(state_space_size, action_space_size)\n self.target_network = Network(state_space_size, action_space_size)\n\n self.update_target_network()\n\n # training parameters\n self.target_update_freq = target_update_freq\n self.discount = discount\n self.batch_size = batch_size\n\n # policy during learning\n self.max_explore = max_explore + (anneal_rate * replay_start_size)\n self.min_explore = min_explore\n self.anneal_rate = anneal_rate\n self.steps = 0\n\n # replay memory\n self.memory = Memory(replay_memory_size)\n self.replay_start_size = replay_start_size\n self.experience_replay = Memory(replay_memory_size)", "def __init__(self, buffer_size: int, n_step: int, gamma: float, demo: list = None):\n assert buffer_size > 0\n\n self.n_step_buffer: Deque = deque(maxlen=n_step)\n self.buffer_size = buffer_size\n self.buffer: list = list()\n self.n_step = n_step\n self.gamma = gamma\n self.demo_size = 0\n self.cursor = 0\n\n # if demo exists\n if demo:\n self.demo_size = len(demo)\n self.buffer.extend(demo)\n\n self.buffer.extend([None] * self.buffer_size)", "def reset(self):\n self.dynamic_predictions = {}\n self.position = 0\n self.references = []" ]
[ "0.7086755", "0.634814", "0.62348205", "0.61094946", "0.6062285", "0.601058", "0.59450614", "0.59385663", "0.59318626", "0.5931236", "0.5920841", "0.591392", "0.59087026", "0.5903313", "0.58007306", "0.57563967", "0.5704546", "0.56222826", "0.5611808", "0.56116015", "0.55923367", "0.5574717", "0.5571157", "0.5569781", "0.5561985", "0.55542725", "0.5531555", "0.55267376", "0.5523613", "0.5490623", "0.549047", "0.54888934", "0.5482198", "0.54786044", "0.5454529", "0.5451046", "0.543421", "0.542981", "0.5421077", "0.54192996", "0.5417941", "0.54122776", "0.5407545", "0.53920287", "0.53840417", "0.5367313", "0.5326736", "0.53178215", "0.5312215", "0.5292726", "0.5287888", "0.5264214", "0.5264214", "0.5259771", "0.5254575", "0.5247111", "0.5242366", "0.52402353", "0.5236755", "0.5236113", "0.523202", "0.52318716", "0.5223762", "0.52226084", "0.5222581", "0.521056", "0.52091366", "0.5205241", "0.52048767", "0.5197751", "0.5194411", "0.51906294", "0.51887625", "0.51802486", "0.5171881", "0.51595676", "0.5157524", "0.5153129", "0.513616", "0.5134112", "0.5127087", "0.51178837", "0.5108129", "0.51051056", "0.51037043", "0.5098505", "0.5098505", "0.5097858", "0.5087026", "0.50854164", "0.50784624", "0.5076028", "0.50749314", "0.50713956", "0.5069986", "0.5054426", "0.5051224", "0.5051203", "0.50477356", "0.50291204" ]
0.6525008
1
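The score fields in the row above are serialized as strings rather than floats, so a consumer has to cast them before comparing the positive document against its negatives. A minimal sketch of that step, reusing a few of the values printed above; the variable names and the rank convention shown here are assumptions for illustration, not a statement of how the stored rank was actually computed.

# Illustrative only: the three score values are copied from the row above,
# the names and the rank rule are assumed.
document_score = "0.6525008"
negative_scores = ["0.7086755", "0.634814", "0.62348205"]

pos = float(document_score)
negs = [float(s) for s in negative_scores]

# One plausible convention: 1-based rank of the positive among all scores.
rank = 1 + sum(s >= pos for s in negs)
print(pos, max(negs), rank)

With these particular values the positive scores below the best negative even though the stored rank above is 1, which suggests the dump's own rank follows some other convention (for example, position within a separately retrieved list); the raw rows do not say which.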
This property ensures that the annealing process is run every time that E is called. Anneals the epsilon rate down to a specified minimum to ensure there is always some noisiness to the policy actions. Returns as a property. Uses a modified TANH curve to roll off the values near min/max.
def e(self):\n ylow = self.e_min\n yhigh = self._e\n xlow = 0\n xhigh = self.anneal_max\n steep_mult = 8\n steepness = steep_mult / (xhigh - xlow)\n offset = (xhigh + xlow) / 2\n midpoint = yhigh - ylow\n x = np.clip(self.avg_score, 0, xhigh)\n x = steepness * (x - offset)\n e = ylow + midpoint / (1 + np.exp(x))\n return e
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_epsilon_greedy_policy(estimator, epsilon, nA):\n def policy_fn(observation):\n A = np.ones(nA, dtype=float) * epsilon / nA\n q_values = estimator.predict(observation)\n# print(q_values)\n best_action = np.argmax(q_values)\n A[best_action] += (1.0 - epsilon)\n return A\n return policy_fn", "def make_epsilon_greedy_policy(self, Q, epsilon, nA):\n\n def policy_fn(observation,p):\n A = np.ones(nA, dtype=float) * epsilon / nA\n q=Q(observation,p)\n\n best_action = np.argmax(q)\n print(\"action called:\",self.env.action_labels[best_action])\n A[best_action] += (1.0 - epsilon)\n return A\n\n return policy_fn", "def make_epsilon_greedy_policy(estimator, nA):\n def policy_fn(sess, observation, epsilon):\n A = np.ones(nA, dtype=float) * epsilon / nA\n q_values = estimator.predict(sess, np.expand_dims(observation, 0))[0]\n print(f'q_values: {q_values}')\n best_action = np.argmax(q_values)\n A[best_action] += (1.0 - epsilon)\n return A\n return policy_fn", "def make_epsilon_greedy_policy(estimator, epsilon, nA):\r\n def policy_fn(observation):\r\n A = np.ones(nA, dtype=float) * epsilon / nA\r\n q_values = estimator.predict(observation)\r\n best_action = np.argmax(q_values)\r\n A[best_action] += (1.0 - epsilon)\r\n return A\r\n return policy_fn", "def current_epsilon(self):\n t = self.action_requests\n T = self.exploration_period\n if(t >= T):\n return self.epsilon_final\n\n epsilon0 = self.epsilon_initial\n epsilonT = self.epsilon_final\n\n return epsilon0 - (t * (epsilon0 - epsilonT)) / T", "def make_epsilon_greedy_policy(estimator, nA):\n def policy_fn(sess, observation, epsilon):\n A = np.ones(nA, dtype=float) * epsilon / nA\n q_values = estimator.predict(sess, np.expand_dims(observation, 0))[0]\n best_action = np.argmax(q_values)\n A[best_action] += (1.0 - epsilon)\n return A\n return policy_fn", "def get_epsilongreedy_policy(epsilon):\n \n def epsilongreedy_policy(Qvalues_oa):\n \"\"\"Returns softmax action probabilites from Qvalues\"\"\"\n \n X = np.zeros_like(Qvalues_oa)\n \n # where are the actions with maximal value?\n maxX = Qvalues_oa == np.max(Qvalues_oa, axis=-1, keepdims=True)\n \n # assign 1-eps probability to max actions\n X += (1-epsilon) * maxX / maxX.sum(axis=-1, keepdims=True)\n \n # assign eps probability to other actions\n othX = np.logical_not(maxX)\n X += epsilon * othX / othX.sum(axis=-1, keepdims=True)\n \n assert np.allclose(X.sum(-1), 1.0)\n \n return X\n \n return epsilongreedy_policy", "def treat(self):\r\n if self.noiseS > 0:\r\n self.evaluations = min((self.evaluations * self.alphaevals, self.maxevals))\r\n return self.alphasigma\r\n else:\r\n self.evaluations = max((self.evaluations * self.alphaevalsdown, self.minevals))\r\n return 1.0", "def epsilon(self):\n return self.__epsilon", "def make_epsilon_greedy_policy(Q, epsilon, nA):\n def policy_fn(observation):\n A = np.ones(nA, dtype=float) * epsilon / nA\n best_action = np.argmax(Q[observation])\n A[best_action] += (1.0 - epsilon)\n return A\n return policy_fn", "def get_initial_epsilon(self):\n return self.epsilon_percentile, True, self.max_rounds == 0", "def epsilon(self):\n return self._epsilon", "def training_policy(self, state):\n if self.epsilon > random.random():\n return random.randint(0, 1)\n return self.policy(state)", "def make_epsilon_greedy_policy(Q, epsilon, nA):\n\n def policy_fn(observation):\n\n # get random number\n random_number = random.uniform(0, 1)\n\n # get actions with maximum value\n greedy_actions = np.argwhere(Q[observation] == np.amax(Q[observation])).squeeze()\n if not 
len(greedy_actions.shape):\n greedy_actions = [greedy_actions]\n action = random.choice(greedy_actions)\n\n # if number less than epsilon, get random other actions\n if random_number <= epsilon:\n all_actions = list(range(0, nA))\n if not len(greedy_actions) == nA:\n action = random.choice(all_actions)\n\n return int(action)\n\n return policy_fn", "def make_epsilon_greedy_policy(Q, epsilon, nA):\n def policy_fn(observation):\n if random.random() < (1 - epsilon):\n return np.argmax(Q[observation])\n else:\n return random.choice(np.arange(nA))\n\n return policy_fn", "def update_epsilon(self):\n\t\tif self.epsilon > self.epsilon_min:\n\t\t\tself.epsilon *= self.epsilon_decay", "def anneal(self):\n # Initialize with the greedy solution.\n self.cur_solution, self.cur_fitness = self.initial_solution()\n\n print(\"Starting annealing.\")\n while self.T >= self.stopping_temperature and self.iteration < self.stopping_iter:\n candidate = list(self.cur_solution)\n l = random.randint(2, self.N - 1)\n i = random.randint(0, self.N - l)\n candidate[i : (i + l)] = reversed(candidate[i : (i + l)])\n self.accept(candidate)\n self.T *= self.alpha\n self.iteration += 1\n\n self.fitness_list.append(self.cur_fitness)\n\n print(\"Best fitness obtained: \", self.best_fitness)\n improvement = 100 * (self.fitness_list[0] - self.best_fitness) / (self.fitness_list[0])\n print(f\"Improvement over greedy heuristic: {improvement : .5f}%\")\n return self.best_fitness", "def set_temporal_adapted_epsilon(self):\n\t\t\n\t\t# Perfectly adapted activity level is based on the variables:\n\t\t# temporal_adaptation_mu_eps, temporal_adaptation_sigma_eps, \n\t\t# temporal_adaptation_mu_Ss0. These functions take the activity\n\t\t# level set by these variables at that signal intensity, to \n\t\t# adapt epsilon to the current Ss\n\t\tperfect_adapt_eps_base = sp.ones(self.Mm)*\\\n\t\t\t\tself.temporal_adaptation_mu_eps + random_matrix(self.Mm, \n\t\t\t\tparams=[0, self.temporal_adaptation_sigma_eps], \n\t\t\t\tseed=self.seed_eps)\n\t\tperfect_adapt_Ss = sp.zeros(self.Nn)\n\t\tperfect_adapt_Ss[self.Ss0 != 0] = self.temporal_adaptation_mu_Ss0\n\t\tperfect_adapt_Yy = receptor_activity(perfect_adapt_Ss, \n\t\t\t\t\t\t\t\tself.Kk1, self.Kk2, perfect_adapt_eps_base)\n\t\t\n\t\t# Make adaptation rate into a vector if it has not yet been set.\n\t\ttry:\n\t\t\tself.temporal_adaptation_rate_vector\n\t\texcept AttributeError:\n\t\t\tassert self.temporal_adaptation_rate_sigma == 0, \"Before \"\\\n\t\t\t\t\"setting new epsilon with set_temporal_adapted_epsilon, \"\\\n\t\t\t\t\"you must call set_ordered_temporal_adaptation_rate, since \"\\\n\t\t\t\t\"temporal_adaptation_rate_sigma is nonzero\"\n\t\t\tself.temporal_adaptation_rate_vector = sp.ones(self.Mm)*\\\n\t\t\t\tself.temporal_adaptation_rate\n\t\t\n\t\tif self.temporal_adaptation_type == 'imperfect':\n\t\t\td_eps_dt = self.temporal_adaptation_rate_vector*\\\n\t\t\t\t\t\t(self.Yy - perfect_adapt_Yy)\n\t\t\tdelta_t = self.signal_trace_Tt[1] - self.signal_trace_Tt[0]\n\t\t\tself.eps += delta_t*d_eps_dt \n\t\telif self.temporal_adaptation_type == 'perfect':\n\t\t\tself.eps = free_energy(self.Ss, self.Kk1, self.Kk2, \n\t\t\t\t\t\t\t\t\tperfect_adapt_Yy)", "def epsilon_delta(self):", "def epsilon_greedy_policy_improve(Q_value, nS, nA, epsilon):\n\n new_policy = epsilon * np.ones((nS, nA)) / nA # = epsilon / m, where m is the number of Actions, nA\n ############################\n # YOUR IMPLEMENTATION HERE #\n # HINT: IF TWO ACTIONS HAVE THE SAME MAXIMUM Q VALUE, THEY MUST BOTH BE EXECUTED EQUALLY 
LIKELY.\n # THIS IS IMPORTANT FOR EXPLORATION. This might prove useful:\n # https://stackoverflow.com/questions/17568612/how-to-make-numpy-argmax-return-all-occurrences-of-the-maximum\n \n # print(\"new_policy = {0}\".format(new_policy))\n \n for s_t in range (0, nS):\n # print(\"old_policy[{0}] = {1}\".format(s_t, new_policy[s_t]))\n # print(\"Q_value[{0}] = {1}\".format(s_t, Q_value[s_t]))\n Q_list = np.argwhere(Q_value[s_t] == np.amax(Q_value[s_t])).flatten() # get a list of all indices where Q is maximum, (argmax(Q))\n # print(\"Q_list: \" + str(Q_list))\n max_Q = np.random.choice(Q_list.flatten()) # randomly pick from those indices. Picking each index is equally likely.\n # print(\"max_Q: \" + str(max_Q))\n \n # A_star = new_policy[s_t][max_Q]\n # print(\"A_star: \" + str(A_star))\n \n new_policy[s_t][max_Q] += 1 - epsilon # for the chosen maximal index of Q, set the polocy to epsilon/m + 1 - epsilon\n # print(\"new_policy[{0}] = {1}\".format(s_t, new_policy[s_t]))\n \n # for a_t in range (0, nA):\n # if a_t in Q_list:\n # new_policy[s_t][a_t] += (1 - epsilon) / len(Q_list)\n\n ############################\n # print(\"new_policy = {0}\".format(new_policy))\n return new_policy", "def _epsilon_greedy(self, info_state, legal_actions, epsilon):\n probs = np.zeros(self._num_actions)\n if np.random.rand() < epsilon:\n action = np.random.choice(legal_actions)\n probs[legal_actions] = 1.0 / len(legal_actions)\n else:\n info_state = np.reshape(info_state, [1, -1])\n q_values = self._session.run(\n self._q_values, feed_dict={self._info_state_ph: info_state})[0]\n legal_q_values = q_values[legal_actions]\n action = legal_actions[np.argmax(legal_q_values)]\n probs[action] = 1.0\n return action, probs", "def epsilon_greedily_update_policy(self, current_Q, iterations_completed):\n iteration = iterations_completed+1\n # epsilon = min(1/np.log(iterations_completed+.0001),1)\n # epsilon = 1/iteration\n epsilon = 0.1\n def new_policy(state):\n heads = True if random.random() < epsilon else False # Flip our epsilon greedy coin\n if heads: # If heads comes up, choose random action\n return random.randint(0, self.nA-1)\n else: # If tails comes up, choose greedy option\n return np.argmax(current_Q[state]['Q(s,a)'])\n return new_policy", "def anneal_temp(self, T):\n new_T = self.anneal_rate * T\n return(new_T)", "def get_epsilon(self):\n step_size = float(self._eps_begin - self._eps_end) / self._total_steps\n self._epsilon = max(self._eps_end, self._epsilon - step_size)\n return self._epsilon", "def calc_epsilon(self, state_number, evaluation=False):\n if evaluation:\n return self.eps_evaluation\n elif state_number < self.replay_buffer_start_size:\n return self.eps_initial\n elif self.replay_buffer_start_size <= state_number < self.replay_buffer_start_size + self.eps_annealing_states:\n return self.slope * state_number + self.intercept\n elif state_number >= self.replay_buffer_start_size + self.eps_annealing_states:\n return self.slope_2 * state_number + self.intercept_2", "def rae(self) -> float:\n return float(np.sum(self._ae()) / (np.sum(np.abs(self.true - np.mean(self.true))) + EPS))", "def epsilon_greedy_policy(network, eps_end, eps_start, eps_decay, actions, device):\n def policy_fn(observation, steps_done):\n sample = np.random.random()\n eps_threshold = eps_end + (eps_start - eps_end) * math.exp(-1. 
* steps_done * eps_decay)\n if sample > eps_threshold:\n with torch.no_grad():\n if observation.dim() == 3:\n observation = observation.unsqueeze(0)\n elif observation.dim() < 3:\n NotImplementedError(\"Wrong input dim\")\n\n values = network.forward(observation.to(device))[0]\n best_action = torch.max(values, dim=0)[1]\n return best_action.cpu().item(), eps_threshold\n else:\n # return torch.tensor(np.random.randint(low=0, high=num_actions), dtype=torch.long), eps_threshold\n return random.choice(actions), eps_threshold\n return policy_fn", "def _get_epsilon(self, is_evaluation, power=1.0):\n if is_evaluation:\n return 0.0\n decay_steps = min(self._step_counter, self._epsilon_decay_duration)\n decayed_epsilon = (\n self._epsilon_end + (self._epsilon_start - self._epsilon_end) *\n (1 - decay_steps / self._epsilon_decay_duration) ** power)\n return decayed_epsilon", "def play_epsilon_greedy_policy(self, board):\n policy = np.random.choice(['random', 'q_agent'], 1, p=[self.epsilon, 1-self.epsilon])[0]\n if policy == 'random':\n move, _ = RandomAgent.play(board)\n else:\n move, q_value = self.play(board)\n self.after_move()\n return move, policy", "def update_epsilon(self):\n self.epsilon = self.epsilon * self.decay", "def createEpsilonGreedyPolicy(Q, epsilon, num_actions):\n def policyFunction(state):\n\n Action_probabilities = np.ones(num_actions,\n dtype = float) * epsilon / num_actions\n\n best_action = np.argmax(Q[state])\n Action_probabilities[best_action] += (1.0 - epsilon)\n return Action_probabilities\n\n return policyFunction", "def _evaluate(self, state):\n leading_power_error = self.get_leading_power_error(state)\n if np.isfinite(leading_power_error):\n return -float(leading_power_error)\n else:\n return self._default_value", "def epsilon():\n return _EPSILON", "def testEpsK1Changes(self):\n with self.test_context() as session:\n initial_eps = 1e-3\n num_classes = 5\n rm = gpflow.likelihoods.RobustMax(num_classes, initial_eps)\n\n expected_eps_k1 = initial_eps / (num_classes - 1.)\n actual_eps_k1 = session.run(rm._eps_K1)\n self.assertAlmostEqual(expected_eps_k1, actual_eps_k1)\n\n new_eps = 0.412\n rm.epsilon.assign(new_eps, session=session)\n expected_eps_k2 = new_eps / (num_classes - 1.)\n actual_eps_k2 = session.run(rm._eps_K1)\n self.assertAlmostEqual(expected_eps_k2, actual_eps_k2)", "def createEpsilonGreedyPolicy(Q, epsilon, num_actions):\n\n def policyFunction(state):\n Action_probabilities = np.ones(num_actions,\n dtype=float) * epsilon / num_actions\n\n best_action = np.argmax(Q[state])\n Action_probabilities[best_action] += (1.0 - epsilon)\n return Action_probabilities\n\n return policyFunction", "def epsilongreedy_policy(Qvalues_oa):\n \n X = np.zeros_like(Qvalues_oa)\n \n # where are the actions with maximal value?\n maxX = Qvalues_oa == np.max(Qvalues_oa, axis=-1, keepdims=True)\n \n # assign 1-eps probability to max actions\n X += (1-epsilon) * maxX / maxX.sum(axis=-1, keepdims=True)\n \n # assign eps probability to other actions\n othX = np.logical_not(maxX)\n X += epsilon * othX / othX.sum(axis=-1, keepdims=True)\n \n assert np.allclose(X.sum(-1), 1.0)\n \n return X", "def annealing(self, precision=None, print_steps=True):\n if self.max_temp is None:\n self.get_max_temp()\n state = self.individuals[0]\n self._temp = self.max_temp\n generation = 0\n mins = []\n while self.evaluations < self.eval_limit:\n equilibrium = False\n while not equilibrium:\n solution = self.get_neighbour(state.solution)\n value = self.get_solutions_value(solution)\n fitness = 
self.get_fitness(value)\n if self.acceptance_probability(state.value, value, self._temp) > np.random.random():\n equilibrium = True\n if self.evaluations % 100 == 0:\n mins += [self.best_individual.value]\n if self.evaluations > self.eval_limit:\n break\n state.solution = solution\n state.value = value\n state.fitness = fitness\n self.update_temperature()\n if state.fitness > self.best_individual.fitness:\n self.best_individual = copy.deepcopy(state)\n if print_steps:\n print(\"Generation \", generation, \"; Evaluations: \", self.evaluations,\n \"; Temperature: \", self._temp, \"; \", state)\n if precision is not None and self.solution_precision(precision):\n break\n generation += 1\n return self.best_individual, mins", "def update_parameter(self):\n\n if self.testing: # 1. No random choice when testing\n self.epsilon = 0\n else: # 2. Update parameters when learning\n if self.epsilon > 0.:\n self.epsilon -= 0.01\n\n return self.epsilon", "def e(self):\n if self._e is None:\n # self._e = self.distributions.uniform(0.3,0.33)\n # return self._e\n # max is set by q but also limited by users choice of e_max.\n res_a = 29.9*((self.j[0]/self.k[0])**(2/3))\n q = self.distributions.truncated_normal(self.q_c, self.q_w, res_a*(1-0.8), res_a*(1-0.001))\n self._e = 1 - q/res_a\n return self._e", "def _define_epsilon(n,T,a=1):\n\n return np.sqrt(np.log(n)/T)*a", "def _evaluate(self, state):\n input_values_rmse = self.get_input_values_rmse(state)\n if not self._include_leading_powers:\n if np.isfinite(input_values_rmse):\n return -input_values_rmse\n else:\n return self._default_value\n # NOTE(leeley): If computing the leading power fails\n # (timeout or sympy ValueError) or functions in symbolic_properties return\n # nan (for example, 1 / (x - x)).\n leading_power_error = self.get_leading_power_error(state)\n\n if self._hard_penalty_default_value is None:\n # Soft penalty.\n if np.isfinite(leading_power_error):\n return -input_values_rmse - leading_power_error\n else:\n return self._default_value\n else:\n # Hard penalty.\n if (np.isfinite(leading_power_error)\n and np.isclose(leading_power_error, 0)):\n return -input_values_rmse\n else:\n return self._hard_penalty_default_value", "def __act__(\n self,\n t: int\n ) -> Action:\n\n if self.random_state.random_sample() < self.epsilon:\n a = self.random_state.choice(self.most_recent_state.AA)\n self.epsilon *= (1 - self.epsilon_reduction_rate)\n else:\n a = self.greedy_action\n\n return a", "def epsilon(current_episode, num_episodes):\n # return 1 - (current_episode/num_episodes)\n return .5 * .9**current_episode", "def anneal(solution):\n old_cost = cost(solution)\n T = 1.0\n T_min = 0.00001\n ALPHA = 0.9\n\n while T > T_min:\n i = 1\n while i <= 100:\n new_solution = neighbor(solution)\n new_cost = cost(new_solution)\n ap = acceptance_probability(old_cost, new_cost, T)\n if ap > random.random():\n solution = new_solution\n old_cost = new_cost\n i += 1\n T = T * ALPHA\n\n return solution, old_cost", "def eps(self):\n return self.eps_mask*self.eps_scheduler.value", "def choose_epsilon_greedy(self, state: Tuple[int, ...], valid_actions: Tuple[int, ...]) -> int:\n if random.random() < self.__epsilon:\n return self.choose_uniform(valid_actions)\n return self.choose_greedy(state, valid_actions)", "def mean_approx(self, name='mean_approx'):\n with self._name_and_control_scope(name):\n loc = tf.convert_to_tensor(self.loc)\n scale = tf.convert_to_tensor(self.scale)\n monahan_stefanski_answer = approx_expected_sigmoid(\n loc, scale,\n 
MONAHAN_MIX_PROB[self.num_probit_terms_approx],\n MONAHAN_INVERSE_SCALE[self.num_probit_terms_approx])\n if self.gauss_hermite_scale_limit is None:\n return monahan_stefanski_answer\n else:\n gauss_hermite_answer = logit_normal_mean_gh(\n loc, scale, self.gauss_hermite_degree)\n return tf.where(scale < self.gauss_hermite_scale_limit,\n gauss_hermite_answer, monahan_stefanski_answer)", "def aerosols(self):\n return 1.", "def eps_greedy_policy(q_values, eps, forbidden_actions):\r\n\r\n q_values[forbidden_actions] = np.NINF\r\n indices = torch.nonzero(q_values == q_values.max())\r\n random_index = random.randint(0, indices.shape[1]-1)\r\n best_action_index = indices[random_index]\r\n l = len(q_values)\r\n n_forbidden_actions = np.count_nonzero(forbidden_actions)\r\n p = eps / (l-n_forbidden_actions)\r\n\r\n policy = np.full([l], p)\r\n policy[forbidden_actions] = 0\r\n policy[best_action_index] += 1 - eps\r\n\r\n return policy", "def set_epsilon(self,epsilon):\r\n\t\tself.epsilon = epsilon", "def _sigma_ee_nonrel(self,gam,eps):\n s0 = 4 * r0**2 * alpha / (15 * eps)\n x = 4 * eps / (gam**2 - 1)\n sigma_nonrel = s0 * self._F(x,gam)\n sigma_nonrel[np.where(eps >= 0.25*(gam**2 - 1.))] = 0.0\n sigma_nonrel[np.where(gam*np.ones_like(eps) < 1.0)] = 0.0\n return sigma_nonrel / mec2_unit", "def policies(self, QTable, epsilon, state, next_states, action_to_do): # Inspiration from https://www.geeksforgeeks.org/q-learning-in-python/?fbclid=IwAR1UXR88IuJBhhTakjxNq_gcf3nCmJB0puuoA46J8mZnEan_qx9hhoFzhK8\r\n num_actions = 5 # 5 actions-value, [moved_out, into_goal, send_opp_home, send_self_home, move_token] \r\n def epsilonGreedyPolicy(): \r\n tmp_state = str(state.state[0])\r\n valid_actions = np.append(action_to_do, True) # the True appended is move_token\r\n valid_act_len = len(np.where(valid_actions==True)[0])\r\n\r\n Action_probabilities = np.ones(num_actions, dtype = float) * epsilon / valid_act_len # divides probability based on number of valid actions and epsilon (each 0.025 if 4 actions) \r\n Action_probabilities = np.multiply(Action_probabilities, valid_actions)\r\n\r\n # If same values in QTable choose random valid action \r\n best_action = np.argmax(QTable[tmp_state]) # Find index of action which gives highest QValue\r\n # Check if valid action else find new best action\r\n if not valid_actions[best_action]:\r\n actions = np.argsort(-QTable[tmp_state]) # descending order of action values\r\n for i in range(len(valid_actions)):\r\n if valid_actions[actions[i]]:\r\n best_action = actions[i]\r\n break\r\n\r\n Action_probabilities[best_action] += (1.0 - epsilon) # Assigns rest probability to best action so probability sums to 1\r\n\r\n return Action_probabilities \r\n\r\n def greedyPolicy():\r\n tmp_state = str(state.state[0])\r\n valid_actions = np.append(action_to_do, True) # the True appended is move_token\r\n\r\n Action_probabilities = np.zeros(num_actions, dtype = float)\r\n\r\n best_action = np.argmax(QTable[tmp_state]) # Find index of action which gives highest QValue\r\n # Check if valid action else find new best action\r\n if not valid_actions[best_action]:\r\n actions = np.argsort(-QTable[tmp_state]) # descending order of action values\r\n for i in range(len(valid_actions)):\r\n if valid_actions[actions[i]]:\r\n best_action = actions[i]\r\n break\r\n\r\n\r\n Action_probabilities[best_action] += 1.0\r\n return Action_probabilities\r\n\r\n\r\n if(self.__chosenPolicy == \"epsilon greedy\"):\r\n return epsilonGreedyPolicy \r\n if(self.__chosenPolicy == \"greedy\"):\r\n return greedyPolicy", 
"def _epsilon(self, step):\n if step < 0:\n return self._start\n elif step > self._steps:\n return self._stop\n else:\n return self._step_size * step + self._start", "def get_epsilon_action(epsilon, env, mean_reward_per_bandit):\n explore = np.random.uniform() < epsilon\n\n if explore:\n return env.action_space.sample()\n else:\n return np.argmax(mean_reward_per_bandit)", "def MccEpsilon(self):\n if getattr(self, '_MccEpsilon', None) is None:\n self._MccEpsilon = Utils.sdiag(self.epsilon)\n return self._MccEpsilon", "def evaluate(self, _t):\n\n temp = self.init_temp*(self.decay**_t)\n\n if temp < self.min_temp:\n temp = self.min_temp\n\n return temp", "def __init__(self, epsilon=1e-14):\n self.epsilon = epsilon", "def lapse(self):\n pass", "def decay_epsilon(self, epsilon, MIN_EPSILON,\r\n EPSILON_DECAY: float) -> float:\r\n if epsilon > MIN_EPSILON:\r\n epsilon *= EPSILON_DECAY\r\n epsilon = max(MIN_EPSILON, epsilon)\r\n return epsilon", "def get_pvalue_thd(self):\n terminals_values = []\n for terminal in self.feature_tree.get_terminals():\n temp = self.get_mannwitneyu_pvalue(terminal)\n terminals_values.append(temp)\n if temp == 1:\n print('non siginificant')\n while 0 in terminals_values:\n terminals_values.remove(0)\n self.pvalue_thd = min(self.pvalue_thd,np.mean(terminals_values))\n #print('pvalue_thd',self.pvalue_thd)", "def multiplicative_epsilon(front, **kargs):\n wobj = numpy.array([ind.fitness.wvalues for ind in front]) * -1\n\n def contribution(i):\n mwobj = numpy.ma.array(wobj)\n mwobj[i] = numpy.ma.masked\n return numpy.min(numpy.max(wobj[i] / mwobj, axis=1))\n\n contrib_values = list(map(contribution, list(range(len(front)))))\n\n # Select the minimum contribution value\n return numpy.argmin(contrib_values)", "def calc_alpha(epsilon): \n return float(0.5 * np.log((1-epsilon)/epsilon))", "def additive_epsilon(front, **kargs):\n wobj = numpy.array([ind.fitness.wvalues for ind in front]) * -1\n\n def contribution(i):\n mwobj = numpy.ma.array(wobj)\n mwobj[i] = numpy.ma.masked\n return numpy.min(numpy.max(wobj[i] - mwobj, axis=1))\n\n contrib_values = list(map(contribution, list(range(len(front)))))\n\n # Select the minimum contribution value\n return numpy.argmin(contrib_values)", "def epsilon_greedy_probs(self, nA, Q_s, i_count, eps=None):\r\n epsilon = 1.0 / i_count\r\n if eps is not None:\r\n epsilon = eps\r\n \r\n policy_s = np.ones(nA) * epsilon / nA\r\n policy_s[np.argmax(Q_s)] = 1 - epsilon + (epsilon / nA)\r\n return policy_s", "def evaluate(self, _t):\n\n temp = self.init_temp*np.exp(-1.0*self.exp_const*_t)\n\n if temp < self.min_temp:\n temp = self.min_temp\n\n return temp", "def epsilon_greedy(Q, epsilon, state):\n random_number = random.random()\n if (random_number < epsilon) and (state not in critical_states):\n return env.action_space.sample()\n\n else:\n return np.argmax(Q[state])", "def compute_optimal_policy(self):\n\n self.theta_history.append(self.theta)\n\n since = time()\n for it in range(self.n_itr):\n print(\"lr: {} | Iteration N: {} \\r\".format(self.lr, it), end=\"\")\n\n self.policy = GaussianPolicy(self.theta, self.sigma)\n\n # Simulate N trajectories\n paths = collect_episodes(\n self.sim, policy=self.policy, horizon=self.T, n_episodes=self.n_episodes)\n\n avg_return = self._compute_performance(paths=paths)\n self.avg_returns.append(avg_return)\n\n # Gradient update\n self.theta += self.update_rule(self.policy.grad_J(\n paths, self.discounts, n_ep=self.n_episodes, T=self.T), lr=self.lr)\n\n # History update\n self.theta_history.append(self.theta)\n\n 
# print(\"\\nTook {}s\".format(round(time() - since, 2)))\n print(\"lr: {} | Iteration N: {} | Took: {}s\".format(self.lr, self.n_itr, round(time() - since, 2)))", "def get_epsilon(self, round, abc_history):\n if round > len(abc_history):\n t = np.percentile(abc_history[-1]['distances'], self.epsilon_percentile)\n else:\n t = np.percentile(abc_history[round - 1]['distances'], self.epsilon_percentile)\n return t, False, self.max_rounds and round + 1 == self.max_rounds", "def MAE_rel(self):\n try:\n return(self.MAE / self.price_open)\n except:\n return", "def __bestLambda(self):\n\t\t\n\t\t# Determine starting value for brent-method (to avoid local minimum).\n\t\tself.startValue = self.__findStartValue()\n\t\t\t\n\t\t# Check if there exists a minimum within the range of self.lamStart. \n\t\t# Otherwise, use fmin because we cannot provide an interval. \n\t\tif (self.startIdx != 0 and self.startIdx != self.nStartValues-1):\n\t\t\ts = scipy.optimize.brent(self.__minBayesianEvidence, brack=(self.logLamStart[self.startIdx-1], self.logLamStart[self.startIdx], self.logLamStart[self.startIdx+1]))\n\t\telse:\n\t\t\ts = scipy.optimize.fmin(self.__minBayesianEvidence, self.startValue, disp=False)[0]\n\t\t\n\t\treturn 10**s", "def _semi_relativistic_loss(eps):\n P = Pcoef * np.imag(1./eps) / np.real(Theta**2.+ThetaE**2.)\n return P", "def estimate(self, reps):\n return self.onerm / MaxCalc.coefficients[reps - 1]", "def _get_mean(self):\n mu = self._get_conditional_negative_energy()\n return sigmoid(mu)", "def future_deceivedup(self, a): \n nfav, succfav = self.control[a.name]\n #f_n = n+1;\n # f_worldround = self.world.round+1\n f_successRate = float(a.success +1) / float(self.world.round+1)\n if hardrule:\n return (nfav+1 > 5) and ((self.world.round - nfav) > 5) and \\\n float(a.success+1-succfav)/(self.world.round+1 - nfav) > \\\n (float(succfav)/nfav) + epsilonD\n else:\n return nfav > 5 and (f_successRate > (float(succfav)/nfav) + epsilonD \\\n or f_successRate < epsilonD)", "def maape(self) -> float:\n return float(np.mean(np.arctan(np.abs((self.true - self.predicted) / (self.true + EPS)))))", "def myTwistFunctionAirliner(Epsilon):\n return -(6.53*Epsilon*Epsilon - 14.1*Epsilon + 4.24)", "def linear_annealing(self, n, total, p_initial, p_final):\n if n == total:\n print \"exploration period over\"\n if n >= total:\n return p_final\n else:\n return p_initial - (n * (p_initial - p_final)) / (total)", "def hard_monotonic_attention(e_ma, aw_prev, eps_wait, p_threshold=0.5):\n bs, H_ma, qlen, klen = e_ma.size()\n assert qlen == 1\n assert e_ma.size(-1) == aw_prev.size(-1)\n aw_prev = aw_prev[:, :, :, -klen:]\n _p_choose = torch.sigmoid(e_ma[:, :, 0:1])\n p_choose = (_p_choose >= p_threshold).float()\n p_choose *= torch.cumsum(aw_prev[:, :, 0:1, -e_ma.size(3):], dim=-1)\n alpha = p_choose * exclusive_cumprod(1 - p_choose)\n if eps_wait > 0:\n for b in range(bs):\n if alpha[b].sum() == 0:\n continue\n leftmost = alpha[b, :, -1].nonzero()[:, -1].min().item()\n rightmost = alpha[b, :, -1].nonzero()[:, -1].max().item()\n for h in range(H_ma):\n if alpha[b, h, -1].sum().item() == 0:\n alpha[b, h, -1, min(rightmost, leftmost + eps_wait)] = 1\n continue\n if alpha[b, h, -1].nonzero()[:, -1].min().item() >= leftmost + eps_wait:\n alpha[b, h, -1, :] = 0\n alpha[b, h, -1, leftmost + eps_wait] = 1\n return alpha, _p_choose", "def empirical_erm(self):\n return lambda samples: np.mean(samples) - 0.5 * self.alpha * np.var(samples)", "def evaluate(self, _t):\n\n temp = self.init_temp - (self.decay*_t)\n\n if temp < 
self.min_temp:\n temp = self.min_temp\n\n return temp", "def get_epsilon(step: int):\n return (epsilon_0 - epsilon) * math.exp(-step) + epsilon", "def simulate_power(self):\n if self.p_treatment - self.p_control < 0:\n thresh = 1 - self.alpha\n else:\n thresh = self.alpha\n\n try:\n p_crit = self.norm_null.ppf(1 - thresh)\n beta = self.norm_alt.cdf(p_crit)\n except:\n self.norm_distribution()\n p_crit = self.norm_null.ppf(1 - thresh)\n beta = self.norm_alt.cdf(p_crit)\n\n power = (1 - beta) if self.p_treatment > self.p_control else beta\n self.power = power\n\n return power", "def update_policy(self, minibatch_size):\n \n steps = self.rewards.shape[0]\n batch_size = self.rewards.shape[0] * self.rewards.shape[1]\n #steps = 500\n #batch_size = 500\n #print(steps)\n #print(batch_size)\n \n # Compute advantages\n '''\n with torch.no_grad():\n if self.gae:\n advantages = torch.zeros_like(self.rewards).to(self.training_device)\n lastgaelam = 0\n for t in reversed(range(steps)):\n if t == steps - 1:\n nextnonterminal = 1.0 - self.dones[t]\n nextvalues = self.state_values[t]\n else:\n nextnonterminal = 1.0 - self.dones[t + 1]\n nextvalues = self.state_values[t + 1]\n delta = self.rewards[t] + self.gamma * nextvalues * nextnonterminal - self.state_values[t]\n advantages[t] = lastgaelam = delta + self.gamma * self.gae_lambda * nextnonterminal * lastgaelam\n returns = advantages + self.state_values\n else:\n returns = torch.zeros_like(self.rewards).to(self.training_device)\n for t in reversed(range(steps)):\n if t == steps - 1:\n nextnonterminal = 1.0 - self.dones[t]\n next_return = self.state_values[t]\n else:\n nextnonterminal = 1.0 - self.dones[t+1]\n next_return = returns[t+1]\n returns[t] = self.rewards[t] + self.gamma * nextnonterminal * next_return\n advantages = returns - self.state_values\n ''' \n returns = torch.zeros_like(self.rewards).to(self.training_device)\n for t in reversed(range(steps)):\n if t == steps - 1:\n nextnonterminal = 1.0 - self.dones[t]\n next_return = self.state_values[t]\n else:\n nextnonterminal = 1.0 - self.dones[t+1]\n next_return = returns[t+1]\n returns[t] = self.rewards[t] + self.gamma * nextnonterminal * next_return\n advantages = returns - self.state_values\n \n\n # flatten the batch\n #b_obs = self.states.reshape((-1,) + self.state_space)\n #print(self.states.shape)\n b_obs = self.states.reshape((-1,4)).detach()\n b_logprobs = self.action_probs.reshape(-1,1).detach()\n b_actions = self.actions.reshape((-1,)).detach()\n b_advantages = advantages.reshape(-1,1)\n b_returns = returns.reshape(-1,1)\n b_values = self.state_values.reshape(-1,1)\n \n # Optimize policy and value network for K epochs, run optimization in minibatches\n \n inds = np.arange(batch_size)\n for i_epoch_pi in range(self.epochs):\n np.random.shuffle(inds)\n for start in range(0, batch_size, minibatch_size):\n end = start + minibatch_size\n minibatch_ind = inds[start:end]\n mb_advantages = b_advantages[minibatch_ind]\n if self.norm_adv:\n mb_advantages = (mb_advantages - mb_advantages.mean()) / (mb_advantages.std() + 1e-8)\n \n #_, newlogproba, entropy = self.get_action(b_obs[minibatch_ind], b_actions[minibatch_ind])\n newlogproba, entropy = self.evaluate(b_obs[minibatch_ind], b_actions[minibatch_ind])\n #ratio = (newlogproba - b_logprobs[minibatch_ind]).exp()\n ratio = torch.exp((newlogproba - b_logprobs[minibatch_ind].detach()))\n \n # Stats\n approx_kl = (b_logprobs[minibatch_ind] - newlogproba).mean()\n\n # Policy loss\n pg_loss1 = -mb_advantages * ratio\n pg_loss2 = -mb_advantages * 
torch.clamp(ratio, 1 - self.clip_epsilon, 1 + self.clip_epsilon)\n pg_loss = torch.max(pg_loss1, pg_loss2).mean()\n entropy_loss = entropy.mean()\n\n # Value loss\n _, new_values = self.policy.forward(b_obs[minibatch_ind])\n if self.clip_vloss:\n \n v_loss_unclipped = self.MseLoss(new_values,b_returns[minibatch_ind])\n #v_loss_unclipped = ((new_values - b_returns[minibatch_ind]) ** 2)\n v_clipped = b_values[minibatch_ind] + torch.clamp(new_values - b_values[minibatch_ind],\n -self.clip_epsilon, self.clip_epsilon)\n #v_loss_clipped = (v_clipped - b_returns[minibatch_ind]) ** 2\n v_loss_clipped = self.MseLoss(v_clipped,b_returns[minibatch_ind])\n v_loss_max = torch.max(v_loss_unclipped, v_loss_clipped)\n #v_loss = 0.5 * v_loss_max.mean()\n v_loss = 0.5 * v_loss_max\n else:\n #v_loss = 0.5 * ((new_values - b_returns[minibatch_ind]) ** 2).mean()\n v_loss = self.MseLoss(new_values,b_returns[minibatch_ind])\n\n loss = pg_loss + v_loss * self.vf_coeff - self.ent_coeff * entropy_loss\n\n self.optimizer.zero_grad()\n loss.backward()\n torch.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)\n self.optimizer.step()\n # Copy new weights into old policy:\n self.old_policy.load_state_dict(self.policy.state_dict())", "def gmae(self) -> float:\n return _geometric_mean(np.abs(self._error()))", "def delayE(self):\n sinE = np.sin(self.E())\n return self.GAMMA", "def anneal_tau(epoch, max_epochs):\n return max(0.5, np.exp(-1 / max_epochs * epoch))", "def estimate(values, target):\n\n # next time\n # diff(values)\n\n\n return 1.", "def softmax_policy(Qvalues_oa):\n betaQoa = beta * Qvalues_oa\n betaQoa_ = betaQoa - betaQoa.mean(-1, keepdims=True)\n expQoa = np.exp(betaQoa_)\n assert not np.any(np.isinf(expQoa)), \"behavior policy contains infs\"\n return expQoa / expQoa.sum(axis=-1, keepdims=True)", "def get_a(self):\n return self.a_min * tf.exp(self.a_ * self.log_a_range)", "def evaporatePheromones(self):\n for u in self.graph.nodes:\n for v in self.graph.neighbors(u):\n if self.graph_data[u]['pheromones'][v] > 0 :\n self.graph_data[u]['pheromones'][v] -= self.parameters['evaporation_rate'] * self.graph_data[u]['pheromones'][v]\n self.graph_data[v]['pheromones'][u] -= self.parameters['evaporation_rate'] * self.graph_data[v]['pheromones'][u]", "def mch_approximation(sample, dlamda, calc_e=calc_e):\n\n dE = calc_e(sample, dlamda)\n ZFraction = len(dE) / np.exp(logsumexp(-dE))\n predsisj = (np.exp(-dE[:,None]) / len(dE) * calc_observables(sample)).sum(0) * ZFraction \n assert not ((predsisj<0).any() or\n (predsisj>(1+1e-10)).any()),\"Predicted values are beyond limits, (%E,%E)\"%(predsisj.min(),\n predsisj.max())\n return predsisj", "def simul_anneal(self, X, y, temperature, steps, cooldown='exponential', mean=0.0, stddev=1.0, quit=1e-4):\n cooldown = self.cooldown_method[cooldown]\n for i in range(steps):\n temperature = cooldown(temperature, i)\n before_energy = self._energy(X, y)\n move = self._to_neighbor(mean, stddev)\n after_energy = self._energy(X, y)\n dE = after_energy - before_energy\n if 0 < dE < quit:\n return\n if dE < 0.0 or np.exp(-dE/temperature) > np.random.rand():\n # accept the new state\n pass\n else:\n self.weight -= move", "def policy_evaluation(P, nS, nA, policy, gamma=0.9, tol=1e-8):\n value_function = np.zeros(nS)\n ############################\n # YOUR IMPLEMENTATION HERE #\n def next_state_reward(P,state,action,gamma,value_function):\n sum_reward=0\n for p,nextS,r,boolean_v in P[state][action]:\n sum_reward+=p*( r + gamma* value_function[nextS])\n 
#print(sum_reward) \n return sum_reward\n\n while True:\n delta=0;\n for state in range(nS):\n new_value=0;\n for action in range(nA):\n sum_reward=next_state_reward(P,state,action,gamma,value_function)\n new_value+=policy[state][action]*sum_reward\n delta= max(delta, abs(new_value-value_function[state]))\n value_function[state] = new_value\n #print(value_function)\n if(delta < tol):\n break\n\n ############################\n return value_function", "def makeEpsilonTransition(self, currentState):\n nextState = self.makeTransition(currentState, \"$\", True)\n #if the epsilon transition cannot be made\n if nextState is None:\n #return the last state automaton was found in before the transition\n return currentState\n #return the current state if it is an acceptable state\n if nextState in self.acceptableStates:\n return nextState\n #otherwise try to make a new epsilon transition recursively\n return self.makeEpsilonTransition(nextState)", "def uncertainty_ee(self,e1,e2):\n # reco\n unc = (self._eleRecoWeight[(e1.pt(),e1.eta())][1]/self._eleRecoWeight[(e1.pt(),e1.eta())][0] + \\\n self._eleRecoWeight[(e2.pt(),e2.eta())][1]/self._eleRecoWeight[(e2.pt(),e2.eta())][0])**2\n # id-isolation\n unc += (self._eleIdIsoWeight[(e1.pt(),e1.eta())][1]/self._eleIdIsoWeight[(e1.pt(),e1.eta())][0] + \\\n self._eleIdIsoWeight[(e2.pt(),e2.eta())][1]/self._eleIdIsoWeight[(e2.pt(),e2.eta())][0])**2\n # trigger (approximate)\n unc += (abs(self._ele8TrgWeight[(e1.pt(),e1.eta())][0]*self._ele17TrgWeight[(e2.pt(),e2.eta())][1]+ \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][1]*self._ele8TrgWeight[(e2.pt(),e2.eta())][0]- \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][1]*self._ele17TrgWeight[(e2.pt(),e2.eta())][0]- \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][0]*self._ele17TrgWeight[(e2.pt(),e2.eta())][1])/ \\\n (self._ele8TrgWeight[(e1.pt(),e1.eta())][0]*self._ele17TrgWeight[(e2.pt(),e2.eta())][0]+ \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][0]*self._ele8TrgWeight[(e2.pt(),e2.eta())][0]- \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][0]*self._ele17TrgWeight[(e2.pt(),e2.eta())][0]))**2\n unc += ((self._ele8TrgWeight[(e1.pt(),e1.eta())][1]*self._ele17TrgWeight[(e2.pt(),e2.eta())][0]+ \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][0]*self._ele8TrgWeight[(e2.pt(),e2.eta())][1])/ \\\n (self._ele8TrgWeight[(e1.pt(),e1.eta())][0]*self._ele17TrgWeight[(e2.pt(),e2.eta())][0]+ \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][0]*self._ele8TrgWeight[(e2.pt(),e2.eta())][0]- \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][0]*self._ele17TrgWeight[(e2.pt(),e2.eta())][0]))**2\n #outcome\n return sqrt(unc)", "def test_prop_alpha(self):\n # reproducible arbitrariness\n np.random.seed(5001)\n\n self.conductor.out_step = np.random.randn(self.Nc)\n self.tutor.out_step = np.random.randn(self.Ns)\n\n self.rule.alpha = 1.0\n self.rule.beta = 0.0\n\n tmax = 5*self.dt\n factor = 1.3\n\n W0 = np.copy(self.syns.W)\n\n sim = simulation.Simulation(self.conductor, self.student, self.tutor,\n self.syns, self.rule, dt=self.dt)\n sim.run(tmax)\n\n change1 = self.syns.W - W0\n\n self.syns.W = np.copy(W0)\n self.rule.alpha *= factor\n sim.run(tmax)\n\n change2 = self.syns.W - W0\n\n self.assertTrue(np.allclose(change2, factor*change1))", "def test_prop_alpha(self):\n # reproducible arbitrariness\n np.random.seed(5001)\n\n self.conductor.out_step = np.random.randn(self.Nc)\n self.tutor.out_step = np.random.randn(self.Ns)\n\n self.rule.alpha = 1.0\n self.rule.beta = 0.0\n\n tmax = 5*self.dt\n factor = 1.3\n\n W0 = np.copy(self.syns.W)\n\n sim = 
simulation.Simulation(self.conductor, self.student, self.tutor,\n self.syns, self.rule, dt=self.dt)\n sim.run(tmax)\n\n change1 = self.syns.W - W0\n\n self.syns.W = np.copy(W0)\n self.rule.alpha *= factor\n sim.run(tmax)\n\n change2 = self.syns.W - W0\n\n self.assertTrue(np.allclose(change2, factor*change1))", "def getEta(self):\n self.__eta = 3./8.*(1. - self.__alpha0 - self.__alpha1 - 2.*self.__beta)\n if self.__eta<0.: self.__eta=0. # erreur d'arrondi\n return self.__eta", "def __call__(self, w: tf.Tensor) -> tf.Tensor:\n return tf.maximum(w, self.epsilon)", "def calc_eta_FC(Q_load_W, Q_design_W, phi_threshold, approach_call):\n phi = 0.0\n\n ## Approach A - NREL Approach\n if approach_call == \"A\":\n\n phi = float(Q_load_W) / float(Q_design_W)\n eta_max = 0.425 # from energy.gov\n\n if phi >= phi_threshold: # from NREL-Shape\n eta_el = eta_max - ((1 / 6.0 * eta_max) / (1.0 - phi_threshold)) * abs(phi - phi_threshold)\n\n if phi < phi_threshold:\n if phi <= 118 / 520.0 * phi_threshold:\n eta_el = eta_max * 2 / 3 * (phi / (phi_threshold * 118 / 520.0))\n\n if phi < 0.5 * phi_threshold and phi >= 118 / 520.0 * phi_threshold:\n eta_el = eta_max * 2 / 3.0 + \\\n eta_max * 0.25 * (phi - phi_threshold * 118 / 520.0) / (phi_threshold * (0.5 - 118 / 520.0))\n\n if phi > 0.5 * phi_threshold and phi < phi_threshold:\n eta_el = eta_max * (2 / 3.0 + 0.25) + \\\n 1 / 12.0 * eta_max * (phi - phi_threshold * 0.5) / (phi_threshold * (1 - 0.5))\n\n eta_therm_max = 0.45 # constant, after energy.gov\n\n if phi < phi_threshold:\n eta_therm = 0.5 * eta_therm_max * (phi / phi_threshold)\n\n else:\n eta_therm = 0.5 * eta_therm_max * (1 + eta_therm_max * ((phi - phi_threshold) / (1 - phi_threshold)))\n\n ## Approach B - Empiric Approach\n if approach_call == \"B\":\n\n if Q_design_W > 0:\n phi = float(Q_load_W) / float(Q_design_W)\n\n else:\n phi = 0\n\n eta_el_max = 0.39\n eta_therm_max = 0.58 # * 1.11 as this source gives eff. of HHV\n eta_el_score = -0.220 + 5.277 * phi - 9.127 * phi ** 2 + 7.172 * phi ** 3 - 2.103 * phi ** 4\n eta_therm_score = 0.9 - 0.07 * phi + 0.17 * phi ** 2\n\n eta_el = eta_el_max * eta_el_score\n eta_therm = eta_therm_max * eta_therm_score\n\n if phi < 0.2:\n eta_el = 0\n\n return eta_el, eta_therm" ]
[ "0.660565", "0.6559329", "0.6557568", "0.64600974", "0.64180017", "0.6403347", "0.6382049", "0.6335735", "0.6307492", "0.62434304", "0.62413085", "0.62142545", "0.6212238", "0.61981845", "0.61455894", "0.61093074", "0.60480744", "0.6001524", "0.5995188", "0.59935886", "0.5979415", "0.59665424", "0.59647036", "0.5961414", "0.5957847", "0.59133977", "0.58786035", "0.5872667", "0.5861227", "0.5856209", "0.58559847", "0.58409375", "0.58277094", "0.5820084", "0.58052254", "0.58031195", "0.57722014", "0.5768315", "0.5750438", "0.57418007", "0.56807363", "0.5680107", "0.56627464", "0.5649093", "0.5614923", "0.5604144", "0.5598729", "0.55923414", "0.5585133", "0.5581063", "0.55747294", "0.5572592", "0.5561909", "0.55580026", "0.55301994", "0.55135846", "0.5501189", "0.5497779", "0.54919875", "0.54758745", "0.54696494", "0.546458", "0.5458966", "0.54580003", "0.54550755", "0.54521596", "0.54507625", "0.5447532", "0.54453564", "0.5439488", "0.54371125", "0.5422894", "0.54194313", "0.540677", "0.5404876", "0.5395737", "0.53934574", "0.5385412", "0.53837305", "0.53827363", "0.5373759", "0.5369329", "0.5367274", "0.5366395", "0.5364782", "0.5363222", "0.53627115", "0.5357512", "0.53558135", "0.5355781", "0.5353075", "0.53309554", "0.5324863", "0.5324645", "0.5323896", "0.5314488", "0.5314488", "0.5307299", "0.5306137", "0.5305201" ]
0.6084767
16
Handle any cleanup or steps to begin a new episode of training.
def new_episode(self, scores):
        # Keep track of an average score for use with annealing epsilon,
        # TODO: this currently lives in new_episode() because we only want to
        # update epsilon each episode, not each timestep, currently. This should
        # be further investigate about moving this into the epsilon property
        # itself instead of here
        avg_across = np.clip(len(scores), 1, 50)
        self.avg_score = np.array(scores[-avg_across:]).mean()

        self.memory.init_n_step()
        self.episode += 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _training_after_hook(self):\n pass", "def _end_episode(self):\n self.turn_cnt = 0\n self.episode_cnt += 1\n self.contexts = None\n self.seed_utterances = None\n self.reset_agents()", "def finalize(self) :\n if not self.nTrain:\n self.nTrain = max(self.retrain_interval * 2, self.season * 3)\n if self.max_verbosity > 0:\n print \"Automatically set nTrain to\", self.nTrain\n if self.batch_size is None:\n #No batch learning\n self.batch_size = self.nTrain\n if self.nTrain % self.batch_size != 0:\n if self.max_verbosity > 0:\n print \"Adding\", self.batch_size - (self.nTrain % self.batch_size), \"to nTrain\", self.nTrain\n self.nTrain += self.batch_size - (self.nTrain % self.batch_size)\n self.numLags = 0.25 * self.nTrain #Don't immediately retrain\n\n #The first time at which we can actually predict: need enough headroom for both MASE calculation\n #and filling the lookback window\n self.front_buffer = max(self.season - self.predictionStep, self.lookback)\n if self.ignore_for_error: #offset for values not predicted\n self.ignore_for_error-= (self.front_buffer + self.predictionStep - 1)", "def final(self, state):\n\n deltaReward = state.getScore() - self.pre_state.getScore()\n self.observeTransition(self.pre_state, self.action, state, deltaReward, True)\n self.stopEpisode()\n\n # Make sure we have this var\n if not 'episodeStartTime' in self.__dict__:\n self.episodeStartTime = time.time()\n if not 'lastWindowAccumRewards' in self.__dict__:\n self.lastWindowAccumRewards = 0.0\n self.lastWindowAccumRewards += state.getScore()\n\n NUM_EPS_UPDATE = 10000\n if self.episodesSoFar % NUM_EPS_UPDATE == 0:\n print('episode: ', self.episodesSoFar)\n print('Saving model...')\n self.model.save(MODEL_PATH)\n print('Reinforcement Learning Status:')\n windowAvg = self.lastWindowAccumRewards / float(NUM_EPS_UPDATE)\n if self.episodesSoFar <= self.numTraining:\n trainAvg = self.accumTrainRewards / float(self.episodesSoFar)\n print('\\tCompleted %d out of %d training episodes' % (self.episodesSoFar, self.numTraining))\n print('\\tAverage Rewards over all training: %.2f' % (trainAvg))\n\n else:\n testAvg = float(self.accumTestRewards) / (self.episodesSoFar - self.numTraining)\n print('\\tCompleted %d test episodes' % (self.episodesSoFar - self.numTraining))\n\n print('\\tAverage Rewards over testing: %.2f' % testAvg)\n\n print('\\tAverage Rewards for last %d episodes: %.2f' % (NUM_EPS_UPDATE, windowAvg))\n\n print('\\tEpisode took %.2f seconds' % (time.time() - self.episodeStartTime))\n\n self.lastWindowAccumRewards = 0.0\n self.episodeStartTime = time.time()\n\n if self.episodesSoFar == self.numTraining:\n msg = 'Training Done (turning off epsilon and alpha)'\n print('%s\\n%s' % (msg, '-' * len(msg)))", "def on_episode_begin(self, episode, logs):\n self.episode_start[episode] = timeit.default_timer()\n self.observations[episode] = []\n self.rewards[episode] = []\n self.actions[episode] = []\n self.metrics[episode] = []", "def on_episode_begin(self, episode, logs):\n self.episode_start[episode] = timeit.default_timer()\n self.observations[episode] = []\n self.rewards[episode] = []\n self.actions[episode] = []\n self.metrics[episode] = []", "def on_train_end(self):", "def training_end(self):\n pass", "def setup_training(self):\n print('setup training called')\n self.steps_done = 0\n self.current_episode_num = 1\n self.total_reward = 0\n\n # self.optimizer = optim.RMSprop(policy_net.parameters())\n self.memory = ReplayMemory(300000)\n self.total_reward_history = []\n # self.loss_history = []\n self.positions 
= []\n self.n_destroyed_crates = 0\n self.is_in_bomb_range = False", "def end_training(self):\n self.training = False", "def stopEpisode(self):\n if self.episodesSoFar < self.numTraining:\n self.accumTrainRewards += self.episodeRewards\n else:\n self.accumTestRewards += self.episodeRewards\n self.episodesSoFar += 1\n if self.episodesSoFar >= self.numTraining:\n # Take off the training wheels\n self.epsilon = 1.0 # no exploration\n self.lr = 0.0 # no learning", "def stopEpisode(self):\n if self.episodesSoFar < self.numTraining:\n self.accumTrainRewards += self.episodeRewards\n else:\n self.accumTestRewards += self.episodeRewards\n self.episodesSoFar += 1\n if self.episodesSoFar >= self.numTraining:\n # Take off the training wheels\n self.epsilon = 0.0 # no exploration\n self.alpha = 0.0 # no learning", "def on_train_end(self):\n self.epoch_bar.close()\n self.epoch_tqdm.close()\n\n for step_tqdm, step_bar in zip(self.step_tqdms, self.step_bars):\n step_bar.close()\n step_tqdm.close()\n\n self.step_tqdms = []\n self.step_bars = []", "def on_train_begin(self, logs):\n print(f\"Testing for {self.params['nb_episodes']} episodes ...\")", "def start_episode(self):\n self.last_sensation = self.env()\n self.next_action = self.agent(self.last_sensation)", "def _epoch_before_hook(self):\n self._train_steps_this_epoch = 0", "def _training_before_hook(self):\n pass", "def startEpisode(self):\n self.lastState = None\n self.lastAction = None\n self.episodeRewards = 0.0", "def on_train_end(self, logs=None):\n self.epoch_iter = 0", "def step(self, epoch):\n\n self.train(epoch)\n self.test(epoch)", "def train_loop_post(self, current_step):\r\n pass", "def OnEnterEpisode(self):\n pass", "def run(self) -> None:\n for episode in range(1, self.episodes + 1):\n print('Episode:', episode)\n steps, state_action_history = self.run_one_episode()\n self.steps_per_episode.append(steps)\n if episode % parameters.CACHING_INTERVAL == 0 or steps < 1000:\n visualize.animate_track(state_action_history, f'agent-{episode}')\n\n print('Training completed.')\n visualize.plot_steps_per_episode(self.steps_per_episode)\n visualize.plot_epsilon(self.agent.epsilon_history)\n\n if parameters.VISUALIZE_FINAL_GAME:\n print('Showing one episode with the greedy strategy.')\n self.agent.epsilon = 0\n steps, state_action_history = self.run_one_episode()\n print(f'Episode completed in {steps} steps.')\n visualize.animate_track(state_action_history)", "def end_episode(self):\n raise UnityTrainerException(\"The end_episode method was not implemented.\")", "def _post_training(self):\n self._write_state_key()", "def train_episode(self):\n state = self.env.reset()\n states = []\n actions = []\n rewards = []\n for _ in range(self.options.steps):\n probs = self.actor_baseline.predict([[state]])[0][0]\n action = np.random.choice(len(probs), p=probs)\n\n next_state, reward, done, _ = self.step(action)\n states.append(state)\n actions.append(action)\n rewards.append(reward)\n\n state = next_state\n\n if done:\n break\n\n # Compute and store returns in G\n G = np.zeros_like(rewards)\n ################################\n # YOUR IMPLEMENTATION HERE #\n ################################\n\n # One-hot encoding for actions\n actions_one_hot = np.zeros([len(actions), self.env.action_space.n])\n actions_one_hot[np.arange(len(actions)), actions] = 1\n\n # Compute one-hot encoded deltas\n ################################\n # YOUR IMPLEMENTATION HERE #\n ################################\n deltas = [[0]]\n\n # Update actor and state estimator\n 
self.actor_baseline.fit(x=[np.array(states)],\n y={'actor_output': deltas, 'baseline_output': returns},\n epochs=1, batch_size=self.options.batch_size, verbose=0)", "def end_episode(self):\n self.training_buffer.reset_all()\n for agent_id in self.cumulative_rewards:\n self.cumulative_rewards[agent_id] = 0\n for agent_id in self.episode_steps:\n self.episode_steps[agent_id] = 0\n if self.use_curiosity:\n for agent_id in self.intrinsic_rewards:\n self.intrinsic_rewards[agent_id] = 0", "def episode_terminated(self):\n # Increase the number of episodes\n self.episode_count += 1\n self.representation.episode_terminated()\n # Set eligibility Traces to zero if it is end of the episode\n if self.eligibility_trace is not None:\n self.eligibility_trace = np.zeros_like(self.eligibility_trace)", "def train(self):\n self.epoch = 0\n self.step = 0\n self.start_time = time.time()\n for self.epoch in range(self.num_epochs):\n print(\"EPOHA\")\n self.run_epoch()\n print(\"SAVE MODEL\")\n self.save_model()", "def on_episode_start(self, agent, **kwargs):\n self.episode_start[agent.brain.episode] = timeit.default_timer()\n self.observations[agent.brain.episode] = []\n self.rewards[agent.brain.episode] = []\n self.actions[agent.brain.episode] = []\n self.metrics[agent.brain.episode] = {}", "def final(self, state):\n deltaReward = state.getScore() - self.lastState.getScore()\n self.observeTransition(self.lastState, self.lastAction, state, deltaReward)\n self.stopEpisode()\n\n # Make sure we have this var\n if not 'episodeStartTime' in self.__dict__:\n self.episodeStartTime = time.time()\n if not 'lastWindowAccumRewards' in self.__dict__:\n self.lastWindowAccumRewards = 0.0\n self.lastWindowAccumRewards += state.getScore()\n\n NUM_EPS_UPDATE = 100\n if self.episodesSoFar % NUM_EPS_UPDATE == 0:\n print 'Reinforcement Learning Status:'\n windowAvg = self.lastWindowAccumRewards / float(NUM_EPS_UPDATE)\n if self.episodesSoFar <= self.numTraining:\n trainAvg = self.accumTrainRewards / float(self.episodesSoFar)\n print '\\tCompleted %d out of %d training episodes' % (\n self.episodesSoFar,self.numTraining)\n print '\\tAverage Rewards over all training: %.2f' % (\n trainAvg)\n else:\n testAvg = float(self.accumTestRewards) / (self.episodesSoFar - self.numTraining)\n print '\\tCompleted %d test episodes' % (self.episodesSoFar - self.numTraining)\n print '\\tAverage Rewards over testing: %.2f' % testAvg\n print '\\tAverage Rewards for last %d episodes: %.2f' % (\n NUM_EPS_UPDATE,windowAvg)\n print '\\tEpisode took %.2f seconds' % (time.time() - self.episodeStartTime)\n self.lastWindowAccumRewards = 0.0\n self.episodeStartTime = time.time()\n\n if self.episodesSoFar == self.numTraining:\n msg = 'Training Done (turning off epsilon and alpha)'\n print '%s\\n%s' % (msg,'-' * len(msg))", "def onTrainStepTaken(self, agent):\n pass", "def stopEpisode(self):\n if self.episodesSoFar < self.numTraining:\n self.accumTrainRewards += self.episodeRewards\n else:\n self.accumTestRewards += self.episodeRewards\n self.episodesSoFar += 1\n if self.episodesSoFar >= self.numTraining:\n # Take off the training wheels\n self.epsilon = 0.0 # no exploration\n self.alpha = 0.0 # no learning\n\n print(\"Agent Stop Episode\")\n print(self.episodeRewards)\n for feature, weight in self.weights.iteritems():\n print(\"\\t\" + str(feature) + \" - \" + str(weight))\n self.episodeRewardsList.append(self.episodeRewards)", "def before_train(self, runner) -> None:\n if runner._resume:\n for action_epoch, recipe in self.schedule.items():\n if action_epoch >= 
runner.epoch + 1:\n break\n self._do_switch(runner, recipe,\n f' (resume recipe of epoch {action_epoch})')", "def train_step(self):\n pass", "def end_episode(self):\n self.training_buffer.reset_local_buffers()\n for agent_id in self.episode_steps:\n self.episode_steps[agent_id] = 0\n for rewards in self.collected_rewards.values():\n for agent_id in rewards:\n rewards[agent_id] = 0", "def episode_step(self):\n self.nsteps += 1", "def _post_training_cleanup(self):\n tf.reset_default_graph()\n self.sess.close()\n os.chdir(\"../../\")", "def finish_learning(self):\n pass", "def startEpisode(self):\n self.lastState = None\n self.lastAction = None\n self.episodeRewards = 0.0\n\n print(\"Agent Start Episode #\" + str(self.episodesSoFar+1))", "def OnEpisodeStart(self):\n pass", "def _end_training(self):\n # Reset this variable as it is reused during evaluation phase\n self.is_filtered = False\n self.eval_config = {}\n \n #close the tf session\n self.sess_train.close()\n \n #set is_fitted to true to indicate that the model fitting is completed\n self.is_fitted = True", "def step(self, action):\n # Action indicates the position of a datapoint in self.indeces_unknown \n # that we want to sample in unknown_data\n # The index in train_data should be retrieved \n selection_absolute = self.indeces_unknown[action]\n # Label a datapoint: add its index to known samples and removes from unknown\n self.indeces_known = np.concatenate(([self.indeces_known, np.array([selection_absolute])]))\n self.indeces_unknown = np.delete(self.indeces_unknown, action) \n # Train a model with new labeled data\n known_data = self.dataset.train_data[self.indeces_known,:]\n known_labels = self.dataset.train_labels[self.indeces_known]\n known_labels = np.ravel(known_labels)\n self.model.fit(known_data, known_labels)\n # Get a new state \n classifier_state, next_action_state = self._get_state() \n # Update the number of available actions\n self.n_actions = np.size(self.indeces_unknown)\n # Compute the quality of the current classifier\n test_prediction = self.model.predict(self.dataset.test_data)\n new_score = self.quality_method(self.dataset.test_labels, test_prediction)\n self.episode_qualities.append(new_score)\n # Compute the reward\n reward = self._compute_reward()\n # Check if this episode terminated\n done = self._compute_is_terminal() \n return classifier_state, next_action_state, reward, done", "def on_train_end(self, logs=None):", "def on_train_end(self, logs=None):", "def do(self):\n super().do()\n logger.info(\"TrainPipeStep started...\")\n records = self._get_current_step_records()\n logger.debug(\"load pipestep records: {}\".format(records))\n self.num_models = len(records)\n self.num_epochs = self.num_models * TrainerConfig.epochs\n self.update_status(Status.running)\n self.master = create_master()\n self._train_multi_models(records)\n self.master.join()\n ReportServer().output_step_all_records(step_name=self.task.step_name)\n self.master.close()\n ReportServer().backup_output_path()\n self.update_status(Status.finished)", "def episode_init(self):\n self.nsteps = 0\n self.action = None\n self.state = None\n self.reward = None\n self.terminal = None\n self.total_reward = 0.0", "def training(self) -> None:\n self.compile_model()\n self.train_epoch()\n self.agent.save()", "def run_episode(self, environment):\n state = environment.reset()\n self.steps_done = 0\n while True:\n state_tensor = FloatTensor([state])\n position = self.Q.sample_from_softmax_policy(state_tensor)\n action = position + 1\n next_state, reward, 
done, _ = environment.step(position.item())\n self.memory.push((state_tensor, action,))\n self.learn(state_tensor, action, next_state, reward)\n state = next_state\n self.steps_done += 1\n if done:\n break\n history = environment.close()\n return history", "def onTrainLoopTaken(self, agent):\n pass", "def run(self):\n # This should do nothing if the user has already configured\n # logging, and will it least enable error messages otherwise.\n logging.basicConfig()\n\n # If this is resumption from a checkpoint, it is crucial to\n # reset `profile.current`. Otherwise, it simply does not hurt.\n self.profile.current = []\n\n # Sanity check for the most common case\n if (self._model and isinstance(self._model, Model) and\n isinstance(self.algorithm, GradientDescent)):\n if not (set(self._model.get_parameter_dict().values()) ==\n set(self.algorithm.parameters)):\n logger.warning(\"different parameters for model and algorithm\")\n\n with change_recursion_limit(config.recursion_limit):\n self.original_sigint_handler = signal.signal(\n signal.SIGINT, self._handle_epoch_interrupt)\n self.original_sigterm_handler = signal.signal(\n signal.SIGTERM, self._handle_batch_interrupt)\n try:\n logger.info(\"Entered the main loop\")\n if not self.status['training_started']:\n for extension in self.extensions:\n extension.main_loop = self\n self._run_extensions('before_training')\n with Timer('initialization', self.profile):\n self.algorithm.initialize()\n self.status['training_started'] = True\n # We can not write \"else:\" here because extensions\n # called \"before_training\" could have changed the status\n # of the main loop.\n if self.log.status['iterations_done'] > 0:\n self.log.resume()\n self._run_extensions('on_resumption')\n self.status['epoch_interrupt_received'] = False\n self.status['batch_interrupt_received'] = False\n with Timer('training', self.profile):\n while self._run_epoch():\n pass\n except TrainingFinish:\n self.log.current_row['training_finished'] = True\n except Exception as e:\n self._restore_signal_handlers()\n self.log.current_row['got_exception'] = traceback.format_exc()\n logger.error(\"Error occured during training.\" + error_message)\n try:\n self._run_extensions('on_error')\n except Exception:\n logger.error(traceback.format_exc())\n logger.error(\"Error occured when running extensions.\" +\n error_in_error_handling_message)\n reraise_as(e)\n finally:\n self._restore_signal_handlers()\n if self.log.current_row.get('training_finished', False):\n self._run_extensions('after_training')\n if config.profile:\n self.profile.report()", "def on_train_begin(self):\n self.epoch_tqdm = self.tqdm(total=self.trainer.total_epochs,\n unit='epoch',\n leave=True,\n position=0,\n ascii=self.ascii)\n self.epoch_bar = self.epoch_tqdm.__enter__()\n self.last_step = 0", "def on_train_end(self):\n for callback in self.callbacks:\n callback.on_train_end(self, self.get_model())", "def on_train_epoch_end(self) -> None:\n self.log_dict(self.train_metrics.compute())\n self.train_metrics.reset()", "def _step(self, action):\n if self._episode_ended:\n # The last action ended the episode. 
Ignore the current action and start a new episode\n return self.reset()\n\n env_action = self.y_train[self.id[self.episode_step]]\n self.episode_step += 1\n\n if action == env_action: # Correct action\n if env_action: # Minority\n reward = 1 # True Positive\n else: # Majority\n reward = self.imb_rate # True Negative\n\n else: # Incorrect action\n if env_action: # Minority\n reward = -1 # False Negative\n self._episode_ended = True # Stop episode when minority class is misclassified\n else: # Majority\n reward = -self.imb_rate # False Positive\n\n if self.episode_step == self.X_len - 1: # If last step in data\n self._episode_ended = True\n\n self._state = self.X_train[self.id[self.episode_step]] # Update state with new datapoint\n\n if self._episode_ended:\n return ts.termination(self._state, reward)\n else:\n return ts.transition(self._state, reward)", "def reset(self):\n self.tracker.reset()\n self.episode += 1\n self.episode_step = 0", "def on_train_batch_end(self, step, logs=None):", "def after_batch(self):\n if self.trainer._mode == 'train':\n with open(os.path.join(self.root_path, 'loss.txt'), 'a+') as fout:\n fout.write(str(self.trainer._epoch) + '\\t' +\n str(self.trainer._loss.detach().cpu().item()) + '\\n')\n\n if self.trainer._mode == 'test' and (self.f is not None):\n for index in range(len(self.trainer._ids)):\n one_input = self.get_one(self.trainer._input, index)\n one_output = self.get_one(self.trainer._output, index)\n\n res = self.f(one_input, one_output)\n id = self.trainer._ids[index]\n\n self.show(res, id)", "def train_loop_begin(self):\r\n for _, train_loss_metric in self.training_losses.items():\r\n train_loss_metric.reset_states()\r\n\r\n for _, metrics in self.training_metrics.items():\r\n for metric in metrics:\r\n metric.reset_states()", "def run_episode(self, environment):\n state, texts = environment.reset()\n self.steps_done = 0\n action = None\n while True:\n state_tensor = FloatTensor([state])\n text_tensor = FloatTensor(texts).mean(dim=0, keepdim=True)\n action = self.Q.sample_from_softmax_policy(state_tensor, text_tensor)\n position = self.convert_action(action)\n (next_state, next_texts), reward, done, _ = environment.step(position)\n next_text_tensor = FloatTensor(next_texts).mean(dim=0, keepdim=True)\n for t1 in texts:\n t1_tensor = FloatTensor([t1])\n for t2 in next_texts:\n t2_tensor = FloatTensor([t2])\n self.memory.push(\n (state_tensor, t1_tensor, action, t2_tensor,) # action is already a tensor\n )\n self.learn(state_tensor, text_tensor, action, next_state, next_text_tensor, reward)\n state = next_state\n self.steps_done += 1\n if done:\n break\n history = environment.close()\n return history", "def after_train_iter(self, runner):\n if not self.enabled or (runner.iter % self.interval != 0):\n return\n\n # EMA\n self._ema_model()", "def _train_simulate(self, env, train_episode=None):\n # The initial observation\n o_r_d_i = [env.reset()] + [None]*3 # o_r_d_i means \"Observation_Reward_Done_Info\"\n # Reset all the manager parameters\n self.reset(o_r_d_i[0][\"manager\"])\n done = False\n current_option = None\n # Render the current state\n if self.parameters[\"display_environment\"]:\n self.show_render.render(o_r_d_i[0])\n\n while not done:\n # If no option is activated then choose one\n if current_option is None:\n current_option = self.select_option(o_r_d_i, train_episode)\n assert current_option.score == 0, \"the option's reset function must reset the score to 0.\"\n\n # choose an action\n action = current_option.act(train_episode)\n\n # make an 
action and display the state space\n o_r_d_i = env.step(action)\n if self.parameters[\"display_environment\"]:\n self.show_render.render(o_r_d_i[0])\n\n # check if the option ended correctly\n correct_termination = self.check_end_option(current_option, o_r_d_i[0][\"manager\"])\n\n # update the option\n intra_reward = self.compute_intra_reward(o_r_d_i, correct_termination)\n current_option.update_option(o_r_d_i, action, correct_termination, train_episode, intra_reward)\n\n # If the option is done, update the manager\n if correct_termination is not None:\n if check_type(current_option, AbstractOption):\n # record the correct transition when the option is a regular option (i.e. not an explore option)\n self.successful_transition.append(correct_termination)\n self.write_success_rate_transitions()\n\n # the manager does not need to know if the correct_termination is 0 or 1.\n self.update_manager(o_r_d_i, current_option, train_episode)\n\n current_option = None\n\n done = self.check_end_manager(o_r_d_i)\n\n self.write_manager_score(train_episode)", "def train(n_episodes=1000, max_n_steps=300, eps_start=1.0, eps_end=0.01, eps_decay=0.995, strCheckpointFile='checkpoint.pth'):\n\n global env\n scores = [] # list containing scores from each episode\n scores_window = deque(maxlen=100) # last 100 scores\n eps = eps_start # initialize epsilon\n num_saves = 0\n for i_episode in range(1, n_episodes + 1):\n env_info = env.reset(train_mode=True)[brain_name] # reset the environment\n state = env_info.vector_observations[0] # get the current state\n score = 0 # initialize the score\n last_t = max_n_steps\n for t in range(max_n_steps):\n action = agent.act(state, eps) # agent returns an epsilon-greedy action based on state\n env_info = env.step(action)[brain_name] # send the action to the environment\n next_state = env_info.vector_observations[0] # get the next state\n reward = env_info.rewards[0] # get the reward\n done = env_info.local_done[0] # see if episode has finished\n agent.step(state, action, reward, next_state, done) # records experience and learns (depending on settings)\n state = next_state\n score += reward\n if done:\n last_t = t + 1\n break\n scores_window.append(score) # save most recent score\n scores.append(score) # save most recent score\n eps = max(eps_end, eps_decay * eps) # decrease epsilon\n print('\\rEpisode {}\\tNum steps: {}\\tAverage Score: {:.2f}'.format(i_episode, last_t, np.mean(scores_window)))\n # if i_episode % 100 == 0:\n # print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))\n if np.mean(scores_window) >= 13: # win condition in course\n if num_saves == 0:\n print('\\nEnvironment solved in {:d} episodes!\\tAverage Score: {:.2f}'.format(i_episode - 100, np.mean(scores_window)))\n print('\\nTraining will continue and the checkpoint will be overwritten every 100 episodes')\n print('\\nSaving a checkpoint now, you may interrupt code execution with eg Ctrl+C')\n torch.save(agent.qnetwork_local.state_dict(), strCheckpointFile)\n else:\n if i_episode % 100 == 0:\n print('\\nSaving another checkpoint now, you may interrupt code execution with eg Ctrl+C')\n torch.save(agent.qnetwork_local.state_dict(), strCheckpointFile)\n num_saves += 1\n\n env.close()\n\n # plot the scores\n fig = plt.figure()\n ax = fig.add_subplot(111)\n plt.plot(np.arange(len(scores)), scores)\n plt.ylabel('Score')\n plt.xlabel('Episode #')\n # plt.show()\n plt.savefig('training_score_by_episode.png')\n return scores", "def _reset(self):\n np.random.shuffle(self.id)\n 
self.episode_step = 0 # Reset episode step counter at the end of every episode\n self._state = self.X_train[self.id[self.episode_step]]\n self._episode_ended = False\n\n return ts.restart(self._state)", "def start_training(self):\n self.training = True", "def light_training_loop():\n try:\n logging.info('Starting RL training for %d epochs.', n_epochs)\n trainer.run(n_epochs, n_epochs_is_total_epochs=True)\n logging.info('Completed RL training for %d epochs.', n_epochs)\n trainer.close()\n logging.info('Trainer is now closed.')\n except Exception as e:\n raise e\n finally:\n logging.info('Encountered an exception, still calling trainer.close()')\n trainer.close()\n logging.info('Trainer is now closed.')", "def new_episode(self):\n self.game.new_episode()", "def run(self):\n time.sleep(np.random.rand())\n np.random.seed(np.int32(time.time() % 1000 * self.id))\n \n # Put this in a while loop that checks a shared variable\n # Will keep running episodes until the shared variable reports False\n while(self.exit_flag == 0):\n for experience in self.run_episode():\n print(experience.state, experience.reward)\n self.training_q.put(experience)", "def run_episode(self):\n self.reset_episode()\n obs = self.env.reset()\n while True:\n action = self.Policy[self.env.stateDict[obs]]\n new_obs, reward, done, _ = self.env.step(action)\n if self.mode=='debug':\n print(\"PrevObs:{}, Action:{}, Obs:{}, Reward:{}, Done:{}\"\n .format(obs, action, new_obs,reward,done))\n self.totalReward += reward\n self.totalSteps += 1\n if done:\n break\n else:\n obs = new_obs\n return self.totalReward", "def train(self):\n total_steps = 0\n scores_history = [deque(maxlen=self.run_settings.averaging_window)\n for a in range(len(self.agents))]\n averages_history = [[] for a in range(len(self.agents))]\n\n for e in range(self.run_settings.num_episodes):\n # Initialize episode\n try:\n env_states, rewards, done, metainfo = self.custom_env.reset()\n except EpisodeCrashException:\n for a in range(len(self.agents)):\n if hasattr(self.agents[a], 'notify_episode_crashed'):\n self.agents[a].notify_episode_crashed(self.run_settings)\n continue\n\n # Initialize scores to starting reward (probably 0)\n scores = rewards\n step = 0\n\n while not done:\n states = [self.agents[a].state_space_converter(env_states[a])\n for a in range(len(self.agents))]\n\n # Train agents\n if total_steps > 0 and total_steps % self.run_settings.train_every == 0:\n for agent in self.agents:\n agent.train(self.run_settings)\n\n # Save agent model\n if total_steps > 0 and total_steps % self.run_settings.save_every == 0:\n for agent in self.agents:\n agent.save()\n\n # Get actions\n actions = [self.agents[a].sample(states[a])\n for a in range(len(self.agents))]\n env_actions = [self.agents[a].action_space_converter(actions[a])\n for a in range(len(self.agents))]\n # Take environment step\n try:\n env_states, rewards, done, metainfo = self.custom_env.step(env_actions)\n except EpisodeCrashException:\n for a in range(len(self.agents)):\n if hasattr(self.agents[a], 'notify_episode_crashed'):\n self.agents[a].notify_episode_crashed(self.run_settings)\n break\n step += 1\n total_steps += 1\n\n # Update scores\n scores = [scores[a] + rewards[a] for a in range(len(self.agents))]\n # Push to agent Memories\n for a in range(len(self.agents)):\n self.agents[a].push_memory(states[a], actions[a], rewards[a], done)\n\n if done:\n averages = []\n for a in range(len(scores_history)):\n scores_history[a].append(scores[a])\n averages.append(np.mean(scores_history[a]))\n 
averages_history[a].append(averages[a])\n\n if len(scores) == 1:\n scores = scores[0]\n averages = averages[0]\n if self.run_settings.verbose:\n print(\"Game {} ended after {} steps. Game score: {}. Averages: {}\"\n .format(e+1, step, scores, averages))\n if (self.run_settings.graph_every > 0 and e > 0\n and e % self.run_settings.graph_every == 0):\n self.plot_results(averages_history)", "def train_epoch(self):\n for batch, targets in self.training_dataloader:\n self.training_step(batch, targets)\n self.calculate_training_loss()\n self.epochs_trained += 1\n LOGGER.info(\n \"Training loss after {} epochs: {}\".format(str(self.epochs_trained), str(self.training_average_loss))\n )", "def run_step(self):\n assert self.model.training, \"[SimpleTrainer] model was changed to eval mode!\"\n start = time.perf_counter()\n \"\"\"\n If your want to do something with the data, you can wrap the dataloader.\n \"\"\"\n data = next(self._data_loader_iter)\n data_time = time.perf_counter() - start\n\n \"\"\"\n If your want to do something with the losses, you can wrap the model.\n \"\"\"\n loss_dict = self.model(data)\n losses = sum(loss for loss in loss_dict.values())\n self._detect_anomaly(losses, loss_dict)\n\n metrics_dict = loss_dict\n metrics_dict[\"data_time\"] = data_time\n self._write_metrics(metrics_dict)\n \n validation_data = next(self.validation_data_loader_iter)\n val_losses_dict = self.model(validation_data)\n val_losses = sum(loss for loss in val_losses_dict.values())\n self._detect_anomaly(val_losses, val_losses_dict)\n\n val_metrics_dict = val_losses_dict\n val_metrics_dict[\"data_time\"] = data_time\n self._write_validation_metrics(val_metrics_dict)\n\n \"\"\"\n If you need accumulate gradients or something similar, you can\n wrap the optimizer with your custom `zero_grad()` method.\n \"\"\"\n self.optimizer.zero_grad()\n losses.backward()\n\n \"\"\"\n If you need gradient clipping/scaling or other processing, you can\n wrap the optimizer with your custom `step()` method.\n \"\"\"\n self.optimizer.step()", "def on_train_begin(self, logs={}):\n self.losses = []\n self.val_losses = []", "def run_episode(self, deterministic=False):\n\n\n obs = self.env.reset()\n Observations, Actions, Rewards = [], [], [] # original trajectory\n n_Observations, n_Rewards = [], [] # normalized trajectory\n done = False\n timestep = 0\n while not done and timestep < self.episode_horizon:\n Observations.append(obs)\n if self.state_preprocessor:\n n_obs = self.state_preprocessor.get_scaled_x(obs)\n else:\n n_obs = obs\n n_Observations.append(n_obs)\n action = self.policy.get_action(obs.astype(np.float32).reshape((1,-1)), deterministic=deterministic)\n Actions.append(action.flatten())\n obs, reward, done, _ = self.env.step(np.squeeze(action, axis=0))\n Rewards.append(reward)\n if self.reward_preprocessor:\n n_reward = self.reward_preprocessor.get_scaled_x(reward)\n else:\n n_reward = reward\n n_Rewards.append(n_reward)\n timestep += 1\n\n \n # append the last state\n Observations.append(obs)\n if self.state_preprocessor:\n n_obs = self.state_preprocessor.get_scaled_x(obs)\n else:\n n_obs = obs\n n_Observations.append(n_obs)\n\n unscaled_traj = {\"Observations\": np.array(Observations), \"Actions\": np.array(Actions), \"Rewards\": np.array(Rewards)}\n scaled_traj = {\"Observations\": np.array(n_Observations), \"Actions\": np.array(Actions), \"Rewards\": np.array(n_Rewards)}\n\n # update preprocessers\n if self.state_preprocessor:\n self.state_preprocessor.update(unscaled_traj['Observations'])\n # save preprocessor 
params for restoration\n self.state_preprocessor.save_params(os.path.join(self.logger.info_dir, \"state_preprocessor_params.pkl\"))\n if self.reward_preprocessor:\n self.reward_preprocessor.update(unscaled_traj['Rewards'])\n self.reward_preprocessor.save_params(os.path.join(self.logger.info_dir, \"reward_preprocessor_params.pkl\"))\n \n return unscaled_traj, scaled_traj", "def finish_training(self, error: bool = False, **info):\n pass", "def setup_training(self):\n self.transitions = deque(maxlen=TRANSITION_HISTORY_SIZE)\n self.total_rewards = []\n self.rewards = []\n self.steps = []\n self.average_rewards = []\n self.average_steps = []\n self.model = initialize_model()\n self.invalid_actions = 0\n self.average_invalid_actions = []\n self.total_invalid_actions = []", "def episode_step(self, action: Schema) -> None:\n raise NotImplementedError(\"episode_step not implemented.\")", "def postRun(self):\n pass", "def _on_training_start(self) -> None:\n if self.eval_freq > 0:\n self.solver.run_tests(0, draw=self.draw, verbose=self.verbose)", "def on_pretrain_routine_end(self, model):\n for callback in self.callbacks:\n callback.on_pretrain_routine_end(self, model)", "def handle_failure(self, event: TrainingEndEvent) -> None:\n if event.status not in self.failure_types:\n if event.status != ModelStatus.Trained:\n _logger.warning('%s reports status %s. This is not a failure type.', event.model, event.status)\n return\n\n if self.action == 'metric':\n assert self.metric is not None\n\n if event.model.metric is not None:\n _logger.warning('%s failed, but it has a metric. Will send another metric of %f anyway.',\n event.model, self.metric)\n self.dispatch_model_event(ModelEventType.FinalMetric, metric=self.metric, model=event.model)\n\n elif self.action == 'retry':\n assert self.retry_patience is not None\n\n if self._retry_count[event.model] >= self.retry_patience:\n _logger.info('%s failed %d times. Will not retry it any more. Mark as failure.',\n event.model, self._retry_count[event.model])\n\n elif self.engine.budget_available(): # TODO: It'd be better to check idle worker and lock the resource here.\n self._retry_count[event.model] += 1\n _logger.debug('%s failed. Retrying. Attempt: %d', event.model, self._retry_count[event.model])\n\n # Maybe we should emit an event here?\n event.model.status = ModelStatus.Retrying\n\n # Clear its metrics.\n event.model.metrics.clear()\n\n # The rest of the callbacks shouldn't receive the event,\n # because for them, training didn't end.\n event.stop_propagation()\n\n self.engine.submit_models(event.model)\n\n else:\n _logger.info('Budget exhausted. 
Stop retrying although failed.')", "def reInitAndRun(self):\n self.playlists = self.readPlaylistData()\n self.audioDF = self.readAudioData(shouldProcess=True)\n self.clusterLabels = []\n self.models = Clusterers(k=len(self.playlists))\n self.processAndCluster()\n self.analyzeResults()", "def train(self):\n self.emission_model(self.train_data)\n self.transition_model(self.train_data)", "def train(self):\n self.epoch = 0\n self.step = 0\n self.start_time = time.time()\n for self.epoch in range(self.opt.num_epochs):\n self.run_epoch()\n if (self.epoch + 1) % self.opt.save_frequency == 0:\n self.save_model()", "def training_step(self, **kwargs):\n raise NotImplementedError", "def _prepare_to_stop(self):\n pass", "def reset(self):\n self.observation = None\n self.episode_done = True", "def begin_episode(self, observation):\n self._reset_state()\n self._record_observation(observation)\n\n if not self.eval_mode:\n self._train_step()\n\n self._rng, self.action = select_action(self.network_def,\n self.online_params,\n self.state,\n self._rng,\n self.num_quantile_samples,\n self.num_actions,\n self.eval_mode,\n self.epsilon_eval,\n self.epsilon_train,\n self.epsilon_decay_period,\n self.training_steps,\n self.min_replay_history,\n self.epsilon_fn,\n self._tau,\n self.optimizer)\n self.action = onp.asarray(self.action)\n return self.action", "def run( self ):\n # ---- Startup/Shutdown ----\n with self:\n\n # ---- Optionally reload from previous run ----\n if self.config.neuron.reload:\n self.reload()\n else:\n self.checkpoint()\n\n # --- Run until n_epochs ----\n while self.epoch < self.config.neuron.n_epochs:\n try:\n # ---- Train state ----\n self.run_epoch()\n\n # ---- Set weights on chain ----\n self.set_mechanism_weights()\n\n # ---- Checkpoint state ----\n self.checkpoint()\n\n except KeyboardInterrupt:\n # --- User ended session ----\n break\n\n except Exception as e:\n # --- Unknown error ----\n logger.exception('Unknown exception: {} with traceback {}', e, traceback.format_exc())\n if self.config.neuron.restart_on_failure == True:\n logger.info('Restarting from last saved state.')\n self.reload()\n else:\n break", "def testBeginEpisode(self):\n with tf.compat.v1.Session() as sess:\n agent = self._create_test_agent(sess)\n # We fill up the state with 9s. 
On calling agent.begin_episode the state\n # should be reset to all 0s.\n agent.state.fill(9)\n first_observation = np.ones(self.observation_shape + (1,))\n self.assertEqual(agent.begin_episode(first_observation), 0)\n # When the all-1s observation is received, it will be placed at the end of\n # the state.\n expected_state = self.zero_state\n expected_state[:, :, :, -1] = np.ones((1,) + self.observation_shape)\n self.assertAllEqual(agent.state, expected_state)\n self.assertAllEqual(agent._observation, first_observation[:, :, 0])\n # No training happens in eval mode.\n self.assertEqual(agent.training_steps, 0)\n\n # This will now cause training to happen.\n agent.eval_mode = False\n # Having a low replay memory add_count will prevent any of the\n # train/prefetch/sync ops from being called.\n agent._replay.memory.add_count = 0\n second_observation = np.ones(self.observation_shape + (1,)) * 2\n agent.begin_episode(second_observation)\n # The agent's state will be reset, so we will only be left with the all-2s\n # observation.\n expected_state[:, :, :, -1] = np.full((1,) + self.observation_shape, 2)\n self.assertAllEqual(agent.state, expected_state)\n self.assertAllEqual(agent._observation, second_observation[:, :, 0])\n # training_steps is incremented since we set eval_mode to False.\n self.assertEqual(agent.training_steps, 1)", "def on_train_begin(self, logs=None):", "def on_train_begin(self, logs=None):", "def episode_finish(self, reason: str) -> None:\n pass", "def training_phase(self):\r\n self.train_dataloader = self.get_dataloader(\r\n hdf_path=self.train_h5_path,\r\n data_description=\"training set\"\r\n )\r\n self.valid_dataloader = self.get_dataloader(\r\n hdf_path=self.valid_h5_path,\r\n data_description=\"validation set\"\r\n )\r\n\r\n self.get_ts_properties()\r\n\r\n self.initialize_output_files()\r\n\r\n start_epoch, end_epoch = self.define_model_and_optimizer()\r\n\r\n print(\"* Beginning training.\", flush=True)\r\n n_processed_batches = 0\r\n for epoch in range(start_epoch, end_epoch):\r\n\r\n self.current_epoch = epoch\r\n n_processed_batches = self.train_epoch(n_processed_batches=n_processed_batches)\r\n\r\n # evaluate model every `sample_every` epochs (not every epoch)\r\n if epoch % self.C.sample_every == 0:\r\n self.evaluate_model()\r\n else:\r\n util.write_model_status(score=\"NA\") # score not computed\r\n\r\n self.print_time_elapsed()", "def train(self, env):\n\n\t\tmin_average_reward_for_stopping = 195\n\t\tconsecutive_successful_episodes_to_stop = 10\n\t\tlast_10_rewards = deque(maxlen=consecutive_successful_episodes_to_stop)\n\n\t\tnum_Episodes = []\n\t\tEpisode_Rewards = []\n\n\t\tfor episode in range(self.episodes):\n\t\t\tstate = env.reset()\n\t\t\tstate = np.reshape(state, [1, self.state_size])\n\t\t\tdone = False\n\t\t\ttotal_reward = 0\n\n\t\t\twhile not done:\n\t\t\t\taction = self.act(state)\n\t\t\t\tnext_state, reward, done, _ = env.step(action)\n\t\t\t\tnext_state = np.reshape(next_state, [1, self.state_size])\n\t\t\t\tself.remember(state, action, reward, next_state, done)\n\t\t\t\tstate = next_state\n\t\t\t\ttotal_reward += reward\n\n\t\t\tnum_Episodes.append(episode)\n\t\t\tEpisode_Rewards.append(total_reward)\n\t\t\tlast_10_rewards.append(total_reward)\n\t\t\tlast_10_avg_reward = np.mean(last_10_rewards)\n\t\t\tprint(episode, last_10_avg_reward)\n\n\t\t\t# call experience relay\n\t\t\tif len(self.memory) >= self.batch_size:\n\t\t\t\tself.replay(self.batch_size)\n\t\t\t# Stopping criteria\n\t\t\tif len(\n\t\t\t\t\tlast_10_rewards) == 
consecutive_successful_episodes_to_stop \\\n\t\t\t\t\tand last_10_avg_reward > min_average_reward_for_stopping:\n\t\t\t\tprint(\"Solved after {} epsiodes\".format(episode))\n\t\t\t\tbreak", "def executeEpisode(self):\n trainExamples = []\n board = self.game.getInitBoard()\n player = 1\n episodeStep = 0\n\n while True:\n episodeStep += 1\n canonicalBoard = self.game.getCanonicalForm(board, player)\n temp = int(episodeStep < self.args.tempThreshold)\n\n pi = self.mcts.getActionProb(canonicalBoard, temp=temp)\n sym = self.game.getSymmetries(canonicalBoard, pi)\n for b, p in sym:\n trainExamples.append([b, player, p, None])\n\n action = np.random.choice(len(pi), p=pi)\n board, player = self.game.getNextState(board, player, action)\n\n r = self.game.getGameEnded(board, player)\n\n if r != 0:\n ex = [(x[0], x[2], r * ((-1) ** (x[1] != player))) for x in trainExamples]\n return ex", "def _step(self):\n title()\n self.runCount = 1\n self.experiment.pause = False\n self._runExperiment()\n self.pause = True", "def train_step(self):\n step_actions = self.get_step_actions()\n *_, dones, _ = tf.numpy_function(\n self.step_envs, [step_actions, True, True], self.batch_dtypes\n )\n for done_idx in tf.where(dones):\n gradient_steps = self.gradient_steps or self.episode_steps[done_idx[0]]\n self.update_weights(gradient_steps)\n self.episode_steps.assign(\n (self.episode_steps + self.step_increment) * (1 - dones)\n )", "def run(self) -> None:\n self.model = self.trainer.train_model(self.model, self.data)", "def begin_episode(self, observation):\n self._reset_state()\n self._record_observation(observation)\n\n if not self.eval_mode:\n self._train_step()\n\n self._rng, self.action = select_action(self.network_def,\n self.online_params,\n self.state,\n self._rng,\n self.num_actions,\n self.eval_mode,\n self.epsilon_eval,\n self.epsilon_train,\n self.epsilon_decay_period,\n self.training_steps,\n self.min_replay_history,\n self.epsilon_fn)\n self.action = onp.asarray(self.action)\n return self.action", "def _start_episode(self, test = False):\n self.env.true_reset()\n new_frame = self.env.reset()\n if not test:\n self.memory.add_experience(0, 0.0, new_frame, False)\n for i in range(self.num_stacked_frames):\n self._current_state[0, i] = new_frame\n new_frame, reward, done, _ = self.env.step(0)\n if not test:\n self.memory.add_experience(0, reward, new_frame, done)" ]
[ "0.65182483", "0.6447759", "0.62984306", "0.62847793", "0.62345207", "0.62345207", "0.623374", "0.62325156", "0.62036747", "0.6123561", "0.60841036", "0.6069675", "0.60509044", "0.6032578", "0.6024081", "0.6007592", "0.5988098", "0.59657085", "0.5957134", "0.5953631", "0.59447217", "0.59431905", "0.5931101", "0.5927832", "0.5922341", "0.59206146", "0.5876515", "0.5873242", "0.5852624", "0.5846757", "0.5830805", "0.5824563", "0.58207804", "0.58154964", "0.5786973", "0.5784327", "0.577061", "0.5768154", "0.5767179", "0.57606596", "0.5756764", "0.5750895", "0.5743343", "0.5734266", "0.5734266", "0.5732134", "0.5716877", "0.5710809", "0.570366", "0.56915516", "0.5688611", "0.5675237", "0.5660747", "0.5658659", "0.5655177", "0.5647666", "0.5626545", "0.56240153", "0.56168014", "0.56019956", "0.55906177", "0.5581301", "0.5581088", "0.5579561", "0.557585", "0.55745846", "0.55721813", "0.5568931", "0.55607194", "0.55541337", "0.5537986", "0.5518165", "0.55138034", "0.551047", "0.5508878", "0.55025125", "0.5501991", "0.5497697", "0.5493931", "0.5491725", "0.5484478", "0.54814684", "0.54774356", "0.5457017", "0.5455599", "0.54512924", "0.54483145", "0.5436675", "0.5434207", "0.5430013", "0.54201216", "0.54201216", "0.5419317", "0.54190946", "0.54177266", "0.54170406", "0.54122263", "0.54027975", "0.5400114", "0.53990257", "0.5397326" ]
0.0
-1
Updates the network using either DDPG-style soft updates (w/ param TAU), or a DQN/D4PG-style hard update every C timesteps.
def update_networks(self, agent, force_hard=False):
    if self.update_type == "soft" and not force_hard:
        self._soft_update(agent.actor, agent.actor_target)
        self._soft_update(agent.critic, agent.critic_target)
    elif self.t_step % self.C == 0 or force_hard:
        self._hard_update(agent.actor, agent.actor_target)
        self._hard_update(agent.critic, agent.critic_target)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_target_network(self):\n\n\t\tprint \"Updating Target DQN...\"\n\t\t\n\t\tself.update_operation.run()", "def _soft_update_target_network(self):\n\n # Update the target network\n for target_param, param in zip(self.actor_target_network.parameters(), self.actor_network.parameters()):\n target_param.data.copy_((1-self.args.tau) * target_param.data + self.args.tau * param.data)\n\n # Update the critic network\n for target_param, param in zip(self.critic_target_network.parameters(), self.critic_network.parameters()):\n target_param.data.copy_((1-self.args.tau) * target_param.data + self.args.tau * param.data)", "def weight_update_adagrad(self, network):\n epsilon = 10e-8\n\n if self.ms_b is None or self.ms_q is None:\n self.ms_b = []\n self.ms_q = []\n self.ms_rx_inp = []\n self.ms_ry_inp = []\n self.ms_rx_pos_out = []\n self.ms_ry_pos_out = []\n self.ms_rx_neg_out = []\n self.ms_ry_neg_out = []\n for l, layer in enumerate(network.layers):\n self.ms_b.append(np.zeros(layer.b.shape))\n self.ms_q.append(np.zeros(layer.q.shape))\n self.ms_rx_inp.append(np.zeros(layer.input_size))\n self.ms_ry_inp.append(np.zeros(layer.input_size))\n self.ms_rx_pos_out.append(np.zeros(layer.output_size))\n self.ms_ry_pos_out.append(np.zeros(layer.output_size))\n self.ms_rx_neg_out.append(np.zeros(layer.output_size))\n self.ms_ry_neg_out.append(np.zeros(layer.output_size))\n\n for l, layer in enumerate(network.layers):\n self.ms_b[l] += self.dc_db[l] ** 2\n self.ms_q[l] += self.dc_dq[l] ** 2\n\n self.ms_rx_inp[l] += self.dc_drx_inp[l] ** 2\n self.ms_ry_inp[l] += self.dc_dry_inp[l] ** 2\n\n self.ms_rx_pos_out[l] += self.dc_drx_pos_out[l] ** 2\n self.ms_ry_pos_out[l] += self.dc_dry_pos_out[l] ** 2\n self.ms_rx_neg_out[l] += self.dc_drx_neg_out[l] ** 2\n self.ms_ry_neg_out[l] += self.dc_dry_neg_out[l] ** 2\n\n layer.b += -self.alpha * self.dc_db[l] / np.sqrt(self.ms_b[l] + epsilon)\n layer.q += -self.alpha * self.dc_dq[l] / np.sqrt(self.ms_q[l] + epsilon)\n\n layer.rx_inp += -self.alpha * self.dc_drx_inp[l] / np.sqrt(self.ms_rx_inp[l] + epsilon)\n layer.ry_inp += -self.alpha * self.dc_dry_inp[l] / np.sqrt(self.ms_ry_inp[l] + epsilon)\n\n layer.rx_pos_out += -self.alpha * self.dc_drx_pos_out[l] / np.sqrt(self.ms_rx_pos_out[l] + epsilon)\n layer.ry_pos_out += -self.alpha * self.dc_dry_pos_out[l] / np.sqrt(self.ms_ry_pos_out[l] + epsilon)\n layer.rx_neg_out += -self.alpha * self.dc_drx_neg_out[l] / np.sqrt(self.ms_rx_neg_out[l] + epsilon)\n layer.ry_neg_out += -self.alpha * self.dc_dry_neg_out[l] / np.sqrt(self.ms_ry_neg_out[l] + epsilon)", "def update_target_network(self, tau):\n for t, e in zip(\n self.target_network.trainable_variables, self.online_network.trainable_variables\n ):\n t.assign(t * (1-tau) + e * tau)", "def weight_update_adadelta(self, network):\n gamma = self.gamma\n one_m_gamma = 1.0 - gamma\n small = 0.001\n\n if self.ms_b is None or self.ms_q is None:\n self.ms_b = []\n self.ms_q = []\n self.ms_rx_inp = []\n self.ms_ry_inp = []\n self.ms_rx_pos_out = []\n self.ms_ry_pos_out = []\n self.ms_rx_neg_out = []\n self.ms_ry_neg_out = []\n for l, layer in enumerate(network.layers):\n self.ms_b.append(np.zeros(layer.b.shape))\n self.ms_q.append(np.zeros(layer.q.shape))\n self.ms_rx_inp.append(np.zeros(layer.input_size))\n self.ms_ry_inp.append(np.zeros(layer.input_size))\n self.ms_rx_pos_out.append(np.zeros(layer.output_size))\n self.ms_ry_pos_out.append(np.zeros(layer.output_size))\n self.ms_rx_neg_out.append(np.zeros(layer.output_size))\n 
self.ms_ry_neg_out.append(np.zeros(layer.output_size))\n\n # Initialize deltas to one such that first step is approximately equivalent to RMSprop\n if self.del_b is None or self.del_q is None:\n self.del_b = []\n self.del_q = []\n self.del_rx_inp = []\n self.del_ry_inp = []\n self.del_rx_pos_out = []\n self.del_ry_pos_out = []\n self.del_rx_neg_out = []\n self.del_ry_neg_out = []\n for l, layer in enumerate(network.layers):\n self.del_b.append(np.ones(layer.b.shape) * small)\n self.del_q.append(np.ones(layer.q.shape) * small)\n self.del_rx_inp.append(np.ones(layer.input_size) * small)\n self.del_ry_inp.append(np.ones(layer.input_size) * small)\n self.del_rx_pos_out.append(np.ones(layer.output_size) * small)\n self.del_ry_pos_out.append(np.ones(layer.output_size) * small)\n self.del_rx_neg_out.append(np.ones(layer.output_size) * small)\n self.del_ry_neg_out.append(np.ones(layer.output_size) * small)\n\n for l, layer in enumerate(network.layers):\n self.ms_b[l] = gamma * self.ms_b[l] + one_m_gamma * self.dc_db[l] ** 2\n self.ms_q[l] = gamma * self.ms_q[l] + one_m_gamma * self.dc_dq[l] ** 2\n self.ms_rx_inp[l] = gamma * self.ms_rx_inp[l] + one_m_gamma * self.dc_drx_inp[l] ** 2\n self.ms_ry_inp[l] = gamma * self.ms_ry_inp[l] + one_m_gamma * self.dc_dry_inp[l] ** 2\n self.ms_rx_pos_out[l] = gamma * self.ms_rx_pos_out[l] + one_m_gamma * self.dc_drx_pos_out[l] ** 2\n self.ms_ry_pos_out[l] = gamma * self.ms_ry_pos_out[l] + one_m_gamma * self.dc_dry_pos_out[l] ** 2\n self.ms_rx_neg_out[l] = gamma * self.ms_rx_neg_out[l] + one_m_gamma * self.dc_drx_neg_out[l] ** 2\n self.ms_ry_neg_out[l] = gamma * self.ms_ry_neg_out[l] + one_m_gamma * self.dc_dry_neg_out[l] ** 2\n\n del_b = -self.alpha * self.dc_db[l] * self.sqrt_eps(self.del_b[l]) / self.sqrt_eps(self.ms_b[l])\n del_q = -self.alpha * self.dc_dq[l] * self.sqrt_eps(self.del_q[l]) / self.sqrt_eps(self.ms_q[l])\n del_rx_inp = -self.alpha * self.dc_drx_inp[l] * self.sqrt_eps(self.del_rx_inp[l]) / self.sqrt_eps(self.ms_rx_inp[l])\n del_ry_inp = -self.alpha * self.dc_dry_inp[l] * self.sqrt_eps(self.del_ry_inp[l]) / self.sqrt_eps(self.ms_ry_inp[l])\n del_rx_pos_out = -self.alpha * self.dc_drx_pos_out[l] * self.sqrt_eps(self.del_rx_pos_out[l]) / self.sqrt_eps(self.ms_rx_pos_out[l])\n del_ry_pos_out = -self.alpha * self.dc_dry_pos_out[l] * self.sqrt_eps(self.del_ry_pos_out[l]) / self.sqrt_eps(self.ms_ry_pos_out[l])\n del_rx_neg_out = -self.alpha * self.dc_drx_neg_out[l] * self.sqrt_eps(self.del_rx_neg_out[l]) / self.sqrt_eps(self.ms_rx_neg_out[l])\n del_ry_neg_out = -self.alpha * self.dc_dry_neg_out[l] * self.sqrt_eps(self.del_ry_neg_out[l]) / self.sqrt_eps(self.ms_ry_neg_out[l])\n\n layer.b += del_b\n layer.q += del_q\n layer.rx_inp += del_rx_inp\n layer.ry_inp += del_ry_inp\n layer.rx_pos_out += del_rx_pos_out\n layer.ry_pos_out += del_ry_pos_out\n layer.rx_neg_out += del_rx_neg_out\n layer.ry_neg_out += del_ry_neg_out\n\n self.del_b[l] = gamma * self.del_b[l] + one_m_gamma * del_b ** 2\n self.del_q[l] = gamma * self.del_q[l] + one_m_gamma * del_q ** 2\n self.del_rx_inp[l] = gamma * self.del_rx_inp[l] + one_m_gamma * del_rx_inp ** 2\n self.del_ry_inp[l] = gamma * self.del_ry_inp[l] + one_m_gamma * del_ry_inp ** 2\n self.del_rx_pos_out[l] = gamma * self.del_rx_pos_out[l] + one_m_gamma * del_rx_pos_out ** 2\n self.del_ry_pos_out[l] = gamma * self.del_ry_pos_out[l] + one_m_gamma * del_ry_pos_out ** 2\n self.del_rx_neg_out[l] = gamma * self.del_rx_neg_out[l] + one_m_gamma * del_rx_neg_out ** 2\n self.del_ry_neg_out[l] = gamma * self.del_ry_neg_out[l] + 
one_m_gamma * del_ry_neg_out ** 2", "def weight_update_steepest_descent(self, network):\n for l, layer in enumerate(network.layers):\n layer.b -= self.alpha * self.dc_db[l]\n layer.q -= self.alpha * self.dc_dq[l]\n layer.rx_inp -= self.alpha * self.dc_drx_inp[l]\n layer.ry_inp -= self.alpha * self.dc_dry_inp[l]\n layer.rx_pos_out -= self.alpha * self.dc_drx_pos_out[l]\n layer.ry_pos_out -= self.alpha * self.dc_dry_pos_out[l]\n layer.rx_neg_out -= self.alpha * self.dc_drx_neg_out[l]\n layer.ry_neg_out -= self.alpha * self.dc_dry_neg_out[l]", "def update_network(self, tr_d, lr, relz=\"\", lmbda=0.0, mu=0.0):\n trlen = float(len(tr_d))\n delta_b = [np.zeros(b.shape) for b in self.biases]\n delta_w = [np.zeros(w.shape) for w in self.weights]\n for x,y in tr_d:\n delta_b_single, delta_w_single = self.backppg_ce(x,y)\n delta_b = [db+dbs for db,dbs in zip(delta_b, delta_b_single)]\n delta_w = [dw+dws for dw,dws in zip(delta_w, delta_w_single)]\n #update the parameters in network\n if(relz==\"\"):\n mu=0.0\n elif(relz[0:2] == \"MO\"):\n relz = relz[2:]\n self.velw = [mu*vw-(lr/trlen)*dw for vw,dw in zip(self.velw, delta_w)]\n self.velb = [mu*vb-(lr/trlen)*db for vb,db in zip(self.velb, delta_b)]\n self.biases = [b + vb for b,vb in zip(self.biases, self.velb)]\n if(relz == \"L2\"):\n self.weights = [w + vw - (lr/trlen/100)*lmbda*w for w,vw in zip(self.weights, self.velw)]\n elif(relz == \"L1\"):\n self.weights = [w + vw - (lr/trlen/100)*lmbda*np.sign(w) for w,vw in zip(self.weights, self.velw)]\n else:\n self.weights = [w + vw for w,vw in zip(self.weights, self.velw)]", "def update(self,parameters, grads):\n \n L = len(parameters) // 2 # number of layers in the neural network\n #print(L)\n\n # Update rule for each parameter. Use a for loop.\n for l in range(L):\n \n parameters[\"W\" + str(l+1)] = parameters[\"W\" + str(l+1)] - self.alpha * grads[\"dW\" + str(l+1)]\n \n parameters[\"b\" + str(l+1)] = parameters[\"b\" + str(l+1)] - self.alpha * grads[\"db\" + str(l+1)]\n \n parameters[\"W\" + str(l+1)][np.isnan(parameters[\"W\" + str(l+1)])] = 0\n parameters[\"b\" + str(l+1)][np.isnan(parameters[\"b\" + str(l+1)])] = 0\n \n return parameters", "def update_net(self) -> None:\n self.units.update_net()", "def update(self, sim, dt):\n #growth kinetics\n self.division_timer += dt\n #you can grow unless you are in the A state meaning apoptosis\n if(self.division_timer >= self.division_time and self._division):\n #now you can divide\n if(self.state == \"T1\"):\n #change the current sytate to D\n self.state = \"NSC\"\n self._division = False\n self.division_time = 36\n #progenitor time is faster with concentration factor\n\n #add the concentration\n source, consump_rate = self.get_gradient_source_sink_coeff(\"TNF\")\n self.set_gradient_source_sink_coeff(\"TNF\", 50.0*source, 1.0*consump_rate)\n## #get neighbors\n## nbs = sim.network.neighbors(self)\n## #get the total\n## tot = len(nbs)\n## mn_count = 0\n## for i in range(0, tot):\n## if(nbs[i].state == \"MN\" or nbs[i].state == \"T2\"): \n## mn_count += 1\n## norm_mn = float(mn_count) / float(tot)\n## if(norm_mn < self._p2):\n## self.division_time = 36*(norm_mn) # in hours\n## self.division_time = max(self.division_time, 1) \n## else:\n## \n## print(norm_mn, self.division_time)\n #also set the current consumption rate\n## source, consump_rate = self.get_gradient_source_sink_coeff(\"EGF\")\n## self.set_gradient_source_sink_coeff(\"EGF\", source, 1.0*consump_rate)\n if(self.state == \"T2\"):\n #change the current sytate to D\n self.state = \"MN\"\n 
self.division_time = 56 #in hours\n #also set the current consumption rate\n source, consump_rate = self.get_gradient_source_sink_coeff(\"EGF\")\n self.set_gradient_source_sink_coeff(\"EGF\", 50.0*source, 1.0*consump_rate)\n if(self.state == \"T3\"):\n #change the current sytate to D\n self.state = \"G\"\n self.division_time = 56 #in hours\n #also set the current consumption rate\n## source, consump_rate = self.get_gradient_source_sink_coeff(\"EGF\")\n## self.set_gradient_source_sink_coeff(\"EGF\", source, 1.0*consump_rate)\n #get the location\n #pick a random point on a sphere\n location = RandomPointOnSphere()*self.radius/2.0 + self.location\n #get the radius\n radius = self.radius\n #get the ID\n ID = sim.get_ID()\n #make the object\n sc = NueronalStemCell(location, radius, ID, self.state,\n division_time = self.division_time,\n params = [self._p1, self._p2,\n self._p3, self._p4, self._p5,\n self._p6, self.p7])\n #copy secretion to NSC progeny\n if(self.state == \"NSC\"):\n source, consump_rate = self.get_gradient_source_sink_coeff(\"TNF\")\n sc.set_gradient_source_sink_coeff(\"TNF\", 50.0*source, 1.0*consump_rate)\n sc._division = False\n #set its soluble count\n## sc.sol_count = self.sol_count / 2.\n## self.sol_count = self.sol_count / 2.\n #copy over all of the coefficients to the new cells\n## prod_cons = self.get_gradient_source_sink_coeff(\"O2\")\n## sc.set_gradient_source_sink_coeff(\"O2\", prod_cons[0], prod_cons[1])\n prod_cons = self.get_gradient_source_sink_coeff(\"EGF\")\n sc.set_gradient_source_sink_coeff(\"EGF\", prod_cons[0], prod_cons[1]) \n #add it to the imsulation\n sim.add_object_to_addition_queue(sc)\n #reset the division time\n self.division_timer = 0\n \n if(self.state == \"U\"):\n #HANDLE DIFFERENTIATION\n #RANDOM RULE\n x = rand.random()\n prob = self._p1 #probability of turning into a NSC\n #longer before the differentiation starts\n if(x < prob):\n #differentiation occurs\n self.state = \"T1\"\n #also add a proabability to differentiate directly to a mn\n n1 = self._p4\n## #get neighbors\n## nbs = sim.network.neighbors(self)\n## #get the total\n## tot = len(nbs)\n## mn_count = 0\n## if(tot > 0):\n## #count up the states fo all fo these\n## for i in range(0, tot):\n## if(nbs[i].state == \"MN\" or nbs[i].state == \"T2\"): \n## mn_count += 1\n #get the value fo the gradient and make differntiation inversly\n #inversly correlated with the proportion present\n norm_mn = self.get_gradient_value(\"EGF\")\n #probability of turning into a motor nueron\n n1 = self._p4\n## #normalize the result\n## if(tot != 0):\n## norm_mn = float(mn_count) / float(tot)\n## else:\n## norm_mn = 0\n #calculate the probability\n prob_MN = 1 - (1.*norm_mn**n1)/(self._p2**n1 + norm_mn**n1)\n x1 = rand.random()\n if(x1 <= self._p1*prob_MN):\n #differentiation occurs towards a motor nueron\n self.state = \"T2\"\n \n if(self.state == \"NSC\"):\n #HANDLE DIFFERENTIATION\n #RANDOM RULE\n x1 = rand.random()\n x2 = rand.random()\n #Find all the motor nuerons\n## #get neighbors\n## nbs = sim.network.neighbors(self)\n## #get the total\n## tot = len(nbs)\n## mn_count = 0\n## if(tot > 0):\n## #count up the states fo all fo these\n## for i in range(0, tot):\n## if(nbs[i].state == \"MN\" or nbs[i].state == \"T2\"): \n## mn_count += 1\n## #normalize the result\n## norm_mn = float(mn_count) / float(tot)\n #Make differerntiationd ependant on the gradient value\n norm_mn = self.get_gradient_value(\"EGF\")\n #set the paramaters\n n1 = self._p4\n #update the division time\n## self.division_time = norm_mn * 38 
#in hours takes care of the feedback\n #depends on other motor nuerons\n prob_MN = 1 - (1.*norm_mn**n1)/(self._p3**n1 + norm_mn**n1) #probability of turning into a motor nueron\n## prob_G = (1.*norm_mn**n2)/(self._p3**n1 + norm_mn**n2) #of turning into a glial cell\n prob_G = self._p5\n #longer before the differentiation starts\n if(x1 <= prob_MN and x2 > prob_G):\n #differentiation occurs towards a motor nueron\n self.state = \"T2\"\n if(x1 > prob_MN and x2 <= prob_G):\n #differentiation occurs towards a glial cell\n self.state = \"T3\"\n #check to see if division enabled\n if(self._division == False):\n #check for mitotic speed up\n a = self._p6\n b = self._p7\n norm_nsc = self.get_gradient_value(\"TNF\")\n prob_divide = (1.*norm_nsc**b)/(a**b + norm_nsc**b)\n r = rand.random()\n if(r <= x):\n self._division = True", "def hard_update_target_network(self,step):\n \n if step % self.C == 0:\n pars = self.model.get_weights()\n self.target_model.set_weights(pars)", "def hard_update_target_network(self,step):\n \n if step % self.C == 0:\n pars = self.model.get_weights()\n self.target_model.set_weights(pars)", "def update_network_parameters(self, tau=None):\n\n #Is used during the first iteration such that the target networks get the same parameters of the normal networks (hard update)\n if tau is None:\n tau = self.tau\n\n #Update the target_actor weights\n weights = []\n targets = self.target_actor.weights\n for i, weight in enumerate(self.actor.weights):\n weights.append(weight * tau + targets[i]*(1-tau))\n\n self.target_actor.set_weights(weights)\n\n #Update the target_critic_1 weights\n weights = []\n targets = self.target_critic_1.weights\n for i, weight in enumerate(self.critic_1.weights):\n weights.append(weight * tau + targets[i]*(1-tau))\n\n self.target_critic_1.set_weights(weights)\n\n #Update the target_critic_2 weights\n weights = []\n targets = self.target_critic_2.weights\n for i, weight in enumerate(self.critic_2.weights):\n weights.append(weight * tau + targets[i]*(1-tau))\n\n self.target_critic_2.set_weights(weights)", "def update_target_network(self, tau):\n for p_target, p_local in zip(self.q_network_target.parameters(), self.q_network_local.parameters()):\n p_target.data.copy_(tau * p_local.data + (1.0-tau) * p_target.data)", "def Update(self):\n if self.NAtt > 0:\n self.D *= (self.NAcc / self.NAtt + 0.1) / (self.TargetAcc + 0.1)\n self.D = min(self.MaxD, max(self.MinD, self.D))", "def update_parameters_with_gd(parameters, grads, learning_rate):\n\n L = len(parameters) // 2 # number of layers in the neural networks\n\n # Update rule for each parameter\n for l in range(L):\n ### START CODE HERE ### (approx. 
2 lines)\n parameters[\"W\" + str(l+1)] = parameters[\"W\" + str(l+1)]-learning_rate* grads[\"dW\" + str(l+1)]\n parameters[\"b\" + str(l+1)] = parameters[\"b\" + str(l+1)]-learning_rate* grads[\"db\" + str(l+1)]\n ### END CODE HERE ###\n \n return parameters", "def _update_model(self, verbose: bool, raw=True, smooth=False):\n if (raw and smooth) or (not raw and not smooth):\n raise InversionsonError(\"SGDM updates can be raw or smooth, not both\")\n if raw:\n gradient = (\n self.comm.lasif.lasif_comm.project.paths[\"gradients\"]\n / f\"ITERATION_{self.iteration_name}\"\n / \"summed_gradient.h5\"\n )\n if not os.path.exists(self.raw_gradient_path):\n shutil.copy(gradient, self.raw_gradient_path)\n if not os.path.exists(self.raw_update_path):\n self._compute_raw_update()\n if smooth:\n self._apply_smooth_update()", "def update(self, t):\n if self.num_Xis > 0:\n # Try to calculate dW numbers separately\n\n # For nXi_updates > 1 then the ordering of calls to np.random.randn\n # needs to be equivalent to that for the corresponding smaller dt\n if self.nXi_updates > 1:\n self.dW_nums[:] = 0.0\n for j in range(self.nXi_updates):\n for i in range(self.num_Xis):\n self.dW_nums[i] += np.random.randn() * np.sqrt(self.dt/self.nXi_updates)\n\n else:\n for i in range(self.num_Xis):\n if self.smooth_t is not None:\n self.dW_nums[i] = self.smooth_t(t)\n elif self.fixed_dW is not None:\n self.dW_nums[i] = self.fixed_dW * np.sqrt(self.dt)\n else:\n self.dW_nums[i] = np.random.randn() * np.sqrt(self.dt)\n\n # This is to ensure we stick close to what the original code did\n [dw.assign(dw_num) for dw, dw_num in zip(self.dWs, self.dW_nums)]\n self.dXi_interpolator.interpolate()\n\n if self.scheme in ['hydrodynamic', 'LASCH_hydrodynamic']:\n self.dXi_x_interpolator.interpolate()\n self.dXi_xx_interpolator.interpolate()\n else:\n pass", "def compile_update_svdd(nnet, inputs, targets):\n\n floatX = Cfg.floatX\n B = Cfg.B\n C = Cfg.C\n nu = Cfg.nu\n\n # initialize R\n if nnet.R_init > 0:\n nnet.Rvar = shared(floatX(nnet.R_init), name=\"R\")\n else:\n nnet.Rvar = shared(floatX(1), name=\"R\") # initialization with R=1\n\n # Loss\n feature_layer = nnet.all_layers[-1]\n rep = lasagne.layers.get_output(feature_layer, inputs=inputs,\n deterministic=False)\n\n # initialize c (0.5 in every feature representation dimension)\n rep_dim = feature_layer.num_units\n # nnet.cvar = shared(floatX(np.ones(rep_dim) * (1. / (rep_dim ** 0.5))),\n # name=\"c\")\n nnet.cvar = shared(floatX(np.ones(rep_dim) * 0.5), name=\"c\")\n\n dist = T.sum(((rep - nnet.cvar.dimshuffle('x', 0)) ** 2),\n axis=1, dtype='floatX')\n scores = dist - nnet.Rvar\n stack = T.stack([T.zeros_like(scores), scores], axis=1)\n loss = T.cast(T.sum(T.max(stack, axis=1)) / (inputs.shape[0] * nu),\n dtype='floatX')\n\n y_pred = T.argmax(stack, axis=1)\n acc = T.cast((T.sum(T.eq(y_pred.flatten(), targets), dtype='int32')\n * 1. 
/ targets.shape[0]), 'floatX')\n\n # Network weight decay\n if Cfg.weight_decay:\n l2_penalty = (1/C) * get_l2_penalty(nnet,\n include_bias=Cfg.include_bias,\n pow=Cfg.pow)\n else:\n l2_penalty = T.cast(0, dtype='floatX')\n\n # Network activation sparsity regularization\n if Cfg.sparsity_penalty:\n sparsity_penalty = (1/B) * get_sparsity_penalty(nnet, inputs,\n Cfg.sparsity,\n mode=Cfg.sparsity_mode,\n deterministic=False)\n else:\n sparsity_penalty = T.cast(0, dtype='floatX')\n\n # Backpropagation (hard-margin: only minimizing everything to a ball\n # centered at c)\n trainable_params = lasagne.layers.get_all_params(feature_layer,\n trainable=True)\n if Cfg.gaussian_blob:\n avg_dist = T.mean(1-T.exp(-dist), dtype=\"floatX\")\n else:\n avg_dist = T.mean(dist, dtype=\"floatX\")\n obj_ball = T.cast(floatX(0.5) * l2_penalty + avg_dist + sparsity_penalty,\n dtype='floatX')\n updates_ball = get_updates(nnet, obj_ball, trainable_params,\n solver=nnet.solver)\n nnet.backprop_ball = theano.function([inputs, targets], [obj_ball, acc],\n updates=updates_ball)\n\n # Backpropagation (without training R)\n obj = T.cast(floatX(0.5) * l2_penalty + nnet.Rvar + loss + sparsity_penalty,\n dtype='floatX')\n updates = get_updates(nnet, obj, trainable_params, solver=nnet.solver)\n nnet.backprop_without_R = theano.function([inputs, targets], [obj, acc],\n updates=updates)\n\n # Backpropagation (with training R)\n trainable_params.append(nnet.Rvar) # add radius R to trainable parameters\n updates = get_updates(nnet, obj, trainable_params, solver=nnet.solver)\n nnet.backprop = theano.function([inputs, targets], [obj, acc],\n updates=updates)\n\n\n # Forwardpropagation\n test_rep = lasagne.layers.get_output(feature_layer, inputs=inputs,\n deterministic=True)\n test_rep_norm = test_rep.norm(L=2, axis=1)\n\n test_dist = T.sum(((test_rep - nnet.cvar.dimshuffle('x', 0)) ** 2),\n axis=1, dtype='floatX')\n\n test_scores = test_dist - nnet.Rvar\n test_stack = T.stack([T.zeros_like(test_scores), test_scores], axis=1)\n test_loss = T.cast(T.sum(T.max(test_stack, axis=1)) / (inputs.shape[0]*nu),\n dtype='floatX')\n\n test_y_pred = T.argmax(test_stack, axis=1)\n test_acc = T.cast((T.sum(T.eq(test_y_pred.flatten(), targets),\n dtype='int32')\n * 1. 
/ targets.shape[0]), dtype='floatX')\n\n # Network activation sparsity regularization (with determinisitc=True)\n if Cfg.sparsity_penalty:\n test_sparsity_penalty = ((1 / B) *\n get_sparsity_penalty(nnet, inputs,\n Cfg.sparsity,\n mode=Cfg.sparsity_mode,\n deterministic=True))\n else:\n test_sparsity_penalty = T.cast(0, dtype='floatX')\n\n test_obj = T.cast(floatX(0.5) * l2_penalty + nnet.Rvar + test_loss\n + test_sparsity_penalty, dtype='floatX')\n nnet.forward = theano.function([inputs, targets],\n [test_obj, test_acc, test_scores,\n floatX(0.5) * l2_penalty,\n test_sparsity_penalty, test_rep,\n test_rep_norm, test_loss, nnet.Rvar])", "def acUpdate(deltaT):\n pass", "def soft_update(source_net, target_net, tau):\n for target_param, param in zip(target_net.parameters(), source_net.parameters()):\n target_param.data.copy_(\n target_param.data * (1.0 - tau) + param.data * tau\n )", "def _compute_raw_update(self):\n\n self.print(\"SGD with Momentum: Computing raw update...\", line_above=True)\n # Read task toml\n\n iteration_number = self.task_dict[\"iteration_number\"] + 1\n\n indices = self.get_parameter_indices(self.raw_gradient_path)\n # scale the gradients, because they can be tiny and this leads to issues\n g_t = self.get_h5_data(self.raw_gradient_path) * self.grad_scaling_fac\n\n if np.sum(np.isnan(g_t)) > 1:\n raise Exception(\n \"NaNs were found in the raw gradient.\" \"Something must be wrong.\"\n )\n\n if iteration_number == 1: # Initialize moments if needed\n shutil.copy(self.raw_gradient_path, self.moment_path)\n write_xdmf(self.moment_path)\n\n with h5py.File(self.moment_path, \"r+\") as h5:\n data = h5[\"MODEL/data\"]\n\n # initialize with zeros\n for i in indices:\n data[:, i, :] = np.zeros_like(data[:, i, :])\n\n v_t = self.beta * self.get_h5_data(self.moment_path) + (1 - self.beta) * g_t\n\n # Store first moment\n shutil.copy(\n self.moment_path,\n self._get_path_for_iteration(self.iteration_number + 1, self.moment_path),\n )\n self.set_h5_data(\n self._get_path_for_iteration(self.iteration_number + 1, self.moment_path),\n v_t,\n )\n\n # Correct bias\n v_t = v_t / (1 - self.beta ** (self.iteration_number + 1))\n update = self.alpha * v_t\n\n if np.sum(np.isnan(update)) > 1:\n raise Exception(\n \"NaNs were found in the raw update.\"\n \"Check if the gradient is not excessively small\"\n )\n\n # Write raw update to file for smoothing\n shutil.copy(self.raw_gradient_path, self.raw_update_path)\n self.set_h5_data(self.raw_update_path, update)", "def update_networks(self):\n\t\t# layer 1 update\n\t\tself.W1_tv = tf.assign(self.W1_tv, self.W1_av)\n\t\tself.b1_tv = tf.assign(self.b1_tv, self.b1_av)\n\n\t\t# layer 2 update\n\t\tself.W2_tv = tf.assign(self.W2_tv, self.W2_av)\n\t\tself.b2_tv = tf.assign(self.b2_tv, self.b2_av)\n\n\t\t# layer 3 update\n\t\tself.W3_tv = tf.assign(self.W3_tv, self.W3_av)\n\t\tself.b3_tv = tf.assign(self.b3_tv, self.b3_av)", "def update(self, dLds, alpha, beta):\n T = len(self.x)\n self.nodes.reset_error()\n self.igate.reset_error()\n self.fgate.reset_error()\n self.ogate.reset_error()\n dLdx = np.zeros((T, self.input_size))\n dLdc = np.zeros(self.hidden_size)\n for t in xrange(T-1, -1, -1):\n dLdpo = dLds[t] * self.h[t] * self.gatefun.derivate(self.o[t])\n # parameters for output gate\n self.ogate.dLdu += np.outer(dLdpo, self.x[t])\n self.ogate.dLdw += np.outer(dLdpo, self.s[t-1])\n self.ogate.dLdv += np.outer(dLdpo, self.c[t-1])\n dLds[t-1] += np.dot(self.ogate.w.T, dLdpo)\n dLdx[t] += np.dot(self.ogate.u.T, dLdpo)\n dLdc += np.dot(self.ogate.v.T, 
dLdpo)\n\n dLdc += dLds[t] * self.o[t] * self.acfun.derivate(self.h[t])\n dLdpi = dLdc * self.g[t] * self.gatefun.derivate(self.i[t])\n dLdpf = dLdc * self.c[t-1] * self.gatefun.derivate(self.f[t])\n dLdpg = dLdc * self.i[t] * self.acfun.derivate(self.g[t])\n dLdc = dLdc * self.f[t]\n # parameters for nodes in hidden layer\n self.nodes.dLdu += np.outer(dLdpg, self.x[t])\n self.nodes.dLdw += np.outer(dLdpg, self.s[t-1])\n dLds[t-1] += np.dot(self.nodes.w.T, dLdpg)\n dLdx[t] += np.dot(self.nodes.u.T, dLdpg)\n # parameters for input gate\n self.igate.dLdu += np.outer(dLdpi, self.x[t])\n self.igate.dLdw += np.outer(dLdpi, self.s[t-1])\n self.igate.dLdv += np.outer(dLdpi, self.c[t-1])\n dLds[t-1] += np.dot(self.igate.w.T, dLdpi)\n dLdx[t] += np.dot(self.igate.u.T, dLdpi)\n dLdc += np.dot(self.igate.v.T, dLdpi)\n # parameters for forget gate\n self.fgate.dLdu += np.outer(dLdpf, self.x[t])\n self.fgate.dLdw += np.outer(dLdpf, self.s[t-1])\n self.fgate.dLdv += np.outer(dLdpf, self.c[t-1])\n dLds[t-1] += np.dot(self.fgate.w.T, dLdpf)\n dLdx[t] += np.dot(self.fgate.u.T, dLdpf)\n dLdc += np.dot(self.fgate.v.T, dLdpf)\n if self.en_bias:\n self.nodes.dLdb += dLdpg\n self.igate.dLdb += dLdpi\n self.fgate.dLdb += dLdpf\n self.ogate.dLdb += dLdpo\n # update weight matrix of current hidden node\n self.nodes.update(alpha, beta)\n self.igate.update(alpha, beta)\n self.fgate.update(alpha, beta)\n self.ogate.update(alpha, beta)\n return dLdx", "def weight_update_nesterov(self, network):\n # Before updating, take step back with current velocity\n for l, layer in enumerate(network.layers):\n layer.b -= self.beta * self.vel_b[l]\n layer.q -= self.beta * self.vel_q[l]\n layer.rx_inp -= self.beta * self.vel_rx_inp[l]\n layer.ry_inp -= self.beta * self.vel_ry_inp[l]\n layer.rx_pos_out -= self.beta * self.vel_rx_pos_out[l]\n layer.ry_pos_out -= self.beta * self.vel_ry_pos_out[l]\n layer.rx_neg_out -= self.beta * self.vel_rx_neg_out[l]\n layer.ry_neg_out -= self.beta * self.vel_ry_neg_out[l]\n\n # Now update\n for l, layer in enumerate(network.layers):\n self.vel_b[l] = -self.alpha * self.dc_db[l] + self.beta * self.vel_b[l]\n self.vel_q[l] = -self.alpha * self.dc_dq[l] + self.beta * self.vel_q[l]\n self.vel_rx_inp[l] = -self.alpha * self.dc_drx_inp[l] + self.beta * self.vel_rx_inp[l]\n self.vel_ry_inp[l] = -self.alpha * self.dc_dry_inp[l] + self.beta * self.vel_ry_inp[l]\n self.vel_rx_pos_out[l] = -self.alpha * self.dc_drx_pos_out[l] + self.beta * self.vel_rx_pos_out[l]\n self.vel_ry_pos_out[l] = -self.alpha * self.dc_dry_pos_out[l] + self.beta * self.vel_ry_pos_out[l]\n self.vel_rx_neg_out[l] = -self.alpha * self.dc_drx_neg_out[l] + self.beta * self.vel_rx_neg_out[l]\n self.vel_ry_neg_out[l] = -self.alpha * self.dc_dry_neg_out[l] + self.beta * self.vel_ry_neg_out[l]\n\n layer.b += self.vel_b[l]\n layer.q += self.vel_q[l]\n layer.rx_inp += self.vel_rx_inp[l]\n layer.ry_inp += self.vel_ry_inp[l]\n layer.rx_pos_out += self.vel_rx_pos_out[l]\n layer.ry_pos_out += self.vel_ry_pos_out[l]\n layer.rx_neg_out += self.vel_rx_neg_out[l]\n layer.ry_neg_out += self.vel_ry_neg_out[l]", "def _update_nn(self, bad_feats, good_feats, rate):\n # TODO: this is just adding another dimension to fit the parallelized scoring\n # (even if updates are not parallelized). 
Make it nicer.\n bad_feats = ([bad_feats[0]], [bad_feats[1]])\n good_feats = ([good_feats[0]], [good_feats[1]])\n\n cost_gcost = self.nn.update(*(bad_feats + good_feats + (rate,)))\n log_debug('Cost:' + str(cost_gcost[0]))\n param_vals = [param.get_value() for param in self.nn.params]\n log_debug('Param norms : ' + str(self._l2s(param_vals)))\n log_debug('Gparam norms: ' + str(self._l2s(cost_gcost[1:])))\n l1_params = param_vals[2]\n log_debug('Layer 1 parts :' + str(self._l2s([l1_params[0:100, :], l1_params[100:200, :],\n l1_params[200:350, :], l1_params[350:500, :],\n l1_params[500:, :]])))\n l1_gparams = cost_gcost[3]\n log_debug('Layer 1 gparts:' + str(self._l2s([l1_gparams[0:100, :], l1_gparams[100:200, :],\n l1_gparams[200:350, :], l1_gparams[350:500, :],\n l1_gparams[500:, :]])))", "def _update_nn(self, bad_feats, good_feats, rate):\n self.nn.update(bad_feats, good_feats, rate)", "def update_parameters_with_gd(parameters, grads, learning_rate):\n\n L = len(parameters) // 2 # number of layers in the neural networks\n\n # Update rule for each parameter\n for l in range(L):\n parameters[\"W\" + str(l+1)] = parameters[\"W\" + str(l+1)] - learning_rate * grads['dW' + str(l+1)]\n parameters[\"b\" + str(l+1)] = parameters[\"b\" + str(l+1)] - learning_rate * grads['db' + str(l+1)]\n \n return parameters", "def updateGraph(self):\n self.initUnits()\n v = self.units.copy()\n v_old = v.copy() * 100 # initial value so it will skip the first break\n for step in range(self.numCycles): # for total number of cycles\n # keep the old version of v for paralel updating\n # if v_old and v every element differnce < 0.001, then stop\n if np.all(np.abs(v_old - v) < 0.001):\n break\n # assign to v_old v from the previous step\n v_old = v.copy()\n for i in range(self.graph.n): # for every unit in the graph\n if i not in self.graph.observed: # if the unit is not a special fixed value s\n net = np.dot(v_old, self.graph.c[i]) # compute total flow to the unit\n if net > 0:\n gradient = net*(self.min_max[1]-v_old[i])\n else:\n gradient = net*(v_old[i]-self.min_max[0])\n v[i] = v_old[i]*(1-self.decay) + gradient\n # should this be after every unit update, or after the whole graph updates ??\n v = np.where(v>1, self.min_max[1], v)\n v = np.where(v<-1,self.min_max[0],v)\n self.units = v", "def update_model_parameters(phi, T, nz, coord, SWVD, form=\"Calonne\"):\r\n D_eff = np.ones(nz)\r\n\r\n if form == \"Hansen\": # Hansen and Foslien (2015)\r\n D_eff = phi * (1 - phi) * D0 + D0\r\n elif form == \"Calonne\": # Calonne et al. (2014)\r\n x = 2 / 3 - phi\r\n b = np.heaviside(x, 1)\r\n D_eff = D0 * (1 - 3 / 2 * phi) * b\r\n else:\r\n print(\"requested method not available, check input\")\r\n\r\n ## effective thermal conductivity W/m/K\r\n k_eff = np.ones(nz)\r\n\r\n if form == \"Hansen\": # Hansen and Foslien (2015)\r\n k_eff = phi * ((1 - phi) * k_a + phi * k_i) + k_a\r\n elif form == \"Calonne\": # Calonne et al. (2011)\r\n k_eff = ka0 + ka1 * (rho_i * phi) + ka2 * (rho_i * phi) ** 2\r\n else:\r\n print(\"requested method not available, check input\")\r\n\r\n ## effective heat capacity - similar forumla in Hansen and Foslien (2015) and Löwe et al. 
(2019)\r\n rhoC_eff = np.zeros(nz)\r\n rhoC_eff = phi * rho_i * C_i + (np.ones(nz) - phi) * rho_a * C_a\r\n\r\n ## Water Vapor density rho_v and its derivative rho_v_dT:\r\n [rho_v, rho_v_dT] = sat_vap_dens(nz, T, SWVD)\r\n\r\n return D_eff, k_eff, rhoC_eff, rho_v, rho_v_dT", "def update(self, dT):\r\n\r\n current_delta_time = dT\r\n for module in self.modules:\r\n module(self)", "def worker(D,graph=None):\n\n if graph ==None:\n graph = tf.Graph()\n # Build Tensorflow graph which computes gradients of the model with one mini-batch of examples\n with graph.as_default():\n \n # Get input and labels for learning from D\n inputs, labels = D\n logits = mdnn.CNN_model(inputs,graph)\n \n # Calculate loss.\n loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,logits=logits))\n \n optimizer = tf.train.GradientDescentOptimizer(0.1)\n grads = optimizer.compute_gradients(loss)\n with tf.variable_scope(\"\",reuse=True):\n grads_var = {var.op.name:tf.Variable(tf.zeros(var.get_shape()),trainable=False,name=var.op.name+\"_grad\",collections=[\"W_grad\"]) for _,var in grads}\n train_op = [grads_var[var.op.name].assign(grad) for grad,var in grads]\n \n # Build an initialization operation.\n init = tf.global_variables_initializer()\n\n \n # Tensorflow op to update parameters from PS\n get_W = df.get_w(graph,\"W_global\")\n\n\n with tf.Session() as sess:\n #Initialize the TF variables\n sess.run([init])\n tf.train.start_queue_runners(sess=sess)\n iteration = 0\n s = sck.socket(sck.AF_INET, sck.SOCK_STREAM)\n s.connect((FLAGS.ip_PS, FLAGS.port))\n \n while iteration < FLAGS.iter_max:\n #Get the parameters from the PS\n com.send_msg(s,\"\",\"GET_W\")\n cmd,data= com.recv_msg(s)\n iteration,W= com.decode_variables(data)\n s.close()\n \n #Update the parameters\n sess.run(get_W,{key+\"_delta:0\":value for key,value in W.items()})\n \n #Compute gradients stored in Tensorflow variables\n inp,log,lab,loss_values,_ =sess.run([inputs,logits,labels,loss,train_op])\n\n print \"Loss\",loss_values\n \n #Encode the update with the local timer (iteration)\n update = com.encode_variables(sess,\"W_grad\",iteration,compression=FLAGS.compression_rate)\n \n #Push the update to PS\n s = sck.socket(sck.AF_INET, sck.SOCK_STREAM)\n s.connect((FLAGS.ip_PS, FLAGS.port))\n \n com.send_msg(s,update,\"PUSH\")\n print \"Worker\",FLAGS.id_worker,\" is closed\"", "def update_target_network(self):\n self.target_dqn.set_weights.remote(self.dqn.get_weights.remote())", "def update_parameters_with_adam(parameters, grads, v, s, t, learning_rate = 0.01,\n beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8):\n \n L = len(parameters) // 2 # number of layers in the neural networks\n v_corrected = {} # Initializing first moment estimate, python dictionary\n s_corrected = {} # Initializing second moment estimate, python dictionary\n\n # Perform Adam update on all parameters\n for l in range(L):\n # Moving average of the gradients. Inputs: \"v, grads, beta1\". Output: \"v\".\n ### START CODE HERE ### (approx. 2 lines)\n v[\"dW\" + str(l+1)] = beta1 * v[\"dW\" + str(l+1)] + (1 - beta1) * grads[\"dW\" + str(l+1)] ##None\n v[\"db\" + str(l+1)] = beta1 * v[\"db\" + str(l+1)] + (1 - beta1) * grads[\"db\" + str(l+1)] ##None\n ### END CODE HERE ###\n\n # Compute bias-corrected first moment estimate. Inputs: \"v, beta1, t\". Output: \"v_corrected\".\n ### START CODE HERE ### (approx. 
2 lines)\n v_corrected[\"dW\" + str(l+1)] = v[\"dW\" + str(l+1)] / (1 - np.power(beta1,t)) ##None\n v_corrected[\"db\" + str(l+1)] = v[\"db\" + str(l+1)] / (1 - np.power(beta1,t)) ##None\n ### END CODE HERE ###\n\n # Moving average of the squared gradients. Inputs: \"s, grads, beta2\". Output: \"s\".\n ### START CODE HERE ### (approx. 2 lines)\n s[\"dW\" + str(l+1)] = beta2 * s[\"dW\" + str(l+1)] + (1 - beta2) * np.power(grads[\"dW\" + str(l+1)],2) ##None\n s[\"db\" + str(l+1)] = beta2 * s[\"db\" + str(l+1)] + (1 - beta2) * np.power(grads[\"db\" + str(l+1)],2) ##None\n ### END CODE HERE ###\n\n # Compute bias-corrected second raw moment estimate. Inputs: \"s, beta2, t\". Output: \"s_corrected\".\n ### START CODE HERE ### (approx. 2 lines)\n s_corrected[\"dW\" + str(l+1)] = s[\"dW\" + str(l+1)] / (1 - np.power(beta2,t)) ##None\n s_corrected[\"db\" + str(l+1)] = s[\"db\" + str(l+1)] / (1 - np.power(beta2,t)) ##None\n ### END CODE HERE ###\n\n # Update parameters. Inputs: \"parameters, learning_rate, v_corrected, s_corrected, epsilon\". Output: \"parameters\".\n ### START CODE HERE ### (approx. 2 lines)\n parameters[\"W\" + str(l+1)] = parameters[\"W\" + str(l+1)] - learning_rate * v_corrected[\"dW\" + str(l+1)] / (np.sqrt(s_corrected[\"dW\" + str(l+1)]) + epsilon) ##None\n parameters[\"b\" + str(l+1)] = parameters[\"b\" + str(l+1)] - learning_rate * v_corrected[\"db\" + str(l+1)] / (np.sqrt(s_corrected[\"db\" + str(l+1)]) + epsilon) ##None\n ### END CODE HERE ###\n\n return parameters, v, s", "def update(self, weights, grads):\n\n learn_rate = self.learn_rate\n\n # Scale the learning rate by the iteration number\n t = self.iters + 1\n learn_rate_t = learn_rate * (np.sqrt(1 - self.beta2**t) /\n (1 - self.beta1**t))\n\n # Store the momentum and velocities for each node\n if self.ms is None:\n self.ms = [np.zeros(w.shape) for w in weights]\n if self.vs is None:\n self.vs = [np.zeros(w.shape) for w in weights]\n ms, vs = self.ms, self.vs\n\n # Make sure everything has the right length\n assert len(weights) == len(grads)\n assert len(weights) == len(ms)\n assert len(weights) == len(vs)\n\n # Now, for each weight stack, update momentum, velocity, weights\n new_ms = []\n new_vs = []\n new_weights = []\n for w, g, m, v in zip(weights, grads, ms, vs):\n # Momentum update\n m_t = (self.beta1 * m) + (1.0 - self.beta1) * g\n\n # Velocity update\n v_t = (self.beta2 * v) + (1.0 - self.beta2) * g**2\n\n # Update the weights\n w_t = w - learn_rate_t * m_t / (np.sqrt(v_t) + 1e-8)\n\n new_ms.append(m_t)\n new_vs.append(v_t)\n new_weights.append(w_t)\n\n self.ms = new_ms\n self.vs = new_vs\n return new_weights", "def update_all(self,delta_t):\n self.update_thrust()\n self.update_climb_rate()\n self.update_height(delta_t)", "def update_model(self, verbose):\n if self.comm.project.meshes == \"multi-mesh\":\n self.comm.lasif.move_gradient_to_cluster()\n\n if not self.task_dict[\"summing_completed\"]:\n grad_summer = GradientSummer(comm=self.comm)\n grad_summer.sum_gradients(\n events=self.comm.project.non_val_events_in_iteration,\n output_location=self.raw_gradient_path,\n batch_average=True,\n sum_vpv_vph=True,\n store_norms=True,\n )\n write_xdmf(self.raw_gradient_path)\n self.task_dict[\"summing_completed\"] = True\n self._update_task_file()\n else:\n self.print(\"Summing already done\")\n\n if not self.task_dict[\"raw_update_completed\"]:\n self._update_model(raw=True, smooth=False, verbose=verbose)\n self.task_dict[\"raw_update_completed\"] = True\n self._update_task_file()\n else:\n self.print(\"Raw 
updating already completed\")\n\n if not self.task_dict[\"smoothing_completed\"]:\n self.perform_smoothing()\n self.task_dict[\"smoothing_completed\"] = True\n self._update_task_file()\n else:\n self.print(\"Smoothing already done\")\n\n if not self.task_dict[\"smooth_update_completed\"]:\n self._update_model(raw=False, smooth=True, verbose=verbose)\n self.task_dict[\"smooth_update_completed\"] = True\n self._update_task_file()\n else:\n self.print(\"Smooth updating already completed\")\n\n if not self.task_dict[\"iteration_finalized\"]:\n self._finalize_iteration(verbose=verbose)\n self.task_dict[\"iteration_finalized\"] = True\n self._update_task_file()\n else:\n self.print(\"Iteration already finalized\")\n\n self.finish_task()", "def soft_update_target_network(self):\n \n pars_behavior = self.model.get_weights() # these have form [W1, b1, W2, b2, ..], Wi = weights of layer i\n pars_target = self.target_model.get_weights() # bi = biases in layer i\n \n for par_behavior,par_target in zip(pars_behavior,pars_target):\n par_target = par_target*(1-self.tau) + par_behavior*self.tau\n pars_target[ctr] = par_target\n\n self.target_model.set_weights(pars_target)", "def add_grad_updates(self):\n \n gradients = T.grad(self.cost, self.theta)\n \n for target_param, grad in zip(self.theta, gradients):\n \n if target_param.name ==\"W\" and self.num_hidden ==0\\\n and self.zero_diag:\n \n grad = grad - T.diag(T.diag(grad)) # no x i - xi connections\n # for all i = 1, ..., D\n ##############################################################\n if target_param.name ==\"b\" and self.learn_biases == False:\n print(\"Will not learn bias terms\")\n pass\n \n elif target_param.name ==\"bhid\" and self.learn_biases == False:\n print(\"Will not learn bias terms\")\n pass\n \n else:\n \n if self.use_momentum:\n \n # alternative definition (mostly seen):\n #g_tilda = self.momentum*self.grad_vec[target_param.name] - \\\n #T.cast(self.learning_rate, dtype = theano.config.floatX)*grad\n #self.updates[target_param] = target_param + g_tilda\n \n g_tilda = self.momentum*self.grad_vec[target_param.name] - \\\n (1-self.momentum)*grad\n \n self.updates[target_param] = target_param +\\\n T.cast(self.learning_rate, dtype = theano.config.floatX)*g_tilda\n \n # store g_tilda for next iteration:\n self.updates[self.grad_vec[target_param.name]] = g_tilda\n \n else:\n \n self.updates[target_param] = target_param -\\\n T.cast(self.learning_rate, dtype = theano.config.floatX)*grad\n \n if (\"PCD\" in self.algorithm) and self.num_hidden > 0:\n \n self.updates[self.persistent_gibbs] = self.hid_samples", "def update_stationlist(time_res='hourly',dbase_dir='dbase'):\r\n\r\n \r\n dwd_abbr = {'air_temperature': 'TU',\r\n 'cloud_type': 'CS', \r\n 'cloudiness': 'N',\r\n 'dew_point' : 'TD',\r\n 'extreme_temperature': 'TX',\r\n 'extreme_wind': 'FX',\r\n 'precipitation': 'RR',\r\n 'pressure': 'P0',\r\n 'soil_temperature': 'EB',\r\n 'solar': 'ST',\r\n 'sun': 'SD',\r\n 'visibility': 'VV',\r\n 'wind': 'FF',\r\n 'wind_synop': 'F'\r\n }\r\n \r\n # lets start\r\n print('Updating station list')\r\n \r\n # create output directory if not existing\r\n \r\n if not os.path.exists(dbase_dir):\r\n os.makedirs(dbase_dir)\r\n \r\n #check whether we have an up-to-date-station-list-already\r\n try:\r\n stations_network_old=[s for s in os.listdir(dbase_dir) if 'dwd_station_network' in s][0]\r\n datetime_network=datetime.date(datetime.strptime(re.findall('\\d+',stations_network_old)[0],'%Y%m%d'))\r\n #update if more than 24hours\r\n 
dt_today=datetime.date(datetime.now())\r\n if (dt_today-datetime_network)<timedelta(days=1):\r\n print('DWD network list is up-to-date, no update needed')\r\n filename_stations=dbase_dir+'\\\\'+stations_network_old\r\n return filename_stations\r\n else:\r\n print('DWD network list neeeds to be updated')\r\n os.remove(dbase_dir+'\\\\'+stations_network_old)\r\n except:\r\n print('DWD network list neeeds to be updated')\r\n pass\r\n \r\n \r\n # header\r\n stations_network=pd.DataFrame()\r\n \r\n # connect to ftp server and go to the folder\r\n \r\n # Connect to the Server\r\n server='opendata.dwd.de'\r\n ftp=connect_ftp(server = server,connected = False)\r\n #change to subfolder\r\n ftp.cwd('/climate_environment/CDC/observations_germany/climate/' + time_res +'/')\r\n #get dwd categories\r\n dwd_categories=ftp.nlst()\r\n #loop through the subfolders to get the station lists\r\n for category in dwd_categories:\r\n print('retrieve stationlist for', category)\r\n #try to get historical data\r\n try:\r\n dir_path='/climate_environment/CDC/observations_germany/climate/' + time_res +'/'+category+'/historical/'\r\n ftp.cwd(dir_path)\r\n except Exception as e:\r\n print(e, 'try to download category', category, 'from other folder')\r\n try:\r\n dir_path='/climate_environment/CDC/observations_germany/climate/' + time_res +'/'+category+'/'\r\n ftp.cwd(dir_path)\r\n except:\r\n print('Category', category, 'could not have been downloaded')\r\n pass\r\n #retrieve the stationlist\r\n stationlist = []\r\n # try to retrieve file\r\n retrieved=False\r\n filename=dwd_abbr[category]+'_Stundenwerte_Beschreibung_Stationen.txt'\r\n while not retrieved:\r\n try:\r\n ftp.retrlines(\"RETR \" + filename, stationlist.append)\r\n #ftp.retrbinary(\"RETR \" + filestr, stationlist.write)\r\n retrieved = True\r\n except:\r\n ftp=connect_ftp(server = server,connected = False)\r\n ftp.cwd(dir_path)\r\n #remove first two lines\r\n stationlist=stationlist[2:]\r\n #delete uncessary blanks\r\n stationlist=[re.sub(' +', ' ', station.rstrip()) for station in stationlist]\r\n #split the list\r\n stationlist=[station.split(\" \")[:7] for station in stationlist]\r\n #read as dataframe\r\n dfstations=pd.DataFrame(stationlist,columns=['STATIONS_ID','date_start','date_end','height','geo_lat','geo_lon','name'])\r\n #add true information to category\r\n dfstations[category]=True\r\n \r\n stations_network=stations_network.append(dfstations,sort=False,ignore_index=True)\r\n #A=[sub.split(\" \") for sub in stationlist] \r\n \r\n #replace all Na by False\r\n stations_network[stations_network.isna()]=0 \r\n #aggregate\r\n stations_network=stations_network.groupby(['STATIONS_ID'],as_index=False).agg('max')\r\n #replace zero by False in order to have pure boolean data\r\n stations_network.replace(0,False,inplace=True)\r\n #fix the error with station 14138 and 05614 and 07325, which does not have pressure cord\r\n stations_network.loc[stations_network.STATIONS_ID=='14138','pressure']=False\r\n stations_network.loc[stations_network.STATIONS_ID=='05614','pressure']=False\r\n stations_network.loc[stations_network.STATIONS_ID=='07325','pressure']=False\r\n stations_network.loc[stations_network.STATIONS_ID=='01572','pressure']=False\r\n #for temperature the same\r\n stations_network.loc[stations_network.STATIONS_ID=='14138','air_temperature']=False\r\n #save to database writing the time as well\r\n filename_stations=dbase_dir+'\\\\dwd_station_network_'+datetime.now().strftime('%Y%m%d')+'.csv'\r\n stations_network.to_csv(filename_stations,index=False)\r\n 
\r\n print('Updating station list...finished')\r\n \r\n return filename_stations", "def update_net(optimizer):\n assert kl_train_dataset.bp_mode\n frames_gen, frame_cnt, rel_props, prop_ticks, prop_scaling = kl_train_dataset[index]\n\n optimizer.zero_grad()\n \n num_crop = 1\n length = 3\n if args.modality == 'Flow':\n length = 10\n elif args.modality == 'RGBDiff':\n length = 18\n \n for frames in frames_gen:\n # frames.shape == [frame_batch_size * num_crop * 3, 224, 224]\n assert len(frames) == length * frame_cnt\n input_var = torch.autograd.Variable(frames.view(-1, length, frames.size(-2), frames.size(-1)).cuda())\n base_out = net(input_var, None, None, None, None)\n assert base_out.size(0) == frame_cnt and base_out.size(1) == base_out_dim\n step_features = base_out.mean(dim=0).unsqueeze(0)\n gate, glcu_task_pred = net.glcu(step_features)\n gate = gate.repeat(1, frame_cnt).view(frame_cnt, base_out_dim)\n assert glcu_task_pred.size(0) == 1\n glcu_task_pred = F.softmax(glcu_task_pred.squeeze(), dim=0)\n if net.additive_glcu:\n base_out = base_out + gate\n else:\n base_out = base_out * gate\n\n output = net.test_fc(base_out)\n assert output.size(0) == frame_cnt and output.size(1) == output_dim\n act_scores, comp_scores, reg_scores = reorg_stpp.forward(output, prop_ticks, prop_scaling, bp_mode=True)\n\n # Task Head\n combined_scores = F.softmax(act_scores[:, 1:], dim=1) * torch.exp(comp_scores)\n combined_scores = combined_scores.mean(dim=0).unsqueeze(0)\n task_pred = net.task_head(combined_scores)\n assert task_pred.size(0) == 1\n task_pred = F.softmax(net.task_head(combined_scores).squeeze(), dim=0)\n\n loss = KL(task_pred, glcu_task_pred)\n loss.backward()\n torch.cuda.empty_cache() # To empty the cache from previous iterations\n break\n\n optimizer.step()\n optimizer.zero_grad()\n torch.cuda.empty_cache()\n\n return float(loss.data), frame_cnt", "def momentum_update(self, online_net, target_net, momentum):\n for param_ol, param_tgt in zip(online_net.parameters(), target_net.parameters()):\n param_tgt.data = param_tgt.data * momentum + param_ol.data * (1. 
- momentum)", "def update_weights(architecture,grad_weights,grad_bias,m,v,t,lr,optimizer=\"adam\"):\n \n for layer in range(len(architecture)):\n if not (grad_weights['layer{}'.format(layer+1)] is None) and grad_bias['layer{}'.format(layer+1)] is not None:\n grad_weightsi = grad_weights['layer{}'.format(layer+1)]\n grad_weightsi /= bs\n grad_biasi = grad_bias['layer{}'.format(layer+1)]\n grad_biasi /= bs\n\n \n if optimizer.lower()==\"sgd\":\n # Mini-Batch SGD\n qw = lr*grad_weightsi\n qb = lr*grad_biasi\n else:\n # Mini-Batch Adam\n mw,mb = m['layer{}'.format(layer+1)]\n vw,vb = v['layer{}'.format(layer+1)]\n qw,mw,vw = adam(grad_weightsi,beta_1,beta_2,mw,vw,t,lr) # Have obtained dw\n qb,mb,vb = adam(grad_biasi,beta_1,beta_2,mb,vb,t,lr) # Have obtained db\n\n architecture['layer{}'.format(layer+1)][2].requires_grad = False\n architecture['layer{}'.format(layer+1)][3].requires_grad = False\n # Updating weights and biases now\n try:\n architecture['layer{}'.format(layer+1)][2] -= torch.Tensor(qw)\n except:\n architecture['layer{}'.format(layer+1)][2] -= torch.t(torch.Tensor(qw))\n try:\n architecture['layer{}'.format(layer+1)][3] -= torch.Tensor(qb)\n except:\n architecture['layer{}'.format(layer+1)][3] -= torch.t(torch.Tensor(qb))\n\n m['layer{}'.format(layer+1)][0] = torch.Tensor(mw)\n m['layer{}'.format(layer+1)][1] = torch.Tensor(mb)\n v['layer{}'.format(layer+1)][0] = torch.Tensor(vw)\n v['layer{}'.format(layer+1)][1] = torch.Tensor(vb)\n grad_weights['layer{}'.format(layer+1)] = torch.zeros(grad_weightsi.shape)\n grad_bias['layer{}'.format(layer+1)] = torch.zeros(grad_biasi.shape)\n return grad_weights,grad_bias,m,v", "def _update(\r\n self, dt: float, S: np.ndarray, E: np.ndarray, I: np.ndarray, R: np.ndarray\r\n ):\r\n for i in range(1, len(S)):\r\n f = self._deriv(S[i - 1], E[i - 1], I[i - 1])\r\n S[i] = S[i - 1] + dt * f[0]\r\n E[i] = E[i - 1] + dt * f[1]\r\n I[i] = I[i - 1] + dt * f[2]\r\n R[i] = R[i - 1] + dt * f[3]\r\n return S, E, I, R", "def soft_update_target_network(self):\n \n pars_behavior = self.model.get_weights() # these have form [W1, b1, W2, b2, ..], Wi = weights of layer i\n pars_target = self.target_model.get_weights() # bi = biases in layer i\n \n ctr = 0\n for par_behavior,par_target in zip(pars_behavior,pars_target):\n par_target = par_target*(1-self.tau) + par_behavior*self.tau\n pars_target[ctr] = par_target\n ctr += 1\n\n self.target_model.set_weights(pars_target)", "def weight_update_steepest_descent_with_momentum(self, network):\n # Initialize velocities to zero for momentum\n if self.vel_b is None or self.vel_q is None:\n self.vel_b = []\n self.vel_q = []\n self.vel_rx_inp = []\n self.vel_ry_inp = []\n self.vel_rx_pos_out = []\n self.vel_ry_pos_out = []\n self.vel_rx_neg_out = []\n self.vel_ry_neg_out = []\n for l, layer in enumerate(network.layers):\n self.vel_b.append(np.zeros(layer.b.shape))\n self.vel_q.append(np.zeros(layer.q.shape))\n self.vel_rx_inp.append(np.zeros(layer.input_size))\n self.vel_ry_inp.append(np.zeros(layer.input_size))\n self.vel_rx_pos_out.append(np.zeros(layer.output_size))\n self.vel_ry_pos_out.append(np.zeros(layer.output_size))\n self.vel_rx_neg_out.append(np.zeros(layer.output_size))\n self.vel_ry_neg_out.append(np.zeros(layer.output_size))\n\n for l, layer in enumerate(network.layers):\n self.vel_b[l] = -self.alpha * self.dc_db[l] + self.beta * self.vel_b[l]\n self.vel_q[l] = -self.alpha * self.dc_dq[l] + self.beta * self.vel_q[l]\n self.vel_rx_inp[l] = -self.alpha * self.dc_drx_inp[l] + self.beta * self.vel_rx_inp[l]\n 
self.vel_ry_inp[l] = -self.alpha * self.dc_dry_inp[l] + self.beta * self.vel_ry_inp[l]\n\n self.vel_rx_pos_out[l] = -self.alpha * self.dc_drx_pos_out[l] + self.beta * self.vel_rx_pos_out[l]\n self.vel_ry_pos_out[l] = -self.alpha * self.dc_dry_pos_out[l] + self.beta * self.vel_ry_pos_out[l]\n self.vel_rx_neg_out[l] = -self.alpha * self.dc_drx_neg_out[l] + self.beta * self.vel_rx_neg_out[l]\n self.vel_ry_neg_out[l] = -self.alpha * self.dc_dry_neg_out[l] + self.beta * self.vel_ry_neg_out[l]\n\n layer.b += self.vel_b[l]\n layer.q += self.vel_q[l]\n\n layer.rx_inp += self.vel_rx_inp[l]\n layer.ry_inp += self.vel_ry_inp[l]\n\n layer.rx_pos_out += self.vel_rx_pos_out[l]\n layer.ry_pos_out += self.vel_ry_pos_out[l]\n layer.rx_neg_out += self.vel_rx_neg_out[l]\n layer.ry_neg_out += self.vel_ry_neg_out[l]", "def set_up_model(dt, model, update = False):\n \n start_scope()\n \n ##### Update model parameters (should be done, if original parameters have been changed)\n if update:\n ###### Temperature in Kelvin\n model.T_kelvin = model.zero_celsius + model.T_celsius*kelvin\n \n ##### Potentials\n # Resting potential (calculated with Goldman equation)\n model.V_res = (model.R*model.T_kelvin)/model.F * np.log((model.P_K*model.n_init**2*model.K_e + model.P_Na*model.h_init*model.m_init**3*model.Na_e)/\\\n (model.P_K*model.n_init**2*model.K_i + model.P_Na*model.h_init*model.m_init**3*model.Na_i))\n \n # Nerst potential for leakage current; leakage chanels were excluded but could be added by using: g_L*(E_L-(v-V_res)) \n model.E_L = (-1/model.g_L)*(model.P_Na*model.m_init**3*model.h_init*(model.V_res*model.F**2)/(model.R*model.T_kelvin) * \\\n (model.Na_e-model.Na_i*exp(model.V_res*model.F/(model.R*model.T_kelvin)))/(1-np.exp(model.V_res*model.F/(model.R*model.T_kelvin))) + \\\n model.P_K*model.n_init**2*(model.V_res*model.F**2)/(model.R*model.T_kelvin) *\\\n (model.K_e-model.K_i*np.exp(model.V_res*model.F/(model.R*model.T_kelvin)))/(1-np.exp(model.V_res*model.F/(model.R*model.T_kelvin))))\n \n \n ##### structure of ANF\n # terminal = 0\n # internode = 1\n # node = 2\n # presomatic region = 3\n # Soma = 4\n # postsomatic region = 5)\n model.structure = np.array(list(np.tile([2,1],model.nof_internodes)) + [2])\n model.nof_comps = len(model.structure)\n \n ##### Compartment lengths\n # initialize\n model.compartment_lengths = np.zeros_like(model.structure)*um\n # length internodes\n model.compartment_lengths[model.structure == 1] = model.length_internodes\n # length nodes\n model.compartment_lengths[model.structure == 2] = model.length_nodes\n # total length neuron\n model.length_neuron = sum(model.compartment_lengths)\n \n ##### Compartment diameters\n # initialize\n model.compartment_diameters = np.zeros(model.nof_comps+1)*um\n # dendrite\n model.fiber_inner_diameter = 0.7* model.fiber_outer_diameter\n model.compartment_diameters[:] = model.fiber_inner_diameter\n \n ##### Compartment middle point distances (needed for plots)\n model.distance_comps_middle = np.zeros_like(model.compartment_lengths)\n model.distance_comps_middle[0] = 0.5*model.compartment_lengths[0]\n for ii in range(0,model.nof_comps-1):\n model.distance_comps_middle[ii+1] = 0.5* model.compartment_lengths[ii] + 0.5* model.compartment_lengths[ii+1]\n \n ##### Capacitivites\n # initialize\n model.c_m = np.zeros_like(model.structure)*uF/cm**2\n # internodes\n model.c_m[np.where(model.structure == 1)] = 0*uF/cm**2\n # nodes\n model.c_m[np.where(model.structure == 2)] = model.c_m_layer\n \n ##### Axoplasmatic resistances\n 
model.compartment_center_diameters = np.zeros(model.nof_comps)*um\n model.compartment_center_diameters = (model.compartment_diameters[0:-1] + model.compartment_diameters[1:]) / 2 \n model.R_a = (model.compartment_lengths*model.rho_in) / ((model.compartment_center_diameters*0.5)**2*np.pi)\n \n ##### Surface arias\n # lateral surfaces\n m = [np.sqrt(abs(model.compartment_diameters[i+1] - model.compartment_diameters[i])**2 + model.compartment_lengths[i]**2)\n for i in range(0,model.nof_comps)]\n # total surfaces\n model.A_surface = [(model.compartment_diameters[i+1] + model.compartment_diameters[i])*np.pi*m[i]*0.5\n for i in range(0,model.nof_comps)]\n \n ##### Noise term\n model.P_Na_vector = np.zeros(model.nof_comps)*um/second\n model.P_Na_vector[model.structure == 2] = model.P_Na\n model.noise_term = np.sqrt(model.A_surface*model.P_Na_vector)\n \n ##### Compartments to plot\n model.comps_to_plot = range(1,model.nof_comps)\n \n ##### initialize defaultclock\n defaultclock.dt = dt\n\n ##### define morphology\n morpho = Section(n = model.nof_comps,\n length = model.compartment_lengths,\n diameter = model.compartment_diameters)\n \n ##### define neuron\n neuron = SpatialNeuron(morphology = morpho,\n model = model.eqs,\n Cm = model.c_m,\n Ri = model.rho_in,\n method=\"exponential_euler\")\n \n ##### initial values\n neuron.v = model.V_res\n neuron.m = model.m_init\n neuron.n = model.n_init\n neuron.h = model.h_init\n \n ##### Set parameter values of differential equations\n # conductances active compartments\n neuron.g_Na = model.g_Na\n neuron.g_K = model.g_K\n \n # conductances internodes\n neuron.g_Na[np.asarray(np.where(model.structure == 1))] = 0*msiemens/cm**2\n neuron.g_K[np.asarray(np.where(model.structure == 1))] = 0*msiemens/cm**2\n \n # other parameters\n neuron.V_res = model.V_res\n neuron.T_celsius = model.T_celsius\n neuron.E_Na = model.E_Na\n neuron.E_K = model.E_K\n neuron.E_L = model.E_L\n neuron.g_L = model.g_L \n \n return neuron, model", "def update_network(self):\n\n device = torch.device(\"cpu\")\n self.model = ProLoNet(input_dim=13,\n weights=None,\n comparators=None,\n leaves=32,\n output_dim=1,\n bayesian_embedding_dim=8,\n alpha=1.5,\n use_gpu=False,\n vectorized=True,\n is_value=True).to(device)\n\n self.embedding_optimizer = torch.optim.RMSprop([{'params': self.model.bayesian_embedding.parameters()}], lr=.1)\n self.embedding_list = [torch.ones(3) * 1 / 3 for i in range(2000)]\n self.opt = torch.optim.RMSprop(\n [{'params': list(self.model.parameters())[:-1]}, {'params': self.model.bayesian_embedding.parameters(), 'lr': .01}], lr=.01)\n\n criterion = torch.nn.BCELoss()\n\n n_epochs = 4000 + self.global_schedule_num * 3\n for epoch in range(n_epochs):\n which_schedule = np.random.randint(len(self.data_so_far))\n timestep_within_schedule = np.random.randint(len(self.teacher_actions[which_schedule]))\n\n index_within_network_state = timestep_within_schedule * 20\n timestep_data_from_agg = self.data_so_far[which_schedule][index_within_network_state:index_within_network_state+20]\n task = self.teacher_actions[which_schedule][timestep_within_schedule]\n # set the embedding\n self.model.set_bayesian_embedding(self.embedding_list[which_schedule].clone())\n # update loop\n\n phi_i_num = task\n phi_i = self.get_features_from_timestep_data_from_agg(timestep_data_from_agg, phi_i_num)\n phi_i_numpy = np.asarray(phi_i)\n loss_counter = 0\n # iterate over pairwise comparisons\n for counter in range(0, 0 + 20):\n if counter == phi_i_num:\n continue\n else:\n phi_j = 
self.get_features_from_timestep_data_from_agg(timestep_data_from_agg, counter)\n phi_j_numpy = np.asarray(phi_j)\n feature_input = phi_i_numpy - phi_j_numpy\n\n if self.use_gpu:\n feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)).cuda())\n label = Variable(torch.Tensor(torch.ones((1, 1))).cuda())\n else:\n feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)))\n label = Variable(torch.Tensor(torch.ones((1, 1))))\n sig = torch.nn.Sigmoid()\n output = sig(self.model(feature_input))\n loss = criterion(output, label)\n # prepare optimizer, compute gradient, update params\n loss_counter += loss.item()\n self.opt.zero_grad()\n loss.backward()\n # torch.nn.utils.clip_grad_norm_(self.model.parameters(), 0.5)\n self.opt.step()\n\n for counter in range(0, 0 + 20):\n if counter == phi_i_num:\n continue\n else:\n phi_j = self.get_features_from_timestep_data_from_agg(timestep_data_from_agg, counter)\n phi_j_numpy = np.asarray(phi_j)\n feature_input = phi_j_numpy - phi_i_numpy\n\n if self.use_gpu:\n feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)).cuda())\n label = Variable(torch.Tensor(torch.zeros((1, 1))).cuda())\n else:\n feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)))\n label = Variable(torch.Tensor(torch.zeros((1, 1))))\n sig = torch.nn.Sigmoid()\n output = sig(self.model.forward(feature_input))\n\n self.opt.zero_grad()\n loss = criterion(output, label)\n loss_counter += loss.item()\n\n loss.backward()\n # torch.nn.utils.clip_grad_norm_(self.model.parameters(), 0.5)\n self.opt.step()\n self.loss_array.append(loss_counter / 38)\n self.embedding_list[which_schedule] = torch.Tensor(self.model.get_bayesian_embedding().detach().cpu().numpy()).clone() # very ugly", "def updateG(self, dt):\n\t\tself.tissue.G.project( (self.initial * dt + Identity(3)) * self.tissue.G )", "def update_parameters(self, parameters, grads, learning_rate):\n\n L = len(parameters) // 2 # number of layers in the neural network\n v_corrected = {} # Initializing first moment estimate, python dictionary\n s_corrected = {} # Initializing second moment estimate, python dictionary\n self.t += 1\n\n # Perform Adam update on all parameters\n for l in range(L):\n # Moving average of the gradients. Inputs: \"v, grads, beta1\". Output: \"v\".\n self.v[\"dW\" + str(l + 1)] = self.beta1 * self.v[\"dW\" + str(l + 1)] + (1 - self.beta1) * grads['dW' + str(l + 1)]\n self.v[\"db\" + str(l + 1)] = self.beta1 * self.v[\"db\" + str(l + 1)] + (1 - self.beta1) * grads['db' + str(l + 1)]\n\n # Compute bias-corrected first moment estimate. Inputs: \"v, beta1, t\". Output: \"v_corrected\".\n v_corrected[\"dW\" + str(l + 1)] = self.v[\"dW\" + str(l + 1)] / (1 - np.power(self.beta1, self.t))\n v_corrected[\"db\" + str(l + 1)] = self.v[\"db\" + str(l + 1)] / (1 - np.power(self.beta1, self.t))\n\n # Moving average of the squared gradients. Inputs: \"s, grads, beta2\". Output: \"s\".\n self.s[\"dW\" + str(l + 1)] = self.beta2 * self.s[\"dW\" + str(l + 1)] + (1 - self.beta2) * np.power(grads['dW' + str(l + 1)], 2)\n self.s[\"db\" + str(l + 1)] = self.beta2 * self.s[\"db\" + str(l + 1)] + (1 - self.beta2) * np.power(grads['db' + str(l + 1)], 2)\n\n # Compute bias-corrected second raw moment estimate. Inputs: \"s, beta2, t\". Output: \"s_corrected\".\n s_corrected[\"dW\" + str(l + 1)] = self.s[\"dW\" + str(l + 1)] / (1 - np.power(self.beta2, self.t))\n s_corrected[\"db\" + str(l + 1)] = self.s[\"db\" + str(l + 1)] / (1 - np.power(self.beta2, self.t))\n\n # Update parameters. 
Inputs: \"parameters, learning_rate, v_corrected, s_corrected, epsilon\". Output: \"parameters\".\n parameters[\"W\" + str(l + 1)] = parameters[\"W\" + str(l + 1)] - learning_rate * v_corrected[\"dW\" + str(l + 1)] / np.sqrt(self.s[\"dW\" + str(l + 1)] + self.epsilon)\n parameters[\"b\" + str(l + 1)] = parameters[\"b\" + str(l + 1)] - learning_rate * v_corrected[\"db\" + str(l + 1)] / np.sqrt(self.s[\"db\" + str(l + 1)] + self.epsilon)\n\n return parameters", "def _momentum_update(self):\n for param_ol, param_tgt in zip(self.online_net.parameters(),\n self.target_net.parameters()):\n param_tgt.data = param_tgt.data * self.momentum + \\\n param_ol.data * (1. - self.momentum)", "def update(self,dt):\n t1 = time()\n\n if SPLIT:\n self.check_refine()\n if AMALGAMATE:\n self.check_amalg(self.nl_default)\n\n t = time()\n self.rebuild_lists()\n self.timing['nlist rebuild time'] = time() - t\n\n # Is this derivative step required?\n t = time()\n self.derivatives()\n self.timing['deriv time'] = time() - t\n \n t = time()\n self.step(self.gather_state,self.derivatives, \\\n self.gather_derivatives,self.scatter_state,dt)\n self.timing['integrate time'] = time() - t\n \n self.box.apply(self)\n\n if self.thermostat:\n self.apply_thermostat(self.thermostat_temp)\n \n self.timing['update time'] = time() - t1\n self.steps += 1", "def update(self, dt):\n # get normal random iscipyut\n u = self.prng.normal(size=(1, self._x.shape[1]))\n # calculate state time derivative with state space equation\n dx_dt = self._A.dot(self._x) + self._B * u\n # apply update with Euler integration\n self._x += dx_dt * dt", "def weight_update_rmsprop(self, network):\n epsilon = 10e-8\n gamma = self.gamma\n one_m_gamma = 1.0 - gamma\n\n if self.ms_b is None or self.ms_q is None:\n self.ms_b = []\n self.ms_q = []\n self.ms_rx_inp = []\n self.ms_ry_inp = []\n self.ms_rx_pos_out = []\n self.ms_ry_pos_out = []\n self.ms_rx_neg_out = []\n self.ms_ry_neg_out = []\n for l, layer in enumerate(network.layers):\n self.ms_b.append(np.zeros(layer.b.shape))\n self.ms_q.append(np.zeros(layer.q.shape))\n self.ms_rx_inp.append(np.zeros(layer.input_size))\n self.ms_ry_inp.append(np.zeros(layer.input_size))\n self.ms_rx_pos_out.append(np.zeros(layer.output_size))\n self.ms_ry_pos_out.append(np.zeros(layer.output_size))\n self.ms_rx_neg_out.append(np.zeros(layer.output_size))\n self.ms_ry_neg_out.append(np.zeros(layer.output_size))\n\n for l, layer in enumerate(network.layers):\n self.ms_b[l] = gamma * self.ms_b[l] + one_m_gamma * self.dc_db[l]**2\n self.ms_q[l] = gamma * self.ms_q[l] + one_m_gamma * self.dc_dq[l]**2\n\n self.ms_rx_inp[l] = gamma * self.ms_rx_inp[l] + one_m_gamma * self.dc_drx_inp[l]**2\n self.ms_ry_inp[l] = gamma * self.ms_ry_inp[l] + one_m_gamma * self.dc_dry_inp[l]**2\n\n self.ms_rx_pos_out[l] = gamma * self.ms_rx_pos_out[l] + one_m_gamma * self.dc_drx_pos_out[l]**2\n self.ms_ry_pos_out[l] = gamma * self.ms_ry_pos_out[l] + one_m_gamma * self.dc_dry_pos_out[l]**2\n self.ms_rx_neg_out[l] = gamma * self.ms_rx_neg_out[l] + one_m_gamma * self.dc_drx_neg_out[l]**2\n self.ms_ry_neg_out[l] = gamma * self.ms_ry_neg_out[l] + one_m_gamma * self.dc_dry_neg_out[l]**2\n\n layer.b += -self.alpha * self.dc_db[l] / np.sqrt(self.ms_b[l] + epsilon)\n layer.q += -self.alpha * self.dc_dq[l] / np.sqrt(self.ms_q[l] + epsilon)\n\n layer.rx_inp += -self.alpha * self.dc_drx_inp[l] / np.sqrt(self.ms_rx_inp[l] + epsilon)\n layer.ry_inp += -self.alpha * self.dc_dry_inp[l] / np.sqrt(self.ms_ry_inp[l] + epsilon)\n\n layer.rx_pos_out += -self.alpha * 
self.dc_drx_pos_out[l] / np.sqrt(self.ms_rx_pos_out[l] + epsilon)\n layer.ry_pos_out += -self.alpha * self.dc_dry_pos_out[l] / np.sqrt(self.ms_ry_pos_out[l] + epsilon)\n layer.rx_neg_out += -self.alpha * self.dc_drx_neg_out[l] / np.sqrt(self.ms_rx_neg_out[l] + epsilon)\n layer.ry_neg_out += -self.alpha * self.dc_dry_neg_out[l] / np.sqrt(self.ms_ry_neg_out[l] + epsilon)", "def weight_update_conjugate_gradient(self, network):\n # compute beta: Fletcher-Reeves\n num = 0.0\n for l, layer in enumerate(network.layers):\n num += np.sum(self.dc_db[l] ** 2)\n num += np.sum(self.dc_dq[l] ** 2)\n num += np.sum(self.dc_drx_inp[l] ** 2)\n num += np.sum(self.dc_dry_inp[l] ** 2)\n num += np.sum(self.dc_drx_pos_out[l] ** 2)\n num += np.sum(self.dc_dry_pos_out[l] ** 2)\n num += np.sum(self.dc_drx_neg_out[l] ** 2)\n num += np.sum(self.dc_dry_neg_out[l] ** 2)\n\n # Initialize velocities to zero for momentum\n if self.vel_b is None or self.vel_q is None:\n self.ms_b = []\n self.ms_q = []\n self.ms_rx_inp = []\n self.ms_ry_inp = []\n self.ms_rx_pos_out = []\n self.ms_ry_pos_out = []\n self.ms_rx_neg_out = []\n self.ms_ry_neg_out = []\n for l, layer in enumerate(network.layers):\n self.ms_b.append(np.zeros(layer.b.shape))\n self.ms_q.append(np.zeros(layer.q.shape))\n self.ms_rx_inp.append(np.zeros(layer.input_size))\n self.ms_ry_inp.append(np.zeros(layer.input_size))\n self.ms_rx_pos_out.append(np.zeros(layer.output_size))\n self.ms_ry_pos_out.append(np.zeros(layer.output_size))\n self.ms_rx_neg_out.append(np.zeros(layer.output_size))\n self.ms_ry_neg_out.append(np.zeros(layer.output_size))\n\n # Take steepest descent step\n for l, layer in enumerate(network.layers):\n layer.b -= self.alpha * self.dc_db[l]\n layer.q -= self.alpha * self.dc_dq[l]\n layer.rx_inp -= self.alpha * self.dc_drx_inp[l]\n layer.ry_inp -= self.alpha * self.dc_dry_inp[l]\n layer.rx_pos_out -= self.alpha * self.dc_drx_pos_out[l]\n layer.ry_pos_out -= self.alpha * self.dc_dry_pos_out[l]\n layer.rx_neg_out -= self.alpha * self.dc_drx_neg_out[l]\n layer.ry_neg_out -= self.alpha * self.dc_dry_neg_out[l]\n\n else:\n # compute beta\n beta = num / self.denominator\n\n # compute s_n\n for l, layer in enumerate(network.layers):\n self.ms_b[l] = -self.alpha * self.dc_db[l] + beta * self.ms_b[l]\n self.ms_q[l] = -self.alpha * self.dc_dq[l] + beta * self.ms_q[l]\n self.ms_rx_inp[l] = -self.alpha * self.dc_drx_inp[l] + beta * self.ms_rx_inp[l]\n self.ms_ry_inp[l] = -self.alpha * self.dc_dry_inp[l] + beta * self.ms_ry_inp[l]\n self.ms_rx_pos_out[l] = -self.alpha * self.dc_drx_pos_out[l] + beta * self.ms_rx_pos_out[l]\n self.ms_ry_pos_out[l] = -self.alpha * self.dc_dry_pos_out[l] + beta * self.ms_ry_pos_out[l]\n self.ms_rx_neg_out[l] = -self.alpha * self.dc_drx_neg_out[l] + beta * self.ms_rx_neg_out[l]\n self.ms_ry_neg_out[l] = -self.alpha * self.dc_dry_neg_out[l] + beta * self.ms_ry_neg_out[l]\n\n # Take step\n for l, layer in enumerate(network.layers):\n layer.b += self.alpha * self.ms_b[l]\n layer.q += self.alpha * self.ms_q[l]\n layer.rx_inp += self.alpha * self.ms_rx_inp[l]\n layer.ry_inp += self.alpha * self.ms_ry_inp[l]\n layer.rx_pos_out += self.alpha * self.ms_rx_pos_out[l]\n layer.ry_pos_out += self.alpha * self.ms_ry_pos_out[l]\n layer.rx_neg_out += self.alpha * self.ms_rx_neg_out[l]\n layer.ry_neg_out += self.alpha * self.ms_ry_neg_out[l]\n\n # store num for next iteration to be used as denominator\n self.denominator = num", "async def update_dps(self, dps=None):\n if self.version in [3.2, 3.3]: # 3.2 behaves like 3.3 with type_0d\n if dps is 
None:\n if not self.dps_cache:\n await self.detect_available_dps()\n if self.dps_cache:\n dps = [int(dp) for dp in self.dps_cache]\n # filter non whitelisted dps\n dps = list(set(dps).intersection(set(UPDATE_DPS_WHITELIST)))\n self.debug(\"updatedps() entry (dps %s, dps_cache %s)\", dps, self.dps_cache)\n payload = self._generate_payload(UPDATEDPS, dps)\n enc_payload = self._encode_message(payload)\n self.transport.write(enc_payload)\n return True", "def update(self):\n\n # First we need to scale the sufficient statistics by batch size\n self._counts /= self._corpus_size\n\n # We'll only train the network with 20 iterations.\n # A more common technique is to use a hold-out validation set.\n # When the validation error starts to increase, the network is overfitting,\n # so we stop training the net. This is called \"early stopping\", which we won't do here.\n done_looping = False\n best_cost = np.inf\n best_iter = 0\n learning_rate = self._learning_rate\n patience = self._patience\n\n # TODO: implement adagrad\n for iteration in range(self._max_iterations):\n\n # Train the network using the entire training set.\n current_cost = self._train(self._X, self._counts, learning_rate)\n logging.debug('[%d] MLP cost=%s', iteration, current_cost)\n\n # Give it a chance to update cost and patience\n if current_cost < best_cost:\n if current_cost < best_cost * self._improvement_threshold:\n if iteration >= patience - self._patience_increase:\n patience += self._patience_increase\n best_cost = current_cost\n best_iter = iteration\n\n # Check patience\n if iteration > self._patience:\n logging.debug('Ran out of patience in iteration %d', iteration)\n break\n\n # Finally, we update the CPDs and reset the sufficient statistics to zero\n self._cpds = self._mlp_output(self._X)\n self._counts = np.zeros((self.n_input, self.n_output), dtype=theano.config.floatX)", "def optimize_parameters(self):\n # forward\n for i in range(min(self.big_iter+1,len(self.orders_rev))):\n if(self.orders_rev):\n # compute fake images and reconstruction images.\n self.forward(i,False)\n # G_A and G_B\n # Ds require no gradients when optimizing Gs\n self.set_requires_grad(self.netD, False)\n # set G_A and G_B's gradients to zero\n self.optimizers_G[self.orders_rev[i]].zero_grad()\n # calculate gradients for G_A and G_B\n self.backward_G(i,False)\n # update G_A and G_B's weights\n self.optimizers_G[self.orders_rev[i]].step()\n # D_A and D_B\n self.set_requires_grad(self.netD, True)\n self.optimizer_D.zero_grad() \n self.backward_D(i,False) \n self.optimizer_D.step() \n else:\n self.optimizer_D.zero_grad() # set D_A and D_B's gradients to zero\n self.backward_DY() # calculate gradients for D_A\n self.optimizer_D.step()\n for i in range(min(self.big_iter+1, len(self.orders))):\n if(self.orders):\n if(i>0):\n self.real_A = self.fake_B.detach()\n self.forward(i,True) # compute fake images and reconstruction images.\n # G_A and G_B\n # Ds require no gradients when optimizing Gs\n self.set_requires_grad(self.netD, False)\n # set G_A and G_B's gradients to zero\n self.optimizers_G[self.orders[i]].zero_grad()\n self.backward_G(i,True) # calculate gradients for G_A and G_B\n # update G_A and G_B's weights\n self.optimizers_G[self.orders[i]].step()\n # D_A and D_B\n self.set_requires_grad(self.netD, True)\n self.optimizer_D.zero_grad() # set D_A and D_B's gradients to zero\n self.backward_D(i,True) # calculate gradients for D_A\n self.optimizer_D.step() \n else:\n self.optimizer_D.zero_grad() # set D_A and D_B's gradients to zero\n 
self.backward_DX() # calculate gradients for D_A\n self.optimizer_D.step() \n self.current_label=self.labels[0]\n self.current_order=self.orders\n self.current_pred = np.concatenate((self.pred_real.detach().cpu().numpy().mean(\n axis=2).mean(axis=2), self.pred_fake.detach().cpu().numpy().mean(axis=2).mean(axis=2)))", "def optimize_dcr(dg):\n # files to consider. fixme: right now only works with one file\n sto = lh5.Store()\n lh5_dir = os.path.expandvars(dg.config['lh5_dir'])\n raw_list = lh5_dir + dg.fileDB['raw_path'] + '/' + dg.fileDB['raw_file']\n f_raw = raw_list.values[0] \n \n tb_raw = 'ORSIS3302DecoderForEnergy/raw/'\n tb_data = sto.read_object(tb_raw, f_raw)\n \n cycle = dg.fileDB['cycle'].values[0]\n f_results = f'./temp_{cycle}.h5'\n \n write_output = True\n \n # adjust dsp config \n with open('opt_dcr.json') as f:\n dsp_config = json.load(f, object_pairs_hook=OrderedDict)\n # pprint(dsp_config)\n # exit()\n \n # set dcr parameters\n # rise, flat, dcr_tstart = 200, 1000, 'tp_0+1.5*us' # default\n # dcr_rise, dcr_flat, dcr_tstart = 100, 3000, 'tp_0+3*us' # best so far?\n dcr_rise, dcr_flat, dcr_tstart = 100, 2500, 'tp_0+1*us'\n dsp_config['processors']['dcr_raw']['args'][1] = dcr_rise\n dsp_config['processors']['dcr_raw']['args'][2] = dcr_flat\n dsp_config['processors']['dcr_raw']['args'][3] = dcr_tstart\n \n # set trap energy parameters\n # ene_rise, ene_flat = \"2*us\", \"1*us\" # best? from optimize_trap\n ene_rise, ene_flat = \"10*us\", \"5*us\"\n dsp_config['processors']['wf_trap']['args'][1] = ene_rise\n dsp_config['processors']['wf_trap']['args'][2] = ene_flat\n \n # adjust pole-zero constant\n dsp_config['processors']['wf_pz']['defaults']['db.pz.tau'] = '64.4*us'\n # dsp_config['processors']['wf_pz']['defaults']['db.pz.tau'] = '50*us'\n # dsp_config['processors']['wf_pz']['defaults']['db.pz.tau'] = '100*us'\n \n # run dsp\n print('Running DSP ...')\n t_start = time.time()\n pc, tb_out = build_processing_chain(tb_data, dsp_config, verbosity=1)\n pc.execute()\n t_elap = (time.time() - t_start)/60\n print(f'Done. 
Elapsed: {t_elap:.2f} min')\n \n df_out = tb_out.get_dataframe()\n \n if write_output:\n df_out.to_hdf(f_results, key='opt_dcr')\n print('Wrote output file:', f_results)", "def densitychange(self,dt=0.1):\n #Using conservation of mass and diffusion\n dp_dt = -div(self.u*self.d)\n dp_dt += ndimage.laplace(self.d)\n #This term seems to make the density clump together, producing \n #waves which can make the simulation blow up.\n #dp_dt -= np.add.reduce(self.u*np.array(np.gradient(self.d)))\n #Edge density shouldn't change.\n dp_dt[[0,-1]] = dp_dt[:,[0,-1]] = 0\n self.d += dp_dt*dt\n #Change pressure accordingly to ideal gas law\n #AAAAAAAAAAAAAAAA this fixed most of the poblems from before!!!\n self.P = self.d*8.214*273\n #Conserve mass by spreading out fluctuations \n self.d[1:-1,1:-1] += (self.mass-np.sum(self.d))/self.vol", "def update_predict_network(self):\n states, actions, rewards, new_states, is_terminals = self.memory.sample(self.batch_size)\n\n preprocessed_states, preprocessed_new_states = self.preprocessor.process_batch(states, new_states)\n\n actions = self.preprocessor.process_action(actions)\n # update network\n q_values = self.cal_target_q_values(preprocessed_new_states)\n max_q_values = np.max(q_values, axis=1)\n max_q_values[is_terminals] = 0.0\n targets = rewards + self.gamma * max_q_values\n targets = np.expand_dims(targets, axis=1)\n\n self.q_network.train_on_batch([preprocessed_states, actions], targets)\n if self.num_steps % self.target_update_freq ==0:\n print(\"Update target network at %d steps\" % self.num_steps)\n self.update_target_network()", "def update_target_net(self, sess):\n sess.run(self.update_target_net_op)", "def _update_weights(self, good_da, bad_da, good_tree, bad_tree, good_feats, bad_feats):\n # import ipdb; ipdb.set_trace()\n if self.diffing_trees:\n good_sts, bad_sts = good_tree.diffing_trees(bad_tree, symmetric=True)\n for good_st, bad_st in zip(good_sts, bad_sts):\n good_feats = self._extract_feats(good_st, good_da)\n bad_feats = self._extract_feats(bad_st, bad_da)\n subtree_w = 1\n if self.diffing_trees.endswith('weighted'):\n subtree_w = (len(good_st) + len(bad_st)) / float(len(good_tree) + len(bad_tree))\n self._update_nn(bad_feats, good_feats, subtree_w * self.alpha)\n else:\n self._update_nn(bad_feats, good_feats, self.alpha)", "def one_step_gd(self, batch):\n\n # get target values yj\n targets = self.get_target(batch)\n phi_input = np.vstack(batch[0])\n masks = self.get_masks(batch[1])\n dummy_targets = targets.max(axis=1)\n\n X = [phi_input, targets, masks]\n Y = [dummy_targets, targets]\n\n # update main network with one step of gradient descent\n # self.Qmodel.fit(X, Y, batch_size=len(X))\n # pdb.set_trace()\n metrics = self.train_model.train_on_batch(X, Y)\n\n # every fixed number of steps, update target network\n self.c_count += 1\n # print(self.c_count, self.c)\n\n if self.c_count == self.c:\n # if self.verbose:\n # print('* Target network updated')\n\n # update target network to be equal the main network\n self.update_target_network()\n\n # reset counter\n self.c_count = 0\n\n return metrics[0]", "def acUpdate(deltaT):#-------------------------------- AC UPDATE\n pass # -> Delete this line if you do something here !", "def update_parameters(parameters, grads, learning_rate=0.01):\n # Retrieve each parameter from the dictionary \"parameters\"\n ### START CODE HERE ### (≈ 4 lines of code)\n W1 = parameters[\"W1\"]\n b1 = parameters[\"b1\"]\n W2 = parameters[\"W2\"]\n b2 = parameters[\"b2\"]\n W3 = parameters[\"W3\"]\n b3 = 
parameters[\"b3\"]\n ### END CODE HERE ###\n\n # Retrieve each gradient from the dictionary \"grads\"\n ### START CODE HERE ### (≈ 4 lines of code)\n dW1 = grads[\"dW1\"]\n db1 = grads[\"db1\"]\n dW2 = grads[\"dW2\"]\n db2 = grads[\"db2\"]\n dW3 = grads[\"dW3\"]\n db3 = grads[\"db3\"]\n ## END CODE HERE ###\n\n # Update rule for each parameter\n ### START CODE HERE ### (≈ 4 lines of code)\n W1 = W1 - (learning_rate * dW1)\n b1 = b1 - (learning_rate * db1)\n W2 = W2 - (learning_rate * dW2)\n b2 = b2 - (learning_rate * db2)\n W3 = W3 - (learning_rate * dW3)\n b3 = b3 - (learning_rate * db3)\n ### END CODE HERE ###\n\n parameters = {\"W1\": W1,\n \"b1\": b1,\n \"W2\": W2,\n \"b2\": b2,\n \"W3\": W3,\n \"b3\": b3}\n\n return parameters", "def update_parameters_with_adam(parameters, grads, v, s, t, lambd, learning_rate, mini_batch_size, beta1, beta2, epsilon):\n\n L = len(parameters) // 2 \n v_corrected = {}\n s_corrected = {}\n\n for l in range(L):\n v[\"dW\" + str(l + 1)] = beta1 * v[\"dW\" + str(l + 1)] + (1 - beta1) * grads['dW' + str(l + 1)]\n v[\"db\" + str(l + 1)] = beta1 * v[\"db\" + str(l + 1)] + (1 - beta1) * grads['db' + str(l + 1)]\n v_corrected[\"dW\" + str(l + 1)] = v[\"dW\" + str(l + 1)] / (1 - np.power(beta1, t))\n v_corrected[\"db\" + str(l + 1)] = v[\"db\" + str(l + 1)] / (1 - np.power(beta1, t))\n\n s[\"dW\" + str(l + 1)] = beta2 * s[\"dW\" + str(l + 1)] + (1 - beta2) * np.power(grads['dW' + str(l + 1)], 2)\n s[\"db\" + str(l + 1)] = beta2 * s[\"db\" + str(l + 1)] + (1 - beta2) * np.power(grads['db' + str(l + 1)], 2)\n s_corrected[\"dW\" + str(l + 1)] = s[\"dW\" + str(l + 1)] / (1 - np.power(beta2, t))\n s_corrected[\"db\" + str(l + 1)] = s[\"db\" + str(l + 1)] / (1 - np.power(beta2, t))\n parameters[\"W\" + str(l + 1)] = (1-learning_rate*(lambd/mini_batch_size))*parameters[\"W\" + str(l + 1)] \n parameters[\"W\" + str(l + 1)] = parameters[\"W\" + str(l + 1)] - learning_rate * v_corrected[\"dW\" + str(l + 1)] / np.sqrt(s_corrected[\"dW\" + str(l + 1)] + epsilon)\n parameters[\"b\" + str(l + 1)] = parameters[\"b\" + str(l + 1)] - learning_rate * v_corrected[\"db\" + str(l + 1)] / np.sqrt(s_corrected[\"db\" + str(l + 1)] + epsilon)\n\n\n return parameters, v, s", "def compile_update_softmax(nnet, inputs, targets):\n\n floatX = Cfg.floatX\n C = Cfg.C\n\n final_layer = nnet.all_layers[-1]\n trainable_params = lasagne.layers.get_all_params(final_layer,\n trainable=True)\n\n # Regularization\n if Cfg.weight_decay:\n l2_penalty = (floatX(0.5) / C) * get_l2_penalty(nnet, Cfg.include_bias)\n else:\n l2_penalty = T.cast(0, dtype='floatX')\n\n # Backpropagation\n prediction = lasagne.layers.get_output(final_layer, inputs=inputs,\n deterministic=False)\n if Cfg.ad_experiment:\n train_loss = T.mean(l_objectives.binary_crossentropy(\n prediction.flatten(), targets),\n dtype='floatX')\n train_acc = T.mean(l_objectives.binary_accuracy(prediction.flatten(),\n targets),\n dtype='floatX')\n else:\n train_loss = T.mean(l_objectives.categorical_crossentropy(prediction,\n targets),\n dtype='floatX')\n train_acc = T.mean(T.eq(T.argmax(prediction, axis=1), targets),\n dtype='floatX')\n\n\n train_obj = T.cast(train_loss + l2_penalty, dtype='floatX')\n updates = get_updates(nnet, train_obj, trainable_params, solver=nnet.solver)\n nnet.backprop = theano.function([inputs, targets],\n [train_obj, train_acc],\n updates=updates)\n\n # Forwardpropagation\n test_prediction = lasagne.layers.get_output(final_layer, inputs=inputs,\n deterministic=True)\n if Cfg.ad_experiment:\n test_loss = 
T.mean(l_objectives.binary_crossentropy(\n test_prediction.flatten(), targets), dtype='floatX')\n test_acc = T.mean(l_objectives.binary_accuracy(\n test_prediction.flatten(), targets), dtype='floatX')\n else:\n test_loss = T.mean(l_objectives.categorical_crossentropy(\n test_prediction, targets), dtype='floatX')\n test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), targets),\n dtype='floatX')\n test_obj = T.cast(test_loss + l2_penalty, dtype='floatX')\n nnet.forward = theano.function([inputs, targets],\n [test_obj, test_acc, test_prediction,\n l2_penalty, test_loss])", "def _apply_smooth_update(self):\n self.print(\"SGD with Momentum: Applying smooth update...\", line_above=True)\n\n raw_update = self.get_h5_data(self.raw_update_path)\n update = self.get_h5_data(self.smooth_update_path)\n\n if np.sum(np.isnan(update)) > 1:\n raise Exception(\n \"NaNs were found in the smoothed update.\"\n \"Check the raw update and smoothing process.\"\n )\n\n max_upd = np.max(np.abs(update))\n print(f\"Max smooth model update: {max_upd}\")\n\n update_scaling_fac_alpha = self.alpha / max_upd\n\n self.print(\n f\"Recaling based on alpha: {update_scaling_fac_alpha},\"\n f\"New maximum update is: {max_upd * update_scaling_fac_alpha}\"\n )\n\n update *= update_scaling_fac_alpha\n\n # normalise theta and apply update\n theta_0 = self.get_h5_data(self._get_path_for_iteration(0, self.model_path))\n\n # Update parameters\n if max(self.roughness_decay_smoothing_length) > 0.0:\n theta_prev = self.get_h5_data(self.smoothed_model_path)\n\n # If relative perturbations are smoothed, make model physical\n if self.roughness_decay_type == \"relative_perturbation\":\n theta_prev = (theta_prev + 1) * theta_0\n else:\n theta_prev = self.get_h5_data(self.model_path)\n\n # Normalize the model and prevent division by zero in the outer core.\n theta_prev[theta_0 != 0] = theta_prev[theta_0 != 0] / theta_0[theta_0 != 0] - 1\n\n # Make sure that the model is only updated where theta is non_zero\n theta_new = np.zeros_like(theta_0)\n theta_new[theta_0 != 0] = (\n theta_prev[theta_0 != 0]\n - update[theta_0 != 0]\n - (1 - self.beta) * self.perturbation_decay * theta_prev[theta_0 != 0]\n )\n\n # Remove normalization from updated model and write physical model\n theta_physical = (theta_new + 1) * theta_0\n shutil.copy(\n self.model_path,\n self.tmp_model_path,\n )\n self.set_h5_data(\n self.tmp_model_path,\n theta_physical,\n )", "def update_parameters_with_adam(parameters, grads, v, s, t, learning_rate = 0.01,\n beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8):\n L = len(parameters) // 2 # number of layers in the neural networks\n v_corrected = {} # Initializing first moment estimate, python dictionary\n s_corrected = {} # Initializing second moment estimate, python dictionary\n \n # Perform Adam update on all parameters\n for l in range(L):\n # Moving average of the gradients. Inputs: \"v, grads, beta1\". Output: \"v\".\n v[\"dW\" + str(l+1)] = beta1*v[\"dW\" + str(l+1)]+(1-beta1)*grads['dW'+str(l+1)]\n v[\"db\" + str(l+1)] = beta1*v[\"db\" + str(l+1)]+(1-beta1)*grads['db'+str(l+1)]\n\n # Compute bias-corrected first moment estimate. Inputs: \"v, beta1, t\". Output: \"v_corrected\".\n v_corrected[\"dW\" + str(l+1)] = v[\"dW\" + str(l+1)]/(1-pow(beta1,t))\n v_corrected[\"db\" + str(l+1)] = v[\"db\" + str(l+1)]/(1-pow(beta1,t))\n\n # Moving average of the squared gradients. Inputs: \"s, grads, beta2\". 
Output: \"s\".\n s[\"dW\" + str(l+1)] = beta2*s[\"dW\" + str(l+1)]+(1-beta2)*np.power(grads['dW'+str(l+1)],2)\n s[\"db\" + str(l+1)] = beta2*s[\"db\" + str(l+1)]+(1-beta2)*np.power(grads['db'+str(l+1)],2)\n\n # Compute bias-corrected second raw moment estimate. Inputs: \"s, beta2, t\". Output: \"s_corrected\".\n s_corrected[\"dW\" + str(l+1)] = s[\"dW\" + str(l+1)]/(1-pow(beta2,t))\n s_corrected[\"db\" + str(l+1)] = s[\"db\" + str(l+1)]/(1-pow(beta2,t))\n\n # Update parameters. Inputs: \"parameters, learning_rate, v_corrected, s_corrected, epsilon\". Output: \"parameters\".\n parameters[\"W\" + str(l+1)] = parameters[\"W\" + str(l+1)]-learning_rate*np.divide(v_corrected[\"dW\" + str(l+1)],np.sqrt(s_corrected[\"dW\" + str(l+1)])+epsilon)\n parameters[\"b\" + str(l+1)] = parameters[\"b\" + str(l+1)]-learning_rate*np.divide(v_corrected[\"db\" + str(l+1)],np.sqrt(s_corrected[\"db\" + str(l+1)])+epsilon)\n\n return parameters, v, s", "def update(self, ex):\r\n if not self.optimizer:\r\n raise RuntimeError('No optimizer set.')\r\n\r\n # Train mode\r\n self.network.train()\r\n\r\n source_ids = ex['source_ids']\r\n source_pos_ids = ex['source_pos_ids']\r\n source_type_ids = ex['source_type_ids']\r\n source_mask = ex['source_mask']\r\n label = ex['label']\r\n\r\n if self.use_cuda:\r\n label = label.cuda(non_blocking=True)\r\n source_ids = source_ids.cuda(non_blocking=True)\r\n source_pos_ids = source_pos_ids.cuda(non_blocking=True) \\\r\n if source_pos_ids is not None else None\r\n source_type_ids = source_type_ids.cuda(non_blocking=True) \\\r\n if source_type_ids is not None else None\r\n source_mask = source_mask.cuda(non_blocking=True) \\\r\n if source_mask is not None else None\r\n\r\n # Run forward\r\n score = self.network(source_ids=source_ids,\r\n source_pos_ids=source_pos_ids,\r\n source_type_ids=source_type_ids,\r\n source_mask=source_mask)\r\n\r\n # Compute loss and accuracies\r\n loss = self.criterion(score, label)\r\n\r\n if self.args.gradient_accumulation_steps > 1:\r\n loss = loss / self.args.gradient_accumulation_steps\r\n\r\n if self.args.fp16:\r\n with amp.scale_loss(loss, self.optimizer) as scaled_loss:\r\n scaled_loss.backward()\r\n else:\r\n loss.backward()\r\n\r\n if (self.updates + 1) % self.args.gradient_accumulation_steps == 0:\r\n if self.args.fp16:\r\n torch.nn.utils.clip_grad_norm_(amp.master_params(self.optimizer), self.args.grad_clipping)\r\n else:\r\n torch.nn.utils.clip_grad_norm_(self.network.parameters(), self.args.grad_clipping)\r\n\r\n self.optimizer.step()\r\n self.scheduler.step() # Update learning rate schedule\r\n self.optimizer.zero_grad()\r\n\r\n self.updates += 1\r\n\r\n return loss.item()", "def update(self, ex):\n if not self.optimizer:\n raise RuntimeError('No optimizer set.')\n\n # Train mode\n self.network.train()\n\n if self.use_cuda:\n for key in ex:\n #if isinstance(ex[key], torch.Tensor):\n try:\n ex[key] = ex[key].cuda(non_blocking=True)\n except:\n pass\n\n # Run forward\n net_loss = self.network(ex)\n\n loss = net_loss[\"total_loss\"]\n\n loss.backward()\n\n clip_grad_norm_(self.network.parameters(), self.args.grad_clipping)\n self.optimizer.step()\n self.optimizer.zero_grad()\n\n self.updates += 1\n return {\n 'loss': loss,\n \"loc_loss\": net_loss[\"loc_loss\"],\n \"fix_loss\": net_loss[\"target_loss\"],\n }", "def create_update(nnet):\n\n if nnet.data._X_val.ndim == 2:\n inputs = T.matrix('inputs')\n elif nnet.data._X_val.ndim == 4:\n inputs = T.tensor4('inputs')\n\n targets = T.ivector('targets')\n\n # compile theano functions\n if 
Cfg.softmax_loss:\n compile_update_softmax(nnet, inputs, targets)\n elif Cfg.ocsvm_loss:\n if Cfg.rho_fixed:\n compile_update_ocsvm_rho_fixed(nnet, inputs, targets)\n else:\n compile_update_ocsvm(nnet, inputs, targets)\n elif Cfg.svdd_loss:\n compile_update_svdd(nnet, inputs, targets)\n elif Cfg.reconstruction_loss:\n create_autoencoder(nnet)\n else:\n compile_update_default(nnet, inputs, targets)", "def update(self,parameters, grads):\n \n self.i=self.i+1\n epsilon=1e-07\n self.L = len(parameters) // 2\n \n if(self.init_momentum):\n self.init_momentums(grads)\n \n m0_hat=self.m0\n m1_hat=self.m1\n \n for l in range(self.L):\n \n \n self.m0[\"dW\" + str(l+1)]=(self.m0[\"dW\" + str(l+1)]*self.beta1) + ((1-self.beta1)*(grads[\"dW\" + str(l+1)]))\n self.m1[\"dW\" + str(l+1)]=(self.m1[\"dW\" + str(l+1)]*self.beta2) + ((1-self.beta2)*(grads[\"dW\" + str(l+1)]**2))\n self.m0[\"dW\" + str(l+1)][np.isnan(self.m0[\"dW\" + str(l+1)])] = 0\n self.m1[\"dW\" + str(l+1)][np.isnan(self.m1[\"dW\" + str(l+1)])] = 0\n \n self.m0[\"db\" + str(l+1)]=(self.m0[\"db\" + str(l+1)]*self.beta1) + ((1-self.beta1)*(grads[\"db\" + str(l+1)]))\n self.m1[\"db\" + str(l+1)]=(self.m1[\"db\" + str(l+1)]*self.beta2) + ((1-self.beta2)*(grads[\"db\" + str(l+1)]**2))\n self.m0[\"db\" + str(l+1)][np.isnan(self.m0[\"db\" + str(l+1)])] = 0\n self.m1[\"db\" + str(l+1)][np.isnan(self.m1[\"db\" + str(l+1)])] = 0\n \n m0_hat[\"dW\" + str(l+1)]=(self.m0[\"dW\" + str(l+1)])/((1-self.beta1)**self.i)\n m1_hat[\"dW\" + str(l+1)]=(self.m1[\"dW\" + str(l+1)])/((1-self.beta2)**self.i)\n m0_hat[\"dW\" + str(l+1)][np.isnan(m0_hat[\"dW\" + str(l+1)])] = 0\n m1_hat[\"dW\" + str(l+1)][np.isnan(m1_hat[\"dW\" + str(l+1)])] = 0\n \n \n m0_hat[\"db\" + str(l+1)]=(self.m0[\"db\" + str(l+1)])/((1-self.beta1)**self.i)\n m1_hat[\"db\" + str(l+1)]=(self.m1[\"db\" + str(l+1)])/((1-self.beta2)**self.i)\n m0_hat[\"db\" + str(l+1)][np.isnan(m0_hat[\"db\" + str(l+1)])] = 0\n m1_hat[\"db\" + str(l+1)][np.isnan(m1_hat[\"db\" + str(l+1)])] = 0\n \n parameters[\"W\" + str(l+1)]=parameters[\"W\" + str(l+1)] - ((self.alpha)*((m0_hat[\"dW\" + str(l+1)])/(np.sqrt(m1_hat[\"dW\" + str(l+1)])+epsilon)))\n parameters[\"b\" + str(l+1)]=parameters[\"b\" + str(l+1)] - ((self.alpha)*((m0_hat[\"db\" + str(l+1)])/(np.sqrt(m1_hat[\"db\" + str(l+1)])+epsilon)))\n parameters[\"W\" + str(l+1)][np.isnan(parameters[\"W\" + str(l+1)])] = 0\n parameters[\"b\" + str(l+1)][np.isnan(parameters[\"b\" + str(l+1)])] = 0\n \n return parameters", "def update_parameters(parameters, grads, learning_rate):\n pass", "def compute_updated_dists(self, samples):\n start = time.time()\n num_tasks = len(samples[0])\n param_keys = self.all_params.keys()\n update_param_keys = param_keys\n no_update_param_keys = []\n\n sess = tf.get_default_session()\n input_list = []\n for n in range(len(samples)):\n obs_list, action_list, adv_list = [], [], []\n for i in range(num_tasks):\n inputs = ext.extract(samples[n][i],\n 'observations', 'actions', 'advantages')\n obs_list.append(inputs[0])\n action_list.append(inputs[1])\n adv_list.append(inputs[2])\n input_list += obs_list + action_list + adv_list\n\n # To do a second update, replace self.all_params below with the params that were used to collect the policy.\n init_param_values = None\n if self.all_param_vals is not None:\n init_param_values = self.get_variable_values(self.all_params)\n\n step_size = self.step_size\n for i in range(num_tasks):\n if self.all_param_vals is not None:\n self.assign_params(self.all_params, self.all_param_vals[i])\n\n \"\"\"\n if 
'all_fast_params_tensor' not in dir(self):\n # make computation graph once\n self.all_fast_params_tensor = []\n for i in range(num_tasks):\n gradients = dict(zip(update_param_keys, tf.gradients(self.surr_objs[i], [self.all_params[key] for key in update_param_keys])))\n fast_params_tensor = OrderedDict(zip(update_param_keys, [self.all_params[key] - step_size*gradients[key] for key in update_param_keys]))\n for k in no_update_param_keys:\n fast_params_tensor[k] = self.all_params[k]\n self.all_fast_params_tensor.append(fast_params_tensor)\n \"\"\"\n\n # pull new param vals out of tensorflow, so gradient computation only done once ## first is the vars, second the values\n # these are the updated values of the params after the gradient step\n self.all_param_vals = sess.run(self.all_fast_params_tensor, feed_dict=dict(list(zip(self.input_list_for_grad, input_list))))\n\n if init_param_values is not None:\n self.assign_params(self.all_params, init_param_values)\n\n outputs = []\n self._cur_f_dist_i = {}\n inputs = tf.split(self.input_tensor, num_tasks, 0)\n for i in range(num_tasks):\n # TODO - use a placeholder to feed in the params, so that we don't have to recompile every time.\n task_inp = inputs[i]\n info, _ = self.dist_info_sym(task_inp, dict(), all_params=self.all_param_vals[i],\n is_training=False)\n\n outputs.append([info['mean'], info['log_std']])\n\n self._cur_f_dist = tensor_utils.compile_function(\n inputs = [self.input_tensor],\n outputs = outputs,\n )\n total_time = time.time() - start\n logger.record_tabular(\"ComputeUpdatedDistTime\", total_time)", "def _update_pressure_bc(\n self,\n states: FlowFieldMap,\n additional_states: FlowFieldMap,\n ):\n bc_p = [[None, None], [None, None], [None, None]]\n\n velocity_keys = ['u', 'v', 'w']\n grid_spacing = (self._params.dx, self._params.dy, self._params.dz)\n\n def grad_per_dim(f, dim):\n \"\"\"Computes the diffusion term in a specific dimension.\"\"\"\n grad_ops = (\n lambda f: self._kernel_op.apply_kernel_op_x(f, 'kDx'),\n lambda f: self._kernel_op.apply_kernel_op_y(f, 'kDy'),\n lambda f: self._kernel_op.apply_kernel_op_z(f, 'kDz', 'kDzsh'),\n )\n return tf.nest.map_structure(\n lambda grad: grad / (2.0 * grid_spacing[dim]), grad_ops[dim](f))\n\n def ddh_per_dim(f, dim):\n \"\"\"Computes the second order derivative of `f` along `dim`.\"\"\"\n diff_ops = [\n lambda f: self._kernel_op.apply_kernel_op_x(f, 'kddx'),\n lambda f: self._kernel_op.apply_kernel_op_y(f, 'kddy'),\n lambda f: self._kernel_op.apply_kernel_op_z(f, 'kddz', 'kddzsh'),\n ]\n return tf.nest.map_structure(lambda diff: diff / grid_spacing[dim]**2,\n diff_ops[dim](f))\n\n # The diffusion term for the 3 velocity component can be expressed in vector\n # form as:\n # 𝛁·𝛕 = 𝜇 𝛁²u + 1/3𝜇 𝛁(𝛁·u).\n # We rearange terms in the wall-oriented coordinates (n is for the direction\n # normal to the wall, and t is for directions parallel/tangent to the wall).\n # Because the wall normal velocity component uₙ is 0 at the wall, 𝜕uₙ/𝜕t = 0\n # the equation above can be expressed as:\n # 𝛁·𝛕ₙ = 4/3 𝜇 𝜕²uₙ/𝜕n² + 1/3𝜇 𝜕/𝜕n (𝜕uₜ/𝜕t),\n # where n is for the direction normal to the wall, and t is for directions\n # parallel/tangent to the wall.\n # In additional, we assume that there's no turbulence at the wall, therefore\n # 𝜇 is the molecular viscosity.\n def diff_fn(\n mu_i: tf.Tensor,\n ddu_n_i: tf.Tensor,\n ddu_t_i: tf.Tensor,\n ) -> tf.Tensor:\n \"\"\"Computes the diffusion term at walls.\"\"\"\n return mu_i * (4.0 / 3.0 * ddu_n_i + 1.0 / 3.0 * ddu_t_i)\n\n mu = tf.nest.map_structure(lambda 
rho_i: self._params.nu * rho_i,\n states['rho'])\n ddu_n = [ddh_per_dim(states[velocity_keys[i]], i) for i in range(3)]\n du_dx = [grad_per_dim(states[velocity_keys[i]], i) for i in range(3)]\n du_t = (\n # The x component.\n tf.nest.map_structure(tf.math.add, du_dx[1], du_dx[2]),\n # The y component.\n tf.nest.map_structure(tf.math.add, du_dx[0], du_dx[2]),\n # The z component.\n tf.nest.map_structure(tf.math.add, du_dx[0], du_dx[1]),\n )\n ddu_t = [grad_per_dim(du_t[i], i) for i in range(3)]\n\n diff = [\n tf.nest.map_structure(diff_fn, mu, ddu_n[i], ddu_t[i]) for i in range(3)\n ]\n\n # Updates the pressure boundary condition based on the simulation setup.\n for i in range(3):\n for j in range(2):\n if (self._params.bc_type[i][j] ==\n boundary_condition_utils.BoundaryType.PERIODIC):\n bc_p[i][j] = None\n\n elif (self._params.bc_type[i][j] ==\n boundary_condition_utils.BoundaryType.INFLOW):\n bc_p[i][j] = (halo_exchange.BCType.NEUMANN_2, 0.0)\n\n elif (self._params.bc_type[i][j]\n == boundary_condition_utils.BoundaryType.OUTFLOW):\n if self._pressure_params.pressure_outlet:\n # Enforce a pressure outlet boundary condition on demand.\n bc_p[i][j] = (halo_exchange.BCType.DIRICHLET, 0.0)\n else:\n bc_p[i][j] = (halo_exchange.BCType.NEUMANN_2, 0.0)\n\n elif self._params.bc_type[i][j] in (\n boundary_condition_utils.BoundaryType.SLIP_WALL,\n boundary_condition_utils.BoundaryType.NON_SLIP_WALL,\n boundary_condition_utils.BoundaryType.SHEAR_WALL):\n\n bc_value = common_ops.get_face(diff[i], i, j,\n self._params.halo_width - 1,\n grid_spacing[i])[0]\n if i == self.g_dim:\n # Ensures the pressure balances with the buoyancy at the first fluid\n # layer by assigning values to the pressure in halos adjacent to the\n # fluid domain.\n rho_0 = self._thermodynamics.rho_ref(\n additional_states.get('zz', None), additional_states\n )\n b = eq_utils.buoyancy_source(self._kernel_op, states['rho_thermal'],\n rho_0, self._params, i)\n\n bc_value = tf.nest.map_structure(\n tf.math.add, bc_value,\n common_ops.get_face(b, i, j, self._params.halo_width - 1,\n grid_spacing[i])[0])\n\n # The boundary condition for pressure is applied at the interface\n # between the boundary and fluid only. 
Assuming everything is\n # homogeneous behind the halo layer that's closest to the fluid, a\n # homogeneous Neumann BC is applied to all other layers for pressure.\n zeros = [tf.nest.map_structure(tf.zeros_like, bc_value)] * (\n self._params.halo_width - 1)\n\n bc_planes = zeros + [bc_value] if j == 0 else [bc_value] + zeros\n\n bc_p[i][j] = (halo_exchange.BCType.NEUMANN_2, bc_planes)\n else:\n raise ValueError('{} is not defined for pressure boundary.'.format(\n self._params.bc_type[i][j]))\n\n self._bc['p'] = bc_p", "def update(self, d_t, **kwargs):\n # '_update' depends on the 'backend'\n self._update(d_t * self.Time_Scale, **kwargs)\n for func in self.callbacks:\n func()", "def update_target_net(self):\n if self.n_steps % self.target_update_interval == 0:\n self.target_q.load_state_dict(self.working_q.state_dict())", "def refugia_adj_5_full_2_iter4 (params, ns):\n #33 parameters \n nu1x, nuA, nu1a, nu2a, nu3a, nu1b, nu2b, nu3b, nu1c, nu2c, nu3c, nu1d, nu2d, nu3d, m0_12, m0_21, m1_12, m1_13, m1_21, m1_23, m1_31, m1_32, m3_12, m3_13, m3_21, m3_23, m3_31, m3_32, T0, T1, T2, T3, T4 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2])\n ## Population function and migration matrix for T0 (initial split; the definition of this time epoch differentiates this model from refugia_adj_5_simsplit_4epochs)\n nu_T0 = [nu1x, nuA]\n mig0 = numpy.array([[0, m0_12],[m0_21, 0]])\n fs.integrate(nu_T0, T0, m=mig0)\n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T1 (to reflect sum effect of all previous glacial-interglacial cycles)\n nu_T1 = [nu1a, nu2a, nu3a]\n mig1 = numpy.array([[0, m1_12, m1_13],[m1_21, 0, m1_23], [m1_31, m1_32, 0]]) \n fs.integrate(nu_T1, T1, m=mig1)\n ## Population function and migration matrix for T2 (to reflect period of isolation during last glacial)\n nu_T2 = [nu1b, nu2b, nu3b]\n fs.integrate(nu_T2, T2)\n ## Population function and migration matrix for T3 (to reflect inter-glacial expansion)\n nu_T3 = [nu1c, nu2c, nu3c]\n mig3 = numpy.array([[0, m3_12, m3_13],[m3_21, 0, m3_23], [m3_31, m3_32, 0]]) \n fs.integrate(nu_T3, T3, m=mig3)\n ## Population function and migration matrix for T3 (bottleneck to capture single population representation of lineage)\n nu_T4 = [nu1d, nu2d, nu3d]\n fs.integrate(nu_T4, T4) \n return fs", "def update_target_network(self):\n self.target_Qmodel = clone_model(self.Qmodel)\n self.target_Qmodel.set_weights(self.Qmodel.get_weights())\n\n # target network is never compiled\n self.target_Qmodel.compile(loss='mse', optimizer=Adam())", "def _update_dnsmasq(self, network_id):\n\n # Check whether we should really do the following processing.\n if self.suppress_dnsmasq_updates:\n LOG.debug(\"Don't update dnsmasq yet;\"\n \" must be processing a snapshot\")\n self.dirty_networks.add(network_id)\n return\n\n self.dnsmasq_updater.update_network(network_id)", "def update_target_dqn(self):\n\n for learning_parameter in self.dqn.learning_parameters:\n dqn_value = self.dqn.get_value(learning_parameter, self.tf_session)\n if(dqn_value is not None):\n self.target_dqn.set_value(\n learning_parameter, dqn_value, self.tf_session)\n else:\n print(\"Impossible to set value: None\")", "def defineUpdateOperations(self):\n self.updated_value = tf.placeholder(shape=[1, self.network.action_size], dtype=tf.float32)\n self.loss = tf.reduce_sum(tf.square(self.updated_value - self.network.policyLayer))\n 
self.trainer = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate)\n\n self.updateModel = self.trainer.minimize(self.loss)", "def refugia_adj_5_full_2_iter3 (params, ns):\n #33 parameters \n nu1x, nuA, nu1a, nu2a, nu3a, nu1b, nu2b, nu3b, nu1c, nu2c, nu3c, nu1d, nu2d, nu3d, m0_12, m0_21, m1_12, m1_13, m1_21, m1_23, m1_31, m1_32, m3_12, m3_13, m3_21, m3_23, m3_31, m3_32, T0, T1, T2, T3, T4 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2])\n ## Population function and migration matrix for T0 (initial split; the definition of this time epoch differentiates this model from refugia_adj_5_simsplit_4epochs)\n nu_T0 = [nu1x, nuA]\n mig0 = numpy.array([[0, m0_12],[m0_21, 0]])\n fs.integrate(nu_T0, T0, m=mig0)\n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T1 (to reflect sum effect of all previous glacial-interglacial cycles)\n nu_T1 = [nu1a, nu2a, nu3a]\n mig1 = numpy.array([[0, m1_12, m1_13],[m1_21, 0, m1_23], [m1_31, m1_32, 0]]) \n fs.integrate(nu_T1, T1, m=mig1)\n ## Population function and migration matrix for T2 (to reflect period of isolation during last glacial)\n nu_T2 = [nu1b, nu2b, nu3b]\n fs.integrate(nu_T2, T2)\n ## Population function and migration matrix for T3 (to reflect inter-glacial expansion)\n nu_T3 = [nu1c, nu2c, nu3c]\n mig3 = numpy.array([[0, m3_12, m3_13],[m3_21, 0, m3_23], [m3_31, m3_32, 0]]) \n fs.integrate(nu_T3, T3, m=mig3)\n ## Population function and migration matrix for T3 (bottleneck to capture single population representation of lineage)\n nu_T4 = [nu1d, nu2d, nu3d]\n fs.integrate(nu_T4, T4) \n return fs", "def Update(self, X, D, eta):\n Y = self.Forward(X)\n diff = Y!=D\n self.w += eta * (X[diff] * D[diff, None]).sum(axis=0)\n self.w0 += eta * (D[diff, None]).sum()", "def update(self, ob_no, next_ob_no, re_n, terminal_n):\n\n # TODO: Implement the pseudocode below:\n\n # do the following (self.num_grad_steps_per_target_update * self.num_target_updates) times:\n # every self.num_grad_steps_per_target_update steps (which includes the first step),\n # recompute the target values by\n #a) calculating V(s') by querying this critic network (ie calling 'forward') with next_ob_no\n #b) and computing the target values as r(s, a) + gamma * V(s')\n # HINT: don't forget to use terminal_n to cut off the V(s') (ie set it to 0) when a terminal state is reached\n # every time,\n # update this critic using the observations and targets\n # HINT1: need to sess.run the following:\n #a) critic_update_op\n #b) critic_loss\n # HINT2: need to populate the following (in the feed_dict):\n #a) sy_ob_no with ob_no\n #b) sy_target_n with target values calculated above\n\n for i in range(self.num_grad_steps_per_target_update * self.num_target_updates):\n if i % self.num_grad_steps_per_target_update == 0:\n next_V_n = self.forward(next_ob_no) * (1 - terminal_n)\n target_n = re_n + self.gamma * next_V_n\n loss, _ = self.sess.run([self.critic_loss, self.critic_update_op], feed_dict={self.sy_ob_no: ob_no, self.sy_target_n: target_n})\n\n return loss", "def update(self):\n # Update the weight matrix: \n self.W -= self.lr * self.grad_W \n \n # Update the bias matrices:\n self.b -= self.lr * np.array(self.grad_b) \n self.c -= self.lr * np.array(self.grad_c)", "def backward_D_basic(self, netD, netDadv,real,fake,i,direction):\n # if(not self.orders):\n # pred_real = netD(real) \n # loss_D_real = 
self.criterionGAN_D(pred_real, self.labels[i])\n # # Fake\n # pred_fake = netD(fake.detach())\n # loss_D_fake = self.criterionGAN_D(pred_fake, self.labels[i+1])\n # # Combined loss and calculate gradients\n # self.loss_D_cls = (loss_D_real + loss_D_fake) * 0.5\n # else:\n if(direction):\n self.pred_real = netD(real)\n self.loss_D_cls = self.criterionGAN_D(self.pred_real, self.labels[i])\n ifvalidAdorn=netDadv(real)\n ifvalidNoAdorn=netDadv(fake.detach())\n loss_D_adv_real = self.criterionGAN_D(ifvalidAdorn,True)\n loss_D_adv_fake = self.criterionGAN_D(ifvalidNoAdorn,False)\n self.loss_D_adv=(loss_D_adv_fake+loss_D_adv_real)*0.5\n else:\n pred_real = netD(real)\n self.loss_D_cls = self.criterionGAN_D(\n pred_real, self.labels_rev[i])\n ifvalidAdorn=netDadv(real)\n ifvalidNoAdorn=netDadv(fake.detach())\n loss_D_adv_real = self.criterionGAN_D(ifvalidAdorn,True)\n loss_D_adv_fake = self.criterionGAN_D(ifvalidNoAdorn,False)\n self.loss_D_adv=(loss_D_adv_fake+loss_D_adv_real)*0.5\n\n loss_D = self.loss_D_cls+self.loss_D_adv\n loss_D.backward()\n return loss_D", "def set_up_model(dt, model, update = False):\n \n start_scope()\n \n ##### Update model parameters (should be done, if original parameters have been changed)\n if update:\n ##### Structure of ANF\n # terminal = 0\n # internode = 1\n # node = 2\n # presomatic region = 3\n # Soma = 4\n # postsomatic region = 5\n model.structure = np.array(list(np.tile([2] + np.tile([1],model.nof_segments_internodes).tolist(),model.nof_internodes)) + [2])\n model.nof_comps = len(model.structure)\n \n ##### compartment lengths\n # initialize\n model.compartment_lengths = np.zeros_like(structure)*um\n # length internodes\n model.compartment_lengths[model.structure == 1] = model.length_internodes / model.nof_segments_internodes\n # length nodes\n model.compartment_lengths[model.structure == 2] = model.length_nodes\n # total length neuron\n model.length_neuron = sum(model.compartment_lengths)\n \n ##### Compartment diameters\n # initialize\n model.compartment_diameters = np.zeros(model.nof_comps+1)*um\n # same diameter for whole fiber\n model.compartment_diameters[:] = model.diameter_fiber\n \n ##### conductivity of leakage channels\n model.g_L = model.g_L_node/model.surface_aria_node\n\n ##### Surface arias\n # lateral surfaces\n m = [np.sqrt(abs(model.compartment_diameters[i+1] - model.compartment_diameters[i])**2 + model.compartment_lengths[i]**2)\n for i in range(0,model.nof_comps)]\n # total surfaces\n model.A_surface = [(model.compartment_diameters[i+1] + model.compartment_diameters[i])*np.pi*m[i]*0.5\n for i in range(0,model.nof_comps)]\n \n ##### Compartment middle point distances (needed for plots)\n model.distance_comps_middle[0] = 0.5*model.compartment_lengths[0]\n model.distance_comps_middle = np.zeros_like(model.compartment_lengths)\n for ii in range(0,model.nof_comps-1):\n model.distance_comps_middle[ii+1] = 0.5* model.compartment_lengths[ii] + 0.5* model.compartment_lengths[ii+1]\n \n ##### Capacities\n # initialize\n model.c_m = np.zeros_like(model.structure)*uF/cm**2\n # nodes\n model.c_m[model.structure == 2] = model.c_m_node/model.surface_aria_node\n # internodes\n model.c_m[structure == 1] = model.c_m_layer/(1+model.nof_myelin_layers)\n \n ##### Condactivities internodes\n # initialize\n model.g_m = np.zeros_like(model.structure)*msiemens/cm**2\n # internodes\n model.g_m[model.structure == 1] = model.g_m_layer/(1+model.nof_myelin_layers)\n \n ##### Axoplasmatic resistances\n model.compartment_center_diameters = np.zeros(model.nof_comps)*um\n 
model.compartment_center_diameters = (model.compartment_diameters[0:-1] + model.compartment_diameters[1:]) / 2\n model.R_a = (model.compartment_lengths*model.rho_in) / ((model.compartment_center_diameters*0.5)**2*np.pi)\n \n ##### Noise term\n model.gamma_Na_vector = np.zeros(model.nof_comps)*psiemens\n model.gamma_Na_vector[model.structure == 2] = model.gamma_Na\n model.noise_term = np.sqrt(model.A_surface*model.gamma_Na_vector*model.rho_Na)\n \n ##### Compartments to plot\n # get indexes of all compartments that are not segmented\n model.indexes_comps = np.where(model.structure == 2)[0]\n # calculate middle compartments of internodes\n model.middle_comps_internodes = np.ceil(model.indexes_comps[:-1] + model.nof_segments_internodes/2).astype(int)\n # create array with all compartments to plot\n model.comps_to_plot = np.sort(np.append(model.indexes_comps, model.middle_comps_internodes))\n \n ##### initialize defaultclock\n defaultclock.dt = dt\n\n ##### define morphology\n morpho = Section(n = model.nof_comps,\n length = model.compartment_lengths,\n diameter = model.compartment_diameters)\n \n ##### define neuron\n neuron = SpatialNeuron(morphology = morpho,\n model = model.eqs,\n Cm = model.c_m,\n Ri = model.rho_in,\n method=\"exponential_euler\")\n \n ##### initial values\n neuron.v = model.V_res\n neuron.m = model.m_init\n neuron.h = model.h_init\n neuron.n = model.n_init\n \n ##### Set parameter values of differential equations\n # conductances nodes\n neuron.gamma_Na = model.gamma_Na\n neuron.gamma_K = model.gamma_K\n neuron.g_L = model.g_L\n \n # conductances internodes\n neuron.g_myelin = model.g_m\n neuron.gamma_Na[np.asarray(np.where(model.structure == 1))] = 0*psiemens\n neuron.gamma_K[np.asarray(np.where(model.structure == 1))] = 0*psiemens\n neuron.g_L[np.asarray(np.where(model.structure == 1))] = 0*msiemens/cm**2\n \n # conductances peripheral terminal\n neuron.gamma_Na[np.where(model.structure == 0)[0]] = model.gamma_Na_terminal\n neuron.gamma_K[np.where(model.structure == 0)[0]] = model.gamma_K_terminal\n neuron.g_L[np.where(model.structure == 0)[0]] = model.g_L_terminal\n \n # conductances soma\n neuron.gamma_Na[index_soma] = 0*psiemens\n neuron.gamma_K[index_soma] = 0*psiemens\n neuron.g_L[index_soma] = 0*msiemens/cm**2\n \n # Nernst potential for leakage current\n neuron.E_Leak = model.E_L\n neuron.E_Leak[np.where(model.structure == 0)[0]] = E_L_terminal\n \n # other parameters\n neuron.V_res = model.V_res\n neuron.T_celsius = model.T_celsius\n neuron.E_Na = model.E_Na\n neuron.E_K = model.E_K\n neuron.rho_Na = model.rho_Na\n neuron.rho_K = model.rho_K\n \n return neuron, model", "def refugia_adj_5_full_2_iter2 (params, ns):\n #33 parameters \n nu1x, nuA, nu1a, nu2a, nu3a, nu1b, nu2b, nu3b, nu1c, nu2c, nu3c, nu1d, nu2d, nu3d, m0_12, m0_21, m1_12, m1_13, m1_21, m1_23, m1_31, m1_32, m3_12, m3_13, m3_21, m3_23, m3_31, m3_32, T0, T1, T2, T3, T4 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2])\n ## Population function and migration matrix for T0 (initial split; the definition of this time epoch differentiates this model from refugia_adj_5_simsplit_4epochs)\n nu_T0 = [nu1x, nuA]\n mig0 = numpy.array([[0, m0_12],[m0_21, 0]])\n fs.integrate(nu_T0, T0, m=mig0)\n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T1 (to reflect sum effect of all previous glacial-interglacial cycles)\n nu_T1 = [nu1a, nu2a, 
nu3a]\n mig1 = numpy.array([[0, m1_12, m1_13],[m1_21, 0, m1_23], [m1_31, m1_32, 0]]) \n fs.integrate(nu_T1, T1, m=mig1)\n ## Population function and migration matrix for T2 (to reflect period of isolation during last glacial)\n nu_T2 = [nu1b, nu2b, nu3b]\n fs.integrate(nu_T2, T2)\n ## Population function and migration matrix for T3 (to reflect inter-glacial expansion)\n nu_T3 = [nu1c, nu2c, nu3c]\n mig3 = numpy.array([[0, m3_12, m3_13],[m3_21, 0, m3_23], [m3_31, m3_32, 0]]) \n fs.integrate(nu_T3, T3, m=mig3)\n ## Population function and migration matrix for T3 (bottleneck to capture single population representation of lineage)\n nu_T4 = [nu1d, nu2d, nu3d]\n fs.integrate(nu_T4, T4) \n return fs", "def refugia_adj_5_full_2_iter1 (params, ns):\n #33 parameters \n nu1x, nuA, nu1a, nu2a, nu3a, nu1b, nu2b, nu3b, nu1c, nu2c, nu3c, nu1d, nu2d, nu3d, m0_12, m0_21, m1_12, m1_13, m1_21, m1_23, m1_31, m1_32, m3_12, m3_13, m3_21, m3_23, m3_31, m3_32, T0, T1, T2, T3, T4 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2])\n ## Population function and migration matrix for T0 (initial split; the definition of this time epoch differentiates this model from refugia_adj_5_simsplit_4epochs)\n nu_T0 = [nu1x, nuA]\n mig0 = numpy.array([[0, m0_12],[m0_21, 0]])\n fs.integrate(nu_T0, T0, m=mig0)\n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T1 (to reflect sum effect of all previous glacial-interglacial cycles)\n nu_T1 = [nu1a, nu2a, nu3a]\n mig1 = numpy.array([[0, m1_12, m1_13],[m1_21, 0, m1_23], [m1_31, m1_32, 0]]) \n fs.integrate(nu_T1, T1, m=mig1)\n ## Population function and migration matrix for T2 (to reflect period of isolation during last glacial)\n nu_T2 = [nu1b, nu2b, nu3b]\n fs.integrate(nu_T2, T2)\n ## Population function and migration matrix for T3 (to reflect inter-glacial expansion)\n nu_T3 = [nu1c, nu2c, nu3c]\n mig3 = numpy.array([[0, m3_12, m3_13],[m3_21, 0, m3_23], [m3_31, m3_32, 0]]) \n fs.integrate(nu_T3, T3, m=mig3)\n ## Population function and migration matrix for T3 (bottleneck to capture single population representation of lineage)\n nu_T4 = [nu1d, nu2d, nu3d]\n fs.integrate(nu_T4, T4) \n return fs", "def soft_update_critic(self):\n local_weights = np.array(self.critic_local.model.get_weights())\n target_weights = np.array(self.critic_target.model.get_weights())\n\n assert len(local_weights) == len(\n target_weights), ('Local and target model parameters must have '\n 'the same size')\n\n new_weights = self.tau * local_weights + (1 - self.tau) * target_weights\n self.critic_target.model.set_weights(new_weights)", "def update_parameters(parameters: Dict,\n grads: Dict, learning_rate: float) -> Dict:\n L = len(parameters)//2 # number of layers\n\n for l in range(1, L+1):\n parameters['W'+str(l)] -= learning_rate * grads['dW'+str(l)]\n parameters['b'+str(l)] -= learning_rate * grads['db'+str(l)]\n\n return parameters", "def update(self, dt):\n # handle spawing events from the bursts\n for burst in self.bursts.keys():\n b = self.bursts[burst] \n trigger = b.update(dt) \n if trigger: \n pool, gain, position = trigger \n name = self.unique_name()\n self.spawn_pool(pool, name)\n snd = self.sounds[name]\n snd.gain.set_with_time(gain, 0.0)\n snd.position.set_with_time(np.array(position), 0.0) \n self._start_sound(snd,[])\n \n kill_list = [] \n # and then each of layers (e.g. 
to make sure fades take effect)\n for sound in self.sounds.keys():\n snd = self.sounds[sound]\n if snd is None or snd.finished:\n kill_list.append(sound)\n else:\n snd.update(dt)\n \n # each of the channel groups (must do this after sounds for override to work)\n \n for ch_group in self.channel_groups.keys():\n self.channel_groups[ch_group].update(dt) \n \n for sound in kill_list:\n if self.sounds[sound] is not None and self.sounds[sound].transient:\n logging.debug(\"Removing finished sound %s\" % sound) \n del self.sounds[sound]\n # update FMOD \n system.update()", "def dvs_update_network(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n self.show_step(2)\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n net_1 = os_conn.create_network(\n network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n self.show_step(3)\n os_conn.neutron.update_network(net_1[\"id\"],\n {\"network\": {\"name\": 'net_2'}})\n\n assert_true(os_conn.get_network('net_2')['id'] == net_1['id'])\n\n self.show_step(4)\n default_net = os_conn.nova.networks.find(label=self.inter_net_name)\n os_conn.neutron.update_network(\n default_net.id, {\"network\": {\"name\": 'spring'}})\n\n assert_true(os_conn.get_network('spring')['id'] == default_net.id)", "def update_dht(self, d_ip, d_port):\n self.dht_ip = d_ip\n self.dht_port = d_port", "def compile_update_default(nnet, inputs, targets):\n\n floatX = Cfg.floatX\n C = Cfg.C\n\n if len(nnet.all_layers) > 1:\n feature_layer = nnet.all_layers[-2]\n else:\n feature_layer = nnet.input_layer\n final_layer = nnet.svm_layer\n trainable_params = lasagne.layers.get_all_params(final_layer,\n trainable=True)\n\n # Regularization\n if Cfg.weight_decay:\n l2_penalty = (floatX(0.5) / C) * get_l2_penalty(nnet, Cfg.include_bias)\n else:\n l2_penalty = T.cast(0, dtype='floatX')\n\n # Backpropagation\n prediction = lasagne.layers.get_output(final_layer, inputs=inputs,\n deterministic=False)\n objective, train_acc = final_layer.objective(prediction, targets)\n train_loss = T.cast((objective) / targets.shape[0], dtype='floatX')\n train_acc = T.cast(train_acc * 1. / targets.shape[0], dtype='floatX')\n train_obj = l2_penalty + train_loss\n updates = get_updates(nnet, train_obj, trainable_params, solver=nnet.solver)\n nnet.backprop = theano.function([inputs, targets],\n [train_obj, train_acc],\n updates=updates)\n\n # Hinge loss\n nnet.hinge_loss = theano.function([inputs, targets],\n [train_loss, train_acc])\n\n # Forwardpropagation\n test_prediction = lasagne.layers.get_output(final_layer, inputs=inputs,\n deterministic=True)\n if nnet.data.n_classes == 2:\n scores = test_prediction[:, 1] - test_prediction[:, 0]\n else:\n scores = T.zeros_like(targets)\n objective, test_acc = final_layer.objective(test_prediction, targets)\n test_loss = T.cast(objective / targets.shape[0], dtype='floatX')\n test_acc = T.cast(test_acc * 1. 
/ targets.shape[0], dtype='floatX')\n test_obj = l2_penalty + test_loss\n # get network feature representation\n test_rep = lasagne.layers.get_output(feature_layer, inputs=inputs,\n deterministic=True)\n test_rep_norm = test_rep.norm(L=2, axis=1)\n nnet.forward = theano.function([inputs, targets],\n [test_obj, test_acc, scores, l2_penalty,\n test_rep_norm, test_loss])", "def update(all=True, QDomni=False, omni=False, omni2=False, leapsecs=False, PSDdata=False):\n from spacepy.datamodel import SpaceData, dmarray, fromCDF, toHDF5\n from spacepy import DOT_FLN, config\n\n if sys.version_info[0]<3:\n import urllib as u\n else:\n import urllib.request as u\n\n if 'user_agent' in config and config['user_agent']:\n class AppURLopener(u.FancyURLopener):\n version = config['user_agent']\n u._urlopener = AppURLopener()\n\n datadir = os.path.join(DOT_FLN, 'data')\n if not os.path.exists(datadir):\n os.mkdir(datadir)\n os.chmod(datadir, 0o777)\n\n #leapsec_url ='ftp://maia.usno.navy.mil/ser7/tai-utc.dat'\n leapsec_fname = os.path.join(datadir, 'tai-utc.dat')\n\n # define location for getting omni\n #omni_url = 'ftp://virbo.org/QinDenton/hour/merged/latest/WGhour-latest.d.zip'\n omni_fname_zip = os.path.join(datadir, 'WGhour-latest.d.zip')\n omni2_fname_zip = os.path.join(datadir, 'omni2-latest.cdf.zip')\n omni_fname_pkl = os.path.join(datadir, 'omnidata.pkl')\n omni_fname_json = os.path.join(datadir, 'omnidata.txt')\n omni_fname_h5 = os.path.join(datadir, 'omnidata.h5')\n omni2_fname_h5 = os.path.join(datadir, 'omni2data.h5')\n\n PSDdata_fname = os.path.join('psd_dat.sqlite')\n\n if (omni or omni2 or QDomni or leapsecs or PSDdata):\n all = False #if an option is explicitly selected, turn 'all' off\n\n if all == True:\n omni = True\n omni2 = True\n leapsecs = True\n\n if QDomni == True:\n omni = True\n omni2 = True\n\n if omni == True:\n # retrieve omni, unzip and save as table\n print(\"Retrieving Qin_Denton file ...\")\n u.urlretrieve(config['qindenton_url'], omni_fname_zip, reporthook=progressbar)\n fh_zip = zipfile.ZipFile(omni_fname_zip)\n data = fh_zip.read(fh_zip.namelist()[0])\n fh_zip.close()\n if not str is bytes:\n data = data.decode('ascii')\n A = np.array(data.split('\\n'))\n print(\"Now processing (this may take a minute) ...\")\n\n # create a keylist\n keys = A[0].split()\n keys.remove('8')\n keys.remove('6')\n keys[keys.index('status')] = '8_status'\n keys[keys.index('stat')] = '6_status'\n keys[keys.index('dst')] = 'Dst'\n keys[keys.index('kp')] = 'Kp'\n #keys[keys.index('Hr')] = 'Hr'\n keys[keys.index('V_SW')] = 'velo'\n keys[keys.index('Den_P')] = 'dens'\n keys[keys.index('Day')] = 'DOY'\n keys[keys.index('Year')] = 'Year'\n\n # remove keyword lines and empty lines as well\n idx = np.where(A != '')[0]\n # put it into a 2D table\n tab = [val.split() for val in A[idx[1:]]]\n stat8 = [val[11] for val in tab]\n stat6 = [val[27] for val in tab]\n\n tab = np.array(tab, dtype='float32')\n # take out where Dst not available ( = 99999) or year == 0\n idx = np.where((tab[:,12] !=99.0) & (tab[:,0] != 0))[0]\n tab = tab[idx,:]\n stat8 = np.array(stat8)[idx]\n stat6 = np.array(stat6)[idx]\n\n omnidata = SpaceData()\n # sort through and make an omni dictionary\n # extract keys from line above\n for ikey, i in zip(keys,range(len(keys))):\n if ikey in ('Year', 'DOY', 'Hr', 'Dst'):\n omnidata[ikey] = dmarray(tab[:, i], dtype='int16')\n else:\n omnidata[ikey] = dmarray(tab[:,i])\n\n # add TAI to omnidata\n nTAI = len(omnidata['DOY'])\n\n # add interpolation quality flags\n omnidata['Qbits'] = 
SpaceData()\n arr = dmarray(stat8.view(stat8.dtype.kind + '1'),\n dtype=np.byte).reshape((8, nTAI))\n for ik, key in enumerate(['ByIMF', 'BzIMF', 'velo', 'dens', 'Pdyn', 'G1', 'G2', 'G3']):\n omnidata['Qbits'][key] = arr[ik,:]\n if stat6.dtype.str[1:] == 'U6':\n stat6 = np.require(stat6, dtype='|S6')\n arr = dmarray(stat6.view(stat6.dtype.kind + '1'),\n dtype=np.byte).reshape((6, nTAI))\n for ik, key in enumerate(['W1', 'W2', 'W3', 'W4', 'W5', 'W6']):\n omnidata['Qbits'][key] = arr[ik,:]\n\n #remove string status keys\n foo = omnidata.pop('6_status')\n foo = omnidata.pop('8_status')\n\n # add time information to omni pickle (long loop)\n omnidata['UTC'] = dmarray([datetime.datetime(int(omnidata['Year'][i]), 1, 1) +\n datetime.timedelta(days=int(omnidata['DOY'][i]) - 1,\n hours=int(omnidata['Hr'][i]))\n for i in range(nTAI)])\n\n omnidata['ticks'] = spt.Ticktock(omnidata['UTC'], 'UTC')\n omnidata['RDT'] = omnidata['ticks'].RDT\n del omnidata['ticks'] #Can be quickly regenerated on import\n del omnidata['Year']\n del omnidata['Hr']\n\n print(\"Now saving... \")\n ##for now, make one file -- think about whether monthly/annual files makes sense\n toHDF5(omni_fname_h5, omnidata)\n\n # delete left-overs\n os.remove(omni_fname_zip)\n\n\n if omni2 == True:\n # adding missing values from original omni2\n print(\"Retrieving OMNI2 file ...\")\n u.urlretrieve(config['omni2_url'], omni2_fname_zip, reporthook=progressbar)\n fh_zip = zipfile.ZipFile(omni2_fname_zip)\n fh_zip.extractall();\n fh_zip.close()\n omnicdf = fromCDF(fh_zip.namelist()[0])\n #add RDT\n omnicdf['RDT'] = spt.Ticktock(omnicdf['Epoch'],'UTC').RDT\n #remove keys that get in the way\n del omnicdf['Hour']\n del omnicdf['Year']\n del omnicdf['Decimal_Day']\n\n # save as HDF5\n toHDF5(omni2_fname_h5, omnicdf)\n\n # delete left-overs\n os.remove(omni2_fname_zip)\n\n if leapsecs == True:\n print(\"Retrieving leapseconds file ... \")\n u.urlretrieve(config['leapsec_url'], leapsec_fname)\n\n if PSDdata == True:\n print(\"Retrieving PSD sql database\")\n u.urlretrieve(config['psddata_url'], PSDdata_fname, reporthook=progressbar)\n return datadir", "def update(self, dLds, alpha, beta):\n T = len(self.x)\n dLdx = np.zeros((T, self.input_size))\n self.nodes.reset_error()\n for t in xrange(T):\n dLdp = dLds[t] * self.acfun.derivate(self.s[t])\n self.nodes.dLdu += np.outer(dLdp, self.x[t])\n if self.en_bias: self.nodes.dLdb += dLdp\n dLdx[t] = np.dot(self.nodes.u.T, dLdp)\n self.nodes.update(alpha, beta)\n return dLdx" ]
[ "0.64842385", "0.64803463", "0.61421794", "0.60652184", "0.59276474", "0.5910837", "0.59055424", "0.5872152", "0.5856507", "0.58434814", "0.5840877", "0.5840877", "0.5834914", "0.58075476", "0.5797457", "0.57930094", "0.57774806", "0.5751198", "0.5739794", "0.5714073", "0.5667545", "0.5660706", "0.56560606", "0.5639968", "0.56377673", "0.5631715", "0.56044984", "0.5599487", "0.55933344", "0.55932194", "0.55609125", "0.5545952", "0.55432314", "0.55249393", "0.5522628", "0.5503821", "0.5502001", "0.5473632", "0.54691625", "0.54598415", "0.5451368", "0.54419506", "0.5428968", "0.54191136", "0.53987074", "0.5389267", "0.5364289", "0.53624165", "0.5360248", "0.5357749", "0.5355532", "0.53513825", "0.5350809", "0.53486437", "0.5346941", "0.5345274", "0.5345252", "0.53228575", "0.5321542", "0.5321187", "0.5300101", "0.5294187", "0.52795935", "0.52731436", "0.5272123", "0.5259721", "0.5244862", "0.524229", "0.52363706", "0.52336556", "0.5228083", "0.5215178", "0.5210353", "0.5209947", "0.52074254", "0.5191916", "0.518439", "0.5168147", "0.51617634", "0.51456016", "0.514537", "0.51448464", "0.5136984", "0.5133746", "0.5132213", "0.512043", "0.5104936", "0.51041746", "0.5096266", "0.5096009", "0.50854415", "0.5082899", "0.5081985", "0.50707906", "0.50648206", "0.50626355", "0.5062118", "0.50606096", "0.5058822", "0.50587755" ]
0.6279729
2
Slowly update the network using every-step partial network copies modulated by the parameter TAU.
def _soft_update(self, active, target):
    for t_param, param in zip(target.parameters(), active.parameters()):
        t_param.data.copy_(self.tau*param.data + (1-self.tau)*t_param.data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_target_network(self, tau):\n for t, e in zip(\n self.target_network.trainable_variables, self.online_network.trainable_variables\n ):\n t.assign(t * (1-tau) + e * tau)", "def update_target_network(self, tau):\n for p_target, p_local in zip(self.q_network_target.parameters(), self.q_network_local.parameters()):\n p_target.data.copy_(tau * p_local.data + (1.0-tau) * p_target.data)", "def _soft_update_target_network(self):\n\n # Update the target network\n for target_param, param in zip(self.actor_target_network.parameters(), self.actor_network.parameters()):\n target_param.data.copy_((1-self.args.tau) * target_param.data + self.args.tau * param.data)\n\n # Update the critic network\n for target_param, param in zip(self.critic_target_network.parameters(), self.critic_network.parameters()):\n target_param.data.copy_((1-self.args.tau) * target_param.data + self.args.tau * param.data)", "def soft_update(source_net, target_net, tau):\n for target_param, param in zip(target_net.parameters(), source_net.parameters()):\n target_param.data.copy_(\n target_param.data * (1.0 - tau) + param.data * tau\n )", "def update_net(self) -> None:\n self.units.update_net()", "def hard_update_target_network(self,step):\n \n if step % self.C == 0:\n pars = self.model.get_weights()\n self.target_model.set_weights(pars)", "def hard_update_target_network(self,step):\n \n if step % self.C == 0:\n pars = self.model.get_weights()\n self.target_model.set_weights(pars)", "def update_target_network(self):\n\n\t\tprint \"Updating Target DQN...\"\n\t\t\n\t\tself.update_operation.run()", "def update_network(self):\n\n device = torch.device(\"cpu\")\n self.model = ProLoNet(input_dim=13,\n weights=None,\n comparators=None,\n leaves=32,\n output_dim=1,\n bayesian_embedding_dim=8,\n alpha=1.5,\n use_gpu=False,\n vectorized=True,\n is_value=True).to(device)\n\n self.embedding_optimizer = torch.optim.RMSprop([{'params': self.model.bayesian_embedding.parameters()}], lr=.1)\n self.embedding_list = [torch.ones(3) * 1 / 3 for i in range(2000)]\n self.opt = torch.optim.RMSprop(\n [{'params': list(self.model.parameters())[:-1]}, {'params': self.model.bayesian_embedding.parameters(), 'lr': .01}], lr=.01)\n\n criterion = torch.nn.BCELoss()\n\n n_epochs = 4000 + self.global_schedule_num * 3\n for epoch in range(n_epochs):\n which_schedule = np.random.randint(len(self.data_so_far))\n timestep_within_schedule = np.random.randint(len(self.teacher_actions[which_schedule]))\n\n index_within_network_state = timestep_within_schedule * 20\n timestep_data_from_agg = self.data_so_far[which_schedule][index_within_network_state:index_within_network_state+20]\n task = self.teacher_actions[which_schedule][timestep_within_schedule]\n # set the embedding\n self.model.set_bayesian_embedding(self.embedding_list[which_schedule].clone())\n # update loop\n\n phi_i_num = task\n phi_i = self.get_features_from_timestep_data_from_agg(timestep_data_from_agg, phi_i_num)\n phi_i_numpy = np.asarray(phi_i)\n loss_counter = 0\n # iterate over pairwise comparisons\n for counter in range(0, 0 + 20):\n if counter == phi_i_num:\n continue\n else:\n phi_j = self.get_features_from_timestep_data_from_agg(timestep_data_from_agg, counter)\n phi_j_numpy = np.asarray(phi_j)\n feature_input = phi_i_numpy - phi_j_numpy\n\n if self.use_gpu:\n feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)).cuda())\n label = Variable(torch.Tensor(torch.ones((1, 1))).cuda())\n else:\n feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)))\n label = 
Variable(torch.Tensor(torch.ones((1, 1))))\n sig = torch.nn.Sigmoid()\n output = sig(self.model(feature_input))\n loss = criterion(output, label)\n # prepare optimizer, compute gradient, update params\n loss_counter += loss.item()\n self.opt.zero_grad()\n loss.backward()\n # torch.nn.utils.clip_grad_norm_(self.model.parameters(), 0.5)\n self.opt.step()\n\n for counter in range(0, 0 + 20):\n if counter == phi_i_num:\n continue\n else:\n phi_j = self.get_features_from_timestep_data_from_agg(timestep_data_from_agg, counter)\n phi_j_numpy = np.asarray(phi_j)\n feature_input = phi_j_numpy - phi_i_numpy\n\n if self.use_gpu:\n feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)).cuda())\n label = Variable(torch.Tensor(torch.zeros((1, 1))).cuda())\n else:\n feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)))\n label = Variable(torch.Tensor(torch.zeros((1, 1))))\n sig = torch.nn.Sigmoid()\n output = sig(self.model.forward(feature_input))\n\n self.opt.zero_grad()\n loss = criterion(output, label)\n loss_counter += loss.item()\n\n loss.backward()\n # torch.nn.utils.clip_grad_norm_(self.model.parameters(), 0.5)\n self.opt.step()\n self.loss_array.append(loss_counter / 38)\n self.embedding_list[which_schedule] = torch.Tensor(self.model.get_bayesian_embedding().detach().cpu().numpy()).clone() # very ugly", "def update_network_parameters(self, tau=None):\n\n #Is used during the first iteration such that the target networks get the same parameters of the normal networks (hard update)\n if tau is None:\n tau = self.tau\n\n #Update the target_actor weights\n weights = []\n targets = self.target_actor.weights\n for i, weight in enumerate(self.actor.weights):\n weights.append(weight * tau + targets[i]*(1-tau))\n\n self.target_actor.set_weights(weights)\n\n #Update the target_critic_1 weights\n weights = []\n targets = self.target_critic_1.weights\n for i, weight in enumerate(self.critic_1.weights):\n weights.append(weight * tau + targets[i]*(1-tau))\n\n self.target_critic_1.set_weights(weights)\n\n #Update the target_critic_2 weights\n weights = []\n targets = self.target_critic_2.weights\n for i, weight in enumerate(self.critic_2.weights):\n weights.append(weight * tau + targets[i]*(1-tau))\n\n self.target_critic_2.set_weights(weights)", "def update_target_network(self):\n self.target_dqn.set_weights.remote(self.dqn.get_weights.remote())", "def update_net(optimizer):\n assert kl_train_dataset.bp_mode\n frames_gen, frame_cnt, rel_props, prop_ticks, prop_scaling = kl_train_dataset[index]\n\n optimizer.zero_grad()\n \n num_crop = 1\n length = 3\n if args.modality == 'Flow':\n length = 10\n elif args.modality == 'RGBDiff':\n length = 18\n \n for frames in frames_gen:\n # frames.shape == [frame_batch_size * num_crop * 3, 224, 224]\n assert len(frames) == length * frame_cnt\n input_var = torch.autograd.Variable(frames.view(-1, length, frames.size(-2), frames.size(-1)).cuda())\n base_out = net(input_var, None, None, None, None)\n assert base_out.size(0) == frame_cnt and base_out.size(1) == base_out_dim\n step_features = base_out.mean(dim=0).unsqueeze(0)\n gate, glcu_task_pred = net.glcu(step_features)\n gate = gate.repeat(1, frame_cnt).view(frame_cnt, base_out_dim)\n assert glcu_task_pred.size(0) == 1\n glcu_task_pred = F.softmax(glcu_task_pred.squeeze(), dim=0)\n if net.additive_glcu:\n base_out = base_out + gate\n else:\n base_out = base_out * gate\n\n output = net.test_fc(base_out)\n assert output.size(0) == frame_cnt and output.size(1) == output_dim\n act_scores, comp_scores, 
reg_scores = reorg_stpp.forward(output, prop_ticks, prop_scaling, bp_mode=True)\n\n # Task Head\n combined_scores = F.softmax(act_scores[:, 1:], dim=1) * torch.exp(comp_scores)\n combined_scores = combined_scores.mean(dim=0).unsqueeze(0)\n task_pred = net.task_head(combined_scores)\n assert task_pred.size(0) == 1\n task_pred = F.softmax(net.task_head(combined_scores).squeeze(), dim=0)\n\n loss = KL(task_pred, glcu_task_pred)\n loss.backward()\n torch.cuda.empty_cache() # To empty the cache from previous iterations\n break\n\n optimizer.step()\n optimizer.zero_grad()\n torch.cuda.empty_cache()\n\n return float(loss.data), frame_cnt", "def update_target_q_network(self):\n assert self.target_network != None\n self.target_network.run_copy()", "def soft_update_target_network(self):\n \n pars_behavior = self.model.get_weights() # these have form [W1, b1, W2, b2, ..], Wi = weights of layer i\n pars_target = self.target_model.get_weights() # bi = biases in layer i\n \n for par_behavior,par_target in zip(pars_behavior,pars_target):\n par_target = par_target*(1-self.tau) + par_behavior*self.tau\n pars_target[ctr] = par_target\n\n self.target_model.set_weights(pars_target)", "def soft_update_target_network(self):\n \n pars_behavior = self.model.get_weights() # these have form [W1, b1, W2, b2, ..], Wi = weights of layer i\n pars_target = self.target_model.get_weights() # bi = biases in layer i\n \n ctr = 0\n for par_behavior,par_target in zip(pars_behavior,pars_target):\n par_target = par_target*(1-self.tau) + par_behavior*self.tau\n pars_target[ctr] = par_target\n ctr += 1\n\n self.target_model.set_weights(pars_target)", "def sync(net, net_tar):\n for var, var_tar in zip(net.trainable_weights,\n net_tar.trainable_weights):\n var_tar.assign(var)", "def update_target_network(self):\n variables = self.online_network.trainable_variables\n variables_copy = [tf.Variable(v) for v in variables]\n self.target_network.trainable_variables = variables_copy", "def update_networks(self):\n\t\t# layer 1 update\n\t\tself.W1_tv = tf.assign(self.W1_tv, self.W1_av)\n\t\tself.b1_tv = tf.assign(self.b1_tv, self.b1_av)\n\n\t\t# layer 2 update\n\t\tself.W2_tv = tf.assign(self.W2_tv, self.W2_av)\n\t\tself.b2_tv = tf.assign(self.b2_tv, self.b2_av)\n\n\t\t# layer 3 update\n\t\tself.W3_tv = tf.assign(self.W3_tv, self.W3_av)\n\t\tself.b3_tv = tf.assign(self.b3_tv, self.b3_av)", "def sync_target_network(self):\n for t, e in zip(\n self.target_network.trainable_variables, self.online_network.trainable_variables\n ):\n t.assign(e)", "def weight_update_steepest_descent(self, network):\n for l, layer in enumerate(network.layers):\n layer.b -= self.alpha * self.dc_db[l]\n layer.q -= self.alpha * self.dc_dq[l]\n layer.rx_inp -= self.alpha * self.dc_drx_inp[l]\n layer.ry_inp -= self.alpha * self.dc_dry_inp[l]\n layer.rx_pos_out -= self.alpha * self.dc_drx_pos_out[l]\n layer.ry_pos_out -= self.alpha * self.dc_dry_pos_out[l]\n layer.rx_neg_out -= self.alpha * self.dc_drx_neg_out[l]\n layer.ry_neg_out -= self.alpha * self.dc_dry_neg_out[l]", "def weight_update_adadelta(self, network):\n gamma = self.gamma\n one_m_gamma = 1.0 - gamma\n small = 0.001\n\n if self.ms_b is None or self.ms_q is None:\n self.ms_b = []\n self.ms_q = []\n self.ms_rx_inp = []\n self.ms_ry_inp = []\n self.ms_rx_pos_out = []\n self.ms_ry_pos_out = []\n self.ms_rx_neg_out = []\n self.ms_ry_neg_out = []\n for l, layer in enumerate(network.layers):\n self.ms_b.append(np.zeros(layer.b.shape))\n self.ms_q.append(np.zeros(layer.q.shape))\n 
self.ms_rx_inp.append(np.zeros(layer.input_size))\n self.ms_ry_inp.append(np.zeros(layer.input_size))\n self.ms_rx_pos_out.append(np.zeros(layer.output_size))\n self.ms_ry_pos_out.append(np.zeros(layer.output_size))\n self.ms_rx_neg_out.append(np.zeros(layer.output_size))\n self.ms_ry_neg_out.append(np.zeros(layer.output_size))\n\n # Initialize deltas to one such that first step is approximately equivalent to RMSprop\n if self.del_b is None or self.del_q is None:\n self.del_b = []\n self.del_q = []\n self.del_rx_inp = []\n self.del_ry_inp = []\n self.del_rx_pos_out = []\n self.del_ry_pos_out = []\n self.del_rx_neg_out = []\n self.del_ry_neg_out = []\n for l, layer in enumerate(network.layers):\n self.del_b.append(np.ones(layer.b.shape) * small)\n self.del_q.append(np.ones(layer.q.shape) * small)\n self.del_rx_inp.append(np.ones(layer.input_size) * small)\n self.del_ry_inp.append(np.ones(layer.input_size) * small)\n self.del_rx_pos_out.append(np.ones(layer.output_size) * small)\n self.del_ry_pos_out.append(np.ones(layer.output_size) * small)\n self.del_rx_neg_out.append(np.ones(layer.output_size) * small)\n self.del_ry_neg_out.append(np.ones(layer.output_size) * small)\n\n for l, layer in enumerate(network.layers):\n self.ms_b[l] = gamma * self.ms_b[l] + one_m_gamma * self.dc_db[l] ** 2\n self.ms_q[l] = gamma * self.ms_q[l] + one_m_gamma * self.dc_dq[l] ** 2\n self.ms_rx_inp[l] = gamma * self.ms_rx_inp[l] + one_m_gamma * self.dc_drx_inp[l] ** 2\n self.ms_ry_inp[l] = gamma * self.ms_ry_inp[l] + one_m_gamma * self.dc_dry_inp[l] ** 2\n self.ms_rx_pos_out[l] = gamma * self.ms_rx_pos_out[l] + one_m_gamma * self.dc_drx_pos_out[l] ** 2\n self.ms_ry_pos_out[l] = gamma * self.ms_ry_pos_out[l] + one_m_gamma * self.dc_dry_pos_out[l] ** 2\n self.ms_rx_neg_out[l] = gamma * self.ms_rx_neg_out[l] + one_m_gamma * self.dc_drx_neg_out[l] ** 2\n self.ms_ry_neg_out[l] = gamma * self.ms_ry_neg_out[l] + one_m_gamma * self.dc_dry_neg_out[l] ** 2\n\n del_b = -self.alpha * self.dc_db[l] * self.sqrt_eps(self.del_b[l]) / self.sqrt_eps(self.ms_b[l])\n del_q = -self.alpha * self.dc_dq[l] * self.sqrt_eps(self.del_q[l]) / self.sqrt_eps(self.ms_q[l])\n del_rx_inp = -self.alpha * self.dc_drx_inp[l] * self.sqrt_eps(self.del_rx_inp[l]) / self.sqrt_eps(self.ms_rx_inp[l])\n del_ry_inp = -self.alpha * self.dc_dry_inp[l] * self.sqrt_eps(self.del_ry_inp[l]) / self.sqrt_eps(self.ms_ry_inp[l])\n del_rx_pos_out = -self.alpha * self.dc_drx_pos_out[l] * self.sqrt_eps(self.del_rx_pos_out[l]) / self.sqrt_eps(self.ms_rx_pos_out[l])\n del_ry_pos_out = -self.alpha * self.dc_dry_pos_out[l] * self.sqrt_eps(self.del_ry_pos_out[l]) / self.sqrt_eps(self.ms_ry_pos_out[l])\n del_rx_neg_out = -self.alpha * self.dc_drx_neg_out[l] * self.sqrt_eps(self.del_rx_neg_out[l]) / self.sqrt_eps(self.ms_rx_neg_out[l])\n del_ry_neg_out = -self.alpha * self.dc_dry_neg_out[l] * self.sqrt_eps(self.del_ry_neg_out[l]) / self.sqrt_eps(self.ms_ry_neg_out[l])\n\n layer.b += del_b\n layer.q += del_q\n layer.rx_inp += del_rx_inp\n layer.ry_inp += del_ry_inp\n layer.rx_pos_out += del_rx_pos_out\n layer.ry_pos_out += del_ry_pos_out\n layer.rx_neg_out += del_rx_neg_out\n layer.ry_neg_out += del_ry_neg_out\n\n self.del_b[l] = gamma * self.del_b[l] + one_m_gamma * del_b ** 2\n self.del_q[l] = gamma * self.del_q[l] + one_m_gamma * del_q ** 2\n self.del_rx_inp[l] = gamma * self.del_rx_inp[l] + one_m_gamma * del_rx_inp ** 2\n self.del_ry_inp[l] = gamma * self.del_ry_inp[l] + one_m_gamma * del_ry_inp ** 2\n self.del_rx_pos_out[l] = gamma * self.del_rx_pos_out[l] + one_m_gamma * 
del_rx_pos_out ** 2\n self.del_ry_pos_out[l] = gamma * self.del_ry_pos_out[l] + one_m_gamma * del_ry_pos_out ** 2\n self.del_rx_neg_out[l] = gamma * self.del_rx_neg_out[l] + one_m_gamma * del_rx_neg_out ** 2\n self.del_ry_neg_out[l] = gamma * self.del_ry_neg_out[l] + one_m_gamma * del_ry_neg_out ** 2", "def update_target_net(self):\n if self.n_steps % self.target_update_interval == 0:\n self.target_q.load_state_dict(self.working_q.state_dict())", "def slow(newETM): #Status: Done, not tested\r\n pass", "def refugia_adj_5_full_2_iter3 (params, ns):\n #33 parameters \n nu1x, nuA, nu1a, nu2a, nu3a, nu1b, nu2b, nu3b, nu1c, nu2c, nu3c, nu1d, nu2d, nu3d, m0_12, m0_21, m1_12, m1_13, m1_21, m1_23, m1_31, m1_32, m3_12, m3_13, m3_21, m3_23, m3_31, m3_32, T0, T1, T2, T3, T4 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2])\n ## Population function and migration matrix for T0 (initial split; the definition of this time epoch differentiates this model from refugia_adj_5_simsplit_4epochs)\n nu_T0 = [nu1x, nuA]\n mig0 = numpy.array([[0, m0_12],[m0_21, 0]])\n fs.integrate(nu_T0, T0, m=mig0)\n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T1 (to reflect sum effect of all previous glacial-interglacial cycles)\n nu_T1 = [nu1a, nu2a, nu3a]\n mig1 = numpy.array([[0, m1_12, m1_13],[m1_21, 0, m1_23], [m1_31, m1_32, 0]]) \n fs.integrate(nu_T1, T1, m=mig1)\n ## Population function and migration matrix for T2 (to reflect period of isolation during last glacial)\n nu_T2 = [nu1b, nu2b, nu3b]\n fs.integrate(nu_T2, T2)\n ## Population function and migration matrix for T3 (to reflect inter-glacial expansion)\n nu_T3 = [nu1c, nu2c, nu3c]\n mig3 = numpy.array([[0, m3_12, m3_13],[m3_21, 0, m3_23], [m3_31, m3_32, 0]]) \n fs.integrate(nu_T3, T3, m=mig3)\n ## Population function and migration matrix for T3 (bottleneck to capture single population representation of lineage)\n nu_T4 = [nu1d, nu2d, nu3d]\n fs.integrate(nu_T4, T4) \n return fs", "def update_target_net(self, sess):\n sess.run(self.update_target_net_op)", "def update_network(self, a, batch_size):\n for layer in self.layers:\n layer.weights_update(a, self.alpha, self.l2_lambda, batch_size)\n a = layer.a", "def refugia_adj_5_full_2_iter1 (params, ns):\n #33 parameters \n nu1x, nuA, nu1a, nu2a, nu3a, nu1b, nu2b, nu3b, nu1c, nu2c, nu3c, nu1d, nu2d, nu3d, m0_12, m0_21, m1_12, m1_13, m1_21, m1_23, m1_31, m1_32, m3_12, m3_13, m3_21, m3_23, m3_31, m3_32, T0, T1, T2, T3, T4 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2])\n ## Population function and migration matrix for T0 (initial split; the definition of this time epoch differentiates this model from refugia_adj_5_simsplit_4epochs)\n nu_T0 = [nu1x, nuA]\n mig0 = numpy.array([[0, m0_12],[m0_21, 0]])\n fs.integrate(nu_T0, T0, m=mig0)\n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T1 (to reflect sum effect of all previous glacial-interglacial cycles)\n nu_T1 = [nu1a, nu2a, nu3a]\n mig1 = numpy.array([[0, m1_12, m1_13],[m1_21, 0, m1_23], [m1_31, m1_32, 0]]) \n fs.integrate(nu_T1, T1, m=mig1)\n ## Population function and migration matrix for T2 (to reflect period of isolation during last glacial)\n nu_T2 = [nu1b, nu2b, nu3b]\n fs.integrate(nu_T2, T2)\n ## Population 
function and migration matrix for T3 (to reflect inter-glacial expansion)\n nu_T3 = [nu1c, nu2c, nu3c]\n mig3 = numpy.array([[0, m3_12, m3_13],[m3_21, 0, m3_23], [m3_31, m3_32, 0]]) \n fs.integrate(nu_T3, T3, m=mig3)\n ## Population function and migration matrix for T3 (bottleneck to capture single population representation of lineage)\n nu_T4 = [nu1d, nu2d, nu3d]\n fs.integrate(nu_T4, T4) \n return fs", "def _momentum_update(self):\n for param_ol, param_tgt in zip(self.online_net.parameters(),\n self.target_net.parameters()):\n param_tgt.data = param_tgt.data * self.momentum + \\\n param_ol.data * (1. - self.momentum)", "def refugia_adj_5_full_2_iter5 (params, ns):\n #33 parameters \n nu1x, nuA, nu1a, nu2a, nu3a, nu1b, nu2b, nu3b, nu1c, nu2c, nu3c, nu1d, nu2d, nu3d, m0_12, m0_21, m1_12, m1_13, m1_21, m1_23, m1_31, m1_32, m3_12, m3_13, m3_21, m3_23, m3_31, m3_32, T0, T1, T2, T3, T4 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2])\n ## Population function and migration matrix for T0 (initial split; the definition of this time epoch differentiates this model from refugia_adj_5_simsplit_4epochs)\n nu_T0 = [nu1x, nuA]\n mig0 = numpy.array([[0, m0_12],[m0_21, 0]])\n fs.integrate(nu_T0, T0, m=mig0)\n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T1 (to reflect sum effect of all previous glacial-interglacial cycles)\n nu_T1 = [nu1a, nu2a, nu3a]\n mig1 = numpy.array([[0, m1_12, m1_13],[m1_21, 0, m1_23], [m1_31, m1_32, 0]]) \n fs.integrate(nu_T1, T1, m=mig1)\n ## Population function and migration matrix for T2 (to reflect period of isolation during last glacial)\n nu_T2 = [nu1b, nu2b, nu3b]\n fs.integrate(nu_T2, T2)\n ## Population function and migration matrix for T3 (to reflect inter-glacial expansion)\n nu_T3 = [nu1c, nu2c, nu3c]\n mig3 = numpy.array([[0, m3_12, m3_13],[m3_21, 0, m3_23], [m3_31, m3_32, 0]]) \n fs.integrate(nu_T3, T3, m=mig3)\n ## Population function and migration matrix for T3 (bottleneck to capture single population representation of lineage)\n nu_T4 = [nu1d, nu2d, nu3d]\n fs.integrate(nu_T4, T4) \n return fs", "def momentum_update(self, online_net, target_net, momentum):\n for param_ol, param_tgt in zip(online_net.parameters(), target_net.parameters()):\n param_tgt.data = param_tgt.data * momentum + param_ol.data * (1. 
- momentum)", "def update_weights(self, a, obs, t):\n \n new_w_tilde = np.zeros(self.Npar) # the unnormalized new weight vector\n for k in range(self.Npar):\n lh = model.calculate_likelihood(self.Particles[k], a, obs)\n #print('likelihood =', lh)\n new_w_tilde[k] = lh * self.w[k]\n new_w = 1.0/(np.sum(new_w_tilde)) * new_w_tilde # normalizing\n #print('new_w =', new_w)\n self.w = new_w\n \n return None", "def update_target_network(self):\n self.target_Qmodel = clone_model(self.Qmodel)\n self.target_Qmodel.set_weights(self.Qmodel.get_weights())\n\n # target network is never compiled\n self.target_Qmodel.compile(loss='mse', optimizer=Adam())", "def soft_update(self, local_model, target_model, tau):\n for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):\n target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)", "def update_target_network(self):\n self.target.set_weights(self.policy.get_weights()) # Update weights of target network with weights of policy network", "def SoftUpdate(self, local, target, tau):\n for target_param, local_param in zip(target.parameters(), local.parameters()):\n target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)", "def refugia_adj_5_full_2_iter2 (params, ns):\n #33 parameters \n nu1x, nuA, nu1a, nu2a, nu3a, nu1b, nu2b, nu3b, nu1c, nu2c, nu3c, nu1d, nu2d, nu3d, m0_12, m0_21, m1_12, m1_13, m1_21, m1_23, m1_31, m1_32, m3_12, m3_13, m3_21, m3_23, m3_31, m3_32, T0, T1, T2, T3, T4 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2])\n ## Population function and migration matrix for T0 (initial split; the definition of this time epoch differentiates this model from refugia_adj_5_simsplit_4epochs)\n nu_T0 = [nu1x, nuA]\n mig0 = numpy.array([[0, m0_12],[m0_21, 0]])\n fs.integrate(nu_T0, T0, m=mig0)\n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T1 (to reflect sum effect of all previous glacial-interglacial cycles)\n nu_T1 = [nu1a, nu2a, nu3a]\n mig1 = numpy.array([[0, m1_12, m1_13],[m1_21, 0, m1_23], [m1_31, m1_32, 0]]) \n fs.integrate(nu_T1, T1, m=mig1)\n ## Population function and migration matrix for T2 (to reflect period of isolation during last glacial)\n nu_T2 = [nu1b, nu2b, nu3b]\n fs.integrate(nu_T2, T2)\n ## Population function and migration matrix for T3 (to reflect inter-glacial expansion)\n nu_T3 = [nu1c, nu2c, nu3c]\n mig3 = numpy.array([[0, m3_12, m3_13],[m3_21, 0, m3_23], [m3_31, m3_32, 0]]) \n fs.integrate(nu_T3, T3, m=mig3)\n ## Population function and migration matrix for T3 (bottleneck to capture single population representation of lineage)\n nu_T4 = [nu1d, nu2d, nu3d]\n fs.integrate(nu_T4, T4) \n return fs", "def update(self):\n result = [], 0, False\n\n if self.t % self.t_train_freq == 0:\n result = self.q_learning_minibatch()\n\n if self.t % self.t_target_q_update_freq == self.t_target_q_update_freq - 1:\n # Copy \n self.update_target_q_network()\n\n return result", "def optimize_fore(self):\n self.u = np.random.uniform(-1, 1, (32, 288, 1, 1))\n \n self.l2 = torch.from_numpy(self.u).float()\n print('self u shape',self.l2.shape)\n self.n = torch.randn(32, 1, 28, 28)\n self.l1 = self.enc(self.input + self.n)\n self.del1=self.dec(self.l1)\n self.del2=self.dec(self.l2)\n self.update_netc()\n self.update_netd()\n\n self.update_l2()\n self.update_netg()\n print('sssssssssssssssssssssss')", "def 
update_state(self, a, obs, t):\n \n self.update_weights(a, obs, t) # only update weights, not particles \n self.update_running_average_weights(t) \n return None", "def update_recurrent_weights_step(self):\n \n # update weights: hebbian term\n self.delta_Wee=self.learn_rate*(self.rr[0:self.N_e]-self.input_mean)*\\\n (self.rr[0:self.N_e].T-self.input_mean)\n \n self.W_ee+=self.dt*self.delta_Wee\n\n # update weights: normalize to fixed mean of incoming and outgoing weights\n self.W_ee-=(self.W_ee.mean(axis=1)-self.W_av_star)[:,np.newaxis]\n self.W_ee-=(self.W_ee.mean(axis=0)-self.W_av_star)[np.newaxis,:]\n \n # clip weights \n self.W_ee=np.clip(self.W_ee,0,self.W_max_ee)\n \n # update excitatory weights in the big weight matrix\n self.W[:self.N_e,:self.N_e]=self.W_ee", "def refugia_adj_5_full_2_iter4 (params, ns):\n #33 parameters \n nu1x, nuA, nu1a, nu2a, nu3a, nu1b, nu2b, nu3b, nu1c, nu2c, nu3c, nu1d, nu2d, nu3d, m0_12, m0_21, m1_12, m1_13, m1_21, m1_23, m1_31, m1_32, m3_12, m3_13, m3_21, m3_23, m3_31, m3_32, T0, T1, T2, T3, T4 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2])\n ## Population function and migration matrix for T0 (initial split; the definition of this time epoch differentiates this model from refugia_adj_5_simsplit_4epochs)\n nu_T0 = [nu1x, nuA]\n mig0 = numpy.array([[0, m0_12],[m0_21, 0]])\n fs.integrate(nu_T0, T0, m=mig0)\n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T1 (to reflect sum effect of all previous glacial-interglacial cycles)\n nu_T1 = [nu1a, nu2a, nu3a]\n mig1 = numpy.array([[0, m1_12, m1_13],[m1_21, 0, m1_23], [m1_31, m1_32, 0]]) \n fs.integrate(nu_T1, T1, m=mig1)\n ## Population function and migration matrix for T2 (to reflect period of isolation during last glacial)\n nu_T2 = [nu1b, nu2b, nu3b]\n fs.integrate(nu_T2, T2)\n ## Population function and migration matrix for T3 (to reflect inter-glacial expansion)\n nu_T3 = [nu1c, nu2c, nu3c]\n mig3 = numpy.array([[0, m3_12, m3_13],[m3_21, 0, m3_23], [m3_31, m3_32, 0]]) \n fs.integrate(nu_T3, T3, m=mig3)\n ## Population function and migration matrix for T3 (bottleneck to capture single population representation of lineage)\n nu_T4 = [nu1d, nu2d, nu3d]\n fs.integrate(nu_T4, T4) \n return fs", "def updateGraph(self):\n self.initUnits()\n v = self.units.copy()\n v_old = v.copy() * 100 # initial value so it will skip the first break\n for step in range(self.numCycles): # for total number of cycles\n # keep the old version of v for paralel updating\n # if v_old and v every element differnce < 0.001, then stop\n if np.all(np.abs(v_old - v) < 0.001):\n break\n # assign to v_old v from the previous step\n v_old = v.copy()\n for i in range(self.graph.n): # for every unit in the graph\n if i not in self.graph.observed: # if the unit is not a special fixed value s\n net = np.dot(v_old, self.graph.c[i]) # compute total flow to the unit\n if net > 0:\n gradient = net*(self.min_max[1]-v_old[i])\n else:\n gradient = net*(v_old[i]-self.min_max[0])\n v[i] = v_old[i]*(1-self.decay) + gradient\n # should this be after every unit update, or after the whole graph updates ??\n v = np.where(v>1, self.min_max[1], v)\n v = np.where(v<-1,self.min_max[0],v)\n self.units = v", "def momentum_init(self, online_net, target_net):\n for param_ol, param_tgt in zip(online_net.parameters(), target_net.parameters()):\n param_tgt.data.copy_(param_ol.data)\n 
param_tgt.requires_grad = False", "def mc_micro_sweep(self): \n for i in range(self.N):\n if random.random()>0.3:\n self.mc_update_micro_fixed(i,xy = True)", "def u_update(eta_0, eta, eta_lin, w_0, w, w_lin, eta_T_H_L_stacked, premultiplied_lhs = None, nnls_max_iter=50): \n # PREMULTIPLIED LHS IS AN EXTRA ARGUMENT! Set it to None and add solver! \n \"\"\"In the following +[[]] and [:-1] are added to keep thing 1dim array of objects and still multiply it elemtwisely\"\"\" \n# #B.append([]) #THIS IS WRONG, CHANGES THE LIST \n# B_concat = np.concatenate((1/np.sqrt(2*eta))*np.array(B+[[]])[:-1], axis = 0) \n# A_ls = np.concatenate([(1/np.sqrt(2*eta0))*A, B_concat], axis = 0) \n# #print(np.array(B).shape) \n# #print(w[0].shape) \n# #print(w, eta) \n# #w.append([]) THIS IS WRONG, CHANGES THE LIST \n# w_concat = np.concatenate((1/np.sqrt(2*eta))*np.array(w+[[]])[:-1], axis = 0) #[:-1] Added as a hack to keep it one-dim array of objects \n# eta_w = np.expand_dims(1/np.sqrt(2*eta),1)*np.array(w) \n# print(eta_w.shape) \n# b_ls = np.concatenate([(1/np.sqrt(2*eta_0))*w_0, eta_w.flatten()], axis = 0) \n #Use correct broadcasting?\n w_concat = np.concatenate((1/np.sqrt(2*eta))*np.array(w+[[]])[:-1], axis = 0) #[:-1] Added as a hack to keep it one-dim array of objects \n b_ls = np.concatenate([(1/np.sqrt(2*eta_0))*w_0, w_concat, (1/np.sqrt(2*eta_lin))*w_lin], axis = 0) \n# print(np.sum(eta_w.flatten() != w_concat)) \n# premultiplied_time_start = time.time() \n# premultiplied_lhs = eta_T_H_stacked.T.dot(eta_T_H_stacked).toarray() \n# premultiplied_time_end = time.time() \n# print('premultiplying took {}'.format(premultiplied_time_end - premultiplied_time_start)) \n# premultiplied_rhs = eta_T_H_stacked.T.dot(b_ls) \n# u_next = nnls_predotted(premultiplied_lhs, premultiplied_rhs, tol=1e-5) \n# print(eta_T_H_stacked.shape, b_ls.shape) \n# A_ls_t_b = eta_T_H_stacked.T.dot(b_ls) \n# w =scipy.sparse.linalg.spsolve_triangular(RT, A_ls_t_b, lower = True) \n# x = scipy.sparse.linalg.spsolve_triangular(R, w, lower = False) \n# u_next = x \n u_next = scipy.optimize.lsq_linear(eta_T_H_L_stacked, b_ls, bounds = (0, np.inf), tol=1e-3, lsmr_tol=1e-3, max_iter=nnls_max_iter, verbose=1).x \n# u = scipy.optimize.lsq_linear(premultiplied_lhs, premultiplied_rhs, bounds = (0, np.inf), tol=1e-5).x \n return u_next", "def run(self):\n\n # initializing random network activity\n s_rand_T = np.zeros((self.T, self.N_rand))\n p_rand_T = np.zeros((self.T, self.N_rand))\n r_rand_T = np.zeros((self.T, self.N_rand))\n\n s_rand_T[0, :] = np.random.uniform(low=0, high=0.01, size=(self.N_rand))\n\n # initializing sensory networks\n s_sens_T = np.zeros((self.T, self.N_sensory_nets * self.N_sensory))\n p_sens_T = np.zeros((self.T, self.N_sensory_nets * self.N_sensory))\n r_sens_T = np.zeros((self.T, self.N_sensory_nets * self.N_sensory))\n s_sens_T[0, :] = np.random.uniform(low=0, high=0.01, size=(self.N_sensory_nets * self.N_sensory))\n\n # extend input to be T timesteps and only nonzero for 100 ts\n s_ext_T = np.broadcast_to(self.s_ext, (self.T, self.N_sensory * self.N_sensory_nets)).copy()\n # stimulus is presented for 100 ms\n stim_T = int(200/self.rand_net.dt)\n s_ext_T[:100] = 0\n s_ext_T[100+stim_T:] = 0\n # s_ext_T *= 0\n\n for t in range(1, self.T):\n if (t + 1) % 100 == 0:\n print(f'step {t} of {self.T}')\n s_sens_prev = s_sens_T[t - 1]\n s_rand_prev = s_rand_T[t - 1]\n p_rand_prev = p_rand_T[t - 1]\n s_ext = s_ext_T[t - 1]\n step = self.forward(s_ext=s_ext, s_rand_prev=s_rand_prev, s_sens_prev=s_sens_prev, 
p_rand_prev=p_rand_prev)\n s_sens_T[t] = step['s_sens']\n p_sens_T[t] = step['p_sens']\n r_sens_T[t] = step['r_sens']\n s_rand_T[t] = step['s_rand']\n r_rand_T[t] = step['r_rand']\n p_rand_T[t] = step['p_rand']\n\n p_sens_T = p_sens_T.reshape(self.T, self.N_sensory_nets, self.N_sensory)\n s_ext_T = s_ext_T.reshape(self.T, self.N_sensory_nets, self.N_sensory)\n r_sens_T = r_sens_T.reshape(self.T, self.N_sensory_nets, self.N_sensory)\n s_sens_T = s_sens_T.reshape(self.T, self.N_sensory_nets, self.N_sensory)\n\n return dict(\n n_sensory=self.N_sensory,\n n_rand=self.N_rand,\n mus=self.mus,\n sigma=self.sigma,\n s_ext=s_ext_T,\n s_sens=s_sens_T,\n r_sens=r_sens_T,\n p_sens=p_sens_T,\n s_rand=s_rand_T,\n r_rand=r_rand_T,\n p_rand=p_rand_T\n )", "def weight_update_nesterov(self, network):\n # Before updating, take step back with current velocity\n for l, layer in enumerate(network.layers):\n layer.b -= self.beta * self.vel_b[l]\n layer.q -= self.beta * self.vel_q[l]\n layer.rx_inp -= self.beta * self.vel_rx_inp[l]\n layer.ry_inp -= self.beta * self.vel_ry_inp[l]\n layer.rx_pos_out -= self.beta * self.vel_rx_pos_out[l]\n layer.ry_pos_out -= self.beta * self.vel_ry_pos_out[l]\n layer.rx_neg_out -= self.beta * self.vel_rx_neg_out[l]\n layer.ry_neg_out -= self.beta * self.vel_ry_neg_out[l]\n\n # Now update\n for l, layer in enumerate(network.layers):\n self.vel_b[l] = -self.alpha * self.dc_db[l] + self.beta * self.vel_b[l]\n self.vel_q[l] = -self.alpha * self.dc_dq[l] + self.beta * self.vel_q[l]\n self.vel_rx_inp[l] = -self.alpha * self.dc_drx_inp[l] + self.beta * self.vel_rx_inp[l]\n self.vel_ry_inp[l] = -self.alpha * self.dc_dry_inp[l] + self.beta * self.vel_ry_inp[l]\n self.vel_rx_pos_out[l] = -self.alpha * self.dc_drx_pos_out[l] + self.beta * self.vel_rx_pos_out[l]\n self.vel_ry_pos_out[l] = -self.alpha * self.dc_dry_pos_out[l] + self.beta * self.vel_ry_pos_out[l]\n self.vel_rx_neg_out[l] = -self.alpha * self.dc_drx_neg_out[l] + self.beta * self.vel_rx_neg_out[l]\n self.vel_ry_neg_out[l] = -self.alpha * self.dc_dry_neg_out[l] + self.beta * self.vel_ry_neg_out[l]\n\n layer.b += self.vel_b[l]\n layer.q += self.vel_q[l]\n layer.rx_inp += self.vel_rx_inp[l]\n layer.ry_inp += self.vel_ry_inp[l]\n layer.rx_pos_out += self.vel_rx_pos_out[l]\n layer.ry_pos_out += self.vel_ry_pos_out[l]\n layer.rx_neg_out += self.vel_rx_neg_out[l]\n layer.ry_neg_out += self.vel_ry_neg_out[l]", "def _freeze_tgt_networks(self):\n q1 = zip(self.tgt_q1.parameters(), self.soft_q1.parameters())\n q2 = zip(self.tgt_q2.parameters(), self.soft_q2.parameters())\n\n # Copy parameters\n for target_param, param in q1:\n target_param.data.copy_(param.data)\n for target_param, param in q2:\n target_param.data.copy_(param.data)\n\n # Freeze gradients\n for param in self.tgt_q1.parameters():\n param.requires_grad = False\n for param in self.tgt_q2.parameters():\n param.requires_grad = False", "def soft_update(target, source, tau):\n for target_param, param in zip(target.parameters(), source.parameters()):\n target_param.data.copy_(target_param.data * (1.0 - tau) + param.data * tau)", "def train_segmentation():\n\n start = time.time()\n\n model_base = load_pretrained(get_base(), PRETRAINED)\n cut, lr = model_meta[arch]\n m = to_gpu(Unet34(model_base))\n model = UnetModel(m)\n\n sz = 256\n bs = 64\n\n md = get_data(sz, bs)\n\n learn = ConvLearner(md, model)\n learn.opt_fn = optim.Adam()\n learn.crit = LossBinary(jaccard_weight=5)\n learn.metrics = [accuracy_thresh(0.5), dice, IoU]\n wd = 1e-7\n lr = 1e-2\n\n learn.freeze_to(1)\n 
learn.fit(lr, 1, wds=wd, cycle_len=1, use_clr=(5,8))\n    learn.unfreeze() # unfreeze encoder\n    learn.bn_freeze(True)\n\n    lrs = np.array([lr/100, lr/10, lr])\n    learn.fit(lrs/3, 2, wds=wd, cycle_len=2, use_clr=(20,8))\n\n    learn.save('./models/weighted_unet_256_p1')\n\n    sz = 384\n    bs = 32\n\n    md = get_data(sz, bs)\n    learn.set_data(md)\n    learn.unfreeze()\n    learn.bn_freeze(True)\n\n    learn.fit(lrs/5, 1, wds=wd, cycle_len=2, use_clr=(10,8)) # first increase in image size with decreased bs\n    learn.save('./models/weighted_unet_384_p1')\n\n    sz = 512\n    bs = 16\n\n    md = get_data(sz, bs)\n    learn.set_data(md)\n    learn.unfreeze()\n    learn.bn_freeze(True)\n\n    learn.fit(lrs/10, 2, wds=wd, cycle_len=1, use_clr=(10,8), best_save_name='./models/weighted_unet_512_p1') # second increase in image size with further decreased bs\n\n    sz = 768\n    bs = 8\n\n    md = get_data(sz, bs)\n    learn.set_data(md)\n    learn.unfreeze()\n    learn.bn_freeze(True)\n\n    learn.fit(lrs/50, 10, wds=5e-8, cycle_len=1, use_clr=(10,10), best_save_name='./models/weighted_unet_768_p1') # full image size with further decreased bs\n\n    learn.crit = MixedLoss(10., 2.)\n    learn.fit(lrs/50, 10, wds=5e-8, cycle_len=1, use_clr=(10,10), best_save_name='./models/weighted_unet_768_p2') # full image size with further decreased bs (final run)\n\n    learn.save('./models/weighted_unet_768_final')\n\n    print(f'Training finished in {(time.time() - start) / 60:.3} minutes.')", "def hard_update(source_net, target_net):\n    for target_param, param in zip(target_net.parameters(), source_net.parameters()):\n        target_param.data.copy_(param.data)", "def update(self):\n\n        self._eps_count += 1\n        if self._replay.size >= self._min_replay_size:\n            for _ in range(self._learning_updates):\n                samples_indices, minibatch = self._replay.sample(self._batch_size)\n                tf_minibatch = [tf.constant(mat, dtype=tf_type) for mat, tf_type in zip(minibatch, [tf.float32, tf.int32, tf.float32, tf.float32, tf.float32])]\n                self._learn(*tf_minibatch)\n\n            self._learn_iter_counter += 1\n            if (self._target_update_period > 1) and (self._learn_iter_counter % self._target_update_period == 0):\n                self._update_target_nets()", "def runconnectome(self, ):\n        for ps in self.postSynaptic:\n            if ps[:3] not in self.muscles and abs(self.postSynaptic[ps][self.thisState]) > self.threshold:\n                self.fireNeuron(ps)\n        self.motorcontrol()\n        for ps in self.postSynaptic:\n            # if self.postSynaptic[ps][thisState] != 0:\n            # print ps\n            # print \"Before Clone: \", self.postSynaptic[ps][thisState]\n\n            # fired neurons keep getting reset to previous weight\n            # wtf deepcopy -- So, the concern is that the deepcopy doesnt\n            # scale up to larger neural networks??
\n self.postSynaptic[ps][self.thisState] = copy.deepcopy(self.postSynaptic[ps][self.nextState]) \n\n # this deep copy is not in the functioning version currently.\n # print \"After Clone: \", self.postSynaptic[ps][thisState]\n\n self.thisState, self.nextState = self.nextState, self.thisState", "def load(uDir):\n import sys\n sys.path.append(uDir)\n from net_spec import spec\n \n builder = NetworkBuilder(spec)\n htm = builder.build()\n htm.start()\n \n ## restore each node state\n layers = htm.layers\n \n for l in range(len(layers) - 1):\n (r,c) = spec[l]['shape']\n\n if layers[l].node_sharing:\n state = {}\n state['coincidences'] = np.load(uDir + str(l) + \".0.0.coincidences.npy\")\n state['temporal_groups'] = [] ## !FIXME temporal groups should be saved, first\n state['PCG'] = np.load(uDir + str(l) + \".0.0.PCG.npy\")\n\n for i in range(r):\n for j in range(c):\n layers[l].pipes[i][j].send((\"set_state\", state))\n\n else:\n for i in range(r):\n for j in range(c):\n state = {}\n state['coincidences'] = np.load(uDir + str(l) + \".\" + str(i) + \".\" + str(j) + \".coincidences.npy\")\n state['temporal_groups'] = [] ## !FIXME temporal groups should be saved, first\n state['PCG'] = np.load(uDir + str(l) + \".\" + str(i) + \".\" + str(j) + \".PCG.npy\")\n layers[l].pipes[i][j].send((\"set_state\", state))\n \n ## restore also last node's state\n state = {}\n state['coincidences'] = np.load(uDir + str(len(layers) - 1) + \".0.0.coincidences.npy\")\n state['cls_prior_prob'] = np.load(uDir + str(len(layers) - 1) + \".0.0.cls_prior_prob.npy\")\n state['PCW'] = np.load(uDir + str(len(layers) - 1) + \".0.0.PCW.npy\")\n layers[-1].pipes[0][0].send((\"set_state\", state))\n\n return htm", "def _update_target_net(self):\n self.target_net.load_state_dict(self.policy_net.state_dict())\n self.target_net.eval()", "def update(self):\n self.brain.update()", "def set_t_FAST(self):\n\t\n\tself.N = 2**7\n\tdt = self.Orbit.Tobs/self.N\n\tself.t = np.linspace(0, self.N-1, self.N)*self.Orbit.Tobs/self.N\n\t\n\treturn", "def refugia_adj_5_full(params, ns):\n #22 parameters \n nu1_1a, nu1_1b, nu1_2, nu1_3, nuA_a, nuA_b, nu2_2, nu2_3, nu3_2, nu3_3, m1_12, m1_21, m3_12, m3_13, m3_21, m3_23, m3_31, m3_32, T1a, T1b, T2, T3 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2])\n ## Population function and migration matrix for T1a\n nu_T1a = [nu1_1a, nuA_a]\n fs.integrate(nu_T1a, T1a)\n ## Population function and migration matrix for T1b\n nu_T1b = [nu1_1b, nuA_b] \n mig1 = numpy.array([[0, m1_12],[m1_21, 0]])\n fs.integrate(nu_T1b, T1b, m=mig1) \n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T2\n nu_T2 = [nu1_2, nu2_2, nu3_2]\n fs.integrate(nu_T2, T2)\n ## Population function and migration matrix for T3\n nu_T3 = [nu1_3, nu2_3, nu3_3]\n mig3 = numpy.array([[0, m3_12, m3_13],[m3_21, 0, m3_23], [m3_31, m3_32, 0]]) \n fs.integrate(nu_T3, T3, m=mig3) \n return fs", "def training(data_to_train, network_dimensions=np.array([5,5]), n_iterations=2000, init_learning_rate=0.01, normalise_data=True, normalise_by_column=False):\n #transforma em float para fazer normalizacao\n raw_data = np.float64(data_to_train)\n\n #tamanho baseado nos dados\n m = raw_data.shape[0]\n n = raw_data.shape[1]\n\n #matriz de pesos tem que ter o mesmo tamanho do vetor de entrada (RGB = 3 entradas) \n #para cada neuronio no mapa (mapa 5x5)\n #inicializa pesos com valores 
aleatorios\n net = np.random.random((network_dimensions[0], network_dimensions[1], m))\n\n #raio da vizinhanca inicial (qual distancia eu procuro por vizinhos para atualizar)\n init_radius = max(network_dimensions[0], network_dimensions[1]) / 2\n #quanto o raio ira diminuir\n time_constant = n_iterations / np.log(init_radius)\n\n #cria matriz auxiliar caso precise normalizar\n data = raw_data\n\n if normalise_data:\n data = normalise(raw_data, normalise_by_column)\n \n #PROCESSO DE APRENDIZADO:\n #1. Encontra o neuronio com o vetor 3D mais proximo do vetor 3D do dataset - Best Matching Unit\n #\n #2. Move o vetor do neuronio BMU mais proximo do vetor de entrada no espaco\n #\n #3. Identifica os neuronios vizinhos do BMU e move os vetores mais proximos\n #\n #4. Reduz taxa de aprendizado\n for i in range(n_iterations):\n #seleciona um exemplo aleatorio do dataset\n t = data[:, np.random.randint(0,n)].reshape(np.array([m, 1]))\n\n #encotra o Best Matching Unit\n bmu, bmu_index = find_bmu(t, net, m)\n\n #diminui parametros de aprendizado usando\n #usa exponetial decay sigma_t = sigma_0 * exp(-t / lambda)\n #sigma_t eh o novo valor\n #sigma_0 eh o valor anterior\n #t eh o instante de tempo\n #lamba eh o time_constant\n r = decay_radius(init_radius, i, time_constant)\n l = decay_learning_rate(init_learning_rate, i, n_iterations)\n\n #move o BMU e seus vizinhos mais perto\n #atualizando pesos do BMU: w_t+1 = w_t + L_t * (V_i - w_t)\n #peso atual mais diferenca entre vetor de entrada e peso atual multipicado pela taxa de aprendiz\n #movendo o BMU mais perto do vetor de entrada\n #\n #depois, encontra outros neuronios dentro do raio definido\n #atualiza peso desses neuronios proporcionalmente a distancia ate o BMU (gaussiana)\n #para calcular essa influencia usa i_t = exp(-d^2 / (2 * sigma^2_t))\n #onde d eh a distancia entre os neuronios e sigma eh o raio no tempo atual\n\n for x in range(net.shape[0]):\n for y in range(net.shape[1]):\n w = net[x, y, :].reshape(m, 1) #pesos do neuronio atual\n #pega distancia euclidiana quadrada entre\n #posicao do neuronio atual e indice do bmu\n w_dist = np.sum((np.array([x, y]) - bmu_index) ** 2)\n #se a distancia eh menor que o raio atual (ao quadrado pq a distancia eh quadrada)\n if w_dist <= r**2:\n #calcula influencia do neuronio\n influence = calculate_influence(w_dist, r)\n #atualiza pesos do neuronio\n #w_novo = w_atual + (aprendizado * influencia * delta)\n #delta = entrada - w_atual\n new_w = w + (l * influence * (t - w))\n #coloca novo peso na matriz\n net[x, y, :] = new_w.reshape(1, 1)\n\n \n return net", "def _update_nn(self, bad_feats, good_feats, rate):\n self.nn.update(bad_feats, good_feats, rate)", "def update_network(self, tr_d, lr, relz=\"\", lmbda=0.0, mu=0.0):\n trlen = float(len(tr_d))\n delta_b = [np.zeros(b.shape) for b in self.biases]\n delta_w = [np.zeros(w.shape) for w in self.weights]\n for x,y in tr_d:\n delta_b_single, delta_w_single = self.backppg_ce(x,y)\n delta_b = [db+dbs for db,dbs in zip(delta_b, delta_b_single)]\n delta_w = [dw+dws for dw,dws in zip(delta_w, delta_w_single)]\n #update the parameters in network\n if(relz==\"\"):\n mu=0.0\n elif(relz[0:2] == \"MO\"):\n relz = relz[2:]\n self.velw = [mu*vw-(lr/trlen)*dw for vw,dw in zip(self.velw, delta_w)]\n self.velb = [mu*vb-(lr/trlen)*db for vb,db in zip(self.velb, delta_b)]\n self.biases = [b + vb for b,vb in zip(self.biases, self.velb)]\n if(relz == \"L2\"):\n self.weights = [w + vw - (lr/trlen/100)*lmbda*w for w,vw in zip(self.weights, self.velw)]\n elif(relz == 
\"L1\"):\n self.weights = [w + vw - (lr/trlen/100)*lmbda*np.sign(w) for w,vw in zip(self.weights, self.velw)]\n else:\n self.weights = [w + vw for w,vw in zip(self.weights, self.velw)]", "def soft_update_actor(self):\n local_weights = np.array(self.actor_local.model.get_weights())\n target_weights = np.array(self.actor_target.model.get_weights())\n\n assert len(local_weights) == len(\n target_weights), ('Local and target model parameters must have '\n 'the same size')\n\n new_weights = self.tau * local_weights + (1 - self.tau) * target_weights\n self.actor_target.model.set_weights(new_weights)", "def network_modified(input):\n\n up6 = upsample_and_concat( conv5, conv4, 256, 512 , 'up_conv1' )\n conv6=slim.conv2d(up6, 256,[3,3], rate=1, activation_fn=lrelu,scope='g_conv6_1')\n conv6=slim.conv2d(conv6,256,[3,3], rate=1, activation_fn=lrelu,scope='g_conv6_2')\n\n up7 = upsample_and_concat( conv6, conv3, 128, 256 , 'up_conv2' )\n conv7=slim.conv2d(up7, 128,[3,3], rate=1, activation_fn=lrelu,scope='g_conv7_1')\n conv7=slim.conv2d(conv7,128,[3,3], rate=1, activation_fn=lrelu,scope='g_conv7_2')\n\n up8 = upsample_and_concat( conv7, conv2, 64, 128 , 'up_conv3')\n conv8=slim.conv2d(up8, 64,[3,3], rate=1, activation_fn=lrelu,scope='g_conv8_1')\n conv8=slim.conv2d(conv8,64,[3,3], rate=1, activation_fn=lrelu,scope='g_conv8_2')\n\n up9 = upsample_and_concat( conv8, conv1, 32, 64 , 'up_conv4')\n conv9=slim.conv2d(up9, 32,[3,3], rate=1, activation_fn=lrelu,scope='g_conv9_1')\n conv9=slim.conv2d(conv9,32,[3,3], rate=1, activation_fn=lrelu,scope='g_conv9_2')\n\n conv10=slim.conv2d(conv9,12,[1,1], rate=1, activation_fn=None, scope='g_conv10')\n out = tf.depth_to_space(conv10,2)\n return out", "def transfer_weights(self):\n W, target_W = self.model.get_weights(), self.target_model.get_weights()\n for i in range(len(W)):\n target_W[i] = self.tau * W[i] + (1 - self.tau)* target_W[i]\n self.target_model.set_weights(target_W)", "def adjust_layer_temps(self):\n\n if self.layer_count == 1:\n self.t_s_0 = self.new_tsno(\n self.m_s_0,\n self.t_s_0,\n self.cc_s_0)\n self.t_s = self.t_s_0\n\n elif self.layer_count == 2:\n if self.isothermal:\n self.t_s = FREEZE\n self.t_s_l = FREEZE\n self.t_s_0 = FREEZE\n else:\n self.t_s_0 = self.new_tsno(\n self.m_s_0,\n self.t_s_0,\n self.cc_s_0)\n self.t_s_l = self.new_tsno(\n self.m_s_l,\n self.t_s_l,\n self.cc_s_l)\n self.t_s = self.new_tsno(\n self.m_s,\n self.t_s,\n self.cc_s)", "def _update_nn(self, bad_feats, good_feats, rate):\n # TODO: this is just adding another dimension to fit the parallelized scoring\n # (even if updates are not parallelized). 
Make it nicer.\n bad_feats = ([bad_feats[0]], [bad_feats[1]])\n good_feats = ([good_feats[0]], [good_feats[1]])\n\n cost_gcost = self.nn.update(*(bad_feats + good_feats + (rate,)))\n log_debug('Cost:' + str(cost_gcost[0]))\n param_vals = [param.get_value() for param in self.nn.params]\n log_debug('Param norms : ' + str(self._l2s(param_vals)))\n log_debug('Gparam norms: ' + str(self._l2s(cost_gcost[1:])))\n l1_params = param_vals[2]\n log_debug('Layer 1 parts :' + str(self._l2s([l1_params[0:100, :], l1_params[100:200, :],\n l1_params[200:350, :], l1_params[350:500, :],\n l1_params[500:, :]])))\n l1_gparams = cost_gcost[3]\n log_debug('Layer 1 gparts:' + str(self._l2s([l1_gparams[0:100, :], l1_gparams[100:200, :],\n l1_gparams[200:350, :], l1_gparams[350:500, :],\n l1_gparams[500:, :]])))", "def update(self,z_t):\n # YOUR CODE HERE\n pass", "def izhinet(params: dict, in_current: np.ndarray, runtime: int, deltat: float) -> np.ndarray:\n # params['ntypes'] (N,) True for excitatory, False for inhibitory\n # params['nrands'] (N,)\n # params['weights'] (N, N)\n # params['delays'] (N, N)\n # in_current (B, N)\n ntypes = params['ntypes'] # (N,)\n nrands = params['nrands'] # (N,)\n # We will look back in time, so need to transpose these\n recv_weights = params['weights'].T # (N, N)\n recv_delays = params['delays'].T # (N, N)\n # ---------------------------\n # Setup variables\n bs = in_current.shape[0] # batch size B\n ns = ntypes.shape[0] # number of neurons N\n ns_range = np.arange(ns) # (N,)\n\n firings = np.zeros((bs, ns, runtime), dtype=np.bool) # (B, N, T)\n\n # https://www.izhikevich.org/publications/spikes.pdf\n # Neuron parameters as described in the paper\n a = ntypes*0.02 + (1-ntypes)*(0.02+0.08*nrands) # (N,)\n b = ntypes*0.2 + (1-ntypes)*(0.25-0.5*nrands) # (N,)\n nrsquared = nrands*nrands # (N,)\n c = ntypes*(-65+15*nrsquared) + (1-ntypes)*-65 # (N,)\n d = ntypes*(8-6*nrsquared) + (1-ntypes)*2 # (N,)\n a, b, c, d = [np.repeat(x[None], bs, axis=0) for x in (a, b, c, d)] # (B, N)\n\n # Runtime state of neurons, v is the membrane voltage\n v = np.ones((bs, ns), dtype=np.float32)*-65 # (B, N)\n u = v * b # (B, N)\n # ---------------------------\n for t in range(runtime): # milliseconds\n # Compute input current\n past = t-recv_delays # (N, N)\n # This is okay because nothing has fired at the current time yet\n past[past < 0] = t # reset negative values to current time\n # Look back in time for neurons firing\n past_fired = firings[:, ns_range[None, :], past] # (B, N, N)\n icurrent = (past_fired*recv_weights).sum(-1) # (B, N)\n icurrent += in_current # (B, N)\n # ---------------------------\n fired = firings[..., t] # (B, N)\n # Integrate using the Euler method\n for _ in range(int(1/deltat)): # delta t to update differential equations\n # To avoid overflows with large input currents,\n # keep updating only neurons that haven't fired this millisecond.\n notfired = np.logical_not(fired) # (B, N)\n nfv, nfu = v[notfired], u[notfired] # (NF,), (NF,)\n # https://www.izhikevich.org/publications/spikes.pdf\n v[notfired] += deltat*(0.04*nfv*nfv + 5*nfv + 140 - nfu + icurrent[notfired]) # (B, N)\n u[notfired] += deltat*(a[notfired]*(b[notfired]*nfv - nfu)) # (B, N)\n # Update firings\n fired[:] = np.logical_or(fired, v >= 30) # threshold potential in mV\n # ---------------------------\n # Reset for next millisecond\n v[fired] = c[fired] # (F,)\n u[fired] += d[fired] # (F,)\n return firings", "def _update_model(self, verbose: bool, raw=True, smooth=False):\n if (raw and smooth) or (not raw and not 
smooth):\n raise InversionsonError(\"SGDM updates can be raw or smooth, not both\")\n if raw:\n gradient = (\n self.comm.lasif.lasif_comm.project.paths[\"gradients\"]\n / f\"ITERATION_{self.iteration_name}\"\n / \"summed_gradient.h5\"\n )\n if not os.path.exists(self.raw_gradient_path):\n shutil.copy(gradient, self.raw_gradient_path)\n if not os.path.exists(self.raw_update_path):\n self._compute_raw_update()\n if smooth:\n self._apply_smooth_update()", "def bn_update(loader, model):\n if not check_bn(model):\n return\n model.train()\n momenta = {}\n model.apply(reset_bn)\n model.apply(lambda module: _get_momenta(module, momenta))\n n = 0\n for input, _ in loader:\n input = input.cuda()\n input_var = torch.autograd.Variable(input)\n b = input_var.data.size(0)\n\n momentum = b / (n + b)\n for module in momenta.keys():\n module.momentum = momentum\n\n model(input_var)\n n += b\n\n model.apply(lambda module: _set_momenta(module, momenta))", "def train_punet(epochs,\n iteration,\n train_batch_size,\n lr,\n num_filters,\n input_channels,\n latent_dim,\n no_conv_fcomb,\n num_classes,\n beta,\n test_samples_no,\n dataset_path,\n dataset_tag):\n\n for itr in range(iteration):\n\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n train_path = dataset_path + '/train'\n validate_path = dataset_path + '/validate'\n test_path = dataset_path + '/test'\n\n dataset_train = CustomDataset_punet(dataset_location=train_path, dataset_tag=dataset_tag, noisylabel='p_unet', augmentation=True)\n dataset_val = CustomDataset_punet(dataset_location=validate_path, dataset_tag=dataset_tag, noisylabel='multi', augmentation=False)\n dataset_test = CustomDataset_punet(dataset_location=test_path, dataset_tag=dataset_tag, noisylabel='multi', augmentation=False)\n # dataset_size = len(dataset_train)\n\n # indices = list(range(dataset_size))\n # split = int(np.floor(0.1 * dataset_size))\n # np.random.shuffle(indices)\n # train_indices, test_indices = indices[split:], indices[:split]\n # train_sampler = SubsetRandomSampler(train_indices)\n # test_sampler = SubsetRandomSampler(test_indices)\n # print(\"Number of training/test patches:\", (len(train_indices),len(test_indices)))\n\n train_loader = DataLoader(dataset_train, batch_size=train_batch_size, shuffle=True, num_workers=4, drop_last=True)\n\n val_loader = DataLoader(dataset_val, batch_size=1, shuffle=False, num_workers=1, drop_last=False)\n\n test_loader = DataLoader(dataset_test, batch_size=1, shuffle=False, num_workers=1, drop_last=False)\n\n # net = ProbabilisticUnet(input_channels=3, num_classes=1, num_filters=[8, 16, 32, 64], latent_dim=4, no_convs_fcomb=2, beta=10)\n net = ProbabilisticUnet(input_channels=input_channels, num_classes=num_classes, num_filters=num_filters, latent_dim=latent_dim, no_convs_fcomb=no_conv_fcomb, beta=beta)\n\n net.to(device)\n\n optimizer = torch.optim.Adam(net.parameters(), lr=lr, weight_decay=1e-5)\n\n # epochs = 100\n\n training_iterations = len(dataset_train) // train_batch_size - 1\n\n for epoch in range(epochs):\n #\n net.train()\n #\n for step, (patch, mask, mask_name) in enumerate(train_loader):\n #\n # mask_list = [mask_over, mask_under, mask_wrong, mask_true]\n # mask = random.choice(mask_list)\n # print(np.unique(mask))\n #\n patch = patch.to(device)\n mask = mask.to(device)\n # mask = torch.unsqueeze(mask,1)\n net.forward(patch, mask, training=True)\n elbo, reconstruction, kl = net.elbo(mask)\n # reg_loss = l2_regularisation(net.posterior) + l2_regularisation(net.prior) + 
l2_regularisation(net.fcomb.layers)\n # loss = -elbo + 1e-5 * reg_loss\n loss = -elbo\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n #\n epoch_noisy_labels = []\n epoch_noisy_segs = []\n #\n if (step + 1) == training_iterations:\n #\n validate_iou = 0\n generalized_energy_distance_epoch = 0\n #\n validate_iou, generalized_energy_distance_epoch = evaluate_punet(net=net, val_data=val_loader, class_no=num_classes, sampling_no=4)\n print('epoch:' + str(epoch))\n print('val dice: ' + str(validate_iou))\n print('val generalized_energy: ' + str(generalized_energy_distance_epoch))\n print('train loss: ' + str(loss.item()))\n print('kl is: ' + str(kl.item()))\n print('reconstruction loss is: ' + str(reconstruction.item()))\n print('\\n')\n #\n print('\\n')\n #\n save_path = '../Exp_Results_PUnet'\n #\n try:\n #\n os.mkdir(save_path)\n #\n except OSError as exc:\n #\n if exc.errno != errno.EEXIST:\n #\n raise\n #\n pass\n #\n save_path = save_path + '/Exp_' + str(itr) + \\\n '_punet_' + \\\n '_train_batch_' + str(train_batch_size) + \\\n '_latent_dim_' + str(latent_dim) + \\\n '_lr_' + str(lr) + \\\n '_epochs_' + str(epochs) + \\\n '_beta_' + str(beta) + \\\n '_test_sample_no_' + str(test_samples_no)\n #\n test_punet(net=net, testdata=test_loader, save_path=save_path, sampling_times=test_samples_no)\n #\n print('Training is finished.')", "def update_temperature(self):\n self.iteration += 1 \n self.T = self.T0 * 0.9935**self.iteration", "def upgrade_readout(simulation_dict):\n logging.info(\"Upgrading readout network to a full perceptron\")\n import PVM_framework.SharedArray as SharedArray\n import PVM_framework.MLP as MLP\n simulation_dict['stage0_size'] = len(simulation_dict['stage0'])\n needed = True\n for i in range(simulation_dict['stage0_size']):\n if len(simulation_dict['stage0'][i][\"MLP_parameters_additional\"]['layers']) == 2 and len(simulation_dict['stage0'][i][\"MLP_parameters_additional\"]['weights']) == 1:\n nhidden = simulation_dict['stage0'][i][\"MLP_parameters_additional\"]['layers'][0]['activation'].shape[0]-1\n nadditional = simulation_dict['stage0'][i][\"MLP_parameters_additional\"]['layers'][-1]['activation'].shape[0]-1\n layer = {'activation': SharedArray.SharedNumpyArray((nhidden+1), np.float),\n 'error': SharedArray.SharedNumpyArray((nhidden+1), np.float),\n 'delta': SharedArray.SharedNumpyArray((nhidden+1), np.float)\n }\n\n simulation_dict['stage0'][i][\"MLP_parameters_additional\"]['layers'].insert(1, layer)\n simulation_dict['stage0'][i][\"MLP_parameters_additional\"]['weights'] = \\\n [MLP.initialize_weights(SharedArray.SharedNumpyArray((nhidden+1, nhidden), np.float)),\n MLP.initialize_weights(SharedArray.SharedNumpyArray((nhidden+1, nadditional), np.float)),\n ]\n else:\n needed = False\n if needed:\n logging.info(\"Upgrade complete\")\n else:\n logging.info(\"Upgrade was not nescessary\")", "def test_net_weight_update(self):\n nn = NeuralNet(0, 0, '', '', blank=True)\n nn.create_net(2, 1, 2, 2)\n nn.eta = 0.1\n\n # Override weights to static value for reproducibility\n for node in nn.layers[2].nodes:\n node.weights = [0.6, 0.6]\n\n for node in nn.layers[3].nodes:\n node.weights = [1.0, 1.0]\n\n nn.update_weights([2, 3], [0], test=True)\n\n test_weight = nn.layers[-1].nodes[0].weights[0]\n self.assertEqual(round(test_weight, 4), 0.9901)", "def update_target_network(self):\r\n self.send(self.server_conn, (sys._getframe().f_code.co_name, {}))", "def slowRPC(newETM): #Status: Done, not tested\r\n pass", "def bn_update(loader, model, verbose=False, 
subset=None, **kwargs):\n if not check_bn(model):\n return\n model.train()\n momenta = {}\n model.apply(reset_bn)\n model.apply(lambda module: _get_momenta(module, momenta))\n n = 0\n num_batches = len(loader)\n\n with torch.no_grad():\n if subset is not None:\n num_batches = int(num_batches * subset)\n loader = itertools.islice(loader, num_batches)\n if verbose:\n\n loader = tqdm.tqdm(loader, total=num_batches)\n for input, _ in loader:\n input = input.cuda(non_blocking=True)\n input_var = torch.autograd.Variable(input)\n b = input_var.data.size(0)\n\n momentum = b / (n + b)\n for module in momenta.keys():\n module.momentum = momentum\n\n model(input_var, **kwargs)\n n += b\n\n model.apply(lambda module: _set_momenta(module, momenta))", "def bn_update(loader, model, verbose=False, subset=None, **kwargs):\n if not check_bn(model):\n return\n model.train()\n momenta = {}\n model.apply(reset_bn)\n model.apply(lambda module: _get_momenta(module, momenta))\n n = 0\n num_batches = len(loader)\n\n with torch.no_grad():\n if subset is not None:\n num_batches = int(num_batches * subset)\n loader = itertools.islice(loader, num_batches)\n if verbose:\n loader = tqdm.tqdm(loader, total=num_batches)\n\n for input, _ in loader:\n input = input.cuda(non_blocking=True)\n input_var = torch.autograd.Variable(input)\n b = input_var.data.size(0)\n\n momentum = b / (n + b)\n for module in momenta.keys():\n module.momentum = momentum\n\n model(input_var, **kwargs)\n n += b\n\n model.apply(lambda module: _set_momenta(module, momenta))", "def refugia_adj_5_simsplit_3epochs(params, ns):\n #24 parameters \n nu1a, nu2a, nu3a, nu1b, nu2b, nu3b, nu1c, nu2c, nu3c, m1_12, m1_13, m1_21, m1_23, m1_31, m1_32, m3_12, m3_13, m3_21, m3_23, m3_31, m3_32, T1, T2, T3 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2]) \n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T2\n nu_T1 = [nu1a, nu2a, nu3a]\n mig1 = numpy.array([[0, m1_12, m1_13],[m1_21, 0, m1_23], [m1_31, m1_32, 0]]) \n fs.integrate(nu_T1, T1, m=mig1)\n ## Population function and migration matrix for T2\n nu_T2 = [nu1b, nu2b, nu3b]\n fs.integrate(nu_T2, T2)\n ## Population function and migration matrix for T3\n nu_T3 = [nu1c, nu2c, nu3c]\n mig3 = numpy.array([[0, m3_12, m3_13],[m3_21, 0, m3_23], [m3_31, m3_32, 0]]) \n fs.integrate(nu_T3, T3, m=mig3) \n return fs", "def update_network_profile(arn=None, name=None, description=None, type=None, uplinkBandwidthBits=None, downlinkBandwidthBits=None, uplinkDelayMs=None, downlinkDelayMs=None, uplinkJitterMs=None, downlinkJitterMs=None, uplinkLossPercent=None, downlinkLossPercent=None):\n pass", "def freeze_lowperf(self):\n \n self.freeze_model()\n\n # defreeze params of only being used by the high-performance model\n for i in range(1,5):\n layer = getattr(self, \"layer\"+str(i))\n if self.block_type == 'Bottleneck':\n layer[0].conv3.weight.requires_grad = True\n layer[0].bn3.train()\n elif self.block_type == 'BasicBlock':\n layer[0].conv2.weight.requires_grad = True\n layer[0].bn2.train()\n else:\n print(\"[Error] Unknown block type\")\n\n\n num_skip = len(layer)//2\n for j in range(1, num_skip+1):\n for param in layer[j].parameters():\n param.requires_grad = True\n layer[j].train()", "def refugia_adj_5(params, ns):\n #17 parameters \n nu1_1, nu1_2, nuA, nu2, nu3, m1_12, m1_21, m3_12, m3_13, m3_21, m3_23, m3_31, m3_32, T1a, T1b, T2, T3 = params\n sts = 
moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2])\n ## Population function and migration matrix for T1a\n nu_T1 = [nu1_1, nuA]\n fs.integrate(nu_T1, T1a)\n ## Population function and migration matrix for T1b\n mig1 = numpy.array([[0, m1_12],[m1_21, 0]])\n fs.integrate(nu_T1, T1b, m=mig1) \n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T2\n nu_T2 = [nu1_2, nu2, nu3]\n fs.integrate(nu_T2, T2)\n ## Population function and migration matrix for T3\n nu_T3 = [nu1_2, nu2, nu3]\n mig3 = numpy.array([[0, m3_12, m3_13],[m3_21, 0, m3_23], [m3_31, m3_32, 0]]) \n fs.integrate(nu_T3, T3, m=mig3) \n return fs", "def update_target(self):\n with torch.no_grad():\n for target_q_param, q_param in zip(self.target_q_funcs.parameters(), self.q_funcs.parameters()):\n target_q_param.data.copy_(self.tau * q_param.data + (1.0 - self.tau) * target_q_param.data)\n for target_pi_param, pi_param in zip(self.target_policy.parameters(), self.policy.parameters()):\n target_pi_param.data.copy_(self.tau * pi_param.data + (1.0 - self.tau) * target_pi_param.data)", "def _compute_raw_update(self):\n\n self.print(\"SGD with Momentum: Computing raw update...\", line_above=True)\n # Read task toml\n\n iteration_number = self.task_dict[\"iteration_number\"] + 1\n\n indices = self.get_parameter_indices(self.raw_gradient_path)\n # scale the gradients, because they can be tiny and this leads to issues\n g_t = self.get_h5_data(self.raw_gradient_path) * self.grad_scaling_fac\n\n if np.sum(np.isnan(g_t)) > 1:\n raise Exception(\n \"NaNs were found in the raw gradient.\" \"Something must be wrong.\"\n )\n\n if iteration_number == 1: # Initialize moments if needed\n shutil.copy(self.raw_gradient_path, self.moment_path)\n write_xdmf(self.moment_path)\n\n with h5py.File(self.moment_path, \"r+\") as h5:\n data = h5[\"MODEL/data\"]\n\n # initialize with zeros\n for i in indices:\n data[:, i, :] = np.zeros_like(data[:, i, :])\n\n v_t = self.beta * self.get_h5_data(self.moment_path) + (1 - self.beta) * g_t\n\n # Store first moment\n shutil.copy(\n self.moment_path,\n self._get_path_for_iteration(self.iteration_number + 1, self.moment_path),\n )\n self.set_h5_data(\n self._get_path_for_iteration(self.iteration_number + 1, self.moment_path),\n v_t,\n )\n\n # Correct bias\n v_t = v_t / (1 - self.beta ** (self.iteration_number + 1))\n update = self.alpha * v_t\n\n if np.sum(np.isnan(update)) > 1:\n raise Exception(\n \"NaNs were found in the raw update.\"\n \"Check if the gradient is not excessively small\"\n )\n\n # Write raw update to file for smoothing\n shutil.copy(self.raw_gradient_path, self.raw_update_path)\n self.set_h5_data(self.raw_update_path, update)", "def update_weights(architecture,grad_weights,grad_bias,m,v,t,lr,optimizer=\"adam\"):\n \n for layer in range(len(architecture)):\n if not (grad_weights['layer{}'.format(layer+1)] is None) and grad_bias['layer{}'.format(layer+1)] is not None:\n grad_weightsi = grad_weights['layer{}'.format(layer+1)]\n grad_weightsi /= bs\n grad_biasi = grad_bias['layer{}'.format(layer+1)]\n grad_biasi /= bs\n\n \n if optimizer.lower()==\"sgd\":\n # Mini-Batch SGD\n qw = lr*grad_weightsi\n qb = lr*grad_biasi\n else:\n # Mini-Batch Adam\n mw,mb = m['layer{}'.format(layer+1)]\n vw,vb = v['layer{}'.format(layer+1)]\n qw,mw,vw = adam(grad_weightsi,beta_1,beta_2,mw,vw,t,lr) # Have obtained dw\n qb,mb,vb = adam(grad_biasi,beta_1,beta_2,mb,vb,t,lr) # 
Have obtained db\n\n architecture['layer{}'.format(layer+1)][2].requires_grad = False\n architecture['layer{}'.format(layer+1)][3].requires_grad = False\n # Updating weights and biases now\n try:\n architecture['layer{}'.format(layer+1)][2] -= torch.Tensor(qw)\n except:\n architecture['layer{}'.format(layer+1)][2] -= torch.t(torch.Tensor(qw))\n try:\n architecture['layer{}'.format(layer+1)][3] -= torch.Tensor(qb)\n except:\n architecture['layer{}'.format(layer+1)][3] -= torch.t(torch.Tensor(qb))\n\n m['layer{}'.format(layer+1)][0] = torch.Tensor(mw)\n m['layer{}'.format(layer+1)][1] = torch.Tensor(mb)\n v['layer{}'.format(layer+1)][0] = torch.Tensor(vw)\n v['layer{}'.format(layer+1)][1] = torch.Tensor(vb)\n grad_weights['layer{}'.format(layer+1)] = torch.zeros(grad_weightsi.shape)\n grad_bias['layer{}'.format(layer+1)] = torch.zeros(grad_biasi.shape)\n return grad_weights,grad_bias,m,v", "def set_up_model(dt, model, update = False):\n \n start_scope()\n \n ##### Update model parameters (should be done, if original parameters have been changed)\n if update:\n ##### Structure of ANF\n # terminal = 0\n # internode = 1\n # node = 2\n # presomatic region = 3\n # Soma = 4\n # postsomatic region = 5\n model.structure = np.array(list(np.tile([2] + np.tile([1],model.nof_segments_internodes).tolist(),model.nof_internodes)) + [2])\n model.nof_comps = len(model.structure)\n \n ##### compartment lengths\n # initialize\n model.compartment_lengths = np.zeros_like(structure)*um\n # length internodes\n model.compartment_lengths[model.structure == 1] = model.length_internodes / model.nof_segments_internodes\n # length nodes\n model.compartment_lengths[model.structure == 2] = model.length_nodes\n # total length neuron\n model.length_neuron = sum(model.compartment_lengths)\n \n ##### Compartment diameters\n # initialize\n model.compartment_diameters = np.zeros(model.nof_comps+1)*um\n # same diameter for whole fiber\n model.compartment_diameters[:] = model.diameter_fiber\n \n ##### conductivity of leakage channels\n model.g_L = model.g_L_node/model.surface_aria_node\n\n ##### Surface arias\n # lateral surfaces\n m = [np.sqrt(abs(model.compartment_diameters[i+1] - model.compartment_diameters[i])**2 + model.compartment_lengths[i]**2)\n for i in range(0,model.nof_comps)]\n # total surfaces\n model.A_surface = [(model.compartment_diameters[i+1] + model.compartment_diameters[i])*np.pi*m[i]*0.5\n for i in range(0,model.nof_comps)]\n \n ##### Compartment middle point distances (needed for plots)\n model.distance_comps_middle[0] = 0.5*model.compartment_lengths[0]\n model.distance_comps_middle = np.zeros_like(model.compartment_lengths)\n for ii in range(0,model.nof_comps-1):\n model.distance_comps_middle[ii+1] = 0.5* model.compartment_lengths[ii] + 0.5* model.compartment_lengths[ii+1]\n \n ##### Capacities\n # initialize\n model.c_m = np.zeros_like(model.structure)*uF/cm**2\n # nodes\n model.c_m[model.structure == 2] = model.c_m_node/model.surface_aria_node\n # internodes\n model.c_m[structure == 1] = model.c_m_layer/(1+model.nof_myelin_layers)\n \n ##### Condactivities internodes\n # initialize\n model.g_m = np.zeros_like(model.structure)*msiemens/cm**2\n # internodes\n model.g_m[model.structure == 1] = model.g_m_layer/(1+model.nof_myelin_layers)\n \n ##### Axoplasmatic resistances\n model.compartment_center_diameters = np.zeros(model.nof_comps)*um\n model.compartment_center_diameters = (model.compartment_diameters[0:-1] + model.compartment_diameters[1:]) / 2\n model.R_a = (model.compartment_lengths*model.rho_in) / 
((model.compartment_center_diameters*0.5)**2*np.pi)\n \n ##### Noise term\n model.gamma_Na_vector = np.zeros(model.nof_comps)*psiemens\n model.gamma_Na_vector[model.structure == 2] = model.gamma_Na\n model.noise_term = np.sqrt(model.A_surface*model.gamma_Na_vector*model.rho_Na)\n \n ##### Compartments to plot\n # get indexes of all compartments that are not segmented\n model.indexes_comps = np.where(model.structure == 2)[0]\n # calculate middle compartments of internodes\n model.middle_comps_internodes = np.ceil(model.indexes_comps[:-1] + model.nof_segments_internodes/2).astype(int)\n # create array with all compartments to plot\n model.comps_to_plot = np.sort(np.append(model.indexes_comps, model.middle_comps_internodes))\n \n ##### initialize defaultclock\n defaultclock.dt = dt\n\n ##### define morphology\n morpho = Section(n = model.nof_comps,\n length = model.compartment_lengths,\n diameter = model.compartment_diameters)\n \n ##### define neuron\n neuron = SpatialNeuron(morphology = morpho,\n model = model.eqs,\n Cm = model.c_m,\n Ri = model.rho_in,\n method=\"exponential_euler\")\n \n ##### initial values\n neuron.v = model.V_res\n neuron.m = model.m_init\n neuron.h = model.h_init\n neuron.n = model.n_init\n \n ##### Set parameter values of differential equations\n # conductances nodes\n neuron.gamma_Na = model.gamma_Na\n neuron.gamma_K = model.gamma_K\n neuron.g_L = model.g_L\n \n # conductances internodes\n neuron.g_myelin = model.g_m\n neuron.gamma_Na[np.asarray(np.where(model.structure == 1))] = 0*psiemens\n neuron.gamma_K[np.asarray(np.where(model.structure == 1))] = 0*psiemens\n neuron.g_L[np.asarray(np.where(model.structure == 1))] = 0*msiemens/cm**2\n \n # conductances peripheral terminal\n neuron.gamma_Na[np.where(model.structure == 0)[0]] = model.gamma_Na_terminal\n neuron.gamma_K[np.where(model.structure == 0)[0]] = model.gamma_K_terminal\n neuron.g_L[np.where(model.structure == 0)[0]] = model.g_L_terminal\n \n # conductances soma\n neuron.gamma_Na[index_soma] = 0*psiemens\n neuron.gamma_K[index_soma] = 0*psiemens\n neuron.g_L[index_soma] = 0*msiemens/cm**2\n \n # Nernst potential for leakage current\n neuron.E_Leak = model.E_L\n neuron.E_Leak[np.where(model.structure == 0)[0]] = E_L_terminal\n \n # other parameters\n neuron.V_res = model.V_res\n neuron.T_celsius = model.T_celsius\n neuron.E_Na = model.E_Na\n neuron.E_K = model.E_K\n neuron.rho_Na = model.rho_Na\n neuron.rho_K = model.rho_K\n \n return neuron, model", "def acUpdate(deltaT):\n pass", "def _update_target_model(self):\n self.target_network.model.set_weights(self.policy_network.model.get_weights())", "def get_obj_new(model_old, demand_path, slink_dict, demand_dict, w_a, w_b, theta, s_limit):\n model = copy.deepcopy(model_old)\n \n #cpu_vector = model.cost_dict['cpu']\n dist_matrix = model.cost_dict['dist']\n rtt_matrix = model.cost_dict['rtt']\n #bw_matrix = model.cost_dict['bw']\n w_a1, w_a2 = w_a\n w_b1, w_b2 = w_b\n theta1, theta2, theta3 = theta\n # find total capacity and used bw on substrate nodes\n node_port_list, used_bw_list = total_port(model)\n #print \"CHECK POINT-1\", used_bw_list, node_port_list\n vnet_info = model.get_vnet_info()\n infeasible = 0\n\n fail_nodes = model.failed_dict\n start_time = time.time()\n select_dict, slink_dict = find_standby2(model, s_limit, w_a, w_b, theta, demand_path, demand_dict, slink_dict)\n # random selection\n #select_dict, slink_dict = find_random(model, s_limit, w_a, w_b, theta, demand_path, demand_dict, slink_dict)\n #print \"selection takes: \", time.time() - 
start_time\n #print \"Selected\", select_dict\n sum_cost_1_2 = 0\n r_list = {}\n \n for node_id in range(0,len(used_bw_list)):\n r_list[node_id] = used_bw_list[node_id]/node_port_list[node_id]\n #print \"INITIAL\", r_list\n svr_subset = {}\n for vnet in model.vnets:\n j = vnet.vnet_id\n f = fail_nodes[j]\n # only count the failed virtual network\n if f == -1:\n pass\n else:\n for node_id in vnet_info[j]['standby']:\n svr_subset[node_id] = r_list[node_id]\n #print \"Subset SVR: \", svr_subset\n \n if j in select_dict:\n #print \"FEASIBLE FOUND\"\n #print vnet_info[j]['standby']\n i = select_dict[j]\n failed_node = vnet.vnodes[f]\n vneighbors = failed_node.vneighbors\n for k in vneighbors: \n eta = operation_cost()\n #print \"a1: \", w_a1, \"a2: \", w_a2, \"b1: \", w_b1, \"b2: \", w_b2\n dist_c = dist_cost(i, f, k, dist_matrix, w_a1, w_a2)\n rtt_c = rtt_cost(i, k, rtt_matrix)\n sigma = connect_cost(dist_c, rtt_c, w_b1, w_b2)\n sum_cost_1_2 += theta1 * eta + theta2 * sigma\n #find residual bw on substrate node\n #xi = resource_cost(bw_matrix, i)\n req_bw = sum(failed_node.neighbor_traffic.values())\n #util = req_bw / xi\n #print req_bw, i, used_bw_list[i], node_port_list[i]\n util = req_bw/node_port_list[i]\n \n if i not in r_list:\n r_list[i] = util #+ used_bw_list[i]/node_port_list[i]\n else:\n r_list[i] += util\n svr_subset[i] = r_list[i]\n #print sigma, \" v_\" + str(vnet.vnet_id) + \"_\" + str(f) + \"_\" + str(i) + \"_\" + str(k) + \"_\" + str(k)\n else:\n print \"INFEASIBLE at vnet: \", j\n infeasible = 1\n #print \"DONE\"\n \n if infeasible == 0:\n #print svr_subset\n max_util = max(svr_subset.values())\n obj = sum_cost_1_2 + theta3 * max_util\n else:\n obj = \"infeasible\"\n max_util = \"none\"\n #print obj\n used_time = time.time() - start_time\n return obj, select_dict, max_util, used_time", "def optimize(self):\n self.check_is_ready()\n self.check_infeasibility()\n solution_graph, obj_val = self.find_shortest_network_with_ADH((self.old_network_graph is not None))\n self.solution_graph = gnx.GeoMultiGraph(solution_graph, crs=self.optimization_graph.crs)", "def update_all(self,delta_t):\n self.update_thrust()\n self.update_climb_rate()\n self.update_height(delta_t)", "def time_update(self, U):\n self.X = self.runge_kutta(self.process_model, self.X, U, self.dt)\n \n J = self.F.subs({'d_t': self.dt, 'v': U[0], '\\Theta': self.X[2]})\n \n self.P = J*self.P*J.T + self.Q", "def simplify_by_refinement(pts, jparams):\n print(\"=== TIN simplification ===\")\n start = time.time()\n print(\"start measuring time of refinement\")\n bbox_size = 1 #variable for bounding box size\n y_max = max(pts[:,1])\n x_max = max(pts[:,0])\n y_min = min(pts[:,1])\n x_min = min(pts[:,0])\n y_delta = y_max-y_min\n x_delta = x_max-x_min\n y_max += y_delta*0.5*(bbox_size-1)\n y_min -= y_delta*0.5*(bbox_size-1)\n x_max += x_delta*0.5*(bbox_size-1)\n x_min -= x_delta*0.5*(bbox_size-1)\n z_avg = sum(pts[:,2])/len(pts[:,2])\n dt_vertices = np.array([[x_min,y_min,z_avg], [x_max, y_min,z_avg], [x_max, y_max,z_avg], [x_min, y_max,z_avg]])\n #print(dt_vertices)\n dt_2d = scipy.spatial.Delaunay([i[0:2] for i in dt_vertices])\n error_track = 0\n highest_diff = np.inf\n while highest_diff>jparams[\"error-threshold\"] and error_track==0:\n diff_list = []\n for pt_index in range(0,len(pts)):\n point = pts[pt_index]\n triangle_idx = dt_2d.find_simplex(point[0:2])\n #print(triangle_idx)\n if triangle_idx == -1:\n print(\"!!! 
error creating the bounding box !!!\")\n error_track = 1\n break\n else: #calculate the difference between the existing TIN and the actual z value of the point\n interpolation = TIN_interpolator(dt_vertices, dt_2d, triangle_idx, point)\n diff_list.append(abs(point[2]-interpolation))\n #update values and triangulation\n highest_diff = max(diff_list)\n if highest_diff>jparams[\"error-threshold\"]:\n max_idx = diff_list.index(max(diff_list))\n dt_vertices = np.append(dt_vertices,[pts[max_idx]], axis=0)\n dt_2d = scipy.spatial.Delaunay([i[0:2] for i in dt_vertices])\n np.delete(pts,pt_index)\n #print(\"%.32f\" %highest_diff)\n #print(max(diff_list), min(diff_list))\n end = time.time()\n print(\"refinement takes \",end - start)\n if len(dt_vertices)>4:\n #print(\"There are \",len(dt_vertices)-4,\"important points\")\n return dt_vertices[4:len(dt_vertices)] # Remember: the vertices of the initial TIN should not be returned\n else:\n return None", "def update_q_net(\n q_net: VisualQNetwork, \n optimizer: torch.optim, \n buffer: Buffer, \n action_size: int\n ):\n BATCH_SIZE = 1000\n NUM_EPOCH = 3\n GAMMA = 0.9\n batch_size = min(len(buffer), BATCH_SIZE)\n random.shuffle(buffer)\n # Split the buffer into batches\n batches = [\n buffer[batch_size * start : batch_size * (start + 1)]\n for start in range(int(len(buffer) / batch_size))\n ]\n for _ in range(NUM_EPOCH):\n for batch in batches:\n # Create the Tensors that will be fed in the network\n obs = torch.from_numpy(np.stack([ex.obs for ex in batch]))\n reward = torch.from_numpy(\n np.array([ex.reward for ex in batch], dtype=np.float32).reshape(-1, 1)\n )\n done = torch.from_numpy(\n np.array([ex.done for ex in batch], dtype=np.float32).reshape(-1, 1)\n )\n action = torch.from_numpy(np.stack([ex.action for ex in batch]))\n next_obs = torch.from_numpy(np.stack([ex.next_obs for ex in batch]))\n\n # Use the Bellman equation to update the Q-Network\n target = (\n reward\n + (1.0 - done)\n * GAMMA\n * torch.max(q_net(next_obs).detach(), dim=1, keepdim=True).values\n )\n mask = torch.zeros((len(batch), action_size))\n mask.scatter_(1, action, 1)\n prediction = torch.sum(qnet(obs) * mask, dim=1, keepdim=True)\n criterion = torch.nn.MSELoss()\n loss = criterion(prediction, target)\n\n # Perform the backpropagation\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()", "def __init__(self, \n n_neurons = \"micro\", # else: \"brunel\" or arrays\n C_ab = \"micro\", # else: \"brunel\" or arrays\n area = net.area, # simulation size\n neuron_model = net.neuron_model, # \"iaf_psc_delta\" or \"iaf_psc_exp\"\n connection_rule = net.connection_rule, # \"fixed_total_number\" or \"fixed_indegree\"\n j02 = net.j02, \n weight_rel_sd = net.weight_rel_sd, \n delay_rel_sd = net.delay_rel_sd, \n g = net.g, \n rate_ext = net.rate_ext):\n ###################################################\n ### \tNetwork parameters\t\t### \n ###################################################\n\n # area of network in mm^2; scales numbers of neurons\n # use 1 for the full-size network (77,169 neurons)\n self.area = area\n \n self.layers = net.layers #np.array([\"L23\", \"L4\", \"L5\", \"L6\"])\n self.types = net.types #np.array([\"e\", \"i\"]) \n self.populations = np.array([layer + typus for layer in self.layers for typus in self.types])\n self.n_populations = len(self.populations)\n self.n_layers = len(self.layers)\n self.n_types = len(self.types)\n \n # Neuron numbers\n if n_neurons == \"micro\":\n self.n_neurons = np.int_(net.full_scale_n_neurons * self.area)\n elif n_neurons == 
\"brunel\":\n # Provide an array of equal number of neurons in each exc./inh. population\n gamma = 0.25\n inh_factor = 1. / (gamma + 1.)\n exc_factor = 1. - inh_factor \n n_total_micro = np.sum(net.full_scale_n_neurons * self.area)\n N_exc = n_total_micro/self.n_populations * exc_factor\n N_inh = n_total_micro/self.n_populations * inh_factor\n self.n_neurons = np.tile([N_exc, N_inh], self.n_layers).astype(int)\n else:\n if type(n_neurons) == np.ndarray:\n if n_neurons.shape == (self.n_populations, ):\n self.n_neurons = np.int_(n_neurons)\n else:\n raise Exception(\"'n_neurons' has wrong shape. \"+\n \"Expects (%i,)\"%self.n_populations)\n else: \n raise Exception(\"'n_neurons' expects either numpy.ndarray or string \"+\n \"in {'micro', 'brunel'}\")\n self.n_total = np.sum(self.n_neurons)\n\n \n # Synapse numbers\n # Connection probabilities: conn_probs[post, pre] = conn_probs[target, source]\n conn_probs = net.conn_probs\n # Scale synapse numbers of the C_ab\n if net.scale_C_linearly:\n n_outer_full = np.outer(net.full_scale_n_neurons, net.full_scale_n_neurons)\n C_full_scale = np.log(1. - conn_probs) / np.log(1. - 1. / n_outer_full)\n C_scaled = np.int_(C_full_scale * self.area)\n else:\n n_outer = np.outer(self.n_neurons, self.n_neurons)\n C_scaled = np.int_(np.log(1. - conn_probs) / np.log(1. - 1. / n_outer))\n\n self.connection_rule = connection_rule\n if self.connection_rule == \"fixed_total_number\":\n C_ab_micro = C_scaled # total number, do not divide! \n elif self.connection_rule == \"fixed_indegree\":\n C_ab_micro = (C_scaled.T / (net.full_scale_n_neurons * self.area)).T\n else:\n raise Exception(\"Unexpected connection type. Use 'fixed_total_number' for microcircuit \" + \n \"model or 'fixed_indegree' for Brunel's model!\")\n\n if C_ab == \"micro\":\n self.C_ab = C_ab_micro # shall not be integer at this point!\n elif C_ab == \"brunel\":\n C_e = np.mean(C_ab_micro) # mean for microcircuit (= 501 in full scale)\n C_i = gamma * C_e\n self.C_ab = np.tile([C_e, C_i], (self.n_populations, self.n_layers)).astype(int) \n else:\n if type(C_ab) == np.ndarray:\n if C_ab.shape == (self.n_populations, self.n_populations):\n self.C_ab = np.int_(C_ab)\n else:\n raise Exception(\"'C_ab' has wrong shape. 
\"+\n \"Expects (%i, %i)\"%(self.n_populations, self.n_populations))\n else: \n raise Exception(\"'C_ab' expects either numpy.ndarray or string \"+\n \"in {'micro', 'brunel'}\")\n\n\n ###################################################\n ### Single-neuron parameters\t\t### \n ###################################################\n self.neuron_model = neuron_model\n self.Vm0_mean = net.Vm0_mean # mean of initial membrane potential (mV)\n self.Vm0_std = net.Vm0_std # std of initial membrane potential (mV)\n self.model_params = net.model_params\n if not self.neuron_model==\"iaf_psc_delta\":\n self.model_params[\"tau_syn_ex\"] = net.tau_syn_ex # excitatory synaptic time constant (ms)\n self.model_params[\"tau_syn_in\"] = net.tau_syn_in # inhibitory synaptic time constant (ms)\n self.tau_syn_ex = net.tau_syn_ex # ms\n self.tau_syn_in = net.tau_syn_in # ms\n self.tau_syn = np.tile([self.tau_syn_ex, self.tau_syn_in], (self.n_populations, self.n_layers))\n # Rescaling for model calculations: these values are not used in the simulation!\n self.tau_m = self.model_params[\"tau_m\"] # ms\n self.t_ref = self.model_params[\"t_ref\"] # ms\n self.E_L = self.model_params[\"E_L\"] # mV\n self.V_r = self.model_params[\"V_reset\"] - self.E_L # mV\n self.theta = self.model_params[\"V_th\"] - self.E_L # mV\n self.C_m = self.model_params[\"C_m\"] # pF\n\n\n ######################################################\n # Synaptic weights. Depend on neuron_model! ##\n ######################################################\n self.g = g\n self.j02 = j02\n\n g_all = np.tile([1., -self.g], (self.n_populations, self.n_layers))\n L23e_index = np.where(self.populations == \"L23e\")[0][0]\n L4e_index = np.where(self.populations == \"L4e\")[0][0]\n g_all[L23e_index, L4e_index] *= self.j02\n \n self.J = net.PSP_e # mv; mean PSP, used as reference PSP\n self.J_ab = self.J * g_all\n self.weight_rel_sd = weight_rel_sd # Standard deviation of weight relative to mean weight\n # Transformation from peak PSP to PSC\n delta_tau = self.tau_syn - self.tau_m\n ratio_tau = self.tau_m / self.tau_syn\n PSC_over_PSP = self.C_m * delta_tau / (self.tau_m * self.tau_syn * \\\n (ratio_tau**(self.tau_m / delta_tau) - ratio_tau**(self.tau_syn / delta_tau))) \n # Actual weights have to be adapted: from peak PSP to PSC (and back...)\n if self.neuron_model==\"iaf_psc_exp\": # PSCs calculated from PSP amplitudes\n self.weights = self.J_ab * PSC_over_PSP # neuron populations\n elif self.neuron_model==\"iaf_psc_delta\":\n self.weights = self.J_ab * PSC_over_PSP * (self.tau_syn_ex) / self.C_m\n # This might be an overkill / doing things twice...\n elif self.neuron_model==\"iaf_psc_alpha\": # PSCs calculated from PSP amplitudes\n self.weights = self.J_ab * np.exp(1) / (self.tau_syn_ex) / self.C_m\n else:\n raise Exception(\"Neuron model should be iaf_psc_ - {delta, exp, alpha}!\")\n\n\n ###################################################\n ### Delays and dicts ### \n ###################################################\n # mean dendritic delays for excitatory and inhibitory transmission (ms)\n self.delay_e = net.delay_e # ms, excitatory synapses\n self.delay_i = net.delay_i # ms, inhibitory synapses\n\n self.delays = np.tile([self.delay_e, self.delay_i], (self.n_populations, self.n_layers)) # adapt...\n self.delay_rel_sd = delay_rel_sd \n \n # Synapse dictionaries\n # default connection dictionary\n self.conn_dict = {\"rule\": connection_rule}\n # weight distribution of connections between populations\n self.weight_dict_exc = net.weight_dict_exc\n 
self.weight_dict_inh = net.weight_dict_inh\n # delay distribution of connections between populations\n self.delay_dict = net.delay_dict\n # default synapse dictionary\n self.syn_dict = net.syn_dict\n \n \n ###################################################\n ### External stimuli ## \n ###################################################\n # rate of background Poisson input at each external input synapse (spikes/s) \n self.rate_ext = rate_ext # Hz \n self.J_ext = net.PSP_ext # external synaptic weight\n self.delay_ext = self.delay_e # ms; mean delay of external input\n self.dc_amplitude = net.dc_amplitude # constant bg amplitude\n self.C_aext = net.C_aext # in-degrees for background input\n # Adapt weights\n if self.neuron_model==\"iaf_psc_exp\": # PSCs calculated from PSP amplitudes\n self.weight_ext = self.J_ext * PSC_over_PSP[0, 0] \n elif self.neuron_model==\"iaf_psc_delta\":\n self.weight_ext = self.J_ext * PSC_over_PSP[0, 0] * self.tau_syn_ex / self.C_m\n elif self.neuron_model==\"iaf_psc_alpha\": # PSCs calculated from PSP amplitudes\n self.weight_ext = self.J_ext * np.exp(1) / self.tau_syn_ex / self.C_m\n\n # optional additional thalamic input (Poisson)\n self.n_th = net.n_th # size of thalamic population\n self.th_start = net.th_start # onset of thalamic input (ms)\n self.th_duration = net.th_duration # duration of thalamic input (ms)\n self.th_rate = net.th_rate # rate of thalamic neurons (spikes/s)\n self.J_th = net.PSP_th # mean EPSP amplitude (mV) for thalamic input\n # Adapt weights\n if self.neuron_model==\"iaf_psc_exp\": # PSCs calculated from PSP amplitudes\n self.weight_th = self.J_th * PSC_over_PSP[0, 0] \n elif self.neuron_model==\"iaf_psc_delta\":\n self.weight_th = self.J_th * PSC_over_PSP[0, 0] * self.tau_syn_ex / self.C_m\n elif self.neuron_model==\"iaf_psc_alpha\": # PSCs calculated from PSP amplitudes\n self.weight_th = self.J_th * np.exp(1) / self.tau_syn_ex / self.C_m\n\n \n # connection probabilities for thalamic input\n conn_probs_th = net.conn_probs_th\n if net.scale_C_linearly:\n if not self.n_th == 0:\n C_th_full_scale = np.log(1. - conn_probs_th) / \\\n np.log(1. - 1. / (self.n_th * net.full_scale_n_neurons))\n self.C_th_scaled = np.int_(C_th_full_scale * self.area)\n else:\n if not self.n_th == 0:\n self.C_th_scaled = np.int_(np.log(1. - conn_probs_th) / \\\n np.log(1. - 1. / (self.n_th * self.n_neurons_micro)))\n if self.n_th == 0:\n self.C_th_scaled = None\n \n # mean delay of thalamic input (ms)\n self.delay_th = net.delay_th\n # standard deviation relative to mean delay of thalamic input\n self.delay_th_rel_sd = net.delay_th_rel_sd\n\n\n ######################################################\n # Predefine matrices for mean field ##\n ######################################################\n if self.neuron_model==\"iaf_psc_delta\":\n self.J_mu = self.weights\n self.J_sd = self.weights\n self.J_mu_ext = self.weight_ext \n self.J_sd_ext = self.weight_ext\n elif self.neuron_model==\"iaf_psc_exp\":\n self.J_mu = self.weights * self.tau_syn / self.C_m\n self.J_sd = self.weights * np.sqrt(self.tau_syn / 2.) / self.C_m\n self.J_mu_ext = self.weight_ext * self.tau_syn_ex / self.C_m\n self.J_sd_ext = self.weight_ext * np.sqrt(self.tau_syn_ex / 2.) / self.C_m\n elif self.neuron_model==\"iaf_psc_alpha\":\n self.J_mu = self.weights * self.tau_syn**2 / self.C_m\n self.J_sd = self.weights * self.tau_syn**(3./2.) / (self.C_m * 2.)\n self.J_mu_ext = self.weight_ext * self.tau_syn_ex**2 / self.C_m\n self.J_sd_ext = self.weight_ext * self.tau_syn_ex**(3./2.) 
/ (self.C_m * 2.)\n self.mat_mu = self.tau_m * 1e-3 * self.J_mu * self.C_ab\n self.mu_ext = self.tau_m * 1e-3 * self.J_mu_ext * self.C_aext * self.rate_ext\n self.mat_var = self.tau_m * 1e-3 * (1 + self.weight_rel_sd ** 2) * self.J_sd**2 * self.C_ab\n self.var_ext = self.tau_m * 1e-3 * (1 + self.weight_rel_sd ** 2) * self.J_sd_ext**2 * self.C_aext * self.rate_ext", "def update_speed_weights_step(self):\n \n weights_list = [self.W_speed_east, self.W_speed_west,self.W_speed_north,self.W_speed_south]\n speed_input_list = [self.speed_inputs_east,self.speed_inputs_west,\n self.speed_inputs_north,self.speed_inputs_south]\n \n if self.use_eight_directions is True:\n weights_list+=[self.W_speed_north_east,\n self.W_speed_north_west,self.W_speed_south_east,self.W_speed_south_west]\n \n speed_input_list+=[self.speed_inputs_north_east,self.speed_inputs_north_west, \n self.speed_inputs_south_east,self.speed_inputs_south_west]\n\n \n for weights,speed_input in zip(weights_list,speed_input_list):\n \n \n weight_update=speed_input*(self.rr[:self.N_e]-self.input_mean)*(self.rr_e_trace.T-self.input_mean)\n weights+=self.learn_rate_speed_weights*weight_update\n\n\n # normalize to fixed mean of incoming and outgoing weights\n weights-=(weights.mean(axis=1)-self.W_av_star)[:,np.newaxis]\n weights-=(weights.mean(axis=0)-self.W_av_star)[np.newaxis,:]\n \n # clip weights \n np.clip(weights,0,self.W_max_e,out=weights)", "def sweep_relay():", "def update(self, sim, dt):\n #growth kinetics\n self.division_timer += dt\n #you can grow unless you are in the A state meaning apoptosis\n if(self.division_timer >= self.division_time and self._division):\n #now you can divide\n if(self.state == \"T1\"):\n #change the current sytate to D\n self.state = \"NSC\"\n self._division = False\n self.division_time = 36\n #progenitor time is faster with concentration factor\n\n #add the concentration\n source, consump_rate = self.get_gradient_source_sink_coeff(\"TNF\")\n self.set_gradient_source_sink_coeff(\"TNF\", 50.0*source, 1.0*consump_rate)\n## #get neighbors\n## nbs = sim.network.neighbors(self)\n## #get the total\n## tot = len(nbs)\n## mn_count = 0\n## for i in range(0, tot):\n## if(nbs[i].state == \"MN\" or nbs[i].state == \"T2\"): \n## mn_count += 1\n## norm_mn = float(mn_count) / float(tot)\n## if(norm_mn < self._p2):\n## self.division_time = 36*(norm_mn) # in hours\n## self.division_time = max(self.division_time, 1) \n## else:\n## \n## print(norm_mn, self.division_time)\n #also set the current consumption rate\n## source, consump_rate = self.get_gradient_source_sink_coeff(\"EGF\")\n## self.set_gradient_source_sink_coeff(\"EGF\", source, 1.0*consump_rate)\n if(self.state == \"T2\"):\n #change the current sytate to D\n self.state = \"MN\"\n self.division_time = 56 #in hours\n #also set the current consumption rate\n source, consump_rate = self.get_gradient_source_sink_coeff(\"EGF\")\n self.set_gradient_source_sink_coeff(\"EGF\", 50.0*source, 1.0*consump_rate)\n if(self.state == \"T3\"):\n #change the current sytate to D\n self.state = \"G\"\n self.division_time = 56 #in hours\n #also set the current consumption rate\n## source, consump_rate = self.get_gradient_source_sink_coeff(\"EGF\")\n## self.set_gradient_source_sink_coeff(\"EGF\", source, 1.0*consump_rate)\n #get the location\n #pick a random point on a sphere\n location = RandomPointOnSphere()*self.radius/2.0 + self.location\n #get the radius\n radius = self.radius\n #get the ID\n ID = sim.get_ID()\n #make the object\n sc = NueronalStemCell(location, radius, ID, 
self.state,\n division_time = self.division_time,\n params = [self._p1, self._p2,\n self._p3, self._p4, self._p5,\n self._p6, self.p7])\n #copy secretion to NSC progeny\n if(self.state == \"NSC\"):\n source, consump_rate = self.get_gradient_source_sink_coeff(\"TNF\")\n sc.set_gradient_source_sink_coeff(\"TNF\", 50.0*source, 1.0*consump_rate)\n sc._division = False\n #set its soluble count\n## sc.sol_count = self.sol_count / 2.\n## self.sol_count = self.sol_count / 2.\n #copy over all of the coefficients to the new cells\n## prod_cons = self.get_gradient_source_sink_coeff(\"O2\")\n## sc.set_gradient_source_sink_coeff(\"O2\", prod_cons[0], prod_cons[1])\n prod_cons = self.get_gradient_source_sink_coeff(\"EGF\")\n sc.set_gradient_source_sink_coeff(\"EGF\", prod_cons[0], prod_cons[1]) \n #add it to the imsulation\n sim.add_object_to_addition_queue(sc)\n #reset the division time\n self.division_timer = 0\n \n if(self.state == \"U\"):\n #HANDLE DIFFERENTIATION\n #RANDOM RULE\n x = rand.random()\n prob = self._p1 #probability of turning into a NSC\n #longer before the differentiation starts\n if(x < prob):\n #differentiation occurs\n self.state = \"T1\"\n #also add a proabability to differentiate directly to a mn\n n1 = self._p4\n## #get neighbors\n## nbs = sim.network.neighbors(self)\n## #get the total\n## tot = len(nbs)\n## mn_count = 0\n## if(tot > 0):\n## #count up the states fo all fo these\n## for i in range(0, tot):\n## if(nbs[i].state == \"MN\" or nbs[i].state == \"T2\"): \n## mn_count += 1\n #get the value fo the gradient and make differntiation inversly\n #inversly correlated with the proportion present\n norm_mn = self.get_gradient_value(\"EGF\")\n #probability of turning into a motor nueron\n n1 = self._p4\n## #normalize the result\n## if(tot != 0):\n## norm_mn = float(mn_count) / float(tot)\n## else:\n## norm_mn = 0\n #calculate the probability\n prob_MN = 1 - (1.*norm_mn**n1)/(self._p2**n1 + norm_mn**n1)\n x1 = rand.random()\n if(x1 <= self._p1*prob_MN):\n #differentiation occurs towards a motor nueron\n self.state = \"T2\"\n \n if(self.state == \"NSC\"):\n #HANDLE DIFFERENTIATION\n #RANDOM RULE\n x1 = rand.random()\n x2 = rand.random()\n #Find all the motor nuerons\n## #get neighbors\n## nbs = sim.network.neighbors(self)\n## #get the total\n## tot = len(nbs)\n## mn_count = 0\n## if(tot > 0):\n## #count up the states fo all fo these\n## for i in range(0, tot):\n## if(nbs[i].state == \"MN\" or nbs[i].state == \"T2\"): \n## mn_count += 1\n## #normalize the result\n## norm_mn = float(mn_count) / float(tot)\n #Make differerntiationd ependant on the gradient value\n norm_mn = self.get_gradient_value(\"EGF\")\n #set the paramaters\n n1 = self._p4\n #update the division time\n## self.division_time = norm_mn * 38 #in hours takes care of the feedback\n #depends on other motor nuerons\n prob_MN = 1 - (1.*norm_mn**n1)/(self._p3**n1 + norm_mn**n1) #probability of turning into a motor nueron\n## prob_G = (1.*norm_mn**n2)/(self._p3**n1 + norm_mn**n2) #of turning into a glial cell\n prob_G = self._p5\n #longer before the differentiation starts\n if(x1 <= prob_MN and x2 > prob_G):\n #differentiation occurs towards a motor nueron\n self.state = \"T2\"\n if(x1 > prob_MN and x2 <= prob_G):\n #differentiation occurs towards a glial cell\n self.state = \"T3\"\n #check to see if division enabled\n if(self._division == False):\n #check for mitotic speed up\n a = self._p6\n b = self._p7\n norm_nsc = self.get_gradient_value(\"TNF\")\n prob_divide = (1.*norm_nsc**b)/(a**b + norm_nsc**b)\n r = rand.random()\n 
if(r <= x):\n self._division = True", "def update_target_network(self) -> NoReturn:\n self.target.load_state_dict(self.model.state_dict())", "def temp_update(self):\n a_w = self.k / self.dx\n a_e = self.k / self.dx\n a_n = self.k / self.dy\n a_s = self.k / self.dy\n a_p = a_w + a_e + a_n + a_s + self.rho * self.cp * self.dx / self.dt\n for i, j in ti.ndrange((1, self.nx - 1), (1, self.ny - 1)):\n self.T[i,\n j] = (a_w * self.T[i - 1, j] + a_e * self.T[i + 1, j] +\n a_s * self.T[i, j - 1] + a_n * self.T[i, j + 1]) / a_p" ]
[ "0.7093877", "0.7012822", "0.6283256", "0.6237948", "0.6235717", "0.6108989", "0.6108989", "0.5920825", "0.588449", "0.5847011", "0.58270663", "0.5826606", "0.5730364", "0.5714644", "0.5706353", "0.5686066", "0.5660529", "0.5652498", "0.56278974", "0.56147575", "0.5596634", "0.5589962", "0.5578698", "0.55748343", "0.551713", "0.55128354", "0.5492103", "0.5483582", "0.5467347", "0.54608285", "0.5453617", "0.5445381", "0.5442255", "0.5428936", "0.5427896", "0.5415914", "0.54148906", "0.54123026", "0.541229", "0.5410394", "0.54010993", "0.53890455", "0.5366096", "0.53644997", "0.5344738", "0.5300019", "0.5290634", "0.5287678", "0.5283568", "0.52786756", "0.52542907", "0.52393806", "0.52360505", "0.52349097", "0.52325946", "0.5201886", "0.5198806", "0.5192444", "0.5186451", "0.51603734", "0.51588905", "0.51546943", "0.5137515", "0.51245606", "0.5114152", "0.51105297", "0.5105453", "0.5103328", "0.5099271", "0.5088716", "0.50850344", "0.5078367", "0.5076711", "0.5075947", "0.5067513", "0.5056788", "0.5056712", "0.5056485", "0.50528836", "0.5052602", "0.5052558", "0.5052362", "0.5050262", "0.50482273", "0.5040417", "0.50135887", "0.50129306", "0.5009481", "0.49997154", "0.49987045", "0.4995431", "0.49946722", "0.4991755", "0.4984024", "0.49833226", "0.49769837", "0.49748626", "0.49698707", "0.49679947", "0.4966172" ]
0.5398285
41
Fully copy parameters from the active network to the target network. To be used in conjunction with a parameter "C" that modulates how many timesteps pass between these hard updates.
def _hard_update(self, active, target): target.load_state_dict(active.state_dict())
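The query/document pair above captures the hard-update pattern used in DQN-style agents: the target network is overwritten wholesale with the active network's weights once every C timesteps, rather than blended gradually. Below is a minimal sketch of how that periodic copy is typically wired into a training loop; it is not part of this dataset row, and the QNetwork class, the period C = 100, and all other names are illustrative assumptions.

```python
import torch
import torch.nn as nn


class QNetwork(nn.Module):
    """Tiny illustrative Q-network (the architecture is an assumption)."""

    def __init__(self, state_size: int = 4, action_size: int = 2):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(state_size, 64), nn.ReLU(), nn.Linear(64, action_size)
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.net(x)


def hard_update(active: nn.Module, target: nn.Module) -> None:
    # Fully copy parameters from the active network to the target network.
    target.load_state_dict(active.state_dict())


if __name__ == "__main__":
    active_net, target_net = QNetwork(), QNetwork()
    hard_update(active_net, target_net)  # start from identical weights

    C = 100  # assumed update period: timesteps between hard updates
    for step in range(1, 1001):
        # ... environment interaction and a gradient step on active_net go here ...
        if step % C == 0:
            hard_update(active_net, target_net)
```

Several of the negative examples listed under this row implement the contrasting soft (Polyak) update, target = tau * active + (1 - tau) * target, which blends the two networks slightly on every step instead of copying them outright every C steps.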
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _soft_update_target_network(self):\n\n # Update the target network\n for target_param, param in zip(self.actor_target_network.parameters(), self.actor_network.parameters()):\n target_param.data.copy_((1-self.args.tau) * target_param.data + self.args.tau * param.data)\n\n # Update the critic network\n for target_param, param in zip(self.critic_target_network.parameters(), self.critic_network.parameters()):\n target_param.data.copy_((1-self.args.tau) * target_param.data + self.args.tau * param.data)", "def hard_update_target_network(self,step):\n \n if step % self.C == 0:\n pars = self.model.get_weights()\n self.target_model.set_weights(pars)", "def hard_update_target_network(self,step):\n \n if step % self.C == 0:\n pars = self.model.get_weights()\n self.target_model.set_weights(pars)", "def hard_update(source_net, target_net):\n for target_param, param in zip(target_net.parameters(), source_net.parameters()):\n target_param.data.copy_(param.data)", "def soft_update(source_net, target_net, tau):\n for target_param, param in zip(target_net.parameters(), source_net.parameters()):\n target_param.data.copy_(\n target_param.data * (1.0 - tau) + param.data * tau\n )", "def copy_params(self):\n tf.get_default_session().run(self.copy_ops)", "def update_target_network(self):\n variables = self.online_network.trainable_variables\n variables_copy = [tf.Variable(v) for v in variables]\n self.target_network.trainable_variables = variables_copy", "def update_target_network(self, tau):\n for p_target, p_local in zip(self.q_network_target.parameters(), self.q_network_local.parameters()):\n p_target.data.copy_(tau * p_local.data + (1.0-tau) * p_target.data)", "def copy_network_parameters(sess):\n e1_params = [t for t in tf.trainable_variables() if t.name.startswith('q_network')]\n e1_params = sorted(e1_params, key=lambda v: v.name)\n e2_params = [t for t in tf.trainable_variables() if t.name.startswith('target')]\n e2_params = sorted(e2_params, key=lambda v: v.name)\n\n update_ops = []\n for e1_v, e2_v in zip(e1_params, e2_params):\n op = e2_v.assign(e1_v)\n update_ops.append(op)\n\n sess.run(update_ops)", "def hard_update(self,target, source):\n\t\tfor target_param, param in zip(target.parameters(), source.parameters()):\n\t\t\t\ttarget_param.data.copy_(param.data)", "def update_network_parameters(self, tau=None):\n\n #Is used during the first iteration such that the target networks get the same parameters of the normal networks (hard update)\n if tau is None:\n tau = self.tau\n\n #Update the target_actor weights\n weights = []\n targets = self.target_actor.weights\n for i, weight in enumerate(self.actor.weights):\n weights.append(weight * tau + targets[i]*(1-tau))\n\n self.target_actor.set_weights(weights)\n\n #Update the target_critic_1 weights\n weights = []\n targets = self.target_critic_1.weights\n for i, weight in enumerate(self.critic_1.weights):\n weights.append(weight * tau + targets[i]*(1-tau))\n\n self.target_critic_1.set_weights(weights)\n\n #Update the target_critic_2 weights\n weights = []\n targets = self.target_critic_2.weights\n for i, weight in enumerate(self.critic_2.weights):\n weights.append(weight * tau + targets[i]*(1-tau))\n\n self.target_critic_2.set_weights(weights)", "def momentum_init(self, online_net, target_net):\n for param_ol, param_tgt in zip(online_net.parameters(), target_net.parameters()):\n param_tgt.data.copy_(param_ol.data)\n param_tgt.requires_grad = False", "def update_target_network(self, tau):\n for t, e in zip(\n self.target_network.trainable_variables, 
self.online_network.trainable_variables\n ):\n t.assign(t * (1-tau) + e * tau)", "def sync_target_network(self):\n for t, e in zip(\n self.target_network.trainable_variables, self.online_network.trainable_variables\n ):\n t.assign(e)", "def hard_update(target, source):\n for target_param, param in zip(target.parameters(), source.parameters()):\n target_param.data.copy_(param.data)", "def set_params(self, new_params: torch.Tensor) -> None:\n assert new_params.size() == self.get_params().size()\n progress = 0\n for pp in list(self.net.parameters()):\n cand_params = new_params[progress: progress +\n torch.tensor(pp.size()).prod()].view(pp.size())\n progress += torch.tensor(pp.size()).prod()\n pp.data = cand_params", "def transfer_parameters_call_optimization(self, mainCl, mainPt, consCall=None, consPt=None):", "def update_target_q_network(self):\n assert self.target_network != None\n self.target_network.run_copy()", "def _freeze_tgt_networks(self):\n q1 = zip(self.tgt_q1.parameters(), self.soft_q1.parameters())\n q2 = zip(self.tgt_q2.parameters(), self.soft_q2.parameters())\n\n # Copy parameters\n for target_param, param in q1:\n target_param.data.copy_(param.data)\n for target_param, param in q2:\n target_param.data.copy_(param.data)\n\n # Freeze gradients\n for param in self.tgt_q1.parameters():\n param.requires_grad = False\n for param in self.tgt_q2.parameters():\n param.requires_grad = False", "def soft_update_target_network(self):\n \n pars_behavior = self.model.get_weights() # these have form [W1, b1, W2, b2, ..], Wi = weights of layer i\n pars_target = self.target_model.get_weights() # bi = biases in layer i\n \n for par_behavior,par_target in zip(pars_behavior,pars_target):\n par_target = par_target*(1-self.tau) + par_behavior*self.tau\n pars_target[ctr] = par_target\n\n self.target_model.set_weights(pars_target)", "def update_disc_copy(self):\n source = self.discriminator\n dest = self.discriminator_copy\n\n assert len(source.layers) == len(dest.layers)\n for dest_layer, source_layer in zip(dest.layers, source.layers):\n dest_layer.set_weights(source_layer.get_weights())", "def update_disc_copy(self):\n source = self.discriminator\n dest = self.discriminator_copy\n\n assert len(source.layers) == len(dest.layers)\n for dest_layer, source_layer in zip(dest.layers, source.layers):\n dest_layer.set_weights(source_layer.get_weights())", "def soft_update(target, source, tau):\n for target_param, param in zip(target.parameters(), source.parameters()):\n target_param.data.copy_(target_param.data * (1.0 - tau) + param.data * tau)", "def hard_copy_weights(self, target, source):\n for target_param, param in zip(target.parameters(), source.parameters()):\n target_param.data.copy_(param.data)", "def soft_update_critic(self):\n local_weights = np.array(self.critic_local.model.get_weights())\n target_weights = np.array(self.critic_target.model.get_weights())\n\n assert len(local_weights) == len(\n target_weights), ('Local and target model parameters must have '\n 'the same size')\n\n new_weights = self.tau * local_weights + (1 - self.tau) * target_weights\n self.critic_target.model.set_weights(new_weights)", "def update_target_network(self):\n self.target_dqn.set_weights.remote(self.dqn.get_weights.remote())", "def _reset_parameters(self) -> None:\n self._setup_input = {\n \"P\": csc_matrix(2.0 * self.opt.P(self.p).toarray()),\n \"q\": self.opt.q(self.p).toarray().flatten(),\n }\n if self.opt_type in CONSTRAINED_OPT:\n A = self.opt.A(self.p)\n b = self.opt.b(self.p)\n self._setup_input[\"A\"] = 
csc_matrix(\n cs.vertcat(self.opt.M(self.p), A, -A).toarray()\n )\n self._setup_input[\"l\"] = (\n cs.vertcat(-self.opt.c(self.p), -b, b).toarray().flatten()\n )", "def update_target_network(self):\n\n\t\tprint \"Updating Target DQN...\"\n\t\t\n\t\tself.update_operation.run()", "def _reset_parameters(self):\n\n nn.init.xavier_normal_(self._W_x2i)\n nn.init.xavier_normal_(self._W_x2f)\n nn.init.xavier_normal_(self._W_x2o)\n nn.init.xavier_normal_(self._W_x2c)\n \n nn.init.orthogonal_(self._W_h2i)\n nn.init.orthogonal_(self._W_h2f)\n nn.init.orthogonal_(self._W_h2o)\n nn.init.orthogonal_(self._W_h2c)\n \n nn.init.uniform_(self._W_c2i)\n nn.init.uniform_(self._W_c2f)\n nn.init.uniform_(self._W_c2o)\n \n nn.init.constant_(self._b_i, 0)\n nn.init.constant_(self._b_f, 1)\n nn.init.constant_(self._b_o, 0)\n nn.init.constant_(self._b_c, 0)\n\n if self._chrono_init:\n print(self._t_max)\n b_f = torch.from_numpy(np.log(np.random.randint(1, self._t_max+1, size=self._hidden_size)))\n self._b_f.data.copy_(b_f)\n self._b_i.data.copy_(-b_f)", "def update_net(optimizer):\n assert kl_train_dataset.bp_mode\n frames_gen, frame_cnt, rel_props, prop_ticks, prop_scaling = kl_train_dataset[index]\n\n optimizer.zero_grad()\n \n num_crop = 1\n length = 3\n if args.modality == 'Flow':\n length = 10\n elif args.modality == 'RGBDiff':\n length = 18\n \n for frames in frames_gen:\n # frames.shape == [frame_batch_size * num_crop * 3, 224, 224]\n assert len(frames) == length * frame_cnt\n input_var = torch.autograd.Variable(frames.view(-1, length, frames.size(-2), frames.size(-1)).cuda())\n base_out = net(input_var, None, None, None, None)\n assert base_out.size(0) == frame_cnt and base_out.size(1) == base_out_dim\n step_features = base_out.mean(dim=0).unsqueeze(0)\n gate, glcu_task_pred = net.glcu(step_features)\n gate = gate.repeat(1, frame_cnt).view(frame_cnt, base_out_dim)\n assert glcu_task_pred.size(0) == 1\n glcu_task_pred = F.softmax(glcu_task_pred.squeeze(), dim=0)\n if net.additive_glcu:\n base_out = base_out + gate\n else:\n base_out = base_out * gate\n\n output = net.test_fc(base_out)\n assert output.size(0) == frame_cnt and output.size(1) == output_dim\n act_scores, comp_scores, reg_scores = reorg_stpp.forward(output, prop_ticks, prop_scaling, bp_mode=True)\n\n # Task Head\n combined_scores = F.softmax(act_scores[:, 1:], dim=1) * torch.exp(comp_scores)\n combined_scores = combined_scores.mean(dim=0).unsqueeze(0)\n task_pred = net.task_head(combined_scores)\n assert task_pred.size(0) == 1\n task_pred = F.softmax(net.task_head(combined_scores).squeeze(), dim=0)\n\n loss = KL(task_pred, glcu_task_pred)\n loss.backward()\n torch.cuda.empty_cache() # To empty the cache from previous iterations\n break\n\n optimizer.step()\n optimizer.zero_grad()\n torch.cuda.empty_cache()\n\n return float(loss.data), frame_cnt", "def update_target_net(self):\n if self.n_steps % self.target_update_interval == 0:\n self.target_q.load_state_dict(self.working_q.state_dict())", "def update_target_network(self) -> NoReturn:\n self.target.load_state_dict(self.model.state_dict())", "def soft_update_target_network(self):\n \n pars_behavior = self.model.get_weights() # these have form [W1, b1, W2, b2, ..], Wi = weights of layer i\n pars_target = self.target_model.get_weights() # bi = biases in layer i\n \n ctr = 0\n for par_behavior,par_target in zip(pars_behavior,pars_target):\n par_target = par_target*(1-self.tau) + par_behavior*self.tau\n pars_target[ctr] = par_target\n ctr += 1\n\n self.target_model.set_weights(pars_target)", "def 
update_target_network(self):\n self.target.set_weights(self.policy.get_weights()) # Update weights of target network with weights of policy network", "def update_target_network(self):\n self.target_Qmodel = clone_model(self.Qmodel)\n self.target_Qmodel.set_weights(self.Qmodel.get_weights())\n\n # target network is never compiled\n self.target_Qmodel.compile(loss='mse', optimizer=Adam())", "def _momentum_update(self):\n for param_ol, param_tgt in zip(self.online_net.parameters(),\n self.target_net.parameters()):\n param_tgt.data = param_tgt.data * self.momentum + \\\n param_ol.data * (1. - self.momentum)", "def update_params(self):\n if self.clip > 0:\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.clip)\n self.optimizer.step()", "def _soft_update(self, active, target):\n\n for t_param, param in zip(target.parameters(), active.parameters()):\n t_param.data.copy_(self.tau*param.data + (1-self.tau)*t_param.data)", "def copy_state_to_network(self):\n state = self.rigid_body_state\n\n state.position = self.transform.world_position.copy()\n state.orientation = self.transform.world_orientation.copy()\n state.angular = self.physics.world_angular.copy()\n state.velocity = self.physics.world_velocity.copy()\n # state.collision_group = self.physics.collision_group\n # state.collision_mask = self.physics.collision_mask\n self.rigid_body_time = WorldInfo.elapsed", "def forgiving_state_copy(target_net, source_net):\n net_state_dict = target_net.state_dict()\n loaded_dict = source_net.state_dict()\n new_loaded_dict = {}\n for k in net_state_dict:\n if k in loaded_dict and net_state_dict[k].size() == loaded_dict[k].size():\n new_loaded_dict[k] = loaded_dict[k]\n print(\"Matched\", k)\n else:\n print(\"Skipped loading parameter \", k)\n # logging.info(\"Skipped loading parameter %s\", k)\n net_state_dict.update(new_loaded_dict)\n target_net.load_state_dict(net_state_dict)\n return target_net", "def update_parameters_NN(v_samples, c_samples, silent=True):\n\n # Prepare the data into statistical setting of\n # X: training data (capacity, time) in row\n # y: true outcomes (value) in row\n\n v = v_samples\n c = c_samples\n\n v_df = pd.DataFrame(v) # nothing happening in time period 0 (first action in t=1)\n v_df.columns = [\"vt\"+str(i) for i in v_df.columns]\n v_df = v_df.stack()\n\n t_df = pd.DataFrame([np.arange(T+1)] * len(v))\n t_df.columns = [\"t\"+str(i) for i in t_df.columns]\n t_df = tidy_up_t(t_df)\n\n # to allow for multiple resources\n cs = {}\n for h in resources:\n c_df = pd.DataFrame(c[0, :, h]).T\n for i in np.arange(len(c) - 1) + 1:\n c_df = c_df.append(pd.DataFrame(c[i, :, h]).T)\n c_df.columns = [\"c-h\"+str(h)+\"-t\"+str(i) for i in c_df.columns]\n c_df = tidy_up_c(c_df)\n c_df.index = t_df.index\n cs[h] = c_df\n\n X = pd.concat([t_df, *[cs[h] for h in resources]], axis=1)\n y = v_df\n y.index = X.index\n\n # Note the difference in argument order\n neural_net = MLPRegressor(alpha=0.1, hidden_layer_sizes=(10,), max_iter=50000,\n activation='logistic', verbose=not silent, learning_rate='adaptive')\n m = neural_net.fit(X, y)\n\n y_true = y\n y_pred = m.predict(X)\n r2_score(y_true, y_pred)\n\n # Still the OLS for comparison\n model = sm.OLS(y, X).fit()\n model.params\n\n y_pred_ols = model.predict(X) # make the predictions by the model\n r2_score(y_true, y_pred_ols)\n\n # # Print out the statistics\n # model.summary()\n #\n #\n # # Print out the statistics\n # if not silent:\n # model.summary()\n\n return m", "def _update_target_net(self):\n 
self.target_net.load_state_dict(self.policy_net.state_dict())\n self.target_net.eval()", "def mutate(parent, parameters):\r\n if parameters['only_ncp']:\r\n if parameters['reporter']:\r\n print('Mutate Non Convolutional Part...')\r\n start_time = timeit.default_timer()\r\n child_ncp, times = mutate_ncp(parent, None, parameters)\r\n ncp_time = \"{:.4f}\".format(timeit.default_timer() - start_time)\r\n child_cp, cp_time = None, None\r\n #print(\"NCP Time:\\t{}\".format(ncp_time))\r\n\r\n else:\r\n if parameters['reporter']:\r\n print('\\nMutate Convolutional Part...')\r\n start_time = timeit.default_timer()\r\n child_cp = mutate_cp(parent, parameters)\r\n cp_time = \"{:.2f}\".format(timeit.default_timer() - start_time)\r\n\r\n if parameters['reporter']:\r\n print('Mutate Non Convolutional Part...')\r\n start_time = timeit.default_timer()\r\n child_ncp, times = mutate_ncp(parent, child_cp, parameters)\r\n ncp_time = \"{:.2f}\".format(timeit.default_timer() - start_time)\r\n #print(\"CP Time:\\t{}\\tNCP Time:\\t{}\".format(cp_time, ncp_time))\r\n\r\n # if False:\r\n # '''In depth profile of Mutation Times'''\r\n # profile_times = [ncp_time] + times\r\n # profile_times = \",\".join(profile_times)\r\n # file_name = \"ncp_mutation_time_05.csv\"\r\n # file = open(file_name, 'a+')\r\n # file.write(profile_times + '\\n')\r\n # file.close()\r\n\r\n return ConvolutionalNeuralNetwork(child_cp, child_ncp, feed_original_X=parameters['feed_original_X'],\r\n only_ncp=parameters['only_ncp']), cp_time, ncp_time", "def copy_model_parameters(sess, net1, net2):\n\n copy_scope_parameters(sess, net1.scope, net2.scope)", "def optimize_parameters(self):\n self.loss_total.backward() # calculate gradients\n self.optimizer.step()\n self.optimizer.zero_grad()\n torch.cuda.empty_cache()", "def update_networks(self):\n\t\t# layer 1 update\n\t\tself.W1_tv = tf.assign(self.W1_tv, self.W1_av)\n\t\tself.b1_tv = tf.assign(self.b1_tv, self.b1_av)\n\n\t\t# layer 2 update\n\t\tself.W2_tv = tf.assign(self.W2_tv, self.W2_av)\n\t\tself.b2_tv = tf.assign(self.b2_tv, self.b2_av)\n\n\t\t# layer 3 update\n\t\tself.W3_tv = tf.assign(self.W3_tv, self.W3_av)\n\t\tself.b3_tv = tf.assign(self.b3_tv, self.b3_av)", "def _shared_params(self):\n return BenchmarkBase._shared_params(self)._replace(\n num_gpus=1, model='resnet50', distortions=False, forward_only=True)", "def sync(net, net_tar):\n for var, var_tar in zip(net.trainable_weights,\n net_tar.trainable_weights):\n var_tar.assign(var)", "def reset_layer(self):\n if self.W is None:\n if self.sparse_initialize:\n W_values = self.sparse_initialize_weights()\n else:\n if self.activation == theano.tensor.tanh:\n born = np.sqrt(6. / (self.n_in + self.n_out))\n else:\n born = 4 * np.sqrt(6. 
/ (self.n_in + self.n_out))\n W_values = np.asarray(self.rng.uniform(\n low=-born,\n high=born,\n size=(self.n_in, self.n_out)),\n dtype=theano.config.floatX)\n\n self.W = theano.shared(value=W_values, name='W', borrow=True)\n\n if self.b is None:\n b_values = np.zeros(int(self.n_out/self.num_pieces),\n dtype=theano.config.floatX)\n self.b = theano.shared(value=b_values, name='b', borrow=True)\n\n if self.sparser is None:\n s_values = np.ones(\n int(self.n_out/self.num_pieces), dtype=theano.config.floatX)\n self.sparser = theano.shared(value=s_values, name='sparser',\n borrow=True)\n # The layer parameters\n self.params = [self.W, self.b]", "def _shared_params(self):\n return BenchmarkBase._shared_params(self)._replace(\n model='resnet50', batch_size=128, distortions=False,\n optimizer='momentum')", "def copy_weights(copy_from: nn.Module, copy_to: nn.Module, polyak=None):\n if polyak is not None:\n for target_param, param in zip(copy_to.parameters(), copy_from.parameters()):\n target_param.data.copy_(polyak * param + (1 - polyak) * target_param)\n else:\n copy_to.load_state_dict(copy_from.state_dict())", "def set_params(self, params):\n cpt = 0\n for param in self.parameters():\n tmp = np.product(param.size())\n\n if torch.cuda.is_available():\n param.data.copy_(torch.from_numpy(\n params[cpt:cpt + tmp]).view(param.size()).cuda())\n else:\n param.data.copy_(torch.from_numpy(\n params[cpt:cpt + tmp]).view(param.size()))\n cpt += tmp", "def __prepare_parameter__(self, in_args):\n if self.__use_remote_sparse_updater__():\n self.__gradient_machine__.prefetch(in_args)\n self.__parameter_updater__.getParametersRemote()", "def copy_params(source, target):\n for target_param, param in zip(target.parameters(), source.parameters()):\n target_param.data.copy_(param.data)", "def update_target(self):\n with torch.no_grad():\n for target_q_param, q_param in zip(self.target_q_funcs.parameters(), self.q_funcs.parameters()):\n target_q_param.data.copy_(self.tau * q_param.data + (1.0 - self.tau) * target_q_param.data)\n for target_pi_param, pi_param in zip(self.target_policy.parameters(), self.policy.parameters()):\n target_pi_param.data.copy_(self.tau * pi_param.data + (1.0 - self.tau) * target_pi_param.data)", "def _copy_streamflow_values(self):\r\n log('Creating streamflow variable', 'INFO')\r\n q_var = self.cf_nc.createVariable(\r\n self.output_flow_var_name, 'f4', (self.output_id_dim_name, 'time'))\r\n q_var.long_name = 'Discharge'\r\n q_var.units = 'm^3/s'\r\n q_var.coordinates = 'time lat lon z'\r\n q_var.grid_mapping = 'crs'\r\n q_var.source = ('Generated by the Routing Application for Parallel ' +\r\n 'computatIon of Discharge (RAPID) river routing model.')\r\n q_var.references = 'http://rapid-hub.org/'\r\n q_var.comment = ('lat, lon, and z values taken at midpoint of river ' +\r\n 'reach feature')\r\n\r\n log('Copying streamflow values', 'INFO')\r\n master_begin_time_step_index = 1\r\n master_end_time_step_index = len(self.cf_nc.dimensions['time'])\r\n \r\n #to reduce RAM, copy by chunks\r\n max_2d_dimension = 1000000000 #~8GB Max\r\n for raw_nc_index, raw_nc in enumerate(self.raw_nc_list):\r\n max_time_step_size = min(raw_nc.size_time, max(1, int(float(max_2d_dimension)/float(raw_nc.size_river_id))))\r\n raw_nc_begin_time_step_index = 0\r\n raw_nc_end_time_step_index = raw_nc.size_time\r\n for raw_nc_time_index in xrange(0, raw_nc.size_time, max_time_step_size):\r\n time_interval_size = max(1, min(raw_nc.size_time-raw_nc_time_index, max_time_step_size))\r\n\r\n raw_nc_end_time_step_index = 
raw_nc_begin_time_step_index + time_interval_size\r\n master_end_time_step_index = master_begin_time_step_index + time_interval_size\r\n \r\n q_var[:,master_begin_time_step_index:master_end_time_step_index] = raw_nc.get_qout(time_index_start=raw_nc_begin_time_step_index,\r\n time_index_end=raw_nc_end_time_step_index)\r\n \r\n master_begin_time_step_index = master_end_time_step_index\r\n raw_nc_begin_time_step_index = raw_nc_end_time_step_index\r\n\r\n log('Adding initial streamflow values', 'INFO')\r\n #add initial flow to RAPID output file\r\n if self.qinit_file and self.rapid_connect_file:\r\n lookup_table = csv_to_list(self.rapid_connect_file)\r\n lookup_comids = np.array([int(float(row[0])) for row in lookup_table])\r\n \r\n init_flow_table = csv_to_list(self.qinit_file)\r\n \r\n for index, comid in enumerate(self.cf_nc.variables[self.output_id_dim_name][:]):\r\n try:\r\n lookup_index = np.where(lookup_comids == comid)[0][0]\r\n except Exception:\r\n log('COMID %s misssing in rapid_connect file' % comid,\r\n 'ERROR')\r\n q_var[index,0] = float(init_flow_table[lookup_index][0])\r\n else:\r\n for index, comid in enumerate(self.cf_nc.variables[self.output_id_dim_name][:]):\r\n q_var[index,0] = 0", "def update_target(self):\n if self.soft:\n self.soft_update_target(self.critic1_target, self.critic1)\n self.soft_update_target(self.critic2_target, self.critic2)\n else:\n if self.learn_cur % self.learn_replace == 0:\n self.critic1_target.load_state_dict(self.critic1.state_dict())\n self.critic2_target.load_state_dict(self.critic2.state_dict())", "def soft_update(self, local_model, target_model, tau):\n for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):\n target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)", "def SoftUpdate(self, local, target, tau):\n for target_param, local_param in zip(target.parameters(), local.parameters()):\n target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)", "def runconnectome(self, ):\n for ps in self.postSynaptic:\n if ps[:3] not in self.muscles and abs(self.postSynaptic[ps][self.thisState]) > self.threshold:\n self.fireNeuron(ps)\n self.motorcontrol()\n for ps in self.postSynaptic:\n # if self.postSynaptic[ps][thisState] != 0:\n # print ps\n # print \"Before Clone: \", self.postSynaptic[ps][thisState]\n\n # fired neurons keep getting reset to previous weight\n # wtf deepcopy -- So, the concern is that the deepcopy doesnt\n # scale up to larger neural networks?? 
\n self.postSynaptic[ps][self.thisState] = copy.deepcopy(self.postSynaptic[ps][self.nextState]) \n\n # this deep copy is not in the functioning version currently.\n # print \"After Clone: \", self.postSynaptic[ps][thisState]\n\n self.thisState, self.nextState = self.nextState, self.thisState", "def _assign_model_params(self, sess):\n with self.graph.as_default():\n for nn in range(self.num_networks):\n self.networks[nn].assign_model_params(sess)", "def tuned_for_ec():\n # TODO(theosanderson): update these to true SOTA values\n hparams = contrib_training.HParams()\n hparams.add_hparam('gradient_clipping_decay', 0.9999)\n hparams.add_hparam('batch_style', 'bucket')\n hparams.add_hparam('batch_size', 34)\n hparams.add_hparam('dilation_rate', 5)\n hparams.add_hparam('filters', 411)\n hparams.add_hparam('first_dilated_layer', 1) # This is 0-indexed\n hparams.add_hparam('kernel_size', 7)\n hparams.add_hparam('num_layers', 5)\n hparams.add_hparam('pooling', 'mean')\n hparams.add_hparam('resnet_bottleneck_factor', 0.88152)\n hparams.add_hparam('lr_decay_rate', 0.9977)\n hparams.add_hparam('learning_rate', 0.00028748)\n hparams.add_hparam('decision_threshold', 0.3746)\n hparams.add_hparam('denominator_power', 0.88)\n\n hparams.add_hparam('train_steps', 650000)\n return hparams", "def momentum_update(self, online_net, target_net, momentum):\n for param_ol, param_tgt in zip(online_net.parameters(), target_net.parameters()):\n param_tgt.data = param_tgt.data * momentum + param_ol.data * (1. - momentum)", "def sync_params(module):\n if num_procs()==1:\n return\n for p in module.parameters():\n p_data = p.data.cpu()\n p_numpy = p_data.numpy()\n broadcast(p_numpy)\n if p.device.type != 'cpu' and proc_id() != 0:\n p.data.copy_(p_data) # copy parameters back to GPU", "def restore(self):\n pert_params = list(self.net.parameters())\n saved_params = list(self.saved_net.parameters())\n for perturbed, saved in zip(pert_params, saved_params):\n perturbed_shape = perturbed.shape\n saved_shape = saved.shape\n perturbed = perturbed.flatten()\n saved = saved.flatten()\n for i, _ in enumerate(perturbed.data):\n perturbed.data[i] = saved.data[i]\n perturbed = perturbed.view(perturbed_shape)\n saved = saved.view(saved_shape)", "def send_update(self, target_block, DIR):\r\n new_opts = []\r\n new_weights = []\r\n if len(self.block_opts) != 1:\r\n raise Exception (\"Improperly collapsed block!\")\r\n i = self.block_opts[0] #our state\r\n for k in range(len(target_block.block_opts)): #k is their state\r\n #print(\"Checking \",i,k,DIR)\r\n if check_allowed(i,target_block.block_opts[k],DIR):\r\n new_opts.append(target_block.block_opts[k])\r\n new_weights.append(target_block.block_weights[k])\r\n target_block.block_opts = new_opts\r\n n = sum(new_weights)\r\n target_block.block_weights = [x/n for x in new_weights]\r\n target_block.block_weights = new_weights\r\n target_block.arr = target_block.superposition()\r\n return", "def params(self,new):\n self._params = new\n self._config_set()\n self._make_model()", "def _shared_params(self):\n return BenchmarkBase._shared_params(self)._replace(\n num_gpus=1,\n model='resnet50',\n num_warmup_batches=5,\n num_batches=50,\n distortions=False,\n forward_only=True,\n device='cpu',\n data_format='NHWC',\n num_intra_threads=0)", "def copy_model(self, tf_seed=0):\n\n # Assemble network_list\n target = NDN(self.network_list, ffnet_out=self.ffnet_out,\n noise_dist=self.noise_dist, tf_seed=tf_seed)\n\n target.poisson_unit_norm = self.poisson_unit_norm\n target.data_pipe_type = 
self.data_pipe_type\n target.batch_size = self.batch_size\n\n # Copy all the parameters\n for nn in range(self.num_networks):\n for ll in range(self.networks[nn].num_layers):\n target.networks[nn].layers[ll].weights = \\\n self.networks[nn].layers[ll ].weights.copy()\n target.networks[nn].layers[ll].biases = \\\n self.networks[nn].layers[ll].biases.copy()\n target.networks[nn].layers[ll].reg = \\\n self.networks[nn].layers[ll].reg.reg_copy()\n target.networks[nn].input_masks = deepcopy(self.networks[nn].input_masks)\n return target", "def update_target_net(self, sess):\n sess.run(self.update_target_net_op)", "def _build_sync_op(self):\n # Get trainable variables from online and target DQNs\n sync_qt_ops = []\n online_vars = sparse_utils.get_all_variables_and_masks(self.online_convnet)\n target_vars = sparse_utils.get_all_variables_and_masks(self.target_convnet)\n for (v_online, v_target) in zip(online_vars, target_vars):\n # Assign weights from online to target network.\n sync_qt_ops.append(v_target.assign(v_online, use_locking=True))\n return sync_qt_ops", "def sync_parameters(self, model: nn.Module) -> None:\n # before ema, copy weights from orig\n avg_param = (\n itertools.chain(self.module.parameters(), self.module.buffers()))\n src_param = (itertools.chain(model.parameters(), model.buffers()))\n for p_avg, p_src in zip(avg_param, src_param):\n p_avg.data.copy_(p_src.data)", "def sync_parameters(self, model: nn.Module) -> None:\n # before ema, copy weights from orig\n avg_param = (\n itertools.chain(self.module.parameters(), self.module.buffers()))\n src_param = (itertools.chain(model.parameters(), model.buffers()))\n for p_avg, p_src in zip(avg_param, src_param):\n p_avg.data.copy_(p_src.data)", "def optimize_parameters(self):\n # forward\n for i in range(min(self.big_iter+1,len(self.orders_rev))):\n if(self.orders_rev):\n # compute fake images and reconstruction images.\n self.forward(i,False)\n # G_A and G_B\n # Ds require no gradients when optimizing Gs\n self.set_requires_grad(self.netD, False)\n # set G_A and G_B's gradients to zero\n self.optimizers_G[self.orders_rev[i]].zero_grad()\n # calculate gradients for G_A and G_B\n self.backward_G(i,False)\n # update G_A and G_B's weights\n self.optimizers_G[self.orders_rev[i]].step()\n # D_A and D_B\n self.set_requires_grad(self.netD, True)\n self.optimizer_D.zero_grad() \n self.backward_D(i,False) \n self.optimizer_D.step() \n else:\n self.optimizer_D.zero_grad() # set D_A and D_B's gradients to zero\n self.backward_DY() # calculate gradients for D_A\n self.optimizer_D.step()\n for i in range(min(self.big_iter+1, len(self.orders))):\n if(self.orders):\n if(i>0):\n self.real_A = self.fake_B.detach()\n self.forward(i,True) # compute fake images and reconstruction images.\n # G_A and G_B\n # Ds require no gradients when optimizing Gs\n self.set_requires_grad(self.netD, False)\n # set G_A and G_B's gradients to zero\n self.optimizers_G[self.orders[i]].zero_grad()\n self.backward_G(i,True) # calculate gradients for G_A and G_B\n # update G_A and G_B's weights\n self.optimizers_G[self.orders[i]].step()\n # D_A and D_B\n self.set_requires_grad(self.netD, True)\n self.optimizer_D.zero_grad() # set D_A and D_B's gradients to zero\n self.backward_D(i,True) # calculate gradients for D_A\n self.optimizer_D.step() \n else:\n self.optimizer_D.zero_grad() # set D_A and D_B's gradients to zero\n self.backward_DX() # calculate gradients for D_A\n self.optimizer_D.step() \n self.current_label=self.labels[0]\n self.current_order=self.orders\n 
self.current_pred = np.concatenate((self.pred_real.detach().cpu().numpy().mean(\n axis=2).mean(axis=2), self.pred_fake.detach().cpu().numpy().mean(axis=2).mean(axis=2)))", "def main(hotstart_input, hotstart_bcg, hotstart_output):\r\n fpath_in = hotstart_input\r\n fpath_out = hotstart_output\r\n fpath_bcg = hotstart_bcg\r\n\r\n with nc.Dataset(fpath_in, 'r') as src, \\\r\n nc.Dataset(fpath_bcg, 'r') as bcg, \\\r\n nc.Dataset(fpath_out, \"w\") as dst:\r\n # copy attributes\r\n for name in src.ncattrs():\r\n dst.setncattr(name, src.getncattr(name))\r\n # copy dimensions\r\n print(\"Copy dimensions...\")\r\n for name, dimension in src.dimensions.items():\r\n dst.createDimension(\r\n name,\r\n (len(dimension) if not dimension.isunlimited()\r\n else None))\r\n # Copy variables\r\n print(\"Copy variables...\")\r\n for name, variable in src.variables.items():\r\n print(\"Variable: \", name)\r\n dimensions = variable.dimensions\r\n dst.createVariable(\r\n name, variable.datatype, dimensions)\r\n if name == 'SED3D_bedfrac':\r\n dst.variables[name][:] = bcg.variables[name][:]\r\n else:\r\n dst.variables[name][:] = src.variables[name][:]", "def load(uDir):\n import sys\n sys.path.append(uDir)\n from net_spec import spec\n \n builder = NetworkBuilder(spec)\n htm = builder.build()\n htm.start()\n \n ## restore each node state\n layers = htm.layers\n \n for l in range(len(layers) - 1):\n (r,c) = spec[l]['shape']\n\n if layers[l].node_sharing:\n state = {}\n state['coincidences'] = np.load(uDir + str(l) + \".0.0.coincidences.npy\")\n state['temporal_groups'] = [] ## !FIXME temporal groups should be saved, first\n state['PCG'] = np.load(uDir + str(l) + \".0.0.PCG.npy\")\n\n for i in range(r):\n for j in range(c):\n layers[l].pipes[i][j].send((\"set_state\", state))\n\n else:\n for i in range(r):\n for j in range(c):\n state = {}\n state['coincidences'] = np.load(uDir + str(l) + \".\" + str(i) + \".\" + str(j) + \".coincidences.npy\")\n state['temporal_groups'] = [] ## !FIXME temporal groups should be saved, first\n state['PCG'] = np.load(uDir + str(l) + \".\" + str(i) + \".\" + str(j) + \".PCG.npy\")\n layers[l].pipes[i][j].send((\"set_state\", state))\n \n ## restore also last node's state\n state = {}\n state['coincidences'] = np.load(uDir + str(len(layers) - 1) + \".0.0.coincidences.npy\")\n state['cls_prior_prob'] = np.load(uDir + str(len(layers) - 1) + \".0.0.cls_prior_prob.npy\")\n state['PCW'] = np.load(uDir + str(len(layers) - 1) + \".0.0.PCW.npy\")\n layers[-1].pipes[0][0].send((\"set_state\", state))\n\n return htm", "def set_parameters(self, new_param):\n\n current_idx = 0\n for idx, param in enumerate(self.__network.parameters()):\n temp_param = \\\n new_param[current_idx:current_idx + self.__net_sizes[idx]]\n temp_param = temp_param.reshape(self.__net_shapes[idx])\n param.data = tr.from_numpy(temp_param).float()\n current_idx += self.__net_sizes[idx]", "def _update_parameters(self, loss):\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()", "def load_parameters(self, params):\n # load (aka. 
deep copy) parameters in params into network\n c=0\n self.params = []\n names = ['W_i']\n for n,p in zip(names, params):\n self.params.append(theano.shared(name = p.name,\n value = p.get_value(borrow=True)))\n \n setattr(self, n, self.params[c])\n c+=1\n assert( len(self.params) == c )", "def _update_params(self, new_params):\n # update all the parameters\n for old_param, new_param in zip(self.model.parameters(), new_params):\n old_param.data += torch.from_numpy(new_param).to(old_param.device)", "def _reset_parameters(self):\n self._solver_input[\"P\"] = cvxopt.matrix(2.0 * self.opt.P(self.p).toarray())\n self._solver_input[\"q\"] = cvxopt.matrix(self.opt.q(self.p).toarray().flatten())\n if self.opt_type in CONSTRAINED_OPT:\n if self.opt.nk > 0:\n self._solver_input[\"G\"] = cvxopt.matrix(-self.opt.M(self.p).toarray())\n self._solver_input[\"h\"] = cvxopt.matrix(\n self.opt.c(self.p).toarray().flatten()\n )\n if self.opt.na > 0:\n self._solver_input[\"A\"] = cvxopt.matrix(self.opt.A(self.p).toarray())\n self._solver_input[\"b\"] = cvxopt.matrix(-self.opt.b(self.p).toarray())", "def mutate_ncp(parent, child_cp, parameters):\r\n\r\n if not parameters['one_child_keep_child']:\r\n child_nn = NeuralNetworkBuilder.clone_neural_network(parent.ncp.nn)\r\n else:\r\n child_nn = parent.ncp.nn\r\n\r\n if parameters['ncp_fully_connect_mutation'] or parameters['ncp_only_mutation_nodes']:\r\n child_nn.mutation_input_layer = []\r\n\r\n added_input_layer_X = None\r\n\r\n if not parameters['only_ncp']:\r\n # neuron_id = len(child_nn.input_layer)\r\n\r\n added_input_layer_X = concatenate(child_cp.conv_network.output_layer.mutation_semantics, axis=1)\r\n\r\n if parameters['recompute']:\r\n del child_cp.conv_network.output_layer.mutation_semantics\r\n for node, tensor in zip(child_cp.conv_network.output_layer.mutation_nodes, child_cp.conv_network.output_layer.mutation_tensors):\r\n del node\r\n del tensor\r\n del child_cp.conv_network.output_layer.mutation_nodes\r\n del child_cp.conv_network.output_layer.mutation_tensors\r\n\r\n # for node in child_cp.conv_network.output_layer.mutation_nodes:\r\n #\r\n # if added_input_layer_X is None:\r\n # added_input_layer_X = node.semantics\r\n # else:\r\n # added_input_layer_X = concatenate((added_input_layer_X, node.semantics), axis=1)\r\n #\r\n # for _ in node.semantics.T:\r\n # new_input_neuron = InputNeuron(neuron_id, None)\r\n # #===============================================================\r\n # # new_input_neuron = InputNeuron(neuron_id, input_data)\r\n # #===============================================================\r\n # neuron_id += 1\r\n # if parameters['ncp_fully_connect_mutation'] or parameters['ncp_only_mutation_nodes']:\r\n # child_nn.mutation_input_layer.append(new_input_neuron)\r\n # child_nn.input_layer.append(new_input_neuron)\r\n # if parameters['recompute']:\r\n # del node.semantics\r\n\r\n if parameters['only_ncp']:\r\n added_test_input_layer_X = None\r\n original_test_X = parameters['X_test']\r\n channels = original_test_X.shape[3]\r\n for i in range(channels):\r\n X = original_test_X[:, :, :, i]\r\n X = X.reshape((X.shape[0], X.shape[1] * X.shape[2]))\r\n if added_test_input_layer_X is None:\r\n added_test_input_layer_X = X\r\n else:\r\n added_test_input_layer_X = concatenate((added_test_input_layer_X, X), axis=1)\r\n else:\r\n added_test_input_layer_X = concatenate(child_cp.conv_network.output_layer.mutation_test_semantics, axis=1)\r\n\r\n if parameters['recompute'] and not parameters['only_ncp']:\r\n del 
child_cp.conv_network.output_layer.mutation_test_semantics\r\n\r\n\r\n random_state = parameters['random_state']\r\n learning_step = 'optimized'\r\n sparseness = { 'sparse': parameters['ncp_sparseness'],\r\n 'minimum_sparseness': parameters['ncp_min_sparseness'],\r\n 'maximum_sparseness': parameters['ncp_max_sparseness'],\r\n 'fully_connect_mutation_nodes' : parameters['ncp_fully_connect_mutation'],\r\n 'only_mutation_nodes' : parameters['ncp_only_mutation_nodes'],\r\n 'min_output_sparseness' : parameters['min_output_sparseness'],\r\n 'max_output_sparseness' : parameters['max_output_sparseness'],\r\n 'prob_skip_connection': 0}\r\n\r\n #===========================================================================\r\n # sparseness = { 'sparse': False, 'minimum_sparseness': 0, 'maximum_sparseness': 1, 'prob_skip_connection': 0}\r\n #===========================================================================\r\n\r\n maximum_new_neurons_per_layer = parameters['ncp_max_mutation_nodes']\r\n minimum_new_neurons_per_layer = parameters['ncp_min_mutation_nodes']\r\n\r\n maximum_bias_weight = parameters['ncp_max_bias_weight']\r\n maximum_neuron_connection_weight = parameters['ncp_max_connection_weight']\r\n\r\n X = None\r\n #X = parameters['X']\r\n y = parameters['y']\r\n global_preds = parent.get_predictions()\r\n delta_target = y - global_preds\r\n hidden_activation_functions_ids = [parameters['ncp_activation']]\r\n prob_activation_hidden_layers = 1\r\n child_nn, times = mutate_hidden_layers(added_input_layer_X, added_test_input_layer_X, X, y, child_nn, random_state, learning_step, sparseness, maximum_new_neurons_per_layer, maximum_neuron_connection_weight, maximum_bias_weight, delta_target, global_preds, hidden_activation_functions_ids, prob_activation_hidden_layers, params=parameters, minimum_new_neurons_per_layer=minimum_new_neurons_per_layer)\r\n\r\n if parameters['ncp_clear_semantics']:\r\n child_nn.clear_hidden_semantics()\r\n if parameters['ncp_only_mutation_nodes'] and not parameters['only_ncp']:\r\n # [input_neuron.clear_semantics() for input_neuron in child_nn.input_layer]\r\n [input_neuron.clear_semantics() for input_neuron in child_nn.mutation_input_layer]\r\n\r\n return NonConvolutionalPart(child_nn), times", "def _backup_and_load_cache(self):\n for group in self.optimizer.param_groups:\n for p in group['params']:\n param_state = self.state[p]\n param_state['backup_params'] = torch.zeros_like(p.data)\n param_state['backup_params'].copy_(p.data)\n p.data.copy_(param_state['cached_params'])", "def _backup_and_load_cache(self):\n for group in self.optimizer.param_groups:\n for p in group['params']:\n param_state = self.state[p]\n param_state['backup_params'] = torch.zeros_like(p.data)\n param_state['backup_params'].copy_(p.data)\n p.data.copy_(param_state['cached_params'])", "def _update_params_for_conv(self,\n cls_set,\n prev_layer_params: libpymo.EqualizationParams,\n curr_layer_params: libpymo.EqualizationParams):\n self._update_weight_for_layer_from_libpymo_obj(prev_layer_params, cls_set[0].get_module())\n self._update_weight_for_layer_from_libpymo_obj(curr_layer_params, cls_set[1].get_module())\n\n if not prev_layer_params.isBiasNone:\n bias_param = ParamUtils.get_param(self._model.model, cls_set[0].get_module(),\n BIAS_INDEX)\n bias_param.raw_data = np.asarray(prev_layer_params.bias, dtype=np.float32).tobytes()", "def update(self):\n result = [], 0, False\n\n if self.t % self.t_train_freq == 0:\n result = self.q_learning_minibatch()\n\n if self.t % self.t_target_q_update_freq == 
self.t_target_q_update_freq - 1:\n # Copy \n self.update_target_q_network()\n\n return result", "def update_target_network(self):\r\n self.send(self.server_conn, (sys._getframe().f_code.co_name, {}))", "def soft_copy_param(target_link, source_link, tau):\n target_params = dict(target_link.namedparams())\n for param_name, param in source_link.namedparams():\n if target_params[param_name].data is None:\n raise TypeError(\n 'target_link parameter {} is None. Maybe the model params are '\n 'not initialized.\\nPlease try to forward dummy input '\n 'beforehand to determine parameter shape of the model.'.format(\n param_name))\n target_params[param_name].data[:] *= (1 - tau)\n target_params[param_name].data[:] += tau * param.data\n\n # Soft-copy Batch Normalization's statistics\n target_links = dict(target_link.namedlinks())\n for link_name, link in source_link.namedlinks():\n if isinstance(link, L.BatchNormalization):\n target_bn = target_links[link_name]\n target_bn.avg_mean[:] *= (1 - tau)\n target_bn.avg_mean[:] += tau * link.avg_mean\n target_bn.avg_var[:] *= (1 - tau)\n target_bn.avg_var[:] += tau * link.avg_var", "def freeze_lowperf(self):\n \n self.freeze_model()\n\n # defreeze params of only being used by the high-performance model\n for i in range(1,5):\n layer = getattr(self, \"layer\"+str(i))\n if self.block_type == 'Bottleneck':\n layer[0].conv3.weight.requires_grad = True\n layer[0].bn3.train()\n elif self.block_type == 'BasicBlock':\n layer[0].conv2.weight.requires_grad = True\n layer[0].bn2.train()\n else:\n print(\"[Error] Unknown block type\")\n\n\n num_skip = len(layer)//2\n for j in range(1, num_skip+1):\n for param in layer[j].parameters():\n param.requires_grad = True\n layer[j].train()", "def update_send(self, parameters, loss): #parameters为训练网络的参数\n # Increase the clock value\n self.clock += 1\n\n # Serve the new parameters\n state = {'clock': self.clock, 'loss': loss}\n # 在rx线程中保存此时的loss和模型参数等\n self.rx.set_current_state(state, parameters)\n\n self.fetching = True\n self.tx.fetch_send()", "def get_obj_new(model_old, demand_path, slink_dict, demand_dict, w_a, w_b, theta, s_limit):\n model = copy.deepcopy(model_old)\n \n #cpu_vector = model.cost_dict['cpu']\n dist_matrix = model.cost_dict['dist']\n rtt_matrix = model.cost_dict['rtt']\n #bw_matrix = model.cost_dict['bw']\n w_a1, w_a2 = w_a\n w_b1, w_b2 = w_b\n theta1, theta2, theta3 = theta\n # find total capacity and used bw on substrate nodes\n node_port_list, used_bw_list = total_port(model)\n #print \"CHECK POINT-1\", used_bw_list, node_port_list\n vnet_info = model.get_vnet_info()\n infeasible = 0\n\n fail_nodes = model.failed_dict\n start_time = time.time()\n select_dict, slink_dict = find_standby2(model, s_limit, w_a, w_b, theta, demand_path, demand_dict, slink_dict)\n # random selection\n #select_dict, slink_dict = find_random(model, s_limit, w_a, w_b, theta, demand_path, demand_dict, slink_dict)\n #print \"selection takes: \", time.time() - start_time\n #print \"Selected\", select_dict\n sum_cost_1_2 = 0\n r_list = {}\n \n for node_id in range(0,len(used_bw_list)):\n r_list[node_id] = used_bw_list[node_id]/node_port_list[node_id]\n #print \"INITIAL\", r_list\n svr_subset = {}\n for vnet in model.vnets:\n j = vnet.vnet_id\n f = fail_nodes[j]\n # only count the failed virtual network\n if f == -1:\n pass\n else:\n for node_id in vnet_info[j]['standby']:\n svr_subset[node_id] = r_list[node_id]\n #print \"Subset SVR: \", svr_subset\n \n if j in select_dict:\n #print \"FEASIBLE FOUND\"\n #print vnet_info[j]['standby']\n 
i = select_dict[j]\n failed_node = vnet.vnodes[f]\n vneighbors = failed_node.vneighbors\n for k in vneighbors: \n eta = operation_cost()\n #print \"a1: \", w_a1, \"a2: \", w_a2, \"b1: \", w_b1, \"b2: \", w_b2\n dist_c = dist_cost(i, f, k, dist_matrix, w_a1, w_a2)\n rtt_c = rtt_cost(i, k, rtt_matrix)\n sigma = connect_cost(dist_c, rtt_c, w_b1, w_b2)\n sum_cost_1_2 += theta1 * eta + theta2 * sigma\n #find residual bw on substrate node\n #xi = resource_cost(bw_matrix, i)\n req_bw = sum(failed_node.neighbor_traffic.values())\n #util = req_bw / xi\n #print req_bw, i, used_bw_list[i], node_port_list[i]\n util = req_bw/node_port_list[i]\n \n if i not in r_list:\n r_list[i] = util #+ used_bw_list[i]/node_port_list[i]\n else:\n r_list[i] += util\n svr_subset[i] = r_list[i]\n #print sigma, \" v_\" + str(vnet.vnet_id) + \"_\" + str(f) + \"_\" + str(i) + \"_\" + str(k) + \"_\" + str(k)\n else:\n print \"INFEASIBLE at vnet: \", j\n infeasible = 1\n #print \"DONE\"\n \n if infeasible == 0:\n #print svr_subset\n max_util = max(svr_subset.values())\n obj = sum_cost_1_2 + theta3 * max_util\n else:\n obj = \"infeasible\"\n max_util = \"none\"\n #print obj\n used_time = time.time() - start_time\n return obj, select_dict, max_util, used_time", "def __freeze(self):\r\n features_layer = self._model._net\r\n for param in features_layer.parameters():\r\n param.requires_grad = False", "def update_predict_network(self):\n states, actions, rewards, new_states, is_terminals = self.memory.sample(self.batch_size)\n\n preprocessed_states, preprocessed_new_states = self.preprocessor.process_batch(states, new_states)\n\n actions = self.preprocessor.process_action(actions)\n # update network\n q_values = self.cal_target_q_values(preprocessed_new_states)\n max_q_values = np.max(q_values, axis=1)\n max_q_values[is_terminals] = 0.0\n targets = rewards + self.gamma * max_q_values\n targets = np.expand_dims(targets, axis=1)\n\n self.q_network.train_on_batch([preprocessed_states, actions], targets)\n if self.num_steps % self.target_update_freq ==0:\n print(\"Update target network at %d steps\" % self.num_steps)\n self.update_target_network()", "def _update_target_model(self):\n self.target_network.model.set_weights(self.policy_network.model.get_weights())", "def network_modified(input):\n\n up6 = upsample_and_concat( conv5, conv4, 256, 512 , 'up_conv1' )\n conv6=slim.conv2d(up6, 256,[3,3], rate=1, activation_fn=lrelu,scope='g_conv6_1')\n conv6=slim.conv2d(conv6,256,[3,3], rate=1, activation_fn=lrelu,scope='g_conv6_2')\n\n up7 = upsample_and_concat( conv6, conv3, 128, 256 , 'up_conv2' )\n conv7=slim.conv2d(up7, 128,[3,3], rate=1, activation_fn=lrelu,scope='g_conv7_1')\n conv7=slim.conv2d(conv7,128,[3,3], rate=1, activation_fn=lrelu,scope='g_conv7_2')\n\n up8 = upsample_and_concat( conv7, conv2, 64, 128 , 'up_conv3')\n conv8=slim.conv2d(up8, 64,[3,3], rate=1, activation_fn=lrelu,scope='g_conv8_1')\n conv8=slim.conv2d(conv8,64,[3,3], rate=1, activation_fn=lrelu,scope='g_conv8_2')\n\n up9 = upsample_and_concat( conv8, conv1, 32, 64 , 'up_conv4')\n conv9=slim.conv2d(up9, 32,[3,3], rate=1, activation_fn=lrelu,scope='g_conv9_1')\n conv9=slim.conv2d(conv9,32,[3,3], rate=1, activation_fn=lrelu,scope='g_conv9_2')\n\n conv10=slim.conv2d(conv9,12,[1,1], rate=1, activation_fn=None, scope='g_conv10')\n out = tf.depth_to_space(conv10,2)\n return out", "def update_net(self) -> None:\n self.units.update_net()", "def upgrade_readout(simulation_dict):\n logging.info(\"Upgrading readout network to a full perceptron\")\n import 
PVM_framework.SharedArray as SharedArray\n import PVM_framework.MLP as MLP\n simulation_dict['stage0_size'] = len(simulation_dict['stage0'])\n needed = True\n for i in range(simulation_dict['stage0_size']):\n if len(simulation_dict['stage0'][i][\"MLP_parameters_additional\"]['layers']) == 2 and len(simulation_dict['stage0'][i][\"MLP_parameters_additional\"]['weights']) == 1:\n nhidden = simulation_dict['stage0'][i][\"MLP_parameters_additional\"]['layers'][0]['activation'].shape[0]-1\n nadditional = simulation_dict['stage0'][i][\"MLP_parameters_additional\"]['layers'][-1]['activation'].shape[0]-1\n layer = {'activation': SharedArray.SharedNumpyArray((nhidden+1), np.float),\n 'error': SharedArray.SharedNumpyArray((nhidden+1), np.float),\n 'delta': SharedArray.SharedNumpyArray((nhidden+1), np.float)\n }\n\n simulation_dict['stage0'][i][\"MLP_parameters_additional\"]['layers'].insert(1, layer)\n simulation_dict['stage0'][i][\"MLP_parameters_additional\"]['weights'] = \\\n [MLP.initialize_weights(SharedArray.SharedNumpyArray((nhidden+1, nhidden), np.float)),\n MLP.initialize_weights(SharedArray.SharedNumpyArray((nhidden+1, nadditional), np.float)),\n ]\n else:\n needed = False\n if needed:\n logging.info(\"Upgrade complete\")\n else:\n logging.info(\"Upgrade was not nescessary\")", "def _update_params(self):\n _load = not self.san_interface.runmode\n params={}\n if ('iosched' in self._updatedattr or _load) and self.iosched<>IoSchedType.default:\n params['iosched']=str(self.iosched)\n if ('readahead' in self._updatedattr or _load) and self.readahead :\n params['readahead']=self.readahead\n if params:\n for pt in self.paths():\n pt.provider.set_dev_params(pt,params)", "def __init__(self, state_size, action_size, fc1_units, fc2_units, buffer_size, batch_size, alpha, gamma, tau,\n local_update_every, target_update_every, seed, a, b, b_increase, b_end, dbl_dqn=False, priority_rpl=False, duel_dqn=False):\n self.state_size = state_size\n self.action_size = action_size\n self.seed = random.seed(seed)\n\n # Hyperparameters\n self.alpha = alpha # Learning rate\n self.gamma = gamma # Discount parameter\n self.tau = tau # Interpolation parameter\n self.local_update_every = local_update_every # Number of actions to take before updating local net weights\n self.target_update_every = target_update_every # Number of actions to take before updating target net weights\n self.batch_size = batch_size # Number of experiences to sample during learning\n self.buffer_size = buffer_size # Size of memory buffer\n self.a = a # Sampling probability (0=random | 1=priority)\n self.b = b # Influence of importance sampling weights over learning\n self.b_increase = b_increase # Amount to increase b by every learning step\n self.b_end = b_end # Maximum value for b\n\n # Agent modifications\n self.dbl_dqn = dbl_dqn # Double Q Learning\n self.priority_rpl = priority_rpl # Prioritised Experience Replay\n self.duel_dqn = duel_dqn # Duelling Q Networks\n\n # Q-Network\n if self.duel_dqn:\n self.qnetwork_local = DuellingQNetwork(state_size, action_size, fc1_units, fc2_units, seed).to(device)\n self.qnetwork_target = DuellingQNetwork(state_size, action_size, fc1_units, fc2_units, seed).to(device)\n else:\n self.qnetwork_local = QNetwork(state_size, action_size, fc1_units, fc2_units, seed).to(device)\n self.qnetwork_target = QNetwork(state_size, action_size, fc1_units, fc2_units, seed).to(device)\n self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=self.alpha)\n\n # Replay memory\n self.memory = ReplayBuffer(action_size, 
buffer_size, batch_size, seed, priority_rpl)\n # Initialize time step (for updating every local_update_every/target_update_every steps)\n self.t_step = 0", "def __copy__(self):\n #new = MCTS(copy=True) # don't run _predict() twice\n new = MCTS(self.env, copy=True) # don't set pi and Q twice\n new.env = self.env.__copy__()\n # can't use __dict__.update() without effecting env __copy__()\n # in theory, you don't need to copy the env. just use one copy for simulating, and restore it to root\n # since _Q() evaluates the env.done() of children, you need self.done = env.done() in __init__()\n # same for env.winner\n new.pi = []\n new. Q = 0\n new.net = self.net\n new.t = self.t\n new.expl = self.expl\n new.children = []\n new.parent = None\n return new", "def update(self,parameters, grads):\n \n L = len(parameters) // 2 # number of layers in the neural network\n #print(L)\n\n # Update rule for each parameter. Use a for loop.\n for l in range(L):\n \n parameters[\"W\" + str(l+1)] = parameters[\"W\" + str(l+1)] - self.alpha * grads[\"dW\" + str(l+1)]\n \n parameters[\"b\" + str(l+1)] = parameters[\"b\" + str(l+1)] - self.alpha * grads[\"db\" + str(l+1)]\n \n parameters[\"W\" + str(l+1)][np.isnan(parameters[\"W\" + str(l+1)])] = 0\n parameters[\"b\" + str(l+1)][np.isnan(parameters[\"b\" + str(l+1)])] = 0\n \n return parameters" ]
[ "0.66702074", "0.6463969", "0.6463969", "0.63160634", "0.6189461", "0.61107165", "0.6051058", "0.599364", "0.5967248", "0.5884471", "0.5775594", "0.57238007", "0.5721437", "0.56973237", "0.56926984", "0.5677991", "0.5631507", "0.5583762", "0.55708534", "0.5542699", "0.55350375", "0.55350375", "0.55322874", "0.55246925", "0.550585", "0.5482655", "0.5479796", "0.54549515", "0.5454674", "0.5453146", "0.5452543", "0.5449828", "0.54403055", "0.5402546", "0.5398247", "0.5382936", "0.5376395", "0.53745484", "0.5358059", "0.53554857", "0.52817106", "0.5272842", "0.5266439", "0.5247205", "0.52465165", "0.52374816", "0.5237423", "0.5226245", "0.5224297", "0.5212691", "0.5205972", "0.5199862", "0.51910454", "0.51818156", "0.5173515", "0.5173477", "0.51647073", "0.5162088", "0.51545256", "0.5150319", "0.51402384", "0.51397276", "0.51341516", "0.51297206", "0.51259005", "0.51090765", "0.5104009", "0.5103657", "0.5102606", "0.5091911", "0.508053", "0.5079556", "0.5079556", "0.50774795", "0.5070369", "0.5060956", "0.50590825", "0.5047422", "0.50411254", "0.5038661", "0.50350446", "0.50348717", "0.50326246", "0.50326246", "0.50273114", "0.502696", "0.5021589", "0.5013431", "0.50075126", "0.50068057", "0.50063115", "0.5006016", "0.5003711", "0.5003557", "0.49996567", "0.49991068", "0.4986294", "0.49855283", "0.4978416", "0.49743447", "0.49640158" ]
0.0
-1
Returns the epsilon scaled noise distribution for adding to Actor calculated action policy.
def _gauss_noise(self, shape): n = np.random.normal(0, 1, shape) return self.e*n
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def epsilon(current_episode, num_episodes):\n # return 1 - (current_episode/num_episodes)\n return .5 * .9**current_episode", "def get_epsilongreedy_policy(epsilon):\n \n def epsilongreedy_policy(Qvalues_oa):\n \"\"\"Returns softmax action probabilites from Qvalues\"\"\"\n \n X = np.zeros_like(Qvalues_oa)\n \n # where are the actions with maximal value?\n maxX = Qvalues_oa == np.max(Qvalues_oa, axis=-1, keepdims=True)\n \n # assign 1-eps probability to max actions\n X += (1-epsilon) * maxX / maxX.sum(axis=-1, keepdims=True)\n \n # assign eps probability to other actions\n othX = np.logical_not(maxX)\n X += epsilon * othX / othX.sum(axis=-1, keepdims=True)\n \n assert np.allclose(X.sum(-1), 1.0)\n \n return X\n \n return epsilongreedy_policy", "def get_noise_multiplier(\n target_epsilon: float,\n target_delta: float,\n sample_rate: float,\n epochs: int,\n alphas: [float],\n sigma_min: float = 0.01,\n sigma_max: float = 10.0,\n) -> float:\n \n from opacus import privacy_analysis\n \n eps = float(\"inf\")\n while eps > target_epsilon:\n sigma_max = 2 * sigma_max\n rdp = privacy_analysis.compute_rdp(\n sample_rate, sigma_max, epochs / sample_rate, alphas\n )\n eps = privacy_analysis.get_privacy_spent(alphas, rdp, target_delta)[0]\n if sigma_max > 2000:\n raise ValueError(\"The privacy budget is too low.\")\n\n while sigma_max - sigma_min > 0.01:\n sigma = (sigma_min + sigma_max) / 2\n rdp = privacy_analysis.compute_rdp(\n sample_rate, sigma, epochs / sample_rate, alphas\n )\n eps = privacy_analysis.get_privacy_spent(alphas, rdp, target_delta)[0]\n\n if eps < target_epsilon:\n sigma_max = sigma\n else:\n sigma_min = sigma\n\n return sigma", "def get_epsilon_action(epsilon, env, mean_reward_per_bandit):\n explore = np.random.uniform() < epsilon\n\n if explore:\n return env.action_space.sample()\n else:\n return np.argmax(mean_reward_per_bandit)", "def createEpsilonGreedyPolicy(Q, epsilon, num_actions):\n def policyFunction(state):\n\n Action_probabilities = np.ones(num_actions,\n dtype = float) * epsilon / num_actions\n\n best_action = np.argmax(Q[state])\n Action_probabilities[best_action] += (1.0 - epsilon)\n return Action_probabilities\n\n return policyFunction", "def _epsilon_greedy(self, info_state, legal_actions, epsilon):\n probs = np.zeros(self._num_actions)\n if np.random.rand() < epsilon:\n action = np.random.choice(legal_actions)\n probs[legal_actions] = 1.0 / len(legal_actions)\n else:\n info_state = np.reshape(info_state, [1, -1])\n q_values = self._session.run(\n self._q_values, feed_dict={self._info_state_ph: info_state})[0]\n legal_q_values = q_values[legal_actions]\n action = legal_actions[np.argmax(legal_q_values)]\n probs[action] = 1.0\n return action, probs", "def make_epsilon_greedy_policy(self, Q, epsilon, nA):\n\n def policy_fn(observation,p):\n A = np.ones(nA, dtype=float) * epsilon / nA\n q=Q(observation,p)\n\n best_action = np.argmax(q)\n print(\"action called:\",self.env.action_labels[best_action])\n A[best_action] += (1.0 - epsilon)\n return A\n\n return policy_fn", "def treat(self):\r\n if self.noiseS > 0:\r\n self.evaluations = min((self.evaluations * self.alphaevals, self.maxevals))\r\n return self.alphasigma\r\n else:\r\n self.evaluations = max((self.evaluations * self.alphaevalsdown, self.minevals))\r\n return 1.0", "def noise(self):\n # Extract parameters\n pzs = self.params[0]\n # retrieve number of galaxies in each bins\n ngals = np.array([pz.gals_per_steradian for pz in pzs])\n if isinstance(self.config[\"sigma_e\"], list):\n sigma_e = np.array([s for s in 
self.config[\"sigma_e\"]])\n else:\n sigma_e = self.config[\"sigma_e\"]\n return sigma_e ** 2 / ngals", "def createEpsilonGreedyPolicy(Q, epsilon, num_actions):\n\n def policyFunction(state):\n Action_probabilities = np.ones(num_actions,\n dtype=float) * epsilon / num_actions\n\n best_action = np.argmax(Q[state])\n Action_probabilities[best_action] += (1.0 - epsilon)\n return Action_probabilities\n\n return policyFunction", "def get_noise_multiplier(self, target_epsilon):\n if self.dp_type == 'dp':\n return self.epochs * np.sqrt(2 * np.log(1.25 * self.epochs / self.target_delta)) / target_epsilon\n \n elif self.dp_type == 'adv_cmp':\n return np.sqrt(self.epochs * np.log(2.5 * self.epochs / self.target_delta)) * (np.sqrt(np.log(2 / self.target_delta) + 2 * target_epsilon) + np.sqrt(np.log(2 / self.target_delta))) / target_epsilon\n \n elif self.dp_type == 'zcdp':\n return np.sqrt(self.epochs / 2) * (np.sqrt(np.log(1 / self.target_delta) + target_epsilon) + np.sqrt(np.log(1 / self.target_delta))) / target_epsilon\n \n else: # if self.dp_type == 'rdp' or 'gdp'\n return self.search_optimal_noise_multiplier(target_epsilon)", "def make_epsilon_greedy_policy(estimator, epsilon, nA):\n def policy_fn(observation):\n A = np.ones(nA, dtype=float) * epsilon / nA\n q_values = estimator.predict(observation)\n# print(q_values)\n best_action = np.argmax(q_values)\n A[best_action] += (1.0 - epsilon)\n return A\n return policy_fn", "def _sample_noise(self) -> np.ndarray:\n return np.random.randn(self.actor_action_size)", "def training_policy(self, state):\n if self.epsilon > random.random():\n return random.randint(0, 1)\n return self.policy(state)", "def make_epsilon_greedy_policy(estimator, epsilon, nA):\r\n def policy_fn(observation):\r\n A = np.ones(nA, dtype=float) * epsilon / nA\r\n q_values = estimator.predict(observation)\r\n best_action = np.argmax(q_values)\r\n A[best_action] += (1.0 - epsilon)\r\n return A\r\n return policy_fn", "def make_epsilon_greedy_policy(Q, epsilon, nA):\n def policy_fn(observation):\n if random.random() < (1 - epsilon):\n return np.argmax(Q[observation])\n else:\n return random.choice(np.arange(nA))\n\n return policy_fn", "def epsilon_greedy_probs(self, nA, Q_s, i_count, eps=None):\r\n epsilon = 1.0 / i_count\r\n if eps is not None:\r\n epsilon = eps\r\n \r\n policy_s = np.ones(nA) * epsilon / nA\r\n policy_s[np.argmax(Q_s)] = 1 - epsilon + (epsilon / nA)\r\n return policy_s", "def calc_alpha(epsilon): \n return float(0.5 * np.log((1-epsilon)/epsilon))", "def make_epsilon_greedy_policy(Q, epsilon, nA):\n\n def policy_fn(observation):\n\n # get random number\n random_number = random.uniform(0, 1)\n\n # get actions with maximum value\n greedy_actions = np.argwhere(Q[observation] == np.amax(Q[observation])).squeeze()\n if not len(greedy_actions.shape):\n greedy_actions = [greedy_actions]\n action = random.choice(greedy_actions)\n\n # if number less than epsilon, get random other actions\n if random_number <= epsilon:\n all_actions = list(range(0, nA))\n if not len(greedy_actions) == nA:\n action = random.choice(all_actions)\n\n return int(action)\n\n return policy_fn", "def noise(self):\n # Extract parameters\n pzs = self.params[0]\n ngals = np.array([pz.gals_per_steradian for pz in pzs])\n return 1.0 / ngals", "def stdProbabilityNorm(self):\n return 1./factorial(self.alpha-1)", "def calc_epsilon(y_true, y_pred, weights):\n return float(np.dot(weights, y_pred == y_true))", "def epsilonGreedyChooser(normalAction, state, stepsDone):\n epsThreshold = EPS_END + (EPS_START - 
EPS_END) * math.exp(-1. * stepsDone / EPS_DECAY)\n randomSample = random.random()\n if randomSample > epsThreshold:\n action = normalAction(state).max(1)[1].view(1, 1)[0].item()\n #print(action)\n return action\n else:\n return ENVIRONMENT.action_space.sample()", "def Noise(self, eps, size):\n return eps * (np.random.uniform(size=size) * 2 - 1)", "def act(self, observation):\n if np.random.random() >= self.epsilon:\n return np.argmax(self.expvalue)\n else:\n return np.random.randint(0, 9)", "def make_epsilon_greedy_policy(Q, epsilon, nA):\n def policy_fn(observation):\n A = np.ones(nA, dtype=float) * epsilon / nA\n best_action = np.argmax(Q[observation])\n A[best_action] += (1.0 - epsilon)\n return A\n return policy_fn", "def make_epsilon_greedy_policy(estimator, nA):\n def policy_fn(sess, observation, epsilon):\n A = np.ones(nA, dtype=float) * epsilon / nA\n q_values = estimator.predict(sess, np.expand_dims(observation, 0))[0]\n print(f'q_values: {q_values}')\n best_action = np.argmax(q_values)\n A[best_action] += (1.0 - epsilon)\n return A\n return policy_fn", "def _sigma_ee_nonrel(self,gam,eps):\n s0 = 4 * r0**2 * alpha / (15 * eps)\n x = 4 * eps / (gam**2 - 1)\n sigma_nonrel = s0 * self._F(x,gam)\n sigma_nonrel[np.where(eps >= 0.25*(gam**2 - 1.))] = 0.0\n sigma_nonrel[np.where(gam*np.ones_like(eps) < 1.0)] = 0.0\n return sigma_nonrel / mec2_unit", "def epsilon_greedy(Q, epsilon, state):\n random_number = random.random()\n if (random_number < epsilon) and (state not in critical_states):\n return env.action_space.sample()\n\n else:\n return np.argmax(Q[state])", "def noise_factor(cls,\n epoch,\n max_epoch,\n max_noise,\n min_noise):\n mxn = min(cls.eps, max_noise)\n mnn = min(cls.eps, min_noise)\n ns = (math.log(mxn) - math.log(mnn)) / (1 - max_epoch)\n return max_noise * math.exp(-(ns * epoch))", "def stdProbabilityNorm(self):\n return 0.5", "def epsilon(self):\n return self.__epsilon", "def _get_epsilon(self, is_evaluation, power=1.0):\n if is_evaluation:\n return 0.0\n decay_steps = min(self._step_counter, self._epsilon_decay_duration)\n decayed_epsilon = (\n self._epsilon_end + (self._epsilon_start - self._epsilon_end) *\n (1 - decay_steps / self._epsilon_decay_duration) ** power)\n return decayed_epsilon", "def awgn(input, noise_std):\n\tif not isinstance(noise_std, (list, tuple)):\n\t\tsigma = noise_std\n\telse: # uniform sampling of sigma\n\t\tsigma = noise_std[0] + \\\n\t\t (noise_std[1] - noise_std[0])*torch.rand(len(input),1,1,1, device=input.device)\n\treturn input + torch.randn_like(input) * (sigma/255)", "def awgn(input, noise_std):\n\tif not isinstance(noise_std, (list, tuple)):\n\t\tsigma = noise_std\n\telse: # uniform sampling of sigma\n\t\tsigma = noise_std[0] + \\\n\t\t (noise_std[1] - noise_std[0])*torch.rand(len(input),1,1,1, device=input.device)\n\treturn input + torch.randn_like(input) * (sigma/255)", "def epsilon_delta(self):", "def epsilon_greedy_policy(network, eps_end, eps_start, eps_decay, actions, device):\n def policy_fn(observation, steps_done):\n sample = np.random.random()\n eps_threshold = eps_end + (eps_start - eps_end) * math.exp(-1. 
* steps_done * eps_decay)\n if sample > eps_threshold:\n with torch.no_grad():\n if observation.dim() == 3:\n observation = observation.unsqueeze(0)\n elif observation.dim() < 3:\n NotImplementedError(\"Wrong input dim\")\n\n values = network.forward(observation.to(device))[0]\n best_action = torch.max(values, dim=0)[1]\n return best_action.cpu().item(), eps_threshold\n else:\n # return torch.tensor(np.random.randint(low=0, high=num_actions), dtype=torch.long), eps_threshold\n return random.choice(actions), eps_threshold\n return policy_fn", "def get_alternate_weights(self, epsilon):\n new_eps = epsilon\n old_eps = self.epsilon\n\n # A simple version of generic reweighting code\n # w = self.weights\n # w /= torch.exp(-0.5*sqd / old_eps**2.)\n # w *= torch.exp(-0.5*sqd / new_eps**2.)\n # w /= sum(w)\n\n if new_eps == 0:\n w = self.weights\n # Remove existing distance-based weight contribution\n w /= torch.exp(-0.5 * self.sqd / old_eps**2.)\n # Replace with a indicator function weight contribution\n w = torch.where(\n self.sqd==0.,\n w,\n torch.zeros_like(w)\n )\n else:\n # TODO Avoid need to normalise by always using log weights?\n # Normalising sqd part 1\n # Ignore distances if weight already zero\n # (to avoid rare possibility of setting all weights to zero)\n sqd_pos_weight = torch.where(\n self.weights > 0,\n self.sqd,\n torch.full_like(self.sqd, self.sqd.max())\n )\n # Normalising sqd part 2\n # Reduce chance of exponentiation giving zero weights\n sqd_norm = sqd_pos_weight - sqd_pos_weight.min()\n # A more efficient way to do the generic case\n a = 0.5 * (old_eps**-2. - new_eps**-2.)\n w = self.weights * torch.exp(sqd_norm*a)\n\n wsum = w.sum()\n if wsum > 0.:\n w /= wsum\n\n return w", "def eps_greedy_policy(q_values, eps, forbidden_actions):\r\n\r\n q_values[forbidden_actions] = np.NINF\r\n indices = torch.nonzero(q_values == q_values.max())\r\n random_index = random.randint(0, indices.shape[1]-1)\r\n best_action_index = indices[random_index]\r\n l = len(q_values)\r\n n_forbidden_actions = np.count_nonzero(forbidden_actions)\r\n p = eps / (l-n_forbidden_actions)\r\n\r\n policy = np.full([l], p)\r\n policy[forbidden_actions] = 0\r\n policy[best_action_index] += 1 - eps\r\n\r\n return policy", "def compute_eps_uniform(\n epoch: float, noise_multi: float, n: int, batch_size: int, delta: float\n) -> float:\n return eps_from_mu(compute_mu_uniform(epoch, noise_multi, n, batch_size), delta)", "def epsilon(self):\n return self._epsilon", "def log_nse(self, epsilon=0.0) -> float:\n s, o = self.predicted + epsilon, self.true + epsilon # todo, check why s is here\n return float(1 - sum((np.log(o) - np.log(o)) ** 2) / sum((np.log(o) - np.mean(np.log(o))) ** 2))", "def EST_NOISE(images):\n num = images.shape[0]\n m_e_bar = sum(images)/num\n m_sigma = np.sqrt(sum((images - m_e_bar)**2)/(num - 1))\n \n return m_sigma", "def noise(self, stddev):\n #add noise to weights\n pass", "def make_epsilon_greedy_policy(estimator, nA):\n def policy_fn(sess, observation, epsilon):\n A = np.ones(nA, dtype=float) * epsilon / nA\n q_values = estimator.predict(sess, np.expand_dims(observation, 0))[0]\n best_action = np.argmax(q_values)\n A[best_action] += (1.0 - epsilon)\n return A\n return policy_fn", "def action_distribution(self, state):\n means, stds = self.__call__(state)\n dist = Normal(means, torch.exp(stds))\n\n return dist", "def update_epsilon(self, epsilon):\n self.weights = self.get_alternate_weights(epsilon)\n self.epsilon = epsilon\n self.ess = effective_sample_size(self.weights)", "def 
eps_greedy(Q, epsilon, num_actions):\n if np.random.uniform(0,1,1) > epsilon:\n action = np.argmax(Q)\n else:\n action = np.random.randint(low=0, high=num_actions)\n \n Q_value = Q[action]\n return action, Q_value", "def current_epsilon(self):\n t = self.action_requests\n T = self.exploration_period\n if(t >= T):\n return self.epsilon_final\n\n epsilon0 = self.epsilon_initial\n epsilonT = self.epsilon_final\n\n return epsilon0 - (t * (epsilon0 - epsilonT)) / T", "def get_epsilon(step: int):\n return (epsilon_0 - epsilon) * math.exp(-step) + epsilon", "def get_estimated_noise(self):\n return self.gp_core.noise_var", "def act(self, state, epsilon=None):\r\n if epsilon is None: epsilon = self.epsilon\r\n q_values = self.value(state)\r\n q_values = q_values.squeeze(1)\r\n if random.random() < epsilon:\r\n aciton = random.randrange(self.action_space.n)\r\n else:\r\n aciton = q_values.argmax(1)[0]\r\n return aciton", "def epsilon():\n return _EPSILON", "def epsilon_greedy(Q, epsilon, n_actions, s, train=False):\n if train or np.random.rand() < epsilon:\n action = np.argmax(Q[s, :])\n else:\n action = np.random.randint(0, n_actions)\n return action", "def update_epsilon(self):\n\t\tif self.epsilon > self.epsilon_min:\n\t\t\tself.epsilon *= self.epsilon_decay", "def update_epsilon(self):\n self.epsilon = self.epsilon * self.decay", "def update_parameter(self):\n\n if self.testing: # 1. No random choice when testing\n self.epsilon = 0\n else: # 2. Update parameters when learning\n if self.epsilon > 0.:\n self.epsilon -= 0.01\n\n return self.epsilon", "def action(self, observation, epsilon=0):\n if epsilon and epsilon>np.random.rand():\n return np.random.randint(self.action_shape)\n activations = super().predict(observation.observation)\n return np.argmax(activations)", "def make_epsilon_greedy_policy(action_count: int, epsilon=0.0, q: dict = None,\n estimator=None, distribute_prob=True):\n if q is None and estimator is None:\n raise ValueError('Cannot make policy: both q and estimator are none')\n\n def policy_func(observation, eps=epsilon):\n actions = np.ones(action_count, dtype=float) * eps / action_count\n if q is not None:\n q_values = q[observation]\n else:\n q_values = estimator.predict(observation)\n if distribute_prob:\n best_actions = np.argwhere(q_values == np.max(q_values)).flatten()\n for i in best_actions:\n actions[i] += (1.0 - eps) / len(best_actions)\n else:\n best_action = np.argmax(q_values)\n actions[best_action] += (1.0 - eps)\n return actions\n\n return policy_func", "def epsilongreedy_policy(Qvalues_oa):\n \n X = np.zeros_like(Qvalues_oa)\n \n # where are the actions with maximal value?\n maxX = Qvalues_oa == np.max(Qvalues_oa, axis=-1, keepdims=True)\n \n # assign 1-eps probability to max actions\n X += (1-epsilon) * maxX / maxX.sum(axis=-1, keepdims=True)\n \n # assign eps probability to other actions\n othX = np.logical_not(maxX)\n X += epsilon * othX / othX.sum(axis=-1, keepdims=True)\n \n assert np.allclose(X.sum(-1), 1.0)\n \n return X", "def get_probs(self,Q_s, epsilon, nA):\n policy_s = np.ones(nA) * epsilon / nA\n best_a = np.argmax(Q_s)\n policy_s[best_a] = 1 - epsilon + (epsilon / nA)\n return policy_s", "def noiseReduction(self):\n pass", "def random_gaussian_noise(self, img, p = 0.5):\n if self.decision(p):\n mean = 30.0\n std = 80.0\n img = img + np.random.normal(mean, std, img.shape)\n\n img = np.clip(img, 0, 255).astype('uint8')\n return img", "def add_noise(self, action, step, noise_scale = 0.1):\n\t\t#noise_scale *= 1/((step+1)**0.5)\n\t\taction += 
noise_scale * np.random.randn(self.act_dim)\n\t\taction = np.clip(action, -self.act_limit, self.act_limit)[0]\n\t\treturn action", "def _define_epsilon(n,T,a=1):\n\n return np.sqrt(np.log(n)/T)*a", "def act(self, state, epsilon):\n if random.random() > epsilon:\n state = torch.FloatTensor(state)\n q_values = self.dqn(state)\n action = q_values.argmax().item()\n else:\n action = self.env.action_space.sample()\n return action", "def create_preference_weighted_epsilon_neighbourhood(self):\n \n A = self.pref_weighted_similarity_measures # distances matrix\n A[A>self.epsilon] = 0 # set distances greater than epsilon to 0\n A.eliminate_zeros() # then remove these entries from matrix\n # For each entry in data get neighbor indices with preference weighted distance less than epsilon\n weighted_eps_neighbh = np.split(A.indices, A.indptr)[1:-1] \n\n return weighted_eps_neighbh", "def eval_gradient_noise_scale(sess=None):\n return eval_global_variable(GraphKeys.GRADIENT_NOISE_SCALE, sess)", "def empirical_erm(self):\n return lambda samples: np.mean(samples) - 0.5 * self.alpha * np.var(samples)", "def e(self):\n\n ylow = self.e_min\n yhigh = self._e\n\n xlow = 0\n xhigh = self.anneal_max\n\n steep_mult = 8\n\n steepness = steep_mult / (xhigh - xlow)\n offset = (xhigh + xlow) / 2\n midpoint = yhigh - ylow\n\n x = np.clip(self.avg_score, 0, xhigh)\n x = steepness * (x - offset)\n e = ylow + midpoint / (1 + np.exp(x))\n return e", "def nats(self) -> float:\n return self.entropy()", "def get_probs(Q_s, epsilon, nA):\n policy_s = np.ones(nA) * epsilon / nA\n best_a = np.argmax(Q_s)\n policy_s[best_a] = 1 - epsilon + (epsilon / nA)\n return policy_s", "def eps(self):\n return self.eps_mask*self.eps_scheduler.value", "def get_estimated_noise(self):\n return self.gp_core.likelihood.noise.item()", "def eps(self, theta, star_params=None):\n if star_params is None:\n star_params = []\n theta = np.append(star_params, theta)\n lp = self.lnprior(theta)\n if not np.isfinite(lp):\n return np.ones_like(self.y) * 1e90\n return (self.y - self.solve(theta)) / self.dy", "def sample(self, i_episode, action_values):\n sigma = max(self.max_sigma + (self.min_sigma - self.max_sigma)/self.end_episode * i_episode, self.min_sigma) \n return np.random.normal(action_values, sigma)", "def stationary_distribution(self):\n P = self.markov_transition()\n N = len(P)\n I = np.identity(N)\n A = P.T - I # get right-kernel\n pi = null_space(A)\n pi = pi / sum(pi)\n pi = [float(item) for item in pi]\n return pi", "def normalisedProductionPDF(p, theta, mDarkPhoton, epsilon, norm):\n return (1. 
/ norm) * dNdPdTheta(p, theta, mDarkPhoton, epsilon)", "def gen_weight(sigma, delta, act_fn=F.relu):\n alpha = 1.-torch.exp(-act_fn(sigma.squeeze(-1))*delta)\n weight = alpha * torch.cumprod(torch.cat([torch.ones((alpha.shape[0], 1),device = alpha.device), 1.-alpha+1e-10], -1), -1)[:, :-1]\n return weight", "def e(self):\n if self._e is None:\n # self._e = self.distributions.uniform(0.3,0.33)\n # return self._e\n # max is set by q but also limited by users choice of e_max.\n res_a = 29.9*((self.j[0]/self.k[0])**(2/3))\n q = self.distributions.truncated_normal(self.q_c, self.q_w, res_a*(1-0.8), res_a*(1-0.001))\n self._e = 1 - q/res_a\n return self._e", "def choose_epsilon_greedy(self, state: Tuple[int, ...], valid_actions: Tuple[int, ...]) -> int:\n if random.random() < self.__epsilon:\n return self.choose_uniform(valid_actions)\n return self.choose_greedy(state, valid_actions)", "def get_epsilon(self):\n step_size = float(self._eps_begin - self._eps_end) / self._total_steps\n self._epsilon = max(self._eps_end, self._epsilon - step_size)\n return self._epsilon", "def get_noise_distribution(corpus: List[str],\n vocabulary: np.ndarray,\n dist_alpha: float\n ) -> List[int]:\n all_words = [word for text in corpus for word in text]\n arr = np.array(list(map(\n lambda x: all_words.count(x)**dist_alpha, vocabulary\n )))\n return arr/arr.sum() # frequencies, normalised, in order of vocabulary", "def play_epsilon_greedy_policy(self, board):\n policy = np.random.choice(['random', 'q_agent'], 1, p=[self.epsilon, 1-self.epsilon])[0]\n if policy == 'random':\n move, _ = RandomAgent.play(board)\n else:\n move, q_value = self.play(board)\n self.after_move()\n return move, policy", "def nmse(gt, pred):\n return np.linalg.norm(gt - pred) ** 2 / np.linalg.norm(gt) ** 2", "def nmse(gt, pred):\n return np.linalg.norm(gt - pred) ** 2 / np.linalg.norm(gt) ** 2", "def epsilon_greedy(q, s, eps = 0.5):\n if random.random()<eps:\n return uniform_dist(q.actions).draw()\n else:\n return greedy(q,s)", "def get_initial_epsilon(self):\n return self.epsilon_percentile, True, self.max_rounds == 0", "def naive_gaussian_noise(true_depth: np.ndarray) -> np.ndarray:\n return true_depth + np.random.normal(0, 0.0012 + 0.0019 * np.square(true_depth - 0.4))", "def act(self, observation):\n if np.random.random() < self.epsilon:\n return np.random.randint(0,9)\n else:\n return np.argmax(self.values)", "def act(self, observation):\n if np.random.random() < self.epsilon:\n return np.random.randint(0,9)\n else:\n return np.argmax(self.values)", "def decay_epsilon(self, epsilon, MIN_EPSILON,\r\n EPSILON_DECAY: float) -> float:\r\n if epsilon > MIN_EPSILON:\r\n epsilon *= EPSILON_DECAY\r\n epsilon = max(MIN_EPSILON, epsilon)\r\n return epsilon", "def acceptance_rate(target_distribution, x0, xs, accepteds):\n return np.mean(accepteds)", "def sampling(args):\n z_mean, z_log_sigma = args\n epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim), mean = 0., stddev=0.1)\n return z_mean + K.exp(z_log_sigma) * epsilon", "def _sigma_ep(self,gam,eps):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n return self._sigma_1(gam,eps)", "def evaluate(self, xs):\n # ps = 1. / (np.sqrt(2. 
* np.pi) * self.sigma) * \\\n # np.exp(-0.5 * (self.mean - xs) * self.invvar * (self.mean - xs))\n # ps = np.zeros_like(xs)\n # for n, x in enumerate(xs):\n # ps[n] += self.evaluate_one(x)\n ps = self.dist.probability(xs)\n return ps", "def __call__(self, state):\n if random.random() > self._epsilon:\n return self._max_policy(state)\n return random.choice(np.arange(self._action_size))", "def process_noise_dist(self, dt=0.0):\n Q = self.process_noise_cov(dt)\n return dist.MultivariateNormal(\n torch.zeros(Q.shape[-1], dtype=Q.dtype, device=Q.device), Q\n )", "def _epsilon(vds) -> np.ndarray:\n return vds[\"rhod_tot\"] / vds[\"rho\"]", "def get_estimated_noise(self):\n raise NotImplementedError('Abstract Method.')" ]
[ "0.65060216", "0.64552784", "0.64374816", "0.64273673", "0.633723", "0.63370866", "0.63252556", "0.6308487", "0.62902623", "0.62704015", "0.6251092", "0.6214871", "0.6212052", "0.61300147", "0.6115993", "0.60839003", "0.6075641", "0.60724646", "0.6063236", "0.6057245", "0.602047", "0.6016604", "0.6009425", "0.60080284", "0.59970474", "0.5988655", "0.5970128", "0.5957479", "0.59485096", "0.5931866", "0.59208417", "0.59123904", "0.5881756", "0.587842", "0.587842", "0.5874945", "0.5855786", "0.58534664", "0.5848968", "0.5845527", "0.5843043", "0.5841954", "0.58241725", "0.5817208", "0.58105505", "0.5807443", "0.57652444", "0.5752057", "0.5749788", "0.57488537", "0.57405955", "0.5738802", "0.5735735", "0.5713904", "0.57089585", "0.57067776", "0.5676855", "0.56746274", "0.5672995", "0.56729823", "0.567092", "0.56655735", "0.5657878", "0.5631171", "0.5621202", "0.56092507", "0.5599895", "0.5588627", "0.5582571", "0.5582188", "0.5581155", "0.5568002", "0.55670285", "0.5532862", "0.55324376", "0.5527847", "0.55247873", "0.55170965", "0.55081666", "0.5503996", "0.55016094", "0.5500446", "0.5496062", "0.5491685", "0.5491575", "0.5491575", "0.548526", "0.548386", "0.54816556", "0.5480107", "0.5480107", "0.54517055", "0.5445589", "0.5441419", "0.5440172", "0.54357284", "0.5434919", "0.542009", "0.5419022", "0.5417315" ]
0.551613
78
Returns length of memory buffer as a property.
def memlen(self): return len(self.memory)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __len__(self) -> int:\n return len(self.buffer)", "def __len__(self):\n return len(self.buffer)", "def __len__(self):\n return len(self.buffer)", "def __len__(self):\n\t\treturn len(self.memory)", "def __len__(self):\n return len(self.memory)", "def __len__(self):\n return len(self.memory)", "def __len__(self):\n return len(self.memory)", "def __len__(self):\n return len(self.memory)", "def __len__(self):\n return len(self.memory)", "def __len__(self):\n return len(self.memory)", "def __len__(self):\n return len(self.memory)", "def __len__(self):\n return len(self.memory)", "def __len__(self):\n return len(self.memory)", "def __len__(self):\n return len(self.memory)", "def __len__(self):\n return len(self.memory)", "def __len__(self):\n return len(self.memory)", "def __len__(self):\n return len(self.memory)", "def __len__(self):\n return len(self.memory)", "def __len__(self):\n return len(self.memory)", "def __len__(self):\n return len(self.memory)", "def __len__(self):\n return len(self.memory)", "def __len__(self):\n return len(self.memory)", "def __len__(self):\n return len(self.memory)", "def __len__(self):\r\n return len(self.memory)", "def _get_length(self):\n return self._length", "def __len__(self):\n return len(self.buffer)", "def getLen(self):\n return self.len", "def getLength(self):\n return self.length", "def get_length(self):\n return self._length", "def get_length(self):\n return self._length", "def length(self):\n return self.__length", "def length(self):\n return self.__length", "def __len__(self):\n # TODO: Is this method used?\n return self._info['length']", "def length(self):\n return self._info.length # pylint: disable=E1101", "def __len__(self) -> int:\n return len(self.getvalue())", "def get_length(self):\n\n return self.length", "def get_length(self):\n return self.resource.get_size()", "def length(self):\n return self.size", "def size(self):\n return len(self.buffer)", "def __len__(self):\n return len(self.bytes)", "def __len__(self):\n return self._length", "def __len__(self):\n return self._length", "def __len__(self):\n return self._length", "def __len__(self):\n return self._length # pylint: disable = E1101", "def length(self):\n return self._length", "def length(self):\n return self._length", "def getLength(self):\n return self.n", "def get_length(self):\n\n return self._length", "def length(self):\n return self.length", "def __len__(self):\n return self.__length", "def __len__(self):\n return(self.data_len)", "def length(self) -> 'int':\n return self._info.len", "def getLength(self):\n return self.count", "def getLen(self):\n return len(self.data)", "def __len__(self):\n return self.lengths[0]", "def __len__(self) -> int:\n return self._length", "def getLength(self):\n return None", "def getLength(self):\n return None", "def __len__(self):\n return int(self.size._value)", "def __len__(self) -> int:\n return self._len", "def __len__(self) -> int:\n if self.preload:\n return len(self.data_ram)\n else:\n return len(self.data)", "def __len__(self):\n return self.length", "def __len__(self):\n return self.length", "def __len__(self):\n return self.length", "def __len__(self):\n return self.length", "def __len__(self):\n return self.length", "def __len__(self):\n return self.length", "def size(self):\r\n return self.__length", "def length(self) -> int:\r\n\r\n return self.__length", "def getLength(self) -> float:\n return self.length", "def __len__(self):\n return self._length", "def length(self) -> int:\n return self.size", "def length(self) -> int:\n 
return self.size", "def __len__(self):\n return self._len", "def length(self):\n mmap = self.memmap;\n self.memmap = 'r';\n data = self.load();\n self.memmap = mmap;\n return data.shape[0];", "def __len__(self):\n\n return self.length", "def __len__(self) -> int:\n return self.length", "def __len__(self):\n return self.size_", "def length(self):\n\n return self._length", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size" ]
[ "0.7560014", "0.7533779", "0.7533779", "0.74142253", "0.7374192", "0.7374192", "0.7374192", "0.7374192", "0.7374192", "0.7374192", "0.7374192", "0.7374192", "0.7374192", "0.7374192", "0.7374192", "0.7374192", "0.7374192", "0.7374192", "0.7374192", "0.7374192", "0.7374192", "0.7374192", "0.7374192", "0.73740363", "0.726588", "0.72483706", "0.7241332", "0.7240929", "0.7164012", "0.7164012", "0.70962816", "0.70962816", "0.70866525", "0.7067873", "0.7060306", "0.7051593", "0.70473534", "0.70421225", "0.70382226", "0.70141673", "0.700095", "0.700095", "0.700095", "0.6999378", "0.6991984", "0.6991984", "0.6990921", "0.6987044", "0.69782364", "0.6966394", "0.6927766", "0.69264746", "0.6925067", "0.6919693", "0.6915714", "0.6892638", "0.688044", "0.688044", "0.6862938", "0.6860579", "0.6857332", "0.68510085", "0.68510085", "0.68510085", "0.68510085", "0.68510085", "0.68510085", "0.6833912", "0.6832501", "0.68227506", "0.6820824", "0.68194276", "0.68194276", "0.68083864", "0.6803469", "0.67996395", "0.67929476", "0.67888486", "0.67859745", "0.6781797", "0.6781797", "0.6781797", "0.6781797", "0.6781797", "0.6781797", "0.6781797", "0.6781797", "0.6781797", "0.6781797", "0.6781797", "0.6781797", "0.6781797", "0.6781797", "0.6781797", "0.6781797", "0.6781797", "0.6781797", "0.6781797", "0.6781797", "0.6781797", "0.6781797" ]
0.0
-1
Initialize a D4PG Agent.
def __init__(self, state_size, action_size, args, agent_count = 1, l2_decay = 0.0001): self.framework = "D4PG" self.device = args.device self.eval = args.eval self.actor_learn_rate = args.actor_learn_rate self.critic_learn_rate = args.critic_learn_rate self.gamma = args.gamma self.rollout = args.rollout self.num_atoms = args.num_atoms self.vmin = args.vmin self.vmax = args.vmax self.atoms = torch.linspace(self.vmin, self.vmax, self.num_atoms).to(self.device) self.atoms = self.atoms.unsqueeze(0) # Initialize ACTOR networks # self.actor = ActorNet(args.layer_sizes, state_size, action_size).to(self.device) self.actor_target = ActorNet(args.layer_sizes, state_size, action_size).to(self.device) self.actor_optim = optim.Adam(self.actor.parameters(), lr=self.actor_learn_rate, weight_decay=l2_decay) # Initialize CRITIC networks # c_input_size = state_size * agent_count c_action_size = action_size * agent_count self.critic = CriticNet(args.layer_sizes, c_input_size, c_action_size, self.num_atoms).to(self.device) self.critic_target = CriticNet(args.layer_sizes, c_input_size, c_action_size, self.num_atoms).to(self.device) self.critic_optim = optim.Adam(self.critic.parameters(), lr=self.critic_learn_rate, weight_decay=l2_decay)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def agent_init(self):\n pass", "def __init__(self, agent: AEA) -> None:\n self._agent = agent\n super().__init__()", "def __init__(self, agent_id=\"default\", experiment_id=\"default\"):\n self.runtime = runtime()\n self.agent_id = agent_id\n self.experiment_id = experiment_id", "def Init(self, agentEnv):\n self.agentEnv = agentEnv", "def __init__(self, env, args):\n\n self.framework = \"MAD4PG\"\n self.t_step = 0\n self.episode = 1\n self.avg_score = 0\n\n self.C = args.C\n self._e = args.e\n self.e_min = args.e_min\n self.e_decay = args.e_decay\n self.anneal_max = args.anneal_max\n self.update_type = args.update_type\n self.tau = args.tau\n self.state_size = env.state_size\n self.action_size = env.action_size\n\n # Create all the agents to be trained in the environment\n self.agent_count = env.agent_count\n self.agents = [D4PG_Agent(self.state_size,\n self.action_size,\n args,\n self.agent_count)\n for _ in range(self.agent_count)]\n self.batch_size = args.batch_size\n\n # Set up memory buffers, currently only standard replay is implemented\n self.memory = ReplayBuffer(args.device,\n args.buffer_size,\n args.gamma,\n args.rollout,\n self.agent_count)\n self.memory.init_n_step()\n\n for agent in self.agents:\n self.update_networks(agent, force_hard=True)", "def __init__(self, agent):\n self.agent = agent", "def __init__(self, env, agent, agent_config, remote=None):\n self.env = Environment(env)\n self.episodes = 1000\n self.remote = remote\n\n if remote:\n self.has_remote_agent = True\n else:\n self.has_remote_agent = False\n\n \"\"\"Agent Class.\"\"\"\n self._agent_class = agent\n\n \"\"\"Initialize Agent\"\"\"\n self.agent = self._agent_class(**agent_config)", "def initialize(self):\n if self.real:\n self.agent.connect(self)\n else:\n self.connect() # Connect python client to VREP\n self.agent.connect(self)", "def initialize(\n self,\n env: VecEnv,\n agent: BaseAgent,\n save_path: str = 'logs',\n load_path: str = '',\n use_gpu: bool = True,\n ):\n self.env = env\n self.agent = agent\n self.use_gpu = use_gpu\n\n # Set up agent\n if (\n not isinstance(agent.actor, A2CAgent)\n and not isinstance(agent.actor, DQNAgent)\n and not isinstance(agent.actor, PPOAgent)\n ):\n raise ValueError(\n 'GCL is only compatible with A2C, DQN, and PPO actors.'\n )\n if use_gpu:\n self.agent.to_gpu()\n if load_path:\n self.agent.load(load_path)\n\n # Set up runner\n self.runner = Runner(env, self.agent, use_gpu)", "def init(self, parameters, agent_parameters):\n pass", "def init_agent(config: configparser.ConfigParser, env, device='cpu'):\n\n # ===\n # Initialize agent\n agent_type = config['Agent']['type']\n\n if agent_type == 'a2c':\n agent = A2CAgent(\n action_space=env.action_space,\n observation_shape=env.observation_space.shape,\n observation_dtype=torch.float,\n gamma=config['Agent'].getfloat('gamma'),\n use_recurrent_net=config['Agent'].getboolean('use_recurrent_net'),\n num_rollout_steps=config['Agent'].getint('num_rollout_steps'),\n value_loss_coef=config['Agent'].getfloat('value_loss_coef'),\n entropy_coef=config['Agent'].getfloat('entropy_coef'),\n max_grad_norm=config['Agent'].getfloat('max_grad_norm'),\n use_acktr=config['Agent'].getboolean('use_acktr'),\n device=device\n )\n else:\n raise NotImplementedError\n\n return agent", "def __init__(self, agent_id):\n # Properties of the agent\n #--------------------------------------#\n # The following parameters are mandatory\n self.agent_id = int(agent_id)\n\n # The states\n self.task_dict = dict() # Elements are {task_id:TASK(), ...}\n\n # 
Activation\n self.num_activated_task = 0\n self.is_activated = False # If there is a task being activated, this is set to True", "def __init__(self, agent_name):\n\n self._agent_name = agent_name", "def __init__(self, agent_host, agent_port, mission_type, mission_seed, solution_report, state_space_graph):\n self.AGENT_MOVEMENT_TYPE = 'Continuous'\n self.AGENT_NAME = 'Random'\n\n self.agent_host = agent_host\n self.agent_port = agent_port\n self.mission_seed = mission_seed\n self.mission_type = mission_type\n self.state_space = state_space\n self.solution_report = solution_report # Python makes call by reference !\n self.solution_report.setMissionType(self.mission_type)\n self.solution_report.setMissionSeed(self.mission_seed)", "def __init__(self, agent_host, agent_port, mission_type, mission_seed, solution_report, state_space):\n self.AGENT_MOVEMENT_TYPE = 'Discrete' # HINT: You can change this if you want {Absolute, Discrete, Continuous}\n self.AGENT_NAME = 'Simple'\n\n self.agent_host = agent_host\n self.agent_port = agent_port\n self.mission_seed = mission_seed\n self.mission_type = mission_type\n self.state_space = state_space\n self.solution_report = solution_report # Python calls by reference !\n self.solution_report.setMissionType(self.mission_type)\n self.solution_report.setMissionSeed(self.mission_seed)", "def __init__(self, config_parser, **kwargs):\n BaseAgent.__init__(self, config_parser)\n\n self.SERVICE_ID = config_parser.get('agent', 'SERVICE_ID')\n self.GENERIC_DIR = config_parser.get('agent', 'CONPAAS_HOME')\n self.VAR_CACHE = config_parser.get('agent', 'VAR_CACHE')\n self.CODE_DIR = join(self.VAR_CACHE, 'bin')\n self.VOLUME_DIR = '/media'\n self.env = {}\n self.processes = {}", "def __init__(self, objective):\n self.objective = objective\n\n # Initialize players\n # We use three dummy player for the target position\n self.players = {}\n for position in ['landlord', 'landlord_up', 'landlord_down']:\n self.players[position] = DummyAgent(position)\n\n # Initialize the internal environment\n self._env = GameEnv(self.players)\n self.total_round = 0\n self.force_bid = 0\n self.infoset = None", "def __init__(self):\n rospy.logdebug(\"Start ParrotDroneEnv INIT...\")\n\n #Spawn Parrot AR Drone through launch file\n self.ros_pkg_name=\"drone_construct\"\n self.launch_file_name=\"put_drone_in_world.launch\"\n \n super(ParrotDroneEnv, self).__init__(\n ros_pkg_name=self.ros_pkg_name,\n launch_file=self.launch_file_name,\n start_init_physics_parameters=True,\n reset_world_or_sim='WORLD')\n\n rospy.logdebug(\"Finished ParrotDroneEnv INIT...\")", "def init(self,agent,env,init_agent=True,init_env=True):\n self.agent = agent\n self.env = env\n env.sim = self\n agent.sim = self\n if init_env and 'init' in dir(env): env.init()\n if init_agent and 'init' in dir(agent): agent.init()\n self.last_sensation = TERMINAL_STATE", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.setup_start_agents = False", "def __init__(self, gym_env: gym.Env) -> None:\n super().__init__()\n self._queue: Queue = Queue()\n self._action_counter: int = 0\n self.gym_address = str(GYM_CONNECTION_PUBLIC_ID)\n self._agent = ProxyAgent(\n name=\"proxy\", gym_env=gym_env, proxy_env_queue=self._queue\n )\n self._agent_thread = Thread(target=self._agent.start)\n self._active_dialogue = None # type: Optional[GymDialogue]\n self.gym_skill = \"fetchai/gym:0.1.0\"\n self.gym_dialogues = GymDialogues(self.gym_skill, role_from_first_message)", "def _init_agents(self):\n self.agents = [Agent(e=0.1, 
a=0.1, row=self.row, col=self.col) for i in range(2)]", "def __init__(self,agent_host,agent_port, mission_type, mission_seed, solution_report, state_space):\n self.AGENT_MOVEMENT_TYPE = 'Discrete'\n self.AGENT_NAME = 'Random'\n self.AGENT_ALLOWED_ACTIONS = [\"movenorth 1\", \"movesouth 1\", \"movewest 1\", \"moveeast 1\"]\n\n self.agent_host = agent_host\n self.agent_port = agent_port\n self.mission_seed = mission_seed\n self.mission_type = mission_type\n self.state_space = state_space;\n self.solution_report = solution_report; # Python makes call by reference !\n self.solution_report.setMissionType(self.mission_type)\n self.solution_report.setMissionSeed(self.mission_seed)", "def __init__(\n self,\n assign_id: str,\n env_desc: Dict[str, Any],\n algorithm_candidates: Dict[str, Any],\n training_agent_mapping: Callable,\n observation_spaces: Dict[AgentID, gym.spaces.Space],\n action_spaces: Dict[AgentID, gym.spaces.Space],\n exp_cfg: Dict[str, Any],\n population_size: int = -1,\n algorithm_mapping: Callable = None,\n ):\n\n IndependentAgent.__init__(\n self,\n assign_id,\n env_desc,\n algorithm_candidates,\n training_agent_mapping,\n observation_spaces,\n action_spaces,\n exp_cfg,\n population_size,\n algorithm_mapping,\n )", "def initialize(self):\n self.candidate_disease_list = []\n self.candidate_symptom_list = []\n self.agent_action = {\n \"turn\":None,\n \"action\":None,\n \"request_slots\":{},\n \"inform_slots\":{},\n \"explicit_inform_slots\":{},\n \"implicit_inform_slots\":{},\n \"speaker\":\"agent\"\n }", "def initialization_step(self):\n # Update where agents are\n self.update_agent_location_vector()\n # update task locations\n self.update_task_location_vector()\n # update deadlines\n self.populate_deadline_vector()\n # update distances to each task and orientation to each task\n self.update_agent_distances_vector()\n self.update_agent_orientation_vector()", "def initialize(self):\n self._setup_simulation_from_parameters()\n if \"orrb\" in self.constants.observation_providers:\n self._reset()\n self._goal = self._next_goal()\n self.update_goal_info()\n\n self.observer = self._build_observer()", "def __init__(self, test_conditions_4_current_env):\n # Init Gaia Class\n self.test_conditions_4_current_env = test_conditions_4_current_env\n self.gaia = GaiaClass(gaia_tap_server=conf.HOST_URL, gaia_data_server=conf.HOST_URL)", "def setup(cls):\n cls.runner = CliRunner()\n cls.agent_name = \"myagent\"\n cls.cwd = os.getcwd()\n cls.t = tempfile.mkdtemp()\n # copy the 'packages' directory in the parent of the agent folder.\n shutil.copytree(Path(CUR_PATH, \"..\", \"packages\"), Path(cls.t, \"packages\"))\n\n os.chdir(cls.t)\n result = cls.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"init\", \"--author\", AUTHOR],\n standalone_mode=False,\n )\n assert result.exit_code == 0\n result = cls.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"create\", \"--local\", cls.agent_name],\n standalone_mode=False,\n )\n assert result.exit_code == 0\n os.chdir(cls.agent_name)\n # add connection first time", "def __init__( self ):\n self._env = None\n self._steps = None\n\n self._initialize( )", "def __init__(self, *args, **kwargs):\n AgentModule.__init__(self, *args, **kwargs)\n self.name = 'MonitorAgents'\n self.setup = \"Production\"\n self.enabled = False\n self.restartAgents = False\n self.restartExecutors = False\n self.restartServices = False\n self.controlComponents = False\n self.commitURLs = False\n self.diracLocation = \"/opt/dirac/pro\"\n\n self.sysAdminClient = 
SystemAdministratorClient(socket.gethostname())\n self.jobMonClient = JobMonitoringClient()\n self.nClient = NotificationClient()\n self.csAPI = None\n self.agents = dict()\n self.executors = dict()\n self.services = dict()\n self.errors = list()\n self.accounting = defaultdict(dict)\n\n self.addressTo = [\"ilcdirac-admin@cern.ch\"]\n self.addressFrom = \"ilcdirac-admin@cern.ch\"\n self.emailSubject = \"MonitorAgents on %s\" % socket.gethostname()", "def __init__(self):\n\n # For now, we'll connect to the target via the Apollo debug controller.\n # This should be replaced by a high-speed USB link soon; but for now\n # we'll use the slow debug connection.\n self._debugger = ApolloDebugger()\n self._serial = self._find_serial_connection()", "def __init__(self, node):\r\n # set the agent's id and increment class counter\r\n self.__id = Agent.agent_id; Agent.agent_id+= 1\r\n self.__health_status = { \"label\": \"healthy\", \r\n \"days_infected\": -1 }\r\n\r\n self.__age_categorize(random.randint(0, 82))\r\n self.__location = node\r\n self.__susceptibility_factor = math.e**(-(self.__age) / 10)", "def initialize( self, layout, numGhostAgents=1000 ):\n self.data.initialize(layout, numGhostAgents) ##self.data is defined in the Grid() class of game.py REF112.It creates an initial game state from a layout array (see layout.py).", "def __init__(self, base_platform, initial_position=None, name=None, allow_overlapping=True, noise_params=None):\n\n if name is None:\n self.name = 'agent_' + str(Agent.index_agent)\n else:\n self.name = name\n Agent.index_agent += 1\n\n # List of sensors\n self.sensors = []\n\n # Body parts\n self.base_platform = base_platform\n self.parts = [self.base_platform]\n\n # Default starting position\n self.initial_position = initial_position\n\n # Information about sensor types\n self.has_geometric_sensor = False\n self.has_visual_sensor = False\n\n # Replaced when agent is put in playground\n self.size_playground = (0, 0)\n\n # Keep track of the actions for display\n self.current_actions = None\n\n # Allows overlapping when placing the agent\n self.allow_overlapping = allow_overlapping\n\n # Motor noise\n self._noise = False\n if noise_params is not None:\n self._noise = True\n self._noise_type = noise_params.get('type', 'gaussian')\n\n if self._noise_type == 'gaussian':\n self._noise_mean = noise_params.get('mean', 0)\n self._noise_scale = noise_params.get('scale', 1)\n\n else:\n raise ValueError('Noise type not implemented')\n\n self._controller = None\n\n # Reward\n self.reward = 0", "async def init(self):\n logger.info(\"Init device: %s\", self._serial)\n self._callback(STATUS_INIT)\n\n self._init_binaries()\n self._init_apks()\n await self._init_forwards()\n\n await adb.shell(self._serial, \"/data/local/tmp/atx-agent server --stop\")\n await adb.shell(self._serial, \"/data/local/tmp/atx-agent server --nouia -d\")", "def __init__(self, agent_host, agent_port, mission_type, mission_seed, solution_report, state_space_graph):\n self.AGENT_NAME = 'Helper'\n self.AGENT_MOVEMENT_TYPE = 'Absolute' # Note the helper needs absolute movements\n\n self.agent_host = agent_host\n self.agent_port = agent_port\n self.mission_seed = mission_seed\n self.mission_type = mission_type\n self.state_space = StateSpace()\n self.solution_report = solution_report; # Python is call by reference !\n self.solution_report.setMissionType(self.mission_type)\n self.solution_report.setMissionSeed(self.mission_seed)", "def __init__(self, environment, agent, repeat_actions=1):\n self.logger = 
logging.getLogger(__name__)\n self.environment = environment\n self.agent = agent\n self.repeat_actions = repeat_actions\n\n # Update schedule if worker is performing updates.\n self.updating = None\n self.steps_before_update = None\n self.update_interval = None\n self.update_steps = None\n self.sync_interval = None", "def initialize_robot():\n\n proxy_motion = naoqi.ALProxy(\"ALMotion\", IP_ROBOT, PORT_ROBOT)\n proxy_motion.wakeUp()\n\n proxy_autonomous_life = naoqi.ALProxy(\"ALAutonomousLife\", IP_ROBOT, PORT_ROBOT)\n proxy_autonomous_life.setState(\"disabled\")\n\n proxy_motion = naoqi.ALProxy(\"ALMotion\", IP_ROBOT, PORT_ROBOT)\n proxy_motion.wakeUp()", "def __init__(self, agent_id, motion_policy, sensor, epsilon=1e-9, look_after_move=False):\n self._agent_id = agent_id\n self._motion_policy = motion_policy\n self._epsilon = epsilon\n self._look_after_move = look_after_move\n self._sensor = sensor", "def setup_class(cls):\n cls.runner = CliRunner()\n cls.agent_name = \"myagent\"\n cls.cwd = os.getcwd()\n cls.t = tempfile.mkdtemp()\n # copy the 'packages' directory in the parent of the agent folder.\n shutil.copytree(Path(CUR_PATH, \"..\", \"packages\"), Path(cls.t, \"packages\"))\n cls.connection_id = str(HTTP_CLIENT_PUBLIC_ID)\n cls.connection_name = \"http_client\"\n\n os.chdir(cls.t)\n result = cls.runner.invoke(cli, [*CLI_LOG_OPTION, \"init\", \"--author\", AUTHOR])\n assert result.exit_code == 0\n result = cls.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"create\", \"--local\", cls.agent_name],\n standalone_mode=False,\n )\n assert result.exit_code == 0\n os.chdir(cls.agent_name)\n result = cls.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"add\", \"--local\", \"connection\", cls.connection_id],\n standalone_mode=False,\n )\n assert result.exit_code == 0", "def initialize_ai(self):\n\n self.gid, self.genome = constants.genomes_to_run[self.identifier]\n self.genome.fitness = -1\n self.net = neat.nn.FeedForwardNetwork.create(self.genome, constants.conf)\n # self.net = neat.nn.RecurrentNetwork\n # .create(self.genome, constants.conf)", "def init_agent(self, kwargs):\n\n exp_params = [('agents_info', is_string),\n ('ip', is_string)]\n try:\n agents_info, agent_ip = check_arguments(exp_params, kwargs)\n agents_info = simplejson.loads(agents_info)\n except Exception as ex:\n return HttpErrorResponse(\"%s\" % ex)\n\n self.logger.info('Setting agent environment')\n\n target_dir = self.VAR_CACHE\n with open(join(target_dir, 'agents.json'), 'w') as outfile:\n simplejson.dump(agents_info, outfile)\n\n agent_role = [i['role'] for i in agents_info if i['ip'] == agent_ip][0]\n master_ip = [i['ip'] for i in agents_info if i['role'] == 'master'][0]\n\n self.env.update({'MY_IP':agent_ip})\n self.env.update({'MY_ROLE':agent_role})\n self.env.update({'MASTER_IP':master_ip})\n\n self.logger.info('Agent initialized')\n return HttpJsonResponse()", "def agent_init(self, agent_info):\n\n # First, we get the relevant information from agent_info \n # Note: we use np.random.RandomState(seed) to set the two different RNGs\n # for the planner and the rest of the code\n try:\n self.num_states = agent_info[\"num_states\"]\n self.num_actions = agent_info[\"num_actions\"]\n except:\n print(\"You need to pass both 'num_states' and 'num_actions' in agent_info to initialize the action-value table\")\n self.gamma = agent_info.get(\"discount\", 0.95)\n self.step_size = agent_info.get(\"step_size\", 0.1)\n self.epsilon = agent_info.get(\"epsilon\", 0.1)\n self.planning_steps = agent_info.get(\"planning_steps\", 10)\n 
self.kappa = agent_info.get(\"kappa\", 0.001)\n\n self.rand_generator = np.random.RandomState(agent_info.get('random_seed', 42))\n self.planning_rand_generator = np.random.RandomState(agent_info.get('planning_random_seed', 42))\n\n # Next, we initialize the attributes required by the agent, e.g., q_values, model, tau, etc.\n # The visitation-counts can be stored as a table as well, like the action values \n self.q_values = np.zeros((self.num_states, self.num_actions))\n self.tau = np.zeros((self.num_states, self.num_actions))\n self.actions = list(range(self.num_actions))\n self.past_action = -1\n self.past_state = -1\n self.model = {}", "def _initialize(self):\n self.send_init_command()", "def __init__(self, env, actor=None, critic=None, weights=None, warmup_actor=1, warmup_critic=1, gamma=0.99):\n self.env = env\n nactions = np.product(self.env.action_shape)\n actor = self._actor() if actor is None else actor\n critic, action_input = self._critic() if critic is None else critic\n membuf = SequentialMemory(int(1E5), window_length=1)\n random = OrnsteinUhlenbeckProcess(size=nactions, theta=0.15, mu=0.0, sigma=0.3)\n self.agent = DDPGAgent(nb_actions=nactions,\n actor=actor,\n critic=critic,\n critic_action_input=action_input,\n memory=membuf,\n nb_steps_warmup_critic=warmup_critic,\n nb_steps_warmup_actor=warmup_actor,\n random_process=random,\n gamma=gamma,\n target_model_update=1E-3)\n self.agent.compile(keras.optimizers.Adam(lr=0.001, clipnorm=1.0), metrics=['mae', 'accuracy'])\n\n # Potentially load the Agent's weights from disk\n if weights is not None:\n basename, ext = os.path.splitext(weights)\n cweights = basename + \"_critic\" + ext\n aweights = basename + \"_actor\" + ext\n if not os.path.isfile(cweights):\n raise ValueError(\"Could not find file\", cweights)\n elif not os.path.isfile(aweights):\n raise ValueError(\"Could not find file\", aweights)\n else:\n self.agent.load_weights(weights)", "def initVariable(self, state):\n self.nb_agent = state.getNumAgents()\n self.first_call = False", "def _connect(self) -> None:\n if self._agent_thread.is_alive():\n raise ValueError(\"Agent already running.\")\n self._agent_thread.start()\n\n while not self._agent.runtime.is_running: # check agent completely running\n time.sleep(0.01)", "def init(p4):\n started_counter = p4gf_const.P4GF_COUNTER_INIT_STARTED\n complete_counter = p4gf_const.P4GF_COUNTER_INIT_COMPLETE\n _maybe_perform_init(p4, started_counter, complete_counter, _global_init)\n client_name = p4gf_util.get_object_client_name()\n started_counter = client_name + '-init-started'\n complete_counter = client_name + '-init-complete'\n home = os.environ.get(\"HOME\")\n p4gf_dir = os.path.join(home, p4gf_const.P4GF_DIR)\n\n def client_init(p4):\n '''Perform host-specific initialization (and create sample usermap).'''\n # Set up the host-specific client.\n _create_client(p4, client_name, p4gf_dir)\n # Ensure the default user map file is in place.\n _write_user_map(p4, client_name, p4gf_dir)\n if not _maybe_perform_init(p4, started_counter, complete_counter, client_init):\n # If client already created, make sure it hasn't been tweaked.\n ###: do we really need to handle this case? this is here just to pass the tests\n view = ['//{depot}/... 
//{client}/...'.format(depot=p4gf_const.P4GF_DEPOT,\n client=client_name)]\n p4gf_util.ensure_spec_values(p4, \"client\", client_name,\n {'Root': p4gf_dir, 'View': view})", "def __init__(self, agent_host, agent_port, mission_type, mission_seed, solution_report, state_space_graph):\n self.AGENT_MOVEMENT_TYPE = 'Absolute' # HINT: You can change this if you want {Absolute, Discrete, Continuous}\n self.AGENT_NAME = 'Realistic'\n\n self.agent_host = agent_host\n self.agent_port = agent_port\n self.mission_seed = mission_seed\n self.mission_type = mission_type\n self.state_space = None # NOTE: The Realistic can not know anything about the state_space a priori !\n self.solution_report = solution_report # Python is call by reference !\n self.solution_report.setMissionType(self.mission_type)\n self.solution_report.setMissionSeed(self.mission_seed)\n self.first_time_flag = True\n self.diamond_award = 30\n self.already_visited_reward = 10\n self.history = {}\n self.rewards_by_location_id = {}\n self.previous_rewards_total = 0", "def __init__(self, a):\n # sanity check\n assert(isinstance(a, agent.Agent))\n \n # save the agent reference\n self.a = a\n \n self.a.tc_h1(\"CONNECT TESTS\")\n \n # create the host instance\n self.host = host.Host(a, STA)\n # create the access point instances\n self.ap1 = accesspoint.AccessPoint(a, AP1, channel=5, period=100, ssid=\"louis\")\n self.ap2 = accesspoint.AccessPoint(a, AP2, channel=11, period=100, ssid=\"louis\")\n\n # reset the host (resetting the MIBs)\n self.host.reset(True)\n\n # set the host MAC address\n self.host.dbg_macaddr()", "def agent_init(self, agent_info):\n\n # First, we get the relevant information from agent_info \n # NOTE: we use np.random.RandomState(seed) to set the two different RNGs\n # for the planner and the rest of the code\n try:\n self.num_states = agent_info[\"num_states\"]\n self.num_actions = agent_info[\"num_actions\"]\n except:\n print(\"You need to pass both 'num_states' and 'num_actions' in agent_info to initialize the action-value table\")\n self.gamma = agent_info.get(\"discount\", 0.95)\n self.step_size = agent_info.get(\"step_size\", 0.1)\n self.epsilon = agent_info.get(\"epsilon\", 0.1)\n self.planning_steps = agent_info.get(\"planning_steps\", 10)\n\n self.rand_generator = np.random.RandomState(agent_info.get('random_seed', 42))\n self.planning_rand_generator = np.random.RandomState(agent_info.get('planning_random_seed', 42))\n\n # Next, we initialize the attributes required by the agent, e.g., q_values, model, etc.\n # A simple way to implement the model is to have a dictionary of dictionaries, \n # mapping each state to a dictionary which maps actions to (reward, next state) tuples.\n self.q_values = np.zeros((self.num_states, self.num_actions))\n self.actions = list(range(self.num_actions))\n self.past_action = -1\n self.past_state = -1\n self.model = {} # model is a dictionary of dictionaries, which maps states to actions to \n # (reward, next_state) tuples", "def agent_init(self, agent_config):\n if agent_config.get('name') is None or not (agent_config.get('name').lower() == 'q-learning' or agent_config.get('name').lower() == 'expected_sarsa'):\n raise AssertionError(\"Invalid agent name. 
Accepted agents: 'q-learning', 'expected_sarsa'\")\n self.name = agent_config['name']\n self.device = agent_config['device']\n self.replay_buffer = ReplayBuffer(agent_config['replay_buffer_size'],\n agent_config['minibatch_size'],\n agent_config.get('seed'))\n self.network = DQN(agent_config['network_config']).to(self.device) # The latest state of the network that is getting replay updates\n self.network_target = DQN(agent_config['network_config']).to(self.device)\n\n optim_config = agent_config['optimizer_config']\n self.optimizer = optim.Adam(self.network.parameters(), lr=optim_config['step_size'], betas=optim_config['betas'])\n self.num_actions = agent_config['network_config']['action_dim']\n self.num_replay = agent_config['num_replay_updates_per_step']\n self.discount = agent_config['gamma']\n self.tau = agent_config['tau']\n\n self.rand_generator = np.random.RandomState(agent_config.get('seed'))\n\n self.last_state = None\n self.last_action = None\n\n self.sum_rewards = 0\n self.epsiode_steps = 0\n\n checkpoint_dir = agent_config.get('checkpoint_dir')\n if checkpoint_dir is None:\n self.checkpoint_dir = 'model_weights'\n else:\n self.checkpoint_dir = checkpoint_dir\n \n if not os.path.isdir(self.checkpoint_dir):\n os.makedirs(self.checkpoint_dir)", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.start_agents_once = False\n self.start_servers_once = False\n self.setup_start_agents = False\n self.setup_start_servers = False", "def __init__(self, env):\n self.env = env\n #self.gator = CycleGator()\n self.gator = GroupGator()", "def initialize(self):\n\n \"*** YOUR CODE HERE\"\n self.path = []\n MyAgent.customFood = None\n MyAgent.foodLeft = 0\n MyAgent.specialWalls = {}\n self.followOne = False\n if self.index == 0:\n MyAgent.finding = []\n MyAgent.finding.append(False)", "def __init__( self ):\n self.lock = threading.Lock()\n self.agent = None", "def __init__(self):\n # Manage command line args\n args = ut_generic.getArgsParser().parse_args()\n self.gzclient = args.gzclient\n self.multiInstance = args.multiInstance\n self.port = args.port\n\n # Launch simulation in a new Process\n self.launch_subp = ut_launch.startLaunchServiceProcess(\n ut_launch.generateLaunchDescription(\n self.gzclient, self.multiInstance, self.port))\n\n # Create the node after the new ROS_DOMAIN_ID is set in\n # generate_launch_description()\n rclpy.init(args=None)\n self.node = rclpy.create_node(self.__class__.__name__)\n\n # class variables\n # self._observation_msg = None\n self._observation_img = None\n # self.max_episode_steps = 1024 # default value, can be updated from baselines\n self.max_episode_steps = 100\n self.iterator = 0\n self.reset_flag = True\n\n # ai_agent\n self.pub = self.node.create_publisher(String, '/pos/action_id')\n camera_names = ['/cam/custom_camera/image_raw']\n self.sub_img = self.node.create_subscription(\n Image, camera_names[0], self.observation_img_callback)\n self.sub_odom = self.node.create_subscription(Odometry,'/pos/odom_pos', self.odom_get_callback)\n self.reset_sim = self.node.create_client(Empty, '/reset_simulation')\n\n # 0: \"forward\", 1: \"left\", 2: \"right\"\n self.action_space = gym.spaces.Discrete(3)\n\n self.pos = np.array([0, 0])\n self.target_pos = np.array([-6, 1])\n\n # observation = (240,320,3)\n screen_height, screen_width = (240, 320)\n self.observation_space = spaces.Box(\n low=0, high=255, shape=(\n screen_height, screen_width, 3), dtype=np.uint8)\n\n self.bridge = CvBridge()", "def onStart(self, agent):\n\n pass", "def 
init_acs_agent(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)", "def __init__(__self__, *,\n agent_auto_upgrade: Optional[pulumi.Input[Union[str, 'AutoUpgradeOptions']]] = None,\n agent_version: Optional[pulumi.Input[str]] = None):\n if agent_auto_upgrade is None:\n agent_auto_upgrade = 'Enabled'\n if agent_auto_upgrade is not None:\n pulumi.set(__self__, \"agent_auto_upgrade\", agent_auto_upgrade)\n if agent_version is not None:\n pulumi.set(__self__, \"agent_version\", agent_version)", "def __init__(self, env_path, criteria=13, seed=0):\n self.env = UnityEnvironment(file_name = env_path)\n self.brain_name = self.env.brain_names[0]\n self.brain = self.env.brains[self.brain_name] \n self.action_size = self.brain.vector_action_space_size\n self.state_size = self.brain.vector_observation_space_size\n self.agent = Agent(self.state_size, self.action_size, seed=seed)\n self.criteria = criteria\n \n self.score_record = []\n self.score_window = deque(maxlen=100)\n \n self.env_info = self.env.reset(train_mode=True)[self.brain_name]", "def __init__(__self__, *,\n arc_agent_profile: Optional[pulumi.Input['ArcAgentProfileArgs']] = None):\n if arc_agent_profile is not None:\n pulumi.set(__self__, \"arc_agent_profile\", arc_agent_profile)", "def init_ai_agent(self, model, name=None, camera_follow=False):\n if model is None:\n raise ValueError('invalid model given')\n\n if name == None:\n name = 'Agent' + str(GameState.ID_counter)\n GameState.ID_counter += 1\n\n # starting mass is either random in certain range or fixed\n if self.with_random_mass_init:\n mass = random.randint(conf.RANDOM_MASS_INIT_LO,\n conf.RANDOM_MASS_INIT_HI)\n else:\n mass = conf.AGENT_STARTING_MASS\n\n radius = utils.mass_to_radius(mass)\n pos = utils.gen_random_position(radius)\n ai_agent = Agent(\n self,\n model,\n pos[0],\n pos[1],\n radius,\n mass=mass,\n color=conf.BLUE_COLOR,\n name=name,\n manual_control=False,\n camera_follow=camera_follow,\n )\n self.agents[model.id] = ai_agent\n if camera_follow:\n self.set_camera(ai_agent)", "def __init__(self, simulator):\r\n self.initialize(simulator)", "def __init__(self, mdp, discount = 0.9, iterations = 100, theta = 1e-5):\n self.theta = theta\n ValueIterationAgent.__init__(self, mdp, discount, iterations)", "def __init__(self, mdp, discount = 0.9, iterations = 100, theta = 1e-5):\n self.theta = theta\n ValueIterationAgent.__init__(self, mdp, discount, iterations)", "def __init__(self, mdp, discount = 0.9, iterations = 100, theta = 1e-5):\n self.theta = theta\n ValueIterationAgent.__init__(self, mdp, discount, iterations)", "def __init__(self, mdp, discount = 0.9, iterations = 100, theta = 1e-5):\n self.theta = theta\n ValueIterationAgent.__init__(self, mdp, discount, iterations)", "def __init__(self, mdp, discount = 0.9, iterations = 100, theta = 1e-5):\n self.theta = theta\n ValueIterationAgent.__init__(self, mdp, discount, iterations)", "def __init__(self, asa_factory: AsaFactory):\n super().__init__(asa_factory) # initialize step_in_progress flag\n self.agent, self.sampler, self.algo = asa_factory()\n self.batch_spec = self.sampler.batch_spec\n self.grad = None\n self.traj_infos = None\n self.opt_info = None", "def init_agent(cm, sc, conf):\n\n try:\n drivers = load_drivers(sc, conf)\n except Exception as err:\n msg = (\"Loadbalaner V2 agent failed to load service drivers. 
%s\"\n % (str(err).capitalize()))\n LOG.error(msg)\n raise err\n else:\n msg = (\"Loadbalaner V2 agent loaded service\"\n \" drivers successfully.\")\n LOG.debug(msg)\n\n rpcmgr = LBaaSv2RpcManager(sc, conf)\n\n try:\n events_init(sc, drivers, rpcmgr)\n except Exception as err:\n msg = (\"Loadbalaner V2 agent failed to initialize events. %s\"\n % (str(err).capitalize()))\n LOG.error(msg)\n raise err\n else:\n msg = (\"Loadbalaner V2 agent initialized\"\n \" events successfully.\")\n LOG.debug(msg)\n\n try:\n register_service_agent(cm, sc, conf, rpcmgr)\n except Exception as err:\n msg = (\"Failed to register Loadbalaner V2 agent with\"\n \" configurator module. %s\" % (str(err).capitalize()))\n LOG.error(msg)\n raise err\n else:\n msg = (\"Loadbalaner V2 agent registered with configuration\"\n \" module successfully.\")\n LOG.debug(msg)", "def initialize(self):\n self.initialize_edges()\n self.initialize_prob()\n self.initialize_total_input_dict()\n\n self.initialize_fpmusigv_dict()", "def __init__(self, *, _initialized_with_create=False):\n assert _initialized_with_create, \"Please use DHTProtocol.create coroutine to spawn new protocol instances\"\n super().__init__()", "def test_agent():\n\n # default parameters\n print('Testing an agent with default parameters')\n uid = 'test_agent'\n params = {'use_checkpointer': False}\n verify.verify_agent(uid, params)\n verify.log_graph(uid, write_logs=False)\n print('\\n' + '#' * 65 + '\\n')\n\n # random parameters\n for _ in range(9):\n rand_params = utils.get_random_params()\n rand_params['use_checkpointer'] = False\n print(f'Testing an agent with parameters: {rand_params}')\n verify.verify_agent(uid, rand_params)\n verify.log_graph(uid, rand_params, False)\n print('\\n' + '#' * 65 + '\\n')\n\n # cleaning up\n path = os.path.join(configs.LOG_DIR, 'test_agent')\n shutil.rmtree(path)", "def setup(self):\n env_name = rospy.get_param('ros_gym/environment_name')\n max_episode_steps = rospy.get_param('ros_gym/max_episode_steps')\n self.task_env = self.register_env(env_name, max_episode_steps)\n\n self.agent = \\\n AgentBase.get_agent(rospy.get_param('~agent'), env=self.task_env)\n rospy.loginfo('Using agent of type: {}'.format(self.agent.name))\n\n # Set the logging system\n rospack = rospkg.RosPack()\n pkg_path = rospack.get_path('ros_gym')\n outdir = pkg_path + '/training_results'\n self.task_env = wrappers.Monitor(self.task_env, outdir, force=True)", "def initiate_agent(self, nb_actions):\n\n self.model = Sequential()\n self.model.add(Dense(512, activation='relu', input_shape=env.observation_space)) # pylint: disable=no-member\n self.model.add(Dropout(0.2))\n self.model.add(Dense(512, activation='relu'))\n self.model.add(Dropout(0.2))\n self.model.add(Dense(512, activation='relu'))\n self.model.add(Dropout(0.2))\n self.model.add(Dense(nb_actions, activation='linear'))\n\n # Finally, we configure and compile our agent. 
You can use every built-in Keras optimizer and\n # even the metrics!\n memory = SequentialMemory(limit=memory_limit, window_length=window_length) # pylint: disable=unused-variable\n policy = TrumpPolicy() # pylint: disable=unused-variable", "def __init__(self):\n \n rospy.logdebug(\"Start CATVehicle_ENV INIT...\")\n \n self.controllers_list = []\n self.publishers_array = []\n self.robot_name_space = \"\"\n self.reset_controls = False\n\n \n \n # We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv\n super(CATVehicleEnv, self).__init__(controllers_list=self.controllers_list,\n robot_name_space=self.robot_name_space,\n reset_controls=False,\n start_init_physics_parameters=False,\n reset_world_or_sim=\"WORLD\")\n \n self.gazebo.unpauseSim()\n self._check_all_sensors_ready()\n \n self._cmd_vel_pub = rospy.Publisher('/catvehicle/cmd_vel', Twist, queue_size=10)\n \n rospy.Subscriber(\"/catvehicle/distanceEstimatorSteeringBased/dist\", Float64, self._distsb_callback)\n rospy.Subscriber(\"/catvehicle/distanceEstimatorSteeringBased/angle\", Float64, self._anglesb_callback)\n rospy.Subscriber(\"/catvehicle/distanceEstimator/dist\", Float32, self._dist_callback)\n rospy.Subscriber(\"/catvehicle/distanceEstimator/angle\", Float32, self._angle_callback)\n rospy.Subscriber(\"/catvehicle/odom\", Odometry, self._odom_callback)\n \n self._check_publishers_connection()\n self.gazebo.pauseSim()\n \n rospy.logdebug(\"Finished TurtleBot2Env INIT...\")", "def __init__(self):\n self.nagios_message = NagiosMessage()\n\n self.arg_parser = argparse.ArgumentParser(add_help=True)\n\n self.setup_default_args()\n\n # can be overridden in subclass\n self.add_extra_args()\n\n self.args = self.arg_parser.parse_args()\n\n if self.args.browser not in self.capabilities_mapping.keys():\n self.nagios_message.add_msg(\n 'browser is invalid: %s' % self.args.browser)\n self.nagios_message.raise_status(\n NagiosMessage.NAGIOS_STATUS_UNKNOWN)\n sys.exit(self.nagios_message.status_code)\n\n self.global_timeout = self.args.timeout\n\n self.driver = None", "def init(self):\r\n self._parse_options(self._force_args)\r\n self._maybe_daemonize()\r\n self._setup_modules()\r\n self._state = self.INITIALIZED", "def __init__(self, id, team, settings=None, field_rects=None, field_grid=None, nav_mesh=None):\n self.id = id\n self.team = team\n self.mesh = nav_mesh\n self.grid = field_grid\n self.settings = settings\n self.motivation = None\n self.goal = None\n\n # Determine the strategy for this agent\n if self.id == 1 or self.id == 2:\n self.strategy = STRATEGY_DEFENCE\n elif self.id == 3 or self.id == 4:\n self.strategy = STRATEGY_OFFENCE\n else:\n self.strategy = STRATEGY_NORMAL\n\n if SETTINGS_DEBUG_ON:\n self.log = open((\"log%d.txt\" % id),\"w\")\n \n # Recommended way to share variables between agents.\n if id == 0:\n self.all_agents = self.__class__.all_agents = []\n\n tilesize = getattr(self.settings, 'tilesize', DEFAULT_FIELD_TILESIZE)\n if field_rects is None:\n self.__class__.field_width = DEFAULT_FIELD_WIDTH*tilesize\n self.__class__.field_height = DEFAULT_FIELD_HEIGHT*tilesize\n else:\n self.__class__.field_width = len(field_grid[0])*tilesize\n self.__class__.field_height = len(field_grid)*tilesize\n\n self.all_agents.append(self)", "def initialise(self):\n self.logger.debug(\" %s [GenerateNextPose::initialise()]\" % self.name)", "def __init__(self, simulation_params, starting_node_id, person_type=None):\n # import simulation parameters\n self.simulation_params = simulation_params\n self.env = 
simulation_params.get('simpy_env', None)\n self.dc = simulation_params.get('data_collector', None)\n self.routing = simulation_params.get('routing', None) \n self.time_interval = simulation_params.get('time_interval', None)\n\n # keep a record of person IDs\n self.PID = next(Person_base.get_new_id)\n\n # Routing is the list of environments that the person traverses\n self.routing_node_id = starting_node_id\n\n # Person type is a characteristic which affects behaviour in the microenvironment\n self.person_type = person_type", "def __init__(self, name, agent, all_locations):\n super().__init__(name)\n self.agent = agent\n self.world = agent.world\n self.all_locations = all_locations\n self.location_feat = get_location_key(agent)", "def __init__(\n self,\n node_ip_address,\n dashboard_agent_port,\n gcs_address,\n minimal,\n metrics_export_port=None,\n node_manager_port=None,\n listen_port=ray_constants.DEFAULT_DASHBOARD_AGENT_LISTEN_PORT,\n disable_metrics_collection: bool = False,\n *, # the following are required kwargs\n object_store_name: str,\n raylet_name: str,\n log_dir: str,\n temp_dir: str,\n session_dir: str,\n logging_params: dict,\n agent_id: int,\n session_name: str,\n ):\n # Public attributes are accessible for all agent modules.\n self.ip = node_ip_address\n self.minimal = minimal\n\n assert gcs_address is not None\n self.gcs_address = gcs_address\n\n self.temp_dir = temp_dir\n self.session_dir = session_dir\n self.log_dir = log_dir\n self.dashboard_agent_port = dashboard_agent_port\n self.metrics_export_port = metrics_export_port\n self.node_manager_port = node_manager_port\n self.listen_port = listen_port\n self.object_store_name = object_store_name\n self.raylet_name = raylet_name\n self.logging_params = logging_params\n self.node_id = os.environ[\"RAY_NODE_ID\"]\n self.metrics_collection_disabled = disable_metrics_collection\n self.agent_id = agent_id\n self.session_name = session_name\n # TODO(edoakes): RAY_RAYLET_PID isn't properly set on Windows. 
This is\n # only used for fate-sharing with the raylet and we need a different\n # fate-sharing mechanism for Windows anyways.\n if sys.platform not in [\"win32\", \"cygwin\"]:\n self.ppid = int(os.environ[\"RAY_RAYLET_PID\"])\n assert self.ppid > 0\n logger.info(\"Parent pid is %s\", self.ppid)\n\n # grpc server is None in mininal.\n self.server = None\n # http_server is None in minimal.\n self.http_server = None\n\n # Used by the agent and sub-modules.\n # TODO(architkulkarni): Remove gcs_client once the agent exclusively uses\n # gcs_aio_client and not gcs_client.\n self.gcs_client = GcsClient(address=self.gcs_address)\n _initialize_internal_kv(self.gcs_client)\n assert _internal_kv_initialized()\n self.gcs_aio_client = GcsAioClient(address=self.gcs_address)\n\n if not self.minimal:\n self._init_non_minimal()", "def initialize():\n environment = Environment()\n environment.setup()", "def __init__( self, agentName, loadName, baseAgentName = False, properties = {} ):\n if baseAgentName and agentName == baseAgentName:\n self.log = gLogger\n standaloneModule = True\n else:\n self.log = gLogger.getSubLogger( agentName, child = False )\n standaloneModule = False\n\n self.__basePath = gConfig.getValue( '/LocalSite/InstancePath', rootPath )\n self.__agentModule = None\n self.__codeProperties = {}\n self.__getCodeInfo()\n\n self.__moduleProperties = { 'fullName' : agentName,\n 'loadName' : loadName,\n 'section' : PathFinder.getAgentSection( agentName ),\n 'loadSection' : PathFinder.getAgentSection( loadName ),\n 'standalone' : standaloneModule,\n 'cyclesDone' : 0,\n 'totalElapsedTime' : 0,\n 'setup' : gConfig.getValue( \"/DIRAC/Setup\", \"Unknown\" ),\n 'alive' : True }\n self.__moduleProperties[ 'system' ], self.__moduleProperties[ 'agentName' ] = agentName.split( \"/\" )\n self.__configDefaults = {}\n self.__configDefaults[ 'MonitoringEnabled'] = True\n self.__configDefaults[ 'Enabled'] = self.am_getOption( \"Status\", \"Active\" ).lower() in ( 'active' )\n self.__configDefaults[ 'PollingTime'] = self.am_getOption( \"PollingTime\", 120 )\n self.__configDefaults[ 'MaxCycles'] = self.am_getOption( \"MaxCycles\", 500 )\n self.__configDefaults[ 'ControlDirectory' ] = os.path.join( self.__basePath,\n 'control',\n *agentName.split( \"/\" ) )\n self.__configDefaults[ 'WorkDirectory' ] = os.path.join( self.__basePath,\n 'work',\n *agentName.split( \"/\" ) )\n self.__configDefaults[ 'shifterProxy' ] = ''\n self.__configDefaults[ 'shifterProxyLocation' ] = os.path.join( self.__configDefaults[ 'WorkDirectory' ],\n '.shifterCred' )\n\n\n if type( properties ) == types.DictType:\n for key in properties:\n self.__moduleProperties[ key ] = properties[ key ]\n self.__moduleProperties[ 'executors' ] = [ ( self.execute, () ) ]\n self.__moduleProperties[ 'shifterProxy' ] = False\n\n self.__monitorLastStatsUpdate = -1\n self.monitor = None\n self.__initializeMonitor()\n self.__initialized = False", "def __init__(self, player_client, config):\n\n self.player_client = player_client\n self.agents = []\n\n # Create a WaypointFollower for each agent\n for agent in config['agents']:\n\n # Create a positon2d_proxy for each agent\n pp = playerc_position2d(self.player_client, config['agents'][agent]['position2d_index'])\n if pp.subscribe(PLAYERC_OPEN_MODE) != 0:\n print \"failed to subscribe position2d proxy for %s\" % (agent)\n print playerc_error_str()\n sys.exit(2)\n print (config['agents'][agent]['waypoints'])\n\n # Allocate each agent a WaypointFollower\n self.agents.append(WaypointFollower(name=agent,\n 
waypoints=config['agents'][agent]['waypoints'],\n position2d_proxy=pp,\n waypoint_distance_tolerance=config['waypoint-distance-tolerance']))", "def initialize(self) -> None:\n pass", "def initialize_simulation(self) -> Simulation:\n pass", "def __init__(__self__, *,\n agent_id: pulumi.Input[int],\n project: pulumi.Input[str],\n description: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"agent_id\", agent_id)\n pulumi.set(__self__, \"project\", project)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if name is not None:\n pulumi.set(__self__, \"name\", name)", "def initialize(self):\n logger.debug(\"Begin Generation\")\n self.events.begin_generation()", "def _start_pagent(self):\n\n port = self._support.start_pagent()\n log.info('Port agent started at port %i',port)\n \n # Configure driver to use port agent port number.\n DVR_CONFIG['comms_config'] = {\n 'addr' : 'localhost',\n 'port' : port,\n 'cmd_port' : CMD_PORT\n }", "def initialize(self, args):\n\t\tpass", "def init_ADASYN(self, sampling_strategy, n_neighbors, n_jobs):\n self.object = over_sampling.ADASYN(sampling_strategy=sampling_strategy,\n random_state=self.random_state,\n n_neighbors=n_neighbors,\n n_jobs=n_jobs)\n\n self.sampling_strategy = sampling_strategy\n self.n_neighbors = n_neighbors\n self.n_jobs = n_jobs", "def Setup(self):\n self.Peers = [] # active nodes that we're connected to\n self.KNOWN_ADDRS = [] # node addresses that we've learned about from other nodes\n self.DEAD_ADDRS = [] # addresses that were performing poorly or we could not establish a connection to\n self.MissionsGlobal = []\n self.NodeId = random.randint(1294967200, 4294967200)", "def init(self, *args):\n return self.cmd('init', *args)", "def __init__(self, agi):\n self.agi = agi\n self.agi.status = \"NEW\"\n self.context = self.agi.variables['agi_context']\n ivrlib.__init__(self)\n self.initLogger()\n self.agi.onClose().addErrback(self.onHangup) #register a callback to clean up on Hangup.\n self.dbtries = 0\n self.entries=0\n self.agi.enrollment = \"None\"\n self.times = None\n self.checkenroll()", "def initialize(self, externalSeeding=None, solutionExport=None):\n self._targetEvaluation = self.assemblerDict['TargetEvaluation'][0][3]\n if self._requireSolnExport and solutionExport is None:\n self.raiseAnError(IOError, 'No <SolutionExport> found this step! Required for this sampling strategy.')\n self._solutionExport = solutionExport\n Sampler.initialize(self, externalSeeding=externalSeeding, solutionExport=solutionExport)\n self._validateSolutionExportVariables(solutionExport)", "def __init__(\n self,\n **kwargs,\n ):\n\n self._step_counter = 0\n\n # The most recent robot state proto received.\n self._robot_state = None\n\n # Use the instance default worker and the gin configured ip address and\n # port.\n serial_port = next(list_ports.grep(\".*ttyACM0.*\")).device\n self._hardware_interface = interface.Interface(serial_port)\n time.sleep(0.25)\n self._hardware_interface.set_joint_space_parameters(kp=50.0, kd=5.0, max_current=7.0)\n super().__init__(**kwargs)\n self._clock = time.time" ]
[ "0.7342154", "0.6851554", "0.68496484", "0.673467", "0.67185724", "0.6604147", "0.6552631", "0.6546194", "0.6505822", "0.6481763", "0.6343824", "0.63030684", "0.6272267", "0.6253052", "0.62168545", "0.6205499", "0.6164115", "0.6161617", "0.61410064", "0.61237025", "0.60737205", "0.6068146", "0.59716904", "0.5957555", "0.59470195", "0.59359276", "0.59310585", "0.5913987", "0.5895104", "0.58841234", "0.58804846", "0.5870828", "0.5864633", "0.58548695", "0.5819713", "0.5801633", "0.5799536", "0.57374376", "0.5728556", "0.572638", "0.5717591", "0.56886923", "0.5681664", "0.56737983", "0.56726074", "0.5663965", "0.5644424", "0.5633572", "0.56310886", "0.5611251", "0.560728", "0.5593601", "0.5591478", "0.5579878", "0.5575804", "0.5541053", "0.5534236", "0.55255485", "0.5508067", "0.5506003", "0.5498208", "0.5497016", "0.5485593", "0.5482671", "0.5479076", "0.54737544", "0.54737544", "0.54737544", "0.54737544", "0.54737544", "0.54682773", "0.546723", "0.5465048", "0.5462642", "0.5430146", "0.54260105", "0.5425314", "0.54190534", "0.5416777", "0.5398063", "0.5397535", "0.5395849", "0.53861815", "0.5381406", "0.53800243", "0.53796136", "0.5365727", "0.53622735", "0.53568745", "0.5356197", "0.5355554", "0.5352136", "0.53373265", "0.5336422", "0.5334434", "0.5327685", "0.5324324", "0.5315735", "0.5311912", "0.5310066" ]
0.57521296
37
Returns the projected value distribution for the input state/action pair While there are several very similar implementations of this Categorical Projection methodology around github, this function owes the most
def _categorical(self, rewards, probs, dones): # Create local vars to keep code more concise vmin = self.vmin vmax = self.vmax atoms = self.atoms num_atoms = self.num_atoms gamma = self.gamma rollout = self.rollout # rewards/dones shape from [batchsize,] to [batchsize,1] rewards = rewards.unsqueeze(-1) dones = dones.unsqueeze(-1).type(torch.float) delta_z = (vmax - vmin) / (num_atoms - 1) projected_atoms = rewards + gamma**rollout * atoms * (1 - dones) projected_atoms.clamp_(vmin, vmax) b = (projected_atoms - vmin) / delta_z # It seems that on professional level GPUs (for instance on AWS), the # floating point math is accurate to the degree that a tensor printing # as 99.00000 might in fact be 99.000000001 in the backend, perhaps due # to binary imprecision, but resulting in 99.00000...ceil() evaluating # to 100 instead of 99. Forcibly reducing the precision to the minimum # seems to be the only solution to this problem, and presents no issues # to the accuracy of calculating lower/upper_bound correctly. precision = 1 b = torch.round(b * 10**precision) / 10**precision lower_bound = b.floor() upper_bound = b.ceil() m_lower = (upper_bound + (lower_bound == upper_bound).float() - b) * probs m_upper = (b - lower_bound) * probs projected_probs = torch.tensor(np.zeros(probs.size())).to(self.device) for idx in range(probs.size(0)): projected_probs[idx].index_add_(0, lower_bound[idx].long(), m_lower[idx].double()) projected_probs[idx].index_add_(0, upper_bound[idx].long(), m_upper[idx].double()) return projected_probs.float()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def action_distribution(self, state):\n means, stds = self.__call__(state)\n dist = Normal(means, torch.exp(stds))\n\n return dist", "def computeQValueFromValues(self, state, action):\n \"*** YOUR CODE HERE ***\"\n transition_state_probs = self.mdp.getTransitionStatesAndProbs(state, action)\n # Add each state and probability to q_value\n q_value = 0\n for state_, probability in transition_state_probs:\n state_reward = self.mdp.getReward(state, state_, action)\n q_value += probability * (state_reward + self.discount * self.values[state_])\n return q_value", "def get_action_probability_dict(self, state):\n pass", "def sample(self, state):\n dist = self.action_distribution(state)\n raw_action = dist.rsample() # reparametrization trick\n\n # enforcing action bounds\n tanh_action = torch.tanh(raw_action) # prevent recomputation later.\n action = tanh_action * self.action_scale + self.action_bias\n\n # change of variables for log prob\n raw_log_prob = dist.log_prob(raw_action)\n log_prob = raw_log_prob - torch.log(\n self.action_scale * (1 - tanh_action.pow(2)) + FEPS\n )\n log_prob = log_prob.sum(1, keepdim=True)\n\n return action, log_prob, dist", "def action_log_probs(self, state):\n dist = self.action_distribution(state)\n raw_action = dist.rsample() # reparametrization trick\n\n # enforcing action bounds\n tanh_action = torch.tanh(raw_action) # prevent recomputation later.\n action = tanh_action * self.action_scale + self.action_bias\n\n # change of variables for log prob\n raw_log_prob = dist.log_prob(raw_action)\n log_prob = raw_log_prob - torch.log(\n self.action_scale * (1 - tanh_action.pow(2)) + FEPS\n )\n log_prob = log_prob.sum(1, keepdim=True)\n\n return action, log_prob", "def computeQValueFromValues(self, state, action):\n\n ##util.raiseNotDefined()\n #\"*** YOUR CODE STARTS HERE ***\"\n # Code to remove --- from here\n transitions = self.mdp.getTransitionStatesAndProbabilities(state, action)\n qvalue = 0\n for (nextState, probability) in transitions:\n reward = self.mdp.getReward(state, action, nextState)\n qvalue += probability *(reward + self.discount*self.values[nextState])\n # Code to remove --- to here\n #\"*** YOUR CODE FINISHES HERE ***\"\n \n return qvalue", "def computeQValueFromValues(self, state, action):\n newState_prob = self.mdp.getTransitionStatesAndProbs(state, action)\n sum = 0\n for i in newState_prob:\n sum += i[1]*(self.mdp.getReward(state, action, i[0])+(self.discount*self.getValue(i[0])))\n return sum \n \n util.raiseNotDefined()", "def act(self, state):\n state = torch.from_numpy(state).float()\n logits, values = self.forward(state)\n distribution = torch.distributions.Categorical(logits=logits)\n action = distribution.sample()\n log_prob = distribution.log_prob(action).unsqueeze(-1)\n entropy = distribution.entropy().unsqueeze(-1)\n action = action.item() if len(action) == 1 else action.data.numpy()\n return action, log_prob, entropy, values", "def computeQValueFromValues(self, state, action):\n #get the Transition function and nextStates\n state_prob_pair=self.mdp.getTransitionStatesAndProbs(state,action)\n #initialize the value to zero\n actual_value=0\n #iterate over probabilities (transition functions) and next states\n for pair in state_prob_pair:\n #compute qvalue\n actual_value+=pair[1]*(self.mdp.getReward(state,action,pair[0])+self.discount*self.values[pair[0]])\n #print \"The Q value is \",actual_value\n return actual_value", "def get_action(self, state):\n # 1. 
get action with actor, and add noise\n raw_action, q = self.inner_ddpg.pi(state, apply_noise=True,\n compute_Q=True) # DDPG action is always [-1,1]\n return self.scale(self.scale(raw_action))", "def act(self, state):\n state = torch.from_numpy(state).float().unsqueeze(0)\n logits = self.forward(state)\n distribution = torch.distributions.Categorical(logits=logits)\n action = distribution.sample()\n log_prob = distribution.log_prob(action).unsqueeze(-1)\n entropy = distribution.entropy().unsqueeze(-1)\n return action.item(), log_prob, entropy", "def computeQValueFromValues(self, state, action):\n # assign q value to 0\n qvalue = 0\n\n # get the possible transition states and actions\n possible_tansition = self.mdp.getTransitionStatesAndProbs(state, action)\n\n # for each transition in list of possible transitions\n # transition[0] has the successor state (s-prime) represented in co-ordinates\n for transition in possible_tansition:\n\n # calculate reward transition\n reward = self.mdp.getReward(state, action, transition[0])\n\n # transition[1] has the probablity of reaching a particular successor state (s-prime) from state, action pair\n probability = transition[1]\n\n # get the utility value from tansition[0] which has successor state represented in coordinates\n utility_value = self.getValue(transition[0])\n\n # compute q value collectively using reward and probability transition\n qvalue = qvalue + (probability * (reward + (self.discount * utility_value)))\n\n # return q value\n return qvalue", "def computeQValueFromValues(self, state, action):\n \"*** YOUR CODE HERE ***\"\n qValue = 0\n transitions = self.mdp.getTransitionStatesAndProbs(state, action)\n #print('Transitions: ' + str(transitions))\n for t in transitions:\n nextState, prob = t\n reward = self.mdp.getReward(state, action, nextState)\n #print('Reward: ' + str(reward))\n oneTransition = prob * (reward + self.discount * self.values[nextState])\n qValue = qValue + oneTransition\n return qValue", "def distribution_probability(self, game):\n dist_probability = {}\n\n total_visits = sum(self.root.n_a.values())\n\n for action, visits in self.root.n_a.items():\n dist_probability[action] = visits/total_visits\n return dist_probability", "def computeQValueFromValues(self, state, action):\n\n # Find expected utility of making this move\n x = 0\n for t in self.mdp.getTransitionStatesAndProbs(state,action):\n x += t[1] * self.getValue(t[0])\n\n\n # Return Reward + discounted expected utility\n return self.mdp.getReward(state,None,None) + self.discount*x", "def calculate_policy(self, state):\n # short aliases\n s = state # s stands for state\n g = self.config['gamma'] # g stands for gamma\n n = self.action_space.n # n stands for the number of actions\n pi_s = self.policy[state] # pi_s stands for the policy in state s\n\n sum_weights = sum(self.weights[s])\n\n # the policy is a probability vector, giving the probability of each action\n pi_s = [((1 - g) * w / sum_weights) + (g / n) for w in self.weights[s]]\n # print(state, pi_s)\n return pi_s", "def get_state_action_probability_dict_dict(self):\n pass", "def act(self, state):\n # Random\n if np.random.rand() <= self.epsilon:\n return self.environment.action_space.sample()\n\n # Model prediction\n return np.argmax(self.model.predict(state)[0])", "def computeQValueFromValues(self, state, action):\n \n \n next_states_probs = self.mdp.getTransitionStatesAndProbs(state, action)\n # liste des recompenses R(s,a,s')\n rewards = []\n # liste des probas de transitions P(s'|a,s)\n probs = []\n # 
liste des Vk(s')\n previous_values = []\n # occurence[0] = les next_state\n # occurence[1] = les proba de transi\n for occurence in next_states_probs:\n rewards.append(self.mdp.getReward(state, action, occurence[0]))\n probs.append(occurence[1])\n previous_values.append(self.getValue(occurence[0]))\n Q_value = 0\n # boucle qui calcule somme des ( P(s'|a,s) * [R(s,a,s') + gamma * Vk(s')] ) sur les s'\n for i in range(len(probs)):\n Q_value += probs[i] * (rewards[i] + self.discount * previous_values[i])\n \n return Q_value\n util.raiseNotDefined()", "def get_value(self, state):\n epsilon = self.epsilon\n possible_actions = self.get_legal_actions(state)\n\n #If there are no legal actions, return 0.0\n if len(possible_actions) == 0:\n return 0.0\n\n optimal_action = possible_actions[\n np.argmax([self.get_qvalue(state, action) for action in possible_actions])\n ]\n state_value = 0\n for action in possible_actions:\n if action == optimal_action:\n state_value += (1 - epsilon) * self.get_qvalue(state, action)\n state_value += (epsilon / len(possible_actions)) * self.get_qvalue(state, action)\n return state_value", "def get_probs(self, states, actions):\n # YOUR CODE HERE\n \n # So we need to determine for every input state-action pair, what the resulting policy distribution is\n # This means that the input will be a single state and a single action per index. \n # We then need to determine if, according to our policy, the action should be taken (prob=1) \n # or not (prob=0)\n \n # state is a tuple of (player's current sum, dealer's single showing card, boolean for usable ace)\n probs = []\n for index, (state, action) in enumerate(zip(states, actions)):\n chosen_action = self.sample_action(state)\n if action == chosen_action:\n probs.append(1)\n else:\n probs.append(0)\n \n \n return np.array(probs)", "def get_probs(self, states, actions):\n # YOUR CODE HERE\n \n probs = np.ones(len(states))/2\n return probs", "def compute_qvalues(self, state):\n return self.model.predict([np.expand_dims(item, 0) for item in state])[0]", "def computeQValueFromValues(self, state, action):\n #get all possible actions from current state\n possibleActions = self.mdp.getPossibleActions(state)\n\n #if action is illegal return 0\n if action in possibleActions:\n transitions = self.mdp.getTransitionStatesAndProbs(state, action)\n returnVal = 0\n index = 0\n\n #loop through all trainsitions/probabilities\n while index < len(transitions):\n #This returns s' and T(s, a, s')\n (nextState, prob) = transitions[index]\n\n\n #This returns R(s,a,s')\n rewardVal = self.mdp.getReward(state, action, nextState)\n\n #This is gamma * V(s')\n lastPartOfEquation = self.values[nextState] *self.discount\n\n #This is T(s, a, s')[R(s, a, s') + gamma * V(s'). 
Bellman equation\n returnVal = returnVal + prob * (rewardVal + lastPartOfEquation)\n index = index + 1\n\n return returnVal\n else:\n return 0", "def get_action(self, state):\n action_probs = self.forward(state)\n action = torch.distributions.Categorical(probs=action_probs).sample()\n action = action.detach().cpu().numpy()\n return action", "def get_action_probs(self, state):\n state = state.astype(np.float32)\n return self.session.run(self.action_probs,\n feed_dict={self.s_placeholder: state})", "def computeQValueFromValues(self, state, action):\r\n #\r\n weightedVfvsSum = 0\r\n reward = 0\r\n # to get possible next state(s)\r\n for nextState, prob in self.mdp.getTransitionStatesAndProbs(state, action):\r\n reward += self.mdp.getReward(state, action, nextState) * prob\r\n #print \":computeQValueFromValues: nextState is: \", nextState, \" | self.values[nextState] is: \", self.values[nextState]\r\n weightedVfvsSum += prob * self.getValue(nextState)\r\n #\r\n return ( reward + ( self.discount * weightedVfvsSum) ) # making the actual qvalue\r", "def getQValue(self, state, action):\n \"*** YOUR CODE HERE ***\"\n # OUR CODE HERE\n #get the value of the state\n qVal = self.values[state]\n #iterate through the MDP transition states from the current state\n for transitionState, probability in self.mdp.getTransitionStatesAndProbs(state, action):\n #q value = discount * expected value of reward of state\n qVal += self.discount * probability * self.values[transitionState]\n return qVal\n # END OUR CODE", "def produce_action_and_action_info(self, state):\n action_probabilities = self.actor_local(state)\n max_probability_action = torch.argmax(action_probabilities, dim=-1)\n action_distribution = create_actor_distribution(self.action_types, action_probabilities, self.action_size)\n action = action_distribution.sample().cpu()\n # Have to deal with situation of 0.0 probabilities because we can't do log 0\n z = action_probabilities == 0.0\n z = z.float() * 1e-8\n log_action_probabilities = torch.log(action_probabilities + z)\n return action, (action_probabilities, log_action_probabilities), max_probability_action", "def computeQValueFromValues(self, state, action):\n totalsum = 0\n reward = 0\n for nextstate, transition in self.mdp.getTransitionStatesAndProbs(state, action):\n reward = self.mdp.getReward(state, action, nextstate)\n totalsum += transition * (reward + self.discount * self.getValue(nextstate)) # right way to access previous value?\n return totalsum", "def get_log_prob(self, states, actions):\n dist, _ = self.get_dist_and_mode(states)\n log_probs = dist.log_prob(actions)\n log_probs = tf.expand_dims(log_probs, -1) # To avoid broadcasting\n return log_probs", "def computevaluefromqvalues(self, state):\n legalactions = env.getlegalactions(deepcopy(env.state_to_array(state)))\n if len(legalactions) == 0:\n return 0.0\n tmp = Counter()\n for action in legalactions:\n tmp[action] = self.getqvalue(state, action)\n return tmp[tmp.argMax()]", "def cdf(self,x):\n return self.categoricalDist.cdf(x)", "def get_action(self,state):\n \n q_values = self.__network.predict(state[None])[0]\n \n ###YOUR CODE\n if np.random.rand()<self.epsilon:\n return np.random.choice(self.n_actions)\n return np.argmax(q_values)", "def get_all_state_action_values(self, state):\n return torch.tensor([[self.get_state_action_value(state, action) for action in range(self.action_space.n)]]).float()", "def getQValue(self, state, action):\n # sumP = 0\n # for state_inner in self.mdp.getStates():\n # if state_inner == 
\"TERMINAL_STATE\":\n # continue\n # a = util.Counter(dict(self.mdp.getTransitionStatesAndProbs(state, action)))\n # sumP += a[state_inner] * self.values[state_inner]\n # return self.mdp.getReward(state, action, None) + self.discount *\n next_state_and_probs = self.mdp.getTransitionStatesAndProbs(state, action)\n sumP = 0\n for _, prob in next_state_and_probs:\n sumP += prob\n return self.mdp.getReward(state, None, None) + self.discount * sumP", "def get_action(self, state, action=None):\n logits = self.actor(state)\n\n # Multinomial Distribution (to sample from action spaces with probabilities governed by logits)\n probs = Categorical(logits=logits)\n if action is None:\n action = probs.sample()\n return action, probs.log_prob(action), probs.entropy()", "def select_action(policy, state):\n state = torch.from_numpy(state).long().unsqueeze(0)\n state = torch.zeros(3,9).scatter_(0,state,1).view(1,27)\n pr = policy(Variable(state))\n m = torch.distributions.Categorical(pr)\n action = m.sample()\n log_prob = torch.sum(m.log_prob(action))\n return action.data[0], log_prob", "def select_action(policy, state):\n #torch.manual_seed(RAND_SEED) # Seed here is causing kernel to crash\n state = torch.from_numpy(state).long().unsqueeze(0)\n state = torch.zeros(3,9).scatter_(0,state,1).view(1,27)\n #print(state) # for 2b\n pr = policy(Variable(state))\n #print(pr) # for 2c\n m = torch.distributions.Categorical(pr)\n action = m.sample()\n log_prob = torch.sum(m.log_prob(action))\n return action.data[0], log_prob", "def calc_D(state):\n\t\tif t < thresh:\n\t\t\tstate.D_g[t] = 0.5\n\t\t\tstate.D_n[t] = 0.5\n\t\telse:\n\t\t\tif mod == \"constant\":\n\t\t\t\tstate.D_g[t] = D\n\t\t\t\tstate.D_n[t] = 1-D\n\t\t\tif mod == \"value\":\n\t\t\t\t# NOTE: if rmag and lmag is 1/0, can just use V\n\t\t\t\t# average of two actions\n\t\t\t\tV = np.mean(1/2*(state.QG[t,:] - state.QN[t,:])) # state average(?) 
\n\t\t\t\tV = 1/(1 + np.exp(-V*k)) # translate between 0 and 1\n\t\t\t\tstate.D_g[t] = V \n\t\t\t\tstate.D_n[t] = 1 - V\n\t\treturn state", "def __call__(self, state, q_values):\n\n if self.policy_type == \"greedy\":\n is_greedy = True\n else:\n is_greedy = random.uniform(0, 1) > self.epsilon\n\n if is_greedy :\n # choose greedy action\n index_action = np.argmax(q_values[state])\n else:\n # get a random action\n index_action = random.randint(0,3)\n\n return actions_dict[index_action]", "def computeValueFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n\n legal_actions = self.getLegalActions(state)\n\n if len(legal_actions) == 0:\n return 0.0\n\n return max([self.getQValue(str(state), action) for action in legal_actions])", "def __call__(self, state, action):\n s = float(state.__hash__()) # pybrain secretly casts state to float when we do rl\n a = int(action)\n qvalues = self.module.getActionValues(s)\n maxq = max(qvalues)\n if qvalues[a] == maxq:\n n_max = sum([1 if q == maxq else 0 for q in qvalues])\n return 1.0 / n_max\n return 0", "def get_action_value(mdp, state_values, state, action, gamma):\n\n Q = 0\n # YOUR CODE HERE\n return Q", "def computeValueFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n\n legal_actions = self.getLegalActions(state)\n\n if len(legal_actions) == 0:\n return 0.0\n\n return max([self.getQValue(state, action) for action in legal_actions])", "def computeValueFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n\n legal_actions = self.getLegalActions(state)\n\n if len(legal_actions) == 0:\n return 0.0\n\n return max([self.getQValue(state, action) for action in legal_actions])", "def computeValueFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n\n legal_actions = self.getLegalActions(state)\n\n if len(legal_actions) == 0:\n return 0.0\n\n return max([self.getQValue(state, action) for action in legal_actions])", "def computeValueFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n\n legal_actions = self.getLegalActions(state)\n\n if len(legal_actions) == 0:\n return 0.0\n\n return max([self.getQValue(state, action) for action in legal_actions])", "def probit(x):\n from tensorflow_probability import distributions\n return distributions.Normal(0, 1).cdf(x)", "def calculate_policy(self, state):\n # short aliases\n s = state # s stands for state\n g = self.config['gamma'] # g stands for gamma\n n = self.action_space.n # n stands for the number of actions\n a = self.config['alpha']\n pi_s = self.policy[state] # pi_s stands for the policy in state s\n weights = self.weights[state]\n # print(weights)\n\n\n # obtains the probability vector from Hedge: p_i(t) = (1+alpha)^s_i(t) / sum_{j \\in K} (1+alpha)^s_j(t)\n sum_weights_exponentials = sum([(1 + a) ** w for w in weights])\n pre_prob = [(((1 + a) ** w) / sum_weights_exponentials) for w in weights]\n\n # the policy is a probability vector, giving the probability of each action\n pi_s = [((1 - g) * p) + (g / n) for p in pre_prob]\n\n return pi_s", "def get_state_action_values(self, state, state_values):\n action_values = []\n\n for action in self.grid.actions:\n probs = self.grid.state_actions[state][action]\n state_action_values = self.get_state_values(state, state_values)\n action_values.append(sum([p * b for p, b in zip(probs, state_action_values)]))\n\n assert len(action_values) == len(self.grid.actions)\n return action_values", "def computeValueFromQValues(self, state):\n bestAction = self.computeActionFromQValues(state)\n if bestAction == None:\n return 0.0\n return self.getQValue(state, 
bestAction)", "def value(self, state):\n num_visits = len(state)\n value = 0\n\n for i in range(1, num_visits):\n try:\n value += distances[(state[i-1], state[i])]\n except KeyError:\n value += distances[(state[i], state[i - 1])]\n\n value = -1 * value\n\n return value", "def get_greedy_actions(self, state):\n state_action_values = self.get_action_values(state) # What are the value that we could get from current state\n\n max_action_value = max(state_action_values) # What is the higher value\n max_value_indices = [i for i, value in enumerate(state_action_values) if\n value == max_action_value] # Gets their indices\n\n # Prepares action probabilites for the ones with the higher value\n action_probs = np.zeros((4,))\n action_probs[max_value_indices] = 1 / (len(max_value_indices) if type(max_value_indices) is list else 1)\n\n return action_probs", "def projected():\n # pull updated list of donors(class)\n projected_list = create_donors_list()\n print('''Welcome to the Projection Option. Here you can run projections for contributions. \n Help Companies structure their matching donations based on past contribution amounts.\n Simply enter the minumum and maximum donation range that will be matched and see the total contribution:''')\n try:\n minimum_input = float(\n input('Enter a minimum donation amount (0 if none): '))\n maximum_input = float(\n input('Enter a maximum donation amount (0 if none): '))\n factor = float(\n input('Please enter the factor you wish to multiply these donations by >> '))\n except ValueError:\n print('Please follow instructions and enter a number only')\n\n projections = projection(projected_list, factor,\n minimum_input, maximum_input)\n print('\\nProjected contribution value: ${:,.2f}'.format(projections))", "def computeActionFromValues(self, state):\n\n ##util.raiseNotDefined()\n #\"*** YOUR CODE STARTS HERE ***\"\n \n # Code to remove --- from here\n resultingAction = None\n if self.mdp.isTerminal(state):\n return resultingAction\n else:\n bestq = float(\"-inf\")\n actions = self.mdp.getPossibleActions(state)\n for action in actions:\n qvalue = self.computeQValueFromValues(state, action)\n if qvalue > bestq:\n bestq = qvalue\n resultingAction = action\n return resultingAction\n\n # Code to remove --- to here\n #\"*** YOUR CODE FINISHES HERE ***\"", "def get_log_prob(self, states, actions):\n\n mean, log_std = self.__network.forward(tr.from_numpy(states).float())\n\n actions = tr.from_numpy(actions).float()\n log_prob = - (actions - mean) ** 2\n log_prob /= (2.0 * tr.exp(log_std) ** 2 + 1e-10)\n log_prob -= log_std + 0.5 * self.__output_dim * np.log(2 * np.pi)\n return log_prob.sum(1, keepdim=True)", "def act(self, state):\n if np.random.rand() <= self.epsilon:\n return random.randrange(self.n_actions)\n act_values = self.model.predict(state)\n return np.argmax(act_values[0])", "def get_state_value(self, state):\n values = self.get_all_state_action_values(state)\n policy = self.target_policy(values)\n return (values @ policy.probs.t()).item()", "def build_distribution_value(self):\n timeseries = {\n \"metricKind\": \"DELTA\", \n \"metric\": {\n \"type\": \"serviceruntime.googleapis.com/api/response_sizes\"\n }, \n \"points\": [\n {\n \"interval\": {\n \"endTime\": \"2019-02-19T04:00:00.841487Z\", \n \"startTime\": \"2019-02-19T03:00:00.841487Z\"\n }, \n \"value\": {\n \"distributionValue\": {\n \"count\": \"56\", \n \"mean\": 17,\n \"sumOfSquaredDeviation\": 1.296382457204002e-25,\n \"bucketCounts\": [\"56\"], \n \"bucketOptions\": {\n \"exponentialBuckets\": {\n 
\"scale\": 1, \n \"growthFactor\": 10, \n \"numFiniteBuckets\": 8\n }\n }\n }\n }\n }\n ], \n \"resource\": {\n \"labels\": {\n \"service\": \"monitoring.googleapis.com\", \n \"credential_id\": \"serviceaccount:106579349769273816070\", \n \"version\": \"v3\", \n \"location\": \"us-central1\", \n \"project_id\": \"ms-demo-app01\", \n \"method\": \"google.monitoring.v3.MetricService.ListMetricDescriptors\"\n }, \n \"type\": \"consumed_api\"\n }, \n \"valueType\": \"DISTRIBUTION\"}\n return timeseries", "def projection(input_list, factor, min_donation=None, max_donation=None):\n projected_contribution = 0\n for donor in input_list:\n projected_contribution += sum(filter_factor_map(factor,\n donor.donations,\n min_donation,\n max_donation))\n # returns the projection\n return projected_contribution", "def predict_Q(self, state, action):\r\n state = np.reshape(state, (-1, self.feature_number))\r\n action = np.reshape(action, (-1, self.action_space))\r\n Q_value = self.critic.predict([state, action])\r\n return Q_value", "def ppf(self,x):\n return self.categoricalDist.ppf(x)", "def _graph_fn_get_distribution_log_probs(self, key, parameters, actions):\n # For bounded continuous action spaces, need to unscale (0.0 to 1.0 for beta distribution).\n if self.bounded_action_space[key] is True:\n actions = (actions - self.action_space.low) / (self.action_space.high - self.action_space.low)\n return self.distributions[key].log_prob(parameters, actions)", "def get_move_probs(self, state, temp=1e-3):\n for n in range(self._n_playout):\n state_copy = copy.deepcopy(state)\n self._playout(state_copy)\n\n # calc the move probabilities based on visit counts at the root node\n act_visits = [(act, node._n_visits)\n for act, node in self._root._children.items()]\n acts, visits = zip(*act_visits)\n act_probs = softmax(1.0 / temp * np.log(np.array(visits) + 1e-10))\n\n return acts, act_probs", "def computeActionFromValues(self, state):\n \"*** YOUR CODE HERE ***\"\n\n\n\n #check if teminal\n if self.mdp.isTerminal(state):\n return None\n else:\n # get all actions for state\n actionArr = self.mdp.getPossibleActions(state)\n\n #Q val and action at index 0 of action Array\n currentQ = self.computeQValueFromValues(state, actionArr[0])\n currentAction = actionArr[0]\n\n #loop through action Array\n for action in actionArr:\n #compute q at each index\n possibleQ = self.computeQValueFromValues(state, action)\n\n if (possibleQ == currentQ):\n currentAction = random.choice([currentAction, action])\n\n\n elif (possibleQ > currentQ):\n # want to return action for greatest Q value\n currentAction = action\n # update Q to keep track of corresponding value\n currentQ = possibleQ\n\n\n\n\n return currentAction", "def computeActionFromValues(self, state):\n \"*** YOUR CODE HERE ***\"\n actions = self.mdp.getPossibleActions(state)\n # Initialize max_value as - infinity\n # Initialize best action as None, choose max_value action\n max_value = float(\"-inf\")\n computed_action = None\n\n for action in actions:\n # Find q value of specified action\n q_value = self.computeQValueFromValues(state, action)\n # Update action if it's the best so far\n if q_value > max_value:\n max_value = q_value\n computed_action = action\n return computed_action", "def compute_q_value(self, state, actions):\n state_values = self.forward(state)\n state_action_value = state_values.gather(1, actions)\n return state_action_value.squeeze()", "def choose_action(self, state):\n prob = [] # Probability distribution\n for i in range(len(ACTIONS)):\n 
prob.append(self.epsilon/4)\n Q_func = self.policy.predict(process_state(state))\n Q_vals = Q_func[0]\n max_index = []\n Qmax = np.amax(Q_vals)\n for i in range(len(prob)):\n if Q_vals[i] == Qmax:\n # max_index.append(i)\n prob[i] = 1 - self.epsilon + self.epsilon/4\n break\n # ind = np.random.choice(max_index)\n # prob[ind] = 1 - self.epsilon + self.epsilon/4\n action = np.random.choice(ACTIONS, p = prob)\n return action", "def create_state_value_function(q):\n state_values = defaultdict(float)\n for state, action_values in q.items():\n state_values[state] = np.max(action_values)\n return state_values", "def sample(self, state):\n state = torch.FloatTensor(state)\n\n action_prob = self.network(state)\n action_distribution = Categorical(action_prob)\n action = action_distribution.sample()\n\n return action.cpu().item()", "def get_move_probs(self, state, temp=1e-3):\n for n in range(self._n_playout):\n state_copy = copy.deepcopy(state)\n self._playout(state_copy)\n\n # calc the move probabilities based on visit counts at the root node\n act_visits = [(act, node._n_visits)\n for act, node in self._root._children.items()]\n acts, visits = zip(*act_visits)\n act_probs = softmax(1.0/temp * np.log(np.array(visits) + 1e-10))\n\n return acts, act_probs", "def initialize_distribution(states, actions):\n dist = {}\n\n for i in states:\n dist[i] = {}\n for j in actions:\n dist[i][j] = [0.0]\n\n return dist", "def compute_probability_of_state(state):\n p = compute_log_probability_of_text(state[\"text\"], state[\"char_to_ix\"], \n state[\"frequency_statistics\"], state[\"transition_matrix\"])\n \n return p", "def sac_actor_fc_continuous_network(state_dims: int,\n action_dims: int,\n env_action_lb: Union[int, float],\n env_action_ub: Union[int, float],\n log_std_min: Union[int, float],\n log_std_max: Union[int, float],\n num_hidden_layers: int = 2,\n hidden_size: int = 256,\n ) -> tf.keras.Model:\n inputs = layers.Input(shape=(state_dims,), name=\"input_layer\")\n\n # Create shared hidden layers\n hidden = inputs\n for i in range(num_hidden_layers):\n hidden = layers.Dense(hidden_size, activation=\"relu\", name=f\"hidden_layer{i}\")(hidden)\n\n # Output mean and log_std\n mu = layers.Dense(action_dims)(hidden)\n log_std = layers.Dense(action_dims)(hidden)\n log_std = tf.clip_by_value(log_std, log_std_min, log_std_max)\n\n # Create Normal distribution with outputs\n std = tf.math.exp(log_std)\n pi_dist = tfd.Normal(mu, std)\n\n # To obtain actions, we sample from the distribution\n # We use the reparameterization trick here\n action = pi_dist.sample()\n\n # Get the log probability of that action w.r.t the distribution\n logp_pi = tf.reduce_sum(pi_dist.log_prob(action), axis=1)\n\n # NOTE: The correction formula is a little bit magic. To get an understanding\n # of where it comes from, check out the original SAC paper (arXiv 1801.01290)\n # and look in appendix C. This is a more numerically-stable equivalent to Eq 21.\n logp_pi -= tf.reduce_sum(2. * (tf.math.log(2.) - action - tf.math.softplus(-2. 
* action)), axis=1)\n\n # Squash the Gaussian policy\n action_squashed = tf.math.tanh(action)\n\n # Now either change output by multiplying by max_action,\n # action_squashed = action_squashed * self.env_action_ub\n\n # OR scale outputs to be within environment action space bounds\n resize_fn = lambda x: (((x + 1) * (env_action_ub - env_action_lb)) / 2) + env_action_lb\n action_squashed = resize_fn(action_squashed)\n\n model = tf.keras.Model(inputs=inputs, outputs=[action_squashed, logp_pi])\n return model", "def computeActionFromValues(self, state):\n \"*** YOUR CODE HERE ***\"\n actions = self.mdp.getPossibleActions(state)\n optimalAction = None\n maxValue = float('-inf')\n for a in actions:\n qValue = self.computeQValueFromValues(state, a)\n if qValue > maxValue:\n maxValue = qValue\n optimalAction = a\n return optimalAction", "def sample(self, state):\n\n # get neural network estimation for action distribution mean and log std\n mean, log_std = self.forward(state)\n # get std by exponentiation\n std = log_std.exp()\n\n # sample independent zero mean unit variance gaussian noise\n normal = Normal(0, 1)\n z = normal.sample().to(self.device)\n\n # compute action as the squashed tanh of gaussian prediction multiplied by independent noise,\n # needed for the reparametrization trick (see Haarnoja et al. paper)\n action = torch.tanh(mean + std * z)\n\n # to recover normalized log probabilities (see Harnojaa et al. Appendix \"Enforcing Bounds\")\n log_prob = Normal(mean, std).log_prob(mean + std * z) \\\n - torch.log(1 - action.pow(2) + EPS).sum(1, keepdim=True)\n\n return action, log_prob, z, mean, log_std", "def obs_cost_fn(self, state):\n # Weights for different terms\n W_PUSHER = 1\n W_GOAL = 2\n W_DIFF = 5\n\n length = state.shape[0]\n # pusher_x, pusher_y = state[:, 0], state[:, 1]\n box_x, box_y = state[:, 2], state[:, 3]\n # goal_x, goal_y = np.tile(self.goal[0], (length, 1)), np.tile(self.goal[1], (length, 1))\n\n pusher = state[:, 0:2]\n box = state[:, 2:4]\n goal = np.tile(self.goal, (length, 1))\n goal_x, goal_y = goal[:, 0], goal[:, 1]\n\n d_box = np.linalg.norm(pusher - box, axis=1, ord=2)\n d_goal = np.linalg.norm(box - goal, axis=1, ord=2)\n\n\n # pusher_box = np.array([box_x - pusher_x, box_y - pusher_y])\n # box_goal = np.array([goal_x - box_x, goal_y - box_y])\n # d_box = np.sqrt(np.dot(pusher_box, pusher_box))\n # d_goal = np.sqrt(np.dot(box_goal, box_goal))\n diff_coord = np.abs(box_x / (box_y + EPSILON) - goal_x / (goal_y + EPSILON))\n # the -0.4 is to adjust for the radius of the box and pusher\n return W_PUSHER * np.max([d_box - 0.4, np.zeros(len(d_box))], axis=0) + W_GOAL * d_goal + W_DIFF * diff_coord", "def valueLookAhead(self, state, estState, action):\n # Get the state distribution, assuming we take action.\n newDist = {}\n for pState in range(self.P):\n for newState, prob in self.trans(state, pState)[action].iteritems():\n newF, newP = newState\n if newState not in newDist:\n newDist[newF] = [0.0] * self.P\n # Note that newDist[newF] is a (not-normalized)\n # state probability distribution.\n newDist[newF][newP] += prob * estState[pState]\n\n # For each possible newF, calculate the maximum value.\n maxValue = -float('inf')\n for newF, dist in newDist.iteritems():\n normDist = [x/sum(dist) for x in dist]\n for vector in self.vectors[newF]:\n dotProduct = sum(vector[i] * normDist[i] for i in range(self.P))\n if dotProduct > maxValue:\n maxValue = dotProduct\n\n rewardValue = 0\n for pState in range(self.P):\n rewardValue += self.reward(state, pState, action) * 
estState[pState]\n return maxValue + rewardValue", "def mlp_categorical_policy(x, a, hidden_sizes, activation, output_activation, action_space):\n\n # number of actions possible...they are numbered 0 through n-1\n act_dim = action_space.n\n\n # get a tensorflow neural network to give us a vector output\n # of pre-normalized log probabilities of each action\n logits = mlp(x, list(hidden_sizes)+[act_dim], activation, None)\n\n # then do a softmax to normalize the probabilities\n # so logp_all is the log of the normalized probabilities of each action\n logp_all = tf.nn.log_softmax(logits)\n\n\n # now, create `pi`,\n # which will be a tensor containing the index\n # of the action we have selected (randomly, according to the\n # probabilities implied by the neural network)\n\n # the line that does this is dense, so here is some commentary:\n # squeeze removes all dimensions of size one, and\n # multinomial draws samples according to the multinomial distribution,\n # ie. according to the probabilities implied by the logits\n # https://www.tensorflow.org/api_docs/python/tf/random/multinomial\n # TODO: tf is deprecating multinomial;\n # we should probably change this to tf.random.categorical instead\n pi = tf.squeeze(tf.multinomial(logits, 1), axis=1)\n\n # calculate the log of the probability of selecting the specific\n # actions (pi / a) given states x\n # to do this, use a one_hot on the action index to get a vector\n # with a one in that slot and 0s elsewhere,\n # then dot with logp_all (which we already constructed)\n # to get a the value of the probability of that specific action\n # reduce_sum will give us a tensor which is just a number with this value\n # (or the sum of the log probs of multiple actions, if we used this\n # function to calculate probabilities over a trajectory, ie.\n # x and a/pi both contain several elements, representing different\n # actions to take in different states.\n # in this case, by summing the log probs, we essentially\n # log the product of individual probabilities, ie. 
finding\n # the log prob of the entire trajectory)\n logp = tf.reduce_sum(tf.one_hot(a, depth=act_dim) * logp_all, axis=1)\n logp_pi = tf.reduce_sum(tf.one_hot(pi, depth=act_dim) * logp_all, axis=1)\n\n return pi, logp, logp_pi", "def computeValueFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n actions = self.getLegalActions(state)\n if len(actions) == 0:\n return 0.0\n values = [self.getQValue(state, action) for action in actions]\n return max(values)", "def get_state_action_value(self, state, action):\n state_tensor = torch.from_numpy(state).float().to(self.device)\n output = torch.dot(self.weights[action,:],state_tensor)\n return output", "def get_action(self, state):\n state = torch.from_numpy(state).unsqueeze(0).to(self.device, dtype=torch.float)\n action, log_prob, z, mean, log_std = self.sample(state)\n return action[0]", "def get_state_action_value(self, state, action):\n state_tensor = torch.from_numpy(state).float().to(self.device)\n output = torch.dot(self.weights[action,:],state_tensor.view(-1))\n return output", "def tree_distribution(value_dict, tree, order):\n\n # Note list below excludes contexts involving the last variable\n leaves = [n for n in tree.nodes\n if nx.shortest_path_length(tree, \"Root\", n) == len(order)-1]\n # All outcomes must include the possibilities of the last variable\n # which are however excluded in our CStree graph\n cardinalities = [len(value_dict[i]) for i in order]\n tree_distr = DiscreteFactor([\"X\"+str(i) for i in order],\n cardinalities,\n np.ones(np.prod(cardinalities))/np.prod(cardinalities))\n\n # Each stage encodes a distribution for the variable that comes\n # after the variable that represents the node of that stage\n distrs = {}\n\n first_var_outcomes = len(value_dict[order[0]])\n\n first_probabilities = np.random.dirichlet(\n [10*first_var_outcomes if np.random.rand()<0.5\n else 0.5*first_var_outcomes for _ in range(first_var_outcomes)])\n first_probabilities = (1/first_var_outcomes)*np.ones(first_var_outcomes)\n\n # Gathering all probabilities to see if they sum to 1\n prs = []\n\n # For each outcome from the tree\n # TODO This could be done by appending nodes onto\n # previous path instead of generating paths this way\n for leaf in leaves:\n path = nx.shortest_path(tree, \"Root\", leaf)\n for val in value_dict[order[-1]]:\n # Adding last (variable, value) pair to the path\n actual_path = path+[path[-1]+((order[-1],val),)]\n # Probability of first outcome, path[1] is actual first node\n # since first is Root, [0] to access the node itself since\n # it is a tuple of tuples, -1 for the actual value\n pr = first_probabilities[path[1][0][-1]]\n # For each next node, get probabilities according to staging\n # or create them if encountering the staging for first time\n # A stage of a node is determined uniquely by the level of the\n # node in the tree and the context which fixes the stage\n\n # Skipping over first node which is root\n # since that value is taken into pr, skipping last\n # since we need nodes to get non-singleton stages,\n # from which we get the probability values for the\n # level afterwards and the last node is always\n # in the singleton stage\n #for level in range(1,self.p):\n # node = actual_path[level]\n for node in actual_path[1:-1]:\n #print(\"Node is\", node)\n # Next variable and its outcomes\n level = len(node)\n # Below is next variable since Python has 0 indexing\n next_var = order[level]\n\n # Edges coming out of node in level i represent\n # outcomes of variable in level i+1\n outcomes = 
len(value_dict[next_var])\n #print(\"var and no oucome\", var, outcomes)\n\n #level = len(node)\n\n context = frozenset(tree.nodes[node].get(\"context\",node))\n if distrs.get((level, context), None) is None:\n alpha = [10 if np.random.rand()<0.5\n else 0.5 for _ in range(outcomes)]\n distrs[(level, context)]=np.random.dirichlet(alpha)\n # We need the next outcome value of the path\n # First -1 gets the last node in the current path\n # Next -1 gets the value from that node\n\n next_outcome = actual_path[level+1][-1][-1]\n #print(level, next_outcome, next_var)\n pr = pr*distrs[(level, context)][next_outcome]\n\n\n # Adding probabilities at this level otherwise you miss the last outcome\n actual_leaf = actual_path[-1]\n kwargs = {\"X\"+str(var):val for (var,val) in actual_leaf}\n tree_distr.set_value(pr, **kwargs)\n prs.append(pr)\n\n # Final check to make sure all outcomes sum to 1\n # print(\"All probabilities sum to \",sum(prs))\n return tree_distr", "def __init_probability_functions(self):\n probability_functions = {}\n for state in self.non_terminal_spaces:\n for action in self.action_space:\n resulting_state = state + self.action_space[action]\n if self.__off_grid_move(resulting_state, state):\n key = (state, -1, state, action)\n else:\n key = (resulting_state, -1, state, action)\n probability_functions[key] = 1\n return probability_functions", "def calc_q_values(self, state): \n q_vals = self.q_network.predict(np.swapaxes(state,0,3))\n return q_vals", "def computeValueFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n if not self.getLegalActions(state): return 0\n\n best_action = self.computeActionFromQValues(state)\n return self.getQValue(state, best_action)", "def call(self, states):\n dist, mode = self.get_dist_and_mode(states)\n samples = dist.sample()\n log_probs = dist.log_prob(samples)\n log_probs = tf.expand_dims(log_probs, -1) # To avoid broadcasting\n return mode, samples, log_probs", "def discrete_rv(p):\n u = np.random.uniform()\n cdf = np.cumsum(p)\n j = np.searchsorted(cdf, u)\n return j", "def decision(self, state_space: np.ndarray, action_space: ActionSpace):\n # preprocess state space\n # normalizing state space between zero and one ( 2 is max value of stone and -2 is min value of stone\n state_space = min_max_scaling(state_space)\n state_space = state_space.reshape(1, multiply(*state_space.shape))\n qvalues = self._get_qvalues([state_space])\n decision = self._sample_actions(qvalues, action_space)\n return decision", "def calc_value(self, state: int, action: str):\n (x, y) = self.get_position(state)\n\n if self.environment.is_wall(x, y):\n return -2\n\n self.environment.set_state(x, y)\n if self.environment.is_terminal_state():\n return 0\n\n new_position, reward, _ = self.environment.step(action)\n new_state = self.get_index(*new_position)\n\n return reward + self.discount * self.V[new_state]", "def get_transition_prob(self, state, action, next_state):\n return self.get_next_states(state, action).get(next_state, 0.0)", "def act(self, a_state):\n if np.random.rand() <= self.epsilon:\n return random.randrange(self.n_actions)\n else:\n action_values = self.model.predict(a_state)\n\n return np.argmax(action_values[0])", "def select_action(self, state):\n\n if state in self.Q:\n prob = self.get_probs(self.Q[state])\n else:\n prob = np.ones(self.nA) / self.nA\n return np.random.choice(np.arange(self.nA), p = prob)", "def ComputeDPPrincipalProjection(data, projection_dims, orders, sigma):\n\n # Normalize each row.\n normalized_data = normalize(data, norm='l2', 
axis=1)\n covar = np.matmul(np.transpose(normalized_data), normalized_data)\n\n # Since the data is already normalized, there is no need to clip\n # the covariance matrix.\n\n gaussian_noise, rdp_budget = gaussian_rdp(covar.reshape([1,-1]), 1.0, orders, sigma)\n\n saned_covar = covar + gaussian_noise.reshape(covar.shape)\n\n # Symmetrize saned_covar. This also reduces the noise variance.\n saned_covar = 0.5 * (saned_covar + np.transpose(saned_covar))\n\n # Compute the eigen decomposition of the covariance matrix, and\n # return the top projection_dims eigen vectors, represented as columns of\n # the projection matrix.\n eigvals, eigvecs = np.linalg.eig(saned_covar)\n\n topk_indices = eigvals.argsort()[::-1][:projection_dims] \n topk_indices = np.reshape(topk_indices, [projection_dims])\n\n # Gather and return the corresponding eigenvectors.\n return np.transpose(np.take(np.transpose(eigvecs), topk_indices, axis=0)), rdp_budget", "def get_dist_and_mode(self, states):\n out = self.trunk(states)\n mu, log_std = tf.split(out, num_or_size_splits=2, axis=1)\n mode = tf.nn.tanh(mu)\n\n log_std = tf.nn.tanh(log_std)\n assert LOG_STD_MAX > LOG_STD_MIN\n log_std = LOG_STD_MIN + 0.5 * (LOG_STD_MAX - LOG_STD_MIN) * (log_std + 1)\n\n std = tf.exp(log_std)\n\n dist = ds.TransformedDistribution(\n distribution=ds.Normal(loc=0., scale=1.),\n bijector=tfp.bijectors.Chain([\n tfp.bijectors.Tanh(),\n tfp.bijectors.Affine(shift=mu, scale_diag=std),\n ]),\n event_shape=[mu.shape[-1]],\n batch_shape=[mu.shape[0]])\n return dist, mode", "def __call__(self, state):\n if random.random() > self._epsilon:\n return self._max_policy(state)\n return random.choice(np.arange(self._action_size))", "def value(self, state):\n value = self.value_model.predict(state)\n return value[0]", "def DCG_p(results, topic, p):\n rel = lambda label: gold_topic_labels[topic][label]\n top_p = results[:p]\n dcg = 0\n for idx, label in enumerate(top_p):\n rank = idx + 1\n if idx == 0:\n dcg += rel(label)\n continue\n dcg += rel(label)/ math.log(rank,2)\n return dcg", "def cumulative_gamma(self, state: EnvState, params: EnvParams) -> float:\n return params.gamma**state.step" ]
[ "0.6413469", "0.5924323", "0.58160555", "0.5754944", "0.57004154", "0.56944776", "0.5661935", "0.56419706", "0.5618396", "0.56179804", "0.5609571", "0.55921984", "0.558994", "0.55720055", "0.55444455", "0.5505725", "0.55023724", "0.5493882", "0.5492896", "0.5434973", "0.54172194", "0.54085505", "0.5405877", "0.540427", "0.53881997", "0.5384611", "0.5365196", "0.53647566", "0.5358731", "0.5355195", "0.53482103", "0.5337819", "0.5333222", "0.5325491", "0.53176653", "0.5298768", "0.5278311", "0.52655244", "0.5255621", "0.52445185", "0.52419704", "0.52401733", "0.5237916", "0.5215133", "0.52109957", "0.52109957", "0.52109957", "0.52109957", "0.52015644", "0.519701", "0.5193921", "0.51904213", "0.5185297", "0.5184562", "0.51840186", "0.5178839", "0.51758504", "0.5158363", "0.51523256", "0.5149464", "0.5143343", "0.51398903", "0.51392764", "0.5130669", "0.5129566", "0.51284707", "0.5123179", "0.5118442", "0.51179737", "0.5107544", "0.5107142", "0.5106681", "0.5104383", "0.51024556", "0.5099858", "0.50946504", "0.50816923", "0.50740546", "0.50713557", "0.50686824", "0.50681883", "0.5067161", "0.50662434", "0.5060877", "0.5059608", "0.5056626", "0.50539654", "0.50522006", "0.50509447", "0.50451756", "0.504335", "0.5041131", "0.5039036", "0.50366914", "0.50333023", "0.50286627", "0.5027836", "0.5024122", "0.5017796", "0.50079465", "0.49994603" ]
0.0
-1
Representation of a circuit as a "Structured script". Structured means that the circuit is provided as a list of layers with 1-qubit operations and 2-qubit operations. That is, a circuit of the shape 1Q_layer, [2Q_layer, 1Q_layer] x num_layers, RO. To be of this shape, the number of 1Q layers needs to be one more than the number of 2Q layers.
def __init__(self, lines_1Q, lines_2Q, n_qubits):
    self.n_qubits = n_qubits
    # 1Qubit- and 2Qubit-operations layers do not match in size
    if not((len(lines_2Q)+1) == len(lines_1Q)):
        raise ValueError(
            '1Qubit- and 2Qubit-operations layers do not match in size')
    self.depth = 1+2*len(lines_2Q)
    self.lines_1Q = lines_1Q
    self.lines_2Q = lines_2Q
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _make_circuit_instructions(n_qubits, depth, type_circuit):\n\n if type_circuit in [0, 1, 2]:\n\n # if type_circuit == 1:\n # if depth > 8:\n # raise ValueError(\n # \"For type-1 circuits, only at most depth=8 allowed!\"\n # )\n\n # define rotations for circuit in each layer, 0: identity, 1:X, 2:Y 3:Z\n ini_pauli = np.zeros([depth, n_qubits], dtype=int)\n\n # set first and second layer, rest comes later\n ini_pauli[0, :] = 2 # y rotation\n if depth > 1:\n ini_pauli[1, :] = 3 # z rotation\n\n # construct natural parameterized circuit\n # gives which type of entangling gates at each layer -- first entry is\n # first qubit index, second is second qubit index, third entry is type\n # of entangling gate\n entangling_gate_index_list = [[] for i in range(depth)]\n orderList = []\n for i in range(n_qubits//2):\n if i % 2 == 0:\n orderList.append(i//2)\n else:\n orderList.append((n_qubits-i)//2)\n\n if n_qubits > 1:\n shiftList = [orderList[0]]\n else:\n shiftList = []\n for i in range(1, n_qubits//2):\n shiftList.append(orderList[i])\n shiftList += shiftList[:-1]\n\n # this list gives which entangling gates are applied in which layer\n if type_circuit == 0:\n # deep natural PQC, includes non-nearest neighbor gates\n for j in range(min(len(shiftList), int(np.ceil(depth/2))-1)):\n entangling_gate_index_list[1+2*j] = [\n [2*i, (2*i+1+2*shiftList[j]) % n_qubits, 0]\n for i in range(n_qubits//2)\n ]\n elif type_circuit == 1:\n # only do 2 entangling layers at max, and only do gates with\n # nearest neighbor and no ring\n for j in range(min(len(shiftList), 3)):\n if j == 0:\n entangling_gate_index_list[1+2*j] = [\n [2*i, (2*i+1+2*shiftList[j]) % n_qubits, 0]\n for i in range(n_qubits//2)\n ]\n elif (j == 1 or j == 2):\n # exclude ring gate and gate 0,1 on third entangling layer\n entangling_gate_index_list[1+2*j] = [\n [2*i, (2*i+1+2*shiftList[j]) % n_qubits, 0]\n for i in range(1, n_qubits//2)\n ]\n\n elif type_circuit == 2:\n # only do 3 regular entangling layers in a ring topology, then two\n # more phase gates with next-nearst neighbor, which requires one\n # swap. 
This adds 4 more parameters\n for j in range(min(len(shiftList), 3)):\n entangling_gate_index_list[1+2*j] = [\n [2*i, (2*i+1+2*shiftList[j]) % n_qubits, 0]\n for i in range(n_qubits//2)\n ]\n # entangling_gate_index_list[1+2*3]=[[0,n_qubits-1,1],[0,1,0],[n_qubits-1,n_qubits-2,0]]\n # entangling_gate_index_list[1+2*3]=[[0,n_qubits-1,1],[0,1,0],[n_qubits-1,n_qubits-2,0]]\n entangling_gate_index_list[1+2*3] = [\n [n_qubits-1, 1, 0],\n [0, n_qubits-2, 0]\n ]\n\n for i in range(len(entangling_gate_index_list)-1):\n if len(entangling_gate_index_list[i]) > 0:\n for j in range(len(entangling_gate_index_list[i])):\n qubit_index = entangling_gate_index_list[i][j][0]\n ini_pauli[i+1, qubit_index] = 2\n if i+2 < depth:\n ini_pauli[i+2, qubit_index] = 3\n\n elif type_circuit == 3:\n\n ini_pauli = np.ones([depth, n_qubits], dtype=int)*2\n\n for i in range(1, depth, 2):\n ini_pauli[i, :] = 3\n\n if n_qubits % 2 == 0:\n # even qubits ALT circuit needs to get rid of boundary rotations at\n # even entangling layers\n for i in range(4, depth, 4):\n ini_pauli[i, 0] = 0\n ini_pauli[i, -1] = 0\n if i+1 < depth:\n ini_pauli[i+1, 0] = 0\n ini_pauli[i+1, -1] = 0\n else:\n # for odd qubits, get rid of boundary either on top or bottom qubit\n for i in range(2, depth, 4):\n ini_pauli[i, -1] = 0\n if i+1 < depth:\n ini_pauli[i+1, -1] = 0\n for i in range(4, depth, 4):\n ini_pauli[i, 0] = 0\n if i+1 < depth:\n ini_pauli[i+1, 0] = 0\n\n # CNOT entangling gates\n entangling_gate_index_list = [[] for i in range(depth)]\n counter = 0\n # third index indicates type of entangling gate\n for k in range(1, depth-1, 2):\n\n # place entangler every second layer, do not place any at last\n if counter % 2 == 0:\n # even layer\n entangling_gate_index_list[k] = [\n [2*j, 2*j+1, 1] for j in range(n_qubits//2)\n ]\n else:\n # odd layer\n entangling_gate_index_list[k] = [\n [2*j+1, 2*j+2, 1] for j in range((n_qubits-1)//2)\n ]\n counter += 1\n\n else:\n raise ValueError('type_circuit='+f'{type_circuit}'+' not recognised.')\n\n return ini_pauli, entangling_gate_index_list", "def test_construct_subcircuit_layers(self):\r\n dev = qml.device(\"default.qubit\", wires=3)\r\n\r\n def circuit(params):\r\n # section 1\r\n qml.RX(params[0], wires=0)\r\n # section 2\r\n qml.RY(params[1], wires=0)\r\n qml.CNOT(wires=[0, 1])\r\n qml.CNOT(wires=[1, 2])\r\n # section 3\r\n qml.RX(params[2], wires=0)\r\n qml.RY(params[3], wires=1)\r\n qml.RZ(params[4], wires=2)\r\n qml.CNOT(wires=[0, 1])\r\n qml.CNOT(wires=[1, 2])\r\n # section 4\r\n qml.RX(params[5], wires=0)\r\n qml.RY(params[6], wires=1)\r\n qml.RZ(params[7], wires=2)\r\n qml.CNOT(wires=[0, 1])\r\n qml.CNOT(wires=[1, 2])\r\n return qml.expval(qml.PauliX(0)), qml.expval(qml.PauliX(1)), qml.expval(qml.PauliX(2))\r\n\r\n circuit = qml.QNode(circuit, dev)\r\n\r\n params = np.ones([8])\r\n tapes = circuit.metric_tensor(params, only_construct=True)\r\n\r\n # this circuit should split into 4 independent\r\n # sections or layers when constructing subcircuits\r\n assert len(tapes) == 4\r\n\r\n # first layer subcircuit\r\n assert len(tapes[0].operations) == 1\r\n assert isinstance(tapes[0].operations[0], qml.Hadamard) # PauliX decomp\r\n\r\n # second layer subcircuit\r\n assert len(tapes[1].operations) == 4\r\n assert isinstance(tapes[1].operations[0], qml.RX)\r\n # PauliY decomp\r\n assert isinstance(tapes[1].operations[1], qml.PauliZ)\r\n assert isinstance(tapes[1].operations[2], qml.S)\r\n assert isinstance(tapes[1].operations[3], qml.Hadamard)\r\n\r\n # # third layer subcircuit\r\n assert 
len(tapes[2].operations) == 8\r\n assert isinstance(tapes[2].operations[0], qml.RX)\r\n assert isinstance(tapes[2].operations[1], qml.RY)\r\n assert isinstance(tapes[2].operations[2], qml.CNOT)\r\n assert isinstance(tapes[2].operations[3], qml.CNOT)\r\n # PauliX decomp\r\n assert isinstance(tapes[2].operations[4], qml.Hadamard)\r\n # PauliY decomp\r\n assert isinstance(tapes[2].operations[5], qml.PauliZ)\r\n assert isinstance(tapes[2].operations[6], qml.S)\r\n assert isinstance(tapes[2].operations[7], qml.Hadamard)\r\n\r\n # # fourth layer subcircuit\r\n assert len(tapes[3].operations) == 13\r\n assert isinstance(tapes[3].operations[0], qml.RX)\r\n assert isinstance(tapes[3].operations[1], qml.RY)\r\n assert isinstance(tapes[3].operations[2], qml.CNOT)\r\n assert isinstance(tapes[3].operations[3], qml.CNOT)\r\n assert isinstance(tapes[3].operations[4], qml.RX)\r\n assert isinstance(tapes[3].operations[5], qml.RY)\r\n assert isinstance(tapes[3].operations[6], qml.RZ)\r\n assert isinstance(tapes[3].operations[7], qml.CNOT)\r\n assert isinstance(tapes[3].operations[8], qml.CNOT)\r\n # PauliX decomp\r\n assert isinstance(tapes[3].operations[9], qml.Hadamard)\r\n # PauliY decomp\r\n assert isinstance(tapes[3].operations[10], qml.PauliZ)\r\n assert isinstance(tapes[3].operations[11], qml.S)\r\n assert isinstance(tapes[3].operations[12], qml.Hadamard)", "def __init__(self, n_qubit:int, copies:int=1,\n rotation_blocks:Optional[Union[str, cirq.Gate, Callable, 'TemplateCircuitBlock',\n List[str],List[cirq.Gate],List[Callable],\n List['TemplateCircuitBlock']]] =None,\n entanglement_blocks:Optional[Union[str, cirq.Gate, Callable, 'TemplateCircuitBlock',\n List[str],List[cirq.Gate],List[Callable],\n List['TemplateCircuitBlock']]] =None,\n entangle_strategy:Optional[Union[str,List[str], Callable[[int,int],List[Tuple[int]]],\n List[Callable[[int,int],List[Tuple[int]]]]]]=None,\n parameter_symbol:str='θ',\n final_rotation_layer:bool=False,\n flatten_circuit:bool=False,\n reuse_param_per_depth:bool=False,\n reuse_param_per_layer:bool=False,\n reuse_param_per_template:bool=False,\n parameter_index:Optional[int]=None,\n parameter_scale=1,\n name:str='ParameterisedCircuit',\n *args, **kwargs):\n super().__init__(n_qubit, name=name, *args, **kwargs)\n self._parameter_symbol = parameter_symbol\n self._parameters = np.array([], dtype=object)\n self._readout_qubit = None\n self._flatten_circuit = flatten_circuit\n self._entangle_strategy = entangle_strategy if entangle_strategy else 'full'\n self._parameter_index = parameter_index\n self._reuse_param_per_depth = reuse_param_per_depth\n self._reuse_param_per_layer = reuse_param_per_layer\n self._reuse_param_per_template = reuse_param_per_template\n self._parameter_scale = parameter_scale \n self.build(rotation_blocks, entanglement_blocks, entangle_strategy, copies,\n final_rotation_layer)", "def logic_program_form(self):\r\n return '% -------------------------------------\\n' +\\\r\n '% Structure ' + self.name + '\\n' +\\\r\n '% -------------------------------------\\n\\n'", "def layer(zs, ps, Ts, qvap, qliq, IFORM=1):\n assert IFORM == 0 or IFORM == 1\n assert len(zs) == 2\n assert len(ps) == 2\n assert len(Ts) == 2\n dz = zs[1] - zs[0]\n assert dz > 0\n pave = 0.5 * sum(ps)\n Tave = 0.5 * sum(Ts)\n Rave = (1-qvap)*Rdry + qvap*Rwat\n ρave = 100*pave / Tave / Rave\n # Calculate column number density of water from specific humidity\n H2O = (qvap # Specific humidity [kg/kg]\n * ρave # Density of water vapor → [kg/m³]\n / 0.018 # 0.018 kg of water is 1 mol → 
[mol/m³]\n * avogadro # Number density → [molecules/m³]\n * dz # Column number density → [molecules/m²]\n * 1.0e-4 # MonoRTM wants cm² → [molecules/cm²]\n )\n # Cloud amout in mm contained in column\n CLW = (qliq # Specific CLW [kg/kg]\n * ρave # Density of CLW [kg/m³]\n * dz # Column CLW [kg/m²], corresponds to [mm]\n )\n if CLW == 0: CLW = None\n # Broadening gas amount must be given as column density (see __doc__) ↓cm²\n broadening = mixing_ratio_Ar * dz * (pave*100) / Tave / boltzmann * 1.0e-4\n # Give species 1 (H2O), 2 (CO2), 7 (O2) and 22 (N2)\n row1 = [H2O, mixing_ratio_CO2, 0., 0., 0., 0., mixing_ratio_O2]\n row2 = [ 0., 0., 0., 0., 0., 0., 0., 0.]\n row3 = [ 0., 0., 0., 0., 0., 0., mixing_ratio_N2, None]\n # Select Record matching IFORM parameter\n Record211 = Record211_IFORM0 if IFORM == 0 else Record211_IFORM1\n return [Record211(PAVE=pave, TAVE=Tave, ALTZB=zs[0]/1000, PZB=ps[0],\n TZB=Ts[0], ALTZT=zs[1]/1000, PZT=ps[1], TZT=Ts[1],\n CLW=CLW), # z in km\n Record212_first(WKL=row1, WBROADL=broadening),\n Record212_other(WKL=row2),\n Record212_other(WKL=row3)\n ]", "def test_construct_subcircuit(self):\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n\r\n def circuit(a, b, c):\r\n qml.RX(a, wires=0)\r\n qml.RY(b, wires=0)\r\n qml.CNOT(wires=[0, 1])\r\n qml.PhaseShift(c, wires=1)\r\n return qml.expval(qml.PauliX(0)), qml.expval(qml.PauliX(1))\r\n\r\n circuit = qml.QNode(circuit, dev)\r\n tapes = circuit.metric_tensor(1.0, 1.0, 1.0, only_construct=True)\r\n assert len(tapes) == 3\r\n\r\n # first parameter subcircuit\r\n assert len(tapes[0].operations) == 1\r\n assert isinstance(tapes[0].operations[0], qml.Hadamard) # PauliX decomp\r\n\r\n # second parameter subcircuit\r\n assert len(tapes[1].operations) == 4\r\n assert isinstance(tapes[1].operations[0], qml.RX)\r\n # PauliY decomp\r\n assert isinstance(tapes[1].operations[1], qml.PauliZ)\r\n assert isinstance(tapes[1].operations[2], qml.S)\r\n assert isinstance(tapes[1].operations[3], qml.Hadamard)\r\n\r\n # third parameter subcircuit\r\n assert len(tapes[2].operations) == 4\r\n assert isinstance(tapes[2].operations[0], qml.RX)\r\n assert isinstance(tapes[2].operations[1], qml.RY)\r\n assert isinstance(tapes[2].operations[2], qml.CNOT)\r\n # Phase shift generator\r\n assert isinstance(tapes[2].operations[3], qml.QubitUnitary)", "def sample_circuit(self, request):\r\n dev = qml.device(\"default.qubit\", wires=3)\r\n\r\n def non_parametrized_layer(a, b, c):\r\n qml.RX(a, wires=0)\r\n qml.RX(b, wires=1)\r\n qml.RX(c, wires=1)\r\n qml.CNOT(wires=[0, 1])\r\n qml.CNOT(wires=[1, 2])\r\n qml.RZ(a, wires=0)\r\n qml.Hadamard(wires=1)\r\n qml.CNOT(wires=[0, 1])\r\n qml.RZ(b, wires=1)\r\n qml.Hadamard(wires=0)\r\n\r\n a = 0.5\r\n b = 0.1\r\n c = 0.5\r\n\r\n def final(x, y, z, h, g, f):\r\n non_parametrized_layer(a, b, c)\r\n qml.RX(x, wires=0)\r\n qml.RY(y, wires=1)\r\n qml.RZ(z, wires=2)\r\n non_parametrized_layer(a, b, c)\r\n qml.RY(f, wires=1)\r\n qml.RZ(g, wires=2)\r\n qml.RX(h, wires=1)\r\n return qml.expval(qml.PauliX(0)), qml.expval(qml.PauliX(1)), qml.expval(qml.PauliX(2))\r\n\r\n final = qml.QNode(final, dev, diff_method=request.param)\r\n\r\n return dev, final, non_parametrized_layer, a, b, c", "def construct_qcbm(circuit, n_qubits, depth):\n\n for d in range(depth):\n for i in range(n_qubits):\n circuit.append_gate(Gate('X', target = i, angle = np.random.random()*np.pi*2))\n circuit.append_gate(Gate('Z', target = i, angle = np.random.random()*np.pi*2))\n if n_qubits != 1:\n for i in range(n_qubits):\n 
circuit.append_gate(Gate('CNOT', control = i, target = (i+1)%n_qubits))\n return circuit", "def slabs(self, structure=None):\n layers = np.zeros((2, 5))\n\n # thicknesses\n layers[0, 0] = float(self.thickness_heads)\n layers[1, 0] = float(self.thickness_tails)\n\n # real and imag SLD's\n head_sld_real, tail_sld_real = self.sld_(self.b_heads_real, #real\n self.b_tails_real,\n self.b_mscl_real)\n head_sld_imag, tail_sld_imag = self.sld_(self.b_heads_imag, #imaginary\n self.b_tails_imag,\n self.b_mscl_imag)\n layers[0, 1] = head_sld_real\n layers[0, 2] = head_sld_imag\n\n layers[1, 1] = tail_sld_real\n layers[1, 2] = tail_sld_imag\n\n # roughnesses\n layers[0, 3] = float(self.rough_preceding_mono)\n layers[1, 3] = float(self.rough_head_tail)\n\n # volume fractions\n # head region\n volfrac = self.vm_head() / (self.apm.value *\n self.thickness_heads.value)\n layers[0, 4] = 1 - volfrac\n if self.head_solvent is not None:\n # we do the solvation here, not in Structure.slabs\n layers[0] = Structure.overall_sld(layers[0], self.head_solvent)\n layers[0, 4] = 0\n\n # tail region\n volfrac = self.vm_tail() / (self.apm.value *\n self.thickness_tails.value)\n\n layers[1, 4] = 1 - volfrac\n if self.tail_solvent is not None:\n # we do the solvation here, not in Structure.slabs\n layers[1] = Structure.overall_sld(layers[1], self.tail_solvent)\n layers[1, 4] = 0\n\n if self.reverse_monolayer:\n layers = np.flipud(layers)\n layers[:, 3] = layers[::-1, 3]\n\n return layers", "def circuit():\n np.random.seed(1967)\n for gates in gates_per_layers:\n for gate in gates:\n qml.apply(gate)\n return qml.expval(qml.PauliZ(0))", "def __init__(self,\n name,\n vertices_location,\n connectivity,\n connectivity_ids=None,\n connectivity_label=None,\n connectivity_label_metadata=None,\n connectivity_colormap = None,\n connector_size=2.6,\n global_deselect_alpha=0.2,\n global_select_alpha=1.0,\n skeleton_linewidth=2.0):\n super(Microcircuit, self).__init__( name )\n\n if not connectivity_ids is None:\n self.connectivity_ids = connectivity_ids\n\n if not connectivity_label is None:\n self.connectivity_labels = connectivity_label\n\n if not connectivity_label_metadata is None:\n\n for semanticdict in connectivity_label_metadata:\n # name needs to be based on convention, TODO: best from ontology id rather than string!\n # TODO: use microcircuit convention\n if semanticdict.has_key(\"name\"):\n name = semanticdict[\"name\"]\n if \"skeleton\" in name:\n self.con_skeleton = int(semanticdict[\"value\"])\n elif \"presynaptic\" in name:\n self.con_pre = int(semanticdict[\"value\"])\n elif \"postsynaptic\" in name:\n self.con_post = int(semanticdict[\"value\"])\n\n else:\n # TODO: default\n self.con_skeleton = 1\n self.con_pre = 2\n self.con_post = 3\n\n # selection stores integer ids from connectivity_selectionID\n # when selected\n self.skeleton_selection = []\n\n # use the connectivity labels to extract the connectivity for the skeletons\n self.index_skeleton = np.where(self.connectivity_labels == self.con_skeleton)[0]\n self.index_allpre = np.where(self.connectivity_labels == self.con_pre)[0]\n self.index_allpost = np.where(self.connectivity_labels == self.con_post)[0]\n \n self.vertices = vertices_location\n self.connectivity = connectivity\n\n connectivity_skeleton = self.connectivity[self.index_skeleton]\n self.vertices_skeleton = self.vertices[ connectivity_skeleton.ravel() ]\n \n # we have a simplified connectivity now\n self.connectivity_skeleton = np.array( range(len(self.vertices_skeleton)), dtype = np.uint32 )\n 
self.connectivity_skeleton = self.connectivity_skeleton.reshape( (len(self.connectivity_skeleton)/2, 2) )\n self.connectivity_ids_skeleton = self.connectivity_ids[ self.index_skeleton ]\n\n # look up the start and end vertex id\n # map these to _skeleton arrays, and further to actor???\n\n # colors for skeletons\n if isinstance(connectivity_colormap, dict) and connectivity_colormap.has_key( self.con_skeleton ):\n self.connectivity_skeleton_colors = np.repeat(connectivity_colormap[self.con_skeleton], len(self.connectivity_skeleton), axis=0).astype( np.float32 )\n\n ##########\n # Incoming connectors\n ##########\n\n # extract the pre connectivity and create cones\n # store the indices for to be used to create the vector scatter\n # by itself, it represent implicitly the index used to select/deselect the vectors\n if len(self.index_allpre) == 0:\n if DEBUG:\n print \"no presynaptic connection\"\n self.pre_actor = None\n else:\n self.vertices_pre = self.vertices[ connectivity[self.index_allpre].ravel() ]\n self.pre_p1 = self.vertices_pre[::2, :] # data is NOT copied here\n self.pre_p2 = self.vertices_pre[1::2, :]\n pren = len(self.index_allpre)\n r1 = np.ones( pren, dtype = np.float32 ) * connector_size\n r2 = np.zeros( pren, dtype = np.float32 )\n if isinstance(connectivity_colormap, dict) and connectivity_colormap.has_key( self.con_pre ):\n preval = np.ones( pren, dtype = np.dtype(type(self.con_pre)) ) * self.con_pre\n else:\n preval = None\n self.pre_actor = VectorScatter( \"PreConnector\", self.pre_p1, self.pre_p2, r1, r2, values = preval,\n resolution = 8, colormap = connectivity_colormap )\n # len(self.index_pre) = len(self.pre_p1) = len(preval)\n\n ##########\n # Outgoing connectors\n ##########\n\n # extract the post connectivity and create cones\n if len(self.index_allpost) == 0:\n if DEBUG:\n print \"no postsynaptic connection\"\n self.post_actor = None\n else:\n self.vertices_post = self.vertices[ connectivity[self.index_allpost].ravel() ]\n self.post_p1 = self.vertices_post[::2, :]\n self.post_p2 = self.vertices_post[1::2, :]\n postn = len(self.index_allpost)\n r1 = np.zeros( postn, dtype = np.float32 )\n r2 = np.ones( postn, dtype = np.float32 ) * connector_size\n if isinstance(connectivity_colormap, dict) and connectivity_colormap.has_key( self.con_post ):\n postval = np.ones( postn, dtype = np.dtype(type(self.con_post)) ) * self.con_post\n else:\n postval = None\n self.post_actor = VectorScatter( \"PostConnector\", self.post_p1, self.post_p2, r1, r2, values = postval,\n resolution = 8, colormap = connectivity_colormap )\n\n ##########\n # Skeletons\n ##########\n self.skeleton = Skeleton( name = \"Polygon Lines\",\n vertices = self.vertices_skeleton,\n connectivity = self.connectivity_skeleton,\n connectivity_colors = self.connectivity_skeleton_colors,\n connectivity_ID = self.connectivity_ids_skeleton,\n linewidth = skeleton_linewidth,\n global_deselect_alpha = global_deselect_alpha,\n global_select_alpha = global_select_alpha )\n\n self.connectivity_skeletononly_ids = None\n self.connectivity_preonly_ids = None\n self.connectivity_postonly_ids = None\n\n self.global_deselect_alpha = global_deselect_alpha\n self.global_select_alpha = global_select_alpha", "def get_qubitops(H, verbose):\n num_nodes = H.shape[0]\n pauli_list = [] \n s = \"\"\n for i in range(num_nodes):\n xp = np.zeros(num_nodes, dtype=np.bool)\n zp = np.zeros(num_nodes, dtype=np.bool)\n zp[i] = True\n pauli_list.append([ H[i, i], Pauli(zp, xp)]) \n s += ' {}*Z[{}]'.format(H[i,i], i)\n for j in range(i):\n if 
H[i, j] != 0:\n xp = np.zeros(num_nodes, dtype=np.bool)\n zp = np.zeros(num_nodes, dtype=np.bool)\n zp[i] = True\n zp[j] = True\n pauli_list.append([ H[i, j], Pauli(zp, xp)]) \n s += ' + {}*Z[{}]*Z[{}]'.format(H[i,j], i, j) \n if verbose > 0:\n print(s)\n return Operator(paulis=pauli_list)", "def test_build_basic(self):\n # Get the components for a network\n data = array([[0, 1], [1, 0]])\n cdata = CData(data)\n encoder = BinaryEncoding(cdata)\n layer = ProductAnsatz(2)\n measure = Measurement(2, [0])\n\n # Make the network\n qnn = Network([encoder, layer, measure], computer=\"2q-qvm\")\n\n # Build each circuit for the network\n net0 = qnn._build(0)\n net1 = qnn._build(1)\n\n # Check that each circuit is a BaseAnsatz\n self.assertEqual(type(net0), BaseAnsatz)\n self.assertEqual(type(net1), BaseAnsatz)", "def logic_program_form(self):\n #TODO\n # determine step variable\n a_set = set()\n step = self.determineStepVariable()\n s = ''\n # assemble head\n s = s + self.assembleHead(a_set, step)\n # occ\n self.assembleOCC(a_set, step)\n # body\n s = s + self.assembleBody(a_set, step)\n return s", "def r_3(comps: 'list(Compound)', is_input):\r\n react: str\r\n\r\n if is_input:\r\n iSaAc = Compound(\"NaHSO4\")\r\n if len(comps) == 1:\r\n if \"iSa\" in comps[0].comp_type:\r\n iSaAc = comps[0]\r\n else:\r\n if \"iSa\" in comps[0].comp_type:\r\n iSaAc = comps[0]\r\n else:\r\n iSaAc = comps[1]\r\n\r\n ((me, me_oxs), (an, an_oxs)) = iSa_oxs(iSaAc.formula)\r\n iAc = Compound(iAc_create(an, an_oxs))\r\n if \"iAcNox\" in iAc.comp_type:\r\n return \"\"\r\n (nme, nme_oxs) = iAc_el_oxs(iAc.formula)\r\n\r\n iSaNo = Compound(iSaNo_create(me, me_oxs, an, an_oxs))\r\n iOxAc = Compound(iOx_create(nme, nme_oxs))\r\n\r\n react = f\"{iSaAc} -> {iSaNo} + {iOxAc} + H2O\"\r\n else:\r\n iSaNo = Compound(\"Na2SO4\")\r\n iOxAc = Compound(\"SO3\")\r\n if len(comps) == 1:\r\n if \"iSa\" in comps[0].comp_type:\r\n iSaNo = comps[0]\r\n (_, (an, an_oxs)) = iSa_oxs(iSaNo.formula)\r\n iAc = Compound(iAc_create(an, an_oxs))\r\n (nme, nme_oxs) = iAc_el_oxs(iAc.formula)\r\n iAcOx = Compound(iOx_create(nme, nme_oxs))\r\n elif \"iOx\" in comps[0].comp_type:\r\n iOxAc = comps[0]\r\n (nme, nme_oxs) = iOx_oxs(iOxAc.formula)\r\n iAc = Compound(iAc_el_create(nme, nme_oxs))\r\n (an, an_oxs) = iAc_oxs(iAc.formula)\r\n iSaNo = Compound(iSaNo_create(\"Na\", 1, an, an_oxs))\r\n elif len(comps) == 2:\r\n if \"iSa\" in comps[0].comp_type:\r\n iSaNo = comps[0]\r\n elif \"iSa\" in comps[1].comp_type:\r\n iSaNo = comps[1]\r\n if \"iOx\" in comps[0].comp_type:\r\n iOxAc = comps[0]\r\n elif \"iOx\" in comps[1].comp_type:\r\n iOxAc = comps[1]\r\n else:\r\n if \"iSa\" in comps[0].comp_type:\r\n iSaNo = comps[0]\r\n if \"iOx\" in comps[1].comp_type:\r\n iOxAc = comps[1]\r\n else:\r\n iOxAc = comps[2]\r\n elif \"iSa\" in comps[1].comp_type:\r\n iSaNo = comps[1]\r\n if \"iOx\" in comps[0].comp_type:\r\n iOxAc = comps[0]\r\n else:\r\n iOxAc = comps[1]\r\n else:\r\n iSaNo = comps[0]\r\n if \"iOx\" in comps[1].comp_type:\r\n iOxAc = comps[1]\r\n else:\r\n iOxAc = comps[2]\r\n\r\n ((me, me_oxs), (an, an_oxs)) = iSa_oxs(iSaNo.formula)\r\n if an_oxs == 1:\r\n return \"\"\r\n iAc = Compound(iAc_create(an, an_oxs))\r\n if \"iAcNox\" in iAc.comp_type:\r\n return \"\"\r\n\r\n (nme1, nme1_oxs) = iAc_el_oxs(iAc.formula)\r\n (nme2, nme2_oxs) = iOx_oxs(iOxAc.formula)\r\n\r\n if (nme1, nme1_oxs) != (nme2, nme2_oxs):\r\n return \"\"\r\n\r\n iSaAc = Compound(iSaAc_create(me, me_oxs, an, an_oxs))\r\n\r\n react = f\"{iSaAc} -> {iSaNo} + {iOxAc} + H2O\"\r\n\r\n return 
Reaction(react, \"t \")", "def logic_program_form(self):\r\n #TODO\r\n # determine step variable\r\n a_set = set()\r\n step = self.determineStepVariable()\r\n s = ''\r\n # assemble head\r\n s = s + self.assembleHead(a_set, step)\r\n # occ\r\n self.assembleOCC(a_set, step)\r\n # body\r\n s = s + self.assembleBody(a_set, step)\r\n return s", "def QCNN_layer_gen(self):\n pixels = self.filter_size**2\n # filter size: 2^n only for this version!\n if np.log2(pixels) % 1 != 0:\n raise NotImplementedError(\"filter size: 2^n only available\")\n cirq_qubits = cirq.GridQubit.rect(self.filter_size, self.filter_size)\n # mapping input data to circuit\n input_circuit = cirq.Circuit()\n input_params = [sympy.symbols('a%d' %i) for i in range(pixels)]\n for i, qubit in enumerate(cirq_qubits):\n input_circuit.append(cirq.rx(np.pi*input_params[i])(qubit))\n # apply learnable gate set to QCNN circuit\n QCNN_circuit = cirq.Circuit()\n step_size = [2**i for i in range(np.log2(pixels).astype(np.int32))]\n for step in step_size:\n for target in range(0, pixels, 2*step):\n QCNN_circuit.append(self._QConv(step, target, cirq_qubits))\n # merge the circuits\n full_circuit = cirq.Circuit()\n full_circuit.append(input_circuit)\n full_circuit.append(QCNN_circuit)\n self.circuit = full_circuit # save circuit to the QCNN layer obj.\n self.params = input_params + self.learning_params\n self.op = cirq.Z(cirq_qubits[0])", "def construct_circuit(self, parameters, q=None):\n if len(parameters) != self._num_parameters:\n raise ValueError('The number of parameters has to be {}'.format(self._num_parameters))\n\n if q is None:\n q = QuantumRegister(self._num_qubits, name='q')\n if self._initial_state is not None:\n circuit = self._initial_state.construct_circuit('circuit', q)\n else:\n circuit = QuantumCircuit(q)\n \n for b in range(self._depth):\n for i in range(len(self._h_list)):\n if not self._h_list[i].is_empty():\n circuit+=self._h_list[i].evolve(evo_time=parameters[i], quantum_registers=q)\n \n for i in range(len(self._h_list)-1,-1,-1):\n if not self._h_list[i].is_empty():\n circuit+=self._h_list[i].evolve(evo_time=parameters[i], quantum_registers=q)\n return circuit", "def logic_program_form(self):\n #TODO\n # determine step variable\n a_set = set()\n step = self.determineStepVariable()\n # occ\n s = self.assembleOCC(a_set, step)\n # body\n s = s + self.assembleBody(a_set, step)\n return s", "def r_3(comps: 'list(Compound)', is_input):\r\n react: str\r\n\r\n if is_input:\r\n iSiMe = Compound(\"Zn\")\r\n if len(comps) == 1:\r\n if \"iSi\" in comps[0].comp_type:\r\n iSiMe = comps[0]\r\n else:\r\n if \"iSi\" in comps[0].comp_type:\r\n iSiMe = comps[0]\r\n else:\r\n iSiMe = comps[1]\r\n\r\n me = list(iSiMe.formula.consist.keys())[0].name\r\n if me in [\"Au\", \"Pt\"]:\r\n return \"\"\r\n me_oxs = get_me_oxs(me)\r\n if me_oxs == 0:\r\n return \"\"\r\n if is_me_activer(\"H\", 1, me, me_oxs):\r\n return \"\"\r\n\r\n iSaNo = Compound(iSaNo_create(me, me_oxs, \"NO3\", 1))\r\n\r\n react = f\"{iSiMe} + HNO3 -> {iSaNo} + N2O + H2O\"\r\n else:\r\n iSaNo = Compound(\"Zn(NO3)2\")\r\n if len(comps) == 1:\r\n if \"iSaNo\" in comps[0].comp_type:\r\n iSaNo = comps[0]\r\n elif len(comps) == 2:\r\n if \"iSaNo\" in comps[0].comp_type:\r\n iSaNo = comps[0]\r\n elif \"iSaNo\" in comps[1].comp_type:\r\n iSaNo = comps[1]\r\n else:\r\n if \"iSaNo\" in comps[0].comp_type:\r\n iSaNo = comps[0]\r\n elif \"iSaNo\" in comps[1].comp_type:\r\n iSaNo = comps[1]\r\n else:\r\n iSaNo = comps[2]\r\n\r\n ((me, me_oxs), (an, an_oxs)) = iSa_oxs(iSaNo.formula)\r\n 
if an != \"NO3\":\r\n return \"\"\r\n if me in [\"Au\", \"Pt\"]:\r\n return \"\"\r\n if is_me_activer(\"H\", 1, me, me_oxs):\r\n return \"\"\r\n\r\n iSiMe = Compound(simple(me))\r\n\r\n react = f\"{iSiMe} + HNO3 -> {iSaNo} + N2O + H2O\"\r\n\r\n return Reaction(react)", "def compiler(input_ckt:pathlib.Path, design_name:str, pdk_dir:pathlib.Path, flat=0, Debug=False):\n logger.info(\"Starting topology identification...\")\n input_dir = input_ckt.parents[0]\n logger.debug(f\"Reading subckt {input_ckt}\")\n #\n # TODO: flatten should be separate pass\n #\n sp = SpiceParser(input_ckt, design_name, flat, pdk_dir)\n circuit_graphs = sp.sp_parser()\n assert circuit_graphs !=None , f\"No subcircuit with name {design_name} found in spice {input_ckt}\"\n circuit = circuit_graphs[0]\n\n design_setup = read_setup(input_dir / f'{design_name}.setup')\n logger.debug(f\"template parent path: {pathlib.Path(__file__).parent}\")\n lib_path = pathlib.Path(__file__).resolve().parent.parent / 'config' / 'basic_template.sp'\n logger.debug(f\"template library path: {lib_path}\")\n basic_lib = SpiceParser(lib_path)\n library = basic_lib.sp_parser()\n lib_path=pathlib.Path(__file__).resolve().parent.parent / 'config' / 'user_template.sp'\n user_lib = SpiceParser(lib_path)\n library += user_lib.sp_parser()\n library = [HierDictNode(**x, constraints=[], ports_weight={}) for x in library]\n library=sorted(library, key=lambda k: max_connectivity(k.graph), reverse=True)\n\n logger.debug(f\"dont use cells: {design_setup['DONT_USE_CELLS']}\")\n logger.debug(f\"all library elements: {[ele['name'] for ele in library]}\")\n if len(design_setup['DONT_USE_CELLS'])>0:\n library=[lib_ele for lib_ele in library if lib_ele['name'] not in design_setup['DONT_USE_CELLS']]\n #read lef to not write those modules as macros\n lef_path = pathlib.Path(__file__).resolve().parent.parent / 'config'\n all_lef = read_lef(lef_path)\n logger.debug(f\"Available library cells: {', '.join(all_lef)}\")\n\n if Debug==True:\n _write_circuit_graph(circuit[\"name\"], circuit[\"graph\"],\n \"./circuit_graphs/\")\n for lib_circuit in library:\n _write_circuit_graph(lib_circuit[\"name\"], lib_circuit[\"graph\"],\n \"./circuit_graphs/\")\n #Converting graph to dict\n const_parse = ConstraintParser(pdk_dir, input_dir)\n create_data = CreateDatabase(circuit[\"graph\"], const_parse)\n hier_graph_dict = create_data.read_inputs(circuit[\"name\"])\n logger.debug(\"START preprocessing\")\n stacked_subcircuit=[]\n\n #\n # TODO: Re-implement stacked transistor detection using new passes\n #\n for circuit_name, circuit in hier_graph_dict.items():\n logger.debug(f\"preprocessing circuit name: {circuit_name}\")\n G1 = circuit[\"graph\"]\n if circuit_name not in design_setup['DIGITAL']:\n define_SD(circuit,design_setup['POWER'],design_setup['GND'], design_setup['CLOCK'])\n stacked_subcircuit.append(preprocess_stack_parallel(hier_graph_dict,circuit_name,G1))\n for circuit_name in stacked_subcircuit:\n if circuit_name in hier_graph_dict.keys() and circuit_name is not design_name:\n logger.debug(f\"removing stacked subcircuit {circuit_name}\")\n del hier_graph_dict[circuit_name]\n #\n # TODO: pg_pins should be marked using constraints. 
Not manipulating netlist\n #\n logger.debug(\"Modifying pg pins in design for PnR\")\n pg_pins = design_setup['POWER']+design_setup['GND']\n remove_pg_pins(hier_graph_dict,design_name, pg_pins)\n\n logger.debug( \"\\n################### FINAL CIRCUIT AFTER preprocessing #################### \\n\")\n for circuit in hier_graph_dict.values():\n for node in circuit[\"graph\"].nodes(data=True):\n if node[1][\"inst_type\"]!='net':\n logger.debug(node)\n\n annotate = Annotate(hier_graph_dict, design_setup, library, all_lef)\n annotate.annotate()\n return hier_graph_dict", "def variational_circuit(params):\n non_parametrized_layer()\n qml.RX(params[0], wires=0)\n qml.RY(params[1], wires=1)\n qml.RZ(params[2], wires=2)\n non_parametrized_layer()\n qml.RX(params[3], wires=0)\n qml.RY(params[4], wires=1)\n qml.RZ(params[5], wires=2)", "def circuit(self):\n return jet.Circuit(num_wires=4, dim=2)", "def __str__(self):\n rep=\"This system has \"+str(self.NL)+\" layers.\\n\"\n rep+=\"The parameters for the each layers are:\\n\"\n for i in range(self.NL-1):\n rep+=\"Layer no. \"+str(i)+\":\\t \"+str(self.layers[i])\n rep+=\"Coupled to the next layer with strength:\\t\"+str(self.couplings[i])+\"\\n\"\n rep+=\"Layer no. \"+str(self.NL-1)+\":\\t \"+str(self.layers[self.NL-1])\n \n return rep", "def to_circuit(self):\n from qiskit.synthesis.cnotdihedral import synth_cnotdihedral_full\n\n return synth_cnotdihedral_full(self)", "def build_topology(self):\n# errstr = \"build_topology() is not implemented.\\n\"\n# errstr += textwrap.dedent(self.build_topology.__doc__)\n# raise NotImplementedError(errstr)\n pass # May be a 1-compartment neuron. No need to abstract. ", "def logic_program_form(self):\n #TODO\n # determine step variable\n a_set = set()\n step = self.determineStepVariable()\n s = ''\n # assemble head\n s = s + self.assembleHead(a_set, step)\n # body\n s = s + self.assembleBody(a_set, step)\n return s", "def __init__(\n self,\n data: CNOTDihedral | QuantumCircuit | Instruction | None = None,\n num_qubits: int | None = None,\n validate: bool = True,\n ):\n\n if num_qubits:\n # initialize n-qubit identity\n self._num_qubits = num_qubits\n # phase polynomial\n self.poly = SpecialPolynomial(self._num_qubits)\n # n x n invertible matrix over Z_2\n self.linear = np.eye(self._num_qubits, dtype=np.int8)\n # binary shift, n coefficients in Z_2\n self.shift = np.zeros(self._num_qubits, dtype=np.int8)\n\n # Initialize from another CNOTDihedral by sharing the underlying\n # poly, linear and shift\n elif isinstance(data, CNOTDihedral):\n self.linear = data.linear\n self.shift = data.shift\n self.poly = data.poly\n\n # Initialize from ScalarOp as N-qubit identity discarding any global phase\n elif isinstance(data, ScalarOp):\n if not data.is_unitary() or set(data._input_dims) != {2} or data.num_qubits is None:\n raise QiskitError(\"Can only initialize from N-qubit identity ScalarOp.\")\n self._num_qubits = data.num_qubits\n # phase polynomial\n self.poly = SpecialPolynomial(self._num_qubits)\n # n x n invertible matrix over Z_2\n self.linear = np.eye(self._num_qubits, dtype=np.int8)\n # binary shift, n coefficients in Z_2\n self.shift = np.zeros(self._num_qubits, dtype=np.int8)\n\n # Initialize from a QuantumCircuit or Instruction object\n elif isinstance(data, (QuantumCircuit, Instruction)):\n self._num_qubits = data.num_qubits\n elem = self._from_circuit(data)\n self.poly = elem.poly\n self.linear = elem.linear\n self.shift = elem.shift\n\n elif isinstance(data, Pauli):\n self._num_qubits = data.num_qubits\n elem 
= self._from_circuit(data.to_instruction())\n self.poly = elem.poly\n self.linear = elem.linear\n self.shift = elem.shift\n\n else:\n raise QiskitError(\"Invalid input type for CNOTDihedral class.\")\n\n # Initialize BaseOperator\n super().__init__(num_qubits=self._num_qubits)\n\n # Validate the CNOTDihedral element\n if validate and not self._is_valid():\n raise QiskitError(\"Invalid CNOTDihedral element.\")", "def make_natural_parameterised_circuit(\n n_qubits,\n depth,\n n_features,\n type_circuit=0,\n initial_angles='natural',\n random_seed=None,\n):\n if (n_qubits % 2 == 1) and (type_circuit in [0, 1, 2]):\n raise ValueError(\n 'Requested circuit setup, n_qubits='+f'{n_qubits}'\n + ', type_circuit='+f'{type_circuit}'+' not allowed. Circuit types'\n + ' 0, 1, 2 only support even numbers of qubits.'\n )\n\n # random generator used\n rng = np.random.default_rng(random_seed)\n\n # define angles for circuit\n if initial_angles == 'natural':\n # note that not all angles are actually used, the ones where\n # ini_pauli=0 are ignored\n ini_angles = _make_natural_angles(n_qubits, depth)\n elif initial_angles == 'random':\n ini_angles = rng.random([depth, n_qubits])*2*np.pi\n elif initial_angles == 'zeros':\n ini_angles = np.zeros([depth, n_qubits])\n else:\n raise ValueError(\n 'Invalid option for initial_angles: '+f'{initial_angles}'\n + ', please choose \"natural\", \"random\" or \"zeros\".'\n )\n\n # get circuit instructions\n ini_pauli, entangling_gate_index_list = _make_circuit_instructions(\n n_qubits, depth, type_circuit)\n\n # make circuit\n circuit = QuantumCircuit(n_qubits)\n parameters = []\n for j in range(depth):\n for k in range(n_qubits):\n type_pauli = ini_pauli[j][k]\n if type_pauli != 0:\n\n angle = ini_angles[j][k]\n if len(parameters) < n_features:\n new_param = Parameter('R'+str(len(parameters)))\n parameters.append(new_param)\n angle += new_param\n\n if type_pauli == 1:\n circuit.rx(angle, k)\n elif type_pauli == 2:\n circuit.ry(angle, k)\n elif type_pauli == 3:\n circuit.rz(angle, k)\n else:\n raise ValueError(\n 'rotation gate type: '+f'{type_pauli}'\n + ', not recognised.'\n )\n\n if len(entangling_gate_index_list[j]) > 0:\n for gate_indices in entangling_gate_index_list[j]:\n if gate_indices[2] == 0:\n # entangling gates of nPQC, pi/2 y rotation on control\n # qubit, followed by CPHASE\n circuit.ry(np.pi/2, gate_indices[0])\n circuit.cz(gate_indices[0], gate_indices[1])\n elif gate_indices[2] == 1:\n # CNOT\n circuit.cnot(gate_indices[0], gate_indices[1])\n else:\n raise ValueError(\n 'entangling gate type: '+f'{gate_indices[2]}'\n + ', not recognised.'\n )\n\n if len(parameters) < n_features:\n raise ValueError(\n 'Circuit did not have enough gates ('+f'{len(parameters)}'\n + ' params) to encode requested number of'+' features: '\n + f'{n_features}'+'.'\n )\n\n return circuit, parameters", "def make_circuit(A, b, num_clock_qubits):\n \n # save smaller circuit example for display\n global QC_, U_, UI_, QFT_, QFTI_, HP_, INVROT_\n\n # read in number of qubits\n N = len(A)\n n = int(np.log2(N))\n n_t = num_clock_qubits # number of qubits in clock register\n \n # lower bound on eigenvalues of A. 
Fixed for now\n C = 1/4\n \n ''' Define sets of qubits for this algorithm '''\n \n # create 'input' quantum and classical measurement register\n qr = QuantumRegister(n, name='input')\n qr_b = QuantumRegister(n, name='in_anc') # ancillas for Hamiltonian simulation (?)\n cr = ClassicalRegister(n)\n \n # create 'clock' quantum register\n qr_t = QuantumRegister(n_t, name='clock') # for phase estimation\n \n # create 'ancilla' quantum and classical measurement register\n qr_a = QuantumRegister(1, name='ancilla') # ancilla qubit\n cr_a = ClassicalRegister(1)\n \n # create the top-level HHL circuit, with all the registers\n qc = QuantumCircuit(qr, qr_b, qr_t, qr_a, cr, cr_a)\n\n ''' Initialize the input and clock qubits '''\n \n # initialize the |b> state - the 'input'\n qc = initialize_state(qc, qr, b)\n \n #qc.barrier()\n\n # Hadamard the phase estimation register - the 'clock'\n for q in range(n_t):\n qc.h(qr_t[q])\n\n qc.barrier()\n \n ''' Perform Quantum Phase Estimation on input (b), clock, and ancilla '''\n \n # perform controlled e^(i*A*t)\n for q in range(n_t):\n control = qr_t[q]\n anc = qr_a[0]\n phase = -(2*pi)*2**q \n qc_u = shs.control_Ham_sim(n, A, phase)\n if phase <= 0:\n qc_u.name = \"e^{\" + str(q) + \"iAt}\"\n else:\n qc_u.name = \"e^{-\" + str(q) + \"iAt}\"\n if U_ == None:\n U_ = qc_u\n qc.append(qc_u, qr[0:len(qr)] + qr_b[0:len(qr_b)] + [control] + [anc])\n\n qc.barrier()\n \n ''' Perform Inverse Quantum Fourier Transform on clock qubits '''\n \n #qc = IQFT(qc, qr_t)\n \n qc_qfti = inv_qft_gate(n_t, method=2)\n qc.append(qc_qfti, qr_t)\n\n if QFTI_ == None:\n QFTI_ = qc_qfti\n \n qc.barrier()\n \n ''' Perform inverse rotation with ancilla '''\n \n # reset ancilla\n qc.reset(qr_a[0])\n \n # compute angles for inversion rotations\n alpha = [2*np.arcsin(C)]\n for x in range(1,2**n_t):\n x_bin_rev = np.binary_repr(x, width=n_t)[::-1]\n lam = int(x_bin_rev,2)/(2**n_t)\n if lam < C:\n alpha.append(0)\n elif lam >= C:\n alpha.append(2*np.arcsin(C/lam))\n theta = ucr.alpha2theta(alpha)\n \n # do inversion step\n\n qc_invrot = ucr.uniformly_controlled_rot(n_t, theta)\n qc.append(qc_invrot, qr_t[0:len(qr_t)] + [qr_a[0]])\n \n if INVROT_ == None:\n INVROT_ = qc_invrot\n \n # and measure ancilla\n \n qc.measure(qr_a[0], cr_a[0])\n qc.reset(qr_a[0])\n\n qc.barrier()\n \n ''' Perform Quantum Fourier Transform on clock qubits '''\n \n #qc = QFT(qc, qr_t)\n \n qc_qft = qft_gate(n_t, method=2)\n qc.append(qc_qft, qr_t)\n\n if QFT_ == None:\n QFT_ = qc_qft\n \n qc.barrier()\n \n ''' Perform Inverse Quantum Phase Estimation on input (b), clock, and ancilla '''\n \n # uncompute phase estimation\n # perform controlled e^(-i*A*t)\n for q in reversed(range(n_t)):\n control = qr_t[q]\n phase = (2*pi)*2**q \n qc_ui = shs.control_Ham_sim(n, A, phase)\n if phase <= 0:\n qc_ui.name = \"e^{\" + str(q) + \"iAt}\"\n else:\n qc_ui.name = \"e^{-\" + str(q) + \"iAt}\"\n if UI_ == None:\n UI_ = qc_ui\n qc.append(qc_ui, qr[0:len(qr)] + qr_b[0:len(qr_b)] + [control] + [anc])\n\n qc.barrier()\n \n # Hadamard (again) the phase estimation register - the 'clock'\n for q in range(n_t):\n qc.h(qr_t[q])\n \n qc.barrier()\n \n ''' Perform final measurements '''\n \n # measure ancilla and main register\n qc.measure(qr[0:], cr[0:])\n\n if QC_ == None:\n QC_ = qc\n #print(f\"... 
made circuit = \\n{QC_}\")\n\n return qc", "def build_model(self):\n\n input_placeholder = Input(shape = self.input_shape)\n x = ZeroPadding2D((3, 3))(input_placeholder)\n\n # Stage 1\n x = self.main_path_block(x, 64, (7, 7), 'valid', 'conv1', 'bn_conv1', 'relu', (2, 2))\n x = MaxPooling2D((3, 3), strides = (2, 2))(x)\n\n # Stage 2\n x = self.convolutional_block(x, 3, [64, 64, 256], 2, 'a', 1)\n x = self.identity_block(x, 3, [64, 64, 256], 2, 'b')\n x = self.identity_block(x, 3, [64, 64, 256], 2, 'c')\n\n # Stage 3\n x = self.convolutional_block(x, 3, [128, 128, 512], 3, 'a', 2)\n x = self.identity_block(x, 3, [128, 128, 512], 3, 'b')\n x = self.identity_block(x, 3, [128, 128, 512], 3, 'c')\n x = self.identity_block(x, 3, [128, 128, 512], 3, 'd')\n\n # Stage 4\n x = self.convolutional_block(x, 3, [256, 256, 1024], 4, 'a', 2)\n x = self.identity_block(x, 3, [256, 256, 1024], 4, 'b')\n x = self.identity_block(x, 3, [256, 256, 1024], 4, 'c')\n x = self.identity_block(x, 3, [256, 256, 1024], 4, 'd')\n x = self.identity_block(x, 3, [256, 256, 1024], 4, 'e')\n x = self.identity_block(x, 3, [256, 256, 1024], 4, 'f')\n\n # Stage 5\n x = self.convolutional_block(x, 3, [512, 512, 2048], 5, 'a', 2)\n x = self.identity_block(x, 3, [512, 512, 2048], 5, 'b')\n x = self.identity_block(x, 3, [512, 512, 2048], 5, 'c')\n \n # Average Pooling Layer\n x = AveragePooling2D((2, 2), name = 'avg_pool')(x)\n \n # Fully Connected Layer\n x = Flatten()(x)\n x = Dense(\n self.classes,\n activation = 'softmax',\n name = 'fc_' + str(self.classes),\n kernel_initializer = glorot_uniform(seed = 0)\n )(x)\n\n self.model = Model(input_placeholder, x, name = 'Resnet50')", "def build_reactive_complex(self, settings_manager: SettingsManager):\n import scine_database as db\n import scine_utilities as utils\n\n start_structure_ids = self._calculation.get_structures()\n start_structures = [db.Structure(sid, self._structures) for sid in start_structure_ids]\n self.save_initial_graphs_and_charges(settings_manager, start_structures)\n if len(start_structures) == 1:\n # For an intramolecular structure it is sufficient to provide one\n # structure that is both, start structure and reactive complex\n structure = start_structures[0]\n atoms = structure.get_atoms()\n self.random_displace_atoms(atoms, self.settings[self.rc_key][\"displacement\"]) # breaks symmetry\n return atoms\n\n if len(start_structures) == 2:\n # Intermolecular reactions reactions require in situ generation of the reactive complex\n s0 = start_structures[0]\n s1 = start_structures[1]\n\n # Get coordinates\n atoms1 = s0.get_atoms()\n atoms2 = s1.get_atoms()\n elements1 = atoms1.elements\n elements2 = atoms2.elements\n coordinates1 = atoms1.positions\n coordinates2 = atoms2.positions\n # Calculate reactive center mean position\n if self.exploration_key + \"_lhs_list\" in self.settings[self.exploration_key]:\n sites1 = self.settings[self.exploration_key][self.exploration_key + \"_lhs_list\"]\n sites2 = self.settings[self.exploration_key][self.exploration_key + \"_rhs_list\"]\n self.settings[self.exploration_key][self.exploration_key + \"_rhs_list\"] = list(\n idx + len(elements1) for idx in sites2\n )\n elif \"nt_associations\" in self.settings[self.exploration_key]:\n sites1 = []\n sites2 = []\n nAtoms1 = len(atoms1.elements)\n for i in range(0, len(self.settings[self.exploration_key][\"nt_associations\"]), 2):\n at1 = self.settings[self.exploration_key][\"nt_associations\"][i]\n at2 = self.settings[self.exploration_key][\"nt_associations\"][i + 1]\n if at1 >= nAtoms1 > 
at2:\n sites1.append(at2)\n sites2.append(at1 - nAtoms1)\n if at2 >= nAtoms1 > at1:\n sites1.append(at1)\n sites2.append(at2 - nAtoms1)\n else:\n self.raise_named_exception(\n \"Reactive complex can not be build: missing reactive atoms list(s).\"\n )\n reactive_center1 = np.mean(coordinates1[sites1], axis=0)\n reactive_center2 = np.mean(coordinates2[sites2], axis=0)\n # Place reactive center mean position into origin\n coord1 = coordinates1 - reactive_center1\n coord2 = coordinates2 - reactive_center2\n positions = self._orient_coordinates(coord1, coord2)\n atoms = utils.AtomCollection(elements1 + elements2, positions)\n self.random_displace_atoms(atoms, self.settings[self.rc_key][\"displacement\"]) # breaks symmetry\n return atoms\n\n # should not be reachable\n self.raise_named_exception(\n \"Reactive complexes built from more than 2 structures are not supported.\"\n )", "def r_1(comps: 'list(Compound)', is_input):\r\n react: str\r\n\r\n if is_input:\r\n iSaAc = Compound(\"NaHCO3\")\r\n iBa = Compound(\"NaOH\")\r\n if len(comps) == 1:\r\n if \"iSa\" in comps[0].comp_type:\r\n iSaAc = comps[0]\r\n ((me, me_oxs), _) = iSa_oxs(iSaAc.formula)\r\n iBa = Compound(iBa_create(me, me_oxs))\r\n else:\r\n iBa = comps[0]\r\n (me, me_oxs) = iBa_oxs(iBa.formula)\r\n iSaAc = Compound(iSaAc_create(me, me_oxs, \"CO3\", 2))\r\n else:\r\n if \"iSa\" in comps[0].comp_type:\r\n iSaAc = comps[0]\r\n iBa = comps[1]\r\n else:\r\n iSaAc = comps[1]\r\n iBa = comps[0]\r\n\r\n ((me1, me1_oxs), (an, an_oxs)) = iSa_oxs(iSaAc.formula)\r\n (me2, me2_oxs) = iBa_oxs(iBa.formula)\r\n if (me1, me1_oxs) != (me2, me2_oxs):\r\n return \"\"\r\n\n iSaNo = Compound(iSaNo_create(me1, me1_oxs, an, an_oxs))\r\n\r\n react = f\"{iSaAc} + {iBa} -> {iSaNo} + H2O\"\r\n else:\r\n iSaNo = Compound(\"Na2CO3\")\r\n if len(comps) == 1:\r\n if \"iSa\" in comps[0].comp_type:\r\n iSaNo = comps[0]\r\n else:\r\n if \"iSa\" in comps[0].comp_type:\r\n iSaNo = comps[0]\r\n else:\r\n iSaNo = comps[1]\r\n\r\n ((me, me_oxs), (an, an_oxs)) = iSa_oxs(iSaNo.formula)\r\n if an_oxs == 1:\r\n return \"\"\r\n\r\n iSaAc = Compound(iSaAc_create(me, me_oxs, an, an_oxs))\r\n iBa = Compound(iBa_create(me, me_oxs))\r\n\r\n react = f\"{iSaAc} + {iBa} -> {iSaNo} + H2O\"\r\n\n return Reaction(react)", "def test_constructor(self, circuit):\n assert list(circuit.wires) == [jet.Wire(i, 0, False) for i in range(4)]\n assert list(circuit.operations) == [jet.Operation(jet.Qubit(), [i]) for i in range(4)]", "def test_serialize_circuit_no_rotations(self):\n dev = QeQiskitDevice(wires=1, shots=1000, backend=\"statevector_simulator\", analytic=True)\n\n def circuit():\n qml.Hadamard(wires=[0])\n return qml.expval(qml.Hadamard(0))\n\n qnode = qml.QNode(circuit, dev)\n qnode._construct([], {})\n\n qasm = dev.serialize_circuit(qnode.circuit)\n expected = 'OPENQASM 2.0;\\ninclude \"qelib1.inc\";\\nqreg q[1];\\ncreg c[1];\\nh q[0];\\n'\n assert qasm == expected", "def get_simulation(\n component: ComponentOrFactory,\n port_extension: Optional[float] = 4.0,\n layer_stack: LayerStack = LAYER_STACK,\n thickness_pml: float = 1.0,\n xmargin: float = 0,\n ymargin: float = 0,\n xmargin_left: float = 0,\n xmargin_right: float = 0,\n ymargin_top: float = 0,\n ymargin_bot: float = 0,\n zmargin: float = 1.0,\n clad_material: str = \"sio2\",\n port_source_name: str = \"o1\",\n port_margin: float = 0.5,\n port_source_offset: float = 0.1,\n distance_source_to_monitors: float = 0.2,\n resolution: float = 50,\n wavelength_start: float = 1.50,\n wavelength_stop: float = 1.60,\n wavelength_points: 
int = 50,\n plot_modes: bool = False,\n num_modes: int = 2,\n run_time_ps: float = 10.0,\n dispersive: bool = False,\n material_name_to_tidy3d_index: Dict[str, float] = MATERIAL_NAME_TO_TIDY3D_INDEX,\n material_name_to_tidy3d_name: Dict[str, str] = MATERIAL_NAME_TO_TIDY3D_NAME,\n is_3d: bool = True,\n with_all_monitors: bool = False,\n) -> td.Simulation:\n component = component() if callable(component) else component\n assert isinstance(component, Component)\n\n layer_to_thickness = layer_stack.get_layer_to_thickness()\n layer_to_material = layer_stack.get_layer_to_material()\n layer_to_zmin = layer_stack.get_layer_to_zmin()\n # layer_to_sidewall_angle = layer_stack.get_layer_to_sidewall_angle()\n\n if dispersive:\n material_name_to_tidy3d = material_name_to_tidy3d_name\n else:\n material_name_to_tidy3d = material_name_to_tidy3d_index\n\n assert isinstance(\n component, Component\n ), f\"component needs to be a gf.Component, got Type {type(component)}\"\n if port_source_name not in component.ports:\n warnings.warn(\n f\"port_source_name={port_source_name} not in {component.ports.keys()}\"\n )\n port_source = component.get_ports_list(port_type=\"optical\")[0]\n port_source_name = port_source.name\n warnings.warn(f\"Selecting port_source_name={port_source_name} instead.\")\n\n component_padding = gf.add_padding_container(\n component,\n default=0,\n top=ymargin or ymargin_top,\n bottom=ymargin or ymargin_bot,\n left=xmargin or xmargin_left,\n right=xmargin or xmargin_right,\n )\n component_extended = (\n gf.components.extension.extend_ports(\n component=component_padding, length=port_extension, centered=True\n )\n if port_extension\n else component_padding\n )\n\n gf.show(component_extended)\n component_extended = component_extended.flatten()\n\n component_ref = component_padding.ref()\n component_ref.x = 0\n component_ref.y = 0\n\n clad_material_name_or_index = material_name_to_tidy3d[clad_material]\n clad = td.Structure(\n geometry=td.Box(\n size=(td.inf, td.inf, td.inf),\n center=(0, 0, 0),\n ),\n medium=get_medium(name_or_index=clad_material_name_or_index),\n )\n structures = [clad]\n\n layers_thickness = [\n layer_to_thickness[layer]\n for layer in component.get_layers()\n if layer in layer_to_thickness\n ]\n\n if len(layer_to_thickness) < 1:\n raise ValueError(f\"{component.get_layers()} not in {layer_to_thickness.keys()}\")\n\n t_core = max(layers_thickness)\n cell_thickness = (\n thickness_pml + t_core + thickness_pml + 2 * zmargin\n if is_3d\n else 1 / resolution\n )\n\n sim_size = [\n component_ref.xsize + 2 * thickness_pml,\n component_ref.ysize + 2 * thickness_pml,\n cell_thickness,\n ]\n\n for layer in component.layers:\n if layer in layer_to_thickness and layer in layer_to_material:\n thickness = layer_to_thickness[layer]\n zmin = layer_to_zmin[layer] if is_3d else -td.inf\n zmax = zmin + thickness if is_3d else td.inf\n\n if (\n layer in layer_to_material\n and layer_to_material[layer] in material_name_to_tidy3d\n ):\n name_or_index = material_name_to_tidy3d[layer_to_material[layer]]\n medium = get_medium(name_or_index=name_or_index)\n index = get_index(name_or_index=name_or_index)\n logger.debug(\n f\"Add {layer}, {name_or_index!r}, index = {index:.3f}, \"\n f\"thickness = {thickness}, zmin = {zmin}, zmax = {zmax}\"\n )\n\n polygons = td.PolySlab.from_gds(\n gds_cell=component_extended,\n gds_layer=layer[0],\n gds_dtype=layer[1],\n axis=2,\n slab_bounds=(zmin, zmax),\n )\n\n for polygon in polygons:\n geometry = td.Structure(\n geometry=polygon,\n medium=medium,\n )\n 
structures.append(geometry)\n elif layer not in layer_to_material:\n logger.debug(f\"Layer {layer} not in {layer_to_material.keys()}\")\n elif layer_to_material[layer] not in material_name_to_tidy3d:\n materials = list(material_name_to_tidy3d.keys())\n logger.debug(f\"material {layer_to_material[layer]} not in {materials}\")\n\n # Add source\n port = component_ref.ports[port_source_name]\n angle = port.orientation\n width = port.width + 2 * port_margin\n size_x = width * abs(np.sin(angle * np.pi / 180))\n size_y = width * abs(np.cos(angle * np.pi / 180))\n size_x = 0 if size_x < 0.001 else size_x\n size_y = 0 if size_y < 0.001 else size_y\n size_z = cell_thickness - 2 * zmargin if is_3d else td.inf\n\n source_size = [size_x, size_y, size_z]\n source_center = port.center.tolist() + [0] # (x, y, z=0)\n\n xy_shifted = move_polar_rad_copy(\n np.array(port.center), angle=angle * np.pi / 180, length=port_source_offset\n )\n source_center_offset = xy_shifted.tolist() + [0] # (x, y, z=0)\n\n wavelengths = np.linspace(wavelength_start, wavelength_stop, wavelength_points)\n freqs = td.constants.C_0 / wavelengths\n freq0 = td.constants.C_0 / np.mean(wavelengths)\n fwidth = freq0 / 10\n\n msource = td.ModeSource(\n size=source_size,\n center=source_center,\n source_time=td.GaussianPulse(freq0=freq0, fwidth=fwidth),\n direction=\"+\",\n )\n\n # Add port monitors\n monitors = {}\n ports = sort_ports_x(sort_ports_y(component_ref.get_ports_list()))\n for port in ports:\n port_name = port.name\n angle = port.orientation\n width = port.width + 2 * port_margin\n size_x = width * abs(np.sin(angle * np.pi / 180))\n size_y = width * abs(np.cos(angle * np.pi / 180))\n size_x = 0 if size_x < 0.001 else size_x\n size_y = 0 if size_y < 0.001 else size_y\n size = (size_x, size_y, size_z)\n\n # if monitor has a source move monitor inwards\n length = -distance_source_to_monitors if port_name == port_source_name else 0\n xy_shifted = move_polar_rad_copy(\n np.array(port.center), angle=angle * np.pi / 180, length=length\n )\n center = xy_shifted.tolist() + [0] # (x, y, z=0)\n\n monitors[port_name] = td.ModeMonitor(\n center=center,\n size=size,\n freqs=freqs,\n mode_spec=td.ModeSpec(num_modes=1),\n name=port.name,\n )\n\n zcenter = (zmax + zmin) / 2 if is_3d else 0\n domain_monitor = td.FieldMonitor(\n center=[0, 0, zcenter],\n size=[sim_size[0], sim_size[1], 0] if is_3d else [td.inf, td.inf, 0],\n freqs=[freq0],\n name=\"field\",\n )\n monitors = list(monitors.values())\n monitors += [domain_monitor] if with_all_monitors else []\n\n sim = td.Simulation(\n size=sim_size,\n grid_size=3 * [1 / resolution],\n structures=structures,\n sources=[msource],\n monitors=monitors,\n run_time=20 * run_time_ps / fwidth,\n pml_layers=3 * [td.PML()] if is_3d else [td.PML(), td.PML(), None],\n )\n\n if plot_modes:\n src_plane = td.Box(center=source_center_offset, size=source_size)\n ms = td.plugins.ModeSolver(simulation=sim, plane=src_plane, freq=freq0)\n mode_spec = td.ModeSpec(num_modes=num_modes)\n modes = ms.solve(mode_spec=mode_spec)\n\n print(\n \"Effective index of computed modes: \",\n \", \".join([f\"{mode.n_eff:1.4f}\" for mode in modes]),\n )\n\n if is_3d:\n fig, axs = plt.subplots(num_modes, 2, figsize=(12, 12))\n else:\n fig, axs = plt.subplots(num_modes, 3, figsize=(12, 12))\n\n for mode_ind in range(num_modes):\n if is_3d:\n abs(modes[mode_ind].field_data.Ey).plot(\n x=\"y\", y=\"z\", cmap=\"magma\", ax=axs[mode_ind, 0]\n )\n abs(modes[mode_ind].field_data.Ez).plot(\n x=\"y\", y=\"z\", cmap=\"magma\", ax=axs[mode_ind, 
1]\n )\n else:\n abs(modes[mode_ind].field_data.Ex).plot(ax=axs[mode_ind, 0])\n abs(modes[mode_ind].field_data.Ey).plot(ax=axs[mode_ind, 1])\n abs(modes[mode_ind].field_data.Ez).plot(ax=axs[mode_ind, 2])\n\n axs[mode_ind, 0].set_title(f\"|Ex|: mode_index={mode_ind}\")\n axs[mode_ind, 1].set_title(f\"|Ey|: mode_index={mode_ind}\")\n axs[mode_ind, 2].set_title(f\"|Ez|: mode_index={mode_ind}\")\n\n if is_3d:\n axs[mode_ind, 0].set_aspect(\"equal\")\n axs[mode_ind, 1].set_aspect(\"equal\")\n plt.show()\n return sim", "def produce(self, layout, layers, parameters, cell):\n self._layers = layers\n self.cell = cell\n self._param_values = parameters\n self.layout = layout\n\n\n # cell: layout cell to place the layout\n # LayerSiN: which layer to use\n # r: radius\n # w: waveguide width\n # length units in dbu\n\n from math import pi, cos, sin\n from SiEPIC.utils import arc, arc_to_waveguide, points_per_circle, arc_wg\n\n # fetch the parameters\n dbu = self.layout.dbu\n ly = self.layout\n shapes = self.cell.shapes\n \n LayerSi = self.silayer\n LayerSiN = self.silayer_layer\n LayerPinRecN = ly.layer(self.pinrec)\n LayerDevRecN = ly.layer(self.devrec)\n \n w = int(round(self.wg_width/dbu))\n r = int(round(self.radius/dbu))\n\n # draw the quarter-circle\n x = -r\n y = r\n # layout_arc_wg_dbu(self.cell, LayerSiN, x, y, r, w, 270, 360)\n t = Trans(Trans.R0,x, y)\n self.cell.shapes(LayerSiN).insert(arc_to_waveguide(arc(r, 270, 360), w).transformed(t))\n \n # Create the pins on the waveguides, as short paths:\n from SiEPIC._globals import PIN_LENGTH as pin_length\n \n # Pin on the top side:\n p2 = [Point(0, y-pin_length/2), Point(0, y+pin_length/2)]\n p2c = Point(0, y)\n self.set_p2 = p2c\n self.p2 = p2c\n pin = Path(p2, w)\n shapes(LayerPinRecN).insert(pin)\n t = Trans(Trans.R0, 0, y)\n text = Text (\"pin2\", t)\n shape = shapes(LayerPinRecN).insert(text)\n shape.text_size = 0.4/dbu\n\n # Pin on the left side:\n p1 = [Point(pin_length/2+x,0), Point(-pin_length/2+x,0)]\n p1c = Point(x,0)\n self.set_p1 = p1c\n self.p1 = p1c\n pin = Path(p1, w)\n shapes(LayerPinRecN).insert(pin)\n t = Trans(Trans.R0, x, 0)\n text = Text (\"pin1\", t)\n shape = shapes(LayerPinRecN).insert(text)\n shape.text_size = 0.4/dbu\n\n # Create the device recognition layer -- make it 1 * wg_width away from the waveguides.\n t = Trans(Trans.R0,x, y)\n self.cell.shapes(LayerDevRecN).insert(arc_to_waveguide(arc(r, 270, 360), w*3).transformed(t))\n #layout_arc_wg_dbu(self.cell, LayerDevRecN, x, y, r, w*3, 270, 360)\n\n # Compact model information\n t = Trans(Trans.R0, x+r/10, 0)\n text = Text (\"Lumerical_INTERCONNECT_library=Design kits/EBeam\", t)\n shape = shapes(LayerDevRecN).insert(text)\n shape.text_size = r/100\n t = Trans(Trans.R0, x+r/10, r/4)\n text = Text ('Component=ebeam_bend_1550', t)\n shape = shapes(LayerDevRecN).insert(text)\n shape.text_size = r/100\n t = Trans(Trans.R0, x+r/10, r/2)\n text = Text ('Spice_param:radius=%.3fu wg_width=%.3fu'% (self.radius,self.wg_width), t)\n shape = shapes(LayerDevRecN).insert(text)\n shape.text_size = r/100", "def r_4(comps: 'list(Compound)', is_input):\r\n react: str\r\n\r\n if is_input:\r\n iSiMe = Compound(\"Al\")\r\n if len(comps) == 1:\r\n if \"iSi\" in comps[0].comp_type:\r\n iSiMe = comps[0]\r\n else:\r\n if \"iSi\" in comps[0].comp_type:\r\n iSiMe = comps[0]\r\n else:\r\n iSiMe = comps[1]\r\n\r\n me = list(iSiMe.formula.consist.keys())[0].name\r\n if me in [\"Au\", \"Pt\"]:\r\n return \"\"\r\n me_oxs = get_me_oxs(me)\r\n if me_oxs == 0:\r\n return \"\"\r\n if is_me_activer(\"H\", 
1, me, me_oxs):\r\n return \"\"\r\n\r\n iSaNo = Compound(iSaNo_create(me, me_oxs, \"NO3\", 1))\r\n\r\n react = f\"{iSiMe} + HNO3 -> {iSaNo} + N2 + H2O\"\r\n else:\r\n iSaNo = Compound(\"Al(NO3)3\")\r\n if len(comps) == 1:\r\n if \"iSaNo\" in comps[0].comp_type:\r\n iSaNo = comps[0]\r\n elif len(comps) == 2:\r\n if \"iSaNo\" in comps[0].comp_type:\r\n iSaNo = comps[0]\r\n elif \"iSaNo\" in comps[1].comp_type:\r\n iSaNo = comps[1]\r\n else:\r\n if \"iSaNo\" in comps[0].comp_type:\r\n iSaNo = comps[0]\r\n elif \"iSaNo\" in comps[1].comp_type:\r\n iSaNo = comps[1]\r\n else:\r\n iSaNo = comps[2]\r\n\r\n ((me, me_oxs), (an, an_oxs)) = iSa_oxs(iSaNo.formula)\r\n if an != \"NO3\":\r\n return \"\"\r\n if me in [\"Au\", \"Pt\"]:\r\n return \"\"\r\n if is_me_activer(\"H\", 1, me, me_oxs):\r\n return \"\"\r\n\r\n iSiMe = Compound(simple(me))\r\n\r\n react = f\"{iSiMe} + HNO3 -> {iSaNo} + N2 + H2O\"\r\n\r\n return Reaction(react)", "def unitary_builder(qubit_register, circuit): \n \n no_of_qubits = math.log(next(x for x in qubit_register.shape if x != 1), 2)\n qubit_ordering = []\n operations_in_slice = []\n operation_list = None\n for slice in circuit:\n for step in slice[\"operations\"]:\n qubit_ordering.extend(step[1])\n operations_in_slice.extend([step[0]])\n identity_operation_count = int(no_of_qubits - len(qubit_ordering))\n operations_in_slice.extend([qeye(2)] * identity_operation_count)\n qubit_ordering.extend([x for x in range(int(no_of_qubits)) if x not in qubit_ordering])\n operation_slice = tensor(operations_in_slice).permute(qubit_ordering)\n if operation_list is None:\n operation_list = [operation_slice]\n else:\n operation_list.extend([operation_slice])\n qubit_ordering = []\n operations_in_slice = [] \n \n circuit_unitary = reduce((lambda x, y: x * y), operation_list)\n \n return circuit_unitary", "def qchannel_to_qiskit(representation):\n\n rep = representation.representation\n # Find what representation it is.\n # Then create the corresponding matrix and shape it like qiskit is expecting it.\n # Finally, create the qiskit representation from that matrix.\n if rep in (RepresentationType.PTM, RepresentationType.CHOI):\n matri = representation.matrix\n data_re = []\n data_im = []\n for i in range(matri.nRows):\n for j in range(matri.nCols):\n data_re.append(matri.data[i * matri.nRows + j].re + 0.j)\n data_im.append(matri.data[i * matri.nRows + j].im)\n data = np.array(data_re)\n data.imag = np.array(data_im)\n data = data.reshape((matri.nRows, matri.nCols))\n return PTM(data) if (rep == RepresentationType.PTM) else Choi(data)\n if rep in (RepresentationType.CHI, RepresentationType.SUPEROP):\n final_data = []\n for matri in representation.basis:\n data_re = []\n data_im = []\n for i in range(matri.nRows):\n for j in range(matri.nCols):\n data_re.append(matri.data[i * matri.nRows + j].re + 0.j)\n data_im.append(matri.data[i * matri.nRows + j].im)\n data = np.array(data_re)\n data.imag = np.array(data_im)\n data = data.reshape((matri.nRows, matri.nCols))\n final_data.append(data)\n if rep == RepresentationType.CHI:\n return Chi(final_data) if len(final_data) > 1 else Chi(final_data[0])\n return SuperOp(final_data) if len(final_data) > 1 else SuperOp(final_data[0])\n if rep == RepresentationType.KRAUS:\n final_data = []\n for matri in representation.kraus_ops:\n data_re = []\n data_im = []\n for i in range(matri.nRows):\n for j in range(matri.nCols):\n data_re.append(matri.data[i * matri.nRows + j].re + 0.j)\n data_im.append(matri.data[i * matri.nRows + j].im)\n data = np.array(data_re)\n 
data.imag = np.array(data_im)\n data = data.reshape((matri.nRows, matri.nCols))\n final_data.append(data)\n return Kraus(final_data)\n return None", "def test_dag():\n qr = QuantumRegister(5, 'qr')\n cr = ClassicalRegister(5, 'cr')\n ghz = QuantumCircuit(qr, cr, name='ghz')\n\n ghz.h(qr[2])\n ghz.cx(qr[2], qr[1])\n ghz.cx(qr[1], qr[0])\n ghz.cx(qr[2], qr[3])\n ghz.cx(qr[3], qr[4])\n ghz.draw()\n\n # ghz_dag = circuit_to_dag(ghz)\n\n # print(ghz.width(), ghz_dag.width())", "def _build_compound_model(in_map):\n model_list = []\n for model_name in in_map[MODEL_NAME]:\n model_list.append(_build_single_model(in_map[MODEL_NAME], model_name=model_name))\n\n formula = in_map[EXPRESSION_NAME]\n\n return Spectrum1DRefModelLayer.from_formula(model_list, formula), formula", "def custom_constr(x, qr, inverse, depth):\n qc = QuantumCircuit(qr)\n maxi, mini = max(x), min(x)\n n = x.shape[0]\n #qc_wv = Wavelets(n).construct_circuit(register=qr)\n for _ in range(depth):\n qc.h(qr)\n for i in range(n):\n qc.u2(np.pi*(x[(i+1) % n]-mini)/(maxi-mini), 2*np.pi*(x[i]-mini)/(maxi-mini), qr[i])\n for i in range(n):\n qc.cx(qr[i], qr[(i + 1) % n])\n qc.u2(np.pi*(x[(i+1) % n]-mini)/(maxi-mini),\n ((2*np.pi)**2*(x[i]-mini)*(x[(i+1) % n]-mini)/(maxi-mini)**2) % 2*np.pi,\n qr[(i + 1) % n])\n qc.cx(qr[i], qr[(i + 1) % n])\n #qc = qc + qc_wv\n if inverse:\n return qc.inverse()\n return qc", "def pb_to_circuit(program: PBProgram):\n pbCircuit = program.body.circuit\n\n # Obtain the circuit width\n bit_idxes = set()\n for gate in pbCircuit:\n bit_idxes.update(gate.qRegList)\n width = max(bit_idxes) + 1\n if width <= 0:\n raise Error.ArgumentError(f\"Invalid circuit ({program}) in the program!\\n\"\n \"This circuit is empty and has no qubit.\",\n ModuleErrorCode,\n FileErrorCode, 7)\n\n # Instantiate ``Circuit`` and map gates and measurements to methods in ``Circuit``\n circuit = Circuit(width)\n\n # Warning: In the circuit model, the quantum states are initialized with |0>.\n # While in MBQC model, the quantum states are initialized with |+>.\n # Therefore, each qubit in the MBQC circuit should be operated by a ``Hadamard`` gate in the front.\n for i in range(width):\n circuit.h(i)\n\n for PBGate in pbCircuit:\n op = PBGate.WhichOneof('op')\n # Map ``fixedGate`` (including 'H', 'CX', 'X', 'Y', 'Z', 'S', 'T', 'CZ') to the methods in ``Circuit``\n if op == 'fixedGate':\n fixedGate: PBFixedGate = PBGate.fixedGate\n gateName = PBFixedGate.Name(fixedGate)\n bit_idx = PBGate.qRegList\n if gateName == 'H':\n circuit.h(bit_idx[0])\n elif gateName == 'CX':\n circuit.cnot(bit_idx)\n elif gateName == 'X':\n circuit.x(bit_idx[0])\n elif gateName == 'Y':\n circuit.y(bit_idx[0])\n elif gateName == 'Z':\n circuit.z(bit_idx[0])\n elif gateName == 'S':\n circuit.s(bit_idx[0])\n elif gateName == 'T':\n circuit.t(bit_idx[0])\n elif gateName == 'CZ':\n # CZ [q1, q2] = H [q2] + CNOT [q1, q2] + H [q2]\n circuit.h(bit_idx[1])\n circuit.cnot(bit_idx)\n circuit.h(bit_idx[1])\n else:\n raise Error.ArgumentError(f\"Invalid gate: ({gateName})!\\n\"\n \"Only 'H', 'CX', 'X', 'Y', 'Z', 'S', 'T', 'CZ' are supported as the \"\n \"fixed gates in UBQC in this version.\", ModuleErrorCode,\n FileErrorCode, 8)\n\n # Map ``rotationGate`` (including 'RX', 'RY', 'RZ', 'U') to the methods in ``Circuit``\n elif op == 'rotationGate':\n rotationGate: PBRotationGate = PBGate.rotationGate\n gateName = PBRotationGate.Name(rotationGate)\n bit_idx = PBGate.qRegList\n\n if gateName == 'RX':\n circuit.rx(PBGate.argumentValueList[0], bit_idx[0])\n elif gateName == 'RY':\n 
circuit.ry(PBGate.argumentValueList[0], bit_idx[0])\n elif gateName == 'RZ':\n circuit.rz(PBGate.argumentValueList[0], bit_idx[0])\n\n # Warning: unitary gate in MBQC has a decomposition form different from the commonly used ``U3`` gate!\n elif gateName == 'U':\n # In circuit model, the ``U3`` gate has a decomposition form of \"Rz Ry Rz\",\n # with angles of \"theta, phi, lamda\", that is:\n # U3(theta, phi, lamda) = Rz(phi) Ry(theta) Rz(lamda)\n angles = PBGate.argumentValueList\n\n # Warning: Sometimes, The angles have only one or two valid parameters!\n # In these cases, set the other parameters to be zeros\n if len(angles) == 1:\n theta1 = angles[0]\n phi1 = 0\n lamda1 = 0\n elif len(angles) == 2:\n theta1 = angles[0]\n phi1 = angles[1]\n lamda1 = 0\n else:\n theta1 = angles[0]\n phi1 = angles[1]\n lamda1 = angles[2]\n u3 = u3_gate(theta1, phi1, lamda1)\n\n # In MBQC model, the unitary gate has a decomposition form of \"Rz Rx Rz\",\n # with angles of \"theta, phi, lamda\", that is:\n # U(theta, phi, lamda) = Rz(phi) Rx(theta) Rz(lamda)\n theta2, phi2, lamda2 = decompose(u3)\n\n circuit.u(theta2, phi2, lamda2, bit_idx[0])\n\n elif op == 'customizedGate':\n raise Error.ArgumentError(f\"Invalid gate type: ({op})!\\n\"\n \"Customized gates are not supported in UBQC in this version.\",\n ModuleErrorCode,\n FileErrorCode, 9)\n\n elif op == 'measure':\n measurement_qubits = set(PBGate.qRegList)\n if measurement_qubits != set(range(width)):\n raise Error.ArgumentError(f\"Invalid measurement qubits: ({measurement_qubits})!\\n\"\n \"All qubits must be measured in UBQC in this version.\",\n ModuleErrorCode,\n FileErrorCode, 10)\n\n for qReg in PBGate.qRegList:\n typeName: PBMeasure = PBMeasure.Type.Name(PBGate.measure.type)\n if typeName == 'Z':\n circuit.measure(qReg)\n else:\n raise Error.ArgumentError(f\"Invalid measurement type: ({typeName})!\\n\"\n \"Only 'Z measurement' is supported as the measurement type \"\n \"in UBQC in this version.\",\n ModuleErrorCode,\n FileErrorCode, 11)\n else:\n raise Error.ArgumentError(f\"Invalid operation: ({op})!\\n\"\n \"This operation is not supported in UBQC in this version.\",\n ModuleErrorCode,\n FileErrorCode, 12)\n\n return circuit", "def __init__(self, \n n_neurons = \"micro\", # else: \"brunel\" or arrays\n C_ab = \"micro\", # else: \"brunel\" or arrays\n area = net.area, # simulation size\n neuron_model = net.neuron_model, # \"iaf_psc_delta\" or \"iaf_psc_exp\"\n connection_rule = net.connection_rule, # \"fixed_total_number\" or \"fixed_indegree\"\n j02 = net.j02, \n weight_rel_sd = net.weight_rel_sd, \n delay_rel_sd = net.delay_rel_sd, \n g = net.g, \n rate_ext = net.rate_ext):\n ###################################################\n ### \tNetwork parameters\t\t### \n ###################################################\n\n # area of network in mm^2; scales numbers of neurons\n # use 1 for the full-size network (77,169 neurons)\n self.area = area\n \n self.layers = net.layers #np.array([\"L23\", \"L4\", \"L5\", \"L6\"])\n self.types = net.types #np.array([\"e\", \"i\"]) \n self.populations = np.array([layer + typus for layer in self.layers for typus in self.types])\n self.n_populations = len(self.populations)\n self.n_layers = len(self.layers)\n self.n_types = len(self.types)\n \n # Neuron numbers\n if n_neurons == \"micro\":\n self.n_neurons = np.int_(net.full_scale_n_neurons * self.area)\n elif n_neurons == \"brunel\":\n # Provide an array of equal number of neurons in each exc./inh. population\n gamma = 0.25\n inh_factor = 1. 
/ (gamma + 1.)\n exc_factor = 1. - inh_factor \n n_total_micro = np.sum(net.full_scale_n_neurons * self.area)\n N_exc = n_total_micro/self.n_populations * exc_factor\n N_inh = n_total_micro/self.n_populations * inh_factor\n self.n_neurons = np.tile([N_exc, N_inh], self.n_layers).astype(int)\n else:\n if type(n_neurons) == np.ndarray:\n if n_neurons.shape == (self.n_populations, ):\n self.n_neurons = np.int_(n_neurons)\n else:\n raise Exception(\"'n_neurons' has wrong shape. \"+\n \"Expects (%i,)\"%self.n_populations)\n else: \n raise Exception(\"'n_neurons' expects either numpy.ndarray or string \"+\n \"in {'micro', 'brunel'}\")\n self.n_total = np.sum(self.n_neurons)\n\n \n # Synapse numbers\n # Connection probabilities: conn_probs[post, pre] = conn_probs[target, source]\n conn_probs = net.conn_probs\n # Scale synapse numbers of the C_ab\n if net.scale_C_linearly:\n n_outer_full = np.outer(net.full_scale_n_neurons, net.full_scale_n_neurons)\n C_full_scale = np.log(1. - conn_probs) / np.log(1. - 1. / n_outer_full)\n C_scaled = np.int_(C_full_scale * self.area)\n else:\n n_outer = np.outer(self.n_neurons, self.n_neurons)\n C_scaled = np.int_(np.log(1. - conn_probs) / np.log(1. - 1. / n_outer))\n\n self.connection_rule = connection_rule\n if self.connection_rule == \"fixed_total_number\":\n C_ab_micro = C_scaled # total number, do not divide! \n elif self.connection_rule == \"fixed_indegree\":\n C_ab_micro = (C_scaled.T / (net.full_scale_n_neurons * self.area)).T\n else:\n raise Exception(\"Unexpected connection type. Use 'fixed_total_number' for microcircuit \" + \n \"model or 'fixed_indegree' for Brunel's model!\")\n\n if C_ab == \"micro\":\n self.C_ab = C_ab_micro # shall not be integer at this point!\n elif C_ab == \"brunel\":\n C_e = np.mean(C_ab_micro) # mean for microcircuit (= 501 in full scale)\n C_i = gamma * C_e\n self.C_ab = np.tile([C_e, C_i], (self.n_populations, self.n_layers)).astype(int) \n else:\n if type(C_ab) == np.ndarray:\n if C_ab.shape == (self.n_populations, self.n_populations):\n self.C_ab = np.int_(C_ab)\n else:\n raise Exception(\"'C_ab' has wrong shape. \"+\n \"Expects (%i, %i)\"%(self.n_populations, self.n_populations))\n else: \n raise Exception(\"'C_ab' expects either numpy.ndarray or string \"+\n \"in {'micro', 'brunel'}\")\n\n\n ###################################################\n ### Single-neuron parameters\t\t### \n ###################################################\n self.neuron_model = neuron_model\n self.Vm0_mean = net.Vm0_mean # mean of initial membrane potential (mV)\n self.Vm0_std = net.Vm0_std # std of initial membrane potential (mV)\n self.model_params = net.model_params\n if not self.neuron_model==\"iaf_psc_delta\":\n self.model_params[\"tau_syn_ex\"] = net.tau_syn_ex # excitatory synaptic time constant (ms)\n self.model_params[\"tau_syn_in\"] = net.tau_syn_in # inhibitory synaptic time constant (ms)\n self.tau_syn_ex = net.tau_syn_ex # ms\n self.tau_syn_in = net.tau_syn_in # ms\n self.tau_syn = np.tile([self.tau_syn_ex, self.tau_syn_in], (self.n_populations, self.n_layers))\n # Rescaling for model calculations: these values are not used in the simulation!\n self.tau_m = self.model_params[\"tau_m\"] # ms\n self.t_ref = self.model_params[\"t_ref\"] # ms\n self.E_L = self.model_params[\"E_L\"] # mV\n self.V_r = self.model_params[\"V_reset\"] - self.E_L # mV\n self.theta = self.model_params[\"V_th\"] - self.E_L # mV\n self.C_m = self.model_params[\"C_m\"] # pF\n\n\n ######################################################\n # Synaptic weights. 
Depend on neuron_model! ##\n ######################################################\n self.g = g\n self.j02 = j02\n\n g_all = np.tile([1., -self.g], (self.n_populations, self.n_layers))\n L23e_index = np.where(self.populations == \"L23e\")[0][0]\n L4e_index = np.where(self.populations == \"L4e\")[0][0]\n g_all[L23e_index, L4e_index] *= self.j02\n \n self.J = net.PSP_e # mv; mean PSP, used as reference PSP\n self.J_ab = self.J * g_all\n self.weight_rel_sd = weight_rel_sd # Standard deviation of weight relative to mean weight\n # Transformation from peak PSP to PSC\n delta_tau = self.tau_syn - self.tau_m\n ratio_tau = self.tau_m / self.tau_syn\n PSC_over_PSP = self.C_m * delta_tau / (self.tau_m * self.tau_syn * \\\n (ratio_tau**(self.tau_m / delta_tau) - ratio_tau**(self.tau_syn / delta_tau))) \n # Actual weights have to be adapted: from peak PSP to PSC (and back...)\n if self.neuron_model==\"iaf_psc_exp\": # PSCs calculated from PSP amplitudes\n self.weights = self.J_ab * PSC_over_PSP # neuron populations\n elif self.neuron_model==\"iaf_psc_delta\":\n self.weights = self.J_ab * PSC_over_PSP * (self.tau_syn_ex) / self.C_m\n # This might be an overkill / doing things twice...\n elif self.neuron_model==\"iaf_psc_alpha\": # PSCs calculated from PSP amplitudes\n self.weights = self.J_ab * np.exp(1) / (self.tau_syn_ex) / self.C_m\n else:\n raise Exception(\"Neuron model should be iaf_psc_ - {delta, exp, alpha}!\")\n\n\n ###################################################\n ### Delays and dicts ### \n ###################################################\n # mean dendritic delays for excitatory and inhibitory transmission (ms)\n self.delay_e = net.delay_e # ms, excitatory synapses\n self.delay_i = net.delay_i # ms, inhibitory synapses\n\n self.delays = np.tile([self.delay_e, self.delay_i], (self.n_populations, self.n_layers)) # adapt...\n self.delay_rel_sd = delay_rel_sd \n \n # Synapse dictionaries\n # default connection dictionary\n self.conn_dict = {\"rule\": connection_rule}\n # weight distribution of connections between populations\n self.weight_dict_exc = net.weight_dict_exc\n self.weight_dict_inh = net.weight_dict_inh\n # delay distribution of connections between populations\n self.delay_dict = net.delay_dict\n # default synapse dictionary\n self.syn_dict = net.syn_dict\n \n \n ###################################################\n ### External stimuli ## \n ###################################################\n # rate of background Poisson input at each external input synapse (spikes/s) \n self.rate_ext = rate_ext # Hz \n self.J_ext = net.PSP_ext # external synaptic weight\n self.delay_ext = self.delay_e # ms; mean delay of external input\n self.dc_amplitude = net.dc_amplitude # constant bg amplitude\n self.C_aext = net.C_aext # in-degrees for background input\n # Adapt weights\n if self.neuron_model==\"iaf_psc_exp\": # PSCs calculated from PSP amplitudes\n self.weight_ext = self.J_ext * PSC_over_PSP[0, 0] \n elif self.neuron_model==\"iaf_psc_delta\":\n self.weight_ext = self.J_ext * PSC_over_PSP[0, 0] * self.tau_syn_ex / self.C_m\n elif self.neuron_model==\"iaf_psc_alpha\": # PSCs calculated from PSP amplitudes\n self.weight_ext = self.J_ext * np.exp(1) / self.tau_syn_ex / self.C_m\n\n # optional additional thalamic input (Poisson)\n self.n_th = net.n_th # size of thalamic population\n self.th_start = net.th_start # onset of thalamic input (ms)\n self.th_duration = net.th_duration # duration of thalamic input (ms)\n self.th_rate = net.th_rate # rate of thalamic neurons (spikes/s)\n self.J_th 
= net.PSP_th # mean EPSP amplitude (mV) for thalamic input\n # Adapt weights\n if self.neuron_model==\"iaf_psc_exp\": # PSCs calculated from PSP amplitudes\n self.weight_th = self.J_th * PSC_over_PSP[0, 0] \n elif self.neuron_model==\"iaf_psc_delta\":\n self.weight_th = self.J_th * PSC_over_PSP[0, 0] * self.tau_syn_ex / self.C_m\n elif self.neuron_model==\"iaf_psc_alpha\": # PSCs calculated from PSP amplitudes\n self.weight_th = self.J_th * np.exp(1) / self.tau_syn_ex / self.C_m\n\n \n # connection probabilities for thalamic input\n conn_probs_th = net.conn_probs_th\n if net.scale_C_linearly:\n if not self.n_th == 0:\n C_th_full_scale = np.log(1. - conn_probs_th) / \\\n np.log(1. - 1. / (self.n_th * net.full_scale_n_neurons))\n self.C_th_scaled = np.int_(C_th_full_scale * self.area)\n else:\n if not self.n_th == 0:\n self.C_th_scaled = np.int_(np.log(1. - conn_probs_th) / \\\n np.log(1. - 1. / (self.n_th * self.n_neurons_micro)))\n if self.n_th == 0:\n self.C_th_scaled = None\n \n # mean delay of thalamic input (ms)\n self.delay_th = net.delay_th\n # standard deviation relative to mean delay of thalamic input\n self.delay_th_rel_sd = net.delay_th_rel_sd\n\n\n ######################################################\n # Predefine matrices for mean field ##\n ######################################################\n if self.neuron_model==\"iaf_psc_delta\":\n self.J_mu = self.weights\n self.J_sd = self.weights\n self.J_mu_ext = self.weight_ext \n self.J_sd_ext = self.weight_ext\n elif self.neuron_model==\"iaf_psc_exp\":\n self.J_mu = self.weights * self.tau_syn / self.C_m\n self.J_sd = self.weights * np.sqrt(self.tau_syn / 2.) / self.C_m\n self.J_mu_ext = self.weight_ext * self.tau_syn_ex / self.C_m\n self.J_sd_ext = self.weight_ext * np.sqrt(self.tau_syn_ex / 2.) / self.C_m\n elif self.neuron_model==\"iaf_psc_alpha\":\n self.J_mu = self.weights * self.tau_syn**2 / self.C_m\n self.J_sd = self.weights * self.tau_syn**(3./2.) / (self.C_m * 2.)\n self.J_mu_ext = self.weight_ext * self.tau_syn_ex**2 / self.C_m\n self.J_sd_ext = self.weight_ext * self.tau_syn_ex**(3./2.) 
/ (self.C_m * 2.)\n self.mat_mu = self.tau_m * 1e-3 * self.J_mu * self.C_ab\n self.mu_ext = self.tau_m * 1e-3 * self.J_mu_ext * self.C_aext * self.rate_ext\n self.mat_var = self.tau_m * 1e-3 * (1 + self.weight_rel_sd ** 2) * self.J_sd**2 * self.C_ab\n self.var_ext = self.tau_m * 1e-3 * (1 + self.weight_rel_sd ** 2) * self.J_sd_ext**2 * self.C_aext * self.rate_ext", "def pre_defined_circuit(env: 'QEnv', q: List['QRegStorage'], gate_list: List[operatorType]) -> 'QEnv':\n if gate_list:\n for gate in gate_list:\n if gate.bits == 1:\n gate(q[0])\n elif gate.bits == 2:\n gate(q[0], q[1])\n return env", "def surface_template(self, chain):\n output_name = output_folder + \"/surface_output_\" + str(self.input_pdb_path).split(\"/\")[-1][0:-4] + \"_\" + chain\n output_string = 'mol new ' + self.input_pdb_path + '\\n' \\\n + 'set allsel [atomselect top \"all and chain ' + chain + '\"]' + '\\n' \\\n + 'set chain ' + chain + '\\n' \\\n + 'set tot_sasa [dict create ARG 241 TRP 259 TYR 229' \\\n + ' LYS 211 PHE 218 MET 204 GLN 189 HIS 194 HSD 194 HSE 194 HSP 194 GLU' \\\n + ' 183 LEU 180 ILE 182 ASN 158 ASP 151 CYS 140 VAL 160 THR 146 PRO 143 ' \\\n + 'SER 122 ALA 113 GLY 85]' + '\\n' \\\n + 'set residlist [lsort -unique [$allsel get resid]]' + '\\n' \\\n + 'set surf_list [list]' + '\\n' \\\n + 'foreach r $residlist {' + '\\n' \\\n + 'set sel [atomselect top \"resid $r and chain $chain\"]' + '\\n' \\\n + 'set temp_rsasa [measure sasa 1.4 $allsel -restrict $sel]' + '\\n' \\\n + 'set temp_name [lsort -unique [$sel get resname]]' + '\\n' \\\n + 'set temp_id [lsort -unique [$sel get resid]]' + '\\n' \\\n + 'set temp_tot [dict get $tot_sasa $temp_name]' + '\\n' \\\n + 'set rsasa [expr $temp_rsasa/$temp_tot]' + '\\n' \\\n + 'if {$rsasa > 0.2} {lappend surf_list \"$temp_id $temp_name\"}' + '\\n' \\\n + '}' + '\\n' \\\n + 'set fileId [open ' + output_name + ' w]' + '\\n' \\\n + 'puts $fileId $surf_list' + '\\n' \\\n + 'close $fileId' + '\\n' \\\n + 'quit' + '\\n' \\\n + 'exit' + '\\n'\n return output_string", "def buildInterfaceStructure(self, idx = 0, z_1 = 1, z_2 = 1, d = 2.5,\\\n verbose = 1, filename = None, vacuum = 0,\\\n translation = None, surface = None, ab = False):\n\n if filename is None:\n if translation is None:\n filename = \"InterfaceStructure_%s\" % (idx)\n else:\n filename = \"InterfaceStructure_%s_T%s\" % (idx, translation)\n\n \"\"\"Build the selected interface\"\"\"\n base, pos, type_n, mass = self.buildInterface(idx = idx, z_1 = z_1, z_2 = z_2, d = d,\\\n verbose = verbose, vacuum = vacuum,\\\n translation = translation, surface = surface,\\\n ab = ab)\n\n \"\"\"Sort first based on type then Z-position then Y-position\"\"\"\n ls = np.lexsort((pos[:, 1], pos[:, 2], type_n))\n type_n = type_n[ls]\n mass = mass[ls]\n pos = pos[ls]\n\n \"\"\"After sorting, index all positions\"\"\"\n index = np.arange(type_n.shape[0])\n\n \"\"\"Build an Atoms object\"\"\"\n atoms = structure.Structure(cell = base, pos = pos, type_n = type_n, type_i = None,\\\n mass = mass, idx = index, filename = filename, pos_type = \"c\")\n\n \"\"\"Align the first dimension to the x-axis\"\"\"\n atoms.alignStructure(dim = [1, 0, 0], align = [1, 0, 0])\n\n \"\"\"Retrun the structure object\"\"\"\n return atoms", "def construct_circuit(self, mode='circuit', register=None):\n dim = (self._num_qubits + 1)//2\n\n if mode == 'vector':\n state = np.zeros((2**(2*dim - 1)), dtype=np.complex)\n for i in range(2**(dim-1)):\n j1 = i << dim\n j2 = i\n state[j1 + j2] = 1\n state[j1 + j2 + (1 << (dim-1))] = 1j\n return 
state/np.linalg.norm(state)\n elif mode == 'circuit':\n if register is None:\n register = QuantumRegister(2*dim - 1, name='q')\n quantum_circuit = QuantumCircuit(register)\n\n quantum_circuit.h(list(range(dim - 1, 2*dim - 1)))\n quantum_circuit.s(dim - 1)\n quantum_circuit.cx(list(range(dim, 2*dim - 1)), list(range(dim - 1)))\n\n return quantum_circuit\n else:\n raise AquaError('Mode should be either \"vector\" or \"circuit\"')", "def make_psi_json(mol, label, method, basisset, calctype='opt', mem=None):\n # check that specified calctype is valid\n check_calc(calctype)\n\n inputdict = {}\n moldict = {}\n modeldict = {}\n keydict = {}\n inputdict[\"schema_name\"] = \"qc_schema_input\"\n inputdict[\"schema_version\"] = 1\n\n # specify memory requirements, if defined\n if mem != None:\n inputdict[\"memory\"] = mem\n\n #TODO -- json version\n # charge and multiplicity; multiplicity hardwired to singlet (usually is)\n #inputdict[\"charge\"] = oechem.OENetCharge( mol)\n #inputdict[\"multiplicity\"] = 1\n\n # get atomic symbol and coordinates of each atom\n geom_list = []\n elem_list = []\n xyz = oechem.OEFloatArray(3)\n for atom in mol.GetAtoms():\n mol.GetCoords(atom, xyz)\n geom_list.append(xyz[0])\n geom_list.append(xyz[1])\n geom_list.append(xyz[2])\n elem_list.append(oechem.OEGetAtomicSymbol(atom.GetAtomicNum()))\n moldict[\"geometry\"] = geom_list\n moldict[\"symbols\"] = elem_list\n inputdict[\"molecule\"] = moldict\n\n #TODO -- json version\n ## check if mol has a \"freeze\" tag\n #for x in oechem.OEGetSDDataPairs(mol):\n # if calctype==\"opt\" and \"atoms to freeze\" in x.GetTag():\n # b = x.GetValue()\n # y = b.replace(\"[\", \"\")\n # z = y.replace(\"]\", \"\")\n # a = z.replace(\" \", \"\")\n # freeze_list = a.split(\",\")\n # inputstring += (\"\\n\\nfreeze_list = \\\"\\\"\\\"\\n {} xyz\\n {} xyz\\n {} \"\n # \"xyz\\n {} xyz\\n\\\"\\\"\\\"\".format(freeze_list[0],\n # freeze_list[1], freeze_list[2], freeze_list[3]))\n # inputstring += \"\\nset optking frozen_cartesian $freeze_list\"\n # inputstring += (\"\\nset optking dynamic_level = 1\\nset optking \"\n # \"consecutive_backsteps = 2\\nset optking intrafrag_step_limit = \"\n # \"0.1\\nset optking interfrag_step_limit = 0.1\\n\")\n\n #TODO -- json version\n ## explicitly specify MP2 RI-auxiliary basis for Ahlrichs basis set\n ## http://www.psicode.org/psi4manual/master/basissets_byfamily.html\n ## DFMP2 *should* get MP2 aux sets fine for Pople/Dunning\n ## http://www.psicode.org/psi4manual/master/dfmp2.html\n #if method.lower()=='mp2' and 'def2' in basisset:\n # if basisset.lower()=='def2-sv(p)':\n # inputstring+=('\\nset df_basis_mp2 def2-sv_p_-ri')\n # elif basisset.lower()!='def2-qzvpd': # no aux set for qzvpd 10-6-18\n # inputstring+=('\\nset df_basis_mp2 %s-ri' % (basisset))\n\n modeldict[\"basis\"] = basisset\n modeldict[\"method\"] = method\n inputdict[\"model\"] = modeldict\n\n #inputstring+=('\\nset freeze_core True')\n # specify command for type of calculation\n if calctype == 'opt':\n # TODO\n pass\n elif calctype == 'spe':\n inputdict[\"driver\"] = 'energy'\n elif calctype == 'hess':\n inputdict[\"driver\"] = 'hessian'\n keydict[\"return_wfn\"] = True\n inputdict[\"keywords\"] = keydict\n\n return inputdict", "def r_4(comps: 'list(Compound)', is_input):\r\n react: str\r\n\r\n if is_input:\r\n iSiMe1 = Compound(\"Mg\")\r\n iSaAc = Compound(\"Cu(HSO4)2\")\r\n if len(comps) == 1:\r\n if \"iSi\" in comps[0].comp_type:\r\n iSiMe1 = comps[0]\r\n me = list(iSiMe1.formula.consist.keys())[0].name\r\n me_oxs = 
get_me_oxs(me)\r\n\r\n if me_oxs == 0:\r\n return \"\"\r\n if is_me_activer(\"Cu\", 2, me, me_oxs):\r\n return \"\"\r\n else:\r\n iSaAc = comps[0]\r\n ((me, me_oxs), _) = iSa_oxs(iSaAc.formula)\r\n else:\r\n if \"iSi\" in comps[0].comp_type:\r\n iSiMe1 = comps[0]\r\n iSaAc = comps[1]\r\n else:\r\n iSiMe1 = comps[1]\r\n iSaAc = comps[0]\r\n\r\n me1 = list(iSiMe1.formula.consist.keys())[0].name\r\n me1_oxs = get_me_oxs(me1)\r\n if me1_oxs == 0:\r\n return \"\"\r\n ((me2, me2_oxs), (an, an_oxs)) = iSa_oxs(iSaAc.formula)\r\n if not is_me_activer(me1, me1_oxs, me2, me2_oxs):\r\n return \"\"\r\n if not is_me_activer(\"Na\", 1, me1, me1_oxs):\r\n return \"\"\r\n\r\n iSiMe2 = Compound(simple(me2))\r\n iSaNo = Compound(iSaNo_create(me1, me1_oxs, an, an_oxs))\r\n iAc = Compound(iAc_create(an, an_oxs))\r\n\r\n react = f\"{iSiMe1} + {iSaAc} -> {iSiMe2} + {iSaNo} + {iAc}\"\r\n else:\r\n iSiMe2 = Compound(\"Cu\")\r\n iSaNo = Compound(\"MgSO4\")\r\n iAc = Compound(\"H2SO4\")\r\n if len(comps) == 1:\r\n if \"iSi\" in comps[0].comp_type:\r\n iSiMe2 = comps[0]\r\n me = list(iSiMe2.formula.consist.keys())[0].name\r\n me_oxs = get_me_oxs(me)\r\n if me_oxs == 0:\r\n return \"\"\r\n elif \"iSa\" in comps[0].comp_type:\r\n iSaNo = comps[0]\r\n ((me, me_oxs), (an, an_oxs)) = iSa_oxs(iSaNo.formula)\r\n iAc = Compound(iAc_create(an, an_oxs))\r\n if is_me_activer(\"Cu\", 2, me, me_oxs):\r\n return \"\"\r\n if me == \"Cu\":\r\n iSiMe = Compound(\"Ag\")\r\n else:\r\n iAc = comps[0]\r\n (an, an_oxs) = iAc_oxs(iAc.formula)\r\n iSaNo = Compound(iSaNo_create(\"Zn\", 2, an, an_oxs))\r\n elif len(comps) == 2:\r\n for i in range(0, 2):\r\n for j in range(0, 2):\r\n if \"iSi\" in comps[i].comp_type and \"iSa\" in comps[j].comp_type:\r\n iSiMe = comps[i]\r\n iSaNo = comps[j]\r\n ((me, me_oxs), (an, an_oxs)) = iSa_oxs(iSaNo.formula)\r\n iAc = Compound(iAc_create(an, an_oxs))\r\n break\r\n if \"iSi\" in comps[i].comp_type and \"iAc\" in comps[j].comp_type:\r\n iSiMe = comps[i]\r\n iAc = comps[j]\r\n (an, an_oxs) = iAc_oxs(iAc.formula)\r\n iSaNo = Compound(iSaNo_create(\"Mg\", 2, an, an_oxs))\r\n break\r\n if \"iSa\" in comps[i].comp_type and \"iAc\" in comps[j].comp_type:\r\n iSaNo = comps[i]\r\n iAc = comps[j]\r\n ((me, me_oxs), (an, an_oxs)) = iSa_oxs(iSaNo.formula)\r\n if me == \"Cu\":\r\n iSiMe = Compound(\"Ag\")\r\n break\r\n else:\r\n if \"iSi\" in comps[0].comp_type:\r\n iSiMe2 = comps[0]\r\n if \"iSa\" in comps[1].comp_type:\r\n iSaNo = comps[1]\r\n iAc = comps[2]\r\n else:\r\n iSaNo = comps[2]\r\n iAc = comps[1]\r\n elif \"iSa\" in comps[0].comp_type:\r\n iSaNo = comps[0]\r\n if \"iSi\" in comps[1].comp_type:\r\n iSiMe2 = comps[1]\r\n iAc = comps[2]\r\n else:\r\n iSiMe2 = comps[2]\r\n iAc = comps[1]\r\n elif \"iAc\" in comps[0].comp_type:\r\n iAc = comps[0]\r\n if \"iSi\" in comps[1].comp_type:\r\n iSiMe2 = comps[1]\r\n iSaNo = comps[2]\r\n else:\r\n iSiMe2 = comps[2]\r\n iSaNo = comps[1]\r\n\r\n me2 = list(iSiMe2.formula.consist.keys())[0].name\r\n me2_oxs = get_me_oxs(me2)\r\n if me2_oxs == 0:\r\n return \"\"\r\n ((me1, me1_oxs), (an1, an1_oxs)) = iSa_oxs(iSaNo.formula)\r\n (an2, an2_oxs) = iAc_oxs(iAc.formula)\r\n if an1 != an2:\r\n return \"\"\r\n if an1_oxs == 1:\r\n return \"\"\r\n if not is_me_activer(me1, me1_oxs, me2, me2_oxs):\r\n return \"\"\r\n if not is_me_activer(\"Na\", 1, me1, me1_oxs):\r\n return \"\"\r\n\r\n iSiMe1 = Compound(simple(me1))\r\n iSaAc = Compound(iSaAc_create(me2, me2_oxs, an1, an1_oxs))\r\n\r\n react = f\"{iSiMe1} + {iSaAc} -> {iSiMe2} + {iSaNo} + {iAc}\"\r\n\r\n return Reaction(react)", 
"def build(width, height, depth, classes, stages, filters, include_top, pooling,\n reg=1e-3, bnEps=2e-5, bnMom=0.0):\n inputShape = (height, width, depth)\n chanDim = -1\n\n if K.image_data_format() == \"channels_first\":\n inputShape = (depth, height, width)\n chanDim = 1\n\n inputs = Input(shape=inputShape)\n\n\n # block 1 (initial conv block)\n x = ZeroPadding2D(padding=((3, 3), (3, 3)), name='conv1_pad')(inputs)\n x = Conv2D(64, (7,7), use_bias=False, strides=(2,2),\n kernel_initializer=\"he_normal\", kernel_regularizer=l2(reg))(x)\n x = BatchNormalization(axis=chanDim, name=\"bn_conv1\")(x)\n x = Activation(\"relu\")(x)\n x = ZeroPadding2D(padding=((1,1), (1,1)), name=\"pool1_pad\")(x)\n x = MaxPooling2D(3, strides=2)(x)\n\n for i in range(0, len(stages)):\n stride = (1,1) if i == 0 else (2,2) # block 2 (projection block) w stride(1,1)\n\n print(\"Stage {}, Stride={}\".format(i, stride))\n x = SEResNet.residual_module(x, filters[i+1], stride,\n chanDim=chanDim, red=True, bnEps=bnEps, bnMom=bnMom)\n for j in range(0, stages[i] + 1): #stacking res block to each depth layer\n x = SEResNet.residual_module(x, filters[i+1], stride=(1,1),\n chanDim=chanDim, bnEps=bnEps,\n bnMom=bnMom)\n x = BatchNormalization(axis=chanDim, epsilon=bnEps,\n momentum=bnMom)(x)\n x = Activation(\"relu\")(x)\n\n if include_top:\n x = GlobalAveragePooling2D()(x)\n x = Dense(classes, use_bias=False, kernel_regularizer=l2(reg),\n activation='softmax')(x)\n else:\n if pooling == 'avg':\n print(\"Adding average pool\")\n x = GlobalAveragePooling2D()(x)\n elif pooling == 'max':\n x = GlobalMaxPooling2D()(x)\n\n model = Model(inputs=inputs, outputs=x, name=\"SEResNet\")\n return model", "def compile(self):\n m, n = self.input_shape[1], self.input_shape[2]\n\n inp = Input(shape=self.input_shape, traces=True)\n self.add_layer(inp, \"DoG\")\n\n s1 = LIFNodes(shape=(18, m, n), traces=True)\n self.add_layer(s1, \"conv_1\")\n c1 = LIFNodes(shape=(18, m // 2, n // 2), traces=True)\n self.add_layer(c1, \"pool_1\")\n\n s2 = LIFNodes(shape=(24, m // 2, n // 2), traces=True)\n self.add_layer(s2, \"conv_2\")\n c2 = LIFNodes(shape=(24, m // 4, n // 4), traces=True)\n self.add_layer(c2, \"pool_2\")\n\n s3 = LIFNodes(shape=(32, m // 4, n // 4), traces=True)\n self.add_layer(s3, \"conv_3\")\n f = LIFNodes(shape=(32, 1), traces=True)\n self.add_layer(f, \"global_pool\")\n\n conv1 = Conv2dConnection(inp, s1, 5, padding=2, weight_decay=0.01,\n nu=0.01, update_rule=PostPre, decay=0.5)\n self.add_connection(conv1, \"DoG\", \"conv_1\")\n pool1 = MaxPool2dConnection(s1, c1, 2, 2, decay=0.5)\n self.add_connection(pool1, \"conv_1\", \"pool_1\")\n\n conv2 = Conv2dConnection(c1, s2, 3, padding=1, weight_decay=0.01,\n nu=0.01, update_rule=PostPre, decay=0.5)\n self.add_connection(conv2, \"pool_1\", \"conv_2\")\n pool2 = MaxPool2dConnection(s2, c2, 2, 2, decay=0.5)\n self.add_connection(pool2, \"conv_2\", \"pool_2\")\n\n conv3 = Conv2dConnection(c2, s3, 3, padding=1, weight_decay=0.01,\n nu=0.01, update_rule=PostPre, decay=0.5)\n self.add_connection(conv3, \"pool_2\", \"conv_3\")\n global_pool = MaxPool2dConnection(s3, f, (m // 4, n // 4), decay=0.5)\n self.add_connection(global_pool, \"conv_3\", \"global_pool\")\n\n monitor = NetworkMonitor(self, layers=[\"DoG\", \"conv_1\", \"pool_1\",\n \"conv_2\", \"pool_2\",\n \"conv_3\", \"global_pool\"],\n connections=[(\"DoG\", \"conv_1\"),\n (\"pool_1\", \"conv_2\"),\n (\"pool_2\", \"conv_3\")],\n state_vars=[\"w\", \"s\"])\n self.add_monitor(monitor, \"network_monitor\")\n\n return self", "def 
pack_spheres(shape, scale, number_of_cells, algorithm):\r\n if algorithm == 'simple':\r\n data = simple_packing(shape, scale, number_of_cells)\r\n else:\r\n create_input(number_of_cells)\r\n make_csd(shape, scale, number_of_cells)\r\n generate_structure(algorithm)\r\n data = read_results()\r\n np.savetxt('Project01.rco', data)\r\n render_packing(data)", "def logic_program_form(self):\r\n return '% -------------------------------------\\n' +\\\r\n '% Theory ' + self.name + '\\n' +\\\r\n '% -------------------------------------\\n\\n' +\\\r\n GENERAL_AXIOMS", "def proto():\n # Make a few wires and connect them to each other\n # Typically this will not be possible\n nodes = [Wire(), Nand(), Wire(), Wire(), Wire()]\n for i in range(len(nodes) - 1):\n nodes[i+1].inputs = [nodes[i]]\n\n wireWorld = World(nodes)\n print(textwrap.dedent(\"\"\"\n Wire World\n\n Standard wires propagate signal one per tick\n \"\"\"))\n for x in range(4):\n wireWorld.print()\n wireWorld.sim()\n print('----')\n\n # Make a Nand gate, connect it to itself via a wire\n nodes = [Wire(), Nand()]\n nodes[0].inputs = [nodes[1]]\n nodes[1].inputs = [nodes[0]]\n\n clockWorld = World(nodes)\n print(textwrap.dedent(\"\"\"\n Clock World\n\n Two-tick clock due to wire caching state.\n \"\"\"))\n for x in range(6):\n clockWorld.print()\n clockWorld.sim()\n print('----')\n\n # Make a Nand gate, connect it to itself via a instawire\n nodes = [InstaWire(), Nand()]\n nodes[0].inputs = [nodes[1]]\n nodes[1].inputs = [nodes[0]]\n\n instaclockWorld = World(nodes)\n print(textwrap.dedent(\"\"\"\n Instaclock World\n\n The instantaneous wire allows us to produce a one-tick clock.\n \"\"\"))\n for x in range(6):\n instaclockWorld.print()\n instaclockWorld.sim()\n print('----')", "def representation_2_txyz(self: Q, representation: str = \"\") -> List:\n\n symbolic = False\n\n if (\n hasattr(self.t, \"free_symbols\")\n or hasattr(self.x, \"free_symbols\")\n or hasattr(self.y, \"free_symbols\")\n or hasattr(self.z, \"free_symbols\")\n ):\n symbolic = True\n\n if representation == \"\":\n box_t, box_x, box_y, box_z = self.t, self.x, self.y, self.z\n\n elif representation == \"polar\":\n amplitude, theta_x, theta_y, theta_z = self.t, self.x, self.y, self.z\n\n theta = (theta_x ** 2 + theta_y ** 2 + theta_z ** 2) ** (1 / 2)\n\n if theta == 0:\n box_t = self.t\n box_x, box_y, box_z = 0, 0, 0\n\n else:\n if symbolic:\n box_t = amplitude * sp.cos(theta)\n box_x = self.x / theta * amplitude * sp.sin(theta)\n box_y = self.y / theta * amplitude * sp.sin(theta)\n box_z = self.z / theta * amplitude * sp.sin(theta)\n else:\n box_t = amplitude * math.cos(theta)\n box_x = self.x / theta * amplitude * math.sin(theta)\n box_y = self.y / theta * amplitude * math.sin(theta)\n box_z = self.z / theta * amplitude * math.sin(theta)\n\n elif representation == \"spherical\":\n box_t, R, theta, phi = self.t, self.x, self.y, self.z\n\n if symbolic:\n box_x = R * sp.sin(theta) * sp.cos(phi)\n box_y = R * sp.sin(theta) * sp.sin(phi)\n box_z = R * sp.cos(theta)\n else:\n box_x = R * math.sin(theta) * math.cos(phi)\n box_y = R * math.sin(theta) * math.sin(phi)\n box_z = R * math.cos(theta)\n\n elif representation == \"hyperbolic\":\n u, v, theta, phi = self.t, self.x, self.y, self.z\n\n if symbolic:\n box_t = v * sp.exp(u)\n box_x = v * sp.exp(-u)\n box_y = v * sp.sin(theta) * sp.sin(phi)\n box_z = v * sp.cos(theta)\n\n else:\n box_t = v * math.exp(u)\n box_x = v * math.exp(-u)\n box_y = v * math.sin(theta) * sp.sin(phi)\n box_z = v * math.cos(theta)\n\n else:\n raise 
ValueError(f\"Oops, don't know representation: representation\")\n\n return [box_t, box_x, box_y, box_z]", "def r_1(comps: 'list(Compound)', is_input):\r\n react: str\r\n\r\n if is_input:\r\n iSiMe = Compound(\"Cu\")\r\n if len(comps) == 1:\r\n if \"iSi\" in comps[0].comp_type:\r\n iSiMe = comps[0]\r\n else:\r\n if \"iSi\" in comps[0].comp_type:\r\n iSiMe = comps[0]\r\n else:\r\n iSiMe = comps[1]\r\n\r\n me = list(iSiMe.formula.consist.keys())[0].name\r\n if me in [\"Au\", \"Pt\"]:\r\n return \"\"\r\n me_oxs = get_me_oxs(me)\r\n if me_oxs == 0:\r\n return \"\"\r\n\r\n iSaNo = Compound(iSaNo_create(me, me_oxs, \"NO3\", 1))\r\n\r\n react = f\"{iSiMe} + HNO3 -> {iSaNo} + NO2 + H2O\"\r\n else:\r\n iSaNo = Compound(\"Cu(NO3)2\")\r\n if len(comps) == 1:\r\n if \"iSaNo\" in comps[0].comp_type:\r\n iSaNo = comps[0]\r\n elif len(comps) == 2:\r\n if \"iSaNo\" in comps[0].comp_type:\r\n iSaNo = comps[0]\r\n elif \"iSaNo\" in comps[1].comp_type:\r\n iSaNo = comps[1]\r\n else:\r\n if \"iSaNo\" in comps[0].comp_type:\r\n iSaNo = comps[0]\r\n elif \"iSaNo\" in comps[1].comp_type:\r\n iSaNo = comps[1]\r\n else:\r\n iSaNo = comps[2]\r\n\r\n ((me, me_oxs), (an, an_oxs)) = iSa_oxs(iSaNo.formula)\r\n if an != \"NO3\":\r\n return \"\"\r\n if me in [\"Au\", \"Pt\"]:\r\n return \"\"\r\n\r\n iSiMe = Compound(simple(me))\r\n\r\n react = f\"{iSiMe} + HNO3 -> {iSaNo} + NO2 + H2O\"\r\n\r\n return Reaction(react)", "def construct_circuit(self, x, qr=None, inverse=False):\n if not isinstance(x, np.ndarray):\n raise TypeError(\"x must be numpy array.\")\n if x.ndim != 1:\n raise ValueError(\"x must be 1-D array.\")\n if x.shape[0] != self._num_qubits:\n raise ValueError(\"number of qubits and data dimension must be the same.\")\n if qr is None:\n qr = QuantumRegister(self._num_qubits, name='q')\n qc = self._constructor_function(x, qr, inverse, *self._feature_param)\n #qc.draw(output='mpl', filename='C:/Users/RaphaelLambert/Pictures/custom.png')\n return qc", "def test_integration(self):\n\n m = 5 # number of wires in A\n M = 2**m\n\n xmax = np.pi # bound to region [-pi, pi]\n xs = np.linspace(-xmax, xmax, M)\n\n probs = np.array([norm().pdf(x) for x in xs])\n probs /= np.sum(probs)\n\n func = lambda i: np.sin(xs[i]) ** 2\n r_rotations = np.array([2 * np.arcsin(np.sqrt(func(i))) for i in range(M)])\n\n A_wires = [0, \"a\", -1.1, -10, \"bbb\"]\n target_wire = \"Ancilla\"\n wires = A_wires + [target_wire]\n estimation_wires = [\"bob\", -3, 42, \"penny\", \"lane\"]\n\n def fn():\n qml.templates.MottonenStatePreparation(np.sqrt(probs), wires=A_wires)\n r_unitary(qml.RY, r_rotations, control_wires=A_wires[::-1], target_wire=target_wire)\n\n qmc_circuit = qml.quantum_monte_carlo(\n fn, wires=wires, target_wire=target_wire, estimation_wires=estimation_wires\n )\n\n with qml.queuing.AnnotatedQueue() as q:\n qmc_circuit()\n qml.probs(estimation_wires)\n\n tape = qml.tape.QuantumScript.from_queue(q)\n tape = tape.expand(depth=2)\n\n assert all(\n not isinstance(op, (qml.MultiControlledX, qml.templates.QFT, qml.tape.QuantumScript))\n for op in tape.operations\n )\n\n dev = qml.device(\"default.qubit\", wires=wires + estimation_wires)\n res = dev.execute(tape)\n\n @qml.qnode(dev)\n def circuit():\n qml.templates.QuantumMonteCarlo(\n probs, func, target_wires=wires, estimation_wires=estimation_wires\n )\n return qml.probs(estimation_wires)\n\n res_expected = circuit()\n assert np.allclose(res, res_expected)", "def compiler_output(input_ckt, hier_graph_dict, design_name:str, result_dir:pathlib.Path, pdk_dir:pathlib.Path, 
uniform_height=False):\n layers_json = pdk_dir / 'layers.json'\n with open(layers_json,\"rt\") as fp:\n pdk_data=json.load(fp)\n design_config = pdk_data[\"design_info\"]\n\n if not result_dir.exists():\n result_dir.mkdir()\n logger.debug(f\"Writing results in dir: {result_dir} {hier_graph_dict}\")\n input_dir = input_ckt.parents[0]\n\n verilog_tbl = { 'modules': [], 'global_signals': []}\n\n design_setup = read_setup(input_dir / (design_name + '.setup'))\n try:\n POWER_PINS = [design_setup['GND'][0],design_setup['POWER'][0]]\n except (IndexError, ValueError):\n POWER_PINS = []\n logger.info(\"Power and ground nets not found. Power grid will not be constructed.\")\n\n #read lef to not write those modules as macros\n lef_path = pathlib.Path(__file__).resolve().parent.parent / 'config'\n all_lef = read_lef(lef_path)\n logger.debug(f\"Available library cells: {', '.join(all_lef)}\")\n\n primitives = {}\n for name,member in hier_graph_dict.items():\n logger.debug(f\"Found module: {name} {member['graph'].nodes()}\")\n graph = member[\"graph\"]\n constraints = member[\"constraints\"]\n\n for const in constraints:\n if isinstance(const, constraint.GuardRing):\n primitives['guard_ring'] = {'primitive':'guard_ring'}\n\n logger.debug(f\"Reading nodes from graph: {name}\")\n for node, attr in graph.nodes(data=True):\n if 'net' in attr['inst_type']: continue\n #Dropping floating ports\n lef_name = attr['inst_type']\n\n if \"values\" in attr and (lef_name in all_lef):\n block_name, block_args = generate_lef(lef_name, attr, primitives, design_config, uniform_height)\n #block_name_ext = block_name.replace(lef_name,'')\n logger.debug(f\"Created new lef for: {block_name} {lef_name}\")\n #Multiple instances of same module\n if 'inst_copy' in attr:\n for nm in list(hier_graph_dict.keys()):\n if nm == lef_name + attr['inst_copy']:\n if block_name not in hier_graph_dict.keys():\n logger.debug('Trying to modify a dictionary while iterating over it!')\n hier_graph_dict[block_name] = hier_graph_dict.pop(nm)\n else:\n #For cells with extra parameters than current primitive naming convention\n all_lef.append(nm)\n graph.nodes[node][\"inst_type\"] = block_name\n all_lef.append(block_name)\n\n # Only unit caps are generated\n if block_name.lower().startswith('cap'):\n graph.nodes[node]['inst_type'] = block_args['primitive']\n block_args['primitive'] = block_name\n else:\n graph.nodes[node]['inst_type'] = block_name\n\n if block_name in primitives:\n if block_args != primitives[block_name]:\n logging.warning(f\"two different primitve {block_name} of size {primitives[block_name]} {block_args}got approximated to same unit size\")\n else:\n primitives[block_name] = block_args\n elif \"values\" in attr and 'inst_copy' in attr:\n member[\"graph\"].nodes[node][\"inst_type\"]= lef_name + attr[\"inst_copy\"]\n all_lef.append(block_name)\n\n else:\n logger.debug(f\"No physical information found for: {name}\")\n logger.debug(f\"generated data for {name} : {pprint.pformat(primitives, indent=4)}\")\n logger.debug(f\"All available cell generator with updates: {all_lef}\")\n for name,member in hier_graph_dict.items():\n graph = member[\"graph\"]\n logger.debug(f\"Found module: {name} {graph.nodes()}\")\n inoutpin = []\n floating_ports=[]\n if \"ports_match\" in member and member[\"ports_match\"]:\n for key in member[\"ports_match\"].keys():\n if key not in POWER_PINS:\n inoutpin.append(key)\n if member[\"ports\"]:\n logger.debug(f'Found module ports: {member[\"ports\"]} {member[\"name\"]}')\n floating_ports = set(inoutpin) - 
set(member[\"ports\"]) - set(design_setup['POWER']) -set(design_setup['GND'])\n if len(list(floating_ports))> 0:\n logger.error(f\"floating ports found: {name} {floating_ports}\")\n raise SystemExit('Please remove floating ports')\n else:\n inoutpin = member[\"ports\"]\n if name not in all_lef:\n\n ## Removing constraints to fix cascoded cmc\n if name not in design_setup['DIGITAL']:\n logger.debug(f\"call constraint generator writer for block: {name}\")\n stop_points = design_setup['POWER'] + design_setup['GND'] + design_setup['CLOCK']\n constraints = member[\"constraints\"]\n if name not in design_setup['NO_CONST']:\n constraints = FindConst(graph, name, inoutpin, member[\"ports_weight\"], constraints, stop_points)\n constraints = CapConst(graph, name, design_config[\"unit_size_cap\"], constraints, design_setup['MERGE_SYMM_CAPS'])\n hier_graph_dict[name] = hier_graph_dict[name].copy(\n update={'constraints': constraints}\n )\n ## Write out modified netlist & constraints as JSON\n logger.debug(f\"call verilog writer for block: {name}\")\n wv = WriteVerilog(name, inoutpin, hier_graph_dict, POWER_PINS)\n verilog_tbl['modules'].append( wv.gen_dict())\n if len(POWER_PINS)>0:\n for i, nm in enumerate(POWER_PINS):\n verilog_tbl['global_signals'].append( { 'prefix' :'global_power', 'formal' : f'supply{i}', 'actual' : nm})\n\n with (result_dir / f'{design_name}.verilog.json').open( 'wt') as fp:\n json.dump( verilog_tbl, fp=fp, indent=2)\n\n with (result_dir / f'{design_name}.v').open( 'wt') as fp:\n write_verilog( verilog_tbl, fp)\n\n logger.info(\"Completed topology identification.\")\n logger.debug(f\"OUTPUT verilog json netlist at: {result_dir}/{design_name}.verilog.json\")\n logger.debug(f\"OUTPUT verilog netlist at: {result_dir}/{design_name}.v\")\n logger.debug(f\"OUTPUT const file at: {result_dir}/{design_name}.pnr.const.json\")\n return primitives", "def build_network(self):\n net = self.ccf_data\n\n # Reshape [length] -> [length, 1].\n net = tf.expand_dims(net, -1)\n\n # create summary object\n summary = []\n\n for i in self.hparams.conv_block_filters:\n for _ in range(self.hparams.conv_layers_per_block):\n input_shape = net.shape.as_list()\n conv_op = tf.keras.layers.Conv1D(filters=i, kernel_size=self.hparams.kernel_size, padding='same',\n activation=tf.nn.relu)\n net = conv_op(net)\n summary.append(\"Conv1D-{}-{}. Input shape: {}. Output shape: {}\".format(self.hparams.kernel_size, i, input_shape,\n net.shape.as_list()))\n pool_size = 2\n strides = 2\n max_pool = tf.keras.layers.MaxPool1D(pool_size=pool_size, strides=strides)\n net = max_pool(net)\n summary.append(\"MaxPool1D-{}. Pool Size: {}. 
Strides: {}\".format(self.hparams.kernel_size, pool_size, strides))\n\n for i in self.hparams.final_conv_num_filters:\n conv_op = tf.keras.layers.Conv1D(filters=i, kernel_size=self.hparams.kernel_size, padding='same',\n activation=tf.nn.relu)\n net = conv_op(net)\n flatten = tf.keras.layers.Flatten()\n net = flatten(net)\n\n for i in self.hparams.dense_num_layers:\n dense = tf.keras.layers.Dense(i, activation=tf.nn.relu)\n net = dense(net)\n\n # output layer\n output = tf.keras.layers.Dense(1)\n net = tf.squeeze(output(net))\n\n self.summary = \"\\n\".join(summary)\n self.predicted_rv = net", "def r_5(comps: 'list(Compound)', is_input):\r\n react: str\r\n\r\n if is_input:\r\n iSiMe = Compound(\"Mg\")\r\n if len(comps) == 1:\r\n if \"iSi\" in comps[0].comp_type:\r\n iSiMe = comps[0]\r\n else:\r\n if \"iSi\" in comps[0].comp_type:\r\n iSiMe = comps[0]\r\n else:\r\n iSiMe = comps[1]\r\n\r\n me = list(iSiMe.formula.consist.keys())[0].name\r\n if me in [\"Au\", \"Pt\"]:\r\n return \"\"\r\n me_oxs = get_me_oxs(me)\r\n if me_oxs == 0:\r\n return \"\"\r\n if is_me_activer(\"H\", 1, me, me_oxs):\r\n return \"\"\r\n\r\n iSaNo = Compound(iSaNo_create(me, me_oxs, \"NO3\", 1))\r\n\r\n react = f\"{iSiMe} + HNO3 -> {iSaNo} + AmmNO3 + H2O\"\r\n else:\r\n iSaNo = Compound(\"Mg(NO3)2\")\r\n if len(comps) == 1:\r\n if \"iSaNo\" in comps[0].comp_type:\r\n iSaNo = comps[0]\r\n elif len(comps) == 2:\r\n if \"iSaNo\" in comps[0].comp_type:\r\n iSaNo = comps[0]\r\n elif \"iSaNo\" in comps[1].comp_type:\r\n iSaNo = comps[1]\r\n else:\r\n if \"iSaNo\" in comps[0].comp_type:\r\n iSaNo = comps[0]\r\n elif \"iSaNo\" in comps[1].comp_type:\r\n iSaNo = comps[1]\r\n else:\r\n iSaNo = comps[2]\r\n\r\n ((me, me_oxs), (an, an_oxs)) = iSa_oxs(iSaNo.formula)\r\n if an != \"NO3\":\r\n return \"\"\r\n if me in [\"Au\", \"Pt\"]:\r\n return \"\"\r\n if is_me_activer(\"H\", 1, me, me_oxs):\r\n return \"\"\r\n\r\n iSiMe = Compound(simple(me))\r\n\r\n react = f\"{iSiMe} + HNO3 -> {iSaNo} + AmmNO3 + H2O\"\r\n\r\n return Reaction(react)", "def qlm_to_qiskit(qlm_circuit, qubits=None):\n # Init measured qubits\n if qubits is None:\n qubits = list(range(qlm_circuit.nbqbits))\n\n qreg = QuantumRegister(qlm_circuit.nbqbits)\n creg = None\n param_list = []\n if qlm_circuit.nbcbits > 0:\n creg = ClassicalRegister(max(qlm_circuit.nbcbits, len(qubits)))\n q_circ = QuantumCircuit(qreg, creg)\n else:\n q_circ = QuantumCircuit(qreg)\n dic = _gen_qiskit_gateset(q_circ)\n for gate_op in qlm_circuit:\n if gate_op.type == OpType.GATETYPE:\n name, params = extract_syntax(\n qlm_circuit.gateDic[gate_op.gate], qlm_circuit.gateDic,\n var_dic=qlm_circuit.var_dic)\n nbctrls = name.count('C-')\n # changes variables and expressions to format used by Qiskit\n for index, param in enumerate(params):\n if isinstance(param, Variable):\n params[index] = _variable_to_parameter(\n param_list, variable=param)\n elif isinstance(param, ArithExpression):\n arith_expr_list = param.to_thrift().split()\n params[index] = _arith_expr_list_to_parameter_expression(\n param_list, arith_expr_list, param)\n try:\n if name == \"MS\":\n q_circ.ms(params[0], [qreg[i] for i in gate_op.qbits])\n else:\n if (nbctrls > 0 and name not in SUPPORTED_CTRLS):\n tmp = name\n count = 0\n gate = None\n while True:\n last = tmp\n tmp = tmp.replace(\"C-\", \"\", 1)\n if last == tmp:\n raise ValueError(\n \"Gate {} not supported by Qiskit API\".format(name)\n )\n else:\n count += 1\n gate = _get_qiskit_gate_from_name(tmp)\n if gate != None:\n gate = gate(*params).control(count)\n break\n if gate 
!= None:\n q_circ.append(gate, [qreg[i] for i in gate_op.qbits])\n else:\n dic[name](* params + [qreg[i] for i in gate_op.qbits])\n except KeyError:\n raise ValueError(\n \"Gate {} not supported by Qiskit API\".format(name)\n )\n elif gate_op.type == OpType.MEASURE:\n for index in range(len(gate_op.qbits)):\n q_circ.measure(gate_op.qbits[index], gate_op.cbits[index])\n\n # Adding measures to unify the interface\n for qbit_index, cbit in zip(qubits, creg):\n q_circ.measure(qreg[qbit_index], cbit)\n return q_circ", "def mk_intcircuit(task_info):\n # -------------------------------------\n # Decision circuit parameters\n # -------------------------------------\n # populations\n N_E = task_info['dec']['populations']['N_E'] # number of exc neurons (1600)\n N_I = task_info['dec']['populations']['N_I'] # number of inh neurons (400)\n sub = task_info['dec']['populations']['sub'] # fraction of stim-selective exc neurons\n N_D1 = int(N_E * sub) # size of exc pop D1\n N_D2 = N_D1 # size of exc pop D2\n N_D3 = int(N_E * (1 - 2 * sub)) # size of exc pop D3, the rest\n\n # local recurrent connections\n w_p = task_info['dec']['connectivity']['w_p'] # relative synaptic strength of synapses within pop D1 and D2\n w_m = 1 - sub * (w_p - 1) / (1 - sub) # relative synaptic strength of synapses across pop D1 and D2\n gEEa = task_info['dec']['connectivity']['gEEa'] # AMPA weight of EE synapses\n gEEn = task_info['dec']['connectivity']['gEEn'] # NMDA weight of EE synapses\n gEIa = task_info['dec']['connectivity']['gEIa'] # AMPA weight of EI synapses\n gEIn = task_info['dec']['connectivity']['gEIn'] # NMDA weight of EI synapses\n gIE = task_info['dec']['connectivity']['gIE'] # GABA weight of IE synapses, vs 1.3*nS from before\n gII = task_info['dec']['connectivity']['gII'] # GABA weight of II synapses\n d = task_info['dec']['connectivity']['delay'] # transmission delays of E synapses\n\n # external connections\n gXE = task_info['dec']['connectivity']['gXE'] # weight of XE (ext to exc) synapses\n gXI = task_info['dec']['connectivity']['gXI'] # weight of XI (ext to inh) synapses\n\n # neuron models\n CmE = task_info['dec']['neuron']['CmE'] # membrane capacitance of E neurons\n CmI = task_info['dec']['neuron']['CmI'] # membrane capacitance of I neurons\n gleakE = task_info['dec']['neuron']['gleakE'] # leak conductance of E neurons\n gleakI = task_info['dec']['neuron']['gleakI'] # leak conductance of I neurons\n Vl = task_info['dec']['neuron']['Vl'] # resting potential\n Vt = task_info['dec']['neuron']['Vt'] # spiking threshold\n Vr = task_info['dec']['neuron']['Vr'] # reset potential\n tau_refE = task_info['dec']['neuron']['tau_refE'] # absolute refractory period of E neurons\n tau_refI = task_info['dec']['neuron']['tau_refI'] # absolute refractory period of I neurons\n nu_ext = task_info['dec']['neuron']['nu_ext'] # firing rate of ext Poisson input to D1 and D2\n nu_ext1 = task_info['dec']['neuron']['nu_ext1'] # firing rate of ext Poisson input to D3 and DI\n\n # synapse models\n VrevE = task_info['dec']['synapse']['VrevE'] # reversal potential for E synapses\n VrevI = task_info['dec']['synapse']['VrevI'] # reversal potential for I synapses\n tau_ampa = task_info['dec']['synapse']['tau_ampa'] # decay constant of AMPA conductances\n tau_gaba = task_info['dec']['synapse']['tau_gaba'] # decay constant of GABA conductances\n tau_nmda_d = task_info['dec']['synapse']['tau_nmda_d'] # decay constant of NMDA conductances\n tau_nmda_r = task_info['dec']['synapse']['tau_nmda_r'] # rise constant of NMDA conductances\n 
alpha_nmda = task_info['dec']['synapse']['alpha_nmda'] # saturation constant of NMDA conductances\n\n # namespace with params\n paramint = {'w_p': w_p, 'w_m': w_m, 'gEEa': gEEa, 'gEEn': gEEn, 'gEIa': gEIa, 'gEIn': gEIn,\n 'gIE': gIE, 'gII': gII, 'gXE': gXE, 'gXI': gXI, 'gleakE': gleakE, 'gleakI': gleakI,\n 'Vl': Vl, 'Vt': Vt, 'Vr': Vr, 'VrevE': VrevE, 'VrevI': VrevI, 'tau_ampa': tau_ampa,\n 'tau_gaba': tau_gaba, 'tau_nmda_d': tau_nmda_d, 'tau_nmda_r': tau_nmda_r, 'alpha_nmda': alpha_nmda,\n 'sub': sub, 'CmE': CmE, 'CmI': CmI}\n\n # numerical integration method\n nummethod = task_info['simulation']['nummethod']\n\n # -------------------------------------\n # Set up the model and connections\n # -------------------------------------\n # neuron equations\n eqsE = '''\n dV/dt = (-g_ea*(V-VrevE) - g_ent*(V-VrevE)/(1+exp(-V/mV*0.062)/3.57) - g_i*(V-VrevI) - (V-Vl)) / tau : volt (unless refractory)\n dg_ea/dt = -g_ea / tau_ampa : 1\n dg_i/dt = -g_i / tau_gaba : 1\n dg_en/dt = -g_en / tau_nmda_d + alpha_nmda * x_en *(1-g_en) : 1\n dx_en/dt = -x_en / tau_nmda_r : 1\n g_ent : 1\n tau = CmE/gleakE : second\n label : integer (constant)\n '''\n\n eqsI = '''\n dV/dt = (-g_ea*(V-VrevE) - g_entI*(V-VrevE)/(1+exp(-V/mV*0.062)/3.57) - g_i*(V-VrevI) - (V-Vl)) / tau : volt (unless refractory)\n dg_ea/dt = -g_ea/tau_ampa : 1\n dg_i/dt = -g_i/tau_gaba : 1\n g_entI = w_nmda * g_ent : 1\n g_ent : 1 (linked)\n w_nmda : 1\n tau = CmI/gleakI : second\n '''\n\n # setup of integration circuit\n decE = NeuronGroup(N_E, model=eqsE, method=nummethod, threshold='V>=Vt', reset='V=Vr',\n refractory=tau_refE, namespace=paramint, name='decE')\n decE1 = decE[:N_D1]\n decE2 = decE[N_D1:N_D1 + N_D2]\n decE3 = decE[-N_D3:]\n decE1.label = 1\n decE2.label = 2\n decE3.label = 3\n\n decI = NeuronGroup(N_I, model=eqsI, method=nummethod, threshold='V>=Vt', reset='V=Vr',\n refractory=tau_refI, namespace=paramint, name='decI')\n\n # weight according the different subgroups\n condsame = '(label_pre == label_post and label_pre != 3)'\n conddiff = '(label_pre != label_post and label_pre != 3) or (label_pre == 3 and label_post != 3)'\n condrest = '(label_post == 3)'\n\n # NMDA: exc --> exc\n eqsNMDA = '''\n g_ent_post = w_nmda * g_en_pre : 1 (summed)\n w_nmda : 1 (constant)\n w : 1 (constant)\n '''\n\n synDEDEn = Synapses(decE, decE, model=eqsNMDA, method=nummethod, on_pre='x_en += w', delay=d,\n namespace=paramint, name='synDEDEn')\n synDEDEn.connect()\n synDEDEn.w['i == j'] = 1\n synDEDEn.w['i != j'] = 0\n synDEDEn.w_nmda[condsame] = 'w_p * gEEn/gleakE'\n synDEDEn.w_nmda[conddiff] = 'w_m * gEEn/gleakE'\n synDEDEn.w_nmda[condrest] = 'gEEn/gleakE'\n\n # NMDA: exc --> inh\n decI.w_nmda = '(gEIn/gleakI) / (gEEn/gleakE)'\n decI.g_ent = linked_var(decE3, 'g_ent', index=range(N_I))\n\n # AMPA: exc --> exc\n synDEDEa = Synapses(decE, decE, model='w : 1', method=nummethod,\n on_pre='g_ea += w', delay=d,\n namespace=paramint, name='synDEDEa')\n synDEDEa.connect()\n synDEDEa.w[condsame] = 'w_p * gEEa/gleakE'\n synDEDEa.w[conddiff] = 'w_m * gEEa/gleakE'\n synDEDEa.w[condrest] = 'gEEa/gleakE'\n\n # AMPA: exc --> inh\n synDEDIa = Synapses(decE, decI, model='w : 1', method=nummethod,\n on_pre='g_ea += w', delay=d,\n namespace=paramint, name='synDEDIa')\n synDEDIa.connect()\n synDEDIa.w = 'gEIa/gleakI'\n\n # GABA: inh --> exc\n synDIDE = Synapses(decI, decE, model='w : 1', method=nummethod,\n on_pre='g_i += w', delay=d,\n namespace=paramint, name='synDIDE')\n synDIDE.connect()\n synDIDE.w = 'gIE/gleakE'\n\n # GABA: inh --> inh\n synDIDI = Synapses(decI, 
decI, model='w : 1', method=nummethod,\n on_pre='g_i += w', delay=d,\n namespace=paramint, name='synDIDI')\n synDIDI.connect()\n synDIDI.w = 'gII/gleakI'\n\n # external inputs and connections\n extE = PoissonInput(decE[:N_D1 + N_D2], 'g_ea', N=1, rate=nu_ext1, weight='gXE/gleakE')\n extE3 = PoissonInput(decE3, 'g_ea', N=1, rate=nu_ext, weight='gXE/gleakE')\n extI = PoissonInput(decI, 'g_ea', N=1, rate=nu_ext, weight='gXI/gleakI')\n\n # variables to return\n groups = {'DE': decE, 'DI': decI, 'DX': extE, 'DX3': extE3, 'DXI': extI}\n subgroups = {'DE1': decE1, 'DE2': decE2, 'DE3': decE3}\n synapses = {'synDEDEn': synDEDEn,\n 'synDEDEa': synDEDEa, 'synDEDIa': synDEDIa,\n 'synDIDE': synDIDE, 'synDIDI': synDIDI} # 'synDEDIn': synDEDIn,\n\n return groups, synapses, subgroups", "def load_flow(self, network):\n # main.m\n alpha = 1\n nb_brackets = network.get_nb_brackets()-1\n # Battery settings\n bat_node = 2\n bat_phase = 2\n bat = (bat_node-2)*3 + bat_phase\n Ebat = 0\n Ebat_max = 120000\n Pbat = 60000\n # End\n # Grid_definition.m\n grid = self.grid_definition(network)\n K = grid['K']\n Zbr = grid['Zbr']\n vec_phases_index = grid['vec_phases_index']\n # End of Grid_Definition\n brackets = network.get_brackets()[1:]\n network_nodes = [brackets[i].get_node() for i in range(nb_brackets)]\n # load_flow.m\n Ibus = np.zeros((3 * nb_brackets), dtype=np.complex128)\n Ibus = Ibus[:, np.newaxis]\n Vnl = network.get_slack_voltage()\n Vnl = Vnl[vec_phases_index]\n Vbus = Vnl\n Vbr_prev = Vnl\n # If we don't define Tmp as a N-Dim Array, the Tile function will broadcast it to a N-Dim Array of shape\n # (1, 1, 57) instead of letting it be (57, 1, 1). This will result by producing a new matrix of shape\n # (1, 570, 96). I guess that the tile function will perform some multiplication on the dimensions\n # and then will join'em. If Vnl(57,1) & Newmat(10,96):\n # Result = (1, 57*10, 96)... Which is not really what we want.\n Tmp = (Vnl * 0)\n Tmp = Tmp[:, np.newaxis]\n V = np.tile(Tmp, (1,1,1))\n I = np.tile(Tmp, (1,1,1))\n # We don't use the Tmp matrix here because Vnl won't be broadcasted to a 3D matrix but to a 1D. 
So the bug\n # that has been resolved earlier won't happen here\n # Imean = np.tile(Vnl*0, (96))\n # Vmean = np.tile(Vnl*0, (96))\n powers = []\n\n for node in network_nodes:\n n_pow = []\n for user in node.get_users():\n n_pow.append(user.get_P())\n powers.extend(n_pow)\n\n \"\"\"\n Here, we are assigning the NumPy functions we are going to use into the load flow loop to gain\n a little bit more efficiency.\n \"\"\"\n # NumPy Functions\n conj = np.conj\n divide = np.divide\n absolute = np.abs\n less = np.less\n zeros = np.zeros\n # Here is the wrapping of the load flow:\n # h = 0, nb iterations\n # q = 0, 96\n P = np.asarray(powers)\n P = divide(P, 2)\n Q = np.dot(P, np.array([0]))\n # Initializing arrays to optimize\n Ibr = zeros((nb_brackets, 1))\n Vbr = zeros((nb_brackets, 1))\n # Before we enter the loop, we make sure we are going to work with matrices instead of arrays.\n Ibr = np.matrix(Ibr)\n Vbr = np.matrix(Vbr)\n # LOAD FLOW LOOP\n k = 0\n t = process_time()\n while True:\n k += 1\n bal = 0\n for i in range(len(P)):\n if k == 1:\n Ibus[i] = -(np.matrix(np.complex(P[i], Q[i])/Vbus[i]).conj())\n else:\n Ibus[i] = -(np.matrix(np.complex(P[i], Q[i]) / Vbus[i]).conj())\n if i % 3 == bat:\n bal = bal + P[i]\n if bat != 0:\n if bal < 0:\n if Ebat < Ebat_max:\n Ibus[bat] = min([conj(-Pbat/Vbus[bat]),\n conj(bal/Vbus[bat]),\n conj(-(Ebat_max - Ebat)/(Vbus[bat]*0.25))])\n Ebat += absolute(np.dot(Ibus[bat], Vbus[bat])) * 0.25\n elif Ebat > 0:\n Ibus[bat] = min([conj(Pbat/Vbus[bat]),\n conj(bal/Vbus[bat]),\n conj(Ebat/(Vbus[bat]*0.25))])\n Ebat -= absolute(np.dot(Ibus[bat], Vbus[bat])) * 0.\n Ibr = K * Ibus\n Vbr = Zbr * Ibr\n if (less(divide(absolute(Vbr - Vbr_prev), absolute(Vbr + 0.0000000000000001)), self.__tolerance)).all():\n break\n Vbr = Vbr_prev + (alpha * (Vbr - Vbr_prev))\n Vbr_prev = Vbr\n Vbus = Vnl + np.dot(K.conj().T, Vbr)\n Vbus = Vnl + np.dot(K.conj().T, Vbr)\n V[:] = Vbus[:, :, np.newaxis]\n I[:] = Ibr[:, :, np.newaxis]\n Pbr = Qbr = np.array([[[0 for k in range(2)]for j in range(len(vec_phases_index))] for i in range(nb_brackets)])\n for i in range(nb_brackets):\n for j in range(len(vec_phases_index)):\n i_to_j = self.powerflow(Vbus[i], Ibr[i])\n j_to_i = self.powerflow(Vbus[i+1], Ibr[i])\n Pbr[i][j][0] = i_to_j['active']\n Pbr[i][j][1] = j_to_i['active']\n Qbr[i][j][0] = i_to_j['reactive']\n Qbr[i][j][1] = j_to_i['reactive']\n print(np.shape(Pbr), Qbr.shape)\n # END OF LOAD FLOW\n # End of load_flow.m\n print(\"Process executed in\", process_time() - t, \"s\")\n dic = {\n 'Ibus_bat': Ibus[bat],\n 'Ebat': Ebat,\n 'V': V,\n 'Vbr': Vbr,\n 'Vbus': Vbus,\n 'I': I,\n 'Ibus': Ibus,\n 'Ibr': Ibr,\n 'Zbr': Zbr,\n 'P': P,\n 'K': K,\n 'Vnl': Vnl,\n 'Pbr': Pbr,\n 'Qbr': Qbr\n }\n return dic", "def my_cir(para: List[float]):\n\n env = QEnv()\n env.backend(BackendName.LocalBaiduSim2)\n q = env.Q.createList(2 * n)\n\n # Prepare a state\n for i in range(2):\n state_prepare(q, 2 * i)\n\n # Add parameterized circuit\n for i in range(2):\n universal_cir(q, 2 * i, para)\n\n # DIP test\n for i in range(2):\n CX(q[i], q[i + n])\n\n MeasureZ(*env.Q.toListPair())\n taskResult = env.commit(shots, fetchMeasure=True)\n\n return taskResult['counts']", "def _network_template(self, state):\n net = tf.cast(state, tf.float32)\n net = tf.div(net, 255.)\n net = slim.conv2d(\n net, int(32 * self.network_size_expansion), [8, 8], stride=4)\n net = slim.conv2d(\n net, int(64 * self.network_size_expansion), [4, 4], stride=2)\n net = slim.conv2d(\n net, int(64 * self.network_size_expansion), [3, 3], stride=1)\n 
net = slim.flatten(net)\n net = slim.fully_connected(net, int(512 * self.network_size_expansion))\n\n q_values = []\n for _ in range(self.number_of_gammas):\n gamma_q_value = slim.fully_connected(\n net, self.num_actions, activation_fn=None)\n q_values.append(gamma_q_value)\n\n # Estimate the hyperbolic discounted q-values\n hyp_q_value = agent_utils.integrate_q_values(q_values,\n self.integral_estimate,\n self.eval_gammas,\n self.number_of_gammas,\n self.gammas)\n\n return self._get_network_type()(hyp_q_value, q_values)", "def buildCircuit(circuit, frequencies, *parameters, eval_string='', index=0):\n\n parameters = np.array(parameters).tolist()\n frequencies = np.array(frequencies).tolist()\n circuit = circuit.replace(' ', '')\n\n def parse_circuit(circuit, parallel=False, series=False):\n \"\"\" Splits a circuit string by either dashes (series) or commas\n (parallel) outside of any paranthesis. Removes any leading 'p('\n or trailing ')' when in parallel mode \"\"\"\n\n assert parallel != series, \\\n 'Exactly one of parallel or series must be True'\n\n def count_parens(string):\n return string.count('('), string.count(')')\n\n if parallel:\n special = ','\n if circuit.endswith(')') and circuit.startswith('p('):\n circuit = circuit[2:-1]\n if series:\n special = '-'\n\n split = circuit.split(special)\n result = []\n skipped = []\n for i, sub_str in enumerate(split):\n if i not in skipped:\n if '(' not in sub_str and ')' not in sub_str:\n result.append(sub_str)\n else:\n open_parens, closed_parens = count_parens(sub_str)\n if open_parens == closed_parens:\n result.append(sub_str)\n else:\n uneven = True\n while i < len(split) - 1 and uneven:\n sub_str += special + split[i+1]\n\n open_parens, closed_parens = count_parens(sub_str)\n uneven = open_parens != closed_parens\n\n i += 1\n skipped.append(i)\n result.append(sub_str)\n return result\n\n parallel = parse_circuit(circuit, parallel=True)\n series = parse_circuit(circuit, series=True)\n\n if parallel is not None and len(parallel) > 1:\n eval_string += \"p([\"\n split = parallel\n elif series is not None and len(series) > 1:\n eval_string += \"s([\"\n split = series\n\n for i, elem in enumerate(split):\n if ',' in elem or '-' in elem:\n eval_string, index = buildCircuit(elem, frequencies,\n *parameters,\n eval_string=eval_string,\n index=index)\n else:\n param_string = \"\"\n elem_number = len(elem.split(\"/\"))\n\n param_string += str(parameters[index:index + elem_number])\n new = elem[0] + '(' + param_string + ',' + str(frequencies) + ')'\n eval_string += new\n\n index += elem_number\n\n if i == len(split) - 1:\n eval_string += '])'\n else:\n eval_string += ','\n\n return eval_string, index", "def test_BuildModel3(self):\n print(\"\\nTest 7: Building a more complicated Model\")\n builder = StaticBuilder(\"BreakIt\")\n in1 = builder.addInput(10)\n in2 = builder.addInput(20)\n enc1 = builder.addInner(3)\n enc2 = builder.addInner(5, num_islots=2)\n out1 = builder.addOutput()\n out2 = builder.addOutput()\n \n builder.addDirectedLink(in1, enc1)\n builder.addDirectedLink(in2, enc2, islot=0)\n builder.addDirectedLink(enc1, enc2, islot=1)\n builder.addDirectedLink(enc1, out1)\n builder.addDirectedLink(enc2, out2)\n \n builder.build()", "def _get_structure(self, blocks: np.array) -> Structure:\n\n return Structure(\n {\n \"DataVersion\": tag.Int(2730), # 1.17.1\n \"size\": [tag.Int(x) for x in blocks.shape],\n \"palette\": [\n {\"Name\": \"minecraft:air\"},\n {\"Name\": \"minecraft:black_concrete\"},\n {\"Name\": \"minecraft:red_concrete\"},\n 
{\"Name\": \"minecraft:white_concrete\"},\n ],\n \"blocks\": [\n {\"pos\": coords, \"state\": tag.Int(block)}\n for coords, block in np.ndenumerate(blocks)\n ],\n }\n )", "def build_network(self):\n\n input_placeholder = Input(shape = self.input_shape)\n\n # Stage 1\n x = self.main_path_block(\n input_placeholder,\n 64, (7, 7), 'same',\n 'conv1', 'bn_conv1',\n activation = 'relu',\n strides = (2, 2)\n )\n x = MaxPooling2D((3, 3), strides = (2, 2), padding = 'same')(x)\n\n # Stage 2\n x = self.identity_block(x, 64, 'relu', 2, 'a', False)\n x = self.identity_block(x, 64, 'relu', 2, 'b')\n\n # Stage 3\n x = self.convolutional_block(x, [128, 128, 128], 'relu', 3, 'a')\n x = self.identity_block(x, 128, 'relu', 3, 'b')\n\n # Stage 4\n x = self.convolutional_block(x, [256, 256, 256], 'relu', 4, 'a')\n x = self.identity_block(x, 256, 'relu', 4, 'b')\n\n # Stage 5\n x = self.convolutional_block(x, [512, 512, 512], 'relu', 5, 'a')\n x = self.identity_block(x, 512, 'relu', 4, 'b')\n\n # Fully Connected Layers\n x = BatchNormalization(axis = 3)(x)\n x = Activation('relu')(x)\n x = AveragePooling2D((2, 1), padding = 'valid', strides = (2, 2))(x)\n x = Flatten()(x)\n x = Dense(512)\n x = Dense(\n self.classes, activation = 'softmax',\n name = 'fc_' + str(self.classes),\n kernel_initializer = glorot_uniform(seed = 0)\n )(x)\n\n self.model = Model(input_placeholder, x, name = 'Resnet18')", "def crystal17_gui_string(structdata, symmdata, fractional_ops=True):\n\n dimensionality = 3 if structdata[\"pbc\"] is True else sum(structdata[\"pbc\"])\n atomic_numbers = structdata[\"atomic_numbers\"]\n ccoords = structdata[\"ccoords\"]\n lattice = structdata[\"lattice\"]\n\n crystal_type = symmdata[\"crystal_type\"]\n origin_setting = symmdata[\"centring_code\"]\n sg_num = symmdata[\"space_group\"]\n symops = symmdata[\"operations\"]\n\n if fractional_ops:\n symops = ops_frac_to_cart(symops, lattice)\n\n # sort the symmetry operations (useful to standardize for testing)\n # symops = np.sort(symops, axis=0)\n\n num_symops = len(symops)\n sym_lines = []\n for symop in symops:\n sym_lines.append(symop[0:3])\n sym_lines.append(symop[3:6])\n sym_lines.append(symop[6:9])\n sym_lines.append(symop[9:12])\n\n # for all output numbers, we round to 9 dp and add 0, so we don't get -0.0\n\n geom_str_list = []\n geom_str_list.append(\"{0} {1} {2}\".format(dimensionality, origin_setting,\n crystal_type))\n geom_str_list.append(\"{0:17.9E} {1:17.9E} {2:17.9E}\".format(*(\n np.round(lattice[0], 9) + 0.)))\n geom_str_list.append(\"{0:17.9E} {1:17.9E} {2:17.9E}\".format(*(\n np.round(lattice[1], 9) + 0.)))\n geom_str_list.append(\"{0:17.9E} {1:17.9E} {2:17.9E}\".format(*(\n np.round(lattice[2], 9) + 0.)))\n geom_str_list.append(str(num_symops))\n for sym_line in sym_lines:\n geom_str_list.append(\"{0:17.9E} {1:17.9E} {2:17.9E}\".format(*(\n np.round(sym_line, 9) + 0.)))\n geom_str_list.append(str(len(atomic_numbers)))\n for anum, coord in zip(atomic_numbers, ccoords):\n geom_str_list.append(\"{0:3} {1:17.9E} {2:17.9E} {3:17.9E}\".format(\n anum, *(np.round(coord, 10) + 0.)))\n\n geom_str_list.append(\"{0} {1}\".format(sg_num, num_symops))\n geom_str_list.append(\"\")\n\n return \"\\n\".join(geom_str_list)", "def addControl(ham: Dict[str, Any], name: str, onQubits: Union[int, List[int]] = None,\n matrices: Union[numpy.ndarray, List[numpy.ndarray]] = None) -> None:\n\n sysLevel = ham[\"circuit\"][\"sys_level\"]\n qubitNum = ham[\"circuit\"][\"qubits\"]\n\n if onQubits is None:\n # Input the complete matrices directly.\n if 
isinstance(sysLevel, int):\n dim = sysLevel ** qubitNum\n elif isinstance(sysLevel, list):\n dim = 1\n for i in sysLevel:\n dim = dim * i\n assert numpy.shape(matrices) == (dim, dim), \"Dimension does not match.\"\n matrices = [matrices]\n else:\n # Add the matrix on exact qubit\n if isinstance(onQubits, int):\n assert onQubits < qubitNum, \"Qubit index is out of range.\"\n onQubits = [onQubits]\n if isinstance(matrices, numpy.ndarray):\n matrices = [matrices]\n elif isinstance(onQubits, list):\n maxQubitId = max(onQubits)\n assert maxQubitId < qubitNum, \"Qubit index is out of range.\"\n else:\n assert False, \"Variable onQubits should be a list of int.\"\n\n # Check whether the name exists\n assert name not in (ham[\"control\"]).keys(), f\"Control term name ({name}) already existed.\"\n\n term = {\n \"on_qubits\": onQubits,\n \"matrices\": matrices,\n \"waveforms\": []\n }\n\n ham[\"control\"][name] = term", "def test_decomposed_operator_correct_wires(self, obs, expected):\n dev = qml.device('orquestra.qulacs', wires=3)\n\n res = dev.serialize_operator(obs)\n assert res == expected", "def to_swc(self, contributors=\"\"):\n from . import __version__\n sx, sy, sz = np.diag(self.transform)[:3]\n\n swc_header = f\"\"\"# ORIGINAL_SOURCE CloudVolume {__version__}\n# CREATURE \n# REGION\n# FIELD/LAYER\n# TYPE\n# CONTRIBUTOR {contributors}\n# REFERENCE\n# RAW \n# EXTRAS \n# SOMA_AREA\n# SHINKAGE_CORRECTION \n# VERSION_NUMBER {__version__}\n# VERSION_DATE {datetime.datetime.utcnow().isoformat()}\n# SCALE {sx:.6f} {sy:.6f} {sz:.6f}\n\"\"\"\n\n def generate_swc(skel, offset):\n if skel.edges.size == 0:\n return \"\"\n\n index = defaultdict(set)\n visited = defaultdict(bool)\n for e1, e2 in skel.edges:\n index[e1].add(e2)\n index[e2].add(e1)\n\n stack = [ skel.edges[0,0] ]\n parents = [ -1 ]\n\n swc = \"\"\n\n while stack:\n node = stack.pop()\n parent = parents.pop()\n\n if visited[node]:\n continue\n\n swc += \"{n} {T} {x:0.6f} {y:0.6f} {z:0.6f} {R:0.6f} {P}\\n\".format(\n n=(node + 1 + offset),\n T=skel.vertex_types[node],\n x=skel.vertices[node][0],\n y=skel.vertices[node][1],\n z=skel.vertices[node][2],\n R=skel.radii[node],\n P=parent if parent == -1 else (parent + 1 + offset),\n )\n\n visited[node] = True\n \n for child in index[node]:\n stack.append(child)\n parents.append(node)\n\n return swc\n\n skels = self.components()\n\n swc = swc_header + \"\\n\"\n offset = 0\n for skel in skels:\n swc += generate_swc(skel, offset) + \"\\n\"\n offset += skel.vertices.shape[0]\n\n return swc", "def conj(q_1: Q, conj_type: int = 0) -> Q:\n\n end_q_type = f\"{q_1.q_type}*\"\n c_t, c_x, c_y, c_z = q_1.t, q_1.x, q_1.y, q_1.z\n cq = Q()\n\n if conj_type % 4 == 0:\n cq.t = c_t\n if c_x != 0:\n cq.x = -1 * c_x\n if c_y != 0:\n cq.y = -1 * c_y\n if c_z != 0:\n cq.z = -1 * c_z\n\n elif conj_type % 4 == 1:\n if c_t != 0:\n cq.t = -1 * c_t\n cq.x = c_x\n if c_y != 0:\n cq.y = -1 * c_y\n if c_z != 0:\n cq.z = -1 * c_z\n end_q_type += \"1\"\n\n elif conj_type % 4 == 2:\n if c_t != 0:\n cq.t = -1 * c_t\n if c_x != 0:\n cq.x = -1 * c_x\n cq.y = c_y\n if c_z != 0:\n cq.z = -1 * c_z\n end_q_type += \"2\"\n\n elif conj_type % 4 == 3:\n if c_t != 0:\n cq.t = -1 * c_t\n if c_x != 0:\n cq.x = -1 * c_x\n if c_y != 0:\n cq.y = -1 * c_y\n cq.z = c_z\n end_q_type += \"3\"\n\n cq.q_type = end_q_type\n cq.representation = q_1.representation\n\n return cq", "def build_macrocomplex(directory, output, max_chains=300, num_models=1, template=False, dirty=False, verbose=False, stech_string=False):\n print(\"Program is running, please 
wait...\")\n # Reads and stores pdb objects in a list\n in_pdbmodels = read_pdbs(directory, verbose)\n # Unifies all ids by sequence, updates the pdb list with new chain ids and returns a sequence dictionary: {seq: id,}\n seq_dict = unify_ids(in_pdbmodels, verbose)\n # Checks each pdb object for chain interactions and stores it in a dictionary of dictionaries:\n # {\n # Chain1_id : { residues_tuple_1_to_2 : chain1_object, chain2_object, residues_tuple_2_to_1}\n # Chain2_id : {residues_tuple_2_to_1 : chain2_object, chain1_object, residues_tuple_1_to_2}\n # ...\n # }\n interaction_dict = get_interaction_dict(in_pdbmodels, verbose=verbose)\n # Changes interaction_dict chain objects to CustomChain instances and adds the interactions to each instance\n update_interactions_dict(interaction_dict, verbose)\n stech_dict = {}\n # If a template or a string has been given to set Stoichometry, it generates a dictionary of it\n # { \"A\":5, \"B\":2, \"C\":6, .. }\n if template:\n stech_dict = get_template_stech_dict(template, seq_dict, verbose=verbose)\n elif stech_string:\n stech_dict = get_string_stech_dict(stech_string)\n # Starts iterating the interaction pair with more known interactions and generates the model/s\n out_pdbmodels = main_loop(num_models, output, interaction_dict, verbose, max_chains, dirty, stech_dict=stech_dict)\n # Saves the model/s to ciff format\n save_results(out_pdbmodels, output)", "def __init__(self,shape,layer_type='input',\\\r\n dtype=None,nsamp=None, name=None,\\\r\n layer_class_name='Layer', damp_neg=0.): \r\n if layer_type not in ['input', 'output', 'middle']:\r\n raise ValueError('Unknown layer type %s' % layer_type)\r\n self.layer_type = layer_type\r\n\r\n self.shape = shape \r\n self.nsamp = nsamp\r\n if (dtype == None):\r\n if (self.layer_type == 'input'):\r\n dtype = np.float\r\n else:\r\n dtype = [np.float, np.float]\r\n self.dtype = dtype\r\n \r\n # Set layer name\r\n if name is None:\r\n self.name = ('Layer %d' % SELayer.glob_layer_num )\r\n else:\r\n self.name = name\r\n SELayer.glob_layer_num += 1\r\n self.layer_class_name = layer_class_name\r\n \r\n # Other parameters\r\n self.pp_var_min = 1e-10\r\n self.damp_neg = damp_neg\r\n self.gam0_neg_last = None\r\n self.tau_last = None", "def optimised_structure(self):\n\n with open(f'gj_{self.molecule.name}.log', 'r') as log_file:\n\n lines = log_file.readlines()\n\n opt_coords_pos = []\n for pos, line in enumerate(lines):\n if 'Input orientation' in line:\n opt_coords_pos.append(pos + 5)\n\n start_pos = opt_coords_pos[-1]\n\n num_atoms = len(self.molecule.molecule['input'])\n\n opt_struct = []\n\n for pos, line in enumerate(lines[start_pos: start_pos + num_atoms]):\n\n vals = line.split()[-3:]\n vals = [self.molecule.molecule['input'][pos][0]] + [float(i) for i in vals]\n opt_struct.append(vals)\n\n return opt_struct", "def build_net(self, inputs):\n with tf.variable_scope(self._scope, self._scope, [inputs]) as sc:\n end_points_collection = sc.name + '_end_points'\n\n with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.batch_norm],\n outputs_collections=end_points_collection):\n net = slim.conv2d(inputs, 32, 3, 1, scope='conv1')\n net = slim.conv2d(net, 32, 3, 1, scope='conv2')\n\n net = slim.conv2d(net, 64, 3, 1, scope='conv3')\n net = slim.conv2d(net, 64, 3, 1, scope='conv4')\n\n net = slim.max_pool2d(net, 2, 2, scope='pool1')\n\n net = slim.conv2d(net, 128, 3, 1, scope='conv5')\n net = slim.conv2d(net, 128, 3, 1, scope='conv6')\n\n net = slim.max_pool2d(net, 2, 2, scope='pool2')\n\n net = slim.conv2d(net, 
256, 3, scope='conv7')\n net = slim.conv2d(net, 256, 3, scope='conv8')\n\n net = slim.max_pool2d(net, 2, [2, 1], scope='pool3')\n\n net = slim.conv2d(net, 512, 3, scope='conv9')\n net = slim.conv2d(net, 512, 3, scope='conv10')\n\n net = slim.max_pool2d(net, 2, [1, 1], scope='pool4')\n\n net = slim.conv2d(net, 512, 2, padding='VALID', scope='conv11')\n\n net = slim.dropout(net, keep_prob=0.5)\n\n self.end_points = utils.convert_collection_to_dict(end_points_collection)\n self.net = net", "def EncodeDataWithCoupling(data,codingParams):\n # NEW: Determine block type and set a,b\n if(codingParams.blocksize < 2):\n b = codingParams.longBlockSize/2\n else:\n b = codingParams.shortBlockSize/2\n if(codingParams.blocksize == 1 or codingParams.blocksize == 2):\n a = codingParams.shortBlockSize/2\n else:\n a = codingParams.longBlockSize/2\n N = a+b\n halfN = N/2\n #print \"A: \", a\n #print \"B: \", b\n #print \"halfN: \", halfN\n\n # Reclaim nScaleBits from bands with 0 lines\n # vary bark width of bands\n # pass different flimits to AssignMDCTLines...\n\n nScaleBits = codingParams.nScaleBits\n maxMantBits = (1<<codingParams.nMantSizeBits) # 1 isn't an allowed bit allocation so n size bits counts up to 2^n\n if maxMantBits>16: maxMantBits = 16 # to make sure we don't ever overflow mantissa holders\n # vectorizing the Mantissa function call\n# vMantissa = np.vectorize(Mantissa)\n sfBands = codingParams.sfBands\n # db print \"Encode coupling: \", sfBands.nLines\n # NEW compute target bit rate based on block type\n bitBudget = codingParams.targetBitsPerSample * halfN # this is overall target bit rate\n #bitBudget -= nScaleBits*(sfBands.nBands + 1) # less scale factor bits (including overall scale factor)\n #bitBudget -= codingParams.nMantSizeBits*sfBands.nBands # less mantissa bit allocation bits\n #bitBudget -= 2 # block ID size TODO: make this a variable\n mdctLinesFull = []\n for iCh in range(codingParams.nChannels):\n if codingParams.doSBR == True:\n # Calculate Spectral Envelope based on original signal\n specEnv = calcSpecEnv(data[iCh],codingParams.sbrCutoff,codingParams.sampleRate)\n # Append in spectral envelope for this channel into empty container\n codingParams.specEnv[iCh][:] = specEnv\n\n #Decimate and lowpass signal by factor determined by cutoff frequency\n doDecimate = False\n if doDecimate==True:\n Wc = codingParams.sbrCutoff/float(codingParams.sampleRate/2.)# Normalized cutoff frequency\n B,A = signal.butter(4,Wn)\n data[iCh] = signal.lfilter(B,A,data[iCh])\n\n # window data for side chain FFT and also window and compute MDCT\n timeSamples = data[iCh]\n # Window data based on block size\n mdctTimeSamples = np.append(SineWindow(np.append(timeSamples[:a],np.zeros(a)))[:a],SineWindow(np.append(np.zeros(b),timeSamples[a:]))[b:])\n # Call MDCT with a, b reflecting block size\n mdctLines = MDCT(mdctTimeSamples, a, b)\n\n # compute overall scale factor for this block and boost mdctLines using it\n maxLine = np.max( np.abs(mdctLines) )\n overallScale = ScaleFactor(maxLine,nScaleBits) #leading zeroaes don't depend on nMantBits\n mdctLines *= (1<<overallScale)\n mdctLinesFull.append(mdctLines)\n\n uncoupledData, coupledChannel, couplingParams = ChannelCoupling(mdctLinesFull,codingParams.sampleRate,codingParams.nCouplingStart)\n codingParams.couplingParams = couplingParams\n codingParams.coupledChannel = coupledChannel\n mdctLinesFull = uncoupledData\n\n scaleFactorFull= []\n bitAllocFull = []\n mantissaFull = []\n overallScaleFull = []\n for iCh in range(codingParams.nChannels):\n # compute the 
mantissa bit allocations\n # compute SMRs in side chain FFT\n SMRs = CalcSMRs(timeSamples, mdctLines, overallScale, codingParams.sampleRate, sfBands)\n if codingParams.doSBR == True:\n # Critical band starting here are above cutoff\n cutBin = freqToBand(codingParams.sbrCutoff)\n # perform bit allocation using SMR results\n bitAlloc = BitAllocSBR(bitBudget, maxMantBits, sfBands.nBands, sfBands.nLines, SMRs, codingParams.bitReservoir, codingParams.blocksize, cutBin)\n else:\n bitAlloc = BitAlloc(bitBudget, maxMantBits, sfBands.nBands, sfBands.nLines, SMRs, codingParams.bitReservoir, codingParams.blocksize)\n codingParams.bitReservoir += bitBudget - np.sum(bitAlloc * sfBands.nLines)\n # db print \"blocksize: \", codingParams.blocksize\n # db print \"Bit Reservoir: \", codingParams.bitReservoir\n # db if codingParams.blocksize == 2:\n # db print bitAlloc\n # given the bit allocations, quantize the mdct lines in each band\n scaleFactor = np.empty(sfBands.nBands,dtype=np.int32)\n nMant = halfN\n\n for iBand in range(sfBands.nBands):\n if not bitAlloc[iBand]: nMant-= sfBands.nLines[iBand] # account for mantissas not being transmitted\n mantissa=np.empty(nMant,dtype=np.int32)\n iMant=0\n for iBand in range(sfBands.nBands):\n lowLine = sfBands.lowerLine[iBand]\n highLine = sfBands.upperLine[iBand] + 1 # extra value is because slices don't include last value\n nLines= sfBands.nLines[iBand]\n if(highLine - lowLine > 0):\n scaleLine = np.max(np.abs( mdctLines[lowLine:highLine] ) )\n else:\n scaleLine = abs(mdctLines[lowLine])\n scaleFactor[iBand] = ScaleFactor(scaleLine, nScaleBits, bitAlloc[iBand])\n if bitAlloc[iBand]:\n mantissa[iMant:iMant+nLines] = vMantissa(mdctLines[lowLine:highLine],scaleFactor[iBand], nScaleBits, bitAlloc[iBand])\n iMant += nLines\n # end of loop over scale factor bands\n scaleFactorFull.append(scaleFactor)\n bitAllocFull.append(bitAlloc)\n mantissaFull.append(mantissa)\n overallScaleFull.append(overallScale)\n # return results\n return (scaleFactorFull, bitAllocFull, mantissaFull, overallScaleFull)", "def self_defined_noisy_circuit() -> 'QEnv':\n # Create environment\n env = QEnv()\n # Choose backend Baidu local simulator\n env.backend(BackendName.LocalBaiduSim2)\n\n # Number of qubits, no larger than 20 \n num_qubit = 13\n # Number of gates in each for loop\n gate_num = 3 # Depth of circuit = num_qubit * gate_num\n\n assert num_qubit > 2\n assert gate_num > 2\n\n # Initialize a QCompute circuit\n q = env.Q.createList(num_qubit)\n\n # A noisy random H + CX + RX circuit\n for i in range(num_qubit - 1):\n H(q[i])\n CX(q[i], q[i + 1])\n # Random rotation angles\n rotation_list = [random.uniform(0, 6.28) for _ in range(gate_num - 2)]\n # random quantum registers\n qreg_list = [random.randint(0, num_qubit - 1) for _ in range(gate_num - 2)]\n for i in range(gate_num - 2):\n RX(rotation_list[i])(q[qreg_list[i]])\n\n # Measure with the computational basis\n MeasureZ(*env.Q.toListPair())\n\n # Define noise instances \n # Define a Bit Flip noise instance\n bfobj = BitFlip(0.1)\n # Define a 2-qubit Depolarizing noise instance\n dpobj = Depolarizing(2, 0.1)\n\n # Add noises\n env.noise(['H', 'RX'], [bfobj])\n env.noise(['CX'], [dpobj])\n\n return env", "def r_5(comps: 'list(Compound)', is_input):\r\n react: str\r\n\r\n if is_input:\r\n iSaNo = Compound(\"K2SO4\")\r\n iAc = Compound(\"H2SO4\")\r\n if len(comps) == 1:\r\n if \"iSa\" in comps[0].comp_type:\r\n iSaNo = comps[0]\r\n (_, (an, an_oxs)) = iSa_oxs(iSaNo.formula)\r\n iAc = Compound(iAc_create(an, an_oxs))\r\n else:\r\n iAc = 
comps[0]\r\n (an, an_oxs) = iAc_oxs(iAc.formula)\r\n iSaNo = Compound(iSaNo_create(\"K\", 1, an, an_oxs))\r\n else:\r\n if \"iSa\" in comps[0].comp_type:\r\n iSaNo = comps[0]\r\n iAc = comps[1]\r\n else:\r\n iSaNo = comps[1]\r\n iAc = comps[0]\r\n\r\n ((me, me_oxs), (an1, an1_oxs)) = iSa_oxs(iSaNo.formula)\r\n (an2, an2_oxs) = iAc_oxs(iAc.formula)\r\n if an1 != an2:\r\n return \"\"\r\n if an1_oxs == 1:\r\n return \"\"\r\n\r\n iSaAc = Compound(iSaAc_create(me, me_oxs, an1, an1_oxs))\r\n\r\n react = f\"{iSaNo} + {iAc} -> {iSaAc}\"\r\n else:\r\n iSaAc = comps[0]\r\n\r\n ((me, me_oxs), (an, an_oxs)) = iSa_oxs(iSaAc.formula)\r\n if an_oxs == 1:\r\n return \"\"\r\n\r\n iSaNo = Compound(iSaNo_create(me, me_oxs, an, an_oxs))\r\n iAc = Compound(iAc_create(an, an_oxs))\r\n\r\n react = f\"{iSaNo} + {iAc} -> {iSaAc}\"\r\n\r\n return Reaction(react)", "def layer(i):\n with tf.name_scope('layer_{}'.format(i)):\n # displacement gate\n Dgate(tf.clip_by_value(d_r[i], -disp_clip, disp_clip), d_phi[i]) | q[0]\n # rotation gate\n Rgate(r1[i]) | q[0]\n # squeeze gate\n Sgate(tf.clip_by_value(sq_r[i], -sq_clip, sq_clip), sq_phi[i]) | q[0]\n # rotation gate\n Rgate(r2[i]) | q[0]\n # Kerr gate\n Kgate(tf.clip_by_value(kappa1[i], -kerr_clip, kerr_clip)) | q[0]", "def simulation():\n\n return {\n \"type\": \"class\",\n \"base\": \"iso.process_step\",\n \"is_abstract\": False,\n \"is_document\": True,\n \"pstr\": (\"({}/{}/{})\", (\"used\", \"ran_for_experiments\", \"ensemble_id\")),\n \"properties\": [\n (\n \"part_of_project\",\n \"linked_to(designing.project)\",\n \"1.N\",\n \"Project or projects for which simulation was run\",\n ),\n (\n \"ran_for_experiments\",\n \"linked_to(designing.numerical_experiment)\",\n \"1.N\",\n \"One or more experiments with which the simulation is \"\n \"associated\",\n ),\n (\n \"sub_experiment\",\n \"linked_to(designing.numerical_experiment)\",\n \"0.1\",\n \"For start-date ensembles, this will indicate the beginning \"\n \"year; for offline models driven by output from another \"\n \"model, this will provide the source_id and variant_label \"\n \"for the 'driving' model.\",\n ),\n (\n \"used\",\n \"linked_to(science.model)\",\n \"1.1\",\n \"The model used to run the simulation\",\n ),\n (\n \"primary_ensemble\",\n \"linked_to(activity.ensemble)\",\n \"0.1\",\n \"Primary Ensemble (ensemble for which this simulation was \"\n \"first run).\",\n ),\n (\n \"institution\",\n \"linked_to(shared.party)\",\n \"0.1\",\n \"institution which carried out the simulation\",\n ),\n (\n \"parent_of\",\n \"linked_to(activity.child_simulation)\",\n \"0.N\",\n \"If appropriate, links to simulations which branched from \"\n \"this one\",\n ),\n (\n \"produced\",\n \"linked_to(data.dataset)\",\n \"0.N\",\n \"Products of the simulation\",\n ),\n (\n \"had_performance\",\n \"linked_to(platform.performance)\",\n \"0.1\",\n \"Performance of the simulation.\",\n ),\n (\n \"ran_on\",\n \"linked_to(platform.machine)\",\n \"0.1\",\n \"The machine on which the simulation was run.\",\n ),\n (\n \"errata\",\n \"shared.online_resource\",\n \"0.1\",\n \"Link to errata associated with this simulation.\",\n ),\n (\n \"ensemble_id\",\n \"activity.axis_member\",\n \"0.N\",\n \"Identification within ensemble axes via axis member. \"\n \"(Multiple axis members within a simulation cannot share the \"\n \"same ensemble_axis.) 
(There must be an axis_member instance \"\n \"for each ensemble axis in a parent ensemble.)\",\n ),\n # Time\n (\n \"start_time\",\n \"time.date_time\",\n \"0.1\",\n \"The start date-time of the simulation. e.g. \"\n \"2012-04-01 00:00:00\",\n ),\n (\n \"end_time\",\n \"time.date_time\",\n \"0.1\",\n \"The end date-time of the simulation. e.g. \"\n \"2087-11-30 12:00:00\",\n ),\n (\n \"calendar\",\n \"time.calendar\",\n \"0.1\",\n \"The calendar used in the simulation\",\n ),\n # Further Info URL\n (\n \"documentation\",\n \"shared.online_resource\",\n \"0.1\",\n \"On-line location of additional documentation\",\n ),\n # Extra attributes\n (\n \"extra_attributes\",\n \"shared.extra_attribute\",\n \"0.N\",\n \"Additional attributes provided with simulation.\",\n ),\n ],\n \"constraints\": [\n (\"cardinality\", \"rationale\", \"0.0\"),\n ],\n }", "def makeBinaryChains():\n\t\n\t# retrieve the binding partner specifications\n\t(maxsize,types) = getTypes()\n\t\n\t# Do some basic argument checking for this model\n\tif (len(types) < 2):\n\t\tprint \"Number of defined types must equal two for binary chain calculations.\"\n\t\treturn\n\tif (maxsize == 0):\n\t\tprint \"Must specify a valid maximum number for one or more components.\"\n\t\treturn\n\n\tallChains = []\n\tnewChainsA = [[]]\n\tnewChainsB = []\n\t\n\ttypeA = types[0]\n\ttypeB = types[1]\n\t\n\t# start the chain with a single type A component\n\taddComponent(newChainsA[0],typeA,0,0)\n\n\tdepth = 0\n\tfor n in range(maxsize):\n\t\tdepth+=1\n\t\t\n\t\t# go through all the chains created last iteration and append B components\n\t\tnewChainsB = []\n\t\tfor thisChain in newChainsA:\n\n\t\t\t# get a list of new available sites in the provided chain\n\t\t\t# by setting depth -1, we will only add to components added last round\n\t\t\topenSites = makeSiteList(thisChain,typeB,depth-1)\n\t\t\t\n\t\t\t# make all the descendants from the current chain and append them to the pool\n\t\t\tif (n == 0) and (typeA['sym']): #if the starting binder is symmetric, no need to start chains at all its sites\n\t\t\t\tnewChainsB = newChainsB + fillSites(openSites,thisChain,typeB,-1)\n\t\t\telse:\n\t\t\t\tnewChainsB = newChainsB + fillSites(openSites,thisChain,typeB,depth)\n\t\t\n\t\tprint('n:'+str(n)+', '+str(len(newChainsB))+ ' chains created at depth '+str(depth))\n\t\t\n\t\tallChains = allChains + newChainsB\n\t\t\n\t\tdepth+=1\n\t\t\n\t\t# add an additional component to all the previously modified chains\n\t\tnewChainsA = []\n\t\tfor thisChain in newChainsB:\n\n\t\t\topenSites = makeSiteList(thisChain,typeA,depth-1)\n\t\t\tnewChainsA = newChainsA + fillSites(openSites,thisChain,typeA,depth)\n\t\t\t\n\t\tprint('n:'+str(n)+', '+str(len(newChainsA))+ ' chains created at depth '+str(depth))\n\t\t\n\t\tallChains = allChains + newChainsA\n\n\treturn allChains", "def build(self, inputs_shape):\n if inputs_shape[-1] is None:\n raise ValueError(\n \"Expected inputs.shape[-1] to be known, saw shape: %s\"\n % str(inputs_shape)\n )\n _check_supported_dtypes(self.dtype)\n d = inputs_shape[-1]\n h = self._real_units\n s = self._slots\n\n self._basic_build(inputs_shape)\n\n self._writing_W = self.add_variable(\n name=\"_writing_W\", shape=[d + h, h], initializer=self._kernel_initializer\n )\n self._writing_b = self.add_variable(\n name=\"_writing_b\",\n shape=[h],\n initializer=(\n self._bias_initializer\n if self._bias_initializer is not None\n else init_ops.constant_initializer(1.0, dtype=self.dtype)\n ),\n )\n self._writing_W02 = self.add_variable(\n name=\"_writing_W02\", 
shape=[h, s], initializer=self._kernel_initializer\n )\n\n self.parameter_set = [\n self._erase_W,\n self._erase_b,\n self._reset_W,\n self._reset_b,\n self._add_W,\n self._add_b,\n self.heads,\n self._writing_W,\n self._writing_W02,\n self._writing_b,\n ]\n\n self.built = True", "def __init__(self, workplane, measures):\n\n cq.Workplane.bracket = utilities.bracket\n cq.Workplane.transformedWorkplane = utilities.transformedWorkplane\n cq.Workplane.bolt = utilities.bolt\n cq.Workplane.cutEachAdaptive = utilities.cutEachAdaptive\n\n self.model = workplane\n self.debug = False\n self.measures = measures\n m = self.measures\n\n # The bracket lengths are measured at the outside, but the construction actually uses a \n # central cuboid block with two attached brackets. Adapting the measures accordingly.\n m.center_block = Measures(\n # Naming is as seen from the horizontal leg.\n width = max(m.horizontal_leg.width, m.vertical_leg.width),\n depth = m.vertical_leg.height,\n height = m.horizontal_leg.height\n )\n m.horizontal_leg.depth -= m.center_block.depth\n m.vertical_leg.depth -= m.center_block.height\n\n # Create hole specs which combine the other hole measures in the format expected by bolthole().\n m.horizontal_leg.hole_specs = [\n {\n \"diameter\": m.horizontal_leg.hole_diameters[i] if isinstance(m.horizontal_leg.hole_diameters, list) else m.horizontal_leg.hole_diameters,\n \"clamp_length\": m.horizontal_leg.clamp_lengths[i] if isinstance(m.horizontal_leg.clamp_lengths, list) else m.horizontal_leg.clamp_lengths, \n \"nuthole_size\": m.horizontal_leg.nuthole_sizes[i] if isinstance(m.horizontal_leg.nuthole_sizes, list) else m.horizontal_leg.nuthole_sizes, \n \"nuthole_depth\": 1.1 * m.vertical_leg.depth # Just choose something large enough for cutting. \n }\n for i in range(m.horizontal_leg.hole_count)\n ]\n m.vertical_leg.hole_specs = [\n {\n \"diameter\": m.vertical_leg.hole_diameters[i] if isinstance(m.vertical_leg.hole_diameters, list) else m.vertical_leg.hole_diameters,\n \"clamp_length\": m.vertical_leg.clamp_lengths[i] if isinstance(m.vertical_leg.clamp_lengths, list) else m.vertical_leg.clamp_lengths, \n \"nuthole_size\": m.vertical_leg.nuthole_sizes[i] if isinstance(m.vertical_leg.nuthole_sizes, list) else m.vertical_leg.nuthole_sizes, \n \"nuthole_depth\": 1.1 * m.horizontal_leg.depth # Just choose something large enough for cutting. 
\n }\n for i in range(m.vertical_leg.hole_count)\n ]\n\n # TODO: Initialize missing measures with defaults.\n\n self.build()", "def get_operator(self):\n\n Operator = []\n\n '''\n print('Create H - 150 & 220 GHz')\n ope=[]\n for i in range(self.nfreqs):\n ope.append(self.H150.operands[i])\n for i in range(self.nfreqs):\n ope.append(self.H220.operands[i])\n self.Hboth = BlockRowOperator(ope, new_axisin=0)\n self.H=self.Hboth\n '''\n\n\n\n H_qubic = self.qubic.get_operator()\n R_qubic = ReshapeOperator(H_qubic.shapeout, H_qubic.shape[0])\n Operator.append(R_qubic(H_qubic))\n\n H_planck = self.planck.get_operator()\n R_planck = ReshapeOperator(H_planck.shapeout, H_planck.shape[0])\n Operator.append(R_planck(H_planck))\n return BlockColumnOperator(Operator, axisout=0)", "def render_circuit(self,\n circuit: ACircuit,\n map_param_kid: dict = None,\n shift: int = 0,\n recursive: bool = False,\n precision: float = 1e-6,\n nsimplify: bool = True):\n if not isinstance(circuit, Circuit):\n variables = circuit.get_variables(map_param_kid)\n description = format_parameters(variables, precision, nsimplify)\n self.append_circuit([p + shift for p in range(circuit.m)], circuit, description)\n\n if circuit.is_composite() and circuit.ncomponents() > 0:\n for _, (r, c) in enumerate(circuit._components):\n shiftr = [p+shift for p in r]\n if c.is_composite() and c._components:\n if recursive:\n self.open_subblock(shiftr, c.name, self.get_circuit_size(c, recursive=True), c._color)\n self.render_circuit(c, shift=shiftr[0], map_param_kid=map_param_kid,\n precision=precision, nsimplify=nsimplify)\n self.close_subblock(shiftr)\n else:\n component_vars = c.get_variables(map_param_kid)\n description = format_parameters(component_vars, precision, nsimplify)\n self.append_subcircuit(shiftr, c, description)\n else:\n component_vars = c.get_variables(map_param_kid)\n description = format_parameters(component_vars, precision, nsimplify)\n self.append_circuit(shiftr, c, description)\n\n self.extend_pos(0, circuit.m - 1)", "def __init__(self, n = 1, m = 1, p = 1):\n \n #############################\n # Parameters\n #############################\n\n # Dimensions\n self.n = n \n self.m = m \n self.p = p\n \n # Labels\n self.name = 'ContinuousDynamicSystem'\n self.state_label = []\n self.input_label = []\n self.output_label = []\n \n # Units\n self.state_units = []\n self.input_units = []\n self.output_units = []\n \n # Default Label and units\n for i in range(n):\n self.state_label.append('State '+str(i))\n self.state_units.append('')\n for i in range(m):\n self.input_label.append('Input '+str(i))\n self.input_units.append('')\n for i in range(p):\n self.output_label.append('Output '+str(i))\n self.output_units.append('')\n \n # Default state and input domain\n self.x_ub = np.zeros(self.n) +10 # States Upper Bounds\n self.x_lb = np.zeros(self.n) -10 # States Lower Bounds\n self.u_ub = np.zeros(self.m) +1 # Control Upper Bounds\n self.u_lb = np.zeros(self.m) -1 # Control Lower Bounds\n \n # Default state and inputs values \n self.xbar = np.zeros(self.n)\n self.ubar = np.zeros(self.m)\n \n # Plot params\n self.domain = [ (-10,10) , (-10,10) , (-10,10) ]\n self.linestyle = 'o-'\n self.linestyle_plus = '--'\n self.linescolor = 'b'\n self.linescolor_plus = 'r'\n self.lines_plus = True # Bool to active second graphic outpout\n self.is_3d = False # Use 2d plot by default\n \n ################################\n # Variables\n ################################\n \n # Initial value for simulations\n self.x0 = np.zeros(self.n) \n \n # 
Result of last simulation\n self.traj = None\n \n # Cost function for evaluation\n # default is a quadratic cost function with diag Q and R matrices\n self.cost_function = costfunction.QuadraticCostFunction.from_sys(self)", "def build_simple_block(self, incoming_layer, names,\n num_filters, filter_size, stride, pad,\n use_bias=False, nonlin=rectify):\n net = []\n net.append((\n names[0],\n ConvLayer(incoming_layer, num_filters, filter_size, pad, stride,\n flip_filters=False, nonlinearity=None) if use_bias\n else ConvLayer(incoming_layer, num_filters, filter_size, stride, pad, b=None,\n flip_filters=False, nonlinearity=None)\n ))\n \n net.append((\n names[1],\n BatchNormLayer(net[-1][1])\n ))\n if nonlin is not None:\n net.append((\n names[2],\n NonlinearityLayer(net[-1][1], nonlinearity=nonlin)\n ))\n \n return dict(net), net[-1][0]", "def synthesize(inputs, full_trace, num_samples, extra_consts=[], group_results=False, config={}):\n\n # update synthesizer config\n example_trace = full_trace\n\n # apply inverse semantics to obtain symbolic output table and vis programs\n abstract_designs = VisDesign.inv_eval(example_trace) \n \n # sort pairs based on complexity of tables\n abstract_designs.sort(key=lambda x: len(x[0].values[0]) \n if not isinstance(x[0], (list,)) else sum([len(y.values[0]) for y in x[0]]))\n\n candidates = []\n for full_sym_data, chart in abstract_designs:\n\n # split case based on single layered chart or multi layered chart\n if not isinstance(full_sym_data, (list,)):\n # single-layer chart\n\n synthesizer = table_synthesizer.Synthesizer(config=config[\"grammar\"])\n\n if num_samples:\n sym_data = eval_utils.sample_symbolic_table(full_sym_data, num_samples)\n else:\n sym_data = full_sym_data\n\n print(\"==> table synthesis input:\")\n #print(sym_data.instantiate())\n\n candidate_progs = synthesizer.enumerative_synthesis(\n inputs, sym_data.instantiate(), \n max_prog_size=config[\"max_prog_size\"],\n time_limit_sec=config[\"time_limit_sec\"],\n solution_sketch_limit=config[\"solution_sketch_limit\"],\n solution_limit=config[\"solution_limit\"],\n disable_provenance_analysis=config[\"disable_provenance_analysis\"])\n\n for p in candidate_progs:\n\n output = p.eval(inputs).to_dict(orient=\"records\")\n\n field_mappings = synth_utils.align_table_schema(sym_data.values, output, find_all_alignments=True)\n assert(len(field_mappings) > 0)\n\n for field_mapping in field_mappings:\n vis_design = VisDesign(data=output, chart=copy.deepcopy(chart))\n vis_design.update_field_names(field_mapping)\n candidates.append((p.stmt_string(), vis_design))\n\n\n else:\n synthesizer = table_synthesizer.Synthesizer(config=config[\"grammar\"])\n \n sym_data = []\n for d in full_sym_data:\n if num_samples:\n sym_data.append(eval_utils.sample_symbolic_table(d, num_samples))\n else:\n sym_data.append(d)\n\n # multi-layer charts\n # layer_candidate_progs[i] contains all programs that transform inputs to output[i]\n # synthesize table transformation programs for each layer\n layer_candidate_progs = []\n for d in sym_data:\n\n print(\"==> table synthesis input:\")\n #print(d.instantiate())\n layer_candidate_progs.append(\n synthesizer.enumerative_synthesis(\n inputs, d.instantiate(), \n max_prog_size=config[\"max_prog_size\"], \n time_limit_sec=config[\"time_limit_sec\"],\n solution_sketch_limit=config[\"solution_sketch_limit\"],\n solution_limit=config[\"solution_limit\"],\n disable_provenance_analysis=config[\"disable_provenance_analysis\"]))\n \n # iterating over combinations for different layers\n 
layer_id_lists = [list(range(len(l))) for l in layer_candidate_progs]\n for layer_id_choices in itertools.product(*layer_id_lists):\n\n #layer_prog[i] is the transformation program for the i-th layer\n progs = [layer_candidate_progs[i][layer_id_choices[i]] for i in range(len(layer_id_choices))]\n\n # apply each program on inputs to get output table for each layer\n outputs = [p.eval(inputs).to_dict(orient=\"records\") for p in progs]\n\n all_field_mappings = [synth_utils.align_table_schema(sym_data[k].values, output, find_all_alignments=True) \n for k, output in enumerate(outputs)]\n\n mapping_id_lists = [list(range(len(l))) for l in all_field_mappings]\n for mapping_id_choices in itertools.product(*mapping_id_lists):\n\n field_mappings = [all_field_mappings[i][idx] for i, idx in enumerate(mapping_id_choices)]\n #print(field_mappings)\n\n if config[\"vis_backend\"] == \"vegalite\":\n vis_design = VisDesign(data=outputs, chart=copy.deepcopy(chart))\n vis_design.update_field_names(field_mappings)\n candidates.append(([p.stmt_string() for p in progs], vis_design))\n else:\n vis_design = MatplotlibChart(outputs,copy.deepcopy(chart))\n candidates.append(([p.stmt_string() for p in progs], vis_design.to_string_spec(field_mappings)))\n\n if len(candidates) > 0: break\n\n if group_results:\n return FalxEvalInterface.group_results(candidates)\n\n return candidates", "def __init__(self, factory, radii, heights, layers_lcs, transform_data,\n layers_physical_names, transfinite_r_data, transfinite_h_data,\n transfinite_phi_data, straight_boundary=None,\n layers_surfaces_names=None, surfaces_names=None,\n volumes_names=None):\n primitives = []\n k = 1 / 3.0 # inner quadrangle part of the first layer radius\n transfinite_types = [0, 0, 0, 1, 3]\n h_cnt = 0.0 # height counter\n if layers_lcs is None:\n layers_lcs = [[1 for _ in radii] for _ in heights]\n if surfaces_names is None:\n surfaces_names = [['NX', 'X', 'NY', 'Y', 'NZ', 'Z']]\n if layers_surfaces_names is None:\n layers_surfaces_names = [[0 for _ in radii] for _ in heights]\n if volumes_names is not None:\n new_layers_physical_names = [[volumes_names[x] for x in y]\n for y in layers_physical_names]\n layers_physical_names = new_layers_physical_names\n for i, h in enumerate(heights):\n c = radii[0] / math.sqrt(2.0)\n kc = k * radii[0] / math.sqrt(2.0)\n bottom_h = h_cnt # primitive bottom h\n top_h = h_cnt + h # primitive top h\n h_cnt += h\n if straight_boundary is None:\n # Core center\n primitives.append(Primitive(\n factory,\n [\n [kc, kc, bottom_h, layers_lcs[i][0]],\n [-kc, kc, bottom_h, layers_lcs[i][0]],\n [-kc, -kc, bottom_h, layers_lcs[i][0]],\n [kc, -kc, bottom_h, layers_lcs[i][0]],\n [kc, kc, top_h, layers_lcs[i][0]],\n [-kc, kc, top_h, layers_lcs[i][0]],\n [-kc, -kc, top_h, layers_lcs[i][0]],\n [kc, -kc, top_h, layers_lcs[i][0]]\n ],\n transform_data,\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [[], [], [], [], [], [], [], [], [], [], [], []],\n [\n transfinite_phi_data,\n transfinite_phi_data,\n transfinite_h_data[i]\n ],\n transfinite_types[0],\n layers_physical_names[i][0],\n surfaces_names=surfaces_names[layers_surfaces_names[i][0]]\n ))\n # Core X\n primitives.append(Primitive(\n factory,\n [\n [c, c, bottom_h, layers_lcs[i][0]],\n [kc, kc, bottom_h, layers_lcs[i][0]],\n [kc, -kc, bottom_h, layers_lcs[i][0]],\n [c, -c, bottom_h, layers_lcs[i][0]],\n [c, c, top_h, layers_lcs[i][0]],\n [kc, kc, top_h, layers_lcs[i][0]],\n [kc, -kc, top_h, layers_lcs[i][0]],\n [c, -c, top_h, layers_lcs[i][0]]\n ],\n transform_data,\n [0, 0, 0, 0, 1, 0, 
0, 1, 0, 0, 0, 0],\n [\n [], [], [], [],\n [[0, 0, bottom_h, 1]], [], [], [[0, 0, top_h, 1]],\n [], [], [], []\n ],\n [\n transfinite_r_data[0],\n transfinite_phi_data,\n transfinite_h_data[i]\n ],\n transfinite_types[1],\n layers_physical_names[i][0],\n surfaces_names=surfaces_names[layers_surfaces_names[i][0]]\n ))\n # Core Y\n primitives.append(Primitive(\n factory,\n [\n [c, c, bottom_h, layers_lcs[i][0]],\n [-c, c, bottom_h, layers_lcs[i][0]],\n [-kc, kc, bottom_h, layers_lcs[i][0]],\n [kc, kc, bottom_h, layers_lcs[i][0]],\n [c, c, top_h, layers_lcs[i][0]],\n [-c, c, top_h, layers_lcs[i][0]],\n [-kc, kc, top_h, layers_lcs[i][0]],\n [kc, kc, top_h, layers_lcs[i][0]]\n ],\n transform_data,\n [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [\n [[0, 0, bottom_h, 1]], [[0, 0, top_h, 1]], [], [],\n [], [], [], [],\n [], [], [], []\n ],\n [\n transfinite_phi_data,\n transfinite_r_data[0],\n transfinite_h_data[i],\n ],\n transfinite_types[2],\n layers_physical_names[i][0],\n surfaces_names=surfaces_names[layers_surfaces_names[i][0]]\n ))\n # Core NX\n if transfinite_r_data[0][\n 1] == 0: # If type is Progression then reverse coefficient\n rc = 1.0 / transfinite_r_data[0][2]\n else:\n rc = transfinite_r_data[0][2]\n primitives.append(Primitive(\n factory,\n [\n [-kc, kc, bottom_h, layers_lcs[i][0]],\n [-c, c, bottom_h, layers_lcs[i][0]],\n [-c, -c, bottom_h, layers_lcs[i][0]],\n [-kc, -kc, bottom_h, layers_lcs[i][0]],\n [-kc, kc, top_h, layers_lcs[i][0]],\n [-c, c, top_h, layers_lcs[i][0]],\n [-c, -c, top_h, layers_lcs[i][0]],\n [-kc, -kc, top_h, layers_lcs[i][0]]\n ],\n transform_data,\n [0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],\n [\n [], [], [], [],\n [], [[0, 0, bottom_h, 1]], [[0, 0, top_h, 1]], [],\n [], [], [], []\n ],\n [\n [transfinite_r_data[0][0], transfinite_r_data[0][1],\n rc],\n transfinite_phi_data,\n transfinite_h_data[i]\n ],\n transfinite_types[3],\n layers_physical_names[i][0],\n surfaces_names=surfaces_names[layers_surfaces_names[i][0]]\n ))\n # Core NY\n if transfinite_r_data[0][\n 1] == 0: # If type is Progression then reverse coefficient\n rc = 1.0 / transfinite_r_data[0][2]\n else:\n rc = transfinite_r_data[0][2]\n primitives.append(Primitive(\n factory,\n [\n [kc, -kc, bottom_h, layers_lcs[i][0]],\n [-kc, -kc, bottom_h, layers_lcs[i][0]],\n [-c, -c, bottom_h, layers_lcs[i][0]],\n [c, -c, bottom_h, layers_lcs[i][0]],\n [kc, -kc, top_h, layers_lcs[i][0]],\n [-kc, -kc, top_h, layers_lcs[i][0]],\n [-c, -c, top_h, layers_lcs[i][0]],\n [c, -c, top_h, layers_lcs[i][0]]\n ],\n transform_data,\n [0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n [\n [], [], [[0, 0, top_h, 1]], [[0, 0, bottom_h, 1]],\n [], [], [], [],\n [], [], [], []\n ],\n [\n transfinite_phi_data,\n [transfinite_r_data[0][0], transfinite_r_data[0][1],\n rc],\n transfinite_h_data[i],\n ],\n transfinite_types[4],\n layers_physical_names[i][0],\n surfaces_names=surfaces_names[layers_surfaces_names[i][0]]\n ))\n # Layers\n for j in range(1, len(radii)):\n c1 = radii[j - 1] / math.sqrt(2.0)\n c2 = radii[j] / math.sqrt(2.0)\n # Layer X\n primitives.append(Primitive(\n factory,\n [\n [c2, c2, bottom_h, layers_lcs[i][j]],\n [c1, c1, bottom_h, layers_lcs[i][j]],\n [c1, -c1, bottom_h, layers_lcs[i][j]],\n [c2, -c2, bottom_h, layers_lcs[i][j]],\n [c2, c2, top_h, layers_lcs[i][j]],\n [c1, c1, top_h, layers_lcs[i][j]],\n [c1, -c1, top_h, layers_lcs[i][j]],\n [c2, -c2, top_h, layers_lcs[i][j]]\n ],\n transform_data,\n [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0],\n [\n [], [], [], [],\n [[0, 0, bottom_h, 1]],\n [[0, 0, bottom_h, 1]],\n [[0, 0, top_h, 
1]],\n [[0, 0, top_h, 1]],\n [], [], [], []\n ],\n [\n transfinite_r_data[j],\n transfinite_phi_data,\n transfinite_h_data[i]\n ],\n transfinite_types[1],\n layers_physical_names[i][j],\n surfaces_names=surfaces_names[\n layers_surfaces_names[i][j]]\n ))\n # Layer Y\n primitives.append(Primitive(\n factory,\n [\n [c2, c2, bottom_h, layers_lcs[i][j]],\n [-c2, c2, bottom_h, layers_lcs[i][j]],\n [-c1, c1, bottom_h, layers_lcs[i][j]],\n [c1, c1, bottom_h, layers_lcs[i][j]],\n [c2, c2, top_h, layers_lcs[i][j]],\n [-c2, c2, top_h, layers_lcs[i][j]],\n [-c1, c1, top_h, layers_lcs[i][j]],\n [c1, c1, top_h, layers_lcs[i][j]]\n ],\n transform_data,\n [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n [\n [[0, 0, bottom_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, bottom_h, 1]],\n [], [], [], [],\n [], [], [], []\n ],\n [\n transfinite_phi_data,\n transfinite_r_data[j],\n transfinite_h_data[i]\n ],\n transfinite_types[2],\n layers_physical_names[i][j],\n surfaces_names=surfaces_names[\n layers_surfaces_names[i][j]]\n ))\n # Layer NX\n if transfinite_r_data[j][\n 1] == 0: # If type is Progression then reverse coefficient\n rc = 1.0 / transfinite_r_data[j][2]\n else:\n rc = transfinite_r_data[j][2]\n primitives.append(Primitive(\n factory,\n [\n [-c1, c1, bottom_h, layers_lcs[i][j]],\n [-c2, c2, bottom_h, layers_lcs[i][j]],\n [-c2, -c2, bottom_h, layers_lcs[i][j]],\n [-c1, -c1, bottom_h, layers_lcs[i][j]],\n [-c1, c1, top_h, layers_lcs[i][j]],\n [-c2, c2, top_h, layers_lcs[i][j]],\n [-c2, -c2, top_h, layers_lcs[i][j]],\n [-c1, -c1, top_h, layers_lcs[i][j]]\n ],\n transform_data,\n [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0],\n [\n [], [], [], [],\n [[0, 0, bottom_h, 1]],\n [[0, 0, bottom_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, top_h, 1]],\n [], [], [], []\n ],\n [\n [transfinite_r_data[j][0], transfinite_r_data[j][1],\n rc],\n transfinite_phi_data,\n transfinite_h_data[i]\n ],\n transfinite_types[3],\n layers_physical_names[i][j],\n surfaces_names=surfaces_names[\n layers_surfaces_names[i][j]]\n ))\n # Layer NY\n if transfinite_r_data[j][\n 1] == 0: # If type is Progression then reverse coefficient\n rc = 1.0 / transfinite_r_data[j][2]\n else:\n rc = transfinite_r_data[j][2]\n primitives.append(Primitive(\n factory,\n [\n [c1, -c1, bottom_h, layers_lcs[i][j]],\n [-c1, -c1, bottom_h, layers_lcs[i][j]],\n [-c2, -c2, bottom_h, layers_lcs[i][j]],\n [c2, -c2, bottom_h, layers_lcs[i][j]],\n [c1, -c1, top_h, layers_lcs[i][j]],\n [-c1, -c1, top_h, layers_lcs[i][j]],\n [-c2, -c2, top_h, layers_lcs[i][j]],\n [c2, -c2, top_h, layers_lcs[i][j]]\n ],\n transform_data,\n [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n [\n [[0, 0, bottom_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, bottom_h, 1]],\n [], [], [], [],\n [], [], [], []\n ],\n [\n transfinite_phi_data,\n [transfinite_r_data[j][0], transfinite_r_data[j][1],\n rc],\n transfinite_h_data[i]\n ],\n transfinite_types[4],\n layers_physical_names[i][j],\n surfaces_names=surfaces_names[\n layers_surfaces_names[i][j]]\n ))\n else:\n if straight_boundary[0] == 0:\n curve_types = {\n 'C': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n 'X': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n 'Y': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n 'NX': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n 'NY': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n }\n elif straight_boundary[0] == 1:\n curve_types = {\n 'C': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n 'X': [0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0],\n 'Y': [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n 'NX': [0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],\n 'NY': [0, 0, 1, 1, 0, 
0, 0, 0, 0, 0, 0, 0]\n }\n elif straight_boundary[0] == 2:\n curve_types = {\n 'C': [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],\n 'X': [0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],\n 'Y': [0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n 'NX': [0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0],\n 'NY': [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n }\n else:\n curve_types = {\n 'C': [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],\n 'X': [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0],\n 'Y': [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n 'NX': [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0],\n 'NY': [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]\n }\n # Core center\n primitives.append(Primitive(\n factory,\n [\n [kc, kc, bottom_h, layers_lcs[i][0]],\n [-kc, kc, bottom_h, layers_lcs[i][0]],\n [-kc, -kc, bottom_h, layers_lcs[i][0]],\n [kc, -kc, bottom_h, layers_lcs[i][0]],\n [kc, kc, top_h, layers_lcs[i][0]],\n [-kc, kc, top_h, layers_lcs[i][0]],\n [-kc, -kc, top_h, layers_lcs[i][0]],\n [kc, -kc, top_h, layers_lcs[i][0]]\n ],\n transform_data,\n curve_types['C'],\n [\n [[0, 0, bottom_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, bottom_h, 1]],\n [[0, 0, bottom_h, 1]],\n [[0, 0, bottom_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, top_h, 1]],\n [], [], [], []],\n [\n transfinite_phi_data,\n transfinite_phi_data,\n transfinite_h_data[i]\n ],\n transfinite_types[0],\n layers_physical_names[i][0],\n surfaces_names=surfaces_names[layers_surfaces_names[i][0]]\n ))\n # Core X\n primitives.append(Primitive(\n factory,\n [\n [c, c, bottom_h, layers_lcs[i][0]],\n [kc, kc, bottom_h, layers_lcs[i][0]],\n [kc, -kc, bottom_h, layers_lcs[i][0]],\n [c, -c, bottom_h, layers_lcs[i][0]],\n [c, c, top_h, layers_lcs[i][0]],\n [kc, kc, top_h, layers_lcs[i][0]],\n [kc, -kc, top_h, layers_lcs[i][0]],\n [c, -c, top_h, layers_lcs[i][0]]\n ],\n transform_data,\n curve_types['X'],\n [\n [], [], [], [],\n [[0, 0, bottom_h, 1]],\n [[0, 0, bottom_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, top_h, 1]],\n [], [], [], []\n ],\n [\n transfinite_r_data[0],\n transfinite_phi_data,\n transfinite_h_data[i]\n ],\n transfinite_types[1],\n layers_physical_names[i][0],\n surfaces_names=surfaces_names[\n layers_surfaces_names[i][0]]\n ))\n # Core Y\n primitives.append(Primitive(\n factory,\n [\n [c, c, bottom_h, layers_lcs[i][0]],\n [-c, c, bottom_h, layers_lcs[i][0]],\n [-kc, kc, bottom_h, layers_lcs[i][0]],\n [kc, kc, bottom_h, layers_lcs[i][0]],\n [c, c, top_h, layers_lcs[i][0]],\n [-c, c, top_h, layers_lcs[i][0]],\n [-kc, kc, top_h, layers_lcs[i][0]],\n [kc, kc, top_h, layers_lcs[i][0]]\n ],\n transform_data,\n curve_types['Y'],\n [\n [[0, 0, bottom_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, bottom_h, 1]],\n [], [], [], [],\n [], [], [], []\n ],\n [\n transfinite_phi_data,\n transfinite_r_data[0],\n transfinite_h_data[i],\n ],\n transfinite_types[2],\n layers_physical_names[i][0],\n surfaces_names=surfaces_names[layers_surfaces_names[i][0]]\n ))\n # Core NX\n if transfinite_r_data[0][\n 1] == 0: # If type is Progression then reverse coefficient\n rc = 1.0 / transfinite_r_data[0][2]\n else:\n rc = transfinite_r_data[0][2]\n primitives.append(Primitive(\n factory,\n [\n [-kc, kc, bottom_h, layers_lcs[i][0]],\n [-c, c, bottom_h, layers_lcs[i][0]],\n [-c, -c, bottom_h, layers_lcs[i][0]],\n [-kc, -kc, bottom_h, layers_lcs[i][0]],\n [-kc, kc, top_h, layers_lcs[i][0]],\n [-c, c, top_h, layers_lcs[i][0]],\n [-c, -c, top_h, layers_lcs[i][0]],\n [-kc, -kc, top_h, layers_lcs[i][0]]\n ],\n transform_data,\n curve_types['NX'],\n [\n [], [], [], [],\n [[0, 0, bottom_h, 1]],\n [[0, 0, bottom_h, 1]],\n [[0, 0, 
top_h, 1]],\n [[0, 0, top_h, 1]],\n [], [], [], []\n ],\n [\n [transfinite_r_data[0][0], transfinite_r_data[0][1],\n rc],\n transfinite_phi_data,\n transfinite_h_data[i]\n ],\n transfinite_types[3],\n layers_physical_names[i][0],\n surfaces_names=surfaces_names[layers_surfaces_names[i][0]]\n ))\n # Core NY\n if transfinite_r_data[0][\n 1] == 0: # If type is Progression then reverse coefficient\n rc = 1.0 / transfinite_r_data[0][2]\n else:\n rc = transfinite_r_data[0][2]\n primitives.append(Primitive(\n factory,\n [\n [kc, -kc, bottom_h, layers_lcs[i][0]],\n [-kc, -kc, bottom_h, layers_lcs[i][0]],\n [-c, -c, bottom_h, layers_lcs[i][0]],\n [c, -c, bottom_h, layers_lcs[i][0]],\n [kc, -kc, top_h, layers_lcs[i][0]],\n [-kc, -kc, top_h, layers_lcs[i][0]],\n [-c, -c, top_h, layers_lcs[i][0]],\n [c, -c, top_h, layers_lcs[i][0]]\n ],\n transform_data,\n curve_types['NY'],\n [\n [[0, 0, bottom_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, bottom_h, 1]],\n [], [], [], [],\n [], [], [], []\n ],\n [\n transfinite_phi_data,\n [transfinite_r_data[0][0], transfinite_r_data[0][1],\n rc],\n transfinite_h_data[i],\n ],\n transfinite_types[4],\n layers_physical_names[i][0],\n surfaces_names=surfaces_names[layers_surfaces_names[i][0]]\n ))\n # Layers\n for j in range(1, len(radii)):\n if straight_boundary[j] == 0:\n curve_types = {\n 'X': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n 'Y': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n 'NX': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n 'NY': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n }\n elif straight_boundary[j] == 1:\n curve_types = {\n 'X': [0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0],\n 'Y': [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n 'NX': [0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],\n 'NY': [0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]\n }\n elif straight_boundary[j] == 2:\n curve_types = {\n 'X': [0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],\n 'Y': [0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n 'NX': [0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0],\n 'NY': [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n }\n else:\n curve_types = {\n 'X': [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0],\n 'Y': [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n 'NX': [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0],\n 'NY': [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]\n }\n c1 = radii[j - 1] / math.sqrt(2.0)\n c2 = radii[j] / math.sqrt(2.0)\n # Layer X\n primitives.append(Primitive(\n factory,\n [\n [c2, c2, bottom_h, layers_lcs[i][j]],\n [c1, c1, bottom_h, layers_lcs[i][j]],\n [c1, -c1, bottom_h, layers_lcs[i][j]],\n [c2, -c2, bottom_h, layers_lcs[i][j]],\n [c2, c2, top_h, layers_lcs[i][j]],\n [c1, c1, top_h, layers_lcs[i][j]],\n [c1, -c1, top_h, layers_lcs[i][j]],\n [c2, -c2, top_h, layers_lcs[i][j]]\n ],\n transform_data,\n curve_types['X'],\n [\n [], [], [], [],\n [[0, 0, bottom_h, 1]],\n [[0, 0, bottom_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, top_h, 1]],\n [], [], [], []\n ],\n [\n transfinite_r_data[j],\n transfinite_phi_data,\n transfinite_h_data[i]\n ],\n transfinite_types[1],\n layers_physical_names[i][j],\n surfaces_names=surfaces_names[\n layers_surfaces_names[i][j]]\n ))\n # Layer Y\n primitives.append(Primitive(\n factory,\n [\n [c2, c2, bottom_h, layers_lcs[i][j]],\n [-c2, c2, bottom_h, layers_lcs[i][j]],\n [-c1, c1, bottom_h, layers_lcs[i][j]],\n [c1, c1, bottom_h, layers_lcs[i][j]],\n [c2, c2, top_h, layers_lcs[i][j]],\n [-c2, c2, top_h, layers_lcs[i][j]],\n [-c1, c1, top_h, layers_lcs[i][j]],\n [c1, c1, top_h, layers_lcs[i][j]]\n ],\n transform_data,\n curve_types['Y'],\n [\n [[0, 0, bottom_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, bottom_h, 
1]],\n [], [], [], [],\n [], [], [], []\n ],\n [\n transfinite_phi_data,\n transfinite_r_data[j],\n transfinite_h_data[i]\n ],\n transfinite_types[2],\n layers_physical_names[i][j],\n surfaces_names=surfaces_names[\n layers_surfaces_names[i][j]]\n ))\n # Layer NX\n if transfinite_r_data[j][\n 1] == 0: # If type is Progression then reverse coefficient\n rc = 1.0 / transfinite_r_data[j][2]\n else:\n rc = transfinite_r_data[j][2]\n primitives.append(Primitive(\n factory,\n [\n [-c1, c1, bottom_h, layers_lcs[i][j]],\n [-c2, c2, bottom_h, layers_lcs[i][j]],\n [-c2, -c2, bottom_h, layers_lcs[i][j]],\n [-c1, -c1, bottom_h, layers_lcs[i][j]],\n [-c1, c1, top_h, layers_lcs[i][j]],\n [-c2, c2, top_h, layers_lcs[i][j]],\n [-c2, -c2, top_h, layers_lcs[i][j]],\n [-c1, -c1, top_h, layers_lcs[i][j]]\n ],\n transform_data,\n curve_types['NX'],\n [\n [], [], [], [],\n [[0, 0, bottom_h, 1]],\n [[0, 0, bottom_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, top_h, 1]],\n [], [], [], []\n ],\n [\n [transfinite_r_data[j][0],\n transfinite_r_data[j][1], rc],\n transfinite_phi_data,\n transfinite_h_data[i]\n ],\n transfinite_types[3],\n layers_physical_names[i][j],\n surfaces_names=surfaces_names[\n layers_surfaces_names[i][j]]\n ))\n # Layer NY\n if transfinite_r_data[j][\n 1] == 0: # If type is Progression then reverse coefficient\n rc = 1.0 / transfinite_r_data[j][2]\n else:\n rc = transfinite_r_data[j][2]\n primitives.append(Primitive(\n factory,\n [\n [c1, -c1, bottom_h, layers_lcs[i][j]],\n [-c1, -c1, bottom_h, layers_lcs[i][j]],\n [-c2, -c2, bottom_h, layers_lcs[i][j]],\n [c2, -c2, bottom_h, layers_lcs[i][j]],\n [c1, -c1, top_h, layers_lcs[i][j]],\n [-c1, -c1, top_h, layers_lcs[i][j]],\n [-c2, -c2, top_h, layers_lcs[i][j]],\n [c2, -c2, top_h, layers_lcs[i][j]]\n ],\n transform_data,\n curve_types['NY'],\n [\n [[0, 0, bottom_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, bottom_h, 1]],\n [], [], [], [],\n [], [], [], []\n ],\n [\n transfinite_phi_data,\n [transfinite_r_data[j][0],\n transfinite_r_data[j][1], rc],\n transfinite_h_data[i]\n ],\n transfinite_types[4],\n layers_physical_names[i][j],\n surfaces_names=surfaces_names[\n layers_surfaces_names[i][j]]\n ))\n Complex.__init__(self, factory, primitives)", "def create_dataset(n_power_steps, n_initial_conditions, n_time_steps, power_system):\r\n data_ops = input_data_initialised(n_ops=n_power_steps * n_initial_conditions,\r\n power_system=power_system)\r\n\r\n power_values = np.linspace(0.0, 0.2, n_power_steps)\r\n delta_initial = np.linspace(-np.pi / 2, np.pi / 2, n_initial_conditions)\r\n\r\n power_ops_grid, delta_ops_grid = np.meshgrid(power_values, delta_initial)\r\n\r\n power_ops = power_ops_grid.reshape((-1, 1))\r\n delta_ops = delta_ops_grid.reshape((-1, 1))\r\n\r\n data_ops.update(time=np.ones(power_ops.shape) * power_system['t_max'],\r\n power=power_ops,\r\n states_initial=np.concatenate([delta_ops, power_system['omega_0'] + np.ones(delta_ops.shape) *\r\n 0.1], axis=1))\r\n\r\n data_ops = evaluate_op_trajectory(data_ops, n_time_steps=n_time_steps, power_system=power_system)\r\n\r\n data_ops = calculate_data_ode_right_hand_side(data_ops, power_system)\r\n\r\n return data_ops", "def construct(input_placeholder):\n\t\t###############################\n\t\t# MODEL ARCHITECTURE #\n\t\t###############################\n\t\t# First block of convolutions\n\t\twith tf.variable_scope(\"conv_1\"):\n\t\t\tconv_1_1 = conv2d(input_placeholder,\n\t\t\t\tinput_channels=1,\n\t\t\t\toutput_channels=64,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1)\n\t\t\tconv_1_2 
= conv2d(conv_1_1,\n\t\t\t\tinput_channels=64,\n\t\t\t\toutput_channels=64,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1,\n\t\t\t\tstride=2)\n\t\t\t# TODO batchn\n\t\t\tbn_1 = conv_1_2\n\n\t\t# Second block of convolutions.\n\t\twith tf.variable_scope(\"conv2\"):\n\t\t\tconv_2_1 = conv2d(bn_1,\n\t\t\t\tinput_channels=64,\n\t\t\t\toutput_channels=128,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1)\n\t\t\tconv_2_2 = conv2d(conv_2_1,\n\t\t\t\tinput_channels=128,\n\t\t\t\toutput_channels=128,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1,\n\t\t\t\tstride=2)\n\n\t\t\t# TODO batchn\n\t\t\tbn_2 = conv_2_2\n\n\t\twith tf.variable_scope(\"conv3\"):\n\t\t\tconv_3_1 = conv2d(bn_2,\n\t\t\t\tinput_channels=128,\n\t\t\t\toutput_channels=256,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1)\n\t\t\tconv_3_2 = conv2d(conv_3_1,\n\t\t\t\tinput_channels=256,\n\t\t\t\toutput_channels=256,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1)\n\t\t\tconv_3_3 = conv2d(conv_3_2,\n\t\t\t\tinput_channels=256,\n\t\t\t\toutput_channels=256,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1,\n\t\t\t\tstride=2)\n\t\t\t# TODO batchn\n\t\t\tbn_3 = conv_3_3\n\n\n\t\t# DILATED LAYERS:\n\t\twith tf.variable_scope(\"conv4\"):\n\t\t\tconv_4_1 = conv2d(bn_3,\n\t\t\t\tinput_channels=256,\n\t\t\t\toutput_channels=512,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1,\n\t\t\t\tdilation=1)\n\t\t\tconv_4_2 = conv2d(conv_4_1,\n\t\t\t\tinput_channels=512,\n\t\t\t\toutput_channels=512,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1,\n\t\t\t\tdilation=1)\n\t\t\tconv_4_3 = conv2d(conv_4_2,\n\t\t\t\tinput_channels=512,\n\t\t\t\toutput_channels=512,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1,\n\t\t\t\tdilation=1)\n\t\t\t# TODO batchn\n\t\t\tbn_4 = conv_4_3\n\n\t\twith tf.variable_scope(\"conv5\"):\n\t\t\tconv_5_1 = conv2d(bn_4,\n\t\t\t\tinput_channels=512,\n\t\t\t\toutput_channels=512,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=2,\n\t\t\t\tdilation=2)\n\t\t\tconv_5_2 = conv2d(conv_5_1,\n\t\t\t\tinput_channels=512,\n\t\t\t\toutput_channels=512,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=2,\n\t\t\t\tdilation=2)\n\t\t\tconv_5_3 = conv2d(conv_5_2,\n\t\t\t\tinput_channels=512,\n\t\t\t\toutput_channels=512,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=2,\n\t\t\t\tdilation=2)\n\t\t\t# TODO batchn\n\t\t\tbn_5 = conv_5_3\n\n\t\twith tf.variable_scope(\"conv6\"):\n\t\t\tconv_6_1 = conv2d(bn_5,\n\t\t\t\tinput_channels=512,\n\t\t\t\toutput_channels=512,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=2,\n\t\t\t\tdilation=2)\n\t\t\tconv_6_2 = conv2d(conv_6_1,\n\t\t\t\tinput_channels=512,\n\t\t\t\toutput_channels=512,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=2,\n\t\t\t\tdilation=2)\n\t\t\tconv_6_3 = conv2d(conv_6_2,\n\t\t\t\tinput_channels=512,\n\t\t\t\toutput_channels=512,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=2,\n\t\t\t\tdilation=2)\n\t\t\t# TODO batchn\n\t\t\tbn_6 = conv_6_3\n\n\n\t\twith tf.variable_scope(\"conv7\"):\n\t\t\tconv_7_1 = conv2d(bn_6,\n\t\t\t\tinput_channels=512,\n\t\t\t\toutput_channels=512,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1,\n\t\t\t\tdilation=1)\n\t\t\tconv_7_2 = conv2d(conv_7_1,\n\t\t\t\tinput_channels=512,\n\t\t\t\toutput_channels=512,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1,\n\t\t\t\tdilation=1)\n\t\t\tconv_7_3 = conv2d(conv_7_2,\n\t\t\t\tinput_channels=512,\n\t\t\t\toutput_channels=512,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1,\n\t\t\t\tdilation=1)\n\t\t\t# TODO batchn\n\t\t\tbn_7 = conv_7_3\n\n\n\t\twith tf.variable_scope(\"conv8\"):\n\t\t\tconv_8_1 = deconv2d(bn_7,\n\t\t\t\tinput_channels=512,\n\t\t\t\toutput_size=[None, 64, 64, 256],\n\t\t\t\tkernel_size=4,\n\t\t\t\tstride=2,\n\t\t\t\tpad=1)\n\t\t\tconv_8_2 = 
conv2d(conv_8_1,\n\t\t\t\tinput_channels=256,\n\t\t\t\toutput_channels=256,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1)\n\t\t\tconv_8_3 = conv2d(conv_8_2,\n\t\t\t\tinput_channels=256,\n\t\t\t\toutput_channels=256,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1,\n\t\t\t\tstride=1)\n\t\t\tconv_8_313 = conv2d(conv_8_3,\n\t\t\t\tinput_channels=256,\n\t\t\t\toutput_channels=313,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1,\n\t\t\t\tstride=1)\n\n\n\t\treturn conv_8_313", "def construct_circuit(self):\n return self._circuit", "def build(self, inputs_shape):\n if inputs_shape[-1] is None:\n raise ValueError(\n \"Expected inputs.shape[-1] to be known, saw shape: %s\"\n % str(inputs_shape)\n )\n _check_supported_dtypes(self.dtype)\n d = inputs_shape[-1] # noqa: F841\n h = self._real_units # noqa: F841\n s = self._slots # noqa: F841\n\n self._basic_build(inputs_shape)\n\n self.parameter_set = [\n self._erase_W,\n self._erase_b,\n self._reset_W,\n self._reset_b,\n self._add_W,\n self._add_b,\n self.heads,\n ]\n\n self.built = True", "def layer_construction(self, in_channel, out_channel, stride, num_blocks):\n layer = [ResBlock(in_channel,out_channel,stride)]\n for i in range(0, num_blocks-1):\n layer.append(ResBlock(out_channel * 4, out_channel))\n\n return nn.Sequential(*layer)" ]
[ "0.63333786", "0.6290255", "0.5760848", "0.57012063", "0.56889766", "0.55815995", "0.5549112", "0.54142976", "0.537557", "0.53703374", "0.53373516", "0.53207034", "0.53111106", "0.5274469", "0.5271391", "0.5248255", "0.52141166", "0.52014184", "0.5191351", "0.51885855", "0.51842", "0.51592404", "0.51475114", "0.51471394", "0.5140189", "0.5134254", "0.5130586", "0.5108377", "0.50838757", "0.5082016", "0.5027045", "0.49945712", "0.49883485", "0.49797022", "0.4949463", "0.49468634", "0.49429527", "0.49416998", "0.49416786", "0.49345055", "0.49333814", "0.4926911", "0.491706", "0.49044108", "0.49006042", "0.4880595", "0.4879516", "0.4867403", "0.48624387", "0.48512813", "0.48462227", "0.48457876", "0.48328212", "0.48235384", "0.48212773", "0.48133555", "0.48124182", "0.48066607", "0.47990775", "0.47978085", "0.4793194", "0.47915488", "0.47851914", "0.47850725", "0.47740862", "0.4773921", "0.47648454", "0.47620097", "0.47613266", "0.4760627", "0.47585624", "0.47536522", "0.47517595", "0.47508013", "0.47488657", "0.47486728", "0.4747965", "0.47478414", "0.47461465", "0.47351587", "0.47336853", "0.4728578", "0.47272322", "0.47248632", "0.47183284", "0.4718091", "0.47179884", "0.4715125", "0.47145006", "0.47132558", "0.47120562", "0.47108498", "0.47094095", "0.47065356", "0.47037044", "0.47036532", "0.47010076", "0.46984014", "0.46942565", "0.46935096" ]
0.5596882
5
Representation of a circuit as "Rotation list". On top of the structure (see Structured script representation), 1Q layers are compiled to the corresponding rotations. The 1Q layers are now represented with a rotation vector.
rotations_1Q = [layer, qubit, rotation_vector]
rotation_vector = [axis_of_rotation (3 numbers), angle_of_rotation]
def __init__(self, rotations_1Q, lines_2Q, n_qubits):
    self.rotations_1Q = rotations_1Q
    self.lines_2Q = lines_2Q
    self.n_qubits = n_qubits
    dim_depth, dim_qubits, dim_rot = self.rotations_1Q.shape
    self.depth = 1 + 2 * len(lines_2Q)
    # 1Q rotations vector does not match the depth
    if not ((2 * dim_depth - 1) == self.depth):
        raise ValueError('1Q rotations vector does not match the depth')
    # 1Q rotations vector does not match the qubit number
    if not (dim_qubits == n_qubits):
        raise ValueError(
            '1Q rotations vector does not match the qubit number')
    # 1Q rotations vector does not match the parameter number
    if not (dim_rot == 4):
        raise ValueError(
            '1Q rotations vector does not match the parameter number')
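For context, a minimal usage sketch (an illustration, not part of the dataset record): it builds arguments that satisfy the shape checks in the constructor above for a 2-qubit circuit with one 2-qubit layer. The class name `RotationList` and the tuple format of `lines_2Q` entries are assumptions, not taken from the source.

import numpy as np

n_qubits = 2
lines_2Q = [("CZ", 0, 1)]          # one 2-qubit layer (entry format assumed)

# depth = 1 + 2*len(lines_2Q) = 3, and the check requires
# (2*dim_depth - 1) == depth, so dim_depth = 1 + len(lines_2Q) = 2
dim_depth = 1 + len(lines_2Q)

# rotation_vector = [x, y, z, angle]: rotation axis (3 numbers) plus angle
rotations_1Q = np.zeros((dim_depth, n_qubits, 4))
rotations_1Q[:, :, 2] = 1.0        # rotate every qubit about the z axis
rotations_1Q[0, 0, 3] = np.pi / 2  # 90-degree z rotation on qubit 0, first layer

# circuit = RotationList(rotations_1Q, lines_2Q, n_qubits)  # hypothetical class name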
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def two_qubit_rotation(self, bits, symbols):\n circuit = cirq.Circuit()\n circuit += cirq.Circuit(self.one_qubit_rotation(bits[0], symbols[0:3]))\n circuit += cirq.Circuit(self.one_qubit_rotation(bits[1], symbols[3:6]))\n circuit += [cirq.ZZ(*bits)**symbols[6]]\n circuit += [cirq.YY(*bits)**symbols[7]]\n circuit += [cirq.XX(*bits)**symbols[8]]\n circuit += cirq.Circuit(self.one_qubit_rotation(bits[0], symbols[9:12]))\n circuit += cirq.Circuit(self.one_qubit_rotation(bits[1], symbols[12:]))\n return circuit", "def _tk1_to_rotations(a: float, b: float, c: float) -> Circuit:\n circ = Circuit(1)\n circ.Rz(c, 0).Rx(b, 0).Rz(a, 0)\n return circ", "def one_qubit_rotation(self, qubit, symbols):\n # print(symbols, \"hi\")\n return [cirq.rx(symbols[0])(qubit),\n cirq.ry(symbols[1])(qubit),\n cirq.rz(symbols[2])(qubit)]", "def test_serialize_circuit_rotations(self):\n dev = QeQiskitDevice(wires=1, shots=1000, backend=\"qasm_simulator\", analytic=False)\n\n def circuit():\n qml.Hadamard(wires=[0])\n return qml.expval(qml.Hadamard(0))\n\n qnode = qml.QNode(circuit, dev)\n qnode._construct([], {})\n\n qasm = dev.serialize_circuit(qnode.circuit)\n expected = 'OPENQASM 2.0;\\ninclude \"qelib1.inc\";\\nqreg q[1];\\ncreg c[1];\\nh q[0];\\nry(-0.7853981633974483) q[0];\\n'\n assert qasm == expected", "def test_rot_decomposition(self, diff_method):\r\n dev = qml.device(\"default.qubit\", wires=1)\r\n\r\n def circuit(weights):\r\n qml.Rot(weights[0], weights[1], weights[2], wires=0)\r\n return qml.expval(qml.PauliX(0))\r\n\r\n circuit = qml.QNode(circuit, dev, diff_method=diff_method)\r\n params = np.array([1.0, 2.0, 3.0])\r\n tapes = qml.metric_tensor(circuit, only_construct=True)(params)\r\n assert len(tapes) == 3\r\n\r\n # first parameter subcircuit\r\n assert len(tapes[0].operations) == 0\r\n\r\n # Second parameter subcircuit\r\n assert len(tapes[1].operations) == 4\r\n assert isinstance(tapes[1].operations[0], qml.RZ)\r\n assert tapes[1].operations[0].data == [1]\r\n # PauliY decomp\r\n assert isinstance(tapes[1].operations[1], qml.PauliZ)\r\n assert isinstance(tapes[1].operations[2], qml.S)\r\n assert isinstance(tapes[1].operations[3], qml.Hadamard)\r\n\r\n # Third parameter subcircuit\r\n assert len(tapes[2].operations) == 2\r\n assert isinstance(tapes[2].operations[0], qml.RZ)\r\n assert isinstance(tapes[2].operations[1], qml.RY)\r\n assert tapes[2].operations[0].data == [1]\r\n assert tapes[2].operations[1].data == [2]\r\n\r\n result = qml.metric_tensor(circuit)(params)\r\n assert result.shape == (3, 3)", "def Rot_layer(self, w):\n for idx, element in enumerate(w):\n qml.Rot(element[0], element[1], element[2], wires=idx)", "def toRot(q):\n R = SX.zeros(3, 3)\n qi = q[0]; qj = q[1]; qk = q[2]; qr = q[3]\n R[0, 0] = 1. - 2. * (qj * qj + qk * qk);\n R[0, 1] = 2. * (qi * qj - qk * qr);\n R[0, 2] = 2. * (qi * qk + qj * qr)\n R[1, 0] = 2. * (qi * qj + qk * qr);\n R[1, 1] = 1. - 2. * (qi * qi + qk * qk);\n R[1, 2] = 2. * (qj * qk - qi * qr)\n R[2, 0] = 2. * (qi * qk - qj * qr);\n R[2, 1] = 2. * (qj * qk + qi * qr);\n R[2, 2] = 1. - 2. 
* (qi * qi + qj * qj)\n\n return R", "def qwc_rotation(pauli_operators):\n paulis_with_identity = (qml.Identity, qml.PauliX, qml.PauliY, qml.PauliZ)\n if not all(isinstance(element, paulis_with_identity) for element in pauli_operators):\n raise TypeError(\n \"All values of input pauli_operators must be either Identity, PauliX, PauliY, or PauliZ instances,\"\n \" instead got pauli_operators = {}.\".format(pauli_operators)\n )\n\n for pauli in pauli_operators:\n if isinstance(pauli, qml.PauliX):\n qml.RY(-np.pi / 2, wires=pauli.wires)\n\n elif isinstance(pauli, qml.PauliY):\n qml.RX(np.pi / 2, wires=pauli.wires)", "def add_rotation_layer(self, rotation_blocks:Optional[Union[str, cirq.Gate, Callable,\n List[str],List[cirq.Gate],List[Callable],\n List['TemplateCircuitBlock']]] =None):\n rotation_blocks = self._parse_rotation_blocks(rotation_blocks)\n for i, block in enumerate(rotation_blocks):\n if self._reuse_param_per_layer:\n self.reset_index()\n if isinstance(block, TemplateCircuitBlock):\n interaction_graphs = self.get_interaction_graphs(i, block.num_block_qubits)\n for qubits in interaction_graphs:\n if self._reuse_param_per_template:\n self.reset_index()\n block.build(self, qubits)\n else:\n for qubit in range(self.n_qubit):\n gate = self.parameterise_gate(block)\n self.apply_gate_operation(gate, qubit)", "def getRotationTrajectory(self) -> SO3Trajectory:\n return SO3Trajectory(self.times,[m[:9] for m in self.milestones])", "def rotationData(self):\n return [self.detections.rotationTimeTags, self.detections.rotations], self.b1, self.b2", "def intermediateJacPol2Rot(self,x):\n allS = np.sin(x[0,:])\n allC = np.cos(x[0,:])\n allR = x[1,:]\n \n Jac = Idn(x.shape[1],self._dim)\n Jac[:,0,0] = -allS*allR\n Jac[:,0,1] = allC\n Jac[:,1,0] = allC*allR\n Jac[:,1,1] = allS\n return Jac", "def __init__(self, n_qubit:int, copies:int=1,\n rotation_blocks:Optional[Union[str, cirq.Gate, Callable, 'TemplateCircuitBlock',\n List[str],List[cirq.Gate],List[Callable],\n List['TemplateCircuitBlock']]] =None,\n entanglement_blocks:Optional[Union[str, cirq.Gate, Callable, 'TemplateCircuitBlock',\n List[str],List[cirq.Gate],List[Callable],\n List['TemplateCircuitBlock']]] =None,\n entangle_strategy:Optional[Union[str,List[str], Callable[[int,int],List[Tuple[int]]],\n List[Callable[[int,int],List[Tuple[int]]]]]]=None,\n parameter_symbol:str='θ',\n final_rotation_layer:bool=False,\n flatten_circuit:bool=False,\n reuse_param_per_depth:bool=False,\n reuse_param_per_layer:bool=False,\n reuse_param_per_template:bool=False,\n parameter_index:Optional[int]=None,\n parameter_scale=1,\n name:str='ParameterisedCircuit',\n *args, **kwargs):\n super().__init__(n_qubit, name=name, *args, **kwargs)\n self._parameter_symbol = parameter_symbol\n self._parameters = np.array([], dtype=object)\n self._readout_qubit = None\n self._flatten_circuit = flatten_circuit\n self._entangle_strategy = entangle_strategy if entangle_strategy else 'full'\n self._parameter_index = parameter_index\n self._reuse_param_per_depth = reuse_param_per_depth\n self._reuse_param_per_layer = reuse_param_per_layer\n self._reuse_param_per_template = reuse_param_per_template\n self._parameter_scale = parameter_scale \n self.build(rotation_blocks, entanglement_blocks, entangle_strategy, copies,\n final_rotation_layer)", "def representation_2_txyz(self: Q, representation: str = \"\") -> List:\n\n symbolic = False\n\n if (\n hasattr(self.t, \"free_symbols\")\n or hasattr(self.x, \"free_symbols\")\n or hasattr(self.y, \"free_symbols\")\n or hasattr(self.z, 
\"free_symbols\")\n ):\n symbolic = True\n\n if representation == \"\":\n box_t, box_x, box_y, box_z = self.t, self.x, self.y, self.z\n\n elif representation == \"polar\":\n amplitude, theta_x, theta_y, theta_z = self.t, self.x, self.y, self.z\n\n theta = (theta_x ** 2 + theta_y ** 2 + theta_z ** 2) ** (1 / 2)\n\n if theta == 0:\n box_t = self.t\n box_x, box_y, box_z = 0, 0, 0\n\n else:\n if symbolic:\n box_t = amplitude * sp.cos(theta)\n box_x = self.x / theta * amplitude * sp.sin(theta)\n box_y = self.y / theta * amplitude * sp.sin(theta)\n box_z = self.z / theta * amplitude * sp.sin(theta)\n else:\n box_t = amplitude * math.cos(theta)\n box_x = self.x / theta * amplitude * math.sin(theta)\n box_y = self.y / theta * amplitude * math.sin(theta)\n box_z = self.z / theta * amplitude * math.sin(theta)\n\n elif representation == \"spherical\":\n box_t, R, theta, phi = self.t, self.x, self.y, self.z\n\n if symbolic:\n box_x = R * sp.sin(theta) * sp.cos(phi)\n box_y = R * sp.sin(theta) * sp.sin(phi)\n box_z = R * sp.cos(theta)\n else:\n box_x = R * math.sin(theta) * math.cos(phi)\n box_y = R * math.sin(theta) * math.sin(phi)\n box_z = R * math.cos(theta)\n\n elif representation == \"hyperbolic\":\n u, v, theta, phi = self.t, self.x, self.y, self.z\n\n if symbolic:\n box_t = v * sp.exp(u)\n box_x = v * sp.exp(-u)\n box_y = v * sp.sin(theta) * sp.sin(phi)\n box_z = v * sp.cos(theta)\n\n else:\n box_t = v * math.exp(u)\n box_x = v * math.exp(-u)\n box_y = v * math.sin(theta) * sp.sin(phi)\n box_z = v * math.cos(theta)\n\n else:\n raise ValueError(f\"Oops, don't know representation: representation\")\n\n return [box_t, box_x, box_y, box_z]", "def rotation(self):\n\t\treturn self.piv.a.rotate.v", "def to_revolute_chain(self):\n T_zero = {\"p0\": SE3.identity()}\n ang_lims_map = {}\n old_to_new_names = {\n \"p0\": \"p0\"\n } # Returned for user of the method (to map old joint names to new ones)\n ub, lb = spherical_angle_bounds_to_revolute(self.ub, self.lb)\n count = 1\n joint_prev = \"p0\"\n for (\n joint\n ) in self.d: # Assumes the dictionary is in chain order (perhaps enforce?)\n new_node1 = \"p\" + str(count)\n count += 1\n # ub[new_node1] = self.ub[joint]\n # lb[new_node1] = self.lb[joint]\n ang_lims_map[joint] = new_node1\n\n new_node2 = \"p\" + str(count)\n count += 1\n old_to_new_names[joint] = new_node2\n\n Ry = SE3(SO3(roty(np.pi / 2)), np.zeros(3))\n T_zero[new_node1] = T_zero[joint_prev].dot(Ry)\n d = self.d[joint]\n Ry_back = SE3(SO3(roty(-np.pi / 2)), np.zeros(3))\n T_zero[new_node2] = T_zero[new_node1].dot(Ry_back).dot(trans_axis(d, \"z\"))\n\n joint_prev = new_node2\n\n # for key in T_zero:\n # if key not in ub.keys() and key is not 'p0':\n # ub[key] = np.pi\n # lb[key] = -np.pi\n\n params = {\"T_zero\": T_zero, \"ub\": ub, \"lb\": lb}\n return RobotRevolute(params), old_to_new_names, ang_lims_map", "def rotorconversion(x):\n return cf.MultiVector(layout, val_rotorconversion(x))", "def rotate(self, theta, legs):\n U, onew = rotationTensor(theta, self.symmetries, legs)\n B = U @ self\n new = list(onew)\n old = list(legs)\n if B.internallegs != self.internallegs:\n old.append(self.internallegs[0])\n new.append(B.internallegs[0])\n B.swaplegs({n: o for o, n in zip(old, new)})\n return B.couplingAddapt(self.coupling)", "def __repr__(self):\n # first check for identity quaternion to avoid nans\n if self.real != 1:\n theta = numpy.arccos(self.real)\n angle = 360 * theta / numpy.pi\n xyz = self.pure / numpy.sin(theta)\n else:\n angle = 0.\n xyz = self.pure\n result = \"Transformation: tx ty 
tz rx ry rz angle\\n %g %g %g %g %g %g %g\" \\\n % (self.trans[0], self.trans[1], self.trans[2],\n xyz[0], xyz[1], xyz[2], angle)\n return result", "def get_Grotations(self, x):\n xsh = x.get_shape().as_list()\n angles = [0.,np.pi/2.,np.pi,3.*np.pi/2.]\n rx = []\n for i in range(4):\n # Z4 rotations about the z axis\n perm = [1,0,2,3]\n y = tf.transpose(x, perm=perm)\n y = tf.contrib.image.rotate(y, angles[i])\n y = tf.transpose(y, perm=perm)\n # Rotations in the quotient space (sphere S^2)\n # i) Z4 rotations about y axis\n for j in range(4):\n perm = [2,1,0,3]\n z = tf.transpose(y, perm=perm)\n z = tf.contrib.image.rotate(z, angles[-j])\n z = tf.transpose(z, perm=perm)\n \n rx.append(z)\n # ii) 2 rotations to the poles about the x axis\n perm = [0,2,1,3]\n z = tf.transpose(y, perm=perm)\n z = tf.contrib.image.rotate(z, angles[3])\n z = tf.transpose(z, perm=perm)\n rx.append(z)\n\n z = tf.transpose(y, perm=perm)\n z = tf.contrib.image.rotate(z, angles[1])\n z = tf.transpose(z, perm=perm)\n rx.append(z)\n\n return rx", "def test_to_rotation(self):\r\n q = np.array([-1, 1, 3, 2])\r\n q = q / np.linalg.norm(q)\r\n R_gt = np.array([\r\n [-1/3., -14/15., -2/15.],\r\n [2/3., -1/3., 2/3.],\r\n [-2/3., 2/15., 11/15.]]).T\r\n R = to_rotation(q)\r\n\r\n zero_matrix = R - R_gt\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)\r\n\r\n for _ in range(20):\r\n q = np.random.randn(4)\r\n q /= np.linalg.norm(q)\r\n q_inv = quaternion_conjugate(q)\r\n\r\n R = to_rotation(q)\r\n R_inv = to_rotation(q_inv)\r\n\r\n zero_matrix = R @ R_inv - np.identity(3)\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)\r\n\r\n # orthogonal matrix\r\n zero_matrix = R @ R.T - np.identity(3)\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)", "def get_rotation(self) -> np.array:\n axis = self.get_arms()[1]\n force = [self.d_x, self.d_y] # \"Force applied on the arm\"\n o_m = [self.target.x_obj - axis.x_obj, self.target.y_obj - axis.y_obj]\n torque = o_m[0]*force[1] - o_m[1] * force[0] # OM vectorial F\n if torque == 1: # Anti clockwise rotation\n rotation = np.array([[0, -1], [1, 0]])\n if torque == -1: # Clockwise rotation\n rotation = np.array([[0, 1], [-1, 0]])\n if torque == 0: # No rotation\n rotation = np.array([[0, 0], [0, 0]])\n return rotation", "def getEllipsYZRotMatrix(a1, a2):\n adir = a2 - a1\n amid = a1 + 0.5 * adir\n kath = np.sqrt((adir[0] * adir[0] + adir[1] * adir[1]) / 4.0)\n octantA2 = octant(a2)\n theta = np.arctan( abs( (adir[2]/2) / kath) )\n #[1, 4, 6, 7 ] => left rotation\n #[2, 3, 5, 8 ] => right rotation\n if octantA2 in [2, 3, 5, 8]: \n theta = -theta \n print \"theta =\" , np.rad2deg(theta)\n RotY = np.matrix( [ [ np.cos(theta), 0.0, np.sin(theta) ],\n [ 0.0 , 1.0, 0.0 ],\n [ -np.sin(theta), 0.0, np.cos(theta) ]\n ]) \n \n psi = np.arctan( abs( adir[1] / adir[0] ) )\n #[2, 4, 6, 8 ] => left rotation\n #[1, 3, 5, 7 ] => right rotation\n if octantA2 in [1, 3, 5, 7]:\n psi = -psi\n print \"psi =\" , np.rad2deg(psi)\n RotZ = np.matrix( [ [ np.cos(psi), -np.sin(psi), 0.0 ],\n [ np.sin(psi), np.cos(psi), 0.0 ],\n [ 0.0 , 0.0 , 1.0 ]\n ])\n return np.asarray( RotY * RotZ )", "def txyz_2_representation(self: Q, representation: str = \"\") -> List:\n\n symbolic = self.is_symbolic()\n\n if representation == \"\":\n rep = [self.t, self.x, self.y, self.z]\n\n elif representation == \"polar\":\n amplitude = (self.t ** 2 + self.x ** 2 + self.y ** 2 + self.z ** 2) ** (\n 1 / 2\n )\n\n abs_v = abs_of_vector(self).t\n\n if symbolic:\n theta = sp.atan2(abs_v, self.t)\n else:\n theta 
= math.atan2(abs_v, self.t)\n\n if abs_v == 0:\n theta_x, theta_y, theta_z = 0, 0, 0\n\n else:\n theta_x = theta * self.x / abs_v\n theta_y = theta * self.y / abs_v\n theta_z = theta * self.z / abs_v\n\n rep = [amplitude, theta_x, theta_y, theta_z]\n\n elif representation == \"spherical\":\n\n spherical_t = self.t\n\n spherical_r = (self.x ** 2 + self.y ** 2 + self.z ** 2) ** (1 / 2)\n\n if spherical_r == 0:\n theta = 0\n else:\n if symbolic:\n theta = sp.acos(self.z / spherical_r)\n\n else:\n theta = math.acos(self.z / spherical_r)\n\n if symbolic:\n phi = sp.atan2(self.y, self.x)\n else:\n phi = math.atan2(self.y, self.x)\n\n rep = [spherical_t, spherical_r, theta, phi]\n\n else:\n raise ValueError(f\"Oops, don't know representation: representation\")\n\n return rep", "def intermediateJacRot2Polar(self,x):\n r = cNorm(x[:2,:],kd=False)\n x0overr = x[0,:]/r\n x1overr = x[1,:]/r\n\n Jac = Idn(x.shape[1],x.shape[0])\n Jac[:,0,0] = -x1overr\n Jac[:,0,1] = x0overr\n Jac[:,1,0] = x0overr\n Jac[:,1,1] = x1overr\n \n return Jac", "def __init__(self, XY_rotations, lines_2Q, n_qubits):\n self.XY_rotations = XY_rotations\n self.lines_2Q = lines_2Q\n self.n_qubits = n_qubits\n dim_depth, dim_qubits, dim_XY = self.XY_rotations.shape\n self.depth = 1+2*len(lines_2Q)\n # XY rotations vector does not match the depth\n if not((2*dim_depth-1) == self.depth):\n raise ValueError('XY rotations vector does not match the depth')\n # XY rotations vector does not match the qubit number\n if not(dim_qubits == n_qubits):\n raise ValueError(\n 'XY rotations vector does not match the qubit number')\n # XY rotations vector does not match the parameter number\n if not(dim_XY == 2):\n raise ValueError(\n 'XY rotations vector does not match the parameter number')", "def _make_circuit_instructions(n_qubits, depth, type_circuit):\n\n if type_circuit in [0, 1, 2]:\n\n # if type_circuit == 1:\n # if depth > 8:\n # raise ValueError(\n # \"For type-1 circuits, only at most depth=8 allowed!\"\n # )\n\n # define rotations for circuit in each layer, 0: identity, 1:X, 2:Y 3:Z\n ini_pauli = np.zeros([depth, n_qubits], dtype=int)\n\n # set first and second layer, rest comes later\n ini_pauli[0, :] = 2 # y rotation\n if depth > 1:\n ini_pauli[1, :] = 3 # z rotation\n\n # construct natural parameterized circuit\n # gives which type of entangling gates at each layer -- first entry is\n # first qubit index, second is second qubit index, third entry is type\n # of entangling gate\n entangling_gate_index_list = [[] for i in range(depth)]\n orderList = []\n for i in range(n_qubits//2):\n if i % 2 == 0:\n orderList.append(i//2)\n else:\n orderList.append((n_qubits-i)//2)\n\n if n_qubits > 1:\n shiftList = [orderList[0]]\n else:\n shiftList = []\n for i in range(1, n_qubits//2):\n shiftList.append(orderList[i])\n shiftList += shiftList[:-1]\n\n # this list gives which entangling gates are applied in which layer\n if type_circuit == 0:\n # deep natural PQC, includes non-nearest neighbor gates\n for j in range(min(len(shiftList), int(np.ceil(depth/2))-1)):\n entangling_gate_index_list[1+2*j] = [\n [2*i, (2*i+1+2*shiftList[j]) % n_qubits, 0]\n for i in range(n_qubits//2)\n ]\n elif type_circuit == 1:\n # only do 2 entangling layers at max, and only do gates with\n # nearest neighbor and no ring\n for j in range(min(len(shiftList), 3)):\n if j == 0:\n entangling_gate_index_list[1+2*j] = [\n [2*i, (2*i+1+2*shiftList[j]) % n_qubits, 0]\n for i in range(n_qubits//2)\n ]\n elif (j == 1 or j == 2):\n # exclude ring gate and gate 0,1 on third 
entangling layer\n entangling_gate_index_list[1+2*j] = [\n [2*i, (2*i+1+2*shiftList[j]) % n_qubits, 0]\n for i in range(1, n_qubits//2)\n ]\n\n elif type_circuit == 2:\n # only do 3 regular entangling layers in a ring topology, then two\n # more phase gates with next-nearst neighbor, which requires one\n # swap. This adds 4 more parameters\n for j in range(min(len(shiftList), 3)):\n entangling_gate_index_list[1+2*j] = [\n [2*i, (2*i+1+2*shiftList[j]) % n_qubits, 0]\n for i in range(n_qubits//2)\n ]\n # entangling_gate_index_list[1+2*3]=[[0,n_qubits-1,1],[0,1,0],[n_qubits-1,n_qubits-2,0]]\n # entangling_gate_index_list[1+2*3]=[[0,n_qubits-1,1],[0,1,0],[n_qubits-1,n_qubits-2,0]]\n entangling_gate_index_list[1+2*3] = [\n [n_qubits-1, 1, 0],\n [0, n_qubits-2, 0]\n ]\n\n for i in range(len(entangling_gate_index_list)-1):\n if len(entangling_gate_index_list[i]) > 0:\n for j in range(len(entangling_gate_index_list[i])):\n qubit_index = entangling_gate_index_list[i][j][0]\n ini_pauli[i+1, qubit_index] = 2\n if i+2 < depth:\n ini_pauli[i+2, qubit_index] = 3\n\n elif type_circuit == 3:\n\n ini_pauli = np.ones([depth, n_qubits], dtype=int)*2\n\n for i in range(1, depth, 2):\n ini_pauli[i, :] = 3\n\n if n_qubits % 2 == 0:\n # even qubits ALT circuit needs to get rid of boundary rotations at\n # even entangling layers\n for i in range(4, depth, 4):\n ini_pauli[i, 0] = 0\n ini_pauli[i, -1] = 0\n if i+1 < depth:\n ini_pauli[i+1, 0] = 0\n ini_pauli[i+1, -1] = 0\n else:\n # for odd qubits, get rid of boundary either on top or bottom qubit\n for i in range(2, depth, 4):\n ini_pauli[i, -1] = 0\n if i+1 < depth:\n ini_pauli[i+1, -1] = 0\n for i in range(4, depth, 4):\n ini_pauli[i, 0] = 0\n if i+1 < depth:\n ini_pauli[i+1, 0] = 0\n\n # CNOT entangling gates\n entangling_gate_index_list = [[] for i in range(depth)]\n counter = 0\n # third index indicates type of entangling gate\n for k in range(1, depth-1, 2):\n\n # place entangler every second layer, do not place any at last\n if counter % 2 == 0:\n # even layer\n entangling_gate_index_list[k] = [\n [2*j, 2*j+1, 1] for j in range(n_qubits//2)\n ]\n else:\n # odd layer\n entangling_gate_index_list[k] = [\n [2*j+1, 2*j+2, 1] for j in range((n_qubits-1)//2)\n ]\n counter += 1\n\n else:\n raise ValueError('type_circuit='+f'{type_circuit}'+' not recognised.')\n\n return ini_pauli, entangling_gate_index_list", "def rotation(self):\n return self.transform.getRotation() + [0]", "def _rotate_quaternion(self, q):\n self._normalise()\n return self * q * self.conjugate", "def __repr__(self):\n return \"Quaternion({}, {}, {}, {})\".format(repr(self.q[0]), repr(self.q[1]), repr(self.q[2]), repr(self.q[3]))", "def test_rotation(self):\n quat_rotated = rowan.rotate(input1, vector_inputs)\n\n matrices = rowan.to_matrix(input1)\n matrix_rotated = np.einsum(\"ijk,ki->ij\", matrices, vector_inputs.T)\n self.assertTrue(np.allclose(matrix_rotated, quat_rotated))", "def orient(self, parent, rot_type, amounts, rot_order=''):\n\n self._check_frame(parent)\n amounts = list(amounts)\n for i, v in enumerate(amounts):\n if not isinstance(v, Vector):\n amounts[i] = sympify(v)\n def _rot(axis, angle):\n \"\"\"DCM for simple axis 1,2,or 3 rotations. 
\"\"\"\n if axis == 1:\n return Matrix([[1, 0, 0],\n [0, cos(angle), -sin(angle)],\n [0, sin(angle), cos(angle)]])\n elif axis == 2:\n return Matrix([[cos(angle), 0, sin(angle)],\n [0, 1, 0],\n [-sin(angle), 0, cos(angle)]])\n elif axis == 3:\n return Matrix([[cos(angle), -sin(angle), 0],\n [sin(angle), cos(angle), 0],\n [0, 0, 1]])\n\n approved_orders = ('123', '231', '312', '132', '213', '321', '121',\n '131', '212', '232', '313', '323', '')\n rot_order = str(rot_order).upper() # Now we need to make sure XYZ = 123\n rot_type = rot_type.upper()\n rot_order = [i.replace('X', '1') for i in rot_order]\n rot_order = [i.replace('Y', '2') for i in rot_order]\n rot_order = [i.replace('Z', '3') for i in rot_order]\n rot_order = ''.join(rot_order)\n if not rot_order in approved_orders:\n raise TypeError('The supplied order is not an approved type')\n parent_orient = []\n\n if rot_type == 'AXIS':\n if not rot_order == '':\n raise TypeError('Axis orientation takes no rotation order')\n if not (isinstance(amounts, (list, tuple)) & (len(amounts) == 2)):\n raise TypeError('Amounts are a list or tuple of length 2')\n theta = amounts[0]\n axis = amounts[1]\n self._check_vector(axis)\n if not axis.dt(parent) == 0:\n raise ValueError('Axis cannot be time-varying')\n axis = axis.express(parent).normalize()\n axis = axis.args[0][0]\n parent_orient = ((eye(3) - axis * axis.T) * cos(theta) +\n Matrix([[0, -axis[2], axis[1]],[axis[2], 0, -axis[0]],\n [-axis[1], axis[0], 0]]) * sin(theta) + axis * axis.T)\n elif rot_type == 'QUATERNION':\n if not rot_order == '':\n raise TypeError('Quaternion orientation takes no rotation order')\n if not (isinstance(amounts, (list, tuple)) & (len(amounts) == 4)):\n raise TypeError('Amounts are a list or tuple of length 4')\n q0, q1, q2, q3 = amounts\n parent_orient = (Matrix([[q0 ** 2 + q1 ** 2 - q2 ** 2 - q3 **\n 2, 2 * (q1 * q2 - q0 * q3), 2 * (q0 * q2 + q1 * q3)],\n [2 * (q1 * q2 + q0 * q3), q0 ** 2 - q1 ** 2 + q2 **2 - q3 ** 2,\n 2 * (q2 * q3 - q0 * q1)], [2 * (q1 * q3 - q0 * q2), 2 * (q0 *\n q1 + q2 * q3), q0 ** 2 - q1 ** 2 - q2 ** 2 + q3 ** 2]]))\n elif rot_type == 'BODY':\n if not (len(amounts) == 3 & len(rot_order) == 3):\n raise TypeError('Body orientation takes 3 values & 3 orders')\n a1 = int(rot_order[0])\n a2 = int(rot_order[1])\n a3 = int(rot_order[2])\n parent_orient = (_rot(a1, amounts[0]) * _rot(a2, amounts[1])\n * _rot(a3, amounts[2]))\n elif rot_type == 'SPACE':\n if not (len(amounts) == 3 & len(rot_order) == 3):\n raise TypeError('Space orientation takes 3 values & 3 orders')\n a1 = int(rot_order[0])\n a2 = int(rot_order[1])\n a3 = int(rot_order[2])\n parent_orient = (_rot(a3, amounts[2]) * _rot(a2, amounts[1])\n * _rot(a1, amounts[0]))\n else:\n raise NotImplementedError('That is not an implemented rotation')\n self._dcm_dict.update({parent: parent_orient})\n parent._dcm_dict.update({self: parent_orient.T})\n if rot_type == 'QUATERNION':\n t = dynamicsymbols._t\n q0, q1, q2, q3 = amounts\n q0d = diff(q0, t)\n q1d = diff(q1, t)\n q2d = diff(q2, t)\n q3d = diff(q3, t)\n w1 = 2 * (q1d * q0 + q2d * q3 - q3d * q2 - q0d * q1)\n w2 = 2 * (q2d * q0 + q3d * q1 - q1d * q3 - q0d * q2)\n w3 = 2 * (q3d * q0 + q1d * q2 - q2d * q1 - q0d * q3)\n wvec = Vector([(Matrix([w1, w2, w3]), self)])\n elif rot_type == 'AXIS':\n thetad = (amounts[0]).diff(dynamicsymbols._t)\n wvec = thetad * amounts[1].express(parent).normalize()\n else:\n try:\n from sympy.polys.polyerrors import CoercionFailed\n from sympy.physics.mechanics.functions import kinematic_equations\n q1, q2, q3 = 
amounts\n u1, u2, u3 = dynamicsymbols('u1, u2, u3')\n templist = kinematic_equations([u1, u2, u3], [q1, q2, q3],\n rot_type, rot_order)\n templist = [expand(i) for i in templist]\n td = solve(templist, [u1, u2, u3])\n u1 = expand(td[u1])\n u2 = expand(td[u2])\n u3 = expand(td[u3])\n wvec = u1 * self.x + u2 * self.y + u3 * self.z\n except (CoercionFailed, AssertionError):\n wvec = self._w_diff_dcm(parent)\n self._ang_vel_dict.update({parent: wvec})\n parent._ang_vel_dict.update({self: -wvec})", "def list_rotation(cns):\n cns = [cns[1], cns[2], cns[3], cns[0]]\n return cns", "def get_rot(self) -> WAQuaternion:\n pass", "def rotation_only(q_1: Q, h: Q) -> Q:\n h_4_rotation = vector_q(h)\n return rotation_and_or_boost(q_1, h_4_rotation)", "def _cubelet_rotation_matrix(self, cubelet_meta_info, qpos_array):\n euler_angles = qpos_array[cubelet_meta_info[\"euler_qpos\"]]\n return rotation.euler2mat(euler_angles)", "def RotateSliceView (self, Node, sliceViewName):\n volNode = Node\n sliceNode = slicer.app.layoutManager().sliceWidget(sliceViewName).mrmlSliceNode()\n sliceToRas = sliceNode.GetSliceToRAS()\n VtkMatrix = vtk.vtkMatrix4x4()\n volNode.GetIJKToRASMatrix(VtkMatrix)\n M = np.zeros((4,4)) #### IJK To RAS Numpy Matrix \n for i in range (4):\n for j in range (4):\n M[i,j] = VtkMatrix.GetElement(i,j)\n\n Dim = volNode.GetImageData().GetDimensions()\n t = M.dot(np.array([(Dim[0]-1)/2,(Dim[1]-1)/2,0,1]))\n for i in range (4):\n VtkMatrix.SetElement(i,3,t[i])\n VtkMatrix.SetElement(i,2,VtkMatrix.GetElement(i,2)/-10) \n VtkMatrix.SetElement(i,1,VtkMatrix.GetElement(i,1)*-1) # The minus sign, above and here, is a Pi rotation around the X axis.\n\n sliceToRas.DeepCopy(VtkMatrix)\n sliceNode.UpdateMatrices()\n sliceNode.RotateToVolumePlane(volNode)", "def rotation_matrix(self):\n self._normalise()\n product_matrix = np.dot(self._q_matrix(), self._q_bar_matrix().conj().transpose())\n return product_matrix[1:][:,1:]", "def correct_rotation(k_rotations):\n\n for key, value in Chunk.global_piece_rotations.items():\n Chunk.global_piece_rotations[key] = (k_rotations + value) % 4\n # Should I correct it for the side rotations also?", "def _decompose_rotation(self, cmd):\n \n axis = None\n angle = 0\n gate_name = str(cmd.gate)\n\n if \"Rz\" in gate_name:\n axis = 'z'\n elif \"Rx\" in gate_name:\n axis = 'x'\n elif \"Ry\" in gate_name:\n axis = 'y' \n\n angle = gate_name[gate_name.find(\"(\")+1:gate_name.find(\")\")]\n\n decomposition = subprocess.check_output(\"./gridsynth \" + angle, shell=True)[:-1]\n new_sequence = self._process_decomposition(str(decomposition),cmd.qubits[0])\n return new_sequence", "def rotationDetermination(self):\n \n for index, row in enumerate(self.magdata):\n if index > 11 and index < (len(self.magdata) - 12):\n br1 = [row[0] for row in self.magdata[(index-12):(index-2)]]\n bt1 = [row[1] for row in self.magdata[(index-12):(index-2)]]\n bn1 = [row[2] for row in self.magdata[(index-12):(index-2)]]\n b1 = np.matrix((np.mean(br1), np.mean(bt1), np.mean(bn1)))\n\n br2 = [row[0] for row in self.magdata[(index+2):(index+12)]]\n bt2 = [row[1] for row in self.magdata[(index+2):(index+12)]]\n bn2 = [row[2] for row in self.magdata[(index+2):(index+12)]]\n b2 = np.matrix((np.mean(br2), np.mean(bt2), np.mean(bn2)))\n\n theta = np.arccos(np.dot(b1,b2.T)/(np.linalg.norm(b1)*np.linalg.norm(b2)))*180/np.pi\n\n self.detections.rotations.append(theta[0,0])\n self.detections.rotationTimeTags.append(self.timestamps[index])\n \n\n## self.b1 = b1\n## self.b2 = b2\n self.detections.rotationBoundary=[]\n if 
len(self.detections.rotations) != 0:\n \n for index, theta in enumerate(self.detections.rotations):\n if index > 0:\n if theta > 30 and self.detections.rotations[index-1] < 30:\n self.detections.rotationBoundary.append(self.detections.rotationTimeTags[index])\n if index < len(self.detections.rotations)-1:\n if theta > 30 and self.detections.rotations[index+1] < 30:\n self.detections.rotationBoundary.append(self.detections.rotationTimeTags[index])", "def __repr__(self):\n result = \"Quaternion (%g (%g %g %g))\" % \\\n (self.real, self.pure[0],\n self.pure[1], self.pure[2])\n return result", "def fk4(joint_rotations):\n h0_4 = htm4(joint_rotations)\n x0_4 = h0_4[0, 3]\n y0_4 = h0_4[1, 3]\n z0_4 = h0_4[2, 3]\n d0_4 = [x0_4, y0_4, z0_4]\n return d0_4", "def __init__(self):\n self.rot_axis = 1", "def rotation_matrix(self):\n return self.affine_matrix[0:3][:, 0:3]", "def GetSymRotMat(symtype='Cubic'):\n if symtype == 'Cubic':\n m=np.zeros((24,3,3))\n m[0][0,1]=1\n m[0][1,0]=-1\n m[0][2,2]=1\n\n m[1][0,0]=-1\n m[1][1,1]=-1\n m[1][2,2]=1\n\n m[2][0,1]=-1\n m[2][1,0]=1\n m[2][2,2]=1\n\n m[3][0,2]=-1\n m[3][1,1]=1\n m[3][2,0]=1\n\n m[4][0,0]=-1\n m[4][1,1]=1\n m[4][2,2]=-1\n\n m[5][0,2]=1\n m[5][1,1]=1\n m[5][2,0]=-1\n\n m[6][0,0]=1\n m[6][1,2]=1\n m[6][2,1]=-1\n\n m[7][0,0]=1\n m[7][1,1]=-1\n m[7][2,2]=-1\n\n m[8][0,0]=1\n m[8][1,2]=-1\n m[8][2,1]=1\n\n m[9][0,1]=1\n m[9][1,2]=1\n m[9][2,0]=1\n\n m[10][0,2]=1\n m[10][1,0]=1\n m[10][2,1]=1\n\n m[11][0,2]=-1\n m[11][1,0]=1\n m[11][2,1]=-1\n\n m[12][0,1]=1\n m[12][1,2]=-1\n m[12][2,0]=-1\n\n m[13][0,2]=1\n m[13][1,0]=-1\n m[13][2,1]=-1\n\n m[14][0,1]=-1\n m[14][1,2]=-1\n m[14][2,0]=1\n\n m[15][0,2]=-1\n m[15][1,0]=-1\n m[15][2,1]=1\n\n m[16][0,1]=-1\n m[16][1,2]=1\n m[16][2,0]=-1\n\n m[17][0,0]=-1\n m[17][1,2]=1\n m[17][2,1]=1\n\n m[18][0,2]=1\n m[18][1,1]=-1\n m[18][2,0]=1\n\n m[19][0,1]=1\n m[19][1,0]=1\n m[19][2,2]=-1\n\n m[20][0,0]=-1\n m[20][1,2]=-1\n m[20][2,1]=-1\n\n m[21][0,2]=-1\n m[21][1,1]=-1\n m[21][2,0]=-1\n\n m[22][0,1]=-1\n m[22][1,0]=-1\n m[22][2,2]=-1\n\n m[23][0,0]=1\n m[23][1,1]=1\n m[23][2,2]=1\n \n return m\n elif symtype == 'Hexagonal':\n m=np.zeros((12,3,3))\n m[0][0,0]=0.5\n m[0][1,1]=0.5\n m[0][2,2]=1\n m[0][0,1]=-np.sqrt(3)*0.5\n m[0][1,0]=np.sqrt(3)*0.5\n\n m[1]=m[0].dot(m[0])\n m[2]=m[1].dot(m[0])\n m[3]=m[2].dot(m[0])\n m[4]=m[3].dot(m[0])\n m[5]=np.eye(3)\n\n m[6][0,0]=1\n m[6][1,1]=-1\n m[6][2,2]=-1\n\n m[7]=m[0].dot(m[6])\n m[8]=m[1].dot(m[6])\n m[9]=m[2].dot(m[6])\n m[10]=m[3].dot(m[6])\n m[11]=m[4].dot(m[6])\n\n return m\n else:\n print \"not implemented yet\"\n return 0", "def orbit(self, representation='trivial'):\n if not self:\n return self\n\n answer = BarrattEccles_element(torsion=self.torsion)\n for k, v in self.items():\n inverse = tuple(k[0].index(i + 1) + 1 for i in range(len(k[0])))\n permutation = SymmetricRing_element({inverse: 1}, torsion=self.torsion)\n if representation == 'sign':\n permutation = k[0].sign * permutation\n answer += permutation * BarrattEccles_element({k: v}, torsion=self.torsion)\n\n return answer", "def _quaternions(self, R):\n # Simple Wikipedia version\n # en.wikipedia.org/wiki/Rotation_matrix#Quaternion\n # For other options see math.stackexchange.com/questions/2074316/calculating-rotation-axis-from-rotation-matrix\n diag = torch.diagonal(R, dim1=-2, dim2=-1)\n Rxx, Ryy, Rzz = diag.unbind(-1)\n magnitudes = 0.5 * torch.sqrt(torch.abs(1 + torch.stack([\n Rxx - Ryy - Rzz, \n - Rxx + Ryy - Rzz, \n - Rxx - Ryy + Rzz\n ], -1)))\n _R = lambda i,j: R[:,:,:,i,j]\n signs = 
torch.sign(torch.stack([\n _R(2,1) - _R(1,2),\n _R(0,2) - _R(2,0),\n _R(1,0) - _R(0,1)\n ], -1))\n xyz = signs * magnitudes\n # The relu enforces a non-negative trace\n w = torch.sqrt(F.relu(1 + diag.sum(-1, keepdim=True))) / 2.\n Q = torch.cat((xyz, w), -1)\n Q = F.normalize(Q, dim=-1)\n return Q", "def qrot(I,sym):\n T = s.Symbol(\"T\")\n if type(I) == list:\n return (((s.pi * I[0] * I[1] * I[2])**(1/2))/sym) * ((8 * s.pi**2 * k * T) / (h**2))**(3/2)\n else:\n return (((s.pi * I)**(1/2))/sym) * ((8 * s.pi**2 * k * T) / (h**2))**(3/2)", "def lrbt_rot(self):\n l, r, b, t = self._ext[0], self._ext[1], self._ext[2], self._ext[3]\n if self.__rot90 % 4 == 1: # -90deg\n self._ext = [b, t, l, r]\n elif self.__rot90 % 4 == 2: # -180deg\n self._ext = [r, l, t, b]\n elif self.__rot90 % 4 == 3: # -270deg\n self._ext = [t, b, r, l]", "def rotate_orbit(self):\n try:\n ang = self.orbit_speed * self.time_scale / self.refresh_rate\n self.obj.rotate(angle=ang, axis=vector(0, 1, 0), origin=self.star.obj.pos)\n self.sum_ang += ang\n except ZeroDivisionError:\n print(\"ERROR: REFRESH_RATE is 0\")\n except (AttributeError, TypeError):\n print(\"ERROR: wrong arguments type while initializing!!\")", "def operator(self, params: Tensor) -> Tensor:\n theta, phi = params\n # calculate entries\n a: Tensor = exp(1j * phi) * cos(theta / 2)\n b: Tensor = sin(theta / 2)\n c: Tensor = -b\n d: Tensor = exp(-1j * phi) * cos(theta / 2)\n # construct the rows of the rotation matrix\n r1: Tensor = cat((a.view(1), b.view(1)))\n r2: Tensor = cat((c.view(1), d.view(1)))\n # build and return the rotation matrix\n rot: Tensor = cat((r1, r2)).view(2, 2)\n return rot", "def Rotation_EQJ_ECL():\n # ob = mean obliquity of the J2000 ecliptic = 0.40909260059599012 radians.\n c = 0.9174821430670688 # cos(ob)\n s = 0.3977769691083922 # sin(ob)\n return RotationMatrix([\n [ 1, 0, 0],\n [ 0, +c, -s],\n [ 0, +s, +c]\n ])", "def rotationMatrix(self):\n\n R = Compute3DRotationMatrix(self.exteriorOrientationParameters[3], self.exteriorOrientationParameters[4],\n self.exteriorOrientationParameters[5])\n\n return R", "def rot_left(self):\n width = self.get_size()[1]\n return Matrix([row[1:width] + tuple([row[0]]) for row in self.coefficients])", "def CCframe(q1, q2, q3, xy = 0):\n\n # Postitions of the joints\n # Komplexe Zahlen für Geometrie in der Ebene nutzen\n # (Rotationsmatrizen gingen genauso)\n\n J0= 0+0j + xy # offset\n J1= J0 + l1*exp(1j*q1)\n J2= J1 + l2*exp(1j*(q1+q2))\n J3= J2 + l3*exp(1j*(q1+q2+q3))\n pl.plot(r_[J0,].real, r_[J0,].imag, 'ks', ms = 8)\n pl.plot(r_[J0, J1].real, r_[J0, J1].imag, 'k-', lw=3)\n pl.plot(r_[J2, J1].real, r_[J2, J1].imag, 'ko-', lw=2)\n pl.plot(r_[J2, J3].real, r_[J2, J3].imag, 'ko-')\n pl.xticks= []\n pl.yticks= []\n pl.axis('equal')", "def rotation_mat(self) -> np.ndarray:\n rot = np.zeros((3, 3))\n\n txx = 2 * self.x * self.x\n tyy = 2 * self.y * self.y\n tzz = 2 * self.z * self.z\n twx = 2 * self.w * self.x\n twy = 2 * self.w * self.y\n twz = 2 * self.w * self.z\n txy = 2 * self.x * self.y\n txz = 2 * self.x * self.z\n tyz = 2 * self.y * self.z\n\n rot[0, 0] = 1. - tyy - tzz\n rot[0, 1] = txy - twz\n rot[0, 2] = txz + twy\n rot[1, 0] = txy + twz\n rot[1, 1] = 1. - txx - tzz\n rot[1, 2] = tyz - twx\n rot[2, 0] = txz - twy\n rot[2, 1] = tyz + twx\n rot[2, 2] = 1. 
- txx - tyy\n\n return rot", "def rotations4(polycube, axis):\r\n for i in range(4):\r\n yield rot90(polycube, i, axis)", "def intermediateJac(self, x, isRot=False):\n if not isRot:\n x = dot(self._rotation,x)\n\n #Due to how it is constructed,\n #J_rot2polar is its own inverse (and symmetric)\n \n Jac = self.intermediateJacRot2Polar(x)\n \n #Jac = S.J.R\n \n s=self._scaled.reshape((1,self._dim,1))\n R = self._rotation\n \n #Compute J.R\n Jac = np.einsum(\"ijk,kl->ijl\",Jac,R)\n #Left multiply with S\n #S.(J.R)\n Jac *= s #A left multiplication with a diagonal matrix is like scaling the rows\n \n return Jac", "def _rot(axis, angle):\n if axis == 1:\n return Matrix([[1, 0, 0],\n [0, cos(angle), -sin(angle)],\n [0, sin(angle), cos(angle)]])\n elif axis == 2:\n return Matrix([[cos(angle), 0, sin(angle)],\n [0, 1, 0],\n [-sin(angle), 0, cos(angle)]])\n elif axis == 3:\n return Matrix([[cos(angle), -sin(angle), 0],\n [sin(angle), cos(angle), 0],\n [0, 0, 1]])", "def fk3(joint_rotations):\n h0_3 = htm0_3(joint_rotations)\n x0_3 = h0_3[0, 3]\n y0_3 = h0_3[1, 3]\n z0_3 = h0_3[2, 3]\n d0_3 = [x0_3, y0_3, z0_3]\n return d0_3", "def get_rot_dtdt(self) -> WAQuaternion:\n pass", "def set_rotation_matrices(self):\r\n for i in range(len(self.vertices)):\r\n self.vertices[i].meta['C'] = rotation_matrix(self.vertices[i].meta['axis'][0], \r\n self.vertices[i].meta['axis'][1], \r\n self.vertices[i].meta['axis'][2], \r\n self.vertices[i].meta['axis_order'],\r\n degrees=True)\r\n # Todo: invert this by applying angle operations in reverse order\r\n self.vertices[i].meta['Cinv'] = np.linalg.inv(self.vertices[i].meta['C'])", "def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, chi, omega) = angles[0:3]\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)", "def T(self):\n\n # Calculate the direction cosines for the local x-axis\n # The local x-axis will run from the i-node to the j-node\n xi = self.i_node.X\n xj = self.j_node.X\n yi = self.i_node.Y\n yj = self.j_node.Y\n zi = self.i_node.Z\n zj = self.j_node.Z\n x = [(xj - xi), (yj - yi), (zj - zi)]\n x = x/norm(x)\n \n # The local y-axis will be in the plane of the plate\n # Find a vector in the plate's local xy plane\n xn = self.n_node.X\n yn = self.n_node.Y\n zn = self.n_node.Z\n xy = [xn - xi, yn - yi, zn - zi]\n\n # Find a vector perpendicular to the plate surface to get the orientation of the local z-axis\n z = cross(x, xy)\n \n # Divide the vector by its magnitude to produce a unit z-vector of direction cosines\n z = z/norm(z)\n\n # Calculate the local y-axis as a vector perpendicular to the local z and x-axes\n y = cross(z, x)\n \n # Divide the z-vector by its magnitude to produce a unit vector of direction cosines\n y = y/norm(y)\n\n # Create the direction cosines matrix\n dirCos = array([x, y, z])\n \n # Build the transformation matrix\n transMatrix = zeros((24, 24))\n transMatrix[0:3, 0:3] = dirCos\n transMatrix[3:6, 3:6] = dirCos\n transMatrix[6:9, 6:9] = dirCos\n transMatrix[9:12, 9:12] = dirCos\n transMatrix[12:15, 12:15] = dirCos\n transMatrix[15:18, 15:18] = dirCos\n transMatrix[18:21, 18:21] = dirCos\n transMatrix[21:24, 21:24] = dirCos\n \n return transMatrix", "def make_q_rot_matrix(self, angles):\n #For other 
instruments, this method may be different.\n (phi, chi) = angles[0:2]\n omega = np.deg2rad(self.omega)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)", "def getMatrixRepresentation(self):\n \n S = self.spinSystem\n dim = S.dimension\n matRep = np.zeros([dim, dim], dtype = complex)\n\n for i, b1 in enumerate(S.basis.Bras):\n for j, b2 in enumerate(S.basis.Kets):\n ket = self.__mul__(b2)\n\n matRep[i,j] += b1*ket\n\n return matRep", "def getOblateXRotMatrix(aStar1, aStar2):\n aStarDir = aStar2 - a1\n aStarmid = aStar1 + 0.5 * aStarDir\n kath = np.sqrt((aStarDir[0] * aStarDir[0] + aStarDir[1] * aStarDir[1]) / 4.0)\n phi = np.arctan( abs( (aStarDir[2]/2) / kath) )\n octantAStar2 = octant(aStar2)\n if octantAStar2 in [1, 2, 7, 8]: #\n phi = -phi\n print \"phi =\" , np.rad2deg(phi)\n RotX = np.matrix( [ [ 1.0, 0.0 , 0.0 ],\n [ 0.0, np.cos(phi), np.sin(phi)],\n [ 0.0, -np.sin(phi), np.cos(phi)]\n ])\n return np.asarray( RotX )", "def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, kappa, omega) = self.get_phi_kappa_omega(angles)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.kappa_opposite_rotation_matrix(phi, np.deg2rad(self.alpha), kappa, omega)", "def rotate(self, quaternion, origin_x = 0, origin_y = 0, origin_z = 0):\n\n for atom in self.get_atoms():\n atom.rotate(quaternion, origin_x, origin_y, origin_z)", "def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, chi, omega) = self.get_phi_chi_omega(angles)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)", "def qft(qubits):\n qftcirc = Circuit()\n\n # get number of qubits\n num_qubits = len(qubits)\n\n for k in range(num_qubits):\n # First add a Hadamard gate\n qftcirc.h(qubits[k])\n\n # Then apply the controlled rotations, with weights (angles) defined by the distance to the control qubit.\n # Start on the qubit after qubit k, and iterate until the end. 
When num_qubits==1, this loop does not run.\n for j in range(1, num_qubits - k):\n angle = 2 * math.pi / (2 ** (j + 1))\n qftcirc.cphaseshift(qubits[k + j], qubits[k], angle)\n\n # Then add SWAP gates to reverse the order of the qubits:\n for i in range(math.floor(num_qubits / 2)):\n qftcirc.swap(qubits[i], qubits[-i - 1])\n\n return qftcirc", "def add_rotation(self, rotation, lump, is_mirrored):\n\n if rotation == '0':\n self.rotations = {'0': lump}\n self.is_mirrored = {'0': False}\n self.has_rotations = False\n\n else:\n self.rotations[rotation] = lump\n self.is_mirrored[rotation] = is_mirrored\n self.has_rotations = True", "def quatActiveRot(q, v):\n\tv_q = np.zeros((4, 1))\n\tv_q[1:] = v\n\tv_qnew = quatLeftMat(q) @ quatRightMat(q).T @ v_q\n\treturn v_qnew[1:]", "def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, chi, omega) = self.get_phi_chi_omega(angles)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)", "def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, chi, omega) = self.get_phi_chi_omega(angles)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)", "def quaternion_to_rotation_matrix(q0, q1, q2, q3) -> np:\n\n # First row of the rotation matrix\n r00 = 2 * (q0 * q0 + q1 * q1) - 1\n r01 = 2 * (q1 * q2 - q0 * q3)\n r02 = 2 * (q1 * q3 + q0 * q2)\n\n # Second row of the rotation matrix\n r10 = 2 * (q1 * q2 + q0 * q3)\n r11 = 2 * (q0 * q0 + q2 * q2) - 1\n r12 = 2 * (q2 * q3 - q0 * q1)\n\n # Third row of the rotation matrix\n r20 = 2 * (q1 * q3 - q0 * q2)\n r21 = 2 * (q2 * q3 + q0 * q1)\n r22 = 2 * (q0 * q0 + q3 * q3) - 1\n\n # 3x3 rotation matrix\n rot_matrix = np.array([[r00, r01, r02],\n [r10, r11, r12],\n [r20, r21, r22]])\n\n return rot_matrix", "def quaternion2rot3d(quat):\n q01 = quat[0] * quat[1]\n q02 = quat[0] * quat[2]\n q03 = quat[0] * quat[3]\n q11 = quat[1] * quat[1]\n q12 = quat[1] * quat[2]\n q13 = quat[1] * quat[3]\n q22 = quat[2] * quat[2]\n q23 = quat[2] * quat[3]\n q33 = quat[3] * quat[3]\n\n # Obtain the rotation matrix\n rotation = np.zeros((3, 3))\n rotation[0, 0] = (1. - 2. * (q22 + q33))\n rotation[0, 1] = 2. * (q12 - q03)\n rotation[0, 2] = 2. * (q13 + q02)\n rotation[1, 0] = 2. * (q12 + q03)\n rotation[1, 1] = (1. - 2. * (q11 + q33))\n rotation[1, 2] = 2. * (q23 - q01)\n rotation[2, 0] = 2. * (q13 - q02)\n rotation[2, 1] = 2. * (q23 + q01)\n rotation[2, 2] = (1. - 2. 
* (q11 + q22))\n\n return rotation", "def rotator(angle):\n\n c = np.cos(2*angle)\n s = np.sin(2*angle)\n return np.array([[1,0,0,0],[0,c,-s,0],[0,s,c,0],[0,0,0,1]])", "def test_serialize_operator_needs_rotation(self, obs, expected):\n dev = QeQiskitDevice(wires=3, shots=1000, backend=\"qasm_simulator\", analytic=False)\n op_str = dev.serialize_operator(obs)\n assert op_str == expected", "def z_rotation(self):\n before = ('R', 'r', 'U', 'u', 'L', 'l', 'D', 'd', 'M', 'E', 'x', 'y')\n after = ('U', 'u', 'L', 'l', 'D', 'd', 'R', 'r', 'E', 'M\\'', \"y\", \"x'\")\n solve = self.solve_helper.maketrans(dict(zip(before, after)))\n solve_trans = self.solve_helper.translate(solve)\n solve_trans = solve_trans.replace(\"\\'\\'\", \"\")\n self.solve_helper = solve_trans", "def base_orientation_quaternion(self):\n raise NotImplementedError('Not yet implemented!')", "def rotmat(p, q):\n rot = numpy.dot(refmat(q, -p), refmat(p, -p))\n return rot", "def layer(i):\n with tf.name_scope('layer_{}'.format(i)):\n # displacement gate\n Dgate(tf.clip_by_value(d_r[i], -disp_clip, disp_clip), d_phi[i]) | q[0]\n # rotation gate\n Rgate(r1[i]) | q[0]\n # squeeze gate\n Sgate(tf.clip_by_value(sq_r[i], -sq_clip, sq_clip), sq_phi[i]) | q[0]\n # rotation gate\n Rgate(r2[i]) | q[0]\n # Kerr gate\n Kgate(tf.clip_by_value(kappa1[i], -kerr_clip, kerr_clip)) | q[0]", "def to_revolute_tree(self):\n T_zero = {\"p0\": SE3.identity()}\n stack = [\"p0\"]\n tree_structure = {\"p0\": []}\n ang_lims_map = {}\n old_to_new_names = {\n \"p0\": \"p0\"\n } # Returned for user of the method (to map old joint names to new ones)\n ub, lb = spherical_angle_bounds_to_revolute(self.ub, self.lb)\n count = 1\n while len(stack) > 0:\n joint = stack.pop(0)\n new_joint = old_to_new_names[joint]\n for child in self.parents[joint]:\n stack += [child]\n new_child = \"p\" + str(count)\n count += 1\n # ub[new_child] = self.ub[child]\n # lb[new_child] = self.lb[child]\n ang_lims_map[child] = new_child\n tree_structure[new_joint] += [new_child]\n new_grand_child = \"p\" + str(count)\n count += 1\n old_to_new_names[child] = new_grand_child\n tree_structure[new_child] = [new_grand_child]\n Ry = SE3(SO3(roty(np.pi / 2)), np.zeros(3))\n T_zero[new_child] = T_zero[new_joint].dot(Ry)\n d = self.d[child]\n Ry_back = SE3(SO3(roty(-np.pi / 2)), np.zeros(3))\n T_zero[new_grand_child] = (\n T_zero[new_child].dot(Ry_back).dot(trans_axis(d, \"z\"))\n )\n tree_structure[new_grand_child] = []\n\n # for key in old_to_new_names:\n # if key in self.ub.keys():\n # ub[old_to_new_names[key]] = self.ub[key]\n # lb[old_to_new_names[key]] = self.lb[key]\n\n # for key in T_zero:\n # if key not in ub.keys() and key is not 'p0':\n # ub[key] = np.pi\n # lb[key] = -np.pi\n\n params = {\"T_zero\": T_zero, \"ub\": ub, \"lb\": lb, \"parents\": tree_structure}\n\n # print(\"normal ub: {:}\".format(self.ub))\n # print(\"ub: {:}\".format(ub))\n # print(\"lb: {:}\".format(lb))\n return RobotRevolute(params), old_to_new_names, ang_lims_map", "def quat_to_rotmat(quat): \n norm_quat = quat\n norm_quat = norm_quat/norm_quat.norm(p=2, dim=1, keepdim=True)\n w, x, y, z = norm_quat[:,0], norm_quat[:,1], norm_quat[:,2], norm_quat[:,3]\n\n B = quat.size(0)\n\n w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)\n wx, wy, wz = w*x, w*y, w*z\n xy, xz, yz = x*y, x*z, y*z\n\n rotMat = torch.stack([w2 + x2 - y2 - z2, 2*xy - 2*wz, 2*wy + 2*xz,\n 2*wz + 2*xy, w2 - x2 + y2 - z2, 2*yz - 2*wx,\n 2*xz - 2*wy, 2*wx + 2*yz, w2 - x2 - y2 + z2], dim=1).view(B, 3, 3)\n return rotMat", "def 
make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, omega) = angles[0:2]\n chi = np.deg2rad(self.chi)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)", "def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, omega) = angles[0:2]\n chi = np.deg2rad(self.chi)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)", "def quat_rotate(X, q):\n # repeat q along 2nd dim\n ones_x = X[[0], :, :][:, :, [0]] * 0 + 1\n q = torch.unsqueeze(q, 1) * ones_x\n\n q_conj = torch.cat([q[:, :, [0]], -1 * q[:, :, 1:4]], dim=-1)\n X = torch.cat([X[:, :, [0]] * 0, X], dim=-1)\n\n X_rot = hamilton_product(q, hamilton_product(X, q_conj))\n return X_rot[:, :, 1:4]", "def quat_rotate(X, q):\n # repeat q along 2nd dim\n ones_x = X[[0], :, :][:, :, [0]] * 0 + 1\n q = torch.unsqueeze(q, 1) * ones_x\n\n q_conj = torch.cat([q[:, :, [0]], -1 * q[:, :, 1:4]], dim=-1)\n X = torch.cat([X[:, :, [0]] * 0, X], dim=-1)\n\n X_rot = hamilton_product(q, hamilton_product(X, q_conj))\n return X_rot[:, :, 1:4]", "def get_RotationsFromTiltSeries(self, TiltSeries_):\n # initialize alignment \n self.rotInPlane = len(TiltSeries_.Projections) * [0.]\n kk = 0\n for Proj in TiltSeries_.Projections:\n self.rotInPlane[kk] = Proj.rotInPlane\n kk = kk + 1\n return self.rotInPlane", "def orbit(self, representation='trivial'):\n\n def sign(permutation, representation):\n if representation == 'trivial':\n return 1\n if representation == 'sign':\n return permutation.sign\n\n answer = self.zero()\n for k, v in self.items():\n seen = []\n for i in k:\n if i not in seen:\n seen.append(i)\n permutation = SymmetricGroupElement(seen).inverse()\n new_v = sign(permutation, representation) * v\n answer += permutation * self.create({k: new_v})\n\n return answer", "def test_cylindrical(self):\n # Rotate around the z axis\n r = Joint.cylindrical(np.array([0, 0, 1]))\n t_mat = r(np.array([np.pi / 2, 1.0]))\n\n rot_vec = np.dot(t_mat[:3, :3], np.array([1, 0, 0]))\n\n self.assertTrue(np.allclose(\n rot_vec, np.array([0, 1, 0]), rtol=1e-5, atol=1e-5))\n self.assertTrue(np.allclose(t_mat[2, 3], 1))", "def wheels(self):\n return RotatedShape(quantify=4,\n shape_in=self.wheel_attributes[child.index],\n rotation_point=self.position,\n vector=Vector(0, 1, 0),\n angle=radians(-90))", "def next_rotation(q_1: Q, q_2: Q) -> Q:\n q_1.check_representations(q_2)\n\n if not math.isclose(q_1.t, q_2.t):\n raise ValueError(f\"Oops, to be a rotation, the first values must be the same: {q_1.t} != {q_2.t}\")\n\n if not math.isclose(norm_squared(q_1).t, norm_squared(q_2).t):\n raise ValueError(f\"Oops, the norm squared of these two are not equal: {norm_squared(q_1).t} != {norm_squared(q_2).t}\")\n\n next_rot = product(q_1, q_2)\n v_abs_q_1 = abs_of_vector(q_1).t\n next_vector_normalized = normalize(vector_q(next_rot), v_abs_q_1)\n next_vector_normalized.t = q_1.t\n\n return next_vector_normalized", "def 
galaxy1_orbital_orientation(self):\n return self._galaxy1_orbital_orientation", "def matrixRepresentation(self,decimals=8):\n temp = self.circuit.copy()\n temp.remove_final_measurements()\n \n simulator = Aer.get_backend('unitary_simulator')\n result = execute(temp, backend=simulator).result()\n unitary = result.get_unitary(decimals=decimals).tolist()\n for i in range(len(unitary)):\n for j in range(len(unitary[i])):\n if unitary[i][j]==0:\n unitary[i][j]=\"0\"\n else:\n string=str(unitary[i][j].real).replace(\".0\", \"\")\n string=\"\" if unitary[i][j].real==0 else string\n string+=self.numberFormat(unitary[i][j].imag,True)\n unitary[i][j]=string.lstrip(\"+\")\n return unitary", "def test_serialize_circuit_no_rotations(self):\n dev = QeQiskitDevice(wires=1, shots=1000, backend=\"statevector_simulator\", analytic=True)\n\n def circuit():\n qml.Hadamard(wires=[0])\n return qml.expval(qml.Hadamard(0))\n\n qnode = qml.QNode(circuit, dev)\n qnode._construct([], {})\n\n qasm = dev.serialize_circuit(qnode.circuit)\n expected = 'OPENQASM 2.0;\\ninclude \"qelib1.inc\";\\nqreg q[1];\\ncreg c[1];\\nh q[0];\\n'\n assert qasm == expected", "def _one_q_sic_prep(index, qubit):\n if index == 0:\n return Program()\n\n theta = 2 * np.arccos(1 / np.sqrt(3))\n zx_plane_rotation = Program([\n RX(-pi / 2, qubit),\n RZ(theta - pi, qubit),\n RX(-pi / 2, qubit),\n ])\n\n if index == 1:\n return zx_plane_rotation\n\n elif index == 2:\n return zx_plane_rotation + RZ(-2 * pi / 3, qubit)\n\n elif index == 3:\n return zx_plane_rotation + RZ(2 * pi / 3, qubit)\n\n raise ValueError(f'Bad SIC index: {index}')", "def rotations_load_to_maya(rotations, positions, names=None):\r\n \r\n import pymel.core as pm\r\n\r\n if names is None: names = [\"joint_\" + str(i) for i in range(rotations.shape[1])]\r\n \r\n maxis = []\r\n frames = range(1, len(positions)+1)\r\n for i, name in enumerate(names):\r\n \r\n name = name + \"_axis\"\r\n axis = pm.group(\r\n pm.curve(p=[(0,0,0), (1,0,0)], d=1, n=name+'_axis_x'),\r\n pm.curve(p=[(0,0,0), (0,1,0)], d=1, n=name+'_axis_y'),\r\n pm.curve(p=[(0,0,0), (0,0,1)], d=1, n=name+'_axis_z'),\r\n n=name)\r\n \r\n axis.rotatePivot.set((0,0,0))\r\n axis.scalePivot.set((0,0,0))\r\n axis.childAtIndex(0).overrideEnabled.set(1); axis.childAtIndex(0).overrideColor.set(13)\r\n axis.childAtIndex(1).overrideEnabled.set(1); axis.childAtIndex(1).overrideColor.set(14)\r\n axis.childAtIndex(2).overrideEnabled.set(1); axis.childAtIndex(2).overrideColor.set(15)\r\n \r\n curvex = pm.nodetypes.AnimCurveTA(n=name + \"_rotateX\")\r\n curvey = pm.nodetypes.AnimCurveTA(n=name + \"_rotateY\")\r\n curvez = pm.nodetypes.AnimCurveTA(n=name + \"_rotateZ\") \r\n \r\n arotations = rotations[:,i].euler()\r\n curvex.addKeys(frames, arotations[:,0])\r\n curvey.addKeys(frames, arotations[:,1])\r\n curvez.addKeys(frames, arotations[:,2])\r\n \r\n pm.connectAttr(curvex.output, axis.rotateX)\r\n pm.connectAttr(curvey.output, axis.rotateY)\r\n pm.connectAttr(curvez.output, axis.rotateZ)\r\n \r\n offsetx = pm.nodetypes.AnimCurveTU(n=name + \"_translateX\")\r\n offsety = pm.nodetypes.AnimCurveTU(n=name + \"_translateY\")\r\n offsetz = pm.nodetypes.AnimCurveTU(n=name + \"_translateZ\")\r\n \r\n offsetx.addKeys(frames, positions[:,i,0])\r\n offsety.addKeys(frames, positions[:,i,1])\r\n offsetz.addKeys(frames, positions[:,i,2])\r\n \r\n pm.connectAttr(offsetx.output, axis.translateX)\r\n pm.connectAttr(offsety.output, axis.translateY)\r\n pm.connectAttr(offsetz.output, axis.translateZ)\r\n \r\n maxis.append(axis)\r\n \r\n return 
pm.group(*maxis, n='RotationAnimation')" ]
[ "0.63733256", "0.6243695", "0.61284345", "0.59969985", "0.59389025", "0.5919403", "0.5793986", "0.5788567", "0.57774144", "0.5774565", "0.57473123", "0.57306886", "0.5724324", "0.57041705", "0.5641081", "0.56394804", "0.5630439", "0.5608946", "0.5591003", "0.55356807", "0.5504258", "0.5492301", "0.5478781", "0.5455895", "0.54513836", "0.543862", "0.5429372", "0.5428642", "0.53663313", "0.536328", "0.53521067", "0.53362703", "0.5320095", "0.5318598", "0.5315315", "0.5300679", "0.5295291", "0.5288886", "0.5278582", "0.52769285", "0.5264206", "0.5264177", "0.52579975", "0.5255416", "0.5251377", "0.5248857", "0.52418745", "0.5224906", "0.52233154", "0.52225", "0.521379", "0.520048", "0.5195255", "0.51948136", "0.51925063", "0.51624507", "0.5154339", "0.51536965", "0.51437217", "0.51317894", "0.5128286", "0.51262116", "0.51247203", "0.5112085", "0.51078576", "0.51071507", "0.51058304", "0.5100624", "0.50998783", "0.5098226", "0.50944513", "0.5093637", "0.50909674", "0.50753987", "0.50710315", "0.50710315", "0.5062398", "0.5062263", "0.5060067", "0.5055058", "0.5054152", "0.5046335", "0.50403035", "0.50398356", "0.503803", "0.5037118", "0.50354487", "0.50354487", "0.5035219", "0.5035219", "0.50310177", "0.50293183", "0.50290525", "0.5024986", "0.50232303", "0.50222033", "0.50213", "0.5013826", "0.50097805", "0.50086033" ]
0.62612224
1
Representation of a circuit as an "Euler angles list". On top of the rotation list (see the Rotations list representation), the single-qubit rotations are converted to Euler angles, so each 1Q layer is now represented by an Euler vector: rotations_1Q = [layer, qubit, euler_vector], where euler_vector = [rot_Z(first), rot_X, rot_Z2(third)]
def __init__(self, euler_1Q, lines_2Q, n_qubits): self.euler_1Q = euler_1Q self.lines_2Q = lines_2Q self.n_qubits = n_qubits dim_depth, dim_qubits, dim_euler = self.euler_1Q.shape self.depth = 1+2*len(lines_2Q) # euler angles vector does not match the depth if not((2*dim_depth-1) == self.depth): raise ValueError('euler angles vector does not match the depth') # euler angles vector does not match the qubit number if not(dim_qubits == n_qubits): raise ValueError( 'euler angles vector does not match the qubit number') # euler angles vector does not match the parameter number if not(dim_euler == 3): raise ValueError( 'euler angles vector does not match the parameter number')
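Purely as an illustration of the conversion described in the query above (a sketch, not the library's own routine): a 1Q rotation given in rotations-list form, i.e. an angle theta about an equatorial axis at azimuth phi, can be rewritten as a ZXZ Euler triple through the identity R_n(theta) = Rz(phi) . Rx(theta) . Rz(-phi) for n = (cos phi, sin phi, 0). Whether "first" in [rot_Z(first), rot_X, rot_Z2(third)] means first-applied or left-most in the matrix product is an assumption here; the sketch only verifies the underlying identity with scipy.

import numpy as np
from scipy.spatial.transform import Rotation as R

def xy_rotation_to_zxz(theta, phi):
    # Rotation by theta about (cos(phi), sin(phi), 0) equals Rz(phi).Rx(theta).Rz(-phi),
    # so the ZXZ Euler angles (left-most factor first) are [phi, theta, -phi].
    return np.array([phi, theta, -phi])

theta, phi = 0.7, 1.2
euler_vector = xy_rotation_to_zxz(theta, phi)
r_euler = R.from_euler('ZXZ', euler_vector)   # intrinsic ZXZ composes as Rz(a).Rx(b).Rz(c)
r_axis = R.from_rotvec(theta * np.array([np.cos(phi), np.sin(phi), 0.0]))
assert np.allclose(r_euler.as_matrix(), r_axis.as_matrix())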
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def one_qubit_rotation(self, qubit, symbols):\n # print(symbols, \"hi\")\n return [cirq.rx(symbols[0])(qubit),\n cirq.ry(symbols[1])(qubit),\n cirq.rz(symbols[2])(qubit)]", "def euler2quat(angles, rot_seq='zyx'):\n cangle = np.cos(0.5*angles)\n sangle = np.sin(0.5*angles)\n rot_seq = rot_seq.lower()\n if rot_seq == 'zyx':\n return np.array([cangle[0]*cangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] - sangle[0]*sangle[1]*cangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*cangle[1]*sangle[2],\n sangle[0]*cangle[1]*cangle[2] - cangle[0]*sangle[1]*sangle[2]])\n elif rot_seq == 'zyz':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2],\n cangle[0]*sangle[1]*sangle[2] - sangle[0]*sangle[1]*cangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2],\n sangle[0]*cangle[1]*cangle[2] + cangle[0]*cangle[1]*sangle[2]])\n elif rot_seq == 'zxy':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*sangle[1]*sangle[2],\n cangle[0]*sangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] + sangle[0]*sangle[1]*cangle[2],\n cangle[0]*sangle[1]*sangle[2] + sangle[0]*cangle[1]*cangle[2]])\n elif rot_seq == 'zxz':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2],\n sangle[0]*sangle[1]*cangle[2] - cangle[0]*sangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] + sangle[0]*cangle[1]*cangle[2]])\n elif rot_seq == 'yxz':\n return np.array([cangle[0]*cangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*cangle[1]*sangle[2],\n sangle[0]*cangle[1]*cangle[2] - cangle[0]*sangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] - sangle[0]*sangle[1]*cangle[2]])\n elif rot_seq == 'yxy':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2],\n sangle[0]*cangle[1]*cangle[2] + cangle[0]*cangle[1]*sangle[2],\n cangle[0]*sangle[1]*sangle[2] - sangle[0]*sangle[1]*cangle[2]])\n elif rot_seq == 'yzx':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*sangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] + sangle[0]*sangle[1]*cangle[2],\n cangle[0]*sangle[1]*sangle[2] + sangle[0]*cangle[1]*cangle[2],\n cangle[0]*sangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2]])\n elif rot_seq == 'yzy':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2],\n sangle[0]*sangle[1]*cangle[2] - cangle[0]*sangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] + sangle[0]*cangle[1]*cangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2]])\n elif rot_seq == 'xyz':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*sangle[1]*sangle[2],\n cangle[0]*sangle[1]*sangle[2] + sangle[0]*cangle[1]*cangle[2],\n cangle[0]*sangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] + sangle[0]*sangle[1]*cangle[2]])\n elif rot_seq == 'xyx':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] + sangle[0]*cangle[1]*cangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2],\n sangle[0]*sangle[1]*cangle[2] - cangle[0]*sangle[1]*sangle[2]])\n elif rot_seq == 'xzy':\n return np.array([cangle[0]*cangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2],\n sangle[0]*cangle[1]*cangle[2] - cangle[0]*sangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] - 
sangle[0]*sangle[1]*cangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*cangle[1]*sangle[2]])\n elif rot_seq == 'xzx':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] + sangle[0]*cangle[1]*cangle[2],\n cangle[0]*sangle[1]*sangle[2] - sangle[0]*sangle[1]*cangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2]])\n else:\n return False", "def euler_angle_to_rotation(ea, convention='zyx'):\n axis_names_to_vectors = dict([('x', (1, 0, 0)), ('y', (0, 1, 0)), ('z', (0, 0, 1))])\n axis0, axis1, axis2 = convention\n R0 = so3.rotation(axis_names_to_vectors[axis0], ea[0])\n R1 = so3.rotation(axis_names_to_vectors[axis1], ea[1])\n R2 = so3.rotation(axis_names_to_vectors[axis2], ea[2])\n return so3.mul(R0, so3.mul(R1, R2))", "def vecRot(data, seq, euler_angles, **kwargs):\n from scipy.spatial.transform import Rotation as R\n r = R.from_euler(seq, euler_angles, **kwargs)\n return r.apply(data)", "def qwc_rotation(pauli_operators):\n paulis_with_identity = (qml.Identity, qml.PauliX, qml.PauliY, qml.PauliZ)\n if not all(isinstance(element, paulis_with_identity) for element in pauli_operators):\n raise TypeError(\n \"All values of input pauli_operators must be either Identity, PauliX, PauliY, or PauliZ instances,\"\n \" instead got pauli_operators = {}.\".format(pauli_operators)\n )\n\n for pauli in pauli_operators:\n if isinstance(pauli, qml.PauliX):\n qml.RY(-np.pi / 2, wires=pauli.wires)\n\n elif isinstance(pauli, qml.PauliY):\n qml.RX(np.pi / 2, wires=pauli.wires)", "def euler_from_quaternion(self, quaternion):\n x = quaternion.x\n y = quaternion.y\n z = quaternion.z\n w = quaternion.w\n\n sinr_cosp = 2 * (w * x + y * z)\n cosr_cosp = 1 - 2 * (x * x + y * y)\n roll = np.arctan2(sinr_cosp, cosr_cosp)\n\n sinp = 2 * (w * y - z * x)\n pitch = np.arcsin(sinp)\n\n siny_cosp = 2 * (w * z + x * y)\n cosy_cosp = 1 - 2 * (y * y + z * z)\n yaw = np.arctan2(siny_cosp, cosy_cosp)\n\n return roll, pitch, yaw", "def getEllipsYZRotMatrix(a1, a2):\n adir = a2 - a1\n amid = a1 + 0.5 * adir\n kath = np.sqrt((adir[0] * adir[0] + adir[1] * adir[1]) / 4.0)\n octantA2 = octant(a2)\n theta = np.arctan( abs( (adir[2]/2) / kath) )\n #[1, 4, 6, 7 ] => left rotation\n #[2, 3, 5, 8 ] => right rotation\n if octantA2 in [2, 3, 5, 8]: \n theta = -theta \n print \"theta =\" , np.rad2deg(theta)\n RotY = np.matrix( [ [ np.cos(theta), 0.0, np.sin(theta) ],\n [ 0.0 , 1.0, 0.0 ],\n [ -np.sin(theta), 0.0, np.cos(theta) ]\n ]) \n \n psi = np.arctan( abs( adir[1] / adir[0] ) )\n #[2, 4, 6, 8 ] => left rotation\n #[1, 3, 5, 7 ] => right rotation\n if octantA2 in [1, 3, 5, 7]:\n psi = -psi\n print \"psi =\" , np.rad2deg(psi)\n RotZ = np.matrix( [ [ np.cos(psi), -np.sin(psi), 0.0 ],\n [ np.sin(psi), np.cos(psi), 0.0 ],\n [ 0.0 , 0.0 , 1.0 ]\n ])\n return np.asarray( RotY * RotZ )", "def two_qubit_rotation(self, bits, symbols):\n circuit = cirq.Circuit()\n circuit += cirq.Circuit(self.one_qubit_rotation(bits[0], symbols[0:3]))\n circuit += cirq.Circuit(self.one_qubit_rotation(bits[1], symbols[3:6]))\n circuit += [cirq.ZZ(*bits)**symbols[6]]\n circuit += [cirq.YY(*bits)**symbols[7]]\n circuit += [cirq.XX(*bits)**symbols[8]]\n circuit += cirq.Circuit(self.one_qubit_rotation(bits[0], symbols[9:12]))\n circuit += cirq.Circuit(self.one_qubit_rotation(bits[1], symbols[12:]))\n return circuit", "def txyz_2_representation(self: Q, representation: str = \"\") -> List:\n\n symbolic = self.is_symbolic()\n\n if representation == \"\":\n rep = [self.t, self.x, self.y, 
self.z]\n\n elif representation == \"polar\":\n amplitude = (self.t ** 2 + self.x ** 2 + self.y ** 2 + self.z ** 2) ** (\n 1 / 2\n )\n\n abs_v = abs_of_vector(self).t\n\n if symbolic:\n theta = sp.atan2(abs_v, self.t)\n else:\n theta = math.atan2(abs_v, self.t)\n\n if abs_v == 0:\n theta_x, theta_y, theta_z = 0, 0, 0\n\n else:\n theta_x = theta * self.x / abs_v\n theta_y = theta * self.y / abs_v\n theta_z = theta * self.z / abs_v\n\n rep = [amplitude, theta_x, theta_y, theta_z]\n\n elif representation == \"spherical\":\n\n spherical_t = self.t\n\n spherical_r = (self.x ** 2 + self.y ** 2 + self.z ** 2) ** (1 / 2)\n\n if spherical_r == 0:\n theta = 0\n else:\n if symbolic:\n theta = sp.acos(self.z / spherical_r)\n\n else:\n theta = math.acos(self.z / spherical_r)\n\n if symbolic:\n phi = sp.atan2(self.y, self.x)\n else:\n phi = math.atan2(self.y, self.x)\n\n rep = [spherical_t, spherical_r, theta, phi]\n\n else:\n raise ValueError(f\"Oops, don't know representation: representation\")\n\n return rep", "def euler_angles(quatX,quatY,quatZ,quatW):\n\n\troll1 = 2.0 * (quatW * quatX + quatY * quatZ)\n\troll2 = (1.0 - 2.0) * (quatX * quatX + quatY * quatY)\n\n\tyaw1 = 2.0 * (quatW * quatZ + quatX * quatY)\n\tyaw2 = 1.0 - 2.0 * (quatY * quatY + quatZ * quatZ)\n\n\troll = math.atan2(roll1,roll2)\n\tpitch = math.asin(max(-1.0, min(1.0, 2.0 *(quatW * quatY - quatZ * quatX))))\n\tyaw = math.atan2(yaw1,yaw2)\n\n\troll_w = int(((roll + (math.pi)) / (math.pi * 2.0) * 18))\n\tpitch_w = int(pitch + (math.pi/2.0)/math.pi * 18)\n\tyaw_w = int(yaw + (math.pi / (math.pi * 2.0)) * 18)\n\n\teulerAngles = [roll_w,pitch_w,yaw_w]\n\treturn eulerAngles", "def rotationMatrixToEulerAngles(R) :\n sy = np.sqrt(R[0,0] * R[0,0] + R[1,0] * R[1,0])\n singular = sy < 1e-6\n\n if not singular :\n x = np.arctan2(R[2,1] , R[2,2])\n y = np.arctan2(-R[2,0], sy)\n z = np.arctan2(R[1,0], R[0,0])\n else :\n x = np.arctan2(-R[1,2], R[1,1])\n y = np.arctan2(-R[2,0], sy)\n z = 0\n\n return np.array([x, y, z])", "def euler_timestep_rotation(sphere_positions, sphere_rotations, new_sphere_positions, new_sphere_rotations, Oa_out, timestep):\r\n\r\n for i in range(sphere_positions.shape[0]):\r\n R0 = sphere_positions[i]\r\n O = (Oa_out[i][0] ** 2 + Oa_out[i][1] ** 2 + Oa_out[i][2] ** 2) ** 0.5\r\n\r\n ''' To rotate from basis (x,y,z) to (X,Y,Z), where x,y,z,X,Y,Z are unit vectors,\r\n you just need to multiply by the matrix\r\n ( X_x Y_x Z_x )\r\n ( X_y Y_y Z_y ),\r\n ( X_z Y_z Z_z )\r\n where X_x means the x-component of X.\r\n Our Z is Omega = o_spheres[i], so we need to make it into a complete basis.\r\n To do that we pick a unit vector different to Omega (either zhat or xhat depending on Omega)\r\n and use (Omega x zhat, Omega x (Omega x zhat), zhat) as our basis (X,Y,Z).\r\n That's it! 
[Only took me three days...]\r\n '''\r\n\r\n if np.array_equal(Oa_out[i], [0, 0, 0]):\r\n rot_matrix = np.identity(3)\r\n else:\r\n Otest = (abs(Oa_out[i] / O)).astype('float')\r\n perp1 = [0, 0, 1] if np.allclose(Otest, [1, 0, 0]) else [1, 0, 0]\r\n rot_matrix = np.array([np.cross(Oa_out[i], perp1) / O, np.cross(Oa_out[i], np.cross(Oa_out[i], perp1)) / O ** 2, Oa_out[i] / O]).transpose()\r\n\r\n for j in range(2):\r\n ''' rb0 is the position (\"r\") of the endpoint of the pointy rotation vector in the\r\n external (x,y,z) frame (\"b\") at the beginning of this process (\"0\") '''\r\n rb0 = sphere_rotations[i, j]\r\n\r\n ''' rbdashdash0_xyz is the position of the same endpoint in the frame of the rotating sphere (\"b''\"),\r\n\t\t\t\t\t\twhich we set to have the z-axis=Omega axis. It's in Cartesian coordinates. '''\r\n rbdashdash0_xyz = np.dot(linalg.inv(rot_matrix), (rb0 - R0))\r\n x0 = rbdashdash0_xyz[0]\r\n y0 = rbdashdash0_xyz[1]\r\n z0 = rbdashdash0_xyz[2]\r\n\r\n r0 = (x0 ** 2 + y0 ** 2 + z0 ** 2) ** 0.5\r\n t0 = np.arccos(z0 / r0)\r\n p0 = 0 if (x0 == 0 and y0 == 0) else np.arctan2(y0, x0)\r\n r = r0\r\n t = t0\r\n p = euler_timestep(p0, O, timestep)\r\n\r\n x = r * np.sin(t) * np.cos(p)\r\n y = r * np.sin(t) * np.sin(p)\r\n z = r * np.cos(t)\r\n rbdashdash_xyz = np.array([x, y, z])\r\n R = new_sphere_positions[i]\r\n rb = R + np.dot(rot_matrix, rbdashdash_xyz)\r\n new_sphere_rotations[i, j] = rb\r\n return new_sphere_rotations", "def euler_to_quaternion(euler: tuple) -> object:\n\n (yaw, pitch, roll) = (euler[0], euler[1], euler[2])\n qy = np.sin(roll / 2) * np.cos(pitch / 2) * np.cos(yaw / 2) - np.cos(roll / 2) * np.sin(pitch / 2) * np.sin(yaw / 2)\n qx = np.cos(roll / 2) * np.sin(pitch / 2) * np.cos(yaw / 2) + np.sin(roll / 2) * np.cos(pitch / 2) * np.sin(yaw / 2)\n qw = np.cos(roll / 2) * np.cos(pitch / 2) * np.sin(yaw / 2) - np.sin(roll / 2) * np.sin(pitch / 2) * np.cos(yaw / 2)\n qz = np.cos(roll / 2) * np.cos(pitch / 2) * np.cos(yaw / 2) + np.sin(roll / 2) * np.sin(pitch / 2) * np.sin(yaw / 2)\n return qx, qy, qz, qw", "def EulerRotation(Coor, EulerAngle=[0, 0, 0], mode=None):\r\n # Check the validity of coordinate.\r\n try:\r\n h = Coor.shape[0]\r\n except Exception:\r\n h = 1\r\n try:\r\n w = Coor.shape[1]\r\n except Exception:\r\n w = 1\r\n if w != 3 and h != 3:\r\n raise Exception('Coordinate must be 3-dimensional.')\r\n\r\n l = len(EulerAngle)\r\n if l != 3:\r\n raise Exception('Coordinate must be 3-dimensional.')\r\n if EulerAngle[0] == 0 and EulerAngle[1] == 0 and EulerAngle[2] == 0:\r\n return Coor\r\n\r\n # Substitute Euler angles.\r\n alpha = EulerAngle[0]*np.pi/180\r\n beta = EulerAngle[1]*np.pi/180\r\n gamma = EulerAngle[2]*np.pi/180\r\n\r\n # mode 0 : normal transform, 1 : inverse transform.\r\n if mode is None:\r\n # First rotation with respect to z0 axis.\r\n Euler_z0_ori = np.array([\r\n [np.cos(alpha), np.sin(alpha), 0],\r\n [-np.sin(alpha), np.cos(alpha), 0],\r\n [0, 0, 1]\r\n ])\r\n # Second rotation with respect to x1 axis.\r\n Euler_x1_ori = np.array([\r\n [1, 0, 0],\r\n [0, np.cos(beta), np.sin(beta)],\r\n [0, -np.sin(beta), np.cos(beta)]\r\n ])\r\n # Third rotation with respect to z2 axis.\r\n Euler_z2_ori = np.array([\r\n [np.cos(gamma), np.sin(gamma), 0],\r\n [-np.sin(gamma), np.cos(gamma), 0],\r\n [0, 0, 1]\r\n ])\r\n Euler_all = np.dot(Euler_z2_ori, np.dot(Euler_x1_ori, Euler_z0_ori))\r\n else:\r\n Euler_z0_inv = np.array([\r\n [np.cos(alpha), -np.sin(alpha), 0],\r\n [np.sin(alpha), np.cos(alpha), 0],\r\n [0, 0, 1]\r\n ])\r\n Euler_x1_inv = 
np.array([\r\n [1, 0, 0],\r\n [0, np.cos(beta), -np.sin(beta)],\r\n [0, np.sin(beta), np.cos(beta)]\r\n ])\r\n Euler_z2_inv = np.array([\r\n [np.cos(gamma), -np.sin(gamma), 0],\r\n [np.sin(gamma), np.cos(gamma), 0],\r\n [0, 0, 1]\r\n ])\r\n Euler_all = np.dot(Euler_z0_inv, np.dot(Euler_x1_inv, Euler_z2_inv))\r\n\r\n # Reshaping for output : (N, 3) np.array.\r\n if h == 3:\r\n out = np.dot(Euler_all, Coor)\r\n else:\r\n out = np.dot(Euler_all, np.transpose(Coor))\r\n out = np.transpose(out)\r\n return out", "def rotate(self, euler_xyz_degree):\n degree = True\n compiler = self.root.find('.//compiler[@angle]')\n if compiler and compiler.get('angle'):\n if compiler.get('angle') == 'radian':\n degree = False\n\n x, y, z = np.array(euler_xyz_degree) * np.pi / 180\n R = T.euler.euler2quat(x, y, z, 'sxyz')\n\n if self.root.find('.//compiler[@eulerseq]'):\n raise NotImplementedError()\n\n for body in self.worldbody:\n if body.tag == 'light':\n continue\n quat = None\n if body.get('axisangle'):\n axisangle = string_to_array(body.get('axisangle'))\n length = np.linalg.norm(axisangle)\n quat = T.quaternions.axangle2quat(axisangle / length, length)\n body.set('axisangle', None)\n elif body.get('xyaxes'):\n raise NotImplementedError()\n elif body.get('zaxis'):\n raise NotImplementedError()\n elif body.get('euler'):\n i, j, k = string_to_array(body.get('euler'))\n if degree:\n i *= np.pi / 180\n j *= np.pi / 180\n k *= np.pi / 180\n quat = T.euler.euler2quat(i, j, k, 'sxyz')\n body.set('euler', None)\n else:\n quat = string_to_array(body.get('quat', '1 0 0 0'))\n body.set('quat', None)\n\n quat = T.quaternions.qmult(R, quat)\n body.set('quat', array_to_string(quat))", "def _cubelet_rotation_matrix(self, cubelet_meta_info, qpos_array):\n euler_angles = qpos_array[cubelet_meta_info[\"euler_qpos\"]]\n return rotation.euler2mat(euler_angles)", "def eulerAnglesToRotationMatrix(theta):\n\n R_x = np.array([[1, 0, 0 ],\n [0, np.cos(theta[0]), -np.sin(theta[0]) ],\n [0, np.sin(theta[0]), np.cos(theta[0]) ]\n ])\n R_y = np.array([[np.cos(theta[1]), 0, np.sin(theta[1]) ],\n [0, 1, 0 ],\n [-np.sin(theta[1]), 0, np.cos(theta[1]) ]\n ])\n R_z = np.array([[np.cos(theta[2]), -np.sin(theta[2]), 0],\n [np.sin(theta[2]), np.cos(theta[2]), 0],\n [0, 0, 1]\n ])\n R = np.dot(R_z, np.dot( R_y, R_x ))\n return R", "def __init__(self, rotations_1Q, lines_2Q, n_qubits):\n self.rotations_1Q = rotations_1Q\n self.lines_2Q = lines_2Q\n self.n_qubits = n_qubits\n dim_depth, dim_qubits, dim_rot = self.rotations_1Q.shape\n self.depth = 1+2*len(lines_2Q)\n # 1Q rotations vector does not match the depth\n if not((2*dim_depth-1) == self.depth):\n raise ValueError('1Q rotations vector does not match the depth')\n # 1Q rotations vector does not match the qubit number\n if not(dim_qubits == n_qubits):\n raise ValueError(\n '1Q rotations vector does not match the qubit number')\n # 1Q rotations vector does not match the parameter number\n if not(dim_rot == 4):\n raise ValueError(\n '1Q rotations vector does not match the parameter number')", "def rotmat2euler(R):\n if R[0,2] == 1 or R[0,2] == -1:\n # special case\n E3 = 0 # set arbitrarily\n dlta = np.arctan2( R[0,1], R[0,2] )\n if R[0,2] == -1:\n E2 = np.pi/2\n E1 = E3 + dlta\n else:\n E2 = -np.pi/2\n E1 = -E3 + dlta\n else:\n E2 = -np.arcsin( R[0,2] )\n E1 = np.arctan2( R[1,2]/np.cos(E2), R[2,2]/np.cos(E2) )\n E3 = np.arctan2( R[0,1]/np.cos(E2), R[0,0]/np.cos(E2) )\n eul = np.array([E1, E2, E3])\n return eul", "def quat_to_euler(orientation):\n quaternion = (\n orientation.x,\n 
orientation.y,\n orientation.z,\n orientation.w\n )\n euler = tf.transformations.euler_from_quaternion(quaternion)\n roll = euler[0]\n pitch = euler[1]\n yaw = euler[2]\n return (roll,pitch,yaw)", "def _rotation_matrix_to_euler_angles(R):\n sy = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])\n\n singular = sy < 1e-6\n\n if not singular:\n x = math.atan2(R[2, 1], R[2, 2])\n y = math.atan2(-R[2, 0], sy)\n z = math.atan2(R[1, 0], R[0, 0])\n else:\n x = math.atan2(-R[1, 2], R[1, 1])\n y = math.atan2(-R[2, 0], sy)\n z = 0\n\n return np.array([x, y, z])", "def toRot(q):\n R = SX.zeros(3, 3)\n qi = q[0]; qj = q[1]; qk = q[2]; qr = q[3]\n R[0, 0] = 1. - 2. * (qj * qj + qk * qk);\n R[0, 1] = 2. * (qi * qj - qk * qr);\n R[0, 2] = 2. * (qi * qk + qj * qr)\n R[1, 0] = 2. * (qi * qj + qk * qr);\n R[1, 1] = 1. - 2. * (qi * qi + qk * qk);\n R[1, 2] = 2. * (qj * qk - qi * qr)\n R[2, 0] = 2. * (qi * qk - qj * qr);\n R[2, 1] = 2. * (qj * qk + qi * qr);\n R[2, 2] = 1. - 2. * (qi * qi + qj * qj)\n\n return R", "def z_rotation(self):\n before = ('R', 'r', 'U', 'u', 'L', 'l', 'D', 'd', 'M', 'E', 'x', 'y')\n after = ('U', 'u', 'L', 'l', 'D', 'd', 'R', 'r', 'E', 'M\\'', \"y\", \"x'\")\n solve = self.solve_helper.maketrans(dict(zip(before, after)))\n solve_trans = self.solve_helper.translate(solve)\n solve_trans = solve_trans.replace(\"\\'\\'\", \"\")\n self.solve_helper = solve_trans", "def euler_from_quaternion(x, y, z, w):\n t0 = +2.0 * (w * x + y * z)\n t1 = +1.0 - 2.0 * (x * x + y * y)\n roll_x = math.atan2(t0, t1)\n \n t2 = +2.0 * (w * y - z * x)\n t2 = +1.0 if t2 > +1.0 else t2\n t2 = -1.0 if t2 < -1.0 else t2\n pitch_y = math.asin(t2)\n \n t3 = +2.0 * (w * z + x * y)\n t4 = +1.0 - 2.0 * (y * y + z * z)\n yaw_z = math.atan2(t3, t4)\n \n return roll_x, pitch_y, yaw_z # in radians", "def rotmat2euler(R):\n if R[0, 2] == 1 or R[0, 2] == -1:\n # special case\n E3 = 0 # set arbitrarily\n dlta = np.arctan2(R[0, 1], R[0, 2])\n\n if R[0, 2] == -1:\n E2 = np.pi / 2\n E1 = E3 + dlta\n else:\n E2 = -np.pi / 2\n E1 = -E3 + dlta\n\n else:\n E2 = -np.arcsin(R[0, 2])\n E1 = np.arctan2(R[1, 2] / np.cos(E2), R[2, 2] / np.cos(E2))\n E3 = np.arctan2(R[0, 1] / np.cos(E2), R[0, 0] / np.cos(E2))\n\n eul = np.array([E1, E2, E3])\n return eul", "def quaternion_to_euler(q):\r\n W = q[0]\r\n X = q[1]\r\n Y = q[2]\r\n Z = q[3]\r\n\r\n # roll(x - axis rotation)\r\n sinr_cosp = +2.0 * (W * X + Y * Z)\r\n cosr_cosp = +1.0 - 2.0 * (X * X + Y * Y)\r\n roll = math.atan2(sinr_cosp, cosr_cosp)\r\n\r\n # pitch(y - axis rotation)\r\n sinp = +2.0 * (W * Y - Z * X)\r\n if abs(sinp) >= 1:\r\n pitch = np.copysign(math.pi / 2, sinp) # use 90 degrees if out of range\r\n else:\r\n pitch = math.asin(sinp)\r\n\r\n # yaw(z - axis rotation)\r\n siny_cosp = +2.0 * (W * Z + X * Y)\r\n cosy_cosp = +1.0 - 2.0 * (Y * Y + Z * Z)\r\n yaw = math.atan2(siny_cosp, cosy_cosp)\r\n\r\n return roll, pitch, yaw", "def euler_from_quaternion(x, y, z, w):\n t0 = +2.0 * (w * x + y * z)\n t1 = +1.0 - 2.0 * (x * x + y * y)\n roll_x = math.atan2(t0, t1)\n \n t2 = +2.0 * (w * y - z * x)\n pitch_y = math.asin(t2)\n \n t3 = +2.0 * (w * z + x * y)\n t4 = +1.0 - 2.0 * (y * y + z * z)\n yaw_z = math.atan2(t3, t4)\n \n return roll_x, pitch_y, yaw_z # in radians", "def get_euler_frame(quaternionion_frame):\n\n euler_frame = list(quaternionion_frame[:3])\n for quaternion in gen_4_tuples(quaternionion_frame[3:]):\n euler_frame += quaternion_to_euler(quaternion)\n\n return euler_frame", "def get_euler_frame(quaternionion_frame):\n\n euler_frame = list(quaternionion_frame[:3])\n for 
quaternion in gen_4_tuples(quaternionion_frame[3:]):\n euler_frame += quaternion_to_euler(quaternion)\n\n return euler_frame", "def test_rot_decomposition(self, diff_method):\r\n dev = qml.device(\"default.qubit\", wires=1)\r\n\r\n def circuit(weights):\r\n qml.Rot(weights[0], weights[1], weights[2], wires=0)\r\n return qml.expval(qml.PauliX(0))\r\n\r\n circuit = qml.QNode(circuit, dev, diff_method=diff_method)\r\n params = np.array([1.0, 2.0, 3.0])\r\n tapes = qml.metric_tensor(circuit, only_construct=True)(params)\r\n assert len(tapes) == 3\r\n\r\n # first parameter subcircuit\r\n assert len(tapes[0].operations) == 0\r\n\r\n # Second parameter subcircuit\r\n assert len(tapes[1].operations) == 4\r\n assert isinstance(tapes[1].operations[0], qml.RZ)\r\n assert tapes[1].operations[0].data == [1]\r\n # PauliY decomp\r\n assert isinstance(tapes[1].operations[1], qml.PauliZ)\r\n assert isinstance(tapes[1].operations[2], qml.S)\r\n assert isinstance(tapes[1].operations[3], qml.Hadamard)\r\n\r\n # Third parameter subcircuit\r\n assert len(tapes[2].operations) == 2\r\n assert isinstance(tapes[2].operations[0], qml.RZ)\r\n assert isinstance(tapes[2].operations[1], qml.RY)\r\n assert tapes[2].operations[0].data == [1]\r\n assert tapes[2].operations[1].data == [2]\r\n\r\n result = qml.metric_tensor(circuit)(params)\r\n assert result.shape == (3, 3)", "def rotateEuler(self,axis, angle):\n if(axis == 'Z'):\n return np.array([[cos(angle), -sin(angle),0,0],[sin(angle), cos(angle),0,0],[0,0,1,0],[0,0,0,1]])\n if(axis == 'Y'):\n return np.array([[cos(angle),0,sin(angle),0],[0,1,0,0],[-sin(angle),0,cos(angle),0],[0,0,0,1]])\n if(axis == 'X'):\n return np.array([[1,0,0,0],[0,cos(angle), -sin(angle),0],[0,sin(angle), cos(angle),0],[0,0,0,1]])", "def _rotation_matrix_to_euler_angles(self, R):\n assert (self._is_rotation_matrix(R))\n\n sy = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])\n singular = sy < 1e-6\n\n if not singular:\n x = math.atan2(R[2, 1], R[2, 2])\n y = math.atan2(-R[2, 0], sy)\n z = math.atan2(R[1, 0], R[0, 0])\n else:\n x = math.atan2(-R[1, 2], R[1, 1])\n y = math.atan2(-R[2, 0], sy)\n z = 0\n\n return np.array([x, y, z])", "def convert_quaternion_to_euler(quaternion_frames):\n\n def gen_4_tuples(it):\n \"\"\"Generator of n-tuples from iterable\"\"\"\n\n return list(zip(it[0::4], it[1::4], it[2::4], it[3::4]))\n\n def get_euler_frame(quaternionion_frame):\n \"\"\"Converts a quaternion frame into an Euler frame\"\"\"\n\n euler_frame = list(quaternionion_frame[:3])\n for quaternion in gen_4_tuples(quaternionion_frame[3:]):\n euler_frame += quaternion_to_euler(quaternion)\n\n return euler_frame\n\n euler_frames = list(map(get_euler_frame, quaternion_frames))\n\n return np.array(euler_frames)", "def euler_from_quaternion(x, y, z, w):\n t0 = +2.0 * (w * x + y * z)\n t1 = +1.0 - 2.0 * (x * x + y * y)\n roll_x = math.atan2(t0, t1)\n\n t2 = +2.0 * (w * y - z * x)\n t2 = +1.0 if t2 > +1.0 else t2\n t2 = -1.0 if t2 < -1.0 else t2\n pitch_y = math.asin(t2)\n\n t3 = +2.0 * (w * z + x * y)\n t4 = +1.0 - 2.0 * (y * y + z * z)\n yaw_z = math.atan2(t3, t4)\n\n return roll_x, pitch_y, yaw_z # in radians", "def euler_from_quaternion(self, x, y, z, w):\n t0 = +2.0 * (w * x + y * z)\n t1 = +1.0 - 2.0 * (x * x + y * y)\n roll_x = math.atan2(t0, t1)\n\n t2 = +2.0 * (w * y - z * x)\n t2 = +1.0 if t2 > +1.0 else t2\n t2 = -1.0 if t2 < -1.0 else t2\n pitch_y = math.asin(t2)\n\n t3 = +2.0 * (w * z + x * y)\n t4 = +1.0 - 2.0 * (y * y + z * z)\n yaw_z = math.atan2(t3, t4)\n\n return roll_x, pitch_y, yaw_z # in 
radians", "def rotateEuler(axis, angle):\n if(axis == 'Z'):\n return np.array([[cos(angle), -sin(angle),0,0],[sin(angle), cos(angle),0,0],[0,0,1,0],[0,0,0,1]])\n if(axis == 'Y'):\n return np.array([[cos(angle),0,sin(angle),0],[0,1,0,0],[-sin(angle),0,cos(angle),0],[0,0,0,1]])\n if(axis == 'X'):\n return np.array([[1,0,0,0],[0,cos(angle), -sin(angle),0],[0,sin(angle), cos(angle),0],[0,0,0,1]])", "def representation_2_txyz(self: Q, representation: str = \"\") -> List:\n\n symbolic = False\n\n if (\n hasattr(self.t, \"free_symbols\")\n or hasattr(self.x, \"free_symbols\")\n or hasattr(self.y, \"free_symbols\")\n or hasattr(self.z, \"free_symbols\")\n ):\n symbolic = True\n\n if representation == \"\":\n box_t, box_x, box_y, box_z = self.t, self.x, self.y, self.z\n\n elif representation == \"polar\":\n amplitude, theta_x, theta_y, theta_z = self.t, self.x, self.y, self.z\n\n theta = (theta_x ** 2 + theta_y ** 2 + theta_z ** 2) ** (1 / 2)\n\n if theta == 0:\n box_t = self.t\n box_x, box_y, box_z = 0, 0, 0\n\n else:\n if symbolic:\n box_t = amplitude * sp.cos(theta)\n box_x = self.x / theta * amplitude * sp.sin(theta)\n box_y = self.y / theta * amplitude * sp.sin(theta)\n box_z = self.z / theta * amplitude * sp.sin(theta)\n else:\n box_t = amplitude * math.cos(theta)\n box_x = self.x / theta * amplitude * math.sin(theta)\n box_y = self.y / theta * amplitude * math.sin(theta)\n box_z = self.z / theta * amplitude * math.sin(theta)\n\n elif representation == \"spherical\":\n box_t, R, theta, phi = self.t, self.x, self.y, self.z\n\n if symbolic:\n box_x = R * sp.sin(theta) * sp.cos(phi)\n box_y = R * sp.sin(theta) * sp.sin(phi)\n box_z = R * sp.cos(theta)\n else:\n box_x = R * math.sin(theta) * math.cos(phi)\n box_y = R * math.sin(theta) * math.sin(phi)\n box_z = R * math.cos(theta)\n\n elif representation == \"hyperbolic\":\n u, v, theta, phi = self.t, self.x, self.y, self.z\n\n if symbolic:\n box_t = v * sp.exp(u)\n box_x = v * sp.exp(-u)\n box_y = v * sp.sin(theta) * sp.sin(phi)\n box_z = v * sp.cos(theta)\n\n else:\n box_t = v * math.exp(u)\n box_x = v * math.exp(-u)\n box_y = v * math.sin(theta) * sp.sin(phi)\n box_z = v * math.cos(theta)\n\n else:\n raise ValueError(f\"Oops, don't know representation: representation\")\n\n return [box_t, box_x, box_y, box_z]", "def Rotation_EQJ_ECL():\n # ob = mean obliquity of the J2000 ecliptic = 0.40909260059599012 radians.\n c = 0.9174821430670688 # cos(ob)\n s = 0.3977769691083922 # sin(ob)\n return RotationMatrix([\n [ 1, 0, 0],\n [ 0, +c, -s],\n [ 0, +s, +c]\n ])", "def rotate(self, ra1, dec1, ra2, dec2, ra3, dec3):\n # Turns Right Ascension/Declination into Azimuth/Zenith for healpy\n phi1 = ra1 - np.pi\n zen1 = np.pi/2. - dec1\n phi2 = ra2 - np.pi\n zen2 = np.pi/2. - dec2\n phi3 = ra3 - np.pi\n zen3 = np.pi/2. - dec3\n\n # Rotate each ra1 and dec1 towards the pole?\n x = np.array([hp.rotator.rotateDirection(\n hp.rotator.get_rotation_matrix((dp, -dz, 0.))[0], z, p)\n for z, p, dz, dp in zip(zen1, phi1, zen2, phi2)])\n\n # Rotate **all** these vectors towards ra3, dec3 (source_path)\n zen, phi = hp.rotator.rotateDirection(np.dot(\n hp.rotator.get_rotation_matrix((-phi3, 0, 0))[0],\n hp.rotator.get_rotation_matrix((0, zen3, 0.))[0]), x[:, 0], x[:, 1])\n\n dec = np.pi/2. 
- zen\n ra = phi + np.pi\n return np.atleast_1d(ra), np.atleast_1d(dec)", "def test_to_rotation(self):\r\n q = np.array([-1, 1, 3, 2])\r\n q = q / np.linalg.norm(q)\r\n R_gt = np.array([\r\n [-1/3., -14/15., -2/15.],\r\n [2/3., -1/3., 2/3.],\r\n [-2/3., 2/15., 11/15.]]).T\r\n R = to_rotation(q)\r\n\r\n zero_matrix = R - R_gt\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)\r\n\r\n for _ in range(20):\r\n q = np.random.randn(4)\r\n q /= np.linalg.norm(q)\r\n q_inv = quaternion_conjugate(q)\r\n\r\n R = to_rotation(q)\r\n R_inv = to_rotation(q_inv)\r\n\r\n zero_matrix = R @ R_inv - np.identity(3)\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)\r\n\r\n # orthogonal matrix\r\n zero_matrix = R @ R.T - np.identity(3)\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)", "def convert_euler_to_quaternion(roll, yaw, pitch):\n\n # roll (z), yaw (y), pitch (x)\n\n cy = math.cos(math.radians(roll) * 0.5)\n sy = math.sin(math.radians(roll) * 0.5)\n\n cp = math.cos(math.radians(yaw) * 0.5)\n sp = math.sin(math.radians(yaw) * 0.5)\n\n cr = math.cos(math.radians(pitch) * 0.5)\n sr = math.sin(math.radians(pitch) * 0.5)\n\n w = cy * cp * cr + sy * sp * sr\n x = cy * cp * sr - sy * sp * cr\n y = sy * cp * sr + cy * sp * cr\n z = sy * cp * cr - cy * sp * sr\n\n quat = np.array([w, x, y, z])\n quat = quat / np.linalg.norm(quat)\n return quat", "def euler_to_rodrigues(X_params):\n data_samples = X_params.shape[0]\n pose_euler = np.array([X_params[:, i:i+3] for i in range(0, 72, 3)])\n #print(pose_euler[0][0])\n #pose_euler = pose_euler.reshape((24, data_samples, 1, 3))\n #print(pose_euler[0][0])\n print(\"pose_euler shape: \" + str(pose_euler.shape))\n #R = np.array([[eulerAnglesToRotationMatrix(vector) for vector in vectors] for vectors in pose_euler])\n #print(\"R shape: \" + str(R.shape))\n #print(R[0][0])\n #R = R.reshape((data_samples, 24, 3, 3))\n\n #pose_params = np.array([[Rot.from_dcm(rot_mat).as_rotvec() for rot_mat in param_rot_mats] for param_rot_mats in R])\n pose_params = np.array([Rot.from_euler('xyz', vectors, degrees=False).as_rotvec() for vectors in pose_euler])\n print(\"pose_params shape: \" + str(pose_params.shape))\n pose_params = pose_params.reshape((data_samples, 72))\n print(\"pose_params shape: \" + str(pose_params.shape))\n print(\"other params shape: \" + str(X_params[:, 72:85].shape))\n X_params = np.concatenate([pose_params, X_params[:, 72:85]], axis=1)\n print(\"X_params shape: \" + str(X_params.shape))\n\n return X_params", "def rotation_matrix_to_euler(R):\n sy = math.sqrt(R[0,0] * R[0,0] + R[1,0] * R[1,0])\n \n singular = sy < 1e-6\n \n if not singular :\n x = math.atan2(R[2,1] , R[2,2])\n y = math.atan2(-R[2,0], sy)\n z = math.atan2(R[1,0], R[0,0])\n else :\n x = math.atan2(-R[1,2], R[1,1])\n y = math.atan2(-R[2,0], sy)\n z = 0\n \n return np.array([x, y, z])", "def euler_from_quaternion(x, y, z, w):\r\n\tt0 = +2.0 * (w * x + y * z)\r\n\tt1 = +1.0 - 2.0 * (x * x + y * y)\r\n\troll_x = math.atan2(t0, t1)\r\n\r\n\tt2 = +2.0 * (w * y - z * x)\r\n\tt2 = +1.0 if t2 > +1.0 else t2\r\n\tt2 = -1.0 if t2 < -1.0 else t2\r\n\tpitch_y = math.asin(t2)\r\n\r\n\tt3 = +2.0 * (w * z + x * y)\r\n\tt4 = +1.0 - 2.0 * (y * y + z * z)\r\n\tyaw_z = math.atan2(t3, t4)\r\n\r\n\treturn roll_x, pitch_y, yaw_z # in radians\r", "def translateEuler(trans):\n return np.array([[1,0,0,trans[0]],[0,1,0,trans[1]],[0,0,1,trans[2]],[0,0,0,1]])", "def Rotation_ECL_EQJ():\n # ob = mean obliquity of the J2000 ecliptic = 0.40909260059599012 radians.\n c = 0.9174821430670688 # 
cos(ob)\n s = 0.3977769691083922 # sin(ob)\n return RotationMatrix([\n [ 1, 0, 0],\n [ 0, +c, +s],\n [ 0, -s, +c]\n ])", "def fk3(joint_rotations):\n h0_3 = htm0_3(joint_rotations)\n x0_3 = h0_3[0, 3]\n y0_3 = h0_3[1, 3]\n z0_3 = h0_3[2, 3]\n d0_3 = [x0_3, y0_3, z0_3]\n return d0_3", "def GetEulerSequenceList(self):\n return _gmat_py.Attitude_GetEulerSequenceList(self)", "def rotorconversion(x):\n return cf.MultiVector(layout, val_rotorconversion(x))", "def orbit(self, representation='trivial'):\n if not self:\n return self\n\n answer = BarrattEccles_element(torsion=self.torsion)\n for k, v in self.items():\n inverse = tuple(k[0].index(i + 1) + 1 for i in range(len(k[0])))\n permutation = SymmetricRing_element({inverse: 1}, torsion=self.torsion)\n if representation == 'sign':\n permutation = k[0].sign * permutation\n answer += permutation * BarrattEccles_element({k: v}, torsion=self.torsion)\n\n return answer", "def convert_quaternion_frames_to_euler_frames(quaternion_frames):\n\n def gen_4_tuples(it):\n \"\"\"Generator of n-tuples from iterable\"\"\"\n\n return list(zip(it[0::4], it[1::4], it[2::4], it[3::4]))\n\n def get_euler_frame(quaternionion_frame):\n \"\"\"Converts a quaternion frame into an Euler frame\"\"\"\n\n euler_frame = list(quaternionion_frame[:3])\n for quaternion in gen_4_tuples(quaternionion_frame[3:]):\n euler_frame += quaternion_to_euler(quaternion)\n\n return euler_frame\n\n euler_frames = list(map(get_euler_frame, quaternion_frames))\n\n return np.array(euler_frames)", "def eulerAnglesToRotationMatrix(self, theta):\n\n R_x = np.array([[1, 0, 0],\n [0, math.cos(theta[0]), -math.sin(theta[0])],\n [0, math.sin(theta[0]), math.cos(theta[0])]\n ])\n\n R_y = np.array([[math.cos(theta[1]), 0, math.sin(theta[1])],\n [0, 1, 0],\n [-math.sin(theta[1]), 0, math.cos(theta[1])]\n ])\n\n R_z = np.array([[math.cos(theta[2]), -math.sin(theta[2]), 0],\n [math.sin(theta[2]), math.cos(theta[2]), 0],\n [0, 0, 1]\n ])\n\n R = np.dot(R_z, np.dot(R_y, R_x))\n\n return R", "def rotation(self):\n\t\treturn self.piv.a.rotate.v", "def _quaternions(self, R):\n # Simple Wikipedia version\n # en.wikipedia.org/wiki/Rotation_matrix#Quaternion\n # For other options see math.stackexchange.com/questions/2074316/calculating-rotation-axis-from-rotation-matrix\n diag = torch.diagonal(R, dim1=-2, dim2=-1)\n Rxx, Ryy, Rzz = diag.unbind(-1)\n magnitudes = 0.5 * torch.sqrt(torch.abs(1 + torch.stack([\n Rxx - Ryy - Rzz, \n - Rxx + Ryy - Rzz, \n - Rxx - Ryy + Rzz\n ], -1)))\n _R = lambda i,j: R[:,:,:,i,j]\n signs = torch.sign(torch.stack([\n _R(2,1) - _R(1,2),\n _R(0,2) - _R(2,0),\n _R(1,0) - _R(0,1)\n ], -1))\n xyz = signs * magnitudes\n # The relu enforces a non-negative trace\n w = torch.sqrt(F.relu(1 + diag.sum(-1, keepdim=True))) / 2.\n Q = torch.cat((xyz, w), -1)\n Q = F.normalize(Q, dim=-1)\n return Q", "def euler_to_quaternion(euler_angles, rotation_order=DEFAULT_ROTATION_ORDER,filter_values=True):\n assert len(euler_angles) == 3, ('The length of euler angles should be 3!')\n euler_angles = np.deg2rad(euler_angles)\n rotmat = euler_matrix(*euler_angles, rotation_order_to_string(rotation_order))\n # convert rotation matrix R into quaternion vector (qw, qx, qy, qz)\n quat = quaternion_from_matrix(rotmat)\n # filter the quaternion see\n # http://physicsforgames.blogspot.de/2010/02/quaternions.html\n if filter_values:\n dot = np.sum(quat)\n if dot < 0:\n quat = -quat\n return [quat[0], quat[1], quat[2], quat[3]]", "def get_rotation(self) -> np.array:\n axis = self.get_arms()[1]\n force = [self.d_x, 
self.d_y] # \"Force applied on the arm\"\n o_m = [self.target.x_obj - axis.x_obj, self.target.y_obj - axis.y_obj]\n torque = o_m[0]*force[1] - o_m[1] * force[0] # OM vectorial F\n if torque == 1: # Anti clockwise rotation\n rotation = np.array([[0, -1], [1, 0]])\n if torque == -1: # Clockwise rotation\n rotation = np.array([[0, 1], [-1, 0]])\n if torque == 0: # No rotation\n rotation = np.array([[0, 0], [0, 0]])\n return rotation", "def _tk1_to_rotations(a: float, b: float, c: float) -> Circuit:\n circ = Circuit(1)\n circ.Rz(c, 0).Rx(b, 0).Rz(a, 0)\n return circ", "def rotated_e():\n x = np.zeros((5, 5))\n x[:, 0] = 1.\n y = np.zeros((5, 5))\n y[:, 2] = 1.\n z = np.zeros((5, 5))\n z[:, 4] = 1.\n a = np.zeros((5, 5))\n a[0, :] = 1.\n b = np.zeros((5, 5))\n b[2, :] = 1.\n c = np.zeros((5, 5))\n c[4, :] = 1.\n\n img = np.zeros((4, 5, 5))\n img[0] = x + y + z + a\n img[1] = x + y + z + c\n img[2] = a + b + c + x\n img[3] = a + b + c + z\n img[img > 0] = 1.\n\n return img.astype('float32')", "def euler_to_quaternion(yaw, pitch, roll):\r\n cy = math.cos(yaw * 0.5)\r\n sy = math.sin(yaw * 0.5)\r\n cp = math.cos(pitch * 0.5)\r\n sp = math.sin(pitch * 0.5)\r\n cr = math.cos(roll * 0.5)\r\n sr = math.sin(roll * 0.5)\r\n w = cy * cp * cr + sy * sp * sr\r\n x = cy * cp * sr - sy * sp * cr\r\n y = sy * cp * sr + cy * sp * cr\r\n z = sy * cp * cr - cy * sp * sr\r\n return w, x, y, z", "def orient(self, parent, rot_type, amounts, rot_order=''):\n\n self._check_frame(parent)\n amounts = list(amounts)\n for i, v in enumerate(amounts):\n if not isinstance(v, Vector):\n amounts[i] = sympify(v)\n def _rot(axis, angle):\n \"\"\"DCM for simple axis 1,2,or 3 rotations. \"\"\"\n if axis == 1:\n return Matrix([[1, 0, 0],\n [0, cos(angle), -sin(angle)],\n [0, sin(angle), cos(angle)]])\n elif axis == 2:\n return Matrix([[cos(angle), 0, sin(angle)],\n [0, 1, 0],\n [-sin(angle), 0, cos(angle)]])\n elif axis == 3:\n return Matrix([[cos(angle), -sin(angle), 0],\n [sin(angle), cos(angle), 0],\n [0, 0, 1]])\n\n approved_orders = ('123', '231', '312', '132', '213', '321', '121',\n '131', '212', '232', '313', '323', '')\n rot_order = str(rot_order).upper() # Now we need to make sure XYZ = 123\n rot_type = rot_type.upper()\n rot_order = [i.replace('X', '1') for i in rot_order]\n rot_order = [i.replace('Y', '2') for i in rot_order]\n rot_order = [i.replace('Z', '3') for i in rot_order]\n rot_order = ''.join(rot_order)\n if not rot_order in approved_orders:\n raise TypeError('The supplied order is not an approved type')\n parent_orient = []\n\n if rot_type == 'AXIS':\n if not rot_order == '':\n raise TypeError('Axis orientation takes no rotation order')\n if not (isinstance(amounts, (list, tuple)) & (len(amounts) == 2)):\n raise TypeError('Amounts are a list or tuple of length 2')\n theta = amounts[0]\n axis = amounts[1]\n self._check_vector(axis)\n if not axis.dt(parent) == 0:\n raise ValueError('Axis cannot be time-varying')\n axis = axis.express(parent).normalize()\n axis = axis.args[0][0]\n parent_orient = ((eye(3) - axis * axis.T) * cos(theta) +\n Matrix([[0, -axis[2], axis[1]],[axis[2], 0, -axis[0]],\n [-axis[1], axis[0], 0]]) * sin(theta) + axis * axis.T)\n elif rot_type == 'QUATERNION':\n if not rot_order == '':\n raise TypeError('Quaternion orientation takes no rotation order')\n if not (isinstance(amounts, (list, tuple)) & (len(amounts) == 4)):\n raise TypeError('Amounts are a list or tuple of length 4')\n q0, q1, q2, q3 = amounts\n parent_orient = (Matrix([[q0 ** 2 + q1 ** 2 - q2 ** 2 - q3 **\n 2, 2 * (q1 * q2 - q0 
* q3), 2 * (q0 * q2 + q1 * q3)],\n [2 * (q1 * q2 + q0 * q3), q0 ** 2 - q1 ** 2 + q2 **2 - q3 ** 2,\n 2 * (q2 * q3 - q0 * q1)], [2 * (q1 * q3 - q0 * q2), 2 * (q0 *\n q1 + q2 * q3), q0 ** 2 - q1 ** 2 - q2 ** 2 + q3 ** 2]]))\n elif rot_type == 'BODY':\n if not (len(amounts) == 3 & len(rot_order) == 3):\n raise TypeError('Body orientation takes 3 values & 3 orders')\n a1 = int(rot_order[0])\n a2 = int(rot_order[1])\n a3 = int(rot_order[2])\n parent_orient = (_rot(a1, amounts[0]) * _rot(a2, amounts[1])\n * _rot(a3, amounts[2]))\n elif rot_type == 'SPACE':\n if not (len(amounts) == 3 & len(rot_order) == 3):\n raise TypeError('Space orientation takes 3 values & 3 orders')\n a1 = int(rot_order[0])\n a2 = int(rot_order[1])\n a3 = int(rot_order[2])\n parent_orient = (_rot(a3, amounts[2]) * _rot(a2, amounts[1])\n * _rot(a1, amounts[0]))\n else:\n raise NotImplementedError('That is not an implemented rotation')\n self._dcm_dict.update({parent: parent_orient})\n parent._dcm_dict.update({self: parent_orient.T})\n if rot_type == 'QUATERNION':\n t = dynamicsymbols._t\n q0, q1, q2, q3 = amounts\n q0d = diff(q0, t)\n q1d = diff(q1, t)\n q2d = diff(q2, t)\n q3d = diff(q3, t)\n w1 = 2 * (q1d * q0 + q2d * q3 - q3d * q2 - q0d * q1)\n w2 = 2 * (q2d * q0 + q3d * q1 - q1d * q3 - q0d * q2)\n w3 = 2 * (q3d * q0 + q1d * q2 - q2d * q1 - q0d * q3)\n wvec = Vector([(Matrix([w1, w2, w3]), self)])\n elif rot_type == 'AXIS':\n thetad = (amounts[0]).diff(dynamicsymbols._t)\n wvec = thetad * amounts[1].express(parent).normalize()\n else:\n try:\n from sympy.polys.polyerrors import CoercionFailed\n from sympy.physics.mechanics.functions import kinematic_equations\n q1, q2, q3 = amounts\n u1, u2, u3 = dynamicsymbols('u1, u2, u3')\n templist = kinematic_equations([u1, u2, u3], [q1, q2, q3],\n rot_type, rot_order)\n templist = [expand(i) for i in templist]\n td = solve(templist, [u1, u2, u3])\n u1 = expand(td[u1])\n u2 = expand(td[u2])\n u3 = expand(td[u3])\n wvec = u1 * self.x + u2 * self.y + u3 * self.z\n except (CoercionFailed, AssertionError):\n wvec = self._w_diff_dcm(parent)\n self._ang_vel_dict.update({parent: wvec})\n parent._ang_vel_dict.update({self: -wvec})", "def rotationMatrixToEulerAngles(self, R):\n\n assert(self.isRotationMatrix(R))\n\n sy = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])\n\n singular = sy < 1e-6\n\n if not singular:\n x = math.atan2(R[2, 1], R[2, 2])\n y = math.atan2(-R[2, 0], sy)\n z = math.atan2(R[1, 0], R[0, 0])\n else:\n x = math.atan2(-R[1, 2], R[1, 1])\n y = math.atan2(-R[2, 0], sy)\n z = 0\n\n return np.array([x, y, z])", "def __str__(self):\n struct_repr = \", \".join([\n \"roll_deg: \" + str(self.roll_deg),\n \"pitch_deg: \" + str(self.pitch_deg),\n \"yaw_deg: \" + str(self.yaw_deg)\n ])\n\n return f\"EulerAngle: [{struct_repr}]\"", "def euler_to_quat(self, yaw):\n quat_array = t.quaternion_from_euler(0.0, 0.0, yaw)\n return Quaternion(quat_array[0], quat_array[1], quat_array[2], quat_array[3])", "def quaternion_from_euler(ai, aj, ak, axes='sxyz'):\n try:\n firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]\n except (AttributeError, KeyError):\n _ = _TUPLE2AXES[axes]\n firstaxis, parity, repetition, frame = axes\n\n i = firstaxis\n j = _NEXT_AXIS[i+parity]\n k = _NEXT_AXIS[i-parity+1]\n\n if frame:\n ai, ak = ak, ai\n if parity:\n aj = -aj\n\n ai /= 2.0\n aj /= 2.0\n # print(\"ak : {}\".format(type(ak)))\n ak /= 2.0\n ci = math.cos(ai)\n si = math.sin(ai)\n cj = math.cos(aj)\n sj = math.sin(aj)\n ck = math.cos(ak)\n sk = math.sin(ak)\n cc = ci*ck\n cs = ci*sk\n sc = 
si*ck\n ss = si*sk\n\n quaternion = np.empty((4, ), dtype=np.float64)\n if repetition:\n quaternion[i] = cj*(cs + sc)\n quaternion[j] = sj*(cc + ss)\n quaternion[k] = sj*(cs - sc)\n quaternion[3] = cj*(cc - ss)\n else:\n quaternion[i] = cj*sc - sj*cs\n quaternion[j] = cj*ss + sj*cc\n quaternion[k] = cj*cs - sj*sc\n quaternion[3] = cj*cc + sj*ss\n if parity:\n quaternion[j] *= -1\n\n return quaternion", "def rotate_cube_right_list(liste):\n\n fronttemp = liste[1]\n \n righttemp = liste[2]\n \n backtemp = liste[3]\n \n lefttemp = liste[4]\n \n uptemp0 = liste[0][0]\n uptemp1 = liste[0][1]\n uptemp2 = liste[0][2]\n uptemp3 = liste[0][3]\n uptemp4 = liste[0][4]\n uptemp5 = liste[0][5]\n uptemp6 = liste[0][6]\n uptemp7 = liste[0][7]\n uptemp8 = liste[0][8]\n \n downtemp0 = liste[5][0]\n downtemp1 = liste[5][1]\n downtemp2 = liste[5][2]\n downtemp3 = liste[5][3]\n downtemp4 = liste[5][4]\n downtemp5 = liste[5][5]\n downtemp6 = liste[5][6]\n downtemp7 = liste[5][7]\n downtemp8 = liste[5][8]\n \n liste[2] = fronttemp\n \n liste[3] = righttemp\n \n liste[4] = backtemp\n \n liste[1] = lefttemp\n \n liste[0][0] = uptemp2\n liste[0][1] = uptemp5\n liste[0][2] = uptemp8\n liste[0][3] = uptemp1\n liste[0][4] = uptemp4\n liste[0][5] = uptemp7\n liste[0][6] = uptemp0\n liste[0][7] = uptemp3\n liste[0][8] = uptemp6\n \n liste[5][0] = downtemp6\n liste[5][1] = downtemp3\n liste[5][2] = downtemp0\n liste[5][3] = downtemp7\n liste[5][4] = downtemp4\n liste[5][5] = downtemp1\n liste[5][6] = downtemp8\n liste[5][7] = downtemp5\n liste[5][8] = downtemp2\n \n return liste", "def euler2mat(angle):\n B = angle.size(0)\n x, y, z = angle[:,0], angle[:,1], angle[:,2]\n\n cosz = torch.cos(z)\n sinz = torch.sin(z)\n\n zeros = z.detach()*0\n ones = zeros.detach()+1\n zmat = torch.stack([cosz, -sinz, zeros,\n sinz, cosz, zeros,\n zeros, zeros, ones], dim=1).reshape(B, 3, 3)\n\n cosy = torch.cos(y)\n siny = torch.sin(y)\n\n ymat = torch.stack([cosy, zeros, siny,\n zeros, ones, zeros,\n -siny, zeros, cosy], dim=1).reshape(B, 3, 3)\n\n cosx = torch.cos(x)\n sinx = torch.sin(x)\n\n xmat = torch.stack([ones, zeros, zeros,\n zeros, cosx, -sinx,\n zeros, sinx, cosx], dim=1).reshape(B, 3, 3)\n\n rotMat = torch.matmul(torch.matmul(xmat, ymat), zmat)\n return rotMat", "def euler_to_quat(roll, pitch, yaw):\n pose = Pose()\n quaternion = tf.transformations.quaternion_from_euler(roll, pitch, yaw)\n pose.orientation.x = quaternion[0]\n pose.orientation.y = quaternion[1]\n pose.orientation.z = quaternion[2]\n pose.orientation.w = quaternion[3]\n return pose.orientation", "def _euler_90_algorithm(self):\n # define scale factor from min radius and output angle (which is ninety degrees), grab radius from input\n output_angle = np.pi / 2.\n effective_radius = self.radius\n # Euler curvature scaling factor, determined from calculating a 1. radius term and looking at output\n min_radius = effective_radius / 1.87009582269\n a_scale = 2. * min_radius * (output_angle / 2.0)**0.5\n # too many points causes issues on gdsii, splitting over different sizes is probably most suitable way\n if effective_radius < 30.:\n points = 50\n else:\n points = 80\n # Create t array for calculating parametric curve\n end_t = (output_angle / 2.0)**0.5\n all_t = np.linspace(0., end_t, points)\n # Create a list for x values and generate the x components of parametric curve using loop\n xs = list()\n for t in all_t:\n xs.append(a_scale * (t - (1 / 10.) * t**5 + (1 / 216.) * t**9 - (1 / 9360.) * t**13 + (1 / 685440.) 
* t**17))\n # Do the same for y values\n ys = list()\n for t in all_t:\n ys.append(a_scale * (t**3 * (1 / 3.) - (1 / 42.) * t**7 + (1 / 1320.) * t**11 - (1 / 75600.) * t**15))\n # Combine the xs and ys to perform the mirroring operation\n start_euler_xy = zip(xs, ys)\n # Calculating Mirror curve for X and Y, need x axis angle and end positions\n angle_x = np.pi / 2. + output_angle / 2.\n end_x = start_euler_xy[-1][0]\n end_y = start_euler_xy[-1][1]\n # initialising for loops, looping using checked equations from Mathematica for mirroring around line\n x_mirror = list()\n y_mirror = list()\n for elem in start_euler_xy:\n x_mirror.append(end_x + np.cos(2 * angle_x) * (elem[0] - end_x) + np.sin(2 * angle_x) * (elem[1] - end_y))\n\n for elem in start_euler_xy:\n y_mirror.append(end_y + np.sin(2 * angle_x) * (elem[0] - end_x) - np.cos(2 * angle_x) * (elem[1] - end_y))\n\n # takes output of mirrors, flips them and combines them\n mirror_xy = zip(x_mirror[::-1], y_mirror[::-1])\n\n # Combines initial and mirrored list to generate the euler curve\n euler_full = start_euler_xy + mirror_xy\n return euler_full", "def _euler_angles_to_rotation_matrix(theta):\n R_x = np.array([[1, 0, 0],\n [0, math.cos(theta[0]), -math.sin(theta[0])],\n [0, math.sin(theta[0]), math.cos(theta[0])]\n ])\n\n R_y = np.array([[math.cos(theta[1]), 0, math.sin(theta[1])],\n [0, 1, 0],\n [-math.sin(theta[1]), 0, math.cos(theta[1])]\n ])\n\n R_z = np.array([[math.cos(theta[2]), -math.sin(theta[2]), 0],\n [math.sin(theta[2]), math.cos(theta[2]), 0],\n [0, 0, 1]\n ])\n\n R = np.dot(R_z, np.dot(R_y, R_x))\n\n return R", "def fk4(joint_rotations):\n h0_4 = htm4(joint_rotations)\n x0_4 = h0_4[0, 3]\n y0_4 = h0_4[1, 3]\n z0_4 = h0_4[2, 3]\n d0_4 = [x0_4, y0_4, z0_4]\n return d0_4", "def euler2mat(angle):\n B = angle.size(0)\n x, y, z = angle[:,0], angle[:,1], angle[:,2]\n\n cosz = torch.cos(z)\n sinz = torch.sin(z)\n\n zeros = z.detach()*0\n ones = zeros.detach()+1\n zmat = torch.stack([cosz, -sinz, zeros,\n sinz, cosz, zeros,\n zeros, zeros, ones], dim=1).reshape(B, 3, 3)\n\n cosy = torch.cos(y)\n siny = torch.sin(y)\n\n ymat = torch.stack([cosy, zeros, siny,\n zeros, ones, zeros,\n -siny, zeros, cosy], dim=1).reshape(B, 3, 3)\n\n cosx = torch.cos(x)\n sinx = torch.sin(x)\n\n xmat = torch.stack([ones, zeros, zeros,\n zeros, cosx, -sinx,\n zeros, sinx, cosx], dim=1).reshape(B, 3, 3)\n\n rotMat = xmat @ ymat @ zmat\n return rotMat", "def euler2mat(angle):\n B = angle.size(0)\n x, y, z = angle[:,0], angle[:,1], angle[:,2]\n\n cosz = torch.cos(z)\n sinz = torch.sin(z)\n\n zeros = z.detach()*0\n ones = zeros.detach()+1\n zmat = torch.stack([cosz, -sinz, zeros,\n sinz, cosz, zeros,\n zeros, zeros, ones], dim=1).reshape(B, 3, 3)\n\n cosy = torch.cos(y)\n siny = torch.sin(y)\n\n ymat = torch.stack([cosy, zeros, siny,\n zeros, ones, zeros,\n -siny, zeros, cosy], dim=1).reshape(B, 3, 3)\n\n cosx = torch.cos(x)\n sinx = torch.sin(x)\n\n xmat = torch.stack([ones, zeros, zeros,\n zeros, cosx, -sinx,\n zeros, sinx, cosx], dim=1).reshape(B, 3, 3)\n\n rotMat = xmat @ ymat @ zmat\n return rotMat", "def rotations4(polycube, axis):\r\n for i in range(4):\r\n yield rot90(polycube, i, axis)", "def Q2euler(self, q):\n\n\tphi = mt.atan2(2.0*((q[2]*q[3])+(q[0]*q[1])), (q[0]**2.0)-(q[1]**2.0)-(q[2]**2.0)+(q[3]**2.0));\n\tpsi = mt.atan2(2.0*((q[1]*q[2])+(q[0]*q[3])), (q[0]**2.0)+(q[1]**2.0)-(q[2]**2.0)-(q[3]**2.0));\n try:\n theta = mt.asin(2.0*((q[0]*q[2])-(q[1]*q[3])));\n except ValueError:\n print \"ERRO: norm(Q) = %f\" % np.sqrt(np.sum(q**2))\n theta = 
0;\n\n return (phi, theta, psi)", "def _rotate_quaternion(self, q):\n self._normalise()\n return self * q * self.conjugate", "def translate_to_rpc(self, rpcEulerAngle):\n\n \n \n \n rpcEulerAngle.roll_deg = self.roll_deg\n \n \n \n \n \n rpcEulerAngle.pitch_deg = self.pitch_deg\n \n \n \n \n \n rpcEulerAngle.yaw_deg = self.yaw_deg", "def from_euler(rotation, translate=(0, 0, 0), radians=False):\n # pylint: disable=too-many-locals\n x_value, y_value, z_value = rotation\n\n # convert to radians if degrees are passed\n if radians is False:\n x_value, y_value, z_value = map(\n math.radians,\n (x_value, y_value, z_value),\n )\n\n cos_x, sin_x = math.cos(x_value), math.sin(x_value)\n cos_y, sin_y = math.cos(y_value), math.sin(y_value)\n cos_z, sin_z = math.cos(z_value), math.sin(z_value)\n\n x_vector = (\n cos_y * cos_z,\n cos_y * sin_z,\n -sin_y,\n 0.0,\n )\n\n y_vector = (\n sin_x * sin_y * cos_z - cos_x * sin_z,\n sin_x * sin_y * sin_z + cos_x * cos_z,\n sin_x * cos_y,\n 0.0,\n )\n\n z_vector = (\n cos_x * sin_y * cos_z + sin_x * sin_z,\n cos_x * sin_y * sin_z - sin_x * cos_z,\n cos_x * cos_y,\n 0.0,\n )\n\n t_vector = (translate[0], translate[1], translate[2], 1.0)\n\n return x_vector + y_vector + z_vector + t_vector", "def __init__(self, XY_rotations, lines_2Q, n_qubits):\n self.XY_rotations = XY_rotations\n self.lines_2Q = lines_2Q\n self.n_qubits = n_qubits\n dim_depth, dim_qubits, dim_XY = self.XY_rotations.shape\n self.depth = 1+2*len(lines_2Q)\n # XY rotations vector does not match the depth\n if not((2*dim_depth-1) == self.depth):\n raise ValueError('XY rotations vector does not match the depth')\n # XY rotations vector does not match the qubit number\n if not(dim_qubits == n_qubits):\n raise ValueError(\n 'XY rotations vector does not match the qubit number')\n # XY rotations vector does not match the parameter number\n if not(dim_XY == 2):\n raise ValueError(\n 'XY rotations vector does not match the parameter number')", "def fix_rotation(self):\n cube_helper = Cube()\n cube_helper.scramble = self.scramble.split()\n cube_helper.solve = self.solve.split()\n\n rotations = []\n for move in cube_helper.scramble:\n cube_helper.exe_move(move)\n for move in cube_helper.solve:\n if move not in cube_helper.rotation:\n if not self.currently_parsing_smart_cube:\n break\n cube_helper.exe_move(move)\n\n str_perm = cube_helper.perm_to_string(cube_helper.current_perm).split()\n up = str_perm[4]\n front = str_perm[22]\n flag = False\n for i in range (4):\n if (up == \"5\"):\n flag = True\n break\n rotations.append(\"x\")\n cube_helper.exe_move(\"x\")\n str_perm = cube_helper.perm_to_string(cube_helper.current_perm).split()\n up = str_perm[4]\n front = str_perm[22]\n\n if (front != \"23\" and not flag):\n rotations.append(\"z\")\n cube_helper.exe_move(\"z\")\n str_perm = cube_helper.perm_to_string(cube_helper.current_perm).split()\n up = str_perm[4]\n front = str_perm[22]\n\n while (up != \"5\" or front != \"23\"):\n rotations.append(\"y\")\n cube_helper.exe_move(\"y\")\n str_perm = cube_helper.perm_to_string(cube_helper.current_perm).split()\n front = str_perm[22]\n\n final_rot = []\n while len(rotations) >= 3:\n if rotations[0] == rotations[1] == rotations[2]:\n r_fix = \"{}'\".format(rotations[0]).replace(\"''\",\"\")\n final_rot.append(r_fix)\n rotations.pop(0)\n rotations.pop(0)\n rotations.pop(0)\n else:\n final_rot.append(rotations[0])\n rotations.pop(0)\n if final_rot:\n return final_rot\n return rotations", "def get_euler_angle_from_quat(w, x, y, z):\n t0 = +2.0 * (w * x + y * z)\n t1 = 
+1.0 - 2.0 * (x * x + y * y)\n alpha = math.atan2(t0, t1) * 180 / math.pi\n t2 = +2.0 * (w * y - z * x)\n t2 = +1.0 if t2 > +1.0 else t2\n t2 = -1.0 if t2 < -1.0 else t2\n beta = math.asin(t2) * 180 / math.pi\n t3 = +2.0 * (w * z + x * y)\n t4 = +1.0 - 2.0 * (y * y + z * z)\n gamma = math.atan2(t3, t4) * 180 / math.pi\n return alpha, beta, gamma", "def get_dihedral(p0,p1,p2,p3,unit):\n if unit == 'Ang':\n p0 = p0*0.529177249\n p1 = p1*0.529177249\n p2 = p2*0.529177249\n p3 = p3*0.529177249\n\n b0 = -1.0*(p1 - p0)\n b1 = p2 - p1\n b2 = p3 - p2\n\n # normalize b1 so that it does not influence magnitude of vector\n # rejections that come next\n b1 /= linalg.norm(b1)\n\n # vector rejections\n # v = projection of b0 onto plane perpendicular to b1\n # = b0 minus component that aligns with b1\n # w = projection of b2 onto plane perpendicular to b1\n # = b2 minus component that aligns with b1\n v = b0 - dot(b0, b1)*b1\n w = b2 - dot(b2, b1)*b1\n\n # angle between v and w in a plane is the torsion angle\n # v and w may not be normalized but that's fine since tan is y/x\n x = dot(v, w)\n y = dot(cross(b1, v), w)\n return degrees(arctan2(y, x))\n\n #q1 = subtract(p1,p0) # b - a \n #q2 = subtract(p2,p1) # c - b \n #q3 = subtract(p3,p2) # d - c\n #print(q1,q2)\n\n #q1_x_q2 = cross(q1,q2) \n #q2_x_q3 = cross(q2,q3)\n\n #n1 = q1_x_q2/sqrt(dot(q1_x_q2,q1_x_q2)) \n #n2 = q2_x_q3/sqrt(dot(q2_x_q3,q2_x_q3))\n\n #u1 = n2\n #u3 = q2/(sqrt(dot(q2,q2))) \n #u2 = cross(u3,u1)\n\n #cos_theta = dot(n1,u1)\n #sin_theta = dot(n1,u2)\n ## Calculate theta\n #theta = -atan2(sin_theta,cos_theta)\n ## it is different from atan2 from fortran math.atan2(y,x)\n #theta_deg = degrees(theta)\n #return(theta_deg)", "def operator(self, params: Tensor) -> Tensor:\n theta, phi = params\n # calculate entries\n a: Tensor = exp(1j * phi) * cos(theta / 2)\n b: Tensor = sin(theta / 2)\n c: Tensor = -b\n d: Tensor = exp(-1j * phi) * cos(theta / 2)\n # construct the rows of the rotation matrix\n r1: Tensor = cat((a.view(1), b.view(1)))\n r2: Tensor = cat((c.view(1), d.view(1)))\n # build and return the rotation matrix\n rot: Tensor = cat((r1, r2)).view(2, 2)\n return rot", "def euler2mat(angle):\n B = angle.size(0)\n x, y, z = angle[:, 0], angle[:, 1], angle[:, 2]\n\n cosz = torch.cos(z)\n sinz = torch.sin(z)\n\n zeros = z.detach() * 0\n ones = zeros.detach() + 1\n zmat = torch.stack([cosz, -sinz, zeros,\n sinz, cosz, zeros,\n zeros, zeros, ones], dim=1).view(B, 3, 3)\n\n cosy = torch.cos(y)\n siny = torch.sin(y)\n\n ymat = torch.stack([cosy, zeros, siny,\n zeros, ones, zeros,\n -siny, zeros, cosy], dim=1).view(B, 3, 3)\n\n cosx = torch.cos(x)\n sinx = torch.sin(x)\n\n xmat = torch.stack([ones, zeros, zeros,\n zeros, cosx, -sinx,\n zeros, sinx, cosx], dim=1).view(B, 3, 3)\n\n # rotMat = xmat.bmm(ymat).bmm(zmat)\n # changed to match opencv and conversion euler->mat/mat->euler\n rotMat = torch.bmm(zmat, torch.bmm(ymat, xmat))\n\n return rotMat", "def ik3(xyz_array):\n # Eqn 1\n theta_1 = np.arctan2(xyz_array[1], xyz_array[0])\n # Eqn 2\n r1 = np.hypot(xyz_array[0], xyz_array[1])\n # Eqn 3\n r2 = xyz_array[2] - link_lengths[0]\n # Eqn 4\n phi2 = np.arctan2(r2, r1)\n # Eqn 5\n r3 = np.hypot(r1, r2)\n # Eqn 6\n num6 = np.power(link_lengths[2], 2) - \\\n np.power(link_lengths[1], 2) - np.power(r3, 2)\n den6 = -2 * link_lengths[1] * r3\n phi1 = np.arccos(num6 / den6)\n # Eqn 7\n # theta_2 = phi2 - phi1 # elbow down\n theta_2 = phi2 + phi1\n # Eqn 8\n num8 = np.power(r3, 2) - \\\n np.power(link_lengths[1], 2) - np.power(link_lengths[2], 2)\n den8 
= -2 * link_lengths[1] * link_lengths[2]\n phi3 = np.arccos(num8 / den8)\n # Eqn 9\n # theta_3 = pi - phi3 # elbow down\n theta_3 = -(np.pi - phi3)\n # Output Joint Angles\n theta_1 = np.rad2deg(theta_1)\n theta_2 = np.rad2deg(theta_2)\n theta_3 = np.rad2deg(theta_3)\n joint_rotations = np.array([theta_1, theta_2, theta_3])\n return joint_rotations", "def __repr__(self):\n # first check for identity quaternion to avoid nans\n if self.real != 1:\n theta = numpy.arccos(self.real)\n angle = 360 * theta / numpy.pi\n xyz = self.pure / numpy.sin(theta)\n else:\n angle = 0.\n xyz = self.pure\n result = \"Transformation: tx ty tz rx ry rz angle\\n %g %g %g %g %g %g %g\" \\\n % (self.trans[0], self.trans[1], self.trans[2],\n xyz[0], xyz[1], xyz[2], angle)\n return result", "def rotator(angle):\n\n c = np.cos(2*angle)\n s = np.sin(2*angle)\n return np.array([[1,0,0,0],[0,c,-s,0],[0,s,c,0],[0,0,0,1]])", "def euler_from_quaternion(quaternion, axes='sxyz'):\r\n return euler_from_matrix(quaternion_matrix(quaternion), axes)", "def lrbt_rot(self):\n l, r, b, t = self._ext[0], self._ext[1], self._ext[2], self._ext[3]\n if self.__rot90 % 4 == 1: # -90deg\n self._ext = [b, t, l, r]\n elif self.__rot90 % 4 == 2: # -180deg\n self._ext = [r, l, t, b]\n elif self.__rot90 % 4 == 3: # -270deg\n self._ext = [t, b, r, l]", "def rotation_elements(self, eta, phi, theta):\n \n # Three-axis rotation:\n # 1. Rotate about +z by eta (follows RHR; rotation is mathematical and thus counter-clockwise)\n # 2. Tilt by phi with respect to +z (rotation about y-axis) then\n # 3. rotate by theta in-place (rotation about z-axis) ### BUG: This isn't a conceptual rotation about z (influenced by other rotations)\n \n\n eta = radians( eta ) # eta is orientation around the z axis (before reorientation)\n phi = radians( phi ) # phi is grain tilt (with respect to +z axis)\n theta = radians( theta ) # grain orientation (around the z axis)\n \n rotation_elements = [[ cos(eta)*cos(phi)*cos(theta)-sin(eta)*sin(theta) ,\n -cos(eta)*cos(phi)*sin(theta)-sin(eta)*cos(theta) ,\n -cos(eta)*sin(phi) ],\n [ sin(eta)*cos(phi)*cos(theta)+cos(eta)*sin(theta) ,\n -sin(eta)*cos(phi)*sin(theta)+cos(eta)*cos(theta) ,\n sin(eta)*sin(phi) ],\n [ -sin(phi)*cos(theta) ,\n sin(phi)*sin(theta) ,\n cos(phi) ]]\n \n return rotation_elements", "def getRotationTrajectory(self) -> SO3Trajectory:\n return SO3Trajectory(self.times,[m[:9] for m in self.milestones])", "def qrot(I,sym):\n T = s.Symbol(\"T\")\n if type(I) == list:\n return (((s.pi * I[0] * I[1] * I[2])**(1/2))/sym) * ((8 * s.pi**2 * k * T) / (h**2))**(3/2)\n else:\n return (((s.pi * I)**(1/2))/sym) * ((8 * s.pi**2 * k * T) / (h**2))**(3/2)", "def translateEuler(self,trans):\n return np.array([[1,0,0,trans[0]],[0,1,0,trans[1]],[0,0,1,trans[2]],[0,0,0,1]])", "def rotation_euler(self,alpha,beta,gamma,**kwargs):\n xyz = self.get('x,y,z',**kwargs)\n\n # precomte the trig\n ca,sa = np.cos(alpha),np.sin(alpha)\n cb,sb = np.cos(beta),np.sin(beta)\n cg,sg = np.cos(gamma),np.sin(gamma)\n\n\n # get the center of the molecule\n xyz0 = np.mean(xyz,0)\n\n # rotation matrices\n rx = np.array([[1,0,0],[0,ca,-sa],[0,sa,ca]])\n ry = np.array([[cb,0,sb],[0,1,0],[-sb,0,cb]])\n rz = np.array([[cg,-sg,0],[sg,cg,0],[0,0,1]])\n rot_mat = np.dot(rx,np.dot(ry,rz))\n\n # apply the rotation\n xyz = np.dot(rot_mat,(xyz-xyz0).T).T + xyz0\n\n self.update('x,y,z',xyz,**kwargs)", "def to_revolute_chain(self):\n T_zero = {\"p0\": SE3.identity()}\n ang_lims_map = {}\n old_to_new_names = {\n \"p0\": \"p0\"\n } # Returned for user of the method 
(to map old joint names to new ones)\n ub, lb = spherical_angle_bounds_to_revolute(self.ub, self.lb)\n count = 1\n joint_prev = \"p0\"\n for (\n joint\n ) in self.d: # Assumes the dictionary is in chain order (perhaps enforce?)\n new_node1 = \"p\" + str(count)\n count += 1\n # ub[new_node1] = self.ub[joint]\n # lb[new_node1] = self.lb[joint]\n ang_lims_map[joint] = new_node1\n\n new_node2 = \"p\" + str(count)\n count += 1\n old_to_new_names[joint] = new_node2\n\n Ry = SE3(SO3(roty(np.pi / 2)), np.zeros(3))\n T_zero[new_node1] = T_zero[joint_prev].dot(Ry)\n d = self.d[joint]\n Ry_back = SE3(SO3(roty(-np.pi / 2)), np.zeros(3))\n T_zero[new_node2] = T_zero[new_node1].dot(Ry_back).dot(trans_axis(d, \"z\"))\n\n joint_prev = new_node2\n\n # for key in T_zero:\n # if key not in ub.keys() and key is not 'p0':\n # ub[key] = np.pi\n # lb[key] = -np.pi\n\n params = {\"T_zero\": T_zero, \"ub\": ub, \"lb\": lb}\n return RobotRevolute(params), old_to_new_names, ang_lims_map", "def list_rotary():\n \n global listPrevNextCode\n global listStore\n global rot_enc_table\n global list_dt, list_clk\n\n listPrevNextCode <<= 2;\n if (GPIO.input(list_dt)):\n listPrevNextCode |= 0x02\n if (GPIO.input(list_clk)):\n listPrevNextCode |= 0x01\n listPrevNextCode &= 0x0f\n\n # If valid then store as 16 bit data.\n if (rot_enc_table[listPrevNextCode] ):\n listStore <<= 4\n listStore |= listPrevNextCode\n\n if ((listStore & 0xff) == 0x2b):\n return -1\n if ((listStore & 0xff) == 0x17):\n return 1\n \n return 0", "def list_rotation(cns):\n cns = [cns[1], cns[2], cns[3], cns[0]]\n return cns", "def Rotation_EQD_ECL(time):\n eqd_eqj = Rotation_EQD_EQJ(time)\n eqj_ecl = Rotation_EQJ_ECL()\n return CombineRotation(eqd_eqj, eqj_ecl)", "def quaternion_to_angle(self, q):\n\tx, y, z, w = q.x, q.y, q.z, q.w\n\troll, pitch, yaw = tf.transformations.euler_from_quaternion((x, y, z, w))\n\treturn yaw", "def get_rot(self) -> WAQuaternion:\n pass", "def polar_decomposition(self):\n return self.polar_unit_vector, self.polar_angle" ]
[ "0.63399196", "0.62531626", "0.60332036", "0.6030076", "0.59916806", "0.5967598", "0.59581524", "0.5917287", "0.59120715", "0.5886985", "0.58779275", "0.58498204", "0.584666", "0.58190167", "0.58053577", "0.58050466", "0.5801976", "0.5783403", "0.57816374", "0.5771403", "0.5766678", "0.5765716", "0.5765485", "0.5761021", "0.5745366", "0.5731642", "0.57281375", "0.5710326", "0.5710326", "0.57021576", "0.5673996", "0.5671944", "0.56691885", "0.5669077", "0.56608725", "0.5647561", "0.5633663", "0.5611585", "0.56113523", "0.56017214", "0.55915165", "0.5570773", "0.55587524", "0.55400157", "0.5539432", "0.55391115", "0.5524258", "0.55119425", "0.55071014", "0.54976285", "0.549586", "0.54752094", "0.54669684", "0.5457927", "0.5442376", "0.54413146", "0.542563", "0.54152477", "0.5410499", "0.54009837", "0.5386904", "0.5386864", "0.5386398", "0.5380263", "0.53748184", "0.5366932", "0.53658175", "0.5359772", "0.53549373", "0.5323155", "0.53200436", "0.53200436", "0.53121936", "0.53064865", "0.530288", "0.53008276", "0.52982265", "0.5292669", "0.52920437", "0.5291613", "0.52817374", "0.52800107", "0.5273754", "0.5272366", "0.52712", "0.5270418", "0.5268603", "0.5264961", "0.5257702", "0.5253577", "0.5249683", "0.5249028", "0.5248051", "0.52370864", "0.52356106", "0.5233312", "0.52303326", "0.52141863", "0.52092177", "0.52062374" ]
0.59328794
7
Representation of a circuit as an "XY list". On top of the Euler list (see the Euler list representation), the Euler angles are compiled into a single MW gate (a rotation around an axis in the azimuthal plane) through virtual phase updates. The 1Q layers are now represented with an XY vector. rotations_1Q = [layer, qubit, XY_vector] XY_vector = [axis(azimuthal angle), rotation_angle]
def __init__(self, XY_rotations, lines_2Q, n_qubits): self.XY_rotations = XY_rotations self.lines_2Q = lines_2Q self.n_qubits = n_qubits dim_depth, dim_qubits, dim_XY = self.XY_rotations.shape self.depth = 1+2*len(lines_2Q) # XY rotations vector does not match the depth if not((2*dim_depth-1) == self.depth): raise ValueError('XY rotations vector does not match the depth') # XY rotations vector does not match the qubit number if not(dim_qubits == n_qubits): raise ValueError( 'XY rotations vector does not match the qubit number') # XY rotations vector does not match the parameter number if not(dim_XY == 2): raise ValueError( 'XY rotations vector does not match the parameter number')
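A minimal sketch of an input that satisfies the shape checks above (the lines_2Q entry format and the example angles are assumptions; only the required array shape (len(lines_2Q) + 1, n_qubits, 2) follows from the validation in __init__):

import numpy as np

# Hypothetical case: 2 qubits and one 2-qubit layer, so depth = 1 + 2*1 = 3
# and the 1Q part needs (depth + 1) / 2 = 2 single-qubit layers.
n_qubits = 2
lines_2Q = [[(0, 1)]]                     # placeholder entry; only len(lines_2Q) enters the checks
XY_rotations = np.zeros((len(lines_2Q) + 1, n_qubits, 2))
XY_rotations[0, 0] = [0.0, np.pi / 2]     # [azimuthal axis angle, rotation angle]: pi/2 around X
XY_rotations[1, 1] = [np.pi / 2, np.pi]   # pi rotation around the Y axis on qubit 1
# The same consistency conditions enforced by __init__:
assert 2 * XY_rotations.shape[0] - 1 == 1 + 2 * len(lines_2Q)
assert XY_rotations.shape[1] == n_qubits and XY_rotations.shape[2] == 2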
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getEllipsYZRotMatrix(a1, a2):\n adir = a2 - a1\n amid = a1 + 0.5 * adir\n kath = np.sqrt((adir[0] * adir[0] + adir[1] * adir[1]) / 4.0)\n octantA2 = octant(a2)\n theta = np.arctan( abs( (adir[2]/2) / kath) )\n #[1, 4, 6, 7 ] => left rotation\n #[2, 3, 5, 8 ] => right rotation\n if octantA2 in [2, 3, 5, 8]: \n theta = -theta \n print \"theta =\" , np.rad2deg(theta)\n RotY = np.matrix( [ [ np.cos(theta), 0.0, np.sin(theta) ],\n [ 0.0 , 1.0, 0.0 ],\n [ -np.sin(theta), 0.0, np.cos(theta) ]\n ]) \n \n psi = np.arctan( abs( adir[1] / adir[0] ) )\n #[2, 4, 6, 8 ] => left rotation\n #[1, 3, 5, 7 ] => right rotation\n if octantA2 in [1, 3, 5, 7]:\n psi = -psi\n print \"psi =\" , np.rad2deg(psi)\n RotZ = np.matrix( [ [ np.cos(psi), -np.sin(psi), 0.0 ],\n [ np.sin(psi), np.cos(psi), 0.0 ],\n [ 0.0 , 0.0 , 1.0 ]\n ])\n return np.asarray( RotY * RotZ )", "def txyz_2_representation(self: Q, representation: str = \"\") -> List:\n\n symbolic = self.is_symbolic()\n\n if representation == \"\":\n rep = [self.t, self.x, self.y, self.z]\n\n elif representation == \"polar\":\n amplitude = (self.t ** 2 + self.x ** 2 + self.y ** 2 + self.z ** 2) ** (\n 1 / 2\n )\n\n abs_v = abs_of_vector(self).t\n\n if symbolic:\n theta = sp.atan2(abs_v, self.t)\n else:\n theta = math.atan2(abs_v, self.t)\n\n if abs_v == 0:\n theta_x, theta_y, theta_z = 0, 0, 0\n\n else:\n theta_x = theta * self.x / abs_v\n theta_y = theta * self.y / abs_v\n theta_z = theta * self.z / abs_v\n\n rep = [amplitude, theta_x, theta_y, theta_z]\n\n elif representation == \"spherical\":\n\n spherical_t = self.t\n\n spherical_r = (self.x ** 2 + self.y ** 2 + self.z ** 2) ** (1 / 2)\n\n if spherical_r == 0:\n theta = 0\n else:\n if symbolic:\n theta = sp.acos(self.z / spherical_r)\n\n else:\n theta = math.acos(self.z / spherical_r)\n\n if symbolic:\n phi = sp.atan2(self.y, self.x)\n else:\n phi = math.atan2(self.y, self.x)\n\n rep = [spherical_t, spherical_r, theta, phi]\n\n else:\n raise ValueError(f\"Oops, don't know representation: representation\")\n\n return rep", "def one_qubit_rotation(self, qubit, symbols):\n # print(symbols, \"hi\")\n return [cirq.rx(symbols[0])(qubit),\n cirq.ry(symbols[1])(qubit),\n cirq.rz(symbols[2])(qubit)]", "def two_qubit_rotation(self, bits, symbols):\n circuit = cirq.Circuit()\n circuit += cirq.Circuit(self.one_qubit_rotation(bits[0], symbols[0:3]))\n circuit += cirq.Circuit(self.one_qubit_rotation(bits[1], symbols[3:6]))\n circuit += [cirq.ZZ(*bits)**symbols[6]]\n circuit += [cirq.YY(*bits)**symbols[7]]\n circuit += [cirq.XX(*bits)**symbols[8]]\n circuit += cirq.Circuit(self.one_qubit_rotation(bits[0], symbols[9:12]))\n circuit += cirq.Circuit(self.one_qubit_rotation(bits[1], symbols[12:]))\n return circuit", "def qwc_rotation(pauli_operators):\n paulis_with_identity = (qml.Identity, qml.PauliX, qml.PauliY, qml.PauliZ)\n if not all(isinstance(element, paulis_with_identity) for element in pauli_operators):\n raise TypeError(\n \"All values of input pauli_operators must be either Identity, PauliX, PauliY, or PauliZ instances,\"\n \" instead got pauli_operators = {}.\".format(pauli_operators)\n )\n\n for pauli in pauli_operators:\n if isinstance(pauli, qml.PauliX):\n qml.RY(-np.pi / 2, wires=pauli.wires)\n\n elif isinstance(pauli, qml.PauliY):\n qml.RX(np.pi / 2, wires=pauli.wires)", "def representation_2_txyz(self: Q, representation: str = \"\") -> List:\n\n symbolic = False\n\n if (\n hasattr(self.t, \"free_symbols\")\n or hasattr(self.x, \"free_symbols\")\n or hasattr(self.y, \"free_symbols\")\n or 
hasattr(self.z, \"free_symbols\")\n ):\n symbolic = True\n\n if representation == \"\":\n box_t, box_x, box_y, box_z = self.t, self.x, self.y, self.z\n\n elif representation == \"polar\":\n amplitude, theta_x, theta_y, theta_z = self.t, self.x, self.y, self.z\n\n theta = (theta_x ** 2 + theta_y ** 2 + theta_z ** 2) ** (1 / 2)\n\n if theta == 0:\n box_t = self.t\n box_x, box_y, box_z = 0, 0, 0\n\n else:\n if symbolic:\n box_t = amplitude * sp.cos(theta)\n box_x = self.x / theta * amplitude * sp.sin(theta)\n box_y = self.y / theta * amplitude * sp.sin(theta)\n box_z = self.z / theta * amplitude * sp.sin(theta)\n else:\n box_t = amplitude * math.cos(theta)\n box_x = self.x / theta * amplitude * math.sin(theta)\n box_y = self.y / theta * amplitude * math.sin(theta)\n box_z = self.z / theta * amplitude * math.sin(theta)\n\n elif representation == \"spherical\":\n box_t, R, theta, phi = self.t, self.x, self.y, self.z\n\n if symbolic:\n box_x = R * sp.sin(theta) * sp.cos(phi)\n box_y = R * sp.sin(theta) * sp.sin(phi)\n box_z = R * sp.cos(theta)\n else:\n box_x = R * math.sin(theta) * math.cos(phi)\n box_y = R * math.sin(theta) * math.sin(phi)\n box_z = R * math.cos(theta)\n\n elif representation == \"hyperbolic\":\n u, v, theta, phi = self.t, self.x, self.y, self.z\n\n if symbolic:\n box_t = v * sp.exp(u)\n box_x = v * sp.exp(-u)\n box_y = v * sp.sin(theta) * sp.sin(phi)\n box_z = v * sp.cos(theta)\n\n else:\n box_t = v * math.exp(u)\n box_x = v * math.exp(-u)\n box_y = v * math.sin(theta) * sp.sin(phi)\n box_z = v * math.cos(theta)\n\n else:\n raise ValueError(f\"Oops, don't know representation: representation\")\n\n return [box_t, box_x, box_y, box_z]", "def __init__(self, rotations_1Q, lines_2Q, n_qubits):\n self.rotations_1Q = rotations_1Q\n self.lines_2Q = lines_2Q\n self.n_qubits = n_qubits\n dim_depth, dim_qubits, dim_rot = self.rotations_1Q.shape\n self.depth = 1+2*len(lines_2Q)\n # 1Q rotations vector does not match the depth\n if not((2*dim_depth-1) == self.depth):\n raise ValueError('1Q rotations vector does not match the depth')\n # 1Q rotations vector does not match the qubit number\n if not(dim_qubits == n_qubits):\n raise ValueError(\n '1Q rotations vector does not match the qubit number')\n # 1Q rotations vector does not match the parameter number\n if not(dim_rot == 4):\n raise ValueError(\n '1Q rotations vector does not match the parameter number')", "def vector_arrows(Out, x, y, z, plot_layer):\n\n x = sort_dim(x)\n y = sort_dim(y)\n z = sort_dim(z)\n\n # length of array in each dimension\n Ny = len(y)-1\n Nx = len(x)-1\n Nz = len(z)-1\n\n # coordinates of cell centres\n # (halfway between L and R edges)\n xm = 0.5 * (x[:-1] + x[1:])\n ym = 0.5 * (y[:-1] + y[1:])\n zm = 0.5 * (z[:-1] + z[1:])\n\n # create empty arrays for output\n U = np.zeros((len(Out.Qx[:,0,0,0]),len(Out.Qx[0,:,0,0]),len(Out.Qx[0,0,:,0]),len(Out.Qx[0,0,0,:])+1)) \n V = np.zeros((len(Out.Qy[:,0,0,0]),len(Out.Qy[0,:,0,0]),len(Out.Qy[0,0,:,0])+1,len(Out.Qy[0,0,0,:])))\n W = np.zeros((len(Out.Qz[:,0,0,0]),len(Out.Qz[0,:,0,0])+1,len(Out.Qz[0,0,:,0]),len(Out.Qz[0,0,0,:])))\n\n # create mesh\n X, Y, = np.meshgrid(xm, ym) # coordinates of cell centers\n Z = np.meshgrid(zm)\n\n # iterate through timesteps\n for t in range(len(Out.Qy[:,0,0,0])): # number of timesteps\n\n #grab relevant timestep from Out array\n Qx = Out.Qx[t,:,:,:]\n Qy = Out.Qy[t,:,:,:]\n Qz = Out.Qz[t,:,:,:]\n\n # Calculate flows at cell centers by interpolating between L and R faces\n Ut = np.concatenate((Qx[plot_layer, :, 0].reshape((1, Ny, 1)), 
\\\n 0.5 * (Qx[plot_layer, :, :-1].reshape((1, Ny, Nx-2)) +\\\n Qx[plot_layer, :, 1: ].reshape((1, Ny, Nx-2))), \\\n Qx[plot_layer, :, -1].reshape((1, Ny, 1))), axis=2).reshape((Ny,Nx))\n\n Vt = np.concatenate((Qy[plot_layer, 0, :].reshape((1, 1, Nx)), \\\n 0.5 * (Qy[plot_layer, :-1, :].reshape((1, Ny-2, Nx)) +\\\n Qy[plot_layer, 1:, :].reshape((1, Ny-2, Nx))), \\\n Qy[plot_layer, -1, :].reshape((1, 1, Nx))), axis=1).reshape((Ny,Nx))\n\n # average flow across vertical cell to get z flow at cell centre\n QzTop = Qz[0:-1,:,:]\n QzBot = Qz[1:,:,:]\n Wt = (QzTop+QzBot)/2\n \n # add results to output arrays\n U[t,:,:,:] = Ut\n V[t,:,:,:] = Vt\n W[t,1:-1,:,:] = Wt\n\n return X,Y,Z,U,V,W", "def intermediateJacPol2Rot(self,x):\n allS = np.sin(x[0,:])\n allC = np.cos(x[0,:])\n allR = x[1,:]\n \n Jac = Idn(x.shape[1],self._dim)\n Jac[:,0,0] = -allS*allR\n Jac[:,0,1] = allC\n Jac[:,1,0] = allC*allR\n Jac[:,1,1] = allS\n return Jac", "def test_rot_decomposition(self, diff_method):\r\n dev = qml.device(\"default.qubit\", wires=1)\r\n\r\n def circuit(weights):\r\n qml.Rot(weights[0], weights[1], weights[2], wires=0)\r\n return qml.expval(qml.PauliX(0))\r\n\r\n circuit = qml.QNode(circuit, dev, diff_method=diff_method)\r\n params = np.array([1.0, 2.0, 3.0])\r\n tapes = qml.metric_tensor(circuit, only_construct=True)(params)\r\n assert len(tapes) == 3\r\n\r\n # first parameter subcircuit\r\n assert len(tapes[0].operations) == 0\r\n\r\n # Second parameter subcircuit\r\n assert len(tapes[1].operations) == 4\r\n assert isinstance(tapes[1].operations[0], qml.RZ)\r\n assert tapes[1].operations[0].data == [1]\r\n # PauliY decomp\r\n assert isinstance(tapes[1].operations[1], qml.PauliZ)\r\n assert isinstance(tapes[1].operations[2], qml.S)\r\n assert isinstance(tapes[1].operations[3], qml.Hadamard)\r\n\r\n # Third parameter subcircuit\r\n assert len(tapes[2].operations) == 2\r\n assert isinstance(tapes[2].operations[0], qml.RZ)\r\n assert isinstance(tapes[2].operations[1], qml.RY)\r\n assert tapes[2].operations[0].data == [1]\r\n assert tapes[2].operations[1].data == [2]\r\n\r\n result = qml.metric_tensor(circuit)(params)\r\n assert result.shape == (3, 3)", "def intermediateJacRot2Polar(self,x):\n r = cNorm(x[:2,:],kd=False)\n x0overr = x[0,:]/r\n x1overr = x[1,:]/r\n\n Jac = Idn(x.shape[1],x.shape[0])\n Jac[:,0,0] = -x1overr\n Jac[:,0,1] = x0overr\n Jac[:,1,0] = x0overr\n Jac[:,1,1] = x1overr\n \n return Jac", "def Rotation_EQJ_ECL():\n # ob = mean obliquity of the J2000 ecliptic = 0.40909260059599012 radians.\n c = 0.9174821430670688 # cos(ob)\n s = 0.3977769691083922 # sin(ob)\n return RotationMatrix([\n [ 1, 0, 0],\n [ 0, +c, -s],\n [ 0, +s, +c]\n ])", "def get_Grotations(self, x):\n xsh = x.get_shape().as_list()\n angles = [0.,np.pi/2.,np.pi,3.*np.pi/2.]\n rx = []\n for i in range(4):\n # Z4 rotations about the z axis\n perm = [1,0,2,3]\n y = tf.transpose(x, perm=perm)\n y = tf.contrib.image.rotate(y, angles[i])\n y = tf.transpose(y, perm=perm)\n # Rotations in the quotient space (sphere S^2)\n # i) Z4 rotations about y axis\n for j in range(4):\n perm = [2,1,0,3]\n z = tf.transpose(y, perm=perm)\n z = tf.contrib.image.rotate(z, angles[-j])\n z = tf.transpose(z, perm=perm)\n \n rx.append(z)\n # ii) 2 rotations to the poles about the x axis\n perm = [0,2,1,3]\n z = tf.transpose(y, perm=perm)\n z = tf.contrib.image.rotate(z, angles[3])\n z = tf.transpose(z, perm=perm)\n rx.append(z)\n\n z = tf.transpose(y, perm=perm)\n z = tf.contrib.image.rotate(z, angles[1])\n z = tf.transpose(z, perm=perm)\n 
rx.append(z)\n\n return rx", "def z_rotation(self):\n before = ('R', 'r', 'U', 'u', 'L', 'l', 'D', 'd', 'M', 'E', 'x', 'y')\n after = ('U', 'u', 'L', 'l', 'D', 'd', 'R', 'r', 'E', 'M\\'', \"y\", \"x'\")\n solve = self.solve_helper.maketrans(dict(zip(before, after)))\n solve_trans = self.solve_helper.translate(solve)\n solve_trans = solve_trans.replace(\"\\'\\'\", \"\")\n self.solve_helper = solve_trans", "def T(self):\n\n # Calculate the direction cosines for the local x-axis\n # The local x-axis will run from the i-node to the j-node\n xi = self.i_node.X\n xj = self.j_node.X\n yi = self.i_node.Y\n yj = self.j_node.Y\n zi = self.i_node.Z\n zj = self.j_node.Z\n x = [(xj - xi), (yj - yi), (zj - zi)]\n x = x/norm(x)\n \n # The local y-axis will be in the plane of the plate\n # Find a vector in the plate's local xy plane\n xn = self.n_node.X\n yn = self.n_node.Y\n zn = self.n_node.Z\n xy = [xn - xi, yn - yi, zn - zi]\n\n # Find a vector perpendicular to the plate surface to get the orientation of the local z-axis\n z = cross(x, xy)\n \n # Divide the vector by its magnitude to produce a unit z-vector of direction cosines\n z = z/norm(z)\n\n # Calculate the local y-axis as a vector perpendicular to the local z and x-axes\n y = cross(z, x)\n \n # Divide the z-vector by its magnitude to produce a unit vector of direction cosines\n y = y/norm(y)\n\n # Create the direction cosines matrix\n dirCos = array([x, y, z])\n \n # Build the transformation matrix\n transMatrix = zeros((24, 24))\n transMatrix[0:3, 0:3] = dirCos\n transMatrix[3:6, 3:6] = dirCos\n transMatrix[6:9, 6:9] = dirCos\n transMatrix[9:12, 9:12] = dirCos\n transMatrix[12:15, 12:15] = dirCos\n transMatrix[15:18, 15:18] = dirCos\n transMatrix[18:21, 18:21] = dirCos\n transMatrix[21:24, 21:24] = dirCos\n \n return transMatrix", "def rotorconversion(x):\n return cf.MultiVector(layout, val_rotorconversion(x))", "def Rot_layer(self, w):\n for idx, element in enumerate(w):\n qml.Rot(element[0], element[1], element[2], wires=idx)", "def _tk1_to_rotations(a: float, b: float, c: float) -> Circuit:\n circ = Circuit(1)\n circ.Rz(c, 0).Rx(b, 0).Rz(a, 0)\n return circ", "def _cubelet_rotation_matrix(self, cubelet_meta_info, qpos_array):\n euler_angles = qpos_array[cubelet_meta_info[\"euler_qpos\"]]\n return rotation.euler2mat(euler_angles)", "def Rotation_ECL_EQJ():\n # ob = mean obliquity of the J2000 ecliptic = 0.40909260059599012 radians.\n c = 0.9174821430670688 # cos(ob)\n s = 0.3977769691083922 # sin(ob)\n return RotationMatrix([\n [ 1, 0, 0],\n [ 0, +c, +s],\n [ 0, -s, +c]\n ])", "def rotations4(polycube, axis):\r\n for i in range(4):\r\n yield rot90(polycube, i, axis)", "def input_system():\n vx = a.odometry_data[:, 2:3] # linear velocity_y [m/s]\n vy = a.odometry_data[:, 1:2] # linear velocity_x [m/s]\n v = np.add(vx, vy)\n v = np.true_divide(v, 2) # combined velocity [m/s]\n yawrate = np.reshape(a.odometry_data[:, 3], (-1, 1)) # angular_z [rad/s]\n u = np.reshape([v, yawrate], (-1, 2))\n return u", "def __init__(self, name, times, x, y, z):\n x = numpy.array(x)\n y = numpy.array(y)\n z = numpy.array(z)\n assert len(x.shape) == 1\n assert len(y.shape) == 1\n assert len(z.shape) == 1\n roll, pitch, yaw = z, y, x # first (resp. last) coordinate in EulerAnglesZyx is yaw (resp. 
roll)\n cr, cp, cy = numpy.cos(roll / 2.), numpy.cos(pitch / 2.), numpy.cos(yaw / 2.)\n sr, sp, sy = numpy.sin(roll / 2.), numpy.sin(pitch / 2.), numpy.sin(yaw / 2.)\n q_w = cr * cp * cy + sr * sp * sy\n q_x = sr * cp * cy - cr * sp * sy\n q_y = cr * sp * cy + sr * cp * sy\n q_z = cr * cp * sy - sr * sp * cy\n self.roll = roll\n self.pitch = pitch\n self.yaw = yaw\n super(EulerAnglesZyxSignal, self).__init__(name, times, q_w, q_x, q_y, q_z)", "def rotate(self, theta, legs):\n U, onew = rotationTensor(theta, self.symmetries, legs)\n B = U @ self\n new = list(onew)\n old = list(legs)\n if B.internallegs != self.internallegs:\n old.append(self.internallegs[0])\n new.append(B.internallegs[0])\n B.swaplegs({n: o for o, n in zip(old, new)})\n return B.couplingAddapt(self.coupling)", "def make_oneq_cliffords():\n ixyz_list = [g().to_matrix() for g in (IGate, XGate, YGate, ZGate)]\n ih_list = [g().to_matrix() for g in (IGate, HGate)]\n irs_list = [\n IGate().to_matrix(),\n SdgGate().to_matrix() @ HGate().to_matrix(),\n HGate().to_matrix() @ SGate().to_matrix(),\n ]\n oneq_cliffords = [\n Operator(ixyz @ ih @ irs) for ixyz in ixyz_list for ih in ih_list for irs in irs_list\n ]\n return oneq_cliffords", "def circuit():\n np.random.seed(1967)\n for gates in gates_per_layers:\n for gate in gates:\n qml.apply(gate)\n return qml.expval(qml.PauliZ(0))", "def generate(self):\n inside = self.crystal.is_inside(self.x,self.y,self.z)\n X = np.vstack((self.x[inside],self.y[inside],self.z[inside]))\n return self.rot.rotate(X)", "def get_rotation(self) -> np.array:\n axis = self.get_arms()[1]\n force = [self.d_x, self.d_y] # \"Force applied on the arm\"\n o_m = [self.target.x_obj - axis.x_obj, self.target.y_obj - axis.y_obj]\n torque = o_m[0]*force[1] - o_m[1] * force[0] # OM vectorial F\n if torque == 1: # Anti clockwise rotation\n rotation = np.array([[0, -1], [1, 0]])\n if torque == -1: # Clockwise rotation\n rotation = np.array([[0, 1], [-1, 0]])\n if torque == 0: # No rotation\n rotation = np.array([[0, 0], [0, 0]])\n return rotation", "def __init__(self, euler_1Q, lines_2Q, n_qubits):\n self.euler_1Q = euler_1Q\n self.lines_2Q = lines_2Q\n self.n_qubits = n_qubits\n dim_depth, dim_qubits, dim_euler = self.euler_1Q.shape\n self.depth = 1+2*len(lines_2Q)\n # euler angles vector does not match the depth\n if not((2*dim_depth-1) == self.depth):\n raise ValueError('euler angles vector does not match the depth')\n # euler angles vector does not match the qubit number\n if not(dim_qubits == n_qubits):\n raise ValueError(\n 'euler angles vector does not match the qubit number')\n # euler angles vector does not match the parameter number\n if not(dim_euler == 3):\n raise ValueError(\n 'euler angles vector does not match the parameter number')", "def ik3(xyz_array):\n # Eqn 1\n theta_1 = np.arctan2(xyz_array[1], xyz_array[0])\n # Eqn 2\n r1 = np.hypot(xyz_array[0], xyz_array[1])\n # Eqn 3\n r2 = xyz_array[2] - link_lengths[0]\n # Eqn 4\n phi2 = np.arctan2(r2, r1)\n # Eqn 5\n r3 = np.hypot(r1, r2)\n # Eqn 6\n num6 = np.power(link_lengths[2], 2) - \\\n np.power(link_lengths[1], 2) - np.power(r3, 2)\n den6 = -2 * link_lengths[1] * r3\n phi1 = np.arccos(num6 / den6)\n # Eqn 7\n # theta_2 = phi2 - phi1 # elbow down\n theta_2 = phi2 + phi1\n # Eqn 8\n num8 = np.power(r3, 2) - \\\n np.power(link_lengths[1], 2) - np.power(link_lengths[2], 2)\n den8 = -2 * link_lengths[1] * link_lengths[2]\n phi3 = np.arccos(num8 / den8)\n # Eqn 9\n # theta_3 = pi - phi3 # elbow down\n theta_3 = -(np.pi - phi3)\n # Output Joint Angles\n 
theta_1 = np.rad2deg(theta_1)\n theta_2 = np.rad2deg(theta_2)\n theta_3 = np.rad2deg(theta_3)\n joint_rotations = np.array([theta_1, theta_2, theta_3])\n return joint_rotations", "def operator(self, params: Tensor) -> Tensor:\n theta, phi = params\n # calculate entries\n a: Tensor = exp(1j * phi) * cos(theta / 2)\n b: Tensor = sin(theta / 2)\n c: Tensor = -b\n d: Tensor = exp(-1j * phi) * cos(theta / 2)\n # construct the rows of the rotation matrix\n r1: Tensor = cat((a.view(1), b.view(1)))\n r2: Tensor = cat((c.view(1), d.view(1)))\n # build and return the rotation matrix\n rot: Tensor = cat((r1, r2)).view(2, 2)\n return rot", "def getRotationTrajectory(self) -> SO3Trajectory:\n return SO3Trajectory(self.times,[m[:9] for m in self.milestones])", "def __init__(self):\n self.rot_axis = 1", "def toRot(q):\n R = SX.zeros(3, 3)\n qi = q[0]; qj = q[1]; qk = q[2]; qr = q[3]\n R[0, 0] = 1. - 2. * (qj * qj + qk * qk);\n R[0, 1] = 2. * (qi * qj - qk * qr);\n R[0, 2] = 2. * (qi * qk + qj * qr)\n R[1, 0] = 2. * (qi * qj + qk * qr);\n R[1, 1] = 1. - 2. * (qi * qi + qk * qk);\n R[1, 2] = 2. * (qj * qk - qi * qr)\n R[2, 0] = 2. * (qi * qk - qj * qr);\n R[2, 1] = 2. * (qj * qk + qi * qr);\n R[2, 2] = 1. - 2. * (qi * qi + qj * qj)\n\n return R", "def vrrotvec2mat(ax_ang):\n\n #file_dir = os.path.dirname(os.path.realpath(__file__))\n #path_dir2 = file_dir + '/../geometry/'\n #sys.path.append(path_dir2)\n\n if ax_ang.ndim == 1:\n if np.size(ax_ang) == 5:\n ax_ang = np.reshape(ax_ang, (5, 1))\n msz = 1\n elif np.size(ax_ang) == 4:\n ax_ang = np.reshape(np.hstack((ax_ang, np.array([1]))), (5, 1))\n msz = 1\n else:\n raise Exception('Wrong Input Type')\n elif ax_ang.ndim == 2:\n if np.shape(ax_ang)[0] == 5:\n msz = np.shape(ax_ang)[1]\n elif np.shape(ax_ang)[1] == 5:\n ax_ang = ax_ang.transpose()\n msz = np.shape(ax_ang)[1]\n else:\n raise Exception('Wrong Input Type')\n else:\n raise Exception('Wrong Input Type')\n\n direction = ax_ang[0:3, :]\n angle = ax_ang[3, :]\n\n d = np.array(direction, dtype=np.float64)\n d /= np.linalg.norm(d, axis=0)\n x = d[0, :]\n y = d[1, :]\n z = d[2, :]\n c = np.cos(angle)\n s = np.sin(angle)\n tc = 1 - c\n\n mt11 = tc*x*x + c\n mt12 = tc*x*y - s*z\n mt13 = tc*x*z + s*y\n\n mt21 = tc*x*y + s*z\n mt22 = tc*y*y + c\n mt23 = tc*y*z - s*x\n\n mt31 = tc*x*z - s*y\n mt32 = tc*y*z + s*x\n mt33 = tc*z*z + c\n\n mtx = np.column_stack((mt11, mt12, mt13, mt21, mt22, mt23, mt31, mt32, mt33))\n\n inds1 = np.where(ax_ang[4, :] == -1)\n mtx[inds1, :] = -mtx[inds1, :]\n\n if msz == 1:\n mtx = mtx.reshape(3, 3)\n else:\n mtx = mtx.reshape(msz, 3, 3)\n\n return mtx", "def y_operators(self) -> List[PauliTerm]:\n # Y = iXZ\n y_operators = [1j * x_op * z_op\n for x_op, z_op in zip(self.x_operators(), self.z_operators())]\n for y_op in y_operators:\n assert y_op.coefficient == 1\n return y_operators", "def getOblateXRotMatrix(aStar1, aStar2):\n aStarDir = aStar2 - a1\n aStarmid = aStar1 + 0.5 * aStarDir\n kath = np.sqrt((aStarDir[0] * aStarDir[0] + aStarDir[1] * aStarDir[1]) / 4.0)\n phi = np.arctan( abs( (aStarDir[2]/2) / kath) )\n octantAStar2 = octant(aStar2)\n if octantAStar2 in [1, 2, 7, 8]: #\n phi = -phi\n print \"phi =\" , np.rad2deg(phi)\n RotX = np.matrix( [ [ 1.0, 0.0 , 0.0 ],\n [ 0.0, np.cos(phi), np.sin(phi)],\n [ 0.0, -np.sin(phi), np.cos(phi)]\n ])\n return np.asarray( RotX )", "def test_cylindrical(self):\n # Rotate around the z axis\n r = Joint.cylindrical(np.array([0, 0, 1]))\n t_mat = r(np.array([np.pi / 2, 1.0]))\n\n rot_vec = np.dot(t_mat[:3, :3], np.array([1, 0, 0]))\n\n 
self.assertTrue(np.allclose(\n rot_vec, np.array([0, 1, 0]), rtol=1e-5, atol=1e-5))\n self.assertTrue(np.allclose(t_mat[2, 3], 1))", "def __repr__(self):\n # first check for identity quaternion to avoid nans\n if self.real != 1:\n theta = numpy.arccos(self.real)\n angle = 360 * theta / numpy.pi\n xyz = self.pure / numpy.sin(theta)\n else:\n angle = 0.\n xyz = self.pure\n result = \"Transformation: tx ty tz rx ry rz angle\\n %g %g %g %g %g %g %g\" \\\n % (self.trans[0], self.trans[1], self.trans[2],\n xyz[0], xyz[1], xyz[2], angle)\n return result", "def _make_circuit_instructions(n_qubits, depth, type_circuit):\n\n if type_circuit in [0, 1, 2]:\n\n # if type_circuit == 1:\n # if depth > 8:\n # raise ValueError(\n # \"For type-1 circuits, only at most depth=8 allowed!\"\n # )\n\n # define rotations for circuit in each layer, 0: identity, 1:X, 2:Y 3:Z\n ini_pauli = np.zeros([depth, n_qubits], dtype=int)\n\n # set first and second layer, rest comes later\n ini_pauli[0, :] = 2 # y rotation\n if depth > 1:\n ini_pauli[1, :] = 3 # z rotation\n\n # construct natural parameterized circuit\n # gives which type of entangling gates at each layer -- first entry is\n # first qubit index, second is second qubit index, third entry is type\n # of entangling gate\n entangling_gate_index_list = [[] for i in range(depth)]\n orderList = []\n for i in range(n_qubits//2):\n if i % 2 == 0:\n orderList.append(i//2)\n else:\n orderList.append((n_qubits-i)//2)\n\n if n_qubits > 1:\n shiftList = [orderList[0]]\n else:\n shiftList = []\n for i in range(1, n_qubits//2):\n shiftList.append(orderList[i])\n shiftList += shiftList[:-1]\n\n # this list gives which entangling gates are applied in which layer\n if type_circuit == 0:\n # deep natural PQC, includes non-nearest neighbor gates\n for j in range(min(len(shiftList), int(np.ceil(depth/2))-1)):\n entangling_gate_index_list[1+2*j] = [\n [2*i, (2*i+1+2*shiftList[j]) % n_qubits, 0]\n for i in range(n_qubits//2)\n ]\n elif type_circuit == 1:\n # only do 2 entangling layers at max, and only do gates with\n # nearest neighbor and no ring\n for j in range(min(len(shiftList), 3)):\n if j == 0:\n entangling_gate_index_list[1+2*j] = [\n [2*i, (2*i+1+2*shiftList[j]) % n_qubits, 0]\n for i in range(n_qubits//2)\n ]\n elif (j == 1 or j == 2):\n # exclude ring gate and gate 0,1 on third entangling layer\n entangling_gate_index_list[1+2*j] = [\n [2*i, (2*i+1+2*shiftList[j]) % n_qubits, 0]\n for i in range(1, n_qubits//2)\n ]\n\n elif type_circuit == 2:\n # only do 3 regular entangling layers in a ring topology, then two\n # more phase gates with next-nearst neighbor, which requires one\n # swap. 
This adds 4 more parameters\n for j in range(min(len(shiftList), 3)):\n entangling_gate_index_list[1+2*j] = [\n [2*i, (2*i+1+2*shiftList[j]) % n_qubits, 0]\n for i in range(n_qubits//2)\n ]\n # entangling_gate_index_list[1+2*3]=[[0,n_qubits-1,1],[0,1,0],[n_qubits-1,n_qubits-2,0]]\n # entangling_gate_index_list[1+2*3]=[[0,n_qubits-1,1],[0,1,0],[n_qubits-1,n_qubits-2,0]]\n entangling_gate_index_list[1+2*3] = [\n [n_qubits-1, 1, 0],\n [0, n_qubits-2, 0]\n ]\n\n for i in range(len(entangling_gate_index_list)-1):\n if len(entangling_gate_index_list[i]) > 0:\n for j in range(len(entangling_gate_index_list[i])):\n qubit_index = entangling_gate_index_list[i][j][0]\n ini_pauli[i+1, qubit_index] = 2\n if i+2 < depth:\n ini_pauli[i+2, qubit_index] = 3\n\n elif type_circuit == 3:\n\n ini_pauli = np.ones([depth, n_qubits], dtype=int)*2\n\n for i in range(1, depth, 2):\n ini_pauli[i, :] = 3\n\n if n_qubits % 2 == 0:\n # even qubits ALT circuit needs to get rid of boundary rotations at\n # even entangling layers\n for i in range(4, depth, 4):\n ini_pauli[i, 0] = 0\n ini_pauli[i, -1] = 0\n if i+1 < depth:\n ini_pauli[i+1, 0] = 0\n ini_pauli[i+1, -1] = 0\n else:\n # for odd qubits, get rid of boundary either on top or bottom qubit\n for i in range(2, depth, 4):\n ini_pauli[i, -1] = 0\n if i+1 < depth:\n ini_pauli[i+1, -1] = 0\n for i in range(4, depth, 4):\n ini_pauli[i, 0] = 0\n if i+1 < depth:\n ini_pauli[i+1, 0] = 0\n\n # CNOT entangling gates\n entangling_gate_index_list = [[] for i in range(depth)]\n counter = 0\n # third index indicates type of entangling gate\n for k in range(1, depth-1, 2):\n\n # place entangler every second layer, do not place any at last\n if counter % 2 == 0:\n # even layer\n entangling_gate_index_list[k] = [\n [2*j, 2*j+1, 1] for j in range(n_qubits//2)\n ]\n else:\n # odd layer\n entangling_gate_index_list[k] = [\n [2*j+1, 2*j+2, 1] for j in range((n_qubits-1)//2)\n ]\n counter += 1\n\n else:\n raise ValueError('type_circuit='+f'{type_circuit}'+' not recognised.')\n\n return ini_pauli, entangling_gate_index_list", "def ZZRotation(angle):\r\n return Diagonal(2,\r\n np.array([[np.exp(1j * angle), np.exp(-1j * angle)],\r\n [np.exp(-1j * angle), np.exp(1j * angle)]]),\r\n 'R_ZZ({})'.format(angle))", "def EulerZXZ2Mat(e):\n x=e[0]\n y=e[1]\n z=e[2]\n s1=np.sin(x)\n s2=np.sin(y)\n s3=np.sin(z)\n c1=np.cos(x)\n c2=np.cos(y)\n c3=np.cos(z)\n m=np.array([[c1*c3-c2*s1*s3,-c1*s3-c3*c2*s1,s1*s2],\n [s1*c3+c2*c1*s3,c1*c2*c3-s1*s3,-c1*s2],\n [s3*s2,s2*c3,c2]])\n return m", "def unit_vectors(self):\n # return {'comp1': CartesianRepresentation(...),\n # 'comp2': CartesianRepresentation(...),\n # 'comp3': CartesianRepresentation(...)}\n raise Exception(\"Not yet implemented\")", "def make_ARI_list(dx, dy, m_info, offset):\n \"\"\"\n 1 Get information from m_info.\n \"\"\"\n x_m = m_info[0]\n y_m = m_info[1]\n z_m = m_info[2]\n\n m_points = m_info[3]\n\n m_p0 = m_points[0]\n m_p1 = m_points[1]\n m_p2 = m_points[2]\n m_p3 = m_points[3]\n\n \"\"\"\n 2 Get points of ARI.\n \"\"\"\n x_k = y_m * 2 / 3 # NOTE: fixed number\n\n # KUMIKI_points_left reflect offset\n p5 = (dx, dy)\n p4 = (dx, dy + y_m / 3 - offset)\n p3 = (dx + x_k, dy + y_m / 4 - offset)\n p2 = (dx + x_k, dy + 3 * y_m / 4 + offset)\n p1 = (dx, dy + 2 * y_m / 3 + offset)\n p0 = (dx, dy + y_m)\n\n KUMIKI_points_left = [p0, p1, p2, p3, p4, p5]\n\n # KUMIKI_points_right not reflect offset\n p5 = (dx, dy)\n p4 = (dx, dy + y_m / 3)\n p3 = (dx + x_k, dy + y_m / 4)\n p2 = (dx + x_k, dy + 3 * y_m / 4)\n p1 = (dx, dy + 2 * y_m / 3)\n p0 = (dx, 
dy + y_m)\n\n KUMIKI_points_right = [p0, p1, p2, p3, p4, p5]\n\n \"\"\"\n 3 Get SEN information.\n \"\"\"\n SEN_info = get_m2_m3_SEN_info(dx, dy, m_info, x_k)\n\n # upper shape\n upper_shape_left, upper_shape_right =\\\n m2_m3_make_upper_shape_points_list(dx, dy, m_info, SEN_info)\n\n upper_shape_left_upper_row = upper_shape_left[0]\n upper_shape_left_lower_row = upper_shape_left[1]\n\n upper_shape_right_upper_row = upper_shape_right[0]\n upper_shape_right_lower_row = upper_shape_right[1]\n\n # lower shape\n lower_shape_left, lower_shape_right =\\\n m2_m3_make_lower_shape_points_list(dx, dy, m_info, SEN_info)\n\n lower_shape_left_upper_row = lower_shape_left[0]\n lower_shape_left_lower_row = lower_shape_left[1]\n\n lower_shape_right_upper_row = lower_shape_right[0]\n lower_shape_right_lower_row = lower_shape_right[1]\n\n # middle shape\n middle_shape_left, middle_shape_right =\\\n m2_m3_make_middle_shape_points_list(dx, dy, m_info, SEN_info)\n\n middle_shape_left_upper_row = middle_shape_left[0]\n middle_shape_left_lower_row = middle_shape_left[1]\n\n middle_shape_right_upper_row = middle_shape_right[0]\n middle_shape_right_lower_row = middle_shape_right[1]\n\n \"\"\"\n 4 Make ARI lists\n \"\"\"\n # Leftside\n # Upper\n left_upper = []\n left_upper.append(m_p1)\n left_upper.extend(upper_shape_left_upper_row)\n\n left_upper.extend(KUMIKI_points_left)\n left_upper.extend(upper_shape_left_lower_row)\n left_upper.append(m_p0)\n\n # left_upper_crv = rs.AddPolyline(left_upper)\n\n # Middle\n left_middle = []\n left_middle.append(m_p1)\n left_middle.extend(middle_shape_left_upper_row)\n\n left_middle.extend(KUMIKI_points_left)\n left_middle.extend(middle_shape_left_lower_row)\n left_middle.append(m_p0)\n\n # left_middle_crv = rs.AddPolyline(left_middle)\n\n # Lower\n left_lower = []\n left_lower.append(m_p1)\n left_lower.extend(lower_shape_left_upper_row)\n\n left_lower.extend(KUMIKI_points_left)\n left_lower.extend(lower_shape_left_lower_row)\n left_lower.append(m_p0)\n\n # left_lower_crv = rs.AddPolyline(left_lower)\n\n # left_crvs = [left_upper_crv, left_middle_crv, left_lower_crv]\n\n left_list = [left_upper, left_middle, left_lower]\n\n # Rightside\n # Upper\n right_upper = []\n right_upper.append(m_p2)\n right_upper.extend(upper_shape_right_upper_row)\n\n right_upper.extend(KUMIKI_points_right)\n right_upper.extend(upper_shape_right_lower_row)\n right_upper.append(m_p3)\n\n # right_upper_crv = rs.AddPolyline(right_upper)\n\n # Middle\n right_middle = []\n right_middle.append(m_p2)\n right_middle.extend(middle_shape_right_upper_row)\n\n right_middle.extend(KUMIKI_points_right)\n right_middle.extend(middle_shape_right_lower_row)\n right_middle.append(m_p3)\n\n # right_middle_crv = rs.AddPolyline(right_middle)\n\n # Lower\n right_lower = []\n right_lower.append(m_p2)\n right_lower.extend(lower_shape_right_upper_row)\n\n right_lower.extend(KUMIKI_points_right)\n right_lower.extend(lower_shape_right_lower_row)\n right_lower.append(m_p3)\n\n # right_lower_crv = rs.AddPolyline(right_lower)\n\n # right_crvs = [right_upper_crv, right_middle_crv, right_lower_crv]\n\n right_list = [right_upper, right_middle, right_lower]\n\n return left_list, right_list, SEN_info", "def euler_timestep_rotation(sphere_positions, sphere_rotations, new_sphere_positions, new_sphere_rotations, Oa_out, timestep):\r\n\r\n for i in range(sphere_positions.shape[0]):\r\n R0 = sphere_positions[i]\r\n O = (Oa_out[i][0] ** 2 + Oa_out[i][1] ** 2 + Oa_out[i][2] ** 2) ** 0.5\r\n\r\n ''' To rotate from basis (x,y,z) to (X,Y,Z), where 
x,y,z,X,Y,Z are unit vectors,\r\n you just need to multiply by the matrix\r\n ( X_x Y_x Z_x )\r\n ( X_y Y_y Z_y ),\r\n ( X_z Y_z Z_z )\r\n where X_x means the x-component of X.\r\n Our Z is Omega = o_spheres[i], so we need to make it into a complete basis.\r\n To do that we pick a unit vector different to Omega (either zhat or xhat depending on Omega)\r\n and use (Omega x zhat, Omega x (Omega x zhat), zhat) as our basis (X,Y,Z).\r\n That's it! [Only took me three days...]\r\n '''\r\n\r\n if np.array_equal(Oa_out[i], [0, 0, 0]):\r\n rot_matrix = np.identity(3)\r\n else:\r\n Otest = (abs(Oa_out[i] / O)).astype('float')\r\n perp1 = [0, 0, 1] if np.allclose(Otest, [1, 0, 0]) else [1, 0, 0]\r\n rot_matrix = np.array([np.cross(Oa_out[i], perp1) / O, np.cross(Oa_out[i], np.cross(Oa_out[i], perp1)) / O ** 2, Oa_out[i] / O]).transpose()\r\n\r\n for j in range(2):\r\n ''' rb0 is the position (\"r\") of the endpoint of the pointy rotation vector in the\r\n external (x,y,z) frame (\"b\") at the beginning of this process (\"0\") '''\r\n rb0 = sphere_rotations[i, j]\r\n\r\n ''' rbdashdash0_xyz is the position of the same endpoint in the frame of the rotating sphere (\"b''\"),\r\n\t\t\t\t\t\twhich we set to have the z-axis=Omega axis. It's in Cartesian coordinates. '''\r\n rbdashdash0_xyz = np.dot(linalg.inv(rot_matrix), (rb0 - R0))\r\n x0 = rbdashdash0_xyz[0]\r\n y0 = rbdashdash0_xyz[1]\r\n z0 = rbdashdash0_xyz[2]\r\n\r\n r0 = (x0 ** 2 + y0 ** 2 + z0 ** 2) ** 0.5\r\n t0 = np.arccos(z0 / r0)\r\n p0 = 0 if (x0 == 0 and y0 == 0) else np.arctan2(y0, x0)\r\n r = r0\r\n t = t0\r\n p = euler_timestep(p0, O, timestep)\r\n\r\n x = r * np.sin(t) * np.cos(p)\r\n y = r * np.sin(t) * np.sin(p)\r\n z = r * np.cos(t)\r\n rbdashdash_xyz = np.array([x, y, z])\r\n R = new_sphere_positions[i]\r\n rb = R + np.dot(rot_matrix, rbdashdash_xyz)\r\n new_sphere_rotations[i, j] = rb\r\n return new_sphere_rotations", "def diagonalizing_gates(self):\n return [qml.RY(-np.pi / 4, wires=self.wires)]", "def rotate(self, ra1, dec1, ra2, dec2, ra3, dec3):\n # Turns Right Ascension/Declination into Azimuth/Zenith for healpy\n phi1 = ra1 - np.pi\n zen1 = np.pi/2. - dec1\n phi2 = ra2 - np.pi\n zen2 = np.pi/2. - dec2\n phi3 = ra3 - np.pi\n zen3 = np.pi/2. - dec3\n\n # Rotate each ra1 and dec1 towards the pole?\n x = np.array([hp.rotator.rotateDirection(\n hp.rotator.get_rotation_matrix((dp, -dz, 0.))[0], z, p)\n for z, p, dz, dp in zip(zen1, phi1, zen2, phi2)])\n\n # Rotate **all** these vectors towards ra3, dec3 (source_path)\n zen, phi = hp.rotator.rotateDirection(np.dot(\n hp.rotator.get_rotation_matrix((-phi3, 0, 0))[0],\n hp.rotator.get_rotation_matrix((0, zen3, 0.))[0]), x[:, 0], x[:, 1])\n\n dec = np.pi/2. 
- zen\n ra = phi + np.pi\n return np.atleast_1d(ra), np.atleast_1d(dec)", "def orbit(self, representation='trivial'):\n if not self:\n return self\n\n answer = BarrattEccles_element(torsion=self.torsion)\n for k, v in self.items():\n inverse = tuple(k[0].index(i + 1) + 1 for i in range(len(k[0])))\n permutation = SymmetricRing_element({inverse: 1}, torsion=self.torsion)\n if representation == 'sign':\n permutation = k[0].sign * permutation\n answer += permutation * BarrattEccles_element({k: v}, torsion=self.torsion)\n\n return answer", "def z_operators(self) -> List[PauliTerm]:\n z_matrix = self.z_operator_matrix()\n zeros = np.zeros_like(z_matrix, dtype='int')\n return [\n pauli_term_for_row(zeros[i, :], z_matrix[i, :])\n for i in range(self.k)\n ]", "def _mclachlan_atela_4th(vel_update, pos_update=update.PositionUpdate()):\n As = [\n 0.5153528374311229364,\n -0.085782019412973646,\n 0.4415830236164665242,\n 0.1288461583643841854,\n ]\n Bs = [\n 0.1344961992774310892,\n -0.2248198030794208058,\n 0.7563200005156682911,\n 0.3340036032863214255,\n ]\n updates = [vel_update, pos_update]*4\n coeff = []\n for a, b in zip(As, Bs):\n coeff.extend([a, b])\n return ExplicitIntegrator(coeff, updates)", "def __init__(self, x_traj=[], y_traj=[], period=0, num_div_action=5,\n closed=True, differential_car=True, discrete_input=False):\n\n # Size of the space\n self.max_x = SPACE_X / 2 # [m]\n self.max_y = SPACE_Y / 2 # [m]\n self.state = []\n self.x_trajectory = x_traj\n self.y_trajectory = y_traj\n self.r= 0.0325 # [m] wheel´s radius\n self.rho = 0.133 # [m] distance between wheel\n self.time = period # frames per second\n\n # More steps for Ackerman model because circuit is longer\n if discrete_input:\n self.max_steps = 600\n else:\n self.max_steps = 600\n\n self.constant = -0.1\n self.x_ant = 0.0\n self.y_ant = 0.0\n # Sqr of the limit distance\n self.zone_0_limit = ZONE0_LIMIT\n self.zone_1_limit = ZONE1_LIMIT\n self.zone_2_limit = ZONE2_LIMIT\n if discrete_input:\n self.zone_2_limit = 0.08\n else:\n self.zone_2_limit = ZONE2_LIMIT\n\n self.num_div_action = num_div_action\n self.num_div_state = num_div_action\n\n # It is to inform if it´s an closed circuit without ending\n self.closed = closed\n\n # Distance between axis in Ackerman car\n self.l_ack = 0.245\n # Radius of wheels of the Ackerman car\n self.r_ack = 0.035\n # Maximum angle of the wheels of the Ackerman car\n self.alpha_ack = 25.34*np.pi/180\n\n # Choose car model\n self.differential_car = differential_car\n\n self.discrete_input = discrete_input\n\n # parameters to add noise to x, y, angle values\n # self.mu = 0\n # self.sigmaxy = 0.002\n # self.sigmaangle = 2*np.pi/180", "def _euler_90_algorithm(self):\n # define scale factor from min radius and output angle (which is ninety degrees), grab radius from input\n output_angle = np.pi / 2.\n effective_radius = self.radius\n # Euler curvature scaling factor, determined from calculating a 1. radius term and looking at output\n min_radius = effective_radius / 1.87009582269\n a_scale = 2. * min_radius * (output_angle / 2.0)**0.5\n # too many points causes issues on gdsii, splitting over different sizes is probably most suitable way\n if effective_radius < 30.:\n points = 50\n else:\n points = 80\n # Create t array for calculating parametric curve\n end_t = (output_angle / 2.0)**0.5\n all_t = np.linspace(0., end_t, points)\n # Create a list for x values and generate the x components of parametric curve using loop\n xs = list()\n for t in all_t:\n xs.append(a_scale * (t - (1 / 10.) 
* t**5 + (1 / 216.) * t**9 - (1 / 9360.) * t**13 + (1 / 685440.) * t**17))\n # Do the same for y values\n ys = list()\n for t in all_t:\n ys.append(a_scale * (t**3 * (1 / 3.) - (1 / 42.) * t**7 + (1 / 1320.) * t**11 - (1 / 75600.) * t**15))\n # Combine the xs and ys to perform the mirroring operation\n start_euler_xy = zip(xs, ys)\n # Calculating Mirror curve for X and Y, need x axis angle and end positions\n angle_x = np.pi / 2. + output_angle / 2.\n end_x = start_euler_xy[-1][0]\n end_y = start_euler_xy[-1][1]\n # initialising for loops, looping using checked equations from Mathematica for mirroring around line\n x_mirror = list()\n y_mirror = list()\n for elem in start_euler_xy:\n x_mirror.append(end_x + np.cos(2 * angle_x) * (elem[0] - end_x) + np.sin(2 * angle_x) * (elem[1] - end_y))\n\n for elem in start_euler_xy:\n y_mirror.append(end_y + np.sin(2 * angle_x) * (elem[0] - end_x) - np.cos(2 * angle_x) * (elem[1] - end_y))\n\n # takes output of mirrors, flips them and combines them\n mirror_xy = zip(x_mirror[::-1], y_mirror[::-1])\n\n # Combines initial and mirrored list to generate the euler curve\n euler_full = start_euler_xy + mirror_xy\n return euler_full", "def rotation(self):\n\t\treturn self.piv.a.rotate.v", "def _decompose_rotation(self, cmd):\n \n axis = None\n angle = 0\n gate_name = str(cmd.gate)\n\n if \"Rz\" in gate_name:\n axis = 'z'\n elif \"Rx\" in gate_name:\n axis = 'x'\n elif \"Ry\" in gate_name:\n axis = 'y' \n\n angle = gate_name[gate_name.find(\"(\")+1:gate_name.find(\")\")]\n\n decomposition = subprocess.check_output(\"./gridsynth \" + angle, shell=True)[:-1]\n new_sequence = self._process_decomposition(str(decomposition),cmd.qubits[0])\n return new_sequence", "def XXRotation(angle):\r\n return SuperPosition([IGate * IGate, XGate * XGate],\r\n [np.cos(angle), np.sin(angle) * 1j],\r\n 'R_XX({})'.format(angle))", "def CCframe(q1, q2, q3, xy = 0):\n\n # Postitions of the joints\n # Komplexe Zahlen für Geometrie in der Ebene nutzen\n # (Rotationsmatrizen gingen genauso)\n\n J0= 0+0j + xy # offset\n J1= J0 + l1*exp(1j*q1)\n J2= J1 + l2*exp(1j*(q1+q2))\n J3= J2 + l3*exp(1j*(q1+q2+q3))\n pl.plot(r_[J0,].real, r_[J0,].imag, 'ks', ms = 8)\n pl.plot(r_[J0, J1].real, r_[J0, J1].imag, 'k-', lw=3)\n pl.plot(r_[J2, J1].real, r_[J2, J1].imag, 'ko-', lw=2)\n pl.plot(r_[J2, J3].real, r_[J2, J3].imag, 'ko-')\n pl.xticks= []\n pl.yticks= []\n pl.axis('equal')", "def rotate(self, euler_xyz_degree):\n degree = True\n compiler = self.root.find('.//compiler[@angle]')\n if compiler and compiler.get('angle'):\n if compiler.get('angle') == 'radian':\n degree = False\n\n x, y, z = np.array(euler_xyz_degree) * np.pi / 180\n R = T.euler.euler2quat(x, y, z, 'sxyz')\n\n if self.root.find('.//compiler[@eulerseq]'):\n raise NotImplementedError()\n\n for body in self.worldbody:\n if body.tag == 'light':\n continue\n quat = None\n if body.get('axisangle'):\n axisangle = string_to_array(body.get('axisangle'))\n length = np.linalg.norm(axisangle)\n quat = T.quaternions.axangle2quat(axisangle / length, length)\n body.set('axisangle', None)\n elif body.get('xyaxes'):\n raise NotImplementedError()\n elif body.get('zaxis'):\n raise NotImplementedError()\n elif body.get('euler'):\n i, j, k = string_to_array(body.get('euler'))\n if degree:\n i *= np.pi / 180\n j *= np.pi / 180\n k *= np.pi / 180\n quat = T.euler.euler2quat(i, j, k, 'sxyz')\n body.set('euler', None)\n else:\n quat = string_to_array(body.get('quat', '1 0 0 0'))\n body.set('quat', None)\n\n quat = T.quaternions.qmult(R, quat)\n 
body.set('quat', array_to_string(quat))", "def test_serialize_circuit_rotations(self):\n dev = QeQiskitDevice(wires=1, shots=1000, backend=\"qasm_simulator\", analytic=False)\n\n def circuit():\n qml.Hadamard(wires=[0])\n return qml.expval(qml.Hadamard(0))\n\n qnode = qml.QNode(circuit, dev)\n qnode._construct([], {})\n\n qasm = dev.serialize_circuit(qnode.circuit)\n expected = 'OPENQASM 2.0;\\ninclude \"qelib1.inc\";\\nqreg q[1];\\ncreg c[1];\\nh q[0];\\nry(-0.7853981633974483) q[0];\\n'\n assert qasm == expected", "def rotateAlignXform(self):\n r = np.concatenate(([self.x], [self.y], [self.z]), 0)\n r = np.concatenate((r, np.array([[0,0,0]])), 0)\n r = np.concatenate((r, np.array([0,0,0,1]).reshape(-1,1)), 1)\n return r", "def generate_rotation_matrix(x_angle, y_angle, z_angle):\n return np.array([\n [1, 0, 0],\n [0, np.cos(x_angle), -np.sin(x_angle)],\n [0, np.sin(x_angle), np.cos(x_angle)],\n ]).dot([\n [np.cos(y_angle), 0, np.sin(y_angle)],\n [0, 1, 0],\n [-np.sin(y_angle), 0, np.cos(y_angle)],\n ]).dot([\n [np.cos(z_angle), -np.sin(z_angle), 0],\n [np.sin(z_angle), np.cos(z_angle), 0],\n [0, 0, 1],\n ]).tolist()", "def rotated_e():\n x = np.zeros((5, 5))\n x[:, 0] = 1.\n y = np.zeros((5, 5))\n y[:, 2] = 1.\n z = np.zeros((5, 5))\n z[:, 4] = 1.\n a = np.zeros((5, 5))\n a[0, :] = 1.\n b = np.zeros((5, 5))\n b[2, :] = 1.\n c = np.zeros((5, 5))\n c[4, :] = 1.\n\n img = np.zeros((4, 5, 5))\n img[0] = x + y + z + a\n img[1] = x + y + z + c\n img[2] = a + b + c + x\n img[3] = a + b + c + z\n img[img > 0] = 1.\n\n return img.astype('float32')", "def plot(self):\n #prepare the marker list\n marker = itertools.cycle((',', '+', '.', 'o', '*',\n '^', 'v', '<', '>', '8',\n 's', 'p', 'h', 'H', 'D',\n 'd'))\n # first categorised with plane\n for each_plane in self.plane_list:\n if self.is_literal:\n label = \"[\" + \"{0} {1} {2}\".format(each_plane[0], each_plane[1], each_plane[2]) + \"]\"\n else:\n label = \"{\"+\"{0}, {1}, {2}\".format(each_plane[0], each_plane[1], each_plane[2]) + \"}\"\n x_list = []\n y_list = []\n if self.is_literal:\n tmp = [each_plane]\n opposite_plane = [-item for item in each_plane]\n tmp.append(opposite_plane)\n else:\n tmp = PoleFigure.get_permutations(each_plane)\n # second categorised with grain ID\n my_marker = \".\" # default marker\n for i in range(len(self.__data)):\n each_euler = self.__data[i]\n if self.unique_marker:\n my_marker = marker.next()\n plt.rcParams['text.usetex'] = False # otherwise, '^' will cause trouble\n euler = EulerAngle(each_euler[0], each_euler[1], each_euler[2])\n rot_m = np.dot(self.__ref, euler.rotation_matrix)\n self.__data[i] = RotationMatrix(rot_m).euler_angle\n for each_pole in tmp:\n tmp_pole = np.array(each_pole) / self.lattice_vector\n tmp_pole /= np.linalg.norm(tmp_pole)\n coord = np.dot(rot_m, tmp_pole)\n if coord[2] < 0:\n continue # not pointing up, moving on\n else:\n x = coord[0] / (1.0 + float(coord[2]))\n y = coord[1] / (1.0 + float(coord[2]))\n # need to rotate 90 degree\n x_list.append(y)\n y_list.append(-x)\n # start plotting\n if self.__clr_list is not None:\n clr = self.__clr_list.next()\n else:\n clr = np.random.rand(3, 1)\n plt.scatter(x_list, y_list, marker=my_marker, c=clr, label=label, edgecolor='none')\n # label x/y axis\n plt.text(1.1, 0.0, \"y\", horizontalalignment='center', verticalalignment='center', fontsize=15)\n plt.text(0.0, -1.1, \"x\", horizontalalignment='center', verticalalignment='center', fontsize=15)\n # set legend\n plt.legend(loc='upper left', numpoints=1, ncol=6, fontsize=8, bbox_to_anchor=(0, 
0))\n plt.title(self.title)\n plt.savefig(self.title + \".\" + self.output)\n plt.close()", "def reaction_forces(Ca, la, x1, x2, x3, xa, h, d1, d3, theta, P, q, E, I):\r\n \r\n equation_matrix = np.array([[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], \r\n [1, 0, 0, 1, 0, 1, 0, np.sin(theta), 0, 0, 0, 0, (P*np.sin(theta)+q*la*np.cos(theta))], \r\n [0, 1, 0, 0, 1, 0, 1, np.cos(theta), 0, 0, 0, 0, (P*np.cos(theta)-q*la*np.sin(theta))],\r\n \r\n [-(Ca/4-h/2), 0, 0, -(Ca/4-h/2) ,0 , -(Ca/4-h/2), 0, (np.cos(theta)*h/2-np.sin(theta)*Ca/4), 0, 0, 0, 0, (P*np.cos(theta)*h/2*-P*np.sin(theta)*Ca/4)], \r\n [0, (x2-x1), 0, 0, 0, 0, -(x3-x2), (np.cos(theta)*xa/2), 0, 0, 0, 0, (-P*np.cos(theta)*xa/2+q*la*np.sin(theta)*(la/2-x2))], \r\n [-(x2-x1), 0, 0, 0, 0, (x3-x2), 0, -np.sin(theta)*xa/2, 0, 0, 0, 0, (P*np.sin(theta)*xa/2+q*la*np.cos(theta)*(la/2-x2))], \r\n \r\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, x1, 1, -q*np.sin(theta)*((x1**4)/24)], \r\n [0, ((x2-x1)**3)/6, 0, 0, 0, 0, 0, ((np.cos(theta))*((xa/2)**3)/6), 0, 0, x2, 1, (-q*np.sin(theta)*((x2**4)/24))], \r\n [0, ((x3-x1)**3)/6, 0, 0, ((x3-x2)**3)/6, 0, 0, ((np.cos(theta))*((x3-x2+xa/2)**3)/6), 0, 0, x3, 1, (-q*np.sin(theta)*((x3**4)/24)+P*(np.cos(theta))*(x3-x2-xa/2)**3/6)], \r\n [0, 0, 0, 0, 0, 0, 0, 0, x1, 1, 0, 0, (-E*I*d1*+q*np.cos(theta)*(x1**4)/24)], \r\n [(((x2-x1)**3)/6), 0, 0, 0, 0, 0, 0, ((-np.sin(theta))*((xa/2)**3)/6), x2, 1, 0, 0, (q*np.cos(theta)*(x2**4)/24)], \r\n [(((x3-x1)**3)/6),0,0,(((x3-x2)**3)/6),0,0,0,((-np.sin(theta))*((x3-x2+xa/2)**3)/6),x3,1,0,0,(-E*I*d3*+q*np.cos(theta)*((x3**4)/24)+P/6*np.sin(theta)*(x3-x2-xa/2)**3)]])\r\n \r\n \r\n unknown_matrix = equation_matrix[:,:-1]\r\n constant_matrix = equation_matrix[:,-1]\r\n \r\n \r\n solution_matrix = np.linalg.solve(unknown_matrix,constant_matrix)\r\n \r\n solution_matrix = solution_matrix/1000\r\n \r\n (R1y, R1z, R2x, R2y, R2z, R3y, R3z, RI, c1, c2, c3, c4) = tuple(solution_matrix)\r\n \r\n print((R1y, R1z, R2x, R2y, R2z, R3y, R3z, RI, c1, c2, c3, c4))", "def x_rotation(self):\n before = ('U', 'u', 'F', 'f', 'D', 'd', 'B', 'b', 'S', 'E', 'y', 'z')\n after = ('F', 'f', 'D', 'd', 'B', 'b', 'U', 'u', 'E', 'S\\'', \"z\", \"y'\")\n solve = self.solve_helper.maketrans(dict(zip(before, after)))\n solve_trans = self.solve_helper.translate(solve)\n solve_trans = solve_trans.replace(\"\\'\\'\", \"\")\n self.solve_helper = solve_trans", "def calc_rotate(self, joints: list, axis: str, offset: float, tcp_config: dict = None) -> List[float]:\n self.__logger.debug('Eva.calc_rotate called')\n return self.__http_client.calc_rotate(joints, axis, offset, tcp_config=tcp_config)", "def set_rotation_matrices(self):\r\n for i in range(len(self.vertices)):\r\n self.vertices[i].meta['C'] = rotation_matrix(self.vertices[i].meta['axis'][0], \r\n self.vertices[i].meta['axis'][1], \r\n self.vertices[i].meta['axis'][2], \r\n self.vertices[i].meta['axis_order'],\r\n degrees=True)\r\n # Todo: invert this by applying angle operations in reverse order\r\n self.vertices[i].meta['Cinv'] = np.linalg.inv(self.vertices[i].meta['C'])", "def convert_to_circuit(x):\n y = np.arcsin(x)\n z = np.arccos(x**2)\n qubits = cirq.GridQubit.rect(5, 1)\n circuit = cirq.Circuit()\n for i in range(5):\n circuit.append(cirq.ry(y).on(qubits[i]))\n circuit.append(cirq.rz(z).on(qubits[i]))\n return circuit", "def rotate_c(X,a_set,vector):\r\n\taxis_vector = math.radians(-X) * np.array([0,0,1])\r\n\tr = R.from_rotvec(axis_vector)\r\n\treturn list(r.apply(vector))", "def matrixRepresentation(self,decimals=8):\n temp = self.circuit.copy()\n 
temp.remove_final_measurements()\n \n simulator = Aer.get_backend('unitary_simulator')\n result = execute(temp, backend=simulator).result()\n unitary = result.get_unitary(decimals=decimals).tolist()\n for i in range(len(unitary)):\n for j in range(len(unitary[i])):\n if unitary[i][j]==0:\n unitary[i][j]=\"0\"\n else:\n string=str(unitary[i][j].real).replace(\".0\", \"\")\n string=\"\" if unitary[i][j].real==0 else string\n string+=self.numberFormat(unitary[i][j].imag,True)\n unitary[i][j]=string.lstrip(\"+\")\n return unitary", "def rotator(angle):\n\n c = np.cos(2*angle)\n s = np.sin(2*angle)\n return np.array([[1,0,0,0],[0,c,-s,0],[0,s,c,0],[0,0,0,1]])", "def quick_rot(line):\n\treturn zip(*reversed(create_matrix(line)))", "def rotate_cube_right_list(liste):\n\n fronttemp = liste[1]\n \n righttemp = liste[2]\n \n backtemp = liste[3]\n \n lefttemp = liste[4]\n \n uptemp0 = liste[0][0]\n uptemp1 = liste[0][1]\n uptemp2 = liste[0][2]\n uptemp3 = liste[0][3]\n uptemp4 = liste[0][4]\n uptemp5 = liste[0][5]\n uptemp6 = liste[0][6]\n uptemp7 = liste[0][7]\n uptemp8 = liste[0][8]\n \n downtemp0 = liste[5][0]\n downtemp1 = liste[5][1]\n downtemp2 = liste[5][2]\n downtemp3 = liste[5][3]\n downtemp4 = liste[5][4]\n downtemp5 = liste[5][5]\n downtemp6 = liste[5][6]\n downtemp7 = liste[5][7]\n downtemp8 = liste[5][8]\n \n liste[2] = fronttemp\n \n liste[3] = righttemp\n \n liste[4] = backtemp\n \n liste[1] = lefttemp\n \n liste[0][0] = uptemp2\n liste[0][1] = uptemp5\n liste[0][2] = uptemp8\n liste[0][3] = uptemp1\n liste[0][4] = uptemp4\n liste[0][5] = uptemp7\n liste[0][6] = uptemp0\n liste[0][7] = uptemp3\n liste[0][8] = uptemp6\n \n liste[5][0] = downtemp6\n liste[5][1] = downtemp3\n liste[5][2] = downtemp0\n liste[5][3] = downtemp7\n liste[5][4] = downtemp4\n liste[5][5] = downtemp1\n liste[5][6] = downtemp8\n liste[5][7] = downtemp5\n liste[5][8] = downtemp2\n \n return liste", "def planar_jacobian(robot: RobotPlanar, q: list, ee: str):\n # assume all joints are revolute\n n = robot.n\n\n Jp = np.zeros((3, n))\n Jo = np.zeros((3, n))\n\n if type(robot) == Revolute3dChain:\n path_names = [f\"p{i}\" for i in range(0, robot.n + 1)]\n else:\n path_names = robot.kinematic_map[\"p0\"][ee]\n\n if type(robot) == RobotPlanar:\n edges = list(robot.tree_graph().edges) # for Revolute2dtree\n elif type(robot) == RobotPlanar:\n edges = list(robot.chain_graph().edges) # for Revolute2dchain\n elif type(robot) == Revolute3dChain or type(robot) == Revolute3dTree:\n edges = [\n (node, path_names[p_ind + 1]) for p_ind, node in enumerate(path_names[0:-1])\n ]\n # elif type(robot) == Revolute3dTree:\n # edges = [\n #\n # ]\n\n # Ts = robot.get_full_pose_fast_lambdify(list_to_variable_dict(q))\n Ts = robot.get_all_poses(list_to_variable_dict(q))\n # Ts[\"p0\"] = np.eye(4)\n\n T_0_ee = Ts[ee]\n pe = T_0_ee.as_matrix()[0:3, -1]\n\n for i_path, joint in enumerate(path_names[:-1]):\n T_0_i = Ts[joint].as_matrix()\n z_hat_i = T_0_i[0:3, 2]\n p_i = T_0_i[0:3, -1]\n edge = (joint, path_names[i_path + 1])\n j_idx = edges.index(edge) # get joint column number\n Jp[:, j_idx] = np.cross(z_hat_i, pe - p_i)\n\n # Euler error jacobian as in eqn 3.88\n Jo[:, j_idx] = z_hat_i\n\n J = np.vstack([Jp, Jo])\n return J", "def circuits(self) -> List[QuantumCircuit]:\n circ0 = QuantumCircuit(1, 1)\n circ0.measure(0, 0)\n\n circ1 = QuantumCircuit(1, 1)\n circ1.x(0)\n circ1.measure(0, 0)\n\n for i, circ in enumerate([circ0, circ1]):\n circ.metadata = {\n \"experiment_type\": self._type,\n \"qubit\": self.physical_qubits[0],\n \"xval\": i,\n 
}\n\n return [circ0, circ1]", "def _represent_ZGate(self, basis, **options):\n _format = options.get('format', 'sympy')\n n = 1\n definite_state = 0\n for it in reversed(self.qubit_values):\n definite_state += n*it\n n = n*2\n result = [0]*(2**self.dimension)\n result[int(definite_state)] = 1\n if _format == 'sympy':\n return Matrix(result)\n elif _format == 'numpy':\n import numpy as np\n return np.array(result, dtype='complex').transpose()\n elif _format == 'scipy.sparse':\n from scipy import sparse\n return sparse.csr_matrix(result, dtype='complex').transpose()", "def euler_angle_to_rotation(ea, convention='zyx'):\n axis_names_to_vectors = dict([('x', (1, 0, 0)), ('y', (0, 1, 0)), ('z', (0, 0, 1))])\n axis0, axis1, axis2 = convention\n R0 = so3.rotation(axis_names_to_vectors[axis0], ea[0])\n R1 = so3.rotation(axis_names_to_vectors[axis1], ea[1])\n R2 = so3.rotation(axis_names_to_vectors[axis2], ea[2])\n return so3.mul(R0, so3.mul(R1, R2))", "def rotateZ(self, *args, **kwargs):\n ...", "def get_anglesXY( self ):\n [accel_xout_scaled, accel_yout_scaled, _] = self.get_accelXYZ()\n rot_x = get_x_angle( accel_xout_scaled, accel_yout_scaled, accel_zout_scaled )\n rot_y = get_y_angle( accel_xout_scaled, accel_yout_scaled, accel_zout_scaled )\n return [rot_x, rot_y]", "def euler_angles(quatX,quatY,quatZ,quatW):\n\n\troll1 = 2.0 * (quatW * quatX + quatY * quatZ)\n\troll2 = (1.0 - 2.0) * (quatX * quatX + quatY * quatY)\n\n\tyaw1 = 2.0 * (quatW * quatZ + quatX * quatY)\n\tyaw2 = 1.0 - 2.0 * (quatY * quatY + quatZ * quatZ)\n\n\troll = math.atan2(roll1,roll2)\n\tpitch = math.asin(max(-1.0, min(1.0, 2.0 *(quatW * quatY - quatZ * quatX))))\n\tyaw = math.atan2(yaw1,yaw2)\n\n\troll_w = int(((roll + (math.pi)) / (math.pi * 2.0) * 18))\n\tpitch_w = int(pitch + (math.pi/2.0)/math.pi * 18)\n\tyaw_w = int(yaw + (math.pi / (math.pi * 2.0)) * 18)\n\n\teulerAngles = [roll_w,pitch_w,yaw_w]\n\treturn eulerAngles", "def qrot(I,sym):\n T = s.Symbol(\"T\")\n if type(I) == list:\n return (((s.pi * I[0] * I[1] * I[2])**(1/2))/sym) * ((8 * s.pi**2 * k * T) / (h**2))**(3/2)\n else:\n return (((s.pi * I)**(1/2))/sym) * ((8 * s.pi**2 * k * T) / (h**2))**(3/2)", "def Rotation_GAL_EQJ():\n # This rotation matrix was calculated by the following script\n # in this same source code repository:\n # demo/python/galeqj_matrix.py\n return RotationMatrix([\n [-0.0548624779711344, -0.8734572784246782, -0.4838000529948520],\n [+0.4941095946388765, -0.4447938112296831, +0.7470034631630423],\n [-0.8676668813529025, -0.1980677870294097, +0.4559861124470794]\n ])", "def intermediateJac(self, x, isRot=False):\n if not isRot:\n x = dot(self._rotation,x)\n\n #Due to how it is constructed,\n #J_rot2polar is its own inverse (and symmetric)\n \n Jac = self.intermediateJacRot2Polar(x)\n \n #Jac = S.J.R\n \n s=self._scaled.reshape((1,self._dim,1))\n R = self._rotation\n \n #Compute J.R\n Jac = np.einsum(\"ijk,kl->ijl\",Jac,R)\n #Left multiply with S\n #S.(J.R)\n Jac *= s #A left multiplication with a diagonal matrix is like scaling the rows\n \n return Jac", "def rotation_elements(self, eta, phi, theta):\n \n # Three-axis rotation:\n # 1. Rotate about +z by eta (follows RHR; rotation is mathematical and thus counter-clockwise)\n # 2. Tilt by phi with respect to +z (rotation about y-axis) then\n # 3. 
rotate by theta in-place (rotation about z-axis) ### BUG: This isn't a conceptual rotation about z (influenced by other rotations)\n \n\n eta = radians( eta ) # eta is orientation around the z axis (before reorientation)\n phi = radians( phi ) # phi is grain tilt (with respect to +z axis)\n theta = radians( theta ) # grain orientation (around the z axis)\n \n rotation_elements = [[ cos(eta)*cos(phi)*cos(theta)-sin(eta)*sin(theta) ,\n -cos(eta)*cos(phi)*sin(theta)-sin(eta)*cos(theta) ,\n -cos(eta)*sin(phi) ],\n [ sin(eta)*cos(phi)*cos(theta)+cos(eta)*sin(theta) ,\n -sin(eta)*cos(phi)*sin(theta)+cos(eta)*cos(theta) ,\n sin(eta)*sin(phi) ],\n [ -sin(phi)*cos(theta) ,\n sin(phi)*sin(theta) ,\n cos(phi) ]]\n \n return rotation_elements", "def Rotation_EQJ_GAL():\n # This rotation matrix was calculated by the following script\n # in this same source code repository:\n # demo/python/galeqj_matrix.py\n return RotationMatrix([\n [-0.0548624779711344, +0.4941095946388765, -0.8676668813529025],\n [-0.8734572784246782, -0.4447938112296831, -0.1980677870294097],\n [-0.4838000529948520, +0.7470034631630423, +0.4559861124470794]\n ])", "def rotation_matrix(self):\n return self.affine_matrix[0:3][:, 0:3]", "def _r_z(angle: tf.Tensor) -> tf.Tensor:\n zero = tf.constant(0, dtype=tf.float64)\n exponent = tf.complex(zero, angle)\n exp = tf.exp(exponent)\n zero_complex = tf.complex(zero, zero)\n one_complex = tf.complex(tf.constant(1, dtype=tf.float64), zero)\n rz = tf.stack([[one_complex, zero_complex], [zero_complex, exp]])\n\n return rz", "def get_invntt_operator(self):\n\n\n Operator = []\n invntt_qubic = self.qubic.get_invntt_operator()\n R_qubic = ReshapeOperator(invntt_qubic.shapeout, invntt_qubic.shape[0])\n Operator.append(R_qubic(invntt_qubic(R_qubic.T)))\n\n invntt_planck = self.planck.get_invntt_operator()\n R_planck = ReshapeOperator(invntt_planck.shapeout, invntt_planck.shape[0])\n Operator.append(R_planck(invntt_planck(R_planck.T)))\n\n return BlockDiagonalOperator(Operator, axisout=0)", "def y_rotation(self):\n before = ('R','r', 'B', 'b', 'L', 'l','F','f','M' , 'z' ,'S', 'x')\n after = ('B','b', 'L', 'l', 'F', 'f','R','r','S' , 'x' ,\"M'\", \"z'\")\n solve = self.solve_helper.maketrans(dict(zip(before, after)))\n solve_trans = self.solve_helper.translate(solve)\n solve_trans = solve_trans.replace(\"\\'\\'\",\"\")\n self.solve_helper = solve_trans", "def orient(self, parent, rot_type, amounts, rot_order=''):\n\n self._check_frame(parent)\n amounts = list(amounts)\n for i, v in enumerate(amounts):\n if not isinstance(v, Vector):\n amounts[i] = sympify(v)\n def _rot(axis, angle):\n \"\"\"DCM for simple axis 1,2,or 3 rotations. 
\"\"\"\n if axis == 1:\n return Matrix([[1, 0, 0],\n [0, cos(angle), -sin(angle)],\n [0, sin(angle), cos(angle)]])\n elif axis == 2:\n return Matrix([[cos(angle), 0, sin(angle)],\n [0, 1, 0],\n [-sin(angle), 0, cos(angle)]])\n elif axis == 3:\n return Matrix([[cos(angle), -sin(angle), 0],\n [sin(angle), cos(angle), 0],\n [0, 0, 1]])\n\n approved_orders = ('123', '231', '312', '132', '213', '321', '121',\n '131', '212', '232', '313', '323', '')\n rot_order = str(rot_order).upper() # Now we need to make sure XYZ = 123\n rot_type = rot_type.upper()\n rot_order = [i.replace('X', '1') for i in rot_order]\n rot_order = [i.replace('Y', '2') for i in rot_order]\n rot_order = [i.replace('Z', '3') for i in rot_order]\n rot_order = ''.join(rot_order)\n if not rot_order in approved_orders:\n raise TypeError('The supplied order is not an approved type')\n parent_orient = []\n\n if rot_type == 'AXIS':\n if not rot_order == '':\n raise TypeError('Axis orientation takes no rotation order')\n if not (isinstance(amounts, (list, tuple)) & (len(amounts) == 2)):\n raise TypeError('Amounts are a list or tuple of length 2')\n theta = amounts[0]\n axis = amounts[1]\n self._check_vector(axis)\n if not axis.dt(parent) == 0:\n raise ValueError('Axis cannot be time-varying')\n axis = axis.express(parent).normalize()\n axis = axis.args[0][0]\n parent_orient = ((eye(3) - axis * axis.T) * cos(theta) +\n Matrix([[0, -axis[2], axis[1]],[axis[2], 0, -axis[0]],\n [-axis[1], axis[0], 0]]) * sin(theta) + axis * axis.T)\n elif rot_type == 'QUATERNION':\n if not rot_order == '':\n raise TypeError('Quaternion orientation takes no rotation order')\n if not (isinstance(amounts, (list, tuple)) & (len(amounts) == 4)):\n raise TypeError('Amounts are a list or tuple of length 4')\n q0, q1, q2, q3 = amounts\n parent_orient = (Matrix([[q0 ** 2 + q1 ** 2 - q2 ** 2 - q3 **\n 2, 2 * (q1 * q2 - q0 * q3), 2 * (q0 * q2 + q1 * q3)],\n [2 * (q1 * q2 + q0 * q3), q0 ** 2 - q1 ** 2 + q2 **2 - q3 ** 2,\n 2 * (q2 * q3 - q0 * q1)], [2 * (q1 * q3 - q0 * q2), 2 * (q0 *\n q1 + q2 * q3), q0 ** 2 - q1 ** 2 - q2 ** 2 + q3 ** 2]]))\n elif rot_type == 'BODY':\n if not (len(amounts) == 3 & len(rot_order) == 3):\n raise TypeError('Body orientation takes 3 values & 3 orders')\n a1 = int(rot_order[0])\n a2 = int(rot_order[1])\n a3 = int(rot_order[2])\n parent_orient = (_rot(a1, amounts[0]) * _rot(a2, amounts[1])\n * _rot(a3, amounts[2]))\n elif rot_type == 'SPACE':\n if not (len(amounts) == 3 & len(rot_order) == 3):\n raise TypeError('Space orientation takes 3 values & 3 orders')\n a1 = int(rot_order[0])\n a2 = int(rot_order[1])\n a3 = int(rot_order[2])\n parent_orient = (_rot(a3, amounts[2]) * _rot(a2, amounts[1])\n * _rot(a1, amounts[0]))\n else:\n raise NotImplementedError('That is not an implemented rotation')\n self._dcm_dict.update({parent: parent_orient})\n parent._dcm_dict.update({self: parent_orient.T})\n if rot_type == 'QUATERNION':\n t = dynamicsymbols._t\n q0, q1, q2, q3 = amounts\n q0d = diff(q0, t)\n q1d = diff(q1, t)\n q2d = diff(q2, t)\n q3d = diff(q3, t)\n w1 = 2 * (q1d * q0 + q2d * q3 - q3d * q2 - q0d * q1)\n w2 = 2 * (q2d * q0 + q3d * q1 - q1d * q3 - q0d * q2)\n w3 = 2 * (q3d * q0 + q1d * q2 - q2d * q1 - q0d * q3)\n wvec = Vector([(Matrix([w1, w2, w3]), self)])\n elif rot_type == 'AXIS':\n thetad = (amounts[0]).diff(dynamicsymbols._t)\n wvec = thetad * amounts[1].express(parent).normalize()\n else:\n try:\n from sympy.polys.polyerrors import CoercionFailed\n from sympy.physics.mechanics.functions import kinematic_equations\n q1, q2, q3 = 
amounts\n u1, u2, u3 = dynamicsymbols('u1, u2, u3')\n templist = kinematic_equations([u1, u2, u3], [q1, q2, q3],\n rot_type, rot_order)\n templist = [expand(i) for i in templist]\n td = solve(templist, [u1, u2, u3])\n u1 = expand(td[u1])\n u2 = expand(td[u2])\n u3 = expand(td[u3])\n wvec = u1 * self.x + u2 * self.y + u3 * self.z\n except (CoercionFailed, AssertionError):\n wvec = self._w_diff_dcm(parent)\n self._ang_vel_dict.update({parent: wvec})\n parent._ang_vel_dict.update({self: -wvec})", "def test_rotation(self):\n quat_rotated = rowan.rotate(input1, vector_inputs)\n\n matrices = rowan.to_matrix(input1)\n matrix_rotated = np.einsum(\"ijk,ki->ij\", matrices, vector_inputs.T)\n self.assertTrue(np.allclose(matrix_rotated, quat_rotated))", "def rotate_orbit(self):\n try:\n ang = self.orbit_speed * self.time_scale / self.refresh_rate\n self.obj.rotate(angle=ang, axis=vector(0, 1, 0), origin=self.star.obj.pos)\n self.sum_ang += ang\n except ZeroDivisionError:\n print(\"ERROR: REFRESH_RATE is 0\")\n except (AttributeError, TypeError):\n print(\"ERROR: wrong arguments type while initializing!!\")", "def zx_rotation(vector,theta):\r\n R = np.array([[np.cos(theta),0,np.sin(theta)],\r\n [0,1,0],\r\n [-np.sin(theta),0,np.cos(theta)]\r\n ])\r\n return np.dot(R,vector)", "def to_pycuber(self) -> pycuber.Cube:\n self.soft_align_faces()\n qpos_copy = self.sim.data.qpos.copy()\n\n cubies = []\n\n for i in range(27):\n cubelet_meta = self.cubelet_meta_info[i]\n\n if cubelet_meta[\"type\"] == \"cubelet\":\n mtx = self._cubelet_rotation_matrix(cubelet_meta, qpos_copy)\n\n original_coords = cubelet_meta[\"coords\"]\n # current_coords = (mtx @ cubelet_meta['coords'].astype(float)).round().astype(int)\n\n cubie_desc = {}\n\n for prev_axis, sign in enumerate(original_coords):\n if sign != 0:\n vec = mtx[:, prev_axis] * sign\n new_axis = np.abs(vec).argmax()\n new_sign = vec[new_axis]\n\n color = PYCUBER_REVERSE_COLORS[prev_axis, sign]\n loc = PYCUBER_REVERSE_LOCATIONS[new_axis, new_sign]\n\n cubie_desc[loc] = pycuber.Square(color)\n\n if len(cubie_desc) == 3:\n cubies.append(pycuber.Corner(**cubie_desc))\n elif len(cubie_desc) == 2:\n cubies.append(pycuber.Edge(**cubie_desc))\n if cubelet_meta[\"type\"] == \"driver\":\n original_coords = cubelet_meta[\"coords\"]\n axis = np.abs(original_coords).argmax()\n sign = original_coords[axis]\n\n color = PYCUBER_REVERSE_COLORS[axis, sign]\n loc = PYCUBER_REVERSE_LOCATIONS[axis, sign]\n\n cubie_desc = {loc: pycuber.Square(color)}\n cubies.append(pycuber.Centre(**cubie_desc))\n\n return pycuber.Cube(cubies=cubies)", "def reverseXZrotation(self):\n rot = np.zeros((3, 3), dtype = 'float64')\n rot[0, 0] = np.cos(self.currtheta)\n rot[0, 1] = -np.sin(self.currtheta)\n rot[0, 2] = 0.0\n rot[1, 0] = np.sin(self.currtheta) * np.cos(self.currphi)\n rot[1, 1] = np.cos(self.currtheta) * np.cos(self.currphi)\n rot[1, 2] = - np.sin(self.currphi)\n rot[2, 0] = np.sin(self.currtheta) * np.sin(self.currphi)\n rot[2, 1] = np.cos(self.currtheta) * np.sin(self.currphi)\n rot[2, 2] = np.cos(self.currphi)\n\n self.ds = np.dot(rot, self.ds1)", "def _get_repr_matrix_impl( # pylint: disable=too-many-locals\n *, orbitals, real_space_operator, rotation_matrix_cartesian,\n spin_rot_function, numeric,\n position_tolerance\n):\n\n orbitals = list(orbitals)\n\n positions_mapping = _get_positions_mapping(\n orbitals=orbitals,\n real_space_operator=real_space_operator,\n position_tolerance=position_tolerance\n )\n repr_matrix = sp.zeros(len(orbitals))\n if not numeric:\n rotation_matrix_cartesian = 
sp.Matrix(rotation_matrix_cartesian)\n\n expr_substitution = _get_substitution(rotation_matrix_cartesian)\n for i, orb in enumerate(orbitals):\n res_pos_idx = positions_mapping[i]\n spin_res = spin_rot_function(\n rotation_matrix_cartesian=rotation_matrix_cartesian,\n spin=orb.spin,\n numeric=numeric\n )\n\n new_func = orb.function.subs(expr_substitution, simultaneous=True)\n for new_spin, spin_value in spin_res.items():\n res_pos_idx_reduced = [\n idx for idx in res_pos_idx if orbitals[idx].spin == new_spin\n ]\n func_basis_reduced = [\n orbitals[idx].function for idx in res_pos_idx_reduced\n ]\n func_vec = _expr_to_vector(\n new_func, basis=func_basis_reduced, numeric=numeric\n )\n func_vec_norm = la.norm(np.array(func_vec).astype(complex))\n if not np.isclose(func_vec_norm, 1):\n raise ValueError(\n 'Norm {} of vector {} for expression {} created from orbital {} is not one.\\nCartesian rotation matrix: {}'\n .format(\n func_vec_norm, func_vec, new_func, orb,\n rotation_matrix_cartesian\n )\n )\n for idx, func_value in zip(res_pos_idx_reduced, func_vec):\n repr_matrix[idx, i] += func_value * spin_value\n # check that the matrix is unitary\n repr_matrix_numeric = np.array(repr_matrix).astype(complex)\n if not np.allclose(\n repr_matrix_numeric @ repr_matrix_numeric.conj().T,\n np.eye(*repr_matrix_numeric.shape) # pylint: disable=not-an-iterable\n ):\n max_mismatch = np.max(\n np.abs(\n repr_matrix_numeric @ repr_matrix_numeric.conj().T -\n np.eye(*repr_matrix_numeric.shape) # pylint: disable=not-an-iterable\n )\n )\n raise ValueError(\n 'Representation matrix is not unitary. Maximum mismatch to unity: {}'\n .format(max_mismatch)\n )\n if numeric:\n return repr_matrix_numeric\n else:\n repr_matrix.simplify()\n return repr_matrix", "def euler2quat(angles, rot_seq='zyx'):\n cangle = np.cos(0.5*angles)\n sangle = np.sin(0.5*angles)\n rot_seq = rot_seq.lower()\n if rot_seq == 'zyx':\n return np.array([cangle[0]*cangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] - sangle[0]*sangle[1]*cangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*cangle[1]*sangle[2],\n sangle[0]*cangle[1]*cangle[2] - cangle[0]*sangle[1]*sangle[2]])\n elif rot_seq == 'zyz':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2],\n cangle[0]*sangle[1]*sangle[2] - sangle[0]*sangle[1]*cangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2],\n sangle[0]*cangle[1]*cangle[2] + cangle[0]*cangle[1]*sangle[2]])\n elif rot_seq == 'zxy':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*sangle[1]*sangle[2],\n cangle[0]*sangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] + sangle[0]*sangle[1]*cangle[2],\n cangle[0]*sangle[1]*sangle[2] + sangle[0]*cangle[1]*cangle[2]])\n elif rot_seq == 'zxz':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2],\n sangle[0]*sangle[1]*cangle[2] - cangle[0]*sangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] + sangle[0]*cangle[1]*cangle[2]])\n elif rot_seq == 'yxz':\n return np.array([cangle[0]*cangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*cangle[1]*sangle[2],\n sangle[0]*cangle[1]*cangle[2] - cangle[0]*sangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] - sangle[0]*sangle[1]*cangle[2]])\n elif rot_seq == 'yxy':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2],\n cangle[0]*sangle[1]*cangle[2] + 
sangle[0]*sangle[1]*sangle[2],\n sangle[0]*cangle[1]*cangle[2] + cangle[0]*cangle[1]*sangle[2],\n cangle[0]*sangle[1]*sangle[2] - sangle[0]*sangle[1]*cangle[2]])\n elif rot_seq == 'yzx':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*sangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] + sangle[0]*sangle[1]*cangle[2],\n cangle[0]*sangle[1]*sangle[2] + sangle[0]*cangle[1]*cangle[2],\n cangle[0]*sangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2]])\n elif rot_seq == 'yzy':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2],\n sangle[0]*sangle[1]*cangle[2] - cangle[0]*sangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] + sangle[0]*cangle[1]*cangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2]])\n elif rot_seq == 'xyz':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*sangle[1]*sangle[2],\n cangle[0]*sangle[1]*sangle[2] + sangle[0]*cangle[1]*cangle[2],\n cangle[0]*sangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] + sangle[0]*sangle[1]*cangle[2]])\n elif rot_seq == 'xyx':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] + sangle[0]*cangle[1]*cangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2],\n sangle[0]*sangle[1]*cangle[2] - cangle[0]*sangle[1]*sangle[2]])\n elif rot_seq == 'xzy':\n return np.array([cangle[0]*cangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2],\n sangle[0]*cangle[1]*cangle[2] - cangle[0]*sangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] - sangle[0]*sangle[1]*cangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*cangle[1]*sangle[2]])\n elif rot_seq == 'xzx':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] + sangle[0]*cangle[1]*cangle[2],\n cangle[0]*sangle[1]*sangle[2] - sangle[0]*sangle[1]*cangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2]])\n else:\n return False", "def inverseIntermediateJac(self,x):\n \n Ri = self._rotation.T\n si = (1./self._scaled).reshape((1,1,self._dim))\n \n Jac = self.intermediateJacPol2Rot(x)\n \n #Ri.J\n Jac = np.einsum(\"jk,ikl->ijl\",Ri,Jac)\n #(Ri.J).diag(si)\n Jac *= si\n \n return Jac", "def to_z_basis_ops(self) -> Iterator[raw_types.Operation]:\n for qubit, pauli in self.items():\n yield clifford_gate.SingleQubitCliffordGate.from_single_map(\n {pauli: (pauli_gates.Z, False)})(qubit)", "def vertexes(self):\n theta = self.orientation\n shifts = np.array([np.cos(theta), np.sin(theta)]) * self.a\n return self.coords + (shifts[:, None] * [-1, 1]).T", "def azizen(self):\n # x0,y0 array pixel coordinates relative to cx,cy\n# ndy0,ndx0=img.shape\n ndy0=self.ndy0\n ndx0=self.ndx0\n x0,y0=np.meshgrid(np.linspace(0,ndx0-1,ndx0)-self.cx,np.linspace(0,ndy0-1,ndy0)-self.cy)\n r0=np.sqrt(x0**2+y0**2)/self.pr0 # fractional radial distance from 0,0\n# self.roi=np.s_[ystart:ystart+self.ny0,xstart:xstart+self.nx0]\n # why not model the zenith angle dependence with polynomial directly\n # rather than linear interpolation between roots.\n roots=np.zeros(51)\n rr=np.arange(51)/100.0\n for i,ref in enumerate(rr):\n roots[i]=np.real(np.roots([self.c3,0,self.c2,0,self.c1,-ref])[-1])\n theta0 = np.interp(r0/2,rr,roots)\n \n phi0 = np.arctan2(x0,y0) - self.rot ####phi (i.e., azimuth) is reckoned with -pi corresponding to north, increasing clockwise, NOTE: pysolar use sub-standard definition\n phi0 = phi0%(2*np.pi)\n\n #####correction for the tilt of the camera\n 
k=np.array((np.sin(self.azm),np.cos(self.azm),0))\n a=np.array([np.sin(theta0)*np.cos(phi0),np.sin(theta0)*np.sin(phi0),np.cos(theta0)]); \n a = np.transpose(a,[1,2,0])\n b=np.cos(self.beta)*a + np.sin(self.beta)*np.cross(k,a,axisb=2) \\\n + np.reshape(np.outer(np.dot(a,k),k),(self.ndy0,self.ndx0,3))*(1-np.cos(self.beta))\n theta0=np.arctan(np.sqrt(b[:,:,0]**2+b[:,:,1]**2)/b[:,:,2])\n phi0=np.arctan2(b[:,:,1],b[:,:,0])%(2*np.pi)\n# max_theta *= deg2rad \n# valid0 = (theta0<max_theta) & (theta0>0); \n# theta0[valid0]=np.nan;\n self.theta0,self.phi0=theta0,phi0" ]
[ "0.5938358", "0.5937187", "0.59367967", "0.5919371", "0.57940376", "0.56724787", "0.5659674", "0.5651457", "0.5649507", "0.55863035", "0.55385494", "0.5521582", "0.5468761", "0.5432579", "0.5426138", "0.5414661", "0.5390122", "0.5376509", "0.5373534", "0.5363947", "0.53566766", "0.5331303", "0.5328344", "0.53211504", "0.52783394", "0.5239268", "0.5230893", "0.52231055", "0.5220119", "0.521671", "0.5208589", "0.5204232", "0.52032894", "0.5196598", "0.5195137", "0.51854753", "0.5169777", "0.5165372", "0.51474893", "0.51409006", "0.5130862", "0.5093332", "0.5088064", "0.50872886", "0.50831115", "0.5081669", "0.5076263", "0.5071399", "0.5068829", "0.50631624", "0.50620836", "0.5060072", "0.5056821", "0.50490165", "0.50461155", "0.5045728", "0.50426257", "0.50372833", "0.50360554", "0.5023266", "0.5015961", "0.5006077", "0.49988103", "0.4973558", "0.49651334", "0.49620143", "0.49529275", "0.49528328", "0.49489397", "0.49484834", "0.49480686", "0.49399433", "0.49315032", "0.4927031", "0.49156383", "0.49044812", "0.49042523", "0.49015203", "0.4901495", "0.48998237", "0.4899142", "0.48984227", "0.48971766", "0.4896406", "0.48894835", "0.48893848", "0.48869696", "0.48849407", "0.48806235", "0.48796773", "0.4879309", "0.48768058", "0.48725906", "0.48716828", "0.4870474", "0.48689276", "0.48674902", "0.486631", "0.48491535", "0.48479858" ]
0.60099
0
basic RNN returning next hidden state at a specific timestep.
def rnn_cell(hprev, zt, name=None, reuse=False):
    nin = zt.shape[-1].value
    nout = hprev.shape[-1].value
    with tf.variable_scope(name, default_name="rnn", values=[hprev, zt], reuse=reuse):
        wz = get_variable_wrap("kernel/input", [nin, nout], dtype=tf.float32,
                               initializer=tf.random_normal_initializer(0, 0.01))
        wh = get_variable_wrap("kernel/hidden", [nout, nout], dtype=tf.float32,
                               initializer=tf.random_normal_initializer(0, 0.01))
        bh = get_variable_wrap("bias", [nout], dtype=tf.float32,
                               initializer=tf.random_normal_initializer(0, 0.01))
        return tf.tanh(ed.dot(hprev, wh) + ed.dot(zt, wz) + bh)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _build_rnn(self, input_tensor):\n\n w_trainable = False\n x_shift_trainable = False\n eta_trainable = True\n\n input_shape = input_tensor.get_shape().as_list()\n input_area = np.prod(input_shape[1:])\n batch_input_shape = (-1, input_area)\n\n filters = self._hparams.filters + self._hparams.bias_neurons\n hidden_size = [filters]\n weights_shape = [filters, filters]\n\n with tf.variable_scope(\"rnn\"):\n init_state_pl = self._dual.add('init_pl', shape=hidden_size, default_value=0).add_pl()\n init_hebb_pl = self._dual.add('hebb_init_pl', shape=weights_shape, default_value=0).add_pl()\n\n # ensure init placeholders are being reset every iteration\n init_hebb_pl = tf_print(init_hebb_pl, \"Init Hebb:\", summarize=100, mute=True)\n\n # Input reshape: Ensure flat (vector) x batch size input (batches, inputs)\n # -----------------------------------------------------------------\n input_vector = tf.reshape(input_tensor, batch_input_shape, name='input_vector')\n\n # unroll input into a series so that we can iterate over it easily\n x_series = tf.unstack(input_vector, axis=0, name=\"ep-series\") # batch_size of hidden_size\n\n # get the target and degraded samples\n target = input_vector[-1]\n target = tf_print(target, \"TARGET\\n\", mute=True)\n degraded_extracted = input_vector[-2]\n degraded_extracted = tf_print(degraded_extracted, \"DEGRADED-extracted\\n\", mute=True)\n self._dual.set_op('target', target)\n self._dual.set_op('degraded_raw', degraded_extracted)\n\n y_current = tf.reshape(init_state_pl, [1, filters], name=\"init-curr-state\")\n hebb = init_hebb_pl\n\n with tf.variable_scope(\"slow-weights\"):\n w_default = 0.01\n alpha_default = 0.1\n eta_default = 0.1\n x_shift_default = 0.01\n bias_default = 1.0 * w_default # To emulate the Miconi method of having an additional input at 20 i.e.\n # it creates an output of 1.0, and this is multiplied by the weight (here we have straight bias, no weight)\n\n if w_trainable:\n w = tf.get_variable(name=\"w\", initializer=(w_default * tf.random_uniform(weights_shape)))\n else:\n w = tf.zeros(weights_shape)\n\n alpha = tf.get_variable(name=\"alpha\", initializer=(alpha_default * tf.random_uniform(weights_shape)))\n\n if eta_trainable:\n eta = tf.get_variable(name=\"eta\", initializer=(eta_default * tf.ones(shape=[1])))\n else:\n eta = eta_default * tf.ones([1])\n\n if x_shift_trainable:\n x_shift = tf.get_variable(name=\"x_shift\", initializer=(x_shift_default * tf.ones(shape=[1])))\n else:\n x_shift = 0\n\n self._dual.set_op('w', w)\n self._dual.set_op('alpha', alpha)\n self._dual.set_op('eta', eta)\n self._dual.set_op('x_shift', x_shift)\n\n if self._hparams.bias:\n bias = tf.get_variable(name=\"bias\", initializer=(bias_default * tf.ones(filters)))\n self._dual.set_op('bias', bias)\n bias = tf_print(bias, \"*** bias ***\", mute=MUTE_DEBUG_GRAPH)\n\n with tf.variable_scope(\"layers\"):\n hebb = tf_print(hebb, \"*** initial hebb ***\", mute=MUTE_DEBUG_GRAPH)\n y_current = tf_print(y_current, \"*** initial state ***\")\n w = tf_print(w, \"*** w ***\", mute=MUTE_DEBUG_GRAPH)\n alpha = tf_print(alpha, \"*** alpha ***\", mute=MUTE_DEBUG_GRAPH)\n\n i = 0\n last_x = None\n outer_first = None\n outer_last = None\n for x in x_series:\n # last sample is target, so don't process it again\n if i == len(x_series) - 1: # [0:x, 1:d, 2:t], l=3\n break\n layer_name = \"layer-\" + str(i)\n with tf.variable_scope(layer_name):\n x = self._hparams.bt_amplify_factor * x\n x = tf_print(x, str(i) + \": x_input\", mute=MUTE_DEBUG_GRAPH)\n y_current = 
tf_print(y_current, str(i) + \": y(t-1)\", mute=MUTE_DEBUG_GRAPH)\n\n # neurons latch on as they have bidirectional connections\n # attempt to remove this issue by knocking out lateral connections\n remove = 'random'\n if remove == 'circular':\n diagonal_mask = tf.convert_to_tensor(np.tril(np.ones(weights_shape, dtype=np.float32), 0))\n alpha = tf.multiply(alpha, diagonal_mask)\n elif remove == 'random':\n size = np.prod(weights_shape[:])\n knockout_mask = np.ones(size)\n knockout_mask[:int(size / 2)] = 0\n np.random.shuffle(knockout_mask)\n knockout_mask = np.reshape(knockout_mask, weights_shape)\n alpha = tf.multiply(alpha, knockout_mask)\n\n # ---------- Calculate next output of the RNN\n weighted_sum = tf.add(tf.matmul(y_current - x_shift,\n tf.add(w, tf.multiply(alpha, hebb, name='lyr-mul'), name=\"lyr-add_w_ah\"),\n name='lyr-mul-add-matmul'),\n x, \"weighted_sum\")\n\n if self._hparams.bias:\n weighted_sum = tf.add(weighted_sum, bias) # weighted sum with bias\n\n y_next, _ = activation_fn(weighted_sum, self._hparams.nonlinearity)\n\n with tf.variable_scope(\"fast_weights\"):\n # ---------- Update Hebbian fast weights\n # outer product of (yin * yout) = (current_state * next_state)\n outer = tf.matmul(tf.reshape(y_current, shape=[filters, 1]),\n tf.reshape(y_next, shape=[1, filters]),\n name=\"outer-product\")\n outer = tf_print(outer, str(i) + \": *** outer = y(t-1) * y(t) ***\", mute=MUTE_DEBUG_GRAPH)\n\n if i == 1: # first outer is zero\n outer_first = outer\n outer_last = outer\n\n hebb = (1.0 - eta) * hebb + eta * outer\n hebb = tf_print(hebb, str(i) + \": *** hebb ***\", mute=MUTE_DEBUG_GRAPH)\n\n # record for visualisation the output when presented with the last blank\n idx_blank_first = self._blank_indices[-1][0]\n idx_blank_last = self._blank_indices[-1][1]\n\n if i == idx_blank_first:\n blank_output_first = y_next\n self._dual.set_op('blank_output_first', blank_output_first)\n\n if i == idx_blank_last:\n blank_output_last = y_next\n self._dual.set_op('blank_output_last', blank_output_last)\n\n y_current = y_next\n last_x = x\n i = i + 1\n\n self._dual.set_op('hebb', hebb)\n self._dual.set_op('outer_first', outer_first)\n self._dual.set_op('outer_last', outer_last)\n\n last_x = tf_print(last_x, str(i) + \": LAST-X\", mute=True)\n self._dual.set_op('degraded', last_x)\n\n output_pre_masked = tf.squeeze(y_current)\n self._dual.set_op('output_pre_masked', output_pre_masked) # pre-masked output\n\n # External masking\n # -----------------------------------------------------------------\n with tf.variable_scope(\"masking\"):\n mask_pl = self._dual.add('mask', shape=hidden_size, default_value=1.0).add_pl()\n y_masked = tf.multiply(y_current, mask_pl, name='y_masked')\n\n # Setup the training operations\n # -----------------------------------------------------------------\n with tf.variable_scope(\"optimizer\"):\n loss_op = self._build_loss_op(y_masked, target)\n self._dual.set_op('loss', loss_op)\n\n self._optimizer = tf.train.AdamOptimizer(self._hparams.learning_rate)\n training_op = self._optimizer.minimize(loss_op,\n global_step=tf.train.get_or_create_global_step(), name='training_op')\n self._dual.set_op('training', training_op)\n\n return y_masked, y_masked", "def model_RNN(x_train, y_train, x_test=None, y_test=None, kwargs={}):\n \"\"\"\n Notes on Input shape\n 3D tensor with shape (batch_size, timesteps, input_dim).\n https://keras.io/layers/recurrent/\n LSTMs in Keras are typically used on 3d data (batch dimension, timesteps, features).\n LSTM without return_sequences will 
output (batch dimension, output features)\n LSTM with return_sequences will output (batch dimension, timesteps, output features)\n Basic timeseries data has an input shape (number of sequences, steps, features). Target is (number of sequences, steps, targets). Use an LSTM with return_sequences.\n \"\"\"\n ######## RELU??? DropOut\n # create and fit the LSTM network\n # input_shape = Lookback x Features\n verbose = kwargs.get('verbose',False)\n layers = kwargs.get('layers', 1 )\n nodes = kwargs.get('nodes', None)\n\n if nodes is None or nodes==0 or nodes==[0]:\n nodes = [np.shape(x_train)[1]]\n elif isinstance(nodes, (int, np.integer)): # turn int to list\n nodes = [nodes]\n\n if layers > 1 and len(nodes) < layers:\n nodes = list(np.pad(nodes,[0,layers-len(nodes)], mode='constant',constant_values=nodes[-1]))\n\n ndim = np.max([2,len(np.shape(x_train))]) # Min 2D\n if ndim==2:\n input_shape=(x_train.shape[1],)\n else:\n input_shape=(x_train.shape[1],x_train.shape[2])\n if kwargs.get('learning_rate', False):\n lr = kwargs.get('learning_rate')\n else:\n lr = False\n\n if np.ndim(y_train)==1:\n n_out = 1 #e.g. forecast y as float, just 1 step ahead.\n else:\n n_out = np.shape(y_train)[1] #e.g. onehot encoded, or n-steps ahead.\n\n dropout = kwargs.get('dropout',0) # dropout rate between 0 and 1.\n stateful = kwargs.get('stateful',True)\n if stateful: #RNN needs fixed batch - consider using static_index\n batch_shape = (kwargs.get('batch_size',1234),) + input_shape\n actvn = kwargs.get('actvn','tanh')\n actvl = kwargs.get('actvl','sigmoid')\n if verbose and not actvn == 'tanh': print('tanh activation recommended for LSTM but you are using',actvn)\n\n model=[]\n model = Sequential() # https://keras.io/models/sequential/\n model.reset_states() # ?useful for batch training RNN... perhaps inside batched loop\n #TODO? model.add(Embedding(max_features, output_dim=n_out))\n\n if layers>1:\n for n in range(1,layers):\n if kwargs.get('verbose'): print('+adding extra layer')\n if stateful: #switch between batch_ and input_shape\n model.add(LSTM(nodes[layers-1], batch_input_shape=batch_shape, return_sequences=True, activation=actvn, stateful=stateful))\n else:\n model.add(LSTM(nodes[layers-1], input_shape=input_shape, return_sequences=True, activation=actvn, stateful=stateful))\n if kwargs.get('bnorm', False):\n model.add(keras.layers.normalization.BatchNormalization())\n # TODO find out about time lock dropout\n if dropout:\n model.add(Dropout(dropout)) #(c.f. 
Regularisation of Betas)\n\n # Single layer or last layer of RNN\n if stateful:\n model.add(LSTM(nodes[layers-1], batch_input_shape=batch_shape, return_sequences=False, activation=actvn, stateful=stateful))\n else:\n model.add(LSTM(nodes[layers-1], input_shape=input_shape, return_sequences=False, activation=actvn, stateful=stateful))\n\n #model.add(Flatten()) # Req'd if last layer return_sequences=True\n #model.add(Dense(nodes[layers-1]**2, activation=actvl))\n model.add(Dense(n_out, activation=actvl))\n\n #defaults = keras.optimizers.Nadam(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=None, schedule_decay=0.004)\n if hasattr(kwargs,'optimizer'):\n optimizer = kwargs['optimizer']\n elif lr:\n optimizer = keras.optimizers.Nadam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-8, schedule_decay=0.004)\n else:\n optimizer = keras.optimizers.Nadam(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-8,schedule_decay=0.004)\n optimizer = 'adam'\n\n if is_bool_dtype(y_train):\n model.compile(loss='binary_crossentropy', optimizer=optimizer)\n if is_categorical_dtype(y_train) or kwargs.get('onehot',False):\n #TODO Multiple Category\n model.compile(loss='categorical_crossentropy', optimizer=optimizer)\n else:\n model.compile(loss='mean_squared_error', optimizer=optimizer)\n\n if verbose > 1:\n model.summary()\n print(\"Inputs: {}\".format(model.input_shape))\n print(\"Outputs: {}\".format(model.output_shape))\n print(\"Actual input: {}\".format(x_train.shape))\n print(\"Actual output: {}\".format(y_train.shape))\n print('Model Loss: ' + model.loss)\n\n # For compatability with other models;\n model.score = model.evaluate\n\n return model #self.model=model", "def the_nn(input_size, hidden_size, n_layers, output_size, layer_type = 'RNN', nonlinearity = 'tanh', dropout = 0, bidirectional = False):\n \n class Model(nn.Module):\n def __init__(self):\n super(Model, self).__init__()\n \n # Defining layers\n self.hidden_size = hidden_size\n self.n_layers = n_layers\n \n # RNN Layer\n if layer_type == 'RNN':\n self.rnn = nn.RNN(input_size = input_size, hidden_size = hidden_size, \n num_layers = n_layers, batch_first = True, \n bidirectional = bidirectional, nonlinearity = nonlinearity, dropout = dropout) \n \n if layer_type == 'LSTM':\n self.rnn = nn.LSTM(input_size = input_size, hidden_size = hidden_size, \n num_layers = n_layers, batch_first = True, \n bidirectional = bidirectional, dropout = dropout)\n \n if layer_type == 'GRU':\n self.rnn = nn.GRU(input_size = input_size, hidden_size = hidden_size, \n num_layers = n_layers, batch_first = True, \n bidirectional = bidirectional, dropout = dropout)\n \n if bidirectional == False:\n self.fc = nn.Linear(hidden_size, output_size)\n \n if bidirectional == True:\n self.fc = nn.Linear(hidden_size*2, output_size)\n \n def forward(self, x):\n batch_size = x.size(0)\n \n #Initializing hidden state for first input using method defined below\n hidden = self.init_hidden(batch_size, self.hidden_size)\n \n # Find sequence lengths (for packing)\n x_lengths = self.find_lengths(x)\n \n # Pack sequences\n x = torch.nn.utils.rnn.pack_padded_sequence(x, x_lengths, batch_first=True, enforce_sorted=False)\n\n # Run the network\n out, hidden = self.rnn(x, hidden)\n \n # Unpack the sequences again\n out, _ = torch.nn.utils.rnn.pad_packed_sequence(out, batch_first=True)\n\n # Run through the linear layer\n out = F.relu(self.fc(out))\n \n # Perform log_softmax on output (WORSE PERFORMANCE!)\n #x = F.log_softmax(x, dim = 2)\n\n return out, hidden\n \n def init_hidden(self, batch_size, 
hidden_size):\n # This method generates the first hidden state of zeros which we'll use in the forward pass\n \n if layer_type == 'RNN' or layer_type == 'GRU':\n \n if bidirectional == False:\n hidden = torch.zeros(self.n_layers, batch_size, self.hidden_size)\n \n if bidirectional == True:\n hidden = torch.zeros(2, batch_size, self.hidden_size)\n \n if layer_type == 'LSTM':\n \n if bidirectional == False:\n hidden = (torch.zeros(self.n_layers, batch_size, self.hidden_size),\n torch.zeros(self.n_layers, batch_size, self.hidden_size))\n \n if bidirectional == True:\n hidden = (torch.zeros(2, batch_size, self.hidden_size),\n torch.zeros(2, batch_size, self.hidden_size))\n \n return hidden\n \n def find_lengths(self, input_seq):\n # Find seq-lengths of each sequence (used to pack sequences)\n x_lengths = []\n for seq in input_seq:\n for idx, vec in enumerate(seq):\n if sum(vec).item() != 1:\n x_lengths.append(idx)\n break\n if idx == 752:\n x_lengths.append(len(seq)) \n return x_lengths\n \n net = Model()\n return net", "def _step(self, input, rnn_state):\n assert len(self.lstm_cells) == 1\n rnn_cell = self.lstm_cells[0]\n new_rnn_state = rnn_cell(input, rnn_state)\n return new_rnn_state", "def build_rnn(cell, inputs):\n #_,initial_state = get_init_cell(batch_size, rnn_size)\n \n output, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)\n final_state = tf.identity(final_state, name='final_state')\n\n return (output, final_state)", "def n_step_rnn(\n n_layers, dropout_ratio, hx, ws, bs, xs, activation='tanh', **kwargs):\n return n_step_rnn_base(n_layers, dropout_ratio, hx, ws, bs, xs,\n activation, use_bi_direction=False, **kwargs)", "def init_hidden(self):\n if isinstance(self.rnn, nn.GRU):\n return torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device)\n elif isinstance(self.rnn, nn.LSTM):\n return (torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device),\n torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device))", "def init_hidden(self):\n if isinstance(self.rnn, nn.GRU):\n return torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device)\n elif isinstance(self.rnn, nn.LSTM):\n return (torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device),\n torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device))", "def recurrent_inference(self, hidden_state: np.array, action: Action) -> NetworkOutput:\n\n conditioned_hidden = self._conditioned_hidden_state(hidden_state, action)\n hidden_representation, reward, value, policy_logits = self.recurrent_model.predict(conditioned_hidden)\n output = NetworkOutput(\n value=self._value_transform(value),\n reward=self._reward_transform(reward),\n policy_logits=NetworkOutput.build_policy_logits(policy_logits),\n hidden_state=hidden_representation[0]\n )\n return output", "def init_hidden(self):\n if isinstance(self.rnn, nn.GRU) or isinstance(self.rnn, nn.RNN):\n return torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device)\n elif isinstance(self.rnn, nn.LSTM):\n return (torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device),\n torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device))", "def init_hidden(self):\n if isinstance(self.rnn, nn.GRU) or isinstance(self.rnn, nn.RNN):\n return torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device)\n elif 
isinstance(self.rnn, nn.LSTM):\n return (torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device),\n torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device))", "def rnn_no_early_stop(dataset):\n model, prefix = make_rnn()\n\n print(\"RNN without early stopping\")\n cross_validate(dataset, model)\n\n print(\"RNN without early stopping, run 2\")\n cross_validate(dataset, model)", "def next_epoch(self, state):\n return self.reset(state)", "def next_state(self,\n old_state,\n inputs,\n parameters = None):\n if parameters is not None:\n raise NotImplementedError('Dynamically specifying RNN weights is '\n 'currently not supported.')\n next_rnn_state, next_rnn_output = self._rnn_cell(\n inputs.get('input'), old_state.get('state'))\n return Value(state=next_rnn_state, cell_output=next_rnn_output)", "def the_nn_article(input_size, dropout):\n \n class Model(nn.Module):\n def __init__(self):\n super(Model, self).__init__()\n \n # Defining layers\n self.hidden_size = 256\n self.first_layer = 512\n self.second_layer = 1024\n self.n_layers = 2\n self.bidirectional = True\n self.dropout = dropout\n \n # RNN Layer\n self.rnn = nn.LSTM(input_size = input_size, hidden_size = self.hidden_size, \n num_layers = self.n_layers, batch_first = True, \n bidirectional = self.bidirectional, dropout = self.dropout)\n \n self.fc1 = nn.Linear(self.first_layer, self.second_layer)\n self.fc2 = nn.Linear(self.second_layer, 3)\n \n def forward(self, x):\n batch_size = x.size(0)\n \n #Initializing hidden state for first input using method defined below\n hidden = self.init_hidden(batch_size, self.hidden_size)\n \n # Find sequence lengths (for packing)\n x_lengths = self.find_lengths(x)\n \n # Pack sequences\n x = torch.nn.utils.rnn.pack_padded_sequence(x, x_lengths, batch_first=True, enforce_sorted=False)\n\n # Run the network\n out, hidden = self.rnn(x, hidden)\n \n # Unpack the sequences again\n out, _ = torch.nn.utils.rnn.pad_packed_sequence(out, batch_first=True)\n\n # Run through the linear layer\n out = F.relu(self.fc1(out))\n out = F.relu(self.fc2(out))\n \n # Perform log_softmax on output (WORSE PERFORMANCE!)\n #x = F.log_softmax(x, dim = 2)\n\n return out, hidden\n \n def init_hidden(self, batch_size, hidden_size):\n # This method generates the first hidden state of zeros which we'll use in the forward pass\n \n hidden = (torch.zeros(2*self.n_layers, batch_size, self.hidden_size),\n torch.zeros(2*self.n_layers, batch_size, self.hidden_size))\n \n return hidden\n \n def find_lengths(self, input_seq):\n # Find seq-lengths of each sequence (used to pack sequences)\n x_lengths = []\n for seq in input_seq:\n for idx, vec in enumerate(seq):\n if sum(vec).item() != 1:\n x_lengths.append(idx)\n break\n if idx == 752:\n x_lengths.append(len(seq)) \n return x_lengths\n \n net = Model()\n return net", "def rnn(rnn_type, inputs, length, hidden_size, layer_num=1, dropout_keep_prob=None, concat=True):\n if not rnn_type.startswith('bi'):\n cell = get_cell(rnn_type, hidden_size, layer_num, dropout_keep_prob)\n outputs, state = tf.nn.dynamic_rnn(\n cell, inputs, sequence_length=length, dtype=tf.float32)\n if rnn_type.endswith('lstm'):\n c, h = state\n state = h\n else:\n cell_fw = get_cell(rnn_type, hidden_size, layer_num, dropout_keep_prob)\n cell_bw = get_cell(rnn_type, hidden_size, layer_num, dropout_keep_prob)\n outputs, state = tf.nn.bidirectional_dynamic_rnn(\n cell_bw, cell_fw, inputs, sequence_length=length, dtype=tf.float32\n )\n state_fw, state_bw = state\n if 
rnn_type.endswith('lstm'):\n c_fw, h_fw = state_fw\n c_bw, h_bw = state_bw\n state_fw, state_bw = h_fw, h_bw\n if concat:\n outputs = tf.concat(outputs, 2)\n state = tf.concat([state_fw, state_bw], 1)\n else:\n outputs = outputs[0] + outputs[1]\n state = state_fw + state_bw\n return outputs, state", "def predict(self,inputs,keep_prob, _):\n #Non-Dynamic Unidirectional RNN\n hidden_size = self.config.mRNN._hidden_size\n batch_size = self.config.batch_size\n embed_size = self.config.mRNN._embed_size\n\n if keep_prob == None:\n keep_prob = 1\n\n with tf.variable_scope('InputDropout'):\n inputs = [tf.nn.dropout(x,keep_prob) for x in inputs]\n \n with tf.variable_scope('RNN') as scope:\n state = self.initial_state\n RNN_H = tf.get_variable('HMatrix',[hidden_size,hidden_size])\n RNN_I = tf.get_variable('IMatrix', [embed_size,hidden_size])\n RNN_b = tf.get_variable('B',[hidden_size])\n\n self.variable_summaries(RNN_H, 'HMatrix')\n self.variable_summaries(RNN_I, 'IMatrix')\n self.variable_summaries(RNN_b, 'Bias')\n \n with tf.variable_scope('RNN',reuse=True):\n rnn_outputs = []\n for tstep, current_input in enumerate(inputs):\n RNN_H = tf.get_variable('HMatrix',[hidden_size,hidden_size])\n RNN_I = tf.get_variable('IMatrix', [embed_size,hidden_size])\n RNN_b = tf.get_variable('B',[hidden_size])\n #state = tf.nn.tanh(tf.matmul(state,RNN_H) + tf.matmul(current_input,RNN_I) + RNN_b)\n\n state = tf.matmul(state,RNN_H) + current_input\n rnn_outputs.append(state)\n\t\t#How to pass state info for subsequent sentences\n self.final_state = rnn_outputs[-1]\n \n with tf.variable_scope('RNNDropout'):\n rnn_outputs = [tf.nn.dropout(x,keep_prob) for x in rnn_outputs]\n\n return rnn_outputs", "def custom_dynamic_rnn(cell, inputs, inputs_len, initial_state=None):\n batch_size = tf.shape(inputs)[0]\n max_time = tf.shape(inputs)[1]\n\n inputs_ta = tf.TensorArray(dtype=tf.float32, size=max_time)\n inputs_ta = inputs_ta.unstack(tf.transpose(inputs, [1, 0, 2]))\n emit_ta = tf.TensorArray(dtype=tf.float32, dynamic_size=True, size=0)\n t0 = tf.constant(0, dtype=tf.int32)\n if initial_state is not None:\n s0 = initial_state\n else:\n s0 = cell.zero_state(batch_size, dtype=tf.float32)\n f0 = tf.zeros([batch_size], dtype=tf.bool)\n\n def loop_fn(t, prev_s, emit_ta, finished):\n \"\"\"\n the loop function of rnn\n \"\"\"\n cur_x = inputs_ta.read(t)\n scores, cur_state = cell(cur_x, prev_s)\n\n # copy through\n scores = tf.where(finished, tf.zeros_like(scores), scores)\n\n if isinstance(cell, tc.rnn.LSTMCell):\n cur_c, cur_h = cur_state\n prev_c, prev_h = prev_s\n cur_state = tc.rnn.LSTMStateTuple(tf.where(finished, prev_c, cur_c),\n tf.where(finished, prev_h, cur_h))\n else:\n cur_state = tf.where(finished, prev_s, cur_state)\n\n emit_ta = emit_ta.write(t, scores)\n finished = tf.greater_equal(t + 1, inputs_len)\n return [t + 1, cur_state, emit_ta, finished]\n\n _, state, emit_ta, _ = tf.while_loop(\n cond=lambda _1, _2, _3, finished: tf.logical_not(\n tf.reduce_all(finished)),\n body=loop_fn,\n loop_vars=(t0, s0, emit_ta, f0),\n parallel_iterations=32,\n swap_memory=False)\n\n outputs = tf.transpose(emit_ta.stack(), [1, 0, 2])\n return outputs, state", "def rnn(rnn_type, inputs, length, hidden_size, layer_num=1, dropout_keep_prob=None, concat=True):\n if not rnn_type.startswith('bi'):\n cell = get_cell(rnn_type, hidden_size, layer_num, dropout_keep_prob)\n outputs, states = tf.nn.dynamic_rnn(cell, inputs, sequence_length=length, dtype=tf.float32)\n if rnn_type.endswith('lstm'):\n c = [state.c for state in states]\n h = 
[state.h for state in states]\n states = h\n else:\n cell_fw = get_cell(rnn_type, hidden_size, layer_num, dropout_keep_prob)\n cell_bw = get_cell(rnn_type, hidden_size, layer_num, dropout_keep_prob)\n outputs, states = tf.nn.bidirectional_dynamic_rnn(\n cell_bw, cell_fw, inputs, sequence_length=length, dtype=tf.float32\n )\n states_fw, states_bw = states\n if rnn_type.endswith('lstm'):\n c_fw = [state_fw.c for state_fw in states_fw]\n h_fw = [state_fw.h for state_fw in states_fw]\n c_bw = [state_bw.c for state_bw in states_bw]\n h_bw = [state_bw.h for state_bw in states_bw]\n states_fw, states_bw = h_fw, h_bw\n if concat:\n outputs = tf.concat(outputs, 2)\n states = tf.concat([states_fw, states_bw], 1)\n else:\n outputs = outputs[0] + outputs[1]\n states = states_fw + states_bw\n return outputs, states", "def build_rnn(cell, inputs):\n outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)\n final_state = tf.identity(final_state, name=\"final_state\")\n\n return outputs, final_state", "def _build_bdrnn_graph(self, hparams):\n\n sample = self.iterator.get_next()\n\n inputs, tgt_outputs, seq_len = sample\n\n # linear projection to state size\n #with tf.variable_scope(\"bdrnn_in\", dtype=tf.float32):\n # inputs = tf.layers.dense(inputs=inputs,\n # units=hparams.input_proj_size,\n # kernel_initializer=tf.glorot_uniform_initializer())\n\n lm_fw_cell = []\n lm_bw_cell = []\n lm_init_state_fw = []\n lm_init_state_bw = []\n if hparams.pretrained:\n with tf.variable_scope(\"lm_rnn\", dtype=tf.float32):\n # create lm\n with tf.variable_scope(\"fw\", dtype=tf.float32):\n lm_fw_cell = _create_rnn_cell(num_units=hparams.num_units,\n num_layers=1,\n mode=self.mode)\n # build the cell so it is in the correct scope\n # NOTE: this is hard coded\n lm_fw_cell[0].build([None, hparams.num_features])#hparams.input_proj_size])\n lm_init_state_fw = _get_initial_state([lm_fw_cell[0].state_size], tf.shape(inputs)[0], \"lm\")\n with tf.variable_scope(\"bw\", dtype=tf.float32):\n lm_bw_cell = _create_rnn_cell(num_units=hparams.num_units,\n num_layers=1,\n mode=self.mode)\n # NOTE: this is hard coded\n lm_bw_cell[0].build([None, hparams.num_features])#hparams.input_proj_size])\n lm_init_state_bw = _get_initial_state([lm_bw_cell[0].state_size], tf.shape(inputs)[0], \"lm\")\n\n lm_outputs, lm_states = tf.nn.bidirectional_dynamic_rnn(lm_fw_cell[0],\n lm_bw_cell[0],\n inputs,\n sequence_length=seq_len,\n initial_state_fw=lm_init_state_fw[0],\n initial_state_bw=lm_init_state_bw[0],\n dtype=tf.float32)\n # optionally fix the LM weights\n if hparams.fixed_lm:\n print(\"Fixing pretrained language models.\")\n lm_outputs = tf.stop_gradient(lm_outputs)\n lm_outputs = tf.concat([lm_outputs[0], lm_outputs[1]], axis=-1)\n lm_outputs = tf.layers.dense(lm_outputs,\n 20,\n kernel_initializer=tf.glorot_uniform_initializer())\n lm_outputs = tf.concat([lm_outputs, inputs], axis=-1)\n\n\n #lm_outputs = tf.concat([lm_outputs[0], lm_outputs[1], inputs], axis=-1)\n else:\n lm_outputs = tf.concat(lm_outputs, axis=-1)\n\n\n\n with tf.variable_scope(\"bdrnn\", dtype=tf.float32) as bdrnn_scope:\n # create bdrnn\n with tf.variable_scope(\"fw\", dtype=tf.float32):\n fw_cells = _create_rnn_cell(num_units=hparams.num_units,\n num_layers=hparams.num_layers,\n mode=self.mode\n )\n init_state_fw = _get_initial_state([cell.state_size for cell in fw_cells],\n tf.shape(inputs)[0], \"initial_state_fw\")\n\n with tf.variable_scope(\"bw\", dtype=tf.float32):\n bw_cells = _create_rnn_cell(num_units=hparams.num_units,\n 
num_layers=hparams.num_layers,\n mode=self.mode,\n )\n\n init_state_bw = _get_initial_state([cell.state_size for cell in bw_cells],\n tf.shape(inputs)[0], \"initial_state_bw\")\n # NOTE: this is commented because the lm cells and states are separated now\n #fw_cells = lm_fw_cell + fw_cells\n #bw_cells = lm_bw_cell + bw_cells\n #init_state_fw = lm_init_state_fw + init_state_fw\n #init_state_bw = lm_init_state_bw + init_state_bw\n\n # run bdrnn\n combined_outputs, output_state_fw, output_state_bw = \\\n tf.contrib.rnn.stack_bidirectional_dynamic_rnn(cells_fw=fw_cells,\n cells_bw=bw_cells,\n inputs=lm_outputs,\n sequence_length=seq_len,\n initial_states_fw=init_state_fw,\n initial_states_bw=init_state_bw,\n dtype=tf.float32,\n scope=bdrnn_scope)\n # outputs is a tuple (output_fw, output_bw)\n # output_fw/output_bw are tensors [batch_size, max_time, cell.output_size]\n # outputs_states is a tuple (output_state_fw, output_state_bw) containing final states for\n # forward and backward rnn\n\n # concatenate the outputs of each direction\n #combined_outputs = tf.concat([outputs[0], outputs[1]], axis=-1)\n\n with tf.variable_scope(\"bdrnn_out\", dtype=tf.float32):\n # dense output layers\n dense1 = tf.layers.dense(inputs=combined_outputs,\n units=hparams.num_dense_units,\n kernel_initializer=tf.glorot_uniform_initializer(),\n activation=tf.nn.relu,\n use_bias=True)\n drop1 = tf.layers.dropout(inputs=dense1,\n rate=hparams.dropout,\n training=self.mode==tf.contrib.learn.ModeKeys.TRAIN)\n dense2 = tf.layers.dense(inputs=drop1,\n units=hparams.num_dense_units,\n kernel_initializer=tf.glorot_uniform_initializer(),\n activation=tf.nn.relu,\n use_bias=True)\n drop2 = tf.layers.dropout(inputs=dense2,\n rate=hparams.dropout,\n training=self.mode==tf.contrib.learn.ModeKeys.TRAIN)\n\n logits = tf.layers.dense(inputs=drop2,\n units=hparams.num_labels,\n use_bias=False)\n\n # mask out entries longer than target sequence length\n mask = tf.sequence_mask(seq_len, dtype=tf.float32)\n\n crossent = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits,\n labels=tgt_outputs,\n name=\"crossent\")\n\n # divide loss by batch_size * mean(seq_len)\n loss = tf.reduce_sum(crossent*mask)/tf.cast(hparams.batch_size, tf.float32)\n\n metrics = []\n update_ops = []\n if self.mode == tf.contrib.learn.ModeKeys.EVAL:\n # mean eval loss\n loss, loss_update = tf.metrics.mean(values=loss)\n\n predictions = tf.argmax(input=logits, axis=-1)\n tgt_labels = tf.argmax(input=tgt_outputs, axis=-1)\n acc, acc_update = tf.metrics.accuracy(predictions=predictions,\n labels=tgt_labels,\n weights=mask)\n # confusion matrix\n targets_flat = tf.reshape(tgt_labels, [-1])\n predictions_flat = tf.reshape(predictions, [-1])\n mask_flat = tf.reshape(mask, [-1])\n cm, cm_update = streaming_confusion_matrix(labels=targets_flat,\n predictions=predictions_flat,\n num_classes=hparams.num_labels,\n weights=mask_flat)\n tf.add_to_collection(\"eval\", cm_summary(cm, hparams.num_labels))\n metrics = [acc, cm]\n update_ops = [loss_update, acc_update, cm_update]\n\n return logits, loss, metrics, update_ops", "def reset(self, initial_read: np.ndarray = None):\n # Reset the network back to zero inputs\n self.rnn_state = np.zeros((self.bs, self.n_rnn, 1), dtype=self.dtype) # RNN outputs are single float\n self.hidden_act = np.zeros((self.bs, self.n_hidden), dtype=self.dtype) if self.n_hidden > 0 else None\n self.output_act = np.zeros((self.bs, self.n_outputs), dtype=self.dtype)\n \n # Initialize the network on maximum sensory inputs\n if (initial_read is not 
None) and self.n_hidden > 0:\n for _ in range(self.n_hidden + 20): # RNNs need a little bit of warmth to start up\n # Code below is straight up stolen from 'activate(self, inputs)'\n inputs = np.asarray([initial_read] * self.bs, dtype=self.dtype) # TODO: Improve\n output_inputs = np.matmul(self.in2out, inputs.transpose()).transpose()\n self.hidden_act = self.act_f(np.matmul(self.in2hid, inputs.transpose()).transpose() +\n np.matmul(self.hid2hid, self.hidden_act.transpose()).transpose() +\n self.hidden_biases)\n for i, rnn_idx in enumerate(self.rnn_idx):\n self.rnn_state[:, i] = self.rnn_array[i](\n np.concatenate((self.in2hid[rnn_idx] * inputs,\n self.hid2hid[rnn_idx] * self.hidden_act),\n axis=1)[self.rnn_map[i]].reshape(self.bs, self.rnn_array[i].input_size))\n self.hidden_act[:, rnn_idx] = self.rnn_state[:, i, 0]\n output_inputs += np.matmul(self.hid2out, self.hidden_act.transpose()).transpose()\n self.output_act = self.act_f(output_inputs + self.output_biases)", "def train_step(x_batch, y_batch):\r\n feed_dict = {\r\n rnn.input_x: x_batch,\r\n rnn.input_y: y_batch,\r\n rnn.dropout_keep_prob: FLAGS.dropout_keep_prob\r\n }\r\n _, step, loss, accuracy = sess.run(\r\n [train_op, global_step, rnn.loss, rnn.accuracy],\r\n feed_dict)\r\n return step, loss, accuracy", "def __call__(self, initial_lr, step, epoch):\n\n pass", "def get_rnn(X, rnn_size, seq_len, batch_size, num_layers=1, input_keep_prob=1.0, output_keep_prob=1.0, is_training=False,\n cell_name=\"BasicLSTM\", bidirectional=False):\n with tf.device(\"/cpu:0\"):\n # Convert input tensor to python list (along the sequence length dimention)\n word_embeddings = tf.split(1, seq_len, X)\n word_embeddings = [tf.squeeze(embed_, [1]) for embed_ in word_embeddings]\n\n # if is_training and keep_prob < 1:\n # word_embeddings = [tf.nn.dropout(input_, keep_prob) for input_ in word_embeddings]\n\n def get_cell():\n if cell_name == \"GRU\": # GRU\n cell = rnn_cell.GRUCell(rnn_size)\n elif cell_name == \"LSTM\": # LSTM\n cell = rnn_cell.LSTMCell(rnn_size, tf.shape(X)[2])\n else:\n cell = rnn_cell.BasicLSTMCell(rnn_size)\n if is_training and (input_keep_prob < 1 or output_keep_prob < 1):\n cell = rnn_cell.DropoutWrapper(cell, input_keep_prob=input_keep_prob, output_keep_prob=output_keep_prob)\n cell = rnn_cell.MultiRNNCell([cell] * num_layers)\n initial_state = cell.zero_state(batch_size, tf.float32)\n return cell, initial_state\n\n if bidirectional:\n with tf.variable_scope(\"forward\"):\n cell_fw, initial_state_fw = get_cell()\n with tf.variable_scope(\"backward\"):\n cell_bw, initial_state_bw = get_cell()\n return rnn.bidirectional_rnn(cell_fw, cell_bw, word_embeddings,\n initial_state_fw=initial_state_fw, initial_state_bw=initial_state_bw)\n else:\n cell, initial_state = get_cell()\n return rnn.rnn(cell, word_embeddings, initial_state=initial_state)", "def rnn_cell_loop(self):\n\n\t\t# Set up initial state\n\t\tself.h_out = [tf.zeros([par['batch_size'],par['n_hidden']])]\t\t\t# Spike\n\t\tself.h = tf.ones([par['batch_size'],par['n_hidden']])\t\t\t\t\t# State\n\t\tself.h *= 0.1 if par['cell_type'] == 'rate' else par[par['cell_type']]['V_r']\n\t\tself.h = [self.h]\n\t\tadapt = par['w_init']*tf.ones([par['batch_size'],par['n_hidden']])\n\n\t\tsyn_x = par['syn_x_init']*tf.ones([par['batch_size'], par['n_hidden']]) if par['use_stp'] else None\n\t\tsyn_u = par['syn_u_init']*tf.ones([par['batch_size'], par['n_hidden']]) if par['use_stp'] else None\n\n\t\t# Apply the EI mask to the recurrent weights\n\t\tself.W_rnn_effective = par['EI_matrix'] @ 
tf.nn.relu(self.var_dict['W_rnn'])\n\n\t\t# Set up latency buffer if being used\n\t\tif par['use_latency']:\n\t\t\tself.state_buffer = [tf.zeros([par['batch_size'], par['n_hidden']]) for t in range(par['latency_max'])]\n\t\t\tself.state_buffer = deque(self.state_buffer)\n\t\t\tself.W_rnn_latency = self.W_rnn_effective[tf.newaxis,...] * par['latency_mask']\n\t\t\tself.lat_spike_shape = tf.ones([par['latency_max'], 1, 1])\n\n\t\t# Set up output record\n\t\tself.output = []\n\t\tself.syn_x = []\n\t\tself.syn_u = []\n\n\t\ty = 0.\n\t\tfor t in range(par['num_time_steps']):\n\t\t\tself.t = t \t\t# For latency calculations\n\n\t\t\tif par['cell_type'] == 'rate':\n\t\t\t\traise Exception('Rate cell not yet implemented.')\n\t\t\telif par['cell_type'] == 'adex':\n\t\t\t\tif t < 10:\n\t\t\t\t\tspike, state, adapt, syn_x, syn_u = self.AdEx_cell(tf.zeros_like(self.h_out[-1]), self.h[-1], \\\n\t\t\t\t\t\tadapt, self.input_data[t], syn_x, syn_u)\n\t\t\t\telse:\n\t\t\t\t\tspike, state, adapt, syn_x, syn_u = self.AdEx_cell(self.h_out[-10], self.h[-1], \\\n\t\t\t\t\t\tadapt, self.input_data[t], syn_x, syn_u)\n\t\t\t\ty = 0.95*y + 0.05*(spike @ self.var_dict['W_out'] + self.var_dict['b_out'])\n\n\t\t\t\tself.h_out.append(spike)\n\t\t\t\tself.h.append(state)\n\t\t\t\tself.output.append(y)\n\t\t\t\tself.syn_x.append(syn_x)\n\t\t\t\tself.syn_u.append(syn_u)\n\n\t\t\telif par['cell_type'] == 'lif':\n\t\t\t\tspike, state, adapt, syn_x, syn_u = self.LIF_cell(self.h_out[-1], self.h[-1], adapt, self.input_data[t], syn_x, syn_u)\n\t\t\t\ty = 0.95*y + 0.05*spike @ self.var_dict['W_out'] + 0.*self.var_dict['b_out']\n\n\t\t\t\tself.h_out.append(spike)\n\t\t\t\tself.h.append(state)\n\t\t\t\tself.output.append(y)\n\n\t\t# Stack records\n\t\tself.output = tf.stack(self.output, axis=0)\n\t\tself.h = tf.stack(self.h, axis=0)\n\t\tself.h_out = tf.stack(self.h_out, axis=0)\n\t\tself.syn_x = tf.stack(self.syn_x, axis=0)\n\t\tself.syn_u = tf.stack(self.syn_u, axis=0)", "def n_step_rnn_base(n_layers, dropout_ratio, hx, ws, bs, xs,\n activation, use_bi_direction, **kwargs): # NOQA\n\n argument.check_unexpected_kwargs(\n kwargs, train='train argument is not supported anymore. '\n 'Use chainer.using_config',\n use_cudnn='use_cudnn argument is not supported anymore. '\n 'Use chainer.using_config')\n argument.assert_kwargs_empty(kwargs)\n\n activation_list = ['tanh', 'relu']\n if activation not in activation_list:\n candidate = ','.join(activation_list)\n raise ValueError('Invalid activation: \"%s\". 
Please select from [%s]'\n % (activation, candidate))\n\n xp = cuda.get_array_module(hx)\n\n if xp is not numpy and chainer.should_use_cudnn('>=auto', 5000):\n states = get_random_state().create_dropout_states(dropout_ratio)\n lengths = [len(x) for x in xs]\n xs = chainer.functions.concat(xs, axis=0)\n\n rnn_mode = 'rnn_%s' % activation\n w = cudnn_rnn_weight_concat(\n n_layers, states, use_bi_direction, rnn_mode, ws, bs)\n\n if use_bi_direction:\n # Bi-directional RNN\n if activation == 'tanh':\n rnn = NStepBiRNNTanh\n elif activation == 'relu':\n rnn = NStepBiRNNReLU\n else:\n # Uni-directional RNN\n if activation == 'tanh':\n rnn = NStepRNNTanh\n elif activation == 'relu':\n rnn = NStepRNNReLU\n\n hy, ys = rnn(n_layers, states, lengths)(hx, w, xs)\n sections = numpy.cumsum(lengths[:-1])\n ys = chainer.functions.split_axis(ys, sections, 0)\n return hy, ys\n\n else:\n\n def f(x, h, c, w, b):\n xw, hw = w\n xb, hb = b\n rnn_in = linear.linear(x, xw, xb) + linear.linear(h, hw, hb)\n if activation == 'tanh':\n return tanh.tanh(rnn_in), None\n elif activation == 'relu':\n return relu.relu(rnn_in), None\n\n hy, _, ys = n_step_rnn_impl(\n f, n_layers, dropout_ratio, hx, None, ws, bs, xs, use_bi_direction)\n return hy, ys", "def test_real(n_epochs=1000):\n n_hidden = 10\n n_in = 5\n n_out = 3\n n_steps = 10\n n_seq = 10 # per batch\n n_batches = 10\n\n np.random.seed(0)\n # simple lag test\n seq = np.random.randn(n_steps, n_seq * n_batches, n_in)\n targets = np.zeros((n_steps, n_seq * n_batches, n_out))\n\n targets[1:, :, 0] = seq[:-1, :, 3] # delayed 1\n targets[1:, :, 1] = seq[:-1, :, 2] # delayed 1\n targets[2:, :, 2] = seq[:-2, :, 0] # delayed 2\n\n targets += 0.01 * np.random.standard_normal(targets.shape)\n\n model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,\n learning_rate=0.01, learning_rate_decay=0.999,\n n_epochs=n_epochs, batch_size=n_seq, activation='tanh',\n L2_reg=1e-3)\n\n model.fit(seq, targets, validate_every=100, optimizer='bfgs')\n\n plt.close('all')\n fig = plt.figure()\n ax1 = plt.subplot(211)\n plt.plot(seq[:, 0, :])\n ax1.set_title('input')\n ax2 = plt.subplot(212)\n true_targets = plt.plot(targets[:, 0, :])\n\n guess = model.predict(seq[:, 0, :][:, np.newaxis, :])\n\n guessed_targets = plt.plot(guess.squeeze(), linestyle='--')\n for i, x in enumerate(guessed_targets):\n x.set_color(true_targets[i].get_color())\n ax2.set_title('solid: true output, dashed: model output')", "def get_dynamic_rnn(X, rnn_size, seq_len, num_layers=1, input_keep_prob=1.0, output_keep_prob=1.0,\n is_training=False, cell_name=\"GRU\", bidirectional=False, swap_memory=False):\n batch_size = tf.shape(X)[0]\n if bidirectional:\n rnn_size /= 2\n\n def get_cell():\n if cell_name == \"GRU\": # GRU\n _cell = rnn_cell.GRUCell(rnn_size)\n elif cell_name == \"LSTM\": # LSTM\n _cell = rnn_cell.LSTMCell(rnn_size, tf.shape(X)[2])\n # cell = rnn_cell.BasicLSTMCell(rnn_size)\n else:\n raise ValueError(\"Unrecognized Cell Name: {0}\".format(cell_name))\n if is_training and (input_keep_prob < 1 or output_keep_prob < 1):\n _cell = rnn_cell.DropoutWrapper(_cell, input_keep_prob=input_keep_prob, output_keep_prob=output_keep_prob)\n _cell = rnn_cell.MultiRNNCell([_cell] * num_layers)\n _initial_state = _cell.zero_state(batch_size, tf.float32)\n return _cell, _initial_state\n\n if bidirectional:\n with tf.variable_scope(\"forward\"):\n cell_fw, initial_state_fw = get_cell()\n with tf.variable_scope(\"backward\"):\n cell_bw, initial_state_bw = get_cell()\n return rnn.bidirectional_dynamic_rnn(\n cell_fw, 
cell_bw, X, sequence_length=seq_len,\n initial_state_fw=initial_state_fw, initial_state_bw=initial_state_bw,\n dtype=None, parallel_iterations=None, swap_memory=swap_memory, time_major=False, scope=None)\n else:\n cell, initial_state = get_cell()\n return rnn.dynamic_rnn(\n cell, X, sequence_length=seq_len, initial_state=initial_state,\n dtype=None, parallel_iterations=None, swap_memory=swap_memory, time_major=False, scope=None)", "def act(self, state, timestep=1e5):\n sample = random.random()\n eps_threshold = self.eps_end + (self.eps_start - self.eps_end) * \\\n math.exp(-1. * timestep / self.eps_decay)\n\n eps_threshold = max(self.eps_end, self.eps_start -\n timestep * (self.eps_start - self.eps_end) / self.eps_decay)\n\n if sample > eps_threshold:\n state = torch.tensor(state, dtype=torch.float32)\n return self.qnet(state).argmax()\n else:\n return torch.tensor(random.randrange(self.n_actions), dtype=torch.long)", "def recurrent_model(net, hidden_units=256, number_of_outputs=2):\n \n batch_size, seq_length, num_features = net.get_shape().as_list()\n\n lstm = tf.nn.rnn_cell.LSTMCell(hidden_units,\n use_peepholes=True,\n cell_clip=100,\n state_is_tuple=True)\n\n stacked_lstm = tf.nn.rnn_cell.MultiRNNCell([lstm] * 2, state_is_tuple=True)\n\n # We have to specify the dimensionality of the Tensor so we can allocate\n # weights for the fully connected layers.\n outputs, _ = tf.nn.dynamic_rnn(stacked_lstm, net, dtype=tf.float32)\n\n net = tf.reshape(outputs, (batch_size * seq_length, hidden_units))\n\n prediction = slim.layers.linear(net, number_of_outputs)\n \n return tf.reshape(prediction, (batch_size, seq_length, number_of_outputs))", "def test_forward_no_errors():\n rnn = GeneratorRNN(1)\n inputs = Variable(torch.zeros(1,1,3))\n hidden = rnn.init_hidden()\n rnn(inputs, hidden)\n\n rnn = GeneratorRNN(20)\n inputs = Variable(torch.zeros(1,1,3))\n hidden = rnn.init_hidden()\n rnn(inputs, hidden)", "def rnn_step_forward(x, prev_h, Wx, Wh, b):\n next_h, cache = None, None\n ##############################################################################\n # TODO: Implement a single forward step for the vanilla RNN. Store the next #\n # hidden state and any values you need for the backward pass in the next_h #\n # and cache variables respectively. 
#\n ##############################################################################\n # wx is a vocab vector\n dict_size = Wx.shape[0]\n h_num = Wx.shape[1]\n next_h = np.tanh(np.dot(x, Wx) + np.dot(prev_h, Wh) + b)\n cache = (x, next_h, prev_h, Wx, Wh, b)\n\n ##############################################################################\n # END OF YOUR CODE #\n ##############################################################################\n return next_h, cache", "def rnn_one_step(x_t, h_t):\n\n # Convert character id into embedding.\n x_t_emb = embed_x(tf.reshape(x_t,[-1,1]))[:,0]\n \n # Concatenate x embedding and previous h state.\n x_and_h = concatenate([x_t_emb, h_t])\n \n # Compute next state given x_and_h.\n h_next = get_h_next(x_and_h)\n #print(h_next.get_shape().as_list())\n \n # Get probabilities for language model P(x_next|h_next).\n output_probas = get_probas(h_next)\n \n return output_probas,h_next", "def create_bidirectionnal_dynamic_rnn_model(feats2d, shapes, model_settings, is_training):\n\n if is_training:\n dropout_prob = model_settings['dropout_prob'] \n\n # Get dimensions\n lstm_size = model_settings['lstm_size']\n\n batch_size = tf.shape(feats2d)[0] \n feats2d = tf.reshape(feats2d, shape=[batch_size,-1,model_settings['feature_width']]) # features are of shape [max seq length for batch, 40]\n seq_lengths = shapes[:,0] # all shapes are [seq_length, 40], we extract seq_length\n\n # seq_lengths = tf.slice(shapes, [0, 0], [batch_size, 1])\n # print(seq_lengths)\n\n # LSTM cells\n cell_fw = tf.contrib.rnn.LSTMCell(lstm_size, state_is_tuple=True)\n cell_bw = tf.contrib.rnn.LSTMCell(lstm_size, state_is_tuple=True)\n # ini_fw = cell_fw.zero_state(batch_size,dtype=tf.float32)\n # ini_bw = cell_bw.zero_state(batch_size,dtype=tf.float32)\n\n # Bi-directional RNN (+ Dropout)\n (output_fw, output_bw), (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, feats2d, \n sequence_length=seq_lengths, \n dtype=tf.float32)\n\n # initial_state_fw = ini_fw, initial_state_bw = ini_bw, \n # if state_is_tuple, state is a tuple (cell_state, memory_state)\n concat_rnn = tf.concat([state_fw[0], state_bw[0]], axis=1)\n\n if is_training:\n first_dropout = tf.nn.dropout(concat_rnn, dropout_prob)\n else:\n first_dropout = concat_rnn\n\n # Logits Layer\n num_classes = model_settings['num_classes']\n logits = tf.layers.dense(inputs=first_dropout, units=num_classes)\n \n if is_training:\n return logits, dropout_prob\n else:\n return logits", "def __init__(self, input_size, hidden_size, num_layers=1, num_classes=2, device=None):\n super(BiRNN, self).__init__()\n self.device = device\n self.hidden_size = hidden_size\n self.num_layers = num_layers\n self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True, bidirectional=True)\n self.fc = nn.Linear(hidden_size * 2, num_classes)", "def get_rnn_hidden_state(h):\n return h if not isinstance(h, tuple) else h[0]", "def train_epoch(self, epoch_num: int) -> float:\n self.model.train()\n epoch_loss = 0.0\n # hidden_start = torch.zeros(self.batch_size, self.rnn_size)\n # for batch_num, (x, y) in enumerate(make_batches(self.train_data,\n # self.batch_size,\n # self.max_len)):\n\n for batch_num, batch_tuple in enumerate(self.train_data):\n print('batch: ', batch_num)\n # reset gradients in train epoch\n self.optimizer.zero_grad()\n x = len(batch_tuple[0])\n y = len(batch_tuple[0][0])\n # compute hidden states\n # batch x timesteps x hidden_size\n x, y = batch_tuple\n # x = x.to(self.device)\n # y = y.to(self.device)\n hidden_states = 
self.model(x)\n # compute unnormalized probabilities\n # batch x timesteps x vocab_size\n # logits = self.model.get_logits(hidden_states)\n\n # compute loss\n # scalar\n batch_loss = self.model.get_loss(hidden_states, y)\n epoch_loss += batch_loss.item()\n\n # backpropagation (gradient of loss wrt parameters)\n batch_loss.backward()\n\n # clip gradients if they get too large\n torch.nn.utils.clip_grad_norm_(list(self.model.parameters()),\n self.max_grad_norm)\n\n # update parameters\n self.optimizer.step()\n\n # we use a stateful RNN, which means the first hidden state for the\n # next batch is the last hidden state of the current batch\n # hidden_states.detach_()\n # hidden_start = hidden_states[:,-1,:] # add comment\n if batch_num % 100 == 0:\n print(\"epoch %d, %d/%d examples, batch loss = %f\"\n % (epoch_num, (batch_num + 1) * self.batch_size,\n self.num_train_examples, batch_loss.item()))\n epoch_loss /= (batch_num + 1)\n\n return epoch_loss", "def forward(self, input, hidden):\r\n output, hidden = self.rnn(input, hidden)\r\n output = f.log_softmax(self.out(output.squeeze(1)), 1)\r\n return output, hidden", "def __init__(\n self, state_size, action_size, hidden_dim=128, state_rep_size=64, learning_rate=1e-5, eta=2\n ):\n super(RND, self).__init__(state_size, action_size, eta)\n self.hidden_dim = hidden_dim\n self.state_rep_size = state_rep_size\n self.learning_rate = learning_rate\n\n self.predictor_dev = \"cpu\"\n self.target_dev = \"cpu\"\n\n # create models\n self.predictor_model = RNDNetwork(state_size, action_size, hidden_dim, state_rep_size)\n self.target_model = RNDNetwork(state_size, action_size, hidden_dim, state_rep_size)\n\n for param in self.target_model.parameters():\n param.requires_grad = False\n\n self.optimizer = optim.Adam(self.predictor_model.parameters(), lr=learning_rate)\n self.loss = None", "def step(self):\n # Fast learning\n task_embedding = self._ilp.infer_task()\n\n # Posterior update\n #self._skip_flag = self._is_graph_same(task_embedding, self._prev_task_embedding)\n self._skip_flag = False # XXX do not skip test\n if not self._skip_flag:\n self._grprop.observe_task(task_embedding)\n self._prev_task_embedding = task_embedding\n else:\n print(\"skipping!\")", "def _build_rnn_graph_lstm(self, inputs, config, is_training):\n cell = util.create_lstm_cell(is_training, config)\n state = util.get_zero_state_for_the_cell(cell, config)\n\n self.initial_state = state\n with tf.variable_scope(\"RNN\"):\n inputs = tf.unstack(inputs, num=self.num_steps, axis=1)\n outputs, state = tf.contrib.rnn.static_rnn(cell, inputs,\n initial_state=self.initial_state)\n output = tf.reshape(tf.concat(outputs, 1), [-1, config.hidden_size])\n return output, state", "def _rnn_layer(input_data, rnn_cell, rnn_hidden_size, layer_id, rnn_activation,\n is_batch_norm, is_bidirectional):\n if is_batch_norm:\n input_data = tf.keras.layers.BatchNormalization(\n momentum=_MOMENTUM, epsilon=_EPSILON)(input_data)\n rnn_layer = rnn_cell(\n rnn_hidden_size, activation=rnn_activation, return_sequences=True,\n name=\"rnn_{}\".format(layer_id))\n if is_bidirectional:\n rnn_layer = tf.keras.layers.Bidirectional(rnn_layer, merge_mode=\"sum\")\n\n return rnn_layer(input_data)", "def train(self, terminal_state: bool, step: int) -> History:\r\n if len(self.replay_memory) < self.MIN_REPLAY_MEMORY_SIZE:\r\n return\r\n\r\n # Get a minibatch of random samples from memory replay table, like a\r\n # standard ANN minibatch is a list of tuples\r\n minibatch = random.sample(self.replay_memory, 
self.MINIBATCH_SIZE)\r\n\r\n # Get current states from minibatch, then query NN model for Q values\r\n current_states = np.array([transition[0] for transition in minibatch])\r\n current_qs_list = self.model.predict(current_states)\r\n\r\n # Get future states from minibatch, then query NN model for Q values\r\n # When using target network, query it, otherwise main network should be queried\r\n new_current_states = np.array([transition[3] for transition in minibatch])\r\n future_qs_list = self.target_model.predict(new_current_states)\r\n\r\n X = []\r\n y = []\r\n # traversing all the experience-tuples in minibatch\r\n for index, (current_state, action, reward, new_current_state, done) in enumerate(minibatch):\r\n if not done:\r\n max_future_q = np.max(future_qs_list[index])\r\n # using the Bellmann equation\r\n new_q = reward + self.DISCOUNT * max_future_q\r\n else:\r\n new_q = reward\r\n\r\n # Update Q value for given state\r\n current_qs = current_qs_list[index]\r\n current_qs[action] = new_q\r\n\r\n # And append to our training data\r\n X.append(current_state)\r\n y.append(current_qs)\r\n\r\n # Fit on all samples as one batch, log only on terminal state\r\n hist = self.model.fit(np.array(X), np.array(y),\r\n batch_size=self.MINIBATCH_SIZE, verbose=0,\r\n shuffle=False)\r\n\r\n if terminal_state:\r\n self.target_update_counter += 1\r\n\r\n # updateing to determine if we want to update target model\r\n if self.target_update_counter > self.UPDATE_TARGET_EVERY:\r\n self.target_model.set_weights(self.model.get_weights())\r\n self.target_update_counter = 0\r\n\r\n return hist", "def add_model(self, inputs):\n size = self.config.hidden_size\n forget_bias = self.config.forget_bias\n input_cell = self.config.input_cell\n\n if input_cell == 'BasicLSTMCell':\n lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(size, forget_bias)\n print 'Using Basic LSTM Cell \\n'\n\n elif input_cell == 'LSTMCell':\n lstm_cell = tf.nn.rnn_cell.LSTMCell(size, forget_bias)\n print 'Using LSTM Cell \\n'\n\n elif input_cell == 'GRUCell':\n lstm_cell = tf.nn.rnn_cell.GRUCell(size)\n print 'Using GRU Cell \\n'\n\n else:\n print \"Please Specify a Correct Cell Type\"\n\n lstm_cell = tf.nn.rnn_cell.DropoutWrapper(lstm_cell, output_keep_prob=self.config.dropout,\n input_keep_prob=self.config.dropout)\n\n cell = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * self.config.num_layers)\n \n print 'Number of Hidden Layers ', self.config.num_layers\n \n self.initial_state = cell.zero_state(self.config.batch_size, tf.float32)\n rnn_outputs = []\n state = self.initial_state\n\n with tf.variable_scope('RNNLM') as scope:\n for time_step in range(self.config.num_steps):\n if time_step > 0: scope.reuse_variables()\n (cell_output, state) = cell(inputs[:, time_step, :], state)\n rnn_outputs.append(cell_output)\n self.final_state = state\n\n return rnn_outputs", "def forward(self, inp, hidden=None, give_gates=False, debug=False, readout_time=None):\n\n if self.recoder is None:\n emb = inp\n else:\n emb = self.recoder(inp)\n\n if hidden is None:\n hidden = self.init_hidden(inp.shape[1])\n # if emb.dim()<3:\n # emb = emb.unsqueeze(0)\n\n if give_gates:\n output, hidden, extras = self.rnn(emb, hidden, give_gates)\n else:\n output, hidden = self.rnn(emb, hidden)\n # print(output.shape)\n\n # decoded = self.softmax(self.decoder(output))\n decoded = self.decoder(output)\n if readout_time is None:\n decoded = decoded[-1,...] 
# assume only final timestep matters\n\n if give_gates:\n return decoded, hidden, extras\n else:\n return decoded, hidden", "def __init__(self, input_dim=600+9, output_dim=1*3, dropout_prob=0.):\n super(F0_RNN, self).__init__()\n self.input_dim = input_dim\n self.output_dim = output_dim\n self.dropout_prob = dropout_prob\n\n self.recurrent_layers = utils.SequentialWithRecurrent(\n nn.Linear(self.input_dim, 256),\n nn.Sigmoid(),\n nn.Dropout(p=dropout_prob),\n utils.RecurrentCuDNNWrapper(\n nn.GRU(256, 64, batch_first=True)),\n nn.Dropout(p=dropout_prob),\n utils.RecurrentCuDNNWrapper(\n nn.GRU(64, 64, batch_first=True)),\n nn.Dropout(p=dropout_prob),\n utils.RecurrentCuDNNWrapper(\n nn.GRU(64, 64, batch_first=True)),\n nn.Dropout(p=dropout_prob),\n nn.Linear(64, 64),\n nn.Sigmoid(),\n nn.Dropout(p=dropout_prob),\n nn.Linear(64, self.output_dim),\n )\n\n self.metrics.add_metrics('all',\n LF0_RMSE_Hz=LF0Distortion())", "def methoddef(name, color, model, optimizer,\n xdata, ydata, data_train, data_valid, data_test):\n method = util2.experiment.method_rnn(\n name, color, model, optimizer, data_train, xdata, ydata)\n method.meas = [meas.meas_rnnloss(model.x, model.y,\n model.zero_state, model.init_state,\n model.final_state,\n data_train, model.y_hat, \"y_hat\",\n BATCH, STEPS,\n axes=[0.0, np.inf, 0.0, YMAX_TRAIN])]\n return method", "def build_rnn(x, h, output_size, scope, n_layers, size, gru_size, activation=tf.tanh, output_activation=None, regularizer=None):\n #====================================================================================#\n # ----------PROBLEM 2----------\n #====================================================================================#\n # YOUR CODE HERE\n # raise NotImplementedError\n with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):\n ## step 1 - inputs are first embedded by an MLP\n x = build_mlp(x, output_size, scope, n_layers, size, activation=activation, output_activation=output_activation, regularizer=regularizer)\n # print(\"after build_mlp x = \", x)\n ## step 2 - passed to a GRU cell\n # as \"sy_hidden = tf.placeholder(shape=[None, self.gru_size], name=\"hidden\", dtype=tf.float32)\" -> h, so we can only pass for 1 single GRUCell, if multiple CRUCell, hidden state will be agumented.\n # as x is the size of (?, output_size) in output of build_mlp, the rnn must be equal with size\n x, h = tf.nn.dynamic_rnn(tf.nn.rnn_cell.GRUCell(output_size, activation=activation), x, initial_state=h)\n # after build_mlp x = Tensor(\"continuous_logits/continuous_logits/fc1/BiasAdd:0\", shape=(?, 1, 32), dtype=float32)\n # x = tf.squeeze(x, axis=1)\n x = tf.reshape(x, (-1, x.get_shape()[1]*x.get_shape()[2]))\n # print(\"dynamic_rnn build_mlp x = \", x, \" h = \", h)\n return x, h", "def reset_rnn_states(self):\n if self.recurrent:\n self.rnn_states = self.network.init_hidden_states(config=self.config, \n batch_size=self.env_spec.num_env)\n else:\n raise TypeError('the network must be BaseRNN type. 
')", "def train_replay(self):\n\n if len(self.memory) < self.train_start:\n return\n\n if self.epsilon > self.epsilon_end:\n self.epsilon -= self.epsilon_decay_step\n\n mini_batch = random.sample(self.memory, self.batch_size)\n\n history = np.zeros((self.batch_size, self.state_size[0],\n self.state_size[1], self.state_size[2]))\n next_history = np.zeros((self.batch_size, self.state_size[0],\n self.state_size[1], self.state_size[2]))\n\n # Initialize the Value targets to optimize\n v_target = np.zeros((self.batch_size,))\n\n action, reward, dead = [], [], []\n\n for i in range(self.batch_size):\n history[i] = np.float32(mini_batch[i][0] / 255.)\n next_history[i] = np.float32(mini_batch[i][3] / 255.)\n action.append(mini_batch[i][1])\n reward.append(mini_batch[i][2])\n dead.append(mini_batch[i][4])\n\n # current state-action values Q(st, at)\n q_outputs = self.q_duelling_part.predict(history)\n\n # TD-values for updating the networks coming from the target model\n if self.target_model is True:\n v_target_value = self.target_v_duelling_part.predict(next_history)\n elif self.target_model is False:\n v_target_value = self.v_duelling_part.predict(next_history)\n\n q_targets = []\n\n for i in range(self.batch_size):\n if dead[i]:\n v_target[i] = reward[i]\n q_outputs[i][action[i]] = reward[i]\n\n else:\n v_target[i] = reward[i] + \\\n self.discount_factor * v_target_value[i]\n q_outputs[i][action[i]] = reward[i] + \\\n self.discount_factor * v_target_value[i]\n\n q_targets.append(q_outputs[i][action[i]])\n\n self.optimizer([history, action, q_targets]) # optimize the state-action-value head\n self.v_duelling_part.fit(history, v_target, epochs=1, verbose=0) # optimize the state-value head", "def __init__(self, incoming, n_units,\n W_ci=tf.zeros, W_ig=tf.zeros, W_og=tf.zeros, W_fg=tf.zeros,\n b_ci=tf.zeros, b_ig=tf.zeros, b_og=tf.zeros, b_fg=tf.zeros,\n a_ci=tf.tanh, a_ig=tf.sigmoid, a_og=tf.sigmoid, a_fg=tf.sigmoid, a_out=tf.identity,\n c_init=tf.zeros, h_init=tf.zeros, learn_c_init=False, learn_h_init=False, forgetgate=True,\n output_dropout=False, store_states=False, return_states=False, precomp_fwds=False,\n tickerstep_biases=None, learn_tickerstep_biases=True, name='LSTM'):\n super(LSTMLayerGetNetInput, self).__init__()\n with tf.variable_scope(name) as self.layer_scope:\n self.incoming, self.incoming_shape = get_input(incoming)\n self.n_units = n_units\n self.lstm_inlets = ['ci', 'ig', 'og', 'fg']\n if return_states:\n store_states = True\n \n #\n # Initialize weights and biases\n #\n \n # Turn W inits into lists [forward_pass, backward_pass]\n W_ci, W_ig, W_og, W_fg = [v[:2] if isinstance(v, list) else [v, v] for v in [W_ci, W_ig, W_og, W_fg]]\n \n # Make W and b tf variables\n W_ci, W_ig, W_og, W_fg = [\n [tofov(v[0], shape=[self.incoming_shape[-1], n_units], var_params=dict(name=n + '_fwd')),\n tofov(v[1], shape=[n_units, n_units], var_params=dict(name=n + '_bwd'))]\n for v, n in zip([W_ci, W_ig, W_og, W_fg], ['W_ci', 'W_ig', 'W_og', 'W_fg'])]\n b_ci, b_ig, b_og, b_fg = [tofov(v, shape=[n_units], var_params=dict(name=n)) for v, n in\n zip([b_ci, b_ig, b_og, b_fg], ['b_ci', 'b_ig', 'b_og', 'b_fg'])]\n \n # Pack weights for fwd and bwd connections\n W_fwd_conc = tf.concat(axis=1, values=[W[0] for W in [W_ci, W_ig, W_og, W_fg]])\n W_bwd_conc = tf.concat(axis=1, values=[W[1] for W in [W_ci, W_ig, W_og, W_fg]])\n \n if not forgetgate:\n print(\"Warning: Setting forgetgate to 0 has not been tested yet, please set the W and b manually \"\n \"to not-trainable tensorflow variables!\")\n \n def 
a_fg(x):\n return tf.ones(x.get_shape().as_list())\n \n # Initialize bias for tickersteps\n if tickerstep_biases is not None:\n self.W_tickers = OrderedDict(zip_longest(self.lstm_inlets,\n [tofov(tickerstep_biases, shape=[n_units],\n var_params=dict(name='W_tickers_' + g,\n trainable=learn_tickerstep_biases))\n for g in self.lstm_inlets]))\n else:\n self.W_tickers = None\n \n #\n # Create mask for output dropout\n # apply dropout to n_units dimension of outputs, keeping dropout mask the same for all samples,\n # sequence positions, and pixel coordinates\n #\n output_shape = self.get_output_shape()\n if output_dropout:\n out_do_mask = tf.ones(shape=[output_shape[0], output_shape[-1]],\n dtype=tf.float32)\n out_do_mask = tf.nn.dropout(out_do_mask, keep_prob=1. - output_dropout,\n noise_shape=[1, output_shape[-1]])\n \n def out_do(x):\n \"\"\"Function for applying dropout mask to outputs\"\"\"\n if output_dropout:\n return out_do_mask * x\n else:\n return x\n \n # Redefine a_out to include dropout (sneaky, sneaky)\n a_out_nodropout = a_out\n \n def a_out(x):\n return a_out_nodropout(out_do(x))\n \n #\n # Handle initializations for h (hidden states) and c (cell states) as Variable\n #\n h_init = out_do(tofov(h_init, shape=[output_shape[0], output_shape[-1]],\n var_params=dict(name='h_init', trainable=learn_h_init)))\n c_init = tofov(c_init, shape=[output_shape[0], output_shape[-1]],\n var_params=dict(name='h_init', trainable=learn_c_init))\n \n # Initialize lists to store LSTM activations and cell states later\n h = [h_init]\n c = [c_init]\n \n self.precomp_fwds = precomp_fwds\n self.store_states = store_states\n self.return_states = return_states\n \n self.W_fwd = OrderedDict(zip(self.lstm_inlets, [W[0] for W in [W_ci, W_ig, W_og, W_fg]]))\n self.W_bwd = OrderedDict(zip(self.lstm_inlets, [W[1] for W in [W_ci, W_ig, W_og, W_fg]]))\n \n self.W_fwd_conc = W_fwd_conc\n self.W_bwd_conc = W_bwd_conc\n self.a = OrderedDict(zip(self.lstm_inlets, [a_ci, a_ig, a_og, a_fg]))\n self.a['out'] = a_out\n self.b = OrderedDict(zip(self.lstm_inlets, [b_ci, b_ig, b_og, b_fg]))\n self.h = h\n self.c = c\n self.external_rec = None\n \n self.ig = []\n self.og = []\n self.ci = []\n self.fg = []\n \n self.out = tf.expand_dims(h_init, 1)\n self.name = name\n \n self.cur_net_fwd = dot_product(tf.zeros(self.incoming_shape[:1] + self.incoming_shape[2:]),\n tf.zeros(self.W_fwd_conc.shape.as_list()))", "def add_dense_layer(self):\n output = self.cnn_layer\n # weight_shape is dimension 1 of the first weights in the dense layer. 
We\n # update it accordingly as we keep adding intermediate layers\n weight_shape = self.out_channels_2\n if self.hybrid:\n with tf.variable_scope(\"lstm_layer\"):\n print(\"adding Recurrent layer\")\n if self.rnn:\n print(\"Adding Basic RNN cell\")\n cell_fw = tf.contrib.rnn.BasicRNNCell(self.lstm_size)\n cell_bw = tf.contrib.rnn.BasicRNNCell(self.lstm_size)\n elif self.gru:\n print(\"Adding GRU cell\")\n cell_fw = tf.contrib.rnn.GRUCell(self.lstm_size)\n cell_bw = tf.contrib.rnn.GRUCell(self.lstm_size)\n elif self.elman:\n print(\"Adding ELMAN cell\")\n cell_fw = ElmanRNNCell(self.lstm_size)\n cell_bw = ElmanRNNCell(self.lstm_size)\n else:\n print(\"Adding LSTM cell\")\n cell_fw = tf.contrib.rnn.LSTMCell(self.lstm_size)\n cell_bw = tf.contrib.rnn.LSTMCell(self.lstm_size)\n if self.use_window_rnn:\n cnn_values = self.cnn_values\n print(\"using window_rnn\")\n else:\n print(\"not using window_rnn\")\n cnn_values = self.cnn_layer\n (output_fw, output_bw), _ = tf.nn.bidirectional_dynamic_rnn(\n cell_fw, cell_bw, cnn_values,\n sequence_length=self.sequence_lengths, dtype=tf.float32\n )\n output = tf.concat([output_fw, output_bw], axis=-1)\n output = tf.nn.dropout(output, self.dropout_holder)\n self.word_reps = output\n weight_shape = 2*self.lstm_size\n with tf.variable_scope(\"dense_layer\"):\n print(\"adding dense layer\")\n # weight_shape_2 is dimension 2 of the first weights in the dense layer.\n # we update it accordingly if we add more layers at the end\n weight_shape_2 = self.tag_count\n if self.pos and (self.single_ann or self.pos_ann):\n # this if block is for the cases when we have to concatenate the output\n # from prevoius layer to pos_embeddings\n print(\"Concatenating pos vecs with lstm/cnn output\")\n output = tf.concat([output, self.pos_vecs], axis=-1)\n weight_shape = weight_shape + self.pos_embedding_size\n output = tf.reshape(output, [-1, weight_shape])\n if self.single_ann:\n # This means we will have a hidden layer i.e two weights.\n print(\"Setting up hidden layer for single ann\")\n weight_shape_2 = self.ann_size\n if self.pos and not self.concat_pos and self.bi_ann:\n # this if block si for the case when we want to implement a bi-partite\n # ann, as such we will get weights for pos embeddings\n print(\"setting up network for bi ann..\")\n weight_shape_2 = self.ann_size\n w_pos = tf.get_variable(\"w_pos\", dtype=tf.float32,\n shape=[self.pos_embedding_size,\n self.pos_ann_count])\n b_pos = tf.get_variable(\"b_pos\", dtype=tf.float32,\n shape=[self.pos_ann_count],\n initializer=tf.zeros_initializer())\n output_pos = tf.reshape(self.pos_vecs, [-1, self.pos_embedding_size])\n pred_pos = tf.matmul(output_pos, w_pos) + b_pos\n output = tf.reshape(output, [-1, weight_shape])\n if len(output.shape) == 3:\n print(\"Setting up a network with no ann, only a fully connected dense \"\n \"layer\")\n # we have to reshape our output so that we can multiply it with weights.\n # if we reach here, it means we haven't reshaped it properly.\n output = tf.reshape(output, [-1, weight_shape])\n w1 = tf.get_variable(\"w1\", dtype=tf.float32,\n shape=[weight_shape, weight_shape_2])\n b1 = tf.get_variable(\"b1\", dtype=tf.float32, shape=[weight_shape_2],\n initializer=tf.zeros_initializer())\n pred = tf.matmul(output, w1) + b1\n # the pred above has weight_shape_2 as it's second dimension. 
our vars\n # have been defined in such a way that weight_shape_2 = self.tag_count if\n # we don't have to care about adding another layer.\n if self.pos and not self.concat_pos and self.bi_ann:\n # This block deals with concatenating output of pos hidden layer to\n # pred. We change weight_shape_2 accordingly.\n print(\"setting up network for bi ann..\")\n pred = tf.concat([pred, pred_pos], axis=-1)\n weight_shape_2 = weight_shape_2 + self.pos_ann_count\n\n if self.pos and (self.single_ann or self.bi_ann):\n # now, these are our final sets of weights in case we have an ann. so\n # the second dimension of these weights is self.tag_count!\n print(\"setting up network for ann\")\n w2 = tf.get_variable(\"w2\", dtype=tf.float32, shape=[weight_shape_2,\n self.tag_count])\n b2 = tf.get_variable(\"b2\", shape=[self.tag_count], dtype=tf.float32,\n initializer=tf.zeros_initializer())\n pred = tf.matmul(pred, w2) + b2\n self.logits = tf.reshape(pred, [-1, self.max_len, self.tag_count])", "def predict_next_state(self, state, action, stepsize, tau, a_previous):\r\n distance = state[1]\r\n delta_v = state[2]\r\n a_real = (action + tau * a_previous / stepsize) / (1 + tau/stepsize) # a_real to replace with more detailed longitudinal dynamics model\r\n distance_next = distance + stepsize * delta_v + (stepsize^2)/2 * a_real - (stepsize^2)/2 - (stepsize^2)/2 * a_prec # a_prec to replace with NN\r\n delta_v_next = delta_v + stepsize*(a_real - a_prec)\r\n return distance_next, delta_v_next", "def MakeFancyRNNCell(hidden_units, is_train, keep_prob, num_layers=1):\n cell = tf.nn.rnn_cell.BasicLSTMCell(hidden_units, forget_bias=0.0,state_is_tuple=True)\n if is_train:\n cell = tf.nn.rnn_cell.DropoutWrapper(cell, input_keep_prob=keep_prob, output_keep_prob=keep_prob)\n cell = tf.nn.rnn_cell.MultiRNNCell([cell] * num_layers, state_is_tuple=True)\n return cell", "def train(self, step):\n if len(self.memory) <= 1:\n return\n\n # Update target network\n if self.target_update >= 1 and step % self.target_update == 0:\n # Perform a hard update\n for v_main, v_targ in zip(self.model.trainable_variables, self.target_model.trainable_variables):\n tf.compat.v1.assign(v_targ, v_main)\n elif self.target_update < 1:\n # Perform a soft update\n for v_main, v_targ in zip(self.model.trainable_variables, self.target_model.trainable_variables):\n tf.compat.v1.assign(v_targ, self.target_update * v_targ + (1 - self.target_update) * v_main)\n\n # Train even when memory has fewer than the specified batch_size\n batch_size = min(len(self.memory), self.batch_size)\n\n # Sample batch_size traces from memory\n state_batch, action_batch, reward_batches, end_state_batch, not_done_mask = self.memory.get(batch_size)\n\n # Compute the value of the last next states\n target_qvals = np.zeros(batch_size)\n non_final_last_next_states = [es for es in end_state_batch if es is not None]\n\n if len(non_final_last_next_states) > 0:\n if self.enable_double_dqn:\n # \"Deep Reinforcement Learning with Double Q-learning\" (van Hasselt et al., 2015)\n # The online network predicts the actions while the target network is used to estimate the Q-values\n values, best_angles, best_pixels = self.best_q_value(np.array(non_final_last_next_states))\n # q_values = self.model.predict_on_batch(np.array(non_final_last_next_states))\n # actions = np.argmax(q_values, axis=1)\n # Estimate Q-values using the target network but select the values with the\n # highest Q-value wrt to the online model (as computed above).\n # target_q_values = 
self.target_model.predict_on_batch(np.array(non_final_last_next_states))\n selected_target_q_vals = self.specified_q_value(self.target_model, np.array(non_final_last_next_states),\n best_angles, best_pixels)\n # selected_target_q_vals = target_q_values[range(len(target_q_values)), actions]\n else:\n # Use delayed target network to compute target Q-values\n selected_target_q_vals = self.target_model.predict_on_batch(np.array(non_final_last_next_states)).max(1)\n non_final_mask = list(map(lambda s: s is not None, end_state_batch))\n target_qvals[non_final_mask] = selected_target_q_vals\n\n # Compute n-step discounted return\n # If episode ended within any sampled nstep trace - zero out remaining rewards\n for n in reversed(range(self.nsteps)):\n rewards = np.array([b[n] for b in reward_batches])\n target_qvals *= np.array([t[n] for t in not_done_mask])\n target_qvals = rewards + (self.gamma * target_qvals)\n\n # Compile information needed by the custom loss function\n # If using PrioritizedExperienceReplay then we need to provide the trace indexes\n # to the loss function as well so we can update the priorities of the traces\n # Train model\n target_qvals = target_qvals.astype(np.float32)\n state_batch = np.array(state_batch).astype(np.float32)\n\n with tf.GradientTape() as tape:\n angles = np.array(action_batch)[:, 0]\n pixel_indices = np.array(action_batch)[:, 1]\n qvals = self.specified_q_value(self.model, state_batch, angles, pixel_indices)\n loss = tf.keras.losses.mse(qvals, target_qvals)\n print('loss', loss)\n gradients = tape.gradient(loss, self.model.trainable_variables)\n self.optimizer.apply_gradients(zip(gradients, self.model.trainable_variables))\n if isinstance(self.memory, memory.PrioritizedExperienceReplay):\n td_error = np.abs((target_qvals - qvals).numpy())\n traces_idxs = self.memory.last_traces_idxs()\n self.memory.update_priorities(traces_idxs, td_error)", "def __init__(self, name, params, load_model):\n\n super(MDN_RNN, self).__init__(name, params, load_model)\n self.name = name\n self.type = 'MDN RNN'\n if load_model != False:\n self.load_model(load_model)\n else:\n self.params = params\n self.z_size = self.params['z_size']\n self.action_size = self.params['action_size']\n self.hidden_size = self.params['hidden_size']\n self.gaussian_size = self.params['gaussian_size']\n self.stacked_layers = self.params['stacked_layers']\n self.learning_rate = self.params['learning_rate']\n self.grad_clip = self.params['grad_clip']\n self.batch_size = self.params['batch_size']\n self.device = self.get_device()\n \n self.lstm = nn.LSTM(self.z_size + self.action_size, self.hidden_size, self.stacked_layers, batch_first = True)\n self.fc1 = nn.Linear(self.hidden_size, self.gaussian_size * self.z_size)\n self.fc2 = nn.Linear(self.hidden_size, self.gaussian_size * self.z_size)\n self.fc3 = nn.Linear(self.hidden_size, self.gaussian_size * self.z_size)\n \n if load_model != False:\n self.load_state_dict(self.weights)\n \n print(self, \"\\n\\n\")", "def demo_rbm_tutorial(\n eta = 0.01,\n n_hidden = 500,\n n_samples = None,\n minibatch_size = 10,\n plot_interval = 10,\n w_init_mag = 0.01,\n n_epochs = 1,\n persistent = False,\n seed = None\n ):\n if is_test_mode():\n n_samples=50\n n_epochs=1\n plot_interval=50\n n_hidden = 10\n\n data = get_mnist_dataset(flat = True).training_set.input[:n_samples]\n n_visible = data.shape[1]\n rng = np.random.RandomState(seed)\n activation = lambda x: (1./(1+np.exp(-x)) > rng.rand(*x.shape)).astype(float)\n\n w = w_init_mag*np.random.randn(n_visible, 
n_hidden)\n b_hid = np.zeros(n_hidden)\n b_vis = np.zeros(n_visible)\n\n if persistent:\n hid_sleep_state = np.random.rand(minibatch_size, n_hidden)\n\n for i, vis_wake_state in enumerate(minibatch_iterate(data, n_epochs = n_epochs, minibatch_size=minibatch_size)):\n hid_wake_state = activation(vis_wake_state.dot(w)+b_hid)\n if not persistent:\n hid_sleep_state = hid_wake_state\n vis_sleep_state = activation(hid_sleep_state.dot(w.T)+b_vis)\n hid_sleep_state = activation(vis_sleep_state.dot(w)+b_hid)\n\n # Update Parameters\n w_grad = (vis_wake_state.T.dot(hid_wake_state) - vis_sleep_state.T.dot(hid_sleep_state))/float(minibatch_size)\n w += w_grad * eta\n b_vis_grad = np.mean(vis_wake_state, axis = 0) - np.mean(vis_sleep_state, axis = 0)\n b_vis += b_vis_grad * eta\n b_hid_grad = np.mean(hid_wake_state, axis = 0) - np.mean(hid_sleep_state, axis = 0)\n b_hid += b_hid_grad * eta\n\n if i % plot_interval == 0:\n dbplot(w.T[:100].reshape(-1, 28, 28), 'weights')\n dbplot(vis_sleep_state.reshape(-1, 28, 28), 'dreams')\n print 'Sample %s' % i", "def __init__(self, num_actions=2, state=None): # device=torch.device(\"cpu\")):\r\n super(DRQN, self).__init__()\r\n # self.device = device\r\n self.num_actions = num_actions\r\n\r\n self.input = Input(shape=(1, 4))\r\n self.lstm1 = LSTM(128, input_shape=(256, 32, 4), return_sequences=True)(self.input)\r\n self.lstm2 = LSTM(128, return_sequences=True)(self.lstm1)\r\n self.lstm3 = LSTM(128, return_sequences=True)(self.lstm2)\r\n self.dense1 = Dense(128, activation='relu')(self.lstm3)\r\n self.output = Dense(2, activation='linear')(self.dense1)\r\n self.state = state\r\n self.model = Model(inputs=self.input, outputs=self.output)\r\n # self.conv1 = nn.Conv2d(in_channels, 32, kernel_size=8, stride=4)\r\n # self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)\r\n # self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)\r\n # self.fc4 = nn.Linear(7 * 7 * 64, 512)\r\n # self.gru = nn.GRU(512, num_actions, batch_first=True) # input shape (batch, seq, feature)\r", "def __init__(self, state_size, action_size, seed):\n super(QNetwork, self).__init__()\n self.seed = torch.manual_seed(seed)\n \"*** YOUR CODE HERE ***\"\n hidden_layers = [64,64]\n drop_p = 0.5\n #add the first hidden layer\n self.hidden_layers = nn.ModuleList([nn.Linear(state_size, hidden_layers[0])])\n\n #Add a variable number of hidden layers\n layer_sizes = zip(hidden_layers[:-1], hidden_layers[1:])\n self.hidden_layers.extend([nn.Linear(h1,h2) for h1,h2 in layer_sizes])\n\n self.output = nn.Linear(hidden_layers[-1], action_size)\n #self.dropout = nn.Dropout(p=drop_p)", "def run_episode(self, environment):\n state = environment.reset()\n self.steps_done = 0\n while True:\n state_tensor = FloatTensor([state])\n position = self.Q.sample_from_softmax_policy(state_tensor)\n action = position + 1\n next_state, reward, done, _ = environment.step(position.item())\n self.memory.push((state_tensor, action,))\n self.learn(state_tensor, action, next_state, reward)\n state = next_state\n self.steps_done += 1\n if done:\n break\n history = environment.close()\n return history", "def forward(self, x, h):\n self.hidden = h\n hnext= self.rnn.forward(x,h)\n logits = self.projection.forward(hnext)\n \n \n return logits, hnext", "def eval_epoch(self, epoch_num: int) -> float:\n epoch_loss = 0.0\n # hidden_start = torch.zeros(self.batch_size, self.rnn_size).to(device)\n with torch.no_grad():\n # for batch_num, (x, y) in enumerate(make_batches(self.dev_data,\n # self.batch_size,\n # self.max_len)):\n acc = 0;\n for 
batch_num, batch_tuple in enumerate(self.train_data):\n print('batch: ', batch_num)\n # reset gradients\n # self.optimizer.zero_grad()\n # x = len(batch_tuple[0])\n # y = len(batch_tuple[0][0])\n # batch x timesteps x hidden_size\n x, y = batch_tuple\n # x = x.to(self.device)\n # y = y.to(self.device)\n hidden_states = self.model(x)\n # batch x timesteps x vocab_size\n # logits = self.model.get_logits(hidden_states)\n\n batch_loss = self.model.get_loss(hidden_states, y)\n epoch_loss += batch_loss.item()\n hidden_states_m = torch.argmax(hidden_states, dim=1)\n acc += sum(hidden_states_m == y).item()\n # we use a stateful RNN, which means the first hidden state for\n # the next batch is the last hidden state of the current batch\n # hidden_states.detach_()\n # hidden_start = hidden_states[:,-1,:]\n\n epoch_loss /= (batch_num + 1)\n\n return epoch_loss, acc", "def train_nn_sequence(sess, epochs, nn_last_layer, hidden_state, carry_state, batch_size, data_loader, accuracy_op, train_op, loss_function, input_tensor,\n truth_tensor, initial_hidden_state, initial_carry_state, learning_rate, base_learning_rate,\n learning_decay_rate, learning_decay_factor):\n #initialize variables\n sess.run(tf.global_variables_initializer())\n \n print(\"Training...\")\n print()\n scaling_rate = 1\n \n loss_output = 0\n for i in range(epochs):\n loss_output = 0\n print(\"EPOCH {} ...\".format(i+1))\n if i%learning_decay_rate == 0 and i != 0:\n scaling_rate = learning_decay_factor * scaling_rate\n j = 0\n sum_accuracy = 0\n sum_loss = 0\n for image, output, batch_i_size in data_loader.get_train_batches_fn_timeseries_sequence(batch_size):\n initial_state_value = np.zeros(shape=(batch_i_size, 29, 39, 20), dtype=float)\n \n nn_output, lstm_hidden_state, lstm_carry_state, optimizer, loss = sess.run([nn_last_layer, hidden_state, carry_state, train_op, loss_function], \n feed_dict={input_tensor: image, truth_tensor: output, initial_hidden_state: initial_state_value, initial_carry_state: initial_state_value, learning_rate: scaling_rate*base_learning_rate})\n \n \n #print(np.shape(lstm_hidden_state))\n #print(np.shape(lstm_carry_state))\n \n accuracy, loss_output = sess.run([accuracy_op, loss_function], feed_dict={input_tensor: image, truth_tensor: output, initial_hidden_state: initial_state_value, initial_carry_state: initial_state_value})\n \n #print(np.shape(loss_output))\n sum_accuracy = sum_accuracy + accuracy\n sum_loss = sum_loss + loss_output\n j = j+1\n\n valid_x, valid_y, valid_size = data_loader.get_validation_data_sequence()\n initial_state_value = np.zeros(shape=(valid_size, 29, 39, 20), dtype=float)\n valid_accuracy = sess.run([accuracy_op],\n feed_dict={input_tensor: valid_x, truth_tensor: valid_y, initial_hidden_state: initial_state_value, initial_carry_state: initial_state_value})\n print(\"Loss {} ...\".format(sum_loss/j))\n print(\"Train Accuracy {} ...\".format(sum_accuracy/j))\n print(\"Validation Accuracy {} ...\".format(valid_accuracy))", "def build_nn(cell, rnn_size, input_data, vocab_size, embed_dim):\n embed = get_embed(input_data, vocab_size, embed_dim) \n output, final_state = build_rnn(cell, embed)\n \n logits = tf.contrib.layers.fully_connected(output, vocab_size, activation_fn=None)\n #final_state = tf.identity(final_state, name='final_state') \n return logits, final_state", "def LSTM_train(X_train, Y_train, X_dev, Y_dev, R_train, R_dev, hyperparams):", "def forward_rnn(self, inputs: TensorType, state: List[TensorType],\n seq_lens: TensorType) -> (TensorType, List[TensorType]):\n raise 
NotImplementedError(\"You must implement this for a RNN model\")", "def __init__(self, incoming, n_units,\n W_ci=tf.zeros, W_ig=tf.zeros, W_og=tf.zeros, W_fg=tf.zeros,\n b_ci=tf.zeros, b_ig=tf.zeros, b_og=tf.zeros, b_fg=tf.zeros,\n a_ci=tf.tanh, a_ig=tf.sigmoid, a_og=tf.sigmoid, a_fg=tf.sigmoid, a_out=tf.identity,\n c_init=tf.zeros, h_init=tf.zeros, learn_c_init=False, learn_h_init=False, forgetgate=True,\n output_dropout=False, store_states=False, return_states=False, precomp_fwds=False,\n tickerstep_biases=None, learn_tickerstep_biases=True, name='LSTM'):\n super(LSTMLayer, self).__init__()\n with tf.variable_scope(name) as self.layer_scope:\n self.incoming, self.incoming_shape = get_input(incoming)\n self.n_units = n_units\n self.lstm_inlets = ['ci', 'ig', 'og', 'fg']\n if return_states:\n store_states = True\n \n #\n # Initialize weights and biases\n #\n \n # Turn W inits into lists [forward_pass, backward_pass]\n W_ci, W_ig, W_og, W_fg = [v[:2] if isinstance(v, list) else [v, v] for v in [W_ci, W_ig, W_og, W_fg]]\n \n # Make W and b tf variables\n W_ci, W_ig, W_og, W_fg = [\n [tofov(v[0], shape=[self.incoming_shape[-1], n_units], var_params=dict(name=n + '_fwd')),\n tofov(v[1], shape=[n_units, n_units], var_params=dict(name=n + '_bwd'))]\n for v, n in zip([W_ci, W_ig, W_og, W_fg], ['W_ci', 'W_ig', 'W_og', 'W_fg'])]\n b_ci, b_ig, b_og, b_fg = [tofov(v, shape=[n_units], var_params=dict(name=n)) for v, n in\n zip([b_ci, b_ig, b_og, b_fg], ['b_ci', 'b_ig', 'b_og', 'b_fg'])]\n \n # Pack weights for fwd and bwd connections\n W_fwd_conc = tf.concat(axis=1, values=[W[0] for W in [W_ci, W_ig, W_og, W_fg]])\n W_bwd_conc = tf.concat(axis=1, values=[W[1] for W in [W_ci, W_ig, W_og, W_fg]])\n \n if not forgetgate:\n print(\"Warning: Setting forgetgate to 0 has not been tested yet, please set the W and b manually \"\n \"to not-trainable tensorflow variables!\")\n \n def a_fg(x):\n return tf.ones(x.get_shape().as_list())\n \n # Initialize bias for tickersteps\n if tickerstep_biases is not None:\n self.W_tickers = OrderedDict(zip_longest(self.lstm_inlets,\n [tofov(tickerstep_biases, shape=[n_units],\n var_params=dict(name='W_tickers_' + g,\n trainable=learn_tickerstep_biases))\n for g in self.lstm_inlets]))\n else:\n self.W_tickers = None\n \n #\n # Create mask for output dropout\n # apply dropout to n_units dimension of outputs, keeping dropout mask the same for all samples,\n # sequence positions, and pixel coordinates\n #\n output_shape = self.get_output_shape()\n if output_dropout:\n out_do_mask = tf.ones(shape=[output_shape[0], output_shape[-1]],\n dtype=tf.float32)\n out_do_mask = tf.nn.dropout(out_do_mask, keep_prob=1. 
- output_dropout,\n noise_shape=[1, output_shape[-1]])\n \n def out_do(x):\n \"\"\"Function for applying dropout mask to outputs\"\"\"\n if output_dropout:\n return out_do_mask * x\n else:\n return x\n \n # Redefine a_out to include dropout (sneaky, sneaky)\n a_out_nodropout = a_out\n \n def a_out(x):\n return a_out_nodropout(out_do(x))\n \n #\n # Handle initializations for h (hidden states) and c (cell states) as Variable\n #\n h_init = out_do(tofov(h_init, shape=[output_shape[0], output_shape[-1]],\n var_params=dict(name='h_init', trainable=learn_h_init)))\n c_init = tofov(c_init, shape=[output_shape[0], output_shape[-1]],\n var_params=dict(name='h_init', trainable=learn_c_init))\n \n # Initialize lists to store LSTM activations and cell states later\n h = [h_init]\n c = [c_init]\n \n self.precomp_fwds = precomp_fwds\n self.store_states = store_states\n self.return_states = return_states\n \n self.W_fwd = OrderedDict(zip(self.lstm_inlets, [W[0] for W in [W_ci, W_ig, W_og, W_fg]]))\n self.W_bwd = OrderedDict(zip(self.lstm_inlets, [W[1] for W in [W_ci, W_ig, W_og, W_fg]]))\n \n self.W_fwd_conc = W_fwd_conc\n self.W_bwd_conc = W_bwd_conc\n self.a = OrderedDict(zip(self.lstm_inlets, [a_ci, a_ig, a_og, a_fg]))\n self.a['out'] = a_out\n self.b = OrderedDict(zip(self.lstm_inlets, [b_ci, b_ig, b_og, b_fg]))\n self.h = h\n self.c = c\n self.external_rec = None\n \n self.ig = []\n self.og = []\n self.ci = []\n self.fg = []\n \n self.out = tf.expand_dims(h_init, 1)\n self.name = name", "def __init__(self,\n input_idx: np.ndarray, hidden_idx: np.ndarray, rnn_idx: np.ndarray, output_idx: np.ndarray,\n in2hid: tuple, in2out: tuple,\n hid2hid: tuple, hid2out: tuple,\n hidden_biases: np.ndarray, output_biases: np.ndarray,\n rnn_array: np.ndarray, rnn_map: np.ndarray,\n activation,\n batch_size: int = 1,\n initial_read: np.ndarray = None,\n dtype=np.float64,\n ):\n # Storing the input arguments (needed later on)\n self.act_f = activation\n self.dtype = dtype\n self.rnn_idx: np.ndarray = rnn_idx\n self.n_inputs: int = len(input_idx)\n self.n_hidden: int = len(hidden_idx)\n self.n_rnn: int = len(rnn_idx)\n self.n_outputs: int = len(output_idx)\n self.bs: int = batch_size\n \n # Setup the rnn_map\n rnn_map_temp = [] # Numpy-append has unwanted behaviour\n for i, m in enumerate(rnn_map):\n rnn_map_temp.append(np.tile(rnn_map[i], (batch_size, 1)))\n self.rnn_map: np.ndarray = np.asarray(rnn_map_temp, dtype=bool)\n \n # Placeholders, initialized during reset\n self.rnn_state: np.ndarray = None # State of the RNNs\n self.hidden_act: np.ndarray = None # Activations of the hidden nodes\n self.output_act: np.ndarray = None # Activations of the output nodes\n \n # Do not create the hidden-related matrices if hidden-nodes do not exist\n # If they do not exist, a single matrix directly mapping inputs to outputs is only used\n if self.n_hidden > 0:\n self.in2hid: np.ndarray = dense_from_coo((self.n_hidden, self.n_inputs), in2hid, dtype=dtype)\n self.hid2hid: np.ndarray = dense_from_coo((self.n_hidden, self.n_hidden), hid2hid, dtype=dtype)\n self.hid2out: np.ndarray = dense_from_coo((self.n_outputs, self.n_hidden), hid2out, dtype=dtype)\n self.rnn_array: np.ndarray = rnn_array\n self.in2out: np.ndarray = dense_from_coo((self.n_outputs, self.n_inputs), in2out, dtype=dtype)\n \n # Fill in the biases\n if self.n_hidden > 0:\n self.hidden_biases: np.ndarray = np.asarray(hidden_biases, dtype=dtype)\n self.output_biases: np.ndarray = np.asarray(output_biases, dtype=dtype)\n \n # Put network to initial (default) state\n 
self.reset(initial_read=initial_read)", "def run_rnn(rnn_for_scan, x_t, h0):\n _, h_t = lax.scan(rnn_for_scan, h0, x_t)\n return h_t", "def build_rnn_greedy(self):\n print(\"Building the RNN part...\")\n params = self.params\n\n contexts = self.conv_feats\n\n sentences = tf.placeholder(tf.int32, [self.batch_size, self.max_sent_len])\n masks = tf.placeholder(tf.float32, [self.batch_size, self.max_sent_len]) \n weights = tf.placeholder(tf.float32, [self.batch_size, self.max_sent_len]) \n\n # initialize the word embedding\n idx2vec = np.array([self.word_table.word2vec[self.word_table.idx2word[i]] \n for i in range(self.num_words)])\n emb_w = weight('emb_weights', [self.num_words, self.dim_embed], init_val=idx2vec)\n\n # initialize the decoding layer\n dec_w = weight('dec_weights', [self.dim_dec, self.num_words]) \n if self.init_dec_bias: \n dec_b = bias('dec_biases', [self.num_words], init_val=self.word_table.word_freq)\n else:\n dec_b = bias('dec_biases', [self.num_words], init_val=0.0)\n \n # compute the mean context\n context_mean = tf.reduce_mean(contexts, 1)\n \n # initialize the LSTM\n lstm = tf.nn.rnn_cell.LSTMCell(self.dim_hidden, initializer=tf.random_normal_initializer(stddev=0.033)) \n lstm = tf.nn.rnn_cell.DropoutWrapper(lstm, self.lstm_keep_prob, self.lstm_keep_prob, self.lstm_keep_prob)\n\n memory, output = self.init_lstm(context_mean)\n state = memory, output\n\n cross_entropy_loss = 0.0\n results = []\n scores = []\n\n alphas = [] \n cross_entropies = []\n num_correct_words = 0.0\n\n # Generate the words one by one \n for idx in range(self.max_sent_len):\n\n # Attention mechanism\n alpha = self.attend(contexts, output) \n \n masked_alpha = alpha * tf.tile(tf.expand_dims(masks[:, idx], 1), [1, self.num_ctx]) \n alphas.append(tf.reshape(masked_alpha, [-1])) \n\n if idx == 0: \n word_emb = tf.zeros([self.batch_size, self.dim_embed])\n weighted_context = tf.identity(context_mean)\n else:\n word_emb = tf.cond(self.is_train, \n lambda: tf.nn.embedding_lookup(emb_w, sentences[:, idx-1]), \n lambda: word_emb)\n weighted_context = tf.reduce_sum(contexts * tf.expand_dims(alpha, 2), 1)\n \n # Apply the LSTM\n with tf.variable_scope(\"LSTM\"):\n output, state = lstm(tf.concat([weighted_context, word_emb], 1), state)\n \n # Compute the logits\n expanded_output = tf.concat([output, weighted_context, word_emb], 1)\n\n logits1 = fully_connected(expanded_output, self.dim_dec, 'dec_fc')\n logits1 = nonlinear(logits1, 'tanh')\n logits1 = dropout(logits1, self.fc_keep_prob, self.is_train)\n\n logits2 = tf.nn.xw_plus_b(logits1, dec_w, dec_b)\n\n # Update the loss\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=sentences[:, idx], \n logits=logits2)\n masked_cross_entropy = cross_entropy * masks[:, idx]\n cross_entropy_loss += tf.reduce_sum(masked_cross_entropy*weights[:, idx])\n cross_entropies.append(masked_cross_entropy)\n\n # Update the result\n max_prob_word = tf.argmax(logits2, 1)\n results.append(max_prob_word)\n\n is_word_correct = tf.where(tf.equal(max_prob_word, tf.cast(sentences[:, idx], tf.int64)), \n tf.cast(masks[:, idx], tf.float32), \n tf.cast(tf.zeros_like(max_prob_word), tf.float32))\n num_correct_words += tf.reduce_sum(is_word_correct) \n\n probs = tf.nn.softmax(logits2) \n score = tf.log(tf.reduce_max(probs, 1)) \n scores.append(score) \n \n # Prepare for the next iteration\n word_emb = tf.cond(self.is_train, lambda: word_emb, lambda: tf.nn.embedding_lookup(emb_w, max_prob_word)) \n tf.get_variable_scope().reuse_variables() \n\n # Get the final result\n 
results = tf.stack(results, axis=1)\n scores = tf.stack(scores, axis=1)\n\n alphas = tf.stack(alphas, axis=1)\n alphas = tf.reshape(alphas, [self.batch_size, self.num_ctx, -1])\n sum_alpha = tf.reduce_sum(alphas, axis=2)\n\n cross_entropies = tf.stack(cross_entropies, axis=1) \n num_correct_words = num_correct_words / tf.reduce_sum(masks)\n\n # Compute the final loss \n cross_entropy_loss = cross_entropy_loss / tf.reduce_sum(masks*weights)\n\n avg_alpha = tf.reduce_sum(masks, axis=1) / self.num_ctx\n small_alpha_diff = tf.nn.relu(tf.tile(tf.expand_dims(avg_alpha*0.6, 1), [1, self.num_ctx])-sum_alpha)\n large_alpha_diff = tf.nn.relu(sum_alpha-tf.tile(tf.expand_dims(avg_alpha*6, 1), [1, self.num_ctx]))\n attention_loss = tf.nn.l2_loss(small_alpha_diff) + tf.nn.l2_loss(large_alpha_diff) \n attention_loss = params.att_coeff * attention_loss / self.batch_size \n\n if self.train_cnn:\n g_vars = tf.trainable_variables()\n else:\n g_vars = [tf_var for tf_var in tf.trainable_variables() if \"CNN\" not in tf_var.name]\n\n l2_loss = params.weight_decay * sum(tf.nn.l2_loss(tf_var) for tf_var in g_vars \n if (\"bias\" not in tf_var.name and\n \"offset\" not in tf_var.name and \n \"scale\" not in tf_var.name)) \n\n loss = cross_entropy_loss + attention_loss + l2_loss\n\n # Build the solver \n with tf.variable_scope(\"Solver\", reuse=tf.AUTO_REUSE):\n learning_rate = tf.train.exponential_decay(params.learning_rate, \n self.global_step,\n 10000, \n 0.9, \n staircase=True)\n\n if params.solver==\"momentum\":\n solver = tf.train.MomentumOptimizer(learning_rate, params.momentum)\n elif params.solver==\"rmsprop\":\n solver = tf.train.RMSPropOptimizer(learning_rate, params.decay, params.momentum)\n else:\n solver = tf.train.GradientDescentOptimizer(learning_rate)\n\n gs = tf.gradients(loss, g_vars)\n gs, _ = tf.clip_by_global_norm(gs, 10.0)\n opt_op = solver.apply_gradients(zip(gs, g_vars), global_step=self.global_step)\n\n self.sentences = sentences\n self.masks = masks\n self.weights = weights\n\n self.results = results\n self.scores = scores\n self.alphas = alphas\n\n self.sum_alpha = sum_alpha\n self.cross_entropies = cross_entropies\n self.num_correct_words = num_correct_words\n\n self.loss = loss\n self.cross_entropy_loss = cross_entropy_loss\n self.attention_loss = attention_loss\n self.l2_loss = l2_loss\n\n self.opt_op = opt_op\n self.g_vars = g_vars\n self.gs = gs\n \n print(\"RNN part built.\")", "def __init__(self, num_policies: int, num_layers: int, \n hidden_dim: int, learning_rate: float, repetitions: int):\n super().__init__()\n\n self.input_size = num_policies * 2\n self.num_layers = num_layers\n self.hidden_dim = hidden_dim\n self.output_size = num_policies\n self.rnn = nn.RNN(self.input_size, self.hidden_dim, \n self.num_layers, batch_first=True) \n self.fc = nn.Linear(self.hidden_dim, self.output_size)\n self.optimizer = torch.optim.Adam(self.parameters(), \n lr=learning_rate)\n self.repetitions = repetitions", "def __init__(self,feature,hidden_unit, D_in, D_out):\n super(cnn_lstm, self).__init__()\n self.model_ft = models.alexnet(pretrained=True)\n # print (model_ft)\n\n self.num_ftrs = self.model_ft.classifier[6].in_features\n self.feature_model = list(self.model_ft.classifier.children())\n self.feature_model.pop()\n self.feature_model.pop()\n # feature_model.append(nn.Linear(num_ftrs, 3))\n self.feature_model.append(nn.Linear(self.num_ftrs, 1046))\n # self.feature_model.append(nn.Linear(self.num_ftrs, 524))\n self.feature_model.append(nn.Linear(1046, 100))\n # 
self.feature_model.append(nn.Linear(524, 100))\n\n self.model_ft.classifier = nn.Sequential(*self.feature_model)\n\n self.rnn = nn.LSTM(feature,hidden_unit,batch_first=True).cuda()\n self.linear = torch.nn.Linear(D_in, D_out).cuda()", "def __init__(self, hidden_size, num_step=2000, print_interval=100, af=relu, lr=0.1):\n np.random.seed(3023)\n self.num_step = num_step\n self.print_interval = print_interval\n self.af = af # activation function\n self.lr = lr # learning rate\n self.accuracy_box = []\n\n # Model parameters initialization\n # Please initiate your network parameters here.\n self.hidden1_weights = np.random.randn(2, hidden_size) # The dimension of the dataset is 2, so the initial dimension of the hidden weights is 2 x hidden size\n self.hidden2_weights = np.random.randn(hidden_size, hidden_size)\n self.output3_weights = np.random.randn(hidden_size, 1) # output is binary, so the output size is 1", "def test_simple_rnn_keras_single_timestep_with_placeholder_input(self):\n\n tf.compat.v1.reset_default_graph()\n sess = tf.compat.v1.Session()\n\n with sess.graph.as_default():\n X_batch = np.random.random([32, 1, 8]).astype(np.float32)\n X = tf.compat.v1.placeholder(tf.float32, [32, 1, 8], name='input')\n simple_rnn = tf.keras.layers.SimpleRNN(1)\n output = simple_rnn(X)\n _ = tf.keras.layers.Dense(12, activation=tf.nn.softmax,\n name=\"matmul0\")(output)\n\n init = tf.compat.v1.global_variables_initializer()\n sess.run(init)\n # _ = tf.compat.v1.summary.FileWriter('./test_simple_rnn_keras_single_timestep_with_placeholder_input', sess.graph)\n sess.run(output, feed_dict={X: X_batch})\n\n # construct a connected graph\n conn_graph = ConnectedGraph(sess.graph, ['input'], ['matmul0/Softmax'])\n # there should be only 4 connected graph ops, input, simpleRNN , Dense and Softmax\n self.assertEqual(4, len(conn_graph.get_all_ops()))\n simple_rnn_detected = False\n for op in conn_graph.get_all_ops().values():\n if op.type == 'SimpleRNN':\n simple_rnn_detected = True\n inner_list = op.internal_ops\n self.assertEqual(25, len(inner_list))\n self.assertEqual(op.get_module(), sess.graph.get_operation_by_name('simple_rnn/while/MatMul'))\n self.assertEqual('simple_rnn', op.name)\n self.assertTrue(simple_rnn_detected)\n\n valid_matmuls = []\n valid_bias_add = []\n valid_activation = []\n for op in inner_list:\n if op.type == 'MatMul' and op not in valid_matmuls:\n valid_matmuls.append(op)\n if op.type == 'BiasAdd' and op not in valid_bias_add:\n valid_bias_add.append(op)\n if op.type == 'Tanh' and op not in valid_activation:\n valid_activation.append(op)\n\n self.assertEqual(2, len(valid_matmuls))\n self.assertEqual(1, len(valid_bias_add))\n self.assertEqual(1, len(valid_activation))", "def n_step_birnn(\n n_layers, dropout_ratio, hx, ws, bs, xs, activation='tanh', **kwargs):\n return n_step_rnn_base(n_layers, dropout_ratio, hx, ws, bs, xs,\n activation, use_bi_direction=True)", "def training_step(self):\n self.iteration += 1\n # if not enough replay memories\n if self.iteration < self.params.min_replays:\n # skip training\n return\n # sample memories\n states_val, action_val, rewards, next_state_val, continues \\\n = (rl.tools.sample_memories(self.memory, self.params.batch_size))\n # evaluate the target q\n target_q = self.sess.run(self.graph.target_q_values, feed_dict={self.graph.states: next_state_val})\n # if using double q\n if self.params.double_q:\n online_q = self.sess.run(self.graph.online_q_values, feed_dict={self.graph.states: next_state_val})\n actions = np.argmax(online_q, axis=1)\n 
max_next_q_values = target_q[np.arange(actions.shape[0]), actions].reshape(-1, 1)\n else:\n max_next_q_values = np.max(target_q, axis=1, keepdims=True)\n # train the online DQN\n td_target = rewards + continues * self.params.discount_factor * max_next_q_values\n _, self.loss_val = self.sess.run([self.graph.training_op, self.graph.loss],\n feed_dict={self.graph.states: states_val, self.graph.actions: action_val,\n self.graph.td_target: td_target})\n # copy to target\n if self.params.copy_interval is None or (\n self.params.copy_interval and (self.iteration % self.params.copy_interval == 0)):\n self.sess.run(self.graph.copy_online_to_target)", "def reset_hidden(self, hidden, reset_flags):\n # detach it from history (pytorch mechanics)\n if self.rnn_type in ['lstm', 'mylstm']:\n h = Variable(hidden[0].data)\n c = Variable(hidden[1].data)\n hidden = (h, c)\n for b, flag in enumerate(reset_flags):\n if flag.data[0] == 1: # data[0] access the data in Variable\n hidden[0][:, b, :].data.fill_(0)\n hidden[1][:, b, :].data.fill_(0)\n elif self.rnn_type == 'gru':\n hidden = Variable(hidden.data)\n for b, flag in enumerate(reset_flags):\n if flag.data[0] == 1: # data[0] access the data in Variable\n hidden[:, b, :].data.fill_(0)\n else:\n print(\"Not support this type yet.\")\n exit(0)\n return hidden", "def __call__(self, inputs, state):\n with tf.variable_scope(\"BayesLSTMCell\"):\n if self.w is None:\n\n# size = inputs.get_shape()[-1].value\n \n print ([\"------- Size input LSTM: \", inputs.shape])\n print ([\"------- Dim input specified \", self.X_dim])\n# print ([\"num units LSTM: \", self.num_units])\n \n self.w = VI.sample_posterior((self.X_dim + self.num_units, 4 * self.num_units),\n name=self.n + \"_weights\",\n prior=self.prior,\n is_training=self.is_training)\n \n self.b = VI.sample_posterior((4 * self.num_units, 1),\n name=self.n + \"_biases\",\n prior=self.prior,\n is_training=self.is_training)\n\n # Get the cell and hidden state from the previous cell [C_t-1, h_t-1]\n C_t_prev , h_t_prev = state\n #Vector concatenation of previous hidden state and embedded inputs\n concat_inputs_hidden = tf.concat([inputs, h_t_prev], 1)\n # Compute the Z = Wx + b for each of the 4 networks at once !\n gate_inputs = tf.nn.bias_add(tf.matmul(concat_inputs_hidden, self.w), tf.squeeze(self.b))\n \n # Split data up for the 4 gates\n # i = input_gate, j = new_input, f = forget_gate, o = output_gate\n i, j, f, o = tf.split(value=gate_inputs, num_or_size_splits=4, axis=1)\n\n # Compute the new cell \n C_t = (C_t_prev * tf.sigmoid(f + self._forget_bias) + tf.sigmoid(i)*self._activation(j))\n h_t = self._activation(C_t) * tf.sigmoid(o)\n \n #Create tuple of the new state\n State_t = LSTMStateTuple(C_t, h_t)\n\n return h_t, State_t", "def __init__(\n self,\n n_in_channel=1,\n nclass=10,\n attention=True,\n activation=\"glu\",\n dropout=0.5,\n train_cnn=True,\n rnn_type=\"BGRU\",\n n_RNN_cell=128,\n n_layers_RNN=2,\n dropout_recurrent=0,\n cnn_integration=False,\n freeze_bn=False,\n **kwargs,\n ):\n super(RCRNN, self).__init__()\n self.n_in_channel = n_in_channel\n self.attention = attention\n self.cnn_integration = cnn_integration\n self.freeze_bn = freeze_bn\n\n n_in_cnn = n_in_channel\n\n if cnn_integration:\n n_in_cnn = 1\n\n self.cnn = ResidualCNN(\n n_in_channel=n_in_cnn, activation=activation, conv_dropout=dropout, **kwargs\n )\n\n self.train_cnn = train_cnn\n if not train_cnn:\n for param in self.cnn.parameters():\n param.requires_grad = False\n\n if rnn_type == \"BGRU\":\n nb_in = self.cnn.nb_filters[-1]\n 
if self.cnn_integration:\n # self.fc = nn.Linear(nb_in * n_in_channel, nb_in)\n nb_in = nb_in * n_in_channel\n self.rnn = BidirectionalGRU(\n n_in=nb_in,\n n_hidden=n_RNN_cell,\n dropout=dropout_recurrent,\n num_layers=2,\n )\n #self.rnn2 = BidirectionalGRU(\n # n_in=nb_in*2,\n # n_hidden=n_RNN_cell,\n # dropout=dropout_recurrent,\n # num_layers=1,\n #)\n else:\n NotImplementedError(\"Only BGRU supported for CRNN for now\")\n\n self.relu = nn.ReLU()\n self.dropout = nn.Dropout(dropout)\n self.dense = nn.Linear(n_RNN_cell * 2, nclass)\n self.sigmoid = nn.Sigmoid()\n\n if self.attention:\n self.dense_softmax = nn.Linear(n_RNN_cell * 2, nclass)\n self.softmax = nn.Softmax(dim=-1)", "def step(self, gradients, states):\r\n\r\n # flatten and concatenate inputs\r\n flattened_grads = []\r\n for grad in gradients:\r\n flattened_grads.append(tf.reshape(grad, shape=(-1, 1)))\r\n\r\n shapes = [tf.shape(grad) for grad in gradients]\r\n sizes = [tf.size(grad) for grad in flattened_grads]\r\n\r\n flattened_grads = tf.concat(flattened_grads, 0)\r\n\r\n if self.config[\"preprocess\"]:\r\n flattened_grads = preprocess_grads(flattened_grads)\r\n\r\n n_params = flattened_grads.shape[0]\r\n\r\n # create inner state of RNN\r\n if states is None:\r\n states = [[tf.zeros([n_params, self.hidden_size])]\r\n * 2] * self.layers\r\n\r\n # define RNN op\r\n with tf.variable_scope(\"meta\"):\r\n update, states = self.cell(flattened_grads, states)\r\n\r\n # split and reshape inputs into their original shape\r\n updates = tf.split(update, sizes)\r\n updates = [tf.squeeze(update, axis=[1]) if tf.size(\r\n update) == 1 else update for update in updates]\r\n updates = [tf.reshape(update, shape)\r\n for update, shape in zip(updates, shapes)]\r\n\r\n return updates, states", "def train_step(x_batch, y_batch):\n feed_dict = {\n rnn.input_x: x_batch,\n rnn.input_y: y_batch,\n rnn.dropout_keep_prob: FLAGS.dropout_keep_prob\n }\n _, step, summaries, loss, accuracy = sess.run(\n [train_op, global_step, train_summary_op, rnn.loss, rnn.accuracy],\n feed_dict)\n time_str = datetime.datetime.now().isoformat()\n # print(\"{}: step {}, loss {:g}, acc {:g}\".format(time_str, step, loss, accuracy))\n train_summary_writer.add_summary(summaries, step)\n\n return loss,accuracy", "def local_trend_rnn(features, labels, mode, params):\n\n ''' set up variables & parameters'''\n # get parameters & super parameters\n length = params['length']\n batch_size = params['batch_size']\n state_size = params['state_size']\n drop_rate = params['drop_rate']\n attention_size = params['attention_size']\n learning_rate = params['learning_rate']\n\n # get training features and label\n local_prices = features['local_prices'] # [b, n, 1]\n local_events = features['local_events'] # [b, n, d]\n\n ''' building basic model structure'''\n # inner trend capture\n price_trend_capture = DynamicLstm(batch_size=batch_size, state_size=state_size,\n keep_rate=1 - drop_rate, variable_scope=\"price_trend_capture\")\n price_trend_h_vec = price_trend_capture.run(local_prices) # [b, n, h]\n\n # event trend capture\n event_trend_capture = DynamicLstm(batch_size=batch_size, state_size=state_size,\n keep_rate=1 - drop_rate, variable_scope=\"event_trend_capture\")\n event_trend_h_vec = event_trend_capture.run(local_events) # [b, n, h]\n\n ''' combine price trend and event trend '''\n hybrid_trend = tf.concat([price_trend_h_vec, event_trend_h_vec], axis=2) # [b, n, 2h]\n hybrid_trend_capture = DynamicLstm(batch_size=batch_size, state_size=state_size,\n keep_rate=1 - drop_rate, 
variable_scope=\"hybrid_trend_capture\")\n hybrid_trend_h_vec = hybrid_trend_capture.run(hybrid_trend) # [b, n, h]\n\n ''' generate final prediction '''\n output_dense = tf.layers.Dense(units=1, name=\"output_dense\")\n hybrid_attention = AttentionLayer(attention_size=attention_size, name=\"hybrid_attention\")\n values = output_dense(hybrid_attention.run(hybrid_trend_h_vec)) # [b, 1]\n\n ''' config '''\n # Compute predictions.\n if mode == tf.estimator.ModeKeys.PREDICT:\n predictions = {'values': values}\n return tf.estimator.EstimatorSpec(mode, predictions=predictions)\n\n # Compute loss.\n loss = tf.losses.mean_squared_error(labels, values)\n\n # Compute evaluation metrics.\n mse = tf.metrics.mean_squared_error(labels=labels, predictions=values, name='MSE')\n metrics = {'MSE': mse}\n tf.summary.scalar('mean_squared_error', loss)\n\n if mode == tf.estimator.ModeKeys.EVAL:\n return tf.estimator.EstimatorSpec(\n mode, loss=loss, eval_metric_ops=metrics)\n\n # Create training op.\n assert mode == tf.estimator.ModeKeys.TRAIN\n optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)\n train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())\n return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)", "def _build_rnn(self, n_cells, hidden_layer_size, num_layers,\n batch_size, time_window, lr):\n\n x_anchor = tf.placeholder(shape=[batch_size, n_cells, time_window],\n dtype=tf.float32)\n x_pos = tf.placeholder(shape=[batch_size, n_cells, time_window],\n dtype=tf.float32)\n x_neg = tf.placeholder(shape=[batch_size, n_cells, time_window,],\n dtype=tf.float32)\n\n def embed_rnn(x_in):\n \"\"\"Gives RNN embedding of x_in.\"\"\"\n x_in_flat = tf.reshape(x_in, [batch_size*time_window, n_cells])\n w_in = tf.get_variable(\"w_in\", shape=[n_cells, hidden_layer_size],\n initializer=tf.random_normal_initializer())\n b_in = tf.get_variable(\"b_in\", shape=[hidden_layer_size],\n initializer=tf.random_normal_initializer())\n x_ld_flat = tf.matmul(x_in_flat, w_in) + b_in\n x_ld = tf.reshape(x_ld_flat, [batch_size, time_window, hidden_layer_size])\n\n # stacked LSTM cells\n lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(hidden_layer_size,\n forget_bias=0.0,\n state_is_tuple=True)\n cell = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * num_layers,\n state_is_tuple=True)\n\n # compute output\n initial_state = cell.zero_state(batch_size, tf.float32)\n outputs, _ = tf.nn.dynamic_rnn(cell, x_ld,\n initial_state=initial_state,\n time_major=False)\n\n # reconstruct and compute loss on last output only\n outputs_unp = tf.unpack(tf.transpose(outputs, [1, 0, 2]))\n embedding = outputs_unp[-1]\n\n return embedding\n\n def distance_squared(embed1, embed2):\n return tf.reduce_sum((embed1 - embed2)**2, 1)\n\n with tf.variable_scope(\"RNN\") as scope:\n\n # embed anchor, positive and negative examples.\n embed_anchor = embed_rnn(tf.transpose(x_anchor, [0, 2, 1]))\n # This enables reusing the RNN for anchor, pos and neg.\n scope.reuse_variables()\n # Check that we are reusing the same RNN using:\n # print(tf.get_variable_scope().reuse)\n embed_pos = embed_rnn(tf.transpose(x_pos, [0, 2, 1]))\n embed_neg = embed_rnn(tf.transpose(x_neg, [0, 2, 1]))\n\n # Loss for each point max(d(anchor, pos)^2 - d(anchor, neg)^2 + 1, 0)\n loss = tf.reduce_sum(tf.nn.relu(distance_squared(embed_anchor,\n embed_pos) -\n distance_squared(embed_anchor,\n embed_neg)\n + 1), 0)\n # train model\n train_step = tf.train.AdagradOptimizer(lr).minimize(loss)\n\n return (x_anchor, x_pos, x_neg, train_step, loss,\n embed_anchor, 
embed_pos, embed_neg)", "def run_episode(\n initial_state: tf.Tensor, model: tf.keras.Model, max_steps: int\n) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:\n\n # action_probs = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True)\n actions = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True)\n action_probs = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True)\n values = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True)\n rewards = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True)\n\n initial_state_shape = initial_state.shape\n state = initial_state\n\n for t in tf.range(max_steps):\n # Convert state into a batched tensor (batch size = 1)\n state = tf.expand_dims(state, 0)\n\n # Run the model and to get action probabilities and critic value\n # action_logits_t, value = model(state)\n a_mu, a_sigma, value = model(state)\n a_sigma = a_sigma + 0.01\n # tf.print(\"value:\", value)\n\n # Sample next action from the action probability distribution\n action_rand = tf.random.normal([1], a_mu, a_sigma, tf.float32)\n action_probs_t = tf.compat.v1.distributions.Normal(a_mu, a_sigma).prob(\n action_rand\n )\n action = tf.math.tanh(action_rand) # R -> [-1,1]\n \"\"\"\n tf.print(\"a_mu:\", a_mu)\n tf.print(\"a_sigma:\", a_sigma)\n tf.print(\"action_rand:\", action_rand)\n tf.print(\"action_probs_t:\", action_probs_t)\n \"\"\"\n\n # Store critic values\n values = values.write(t, tf.squeeze(value))\n\n # Store log probability of the action chosen\n actions = actions.write(t, action)\n action_probs = action_probs.write(t, action_probs_t)\n\n # Apply action to the environment to get next state and reward\n state, reward, done = tf_env_step(action)\n state.set_shape(initial_state_shape)\n\n # Store reward\n rewards = rewards.write(t, reward)\n\n if tf.cast(done, tf.bool):\n break\n\n actions = actions.stack() # list of action-mean\n action_probs = action_probs.stack() # list of action-sigma\n values = values.stack()\n rewards = rewards.stack()\n\n return actions, action_probs, values, rewards", "def run_epochs(self, X, removed_seasonality, removed_std):\n bootstrap_size_train = 2 * 24 * 25\n n_test = 2 * 24 * 5\n\n # arrays for storing 5 day step errors\n lstm_errors = []\n lr_errors = []\n svr_errors = []\n\n # arrays for stacking predictions\n preds_lstm = []\n preds_lr = []\n preds_svr = []\n ground_truth = []\n\n sess = tf.Session()\n\n for i in range(bootstrap_size_train, len(X), n_test):\n print(\"Current window:\", i, i + n_test)\n\n X_train = X[:i]\n X_test = X[i:i + n_test]\n removed_seasonality_ = removed_seasonality[i:i + n_test]\n removed_std_ = removed_std[i:i + n_test]\n\n sess.run(tf.global_variables_initializer())\n\n # mini batch generator for training\n gen_train = self.batch_train_generator(\n X_train, self.config.batch_size, self.config.seq_len)\n train_state = np.zeros(\n [self.config.batch_size, self.config.state_size * self.config.num_layers])\n\n # all training data in 1 batch for validation\n batch_size = len(X_train) // self.config.seq_len\n gen_val = self.batch_train_generator(\n X_train, batch_size, self.config.seq_len)\n x_train, y_train = next(gen_val)\n eval_state = np.zeros(\n [batch_size, self.config.state_size * self.config.num_layers])\n\n # deep neural network\n\n for epoch in range(self.config.num_epochs):\n for num_batch in range(len(X_train) / self.config.batch_size):\n\n batch_X_train, batch_y_train = next(gen_train)\n _, outH = sess.run([self.optimize, self.H], feed_dict={self.input_placeholder: batch_X_train,\n 
self.target_placeholder: batch_y_train,\n self.Hin_placeholder: train_state,\n self.dropout_placeholder: self.config.dropout_train\n })\n train_state = outH\n error = sess.run(self.cost, feed_dict={self.input_placeholder: x_train,\n self.target_placeholder: y_train,\n self.Hin_placeholder: eval_state,\n self.dropout_placeholder: self.config.dropout_eval\n })\n\n print \"Epoch: %d, train error: %f\" % (epoch, error)\n\n x_train, y_train, _, _ = self.lags_generator(\n X_train, removed_seasonality_, removed_std_)\n x_test, y_test, removed_seasonality_, removed_std_ = self.lags_generator(\n X_test, removed_seasonality_, removed_std_)\n test_state = np.zeros(\n [x_test.shape[0], self.config.state_size * self.config.num_layers])\n\n preds = sess.run(self.prediction, feed_dict={self.input_placeholder: x_test,\n self.target_placeholder: y_test.reshape(y_test.shape[0], 1),\n self.Hin_placeholder: test_state,\n self.dropout_placeholder: self.config.dropout_eval\n })\n\n preds = preds[:, 0] * removed_std_ + removed_seasonality_\n trues = y_test * removed_std_ + removed_seasonality_\n\n preds_lstm.extend(preds)\n ground_truth.extend(trues)\n\n lstm_errors.append(self.evaluate(preds, trues))\n\n # linear regression\n\n regr = linear_model.LinearRegression()\n regr.fit(x_train[:, :, 0], y_train)\n preds = regr.predict(\n x_test[:, :, 0]) * removed_std_ + removed_seasonality_\n\n preds_lr.extend(preds)\n\n lr_errors.append(self.evaluate(preds, trues))\n\n # support vector regression\n\n svr = SVR()\n svr.fit(x_train[:, :, 0], y_train)\n preds = svr.predict(\n x_test[:, :, 0]) * removed_std_ + removed_seasonality_\n\n preds_svr.extend(preds)\n\n svr_errors.append(self.evaluate(preds, trues))\n\n\n # compute overall error metrics\n\n LSTM_RMSE = self.evaluate(preds_lstm, ground_truth)\n LReg_RMSE = self.evaluate(preds_lr, ground_truth)\n SVR_RMSE = self.evaluate(preds_svr, ground_truth)\n\n OV_RMSE = pd.DataFrame()\n\n OV_RMSE['LSTM_RMSE'] = [LSTM_RMSE]\n OV_RMSE['LReg_RMSE'] = [LReg_RMSE]\n OV_RMSE['SVR_RMSE'] = [SVR_RMSE]\n\n OV_RMSE.to_csv('data/OV_JFK_SO.csv', index=False, sep=\"\\t\")\n\n # save errors computed at each 5 day step\n\n RMSE = pd.DataFrame()\n\n RMSE['lstm'] = lstm_errors\n RMSE['lr'] = lr_errors\n RMSE['svr'] = svr_errors\n\n RMSE.to_csv('data/JFK_SO.csv', index=False, sep=\"\\t\")", "def init_hidden(self, batch_size, device):\n if self.mode == 'LSTM':\n return (torch.ones((1+self.bidirectional , batch_size, self.hidden_dim), device=device),\n torch.ones((1+self.bidirectional, batch_size, self.hidden_dim), device=device))\n else:\n return torch.ones((1+self.bidirectional, batch_size, self.hidden_dim), device=device)", "def _dynamic_rnn_loop(cell: RNNCellBase[State], inputs: torch.Tensor, initial_state: State, sequence_length: torch.LongTensor) ->Tuple[torch.Tensor, State]:\n state = initial_state\n time_steps = inputs.shape[0]\n all_outputs = []\n all_state = map_structure(lambda _: no_map(list), state)\n for i in range(time_steps):\n output, state = cell(inputs[i], state)\n all_outputs.append(output)\n map_structure_zip(lambda xs, x: xs.append(x), (all_state, state))\n final_outputs = torch.stack(all_outputs, dim=0)\n final_outputs = mask_sequences(final_outputs, sequence_length=sequence_length, time_major=True)\n final_state = map_structure(lambda _: no_map(list), state)\n for batch_idx, time_idx in enumerate(sequence_length.tolist()):\n if time_idx > 0:\n map_structure_zip(lambda xs, x: xs.append(x[time_idx - 1][batch_idx]), (final_state, all_state))\n else:\n 
map_structure_zip(lambda xs, x: xs.append(x[batch_idx]), (final_state, initial_state))\n final_state = map_structure(lambda x: torch.stack(x, dim=0), final_state)\n return final_outputs, final_state", "def __init__(self, state_size, action_dim, seed, fc1_units=64, fc2_units=64):\n super(RNetwork, self).__init__()\n self.seed = torch.manual_seed(seed)\n self.fc1 = nn.Linear(state_size + action_dim, fc1_units)\n self.fc2 = nn.Linear(fc1_units, fc2_units)\n self.fc3 = nn.Linear(fc2_units, 1)", "def __init__(self, incoming, n_units,\n W_ci=tf.zeros, W_ig=tf.zeros, W_og=tf.zeros, W_fg=tf.zeros,\n b_ci=tf.zeros, b_ig=tf.zeros, b_og=tf.zeros, b_fg=tf.zeros,\n a_ci=tf.tanh, a_ig=tf.sigmoid, a_og=tf.sigmoid, a_fg=tf.sigmoid, a_out=tf.identity,\n c_init=tf.zeros, h_init=tf.zeros, learn_c_init=False, learn_h_init=False, forgetgate=True,\n output_dropout=False, store_states=False, return_states=False, precomp_fwds=False,\n tickerstep_biases=None, learn_tickerstep_biases=True, name='LSTM'):\n super(LSTMLayerSetNetInput, self).__init__()\n with tf.variable_scope(name) as self.layer_scope:\n self.incoming, self.incoming_shape = get_input(incoming)\n self.n_units = n_units\n self.lstm_inlets = ['ci', 'ig', 'og', 'fg']\n if return_states:\n store_states = True\n \n # Make W and b tf variables\n W_ci, W_ig, W_og, W_fg = [\n tofov(v, shape=[n_units, n_units], var_params=dict(name=n + '_bwd'))\n for v, n in zip([W_ci, W_ig, W_og, W_fg], ['W_ci', 'W_ig', 'W_og', 'W_fg'])]\n b_ci, b_ig, b_og, b_fg = [tofov(v, shape=[n_units], var_params=dict(name=n)) for v, n in\n zip([b_ci, b_ig, b_og, b_fg], ['b_ci', 'b_ig', 'b_og', 'b_fg'])]\n \n # Pack weights for bwd connections\n W_bwd_conc = tf.concat(axis=1, values=[W_ci, W_ig, W_og, W_fg])\n \n if not forgetgate:\n print(\"Warning: Setting forgetgate to 0 has not been tested yet, please set the W and b manually \"\n \"to not-trainable tensorflow variables!\")\n \n def a_fg(x):\n return tf.ones(x.get_shape().as_list())\n \n # Initialize bias for tickersteps\n if tickerstep_biases is not None:\n self.W_tickers = OrderedDict(zip_longest(self.lstm_inlets,\n [tofov(tickerstep_biases, shape=[n_units],\n var_params=dict(name='W_tickers_' + g,\n trainable=learn_tickerstep_biases))\n for g in self.lstm_inlets]))\n else:\n self.W_tickers = None\n \n #\n # Create mask for output dropout\n # apply dropout to n_units dimension of outputs, keeping dropout mask the same for all samples,\n # sequence positions, and pixel coordinates\n #\n output_shape = self.get_output_shape()\n if output_dropout:\n out_do_mask = tf.ones(shape=[output_shape[0], output_shape[-1]],\n dtype=tf.float32)\n out_do_mask = tf.nn.dropout(out_do_mask, keep_prob=1. 
- output_dropout,\n noise_shape=[1, output_shape[-1]])\n \n def out_do(x):\n \"\"\"Function for applying dropout mask to outputs\"\"\"\n if output_dropout:\n return out_do_mask * x\n else:\n return x\n \n # Redefine a_out to include dropout (sneaky, sneaky)\n a_out_nodropout = a_out\n \n def a_out(x):\n return a_out_nodropout(out_do(x))\n \n #\n # Handle initializations for h (hidden states) and c (cell states) as Variable\n #\n h_init = out_do(tofov(h_init, shape=[output_shape[0], output_shape[-1]],\n var_params=dict(name='h_init', trainable=learn_h_init)))\n c_init = tofov(c_init, shape=[output_shape[0], output_shape[-1]],\n var_params=dict(name='h_init', trainable=learn_c_init))\n \n # Initialize lists to store LSTM activations and cell states later\n h = [h_init]\n c = [c_init]\n \n self.precomp_fwds = precomp_fwds\n self.store_states = store_states\n self.return_states = return_states\n \n self.W_fwd = OrderedDict(zip(self.lstm_inlets, [None, None, None, None]))\n self.W_bwd = OrderedDict(zip(self.lstm_inlets, [W_ci, W_ig, W_og, W_fg]))\n \n self.W_fwd_conc = None\n self.W_bwd_conc = W_bwd_conc\n self.a = OrderedDict(zip(self.lstm_inlets, [a_ci, a_ig, a_og, a_fg]))\n self.a['out'] = a_out\n self.b = OrderedDict(zip(self.lstm_inlets, [b_ci, b_ig, b_og, b_fg]))\n self.h = h\n self.c = c\n self.external_rec = None\n \n self.ig = []\n self.og = []\n self.ci = []\n self.fg = []\n \n self.out = tf.expand_dims(h_init, 1)\n self.name = name", "def _get_lr(self, initial_lr, step, epoch):\n return initial_lr", "def test_output_activation_return_return_final_seq_only_off():\n RANDOM_ITERATIONS = 20\n input_dim = 100\n for _ in range(RANDOM_ITERATIONS):\n data = torch.randn((25, 10, 100))\n RNN_instance = RNN(layers_info=[[\"lstm\", 20], [\"gru\", 5], [\"linear\", 10], [\"linear\", 3]],\n hidden_activations=\"relu\", input_dim=input_dim, return_final_seq_only=False,\n output_activation=\"relu\", initialiser=\"xavier\", batch_norm=True)\n out = RNN_instance.forward(data)\n assert all(out.reshape(1, -1).squeeze() >= 0)\n\n RNN_instance = RNN(layers_info=[[\"lstm\", 20], [\"gru\", 5]],\n hidden_activations=\"relu\", input_dim=input_dim, return_final_seq_only=False,\n output_activation=\"relu\", initialiser=\"xavier\")\n out = RNN_instance.forward(data)\n assert all(out.reshape(1, -1).squeeze() >= 0)\n\n RNN_instance = RNN(layers_info=[[\"lstm\", 20], [\"gru\", 5], [\"linear\", 10], [\"linear\", 3]],\n hidden_activations=\"relu\", input_dim=input_dim, return_final_seq_only=False,\n output_activation=\"relu\", initialiser=\"xavier\")\n out = RNN_instance.forward(data)\n assert all(out.reshape(1, -1).squeeze() >= 0)\n\n RNN_instance = RNN(layers_info=[[\"lstm\", 20], [\"gru\", 5], [\"linear\", 10], [\"linear\", 3]],\n hidden_activations=\"relu\", input_dim=input_dim, return_final_seq_only=False,\n output_activation=\"sigmoid\", initialiser=\"xavier\")\n out = RNN_instance.forward(data)\n assert all(out.reshape(1, -1).squeeze() >= 0)\n assert all(out.reshape(1, -1).squeeze() <= 1)\n summed_result = torch.sum(out, dim=2)\n assert all(summed_result.reshape(1, -1).squeeze() != 1.0)\n\n\n RNN_instance = RNN(layers_info=[[\"lstm\", 20], [\"gru\", 5], [\"linear\", 10], [\"linear\", 3]],\n hidden_activations=\"relu\", input_dim=input_dim, return_final_seq_only=False,\n output_activation=\"softmax\", initialiser=\"xavier\")\n out = RNN_instance.forward(data)\n assert all(out.reshape(1, -1).squeeze() >= 0)\n assert all(out.reshape(1, -1).squeeze() <= 1)\n summed_result = torch.sum(out, dim=2)\n summed_result = 
summed_result.reshape(1, -1).squeeze()\n summed_result = torch.round( (summed_result * 10 ** 5) / (10 ** 5))\n assert all( summed_result == 1.0)\n\n RNN_instance = RNN(layers_info=[[\"lstm\", 20], [\"gru\", 5], [\"lstm\", 25]],\n hidden_activations=\"relu\", input_dim=input_dim, return_final_seq_only=False,\n output_activation=\"softmax\", initialiser=\"xavier\")\n out = RNN_instance.forward(data)\n assert all(out.reshape(1, -1).squeeze() >= 0)\n assert all(out.reshape(1, -1).squeeze() <= 1)\n summed_result = torch.sum(out, dim=2)\n summed_result = summed_result.reshape(1, -1).squeeze()\n summed_result = torch.round( (summed_result * 10 ** 5) / (10 ** 5))\n\n\n\n assert all( summed_result == 1.0)\n\n RNN_instance = RNN(layers_info=[[\"lstm\", 20], [\"gru\", 5], [\"lstm\", 25]],\n hidden_activations=\"relu\", input_dim=input_dim, return_final_seq_only=False,\n initialiser=\"xavier\")\n out = RNN_instance.forward(data)\n assert not all(out.reshape(1, -1).squeeze() >= 0)\n\n assert not all(out.reshape(1, -1).squeeze() <= 0)\n summed_result = torch.sum(out, dim=2)\n summed_result = summed_result.reshape(1, -1).squeeze()\n summed_result = torch.round( (summed_result * 10 ** 5) / (10 ** 5))\n assert not all( summed_result == 1.0)\n\n RNN_instance = RNN(layers_info=[[\"lstm\", 20], [\"gru\", 5], [\"lstm\", 25], [\"linear\", 8]],\n hidden_activations=\"relu\", input_dim=input_dim, return_final_seq_only=False,\n initialiser=\"xavier\")\n out = RNN_instance.forward(data)\n assert not all(out.reshape(1, -1).squeeze() >= 0)\n assert not all(out.reshape(1, -1).squeeze() <= 0)\n summed_result = torch.sum(out, dim=2)\n summed_result = summed_result.reshape(1, -1).squeeze()\n summed_result = torch.round( (summed_result * 10 ** 5) / (10 ** 5))\n assert not all( summed_result == 1.0)", "def rnn_lstm(\n nclass,\n input_shape=(187, 1),\n recurrent_layers=[64, 128],\n dense_layers=[64, 16],\n dropout=0.2,\n binary=False,\n):\n if not binary:\n loss = losses.sparse_categorical_crossentropy\n last_activation = activations.softmax\n else:\n loss = losses.binary_crossentropy\n last_activation = activations.sigmoid\n return_sequences = True\n\n inp = Input(shape=input_shape)\n x = inp\n for i, neurons in enumerate(recurrent_layers):\n x = LSTM(neurons, return_sequences=return_sequences)(x)\n x = Dropout(rate=dropout)(x)\n return_sequences = False\n for i, neurons in enumerate(dense_layers):\n x = Dense(neurons, name=f\"dense_{i+1}\", activation=\"relu\")(x)\n x = Dense(nclass, name=\"Output\", activation=last_activation)(x)\n\n model = models.Model(inputs=inp, outputs=x)\n opt = optimizers.Adam(0.001)\n model.compile(optimizer=opt, loss=loss, metrics=[\"acc\"])\n model.summary()\n return model", "def forward(self, prev_state, obs_t):\r\n # Use your network to compute qvalues for given state\r\n #print(state_t.shape)\r\n h = self.conv(obs_t)\r\n\r\n h = h.view(h.size(0), -1)\r\n\r\n new_state = h_new, c_new = self.lstm(h, prev_state)\r\n advantage = self.adv(h_new)\r\n value = self.val(h_new)\r\n\r\n\r\n adv_mean = torch.mean(advantage, dim=1, keepdim=True)\r\n qvalues = value + advantage - adv_mean\r\n\r\n return new_state, qvalues", "def SimpleRNN(self):\n # Model.\n model = Sequential()\n model.add(SimpleRNN(2048, return_sequences=True,\n input_shape=self.input_shape,\n dropout=0.0))\n #model.add(Flatten()) #qiao_added\n # model.add(Dense(1024, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(2048, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Flatten())\n 
model.add(Dense(self.nb_classes, activation='softmax'))\n return model", "def run(self):\n\n # initializing random network activity\n s_rand_T = np.zeros((self.T, self.N_rand))\n p_rand_T = np.zeros((self.T, self.N_rand))\n r_rand_T = np.zeros((self.T, self.N_rand))\n\n s_rand_T[0, :] = np.random.uniform(low=0, high=0.01, size=(self.N_rand))\n\n # initializing sensory networks\n s_sens_T = np.zeros((self.T, self.N_sensory_nets * self.N_sensory))\n p_sens_T = np.zeros((self.T, self.N_sensory_nets * self.N_sensory))\n r_sens_T = np.zeros((self.T, self.N_sensory_nets * self.N_sensory))\n s_sens_T[0, :] = np.random.uniform(low=0, high=0.01, size=(self.N_sensory_nets * self.N_sensory))\n\n # extend input to be T timesteps and only nonzero for 100 ts\n s_ext_T = np.broadcast_to(self.s_ext, (self.T, self.N_sensory * self.N_sensory_nets)).copy()\n # stimulus is presented for 100 ms\n stim_T = int(200/self.rand_net.dt)\n s_ext_T[:100] = 0\n s_ext_T[100+stim_T:] = 0\n # s_ext_T *= 0\n\n for t in range(1, self.T):\n if (t + 1) % 100 == 0:\n print(f'step {t} of {self.T}')\n s_sens_prev = s_sens_T[t - 1]\n s_rand_prev = s_rand_T[t - 1]\n p_rand_prev = p_rand_T[t - 1]\n s_ext = s_ext_T[t - 1]\n step = self.forward(s_ext=s_ext, s_rand_prev=s_rand_prev, s_sens_prev=s_sens_prev, p_rand_prev=p_rand_prev)\n s_sens_T[t] = step['s_sens']\n p_sens_T[t] = step['p_sens']\n r_sens_T[t] = step['r_sens']\n s_rand_T[t] = step['s_rand']\n r_rand_T[t] = step['r_rand']\n p_rand_T[t] = step['p_rand']\n\n p_sens_T = p_sens_T.reshape(self.T, self.N_sensory_nets, self.N_sensory)\n s_ext_T = s_ext_T.reshape(self.T, self.N_sensory_nets, self.N_sensory)\n r_sens_T = r_sens_T.reshape(self.T, self.N_sensory_nets, self.N_sensory)\n s_sens_T = s_sens_T.reshape(self.T, self.N_sensory_nets, self.N_sensory)\n\n return dict(\n n_sensory=self.N_sensory,\n n_rand=self.N_rand,\n mus=self.mus,\n sigma=self.sigma,\n s_ext=s_ext_T,\n s_sens=s_sens_T,\n r_sens=r_sens_T,\n p_sens=p_sens_T,\n s_rand=s_rand_T,\n r_rand=r_rand_T,\n p_rand=p_rand_T\n )", "def rnn_step_backward(dnext_h, cache):\n dx, dprev_h, dWx, dWh, db = None, None, None, None, None\n ##############################################################################\n # TODO: Implement the backward pass for a single step of a vanilla RNN. #\n # #\n # HINT: For the tanh function, you can compute the local derivative in terms #\n # of the output value from tanh. 
#\n ##############################################################################\n\n x, next_h, prev_h, Wx, Wh, b = cache\n # this is because in vanilla RNN h = tanh(z) and derivative of next_h = tanh(z) = 1-z*z;\n dz = (1-next_h*next_h)*dnext_h\n # THIS ERROR IS SPREAD AMONG THE\n # np.dot(x, Wx) + np.dot(prev_h, Wh) + b)\n dx = np.dot(dz,Wx.T)\n dprev_h = np.dot(dz,Wh.T)\n db = np.sum(dz,axis=0)\n dWx = np.dot(x.T,dz)\n dWh = np.dot(prev_h.T,dz)\n #d(tanh) = 1- tanh*tanh\n\n\n ##############################################################################\n # END OF YOUR CODE #\n ##############################################################################\n return dx, dprev_h, dWx, dWh, db", "def evaluate_model(neurons, lr, Ising_Data, verbose):\n training_epochs=100\n batch_size=100\n\n # SGD learning params\n opt_params=dict(learning_rate=lr)\n\n # create DNN\n DNN=model(neurons,opt_params)\n with tf.Session() as sess:\n\n # Initialize the necessary variables, in this case, w and b.\n sess.run(tf.global_variables_initializer())\n\n # Train the DNN.\n for epoch in range(training_epochs): \n\n batch_X, batch_Y = Ising_Data[\"train\"].next_batch(batch_size,seed=seed)\n\n loss_batch, _ = sess.run([DNN.loss,DNN.optimizer], \n feed_dict={DNN.X: batch_X,\n DNN.Y: batch_Y, \n DNN.dropout_keepprob: 0.5} )\n accuracy = sess.run(DNN.accuracy, \n feed_dict={DNN.X: batch_X,\n DNN.Y: batch_Y, \n DNN.dropout_keepprob: 1.0} )\n # Count training step.\n step = sess.run(DNN.global_step)\n # Test DNN performance on entire train test and critical data sets.\n train_loss, train_accuracy = sess.run([DNN.loss, DNN.accuracy], \n feed_dict={DNN.X: Ising_Data[\"train\"].data_X,\n DNN.Y: Ising_Data[\"train\"].data_Y,\n DNN.dropout_keepprob: 0.5}\n )\n if verbose: print(\"train loss/accuracy:\", train_loss, train_accuracy)\n\n test_loss, test_accuracy = sess.run([DNN.loss, DNN.accuracy], \n feed_dict={DNN.X: Ising_Data[\"test\"].data_X,\n DNN.Y: Ising_Data[\"test\"].data_Y,\n DNN.dropout_keepprob: 1.0}\n )\n\n if verbose: print(\"test loss/accuracy:\", test_loss, test_accuracy)\n\n critical_loss, critical_accuracy = sess.run([DNN.loss, DNN.accuracy], \n feed_dict={DNN.X: Ising_Data[\"critical\"].data_X,\n DNN.Y: Ising_Data[\"critical\"].data_Y,\n DNN.dropout_keepprob: 1.0}\n )\n if verbose: print(\"crtitical loss/accuracy:\", critical_loss, critical_accuracy)\n\n return train_loss,train_accuracy,test_loss,test_accuracy,critical_loss,critical_accuracy", "def run_epoch(data, session, model, train_op=None, verbose=False):\n start_time = time.time()\n costs = 0.0\n num_steps = model['num_steps']\n num_batches = (data.shape[1] - 1) // num_steps\n\n # initialize RNN cell states to be all zero\n state = session.run(model['initial_state'])\n\n fetches = {\n \"cost\": model['cost'],\n \"final_state\": model['final_state'],\n }\n\n # train model\n if train_op is not None:\n fetches[\"train_op\"] = train_op\n\n for batch in range(num_batches):\n feed_dict = {\n model['user_inputs']: data[:, batch * num_steps: (batch + 1) * num_steps],\n model['targets']: data[:, batch * num_steps + 1: (batch + 1) * num_steps + 1],\n }\n for i, (c, h) in enumerate(model['initial_state']):\n feed_dict[c] = state[i].c\n feed_dict[h] = state[i].h\n\n vals = session.run(fetches, feed_dict)\n cost = vals[\"cost\"]\n state = vals[\"final_state\"]\n costs += cost\n\n if verbose and batch % (num_batches // 10) == 10:\n iters = num_steps * (batch + 1)\n print(\"%.3f perplexity: %.3f speed: %.0f wps\" %\n (batch * 1.0 / num_batches, np.exp(costs / 
iters),\n iters * data.shape[0] * 1 /\n (time.time() - start_time)))\n\n return np.exp(costs / (data.shape[1] - 1))", "def rnn_lstm_bidir(\n nclass,\n input_shape=(187, 1),\n recurrent_layers=[64, 128],\n dense_layers=[64, 16],\n dropout=0.2,\n binary=False,\n):\n if not binary:\n loss = losses.sparse_categorical_crossentropy\n last_activation = activations.softmax\n else:\n loss = losses.binary_crossentropy\n last_activation = activations.sigmoid\n return_sequences = True\n\n inp = Input(shape=input_shape)\n x = inp\n for i, neurons in enumerate(recurrent_layers):\n layer = LSTM(neurons, return_sequences=return_sequences)\n x = Bidirectional(layer)(x)\n x = Dropout(rate=dropout)(x)\n return_sequences = False\n for i, neurons in enumerate(dense_layers):\n x = Dense(neurons, name=f\"dense_{i+1}\", activation=\"relu\")(x)\n x = Dense(nclass, name=\"Output\", activation=last_activation)(x)\n\n model = models.Model(inputs=inp, outputs=x)\n opt = optimizers.Adam(0.001)\n model.compile(optimizer=opt, loss=loss, metrics=[\"acc\"])\n model.summary()\n return model", "def step_begin_epoch(self, epoch):\n self.lr = self.get_next_lr(epoch)\n self.optimizer.set_lr(self.warmup_factor * self.lr)\n return self.optimizer.get_lr()" ]
[ "0.61140877", "0.59839666", "0.59752065", "0.5951735", "0.5937966", "0.59245795", "0.5917941", "0.5917941", "0.5909956", "0.5903214", "0.5903214", "0.58980507", "0.589458", "0.58825445", "0.58472407", "0.58389205", "0.5838521", "0.5834483", "0.5830626", "0.58288646", "0.58194387", "0.5811854", "0.5807376", "0.58072186", "0.57864034", "0.5783501", "0.5776474", "0.5772046", "0.57516444", "0.57457525", "0.5743092", "0.5742316", "0.5742209", "0.5722813", "0.5701747", "0.568372", "0.56534415", "0.565271", "0.56306267", "0.56232095", "0.5622038", "0.56078964", "0.5607511", "0.559617", "0.55944717", "0.5593867", "0.5590002", "0.55854726", "0.5577271", "0.5575223", "0.5562607", "0.5558488", "0.5546834", "0.55460405", "0.5544593", "0.55414045", "0.5538523", "0.5537143", "0.553199", "0.55310214", "0.55280745", "0.55198354", "0.551466", "0.5506659", "0.5502693", "0.5497438", "0.5493985", "0.54933864", "0.54882514", "0.54750174", "0.5474343", "0.5466351", "0.5463208", "0.5454938", "0.54512197", "0.54490113", "0.5444014", "0.544081", "0.54348797", "0.54289967", "0.5425936", "0.54257953", "0.54252344", "0.5419816", "0.5417938", "0.5413044", "0.54117393", "0.5410494", "0.53807586", "0.53764546", "0.53753155", "0.537401", "0.5371809", "0.53714323", "0.53694737", "0.53534317", "0.5352627", "0.5351733", "0.5351005", "0.5350693", "0.53416175" ]
0.0
-1
LSTM returning hidden state and content cell at a specific timestep.
def lstm_cell(x, h, c, name=None, reuse=False):
    nin = x.shape[-1].value
    nout = h.shape[-1].value
    with tf.variable_scope(name, default_name="lstm", values=[x, h, c], reuse=reuse):
        wx = get_variable_wrap("kernel/input", [nin, nout * 4], dtype=tf.float32,
                               initializer=tf.orthogonal_initializer(1.0))
        wh = get_variable_wrap("kernel/hidden", [nout, nout * 4], dtype=tf.float32,
                               initializer=tf.orthogonal_initializer(1.0))
        b = get_variable_wrap("bias", [nout * 4], dtype=tf.float32,
                              initializer=tf.constant_initializer(0.0))
    z = ed.dot(x, wx) + ed.dot(h, wh) + b
    i, f, o, u = tf.split(z, 4, axis=0)
    i = tf.sigmoid(i)
    f = tf.sigmoid(f + 1.0)
    o = tf.sigmoid(o)
    u = tf.tanh(u)
    c = f * c + i * u
    h = o * tf.tanh(c)
    return h, c
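For reference, a self-contained NumPy sketch of the same gate layout (input, forget, output, update), unrolled over a toy sequence so the hidden state and cell state can be read off at a chosen timestep. The sizes, random weights, and the helper names (lstm_cell_np, sigmoid, t_query) are illustrative stand-ins, not part of the snippet above.

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def lstm_cell_np(x, h, c, wx, wh, b):
    # Same gate ordering as the TensorFlow snippet: input, forget, output, update.
    z = x @ wx + h @ wh + b
    i, f, o, u = np.split(z, 4)
    i, f, o = sigmoid(i), sigmoid(f + 1.0), sigmoid(o)
    u = np.tanh(u)
    c = f * c + i * u          # new cell state
    h = o * np.tanh(c)         # new hidden state
    return h, c

rng = np.random.default_rng(0)
nin, nout, T, t_query = 8, 16, 10, 4
wx = rng.normal(scale=0.1, size=(nin, 4 * nout))
wh = rng.normal(scale=0.1, size=(nout, 4 * nout))
b = np.zeros(4 * nout)

h = np.zeros(nout)
c = np.zeros(nout)
states = []
for x_t in rng.normal(size=(T, nin)):   # unroll over the sequence
    h, c = lstm_cell_np(x_t, h, c, wx, wh, b)
    states.append((h, c))

h_t, c_t = states[t_query]              # hidden and cell state at timestep t_query
print(h_t.shape, c_t.shape)             # (16,) (16,)

The four-way split of z and the f + 1.0 forget-gate bias mirror the TensorFlow version above; only the framework and the toy data differ.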
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def LSTMCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None):\n hx, cx = hidden\n gates = F.linear(input, w_ih, b_ih) + F.linear(hx, w_hh, b_hh)\n\n ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)\n\n ingate = hard_sigmoid(ingate)\n forgetgate = hard_sigmoid(forgetgate)\n cellgate = F.tanh(cellgate)\n outgate = hard_sigmoid(outgate)\n\n cy = (forgetgate * cx) + (ingate * cellgate)\n hy = outgate * F.tanh(cy)\n\n return hy, cy", "def LSTM(inputs, dim, seq_len, name):\r\n with tf.name_scope(name) as scope:\r\n cell = tf.contrib.rnn.LSTMCell(num_units=dim)\r\n hidden_states, cell_states = tf.nn.dynamic_rnn(cell, inputs=inputs, sequence_length=seq_len, dtype=tf.float32, scope=name)\r\n\r\n return hidden_states, cell_states", "def LSTM(self, previous_hidden_memory_tuple, x):\n \n previous_hidden_state,c_prev=tf.unstack(previous_hidden_memory_tuple)\n \n #Input Gate\n i= tf.sigmoid(\n tf.matmul(x,self.Wi)+tf.matmul(previous_hidden_state,self.Ui) + self.bi \n )\n \n #Forget Gate\n f= tf.sigmoid(\n tf.matmul(x,self.Wf)+tf.matmul(previous_hidden_state,self.Uf) + self.bf \n )\n \n #Output Gate\n o= tf.sigmoid(\n tf.matmul(x,self.Wog)+tf.matmul(previous_hidden_state,self.Uog) + self.bog\n )\n \n #New Memory Cell\n c_= tf.nn.tanh(\n tf.matmul(x,self.Wc)+tf.matmul(previous_hidden_state,self.Uc) + self.bc \n ) \n \n #Final Memory cell\n c= f*c_prev + i*c_\n \n #Current Hidden state\n current_hidden_state = o*tf.nn.tanh(c)\n\n return tf.stack([current_hidden_state,c])", "def build_lstm(self, keep_prob):\n def get_cell():\n if self.kernel == 'LSTM':\n cell = tf.contrib.rnn.BasicLSTMCell(self.num_hidden_units)\n print('LSTM is using...')\n elif self.kernel == 'GRU': # GRU RNN\n cell = tf.contrib.rnn.GRUCell(self.num_hidden_units)\n print('GRU is using...')\n else:\n raise AttributeError\n cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=keep_prob)\n return cell\n lstm_cell = get_cell()\n init_state = lstm_cell.zero_state(self.batch_size, dtype=tf.float32)\n return lstm_cell, init_state", "def _lstm_unroll_base(num_lstm_layer, seq_len, num_hidden):\n param_cells = []\n last_states = []\n for i in range(num_lstm_layer):\n param_cells.append(LSTMParam(i2h_weight=mx.sym.Variable(\"l%d_i2h_weight\" % i),\n i2h_bias=mx.sym.Variable(\"l%d_i2h_bias\" % i),\n h2h_weight=mx.sym.Variable(\"l%d_h2h_weight\" % i),\n h2h_bias=mx.sym.Variable(\"l%d_h2h_bias\" % i)))\n state = LSTMState(c=mx.sym.Variable(\"l%d_init_c\" % i),\n h=mx.sym.Variable(\"l%d_init_h\" % i))\n last_states.append(state)\n assert len(last_states) == num_lstm_layer\n\n # embedding layer\n data = mx.sym.Variable('data')\n wordvec = mx.sym.SliceChannel(data=data, num_outputs=seq_len, squeeze_axis=1)\n\n hidden_all = []\n for seqidx in range(seq_len):\n hidden = wordvec[seqidx]\n for i in range(num_lstm_layer):\n next_state = _lstm(\n num_hidden=num_hidden,\n indata=hidden,\n prev_state=last_states[i],\n param=param_cells[i],\n seqidx=seqidx,\n layeridx=i)\n hidden = next_state.h\n last_states[i] = next_state\n hidden_all.append(hidden)\n\n hidden_concat = mx.sym.Concat(*hidden_all, dim=0)\n pred_fc = mx.sym.FullyConnected(data=hidden_concat, num_hidden=11, name=\"pred_fc\")\n return pred_fc", "def __call__(self, inputs, state):\n with tf.variable_scope(\"BayesLSTMCell\"):\n if self.w is None:\n\n# size = inputs.get_shape()[-1].value\n \n print ([\"------- Size input LSTM: \", inputs.shape])\n print ([\"------- Dim input specified \", self.X_dim])\n# print ([\"num units LSTM: \", self.num_units])\n \n self.w = 
VI.sample_posterior((self.X_dim + self.num_units, 4 * self.num_units),\n name=self.n + \"_weights\",\n prior=self.prior,\n is_training=self.is_training)\n \n self.b = VI.sample_posterior((4 * self.num_units, 1),\n name=self.n + \"_biases\",\n prior=self.prior,\n is_training=self.is_training)\n\n # Get the cell and hidden state from the previous cell [C_t-1, h_t-1]\n C_t_prev , h_t_prev = state\n #Vector concatenation of previous hidden state and embedded inputs\n concat_inputs_hidden = tf.concat([inputs, h_t_prev], 1)\n # Compute the Z = Wx + b for each of the 4 networks at once !\n gate_inputs = tf.nn.bias_add(tf.matmul(concat_inputs_hidden, self.w), tf.squeeze(self.b))\n \n # Split data up for the 4 gates\n # i = input_gate, j = new_input, f = forget_gate, o = output_gate\n i, j, f, o = tf.split(value=gate_inputs, num_or_size_splits=4, axis=1)\n\n # Compute the new cell \n C_t = (C_t_prev * tf.sigmoid(f + self._forget_bias) + tf.sigmoid(i)*self._activation(j))\n h_t = self._activation(C_t) * tf.sigmoid(o)\n \n #Create tuple of the new state\n State_t = LSTMStateTuple(C_t, h_t)\n\n return h_t, State_t", "def lstm_cell(input, cx):\n return FunctionLib.apply('LSTMCell', input.device, [input, cx])", "def forward(self, X, hx=None):\n outputs = []\n (batch_size, Nseq, Dim) = X.size()\n \n# print (\"Dimensions LSTM_RNN Batch: \", (batch_size, Nseq, Dim))\n for l in range(self.num_layers):\n if (type(hx) == type(None)):\n h_t = torch.zeros(batch_size, self.hidden_size).to(device = Vil.device, dtype = Vil.dtype)\n c_t = torch.zeros(batch_size, self.hidden_size).to(device = Vil.device, dtype = Vil.dtype)\n else:\n h_t = torch.zeros(batch_size, self.hidden_size).to(device = Vil.device, dtype = Vil.dtype)\n c_t = torch.zeros(batch_size, self.hidden_size).to(device = Vil.device, dtype = Vil.dtype)\n setattr(self, 'h_t%i'%(l+1), h_t)\n setattr(self, 'c_t%i'%(l+1), c_t)\n \n # We loop for every element in the chain and for every layer\n\n for i in range(Nseq):\n input_t = X[:,i,:]\n# print (\"Sequence Chunk size: \",input_t.size())\n l = 0\n # First layer we put the input, in the rest we put the propagated states\n h_t, c_t = getattr(self, 'LSTMCell%i'%(l+1))(input_t, (getattr(self, 'h_t%i'%(l+1)), getattr(self, 'c_t%i'%(l+1))))\n setattr(self, 'h_t%i'%(l+1), h_t)\n setattr(self, 'c_t%i'%(l+1), c_t)\n \n for l in range(1,self.num_layers):\n h_t, c_t = getattr(self, 'LSTMCell%i'%(l+1))(h_t, (getattr(self, 'h_t%i'%(l+1)), getattr(self, 'c_t%i'%(l+1))))\n setattr(self, 'h_t%i'%(l+1), h_t)\n setattr(self, 'c_t%i'%(l+1), c_t)\n \n \n # Return the hx and cx of all layers ? 
for the last sample ?\n outputs = []\n for l in range(self.num_layers):\n outputs.append( [getattr(self, 'h_t%i'%(l+1)), getattr(self, 'c_t%i'%(l+1))])\n\n# outputs = torch.stack(outputs, 1).squeeze(2)\n \n return outputs", "def __init__(self, incoming, n_units,\n W_ci=tf.zeros, W_ig=tf.zeros, W_og=tf.zeros, W_fg=tf.zeros,\n b_ci=tf.zeros, b_ig=tf.zeros, b_og=tf.zeros, b_fg=tf.zeros,\n a_ci=tf.tanh, a_ig=tf.sigmoid, a_og=tf.sigmoid, a_fg=tf.sigmoid, a_out=tf.identity,\n c_init=tf.zeros, h_init=tf.zeros, learn_c_init=False, learn_h_init=False, forgetgate=True,\n output_dropout=False, store_states=False, return_states=False, precomp_fwds=False,\n tickerstep_biases=None, learn_tickerstep_biases=True, name='LSTM'):\n super(LSTMLayer, self).__init__()\n with tf.variable_scope(name) as self.layer_scope:\n self.incoming, self.incoming_shape = get_input(incoming)\n self.n_units = n_units\n self.lstm_inlets = ['ci', 'ig', 'og', 'fg']\n if return_states:\n store_states = True\n \n #\n # Initialize weights and biases\n #\n \n # Turn W inits into lists [forward_pass, backward_pass]\n W_ci, W_ig, W_og, W_fg = [v[:2] if isinstance(v, list) else [v, v] for v in [W_ci, W_ig, W_og, W_fg]]\n \n # Make W and b tf variables\n W_ci, W_ig, W_og, W_fg = [\n [tofov(v[0], shape=[self.incoming_shape[-1], n_units], var_params=dict(name=n + '_fwd')),\n tofov(v[1], shape=[n_units, n_units], var_params=dict(name=n + '_bwd'))]\n for v, n in zip([W_ci, W_ig, W_og, W_fg], ['W_ci', 'W_ig', 'W_og', 'W_fg'])]\n b_ci, b_ig, b_og, b_fg = [tofov(v, shape=[n_units], var_params=dict(name=n)) for v, n in\n zip([b_ci, b_ig, b_og, b_fg], ['b_ci', 'b_ig', 'b_og', 'b_fg'])]\n \n # Pack weights for fwd and bwd connections\n W_fwd_conc = tf.concat(axis=1, values=[W[0] for W in [W_ci, W_ig, W_og, W_fg]])\n W_bwd_conc = tf.concat(axis=1, values=[W[1] for W in [W_ci, W_ig, W_og, W_fg]])\n \n if not forgetgate:\n print(\"Warning: Setting forgetgate to 0 has not been tested yet, please set the W and b manually \"\n \"to not-trainable tensorflow variables!\")\n \n def a_fg(x):\n return tf.ones(x.get_shape().as_list())\n \n # Initialize bias for tickersteps\n if tickerstep_biases is not None:\n self.W_tickers = OrderedDict(zip_longest(self.lstm_inlets,\n [tofov(tickerstep_biases, shape=[n_units],\n var_params=dict(name='W_tickers_' + g,\n trainable=learn_tickerstep_biases))\n for g in self.lstm_inlets]))\n else:\n self.W_tickers = None\n \n #\n # Create mask for output dropout\n # apply dropout to n_units dimension of outputs, keeping dropout mask the same for all samples,\n # sequence positions, and pixel coordinates\n #\n output_shape = self.get_output_shape()\n if output_dropout:\n out_do_mask = tf.ones(shape=[output_shape[0], output_shape[-1]],\n dtype=tf.float32)\n out_do_mask = tf.nn.dropout(out_do_mask, keep_prob=1. 
- output_dropout,\n noise_shape=[1, output_shape[-1]])\n \n def out_do(x):\n \"\"\"Function for applying dropout mask to outputs\"\"\"\n if output_dropout:\n return out_do_mask * x\n else:\n return x\n \n # Redefine a_out to include dropout (sneaky, sneaky)\n a_out_nodropout = a_out\n \n def a_out(x):\n return a_out_nodropout(out_do(x))\n \n #\n # Handle initializations for h (hidden states) and c (cell states) as Variable\n #\n h_init = out_do(tofov(h_init, shape=[output_shape[0], output_shape[-1]],\n var_params=dict(name='h_init', trainable=learn_h_init)))\n c_init = tofov(c_init, shape=[output_shape[0], output_shape[-1]],\n var_params=dict(name='h_init', trainable=learn_c_init))\n \n # Initialize lists to store LSTM activations and cell states later\n h = [h_init]\n c = [c_init]\n \n self.precomp_fwds = precomp_fwds\n self.store_states = store_states\n self.return_states = return_states\n \n self.W_fwd = OrderedDict(zip(self.lstm_inlets, [W[0] for W in [W_ci, W_ig, W_og, W_fg]]))\n self.W_bwd = OrderedDict(zip(self.lstm_inlets, [W[1] for W in [W_ci, W_ig, W_og, W_fg]]))\n \n self.W_fwd_conc = W_fwd_conc\n self.W_bwd_conc = W_bwd_conc\n self.a = OrderedDict(zip(self.lstm_inlets, [a_ci, a_ig, a_og, a_fg]))\n self.a['out'] = a_out\n self.b = OrderedDict(zip(self.lstm_inlets, [b_ci, b_ig, b_og, b_fg]))\n self.h = h\n self.c = c\n self.external_rec = None\n \n self.ig = []\n self.og = []\n self.ci = []\n self.fg = []\n \n self.out = tf.expand_dims(h_init, 1)\n self.name = name", "def extract_lstm_minus_feature(hidden_state, i, j):\n seq_len, bs, hidden_size = hidden_state.size()\n assert hidden_size % 2 == 0\n split_point = hidden_size // 2\n hidden_f = hidden_state[j + 1, :, :split_point] - hidden_state[i, :, :split_point]\n hidden_b = (\n hidden_state[i + 1, :, split_point:] - hidden_state[j + 2, :, split_point:]\n )\n span_v = torch.cat([hidden_f, hidden_b], dim=-1)\n return span_v", "def forward(self, x, hidden):\n emb_x = self.emb_layer(x)\n lstm_out, hidden = self.lstm(emb_x, hidden)\n if self.bidirectional:\n # separate to forward and backward\n # following code reshapes LSTM output to:\n # (batch size, seq length, num directions, hidden dimensions)\n # where direction '0' is forward and direction '1' is backward\n lstm_out = lstm_out.contiguous().view(-1, self.seq_len, 2, self.hidden_dim)\n # get backward output in first node\n lstm_out_bw = lstm_out[:, 0, 1, :]\n # get forward output in last node\n lstm_out_fw = lstm_out[:, -1, 0, :]\n # we may simple concatenate forward & backward outputs,\n # or add them, multiply or average; in this case i used average\n lstm_out = torch.add(input=lstm_out_bw, alpha=1, other=lstm_out_fw)\n lstm_out = torch.div(lstm_out, 2)\n else:\n lstm_out = lstm_out[:, -1]\n \n assert lstm_out.shape[-1] == self.hidden_dim, (lstm_out.shape, self.hidden_dim)\n lstm_out = lstm_out.contiguous().view(-1, self.hidden_dim)\n \n d_out = self.dropout(lstm_out)\n fc_out = self.output_layer(d_out)\n sig_out = torch.sigmoid(fc_out)\n \n # return last sigmoid output and hidden state\n return sig_out, hidden", "def __init__(self, incoming, n_units,\n W_ci=tf.zeros, W_ig=tf.zeros, W_og=tf.zeros, W_fg=tf.zeros,\n b_ci=tf.zeros, b_ig=tf.zeros, b_og=tf.zeros, b_fg=tf.zeros,\n a_ci=tf.tanh, a_ig=tf.sigmoid, a_og=tf.sigmoid, a_fg=tf.sigmoid, a_out=tf.identity,\n c_init=tf.zeros, h_init=tf.zeros, learn_c_init=False, learn_h_init=False, forgetgate=True,\n output_dropout=False, store_states=False, return_states=False, precomp_fwds=False,\n tickerstep_biases=None, 
learn_tickerstep_biases=True, name='LSTM'):\n super(LSTMLayerGetNetInput, self).__init__()\n with tf.variable_scope(name) as self.layer_scope:\n self.incoming, self.incoming_shape = get_input(incoming)\n self.n_units = n_units\n self.lstm_inlets = ['ci', 'ig', 'og', 'fg']\n if return_states:\n store_states = True\n \n #\n # Initialize weights and biases\n #\n \n # Turn W inits into lists [forward_pass, backward_pass]\n W_ci, W_ig, W_og, W_fg = [v[:2] if isinstance(v, list) else [v, v] for v in [W_ci, W_ig, W_og, W_fg]]\n \n # Make W and b tf variables\n W_ci, W_ig, W_og, W_fg = [\n [tofov(v[0], shape=[self.incoming_shape[-1], n_units], var_params=dict(name=n + '_fwd')),\n tofov(v[1], shape=[n_units, n_units], var_params=dict(name=n + '_bwd'))]\n for v, n in zip([W_ci, W_ig, W_og, W_fg], ['W_ci', 'W_ig', 'W_og', 'W_fg'])]\n b_ci, b_ig, b_og, b_fg = [tofov(v, shape=[n_units], var_params=dict(name=n)) for v, n in\n zip([b_ci, b_ig, b_og, b_fg], ['b_ci', 'b_ig', 'b_og', 'b_fg'])]\n \n # Pack weights for fwd and bwd connections\n W_fwd_conc = tf.concat(axis=1, values=[W[0] for W in [W_ci, W_ig, W_og, W_fg]])\n W_bwd_conc = tf.concat(axis=1, values=[W[1] for W in [W_ci, W_ig, W_og, W_fg]])\n \n if not forgetgate:\n print(\"Warning: Setting forgetgate to 0 has not been tested yet, please set the W and b manually \"\n \"to not-trainable tensorflow variables!\")\n \n def a_fg(x):\n return tf.ones(x.get_shape().as_list())\n \n # Initialize bias for tickersteps\n if tickerstep_biases is not None:\n self.W_tickers = OrderedDict(zip_longest(self.lstm_inlets,\n [tofov(tickerstep_biases, shape=[n_units],\n var_params=dict(name='W_tickers_' + g,\n trainable=learn_tickerstep_biases))\n for g in self.lstm_inlets]))\n else:\n self.W_tickers = None\n \n #\n # Create mask for output dropout\n # apply dropout to n_units dimension of outputs, keeping dropout mask the same for all samples,\n # sequence positions, and pixel coordinates\n #\n output_shape = self.get_output_shape()\n if output_dropout:\n out_do_mask = tf.ones(shape=[output_shape[0], output_shape[-1]],\n dtype=tf.float32)\n out_do_mask = tf.nn.dropout(out_do_mask, keep_prob=1. 
- output_dropout,\n noise_shape=[1, output_shape[-1]])\n \n def out_do(x):\n \"\"\"Function for applying dropout mask to outputs\"\"\"\n if output_dropout:\n return out_do_mask * x\n else:\n return x\n \n # Redefine a_out to include dropout (sneaky, sneaky)\n a_out_nodropout = a_out\n \n def a_out(x):\n return a_out_nodropout(out_do(x))\n \n #\n # Handle initializations for h (hidden states) and c (cell states) as Variable\n #\n h_init = out_do(tofov(h_init, shape=[output_shape[0], output_shape[-1]],\n var_params=dict(name='h_init', trainable=learn_h_init)))\n c_init = tofov(c_init, shape=[output_shape[0], output_shape[-1]],\n var_params=dict(name='h_init', trainable=learn_c_init))\n \n # Initialize lists to store LSTM activations and cell states later\n h = [h_init]\n c = [c_init]\n \n self.precomp_fwds = precomp_fwds\n self.store_states = store_states\n self.return_states = return_states\n \n self.W_fwd = OrderedDict(zip(self.lstm_inlets, [W[0] for W in [W_ci, W_ig, W_og, W_fg]]))\n self.W_bwd = OrderedDict(zip(self.lstm_inlets, [W[1] for W in [W_ci, W_ig, W_og, W_fg]]))\n \n self.W_fwd_conc = W_fwd_conc\n self.W_bwd_conc = W_bwd_conc\n self.a = OrderedDict(zip(self.lstm_inlets, [a_ci, a_ig, a_og, a_fg]))\n self.a['out'] = a_out\n self.b = OrderedDict(zip(self.lstm_inlets, [b_ci, b_ig, b_og, b_fg]))\n self.h = h\n self.c = c\n self.external_rec = None\n \n self.ig = []\n self.og = []\n self.ci = []\n self.fg = []\n \n self.out = tf.expand_dims(h_init, 1)\n self.name = name\n \n self.cur_net_fwd = dot_product(tf.zeros(self.incoming_shape[:1] + self.incoming_shape[2:]),\n tf.zeros(self.W_fwd_conc.shape.as_list()))", "def lstm_cell_hidden(mprev, cprev, node_dim, attention_m=False):\n\n # Input Gate\n m_nodes = node_dim\n if attention_m:\n m_nodes = 2 * node_dim\n im = Variable(torch.rand(m_nodes,node_dim))\n ib = Variable(torch.zeros(1,node_dim))\n i_g = torch.sigmoid(torch.matmul(mprev,im) + ib)\n \n #Forget Gate\n fm = Variable(torch.rand(m_nodes,node_dim))\n fb = Variable(torch.zeros(1,node_dim))\n f_g = torch.sigmoid(torch.matmul(mprev,fm) + fb)\n \n #Cell\n cm = Variable(torch.rand(m_nodes,node_dim))\n cb = Variable(torch.zeros(1,node_dim))\n cprime = torch.sigmoid(torch.matmul(mprev,cm) + cb)\n c = f_g * cprev + i_g * torch.tanh(cprime)\n \n #Output Gate\n om = Variable(torch.rand(m_nodes,node_dim))\n ob = Variable(torch.zeros(1,node_dim))\n o_g = torch.sigmoid(torch.matmul(mprev,om) + ob)\n m = o_g * torch.tanh(c)\n return m,c", "def generate_lstm(\n input_seqs,\n hidden_state,\n cell_state,\n w_inp,\n w_hid,\n b_inp,\n b_hid,\n f_act,\n g_act,\n h_act,\n backwards=False,\n ):\n\n h_list = []\n seq_length = len(input_seqs)\n for i in range(seq_length):\n step = input_seqs[i] if not backwards else input_seqs[seq_length - (i + 1)]\n step = _op.squeeze(step, axis=[0])\n gates = _op.nn.dense(step, w_inp) + _op.nn.dense(hidden_state, w_hid)\n if b_inp is not None:\n gates += b_inp\n if b_hid is not None:\n gates += b_hid\n i, f, c, o = _op.split(gates, 4, axis=-1)\n\n i = f_act(i)\n f = f_act(f)\n\n c = g_act(c)\n C = f * cell_state + i * c\n\n o = f_act(o)\n\n H = o * h_act(C)\n\n hidden_state = H\n cell_state = C\n h_list.append(_op.expand_dims(H, axis=0))\n\n if backwards:\n h_list = h_list[::-1]\n\n # Concatenate outputs and add back in direction axis.\n concatenated = _op.concatenate(h_list, 0)\n output = _op.expand_dims(concatenated, axis=1)\n hidden_state = _op.expand_dims(hidden_state, axis=0)\n cell_state = _op.expand_dims(cell_state, axis=0)\n\n return output, hidden_state, 
cell_state", "def __init__(self, input_dim, hidden_dim, output_dim):\r\n super(LstmEstimator, self).__init__()\r\n \r\n # The LSTM takes track features as inputs, and outputs hidden states\r\n # with dimensionality hidden_dim\r\n self.lstm = nn.LSTM(input_dim, hidden_dim)\r\n \r\n self.hidden2target = nn.Linear(hidden_dim, output_dim)", "def lstm_cell():\n if 'reuse' in inspect.getargspec(\n tf.contrib.rnn.BasicLSTMCell.__init__).args:\n return tf.contrib.rnn.BasicLSTMCell(\n args.hidden_size, input_size=args.embedding_size, forget_bias=0.0, state_is_tuple=True,\n reuse=tf.get_variable_scope().reuse)\n else:\n return tf.contrib.rnn.BasicLSTMCell(\n args.hidden_size, input_size=args.embedding_size, forget_bias=0.0, state_is_tuple=True)", "def create_LSTM_LSTM_model(feats2d, shapes, model_settings, is_training):\n\n if is_training:\n dropout_prob = model_settings['dropout_prob'] \n\n # Get dimensions\n lstm_size = model_settings['lstm_size']\n\n batch_size = tf.shape(feats2d)[0] \n feats2d = tf.reshape(feats2d, shape=[batch_size,-1,model_settings['feature_width']]) # features are of shape [max seq length for batch, 40]\n seq_lengths = shapes[:,0] # all shapes are [seq_length, 40], we extract seq_length\n\n # First LSTM \n\n # LSTM cells\n cell_fw = tf.contrib.rnn.LSTMCell(lstm_size, state_is_tuple=True)\n cell_bw = tf.contrib.rnn.LSTMCell(lstm_size, state_is_tuple=True)\n\n # Bi-directional RNN (+ Dropout)\n (output_fw, output_bw), (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, feats2d, \n sequence_length=seq_lengths, \n dtype=tf.float32)\n\n # TODO: make predictions after every 64 time slices\n\n concat_rnn = tf.concat([state_fw[0], state_bw[0]], axis=1)\n\n if is_training:\n first_dropout = tf.nn.dropout(concat_rnn, dropout_prob)\n else:\n first_dropout = concat_rnn\n\n # Second LSTM \n # TODO\n\n # Logits Layer\n num_classes = model_settings['num_classes']\n logits = tf.layers.dense(inputs=first_dropout, units=num_classes)\n \n if is_training:\n return logits, dropout_prob\n else:\n return logits", "def lstm_cell(inputs, **kwargs):\n if context.executing_eagerly():\n return OpLib.execute('LSTMCell', inputs, outputs=[None, None])\n return OpLib.add('LSTMCell', num_outputs=2, **kwargs)", "def init_hidden(self):\n if isinstance(self.rnn, nn.GRU):\n return torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device)\n elif isinstance(self.rnn, nn.LSTM):\n return (torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device),\n torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device))", "def init_hidden(self):\n if isinstance(self.rnn, nn.GRU):\n return torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device)\n elif isinstance(self.rnn, nn.LSTM):\n return (torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device),\n torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device))", "def build_lstm_cell(num_units, dropout):\n cell = tf.nn.rnn_cell.LSTMCell(num_units)\n if dropout:\n result = tf.nn.rnn_cell.DropoutWrapper(cell,\n output_keep_prob=1-dropout)\n return result", "def variable_recurrent(self, x, h, seq_length, w_ih, w_hh, b_ih, b_hh):\n time_step = x.shape[0]\n h_t = h\n if self.is_lstm:\n hidden_size = h[0].shape[-1]\n zero_output = P.ZerosLike()(h_t[0])\n else:\n hidden_size = h.shape[-1]\n zero_output = P.ZerosLike()(h_t)\n seq_length = P.Cast()(seq_length, mstype.float32)\n seq_length = 
P.BroadcastTo((hidden_size, -1))(seq_length)\n seq_length = P.Cast()(seq_length, mstype.int32)\n seq_length = P.Transpose()(seq_length, (1, 0))\n\n outputs = []\n state_t = h_t\n t = 0\n while t < time_step:\n x_t = x[t:t + 1:1]\n x_t = P.Squeeze(0)(x_t)\n h_t = self.cell(x_t, state_t, w_ih, w_hh, b_ih, b_hh)\n seq_cond = seq_length > t\n if self.is_lstm:\n state_t_0 = P.Select()(seq_cond, h_t[0], state_t[0])\n state_t_1 = P.Select()(seq_cond, h_t[1], state_t[1])\n output = P.Select()(seq_cond, h_t[0], zero_output)\n state_t = (state_t_0, state_t_1)\n else:\n state_t = P.Select()(seq_cond, h_t, state_t)\n output = P.Select()(seq_cond, h_t, zero_output)\n outputs.append(output)\n t += 1\n outputs = P.Stack()(outputs)\n return outputs, state_t", "def run_single_step(self): \n contexts = tf.placeholder(tf.float32, [self.batch_size, self.num_ctx, self.dim_ctx]) \n last_memory = tf.placeholder(tf.float32, [self.batch_size, self.dim_hidden])\n last_output = tf.placeholder(tf.float32, [self.batch_size, self.dim_hidden])\n last_word = tf.placeholder(tf.int32, [self.batch_size])\n initial_step = tf.placeholder(tf.bool)\n\n context_mean = tf.reduce_mean(contexts, 1) \n\n lstm = tf.nn.rnn_cell.LSTMCell(self.dim_hidden, initializer=tf.random_normal_initializer(stddev=0.033)) \n\n # Attention mechanism\n alpha = self.attend(contexts, last_output) \n weighted_context = tf.cond(initial_step,\n lambda: tf.identity(context_mean),\n lambda: tf.reduce_sum(contexts*tf.expand_dims(alpha, 2), 1))\n\n word_emb = tf.cond(initial_step, \n lambda: tf.zeros([self.batch_size, self.dim_embed]), \n lambda: tf.nn.embedding_lookup(self.emb_w, last_word))\n \n # Apply the LSTM\n with tf.variable_scope(\"LSTM\"):\n last_state = last_memory, last_output\n output, state = lstm(tf.concat([weighted_context, word_emb], 1), last_state)\n memory, _ = state\n \n # Compute the logits and probs\n expanded_output = tf.concat([output, weighted_context, word_emb], 1)\n\n logits1 = fully_connected(expanded_output, self.dim_dec, 'dec_fc')\n logits1 = nonlinear(logits1, 'tanh')\n logits2 = tf.nn.xw_plus_b(logits1, self.dec_w, self.dec_b)\n probs = tf.nn.softmax(logits2) \n logprobs = tf.log(probs)\n\n tf.get_variable_scope().reuse_variables() \n\n self.contexts = contexts\n self.last_memory = last_memory\n self.last_output = last_output\n self.last_word = last_word\n self.initial_step = initial_step\n\n self.memory = memory\n self.output = output\n self.logprobs = logprobs", "def recurrent(self, x, h_0, w_ih, w_hh, b_ih, b_hh):\n time_step = x.shape[0]\n outputs = []\n t = 0\n h = h_0\n while t < time_step:\n x_t = x[t:t + 1:1]\n x_t = P.Squeeze(0)(x_t)\n h = self.cell(x_t, h, w_ih, w_hh, b_ih, b_hh)\n if self.is_lstm:\n outputs.append(h[0])\n else:\n outputs.append(h)\n t += 1\n outputs = P.Stack()(outputs)\n return outputs, h", "def __init__(self, incoming, n_units,\n W_ci=tf.zeros, W_ig=tf.zeros, W_og=tf.zeros, W_fg=tf.zeros,\n b_ci=tf.zeros, b_ig=tf.zeros, b_og=tf.zeros, b_fg=tf.zeros,\n a_ci=tf.tanh, a_ig=tf.sigmoid, a_og=tf.sigmoid, a_fg=tf.sigmoid, a_out=tf.identity,\n c_init=tf.zeros, h_init=tf.zeros, learn_c_init=False, learn_h_init=False, forgetgate=True,\n output_dropout=False, store_states=False, return_states=False, precomp_fwds=False,\n tickerstep_biases=None, learn_tickerstep_biases=True, name='LSTM'):\n super(LSTMLayerSetNetInput, self).__init__()\n with tf.variable_scope(name) as self.layer_scope:\n self.incoming, self.incoming_shape = get_input(incoming)\n self.n_units = n_units\n self.lstm_inlets = ['ci', 'ig', 'og', 
'fg']\n if return_states:\n store_states = True\n \n # Make W and b tf variables\n W_ci, W_ig, W_og, W_fg = [\n tofov(v, shape=[n_units, n_units], var_params=dict(name=n + '_bwd'))\n for v, n in zip([W_ci, W_ig, W_og, W_fg], ['W_ci', 'W_ig', 'W_og', 'W_fg'])]\n b_ci, b_ig, b_og, b_fg = [tofov(v, shape=[n_units], var_params=dict(name=n)) for v, n in\n zip([b_ci, b_ig, b_og, b_fg], ['b_ci', 'b_ig', 'b_og', 'b_fg'])]\n \n # Pack weights for bwd connections\n W_bwd_conc = tf.concat(axis=1, values=[W_ci, W_ig, W_og, W_fg])\n \n if not forgetgate:\n print(\"Warning: Setting forgetgate to 0 has not been tested yet, please set the W and b manually \"\n \"to not-trainable tensorflow variables!\")\n \n def a_fg(x):\n return tf.ones(x.get_shape().as_list())\n \n # Initialize bias for tickersteps\n if tickerstep_biases is not None:\n self.W_tickers = OrderedDict(zip_longest(self.lstm_inlets,\n [tofov(tickerstep_biases, shape=[n_units],\n var_params=dict(name='W_tickers_' + g,\n trainable=learn_tickerstep_biases))\n for g in self.lstm_inlets]))\n else:\n self.W_tickers = None\n \n #\n # Create mask for output dropout\n # apply dropout to n_units dimension of outputs, keeping dropout mask the same for all samples,\n # sequence positions, and pixel coordinates\n #\n output_shape = self.get_output_shape()\n if output_dropout:\n out_do_mask = tf.ones(shape=[output_shape[0], output_shape[-1]],\n dtype=tf.float32)\n out_do_mask = tf.nn.dropout(out_do_mask, keep_prob=1. - output_dropout,\n noise_shape=[1, output_shape[-1]])\n \n def out_do(x):\n \"\"\"Function for applying dropout mask to outputs\"\"\"\n if output_dropout:\n return out_do_mask * x\n else:\n return x\n \n # Redefine a_out to include dropout (sneaky, sneaky)\n a_out_nodropout = a_out\n \n def a_out(x):\n return a_out_nodropout(out_do(x))\n \n #\n # Handle initializations for h (hidden states) and c (cell states) as Variable\n #\n h_init = out_do(tofov(h_init, shape=[output_shape[0], output_shape[-1]],\n var_params=dict(name='h_init', trainable=learn_h_init)))\n c_init = tofov(c_init, shape=[output_shape[0], output_shape[-1]],\n var_params=dict(name='h_init', trainable=learn_c_init))\n \n # Initialize lists to store LSTM activations and cell states later\n h = [h_init]\n c = [c_init]\n \n self.precomp_fwds = precomp_fwds\n self.store_states = store_states\n self.return_states = return_states\n \n self.W_fwd = OrderedDict(zip(self.lstm_inlets, [None, None, None, None]))\n self.W_bwd = OrderedDict(zip(self.lstm_inlets, [W_ci, W_ig, W_og, W_fg]))\n \n self.W_fwd_conc = None\n self.W_bwd_conc = W_bwd_conc\n self.a = OrderedDict(zip(self.lstm_inlets, [a_ci, a_ig, a_og, a_fg]))\n self.a['out'] = a_out\n self.b = OrderedDict(zip(self.lstm_inlets, [b_ci, b_ig, b_og, b_fg]))\n self.h = h\n self.c = c\n self.external_rec = None\n \n self.ig = []\n self.og = []\n self.ci = []\n self.fg = []\n \n self.out = tf.expand_dims(h_init, 1)\n self.name = name", "def extract_hidden_states(self, output):\n \n # Extracting the forward and backward hidden states from the last BiLSTM layer\n # output (batch_size, sequence length, 2 * hidden dim)\n output_fw = output[:,:,0:self._hidden_size]\n output_bw = output[:,:,self._hidden_size:]\n \n hidden_states = torch.cat((output_fw, output_bw),-1)\n \n return hidden_states", "def LSTM_train(X_train, Y_train, X_dev, Y_dev, R_train, R_dev, hyperparams):", "def init_hidden(self):\n if isinstance(self.rnn, nn.GRU) or isinstance(self.rnn, nn.RNN):\n return torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, 
device=device)\n elif isinstance(self.rnn, nn.LSTM):\n return (torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device),\n torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device))", "def init_hidden(self):\n if isinstance(self.rnn, nn.GRU) or isinstance(self.rnn, nn.RNN):\n return torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device)\n elif isinstance(self.rnn, nn.LSTM):\n return (torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device),\n torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device))", "def apply_lstm(x, seq_len):\n return cudnn_layers.stacked_bilstm(\n input_emb=x,\n input_len=seq_len,\n hidden_size=FLAGS.lstm_dim,\n num_layers=1,\n dropout_ratio=0.0,\n mode=tf_estimator.ModeKeys.TRAIN,\n use_cudnn=None)", "def forward(self, src, device):\n\n src = torch.as_tensor(src).float().to(device)\n\n\n # Set initial hidden and cell states \n h0 = torch.zeros(self.num_layers, src.shape[0], self.hidden_dim).to(device)\n c0 = torch.zeros(self.num_layers, src.shape[0], self.hidden_dim).to(device)\n\n # shape of lstm_out: [batch_size, input_size, hidden_dim]\n # shape of self.hidden: (a, b), where a and b both have shape (num_layers, batch_size, hidden_dim).\n lstm_out, self.hidden = self.lstm(src, (h0, c0)) \n \n # Only take the output from the final timetep\n # Can pass on the entirety of lstm_out to the next layer if it is a seq2seq prediction\n #print(lstm_out.size())\n y_pred = self.linear(lstm_out[:, -1, :].view(src.shape[0], -1))\n return y_pred", "def bi_lstm(X_inputs):\n # X_inputs.shape = [batchsize, timestep_size] -> inputs.shape = [batchsize, timestep_size, embedding_size]\n log(\"embedding\")\n log(embedding)\n log(X_inputs)\n log(\"X_inputs\")\n inputs = tf.nn.embedding_lookup(embedding, X_inputs) \n \n # ** 1.构建前向后向多层 LSTM\n cell_fw = rnn.MultiRNNCell([lstm_cell() for _ in range(layer_num)], state_is_tuple=True)\n cell_bw = rnn.MultiRNNCell([lstm_cell() for _ in range(layer_num)], state_is_tuple=True)\n \n # ** 2.初始状态\n initial_state_fw = cell_fw.zero_state(batch_size, tf.float32)\n initial_state_bw = cell_bw.zero_state(batch_size, tf.float32) \n \n # 下面两部分是等价的\n # **************************************************************\n # ** 把 inputs 处理成 rnn.static_bidirectional_rnn 的要求形式\n # ** 文档说明\n # inputs: A length T list of inputs, each a tensor of shape\n # [batch_size, input_size], or a nested tuple of such elements.\n # *************************************************************\n # Unstack to get a list of 'n_steps' tensors of shape (batch_size, n_input)\n # inputs.shape = [batchsize, timestep_size, embedding_size] -> timestep_size tensor, each_tensor.shape = [batchsize, embedding_size]\n # inputs = tf.unstack(inputs, timestep_size, 1)\n # ** 3.bi-lstm 计算(tf封装) 一般采用下面 static_bidirectional_rnn 函数调用。\n # 但是为了理解计算的细节,所以把后面的这段代码进行展开自己实现了一遍。\n# try:\n# outputs, _, _ = rnn.static_bidirectional_rnn(cell_fw, cell_bw, inputs, \n# initial_state_fw = initial_state_fw, initial_state_bw = initial_state_bw, dtype=tf.float32)\n# except Exception: # Old TensorFlow version only returns outputs not states\n# outputs = rnn.static_bidirectional_rnn(cell_fw, cell_bw, inputs, \n# initial_state_fw = initial_state_fw, initial_state_bw = initial_state_bw, dtype=tf.float32)\n# output = tf.reshape(tf.concat(outputs, 1), [-1, hidden_size * 2])\n # ***********************************************************\n \n # 
***********************************************************\n # ** 3. bi-lstm 计算(展开)\n with tf.variable_scope('bidirectional_rnn'):\n # *** 下面,两个网络是分别计算 output 和 state \n # Forward direction\n outputs_fw = list()\n state_fw = initial_state_fw\n with tf.variable_scope('fw'):\n for timestep in range(timestep_size):\n if timestep > 0:\n tf.get_variable_scope().reuse_variables()\n (output_fw, state_fw) = cell_fw(inputs[:, timestep, :], state_fw)\n outputs_fw.append(output_fw)\n \n # backward direction\n outputs_bw = list()\n state_bw = initial_state_bw\n with tf.variable_scope('bw') as bw_scope:\n inputs = tf.reverse(inputs, [1])\n for timestep in range(timestep_size):\n if timestep > 0:\n tf.get_variable_scope().reuse_variables()\n (output_bw, state_bw) = cell_bw(inputs[:, timestep, :], state_bw)\n outputs_bw.append(output_bw)\n # *** 然后把 output_bw 在 timestep 维度进行翻转\n # outputs_bw.shape = [timestep_size, batch_size, hidden_size]\n outputs_bw = tf.reverse(outputs_bw, [0])\n # 把两个oupputs 拼成 [timestep_size, batch_size, hidden_size*2]\n output = tf.concat([outputs_fw, outputs_bw], 2)\n output = tf.transpose(output, perm=[1,0,2])\n output = tf.reshape(output, [-1, hidden_size*2])\n # ***********************************************************\n return output # [-1, hidden_size*2]", "def lstm(self):\n # Model.\n model = Sequential()\n model.add(LSTM(2048, return_sequences=True,\n input_shape=self.input_shape,\n dropout=0.0))\n #model.add(Flatten()) #qiao_added\n # model.add(Dense(1024, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(2048, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Flatten())\n model.add(Dense(self.nb_classes, activation='softmax'))\n return model", "def get_initial_hx(self, input_seq, hidden_state):\n num_directions = 2 if self.lstm.bidirectional else 1\n # hidden state\n hidden = hidden_state.view(self.lstm.num_layers * num_directions, len(hidden_state), -1)\n # cell state\n c_zeros = torch.zeros(self.lstm.num_layers * num_directions,\n input_seq.size(0), self.lstm.hidden_size,\n dtype=input_seq.dtype, device=input_seq.device)\n return hidden, c_zeros", "def init_hidden(self, batch_size, device):\n if self.mode == 'LSTM':\n return (torch.ones((1+self.bidirectional , batch_size, self.hidden_dim), device=device),\n torch.ones((1+self.bidirectional, batch_size, self.hidden_dim), device=device))\n else:\n return torch.ones((1+self.bidirectional, batch_size, self.hidden_dim), device=device)", "def get_rnn_hidden_state(h):\n return h if not isinstance(h, tuple) else h[0]", "def biLSTM(inputs, dim, seq_len, name, cell_type=\"cudnn\", cells=None, is_training=True, dropout_rate=0.0):\r\n if cell_type==\"cudnn\":\r\n with tf.variable_scope(name,reuse=tf.AUTO_REUSE) as scope:\r\n hidden_states, cell_states = bi_cudnn_rnn_encoder('lstm', dim, 1, dropout_rate, inputs, seq_len, is_training)\r\n else:\r\n with tf.name_scope(name) as scope:\r\n with tf.variable_scope('forward' + name) as scope:\r\n if cell_type == \"lstm\":\r\n lstm_fwd = tf.contrib.rnn.LSTMCell(num_units=dim)\r\n with tf.variable_scope('backward' + name) as scope:\r\n if cell_type == \"lstm\":\r\n lstm_bwd = tf.contrib.rnn.LSTMCell(num_units=dim)\r\n \r\n with tf.variable_scope(name+'blstm', reuse=tf.AUTO_REUSE):\r\n hidden_states, cell_states = tf.nn.bidirectional_dynamic_rnn(cell_fw=lstm_fwd, cell_bw=lstm_bwd, inputs=inputs, sequence_length=seq_len, dtype=tf.float32, scope=name)\r\n\r\n return hidden_states, cell_states", "def _extract_states(self, state):\n conf = self._config\n\n # c_prev is 
`m` (cell value), and\n # m_prev is `h` (previous output) in the paper.\n # Keeping c and m here for consistency with the codebase\n c_prev = [None] * conf.num_dims\n m_prev = [None] * conf.num_dims\n\n # for LSTM : state = memory cell + output, hence cell_output_size > 0\n # for GRU/RNN: state = output (whose size is equal to _num_units),\n # hence cell_output_size = 0\n total_cell_state_size = self._cell_state_size()\n cell_output_size = total_cell_state_size - conf.num_units\n\n if self._state_is_tuple:\n if len(conf.recurrents) != len(state):\n raise ValueError('Expected state as a tuple of {} '\n 'element'.format(len(conf.recurrents)))\n\n for recurrent_dim, recurrent_state in zip(conf.recurrents, state):\n if cell_output_size > 0:\n c_prev[recurrent_dim], m_prev[recurrent_dim] = recurrent_state\n else:\n m_prev[recurrent_dim] = recurrent_state\n else:\n for recurrent_dim, start_idx in zip(conf.recurrents,\n range(0, self.state_size,\n total_cell_state_size)):\n if cell_output_size > 0:\n c_prev[recurrent_dim] = array_ops.slice(state, [0, start_idx],\n [-1, conf.num_units])\n m_prev[recurrent_dim] = array_ops.slice(\n state, [0, start_idx + conf.num_units], [-1, cell_output_size])\n else:\n m_prev[recurrent_dim] = array_ops.slice(state, [0, start_idx],\n [-1, conf.num_units])\n return c_prev, m_prev, cell_output_size", "def cudnn_lstm_state(lstm_cell_state):\n h = tf.stack([s.h for s in lstm_cell_state])\n c = tf.stack([s.c for s in lstm_cell_state])\n return (h, c)", "def __call__(self, inputs, state):\n\n output, next_state = super(BinaryLSTM, self).__call__(inputs, state)\n with tf.variable_scope(self._cell_name):\n\n binary_cell_state = Bsn_layer.bsn_layer(next_state[0],\n stochastic_method=self.stochastic_method,\n preprocessing_method=self.preprocessing_method,\n tf_graph=self._tf_graph,\n slope_tensor=self._slope_tensor,\n loss_op_name='loss_by_example',\n name='binary_layer',\n stochastic_gradient_estimator=bsn_literals.STRAIGHT_THROUGH)\n binary_hidden_state = Bsn_layer.bsn_layer(next_state[1],\n stochastic_method=self.stochastic_method,\n preprocessing_method=self.preprocessing_method,\n tf_graph=self._tf_graph,\n slope_tensor=self._slope_tensor,\n loss_op_name='loss_by_example',\n name='binary_layer',\n stochastic_gradient_estimator=bsn_literals.STRAIGHT_THROUGH)\n\n return binary_hidden_state, tf.nn.rnn_cell.LSTMStateTuple(binary_cell_state, binary_hidden_state)", "def forward(self, X, A, beta=1, print_output=False):\n assert X.size(0) == A.size(0) + 1, print('the seq length of X and A are wrong')\n kl_loss = 0 # KL divergence term\n Ell_loss = 0 # expected log likelihood term\n batch_size = X.size(1)\n\n if len(X.size()) != 3:\n print('The input data matrix should be the shape of [seq_length, batch_size, input_dim]')\n\n X = X.to(self.device)\n A = A.to(self.device)\n\n # container\n states = torch.zeros(A.size(0), A.size(1), self.state_size).to(self.device) # [seq-1, batch, state]\n rnn_hiddens = torch.zeros(A.size(0), A.size(1), self.hidden_size).to(self.device) # [seq-1, batch, hidden]\n\n # initialising state and rnn hidden state\n # state = torch.zeros(X.size(1), self.state_size).to(self.device)\n rnn_hidden = self.init_h(X[0]).to(self.device) # [batch, hidden]\n if self.mode == 'LSTM':\n rnn_hidden_c = torch.zeros_like(rnn_hidden).to(self.device) # [batch, hidden]\n\n # temp_prior = self.hidden_prior(rnn_hidden) #[batch, state]\n temp_prior = rnn_hidden\n prior_mean = self.prior_mean(temp_prior) # [batch, state]\n prior_sigma = 
torch.exp(self.prior_sigma(temp_prior)) # [batch, state]\n state = self.reparametrise(prior_mean, prior_sigma) # [batch, state]\n\n # rnn_hidden = torch.zeros(X.size(1), self.hidden_size).to(self.device)\n\n # emission_mean = X[0]\n for t in range(1, X.size()[\n 0]): # for each time step, compute the free energy for each batch of data (start from the second hid state)\n if self.mode == 'LSTM':\n next_state_prior_m, next_state_prior_sigma, rnn_hidden, rnn_hidden_c = self.prior(state,\n A[t - 1].unsqueeze(\n -1),\n rnn_hidden,\n rnn_hidden_c)\n else:\n next_state_prior_m, next_state_prior_sigma, rnn_hidden = self.prior(state, A[t - 1].unsqueeze(-1),\n rnn_hidden)\n\n next_state_post_m, next_state_post_sigma = self.posterior(rnn_hidden, X[t])\n state = self.reparametrise(next_state_post_m, next_state_post_sigma) # [batch, state_size]\n states[t - 1] = state\n rnn_hiddens[t - 1] = rnn_hidden\n next_state_prior = Normal(next_state_prior_m, next_state_prior_sigma)\n next_state_post = Normal(next_state_post_m, next_state_post_sigma)\n\n # kl = kl_divergence(next_state_prior, next_state_post).sum(dim=1) #[batch]\n kl = kl_divergence(next_state_post, next_state_prior).sum(dim=1) # [batch]\n\n kl_loss += kl.mean()\n kl_loss /= A.size(0)\n\n # compute nll\n\n # flatten state\n flatten_states = states.view(-1, self.state_size)\n flatten_rnn_hiddens = rnn_hiddens.view(-1, self.hidden_size)\n flatten_x_mean, flatten_x_sigma = self.obs_model(flatten_states, flatten_rnn_hiddens)\n\n nll = self.batched_gaussian_ll(flatten_x_mean, flatten_x_sigma, X[1:, :, :].reshape(-1, self.output_size))\n nll = nll.mean()\n\n FE = nll - kl_loss\n\n if print_output:\n # print('ELL loss=', Ell_loss, 'KL loss=', kl_loss)\n print('Free energy of this batch = {}. Nll loss = {}. KL div = {}.'.format(float(FE.data)\n , float(nll.data),\n float(kl_loss.data)))\n\n return FE, nll, kl_loss", "def __init__(self, device, input_size, hidden_size, layer_norm=False, chrono_init=False, t_max=10):\n\n super(LSTMCell, self).__init__()\n\n self._device = device\n self._input_size = input_size\n self._hidden_size = hidden_size\n self._layer_norm = layer_norm\n self._chrono_init = chrono_init\n self._t_max = t_max\n\n self._W_x2i = nn.Parameter(torch.Tensor(input_size, hidden_size))\n self._W_h2i = nn.Parameter(torch.Tensor(hidden_size, hidden_size))\n self._W_c2i = nn.Parameter(torch.Tensor(hidden_size))\n self._b_i = nn.Parameter(torch.Tensor(hidden_size))\n \n self._W_x2f = nn.Parameter(torch.Tensor(input_size, hidden_size))\n self._W_h2f = nn.Parameter(torch.Tensor(hidden_size, hidden_size))\n self._W_c2f = nn.Parameter(torch.Tensor(hidden_size))\n self._b_f = nn.Parameter(torch.Tensor(hidden_size))\n \n self._W_x2o = nn.Parameter(torch.Tensor(input_size, hidden_size))\n self._W_h2o = nn.Parameter(torch.Tensor(hidden_size, hidden_size))\n self._W_c2o = nn.Parameter(torch.Tensor(hidden_size))\n self._b_o = nn.Parameter(torch.Tensor(hidden_size))\n \n self._W_x2c = nn.Parameter(torch.Tensor(input_size, hidden_size))\n self._W_h2c = nn.Parameter(torch.Tensor(hidden_size, hidden_size))\n self._b_c = nn.Parameter(torch.Tensor(hidden_size))\n\n if self._layer_norm:\n self._ln_c = nn.LayerNorm(hidden_size)\n self._ln_i = nn.LayerNorm(hidden_size)\n self._ln_f = nn.LayerNorm(hidden_size)\n self._ln_o = nn.LayerNorm(hidden_size)\n self._ln_g = nn.LayerNorm(hidden_size)\n \n self._reset_parameters()", "def lstm_layer(self):\n if self.pooling:\n ret_seq = True\n else:\n ret_seq = False\n ker_in = glorot_uniform(seed=self.seed)\n rec_in = 
Orthogonal(seed=self.seed)\n if self.type_of_weights == \"shared\":\n if self.recurrent == \"bilstm\" or self.recurrent is None:\n out_a = Bidirectional(LSTM(self.hidden_dim,\n input_shape=(self.max_sequence_length, self.embedding_dim,),\n kernel_initializer=ker_in,\n recurrent_initializer=rec_in,\n return_sequences=ret_seq), merge_mode='concat')\n elif self.recurrent == \"lstm\":\n out_a = LSTM(self.hidden_dim,\n input_shape=(self.max_sequence_length, self.embedding_dim,),\n kernel_initializer=ker_in,\n recurrent_initializer=rec_in,\n return_sequences=ret_seq)\n return out_a, out_a\n elif self.type_of_weights == \"separate\":\n if self.recurrent == \"bilstm\" or self.recurrent is None:\n out_a = Bidirectional(LSTM(self.hidden_dim,\n input_shape=(self.max_sequence_length, self.embedding_dim,),\n kernel_initializer=ker_in,\n recurrent_initializer=rec_in,\n return_sequences=ret_seq), merge_mode='concat')\n out_b = Bidirectional(LSTM(self.hidden_dim,\n input_shape=(self.max_sequence_length, self.embedding_dim,),\n kernel_initializer=ker_in,\n recurrent_initializer=rec_in,\n return_sequences=ret_seq), merge_mode='concat')\n elif self.recurrent == \"lstm\":\n out_a = LSTM(self.hidden_dim,\n input_shape=(self.max_sequence_length, self.embedding_dim,),\n kernel_initializer=ker_in,\n recurrent_initializer=rec_in,\n return_sequences=ret_seq)\n out_b = LSTM(self.hidden_dim,\n input_shape=(self.max_sequence_length, self.embedding_dim,),\n kernel_initializer=ker_in,\n recurrent_initializer=rec_in,\n return_sequences=ret_seq)\n return out_a, out_b", "def init_hidden(self, batch_size):\n b = 2 if self.bidirectional else 1\n if self.rnn_type == \"LSTM\":\n h0 = (Variable(torch.zeros(b, batch_size, self.hidden_size)),\n Variable(torch.zeros(b, batch_size, self.hidden_size)))\n h0 = [h0[0].cuda(), h0[1].cuda()] if self.use_cuda else h0\n else:\n h0 = Variable(torch.zeros(b, batch_size, self.hidden_size))\n h0 = h0.cuda() if self.use_cuda else h0\n return h0", "def build_dynamic_rnn(self, cells, lstm_input, lstm_state):\n lstm_output, final_state = tf.nn.dynamic_rnn(cells, lstm_input, initial_state=lstm_state)\n # reshape lstm_output from [batch_size, time_steps, n_units] to [batch_size*time_steps, n_units]\n lstm_output = tf.reshape(lstm_output, [-1, self.num_hidden_units])\n\n return lstm_output, final_state", "def variable_recurrent(self, x, h, seq_length, w_ih, w_hh, b_ih, b_hh, att_score):\n time_step = x.shape[0]\n h_t = h\n if self.is_lstm:\n hidden_size = h[0].shape[-1]\n zero_output = P.ZerosLike()(h_t[0])\n else:\n hidden_size = h.shape[-1]\n zero_output = P.ZerosLike()(h_t)\n seq_length = P.Cast()(seq_length, mstype.float32)\n seq_length = P.BroadcastTo((hidden_size, -1))(seq_length)\n seq_length = P.Cast()(seq_length, mstype.int32)\n seq_length = P.Transpose()(seq_length, (1, 0))\n\n outputs = []\n state_t = h_t\n t = 0\n while t < time_step:\n x_t = x[t:t + 1:1]\n x_t = P.Squeeze(0)(x_t)\n h_t = self.cell(x_t, state_t, w_ih, w_hh, b_ih, b_hh, att_score[t])\n\n seq_cond = seq_length > t\n if self.is_lstm:\n state_t_0 = P.Select()(seq_cond, h_t[0], state_t[0])\n state_t_1 = P.Select()(seq_cond, h_t[1], state_t[1])\n output = P.Select()(seq_cond, h_t[0], zero_output)\n state_t = (state_t_0, state_t_1)\n else:\n state_t = P.Select()(seq_cond, h_t, state_t)\n output = P.Select()(seq_cond, h_t, zero_output)\n outputs.append(output)\n t += 1\n outputs = P.Stack()(outputs)\n return outputs, state_t", "def the_nn_article(input_size, dropout):\n \n class Model(nn.Module):\n def __init__(self):\n 
super(Model, self).__init__()\n \n # Defining layers\n self.hidden_size = 256\n self.first_layer = 512\n self.second_layer = 1024\n self.n_layers = 2\n self.bidirectional = True\n self.dropout = dropout\n \n # RNN Layer\n self.rnn = nn.LSTM(input_size = input_size, hidden_size = self.hidden_size, \n num_layers = self.n_layers, batch_first = True, \n bidirectional = self.bidirectional, dropout = self.dropout)\n \n self.fc1 = nn.Linear(self.first_layer, self.second_layer)\n self.fc2 = nn.Linear(self.second_layer, 3)\n \n def forward(self, x):\n batch_size = x.size(0)\n \n #Initializing hidden state for first input using method defined below\n hidden = self.init_hidden(batch_size, self.hidden_size)\n \n # Find sequence lengths (for packing)\n x_lengths = self.find_lengths(x)\n \n # Pack sequences\n x = torch.nn.utils.rnn.pack_padded_sequence(x, x_lengths, batch_first=True, enforce_sorted=False)\n\n # Run the network\n out, hidden = self.rnn(x, hidden)\n \n # Unpack the sequences again\n out, _ = torch.nn.utils.rnn.pad_packed_sequence(out, batch_first=True)\n\n # Run through the linear layer\n out = F.relu(self.fc1(out))\n out = F.relu(self.fc2(out))\n \n # Perform log_softmax on output (WORSE PERFORMANCE!)\n #x = F.log_softmax(x, dim = 2)\n\n return out, hidden\n \n def init_hidden(self, batch_size, hidden_size):\n # This method generates the first hidden state of zeros which we'll use in the forward pass\n \n hidden = (torch.zeros(2*self.n_layers, batch_size, self.hidden_size),\n torch.zeros(2*self.n_layers, batch_size, self.hidden_size))\n \n return hidden\n \n def find_lengths(self, input_seq):\n # Find seq-lengths of each sequence (used to pack sequences)\n x_lengths = []\n for seq in input_seq:\n for idx, vec in enumerate(seq):\n if sum(vec).item() != 1:\n x_lengths.append(idx)\n break\n if idx == 752:\n x_lengths.append(len(seq)) \n return x_lengths\n \n net = Model()\n return net", "def custom_dynamic_rnn(cell, inputs, inputs_len, initial_state=None):\n batch_size = tf.shape(inputs)[0]\n max_time = tf.shape(inputs)[1]\n\n inputs_ta = tf.TensorArray(dtype=tf.float32, size=max_time)\n inputs_ta = inputs_ta.unstack(tf.transpose(inputs, [1, 0, 2]))\n emit_ta = tf.TensorArray(dtype=tf.float32, dynamic_size=True, size=0)\n t0 = tf.constant(0, dtype=tf.int32)\n if initial_state is not None:\n s0 = initial_state\n else:\n s0 = cell.zero_state(batch_size, dtype=tf.float32)\n f0 = tf.zeros([batch_size], dtype=tf.bool)\n\n def loop_fn(t, prev_s, emit_ta, finished):\n \"\"\"\n the loop function of rnn\n \"\"\"\n cur_x = inputs_ta.read(t)\n scores, cur_state = cell(cur_x, prev_s)\n\n # copy through\n scores = tf.where(finished, tf.zeros_like(scores), scores)\n\n if isinstance(cell, tc.rnn.LSTMCell):\n cur_c, cur_h = cur_state\n prev_c, prev_h = prev_s\n cur_state = tc.rnn.LSTMStateTuple(tf.where(finished, prev_c, cur_c),\n tf.where(finished, prev_h, cur_h))\n else:\n cur_state = tf.where(finished, prev_s, cur_state)\n\n emit_ta = emit_ta.write(t, scores)\n finished = tf.greater_equal(t + 1, inputs_len)\n return [t + 1, cur_state, emit_ta, finished]\n\n _, state, emit_ta, _ = tf.while_loop(\n cond=lambda _1, _2, _3, finished: tf.logical_not(\n tf.reduce_all(finished)),\n body=loop_fn,\n loop_vars=(t0, s0, emit_ta, f0),\n parallel_iterations=32,\n swap_memory=False)\n\n outputs = tf.transpose(emit_ta.stack(), [1, 0, 2])\n return outputs, state", "def bi_lstm(X_inputs):\n # X_inputs.shape = [batchsize, timestep_size] -> inputs.shape = [batchsize, timestep_size, embedding_size]\n inputs = 
tf.nn.embedding_lookup(embedding, X_inputs)\n\n # ** 1.构建前向后向多层 LSTM\n cell_fw = rnn.MultiRNNCell([lstm_cell() for _ in range(layer_num)], state_is_tuple=True)\n cell_bw = rnn.MultiRNNCell([lstm_cell() for _ in range(layer_num)], state_is_tuple=True)\n\n # ** 2.初始状态\n initial_state_fw = cell_fw.zero_state(batch_size, tf.float32)\n initial_state_bw = cell_bw.zero_state(batch_size, tf.float32)\n\n # 下面两部分是等价的\n # **************************************************************\n # ** 把 inputs 处理成 rnn.static_bidirectional_rnn 的要求形式\n # ** 文档说明\n # inputs: A length T list of inputs, each a tensor of shape\n # [batch_size, input_size], or a nested tuple of such elements.\n # *************************************************************\n # Unstack to get a list of 'n_steps' tensors of shape (batch_size, n_input)\n # inputs.shape = [batchsize, timestep_size, embedding_size] -> timestep_size tensor, each_tensor.shape = [batchsize, embedding_size]\n # inputs = tf.unstack(inputs, timestep_size, 1)\n # ** 3.bi-lstm 计算(tf封装) 一般采用下面 static_bidirectional_rnn 函数调用。\n # 但是为了理解计算的细节,所以把后面的这段代码进行展开自己实现了一遍。\n # try:\n # outputs, _, _ = rnn.static_bidirectional_rnn(cell_fw, cell_bw, inputs,\n # initial_state_fw = initial_state_fw, initial_state_bw = initial_state_bw, dtype=tf.float32)\n # except Exception: # Old TensorFlow version only returns outputs not states\n # outputs = rnn.static_bidirectional_rnn(cell_fw, cell_bw, inputs,\n # initial_state_fw = initial_state_fw, initial_state_bw = initial_state_bw, dtype=tf.float32)\n # output = tf.reshape(tf.concat(outputs, 1), [-1, hidden_size * 2])\n # ***********************************************************\n\n # ***********************************************************\n # ** 3. bi-lstm 计算(展开)\n with tf.variable_scope('bidirectional_rnn'):\n # *** 下面,两个网络是分别计算 output 和 state\n # Forward direction\n outputs_fw = list()\n state_fw = initial_state_fw\n with tf.variable_scope('fw'):\n for timestep in range(timestep_size):\n if timestep > 0:\n tf.get_variable_scope().reuse_variables()\n (output_fw, state_fw) = cell_fw(inputs[:, timestep, :], state_fw)\n outputs_fw.append(output_fw)\n\n # backward direction\n outputs_bw = list()\n state_bw = initial_state_bw\n with tf.variable_scope('bw') as bw_scope:\n inputs = tf.reverse(inputs, [1])\n for timestep in range(timestep_size):\n if timestep > 0:\n tf.get_variable_scope().reuse_variables()\n (output_bw, state_bw) = cell_bw(inputs[:, timestep, :], state_bw)\n outputs_bw.append(output_bw)\n # *** 然后把 output_bw 在 timestep 维度进行翻转\n # outputs_bw.shape = [timestep_size, batch_size, hidden_size]\n outputs_bw = tf.reverse(outputs_bw, [0])\n # 把两个oupputs 拼成 [timestep_size, batch_size, hidden_size*2]\n output = tf.concat([outputs_fw, outputs_bw], 2)\n output = tf.transpose(output, perm=[1, 0, 2])\n output = tf.reshape(output, [-1, hidden_size * 2])\n # ***********************************************************\n return output # [-1, hidden_size*2]", "def __init__(\n self,\n input_size,\n hidden_size,\n num_layers=1,\n bidirectional=False,\n dropout=0,\n **kwargs\n ):\n super(LSTM, self).__init__(\n 'lstm', input_size, hidden_size,\n num_layers, bidirectional, dropout, **kwargs\n )", "def forward(self, input_data, hidden_state):\n batch_size = input_data.size(0)\n if hidden_state is None:\n hidden_state = torch.zeros(self._num_layers, batch_size, self._layer_size)\n hidden_state = [hidden_state, hidden_state] if self._controller_type.lower() == 'lstm' else hidden_state\n\n embedded_vector = self._embedding(input_data)\n 
output_vector, hidden_state_out = self._layer(embedded_vector.unsqueeze(0), hidden_state)\n output_vector = self._linear(output_vector.squeeze())\n return output_vector, hidden_state_out", "def __training_forward(self, h, h_lengths, y):\n batch_size = h.size()[1]\n input_seq_len = h.size()[0]\n output_seq_len = y.size()[0]\n\n # obtain embedding representations for the correct tokens\n # shift by one token (add <sos> token at the beginning of the sentences)\n start_tokens = torch.tensor([self.bos_token], dtype=y.dtype, device=self.device).repeat(batch_size, 1).t()\n y_input = torch.cat([start_tokens, y[:-1, :]], dim=0)\n y_emb = self.output_embedding.forward(y_input) # (output_seq_len x batch x embed_dim)\n\n # store hidden and cell states, at the beginning filled with zeros\n states_s = torch.zeros(input_seq_len+1, output_seq_len+1, batch_size, self.state_dim_2d, device=self.device)\n states_c = torch.zeros(input_seq_len+1, output_seq_len+1, batch_size, self.state_dim_2d, device=self.device)\n\n for diagonal_num in range(input_seq_len + output_seq_len - 1):\n # calculate the indices for input / states / etc. for this diagonal\n (ver_from, ver_to), (hor_from, hor_to) = LSTM2d.__calculate_input_ranges(diagonal_num=diagonal_num,\n input_seq_len=input_seq_len,\n output_seq_len=output_seq_len)\n ver_state_ranges, hor_state_ranges, diag_ranges = LSTM2d.__calculate_state_ranges((ver_from, ver_to),\n (hor_from, hor_to))\n ver_range_x, ver_range_y = ver_state_ranges\n hor_range_x, hor_range_y = hor_state_ranges\n diag_range_x, diag_range_y = diag_ranges\n\n # flip the output range so we take the inputs in the right order corresponding to the input range\n # Note: the 2d-cell with smallest source-position (horizontally) and largest target-position (vertically) is\n # the first cell in the diagonal!\n input_range = list(range(ver_from, ver_to))\n output_range = list(reversed(range(hor_from, hor_to)))\n diagonal_len = len(input_range) # always == len(output_range)\n\n # calculate x input for this diagonal\n # treat diagonal as though it was a larger batch and reshape inputs accordingly\n new_batch_size = diagonal_len * batch_size\n h_current = h[input_range, :, :].view(new_batch_size, 2*self.encoder_state_dim)\n y_current = y_emb[output_range, :, :].view(new_batch_size, self.embed_dim)\n x_current = torch.cat([h_current, y_current], dim=-1) # shape (batch*diagonal_len x input_dim)\n\n # calculate previous hidden & cell states for this diagonal\n s_prev_hor = states_s[hor_range_x, hor_range_y, :, :].view(new_batch_size, self.state_dim_2d)\n c_prev_hor = states_c[hor_range_x, hor_range_y, :, :].view(new_batch_size, self.state_dim_2d)\n s_prev_ver = states_s[ver_range_x, ver_range_y, :, :].view(new_batch_size, self.state_dim_2d)\n c_prev_ver = states_c[ver_range_x, ver_range_y, :, :].view(new_batch_size, self.state_dim_2d)\n\n # run batched computation for this diagonal\n c_next, s_next = self.cell2d.forward(x_current, s_prev_hor, s_prev_ver, c_prev_hor, c_prev_ver)\n\n # separate batch and diagonal_len again so we can store them accordingly\n c_next = c_next.view(diagonal_len, batch_size, self.state_dim_2d)\n s_next = s_next.view(diagonal_len, batch_size, self.state_dim_2d)\n\n # store new hidden and cell states at the right indices for the next diagonal(s) to use\n states_s[diag_range_x, diag_range_y, :, :] = s_next\n states_c[diag_range_x, diag_range_y, :, :] = c_next\n\n # for the prediction, take the last (valid, non-padded) column of states and all but the first (1:) row\n states_for_pred = 
states_s[h_lengths, 1:, range(batch_size), :].permute(1, 0, 2)\n states_for_pred = self.logits_dropout.forward(states_for_pred)\n\n y_pred = self.logits.forward(states_for_pred) # shape (output_seq_len x batch x output_vocab_size)\n return y_pred", "def recurrent(self, x, h_0, w_ih, w_hh, b_ih, b_hh, att_score):\n time_step = x.shape[0]\n outputs = []\n t = 0\n h = h_0\n while t < time_step:\n x_t = x[t:t + 1:1]\n x_t = P.Squeeze(0)(x_t)\n h = self.cell(x_t, h, w_ih, w_hh, b_ih, b_hh, att_score[t])\n if self.is_lstm:\n outputs.append(h[0])\n else:\n outputs.append(h)\n t += 1\n outputs = P.Stack()(outputs)\n return outputs, h", "def forward(self, x):\n x, self.hidden = self.lstm(x, self.hidden)\n self.detach_hidden()\n x = self.dropout(x)\n x = self.out(x)\n return x", "def __init__(self, num_vars, device, lag_max, hidden_size_lstm, hidden_size_mlp, num_outputs=1):\n super(LSTMgc, self).__init__()\n\n # LSTMs\n self.lstm_cell_list = nn.ModuleList()\n for state in range(num_vars):\n self.lstm_cell_list.append(nn.LSTMCell(lag_max, hidden_size_lstm))\n\n # MLP for prediction\n self.pred_mlp_l1 = nn.Linear(hidden_size_lstm * num_vars, hidden_size_mlp)\n self.pred_mlp_l2 = nn.Linear(hidden_size_mlp, num_outputs)\n\n # Initialise weights for each variable\n self.imp_weights = nn.Parameter(torch.Tensor(np.ones((num_vars,)) / num_vars + np.random.normal(0, 0.00001,\n (num_vars,))))\n\n # Initialise weights\n self.init_weights()\n\n # Save parameters\n self.num_vars = num_vars\n self.lag = lag_max\n self.hidden_size_lstm = hidden_size_lstm\n self.hidden_size_mlp = hidden_size_mlp\n\n # Initialise LSTM states\n self.lstm_state_list = []\n for state in range(num_vars):\n self.lstm_state_list.append((Variable(torch.zeros(1, self.hidden_size_lstm).float()).to(device),\n Variable(torch.zeros(1, self.hidden_size_lstm).float()).to(device)))", "def _build_rnn_graph_lstm(self, inputs, config, is_training):\n cell = util.create_lstm_cell(is_training, config)\n state = util.get_zero_state_for_the_cell(cell, config)\n\n self.initial_state = state\n with tf.variable_scope(\"RNN\"):\n inputs = tf.unstack(inputs, num=self.num_steps, axis=1)\n outputs, state = tf.contrib.rnn.static_rnn(cell, inputs,\n initial_state=self.initial_state)\n output = tf.reshape(tf.concat(outputs, 1), [-1, config.hidden_size])\n return output, state", "def Get_States(self):\n\n # Getting all hidden state throuh time\n all_hidden_states = tf.scan(self.LSTM,\n self.processed_input,\n initializer=self.initial_hidden,\n name='states')\n all_hidden_states=all_hidden_states[:,0,:,:]\n \n return all_hidden_states", "def lstmdouble(self):\n # Model.\n model = Sequential()\n model.add(LSTM(2048, return_sequences=True,\n input_shape=self.input_shape,\n dropout=0.0))\n model.add(LSTM(2048, return_sequences=True,\n input_shape=self.input_shape,\n dropout=0.0))\n #model.add(Flatten()) #qiao_added\n # model.add(Dense(1024, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(2048, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Flatten())\n model.add(Dense(self.nb_classes, activation='softmax'))\n return model", "def lstm_model(nlstm=128, layer_norm=False):\n\n def network_fn(X, nenv=1, obs_size=-1):\n with tf.variable_scope(\"emb\", reuse=tf.AUTO_REUSE):\n w_emb = tf.get_variable(\"w_emb\", [obs_size+1, 32])\n X = tf.nn.embedding_lookup(w_emb, X)\n\n nbatch = X.shape[0]\n nsteps = nbatch // nenv\n\n h = tf.layers.flatten(X)\n\n M = tf.placeholder(tf.float32, [nbatch]) #mask (done t-1)\n S = tf.placeholder(tf.float32, [nenv, 
2*nlstm]) #states\n\n xs = batch_to_seq(h, nenv, nsteps)\n ms = batch_to_seq(M, nenv, nsteps)\n\n assert not layer_norm\n h5, snew = lstm(xs, ms, S, scope='lstm', nh=nlstm)\n\n h = seq_to_batch(h5)\n initial_state = np.zeros(S.shape.as_list(), dtype=float)\n\n return h, {'S':S, 'M':M, 'state':snew, 'initial_state':initial_state}\n\n return network_fn", "def add_model(self, inputs):\n size = self.config.hidden_size\n forget_bias = self.config.forget_bias\n input_cell = self.config.input_cell\n\n if input_cell == 'BasicLSTMCell':\n lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(size, forget_bias)\n print 'Using Basic LSTM Cell \\n'\n\n elif input_cell == 'LSTMCell':\n lstm_cell = tf.nn.rnn_cell.LSTMCell(size, forget_bias)\n print 'Using LSTM Cell \\n'\n\n elif input_cell == 'GRUCell':\n lstm_cell = tf.nn.rnn_cell.GRUCell(size)\n print 'Using GRU Cell \\n'\n\n else:\n print \"Please Specify a Correct Cell Type\"\n\n lstm_cell = tf.nn.rnn_cell.DropoutWrapper(lstm_cell, output_keep_prob=self.config.dropout,\n input_keep_prob=self.config.dropout)\n\n cell = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * self.config.num_layers)\n \n print 'Number of Hidden Layers ', self.config.num_layers\n \n self.initial_state = cell.zero_state(self.config.batch_size, tf.float32)\n rnn_outputs = []\n state = self.initial_state\n\n with tf.variable_scope('RNNLM') as scope:\n for time_step in range(self.config.num_steps):\n if time_step > 0: scope.reuse_variables()\n (cell_output, state) = cell(inputs[:, time_step, :], state)\n rnn_outputs.append(cell_output)\n self.final_state = state\n\n return rnn_outputs", "def __init__(self, embedding_dim, hidden_dim, vocab_size, label_size, use_gpu, batch_size):\n super(BiLSTM, self).__init__()\n self.hidden_dim = hidden_dim\n self.use_gpu = use_gpu\n self.batch_size = batch_size\n self.embeddings = nn.Embedding(vocab_size, embedding_dim)\n self.lstm = nn.LSTM(input_size=embedding_dim, hidden_size=hidden_dim, bidirectional=True)\n self.hidden2label = nn.Linear(hidden_dim*2, label_size)\n self.hidden = self.init_hidden()", "def lstm_forward(x, h0, Wx, Wh, b):\n h, cache = None, None\n #############################################################################\n # TODO: Implement the forward pass for an LSTM over an entire timeseries. #\n # You should use the lstm_step_forward function that you just defined. 
#\n #############################################################################\n\n N,T,D = x.shape\n N,H = h0.shape\n prev_h = h0\n prev_c = np.zeros((N, H))\n\n h = np.zeros((N, T, H))\n cache = list()\n\n for t in range(T):\n next_h, next_c, t_cache = lstm_step_forward(x[:,t,:],prev_h,prev_c,Wx,Wh,b)\n prev_h = next_h\n prev_c = next_c\n h[:,t,:] = next_h\n cache.append(t_cache)\n ##############################################################################\n # END OF YOUR CODE #\n ##############################################################################\n\n return h, cache", "def forward(self, x, hidden):\n batch_size = x.size(0)\n\n # embeddings and lstm_out\n x = x.long()\n embeds = self.embedding(x)\n lstm_out, hidden = self.lstm(embeds, hidden)\n\n # print(f'lstm_out:{lstm_out.shape}')\n\n # stack up lstm outputs\n lstm_out = lstm_out.contiguous().view(-1, self.hidden_dim)\n\n # print(f'lstm_out flatten:{lstm_out.shape}')\n\n # dropout and fully-connected layer\n out = self.dropout(lstm_out)\n out = self.fc(out)\n # sigmoid function\n sig_out = self.sig(out)\n\n # print(f'sig_out:{sig_out.shape}')\n\n # reshape to be batch_size first\n sig_out = sig_out.view(batch_size, -1)\n sig_out = sig_out[:, -1] # get last batch of labels\n\n # print(f'sig_out last batch:{sig_out.shape}')\n\n # return last sigmoid output and hidden state\n return sig_out, hidden", "def __init__(self, vocab_size, tag_to_ix, embedding_dim, hidden_dim):\n super(BiLSTMCRF, self).__init__()\n self.embedding_dim = embedding_dim\n self.hidden_dim = hidden_dim\n self.vocab_size = vocab_size\n self.tag_to_ix = tag_to_ix\n self.tagset_size = len(tag_to_ix)\n \n self.embedding = nn.Embedding(self.vocab_size, embedding_dim)\n self.lstm = nn.LSTM(embedding_dim, hidden_dim // 2,\n num_layers=1, bidirectional=True)\n self.hidden2tag = nn.Linear(hidden_dim, self.tagset_size)\n self.transition = nn.Parameter(\n torch.randn(self.tagset_size, self.tagset_size))\n \n self.transition.data[self.tag_to_ix[START_TAG], :] = -10000.0\n self.transition.data[:, self.tag_to_ix[STOP_TAG]] = -10000.0\n self.hidden = self.init_hidden()", "def convert_rnn(g, op, block):\n\n def generate_lstm(\n input_seqs,\n hidden_state,\n cell_state,\n w_inp,\n w_hid,\n b_inp,\n b_hid,\n f_act,\n g_act,\n h_act,\n backwards=False,\n ):\n \"\"\"Implementation of LSTM cell for paddlepaddle of TVM\"\"\"\n\n h_list = []\n seq_length = len(input_seqs)\n for i in range(seq_length):\n step = input_seqs[i] if not backwards else input_seqs[seq_length - (i + 1)]\n step = _op.squeeze(step, axis=[0])\n gates = _op.nn.dense(step, w_inp) + _op.nn.dense(hidden_state, w_hid)\n if b_inp is not None:\n gates += b_inp\n if b_hid is not None:\n gates += b_hid\n i, f, c, o = _op.split(gates, 4, axis=-1)\n\n i = f_act(i)\n f = f_act(f)\n\n c = g_act(c)\n C = f * cell_state + i * c\n\n o = f_act(o)\n\n H = o * h_act(C)\n\n hidden_state = H\n cell_state = C\n h_list.append(_op.expand_dims(H, axis=0))\n\n if backwards:\n h_list = h_list[::-1]\n\n # Concatenate outputs and add back in direction axis.\n concatenated = _op.concatenate(h_list, 0)\n output = _op.expand_dims(concatenated, axis=1)\n hidden_state = _op.expand_dims(hidden_state, axis=0)\n cell_state = _op.expand_dims(cell_state, axis=0)\n\n return output, hidden_state, cell_state\n\n def generate_gru(\n input_seqs, hidden_state, w_inp, w_hid, b_inp, b_hid, rz_act, n_act, backwards=False\n ):\n \"\"\"Implementation of GRU cell for paddlepaddle of TVM\"\"\"\n\n h_list = []\n seq_length = len(input_seqs)\n for i in 
range(seq_length):\n step = input_seqs[i] if not backwards else input_seqs[seq_length - (i + 1)]\n step = _op.squeeze(step, axis=[0])\n xwt = _op.nn.dense(step, w_inp)\n hwt = _op.nn.dense(hidden_state, w_hid)\n if b_inp is not None:\n xwt += b_inp\n if b_hid is not None:\n hwt += b_hid\n i_r, i_z, i_n = _op.split(xwt, 3, axis=-1)\n h_r, h_z, h_n = _op.split(hwt, 3, axis=-1)\n\n r_gate = rz_act(i_r + h_r)\n z_gate = rz_act(i_z + h_z)\n n_gate = n_act(i_n + r_gate * h_n)\n\n hidden_state = (hidden_state - n_gate) * z_gate + n_gate\n h_list.append(_op.expand_dims(hidden_state, axis=0))\n\n if backwards:\n h_list = h_list[::-1]\n\n # Concatenate outputs and add back in direction axis.\n concatenated = _op.concatenate(h_list, 0)\n output = _op.expand_dims(concatenated, axis=1)\n hidden_state = _op.expand_dims(hidden_state, axis=0)\n\n return output, hidden_state\n\n def generate_simplernn(\n input_seqs, hidden_state, w_inp, w_hid, b_inp, b_hid, n_act, backwards=False\n ):\n \"\"\"Implementation of SimpleRNN cell for paddlepaddle of TVM\"\"\"\n\n h_list = []\n seq_length = len(input_seqs)\n for i in range(seq_length):\n step = input_seqs[i] if not backwards else input_seqs[seq_length - (i + 1)]\n step = _op.squeeze(step, axis=[0])\n xwt = _op.nn.dense(step, w_inp)\n hwt = _op.nn.dense(hidden_state, w_hid)\n if b_inp is not None:\n xwt += b_inp\n if b_hid is not None:\n hwt += b_hid\n\n n_gate = n_act(xwt + hwt)\n\n hidden_state = n_gate\n h_list.append(_op.expand_dims(hidden_state, axis=0))\n\n if backwards:\n h_list = h_list[::-1]\n\n # Concatenate outputs and add back in direction axis.\n concatenated = _op.concatenate(h_list, 0)\n output = _op.expand_dims(concatenated, axis=1)\n hidden_state = _op.expand_dims(hidden_state, axis=0)\n\n return output, hidden_state\n\n def make_param_inputs(g, node, layer, hidden_size, num_layers):\n \"\"\"Param for weight and bias.\"\"\"\n\n bidirect_len = 4 if node.attr(\"is_bidirec\") else 2\n all_layer_param_len = len(node.input(\"WeightList\"))\n weight_list = node.input(\"WeightList\")[: all_layer_param_len // 2]\n bias_list = node.input(\"WeightList\")[all_layer_param_len // 2 :]\n\n layer_weight_list = weight_list[layer * bidirect_len : layer * bidirect_len + bidirect_len]\n layer_bias_list = bias_list[layer * bidirect_len : layer * bidirect_len + bidirect_len]\n param_list = layer_weight_list + layer_bias_list\n param_list_len = len(param_list)\n\n input_weights = param_list[0 : param_list_len // 2 : 2]\n hidden_weights = param_list[1 : param_list_len // 2 : 2]\n\n input_bias = param_list[param_list_len // 2 : param_list_len : 2]\n hidden_bias = param_list[param_list_len // 2 + 1 : param_list_len : 2]\n\n return input_weights, hidden_weights, input_bias, hidden_bias\n\n def make_init_param_inputs(g, node, layer):\n \"\"\"Init param for inputs.\"\"\"\n\n mode = node.attr(\"mode\")\n if mode == \"LSTM\":\n all_init_h, all_init_c = node.input(\"PreState\")\n bidirect_len = 2 if node.attr(\"is_bidirec\") else 1\n init_h = _op.strided_slice(\n g.get_node(all_init_h),\n [layer * bidirect_len],\n [layer * bidirect_len + bidirect_len],\n axes=[0],\n )\n init_c = _op.strided_slice(\n g.get_node(all_init_c),\n [layer * bidirect_len],\n [layer * bidirect_len + bidirect_len],\n axes=[0],\n )\n return init_h, init_c\n all_init_h = node.input(\"PreState\")[0]\n bidirect_len = 2 if node.attr(\"is_bidirec\") else 1\n init_h = _op.strided_slice(\n g.get_node(all_init_h),\n [layer * bidirect_len],\n [layer * bidirect_len + bidirect_len],\n axes=[0],\n )\n return 
init_h\n\n hidden_size = op.attr(\"hidden_size\")\n num_layers = op.attr(\"num_layers\")\n is_bidirec = op.attr(\"is_bidirec\")\n mode = op.attr(\"mode\")\n\n input_x = g.get_node(op.input(\"Input\")[0])\n\n num_directions = 1\n if is_bidirec:\n num_directions = 2\n\n x_shape = infer_shape(input_x)\n time_steps = x_shape[0]\n x_steps = _op.split(input_x, indices_or_sections=time_steps, axis=0)\n for layer in range(num_layers):\n input_weights, hidden_weights, input_bias, hidden_bias = make_param_inputs(\n g, op, layer, hidden_size, num_layers\n )\n if mode == \"LSTM\":\n init_h, init_c = make_init_param_inputs(g, op, layer)\n init_hs = _op.split(init_h, num_directions)\n init_cs = _op.split(init_c, num_directions)\n result_output = []\n result_H = []\n result_C = []\n for i in range(num_directions):\n H_t = _op.squeeze(init_hs[i], axis=[0])\n C_t = _op.squeeze(init_cs[i], axis=[0])\n W = g.get_node(input_weights[i])\n R = g.get_node(hidden_weights[i])\n WB = g.get_node(input_bias[i])\n RB = g.get_node(hidden_bias[i])\n output, H, C = generate_lstm(\n input_seqs=x_steps,\n hidden_state=H_t,\n cell_state=C_t,\n w_inp=W,\n w_hid=R,\n b_inp=WB,\n b_hid=RB,\n f_act=_op.sigmoid,\n g_act=_op.tanh,\n h_act=_op.tanh,\n backwards=i == 1,\n )\n result_output.append(output)\n result_H.append(H)\n result_C.append(C)\n output = _op.concatenate(result_output, axis=1)\n H = _op.concatenate(result_H, axis=0)\n C = _op.concatenate(result_C, axis=0)\n elif mode == \"GRU\":\n init_h = make_init_param_inputs(g, op, layer)\n init_hs = _op.split(init_h, num_directions)\n result_output = []\n result_H = []\n for i in range(num_directions):\n H_t = _op.squeeze(init_hs[i], axis=[0])\n W = g.get_node(input_weights[i])\n R = g.get_node(hidden_weights[i])\n WB = g.get_node(input_bias[i])\n RB = g.get_node(hidden_bias[i])\n output, H = generate_gru(\n input_seqs=x_steps,\n hidden_state=H_t,\n w_inp=W,\n w_hid=R,\n b_inp=WB,\n b_hid=RB,\n rz_act=_op.sigmoid,\n n_act=_op.tanh,\n backwards=i == 1,\n )\n result_output.append(output)\n result_H.append(H)\n output = _op.concatenate(result_output, axis=1)\n H = _op.concatenate(result_H, axis=0)\n elif mode == \"RNN_TANH\":\n init_h = make_init_param_inputs(g, op, layer)\n init_hs = _op.split(init_h, num_directions)\n result_output = []\n result_H = []\n for i in range(num_directions):\n H_t = _op.squeeze(init_hs[i], axis=[0])\n W = g.get_node(input_weights[i])\n R = g.get_node(hidden_weights[i])\n WB = g.get_node(input_bias[i])\n RB = g.get_node(hidden_bias[i])\n output, H = generate_simplernn(\n input_seqs=x_steps,\n hidden_state=H_t,\n w_inp=W,\n w_hid=R,\n b_inp=WB,\n b_hid=RB,\n n_act=_op.tanh,\n backwards=i == 1,\n )\n result_output.append(output)\n result_H.append(H)\n output = _op.concatenate(result_output, axis=1)\n H = _op.concatenate(result_H, axis=0)\n\n output = _op.transpose(output, axes=[0, 2, 1, 3])\n output = _op.reshape(output, newshape=(0, 0, -1))\n x_steps = _op.split(output, indices_or_sections=time_steps, axis=0)\n\n g.add_node(op.output(\"Out\")[0], output)", "def get_output_for(self, input, **kwargs):\n\t\tbatch_size = input.shape[0]\n\t\tnum_states = self.num_states\n\t\timg_dim = self.img_dim\n\t\tN = self.N\n\t\tattender = self.attender\n\t\ttheano.gradient.grad_clip\n\n\t\tdef step(c_tm1, h_tm1, att_acc_tm1, input, W, Wg):\n\t\t\tcenter_y, center_x, delta, sigma, gamma = gp_from_hidden(h_tm1, Wg, img_dim, N)\n\t\t\tg, att = attender.read(input, center_y, center_x, delta, sigma, gamma) # (batch_size, N, N) and (batch_size, img_dim, 
img_dim)\n\t\t\n\t\t\tatt_acc_t = T.clip(att_acc_tm1 + att, 0.0, 1.0)\t# (batch_size, img_dim, img_dim)\n\t\t\tr = input[:, :, :img_dim] * (1.0 - att_acc_t) # (batch_size, img_dim, img_dim)\n\t\t\tR , _ = attender.read(r, *gp_from_hidden(T.zeros((batch_size, 5)), T.eye(5), img_dim, N)) # (batch_size, N, N)\n\t\t\t\n\t\t\tflat_g = g.reshape((batch_size, N * N)) # (batch_size, N * N)\n\t\t\tflat_R = R.reshape((batch_size, N * N)) # (batch_size, N * N)\n\t\t\t\n\t\t\t# concatenate gA, gB and h_tm1 to form a single matrix # (batch_size, N * N + N * N + num_states + 1)\n\t\t\tlstm_inp = T.concatenate([flat_g, flat_R, h_tm1, T.ones((batch_size, 1))], axis=1)\n\n\t\t\t# multiply by LSTM weights\n\t\t\t# (num_states * 4, num_input + num_states + 1) dot (batch_size, N * N + N * N + num_states + 1).T\n\t\t\tpre_act = T.dot(W, lstm_inp.T) \t# (4 * num_states, batch_size)\n\n\t\t\t# split up to get individual gates\n\t\t\tz = T.tanh(pre_act[0*num_states:1*num_states]) # (num_states, batch_size)\n\t\t\ti = T.nnet.sigmoid(pre_act[1*num_states:2*num_states])\n\t\t\tf = T.nnet.sigmoid(pre_act[2*num_states:3*num_states])\n\t\t\to = T.nnet.sigmoid(pre_act[3*num_states:4*num_states])\n\n\t\t\t# do LSTM update\n\t\t\tc_t = f * c_tm1.T + i * z\n\t\t\th_t = o * T.tanh(c_t)\n\n\t\t\treturn c_t.T, h_t.T, att_acc_t\t# 1, 2: (batch_size, num_states); 3, 4: (batch_size, img_dim, img_dim)\n\n\t\tc0 = T.zeros((batch_size, num_states))\n\t\th0 = T.zeros((batch_size, num_states))\n\t\tatt_acc0 = T.zeros((batch_size, img_dim, img_dim))\n\t\t\n\t\tcells, hiddens, att_acc_T = theano.scan(fn=step, non_sequences=[input, self.W, self.Wg], outputs_info=[c0, h0, att_acc0], \n\t\t\t\t\t\t\t\t\t\tn_steps=self.num_glimpses, strict=True)[0]\n\t\tif self.final_state_only:\n\t\t\treturn hiddens[-1]\n\t\telse:\n\t\t\treturn hiddens", "def __init__(self, vocab_size, output_size, embedding_dim, hidden_dim, n_layers, drop_prob=0.7):\n super(SentimentLSTM, self).__init__()\n\n self.output_size = output_size\n self.n_layers = n_layers\n self.hidden_dim = hidden_dim\n\n\n # define all layers\n self.embed = nn.Embedding(vocab_size,embedding_dim)\n self.lstm = nn.LSTM(embedding_dim,hidden_dim,n_layers,dropout=drop_prob,batch_first=True)\n self.fc = nn.Linear(hidden_dim,output_size)\n self.sigmoid = nn.Sigmoid()\n self.drp = nn.Dropout(p=0.7)", "def model_create_lstm(input_dim, output_dim, n_features, n_houses, x_train, y_train, x_test, y_test, early=None):\r\n model = Sequential()\r\n for _ in range(nn_hparams['num_layers']):\r\n model.add(LSTM(nn_hparams['units'], activation=nn_hparams['activation'], input_shape=(input_dim,n_features), return_sequences=True))\r\n model.add(Dropout(nn_hparams['dropout']))\r\n model.add(Flatten())\r\n model.add(Dense(y_train.shape[1]*y_train.shape[2]))\r\n custom_optimizer = getattr(optimizers, nn_hparams['optimizer'])(lr=nn_hparams['learning_rate'], beta_1=nn_hparams['beta_1'], beta_2=nn_hparams['beta_2'])\r\n model.compile(optimizer=custom_optimizer, loss=nn_hparams['loss'])\r\n y_train = y_train.reshape((y_train.shape[0], y_train.shape[1]*y_train.shape[2]))\r\n y_test = y_test.reshape((y_test.shape[0], y_test.shape[1]*y_test.shape[2]))\r\n if early:\r\n model.fit(x_train, y_train, validation_data=(x_test,y_test), epochs=nn_hparams['epochs'], verbose=1, callbacks=[early])\r\n else:\r\n model.fit(x_train, y_train, validation_data=(x_test,y_test), epochs=nn_hparams['epochs'], verbose=1)\r\n model_loss = model.evaluate(x_train, y_train, verbose=0)\r\n \r\n return model, model_loss", "def get_rnn(X, rnn_size, 
seq_len, batch_size, num_layers=1, input_keep_prob=1.0, output_keep_prob=1.0, is_training=False,\n cell_name=\"BasicLSTM\", bidirectional=False):\n with tf.device(\"/cpu:0\"):\n # Convert input tensor to python list (along the sequence length dimention)\n word_embeddings = tf.split(1, seq_len, X)\n word_embeddings = [tf.squeeze(embed_, [1]) for embed_ in word_embeddings]\n\n # if is_training and keep_prob < 1:\n # word_embeddings = [tf.nn.dropout(input_, keep_prob) for input_ in word_embeddings]\n\n def get_cell():\n if cell_name == \"GRU\": # GRU\n cell = rnn_cell.GRUCell(rnn_size)\n elif cell_name == \"LSTM\": # LSTM\n cell = rnn_cell.LSTMCell(rnn_size, tf.shape(X)[2])\n else:\n cell = rnn_cell.BasicLSTMCell(rnn_size)\n if is_training and (input_keep_prob < 1 or output_keep_prob < 1):\n cell = rnn_cell.DropoutWrapper(cell, input_keep_prob=input_keep_prob, output_keep_prob=output_keep_prob)\n cell = rnn_cell.MultiRNNCell([cell] * num_layers)\n initial_state = cell.zero_state(batch_size, tf.float32)\n return cell, initial_state\n\n if bidirectional:\n with tf.variable_scope(\"forward\"):\n cell_fw, initial_state_fw = get_cell()\n with tf.variable_scope(\"backward\"):\n cell_bw, initial_state_bw = get_cell()\n return rnn.bidirectional_rnn(cell_fw, cell_bw, word_embeddings,\n initial_state_fw=initial_state_fw, initial_state_bw=initial_state_bw)\n else:\n cell, initial_state = get_cell()\n return rnn.rnn(cell, word_embeddings, initial_state=initial_state)", "def build_lstm11(embeddings, shape, settings):\n model = Sequential()\n model.add(\n Embedding(\n embeddings.shape[0],\n embeddings.shape[1],\n input_length=shape['max_length'],\n trainable=False,\n weights=[embeddings],\n mask_zero=False\n )\n )\n model.add(TimeDistributed(Dense(shape['n_hidden'], use_bias=False), name='td9a'))\n model.add(Bidirectional(LSTM(shape['n_hidden'], return_sequences=True,\n recurrent_dropout=settings['dropout'],\n dropout=settings['dropout']), name='bidi9a'))\n # model.add(GlobalMaxPool1D())\n # model.add(BatchNormalization())\n # model.add(Dropout(settings['dropout'] / 2.0))\n\n # model.add(TimeDistributed(Dense(shape['n_hidden'], use_bias=False), name='td9b'))\n model.add(Bidirectional(LSTM(shape['n_hidden'] // 2, return_sequences=True,\n recurrent_dropout=settings['dropout'],\n dropout=settings['dropout']), name='bidi9b'))\n model.add(Bidirectional(LSTM(shape['n_hidden'] // 2, return_sequences=True,\n recurrent_dropout=settings['dropout'],\n dropout=settings['dropout']), name='bidi9c'))\n model.add(GlobalMaxPool1D(name='mp9'))\n model.add(BatchNormalization(name='bn9'))\n model.add(Dropout(settings['dropout'] / 2.0, name='drop9b'))\n\n model.add(Dense(shape['n_class'], activation='sigmoid', name='den9b'))\n xprint('build_lstm9: embeddings=%s shape=%s' % (dim(embeddings), shape))\n return model", "def forward(self, data, time_steps, lengths):\n data_packed = pack_padded_sequence(data, lengths, batch_first=True, enforce_sorted=False)\n _, hidden = self.rnn(data_packed)\n assert hidden.size(1) == data.size(0)\n assert hidden.size(2) == self.latent_dim\n\n # check if bidirectional\n if hidden.size(0) == 1:\n hidden = hidden.squeeze(0)\n elif hidden.size(0) == 2:\n hidden = torch.cat((hidden[0], hidden[1]), dim=-1)\n else:\n raise ValueError('Incorrect RNN hidden state.')\n\n # extract mean and logvar\n mean_logvar = self.hidden_to_z0(hidden)\n assert mean_logvar.size(-1) == 2 * self.latent_dim\n mean, logvar = mean_logvar[:, :self.latent_dim], mean_logvar[:, self.latent_dim:]\n return mean, logvar", "def 
forward(self, x, hidden):\n batch_size=x.shape[0]\n\n x = self.embed(x)\n\n x,hidden = self.lstm(x,hidden)\n\n x = x.reshape(-1,self.hidden_dim)\n\n x = self.drp(x)\n\n x = self.fc(x)\n\n sig_out = self.sigmoid(x)\n\n # return last sigmoid output and hidden state\n sig_out = sig_out.reshape(batch_size,-1)\n sig_out = sig_out[:,-1]\n\n return sig_out, hidden", "def __init__(self, embeddings, hidden_dim, output_size, dropout_emb, dropout_lstm):\n super(BaselineLSTMModel, self).__init__()\n\n # 1) embedding layer:\n trainable_emb = False\n self.word_embeddings = nn.Embedding(num_embeddings=embeddings.shape[0],\n embedding_dim=embeddings.shape[1])\n self.init_embeddings(embeddings, trainable_emb)\n self.drop_emb = nn.Dropout(dropout_emb)\n\n # 2) LSTM layer\n self.hidden_dim = hidden_dim\n self.lstm = nn.LSTM(embeddings.shape[1], hidden_dim, batch_first=True,\n dropout=dropout_lstm)\n self.drop_lstm = nn.Dropout(dropout_lstm)\n\n # 3) linear layer -> outputs\n self.hidden2output = nn.Linear(hidden_dim, output_size)", "def __init__(self, embedding_size=300, lstm_layer_num=3,\r\n max_time_size=50, cell_size=100, forget_bias=0.0,\r\n l2_reg_lambda=0.0, class_num=8):\r\n # begin\r\n \"\"\"\r\n constant store in model. benefit: when load model can show the constant\r\n arguments.\r\n dropout not used in test step, move to outside.\r\n \"\"\"\r\n _l2_reg_lambda = tf.constant(l2_reg_lambda, dtype=tf.float32,\r\n name=\"l2_reg_lambda\")\r\n _lstm_layer_num = tf.constant(lstm_layer_num, dtype=tf.int32,\r\n name=\"lstm_layer_num\")\r\n _cell_size = tf.constant(cell_size, dtype=tf.int32,\r\n name=\"cell_size\")\r\n _max_time_size = tf.constant(max_time_size, dtype=tf.int32,\r\n name=\"max_time_size\")\r\n \"\"\"\r\n Placeholders for input, output and dropout.\r\n \"\"\"\r\n # inputs = tf.placeholder(shape=(max_time, batch_size, input_depth),\r\n # dtype=tf.float32)\r\n self.input_x = tf.placeholder(\r\n shape=(None, embedding_size, max_time_size),\r\n dtype=tf.float32,\r\n name=\"input_x\")\r\n batch_size = tf.shape(self.input_x)[0]\r\n self.input_y = tf.placeholder(shape=(None, class_num), dtype=tf.float32,\r\n name=\"input_y\")\r\n self.input_keep_prob = tf.placeholder(tf.float32,\r\n name=\"input_keep_prob\")\r\n self.output_keep_prob = tf.placeholder(\r\n tf.float32,\r\n name=\"output_keep_prob\"\r\n )\r\n # Keeping track of l2 regularization loss (optional)\r\n l2_loss = tf.constant(0.0)\r\n\r\n def lstm_cell_func():\r\n # LSTM Cell, hidden size larger, remenber more detail\r\n lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(\r\n cell_size,\r\n forget_bias=forget_bias,\r\n state_is_tuple=True)\r\n \"\"\"\r\n add dropout, dnn dropout different from cnn.\r\n in_keep_prob: input keep probability(the probability of h_t == 0).\r\n out_keep_prob: output keep probability(the probability of h_{t+1} == 0).\r\n \"\"\"\r\n\r\n lstm_cell = tf.nn.rnn_cell.DropoutWrapper(\r\n lstm_cell,\r\n input_keep_prob=self.input_keep_prob,\r\n output_keep_prob=self.output_keep_prob)\r\n \"\"\"What's the benefit of multiple LSTM hidden layer?\r\n point 1: An interesting property of multilayer LSTMs is that it allows to\r\n perform hierarchical processing on difficult temporal tasks, and more\r\n naturally capture the structure of sequences.\r\n point 2: The purpose of using multilayer RNN cells is to learn more\r\n sophisticated conditional distributions\"\"\"\r\n return lstm_cell\r\n cell = tf.nn.rnn_cell.MultiRNNCell(\r\n [lstm_cell_func() for _ in range(lstm_layer_num)], state_is_tuple=True)\r\n with tf.name_scope(\"lstm\"):\r\n 
state = cell.zero_state(batch_size, tf.float32) # sents counte\r\n # with tf.name_scope(\"lstm\"):\r\n with tf.variable_scope(tf.get_variable_scope()) as scope:\r\n for time_step in range(max_time_size):\r\n if time_step > 0:\r\n tf.get_variable_scope().reuse_variables()\r\n (h_t, state) = cell(self.input_x[:,:,time_step], state)\r\n h = h_t\r\n # 全连阶层\r\n with tf.name_scope(\"full_cont_layer\"):\r\n W1 = tf.Variable(tf.truncated_normal([cell_size, class_num], stddev=0.1), name=\"W1\")\r\n W2 = tf.Variable(tf.truncated_normal([cell_size, class_num], stddev=0.1), name=\"W2\")\r\n W3 = tf.Variable(tf.truncated_normal([cell_size, class_num], stddev=0.1), name=\"W3\")\r\n b1 = tf.Variable(tf.constant(0.1, shape=[class_num]), name=\"b1\")\r\n b2 = tf.Variable(tf.constant(0.1, shape=[class_num]), name=\"b2\")\r\n b3 = tf.Variable(tf.constant(0.1, shape=[class_num]), name=\"b3\")\r\n l2_loss += tf.nn.l2_loss(W1) + tf.nn.l2_loss(W2) + tf.nn.l2_loss(W3)\r\n l2_loss += tf.nn.l2_loss(b1) + tf.nn.l2_loss(b2) + tf.nn.l2_loss(b3)\r\n self.scores = tf.nn.xw_plus_b(h, W1, b1, name=\"scores\")\r\n # self.score = tf.matmul(h, W) + b\r\n self.predictions = tf.argmax(self.scores, 1, name=\"predictions\")\r\n\r\n # CalculateMean cross-entropy loss\r\n with tf.name_scope(\"loss\"):\r\n # losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores,\r\n # labels=self.input_y)\r\n losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores+1e-10, labels=self.input_y)\r\n \"\"\"sparse softmax cross entropy do not need to transform labels to\r\n one-hot matrix. and \"\"\"\r\n self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss\r\n\r\n # Accuracy\r\n with tf.name_scope(\"accuracy\"):\r\n correct_predictions = tf.equal(self.predictions,\r\n tf.argmax(self.input_y, 1))\r\n self.accuracy = tf.reduce_mean(\r\n tf.cast(correct_predictions, \"float\"), name=\"accuracy\")", "def forward(self, x):\n batch_size = x.size(0)\n out,_ = self.lstm(x) #out = batch, seq_len, num_directions * hidden_size\n out1 = out[:, -1, :16] #最后一层正向传播的最后一个timestep\n out2 = out[:, 0, 16:] #最后一层反向传播最后一个timestep\n out = torch.cat((out1,out2), dim=1)\n out = self.fc(out)\n\n return out", "def forward(self, input: torch.Tensor, hidden_state: torch.Tensor, cell_state: torch.Tensor) ->Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n input = input.unsqueeze(1)\n output = self.embedding(input)\n output, (hidden_state, cell_state) = self.lstm(output, (hidden_state, cell_state))\n output_logits = self.out(output)\n return output_logits, hidden_state, cell_state", "def lstm2():\n return render_template(\n 'lstm2.html',\n title='LSTM',\n year=datetime.now().year,\n message='Your LSTM page.'\n )", "def test_lstm_basic(self):\n\n class SimpleLSTM(nn.Module):\n def __init__(self):\n super(SimpleLSTM, self).__init__()\n self.rnn = torch.nn.LSTM(12, 10, 1)\n w2 = torch.randn(40, 10)\n w1 = torch.randn(40, 12)\n b1 = torch.randn(40)\n b2 = torch.randn(40)\n self.rnn.training = False\n self.rnn.weight_ih_l0 = torch.nn.Parameter(w1)\n self.rnn.weight_hh_l0 = torch.nn.Parameter(w2)\n self.rnn.bias_ih_l0 = torch.nn.Parameter(b1)\n self.rnn.bias_hh_l0 = torch.nn.Parameter(b2)\n\n def forward(self, inputs, h, c):\n return self.rnn(inputs, (h, c))\n\n inputs = torch.randn(10, 3, 12)\n h = torch.randn(1, 3, 10)\n c = torch.randn(1, 3, 10)\n model = SimpleLSTM()\n\n utils.compare_tracing_methods(\n model, inputs, h, c, fusible_ops={\"aten::lstm\"}, skip_to_glow=True\n )", "def lstm2():\n return render_template(\n 'lstm2.html',\n title='LSTM',\n 
year=\"2020\",\n message='Your LSTM page.'\n )", "def init_hidden(self, batch_size: int, device: Device) -> AmbiguousHidden:\n hidden_zero = torch.zeros(batch_size, self.hidden_size).to(device)\n\n if self.rnn_type == \"LSTM\":\n return hidden_zero, hidden_zero.clone()\n else:\n return hidden_zero", "def lstm_model(input_size, output_size, embedding, num_nodes, num_unrollings, batch_size,\n learning_rate, exp_decay = None, gradient_max_value = 1.25, dropout_prob = 0.0):\n\n graph = tf.Graph()\n with graph.as_default():\n # [ix, fx, cx, ox]\n x_mat = training.utils.gaussian_weights_variable([input_size, 4*num_nodes])\n # [im, fm, cm, om]\n o_mat = training.utils.gaussian_weights_variable([num_nodes, 4*num_nodes])\n # [ib, fb, cb, ob]\n b_vec = tf.Variable(tf.zeros([1, 4*num_nodes]))\n\n # Variables saving state across unrollings.\n saved_output = tf.Variable(tf.zeros([batch_size, num_nodes]), trainable=False)\n saved_state = tf.Variable(tf.zeros([batch_size, num_nodes]), trainable=False)\n # Classifier weights and biases.\n w = training.utils.gaussian_weights_variable([num_nodes, output_size])\n b = tf.Variable(tf.zeros([output_size]))\n\n # Definition of the cell computation.\n def lstm_cell(i, o, state):\n \"\"\"Create a LSTM cell. See e.g.: http://arxiv.org/pdf/1402.1128v1.pdf\n Note that in this formulation, we omit the various connections between the\n previous state and the gates.\"\"\"\n mult = tf.matmul(i, x_mat) + tf.matmul(o, o_mat) + b_vec\n\n input_gate = tf.sigmoid(mult[:, 0:num_nodes])\n forget_gate = tf.sigmoid(mult[:, num_nodes:2*num_nodes])\n state = forget_gate * state + input_gate * tf.tanh(mult[:, 2*num_nodes:3*num_nodes])\n output_gate = tf.sigmoid(mult[:, 3*num_nodes:4*num_nodes])\n return output_gate * tf.tanh(state), state\n\n # Input data.\n before_embedding_size = input_size\n if embedding is not None:\n before_embedding_size = embedding.shape[0]\n\n train_data = list()\n for _ in range(num_unrollings + 1):\n train_data.append(\n tf.placeholder(tf.float32, shape=[batch_size, before_embedding_size]))\n train_inputs = train_data[:num_unrollings]\n train_labels = train_data[1:] # Labels are inputs shifted by one time step.\n\n # Unrolled LSTM loop.\n outputs = list()\n output = saved_output\n state = saved_state\n for i in train_inputs:\n if embedding is not None:\n # Converting the input to the embedding.\n indices = tf.argmax(i, 1)\n i = tf.nn.embedding_lookup(embedding, indices)\n # Dropout is only applied to inputs, not to recurrent connections.\n i = tf.nn.dropout(i, 1 - dropout_prob)\n output, state = lstm_cell(i, output, state)\n outputs.append(output)\n\n # State saving across unrollings.\n with tf.control_dependencies([saved_output.assign(output),\n saved_state.assign(state)]):\n # Classifier.\n # Dropout is also applied to the output of the LSTM cell, only when\n # used for the projection, as it is not recurrent.\n outputs = tf.concat(0, outputs)\n outputs = tf.nn.dropout(outputs, 1 - dropout_prob)\n logits = tf.nn.xw_plus_b(outputs, w, b)\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\n logits, tf.concat(0, train_labels)))\n\n # Optimizer.\n global_step = tf.Variable(0)\n\n if exp_decay is not None:\n learning_rate = tf.train.exponential_decay(\n learning_rate, global_step,\n exp_decay['decay_steps'], exp_decay['decay_rate'], exp_decay['staircase'])\n\n optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n # Clipping to avoid exploding gradient.\n gradients, v = zip(*optimizer.compute_gradients(loss))\n gradients, _ = 
tf.clip_by_global_norm(gradients, gradient_max_value)\n optimizer = optimizer.apply_gradients(zip(gradients, v), global_step=global_step)\n\n # Predictions.\n train_prediction = tf.nn.softmax(logits)\n\n # Sampling and validation eval: batch 1, no unrolling.\n sample_input_ph = tf.placeholder(tf.float32, shape=[1, before_embedding_size])\n saved_sample_output = tf.Variable(tf.zeros([1, num_nodes]))\n saved_sample_state = tf.Variable(tf.zeros([1, num_nodes]))\n reset_sample_state = tf.group(\n saved_sample_output.assign(tf.zeros([1, num_nodes])),\n saved_sample_state.assign(tf.zeros([1, num_nodes])))\n\n sample_input = sample_input_ph\n if embedding is not None:\n indices = tf.argmax(sample_input_ph, 1)\n sample_input = tf.nn.embedding_lookup(embedding, indices)\n\n sample_output, sample_state = lstm_cell(\n sample_input, saved_sample_output, saved_sample_state)\n with tf.control_dependencies([saved_sample_output.assign(sample_output),\n saved_sample_state.assign(sample_state)]):\n sample_prediction = tf.nn.softmax(tf.nn.xw_plus_b(sample_output, w, b))\n\n tf_graph = {\n 'graph': graph,\n 'data_ph': train_data,\n 'sample_ph': sample_input_ph }\n tf_predictions = [train_prediction, sample_prediction]\n\n return tf_graph, optimizer, loss, tf_predictions, reset_sample_state", "def extract_hidden_states(self, output):\n # Intermediate hidden states\n output_fw_intermediate = output[:,:-1,0:self._hidden_size]\n output_bw_intermediate = output[:,1:,self._hidden_size:] \n \n # Last hidden states\n output_fw = output[:,-1,0:self._hidden_size]\n output_bw = output[:,0,self._hidden_size:]\n last_ht = torch.cat((output_fw, output_bw), -1)\n \n return last_ht, output_fw_intermediate, output_bw_intermediate", "def build_lstm8(embeddings, shape, settings):\n model = Sequential()\n model.add(\n Embedding(\n embeddings.shape[0],\n embeddings.shape[1],\n input_length=shape['max_length'],\n trainable=False,\n weights=[embeddings],\n mask_zero=False,\n name='eembed'\n )\n )\n model.add(TimeDistributed(Dense(shape['n_hidden'], use_bias=False, name='td8')))\n model.add(Bidirectional(LSTM(shape['n_hidden'], return_sequences=True,\n recurrent_dropout=settings['dropout'],\n dropout=settings['dropout']), name='bidi'))\n model.add(Flatten(name='flaaten'))\n model.add(BatchNormalization())\n model.add(Dropout(settings['dropout'] / 2.0))\n model.add(Dense(shape['n_class'], activation='sigmoid'))\n xprint('build_lstm8: embeddings=%s shape=%s' % (dim(embeddings), shape))\n return model", "def Bilstm(self):\n # Model.\n model = Sequential()\n # model.add(Bidirectional(LSTM(2048, return_sequences=True),input_shape=self.input_shape))\n # model.add(Bidirectional(LSTM(2048))) id identification is 2048\n model.add(Bidirectional(LSTM(2048, return_sequences=True), input_shape=self.input_shape))\n #model.add(Bidirectional(LSTM(2048)))\n model.add(Dense(2048, activation='relu'))\n #model.add(Dropout(0.5))\n model.add(Dense(self.nb_classes, activation='softmax'))\n # model.add(Bidirectional(LSTM(20, return_sequences=True), input_shape=(n_timesteps, 1)))\n # model.add(TimeDistributed(Dense(1, activation='sigmoid')))\n # model = Sequential()\n # model = Sequential()\n # model.add(Embedding(max_features, 128, input_length=maxlen))\n # model.add(Bidirectional(LSTM(64)))\n # model.add(Dropout(0.5))\n # model.add(Dense(1, activation='sigmoid'))\n\n # model.add(Embedding(20000, 128, input_length=self.seq_length))\n # model.add(Flatten(input_shape=self.input_shape))\n # model.add(Embedding(20000, 128, input_length=self.seq_length))\n # 
model.add(Bidirectional(LSTM(128)))\n # model.add(Dropout(0.5))\n # model.add(Dense(self.nb_classes, activation='softmax'))\n return model", "def separable_lstm(cell, num_units, inputs, seq_lengths1, seq_lengths2, scope=None):\n with variable_scope.variable_scope(scope, \"SeparableLstm\", [inputs]):\n hidden = bidirectional_horizontal_lstm(cell, num_units, inputs, seq_lengths1)\n with variable_scope.variable_scope(\"vertical\"):\n transposed = array_ops.transpose(hidden, [0, 2, 1, 3])\n output_transposed = bidirectional_horizontal_lstm(cell, num_units, transposed, seq_lengths2)\n output = array_ops.transpose(output_transposed, [0, 2, 1, 3])\n return output", "def test_lstm_two_layers(self):\n\n class MultipleLayersLSTM(nn.Module):\n def __init__(self):\n super(MultipleLayersLSTM, self).__init__()\n self.rnn = torch.nn.LSTM(10, 20, 2, bidirectional=False)\n self.rnn.training = False\n\n def forward(self, inputs, h, c):\n return self.rnn(inputs, (h, c))\n\n inputs = torch.randn(5, 3, 10)\n h = torch.randn(2, 3, 20)\n c = torch.randn(2, 3, 20)\n model = MultipleLayersLSTM()\n\n utils.compare_tracing_methods(\n model, inputs, h, c, fusible_ops={\"aten::lstm\"}, skip_to_glow=True\n )", "def forward(self, batch: torch.LongTensor,\n hidden_start: torch.FloatTensor = None) -> torch.FloatTensor:\n\n # max_len = x.size(1)\n # x,label = batch\n # batch_size x max_len x embedding_dim\n x_embedded = self.embedding(batch)\n # x_drop = self.dropout\n x_drop = self.dropout(x_embedded)\n\n # compute hidden states and logits for each time step\n # hidden_states_list = []\n # prev_hidden = hidden_start\n hidden_state = self.rnn(x_drop)[0]\n # print(hidden_state)\n # print(hidden_state[0].shape)\n # print(hidden_state[1].shape)\n\n # hidden_state = hidden_state.permute(2,1,0)\n # hidden_state_maxPooled = F.max_pool1d(hidden_state,hidden_state.shape[2])\n # hidden_state_maxPooled = hidden_state.permute(2,1,0)\n hidden_state_pooled, _ = torch.max(hidden_state, dim=1)\n\n output = self.get_logits(hidden_state_pooled)\n\n # Loss = self.loss(output, y)\n\n # hidden_state = softmax(logits(hidden_state))\n\n # batch_size x max_len x rnn_size\n # hidden_states = torch.stack(hidden_states_list, dim=1)\n\n return output", "def create_multilayer_lstm_params(num_layers, in_size, state_size, name=\"\"):\n lstm_layers = []\n for i in range(num_layers):\n layer_name = name + \"-\" + str(i)\n print(\"LSTM \" + layer_name + \": \" + str(in_size) + \" x \" + str(state_size) + \"; default Dynet initialization of hidden weights\")\n lstm_layer = torch.nn.LSTMCell(input_size=int(in_size), hidden_size=int(state_size), bias=True)\n lstm_layers.append(lstm_layer)\n in_size = state_size\n return torch.nn.ModuleList(lstm_layers)", "def __init__(self, embeddings, hidden_dim, output_size, dropout_emb, dropout_lstm):\n super(AttentionalLSTM, self).__init__()\n\n # 1) embedding layer:\n trainable_emb = False\n self.word_embeddings = nn.Embedding(num_embeddings=embeddings.shape[0],\n embedding_dim=embeddings.shape[1])\n self.init_embeddings(embeddings, trainable_emb)\n self.drop_emb = nn.Dropout(dropout_emb)\n\n # 2) LSTM layer\n self.hidden_dim = hidden_dim\n self.lstm = nn.LSTM(input_size=embeddings.shape[1],\n hidden_size=hidden_dim,\n batch_first=True,\n dropout=dropout_lstm)\n self.drop_lstm = nn.Dropout(dropout_lstm)\n self.attention = Attention(attention_size=hidden_dim, batch_first=True)\n\n # 3) linear layer -> outputs\n self.hidden2output = nn.Linear(hidden_dim, output_size)", "def __call__(self, inputs, state, scope=None):\n # 
Apply vanilla LSTM\n output, new_state = self._cell(inputs, state, scope)\n\n if self.state_is_tuple:\n (prev_c, prev_h) = state\n (new_c, new_h) = new_state\n else:\n num_proj = self._cell._num_units if self._cell._num_proj is None else self._cell._num_proj\n prev_c = tf.slice(state, [0, 0], [-1, self._cell._num_units])\n prev_h = tf.slice(state, [0, self._cell._num_units], [-1, num_proj])\n new_c = tf.slice(new_state, [0, 0], [-1, self._cell._num_units])\n new_h = tf.slice(new_state, [0, self._cell._num_units], [-1, num_proj])\n\n # Apply zoneout\n if self.is_training:\n keep_rate_cell = 1.0 - self._zoneout_cell\n keep_rate_output = 1.0 - self._zoneout_outputs\n c = keep_rate_cell * tf.nn.dropout(new_c - prev_c, keep_prob=keep_rate_cell) + prev_c\n h = keep_rate_output * tf.nn.dropout(new_h - prev_h, keep_prob=keep_rate_output) + prev_h\n else:\n c = new_c - self._zoneout_cell * (new_c + prev_c)\n h = new_h - self._zoneout_outputs * (new_h + prev_h)\n\n new_state = tf.nn.rnn_cell.LSTMStateTuple(c, h) if self.state_is_tuple else tf.concat([c, h], axis=1)\n\n return output, new_state", "def loop_fn(time, cell_output, cell_state, loop_state, emit_ta):\n \n if cell_output is None: # time == 0\n next_cell_state = initial_state\n emit_output= tf.ones(tf.shape(initial_state[1])[:1], dtype=tf.int32) * tf.constant(-1) #(batch_size)\n next_input = tf.squeeze(self.sos, [1])\n elements_finished = tf.logical_and(tf.cast(emit_output, dtype=tf.bool), False)\n \n else:\n \n next_cell_state = cell_state\n decoder_outputs = tf.expand_dims(cell_output, 1) #(batch_size, 1, hidden_size)\n encoder_outputs_reshape = tf.reshape(encoder_outputs, shape=(-1, self.config.hidden_size)) #(batch_size*time_steps, hidden_size)\n decoder_outputs_reshape = tf.reshape(decoder_outputs, shape=(-1, self.config.hidden_size)) #(batch_size*1, hidden_size)\n encoder_outputs_linear_reshape = tf.nn.rnn_cell._linear(encoder_outputs_reshape, output_size=self.config.hidden_size, \n bias=False, scope='Ptr_W1') #(b_sz*tstps_en, h_sz)\n decoder_outputs_linear_reshape = tf.nn.rnn_cell._linear(decoder_outputs_reshape, output_size=self.config.hidden_size, \n bias=False, scope='Ptr_W2') #(b_sz*1, h_sz)\n encoder_outputs_linear = tf.reshape(encoder_outputs_linear_reshape, tf.shape(encoder_outputs))\n decoder_outputs_linear = tf.reshape(decoder_outputs_linear_reshape, tf.shape(decoder_outputs))\n \n encoder_outputs_linear_expand = tf.expand_dims(encoder_outputs_linear, 1) #(b_sz, 1, tstp_en, h_sz)\n decoder_outputs_linear_expand = tf.expand_dims(decoder_outputs_linear, 2) #(b_sz, 1, 1, h_sz)\n \n after_add = tf.tanh(encoder_outputs_linear_expand + decoder_outputs_linear_expand) #(b_sz, 1, tstp_en, h_sz)\n \n after_add_reshape = tf.reshape(after_add, shape=(-1, self.config.hidden_size))\n \n after_add_linear_reshape = tf.nn.rnn_cell._linear(after_add_reshape, output_size=1, #(b_sz*1*tstp_en, 1)\n bias=False, scope='Ptr_v')\n after_add_linear = tf.reshape(after_add_linear_reshape, shape=(-1, tstps_en)) #(b_sz, tstp_en)\n en_length_mask = tf.sequence_mask(self.encoder_tstps, #(b_sz, tstp_en)\n maxlen=tstps_en, dtype=tf.bool)\n\n \"\"\"mask out already hitted ids\"\"\" \n hit_ids = tf.cond(emit_ta.size() > 0, lambda: emit_ta.pack(), lambda: tf.ones(shape=[1, batch_size], dtype=tf.int32)*-1) #(to_cur_tstp, b_sz)\n masks = tf.one_hot(hit_ids, depth=tstps_en, on_value=True, off_value=False) #(to_cur_tstp, b_sz, tstp_en)\n masks = tf.reduce_any(masks, reduction_indices=[0]) #(b_sz, tstp_en)\n hit_masks = tf.logical_not(masks)\n\n mask = 
tf.logical_and(en_length_mask, hit_masks)\n logits = tf.select(mask, after_add_linear,\n tf.ones_like(after_add_linear) * (-np.Inf)) # shape(b_sz, tstp_en)\n\n emit_output = tf.arg_max(logits, dimension=1) #(batch_size)\n emit_output = tf.cast(emit_output, dtype=tf.int32)\n \n bool_mask = tf.one_hot(emit_output, depth=tstps_en, on_value=True, off_value=False) #(b_sz, tstps_en)\n bool_mask = tf.reshape(bool_mask, shape=(batch_size, tstps_en))\n next_input = tf.boolean_mask(encoder_inputs, bool_mask) #(b_sz, emb_sz)\n \n elements_finished = tf.equal(emit_output, 0) #(batch_size)\n elements_finished = tf.reshape(elements_finished, (-1,))\n \n elements_finished = tf.logical_or(elements_finished, (time >= self.config.num_steps))\n next_loop_state = loop_state\n return (elements_finished, next_input, next_cell_state,\n emit_output, next_loop_state)", "def forward(self, x, hidden):\n batch_size = x.size(0)\n # embeddings and lstm_out\n embeds = self.embedding(x)\n lstm_out, hidden = self.lstm(embeds, hidden)\n \n # stack up lstm outputs\n lstm_out = lstm_out.contiguous().view(-1, self.hidden_dim)\n \n # dropout and fully connected layer\n out = self.dropout(lstm_out)\n out = self.fc(out)\n \n # sigmoid function\n sig_out = self.sig(out)\n \n # reshape to be batch_size first\n sig_out = sig_out.view(batch_size, -1)\n sig_out = sig_out[:, -1] # get last batch of labels\n \n # return last sigmoid output and hidden state\n return sig_out, hidden", "def add_logits_op(self):\n with tf.variable_scope(\"bi-lstm\"):\n cell_fw = tf.contrib.rnn.LSTMCell(self.config.hidden_size_lstm)\n cell_bw = tf.contrib.rnn.LSTMCell(self.config.hidden_size_lstm)\n (output_fw, output_bw), _ = tf.nn.bidirectional_dynamic_rnn(\n cell_fw, cell_bw, self.word_embeddings,\n sequence_length=self.sequence_lengths, dtype=tf.float32)\n output = tf.concat([output_fw, output_bw], axis=-1)\n output = tf.nn.dropout(output, self.dropout)\n\n with tf.variable_scope(\"proj\"):\n W = tf.get_variable(\"W\", dtype=tf.float32,\n shape=[2*self.config.hidden_size_lstm, self.config.ntags])\n\n b = tf.get_variable(\"b\", shape=[self.config.ntags],\n dtype=tf.float32, initializer=tf.zeros_initializer())\n\n nsteps = tf.shape(output)[1]\n output = tf.reshape(output, [-1, 2*self.config.hidden_size_lstm])\n pred = tf.matmul(output, W) + b\n self.logits = tf.reshape(pred, [-1, nsteps, self.config.ntags])", "def trajectory(self, state, T=1, time_steps=200):\n\n state = state.to(device)\n t = torch.linspace(0, T, time_steps).to(device)\n\n # integrate and remove batch dim\n traj = self.model_of_dyn_system.trajectory(state, t)\n return traj.detach().cpu()[:, 0, :]", "def build_lstm_layers(lstm_sizes, embed, keep_prob_, batch_size):\n lstms = [tf.contrib.rnn.BasicLSTMCell(size) for size in lstm_sizes]\n # Add dropout to the cell\n drops = [tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob_) for lstm in lstms]\n # Stack up multiple LSTM layers, for deep learning\n cell = tf.contrib.rnn.MultiRNNCell(drops)\n # Getting an initial state of all zeros\n initial_state = cell.zero_state(batch_size, tf.float32)\n \n lstm_outputs, final_state = tf.nn.dynamic_rnn(cell, embed, initial_state=initial_state)\n \n return initial_state, lstm_outputs, cell, final_state", "def get_init_cell(batch_size, rnn_size):\n\n cell = tf.contrib.rnn.BasicLSTMCell(rnn_size) #?????????????????cell_hidden_size = state_size\n #cell = tf.contrib.run.DropoutWrapper(cell, output_keep_prob=keep_prob)\n num_of_layers = 3\n cell = tf.contrib.rnn.MultiRNNCell([cell for _ in 
range(num_of_layers)])\n initial_state = cell.zero_state(batch_size, tf.float32)\n initial_state = tf.identity(initial_state, name='initial_state')\n \n return (cell, initial_state)", "def get_init_cell(batch_size, rnn_size):\n lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size)\n #drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=0.5)\n cell = tf.contrib.rnn.MultiRNNCell([lstm] * 3)\n initial_state = cell.zero_state(batch_size, tf.float32)\n initial_state = tf.identity(initial_state, name=\"initial_state\")\n\n return cell, initial_state", "def __init__(self, input_size=1, hidden_layer_size=100, output_size=1):\n super().__init__()\n self.hidden_layer_size = hidden_layer_size # TODO [tuning] different size (?)\n\n # TODO [tuning] BiLSTM (?)\n self.lstm = nn.LSTM(input_size=input_size, hidden_size=hidden_layer_size, batch_first=True)\n\n self.linear = nn.Linear(hidden_layer_size, output_size, bias=True) # TODO try with bias=False (?)", "def create_logits(self):\n with tf.variable_scope('LSTM'):\n first_label = self.get_input(prev=None, i=0)\n decoder_inputs = [first_label] + [None] * (self._params.seq_length - 1)\n lstm_cell = tf.contrib.rnn.LSTMCell(\n self._mparams.num_lstm_units,\n use_peepholes=False,\n cell_clip=self._mparams.lstm_state_clip_value,\n state_is_tuple=True,\n initializer=orthogonal_initializer)\n lstm_outputs, _ = self.unroll_cell(\n decoder_inputs=decoder_inputs,\n initial_state=lstm_cell.zero_state(self._batch_size, tf.float32),\n loop_function=self.get_input,\n cell=lstm_cell)\n\n with tf.variable_scope('logits'):\n logits_list = [\n tf.expand_dims(self.char_logit(logit, i), dim=1)\n for i, logit in enumerate(lstm_outputs)\n ]\n\n return tf.concat(logits_list, 1)", "def run_LSTM(data):\n # Initialising the RNN\n model = Sequential()\n \"\"\"\n # Adding the first LSTM layer and some Dropout regularisation\n model.add(LSTM(units=256,return_sequences=False, input_shape=(data.x_train.shape[1], data.x_train.shape[2])))\n model.add(Dropout(0.3))\n model.add(Dense(units=1))\n \"\"\"\n\n model = Sequential()\n model.add(LSTM(units=180, return_sequences=False, input_shape=(data.x_train.shape[1], data.x_train.shape[2])))\n #model.add(Dropout(params['dropout']))\n #model.add(LSTM(units=128))\n #model.add(Dropout(params['dropout']))\n #model.add(Dense(units=64))\n model.add(Dense(units=1,activation='relu',kernel_initializer=tf.keras.initializers.Orthogonal()))\n # Compiling the RNN\n opt = Adam(lr=0.0052)\n model.compile(optimizer=opt, loss='mean_squared_error',metrics=['mean_absolute_percentage_error'])\n\n # Fitting the RNN to the Training set\n regressor = model.fit(data.x_train, data.y_train.ravel(), epochs=180,batch_size=410,shuffle=True,validation_data=(data.x_valid,data.y_valid.ravel()))\n\n #Create plots\n plt.plot(regressor.history['loss'], label='loss')\n plt.plot(regressor.history['val_loss'], label='val_loss')\n plt.legend()\n plt.show()\n\n\n plt.figure()\n outputs = model.predict(data.x_test)\n print(outputs.shape)\n predictions = outputs[:,0]\n\n print(predictions.shape)\n\n pred_prices = predictions.reshape(-1,1)\n real_prices = data.y_test.reshape(-1,1)\n mape = 0\n\n pred_prices = data.inv.inverse_transform(pred_prices)\n real_prices = data.inv.inverse_transform(real_prices)\n\n #if Returns == False:\n \n #mape = mean_absolute_percentage_error(real_prices, pred_prices)\n\n #pred_prices = [x * (data.mm[1] - data.mm[0]) + data.mm[0] for x in predictions.reshape(-1)]\n #real_prices = [x * (data.mm[1] - data.mm[0]) + data.mm[0] for x in 
data.y_test.reshape(-1)]\n\n #pred_prices = data.train_sc.inverse_transform(predictions.reshape(-1))\n #real_prices = data.test_sc.inverse_transform(data.y_test.reshape(-1))\n\n #mape = mean_absolute_percentage_error(data.y_test.ravel(), pred_prices.)\n y_true, y_pred = np.array(real_prices).reshape(-1,1), np.array(pred_prices).reshape(-1,1)\n #y_true, y_pred = y_true[:50], y_pred[:50]\n\n mape = mean_absolute_percentage_error(y_true, y_pred)\n pct = PCT(y_true,y_pred)\n mse = mean_squared_error(y_true,y_pred)\n rmse = sqrt(mse)\n amape = AMAPE(y_true,y_pred)\n mae = MAE(y_true,y_pred)\n\n plt.plot(real_prices, label='targets')\n plt.plot(pred_prices, label='predictions')\n plt.legend()\n plt.title('LSTM test data')\n plt.show()\n\n plt.figure()\n outputs = model.predict(data.x_train)\n print(outputs.shape)\n predictions = outputs[:,0]\n\n plt.plot(data.y_train.ravel(), label='targets')\n plt.plot(predictions, label='predictions')\n plt.legend()\n plt.title('LSTM train data')\n plt.show()\n print(y_pred)\n\n print('RMSE= {:.6f}, MAPE = {:.6f}, PCT = {:.6f}, MSE = {:.6f}, MAE = {:.6f}, AMAPE = {:.6f}'.format(rmse, mape, pct, mse, mae, amape))" ]
[ "0.6892743", "0.6710073", "0.6645775", "0.6560571", "0.65253127", "0.6467472", "0.6402133", "0.63405454", "0.6327336", "0.6319975", "0.62931645", "0.62660724", "0.6201569", "0.61974037", "0.61889523", "0.6188214", "0.6185717", "0.61776847", "0.61347216", "0.61347216", "0.61342585", "0.6120478", "0.610608", "0.6079514", "0.60789424", "0.60562885", "0.60407937", "0.6007396", "0.6007396", "0.5986837", "0.59853923", "0.5984463", "0.59828645", "0.5982049", "0.5973913", "0.59557307", "0.59554553", "0.5946882", "0.5943511", "0.5914852", "0.5910636", "0.59094375", "0.5908141", "0.5906957", "0.58992594", "0.58959985", "0.58535314", "0.5850395", "0.58475465", "0.581226", "0.58045775", "0.5786732", "0.57788795", "0.577182", "0.577038", "0.5763523", "0.5749947", "0.5746776", "0.57337594", "0.5727108", "0.57141185", "0.56992304", "0.56948125", "0.5693537", "0.5687394", "0.567982", "0.567324", "0.56701344", "0.5660205", "0.5654713", "0.5652018", "0.56478685", "0.5647462", "0.5643771", "0.56406355", "0.56390435", "0.5633544", "0.5630392", "0.5623084", "0.5611732", "0.56076545", "0.5605041", "0.56040186", "0.5603595", "0.5603507", "0.560264", "0.55973506", "0.55870885", "0.5586603", "0.5586413", "0.55851024", "0.55759805", "0.55709565", "0.5570894", "0.55694216", "0.5567427", "0.55486876", "0.55402774", "0.5539253", "0.55347145" ]
0.5587781
87
LSTM returning hidden state and content cell at a specific timestep.
def lstm_cell_1(x, h, c, name=None, reuse=False):
    nin = x.shape[-1].value
    nout = h.shape[-1].value
    with tf.variable_scope(name, default_name="lstm_1", values=[x, h, c], reuse=reuse):
        wx = get_variable_wrap("kernel/input", [nin, nout * 4], dtype=tf.float32,
                               initializer=tf.orthogonal_initializer(1.0))
        wh = get_variable_wrap("kernel/hidden", [nout, nout * 4], dtype=tf.float32,
                               initializer=tf.orthogonal_initializer(1.0))
        b = get_variable_wrap("bias", [nout * 4], dtype=tf.float32,
                              initializer=tf.constant_initializer(0.0))

    z = ed.dot(x, wx) + ed.dot(h, wh) + b
    i, f, o, u = tf.split(z, 4, axis=0)
    i = tf.sigmoid(i)
    f = tf.sigmoid(f + 1.0)
    o = tf.sigmoid(o)
    u = tf.tanh(u)
    c = f * c + i * u
    h = o * tf.tanh(c)
    return h, c
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def LSTMCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None):\n hx, cx = hidden\n gates = F.linear(input, w_ih, b_ih) + F.linear(hx, w_hh, b_hh)\n\n ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)\n\n ingate = hard_sigmoid(ingate)\n forgetgate = hard_sigmoid(forgetgate)\n cellgate = F.tanh(cellgate)\n outgate = hard_sigmoid(outgate)\n\n cy = (forgetgate * cx) + (ingate * cellgate)\n hy = outgate * F.tanh(cy)\n\n return hy, cy", "def LSTM(inputs, dim, seq_len, name):\r\n with tf.name_scope(name) as scope:\r\n cell = tf.contrib.rnn.LSTMCell(num_units=dim)\r\n hidden_states, cell_states = tf.nn.dynamic_rnn(cell, inputs=inputs, sequence_length=seq_len, dtype=tf.float32, scope=name)\r\n\r\n return hidden_states, cell_states", "def LSTM(self, previous_hidden_memory_tuple, x):\n \n previous_hidden_state,c_prev=tf.unstack(previous_hidden_memory_tuple)\n \n #Input Gate\n i= tf.sigmoid(\n tf.matmul(x,self.Wi)+tf.matmul(previous_hidden_state,self.Ui) + self.bi \n )\n \n #Forget Gate\n f= tf.sigmoid(\n tf.matmul(x,self.Wf)+tf.matmul(previous_hidden_state,self.Uf) + self.bf \n )\n \n #Output Gate\n o= tf.sigmoid(\n tf.matmul(x,self.Wog)+tf.matmul(previous_hidden_state,self.Uog) + self.bog\n )\n \n #New Memory Cell\n c_= tf.nn.tanh(\n tf.matmul(x,self.Wc)+tf.matmul(previous_hidden_state,self.Uc) + self.bc \n ) \n \n #Final Memory cell\n c= f*c_prev + i*c_\n \n #Current Hidden state\n current_hidden_state = o*tf.nn.tanh(c)\n\n return tf.stack([current_hidden_state,c])", "def build_lstm(self, keep_prob):\n def get_cell():\n if self.kernel == 'LSTM':\n cell = tf.contrib.rnn.BasicLSTMCell(self.num_hidden_units)\n print('LSTM is using...')\n elif self.kernel == 'GRU': # GRU RNN\n cell = tf.contrib.rnn.GRUCell(self.num_hidden_units)\n print('GRU is using...')\n else:\n raise AttributeError\n cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=keep_prob)\n return cell\n lstm_cell = get_cell()\n init_state = lstm_cell.zero_state(self.batch_size, dtype=tf.float32)\n return lstm_cell, init_state", "def _lstm_unroll_base(num_lstm_layer, seq_len, num_hidden):\n param_cells = []\n last_states = []\n for i in range(num_lstm_layer):\n param_cells.append(LSTMParam(i2h_weight=mx.sym.Variable(\"l%d_i2h_weight\" % i),\n i2h_bias=mx.sym.Variable(\"l%d_i2h_bias\" % i),\n h2h_weight=mx.sym.Variable(\"l%d_h2h_weight\" % i),\n h2h_bias=mx.sym.Variable(\"l%d_h2h_bias\" % i)))\n state = LSTMState(c=mx.sym.Variable(\"l%d_init_c\" % i),\n h=mx.sym.Variable(\"l%d_init_h\" % i))\n last_states.append(state)\n assert len(last_states) == num_lstm_layer\n\n # embedding layer\n data = mx.sym.Variable('data')\n wordvec = mx.sym.SliceChannel(data=data, num_outputs=seq_len, squeeze_axis=1)\n\n hidden_all = []\n for seqidx in range(seq_len):\n hidden = wordvec[seqidx]\n for i in range(num_lstm_layer):\n next_state = _lstm(\n num_hidden=num_hidden,\n indata=hidden,\n prev_state=last_states[i],\n param=param_cells[i],\n seqidx=seqidx,\n layeridx=i)\n hidden = next_state.h\n last_states[i] = next_state\n hidden_all.append(hidden)\n\n hidden_concat = mx.sym.Concat(*hidden_all, dim=0)\n pred_fc = mx.sym.FullyConnected(data=hidden_concat, num_hidden=11, name=\"pred_fc\")\n return pred_fc", "def __call__(self, inputs, state):\n with tf.variable_scope(\"BayesLSTMCell\"):\n if self.w is None:\n\n# size = inputs.get_shape()[-1].value\n \n print ([\"------- Size input LSTM: \", inputs.shape])\n print ([\"------- Dim input specified \", self.X_dim])\n# print ([\"num units LSTM: \", self.num_units])\n \n self.w = 
VI.sample_posterior((self.X_dim + self.num_units, 4 * self.num_units),\n name=self.n + \"_weights\",\n prior=self.prior,\n is_training=self.is_training)\n \n self.b = VI.sample_posterior((4 * self.num_units, 1),\n name=self.n + \"_biases\",\n prior=self.prior,\n is_training=self.is_training)\n\n # Get the cell and hidden state from the previous cell [C_t-1, h_t-1]\n C_t_prev , h_t_prev = state\n #Vector concatenation of previous hidden state and embedded inputs\n concat_inputs_hidden = tf.concat([inputs, h_t_prev], 1)\n # Compute the Z = Wx + b for each of the 4 networks at once !\n gate_inputs = tf.nn.bias_add(tf.matmul(concat_inputs_hidden, self.w), tf.squeeze(self.b))\n \n # Split data up for the 4 gates\n # i = input_gate, j = new_input, f = forget_gate, o = output_gate\n i, j, f, o = tf.split(value=gate_inputs, num_or_size_splits=4, axis=1)\n\n # Compute the new cell \n C_t = (C_t_prev * tf.sigmoid(f + self._forget_bias) + tf.sigmoid(i)*self._activation(j))\n h_t = self._activation(C_t) * tf.sigmoid(o)\n \n #Create tuple of the new state\n State_t = LSTMStateTuple(C_t, h_t)\n\n return h_t, State_t", "def lstm_cell(input, cx):\n return FunctionLib.apply('LSTMCell', input.device, [input, cx])", "def forward(self, X, hx=None):\n outputs = []\n (batch_size, Nseq, Dim) = X.size()\n \n# print (\"Dimensions LSTM_RNN Batch: \", (batch_size, Nseq, Dim))\n for l in range(self.num_layers):\n if (type(hx) == type(None)):\n h_t = torch.zeros(batch_size, self.hidden_size).to(device = Vil.device, dtype = Vil.dtype)\n c_t = torch.zeros(batch_size, self.hidden_size).to(device = Vil.device, dtype = Vil.dtype)\n else:\n h_t = torch.zeros(batch_size, self.hidden_size).to(device = Vil.device, dtype = Vil.dtype)\n c_t = torch.zeros(batch_size, self.hidden_size).to(device = Vil.device, dtype = Vil.dtype)\n setattr(self, 'h_t%i'%(l+1), h_t)\n setattr(self, 'c_t%i'%(l+1), c_t)\n \n # We loop for every element in the chain and for every layer\n\n for i in range(Nseq):\n input_t = X[:,i,:]\n# print (\"Sequence Chunk size: \",input_t.size())\n l = 0\n # First layer we put the input, in the rest we put the propagated states\n h_t, c_t = getattr(self, 'LSTMCell%i'%(l+1))(input_t, (getattr(self, 'h_t%i'%(l+1)), getattr(self, 'c_t%i'%(l+1))))\n setattr(self, 'h_t%i'%(l+1), h_t)\n setattr(self, 'c_t%i'%(l+1), c_t)\n \n for l in range(1,self.num_layers):\n h_t, c_t = getattr(self, 'LSTMCell%i'%(l+1))(h_t, (getattr(self, 'h_t%i'%(l+1)), getattr(self, 'c_t%i'%(l+1))))\n setattr(self, 'h_t%i'%(l+1), h_t)\n setattr(self, 'c_t%i'%(l+1), c_t)\n \n \n # Return the hx and cx of all layers ? 
for the last sample ?\n outputs = []\n for l in range(self.num_layers):\n outputs.append( [getattr(self, 'h_t%i'%(l+1)), getattr(self, 'c_t%i'%(l+1))])\n\n# outputs = torch.stack(outputs, 1).squeeze(2)\n \n return outputs", "def __init__(self, incoming, n_units,\n W_ci=tf.zeros, W_ig=tf.zeros, W_og=tf.zeros, W_fg=tf.zeros,\n b_ci=tf.zeros, b_ig=tf.zeros, b_og=tf.zeros, b_fg=tf.zeros,\n a_ci=tf.tanh, a_ig=tf.sigmoid, a_og=tf.sigmoid, a_fg=tf.sigmoid, a_out=tf.identity,\n c_init=tf.zeros, h_init=tf.zeros, learn_c_init=False, learn_h_init=False, forgetgate=True,\n output_dropout=False, store_states=False, return_states=False, precomp_fwds=False,\n tickerstep_biases=None, learn_tickerstep_biases=True, name='LSTM'):\n super(LSTMLayer, self).__init__()\n with tf.variable_scope(name) as self.layer_scope:\n self.incoming, self.incoming_shape = get_input(incoming)\n self.n_units = n_units\n self.lstm_inlets = ['ci', 'ig', 'og', 'fg']\n if return_states:\n store_states = True\n \n #\n # Initialize weights and biases\n #\n \n # Turn W inits into lists [forward_pass, backward_pass]\n W_ci, W_ig, W_og, W_fg = [v[:2] if isinstance(v, list) else [v, v] for v in [W_ci, W_ig, W_og, W_fg]]\n \n # Make W and b tf variables\n W_ci, W_ig, W_og, W_fg = [\n [tofov(v[0], shape=[self.incoming_shape[-1], n_units], var_params=dict(name=n + '_fwd')),\n tofov(v[1], shape=[n_units, n_units], var_params=dict(name=n + '_bwd'))]\n for v, n in zip([W_ci, W_ig, W_og, W_fg], ['W_ci', 'W_ig', 'W_og', 'W_fg'])]\n b_ci, b_ig, b_og, b_fg = [tofov(v, shape=[n_units], var_params=dict(name=n)) for v, n in\n zip([b_ci, b_ig, b_og, b_fg], ['b_ci', 'b_ig', 'b_og', 'b_fg'])]\n \n # Pack weights for fwd and bwd connections\n W_fwd_conc = tf.concat(axis=1, values=[W[0] for W in [W_ci, W_ig, W_og, W_fg]])\n W_bwd_conc = tf.concat(axis=1, values=[W[1] for W in [W_ci, W_ig, W_og, W_fg]])\n \n if not forgetgate:\n print(\"Warning: Setting forgetgate to 0 has not been tested yet, please set the W and b manually \"\n \"to not-trainable tensorflow variables!\")\n \n def a_fg(x):\n return tf.ones(x.get_shape().as_list())\n \n # Initialize bias for tickersteps\n if tickerstep_biases is not None:\n self.W_tickers = OrderedDict(zip_longest(self.lstm_inlets,\n [tofov(tickerstep_biases, shape=[n_units],\n var_params=dict(name='W_tickers_' + g,\n trainable=learn_tickerstep_biases))\n for g in self.lstm_inlets]))\n else:\n self.W_tickers = None\n \n #\n # Create mask for output dropout\n # apply dropout to n_units dimension of outputs, keeping dropout mask the same for all samples,\n # sequence positions, and pixel coordinates\n #\n output_shape = self.get_output_shape()\n if output_dropout:\n out_do_mask = tf.ones(shape=[output_shape[0], output_shape[-1]],\n dtype=tf.float32)\n out_do_mask = tf.nn.dropout(out_do_mask, keep_prob=1. 
- output_dropout,\n noise_shape=[1, output_shape[-1]])\n \n def out_do(x):\n \"\"\"Function for applying dropout mask to outputs\"\"\"\n if output_dropout:\n return out_do_mask * x\n else:\n return x\n \n # Redefine a_out to include dropout (sneaky, sneaky)\n a_out_nodropout = a_out\n \n def a_out(x):\n return a_out_nodropout(out_do(x))\n \n #\n # Handle initializations for h (hidden states) and c (cell states) as Variable\n #\n h_init = out_do(tofov(h_init, shape=[output_shape[0], output_shape[-1]],\n var_params=dict(name='h_init', trainable=learn_h_init)))\n c_init = tofov(c_init, shape=[output_shape[0], output_shape[-1]],\n var_params=dict(name='h_init', trainable=learn_c_init))\n \n # Initialize lists to store LSTM activations and cell states later\n h = [h_init]\n c = [c_init]\n \n self.precomp_fwds = precomp_fwds\n self.store_states = store_states\n self.return_states = return_states\n \n self.W_fwd = OrderedDict(zip(self.lstm_inlets, [W[0] for W in [W_ci, W_ig, W_og, W_fg]]))\n self.W_bwd = OrderedDict(zip(self.lstm_inlets, [W[1] for W in [W_ci, W_ig, W_og, W_fg]]))\n \n self.W_fwd_conc = W_fwd_conc\n self.W_bwd_conc = W_bwd_conc\n self.a = OrderedDict(zip(self.lstm_inlets, [a_ci, a_ig, a_og, a_fg]))\n self.a['out'] = a_out\n self.b = OrderedDict(zip(self.lstm_inlets, [b_ci, b_ig, b_og, b_fg]))\n self.h = h\n self.c = c\n self.external_rec = None\n \n self.ig = []\n self.og = []\n self.ci = []\n self.fg = []\n \n self.out = tf.expand_dims(h_init, 1)\n self.name = name", "def extract_lstm_minus_feature(hidden_state, i, j):\n seq_len, bs, hidden_size = hidden_state.size()\n assert hidden_size % 2 == 0\n split_point = hidden_size // 2\n hidden_f = hidden_state[j + 1, :, :split_point] - hidden_state[i, :, :split_point]\n hidden_b = (\n hidden_state[i + 1, :, split_point:] - hidden_state[j + 2, :, split_point:]\n )\n span_v = torch.cat([hidden_f, hidden_b], dim=-1)\n return span_v", "def forward(self, x, hidden):\n emb_x = self.emb_layer(x)\n lstm_out, hidden = self.lstm(emb_x, hidden)\n if self.bidirectional:\n # separate to forward and backward\n # following code reshapes LSTM output to:\n # (batch size, seq length, num directions, hidden dimensions)\n # where direction '0' is forward and direction '1' is backward\n lstm_out = lstm_out.contiguous().view(-1, self.seq_len, 2, self.hidden_dim)\n # get backward output in first node\n lstm_out_bw = lstm_out[:, 0, 1, :]\n # get forward output in last node\n lstm_out_fw = lstm_out[:, -1, 0, :]\n # we may simple concatenate forward & backward outputs,\n # or add them, multiply or average; in this case i used average\n lstm_out = torch.add(input=lstm_out_bw, alpha=1, other=lstm_out_fw)\n lstm_out = torch.div(lstm_out, 2)\n else:\n lstm_out = lstm_out[:, -1]\n \n assert lstm_out.shape[-1] == self.hidden_dim, (lstm_out.shape, self.hidden_dim)\n lstm_out = lstm_out.contiguous().view(-1, self.hidden_dim)\n \n d_out = self.dropout(lstm_out)\n fc_out = self.output_layer(d_out)\n sig_out = torch.sigmoid(fc_out)\n \n # return last sigmoid output and hidden state\n return sig_out, hidden", "def __init__(self, incoming, n_units,\n W_ci=tf.zeros, W_ig=tf.zeros, W_og=tf.zeros, W_fg=tf.zeros,\n b_ci=tf.zeros, b_ig=tf.zeros, b_og=tf.zeros, b_fg=tf.zeros,\n a_ci=tf.tanh, a_ig=tf.sigmoid, a_og=tf.sigmoid, a_fg=tf.sigmoid, a_out=tf.identity,\n c_init=tf.zeros, h_init=tf.zeros, learn_c_init=False, learn_h_init=False, forgetgate=True,\n output_dropout=False, store_states=False, return_states=False, precomp_fwds=False,\n tickerstep_biases=None, 
learn_tickerstep_biases=True, name='LSTM'):\n super(LSTMLayerGetNetInput, self).__init__()\n with tf.variable_scope(name) as self.layer_scope:\n self.incoming, self.incoming_shape = get_input(incoming)\n self.n_units = n_units\n self.lstm_inlets = ['ci', 'ig', 'og', 'fg']\n if return_states:\n store_states = True\n \n #\n # Initialize weights and biases\n #\n \n # Turn W inits into lists [forward_pass, backward_pass]\n W_ci, W_ig, W_og, W_fg = [v[:2] if isinstance(v, list) else [v, v] for v in [W_ci, W_ig, W_og, W_fg]]\n \n # Make W and b tf variables\n W_ci, W_ig, W_og, W_fg = [\n [tofov(v[0], shape=[self.incoming_shape[-1], n_units], var_params=dict(name=n + '_fwd')),\n tofov(v[1], shape=[n_units, n_units], var_params=dict(name=n + '_bwd'))]\n for v, n in zip([W_ci, W_ig, W_og, W_fg], ['W_ci', 'W_ig', 'W_og', 'W_fg'])]\n b_ci, b_ig, b_og, b_fg = [tofov(v, shape=[n_units], var_params=dict(name=n)) for v, n in\n zip([b_ci, b_ig, b_og, b_fg], ['b_ci', 'b_ig', 'b_og', 'b_fg'])]\n \n # Pack weights for fwd and bwd connections\n W_fwd_conc = tf.concat(axis=1, values=[W[0] for W in [W_ci, W_ig, W_og, W_fg]])\n W_bwd_conc = tf.concat(axis=1, values=[W[1] for W in [W_ci, W_ig, W_og, W_fg]])\n \n if not forgetgate:\n print(\"Warning: Setting forgetgate to 0 has not been tested yet, please set the W and b manually \"\n \"to not-trainable tensorflow variables!\")\n \n def a_fg(x):\n return tf.ones(x.get_shape().as_list())\n \n # Initialize bias for tickersteps\n if tickerstep_biases is not None:\n self.W_tickers = OrderedDict(zip_longest(self.lstm_inlets,\n [tofov(tickerstep_biases, shape=[n_units],\n var_params=dict(name='W_tickers_' + g,\n trainable=learn_tickerstep_biases))\n for g in self.lstm_inlets]))\n else:\n self.W_tickers = None\n \n #\n # Create mask for output dropout\n # apply dropout to n_units dimension of outputs, keeping dropout mask the same for all samples,\n # sequence positions, and pixel coordinates\n #\n output_shape = self.get_output_shape()\n if output_dropout:\n out_do_mask = tf.ones(shape=[output_shape[0], output_shape[-1]],\n dtype=tf.float32)\n out_do_mask = tf.nn.dropout(out_do_mask, keep_prob=1. 
- output_dropout,\n noise_shape=[1, output_shape[-1]])\n \n def out_do(x):\n \"\"\"Function for applying dropout mask to outputs\"\"\"\n if output_dropout:\n return out_do_mask * x\n else:\n return x\n \n # Redefine a_out to include dropout (sneaky, sneaky)\n a_out_nodropout = a_out\n \n def a_out(x):\n return a_out_nodropout(out_do(x))\n \n #\n # Handle initializations for h (hidden states) and c (cell states) as Variable\n #\n h_init = out_do(tofov(h_init, shape=[output_shape[0], output_shape[-1]],\n var_params=dict(name='h_init', trainable=learn_h_init)))\n c_init = tofov(c_init, shape=[output_shape[0], output_shape[-1]],\n var_params=dict(name='h_init', trainable=learn_c_init))\n \n # Initialize lists to store LSTM activations and cell states later\n h = [h_init]\n c = [c_init]\n \n self.precomp_fwds = precomp_fwds\n self.store_states = store_states\n self.return_states = return_states\n \n self.W_fwd = OrderedDict(zip(self.lstm_inlets, [W[0] for W in [W_ci, W_ig, W_og, W_fg]]))\n self.W_bwd = OrderedDict(zip(self.lstm_inlets, [W[1] for W in [W_ci, W_ig, W_og, W_fg]]))\n \n self.W_fwd_conc = W_fwd_conc\n self.W_bwd_conc = W_bwd_conc\n self.a = OrderedDict(zip(self.lstm_inlets, [a_ci, a_ig, a_og, a_fg]))\n self.a['out'] = a_out\n self.b = OrderedDict(zip(self.lstm_inlets, [b_ci, b_ig, b_og, b_fg]))\n self.h = h\n self.c = c\n self.external_rec = None\n \n self.ig = []\n self.og = []\n self.ci = []\n self.fg = []\n \n self.out = tf.expand_dims(h_init, 1)\n self.name = name\n \n self.cur_net_fwd = dot_product(tf.zeros(self.incoming_shape[:1] + self.incoming_shape[2:]),\n tf.zeros(self.W_fwd_conc.shape.as_list()))", "def lstm_cell_hidden(mprev, cprev, node_dim, attention_m=False):\n\n # Input Gate\n m_nodes = node_dim\n if attention_m:\n m_nodes = 2 * node_dim\n im = Variable(torch.rand(m_nodes,node_dim))\n ib = Variable(torch.zeros(1,node_dim))\n i_g = torch.sigmoid(torch.matmul(mprev,im) + ib)\n \n #Forget Gate\n fm = Variable(torch.rand(m_nodes,node_dim))\n fb = Variable(torch.zeros(1,node_dim))\n f_g = torch.sigmoid(torch.matmul(mprev,fm) + fb)\n \n #Cell\n cm = Variable(torch.rand(m_nodes,node_dim))\n cb = Variable(torch.zeros(1,node_dim))\n cprime = torch.sigmoid(torch.matmul(mprev,cm) + cb)\n c = f_g * cprev + i_g * torch.tanh(cprime)\n \n #Output Gate\n om = Variable(torch.rand(m_nodes,node_dim))\n ob = Variable(torch.zeros(1,node_dim))\n o_g = torch.sigmoid(torch.matmul(mprev,om) + ob)\n m = o_g * torch.tanh(c)\n return m,c", "def generate_lstm(\n input_seqs,\n hidden_state,\n cell_state,\n w_inp,\n w_hid,\n b_inp,\n b_hid,\n f_act,\n g_act,\n h_act,\n backwards=False,\n ):\n\n h_list = []\n seq_length = len(input_seqs)\n for i in range(seq_length):\n step = input_seqs[i] if not backwards else input_seqs[seq_length - (i + 1)]\n step = _op.squeeze(step, axis=[0])\n gates = _op.nn.dense(step, w_inp) + _op.nn.dense(hidden_state, w_hid)\n if b_inp is not None:\n gates += b_inp\n if b_hid is not None:\n gates += b_hid\n i, f, c, o = _op.split(gates, 4, axis=-1)\n\n i = f_act(i)\n f = f_act(f)\n\n c = g_act(c)\n C = f * cell_state + i * c\n\n o = f_act(o)\n\n H = o * h_act(C)\n\n hidden_state = H\n cell_state = C\n h_list.append(_op.expand_dims(H, axis=0))\n\n if backwards:\n h_list = h_list[::-1]\n\n # Concatenate outputs and add back in direction axis.\n concatenated = _op.concatenate(h_list, 0)\n output = _op.expand_dims(concatenated, axis=1)\n hidden_state = _op.expand_dims(hidden_state, axis=0)\n cell_state = _op.expand_dims(cell_state, axis=0)\n\n return output, hidden_state, 
cell_state", "def lstm_cell():\n if 'reuse' in inspect.getargspec(\n tf.contrib.rnn.BasicLSTMCell.__init__).args:\n return tf.contrib.rnn.BasicLSTMCell(\n args.hidden_size, input_size=args.embedding_size, forget_bias=0.0, state_is_tuple=True,\n reuse=tf.get_variable_scope().reuse)\n else:\n return tf.contrib.rnn.BasicLSTMCell(\n args.hidden_size, input_size=args.embedding_size, forget_bias=0.0, state_is_tuple=True)", "def __init__(self, input_dim, hidden_dim, output_dim):\r\n super(LstmEstimator, self).__init__()\r\n \r\n # The LSTM takes track features as inputs, and outputs hidden states\r\n # with dimensionality hidden_dim\r\n self.lstm = nn.LSTM(input_dim, hidden_dim)\r\n \r\n self.hidden2target = nn.Linear(hidden_dim, output_dim)", "def create_LSTM_LSTM_model(feats2d, shapes, model_settings, is_training):\n\n if is_training:\n dropout_prob = model_settings['dropout_prob'] \n\n # Get dimensions\n lstm_size = model_settings['lstm_size']\n\n batch_size = tf.shape(feats2d)[0] \n feats2d = tf.reshape(feats2d, shape=[batch_size,-1,model_settings['feature_width']]) # features are of shape [max seq length for batch, 40]\n seq_lengths = shapes[:,0] # all shapes are [seq_length, 40], we extract seq_length\n\n # First LSTM \n\n # LSTM cells\n cell_fw = tf.contrib.rnn.LSTMCell(lstm_size, state_is_tuple=True)\n cell_bw = tf.contrib.rnn.LSTMCell(lstm_size, state_is_tuple=True)\n\n # Bi-directional RNN (+ Dropout)\n (output_fw, output_bw), (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, feats2d, \n sequence_length=seq_lengths, \n dtype=tf.float32)\n\n # TODO: make predictions after every 64 time slices\n\n concat_rnn = tf.concat([state_fw[0], state_bw[0]], axis=1)\n\n if is_training:\n first_dropout = tf.nn.dropout(concat_rnn, dropout_prob)\n else:\n first_dropout = concat_rnn\n\n # Second LSTM \n # TODO\n\n # Logits Layer\n num_classes = model_settings['num_classes']\n logits = tf.layers.dense(inputs=first_dropout, units=num_classes)\n \n if is_training:\n return logits, dropout_prob\n else:\n return logits", "def lstm_cell(inputs, **kwargs):\n if context.executing_eagerly():\n return OpLib.execute('LSTMCell', inputs, outputs=[None, None])\n return OpLib.add('LSTMCell', num_outputs=2, **kwargs)", "def build_lstm_cell(num_units, dropout):\n cell = tf.nn.rnn_cell.LSTMCell(num_units)\n if dropout:\n result = tf.nn.rnn_cell.DropoutWrapper(cell,\n output_keep_prob=1-dropout)\n return result", "def init_hidden(self):\n if isinstance(self.rnn, nn.GRU):\n return torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device)\n elif isinstance(self.rnn, nn.LSTM):\n return (torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device),\n torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device))", "def init_hidden(self):\n if isinstance(self.rnn, nn.GRU):\n return torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device)\n elif isinstance(self.rnn, nn.LSTM):\n return (torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device),\n torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device))", "def variable_recurrent(self, x, h, seq_length, w_ih, w_hh, b_ih, b_hh):\n time_step = x.shape[0]\n h_t = h\n if self.is_lstm:\n hidden_size = h[0].shape[-1]\n zero_output = P.ZerosLike()(h_t[0])\n else:\n hidden_size = h.shape[-1]\n zero_output = P.ZerosLike()(h_t)\n seq_length = P.Cast()(seq_length, mstype.float32)\n seq_length = 
P.BroadcastTo((hidden_size, -1))(seq_length)\n seq_length = P.Cast()(seq_length, mstype.int32)\n seq_length = P.Transpose()(seq_length, (1, 0))\n\n outputs = []\n state_t = h_t\n t = 0\n while t < time_step:\n x_t = x[t:t + 1:1]\n x_t = P.Squeeze(0)(x_t)\n h_t = self.cell(x_t, state_t, w_ih, w_hh, b_ih, b_hh)\n seq_cond = seq_length > t\n if self.is_lstm:\n state_t_0 = P.Select()(seq_cond, h_t[0], state_t[0])\n state_t_1 = P.Select()(seq_cond, h_t[1], state_t[1])\n output = P.Select()(seq_cond, h_t[0], zero_output)\n state_t = (state_t_0, state_t_1)\n else:\n state_t = P.Select()(seq_cond, h_t, state_t)\n output = P.Select()(seq_cond, h_t, zero_output)\n outputs.append(output)\n t += 1\n outputs = P.Stack()(outputs)\n return outputs, state_t", "def run_single_step(self): \n contexts = tf.placeholder(tf.float32, [self.batch_size, self.num_ctx, self.dim_ctx]) \n last_memory = tf.placeholder(tf.float32, [self.batch_size, self.dim_hidden])\n last_output = tf.placeholder(tf.float32, [self.batch_size, self.dim_hidden])\n last_word = tf.placeholder(tf.int32, [self.batch_size])\n initial_step = tf.placeholder(tf.bool)\n\n context_mean = tf.reduce_mean(contexts, 1) \n\n lstm = tf.nn.rnn_cell.LSTMCell(self.dim_hidden, initializer=tf.random_normal_initializer(stddev=0.033)) \n\n # Attention mechanism\n alpha = self.attend(contexts, last_output) \n weighted_context = tf.cond(initial_step,\n lambda: tf.identity(context_mean),\n lambda: tf.reduce_sum(contexts*tf.expand_dims(alpha, 2), 1))\n\n word_emb = tf.cond(initial_step, \n lambda: tf.zeros([self.batch_size, self.dim_embed]), \n lambda: tf.nn.embedding_lookup(self.emb_w, last_word))\n \n # Apply the LSTM\n with tf.variable_scope(\"LSTM\"):\n last_state = last_memory, last_output\n output, state = lstm(tf.concat([weighted_context, word_emb], 1), last_state)\n memory, _ = state\n \n # Compute the logits and probs\n expanded_output = tf.concat([output, weighted_context, word_emb], 1)\n\n logits1 = fully_connected(expanded_output, self.dim_dec, 'dec_fc')\n logits1 = nonlinear(logits1, 'tanh')\n logits2 = tf.nn.xw_plus_b(logits1, self.dec_w, self.dec_b)\n probs = tf.nn.softmax(logits2) \n logprobs = tf.log(probs)\n\n tf.get_variable_scope().reuse_variables() \n\n self.contexts = contexts\n self.last_memory = last_memory\n self.last_output = last_output\n self.last_word = last_word\n self.initial_step = initial_step\n\n self.memory = memory\n self.output = output\n self.logprobs = logprobs", "def recurrent(self, x, h_0, w_ih, w_hh, b_ih, b_hh):\n time_step = x.shape[0]\n outputs = []\n t = 0\n h = h_0\n while t < time_step:\n x_t = x[t:t + 1:1]\n x_t = P.Squeeze(0)(x_t)\n h = self.cell(x_t, h, w_ih, w_hh, b_ih, b_hh)\n if self.is_lstm:\n outputs.append(h[0])\n else:\n outputs.append(h)\n t += 1\n outputs = P.Stack()(outputs)\n return outputs, h", "def __init__(self, incoming, n_units,\n W_ci=tf.zeros, W_ig=tf.zeros, W_og=tf.zeros, W_fg=tf.zeros,\n b_ci=tf.zeros, b_ig=tf.zeros, b_og=tf.zeros, b_fg=tf.zeros,\n a_ci=tf.tanh, a_ig=tf.sigmoid, a_og=tf.sigmoid, a_fg=tf.sigmoid, a_out=tf.identity,\n c_init=tf.zeros, h_init=tf.zeros, learn_c_init=False, learn_h_init=False, forgetgate=True,\n output_dropout=False, store_states=False, return_states=False, precomp_fwds=False,\n tickerstep_biases=None, learn_tickerstep_biases=True, name='LSTM'):\n super(LSTMLayerSetNetInput, self).__init__()\n with tf.variable_scope(name) as self.layer_scope:\n self.incoming, self.incoming_shape = get_input(incoming)\n self.n_units = n_units\n self.lstm_inlets = ['ci', 'ig', 'og', 
'fg']\n if return_states:\n store_states = True\n \n # Make W and b tf variables\n W_ci, W_ig, W_og, W_fg = [\n tofov(v, shape=[n_units, n_units], var_params=dict(name=n + '_bwd'))\n for v, n in zip([W_ci, W_ig, W_og, W_fg], ['W_ci', 'W_ig', 'W_og', 'W_fg'])]\n b_ci, b_ig, b_og, b_fg = [tofov(v, shape=[n_units], var_params=dict(name=n)) for v, n in\n zip([b_ci, b_ig, b_og, b_fg], ['b_ci', 'b_ig', 'b_og', 'b_fg'])]\n \n # Pack weights for bwd connections\n W_bwd_conc = tf.concat(axis=1, values=[W_ci, W_ig, W_og, W_fg])\n \n if not forgetgate:\n print(\"Warning: Setting forgetgate to 0 has not been tested yet, please set the W and b manually \"\n \"to not-trainable tensorflow variables!\")\n \n def a_fg(x):\n return tf.ones(x.get_shape().as_list())\n \n # Initialize bias for tickersteps\n if tickerstep_biases is not None:\n self.W_tickers = OrderedDict(zip_longest(self.lstm_inlets,\n [tofov(tickerstep_biases, shape=[n_units],\n var_params=dict(name='W_tickers_' + g,\n trainable=learn_tickerstep_biases))\n for g in self.lstm_inlets]))\n else:\n self.W_tickers = None\n \n #\n # Create mask for output dropout\n # apply dropout to n_units dimension of outputs, keeping dropout mask the same for all samples,\n # sequence positions, and pixel coordinates\n #\n output_shape = self.get_output_shape()\n if output_dropout:\n out_do_mask = tf.ones(shape=[output_shape[0], output_shape[-1]],\n dtype=tf.float32)\n out_do_mask = tf.nn.dropout(out_do_mask, keep_prob=1. - output_dropout,\n noise_shape=[1, output_shape[-1]])\n \n def out_do(x):\n \"\"\"Function for applying dropout mask to outputs\"\"\"\n if output_dropout:\n return out_do_mask * x\n else:\n return x\n \n # Redefine a_out to include dropout (sneaky, sneaky)\n a_out_nodropout = a_out\n \n def a_out(x):\n return a_out_nodropout(out_do(x))\n \n #\n # Handle initializations for h (hidden states) and c (cell states) as Variable\n #\n h_init = out_do(tofov(h_init, shape=[output_shape[0], output_shape[-1]],\n var_params=dict(name='h_init', trainable=learn_h_init)))\n c_init = tofov(c_init, shape=[output_shape[0], output_shape[-1]],\n var_params=dict(name='h_init', trainable=learn_c_init))\n \n # Initialize lists to store LSTM activations and cell states later\n h = [h_init]\n c = [c_init]\n \n self.precomp_fwds = precomp_fwds\n self.store_states = store_states\n self.return_states = return_states\n \n self.W_fwd = OrderedDict(zip(self.lstm_inlets, [None, None, None, None]))\n self.W_bwd = OrderedDict(zip(self.lstm_inlets, [W_ci, W_ig, W_og, W_fg]))\n \n self.W_fwd_conc = None\n self.W_bwd_conc = W_bwd_conc\n self.a = OrderedDict(zip(self.lstm_inlets, [a_ci, a_ig, a_og, a_fg]))\n self.a['out'] = a_out\n self.b = OrderedDict(zip(self.lstm_inlets, [b_ci, b_ig, b_og, b_fg]))\n self.h = h\n self.c = c\n self.external_rec = None\n \n self.ig = []\n self.og = []\n self.ci = []\n self.fg = []\n \n self.out = tf.expand_dims(h_init, 1)\n self.name = name", "def extract_hidden_states(self, output):\n \n # Extracting the forward and backward hidden states from the last BiLSTM layer\n # output (batch_size, sequence length, 2 * hidden dim)\n output_fw = output[:,:,0:self._hidden_size]\n output_bw = output[:,:,self._hidden_size:]\n \n hidden_states = torch.cat((output_fw, output_bw),-1)\n \n return hidden_states", "def LSTM_train(X_train, Y_train, X_dev, Y_dev, R_train, R_dev, hyperparams):", "def init_hidden(self):\n if isinstance(self.rnn, nn.GRU) or isinstance(self.rnn, nn.RNN):\n return torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, 
device=device)\n elif isinstance(self.rnn, nn.LSTM):\n return (torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device),\n torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device))", "def init_hidden(self):\n if isinstance(self.rnn, nn.GRU) or isinstance(self.rnn, nn.RNN):\n return torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device)\n elif isinstance(self.rnn, nn.LSTM):\n return (torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device),\n torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device))", "def apply_lstm(x, seq_len):\n return cudnn_layers.stacked_bilstm(\n input_emb=x,\n input_len=seq_len,\n hidden_size=FLAGS.lstm_dim,\n num_layers=1,\n dropout_ratio=0.0,\n mode=tf_estimator.ModeKeys.TRAIN,\n use_cudnn=None)", "def forward(self, src, device):\n\n src = torch.as_tensor(src).float().to(device)\n\n\n # Set initial hidden and cell states \n h0 = torch.zeros(self.num_layers, src.shape[0], self.hidden_dim).to(device)\n c0 = torch.zeros(self.num_layers, src.shape[0], self.hidden_dim).to(device)\n\n # shape of lstm_out: [batch_size, input_size, hidden_dim]\n # shape of self.hidden: (a, b), where a and b both have shape (num_layers, batch_size, hidden_dim).\n lstm_out, self.hidden = self.lstm(src, (h0, c0)) \n \n # Only take the output from the final timetep\n # Can pass on the entirety of lstm_out to the next layer if it is a seq2seq prediction\n #print(lstm_out.size())\n y_pred = self.linear(lstm_out[:, -1, :].view(src.shape[0], -1))\n return y_pred", "def bi_lstm(X_inputs):\n # X_inputs.shape = [batchsize, timestep_size] -> inputs.shape = [batchsize, timestep_size, embedding_size]\n log(\"embedding\")\n log(embedding)\n log(X_inputs)\n log(\"X_inputs\")\n inputs = tf.nn.embedding_lookup(embedding, X_inputs) \n \n # ** 1.构建前向后向多层 LSTM\n cell_fw = rnn.MultiRNNCell([lstm_cell() for _ in range(layer_num)], state_is_tuple=True)\n cell_bw = rnn.MultiRNNCell([lstm_cell() for _ in range(layer_num)], state_is_tuple=True)\n \n # ** 2.初始状态\n initial_state_fw = cell_fw.zero_state(batch_size, tf.float32)\n initial_state_bw = cell_bw.zero_state(batch_size, tf.float32) \n \n # 下面两部分是等价的\n # **************************************************************\n # ** 把 inputs 处理成 rnn.static_bidirectional_rnn 的要求形式\n # ** 文档说明\n # inputs: A length T list of inputs, each a tensor of shape\n # [batch_size, input_size], or a nested tuple of such elements.\n # *************************************************************\n # Unstack to get a list of 'n_steps' tensors of shape (batch_size, n_input)\n # inputs.shape = [batchsize, timestep_size, embedding_size] -> timestep_size tensor, each_tensor.shape = [batchsize, embedding_size]\n # inputs = tf.unstack(inputs, timestep_size, 1)\n # ** 3.bi-lstm 计算(tf封装) 一般采用下面 static_bidirectional_rnn 函数调用。\n # 但是为了理解计算的细节,所以把后面的这段代码进行展开自己实现了一遍。\n# try:\n# outputs, _, _ = rnn.static_bidirectional_rnn(cell_fw, cell_bw, inputs, \n# initial_state_fw = initial_state_fw, initial_state_bw = initial_state_bw, dtype=tf.float32)\n# except Exception: # Old TensorFlow version only returns outputs not states\n# outputs = rnn.static_bidirectional_rnn(cell_fw, cell_bw, inputs, \n# initial_state_fw = initial_state_fw, initial_state_bw = initial_state_bw, dtype=tf.float32)\n# output = tf.reshape(tf.concat(outputs, 1), [-1, hidden_size * 2])\n # ***********************************************************\n \n # 
***********************************************************\n # ** 3. bi-lstm 计算(展开)\n with tf.variable_scope('bidirectional_rnn'):\n # *** 下面,两个网络是分别计算 output 和 state \n # Forward direction\n outputs_fw = list()\n state_fw = initial_state_fw\n with tf.variable_scope('fw'):\n for timestep in range(timestep_size):\n if timestep > 0:\n tf.get_variable_scope().reuse_variables()\n (output_fw, state_fw) = cell_fw(inputs[:, timestep, :], state_fw)\n outputs_fw.append(output_fw)\n \n # backward direction\n outputs_bw = list()\n state_bw = initial_state_bw\n with tf.variable_scope('bw') as bw_scope:\n inputs = tf.reverse(inputs, [1])\n for timestep in range(timestep_size):\n if timestep > 0:\n tf.get_variable_scope().reuse_variables()\n (output_bw, state_bw) = cell_bw(inputs[:, timestep, :], state_bw)\n outputs_bw.append(output_bw)\n # *** 然后把 output_bw 在 timestep 维度进行翻转\n # outputs_bw.shape = [timestep_size, batch_size, hidden_size]\n outputs_bw = tf.reverse(outputs_bw, [0])\n # 把两个oupputs 拼成 [timestep_size, batch_size, hidden_size*2]\n output = tf.concat([outputs_fw, outputs_bw], 2)\n output = tf.transpose(output, perm=[1,0,2])\n output = tf.reshape(output, [-1, hidden_size*2])\n # ***********************************************************\n return output # [-1, hidden_size*2]", "def lstm(self):\n # Model.\n model = Sequential()\n model.add(LSTM(2048, return_sequences=True,\n input_shape=self.input_shape,\n dropout=0.0))\n #model.add(Flatten()) #qiao_added\n # model.add(Dense(1024, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(2048, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Flatten())\n model.add(Dense(self.nb_classes, activation='softmax'))\n return model", "def get_initial_hx(self, input_seq, hidden_state):\n num_directions = 2 if self.lstm.bidirectional else 1\n # hidden state\n hidden = hidden_state.view(self.lstm.num_layers * num_directions, len(hidden_state), -1)\n # cell state\n c_zeros = torch.zeros(self.lstm.num_layers * num_directions,\n input_seq.size(0), self.lstm.hidden_size,\n dtype=input_seq.dtype, device=input_seq.device)\n return hidden, c_zeros", "def init_hidden(self, batch_size, device):\n if self.mode == 'LSTM':\n return (torch.ones((1+self.bidirectional , batch_size, self.hidden_dim), device=device),\n torch.ones((1+self.bidirectional, batch_size, self.hidden_dim), device=device))\n else:\n return torch.ones((1+self.bidirectional, batch_size, self.hidden_dim), device=device)", "def get_rnn_hidden_state(h):\n return h if not isinstance(h, tuple) else h[0]", "def biLSTM(inputs, dim, seq_len, name, cell_type=\"cudnn\", cells=None, is_training=True, dropout_rate=0.0):\r\n if cell_type==\"cudnn\":\r\n with tf.variable_scope(name,reuse=tf.AUTO_REUSE) as scope:\r\n hidden_states, cell_states = bi_cudnn_rnn_encoder('lstm', dim, 1, dropout_rate, inputs, seq_len, is_training)\r\n else:\r\n with tf.name_scope(name) as scope:\r\n with tf.variable_scope('forward' + name) as scope:\r\n if cell_type == \"lstm\":\r\n lstm_fwd = tf.contrib.rnn.LSTMCell(num_units=dim)\r\n with tf.variable_scope('backward' + name) as scope:\r\n if cell_type == \"lstm\":\r\n lstm_bwd = tf.contrib.rnn.LSTMCell(num_units=dim)\r\n \r\n with tf.variable_scope(name+'blstm', reuse=tf.AUTO_REUSE):\r\n hidden_states, cell_states = tf.nn.bidirectional_dynamic_rnn(cell_fw=lstm_fwd, cell_bw=lstm_bwd, inputs=inputs, sequence_length=seq_len, dtype=tf.float32, scope=name)\r\n\r\n return hidden_states, cell_states", "def _extract_states(self, state):\n conf = self._config\n\n # c_prev is 
`m` (cell value), and\n # m_prev is `h` (previous output) in the paper.\n # Keeping c and m here for consistency with the codebase\n c_prev = [None] * conf.num_dims\n m_prev = [None] * conf.num_dims\n\n # for LSTM : state = memory cell + output, hence cell_output_size > 0\n # for GRU/RNN: state = output (whose size is equal to _num_units),\n # hence cell_output_size = 0\n total_cell_state_size = self._cell_state_size()\n cell_output_size = total_cell_state_size - conf.num_units\n\n if self._state_is_tuple:\n if len(conf.recurrents) != len(state):\n raise ValueError('Expected state as a tuple of {} '\n 'element'.format(len(conf.recurrents)))\n\n for recurrent_dim, recurrent_state in zip(conf.recurrents, state):\n if cell_output_size > 0:\n c_prev[recurrent_dim], m_prev[recurrent_dim] = recurrent_state\n else:\n m_prev[recurrent_dim] = recurrent_state\n else:\n for recurrent_dim, start_idx in zip(conf.recurrents,\n range(0, self.state_size,\n total_cell_state_size)):\n if cell_output_size > 0:\n c_prev[recurrent_dim] = array_ops.slice(state, [0, start_idx],\n [-1, conf.num_units])\n m_prev[recurrent_dim] = array_ops.slice(\n state, [0, start_idx + conf.num_units], [-1, cell_output_size])\n else:\n m_prev[recurrent_dim] = array_ops.slice(state, [0, start_idx],\n [-1, conf.num_units])\n return c_prev, m_prev, cell_output_size", "def cudnn_lstm_state(lstm_cell_state):\n h = tf.stack([s.h for s in lstm_cell_state])\n c = tf.stack([s.c for s in lstm_cell_state])\n return (h, c)", "def __call__(self, inputs, state):\n\n output, next_state = super(BinaryLSTM, self).__call__(inputs, state)\n with tf.variable_scope(self._cell_name):\n\n binary_cell_state = Bsn_layer.bsn_layer(next_state[0],\n stochastic_method=self.stochastic_method,\n preprocessing_method=self.preprocessing_method,\n tf_graph=self._tf_graph,\n slope_tensor=self._slope_tensor,\n loss_op_name='loss_by_example',\n name='binary_layer',\n stochastic_gradient_estimator=bsn_literals.STRAIGHT_THROUGH)\n binary_hidden_state = Bsn_layer.bsn_layer(next_state[1],\n stochastic_method=self.stochastic_method,\n preprocessing_method=self.preprocessing_method,\n tf_graph=self._tf_graph,\n slope_tensor=self._slope_tensor,\n loss_op_name='loss_by_example',\n name='binary_layer',\n stochastic_gradient_estimator=bsn_literals.STRAIGHT_THROUGH)\n\n return binary_hidden_state, tf.nn.rnn_cell.LSTMStateTuple(binary_cell_state, binary_hidden_state)", "def forward(self, X, A, beta=1, print_output=False):\n assert X.size(0) == A.size(0) + 1, print('the seq length of X and A are wrong')\n kl_loss = 0 # KL divergence term\n Ell_loss = 0 # expected log likelihood term\n batch_size = X.size(1)\n\n if len(X.size()) != 3:\n print('The input data matrix should be the shape of [seq_length, batch_size, input_dim]')\n\n X = X.to(self.device)\n A = A.to(self.device)\n\n # container\n states = torch.zeros(A.size(0), A.size(1), self.state_size).to(self.device) # [seq-1, batch, state]\n rnn_hiddens = torch.zeros(A.size(0), A.size(1), self.hidden_size).to(self.device) # [seq-1, batch, hidden]\n\n # initialising state and rnn hidden state\n # state = torch.zeros(X.size(1), self.state_size).to(self.device)\n rnn_hidden = self.init_h(X[0]).to(self.device) # [batch, hidden]\n if self.mode == 'LSTM':\n rnn_hidden_c = torch.zeros_like(rnn_hidden).to(self.device) # [batch, hidden]\n\n # temp_prior = self.hidden_prior(rnn_hidden) #[batch, state]\n temp_prior = rnn_hidden\n prior_mean = self.prior_mean(temp_prior) # [batch, state]\n prior_sigma = 
torch.exp(self.prior_sigma(temp_prior)) # [batch, state]\n state = self.reparametrise(prior_mean, prior_sigma) # [batch, state]\n\n # rnn_hidden = torch.zeros(X.size(1), self.hidden_size).to(self.device)\n\n # emission_mean = X[0]\n for t in range(1, X.size()[\n 0]): # for each time step, compute the free energy for each batch of data (start from the second hid state)\n if self.mode == 'LSTM':\n next_state_prior_m, next_state_prior_sigma, rnn_hidden, rnn_hidden_c = self.prior(state,\n A[t - 1].unsqueeze(\n -1),\n rnn_hidden,\n rnn_hidden_c)\n else:\n next_state_prior_m, next_state_prior_sigma, rnn_hidden = self.prior(state, A[t - 1].unsqueeze(-1),\n rnn_hidden)\n\n next_state_post_m, next_state_post_sigma = self.posterior(rnn_hidden, X[t])\n state = self.reparametrise(next_state_post_m, next_state_post_sigma) # [batch, state_size]\n states[t - 1] = state\n rnn_hiddens[t - 1] = rnn_hidden\n next_state_prior = Normal(next_state_prior_m, next_state_prior_sigma)\n next_state_post = Normal(next_state_post_m, next_state_post_sigma)\n\n # kl = kl_divergence(next_state_prior, next_state_post).sum(dim=1) #[batch]\n kl = kl_divergence(next_state_post, next_state_prior).sum(dim=1) # [batch]\n\n kl_loss += kl.mean()\n kl_loss /= A.size(0)\n\n # compute nll\n\n # flatten state\n flatten_states = states.view(-1, self.state_size)\n flatten_rnn_hiddens = rnn_hiddens.view(-1, self.hidden_size)\n flatten_x_mean, flatten_x_sigma = self.obs_model(flatten_states, flatten_rnn_hiddens)\n\n nll = self.batched_gaussian_ll(flatten_x_mean, flatten_x_sigma, X[1:, :, :].reshape(-1, self.output_size))\n nll = nll.mean()\n\n FE = nll - kl_loss\n\n if print_output:\n # print('ELL loss=', Ell_loss, 'KL loss=', kl_loss)\n print('Free energy of this batch = {}. Nll loss = {}. KL div = {}.'.format(float(FE.data)\n , float(nll.data),\n float(kl_loss.data)))\n\n return FE, nll, kl_loss", "def __init__(self, device, input_size, hidden_size, layer_norm=False, chrono_init=False, t_max=10):\n\n super(LSTMCell, self).__init__()\n\n self._device = device\n self._input_size = input_size\n self._hidden_size = hidden_size\n self._layer_norm = layer_norm\n self._chrono_init = chrono_init\n self._t_max = t_max\n\n self._W_x2i = nn.Parameter(torch.Tensor(input_size, hidden_size))\n self._W_h2i = nn.Parameter(torch.Tensor(hidden_size, hidden_size))\n self._W_c2i = nn.Parameter(torch.Tensor(hidden_size))\n self._b_i = nn.Parameter(torch.Tensor(hidden_size))\n \n self._W_x2f = nn.Parameter(torch.Tensor(input_size, hidden_size))\n self._W_h2f = nn.Parameter(torch.Tensor(hidden_size, hidden_size))\n self._W_c2f = nn.Parameter(torch.Tensor(hidden_size))\n self._b_f = nn.Parameter(torch.Tensor(hidden_size))\n \n self._W_x2o = nn.Parameter(torch.Tensor(input_size, hidden_size))\n self._W_h2o = nn.Parameter(torch.Tensor(hidden_size, hidden_size))\n self._W_c2o = nn.Parameter(torch.Tensor(hidden_size))\n self._b_o = nn.Parameter(torch.Tensor(hidden_size))\n \n self._W_x2c = nn.Parameter(torch.Tensor(input_size, hidden_size))\n self._W_h2c = nn.Parameter(torch.Tensor(hidden_size, hidden_size))\n self._b_c = nn.Parameter(torch.Tensor(hidden_size))\n\n if self._layer_norm:\n self._ln_c = nn.LayerNorm(hidden_size)\n self._ln_i = nn.LayerNorm(hidden_size)\n self._ln_f = nn.LayerNorm(hidden_size)\n self._ln_o = nn.LayerNorm(hidden_size)\n self._ln_g = nn.LayerNorm(hidden_size)\n \n self._reset_parameters()", "def lstm_layer(self):\n if self.pooling:\n ret_seq = True\n else:\n ret_seq = False\n ker_in = glorot_uniform(seed=self.seed)\n rec_in = 
Orthogonal(seed=self.seed)\n if self.type_of_weights == \"shared\":\n if self.recurrent == \"bilstm\" or self.recurrent is None:\n out_a = Bidirectional(LSTM(self.hidden_dim,\n input_shape=(self.max_sequence_length, self.embedding_dim,),\n kernel_initializer=ker_in,\n recurrent_initializer=rec_in,\n return_sequences=ret_seq), merge_mode='concat')\n elif self.recurrent == \"lstm\":\n out_a = LSTM(self.hidden_dim,\n input_shape=(self.max_sequence_length, self.embedding_dim,),\n kernel_initializer=ker_in,\n recurrent_initializer=rec_in,\n return_sequences=ret_seq)\n return out_a, out_a\n elif self.type_of_weights == \"separate\":\n if self.recurrent == \"bilstm\" or self.recurrent is None:\n out_a = Bidirectional(LSTM(self.hidden_dim,\n input_shape=(self.max_sequence_length, self.embedding_dim,),\n kernel_initializer=ker_in,\n recurrent_initializer=rec_in,\n return_sequences=ret_seq), merge_mode='concat')\n out_b = Bidirectional(LSTM(self.hidden_dim,\n input_shape=(self.max_sequence_length, self.embedding_dim,),\n kernel_initializer=ker_in,\n recurrent_initializer=rec_in,\n return_sequences=ret_seq), merge_mode='concat')\n elif self.recurrent == \"lstm\":\n out_a = LSTM(self.hidden_dim,\n input_shape=(self.max_sequence_length, self.embedding_dim,),\n kernel_initializer=ker_in,\n recurrent_initializer=rec_in,\n return_sequences=ret_seq)\n out_b = LSTM(self.hidden_dim,\n input_shape=(self.max_sequence_length, self.embedding_dim,),\n kernel_initializer=ker_in,\n recurrent_initializer=rec_in,\n return_sequences=ret_seq)\n return out_a, out_b", "def init_hidden(self, batch_size):\n b = 2 if self.bidirectional else 1\n if self.rnn_type == \"LSTM\":\n h0 = (Variable(torch.zeros(b, batch_size, self.hidden_size)),\n Variable(torch.zeros(b, batch_size, self.hidden_size)))\n h0 = [h0[0].cuda(), h0[1].cuda()] if self.use_cuda else h0\n else:\n h0 = Variable(torch.zeros(b, batch_size, self.hidden_size))\n h0 = h0.cuda() if self.use_cuda else h0\n return h0", "def build_dynamic_rnn(self, cells, lstm_input, lstm_state):\n lstm_output, final_state = tf.nn.dynamic_rnn(cells, lstm_input, initial_state=lstm_state)\n # reshape lstm_output from [batch_size, time_steps, n_units] to [batch_size*time_steps, n_units]\n lstm_output = tf.reshape(lstm_output, [-1, self.num_hidden_units])\n\n return lstm_output, final_state", "def variable_recurrent(self, x, h, seq_length, w_ih, w_hh, b_ih, b_hh, att_score):\n time_step = x.shape[0]\n h_t = h\n if self.is_lstm:\n hidden_size = h[0].shape[-1]\n zero_output = P.ZerosLike()(h_t[0])\n else:\n hidden_size = h.shape[-1]\n zero_output = P.ZerosLike()(h_t)\n seq_length = P.Cast()(seq_length, mstype.float32)\n seq_length = P.BroadcastTo((hidden_size, -1))(seq_length)\n seq_length = P.Cast()(seq_length, mstype.int32)\n seq_length = P.Transpose()(seq_length, (1, 0))\n\n outputs = []\n state_t = h_t\n t = 0\n while t < time_step:\n x_t = x[t:t + 1:1]\n x_t = P.Squeeze(0)(x_t)\n h_t = self.cell(x_t, state_t, w_ih, w_hh, b_ih, b_hh, att_score[t])\n\n seq_cond = seq_length > t\n if self.is_lstm:\n state_t_0 = P.Select()(seq_cond, h_t[0], state_t[0])\n state_t_1 = P.Select()(seq_cond, h_t[1], state_t[1])\n output = P.Select()(seq_cond, h_t[0], zero_output)\n state_t = (state_t_0, state_t_1)\n else:\n state_t = P.Select()(seq_cond, h_t, state_t)\n output = P.Select()(seq_cond, h_t, zero_output)\n outputs.append(output)\n t += 1\n outputs = P.Stack()(outputs)\n return outputs, state_t", "def the_nn_article(input_size, dropout):\n \n class Model(nn.Module):\n def __init__(self):\n 
super(Model, self).__init__()\n \n # Defining layers\n self.hidden_size = 256\n self.first_layer = 512\n self.second_layer = 1024\n self.n_layers = 2\n self.bidirectional = True\n self.dropout = dropout\n \n # RNN Layer\n self.rnn = nn.LSTM(input_size = input_size, hidden_size = self.hidden_size, \n num_layers = self.n_layers, batch_first = True, \n bidirectional = self.bidirectional, dropout = self.dropout)\n \n self.fc1 = nn.Linear(self.first_layer, self.second_layer)\n self.fc2 = nn.Linear(self.second_layer, 3)\n \n def forward(self, x):\n batch_size = x.size(0)\n \n #Initializing hidden state for first input using method defined below\n hidden = self.init_hidden(batch_size, self.hidden_size)\n \n # Find sequence lengths (for packing)\n x_lengths = self.find_lengths(x)\n \n # Pack sequences\n x = torch.nn.utils.rnn.pack_padded_sequence(x, x_lengths, batch_first=True, enforce_sorted=False)\n\n # Run the network\n out, hidden = self.rnn(x, hidden)\n \n # Unpack the sequences again\n out, _ = torch.nn.utils.rnn.pad_packed_sequence(out, batch_first=True)\n\n # Run through the linear layer\n out = F.relu(self.fc1(out))\n out = F.relu(self.fc2(out))\n \n # Perform log_softmax on output (WORSE PERFORMANCE!)\n #x = F.log_softmax(x, dim = 2)\n\n return out, hidden\n \n def init_hidden(self, batch_size, hidden_size):\n # This method generates the first hidden state of zeros which we'll use in the forward pass\n \n hidden = (torch.zeros(2*self.n_layers, batch_size, self.hidden_size),\n torch.zeros(2*self.n_layers, batch_size, self.hidden_size))\n \n return hidden\n \n def find_lengths(self, input_seq):\n # Find seq-lengths of each sequence (used to pack sequences)\n x_lengths = []\n for seq in input_seq:\n for idx, vec in enumerate(seq):\n if sum(vec).item() != 1:\n x_lengths.append(idx)\n break\n if idx == 752:\n x_lengths.append(len(seq)) \n return x_lengths\n \n net = Model()\n return net", "def custom_dynamic_rnn(cell, inputs, inputs_len, initial_state=None):\n batch_size = tf.shape(inputs)[0]\n max_time = tf.shape(inputs)[1]\n\n inputs_ta = tf.TensorArray(dtype=tf.float32, size=max_time)\n inputs_ta = inputs_ta.unstack(tf.transpose(inputs, [1, 0, 2]))\n emit_ta = tf.TensorArray(dtype=tf.float32, dynamic_size=True, size=0)\n t0 = tf.constant(0, dtype=tf.int32)\n if initial_state is not None:\n s0 = initial_state\n else:\n s0 = cell.zero_state(batch_size, dtype=tf.float32)\n f0 = tf.zeros([batch_size], dtype=tf.bool)\n\n def loop_fn(t, prev_s, emit_ta, finished):\n \"\"\"\n the loop function of rnn\n \"\"\"\n cur_x = inputs_ta.read(t)\n scores, cur_state = cell(cur_x, prev_s)\n\n # copy through\n scores = tf.where(finished, tf.zeros_like(scores), scores)\n\n if isinstance(cell, tc.rnn.LSTMCell):\n cur_c, cur_h = cur_state\n prev_c, prev_h = prev_s\n cur_state = tc.rnn.LSTMStateTuple(tf.where(finished, prev_c, cur_c),\n tf.where(finished, prev_h, cur_h))\n else:\n cur_state = tf.where(finished, prev_s, cur_state)\n\n emit_ta = emit_ta.write(t, scores)\n finished = tf.greater_equal(t + 1, inputs_len)\n return [t + 1, cur_state, emit_ta, finished]\n\n _, state, emit_ta, _ = tf.while_loop(\n cond=lambda _1, _2, _3, finished: tf.logical_not(\n tf.reduce_all(finished)),\n body=loop_fn,\n loop_vars=(t0, s0, emit_ta, f0),\n parallel_iterations=32,\n swap_memory=False)\n\n outputs = tf.transpose(emit_ta.stack(), [1, 0, 2])\n return outputs, state", "def bi_lstm(X_inputs):\n # X_inputs.shape = [batchsize, timestep_size] -> inputs.shape = [batchsize, timestep_size, embedding_size]\n inputs = 
tf.nn.embedding_lookup(embedding, X_inputs)\n\n # ** 1. Build the forward and backward multi-layer LSTM cells\n cell_fw = rnn.MultiRNNCell([lstm_cell() for _ in range(layer_num)], state_is_tuple=True)\n cell_bw = rnn.MultiRNNCell([lstm_cell() for _ in range(layer_num)], state_is_tuple=True)\n\n # ** 2. Initial states\n initial_state_fw = cell_fw.zero_state(batch_size, tf.float32)\n initial_state_bw = cell_bw.zero_state(batch_size, tf.float32)\n\n # The two blocks below are equivalent\n # **************************************************************\n # ** Reshape inputs into the form required by rnn.static_bidirectional_rnn\n # ** From the documentation:\n # inputs: A length T list of inputs, each a tensor of shape\n # [batch_size, input_size], or a nested tuple of such elements.\n # *************************************************************\n # Unstack to get a list of 'n_steps' tensors of shape (batch_size, n_input)\n # inputs.shape = [batchsize, timestep_size, embedding_size] -> timestep_size tensor, each_tensor.shape = [batchsize, embedding_size]\n # inputs = tf.unstack(inputs, timestep_size, 1)\n # ** 3. bi-lstm computation (TF wrapper): normally the static_bidirectional_rnn call below is used.\n # To understand the details of the computation, however, the code further down unrolls it by hand.\n # try:\n # outputs, _, _ = rnn.static_bidirectional_rnn(cell_fw, cell_bw, inputs,\n # initial_state_fw = initial_state_fw, initial_state_bw = initial_state_bw, dtype=tf.float32)\n # except Exception: # Old TensorFlow version only returns outputs not states\n # outputs = rnn.static_bidirectional_rnn(cell_fw, cell_bw, inputs,\n # initial_state_fw = initial_state_fw, initial_state_bw = initial_state_bw, dtype=tf.float32)\n # output = tf.reshape(tf.concat(outputs, 1), [-1, hidden_size * 2])\n # ***********************************************************\n\n # ***********************************************************\n # ** 3. bi-lstm computation (unrolled)\n with tf.variable_scope('bidirectional_rnn'):\n # *** Below, the two networks compute output and state separately\n # Forward direction\n outputs_fw = list()\n state_fw = initial_state_fw\n with tf.variable_scope('fw'):\n for timestep in range(timestep_size):\n if timestep > 0:\n tf.get_variable_scope().reuse_variables()\n (output_fw, state_fw) = cell_fw(inputs[:, timestep, :], state_fw)\n outputs_fw.append(output_fw)\n\n # backward direction\n outputs_bw = list()\n state_bw = initial_state_bw\n with tf.variable_scope('bw') as bw_scope:\n inputs = tf.reverse(inputs, [1])\n for timestep in range(timestep_size):\n if timestep > 0:\n tf.get_variable_scope().reuse_variables()\n (output_bw, state_bw) = cell_bw(inputs[:, timestep, :], state_bw)\n outputs_bw.append(output_bw)\n # *** Then reverse output_bw along the timestep dimension\n # outputs_bw.shape = [timestep_size, batch_size, hidden_size]\n outputs_bw = tf.reverse(outputs_bw, [0])\n # Concatenate the two outputs into [timestep_size, batch_size, hidden_size*2]\n output = tf.concat([outputs_fw, outputs_bw], 2)\n output = tf.transpose(output, perm=[1, 0, 2])\n output = tf.reshape(output, [-1, hidden_size * 2])\n # ***********************************************************\n return output # [-1, hidden_size*2]", "def __init__(\n self,\n input_size,\n hidden_size,\n num_layers=1,\n bidirectional=False,\n dropout=0,\n **kwargs\n ):\n super(LSTM, self).__init__(\n 'lstm', input_size, hidden_size,\n num_layers, bidirectional, dropout, **kwargs\n )", "def forward(self, input_data, hidden_state):\n batch_size = input_data.size(0)\n if hidden_state is None:\n hidden_state = torch.zeros(self._num_layers, batch_size, self._layer_size)\n hidden_state = [hidden_state, hidden_state] if self._controller_type.lower() == 'lstm' else hidden_state\n\n embedded_vector = self._embedding(input_data)\n 
output_vector, hidden_state_out = self._layer(embedded_vector.unsqueeze(0), hidden_state)\n output_vector = self._linear(output_vector.squeeze())\n return output_vector, hidden_state_out", "def __training_forward(self, h, h_lengths, y):\n batch_size = h.size()[1]\n input_seq_len = h.size()[0]\n output_seq_len = y.size()[0]\n\n # obtain embedding representations for the correct tokens\n # shift by one token (add <sos> token at the beginning of the sentences)\n start_tokens = torch.tensor([self.bos_token], dtype=y.dtype, device=self.device).repeat(batch_size, 1).t()\n y_input = torch.cat([start_tokens, y[:-1, :]], dim=0)\n y_emb = self.output_embedding.forward(y_input) # (output_seq_len x batch x embed_dim)\n\n # store hidden and cell states, at the beginning filled with zeros\n states_s = torch.zeros(input_seq_len+1, output_seq_len+1, batch_size, self.state_dim_2d, device=self.device)\n states_c = torch.zeros(input_seq_len+1, output_seq_len+1, batch_size, self.state_dim_2d, device=self.device)\n\n for diagonal_num in range(input_seq_len + output_seq_len - 1):\n # calculate the indices for input / states / etc. for this diagonal\n (ver_from, ver_to), (hor_from, hor_to) = LSTM2d.__calculate_input_ranges(diagonal_num=diagonal_num,\n input_seq_len=input_seq_len,\n output_seq_len=output_seq_len)\n ver_state_ranges, hor_state_ranges, diag_ranges = LSTM2d.__calculate_state_ranges((ver_from, ver_to),\n (hor_from, hor_to))\n ver_range_x, ver_range_y = ver_state_ranges\n hor_range_x, hor_range_y = hor_state_ranges\n diag_range_x, diag_range_y = diag_ranges\n\n # flip the output range so we take the inputs in the right order corresponding to the input range\n # Note: the 2d-cell with smallest source-position (horizontally) and largest target-position (vertically) is\n # the first cell in the diagonal!\n input_range = list(range(ver_from, ver_to))\n output_range = list(reversed(range(hor_from, hor_to)))\n diagonal_len = len(input_range) # always == len(output_range)\n\n # calculate x input for this diagonal\n # treat diagonal as though it was a larger batch and reshape inputs accordingly\n new_batch_size = diagonal_len * batch_size\n h_current = h[input_range, :, :].view(new_batch_size, 2*self.encoder_state_dim)\n y_current = y_emb[output_range, :, :].view(new_batch_size, self.embed_dim)\n x_current = torch.cat([h_current, y_current], dim=-1) # shape (batch*diagonal_len x input_dim)\n\n # calculate previous hidden & cell states for this diagonal\n s_prev_hor = states_s[hor_range_x, hor_range_y, :, :].view(new_batch_size, self.state_dim_2d)\n c_prev_hor = states_c[hor_range_x, hor_range_y, :, :].view(new_batch_size, self.state_dim_2d)\n s_prev_ver = states_s[ver_range_x, ver_range_y, :, :].view(new_batch_size, self.state_dim_2d)\n c_prev_ver = states_c[ver_range_x, ver_range_y, :, :].view(new_batch_size, self.state_dim_2d)\n\n # run batched computation for this diagonal\n c_next, s_next = self.cell2d.forward(x_current, s_prev_hor, s_prev_ver, c_prev_hor, c_prev_ver)\n\n # separate batch and diagonal_len again so we can store them accordingly\n c_next = c_next.view(diagonal_len, batch_size, self.state_dim_2d)\n s_next = s_next.view(diagonal_len, batch_size, self.state_dim_2d)\n\n # store new hidden and cell states at the right indices for the next diagonal(s) to use\n states_s[diag_range_x, diag_range_y, :, :] = s_next\n states_c[diag_range_x, diag_range_y, :, :] = c_next\n\n # for the prediction, take the last (valid, non-padded) column of states and all but the first (1:) row\n states_for_pred = 
states_s[h_lengths, 1:, range(batch_size), :].permute(1, 0, 2)\n states_for_pred = self.logits_dropout.forward(states_for_pred)\n\n y_pred = self.logits.forward(states_for_pred) # shape (output_seq_len x batch x output_vocab_size)\n return y_pred", "def recurrent(self, x, h_0, w_ih, w_hh, b_ih, b_hh, att_score):\n time_step = x.shape[0]\n outputs = []\n t = 0\n h = h_0\n while t < time_step:\n x_t = x[t:t + 1:1]\n x_t = P.Squeeze(0)(x_t)\n h = self.cell(x_t, h, w_ih, w_hh, b_ih, b_hh, att_score[t])\n if self.is_lstm:\n outputs.append(h[0])\n else:\n outputs.append(h)\n t += 1\n outputs = P.Stack()(outputs)\n return outputs, h", "def forward(self, x):\n x, self.hidden = self.lstm(x, self.hidden)\n self.detach_hidden()\n x = self.dropout(x)\n x = self.out(x)\n return x", "def __init__(self, num_vars, device, lag_max, hidden_size_lstm, hidden_size_mlp, num_outputs=1):\n super(LSTMgc, self).__init__()\n\n # LSTMs\n self.lstm_cell_list = nn.ModuleList()\n for state in range(num_vars):\n self.lstm_cell_list.append(nn.LSTMCell(lag_max, hidden_size_lstm))\n\n # MLP for prediction\n self.pred_mlp_l1 = nn.Linear(hidden_size_lstm * num_vars, hidden_size_mlp)\n self.pred_mlp_l2 = nn.Linear(hidden_size_mlp, num_outputs)\n\n # Initialise weights for each variable\n self.imp_weights = nn.Parameter(torch.Tensor(np.ones((num_vars,)) / num_vars + np.random.normal(0, 0.00001,\n (num_vars,))))\n\n # Initialise weights\n self.init_weights()\n\n # Save parameters\n self.num_vars = num_vars\n self.lag = lag_max\n self.hidden_size_lstm = hidden_size_lstm\n self.hidden_size_mlp = hidden_size_mlp\n\n # Initialise LSTM states\n self.lstm_state_list = []\n for state in range(num_vars):\n self.lstm_state_list.append((Variable(torch.zeros(1, self.hidden_size_lstm).float()).to(device),\n Variable(torch.zeros(1, self.hidden_size_lstm).float()).to(device)))", "def _build_rnn_graph_lstm(self, inputs, config, is_training):\n cell = util.create_lstm_cell(is_training, config)\n state = util.get_zero_state_for_the_cell(cell, config)\n\n self.initial_state = state\n with tf.variable_scope(\"RNN\"):\n inputs = tf.unstack(inputs, num=self.num_steps, axis=1)\n outputs, state = tf.contrib.rnn.static_rnn(cell, inputs,\n initial_state=self.initial_state)\n output = tf.reshape(tf.concat(outputs, 1), [-1, config.hidden_size])\n return output, state", "def Get_States(self):\n\n # Getting all hidden state throuh time\n all_hidden_states = tf.scan(self.LSTM,\n self.processed_input,\n initializer=self.initial_hidden,\n name='states')\n all_hidden_states=all_hidden_states[:,0,:,:]\n \n return all_hidden_states", "def lstmdouble(self):\n # Model.\n model = Sequential()\n model.add(LSTM(2048, return_sequences=True,\n input_shape=self.input_shape,\n dropout=0.0))\n model.add(LSTM(2048, return_sequences=True,\n input_shape=self.input_shape,\n dropout=0.0))\n #model.add(Flatten()) #qiao_added\n # model.add(Dense(1024, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(2048, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Flatten())\n model.add(Dense(self.nb_classes, activation='softmax'))\n return model", "def lstm_model(nlstm=128, layer_norm=False):\n\n def network_fn(X, nenv=1, obs_size=-1):\n with tf.variable_scope(\"emb\", reuse=tf.AUTO_REUSE):\n w_emb = tf.get_variable(\"w_emb\", [obs_size+1, 32])\n X = tf.nn.embedding_lookup(w_emb, X)\n\n nbatch = X.shape[0]\n nsteps = nbatch // nenv\n\n h = tf.layers.flatten(X)\n\n M = tf.placeholder(tf.float32, [nbatch]) #mask (done t-1)\n S = tf.placeholder(tf.float32, [nenv, 
2*nlstm]) #states\n\n xs = batch_to_seq(h, nenv, nsteps)\n ms = batch_to_seq(M, nenv, nsteps)\n\n assert not layer_norm\n h5, snew = lstm(xs, ms, S, scope='lstm', nh=nlstm)\n\n h = seq_to_batch(h5)\n initial_state = np.zeros(S.shape.as_list(), dtype=float)\n\n return h, {'S':S, 'M':M, 'state':snew, 'initial_state':initial_state}\n\n return network_fn", "def add_model(self, inputs):\n size = self.config.hidden_size\n forget_bias = self.config.forget_bias\n input_cell = self.config.input_cell\n\n if input_cell == 'BasicLSTMCell':\n lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(size, forget_bias)\n print 'Using Basic LSTM Cell \\n'\n\n elif input_cell == 'LSTMCell':\n lstm_cell = tf.nn.rnn_cell.LSTMCell(size, forget_bias)\n print 'Using LSTM Cell \\n'\n\n elif input_cell == 'GRUCell':\n lstm_cell = tf.nn.rnn_cell.GRUCell(size)\n print 'Using GRU Cell \\n'\n\n else:\n print \"Please Specify a Correct Cell Type\"\n\n lstm_cell = tf.nn.rnn_cell.DropoutWrapper(lstm_cell, output_keep_prob=self.config.dropout,\n input_keep_prob=self.config.dropout)\n\n cell = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * self.config.num_layers)\n \n print 'Number of Hidden Layers ', self.config.num_layers\n \n self.initial_state = cell.zero_state(self.config.batch_size, tf.float32)\n rnn_outputs = []\n state = self.initial_state\n\n with tf.variable_scope('RNNLM') as scope:\n for time_step in range(self.config.num_steps):\n if time_step > 0: scope.reuse_variables()\n (cell_output, state) = cell(inputs[:, time_step, :], state)\n rnn_outputs.append(cell_output)\n self.final_state = state\n\n return rnn_outputs", "def __init__(self, embedding_dim, hidden_dim, vocab_size, label_size, use_gpu, batch_size):\n super(BiLSTM, self).__init__()\n self.hidden_dim = hidden_dim\n self.use_gpu = use_gpu\n self.batch_size = batch_size\n self.embeddings = nn.Embedding(vocab_size, embedding_dim)\n self.lstm = nn.LSTM(input_size=embedding_dim, hidden_size=hidden_dim, bidirectional=True)\n self.hidden2label = nn.Linear(hidden_dim*2, label_size)\n self.hidden = self.init_hidden()", "def lstm_forward(x, h0, Wx, Wh, b):\n h, cache = None, None\n #############################################################################\n # TODO: Implement the forward pass for an LSTM over an entire timeseries. #\n # You should use the lstm_step_forward function that you just defined. 
#\n #############################################################################\n\n N,T,D = x.shape\n N,H = h0.shape\n prev_h = h0\n prev_c = np.zeros((N, H))\n\n h = np.zeros((N, T, H))\n cache = list()\n\n for t in range(T):\n next_h, next_c, t_cache = lstm_step_forward(x[:,t,:],prev_h,prev_c,Wx,Wh,b)\n prev_h = next_h\n prev_c = next_c\n h[:,t,:] = next_h\n cache.append(t_cache)\n ##############################################################################\n # END OF YOUR CODE #\n ##############################################################################\n\n return h, cache", "def forward(self, x, hidden):\n batch_size = x.size(0)\n\n # embeddings and lstm_out\n x = x.long()\n embeds = self.embedding(x)\n lstm_out, hidden = self.lstm(embeds, hidden)\n\n # print(f'lstm_out:{lstm_out.shape}')\n\n # stack up lstm outputs\n lstm_out = lstm_out.contiguous().view(-1, self.hidden_dim)\n\n # print(f'lstm_out flatten:{lstm_out.shape}')\n\n # dropout and fully-connected layer\n out = self.dropout(lstm_out)\n out = self.fc(out)\n # sigmoid function\n sig_out = self.sig(out)\n\n # print(f'sig_out:{sig_out.shape}')\n\n # reshape to be batch_size first\n sig_out = sig_out.view(batch_size, -1)\n sig_out = sig_out[:, -1] # get last batch of labels\n\n # print(f'sig_out last batch:{sig_out.shape}')\n\n # return last sigmoid output and hidden state\n return sig_out, hidden", "def __init__(self, vocab_size, tag_to_ix, embedding_dim, hidden_dim):\n super(BiLSTMCRF, self).__init__()\n self.embedding_dim = embedding_dim\n self.hidden_dim = hidden_dim\n self.vocab_size = vocab_size\n self.tag_to_ix = tag_to_ix\n self.tagset_size = len(tag_to_ix)\n \n self.embedding = nn.Embedding(self.vocab_size, embedding_dim)\n self.lstm = nn.LSTM(embedding_dim, hidden_dim // 2,\n num_layers=1, bidirectional=True)\n self.hidden2tag = nn.Linear(hidden_dim, self.tagset_size)\n self.transition = nn.Parameter(\n torch.randn(self.tagset_size, self.tagset_size))\n \n self.transition.data[self.tag_to_ix[START_TAG], :] = -10000.0\n self.transition.data[:, self.tag_to_ix[STOP_TAG]] = -10000.0\n self.hidden = self.init_hidden()", "def convert_rnn(g, op, block):\n\n def generate_lstm(\n input_seqs,\n hidden_state,\n cell_state,\n w_inp,\n w_hid,\n b_inp,\n b_hid,\n f_act,\n g_act,\n h_act,\n backwards=False,\n ):\n \"\"\"Implementation of LSTM cell for paddlepaddle of TVM\"\"\"\n\n h_list = []\n seq_length = len(input_seqs)\n for i in range(seq_length):\n step = input_seqs[i] if not backwards else input_seqs[seq_length - (i + 1)]\n step = _op.squeeze(step, axis=[0])\n gates = _op.nn.dense(step, w_inp) + _op.nn.dense(hidden_state, w_hid)\n if b_inp is not None:\n gates += b_inp\n if b_hid is not None:\n gates += b_hid\n i, f, c, o = _op.split(gates, 4, axis=-1)\n\n i = f_act(i)\n f = f_act(f)\n\n c = g_act(c)\n C = f * cell_state + i * c\n\n o = f_act(o)\n\n H = o * h_act(C)\n\n hidden_state = H\n cell_state = C\n h_list.append(_op.expand_dims(H, axis=0))\n\n if backwards:\n h_list = h_list[::-1]\n\n # Concatenate outputs and add back in direction axis.\n concatenated = _op.concatenate(h_list, 0)\n output = _op.expand_dims(concatenated, axis=1)\n hidden_state = _op.expand_dims(hidden_state, axis=0)\n cell_state = _op.expand_dims(cell_state, axis=0)\n\n return output, hidden_state, cell_state\n\n def generate_gru(\n input_seqs, hidden_state, w_inp, w_hid, b_inp, b_hid, rz_act, n_act, backwards=False\n ):\n \"\"\"Implementation of GRU cell for paddlepaddle of TVM\"\"\"\n\n h_list = []\n seq_length = len(input_seqs)\n for i in 
range(seq_length):\n step = input_seqs[i] if not backwards else input_seqs[seq_length - (i + 1)]\n step = _op.squeeze(step, axis=[0])\n xwt = _op.nn.dense(step, w_inp)\n hwt = _op.nn.dense(hidden_state, w_hid)\n if b_inp is not None:\n xwt += b_inp\n if b_hid is not None:\n hwt += b_hid\n i_r, i_z, i_n = _op.split(xwt, 3, axis=-1)\n h_r, h_z, h_n = _op.split(hwt, 3, axis=-1)\n\n r_gate = rz_act(i_r + h_r)\n z_gate = rz_act(i_z + h_z)\n n_gate = n_act(i_n + r_gate * h_n)\n\n hidden_state = (hidden_state - n_gate) * z_gate + n_gate\n h_list.append(_op.expand_dims(hidden_state, axis=0))\n\n if backwards:\n h_list = h_list[::-1]\n\n # Concatenate outputs and add back in direction axis.\n concatenated = _op.concatenate(h_list, 0)\n output = _op.expand_dims(concatenated, axis=1)\n hidden_state = _op.expand_dims(hidden_state, axis=0)\n\n return output, hidden_state\n\n def generate_simplernn(\n input_seqs, hidden_state, w_inp, w_hid, b_inp, b_hid, n_act, backwards=False\n ):\n \"\"\"Implementation of SimpleRNN cell for paddlepaddle of TVM\"\"\"\n\n h_list = []\n seq_length = len(input_seqs)\n for i in range(seq_length):\n step = input_seqs[i] if not backwards else input_seqs[seq_length - (i + 1)]\n step = _op.squeeze(step, axis=[0])\n xwt = _op.nn.dense(step, w_inp)\n hwt = _op.nn.dense(hidden_state, w_hid)\n if b_inp is not None:\n xwt += b_inp\n if b_hid is not None:\n hwt += b_hid\n\n n_gate = n_act(xwt + hwt)\n\n hidden_state = n_gate\n h_list.append(_op.expand_dims(hidden_state, axis=0))\n\n if backwards:\n h_list = h_list[::-1]\n\n # Concatenate outputs and add back in direction axis.\n concatenated = _op.concatenate(h_list, 0)\n output = _op.expand_dims(concatenated, axis=1)\n hidden_state = _op.expand_dims(hidden_state, axis=0)\n\n return output, hidden_state\n\n def make_param_inputs(g, node, layer, hidden_size, num_layers):\n \"\"\"Param for weight and bias.\"\"\"\n\n bidirect_len = 4 if node.attr(\"is_bidirec\") else 2\n all_layer_param_len = len(node.input(\"WeightList\"))\n weight_list = node.input(\"WeightList\")[: all_layer_param_len // 2]\n bias_list = node.input(\"WeightList\")[all_layer_param_len // 2 :]\n\n layer_weight_list = weight_list[layer * bidirect_len : layer * bidirect_len + bidirect_len]\n layer_bias_list = bias_list[layer * bidirect_len : layer * bidirect_len + bidirect_len]\n param_list = layer_weight_list + layer_bias_list\n param_list_len = len(param_list)\n\n input_weights = param_list[0 : param_list_len // 2 : 2]\n hidden_weights = param_list[1 : param_list_len // 2 : 2]\n\n input_bias = param_list[param_list_len // 2 : param_list_len : 2]\n hidden_bias = param_list[param_list_len // 2 + 1 : param_list_len : 2]\n\n return input_weights, hidden_weights, input_bias, hidden_bias\n\n def make_init_param_inputs(g, node, layer):\n \"\"\"Init param for inputs.\"\"\"\n\n mode = node.attr(\"mode\")\n if mode == \"LSTM\":\n all_init_h, all_init_c = node.input(\"PreState\")\n bidirect_len = 2 if node.attr(\"is_bidirec\") else 1\n init_h = _op.strided_slice(\n g.get_node(all_init_h),\n [layer * bidirect_len],\n [layer * bidirect_len + bidirect_len],\n axes=[0],\n )\n init_c = _op.strided_slice(\n g.get_node(all_init_c),\n [layer * bidirect_len],\n [layer * bidirect_len + bidirect_len],\n axes=[0],\n )\n return init_h, init_c\n all_init_h = node.input(\"PreState\")[0]\n bidirect_len = 2 if node.attr(\"is_bidirec\") else 1\n init_h = _op.strided_slice(\n g.get_node(all_init_h),\n [layer * bidirect_len],\n [layer * bidirect_len + bidirect_len],\n axes=[0],\n )\n return 
init_h\n\n hidden_size = op.attr(\"hidden_size\")\n num_layers = op.attr(\"num_layers\")\n is_bidirec = op.attr(\"is_bidirec\")\n mode = op.attr(\"mode\")\n\n input_x = g.get_node(op.input(\"Input\")[0])\n\n num_directions = 1\n if is_bidirec:\n num_directions = 2\n\n x_shape = infer_shape(input_x)\n time_steps = x_shape[0]\n x_steps = _op.split(input_x, indices_or_sections=time_steps, axis=0)\n for layer in range(num_layers):\n input_weights, hidden_weights, input_bias, hidden_bias = make_param_inputs(\n g, op, layer, hidden_size, num_layers\n )\n if mode == \"LSTM\":\n init_h, init_c = make_init_param_inputs(g, op, layer)\n init_hs = _op.split(init_h, num_directions)\n init_cs = _op.split(init_c, num_directions)\n result_output = []\n result_H = []\n result_C = []\n for i in range(num_directions):\n H_t = _op.squeeze(init_hs[i], axis=[0])\n C_t = _op.squeeze(init_cs[i], axis=[0])\n W = g.get_node(input_weights[i])\n R = g.get_node(hidden_weights[i])\n WB = g.get_node(input_bias[i])\n RB = g.get_node(hidden_bias[i])\n output, H, C = generate_lstm(\n input_seqs=x_steps,\n hidden_state=H_t,\n cell_state=C_t,\n w_inp=W,\n w_hid=R,\n b_inp=WB,\n b_hid=RB,\n f_act=_op.sigmoid,\n g_act=_op.tanh,\n h_act=_op.tanh,\n backwards=i == 1,\n )\n result_output.append(output)\n result_H.append(H)\n result_C.append(C)\n output = _op.concatenate(result_output, axis=1)\n H = _op.concatenate(result_H, axis=0)\n C = _op.concatenate(result_C, axis=0)\n elif mode == \"GRU\":\n init_h = make_init_param_inputs(g, op, layer)\n init_hs = _op.split(init_h, num_directions)\n result_output = []\n result_H = []\n for i in range(num_directions):\n H_t = _op.squeeze(init_hs[i], axis=[0])\n W = g.get_node(input_weights[i])\n R = g.get_node(hidden_weights[i])\n WB = g.get_node(input_bias[i])\n RB = g.get_node(hidden_bias[i])\n output, H = generate_gru(\n input_seqs=x_steps,\n hidden_state=H_t,\n w_inp=W,\n w_hid=R,\n b_inp=WB,\n b_hid=RB,\n rz_act=_op.sigmoid,\n n_act=_op.tanh,\n backwards=i == 1,\n )\n result_output.append(output)\n result_H.append(H)\n output = _op.concatenate(result_output, axis=1)\n H = _op.concatenate(result_H, axis=0)\n elif mode == \"RNN_TANH\":\n init_h = make_init_param_inputs(g, op, layer)\n init_hs = _op.split(init_h, num_directions)\n result_output = []\n result_H = []\n for i in range(num_directions):\n H_t = _op.squeeze(init_hs[i], axis=[0])\n W = g.get_node(input_weights[i])\n R = g.get_node(hidden_weights[i])\n WB = g.get_node(input_bias[i])\n RB = g.get_node(hidden_bias[i])\n output, H = generate_simplernn(\n input_seqs=x_steps,\n hidden_state=H_t,\n w_inp=W,\n w_hid=R,\n b_inp=WB,\n b_hid=RB,\n n_act=_op.tanh,\n backwards=i == 1,\n )\n result_output.append(output)\n result_H.append(H)\n output = _op.concatenate(result_output, axis=1)\n H = _op.concatenate(result_H, axis=0)\n\n output = _op.transpose(output, axes=[0, 2, 1, 3])\n output = _op.reshape(output, newshape=(0, 0, -1))\n x_steps = _op.split(output, indices_or_sections=time_steps, axis=0)\n\n g.add_node(op.output(\"Out\")[0], output)", "def get_output_for(self, input, **kwargs):\n\t\tbatch_size = input.shape[0]\n\t\tnum_states = self.num_states\n\t\timg_dim = self.img_dim\n\t\tN = self.N\n\t\tattender = self.attender\n\t\ttheano.gradient.grad_clip\n\n\t\tdef step(c_tm1, h_tm1, att_acc_tm1, input, W, Wg):\n\t\t\tcenter_y, center_x, delta, sigma, gamma = gp_from_hidden(h_tm1, Wg, img_dim, N)\n\t\t\tg, att = attender.read(input, center_y, center_x, delta, sigma, gamma) # (batch_size, N, N) and (batch_size, img_dim, 
img_dim)\n\t\t\n\t\t\tatt_acc_t = T.clip(att_acc_tm1 + att, 0.0, 1.0)\t# (batch_size, img_dim, img_dim)\n\t\t\tr = input[:, :, :img_dim] * (1.0 - att_acc_t) # (batch_size, img_dim, img_dim)\n\t\t\tR , _ = attender.read(r, *gp_from_hidden(T.zeros((batch_size, 5)), T.eye(5), img_dim, N)) # (batch_size, N, N)\n\t\t\t\n\t\t\tflat_g = g.reshape((batch_size, N * N)) # (batch_size, N * N)\n\t\t\tflat_R = R.reshape((batch_size, N * N)) # (batch_size, N * N)\n\t\t\t\n\t\t\t# concatenate gA, gB and h_tm1 to form a single matrix # (batch_size, N * N + N * N + num_states + 1)\n\t\t\tlstm_inp = T.concatenate([flat_g, flat_R, h_tm1, T.ones((batch_size, 1))], axis=1)\n\n\t\t\t# multiply by LSTM weights\n\t\t\t# (num_states * 4, num_input + num_states + 1) dot (batch_size, N * N + N * N + num_states + 1).T\n\t\t\tpre_act = T.dot(W, lstm_inp.T) \t# (4 * num_states, batch_size)\n\n\t\t\t# split up to get individual gates\n\t\t\tz = T.tanh(pre_act[0*num_states:1*num_states]) # (num_states, batch_size)\n\t\t\ti = T.nnet.sigmoid(pre_act[1*num_states:2*num_states])\n\t\t\tf = T.nnet.sigmoid(pre_act[2*num_states:3*num_states])\n\t\t\to = T.nnet.sigmoid(pre_act[3*num_states:4*num_states])\n\n\t\t\t# do LSTM update\n\t\t\tc_t = f * c_tm1.T + i * z\n\t\t\th_t = o * T.tanh(c_t)\n\n\t\t\treturn c_t.T, h_t.T, att_acc_t\t# 1, 2: (batch_size, num_states); 3, 4: (batch_size, img_dim, img_dim)\n\n\t\tc0 = T.zeros((batch_size, num_states))\n\t\th0 = T.zeros((batch_size, num_states))\n\t\tatt_acc0 = T.zeros((batch_size, img_dim, img_dim))\n\t\t\n\t\tcells, hiddens, att_acc_T = theano.scan(fn=step, non_sequences=[input, self.W, self.Wg], outputs_info=[c0, h0, att_acc0], \n\t\t\t\t\t\t\t\t\t\tn_steps=self.num_glimpses, strict=True)[0]\n\t\tif self.final_state_only:\n\t\t\treturn hiddens[-1]\n\t\telse:\n\t\t\treturn hiddens", "def __init__(self, vocab_size, output_size, embedding_dim, hidden_dim, n_layers, drop_prob=0.7):\n super(SentimentLSTM, self).__init__()\n\n self.output_size = output_size\n self.n_layers = n_layers\n self.hidden_dim = hidden_dim\n\n\n # define all layers\n self.embed = nn.Embedding(vocab_size,embedding_dim)\n self.lstm = nn.LSTM(embedding_dim,hidden_dim,n_layers,dropout=drop_prob,batch_first=True)\n self.fc = nn.Linear(hidden_dim,output_size)\n self.sigmoid = nn.Sigmoid()\n self.drp = nn.Dropout(p=0.7)", "def model_create_lstm(input_dim, output_dim, n_features, n_houses, x_train, y_train, x_test, y_test, early=None):\r\n model = Sequential()\r\n for _ in range(nn_hparams['num_layers']):\r\n model.add(LSTM(nn_hparams['units'], activation=nn_hparams['activation'], input_shape=(input_dim,n_features), return_sequences=True))\r\n model.add(Dropout(nn_hparams['dropout']))\r\n model.add(Flatten())\r\n model.add(Dense(y_train.shape[1]*y_train.shape[2]))\r\n custom_optimizer = getattr(optimizers, nn_hparams['optimizer'])(lr=nn_hparams['learning_rate'], beta_1=nn_hparams['beta_1'], beta_2=nn_hparams['beta_2'])\r\n model.compile(optimizer=custom_optimizer, loss=nn_hparams['loss'])\r\n y_train = y_train.reshape((y_train.shape[0], y_train.shape[1]*y_train.shape[2]))\r\n y_test = y_test.reshape((y_test.shape[0], y_test.shape[1]*y_test.shape[2]))\r\n if early:\r\n model.fit(x_train, y_train, validation_data=(x_test,y_test), epochs=nn_hparams['epochs'], verbose=1, callbacks=[early])\r\n else:\r\n model.fit(x_train, y_train, validation_data=(x_test,y_test), epochs=nn_hparams['epochs'], verbose=1)\r\n model_loss = model.evaluate(x_train, y_train, verbose=0)\r\n \r\n return model, model_loss", "def get_rnn(X, rnn_size, 
seq_len, batch_size, num_layers=1, input_keep_prob=1.0, output_keep_prob=1.0, is_training=False,\n cell_name=\"BasicLSTM\", bidirectional=False):\n with tf.device(\"/cpu:0\"):\n # Convert input tensor to python list (along the sequence length dimention)\n word_embeddings = tf.split(1, seq_len, X)\n word_embeddings = [tf.squeeze(embed_, [1]) for embed_ in word_embeddings]\n\n # if is_training and keep_prob < 1:\n # word_embeddings = [tf.nn.dropout(input_, keep_prob) for input_ in word_embeddings]\n\n def get_cell():\n if cell_name == \"GRU\": # GRU\n cell = rnn_cell.GRUCell(rnn_size)\n elif cell_name == \"LSTM\": # LSTM\n cell = rnn_cell.LSTMCell(rnn_size, tf.shape(X)[2])\n else:\n cell = rnn_cell.BasicLSTMCell(rnn_size)\n if is_training and (input_keep_prob < 1 or output_keep_prob < 1):\n cell = rnn_cell.DropoutWrapper(cell, input_keep_prob=input_keep_prob, output_keep_prob=output_keep_prob)\n cell = rnn_cell.MultiRNNCell([cell] * num_layers)\n initial_state = cell.zero_state(batch_size, tf.float32)\n return cell, initial_state\n\n if bidirectional:\n with tf.variable_scope(\"forward\"):\n cell_fw, initial_state_fw = get_cell()\n with tf.variable_scope(\"backward\"):\n cell_bw, initial_state_bw = get_cell()\n return rnn.bidirectional_rnn(cell_fw, cell_bw, word_embeddings,\n initial_state_fw=initial_state_fw, initial_state_bw=initial_state_bw)\n else:\n cell, initial_state = get_cell()\n return rnn.rnn(cell, word_embeddings, initial_state=initial_state)", "def build_lstm11(embeddings, shape, settings):\n model = Sequential()\n model.add(\n Embedding(\n embeddings.shape[0],\n embeddings.shape[1],\n input_length=shape['max_length'],\n trainable=False,\n weights=[embeddings],\n mask_zero=False\n )\n )\n model.add(TimeDistributed(Dense(shape['n_hidden'], use_bias=False), name='td9a'))\n model.add(Bidirectional(LSTM(shape['n_hidden'], return_sequences=True,\n recurrent_dropout=settings['dropout'],\n dropout=settings['dropout']), name='bidi9a'))\n # model.add(GlobalMaxPool1D())\n # model.add(BatchNormalization())\n # model.add(Dropout(settings['dropout'] / 2.0))\n\n # model.add(TimeDistributed(Dense(shape['n_hidden'], use_bias=False), name='td9b'))\n model.add(Bidirectional(LSTM(shape['n_hidden'] // 2, return_sequences=True,\n recurrent_dropout=settings['dropout'],\n dropout=settings['dropout']), name='bidi9b'))\n model.add(Bidirectional(LSTM(shape['n_hidden'] // 2, return_sequences=True,\n recurrent_dropout=settings['dropout'],\n dropout=settings['dropout']), name='bidi9c'))\n model.add(GlobalMaxPool1D(name='mp9'))\n model.add(BatchNormalization(name='bn9'))\n model.add(Dropout(settings['dropout'] / 2.0, name='drop9b'))\n\n model.add(Dense(shape['n_class'], activation='sigmoid', name='den9b'))\n xprint('build_lstm9: embeddings=%s shape=%s' % (dim(embeddings), shape))\n return model", "def forward(self, data, time_steps, lengths):\n data_packed = pack_padded_sequence(data, lengths, batch_first=True, enforce_sorted=False)\n _, hidden = self.rnn(data_packed)\n assert hidden.size(1) == data.size(0)\n assert hidden.size(2) == self.latent_dim\n\n # check if bidirectional\n if hidden.size(0) == 1:\n hidden = hidden.squeeze(0)\n elif hidden.size(0) == 2:\n hidden = torch.cat((hidden[0], hidden[1]), dim=-1)\n else:\n raise ValueError('Incorrect RNN hidden state.')\n\n # extract mean and logvar\n mean_logvar = self.hidden_to_z0(hidden)\n assert mean_logvar.size(-1) == 2 * self.latent_dim\n mean, logvar = mean_logvar[:, :self.latent_dim], mean_logvar[:, self.latent_dim:]\n return mean, logvar", "def 
__init__(self, embeddings, hidden_dim, output_size, dropout_emb, dropout_lstm):\n super(BaselineLSTMModel, self).__init__()\n\n # 1) embedding layer:\n trainable_emb = False\n self.word_embeddings = nn.Embedding(num_embeddings=embeddings.shape[0],\n embedding_dim=embeddings.shape[1])\n self.init_embeddings(embeddings, trainable_emb)\n self.drop_emb = nn.Dropout(dropout_emb)\n\n # 2) LSTM layer\n self.hidden_dim = hidden_dim\n self.lstm = nn.LSTM(embeddings.shape[1], hidden_dim, batch_first=True,\n dropout=dropout_lstm)\n self.drop_lstm = nn.Dropout(dropout_lstm)\n\n # 3) linear layer -> outputs\n self.hidden2output = nn.Linear(hidden_dim, output_size)", "def forward(self, x, hidden):\n batch_size=x.shape[0]\n\n x = self.embed(x)\n\n x,hidden = self.lstm(x,hidden)\n\n x = x.reshape(-1,self.hidden_dim)\n\n x = self.drp(x)\n\n x = self.fc(x)\n\n sig_out = self.sigmoid(x)\n\n # return last sigmoid output and hidden state\n sig_out = sig_out.reshape(batch_size,-1)\n sig_out = sig_out[:,-1]\n\n return sig_out, hidden", "def __init__(self, embedding_size=300, lstm_layer_num=3,\r\n max_time_size=50, cell_size=100, forget_bias=0.0,\r\n l2_reg_lambda=0.0, class_num=8):\r\n # begin\r\n \"\"\"\r\n constant store in model. benefit: when load model can show the constant\r\n arguments.\r\n dropout not used in test step, move to outside.\r\n \"\"\"\r\n _l2_reg_lambda = tf.constant(l2_reg_lambda, dtype=tf.float32,\r\n name=\"l2_reg_lambda\")\r\n _lstm_layer_num = tf.constant(lstm_layer_num, dtype=tf.int32,\r\n name=\"lstm_layer_num\")\r\n _cell_size = tf.constant(cell_size, dtype=tf.int32,\r\n name=\"cell_size\")\r\n _max_time_size = tf.constant(max_time_size, dtype=tf.int32,\r\n name=\"max_time_size\")\r\n \"\"\"\r\n Placeholders for input, output and dropout.\r\n \"\"\"\r\n # inputs = tf.placeholder(shape=(max_time, batch_size, input_depth),\r\n # dtype=tf.float32)\r\n self.input_x = tf.placeholder(\r\n shape=(None, embedding_size, max_time_size),\r\n dtype=tf.float32,\r\n name=\"input_x\")\r\n batch_size = tf.shape(self.input_x)[0]\r\n self.input_y = tf.placeholder(shape=(None, class_num), dtype=tf.float32,\r\n name=\"input_y\")\r\n self.input_keep_prob = tf.placeholder(tf.float32,\r\n name=\"input_keep_prob\")\r\n self.output_keep_prob = tf.placeholder(\r\n tf.float32,\r\n name=\"output_keep_prob\"\r\n )\r\n # Keeping track of l2 regularization loss (optional)\r\n l2_loss = tf.constant(0.0)\r\n\r\n def lstm_cell_func():\r\n # LSTM Cell, hidden size larger, remenber more detail\r\n lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(\r\n cell_size,\r\n forget_bias=forget_bias,\r\n state_is_tuple=True)\r\n \"\"\"\r\n add dropout, dnn dropout different from cnn.\r\n in_keep_prob: input keep probability(the probability of h_t == 0).\r\n out_keep_prob: output keep probability(the probability of h_{t+1} == 0).\r\n \"\"\"\r\n\r\n lstm_cell = tf.nn.rnn_cell.DropoutWrapper(\r\n lstm_cell,\r\n input_keep_prob=self.input_keep_prob,\r\n output_keep_prob=self.output_keep_prob)\r\n \"\"\"What's the benefit of multiple LSTM hidden layer?\r\n point 1: An interesting property of multilayer LSTMs is that it allows to\r\n perform hierarchical processing on difficult temporal tasks, and more\r\n naturally capture the structure of sequences.\r\n point 2: The purpose of using multilayer RNN cells is to learn more\r\n sophisticated conditional distributions\"\"\"\r\n return lstm_cell\r\n cell = tf.nn.rnn_cell.MultiRNNCell(\r\n [lstm_cell_func() for _ in range(lstm_layer_num)], state_is_tuple=True)\r\n with tf.name_scope(\"lstm\"):\r\n 
state = cell.zero_state(batch_size, tf.float32) # sents counte\r\n # with tf.name_scope(\"lstm\"):\r\n with tf.variable_scope(tf.get_variable_scope()) as scope:\r\n for time_step in range(max_time_size):\r\n if time_step > 0:\r\n tf.get_variable_scope().reuse_variables()\r\n (h_t, state) = cell(self.input_x[:,:,time_step], state)\r\n h = h_t\r\n # fully connected layer\r\n with tf.name_scope(\"full_cont_layer\"):\r\n W1 = tf.Variable(tf.truncated_normal([cell_size, class_num], stddev=0.1), name=\"W1\")\r\n W2 = tf.Variable(tf.truncated_normal([cell_size, class_num], stddev=0.1), name=\"W2\")\r\n W3 = tf.Variable(tf.truncated_normal([cell_size, class_num], stddev=0.1), name=\"W3\")\r\n b1 = tf.Variable(tf.constant(0.1, shape=[class_num]), name=\"b1\")\r\n b2 = tf.Variable(tf.constant(0.1, shape=[class_num]), name=\"b2\")\r\n b3 = tf.Variable(tf.constant(0.1, shape=[class_num]), name=\"b3\")\r\n l2_loss += tf.nn.l2_loss(W1) + tf.nn.l2_loss(W2) + tf.nn.l2_loss(W3)\r\n l2_loss += tf.nn.l2_loss(b1) + tf.nn.l2_loss(b2) + tf.nn.l2_loss(b3)\r\n self.scores = tf.nn.xw_plus_b(h, W1, b1, name=\"scores\")\r\n # self.score = tf.matmul(h, W) + b\r\n self.predictions = tf.argmax(self.scores, 1, name=\"predictions\")\r\n\r\n # CalculateMean cross-entropy loss\r\n with tf.name_scope(\"loss\"):\r\n # losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores,\r\n # labels=self.input_y)\r\n losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores+1e-10, labels=self.input_y)\r\n \"\"\"sparse softmax cross entropy do not need to transform labels to\r\n one-hot matrix. and \"\"\"\r\n self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss\r\n\r\n # Accuracy\r\n with tf.name_scope(\"accuracy\"):\r\n correct_predictions = tf.equal(self.predictions,\r\n tf.argmax(self.input_y, 1))\r\n self.accuracy = tf.reduce_mean(\r\n tf.cast(correct_predictions, \"float\"), name=\"accuracy\")", "def forward(self, x):\n batch_size = x.size(0)\n out,_ = self.lstm(x) #out = batch, seq_len, num_directions * hidden_size\n out1 = out[:, -1, :16] # last timestep of the forward direction of the last layer\n out2 = out[:, 0, 16:] # last timestep of the backward direction of the last layer\n out = torch.cat((out1,out2), dim=1)\n out = self.fc(out)\n\n return out", "def forward(self, input: torch.Tensor, hidden_state: torch.Tensor, cell_state: torch.Tensor) ->Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n input = input.unsqueeze(1)\n output = self.embedding(input)\n output, (hidden_state, cell_state) = self.lstm(output, (hidden_state, cell_state))\n output_logits = self.out(output)\n return output_logits, hidden_state, cell_state", "def lstm2():\n return render_template(\n 'lstm2.html',\n title='LSTM',\n year=datetime.now().year,\n message='Your LSTM page.'\n )", "def test_lstm_basic(self):\n\n class SimpleLSTM(nn.Module):\n def __init__(self):\n super(SimpleLSTM, self).__init__()\n self.rnn = torch.nn.LSTM(12, 10, 1)\n w2 = torch.randn(40, 10)\n w1 = torch.randn(40, 12)\n b1 = torch.randn(40)\n b2 = torch.randn(40)\n self.rnn.training = False\n self.rnn.weight_ih_l0 = torch.nn.Parameter(w1)\n self.rnn.weight_hh_l0 = torch.nn.Parameter(w2)\n self.rnn.bias_ih_l0 = torch.nn.Parameter(b1)\n self.rnn.bias_hh_l0 = torch.nn.Parameter(b2)\n\n def forward(self, inputs, h, c):\n return self.rnn(inputs, (h, c))\n\n inputs = torch.randn(10, 3, 12)\n h = torch.randn(1, 3, 10)\n c = torch.randn(1, 3, 10)\n model = SimpleLSTM()\n\n utils.compare_tracing_methods(\n model, inputs, h, c, fusible_ops={\"aten::lstm\"}, skip_to_glow=True\n )", "def lstm2():\n return render_template(\n 'lstm2.html',\n title='LSTM',\n 
year=\"2020\",\n message='Your LSTM page.'\n )", "def init_hidden(self, batch_size: int, device: Device) -> AmbiguousHidden:\n hidden_zero = torch.zeros(batch_size, self.hidden_size).to(device)\n\n if self.rnn_type == \"LSTM\":\n return hidden_zero, hidden_zero.clone()\n else:\n return hidden_zero", "def lstm_model(input_size, output_size, embedding, num_nodes, num_unrollings, batch_size,\n learning_rate, exp_decay = None, gradient_max_value = 1.25, dropout_prob = 0.0):\n\n graph = tf.Graph()\n with graph.as_default():\n # [ix, fx, cx, ox]\n x_mat = training.utils.gaussian_weights_variable([input_size, 4*num_nodes])\n # [im, fm, cm, om]\n o_mat = training.utils.gaussian_weights_variable([num_nodes, 4*num_nodes])\n # [ib, fb, cb, ob]\n b_vec = tf.Variable(tf.zeros([1, 4*num_nodes]))\n\n # Variables saving state across unrollings.\n saved_output = tf.Variable(tf.zeros([batch_size, num_nodes]), trainable=False)\n saved_state = tf.Variable(tf.zeros([batch_size, num_nodes]), trainable=False)\n # Classifier weights and biases.\n w = training.utils.gaussian_weights_variable([num_nodes, output_size])\n b = tf.Variable(tf.zeros([output_size]))\n\n # Definition of the cell computation.\n def lstm_cell(i, o, state):\n \"\"\"Create a LSTM cell. See e.g.: http://arxiv.org/pdf/1402.1128v1.pdf\n Note that in this formulation, we omit the various connections between the\n previous state and the gates.\"\"\"\n mult = tf.matmul(i, x_mat) + tf.matmul(o, o_mat) + b_vec\n\n input_gate = tf.sigmoid(mult[:, 0:num_nodes])\n forget_gate = tf.sigmoid(mult[:, num_nodes:2*num_nodes])\n state = forget_gate * state + input_gate * tf.tanh(mult[:, 2*num_nodes:3*num_nodes])\n output_gate = tf.sigmoid(mult[:, 3*num_nodes:4*num_nodes])\n return output_gate * tf.tanh(state), state\n\n # Input data.\n before_embedding_size = input_size\n if embedding is not None:\n before_embedding_size = embedding.shape[0]\n\n train_data = list()\n for _ in range(num_unrollings + 1):\n train_data.append(\n tf.placeholder(tf.float32, shape=[batch_size, before_embedding_size]))\n train_inputs = train_data[:num_unrollings]\n train_labels = train_data[1:] # Labels are inputs shifted by one time step.\n\n # Unrolled LSTM loop.\n outputs = list()\n output = saved_output\n state = saved_state\n for i in train_inputs:\n if embedding is not None:\n # Converting the input to the embedding.\n indices = tf.argmax(i, 1)\n i = tf.nn.embedding_lookup(embedding, indices)\n # Dropout is only applied to inputs, not to recurrent connections.\n i = tf.nn.dropout(i, 1 - dropout_prob)\n output, state = lstm_cell(i, output, state)\n outputs.append(output)\n\n # State saving across unrollings.\n with tf.control_dependencies([saved_output.assign(output),\n saved_state.assign(state)]):\n # Classifier.\n # Dropout is also applied to the output of the LSTM cell, only when\n # used for the projection, as it is not recurrent.\n outputs = tf.concat(0, outputs)\n outputs = tf.nn.dropout(outputs, 1 - dropout_prob)\n logits = tf.nn.xw_plus_b(outputs, w, b)\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\n logits, tf.concat(0, train_labels)))\n\n # Optimizer.\n global_step = tf.Variable(0)\n\n if exp_decay is not None:\n learning_rate = tf.train.exponential_decay(\n learning_rate, global_step,\n exp_decay['decay_steps'], exp_decay['decay_rate'], exp_decay['staircase'])\n\n optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n # Clipping to avoid exploding gradient.\n gradients, v = zip(*optimizer.compute_gradients(loss))\n gradients, _ = 
tf.clip_by_global_norm(gradients, gradient_max_value)\n optimizer = optimizer.apply_gradients(zip(gradients, v), global_step=global_step)\n\n # Predictions.\n train_prediction = tf.nn.softmax(logits)\n\n # Sampling and validation eval: batch 1, no unrolling.\n sample_input_ph = tf.placeholder(tf.float32, shape=[1, before_embedding_size])\n saved_sample_output = tf.Variable(tf.zeros([1, num_nodes]))\n saved_sample_state = tf.Variable(tf.zeros([1, num_nodes]))\n reset_sample_state = tf.group(\n saved_sample_output.assign(tf.zeros([1, num_nodes])),\n saved_sample_state.assign(tf.zeros([1, num_nodes])))\n\n sample_input = sample_input_ph\n if embedding is not None:\n indices = tf.argmax(sample_input_ph, 1)\n sample_input = tf.nn.embedding_lookup(embedding, indices)\n\n sample_output, sample_state = lstm_cell(\n sample_input, saved_sample_output, saved_sample_state)\n with tf.control_dependencies([saved_sample_output.assign(sample_output),\n saved_sample_state.assign(sample_state)]):\n sample_prediction = tf.nn.softmax(tf.nn.xw_plus_b(sample_output, w, b))\n\n tf_graph = {\n 'graph': graph,\n 'data_ph': train_data,\n 'sample_ph': sample_input_ph }\n tf_predictions = [train_prediction, sample_prediction]\n\n return tf_graph, optimizer, loss, tf_predictions, reset_sample_state", "def extract_hidden_states(self, output):\n # Intermediate hidden states\n output_fw_intermediate = output[:,:-1,0:self._hidden_size]\n output_bw_intermediate = output[:,1:,self._hidden_size:] \n \n # Last hidden states\n output_fw = output[:,-1,0:self._hidden_size]\n output_bw = output[:,0,self._hidden_size:]\n last_ht = torch.cat((output_fw, output_bw), -1)\n \n return last_ht, output_fw_intermediate, output_bw_intermediate", "def test_lstm_two_layers(self):\n\n class MultipleLayersLSTM(nn.Module):\n def __init__(self):\n super(MultipleLayersLSTM, self).__init__()\n self.rnn = torch.nn.LSTM(10, 20, 2, bidirectional=False)\n self.rnn.training = False\n\n def forward(self, inputs, h, c):\n return self.rnn(inputs, (h, c))\n\n inputs = torch.randn(5, 3, 10)\n h = torch.randn(2, 3, 20)\n c = torch.randn(2, 3, 20)\n model = MultipleLayersLSTM()\n\n utils.compare_tracing_methods(\n model, inputs, h, c, fusible_ops={\"aten::lstm\"}, skip_to_glow=True\n )", "def build_lstm8(embeddings, shape, settings):\n model = Sequential()\n model.add(\n Embedding(\n embeddings.shape[0],\n embeddings.shape[1],\n input_length=shape['max_length'],\n trainable=False,\n weights=[embeddings],\n mask_zero=False,\n name='eembed'\n )\n )\n model.add(TimeDistributed(Dense(shape['n_hidden'], use_bias=False, name='td8')))\n model.add(Bidirectional(LSTM(shape['n_hidden'], return_sequences=True,\n recurrent_dropout=settings['dropout'],\n dropout=settings['dropout']), name='bidi'))\n model.add(Flatten(name='flaaten'))\n model.add(BatchNormalization())\n model.add(Dropout(settings['dropout'] / 2.0))\n model.add(Dense(shape['n_class'], activation='sigmoid'))\n xprint('build_lstm8: embeddings=%s shape=%s' % (dim(embeddings), shape))\n return model", "def separable_lstm(cell, num_units, inputs, seq_lengths1, seq_lengths2, scope=None):\n with variable_scope.variable_scope(scope, \"SeparableLstm\", [inputs]):\n hidden = bidirectional_horizontal_lstm(cell, num_units, inputs, seq_lengths1)\n with variable_scope.variable_scope(\"vertical\"):\n transposed = array_ops.transpose(hidden, [0, 2, 1, 3])\n output_transposed = bidirectional_horizontal_lstm(cell, num_units, transposed, seq_lengths2)\n output = array_ops.transpose(output_transposed, [0, 2, 1, 3])\n return 
output", "def Bilstm(self):\n # Model.\n model = Sequential()\n # model.add(Bidirectional(LSTM(2048, return_sequences=True),input_shape=self.input_shape))\n # model.add(Bidirectional(LSTM(2048))) id identification is 2048\n model.add(Bidirectional(LSTM(2048, return_sequences=True), input_shape=self.input_shape))\n #model.add(Bidirectional(LSTM(2048)))\n model.add(Dense(2048, activation='relu'))\n #model.add(Dropout(0.5))\n model.add(Dense(self.nb_classes, activation='softmax'))\n # model.add(Bidirectional(LSTM(20, return_sequences=True), input_shape=(n_timesteps, 1)))\n # model.add(TimeDistributed(Dense(1, activation='sigmoid')))\n # model = Sequential()\n # model = Sequential()\n # model.add(Embedding(max_features, 128, input_length=maxlen))\n # model.add(Bidirectional(LSTM(64)))\n # model.add(Dropout(0.5))\n # model.add(Dense(1, activation='sigmoid'))\n\n # model.add(Embedding(20000, 128, input_length=self.seq_length))\n # model.add(Flatten(input_shape=self.input_shape))\n # model.add(Embedding(20000, 128, input_length=self.seq_length))\n # model.add(Bidirectional(LSTM(128)))\n # model.add(Dropout(0.5))\n # model.add(Dense(self.nb_classes, activation='softmax'))\n return model", "def forward(self, batch: torch.LongTensor,\n hidden_start: torch.FloatTensor = None) -> torch.FloatTensor:\n\n # max_len = x.size(1)\n # x,label = batch\n # batch_size x max_len x embedding_dim\n x_embedded = self.embedding(batch)\n # x_drop = self.dropout\n x_drop = self.dropout(x_embedded)\n\n # compute hidden states and logits for each time step\n # hidden_states_list = []\n # prev_hidden = hidden_start\n hidden_state = self.rnn(x_drop)[0]\n # print(hidden_state)\n # print(hidden_state[0].shape)\n # print(hidden_state[1].shape)\n\n # hidden_state = hidden_state.permute(2,1,0)\n # hidden_state_maxPooled = F.max_pool1d(hidden_state,hidden_state.shape[2])\n # hidden_state_maxPooled = hidden_state.permute(2,1,0)\n hidden_state_pooled, _ = torch.max(hidden_state, dim=1)\n\n output = self.get_logits(hidden_state_pooled)\n\n # Loss = self.loss(output, y)\n\n # hidden_state = softmax(logits(hidden_state))\n\n # batch_size x max_len x rnn_size\n # hidden_states = torch.stack(hidden_states_list, dim=1)\n\n return output", "def lstm_cell(x, h, c, name=None, reuse=False):\n nin = x.shape[-1].value\n nout = h.shape[-1].value\n with tf.variable_scope(name, default_name=\"lstm\", values=[x, h, c], reuse=reuse):\n wx = get_variable_wrap(\"kernel/input\", [nin, nout * 4], dtype=tf.float32, \n initializer=tf.orthogonal_initializer(1.0))\n wh = get_variable_wrap(\"kernel/hidden\", [nout, nout * 4],dtype=tf.float32,\n initializer=tf.orthogonal_initializer(1.0))\n b = get_variable_wrap(\"bias\", [nout * 4], dtype=tf.float32,\n initializer=tf.constant_initializer(0.0))\n\n z = ed.dot(x, wx) + ed.dot(h, wh) + b\n i, f, o, u = tf.split(z, 4, axis=0)\n i = tf.sigmoid(i)\n f = tf.sigmoid(f + 1.0)\n o = tf.sigmoid(o)\n u = tf.tanh(u)\n c = f * c + i * u\n h = o * tf.tanh(c)\n return h, c", "def create_multilayer_lstm_params(num_layers, in_size, state_size, name=\"\"):\n lstm_layers = []\n for i in range(num_layers):\n layer_name = name + \"-\" + str(i)\n print(\"LSTM \" + layer_name + \": \" + str(in_size) + \" x \" + str(state_size) + \"; default Dynet initialization of hidden weights\")\n lstm_layer = torch.nn.LSTMCell(input_size=int(in_size), hidden_size=int(state_size), bias=True)\n lstm_layers.append(lstm_layer)\n in_size = state_size\n return torch.nn.ModuleList(lstm_layers)", "def __init__(self, embeddings, hidden_dim, 
output_size, dropout_emb, dropout_lstm):\n super(AttentionalLSTM, self).__init__()\n\n # 1) embedding layer:\n trainable_emb = False\n self.word_embeddings = nn.Embedding(num_embeddings=embeddings.shape[0],\n embedding_dim=embeddings.shape[1])\n self.init_embeddings(embeddings, trainable_emb)\n self.drop_emb = nn.Dropout(dropout_emb)\n\n # 2) LSTM layer\n self.hidden_dim = hidden_dim\n self.lstm = nn.LSTM(input_size=embeddings.shape[1],\n hidden_size=hidden_dim,\n batch_first=True,\n dropout=dropout_lstm)\n self.drop_lstm = nn.Dropout(dropout_lstm)\n self.attention = Attention(attention_size=hidden_dim, batch_first=True)\n\n # 3) linear layer -> outputs\n self.hidden2output = nn.Linear(hidden_dim, output_size)", "def __call__(self, inputs, state, scope=None):\n # Apply vanilla LSTM\n output, new_state = self._cell(inputs, state, scope)\n\n if self.state_is_tuple:\n (prev_c, prev_h) = state\n (new_c, new_h) = new_state\n else:\n num_proj = self._cell._num_units if self._cell._num_proj is None else self._cell._num_proj\n prev_c = tf.slice(state, [0, 0], [-1, self._cell._num_units])\n prev_h = tf.slice(state, [0, self._cell._num_units], [-1, num_proj])\n new_c = tf.slice(new_state, [0, 0], [-1, self._cell._num_units])\n new_h = tf.slice(new_state, [0, self._cell._num_units], [-1, num_proj])\n\n # Apply zoneout\n if self.is_training:\n keep_rate_cell = 1.0 - self._zoneout_cell\n keep_rate_output = 1.0 - self._zoneout_outputs\n c = keep_rate_cell * tf.nn.dropout(new_c - prev_c, keep_prob=keep_rate_cell) + prev_c\n h = keep_rate_output * tf.nn.dropout(new_h - prev_h, keep_prob=keep_rate_output) + prev_h\n else:\n c = new_c - self._zoneout_cell * (new_c + prev_c)\n h = new_h - self._zoneout_outputs * (new_h + prev_h)\n\n new_state = tf.nn.rnn_cell.LSTMStateTuple(c, h) if self.state_is_tuple else tf.concat([c, h], axis=1)\n\n return output, new_state", "def loop_fn(time, cell_output, cell_state, loop_state, emit_ta):\n \n if cell_output is None: # time == 0\n next_cell_state = initial_state\n emit_output= tf.ones(tf.shape(initial_state[1])[:1], dtype=tf.int32) * tf.constant(-1) #(batch_size)\n next_input = tf.squeeze(self.sos, [1])\n elements_finished = tf.logical_and(tf.cast(emit_output, dtype=tf.bool), False)\n \n else:\n \n next_cell_state = cell_state\n decoder_outputs = tf.expand_dims(cell_output, 1) #(batch_size, 1, hidden_size)\n encoder_outputs_reshape = tf.reshape(encoder_outputs, shape=(-1, self.config.hidden_size)) #(batch_size*time_steps, hidden_size)\n decoder_outputs_reshape = tf.reshape(decoder_outputs, shape=(-1, self.config.hidden_size)) #(batch_size*1, hidden_size)\n encoder_outputs_linear_reshape = tf.nn.rnn_cell._linear(encoder_outputs_reshape, output_size=self.config.hidden_size, \n bias=False, scope='Ptr_W1') #(b_sz*tstps_en, h_sz)\n decoder_outputs_linear_reshape = tf.nn.rnn_cell._linear(decoder_outputs_reshape, output_size=self.config.hidden_size, \n bias=False, scope='Ptr_W2') #(b_sz*1, h_sz)\n encoder_outputs_linear = tf.reshape(encoder_outputs_linear_reshape, tf.shape(encoder_outputs))\n decoder_outputs_linear = tf.reshape(decoder_outputs_linear_reshape, tf.shape(decoder_outputs))\n \n encoder_outputs_linear_expand = tf.expand_dims(encoder_outputs_linear, 1) #(b_sz, 1, tstp_en, h_sz)\n decoder_outputs_linear_expand = tf.expand_dims(decoder_outputs_linear, 2) #(b_sz, 1, 1, h_sz)\n \n after_add = tf.tanh(encoder_outputs_linear_expand + decoder_outputs_linear_expand) #(b_sz, 1, tstp_en, h_sz)\n \n after_add_reshape = tf.reshape(after_add, shape=(-1, 
self.config.hidden_size))\n \n after_add_linear_reshape = tf.nn.rnn_cell._linear(after_add_reshape, output_size=1, #(b_sz*1*tstp_en, 1)\n bias=False, scope='Ptr_v')\n after_add_linear = tf.reshape(after_add_linear_reshape, shape=(-1, tstps_en)) #(b_sz, tstp_en)\n en_length_mask = tf.sequence_mask(self.encoder_tstps, #(b_sz, tstp_en)\n maxlen=tstps_en, dtype=tf.bool)\n\n \"\"\"mask out already hitted ids\"\"\" \n hit_ids = tf.cond(emit_ta.size() > 0, lambda: emit_ta.pack(), lambda: tf.ones(shape=[1, batch_size], dtype=tf.int32)*-1) #(to_cur_tstp, b_sz)\n masks = tf.one_hot(hit_ids, depth=tstps_en, on_value=True, off_value=False) #(to_cur_tstp, b_sz, tstp_en)\n masks = tf.reduce_any(masks, reduction_indices=[0]) #(b_sz, tstp_en)\n hit_masks = tf.logical_not(masks)\n\n mask = tf.logical_and(en_length_mask, hit_masks)\n logits = tf.select(mask, after_add_linear,\n tf.ones_like(after_add_linear) * (-np.Inf)) # shape(b_sz, tstp_en)\n\n emit_output = tf.arg_max(logits, dimension=1) #(batch_size)\n emit_output = tf.cast(emit_output, dtype=tf.int32)\n \n bool_mask = tf.one_hot(emit_output, depth=tstps_en, on_value=True, off_value=False) #(b_sz, tstps_en)\n bool_mask = tf.reshape(bool_mask, shape=(batch_size, tstps_en))\n next_input = tf.boolean_mask(encoder_inputs, bool_mask) #(b_sz, emb_sz)\n \n elements_finished = tf.equal(emit_output, 0) #(batch_size)\n elements_finished = tf.reshape(elements_finished, (-1,))\n \n elements_finished = tf.logical_or(elements_finished, (time >= self.config.num_steps))\n next_loop_state = loop_state\n return (elements_finished, next_input, next_cell_state,\n emit_output, next_loop_state)", "def forward(self, x, hidden):\n batch_size = x.size(0)\n # embeddings and lstm_out\n embeds = self.embedding(x)\n lstm_out, hidden = self.lstm(embeds, hidden)\n \n # stack up lstm outputs\n lstm_out = lstm_out.contiguous().view(-1, self.hidden_dim)\n \n # dropout and fully connected layer\n out = self.dropout(lstm_out)\n out = self.fc(out)\n \n # sigmoid function\n sig_out = self.sig(out)\n \n # reshape to be batch_size first\n sig_out = sig_out.view(batch_size, -1)\n sig_out = sig_out[:, -1] # get last batch of labels\n \n # return last sigmoid output and hidden state\n return sig_out, hidden", "def trajectory(self, state, T=1, time_steps=200):\n\n state = state.to(device)\n t = torch.linspace(0, T, time_steps).to(device)\n\n # integrate and remove batch dim\n traj = self.model_of_dyn_system.trajectory(state, t)\n return traj.detach().cpu()[:, 0, :]", "def add_logits_op(self):\n with tf.variable_scope(\"bi-lstm\"):\n cell_fw = tf.contrib.rnn.LSTMCell(self.config.hidden_size_lstm)\n cell_bw = tf.contrib.rnn.LSTMCell(self.config.hidden_size_lstm)\n (output_fw, output_bw), _ = tf.nn.bidirectional_dynamic_rnn(\n cell_fw, cell_bw, self.word_embeddings,\n sequence_length=self.sequence_lengths, dtype=tf.float32)\n output = tf.concat([output_fw, output_bw], axis=-1)\n output = tf.nn.dropout(output, self.dropout)\n\n with tf.variable_scope(\"proj\"):\n W = tf.get_variable(\"W\", dtype=tf.float32,\n shape=[2*self.config.hidden_size_lstm, self.config.ntags])\n\n b = tf.get_variable(\"b\", shape=[self.config.ntags],\n dtype=tf.float32, initializer=tf.zeros_initializer())\n\n nsteps = tf.shape(output)[1]\n output = tf.reshape(output, [-1, 2*self.config.hidden_size_lstm])\n pred = tf.matmul(output, W) + b\n self.logits = tf.reshape(pred, [-1, nsteps, self.config.ntags])", "def build_lstm_layers(lstm_sizes, embed, keep_prob_, batch_size):\n lstms = [tf.contrib.rnn.BasicLSTMCell(size) for size 
in lstm_sizes]\n # Add dropout to the cell\n drops = [tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob_) for lstm in lstms]\n # Stack up multiple LSTM layers, for deep learning\n cell = tf.contrib.rnn.MultiRNNCell(drops)\n # Getting an initial state of all zeros\n initial_state = cell.zero_state(batch_size, tf.float32)\n \n lstm_outputs, final_state = tf.nn.dynamic_rnn(cell, embed, initial_state=initial_state)\n \n return initial_state, lstm_outputs, cell, final_state", "def get_init_cell(batch_size, rnn_size):\n\n cell = tf.contrib.rnn.BasicLSTMCell(rnn_size) #?????????????????cell_hidden_size = state_size\n #cell = tf.contrib.run.DropoutWrapper(cell, output_keep_prob=keep_prob)\n num_of_layers = 3\n cell = tf.contrib.rnn.MultiRNNCell([cell for _ in range(num_of_layers)])\n initial_state = cell.zero_state(batch_size, tf.float32)\n initial_state = tf.identity(initial_state, name='initial_state')\n \n return (cell, initial_state)", "def get_init_cell(batch_size, rnn_size):\n lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size)\n #drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=0.5)\n cell = tf.contrib.rnn.MultiRNNCell([lstm] * 3)\n initial_state = cell.zero_state(batch_size, tf.float32)\n initial_state = tf.identity(initial_state, name=\"initial_state\")\n\n return cell, initial_state", "def create_logits(self):\n with tf.variable_scope('LSTM'):\n first_label = self.get_input(prev=None, i=0)\n decoder_inputs = [first_label] + [None] * (self._params.seq_length - 1)\n lstm_cell = tf.contrib.rnn.LSTMCell(\n self._mparams.num_lstm_units,\n use_peepholes=False,\n cell_clip=self._mparams.lstm_state_clip_value,\n state_is_tuple=True,\n initializer=orthogonal_initializer)\n lstm_outputs, _ = self.unroll_cell(\n decoder_inputs=decoder_inputs,\n initial_state=lstm_cell.zero_state(self._batch_size, tf.float32),\n loop_function=self.get_input,\n cell=lstm_cell)\n\n with tf.variable_scope('logits'):\n logits_list = [\n tf.expand_dims(self.char_logit(logit, i), dim=1)\n for i, logit in enumerate(lstm_outputs)\n ]\n\n return tf.concat(logits_list, 1)", "def __init__(self, input_size=1, hidden_layer_size=100, output_size=1):\n super().__init__()\n self.hidden_layer_size = hidden_layer_size # TODO [tuning] different size (?)\n\n # TODO [tuning] BiLSTM (?)\n self.lstm = nn.LSTM(input_size=input_size, hidden_size=hidden_layer_size, batch_first=True)\n\n self.linear = nn.Linear(hidden_layer_size, output_size, bias=True) # TODO try with bias=False (?)", "def run_LSTM(data):\n # Initialising the RNN\n model = Sequential()\n \"\"\"\n # Adding the first LSTM layer and some Dropout regularisation\n model.add(LSTM(units=256,return_sequences=False, input_shape=(data.x_train.shape[1], data.x_train.shape[2])))\n model.add(Dropout(0.3))\n model.add(Dense(units=1))\n \"\"\"\n\n model = Sequential()\n model.add(LSTM(units=180, return_sequences=False, input_shape=(data.x_train.shape[1], data.x_train.shape[2])))\n #model.add(Dropout(params['dropout']))\n #model.add(LSTM(units=128))\n #model.add(Dropout(params['dropout']))\n #model.add(Dense(units=64))\n model.add(Dense(units=1,activation='relu',kernel_initializer=tf.keras.initializers.Orthogonal()))\n # Compiling the RNN\n opt = Adam(lr=0.0052)\n model.compile(optimizer=opt, loss='mean_squared_error',metrics=['mean_absolute_percentage_error'])\n\n # Fitting the RNN to the Training set\n regressor = model.fit(data.x_train, data.y_train.ravel(), epochs=180,batch_size=410,shuffle=True,validation_data=(data.x_valid,data.y_valid.ravel()))\n\n #Create plots\n 
plt.plot(regressor.history['loss'], label='loss')\n plt.plot(regressor.history['val_loss'], label='val_loss')\n plt.legend()\n plt.show()\n\n\n plt.figure()\n outputs = model.predict(data.x_test)\n print(outputs.shape)\n predictions = outputs[:,0]\n\n print(predictions.shape)\n\n pred_prices = predictions.reshape(-1,1)\n real_prices = data.y_test.reshape(-1,1)\n mape = 0\n\n pred_prices = data.inv.inverse_transform(pred_prices)\n real_prices = data.inv.inverse_transform(real_prices)\n\n #if Returns == False:\n \n #mape = mean_absolute_percentage_error(real_prices, pred_prices)\n\n #pred_prices = [x * (data.mm[1] - data.mm[0]) + data.mm[0] for x in predictions.reshape(-1)]\n #real_prices = [x * (data.mm[1] - data.mm[0]) + data.mm[0] for x in data.y_test.reshape(-1)]\n\n #pred_prices = data.train_sc.inverse_transform(predictions.reshape(-1))\n #real_prices = data.test_sc.inverse_transform(data.y_test.reshape(-1))\n\n #mape = mean_absolute_percentage_error(data.y_test.ravel(), pred_prices.)\n y_true, y_pred = np.array(real_prices).reshape(-1,1), np.array(pred_prices).reshape(-1,1)\n #y_true, y_pred = y_true[:50], y_pred[:50]\n\n mape = mean_absolute_percentage_error(y_true, y_pred)\n pct = PCT(y_true,y_pred)\n mse = mean_squared_error(y_true,y_pred)\n rmse = sqrt(mse)\n amape = AMAPE(y_true,y_pred)\n mae = MAE(y_true,y_pred)\n\n plt.plot(real_prices, label='targets')\n plt.plot(pred_prices, label='predictions')\n plt.legend()\n plt.title('LSTM test data')\n plt.show()\n\n plt.figure()\n outputs = model.predict(data.x_train)\n print(outputs.shape)\n predictions = outputs[:,0]\n\n plt.plot(data.y_train.ravel(), label='targets')\n plt.plot(predictions, label='predictions')\n plt.legend()\n plt.title('LSTM train data')\n plt.show()\n print(y_pred)\n\n print('RMSE= {:.6f}, MAPE = {:.6f}, PCT = {:.6f}, MSE = {:.6f}, MAE = {:.6f}, AMAPE = {:.6f}'.format(rmse, mape, pct, mse, mae, amape))" ]
[ "0.68931335", "0.67093015", "0.6646216", "0.65613323", "0.6523706", "0.6465427", "0.6402848", "0.6338531", "0.6324766", "0.63201183", "0.6290784", "0.6263753", "0.6202274", "0.6196958", "0.6188465", "0.6187984", "0.61834884", "0.61785024", "0.6135082", "0.61321425", "0.61321425", "0.61196715", "0.6103434", "0.60783696", "0.6076441", "0.6054846", "0.60396314", "0.6004754", "0.6004754", "0.5986235", "0.5984219", "0.5982831", "0.5981776", "0.5980558", "0.5971272", "0.59558064", "0.59544516", "0.5945293", "0.5942619", "0.5913094", "0.5909018", "0.59085745", "0.59065104", "0.5905303", "0.58976454", "0.5895867", "0.5851203", "0.58491063", "0.58460414", "0.5810412", "0.5803136", "0.5785663", "0.5778604", "0.5770046", "0.5769302", "0.5762037", "0.5747636", "0.57451046", "0.573436", "0.5724696", "0.5712389", "0.56986916", "0.5692991", "0.5691443", "0.5686611", "0.5678249", "0.5671416", "0.56700635", "0.5657749", "0.5652648", "0.56517506", "0.564566", "0.56446075", "0.5642127", "0.56386787", "0.5638081", "0.5633764", "0.56298053", "0.5623116", "0.56099254", "0.5606777", "0.56042784", "0.5603358", "0.56031513", "0.560263", "0.56015056", "0.5595616", "0.55880225", "0.5586793", "0.5585373", "0.5584883", "0.5583393", "0.5574062", "0.5571614", "0.5569895", "0.55687445", "0.55659246", "0.5547628", "0.5538716", "0.5537298", "0.5531979" ]
0.0
-1
LSTM returning hidden state and content cell at a specific timestep.
def lstm_cell_2(x, h, c, name=None, reuse=False):
    nin = x.shape[-1].value
    nout = h.shape[-1].value
    with tf.variable_scope(name, default_name="lstm_2", values=[x, h, c], reuse=reuse):
        wx = get_variable_wrap("kernel/input", [nin, nout * 4], dtype=tf.float32,
                               initializer=tf.orthogonal_initializer(1.0))
        wh = get_variable_wrap("kernel/hidden", [nout, nout * 4], dtype=tf.float32,
                               initializer=tf.orthogonal_initializer(1.0))
        b = get_variable_wrap("bias", [nout * 4], dtype=tf.float32,
                              initializer=tf.constant_initializer(0.0))

        z = ed.dot(x, wx) + ed.dot(h, wh) + b
        i, f, o, u = tf.split(z, 4, axis=0)
        i = tf.sigmoid(i)
        f = tf.sigmoid(f + 1.0)
        o = tf.sigmoid(o)
        u = tf.tanh(u)
        c = f * c + i * u
        h = o * tf.tanh(c)
        return h, c
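The document above computes a single LSTM step; the query's point — obtaining the hidden state h_t and the cell (content) state c_t at a specific timestep — comes from threading those two tensors through the recurrence and reading them off at the step of interest. Below is a minimal, self-contained sketch of that pattern using PyTorch's nn.LSTMCell rather than the TF-1 helpers above (get_variable_wrap and ed.dot are project-specific and not defined here); the tensor sizes and the queried timestep are arbitrary example values.

# Sketch only: unroll an LSTM cell step by step and read (h_t, c_t) at timestep t.
import torch
import torch.nn as nn

batch_size, seq_len, input_size, hidden_size = 4, 10, 8, 16
x = torch.randn(seq_len, batch_size, input_size)   # time-major input
cell = nn.LSTMCell(input_size, hidden_size)

h = torch.zeros(batch_size, hidden_size)           # h_0
c = torch.zeros(batch_size, hidden_size)           # c_0
states = []                                        # (h_t, c_t) per timestep
for t in range(seq_len):
    h, c = cell(x[t], (h, c))                      # one recurrence step
    states.append((h, c))

t_query = 5                                        # example timestep to inspect
h_t, c_t = states[t_query]
print(h_t.shape, c_t.shape)                        # torch.Size([4, 16]) each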
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def LSTMCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None):\n hx, cx = hidden\n gates = F.linear(input, w_ih, b_ih) + F.linear(hx, w_hh, b_hh)\n\n ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)\n\n ingate = hard_sigmoid(ingate)\n forgetgate = hard_sigmoid(forgetgate)\n cellgate = F.tanh(cellgate)\n outgate = hard_sigmoid(outgate)\n\n cy = (forgetgate * cx) + (ingate * cellgate)\n hy = outgate * F.tanh(cy)\n\n return hy, cy", "def LSTM(inputs, dim, seq_len, name):\r\n with tf.name_scope(name) as scope:\r\n cell = tf.contrib.rnn.LSTMCell(num_units=dim)\r\n hidden_states, cell_states = tf.nn.dynamic_rnn(cell, inputs=inputs, sequence_length=seq_len, dtype=tf.float32, scope=name)\r\n\r\n return hidden_states, cell_states", "def LSTM(self, previous_hidden_memory_tuple, x):\n \n previous_hidden_state,c_prev=tf.unstack(previous_hidden_memory_tuple)\n \n #Input Gate\n i= tf.sigmoid(\n tf.matmul(x,self.Wi)+tf.matmul(previous_hidden_state,self.Ui) + self.bi \n )\n \n #Forget Gate\n f= tf.sigmoid(\n tf.matmul(x,self.Wf)+tf.matmul(previous_hidden_state,self.Uf) + self.bf \n )\n \n #Output Gate\n o= tf.sigmoid(\n tf.matmul(x,self.Wog)+tf.matmul(previous_hidden_state,self.Uog) + self.bog\n )\n \n #New Memory Cell\n c_= tf.nn.tanh(\n tf.matmul(x,self.Wc)+tf.matmul(previous_hidden_state,self.Uc) + self.bc \n ) \n \n #Final Memory cell\n c= f*c_prev + i*c_\n \n #Current Hidden state\n current_hidden_state = o*tf.nn.tanh(c)\n\n return tf.stack([current_hidden_state,c])", "def build_lstm(self, keep_prob):\n def get_cell():\n if self.kernel == 'LSTM':\n cell = tf.contrib.rnn.BasicLSTMCell(self.num_hidden_units)\n print('LSTM is using...')\n elif self.kernel == 'GRU': # GRU RNN\n cell = tf.contrib.rnn.GRUCell(self.num_hidden_units)\n print('GRU is using...')\n else:\n raise AttributeError\n cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=keep_prob)\n return cell\n lstm_cell = get_cell()\n init_state = lstm_cell.zero_state(self.batch_size, dtype=tf.float32)\n return lstm_cell, init_state", "def _lstm_unroll_base(num_lstm_layer, seq_len, num_hidden):\n param_cells = []\n last_states = []\n for i in range(num_lstm_layer):\n param_cells.append(LSTMParam(i2h_weight=mx.sym.Variable(\"l%d_i2h_weight\" % i),\n i2h_bias=mx.sym.Variable(\"l%d_i2h_bias\" % i),\n h2h_weight=mx.sym.Variable(\"l%d_h2h_weight\" % i),\n h2h_bias=mx.sym.Variable(\"l%d_h2h_bias\" % i)))\n state = LSTMState(c=mx.sym.Variable(\"l%d_init_c\" % i),\n h=mx.sym.Variable(\"l%d_init_h\" % i))\n last_states.append(state)\n assert len(last_states) == num_lstm_layer\n\n # embedding layer\n data = mx.sym.Variable('data')\n wordvec = mx.sym.SliceChannel(data=data, num_outputs=seq_len, squeeze_axis=1)\n\n hidden_all = []\n for seqidx in range(seq_len):\n hidden = wordvec[seqidx]\n for i in range(num_lstm_layer):\n next_state = _lstm(\n num_hidden=num_hidden,\n indata=hidden,\n prev_state=last_states[i],\n param=param_cells[i],\n seqidx=seqidx,\n layeridx=i)\n hidden = next_state.h\n last_states[i] = next_state\n hidden_all.append(hidden)\n\n hidden_concat = mx.sym.Concat(*hidden_all, dim=0)\n pred_fc = mx.sym.FullyConnected(data=hidden_concat, num_hidden=11, name=\"pred_fc\")\n return pred_fc", "def __call__(self, inputs, state):\n with tf.variable_scope(\"BayesLSTMCell\"):\n if self.w is None:\n\n# size = inputs.get_shape()[-1].value\n \n print ([\"------- Size input LSTM: \", inputs.shape])\n print ([\"------- Dim input specified \", self.X_dim])\n# print ([\"num units LSTM: \", self.num_units])\n \n self.w = 
VI.sample_posterior((self.X_dim + self.num_units, 4 * self.num_units),\n name=self.n + \"_weights\",\n prior=self.prior,\n is_training=self.is_training)\n \n self.b = VI.sample_posterior((4 * self.num_units, 1),\n name=self.n + \"_biases\",\n prior=self.prior,\n is_training=self.is_training)\n\n # Get the cell and hidden state from the previous cell [C_t-1, h_t-1]\n C_t_prev , h_t_prev = state\n #Vector concatenation of previous hidden state and embedded inputs\n concat_inputs_hidden = tf.concat([inputs, h_t_prev], 1)\n # Compute the Z = Wx + b for each of the 4 networks at once !\n gate_inputs = tf.nn.bias_add(tf.matmul(concat_inputs_hidden, self.w), tf.squeeze(self.b))\n \n # Split data up for the 4 gates\n # i = input_gate, j = new_input, f = forget_gate, o = output_gate\n i, j, f, o = tf.split(value=gate_inputs, num_or_size_splits=4, axis=1)\n\n # Compute the new cell \n C_t = (C_t_prev * tf.sigmoid(f + self._forget_bias) + tf.sigmoid(i)*self._activation(j))\n h_t = self._activation(C_t) * tf.sigmoid(o)\n \n #Create tuple of the new state\n State_t = LSTMStateTuple(C_t, h_t)\n\n return h_t, State_t", "def lstm_cell(input, cx):\n return FunctionLib.apply('LSTMCell', input.device, [input, cx])", "def forward(self, X, hx=None):\n outputs = []\n (batch_size, Nseq, Dim) = X.size()\n \n# print (\"Dimensions LSTM_RNN Batch: \", (batch_size, Nseq, Dim))\n for l in range(self.num_layers):\n if (type(hx) == type(None)):\n h_t = torch.zeros(batch_size, self.hidden_size).to(device = Vil.device, dtype = Vil.dtype)\n c_t = torch.zeros(batch_size, self.hidden_size).to(device = Vil.device, dtype = Vil.dtype)\n else:\n h_t = torch.zeros(batch_size, self.hidden_size).to(device = Vil.device, dtype = Vil.dtype)\n c_t = torch.zeros(batch_size, self.hidden_size).to(device = Vil.device, dtype = Vil.dtype)\n setattr(self, 'h_t%i'%(l+1), h_t)\n setattr(self, 'c_t%i'%(l+1), c_t)\n \n # We loop for every element in the chain and for every layer\n\n for i in range(Nseq):\n input_t = X[:,i,:]\n# print (\"Sequence Chunk size: \",input_t.size())\n l = 0\n # First layer we put the input, in the rest we put the propagated states\n h_t, c_t = getattr(self, 'LSTMCell%i'%(l+1))(input_t, (getattr(self, 'h_t%i'%(l+1)), getattr(self, 'c_t%i'%(l+1))))\n setattr(self, 'h_t%i'%(l+1), h_t)\n setattr(self, 'c_t%i'%(l+1), c_t)\n \n for l in range(1,self.num_layers):\n h_t, c_t = getattr(self, 'LSTMCell%i'%(l+1))(h_t, (getattr(self, 'h_t%i'%(l+1)), getattr(self, 'c_t%i'%(l+1))))\n setattr(self, 'h_t%i'%(l+1), h_t)\n setattr(self, 'c_t%i'%(l+1), c_t)\n \n \n # Return the hx and cx of all layers ? 
for the last sample ?\n outputs = []\n for l in range(self.num_layers):\n outputs.append( [getattr(self, 'h_t%i'%(l+1)), getattr(self, 'c_t%i'%(l+1))])\n\n# outputs = torch.stack(outputs, 1).squeeze(2)\n \n return outputs", "def __init__(self, incoming, n_units,\n W_ci=tf.zeros, W_ig=tf.zeros, W_og=tf.zeros, W_fg=tf.zeros,\n b_ci=tf.zeros, b_ig=tf.zeros, b_og=tf.zeros, b_fg=tf.zeros,\n a_ci=tf.tanh, a_ig=tf.sigmoid, a_og=tf.sigmoid, a_fg=tf.sigmoid, a_out=tf.identity,\n c_init=tf.zeros, h_init=tf.zeros, learn_c_init=False, learn_h_init=False, forgetgate=True,\n output_dropout=False, store_states=False, return_states=False, precomp_fwds=False,\n tickerstep_biases=None, learn_tickerstep_biases=True, name='LSTM'):\n super(LSTMLayer, self).__init__()\n with tf.variable_scope(name) as self.layer_scope:\n self.incoming, self.incoming_shape = get_input(incoming)\n self.n_units = n_units\n self.lstm_inlets = ['ci', 'ig', 'og', 'fg']\n if return_states:\n store_states = True\n \n #\n # Initialize weights and biases\n #\n \n # Turn W inits into lists [forward_pass, backward_pass]\n W_ci, W_ig, W_og, W_fg = [v[:2] if isinstance(v, list) else [v, v] for v in [W_ci, W_ig, W_og, W_fg]]\n \n # Make W and b tf variables\n W_ci, W_ig, W_og, W_fg = [\n [tofov(v[0], shape=[self.incoming_shape[-1], n_units], var_params=dict(name=n + '_fwd')),\n tofov(v[1], shape=[n_units, n_units], var_params=dict(name=n + '_bwd'))]\n for v, n in zip([W_ci, W_ig, W_og, W_fg], ['W_ci', 'W_ig', 'W_og', 'W_fg'])]\n b_ci, b_ig, b_og, b_fg = [tofov(v, shape=[n_units], var_params=dict(name=n)) for v, n in\n zip([b_ci, b_ig, b_og, b_fg], ['b_ci', 'b_ig', 'b_og', 'b_fg'])]\n \n # Pack weights for fwd and bwd connections\n W_fwd_conc = tf.concat(axis=1, values=[W[0] for W in [W_ci, W_ig, W_og, W_fg]])\n W_bwd_conc = tf.concat(axis=1, values=[W[1] for W in [W_ci, W_ig, W_og, W_fg]])\n \n if not forgetgate:\n print(\"Warning: Setting forgetgate to 0 has not been tested yet, please set the W and b manually \"\n \"to not-trainable tensorflow variables!\")\n \n def a_fg(x):\n return tf.ones(x.get_shape().as_list())\n \n # Initialize bias for tickersteps\n if tickerstep_biases is not None:\n self.W_tickers = OrderedDict(zip_longest(self.lstm_inlets,\n [tofov(tickerstep_biases, shape=[n_units],\n var_params=dict(name='W_tickers_' + g,\n trainable=learn_tickerstep_biases))\n for g in self.lstm_inlets]))\n else:\n self.W_tickers = None\n \n #\n # Create mask for output dropout\n # apply dropout to n_units dimension of outputs, keeping dropout mask the same for all samples,\n # sequence positions, and pixel coordinates\n #\n output_shape = self.get_output_shape()\n if output_dropout:\n out_do_mask = tf.ones(shape=[output_shape[0], output_shape[-1]],\n dtype=tf.float32)\n out_do_mask = tf.nn.dropout(out_do_mask, keep_prob=1. 
- output_dropout,\n noise_shape=[1, output_shape[-1]])\n \n def out_do(x):\n \"\"\"Function for applying dropout mask to outputs\"\"\"\n if output_dropout:\n return out_do_mask * x\n else:\n return x\n \n # Redefine a_out to include dropout (sneaky, sneaky)\n a_out_nodropout = a_out\n \n def a_out(x):\n return a_out_nodropout(out_do(x))\n \n #\n # Handle initializations for h (hidden states) and c (cell states) as Variable\n #\n h_init = out_do(tofov(h_init, shape=[output_shape[0], output_shape[-1]],\n var_params=dict(name='h_init', trainable=learn_h_init)))\n c_init = tofov(c_init, shape=[output_shape[0], output_shape[-1]],\n var_params=dict(name='h_init', trainable=learn_c_init))\n \n # Initialize lists to store LSTM activations and cell states later\n h = [h_init]\n c = [c_init]\n \n self.precomp_fwds = precomp_fwds\n self.store_states = store_states\n self.return_states = return_states\n \n self.W_fwd = OrderedDict(zip(self.lstm_inlets, [W[0] for W in [W_ci, W_ig, W_og, W_fg]]))\n self.W_bwd = OrderedDict(zip(self.lstm_inlets, [W[1] for W in [W_ci, W_ig, W_og, W_fg]]))\n \n self.W_fwd_conc = W_fwd_conc\n self.W_bwd_conc = W_bwd_conc\n self.a = OrderedDict(zip(self.lstm_inlets, [a_ci, a_ig, a_og, a_fg]))\n self.a['out'] = a_out\n self.b = OrderedDict(zip(self.lstm_inlets, [b_ci, b_ig, b_og, b_fg]))\n self.h = h\n self.c = c\n self.external_rec = None\n \n self.ig = []\n self.og = []\n self.ci = []\n self.fg = []\n \n self.out = tf.expand_dims(h_init, 1)\n self.name = name", "def extract_lstm_minus_feature(hidden_state, i, j):\n seq_len, bs, hidden_size = hidden_state.size()\n assert hidden_size % 2 == 0\n split_point = hidden_size // 2\n hidden_f = hidden_state[j + 1, :, :split_point] - hidden_state[i, :, :split_point]\n hidden_b = (\n hidden_state[i + 1, :, split_point:] - hidden_state[j + 2, :, split_point:]\n )\n span_v = torch.cat([hidden_f, hidden_b], dim=-1)\n return span_v", "def forward(self, x, hidden):\n emb_x = self.emb_layer(x)\n lstm_out, hidden = self.lstm(emb_x, hidden)\n if self.bidirectional:\n # separate to forward and backward\n # following code reshapes LSTM output to:\n # (batch size, seq length, num directions, hidden dimensions)\n # where direction '0' is forward and direction '1' is backward\n lstm_out = lstm_out.contiguous().view(-1, self.seq_len, 2, self.hidden_dim)\n # get backward output in first node\n lstm_out_bw = lstm_out[:, 0, 1, :]\n # get forward output in last node\n lstm_out_fw = lstm_out[:, -1, 0, :]\n # we may simple concatenate forward & backward outputs,\n # or add them, multiply or average; in this case i used average\n lstm_out = torch.add(input=lstm_out_bw, alpha=1, other=lstm_out_fw)\n lstm_out = torch.div(lstm_out, 2)\n else:\n lstm_out = lstm_out[:, -1]\n \n assert lstm_out.shape[-1] == self.hidden_dim, (lstm_out.shape, self.hidden_dim)\n lstm_out = lstm_out.contiguous().view(-1, self.hidden_dim)\n \n d_out = self.dropout(lstm_out)\n fc_out = self.output_layer(d_out)\n sig_out = torch.sigmoid(fc_out)\n \n # return last sigmoid output and hidden state\n return sig_out, hidden", "def __init__(self, incoming, n_units,\n W_ci=tf.zeros, W_ig=tf.zeros, W_og=tf.zeros, W_fg=tf.zeros,\n b_ci=tf.zeros, b_ig=tf.zeros, b_og=tf.zeros, b_fg=tf.zeros,\n a_ci=tf.tanh, a_ig=tf.sigmoid, a_og=tf.sigmoid, a_fg=tf.sigmoid, a_out=tf.identity,\n c_init=tf.zeros, h_init=tf.zeros, learn_c_init=False, learn_h_init=False, forgetgate=True,\n output_dropout=False, store_states=False, return_states=False, precomp_fwds=False,\n tickerstep_biases=None, 
learn_tickerstep_biases=True, name='LSTM'):\n super(LSTMLayerGetNetInput, self).__init__()\n with tf.variable_scope(name) as self.layer_scope:\n self.incoming, self.incoming_shape = get_input(incoming)\n self.n_units = n_units\n self.lstm_inlets = ['ci', 'ig', 'og', 'fg']\n if return_states:\n store_states = True\n \n #\n # Initialize weights and biases\n #\n \n # Turn W inits into lists [forward_pass, backward_pass]\n W_ci, W_ig, W_og, W_fg = [v[:2] if isinstance(v, list) else [v, v] for v in [W_ci, W_ig, W_og, W_fg]]\n \n # Make W and b tf variables\n W_ci, W_ig, W_og, W_fg = [\n [tofov(v[0], shape=[self.incoming_shape[-1], n_units], var_params=dict(name=n + '_fwd')),\n tofov(v[1], shape=[n_units, n_units], var_params=dict(name=n + '_bwd'))]\n for v, n in zip([W_ci, W_ig, W_og, W_fg], ['W_ci', 'W_ig', 'W_og', 'W_fg'])]\n b_ci, b_ig, b_og, b_fg = [tofov(v, shape=[n_units], var_params=dict(name=n)) for v, n in\n zip([b_ci, b_ig, b_og, b_fg], ['b_ci', 'b_ig', 'b_og', 'b_fg'])]\n \n # Pack weights for fwd and bwd connections\n W_fwd_conc = tf.concat(axis=1, values=[W[0] for W in [W_ci, W_ig, W_og, W_fg]])\n W_bwd_conc = tf.concat(axis=1, values=[W[1] for W in [W_ci, W_ig, W_og, W_fg]])\n \n if not forgetgate:\n print(\"Warning: Setting forgetgate to 0 has not been tested yet, please set the W and b manually \"\n \"to not-trainable tensorflow variables!\")\n \n def a_fg(x):\n return tf.ones(x.get_shape().as_list())\n \n # Initialize bias for tickersteps\n if tickerstep_biases is not None:\n self.W_tickers = OrderedDict(zip_longest(self.lstm_inlets,\n [tofov(tickerstep_biases, shape=[n_units],\n var_params=dict(name='W_tickers_' + g,\n trainable=learn_tickerstep_biases))\n for g in self.lstm_inlets]))\n else:\n self.W_tickers = None\n \n #\n # Create mask for output dropout\n # apply dropout to n_units dimension of outputs, keeping dropout mask the same for all samples,\n # sequence positions, and pixel coordinates\n #\n output_shape = self.get_output_shape()\n if output_dropout:\n out_do_mask = tf.ones(shape=[output_shape[0], output_shape[-1]],\n dtype=tf.float32)\n out_do_mask = tf.nn.dropout(out_do_mask, keep_prob=1. 
- output_dropout,\n noise_shape=[1, output_shape[-1]])\n \n def out_do(x):\n \"\"\"Function for applying dropout mask to outputs\"\"\"\n if output_dropout:\n return out_do_mask * x\n else:\n return x\n \n # Redefine a_out to include dropout (sneaky, sneaky)\n a_out_nodropout = a_out\n \n def a_out(x):\n return a_out_nodropout(out_do(x))\n \n #\n # Handle initializations for h (hidden states) and c (cell states) as Variable\n #\n h_init = out_do(tofov(h_init, shape=[output_shape[0], output_shape[-1]],\n var_params=dict(name='h_init', trainable=learn_h_init)))\n c_init = tofov(c_init, shape=[output_shape[0], output_shape[-1]],\n var_params=dict(name='h_init', trainable=learn_c_init))\n \n # Initialize lists to store LSTM activations and cell states later\n h = [h_init]\n c = [c_init]\n \n self.precomp_fwds = precomp_fwds\n self.store_states = store_states\n self.return_states = return_states\n \n self.W_fwd = OrderedDict(zip(self.lstm_inlets, [W[0] for W in [W_ci, W_ig, W_og, W_fg]]))\n self.W_bwd = OrderedDict(zip(self.lstm_inlets, [W[1] for W in [W_ci, W_ig, W_og, W_fg]]))\n \n self.W_fwd_conc = W_fwd_conc\n self.W_bwd_conc = W_bwd_conc\n self.a = OrderedDict(zip(self.lstm_inlets, [a_ci, a_ig, a_og, a_fg]))\n self.a['out'] = a_out\n self.b = OrderedDict(zip(self.lstm_inlets, [b_ci, b_ig, b_og, b_fg]))\n self.h = h\n self.c = c\n self.external_rec = None\n \n self.ig = []\n self.og = []\n self.ci = []\n self.fg = []\n \n self.out = tf.expand_dims(h_init, 1)\n self.name = name\n \n self.cur_net_fwd = dot_product(tf.zeros(self.incoming_shape[:1] + self.incoming_shape[2:]),\n tf.zeros(self.W_fwd_conc.shape.as_list()))", "def lstm_cell_hidden(mprev, cprev, node_dim, attention_m=False):\n\n # Input Gate\n m_nodes = node_dim\n if attention_m:\n m_nodes = 2 * node_dim\n im = Variable(torch.rand(m_nodes,node_dim))\n ib = Variable(torch.zeros(1,node_dim))\n i_g = torch.sigmoid(torch.matmul(mprev,im) + ib)\n \n #Forget Gate\n fm = Variable(torch.rand(m_nodes,node_dim))\n fb = Variable(torch.zeros(1,node_dim))\n f_g = torch.sigmoid(torch.matmul(mprev,fm) + fb)\n \n #Cell\n cm = Variable(torch.rand(m_nodes,node_dim))\n cb = Variable(torch.zeros(1,node_dim))\n cprime = torch.sigmoid(torch.matmul(mprev,cm) + cb)\n c = f_g * cprev + i_g * torch.tanh(cprime)\n \n #Output Gate\n om = Variable(torch.rand(m_nodes,node_dim))\n ob = Variable(torch.zeros(1,node_dim))\n o_g = torch.sigmoid(torch.matmul(mprev,om) + ob)\n m = o_g * torch.tanh(c)\n return m,c", "def generate_lstm(\n input_seqs,\n hidden_state,\n cell_state,\n w_inp,\n w_hid,\n b_inp,\n b_hid,\n f_act,\n g_act,\n h_act,\n backwards=False,\n ):\n\n h_list = []\n seq_length = len(input_seqs)\n for i in range(seq_length):\n step = input_seqs[i] if not backwards else input_seqs[seq_length - (i + 1)]\n step = _op.squeeze(step, axis=[0])\n gates = _op.nn.dense(step, w_inp) + _op.nn.dense(hidden_state, w_hid)\n if b_inp is not None:\n gates += b_inp\n if b_hid is not None:\n gates += b_hid\n i, f, c, o = _op.split(gates, 4, axis=-1)\n\n i = f_act(i)\n f = f_act(f)\n\n c = g_act(c)\n C = f * cell_state + i * c\n\n o = f_act(o)\n\n H = o * h_act(C)\n\n hidden_state = H\n cell_state = C\n h_list.append(_op.expand_dims(H, axis=0))\n\n if backwards:\n h_list = h_list[::-1]\n\n # Concatenate outputs and add back in direction axis.\n concatenated = _op.concatenate(h_list, 0)\n output = _op.expand_dims(concatenated, axis=1)\n hidden_state = _op.expand_dims(hidden_state, axis=0)\n cell_state = _op.expand_dims(cell_state, axis=0)\n\n return output, hidden_state, 
cell_state", "def __init__(self, input_dim, hidden_dim, output_dim):\r\n super(LstmEstimator, self).__init__()\r\n \r\n # The LSTM takes track features as inputs, and outputs hidden states\r\n # with dimensionality hidden_dim\r\n self.lstm = nn.LSTM(input_dim, hidden_dim)\r\n \r\n self.hidden2target = nn.Linear(hidden_dim, output_dim)", "def lstm_cell():\n if 'reuse' in inspect.getargspec(\n tf.contrib.rnn.BasicLSTMCell.__init__).args:\n return tf.contrib.rnn.BasicLSTMCell(\n args.hidden_size, input_size=args.embedding_size, forget_bias=0.0, state_is_tuple=True,\n reuse=tf.get_variable_scope().reuse)\n else:\n return tf.contrib.rnn.BasicLSTMCell(\n args.hidden_size, input_size=args.embedding_size, forget_bias=0.0, state_is_tuple=True)", "def create_LSTM_LSTM_model(feats2d, shapes, model_settings, is_training):\n\n if is_training:\n dropout_prob = model_settings['dropout_prob'] \n\n # Get dimensions\n lstm_size = model_settings['lstm_size']\n\n batch_size = tf.shape(feats2d)[0] \n feats2d = tf.reshape(feats2d, shape=[batch_size,-1,model_settings['feature_width']]) # features are of shape [max seq length for batch, 40]\n seq_lengths = shapes[:,0] # all shapes are [seq_length, 40], we extract seq_length\n\n # First LSTM \n\n # LSTM cells\n cell_fw = tf.contrib.rnn.LSTMCell(lstm_size, state_is_tuple=True)\n cell_bw = tf.contrib.rnn.LSTMCell(lstm_size, state_is_tuple=True)\n\n # Bi-directional RNN (+ Dropout)\n (output_fw, output_bw), (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, feats2d, \n sequence_length=seq_lengths, \n dtype=tf.float32)\n\n # TODO: make predictions after every 64 time slices\n\n concat_rnn = tf.concat([state_fw[0], state_bw[0]], axis=1)\n\n if is_training:\n first_dropout = tf.nn.dropout(concat_rnn, dropout_prob)\n else:\n first_dropout = concat_rnn\n\n # Second LSTM \n # TODO\n\n # Logits Layer\n num_classes = model_settings['num_classes']\n logits = tf.layers.dense(inputs=first_dropout, units=num_classes)\n \n if is_training:\n return logits, dropout_prob\n else:\n return logits", "def lstm_cell(inputs, **kwargs):\n if context.executing_eagerly():\n return OpLib.execute('LSTMCell', inputs, outputs=[None, None])\n return OpLib.add('LSTMCell', num_outputs=2, **kwargs)", "def init_hidden(self):\n if isinstance(self.rnn, nn.GRU):\n return torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device)\n elif isinstance(self.rnn, nn.LSTM):\n return (torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device),\n torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device))", "def init_hidden(self):\n if isinstance(self.rnn, nn.GRU):\n return torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device)\n elif isinstance(self.rnn, nn.LSTM):\n return (torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device),\n torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device))", "def build_lstm_cell(num_units, dropout):\n cell = tf.nn.rnn_cell.LSTMCell(num_units)\n if dropout:\n result = tf.nn.rnn_cell.DropoutWrapper(cell,\n output_keep_prob=1-dropout)\n return result", "def variable_recurrent(self, x, h, seq_length, w_ih, w_hh, b_ih, b_hh):\n time_step = x.shape[0]\n h_t = h\n if self.is_lstm:\n hidden_size = h[0].shape[-1]\n zero_output = P.ZerosLike()(h_t[0])\n else:\n hidden_size = h.shape[-1]\n zero_output = P.ZerosLike()(h_t)\n seq_length = P.Cast()(seq_length, mstype.float32)\n seq_length = 
P.BroadcastTo((hidden_size, -1))(seq_length)\n seq_length = P.Cast()(seq_length, mstype.int32)\n seq_length = P.Transpose()(seq_length, (1, 0))\n\n outputs = []\n state_t = h_t\n t = 0\n while t < time_step:\n x_t = x[t:t + 1:1]\n x_t = P.Squeeze(0)(x_t)\n h_t = self.cell(x_t, state_t, w_ih, w_hh, b_ih, b_hh)\n seq_cond = seq_length > t\n if self.is_lstm:\n state_t_0 = P.Select()(seq_cond, h_t[0], state_t[0])\n state_t_1 = P.Select()(seq_cond, h_t[1], state_t[1])\n output = P.Select()(seq_cond, h_t[0], zero_output)\n state_t = (state_t_0, state_t_1)\n else:\n state_t = P.Select()(seq_cond, h_t, state_t)\n output = P.Select()(seq_cond, h_t, zero_output)\n outputs.append(output)\n t += 1\n outputs = P.Stack()(outputs)\n return outputs, state_t", "def run_single_step(self): \n contexts = tf.placeholder(tf.float32, [self.batch_size, self.num_ctx, self.dim_ctx]) \n last_memory = tf.placeholder(tf.float32, [self.batch_size, self.dim_hidden])\n last_output = tf.placeholder(tf.float32, [self.batch_size, self.dim_hidden])\n last_word = tf.placeholder(tf.int32, [self.batch_size])\n initial_step = tf.placeholder(tf.bool)\n\n context_mean = tf.reduce_mean(contexts, 1) \n\n lstm = tf.nn.rnn_cell.LSTMCell(self.dim_hidden, initializer=tf.random_normal_initializer(stddev=0.033)) \n\n # Attention mechanism\n alpha = self.attend(contexts, last_output) \n weighted_context = tf.cond(initial_step,\n lambda: tf.identity(context_mean),\n lambda: tf.reduce_sum(contexts*tf.expand_dims(alpha, 2), 1))\n\n word_emb = tf.cond(initial_step, \n lambda: tf.zeros([self.batch_size, self.dim_embed]), \n lambda: tf.nn.embedding_lookup(self.emb_w, last_word))\n \n # Apply the LSTM\n with tf.variable_scope(\"LSTM\"):\n last_state = last_memory, last_output\n output, state = lstm(tf.concat([weighted_context, word_emb], 1), last_state)\n memory, _ = state\n \n # Compute the logits and probs\n expanded_output = tf.concat([output, weighted_context, word_emb], 1)\n\n logits1 = fully_connected(expanded_output, self.dim_dec, 'dec_fc')\n logits1 = nonlinear(logits1, 'tanh')\n logits2 = tf.nn.xw_plus_b(logits1, self.dec_w, self.dec_b)\n probs = tf.nn.softmax(logits2) \n logprobs = tf.log(probs)\n\n tf.get_variable_scope().reuse_variables() \n\n self.contexts = contexts\n self.last_memory = last_memory\n self.last_output = last_output\n self.last_word = last_word\n self.initial_step = initial_step\n\n self.memory = memory\n self.output = output\n self.logprobs = logprobs", "def recurrent(self, x, h_0, w_ih, w_hh, b_ih, b_hh):\n time_step = x.shape[0]\n outputs = []\n t = 0\n h = h_0\n while t < time_step:\n x_t = x[t:t + 1:1]\n x_t = P.Squeeze(0)(x_t)\n h = self.cell(x_t, h, w_ih, w_hh, b_ih, b_hh)\n if self.is_lstm:\n outputs.append(h[0])\n else:\n outputs.append(h)\n t += 1\n outputs = P.Stack()(outputs)\n return outputs, h", "def __init__(self, incoming, n_units,\n W_ci=tf.zeros, W_ig=tf.zeros, W_og=tf.zeros, W_fg=tf.zeros,\n b_ci=tf.zeros, b_ig=tf.zeros, b_og=tf.zeros, b_fg=tf.zeros,\n a_ci=tf.tanh, a_ig=tf.sigmoid, a_og=tf.sigmoid, a_fg=tf.sigmoid, a_out=tf.identity,\n c_init=tf.zeros, h_init=tf.zeros, learn_c_init=False, learn_h_init=False, forgetgate=True,\n output_dropout=False, store_states=False, return_states=False, precomp_fwds=False,\n tickerstep_biases=None, learn_tickerstep_biases=True, name='LSTM'):\n super(LSTMLayerSetNetInput, self).__init__()\n with tf.variable_scope(name) as self.layer_scope:\n self.incoming, self.incoming_shape = get_input(incoming)\n self.n_units = n_units\n self.lstm_inlets = ['ci', 'ig', 'og', 
'fg']\n if return_states:\n store_states = True\n \n # Make W and b tf variables\n W_ci, W_ig, W_og, W_fg = [\n tofov(v, shape=[n_units, n_units], var_params=dict(name=n + '_bwd'))\n for v, n in zip([W_ci, W_ig, W_og, W_fg], ['W_ci', 'W_ig', 'W_og', 'W_fg'])]\n b_ci, b_ig, b_og, b_fg = [tofov(v, shape=[n_units], var_params=dict(name=n)) for v, n in\n zip([b_ci, b_ig, b_og, b_fg], ['b_ci', 'b_ig', 'b_og', 'b_fg'])]\n \n # Pack weights for bwd connections\n W_bwd_conc = tf.concat(axis=1, values=[W_ci, W_ig, W_og, W_fg])\n \n if not forgetgate:\n print(\"Warning: Setting forgetgate to 0 has not been tested yet, please set the W and b manually \"\n \"to not-trainable tensorflow variables!\")\n \n def a_fg(x):\n return tf.ones(x.get_shape().as_list())\n \n # Initialize bias for tickersteps\n if tickerstep_biases is not None:\n self.W_tickers = OrderedDict(zip_longest(self.lstm_inlets,\n [tofov(tickerstep_biases, shape=[n_units],\n var_params=dict(name='W_tickers_' + g,\n trainable=learn_tickerstep_biases))\n for g in self.lstm_inlets]))\n else:\n self.W_tickers = None\n \n #\n # Create mask for output dropout\n # apply dropout to n_units dimension of outputs, keeping dropout mask the same for all samples,\n # sequence positions, and pixel coordinates\n #\n output_shape = self.get_output_shape()\n if output_dropout:\n out_do_mask = tf.ones(shape=[output_shape[0], output_shape[-1]],\n dtype=tf.float32)\n out_do_mask = tf.nn.dropout(out_do_mask, keep_prob=1. - output_dropout,\n noise_shape=[1, output_shape[-1]])\n \n def out_do(x):\n \"\"\"Function for applying dropout mask to outputs\"\"\"\n if output_dropout:\n return out_do_mask * x\n else:\n return x\n \n # Redefine a_out to include dropout (sneaky, sneaky)\n a_out_nodropout = a_out\n \n def a_out(x):\n return a_out_nodropout(out_do(x))\n \n #\n # Handle initializations for h (hidden states) and c (cell states) as Variable\n #\n h_init = out_do(tofov(h_init, shape=[output_shape[0], output_shape[-1]],\n var_params=dict(name='h_init', trainable=learn_h_init)))\n c_init = tofov(c_init, shape=[output_shape[0], output_shape[-1]],\n var_params=dict(name='h_init', trainable=learn_c_init))\n \n # Initialize lists to store LSTM activations and cell states later\n h = [h_init]\n c = [c_init]\n \n self.precomp_fwds = precomp_fwds\n self.store_states = store_states\n self.return_states = return_states\n \n self.W_fwd = OrderedDict(zip(self.lstm_inlets, [None, None, None, None]))\n self.W_bwd = OrderedDict(zip(self.lstm_inlets, [W_ci, W_ig, W_og, W_fg]))\n \n self.W_fwd_conc = None\n self.W_bwd_conc = W_bwd_conc\n self.a = OrderedDict(zip(self.lstm_inlets, [a_ci, a_ig, a_og, a_fg]))\n self.a['out'] = a_out\n self.b = OrderedDict(zip(self.lstm_inlets, [b_ci, b_ig, b_og, b_fg]))\n self.h = h\n self.c = c\n self.external_rec = None\n \n self.ig = []\n self.og = []\n self.ci = []\n self.fg = []\n \n self.out = tf.expand_dims(h_init, 1)\n self.name = name", "def extract_hidden_states(self, output):\n \n # Extracting the forward and backward hidden states from the last BiLSTM layer\n # output (batch_size, sequence length, 2 * hidden dim)\n output_fw = output[:,:,0:self._hidden_size]\n output_bw = output[:,:,self._hidden_size:]\n \n hidden_states = torch.cat((output_fw, output_bw),-1)\n \n return hidden_states", "def LSTM_train(X_train, Y_train, X_dev, Y_dev, R_train, R_dev, hyperparams):", "def init_hidden(self):\n if isinstance(self.rnn, nn.GRU) or isinstance(self.rnn, nn.RNN):\n return torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, 
device=device)\n elif isinstance(self.rnn, nn.LSTM):\n return (torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device),\n torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device))", "def init_hidden(self):\n if isinstance(self.rnn, nn.GRU) or isinstance(self.rnn, nn.RNN):\n return torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device)\n elif isinstance(self.rnn, nn.LSTM):\n return (torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device),\n torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device))", "def apply_lstm(x, seq_len):\n return cudnn_layers.stacked_bilstm(\n input_emb=x,\n input_len=seq_len,\n hidden_size=FLAGS.lstm_dim,\n num_layers=1,\n dropout_ratio=0.0,\n mode=tf_estimator.ModeKeys.TRAIN,\n use_cudnn=None)", "def forward(self, src, device):\n\n src = torch.as_tensor(src).float().to(device)\n\n\n # Set initial hidden and cell states \n h0 = torch.zeros(self.num_layers, src.shape[0], self.hidden_dim).to(device)\n c0 = torch.zeros(self.num_layers, src.shape[0], self.hidden_dim).to(device)\n\n # shape of lstm_out: [batch_size, input_size, hidden_dim]\n # shape of self.hidden: (a, b), where a and b both have shape (num_layers, batch_size, hidden_dim).\n lstm_out, self.hidden = self.lstm(src, (h0, c0)) \n \n # Only take the output from the final timetep\n # Can pass on the entirety of lstm_out to the next layer if it is a seq2seq prediction\n #print(lstm_out.size())\n y_pred = self.linear(lstm_out[:, -1, :].view(src.shape[0], -1))\n return y_pred", "def bi_lstm(X_inputs):\n # X_inputs.shape = [batchsize, timestep_size] -> inputs.shape = [batchsize, timestep_size, embedding_size]\n log(\"embedding\")\n log(embedding)\n log(X_inputs)\n log(\"X_inputs\")\n inputs = tf.nn.embedding_lookup(embedding, X_inputs) \n \n # ** 1.构建前向后向多层 LSTM\n cell_fw = rnn.MultiRNNCell([lstm_cell() for _ in range(layer_num)], state_is_tuple=True)\n cell_bw = rnn.MultiRNNCell([lstm_cell() for _ in range(layer_num)], state_is_tuple=True)\n \n # ** 2.初始状态\n initial_state_fw = cell_fw.zero_state(batch_size, tf.float32)\n initial_state_bw = cell_bw.zero_state(batch_size, tf.float32) \n \n # 下面两部分是等价的\n # **************************************************************\n # ** 把 inputs 处理成 rnn.static_bidirectional_rnn 的要求形式\n # ** 文档说明\n # inputs: A length T list of inputs, each a tensor of shape\n # [batch_size, input_size], or a nested tuple of such elements.\n # *************************************************************\n # Unstack to get a list of 'n_steps' tensors of shape (batch_size, n_input)\n # inputs.shape = [batchsize, timestep_size, embedding_size] -> timestep_size tensor, each_tensor.shape = [batchsize, embedding_size]\n # inputs = tf.unstack(inputs, timestep_size, 1)\n # ** 3.bi-lstm 计算(tf封装) 一般采用下面 static_bidirectional_rnn 函数调用。\n # 但是为了理解计算的细节,所以把后面的这段代码进行展开自己实现了一遍。\n# try:\n# outputs, _, _ = rnn.static_bidirectional_rnn(cell_fw, cell_bw, inputs, \n# initial_state_fw = initial_state_fw, initial_state_bw = initial_state_bw, dtype=tf.float32)\n# except Exception: # Old TensorFlow version only returns outputs not states\n# outputs = rnn.static_bidirectional_rnn(cell_fw, cell_bw, inputs, \n# initial_state_fw = initial_state_fw, initial_state_bw = initial_state_bw, dtype=tf.float32)\n# output = tf.reshape(tf.concat(outputs, 1), [-1, hidden_size * 2])\n # ***********************************************************\n \n # 
***********************************************************\n # ** 3. bi-lstm 计算(展开)\n with tf.variable_scope('bidirectional_rnn'):\n # *** 下面,两个网络是分别计算 output 和 state \n # Forward direction\n outputs_fw = list()\n state_fw = initial_state_fw\n with tf.variable_scope('fw'):\n for timestep in range(timestep_size):\n if timestep > 0:\n tf.get_variable_scope().reuse_variables()\n (output_fw, state_fw) = cell_fw(inputs[:, timestep, :], state_fw)\n outputs_fw.append(output_fw)\n \n # backward direction\n outputs_bw = list()\n state_bw = initial_state_bw\n with tf.variable_scope('bw') as bw_scope:\n inputs = tf.reverse(inputs, [1])\n for timestep in range(timestep_size):\n if timestep > 0:\n tf.get_variable_scope().reuse_variables()\n (output_bw, state_bw) = cell_bw(inputs[:, timestep, :], state_bw)\n outputs_bw.append(output_bw)\n # *** 然后把 output_bw 在 timestep 维度进行翻转\n # outputs_bw.shape = [timestep_size, batch_size, hidden_size]\n outputs_bw = tf.reverse(outputs_bw, [0])\n # 把两个oupputs 拼成 [timestep_size, batch_size, hidden_size*2]\n output = tf.concat([outputs_fw, outputs_bw], 2)\n output = tf.transpose(output, perm=[1,0,2])\n output = tf.reshape(output, [-1, hidden_size*2])\n # ***********************************************************\n return output # [-1, hidden_size*2]", "def lstm(self):\n # Model.\n model = Sequential()\n model.add(LSTM(2048, return_sequences=True,\n input_shape=self.input_shape,\n dropout=0.0))\n #model.add(Flatten()) #qiao_added\n # model.add(Dense(1024, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(2048, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Flatten())\n model.add(Dense(self.nb_classes, activation='softmax'))\n return model", "def get_initial_hx(self, input_seq, hidden_state):\n num_directions = 2 if self.lstm.bidirectional else 1\n # hidden state\n hidden = hidden_state.view(self.lstm.num_layers * num_directions, len(hidden_state), -1)\n # cell state\n c_zeros = torch.zeros(self.lstm.num_layers * num_directions,\n input_seq.size(0), self.lstm.hidden_size,\n dtype=input_seq.dtype, device=input_seq.device)\n return hidden, c_zeros", "def init_hidden(self, batch_size, device):\n if self.mode == 'LSTM':\n return (torch.ones((1+self.bidirectional , batch_size, self.hidden_dim), device=device),\n torch.ones((1+self.bidirectional, batch_size, self.hidden_dim), device=device))\n else:\n return torch.ones((1+self.bidirectional, batch_size, self.hidden_dim), device=device)", "def get_rnn_hidden_state(h):\n return h if not isinstance(h, tuple) else h[0]", "def biLSTM(inputs, dim, seq_len, name, cell_type=\"cudnn\", cells=None, is_training=True, dropout_rate=0.0):\r\n if cell_type==\"cudnn\":\r\n with tf.variable_scope(name,reuse=tf.AUTO_REUSE) as scope:\r\n hidden_states, cell_states = bi_cudnn_rnn_encoder('lstm', dim, 1, dropout_rate, inputs, seq_len, is_training)\r\n else:\r\n with tf.name_scope(name) as scope:\r\n with tf.variable_scope('forward' + name) as scope:\r\n if cell_type == \"lstm\":\r\n lstm_fwd = tf.contrib.rnn.LSTMCell(num_units=dim)\r\n with tf.variable_scope('backward' + name) as scope:\r\n if cell_type == \"lstm\":\r\n lstm_bwd = tf.contrib.rnn.LSTMCell(num_units=dim)\r\n \r\n with tf.variable_scope(name+'blstm', reuse=tf.AUTO_REUSE):\r\n hidden_states, cell_states = tf.nn.bidirectional_dynamic_rnn(cell_fw=lstm_fwd, cell_bw=lstm_bwd, inputs=inputs, sequence_length=seq_len, dtype=tf.float32, scope=name)\r\n\r\n return hidden_states, cell_states", "def _extract_states(self, state):\n conf = self._config\n\n # c_prev is 
`m` (cell value), and\n # m_prev is `h` (previous output) in the paper.\n # Keeping c and m here for consistency with the codebase\n c_prev = [None] * conf.num_dims\n m_prev = [None] * conf.num_dims\n\n # for LSTM : state = memory cell + output, hence cell_output_size > 0\n # for GRU/RNN: state = output (whose size is equal to _num_units),\n # hence cell_output_size = 0\n total_cell_state_size = self._cell_state_size()\n cell_output_size = total_cell_state_size - conf.num_units\n\n if self._state_is_tuple:\n if len(conf.recurrents) != len(state):\n raise ValueError('Expected state as a tuple of {} '\n 'element'.format(len(conf.recurrents)))\n\n for recurrent_dim, recurrent_state in zip(conf.recurrents, state):\n if cell_output_size > 0:\n c_prev[recurrent_dim], m_prev[recurrent_dim] = recurrent_state\n else:\n m_prev[recurrent_dim] = recurrent_state\n else:\n for recurrent_dim, start_idx in zip(conf.recurrents,\n range(0, self.state_size,\n total_cell_state_size)):\n if cell_output_size > 0:\n c_prev[recurrent_dim] = array_ops.slice(state, [0, start_idx],\n [-1, conf.num_units])\n m_prev[recurrent_dim] = array_ops.slice(\n state, [0, start_idx + conf.num_units], [-1, cell_output_size])\n else:\n m_prev[recurrent_dim] = array_ops.slice(state, [0, start_idx],\n [-1, conf.num_units])\n return c_prev, m_prev, cell_output_size", "def cudnn_lstm_state(lstm_cell_state):\n h = tf.stack([s.h for s in lstm_cell_state])\n c = tf.stack([s.c for s in lstm_cell_state])\n return (h, c)", "def __call__(self, inputs, state):\n\n output, next_state = super(BinaryLSTM, self).__call__(inputs, state)\n with tf.variable_scope(self._cell_name):\n\n binary_cell_state = Bsn_layer.bsn_layer(next_state[0],\n stochastic_method=self.stochastic_method,\n preprocessing_method=self.preprocessing_method,\n tf_graph=self._tf_graph,\n slope_tensor=self._slope_tensor,\n loss_op_name='loss_by_example',\n name='binary_layer',\n stochastic_gradient_estimator=bsn_literals.STRAIGHT_THROUGH)\n binary_hidden_state = Bsn_layer.bsn_layer(next_state[1],\n stochastic_method=self.stochastic_method,\n preprocessing_method=self.preprocessing_method,\n tf_graph=self._tf_graph,\n slope_tensor=self._slope_tensor,\n loss_op_name='loss_by_example',\n name='binary_layer',\n stochastic_gradient_estimator=bsn_literals.STRAIGHT_THROUGH)\n\n return binary_hidden_state, tf.nn.rnn_cell.LSTMStateTuple(binary_cell_state, binary_hidden_state)", "def forward(self, X, A, beta=1, print_output=False):\n assert X.size(0) == A.size(0) + 1, print('the seq length of X and A are wrong')\n kl_loss = 0 # KL divergence term\n Ell_loss = 0 # expected log likelihood term\n batch_size = X.size(1)\n\n if len(X.size()) != 3:\n print('The input data matrix should be the shape of [seq_length, batch_size, input_dim]')\n\n X = X.to(self.device)\n A = A.to(self.device)\n\n # container\n states = torch.zeros(A.size(0), A.size(1), self.state_size).to(self.device) # [seq-1, batch, state]\n rnn_hiddens = torch.zeros(A.size(0), A.size(1), self.hidden_size).to(self.device) # [seq-1, batch, hidden]\n\n # initialising state and rnn hidden state\n # state = torch.zeros(X.size(1), self.state_size).to(self.device)\n rnn_hidden = self.init_h(X[0]).to(self.device) # [batch, hidden]\n if self.mode == 'LSTM':\n rnn_hidden_c = torch.zeros_like(rnn_hidden).to(self.device) # [batch, hidden]\n\n # temp_prior = self.hidden_prior(rnn_hidden) #[batch, state]\n temp_prior = rnn_hidden\n prior_mean = self.prior_mean(temp_prior) # [batch, state]\n prior_sigma = 
torch.exp(self.prior_sigma(temp_prior)) # [batch, state]\n state = self.reparametrise(prior_mean, prior_sigma) # [batch, state]\n\n # rnn_hidden = torch.zeros(X.size(1), self.hidden_size).to(self.device)\n\n # emission_mean = X[0]\n for t in range(1, X.size()[\n 0]): # for each time step, compute the free energy for each batch of data (start from the second hid state)\n if self.mode == 'LSTM':\n next_state_prior_m, next_state_prior_sigma, rnn_hidden, rnn_hidden_c = self.prior(state,\n A[t - 1].unsqueeze(\n -1),\n rnn_hidden,\n rnn_hidden_c)\n else:\n next_state_prior_m, next_state_prior_sigma, rnn_hidden = self.prior(state, A[t - 1].unsqueeze(-1),\n rnn_hidden)\n\n next_state_post_m, next_state_post_sigma = self.posterior(rnn_hidden, X[t])\n state = self.reparametrise(next_state_post_m, next_state_post_sigma) # [batch, state_size]\n states[t - 1] = state\n rnn_hiddens[t - 1] = rnn_hidden\n next_state_prior = Normal(next_state_prior_m, next_state_prior_sigma)\n next_state_post = Normal(next_state_post_m, next_state_post_sigma)\n\n # kl = kl_divergence(next_state_prior, next_state_post).sum(dim=1) #[batch]\n kl = kl_divergence(next_state_post, next_state_prior).sum(dim=1) # [batch]\n\n kl_loss += kl.mean()\n kl_loss /= A.size(0)\n\n # compute nll\n\n # flatten state\n flatten_states = states.view(-1, self.state_size)\n flatten_rnn_hiddens = rnn_hiddens.view(-1, self.hidden_size)\n flatten_x_mean, flatten_x_sigma = self.obs_model(flatten_states, flatten_rnn_hiddens)\n\n nll = self.batched_gaussian_ll(flatten_x_mean, flatten_x_sigma, X[1:, :, :].reshape(-1, self.output_size))\n nll = nll.mean()\n\n FE = nll - kl_loss\n\n if print_output:\n # print('ELL loss=', Ell_loss, 'KL loss=', kl_loss)\n print('Free energy of this batch = {}. Nll loss = {}. KL div = {}.'.format(float(FE.data)\n , float(nll.data),\n float(kl_loss.data)))\n\n return FE, nll, kl_loss", "def __init__(self, device, input_size, hidden_size, layer_norm=False, chrono_init=False, t_max=10):\n\n super(LSTMCell, self).__init__()\n\n self._device = device\n self._input_size = input_size\n self._hidden_size = hidden_size\n self._layer_norm = layer_norm\n self._chrono_init = chrono_init\n self._t_max = t_max\n\n self._W_x2i = nn.Parameter(torch.Tensor(input_size, hidden_size))\n self._W_h2i = nn.Parameter(torch.Tensor(hidden_size, hidden_size))\n self._W_c2i = nn.Parameter(torch.Tensor(hidden_size))\n self._b_i = nn.Parameter(torch.Tensor(hidden_size))\n \n self._W_x2f = nn.Parameter(torch.Tensor(input_size, hidden_size))\n self._W_h2f = nn.Parameter(torch.Tensor(hidden_size, hidden_size))\n self._W_c2f = nn.Parameter(torch.Tensor(hidden_size))\n self._b_f = nn.Parameter(torch.Tensor(hidden_size))\n \n self._W_x2o = nn.Parameter(torch.Tensor(input_size, hidden_size))\n self._W_h2o = nn.Parameter(torch.Tensor(hidden_size, hidden_size))\n self._W_c2o = nn.Parameter(torch.Tensor(hidden_size))\n self._b_o = nn.Parameter(torch.Tensor(hidden_size))\n \n self._W_x2c = nn.Parameter(torch.Tensor(input_size, hidden_size))\n self._W_h2c = nn.Parameter(torch.Tensor(hidden_size, hidden_size))\n self._b_c = nn.Parameter(torch.Tensor(hidden_size))\n\n if self._layer_norm:\n self._ln_c = nn.LayerNorm(hidden_size)\n self._ln_i = nn.LayerNorm(hidden_size)\n self._ln_f = nn.LayerNorm(hidden_size)\n self._ln_o = nn.LayerNorm(hidden_size)\n self._ln_g = nn.LayerNorm(hidden_size)\n \n self._reset_parameters()", "def lstm_layer(self):\n if self.pooling:\n ret_seq = True\n else:\n ret_seq = False\n ker_in = glorot_uniform(seed=self.seed)\n rec_in = 
Orthogonal(seed=self.seed)\n if self.type_of_weights == \"shared\":\n if self.recurrent == \"bilstm\" or self.recurrent is None:\n out_a = Bidirectional(LSTM(self.hidden_dim,\n input_shape=(self.max_sequence_length, self.embedding_dim,),\n kernel_initializer=ker_in,\n recurrent_initializer=rec_in,\n return_sequences=ret_seq), merge_mode='concat')\n elif self.recurrent == \"lstm\":\n out_a = LSTM(self.hidden_dim,\n input_shape=(self.max_sequence_length, self.embedding_dim,),\n kernel_initializer=ker_in,\n recurrent_initializer=rec_in,\n return_sequences=ret_seq)\n return out_a, out_a\n elif self.type_of_weights == \"separate\":\n if self.recurrent == \"bilstm\" or self.recurrent is None:\n out_a = Bidirectional(LSTM(self.hidden_dim,\n input_shape=(self.max_sequence_length, self.embedding_dim,),\n kernel_initializer=ker_in,\n recurrent_initializer=rec_in,\n return_sequences=ret_seq), merge_mode='concat')\n out_b = Bidirectional(LSTM(self.hidden_dim,\n input_shape=(self.max_sequence_length, self.embedding_dim,),\n kernel_initializer=ker_in,\n recurrent_initializer=rec_in,\n return_sequences=ret_seq), merge_mode='concat')\n elif self.recurrent == \"lstm\":\n out_a = LSTM(self.hidden_dim,\n input_shape=(self.max_sequence_length, self.embedding_dim,),\n kernel_initializer=ker_in,\n recurrent_initializer=rec_in,\n return_sequences=ret_seq)\n out_b = LSTM(self.hidden_dim,\n input_shape=(self.max_sequence_length, self.embedding_dim,),\n kernel_initializer=ker_in,\n recurrent_initializer=rec_in,\n return_sequences=ret_seq)\n return out_a, out_b", "def init_hidden(self, batch_size):\n b = 2 if self.bidirectional else 1\n if self.rnn_type == \"LSTM\":\n h0 = (Variable(torch.zeros(b, batch_size, self.hidden_size)),\n Variable(torch.zeros(b, batch_size, self.hidden_size)))\n h0 = [h0[0].cuda(), h0[1].cuda()] if self.use_cuda else h0\n else:\n h0 = Variable(torch.zeros(b, batch_size, self.hidden_size))\n h0 = h0.cuda() if self.use_cuda else h0\n return h0", "def build_dynamic_rnn(self, cells, lstm_input, lstm_state):\n lstm_output, final_state = tf.nn.dynamic_rnn(cells, lstm_input, initial_state=lstm_state)\n # reshape lstm_output from [batch_size, time_steps, n_units] to [batch_size*time_steps, n_units]\n lstm_output = tf.reshape(lstm_output, [-1, self.num_hidden_units])\n\n return lstm_output, final_state", "def variable_recurrent(self, x, h, seq_length, w_ih, w_hh, b_ih, b_hh, att_score):\n time_step = x.shape[0]\n h_t = h\n if self.is_lstm:\n hidden_size = h[0].shape[-1]\n zero_output = P.ZerosLike()(h_t[0])\n else:\n hidden_size = h.shape[-1]\n zero_output = P.ZerosLike()(h_t)\n seq_length = P.Cast()(seq_length, mstype.float32)\n seq_length = P.BroadcastTo((hidden_size, -1))(seq_length)\n seq_length = P.Cast()(seq_length, mstype.int32)\n seq_length = P.Transpose()(seq_length, (1, 0))\n\n outputs = []\n state_t = h_t\n t = 0\n while t < time_step:\n x_t = x[t:t + 1:1]\n x_t = P.Squeeze(0)(x_t)\n h_t = self.cell(x_t, state_t, w_ih, w_hh, b_ih, b_hh, att_score[t])\n\n seq_cond = seq_length > t\n if self.is_lstm:\n state_t_0 = P.Select()(seq_cond, h_t[0], state_t[0])\n state_t_1 = P.Select()(seq_cond, h_t[1], state_t[1])\n output = P.Select()(seq_cond, h_t[0], zero_output)\n state_t = (state_t_0, state_t_1)\n else:\n state_t = P.Select()(seq_cond, h_t, state_t)\n output = P.Select()(seq_cond, h_t, zero_output)\n outputs.append(output)\n t += 1\n outputs = P.Stack()(outputs)\n return outputs, state_t", "def the_nn_article(input_size, dropout):\n \n class Model(nn.Module):\n def __init__(self):\n 
super(Model, self).__init__()\n \n # Defining layers\n self.hidden_size = 256\n self.first_layer = 512\n self.second_layer = 1024\n self.n_layers = 2\n self.bidirectional = True\n self.dropout = dropout\n \n # RNN Layer\n self.rnn = nn.LSTM(input_size = input_size, hidden_size = self.hidden_size, \n num_layers = self.n_layers, batch_first = True, \n bidirectional = self.bidirectional, dropout = self.dropout)\n \n self.fc1 = nn.Linear(self.first_layer, self.second_layer)\n self.fc2 = nn.Linear(self.second_layer, 3)\n \n def forward(self, x):\n batch_size = x.size(0)\n \n #Initializing hidden state for first input using method defined below\n hidden = self.init_hidden(batch_size, self.hidden_size)\n \n # Find sequence lengths (for packing)\n x_lengths = self.find_lengths(x)\n \n # Pack sequences\n x = torch.nn.utils.rnn.pack_padded_sequence(x, x_lengths, batch_first=True, enforce_sorted=False)\n\n # Run the network\n out, hidden = self.rnn(x, hidden)\n \n # Unpack the sequences again\n out, _ = torch.nn.utils.rnn.pad_packed_sequence(out, batch_first=True)\n\n # Run through the linear layer\n out = F.relu(self.fc1(out))\n out = F.relu(self.fc2(out))\n \n # Perform log_softmax on output (WORSE PERFORMANCE!)\n #x = F.log_softmax(x, dim = 2)\n\n return out, hidden\n \n def init_hidden(self, batch_size, hidden_size):\n # This method generates the first hidden state of zeros which we'll use in the forward pass\n \n hidden = (torch.zeros(2*self.n_layers, batch_size, self.hidden_size),\n torch.zeros(2*self.n_layers, batch_size, self.hidden_size))\n \n return hidden\n \n def find_lengths(self, input_seq):\n # Find seq-lengths of each sequence (used to pack sequences)\n x_lengths = []\n for seq in input_seq:\n for idx, vec in enumerate(seq):\n if sum(vec).item() != 1:\n x_lengths.append(idx)\n break\n if idx == 752:\n x_lengths.append(len(seq)) \n return x_lengths\n \n net = Model()\n return net", "def custom_dynamic_rnn(cell, inputs, inputs_len, initial_state=None):\n batch_size = tf.shape(inputs)[0]\n max_time = tf.shape(inputs)[1]\n\n inputs_ta = tf.TensorArray(dtype=tf.float32, size=max_time)\n inputs_ta = inputs_ta.unstack(tf.transpose(inputs, [1, 0, 2]))\n emit_ta = tf.TensorArray(dtype=tf.float32, dynamic_size=True, size=0)\n t0 = tf.constant(0, dtype=tf.int32)\n if initial_state is not None:\n s0 = initial_state\n else:\n s0 = cell.zero_state(batch_size, dtype=tf.float32)\n f0 = tf.zeros([batch_size], dtype=tf.bool)\n\n def loop_fn(t, prev_s, emit_ta, finished):\n \"\"\"\n the loop function of rnn\n \"\"\"\n cur_x = inputs_ta.read(t)\n scores, cur_state = cell(cur_x, prev_s)\n\n # copy through\n scores = tf.where(finished, tf.zeros_like(scores), scores)\n\n if isinstance(cell, tc.rnn.LSTMCell):\n cur_c, cur_h = cur_state\n prev_c, prev_h = prev_s\n cur_state = tc.rnn.LSTMStateTuple(tf.where(finished, prev_c, cur_c),\n tf.where(finished, prev_h, cur_h))\n else:\n cur_state = tf.where(finished, prev_s, cur_state)\n\n emit_ta = emit_ta.write(t, scores)\n finished = tf.greater_equal(t + 1, inputs_len)\n return [t + 1, cur_state, emit_ta, finished]\n\n _, state, emit_ta, _ = tf.while_loop(\n cond=lambda _1, _2, _3, finished: tf.logical_not(\n tf.reduce_all(finished)),\n body=loop_fn,\n loop_vars=(t0, s0, emit_ta, f0),\n parallel_iterations=32,\n swap_memory=False)\n\n outputs = tf.transpose(emit_ta.stack(), [1, 0, 2])\n return outputs, state", "def bi_lstm(X_inputs):\n # X_inputs.shape = [batchsize, timestep_size] -> inputs.shape = [batchsize, timestep_size, embedding_size]\n inputs = 
tf.nn.embedding_lookup(embedding, X_inputs)\n\n # ** 1.构建前向后向多层 LSTM\n cell_fw = rnn.MultiRNNCell([lstm_cell() for _ in range(layer_num)], state_is_tuple=True)\n cell_bw = rnn.MultiRNNCell([lstm_cell() for _ in range(layer_num)], state_is_tuple=True)\n\n # ** 2.初始状态\n initial_state_fw = cell_fw.zero_state(batch_size, tf.float32)\n initial_state_bw = cell_bw.zero_state(batch_size, tf.float32)\n\n # 下面两部分是等价的\n # **************************************************************\n # ** 把 inputs 处理成 rnn.static_bidirectional_rnn 的要求形式\n # ** 文档说明\n # inputs: A length T list of inputs, each a tensor of shape\n # [batch_size, input_size], or a nested tuple of such elements.\n # *************************************************************\n # Unstack to get a list of 'n_steps' tensors of shape (batch_size, n_input)\n # inputs.shape = [batchsize, timestep_size, embedding_size] -> timestep_size tensor, each_tensor.shape = [batchsize, embedding_size]\n # inputs = tf.unstack(inputs, timestep_size, 1)\n # ** 3.bi-lstm 计算(tf封装) 一般采用下面 static_bidirectional_rnn 函数调用。\n # 但是为了理解计算的细节,所以把后面的这段代码进行展开自己实现了一遍。\n # try:\n # outputs, _, _ = rnn.static_bidirectional_rnn(cell_fw, cell_bw, inputs,\n # initial_state_fw = initial_state_fw, initial_state_bw = initial_state_bw, dtype=tf.float32)\n # except Exception: # Old TensorFlow version only returns outputs not states\n # outputs = rnn.static_bidirectional_rnn(cell_fw, cell_bw, inputs,\n # initial_state_fw = initial_state_fw, initial_state_bw = initial_state_bw, dtype=tf.float32)\n # output = tf.reshape(tf.concat(outputs, 1), [-1, hidden_size * 2])\n # ***********************************************************\n\n # ***********************************************************\n # ** 3. bi-lstm 计算(展开)\n with tf.variable_scope('bidirectional_rnn'):\n # *** 下面,两个网络是分别计算 output 和 state\n # Forward direction\n outputs_fw = list()\n state_fw = initial_state_fw\n with tf.variable_scope('fw'):\n for timestep in range(timestep_size):\n if timestep > 0:\n tf.get_variable_scope().reuse_variables()\n (output_fw, state_fw) = cell_fw(inputs[:, timestep, :], state_fw)\n outputs_fw.append(output_fw)\n\n # backward direction\n outputs_bw = list()\n state_bw = initial_state_bw\n with tf.variable_scope('bw') as bw_scope:\n inputs = tf.reverse(inputs, [1])\n for timestep in range(timestep_size):\n if timestep > 0:\n tf.get_variable_scope().reuse_variables()\n (output_bw, state_bw) = cell_bw(inputs[:, timestep, :], state_bw)\n outputs_bw.append(output_bw)\n # *** 然后把 output_bw 在 timestep 维度进行翻转\n # outputs_bw.shape = [timestep_size, batch_size, hidden_size]\n outputs_bw = tf.reverse(outputs_bw, [0])\n # 把两个oupputs 拼成 [timestep_size, batch_size, hidden_size*2]\n output = tf.concat([outputs_fw, outputs_bw], 2)\n output = tf.transpose(output, perm=[1, 0, 2])\n output = tf.reshape(output, [-1, hidden_size * 2])\n # ***********************************************************\n return output # [-1, hidden_size*2]", "def __init__(\n self,\n input_size,\n hidden_size,\n num_layers=1,\n bidirectional=False,\n dropout=0,\n **kwargs\n ):\n super(LSTM, self).__init__(\n 'lstm', input_size, hidden_size,\n num_layers, bidirectional, dropout, **kwargs\n )", "def forward(self, input_data, hidden_state):\n batch_size = input_data.size(0)\n if hidden_state is None:\n hidden_state = torch.zeros(self._num_layers, batch_size, self._layer_size)\n hidden_state = [hidden_state, hidden_state] if self._controller_type.lower() == 'lstm' else hidden_state\n\n embedded_vector = self._embedding(input_data)\n 
output_vector, hidden_state_out = self._layer(embedded_vector.unsqueeze(0), hidden_state)\n output_vector = self._linear(output_vector.squeeze())\n return output_vector, hidden_state_out", "def __training_forward(self, h, h_lengths, y):\n batch_size = h.size()[1]\n input_seq_len = h.size()[0]\n output_seq_len = y.size()[0]\n\n # obtain embedding representations for the correct tokens\n # shift by one token (add <sos> token at the beginning of the sentences)\n start_tokens = torch.tensor([self.bos_token], dtype=y.dtype, device=self.device).repeat(batch_size, 1).t()\n y_input = torch.cat([start_tokens, y[:-1, :]], dim=0)\n y_emb = self.output_embedding.forward(y_input) # (output_seq_len x batch x embed_dim)\n\n # store hidden and cell states, at the beginning filled with zeros\n states_s = torch.zeros(input_seq_len+1, output_seq_len+1, batch_size, self.state_dim_2d, device=self.device)\n states_c = torch.zeros(input_seq_len+1, output_seq_len+1, batch_size, self.state_dim_2d, device=self.device)\n\n for diagonal_num in range(input_seq_len + output_seq_len - 1):\n # calculate the indices for input / states / etc. for this diagonal\n (ver_from, ver_to), (hor_from, hor_to) = LSTM2d.__calculate_input_ranges(diagonal_num=diagonal_num,\n input_seq_len=input_seq_len,\n output_seq_len=output_seq_len)\n ver_state_ranges, hor_state_ranges, diag_ranges = LSTM2d.__calculate_state_ranges((ver_from, ver_to),\n (hor_from, hor_to))\n ver_range_x, ver_range_y = ver_state_ranges\n hor_range_x, hor_range_y = hor_state_ranges\n diag_range_x, diag_range_y = diag_ranges\n\n # flip the output range so we take the inputs in the right order corresponding to the input range\n # Note: the 2d-cell with smallest source-position (horizontally) and largest target-position (vertically) is\n # the first cell in the diagonal!\n input_range = list(range(ver_from, ver_to))\n output_range = list(reversed(range(hor_from, hor_to)))\n diagonal_len = len(input_range) # always == len(output_range)\n\n # calculate x input for this diagonal\n # treat diagonal as though it was a larger batch and reshape inputs accordingly\n new_batch_size = diagonal_len * batch_size\n h_current = h[input_range, :, :].view(new_batch_size, 2*self.encoder_state_dim)\n y_current = y_emb[output_range, :, :].view(new_batch_size, self.embed_dim)\n x_current = torch.cat([h_current, y_current], dim=-1) # shape (batch*diagonal_len x input_dim)\n\n # calculate previous hidden & cell states for this diagonal\n s_prev_hor = states_s[hor_range_x, hor_range_y, :, :].view(new_batch_size, self.state_dim_2d)\n c_prev_hor = states_c[hor_range_x, hor_range_y, :, :].view(new_batch_size, self.state_dim_2d)\n s_prev_ver = states_s[ver_range_x, ver_range_y, :, :].view(new_batch_size, self.state_dim_2d)\n c_prev_ver = states_c[ver_range_x, ver_range_y, :, :].view(new_batch_size, self.state_dim_2d)\n\n # run batched computation for this diagonal\n c_next, s_next = self.cell2d.forward(x_current, s_prev_hor, s_prev_ver, c_prev_hor, c_prev_ver)\n\n # separate batch and diagonal_len again so we can store them accordingly\n c_next = c_next.view(diagonal_len, batch_size, self.state_dim_2d)\n s_next = s_next.view(diagonal_len, batch_size, self.state_dim_2d)\n\n # store new hidden and cell states at the right indices for the next diagonal(s) to use\n states_s[diag_range_x, diag_range_y, :, :] = s_next\n states_c[diag_range_x, diag_range_y, :, :] = c_next\n\n # for the prediction, take the last (valid, non-padded) column of states and all but the first (1:) row\n states_for_pred = 
states_s[h_lengths, 1:, range(batch_size), :].permute(1, 0, 2)\n states_for_pred = self.logits_dropout.forward(states_for_pred)\n\n y_pred = self.logits.forward(states_for_pred) # shape (output_seq_len x batch x output_vocab_size)\n return y_pred", "def recurrent(self, x, h_0, w_ih, w_hh, b_ih, b_hh, att_score):\n time_step = x.shape[0]\n outputs = []\n t = 0\n h = h_0\n while t < time_step:\n x_t = x[t:t + 1:1]\n x_t = P.Squeeze(0)(x_t)\n h = self.cell(x_t, h, w_ih, w_hh, b_ih, b_hh, att_score[t])\n if self.is_lstm:\n outputs.append(h[0])\n else:\n outputs.append(h)\n t += 1\n outputs = P.Stack()(outputs)\n return outputs, h", "def forward(self, x):\n x, self.hidden = self.lstm(x, self.hidden)\n self.detach_hidden()\n x = self.dropout(x)\n x = self.out(x)\n return x", "def __init__(self, num_vars, device, lag_max, hidden_size_lstm, hidden_size_mlp, num_outputs=1):\n super(LSTMgc, self).__init__()\n\n # LSTMs\n self.lstm_cell_list = nn.ModuleList()\n for state in range(num_vars):\n self.lstm_cell_list.append(nn.LSTMCell(lag_max, hidden_size_lstm))\n\n # MLP for prediction\n self.pred_mlp_l1 = nn.Linear(hidden_size_lstm * num_vars, hidden_size_mlp)\n self.pred_mlp_l2 = nn.Linear(hidden_size_mlp, num_outputs)\n\n # Initialise weights for each variable\n self.imp_weights = nn.Parameter(torch.Tensor(np.ones((num_vars,)) / num_vars + np.random.normal(0, 0.00001,\n (num_vars,))))\n\n # Initialise weights\n self.init_weights()\n\n # Save parameters\n self.num_vars = num_vars\n self.lag = lag_max\n self.hidden_size_lstm = hidden_size_lstm\n self.hidden_size_mlp = hidden_size_mlp\n\n # Initialise LSTM states\n self.lstm_state_list = []\n for state in range(num_vars):\n self.lstm_state_list.append((Variable(torch.zeros(1, self.hidden_size_lstm).float()).to(device),\n Variable(torch.zeros(1, self.hidden_size_lstm).float()).to(device)))", "def _build_rnn_graph_lstm(self, inputs, config, is_training):\n cell = util.create_lstm_cell(is_training, config)\n state = util.get_zero_state_for_the_cell(cell, config)\n\n self.initial_state = state\n with tf.variable_scope(\"RNN\"):\n inputs = tf.unstack(inputs, num=self.num_steps, axis=1)\n outputs, state = tf.contrib.rnn.static_rnn(cell, inputs,\n initial_state=self.initial_state)\n output = tf.reshape(tf.concat(outputs, 1), [-1, config.hidden_size])\n return output, state", "def Get_States(self):\n\n # Getting all hidden state throuh time\n all_hidden_states = tf.scan(self.LSTM,\n self.processed_input,\n initializer=self.initial_hidden,\n name='states')\n all_hidden_states=all_hidden_states[:,0,:,:]\n \n return all_hidden_states", "def lstmdouble(self):\n # Model.\n model = Sequential()\n model.add(LSTM(2048, return_sequences=True,\n input_shape=self.input_shape,\n dropout=0.0))\n model.add(LSTM(2048, return_sequences=True,\n input_shape=self.input_shape,\n dropout=0.0))\n #model.add(Flatten()) #qiao_added\n # model.add(Dense(1024, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(2048, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Flatten())\n model.add(Dense(self.nb_classes, activation='softmax'))\n return model", "def lstm_model(nlstm=128, layer_norm=False):\n\n def network_fn(X, nenv=1, obs_size=-1):\n with tf.variable_scope(\"emb\", reuse=tf.AUTO_REUSE):\n w_emb = tf.get_variable(\"w_emb\", [obs_size+1, 32])\n X = tf.nn.embedding_lookup(w_emb, X)\n\n nbatch = X.shape[0]\n nsteps = nbatch // nenv\n\n h = tf.layers.flatten(X)\n\n M = tf.placeholder(tf.float32, [nbatch]) #mask (done t-1)\n S = tf.placeholder(tf.float32, [nenv, 
2*nlstm]) #states\n\n xs = batch_to_seq(h, nenv, nsteps)\n ms = batch_to_seq(M, nenv, nsteps)\n\n assert not layer_norm\n h5, snew = lstm(xs, ms, S, scope='lstm', nh=nlstm)\n\n h = seq_to_batch(h5)\n initial_state = np.zeros(S.shape.as_list(), dtype=float)\n\n return h, {'S':S, 'M':M, 'state':snew, 'initial_state':initial_state}\n\n return network_fn", "def add_model(self, inputs):\n size = self.config.hidden_size\n forget_bias = self.config.forget_bias\n input_cell = self.config.input_cell\n\n if input_cell == 'BasicLSTMCell':\n lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(size, forget_bias)\n print 'Using Basic LSTM Cell \\n'\n\n elif input_cell == 'LSTMCell':\n lstm_cell = tf.nn.rnn_cell.LSTMCell(size, forget_bias)\n print 'Using LSTM Cell \\n'\n\n elif input_cell == 'GRUCell':\n lstm_cell = tf.nn.rnn_cell.GRUCell(size)\n print 'Using GRU Cell \\n'\n\n else:\n print \"Please Specify a Correct Cell Type\"\n\n lstm_cell = tf.nn.rnn_cell.DropoutWrapper(lstm_cell, output_keep_prob=self.config.dropout,\n input_keep_prob=self.config.dropout)\n\n cell = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * self.config.num_layers)\n \n print 'Number of Hidden Layers ', self.config.num_layers\n \n self.initial_state = cell.zero_state(self.config.batch_size, tf.float32)\n rnn_outputs = []\n state = self.initial_state\n\n with tf.variable_scope('RNNLM') as scope:\n for time_step in range(self.config.num_steps):\n if time_step > 0: scope.reuse_variables()\n (cell_output, state) = cell(inputs[:, time_step, :], state)\n rnn_outputs.append(cell_output)\n self.final_state = state\n\n return rnn_outputs", "def __init__(self, embedding_dim, hidden_dim, vocab_size, label_size, use_gpu, batch_size):\n super(BiLSTM, self).__init__()\n self.hidden_dim = hidden_dim\n self.use_gpu = use_gpu\n self.batch_size = batch_size\n self.embeddings = nn.Embedding(vocab_size, embedding_dim)\n self.lstm = nn.LSTM(input_size=embedding_dim, hidden_size=hidden_dim, bidirectional=True)\n self.hidden2label = nn.Linear(hidden_dim*2, label_size)\n self.hidden = self.init_hidden()", "def lstm_forward(x, h0, Wx, Wh, b):\n h, cache = None, None\n #############################################################################\n # TODO: Implement the forward pass for an LSTM over an entire timeseries. #\n # You should use the lstm_step_forward function that you just defined. 
#\n #############################################################################\n\n N,T,D = x.shape\n N,H = h0.shape\n prev_h = h0\n prev_c = np.zeros((N, H))\n\n h = np.zeros((N, T, H))\n cache = list()\n\n for t in range(T):\n next_h, next_c, t_cache = lstm_step_forward(x[:,t,:],prev_h,prev_c,Wx,Wh,b)\n prev_h = next_h\n prev_c = next_c\n h[:,t,:] = next_h\n cache.append(t_cache)\n ##############################################################################\n # END OF YOUR CODE #\n ##############################################################################\n\n return h, cache", "def forward(self, x, hidden):\n batch_size = x.size(0)\n\n # embeddings and lstm_out\n x = x.long()\n embeds = self.embedding(x)\n lstm_out, hidden = self.lstm(embeds, hidden)\n\n # print(f'lstm_out:{lstm_out.shape}')\n\n # stack up lstm outputs\n lstm_out = lstm_out.contiguous().view(-1, self.hidden_dim)\n\n # print(f'lstm_out flatten:{lstm_out.shape}')\n\n # dropout and fully-connected layer\n out = self.dropout(lstm_out)\n out = self.fc(out)\n # sigmoid function\n sig_out = self.sig(out)\n\n # print(f'sig_out:{sig_out.shape}')\n\n # reshape to be batch_size first\n sig_out = sig_out.view(batch_size, -1)\n sig_out = sig_out[:, -1] # get last batch of labels\n\n # print(f'sig_out last batch:{sig_out.shape}')\n\n # return last sigmoid output and hidden state\n return sig_out, hidden", "def __init__(self, vocab_size, tag_to_ix, embedding_dim, hidden_dim):\n super(BiLSTMCRF, self).__init__()\n self.embedding_dim = embedding_dim\n self.hidden_dim = hidden_dim\n self.vocab_size = vocab_size\n self.tag_to_ix = tag_to_ix\n self.tagset_size = len(tag_to_ix)\n \n self.embedding = nn.Embedding(self.vocab_size, embedding_dim)\n self.lstm = nn.LSTM(embedding_dim, hidden_dim // 2,\n num_layers=1, bidirectional=True)\n self.hidden2tag = nn.Linear(hidden_dim, self.tagset_size)\n self.transition = nn.Parameter(\n torch.randn(self.tagset_size, self.tagset_size))\n \n self.transition.data[self.tag_to_ix[START_TAG], :] = -10000.0\n self.transition.data[:, self.tag_to_ix[STOP_TAG]] = -10000.0\n self.hidden = self.init_hidden()", "def convert_rnn(g, op, block):\n\n def generate_lstm(\n input_seqs,\n hidden_state,\n cell_state,\n w_inp,\n w_hid,\n b_inp,\n b_hid,\n f_act,\n g_act,\n h_act,\n backwards=False,\n ):\n \"\"\"Implementation of LSTM cell for paddlepaddle of TVM\"\"\"\n\n h_list = []\n seq_length = len(input_seqs)\n for i in range(seq_length):\n step = input_seqs[i] if not backwards else input_seqs[seq_length - (i + 1)]\n step = _op.squeeze(step, axis=[0])\n gates = _op.nn.dense(step, w_inp) + _op.nn.dense(hidden_state, w_hid)\n if b_inp is not None:\n gates += b_inp\n if b_hid is not None:\n gates += b_hid\n i, f, c, o = _op.split(gates, 4, axis=-1)\n\n i = f_act(i)\n f = f_act(f)\n\n c = g_act(c)\n C = f * cell_state + i * c\n\n o = f_act(o)\n\n H = o * h_act(C)\n\n hidden_state = H\n cell_state = C\n h_list.append(_op.expand_dims(H, axis=0))\n\n if backwards:\n h_list = h_list[::-1]\n\n # Concatenate outputs and add back in direction axis.\n concatenated = _op.concatenate(h_list, 0)\n output = _op.expand_dims(concatenated, axis=1)\n hidden_state = _op.expand_dims(hidden_state, axis=0)\n cell_state = _op.expand_dims(cell_state, axis=0)\n\n return output, hidden_state, cell_state\n\n def generate_gru(\n input_seqs, hidden_state, w_inp, w_hid, b_inp, b_hid, rz_act, n_act, backwards=False\n ):\n \"\"\"Implementation of GRU cell for paddlepaddle of TVM\"\"\"\n\n h_list = []\n seq_length = len(input_seqs)\n for i in 
range(seq_length):\n step = input_seqs[i] if not backwards else input_seqs[seq_length - (i + 1)]\n step = _op.squeeze(step, axis=[0])\n xwt = _op.nn.dense(step, w_inp)\n hwt = _op.nn.dense(hidden_state, w_hid)\n if b_inp is not None:\n xwt += b_inp\n if b_hid is not None:\n hwt += b_hid\n i_r, i_z, i_n = _op.split(xwt, 3, axis=-1)\n h_r, h_z, h_n = _op.split(hwt, 3, axis=-1)\n\n r_gate = rz_act(i_r + h_r)\n z_gate = rz_act(i_z + h_z)\n n_gate = n_act(i_n + r_gate * h_n)\n\n hidden_state = (hidden_state - n_gate) * z_gate + n_gate\n h_list.append(_op.expand_dims(hidden_state, axis=0))\n\n if backwards:\n h_list = h_list[::-1]\n\n # Concatenate outputs and add back in direction axis.\n concatenated = _op.concatenate(h_list, 0)\n output = _op.expand_dims(concatenated, axis=1)\n hidden_state = _op.expand_dims(hidden_state, axis=0)\n\n return output, hidden_state\n\n def generate_simplernn(\n input_seqs, hidden_state, w_inp, w_hid, b_inp, b_hid, n_act, backwards=False\n ):\n \"\"\"Implementation of SimpleRNN cell for paddlepaddle of TVM\"\"\"\n\n h_list = []\n seq_length = len(input_seqs)\n for i in range(seq_length):\n step = input_seqs[i] if not backwards else input_seqs[seq_length - (i + 1)]\n step = _op.squeeze(step, axis=[0])\n xwt = _op.nn.dense(step, w_inp)\n hwt = _op.nn.dense(hidden_state, w_hid)\n if b_inp is not None:\n xwt += b_inp\n if b_hid is not None:\n hwt += b_hid\n\n n_gate = n_act(xwt + hwt)\n\n hidden_state = n_gate\n h_list.append(_op.expand_dims(hidden_state, axis=0))\n\n if backwards:\n h_list = h_list[::-1]\n\n # Concatenate outputs and add back in direction axis.\n concatenated = _op.concatenate(h_list, 0)\n output = _op.expand_dims(concatenated, axis=1)\n hidden_state = _op.expand_dims(hidden_state, axis=0)\n\n return output, hidden_state\n\n def make_param_inputs(g, node, layer, hidden_size, num_layers):\n \"\"\"Param for weight and bias.\"\"\"\n\n bidirect_len = 4 if node.attr(\"is_bidirec\") else 2\n all_layer_param_len = len(node.input(\"WeightList\"))\n weight_list = node.input(\"WeightList\")[: all_layer_param_len // 2]\n bias_list = node.input(\"WeightList\")[all_layer_param_len // 2 :]\n\n layer_weight_list = weight_list[layer * bidirect_len : layer * bidirect_len + bidirect_len]\n layer_bias_list = bias_list[layer * bidirect_len : layer * bidirect_len + bidirect_len]\n param_list = layer_weight_list + layer_bias_list\n param_list_len = len(param_list)\n\n input_weights = param_list[0 : param_list_len // 2 : 2]\n hidden_weights = param_list[1 : param_list_len // 2 : 2]\n\n input_bias = param_list[param_list_len // 2 : param_list_len : 2]\n hidden_bias = param_list[param_list_len // 2 + 1 : param_list_len : 2]\n\n return input_weights, hidden_weights, input_bias, hidden_bias\n\n def make_init_param_inputs(g, node, layer):\n \"\"\"Init param for inputs.\"\"\"\n\n mode = node.attr(\"mode\")\n if mode == \"LSTM\":\n all_init_h, all_init_c = node.input(\"PreState\")\n bidirect_len = 2 if node.attr(\"is_bidirec\") else 1\n init_h = _op.strided_slice(\n g.get_node(all_init_h),\n [layer * bidirect_len],\n [layer * bidirect_len + bidirect_len],\n axes=[0],\n )\n init_c = _op.strided_slice(\n g.get_node(all_init_c),\n [layer * bidirect_len],\n [layer * bidirect_len + bidirect_len],\n axes=[0],\n )\n return init_h, init_c\n all_init_h = node.input(\"PreState\")[0]\n bidirect_len = 2 if node.attr(\"is_bidirec\") else 1\n init_h = _op.strided_slice(\n g.get_node(all_init_h),\n [layer * bidirect_len],\n [layer * bidirect_len + bidirect_len],\n axes=[0],\n )\n return 
init_h\n\n hidden_size = op.attr(\"hidden_size\")\n num_layers = op.attr(\"num_layers\")\n is_bidirec = op.attr(\"is_bidirec\")\n mode = op.attr(\"mode\")\n\n input_x = g.get_node(op.input(\"Input\")[0])\n\n num_directions = 1\n if is_bidirec:\n num_directions = 2\n\n x_shape = infer_shape(input_x)\n time_steps = x_shape[0]\n x_steps = _op.split(input_x, indices_or_sections=time_steps, axis=0)\n for layer in range(num_layers):\n input_weights, hidden_weights, input_bias, hidden_bias = make_param_inputs(\n g, op, layer, hidden_size, num_layers\n )\n if mode == \"LSTM\":\n init_h, init_c = make_init_param_inputs(g, op, layer)\n init_hs = _op.split(init_h, num_directions)\n init_cs = _op.split(init_c, num_directions)\n result_output = []\n result_H = []\n result_C = []\n for i in range(num_directions):\n H_t = _op.squeeze(init_hs[i], axis=[0])\n C_t = _op.squeeze(init_cs[i], axis=[0])\n W = g.get_node(input_weights[i])\n R = g.get_node(hidden_weights[i])\n WB = g.get_node(input_bias[i])\n RB = g.get_node(hidden_bias[i])\n output, H, C = generate_lstm(\n input_seqs=x_steps,\n hidden_state=H_t,\n cell_state=C_t,\n w_inp=W,\n w_hid=R,\n b_inp=WB,\n b_hid=RB,\n f_act=_op.sigmoid,\n g_act=_op.tanh,\n h_act=_op.tanh,\n backwards=i == 1,\n )\n result_output.append(output)\n result_H.append(H)\n result_C.append(C)\n output = _op.concatenate(result_output, axis=1)\n H = _op.concatenate(result_H, axis=0)\n C = _op.concatenate(result_C, axis=0)\n elif mode == \"GRU\":\n init_h = make_init_param_inputs(g, op, layer)\n init_hs = _op.split(init_h, num_directions)\n result_output = []\n result_H = []\n for i in range(num_directions):\n H_t = _op.squeeze(init_hs[i], axis=[0])\n W = g.get_node(input_weights[i])\n R = g.get_node(hidden_weights[i])\n WB = g.get_node(input_bias[i])\n RB = g.get_node(hidden_bias[i])\n output, H = generate_gru(\n input_seqs=x_steps,\n hidden_state=H_t,\n w_inp=W,\n w_hid=R,\n b_inp=WB,\n b_hid=RB,\n rz_act=_op.sigmoid,\n n_act=_op.tanh,\n backwards=i == 1,\n )\n result_output.append(output)\n result_H.append(H)\n output = _op.concatenate(result_output, axis=1)\n H = _op.concatenate(result_H, axis=0)\n elif mode == \"RNN_TANH\":\n init_h = make_init_param_inputs(g, op, layer)\n init_hs = _op.split(init_h, num_directions)\n result_output = []\n result_H = []\n for i in range(num_directions):\n H_t = _op.squeeze(init_hs[i], axis=[0])\n W = g.get_node(input_weights[i])\n R = g.get_node(hidden_weights[i])\n WB = g.get_node(input_bias[i])\n RB = g.get_node(hidden_bias[i])\n output, H = generate_simplernn(\n input_seqs=x_steps,\n hidden_state=H_t,\n w_inp=W,\n w_hid=R,\n b_inp=WB,\n b_hid=RB,\n n_act=_op.tanh,\n backwards=i == 1,\n )\n result_output.append(output)\n result_H.append(H)\n output = _op.concatenate(result_output, axis=1)\n H = _op.concatenate(result_H, axis=0)\n\n output = _op.transpose(output, axes=[0, 2, 1, 3])\n output = _op.reshape(output, newshape=(0, 0, -1))\n x_steps = _op.split(output, indices_or_sections=time_steps, axis=0)\n\n g.add_node(op.output(\"Out\")[0], output)", "def get_output_for(self, input, **kwargs):\n\t\tbatch_size = input.shape[0]\n\t\tnum_states = self.num_states\n\t\timg_dim = self.img_dim\n\t\tN = self.N\n\t\tattender = self.attender\n\t\ttheano.gradient.grad_clip\n\n\t\tdef step(c_tm1, h_tm1, att_acc_tm1, input, W, Wg):\n\t\t\tcenter_y, center_x, delta, sigma, gamma = gp_from_hidden(h_tm1, Wg, img_dim, N)\n\t\t\tg, att = attender.read(input, center_y, center_x, delta, sigma, gamma) # (batch_size, N, N) and (batch_size, img_dim, 
img_dim)\n\t\t\n\t\t\tatt_acc_t = T.clip(att_acc_tm1 + att, 0.0, 1.0)\t# (batch_size, img_dim, img_dim)\n\t\t\tr = input[:, :, :img_dim] * (1.0 - att_acc_t) # (batch_size, img_dim, img_dim)\n\t\t\tR , _ = attender.read(r, *gp_from_hidden(T.zeros((batch_size, 5)), T.eye(5), img_dim, N)) # (batch_size, N, N)\n\t\t\t\n\t\t\tflat_g = g.reshape((batch_size, N * N)) # (batch_size, N * N)\n\t\t\tflat_R = R.reshape((batch_size, N * N)) # (batch_size, N * N)\n\t\t\t\n\t\t\t# concatenate gA, gB and h_tm1 to form a single matrix # (batch_size, N * N + N * N + num_states + 1)\n\t\t\tlstm_inp = T.concatenate([flat_g, flat_R, h_tm1, T.ones((batch_size, 1))], axis=1)\n\n\t\t\t# multiply by LSTM weights\n\t\t\t# (num_states * 4, num_input + num_states + 1) dot (batch_size, N * N + N * N + num_states + 1).T\n\t\t\tpre_act = T.dot(W, lstm_inp.T) \t# (4 * num_states, batch_size)\n\n\t\t\t# split up to get individual gates\n\t\t\tz = T.tanh(pre_act[0*num_states:1*num_states]) # (num_states, batch_size)\n\t\t\ti = T.nnet.sigmoid(pre_act[1*num_states:2*num_states])\n\t\t\tf = T.nnet.sigmoid(pre_act[2*num_states:3*num_states])\n\t\t\to = T.nnet.sigmoid(pre_act[3*num_states:4*num_states])\n\n\t\t\t# do LSTM update\n\t\t\tc_t = f * c_tm1.T + i * z\n\t\t\th_t = o * T.tanh(c_t)\n\n\t\t\treturn c_t.T, h_t.T, att_acc_t\t# 1, 2: (batch_size, num_states); 3, 4: (batch_size, img_dim, img_dim)\n\n\t\tc0 = T.zeros((batch_size, num_states))\n\t\th0 = T.zeros((batch_size, num_states))\n\t\tatt_acc0 = T.zeros((batch_size, img_dim, img_dim))\n\t\t\n\t\tcells, hiddens, att_acc_T = theano.scan(fn=step, non_sequences=[input, self.W, self.Wg], outputs_info=[c0, h0, att_acc0], \n\t\t\t\t\t\t\t\t\t\tn_steps=self.num_glimpses, strict=True)[0]\n\t\tif self.final_state_only:\n\t\t\treturn hiddens[-1]\n\t\telse:\n\t\t\treturn hiddens", "def __init__(self, vocab_size, output_size, embedding_dim, hidden_dim, n_layers, drop_prob=0.7):\n super(SentimentLSTM, self).__init__()\n\n self.output_size = output_size\n self.n_layers = n_layers\n self.hidden_dim = hidden_dim\n\n\n # define all layers\n self.embed = nn.Embedding(vocab_size,embedding_dim)\n self.lstm = nn.LSTM(embedding_dim,hidden_dim,n_layers,dropout=drop_prob,batch_first=True)\n self.fc = nn.Linear(hidden_dim,output_size)\n self.sigmoid = nn.Sigmoid()\n self.drp = nn.Dropout(p=0.7)", "def model_create_lstm(input_dim, output_dim, n_features, n_houses, x_train, y_train, x_test, y_test, early=None):\r\n model = Sequential()\r\n for _ in range(nn_hparams['num_layers']):\r\n model.add(LSTM(nn_hparams['units'], activation=nn_hparams['activation'], input_shape=(input_dim,n_features), return_sequences=True))\r\n model.add(Dropout(nn_hparams['dropout']))\r\n model.add(Flatten())\r\n model.add(Dense(y_train.shape[1]*y_train.shape[2]))\r\n custom_optimizer = getattr(optimizers, nn_hparams['optimizer'])(lr=nn_hparams['learning_rate'], beta_1=nn_hparams['beta_1'], beta_2=nn_hparams['beta_2'])\r\n model.compile(optimizer=custom_optimizer, loss=nn_hparams['loss'])\r\n y_train = y_train.reshape((y_train.shape[0], y_train.shape[1]*y_train.shape[2]))\r\n y_test = y_test.reshape((y_test.shape[0], y_test.shape[1]*y_test.shape[2]))\r\n if early:\r\n model.fit(x_train, y_train, validation_data=(x_test,y_test), epochs=nn_hparams['epochs'], verbose=1, callbacks=[early])\r\n else:\r\n model.fit(x_train, y_train, validation_data=(x_test,y_test), epochs=nn_hparams['epochs'], verbose=1)\r\n model_loss = model.evaluate(x_train, y_train, verbose=0)\r\n \r\n return model, model_loss", "def get_rnn(X, rnn_size, 
seq_len, batch_size, num_layers=1, input_keep_prob=1.0, output_keep_prob=1.0, is_training=False,\n cell_name=\"BasicLSTM\", bidirectional=False):\n with tf.device(\"/cpu:0\"):\n # Convert input tensor to python list (along the sequence length dimention)\n word_embeddings = tf.split(1, seq_len, X)\n word_embeddings = [tf.squeeze(embed_, [1]) for embed_ in word_embeddings]\n\n # if is_training and keep_prob < 1:\n # word_embeddings = [tf.nn.dropout(input_, keep_prob) for input_ in word_embeddings]\n\n def get_cell():\n if cell_name == \"GRU\": # GRU\n cell = rnn_cell.GRUCell(rnn_size)\n elif cell_name == \"LSTM\": # LSTM\n cell = rnn_cell.LSTMCell(rnn_size, tf.shape(X)[2])\n else:\n cell = rnn_cell.BasicLSTMCell(rnn_size)\n if is_training and (input_keep_prob < 1 or output_keep_prob < 1):\n cell = rnn_cell.DropoutWrapper(cell, input_keep_prob=input_keep_prob, output_keep_prob=output_keep_prob)\n cell = rnn_cell.MultiRNNCell([cell] * num_layers)\n initial_state = cell.zero_state(batch_size, tf.float32)\n return cell, initial_state\n\n if bidirectional:\n with tf.variable_scope(\"forward\"):\n cell_fw, initial_state_fw = get_cell()\n with tf.variable_scope(\"backward\"):\n cell_bw, initial_state_bw = get_cell()\n return rnn.bidirectional_rnn(cell_fw, cell_bw, word_embeddings,\n initial_state_fw=initial_state_fw, initial_state_bw=initial_state_bw)\n else:\n cell, initial_state = get_cell()\n return rnn.rnn(cell, word_embeddings, initial_state=initial_state)", "def build_lstm11(embeddings, shape, settings):\n model = Sequential()\n model.add(\n Embedding(\n embeddings.shape[0],\n embeddings.shape[1],\n input_length=shape['max_length'],\n trainable=False,\n weights=[embeddings],\n mask_zero=False\n )\n )\n model.add(TimeDistributed(Dense(shape['n_hidden'], use_bias=False), name='td9a'))\n model.add(Bidirectional(LSTM(shape['n_hidden'], return_sequences=True,\n recurrent_dropout=settings['dropout'],\n dropout=settings['dropout']), name='bidi9a'))\n # model.add(GlobalMaxPool1D())\n # model.add(BatchNormalization())\n # model.add(Dropout(settings['dropout'] / 2.0))\n\n # model.add(TimeDistributed(Dense(shape['n_hidden'], use_bias=False), name='td9b'))\n model.add(Bidirectional(LSTM(shape['n_hidden'] // 2, return_sequences=True,\n recurrent_dropout=settings['dropout'],\n dropout=settings['dropout']), name='bidi9b'))\n model.add(Bidirectional(LSTM(shape['n_hidden'] // 2, return_sequences=True,\n recurrent_dropout=settings['dropout'],\n dropout=settings['dropout']), name='bidi9c'))\n model.add(GlobalMaxPool1D(name='mp9'))\n model.add(BatchNormalization(name='bn9'))\n model.add(Dropout(settings['dropout'] / 2.0, name='drop9b'))\n\n model.add(Dense(shape['n_class'], activation='sigmoid', name='den9b'))\n xprint('build_lstm9: embeddings=%s shape=%s' % (dim(embeddings), shape))\n return model", "def forward(self, data, time_steps, lengths):\n data_packed = pack_padded_sequence(data, lengths, batch_first=True, enforce_sorted=False)\n _, hidden = self.rnn(data_packed)\n assert hidden.size(1) == data.size(0)\n assert hidden.size(2) == self.latent_dim\n\n # check if bidirectional\n if hidden.size(0) == 1:\n hidden = hidden.squeeze(0)\n elif hidden.size(0) == 2:\n hidden = torch.cat((hidden[0], hidden[1]), dim=-1)\n else:\n raise ValueError('Incorrect RNN hidden state.')\n\n # extract mean and logvar\n mean_logvar = self.hidden_to_z0(hidden)\n assert mean_logvar.size(-1) == 2 * self.latent_dim\n mean, logvar = mean_logvar[:, :self.latent_dim], mean_logvar[:, self.latent_dim:]\n return mean, logvar", "def 
forward(self, x, hidden):\n batch_size=x.shape[0]\n\n x = self.embed(x)\n\n x,hidden = self.lstm(x,hidden)\n\n x = x.reshape(-1,self.hidden_dim)\n\n x = self.drp(x)\n\n x = self.fc(x)\n\n sig_out = self.sigmoid(x)\n\n # return last sigmoid output and hidden state\n sig_out = sig_out.reshape(batch_size,-1)\n sig_out = sig_out[:,-1]\n\n return sig_out, hidden", "def __init__(self, embeddings, hidden_dim, output_size, dropout_emb, dropout_lstm):\n super(BaselineLSTMModel, self).__init__()\n\n # 1) embedding layer:\n trainable_emb = False\n self.word_embeddings = nn.Embedding(num_embeddings=embeddings.shape[0],\n embedding_dim=embeddings.shape[1])\n self.init_embeddings(embeddings, trainable_emb)\n self.drop_emb = nn.Dropout(dropout_emb)\n\n # 2) LSTM layer\n self.hidden_dim = hidden_dim\n self.lstm = nn.LSTM(embeddings.shape[1], hidden_dim, batch_first=True,\n dropout=dropout_lstm)\n self.drop_lstm = nn.Dropout(dropout_lstm)\n\n # 3) linear layer -> outputs\n self.hidden2output = nn.Linear(hidden_dim, output_size)", "def __init__(self, embedding_size=300, lstm_layer_num=3,\r\n max_time_size=50, cell_size=100, forget_bias=0.0,\r\n l2_reg_lambda=0.0, class_num=8):\r\n # begin\r\n \"\"\"\r\n constant store in model. benefit: when load model can show the constant\r\n arguments.\r\n dropout not used in test step, move to outside.\r\n \"\"\"\r\n _l2_reg_lambda = tf.constant(l2_reg_lambda, dtype=tf.float32,\r\n name=\"l2_reg_lambda\")\r\n _lstm_layer_num = tf.constant(lstm_layer_num, dtype=tf.int32,\r\n name=\"lstm_layer_num\")\r\n _cell_size = tf.constant(cell_size, dtype=tf.int32,\r\n name=\"cell_size\")\r\n _max_time_size = tf.constant(max_time_size, dtype=tf.int32,\r\n name=\"max_time_size\")\r\n \"\"\"\r\n Placeholders for input, output and dropout.\r\n \"\"\"\r\n # inputs = tf.placeholder(shape=(max_time, batch_size, input_depth),\r\n # dtype=tf.float32)\r\n self.input_x = tf.placeholder(\r\n shape=(None, embedding_size, max_time_size),\r\n dtype=tf.float32,\r\n name=\"input_x\")\r\n batch_size = tf.shape(self.input_x)[0]\r\n self.input_y = tf.placeholder(shape=(None, class_num), dtype=tf.float32,\r\n name=\"input_y\")\r\n self.input_keep_prob = tf.placeholder(tf.float32,\r\n name=\"input_keep_prob\")\r\n self.output_keep_prob = tf.placeholder(\r\n tf.float32,\r\n name=\"output_keep_prob\"\r\n )\r\n # Keeping track of l2 regularization loss (optional)\r\n l2_loss = tf.constant(0.0)\r\n\r\n def lstm_cell_func():\r\n # LSTM Cell, hidden size larger, remenber more detail\r\n lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(\r\n cell_size,\r\n forget_bias=forget_bias,\r\n state_is_tuple=True)\r\n \"\"\"\r\n add dropout, dnn dropout different from cnn.\r\n in_keep_prob: input keep probability(the probability of h_t == 0).\r\n out_keep_prob: output keep probability(the probability of h_{t+1} == 0).\r\n \"\"\"\r\n\r\n lstm_cell = tf.nn.rnn_cell.DropoutWrapper(\r\n lstm_cell,\r\n input_keep_prob=self.input_keep_prob,\r\n output_keep_prob=self.output_keep_prob)\r\n \"\"\"What's the benefit of multiple LSTM hidden layer?\r\n point 1: An interesting property of multilayer LSTMs is that it allows to\r\n perform hierarchical processing on difficult temporal tasks, and more\r\n naturally capture the structure of sequences.\r\n point 2: The purpose of using multilayer RNN cells is to learn more\r\n sophisticated conditional distributions\"\"\"\r\n return lstm_cell\r\n cell = tf.nn.rnn_cell.MultiRNNCell(\r\n [lstm_cell_func() for _ in range(lstm_layer_num)], state_is_tuple=True)\r\n with tf.name_scope(\"lstm\"):\r\n 
state = cell.zero_state(batch_size, tf.float32) # sents counte\r\n # with tf.name_scope(\"lstm\"):\r\n with tf.variable_scope(tf.get_variable_scope()) as scope:\r\n for time_step in range(max_time_size):\r\n if time_step > 0:\r\n tf.get_variable_scope().reuse_variables()\r\n (h_t, state) = cell(self.input_x[:,:,time_step], state)\r\n h = h_t\r\n # 全连阶层\r\n with tf.name_scope(\"full_cont_layer\"):\r\n W1 = tf.Variable(tf.truncated_normal([cell_size, class_num], stddev=0.1), name=\"W1\")\r\n W2 = tf.Variable(tf.truncated_normal([cell_size, class_num], stddev=0.1), name=\"W2\")\r\n W3 = tf.Variable(tf.truncated_normal([cell_size, class_num], stddev=0.1), name=\"W3\")\r\n b1 = tf.Variable(tf.constant(0.1, shape=[class_num]), name=\"b1\")\r\n b2 = tf.Variable(tf.constant(0.1, shape=[class_num]), name=\"b2\")\r\n b3 = tf.Variable(tf.constant(0.1, shape=[class_num]), name=\"b3\")\r\n l2_loss += tf.nn.l2_loss(W1) + tf.nn.l2_loss(W2) + tf.nn.l2_loss(W3)\r\n l2_loss += tf.nn.l2_loss(b1) + tf.nn.l2_loss(b2) + tf.nn.l2_loss(b3)\r\n self.scores = tf.nn.xw_plus_b(h, W1, b1, name=\"scores\")\r\n # self.score = tf.matmul(h, W) + b\r\n self.predictions = tf.argmax(self.scores, 1, name=\"predictions\")\r\n\r\n # CalculateMean cross-entropy loss\r\n with tf.name_scope(\"loss\"):\r\n # losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores,\r\n # labels=self.input_y)\r\n losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores+1e-10, labels=self.input_y)\r\n \"\"\"sparse softmax cross entropy do not need to transform labels to\r\n one-hot matrix. and \"\"\"\r\n self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss\r\n\r\n # Accuracy\r\n with tf.name_scope(\"accuracy\"):\r\n correct_predictions = tf.equal(self.predictions,\r\n tf.argmax(self.input_y, 1))\r\n self.accuracy = tf.reduce_mean(\r\n tf.cast(correct_predictions, \"float\"), name=\"accuracy\")", "def forward(self, x):\n batch_size = x.size(0)\n out,_ = self.lstm(x) #out = batch, seq_len, num_directions * hidden_size\n out1 = out[:, -1, :16] #最后一层正向传播的最后一个timestep\n out2 = out[:, 0, 16:] #最后一层反向传播最后一个timestep\n out = torch.cat((out1,out2), dim=1)\n out = self.fc(out)\n\n return out", "def forward(self, input: torch.Tensor, hidden_state: torch.Tensor, cell_state: torch.Tensor) ->Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n input = input.unsqueeze(1)\n output = self.embedding(input)\n output, (hidden_state, cell_state) = self.lstm(output, (hidden_state, cell_state))\n output_logits = self.out(output)\n return output_logits, hidden_state, cell_state", "def lstm2():\n return render_template(\n 'lstm2.html',\n title='LSTM',\n year=datetime.now().year,\n message='Your LSTM page.'\n )", "def test_lstm_basic(self):\n\n class SimpleLSTM(nn.Module):\n def __init__(self):\n super(SimpleLSTM, self).__init__()\n self.rnn = torch.nn.LSTM(12, 10, 1)\n w2 = torch.randn(40, 10)\n w1 = torch.randn(40, 12)\n b1 = torch.randn(40)\n b2 = torch.randn(40)\n self.rnn.training = False\n self.rnn.weight_ih_l0 = torch.nn.Parameter(w1)\n self.rnn.weight_hh_l0 = torch.nn.Parameter(w2)\n self.rnn.bias_ih_l0 = torch.nn.Parameter(b1)\n self.rnn.bias_hh_l0 = torch.nn.Parameter(b2)\n\n def forward(self, inputs, h, c):\n return self.rnn(inputs, (h, c))\n\n inputs = torch.randn(10, 3, 12)\n h = torch.randn(1, 3, 10)\n c = torch.randn(1, 3, 10)\n model = SimpleLSTM()\n\n utils.compare_tracing_methods(\n model, inputs, h, c, fusible_ops={\"aten::lstm\"}, skip_to_glow=True\n )", "def lstm2():\n return render_template(\n 'lstm2.html',\n title='LSTM',\n 
year=\"2020\",\n message='Your LSTM page.'\n )", "def init_hidden(self, batch_size: int, device: Device) -> AmbiguousHidden:\n hidden_zero = torch.zeros(batch_size, self.hidden_size).to(device)\n\n if self.rnn_type == \"LSTM\":\n return hidden_zero, hidden_zero.clone()\n else:\n return hidden_zero", "def lstm_model(input_size, output_size, embedding, num_nodes, num_unrollings, batch_size,\n learning_rate, exp_decay = None, gradient_max_value = 1.25, dropout_prob = 0.0):\n\n graph = tf.Graph()\n with graph.as_default():\n # [ix, fx, cx, ox]\n x_mat = training.utils.gaussian_weights_variable([input_size, 4*num_nodes])\n # [im, fm, cm, om]\n o_mat = training.utils.gaussian_weights_variable([num_nodes, 4*num_nodes])\n # [ib, fb, cb, ob]\n b_vec = tf.Variable(tf.zeros([1, 4*num_nodes]))\n\n # Variables saving state across unrollings.\n saved_output = tf.Variable(tf.zeros([batch_size, num_nodes]), trainable=False)\n saved_state = tf.Variable(tf.zeros([batch_size, num_nodes]), trainable=False)\n # Classifier weights and biases.\n w = training.utils.gaussian_weights_variable([num_nodes, output_size])\n b = tf.Variable(tf.zeros([output_size]))\n\n # Definition of the cell computation.\n def lstm_cell(i, o, state):\n \"\"\"Create a LSTM cell. See e.g.: http://arxiv.org/pdf/1402.1128v1.pdf\n Note that in this formulation, we omit the various connections between the\n previous state and the gates.\"\"\"\n mult = tf.matmul(i, x_mat) + tf.matmul(o, o_mat) + b_vec\n\n input_gate = tf.sigmoid(mult[:, 0:num_nodes])\n forget_gate = tf.sigmoid(mult[:, num_nodes:2*num_nodes])\n state = forget_gate * state + input_gate * tf.tanh(mult[:, 2*num_nodes:3*num_nodes])\n output_gate = tf.sigmoid(mult[:, 3*num_nodes:4*num_nodes])\n return output_gate * tf.tanh(state), state\n\n # Input data.\n before_embedding_size = input_size\n if embedding is not None:\n before_embedding_size = embedding.shape[0]\n\n train_data = list()\n for _ in range(num_unrollings + 1):\n train_data.append(\n tf.placeholder(tf.float32, shape=[batch_size, before_embedding_size]))\n train_inputs = train_data[:num_unrollings]\n train_labels = train_data[1:] # Labels are inputs shifted by one time step.\n\n # Unrolled LSTM loop.\n outputs = list()\n output = saved_output\n state = saved_state\n for i in train_inputs:\n if embedding is not None:\n # Converting the input to the embedding.\n indices = tf.argmax(i, 1)\n i = tf.nn.embedding_lookup(embedding, indices)\n # Dropout is only applied to inputs, not to recurrent connections.\n i = tf.nn.dropout(i, 1 - dropout_prob)\n output, state = lstm_cell(i, output, state)\n outputs.append(output)\n\n # State saving across unrollings.\n with tf.control_dependencies([saved_output.assign(output),\n saved_state.assign(state)]):\n # Classifier.\n # Dropout is also applied to the output of the LSTM cell, only when\n # used for the projection, as it is not recurrent.\n outputs = tf.concat(0, outputs)\n outputs = tf.nn.dropout(outputs, 1 - dropout_prob)\n logits = tf.nn.xw_plus_b(outputs, w, b)\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\n logits, tf.concat(0, train_labels)))\n\n # Optimizer.\n global_step = tf.Variable(0)\n\n if exp_decay is not None:\n learning_rate = tf.train.exponential_decay(\n learning_rate, global_step,\n exp_decay['decay_steps'], exp_decay['decay_rate'], exp_decay['staircase'])\n\n optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n # Clipping to avoid exploding gradient.\n gradients, v = zip(*optimizer.compute_gradients(loss))\n gradients, _ = 
tf.clip_by_global_norm(gradients, gradient_max_value)\n optimizer = optimizer.apply_gradients(zip(gradients, v), global_step=global_step)\n\n # Predictions.\n train_prediction = tf.nn.softmax(logits)\n\n # Sampling and validation eval: batch 1, no unrolling.\n sample_input_ph = tf.placeholder(tf.float32, shape=[1, before_embedding_size])\n saved_sample_output = tf.Variable(tf.zeros([1, num_nodes]))\n saved_sample_state = tf.Variable(tf.zeros([1, num_nodes]))\n reset_sample_state = tf.group(\n saved_sample_output.assign(tf.zeros([1, num_nodes])),\n saved_sample_state.assign(tf.zeros([1, num_nodes])))\n\n sample_input = sample_input_ph\n if embedding is not None:\n indices = tf.argmax(sample_input_ph, 1)\n sample_input = tf.nn.embedding_lookup(embedding, indices)\n\n sample_output, sample_state = lstm_cell(\n sample_input, saved_sample_output, saved_sample_state)\n with tf.control_dependencies([saved_sample_output.assign(sample_output),\n saved_sample_state.assign(sample_state)]):\n sample_prediction = tf.nn.softmax(tf.nn.xw_plus_b(sample_output, w, b))\n\n tf_graph = {\n 'graph': graph,\n 'data_ph': train_data,\n 'sample_ph': sample_input_ph }\n tf_predictions = [train_prediction, sample_prediction]\n\n return tf_graph, optimizer, loss, tf_predictions, reset_sample_state", "def extract_hidden_states(self, output):\n # Intermediate hidden states\n output_fw_intermediate = output[:,:-1,0:self._hidden_size]\n output_bw_intermediate = output[:,1:,self._hidden_size:] \n \n # Last hidden states\n output_fw = output[:,-1,0:self._hidden_size]\n output_bw = output[:,0,self._hidden_size:]\n last_ht = torch.cat((output_fw, output_bw), -1)\n \n return last_ht, output_fw_intermediate, output_bw_intermediate", "def build_lstm8(embeddings, shape, settings):\n model = Sequential()\n model.add(\n Embedding(\n embeddings.shape[0],\n embeddings.shape[1],\n input_length=shape['max_length'],\n trainable=False,\n weights=[embeddings],\n mask_zero=False,\n name='eembed'\n )\n )\n model.add(TimeDistributed(Dense(shape['n_hidden'], use_bias=False, name='td8')))\n model.add(Bidirectional(LSTM(shape['n_hidden'], return_sequences=True,\n recurrent_dropout=settings['dropout'],\n dropout=settings['dropout']), name='bidi'))\n model.add(Flatten(name='flaaten'))\n model.add(BatchNormalization())\n model.add(Dropout(settings['dropout'] / 2.0))\n model.add(Dense(shape['n_class'], activation='sigmoid'))\n xprint('build_lstm8: embeddings=%s shape=%s' % (dim(embeddings), shape))\n return model", "def Bilstm(self):\n # Model.\n model = Sequential()\n # model.add(Bidirectional(LSTM(2048, return_sequences=True),input_shape=self.input_shape))\n # model.add(Bidirectional(LSTM(2048))) id identification is 2048\n model.add(Bidirectional(LSTM(2048, return_sequences=True), input_shape=self.input_shape))\n #model.add(Bidirectional(LSTM(2048)))\n model.add(Dense(2048, activation='relu'))\n #model.add(Dropout(0.5))\n model.add(Dense(self.nb_classes, activation='softmax'))\n # model.add(Bidirectional(LSTM(20, return_sequences=True), input_shape=(n_timesteps, 1)))\n # model.add(TimeDistributed(Dense(1, activation='sigmoid')))\n # model = Sequential()\n # model = Sequential()\n # model.add(Embedding(max_features, 128, input_length=maxlen))\n # model.add(Bidirectional(LSTM(64)))\n # model.add(Dropout(0.5))\n # model.add(Dense(1, activation='sigmoid'))\n\n # model.add(Embedding(20000, 128, input_length=self.seq_length))\n # model.add(Flatten(input_shape=self.input_shape))\n # model.add(Embedding(20000, 128, input_length=self.seq_length))\n # 
model.add(Bidirectional(LSTM(128)))\n # model.add(Dropout(0.5))\n # model.add(Dense(self.nb_classes, activation='softmax'))\n return model", "def separable_lstm(cell, num_units, inputs, seq_lengths1, seq_lengths2, scope=None):\n with variable_scope.variable_scope(scope, \"SeparableLstm\", [inputs]):\n hidden = bidirectional_horizontal_lstm(cell, num_units, inputs, seq_lengths1)\n with variable_scope.variable_scope(\"vertical\"):\n transposed = array_ops.transpose(hidden, [0, 2, 1, 3])\n output_transposed = bidirectional_horizontal_lstm(cell, num_units, transposed, seq_lengths2)\n output = array_ops.transpose(output_transposed, [0, 2, 1, 3])\n return output", "def test_lstm_two_layers(self):\n\n class MultipleLayersLSTM(nn.Module):\n def __init__(self):\n super(MultipleLayersLSTM, self).__init__()\n self.rnn = torch.nn.LSTM(10, 20, 2, bidirectional=False)\n self.rnn.training = False\n\n def forward(self, inputs, h, c):\n return self.rnn(inputs, (h, c))\n\n inputs = torch.randn(5, 3, 10)\n h = torch.randn(2, 3, 20)\n c = torch.randn(2, 3, 20)\n model = MultipleLayersLSTM()\n\n utils.compare_tracing_methods(\n model, inputs, h, c, fusible_ops={\"aten::lstm\"}, skip_to_glow=True\n )", "def forward(self, batch: torch.LongTensor,\n hidden_start: torch.FloatTensor = None) -> torch.FloatTensor:\n\n # max_len = x.size(1)\n # x,label = batch\n # batch_size x max_len x embedding_dim\n x_embedded = self.embedding(batch)\n # x_drop = self.dropout\n x_drop = self.dropout(x_embedded)\n\n # compute hidden states and logits for each time step\n # hidden_states_list = []\n # prev_hidden = hidden_start\n hidden_state = self.rnn(x_drop)[0]\n # print(hidden_state)\n # print(hidden_state[0].shape)\n # print(hidden_state[1].shape)\n\n # hidden_state = hidden_state.permute(2,1,0)\n # hidden_state_maxPooled = F.max_pool1d(hidden_state,hidden_state.shape[2])\n # hidden_state_maxPooled = hidden_state.permute(2,1,0)\n hidden_state_pooled, _ = torch.max(hidden_state, dim=1)\n\n output = self.get_logits(hidden_state_pooled)\n\n # Loss = self.loss(output, y)\n\n # hidden_state = softmax(logits(hidden_state))\n\n # batch_size x max_len x rnn_size\n # hidden_states = torch.stack(hidden_states_list, dim=1)\n\n return output", "def lstm_cell(x, h, c, name=None, reuse=False):\n nin = x.shape[-1].value\n nout = h.shape[-1].value\n with tf.variable_scope(name, default_name=\"lstm\", values=[x, h, c], reuse=reuse):\n wx = get_variable_wrap(\"kernel/input\", [nin, nout * 4], dtype=tf.float32, \n initializer=tf.orthogonal_initializer(1.0))\n wh = get_variable_wrap(\"kernel/hidden\", [nout, nout * 4],dtype=tf.float32,\n initializer=tf.orthogonal_initializer(1.0))\n b = get_variable_wrap(\"bias\", [nout * 4], dtype=tf.float32,\n initializer=tf.constant_initializer(0.0))\n\n z = ed.dot(x, wx) + ed.dot(h, wh) + b\n i, f, o, u = tf.split(z, 4, axis=0)\n i = tf.sigmoid(i)\n f = tf.sigmoid(f + 1.0)\n o = tf.sigmoid(o)\n u = tf.tanh(u)\n c = f * c + i * u\n h = o * tf.tanh(c)\n return h, c", "def create_multilayer_lstm_params(num_layers, in_size, state_size, name=\"\"):\n lstm_layers = []\n for i in range(num_layers):\n layer_name = name + \"-\" + str(i)\n print(\"LSTM \" + layer_name + \": \" + str(in_size) + \" x \" + str(state_size) + \"; default Dynet initialization of hidden weights\")\n lstm_layer = torch.nn.LSTMCell(input_size=int(in_size), hidden_size=int(state_size), bias=True)\n lstm_layers.append(lstm_layer)\n in_size = state_size\n return torch.nn.ModuleList(lstm_layers)", "def __init__(self, embeddings, hidden_dim, 
output_size, dropout_emb, dropout_lstm):\n super(AttentionalLSTM, self).__init__()\n\n # 1) embedding layer:\n trainable_emb = False\n self.word_embeddings = nn.Embedding(num_embeddings=embeddings.shape[0],\n embedding_dim=embeddings.shape[1])\n self.init_embeddings(embeddings, trainable_emb)\n self.drop_emb = nn.Dropout(dropout_emb)\n\n # 2) LSTM layer\n self.hidden_dim = hidden_dim\n self.lstm = nn.LSTM(input_size=embeddings.shape[1],\n hidden_size=hidden_dim,\n batch_first=True,\n dropout=dropout_lstm)\n self.drop_lstm = nn.Dropout(dropout_lstm)\n self.attention = Attention(attention_size=hidden_dim, batch_first=True)\n\n # 3) linear layer -> outputs\n self.hidden2output = nn.Linear(hidden_dim, output_size)", "def __call__(self, inputs, state, scope=None):\n # Apply vanilla LSTM\n output, new_state = self._cell(inputs, state, scope)\n\n if self.state_is_tuple:\n (prev_c, prev_h) = state\n (new_c, new_h) = new_state\n else:\n num_proj = self._cell._num_units if self._cell._num_proj is None else self._cell._num_proj\n prev_c = tf.slice(state, [0, 0], [-1, self._cell._num_units])\n prev_h = tf.slice(state, [0, self._cell._num_units], [-1, num_proj])\n new_c = tf.slice(new_state, [0, 0], [-1, self._cell._num_units])\n new_h = tf.slice(new_state, [0, self._cell._num_units], [-1, num_proj])\n\n # Apply zoneout\n if self.is_training:\n keep_rate_cell = 1.0 - self._zoneout_cell\n keep_rate_output = 1.0 - self._zoneout_outputs\n c = keep_rate_cell * tf.nn.dropout(new_c - prev_c, keep_prob=keep_rate_cell) + prev_c\n h = keep_rate_output * tf.nn.dropout(new_h - prev_h, keep_prob=keep_rate_output) + prev_h\n else:\n c = new_c - self._zoneout_cell * (new_c + prev_c)\n h = new_h - self._zoneout_outputs * (new_h + prev_h)\n\n new_state = tf.nn.rnn_cell.LSTMStateTuple(c, h) if self.state_is_tuple else tf.concat([c, h], axis=1)\n\n return output, new_state", "def loop_fn(time, cell_output, cell_state, loop_state, emit_ta):\n \n if cell_output is None: # time == 0\n next_cell_state = initial_state\n emit_output= tf.ones(tf.shape(initial_state[1])[:1], dtype=tf.int32) * tf.constant(-1) #(batch_size)\n next_input = tf.squeeze(self.sos, [1])\n elements_finished = tf.logical_and(tf.cast(emit_output, dtype=tf.bool), False)\n \n else:\n \n next_cell_state = cell_state\n decoder_outputs = tf.expand_dims(cell_output, 1) #(batch_size, 1, hidden_size)\n encoder_outputs_reshape = tf.reshape(encoder_outputs, shape=(-1, self.config.hidden_size)) #(batch_size*time_steps, hidden_size)\n decoder_outputs_reshape = tf.reshape(decoder_outputs, shape=(-1, self.config.hidden_size)) #(batch_size*1, hidden_size)\n encoder_outputs_linear_reshape = tf.nn.rnn_cell._linear(encoder_outputs_reshape, output_size=self.config.hidden_size, \n bias=False, scope='Ptr_W1') #(b_sz*tstps_en, h_sz)\n decoder_outputs_linear_reshape = tf.nn.rnn_cell._linear(decoder_outputs_reshape, output_size=self.config.hidden_size, \n bias=False, scope='Ptr_W2') #(b_sz*1, h_sz)\n encoder_outputs_linear = tf.reshape(encoder_outputs_linear_reshape, tf.shape(encoder_outputs))\n decoder_outputs_linear = tf.reshape(decoder_outputs_linear_reshape, tf.shape(decoder_outputs))\n \n encoder_outputs_linear_expand = tf.expand_dims(encoder_outputs_linear, 1) #(b_sz, 1, tstp_en, h_sz)\n decoder_outputs_linear_expand = tf.expand_dims(decoder_outputs_linear, 2) #(b_sz, 1, 1, h_sz)\n \n after_add = tf.tanh(encoder_outputs_linear_expand + decoder_outputs_linear_expand) #(b_sz, 1, tstp_en, h_sz)\n \n after_add_reshape = tf.reshape(after_add, shape=(-1, 
self.config.hidden_size))\n \n after_add_linear_reshape = tf.nn.rnn_cell._linear(after_add_reshape, output_size=1, #(b_sz*1*tstp_en, 1)\n bias=False, scope='Ptr_v')\n after_add_linear = tf.reshape(after_add_linear_reshape, shape=(-1, tstps_en)) #(b_sz, tstp_en)\n en_length_mask = tf.sequence_mask(self.encoder_tstps, #(b_sz, tstp_en)\n maxlen=tstps_en, dtype=tf.bool)\n\n \"\"\"mask out already hitted ids\"\"\" \n hit_ids = tf.cond(emit_ta.size() > 0, lambda: emit_ta.pack(), lambda: tf.ones(shape=[1, batch_size], dtype=tf.int32)*-1) #(to_cur_tstp, b_sz)\n masks = tf.one_hot(hit_ids, depth=tstps_en, on_value=True, off_value=False) #(to_cur_tstp, b_sz, tstp_en)\n masks = tf.reduce_any(masks, reduction_indices=[0]) #(b_sz, tstp_en)\n hit_masks = tf.logical_not(masks)\n\n mask = tf.logical_and(en_length_mask, hit_masks)\n logits = tf.select(mask, after_add_linear,\n tf.ones_like(after_add_linear) * (-np.Inf)) # shape(b_sz, tstp_en)\n\n emit_output = tf.arg_max(logits, dimension=1) #(batch_size)\n emit_output = tf.cast(emit_output, dtype=tf.int32)\n \n bool_mask = tf.one_hot(emit_output, depth=tstps_en, on_value=True, off_value=False) #(b_sz, tstps_en)\n bool_mask = tf.reshape(bool_mask, shape=(batch_size, tstps_en))\n next_input = tf.boolean_mask(encoder_inputs, bool_mask) #(b_sz, emb_sz)\n \n elements_finished = tf.equal(emit_output, 0) #(batch_size)\n elements_finished = tf.reshape(elements_finished, (-1,))\n \n elements_finished = tf.logical_or(elements_finished, (time >= self.config.num_steps))\n next_loop_state = loop_state\n return (elements_finished, next_input, next_cell_state,\n emit_output, next_loop_state)", "def forward(self, x, hidden):\n batch_size = x.size(0)\n # embeddings and lstm_out\n embeds = self.embedding(x)\n lstm_out, hidden = self.lstm(embeds, hidden)\n \n # stack up lstm outputs\n lstm_out = lstm_out.contiguous().view(-1, self.hidden_dim)\n \n # dropout and fully connected layer\n out = self.dropout(lstm_out)\n out = self.fc(out)\n \n # sigmoid function\n sig_out = self.sig(out)\n \n # reshape to be batch_size first\n sig_out = sig_out.view(batch_size, -1)\n sig_out = sig_out[:, -1] # get last batch of labels\n \n # return last sigmoid output and hidden state\n return sig_out, hidden", "def add_logits_op(self):\n with tf.variable_scope(\"bi-lstm\"):\n cell_fw = tf.contrib.rnn.LSTMCell(self.config.hidden_size_lstm)\n cell_bw = tf.contrib.rnn.LSTMCell(self.config.hidden_size_lstm)\n (output_fw, output_bw), _ = tf.nn.bidirectional_dynamic_rnn(\n cell_fw, cell_bw, self.word_embeddings,\n sequence_length=self.sequence_lengths, dtype=tf.float32)\n output = tf.concat([output_fw, output_bw], axis=-1)\n output = tf.nn.dropout(output, self.dropout)\n\n with tf.variable_scope(\"proj\"):\n W = tf.get_variable(\"W\", dtype=tf.float32,\n shape=[2*self.config.hidden_size_lstm, self.config.ntags])\n\n b = tf.get_variable(\"b\", shape=[self.config.ntags],\n dtype=tf.float32, initializer=tf.zeros_initializer())\n\n nsteps = tf.shape(output)[1]\n output = tf.reshape(output, [-1, 2*self.config.hidden_size_lstm])\n pred = tf.matmul(output, W) + b\n self.logits = tf.reshape(pred, [-1, nsteps, self.config.ntags])", "def trajectory(self, state, T=1, time_steps=200):\n\n state = state.to(device)\n t = torch.linspace(0, T, time_steps).to(device)\n\n # integrate and remove batch dim\n traj = self.model_of_dyn_system.trajectory(state, t)\n return traj.detach().cpu()[:, 0, :]", "def build_lstm_layers(lstm_sizes, embed, keep_prob_, batch_size):\n lstms = [tf.contrib.rnn.BasicLSTMCell(size) for size 
in lstm_sizes]\n # Add dropout to the cell\n drops = [tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob_) for lstm in lstms]\n # Stack up multiple LSTM layers, for deep learning\n cell = tf.contrib.rnn.MultiRNNCell(drops)\n # Getting an initial state of all zeros\n initial_state = cell.zero_state(batch_size, tf.float32)\n \n lstm_outputs, final_state = tf.nn.dynamic_rnn(cell, embed, initial_state=initial_state)\n \n return initial_state, lstm_outputs, cell, final_state", "def get_init_cell(batch_size, rnn_size):\n\n cell = tf.contrib.rnn.BasicLSTMCell(rnn_size) #?????????????????cell_hidden_size = state_size\n #cell = tf.contrib.run.DropoutWrapper(cell, output_keep_prob=keep_prob)\n num_of_layers = 3\n cell = tf.contrib.rnn.MultiRNNCell([cell for _ in range(num_of_layers)])\n initial_state = cell.zero_state(batch_size, tf.float32)\n initial_state = tf.identity(initial_state, name='initial_state')\n \n return (cell, initial_state)", "def get_init_cell(batch_size, rnn_size):\n lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size)\n #drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=0.5)\n cell = tf.contrib.rnn.MultiRNNCell([lstm] * 3)\n initial_state = cell.zero_state(batch_size, tf.float32)\n initial_state = tf.identity(initial_state, name=\"initial_state\")\n\n return cell, initial_state", "def __init__(self, input_size=1, hidden_layer_size=100, output_size=1):\n super().__init__()\n self.hidden_layer_size = hidden_layer_size # TODO [tuning] different size (?)\n\n # TODO [tuning] BiLSTM (?)\n self.lstm = nn.LSTM(input_size=input_size, hidden_size=hidden_layer_size, batch_first=True)\n\n self.linear = nn.Linear(hidden_layer_size, output_size, bias=True) # TODO try with bias=False (?)", "def create_logits(self):\n with tf.variable_scope('LSTM'):\n first_label = self.get_input(prev=None, i=0)\n decoder_inputs = [first_label] + [None] * (self._params.seq_length - 1)\n lstm_cell = tf.contrib.rnn.LSTMCell(\n self._mparams.num_lstm_units,\n use_peepholes=False,\n cell_clip=self._mparams.lstm_state_clip_value,\n state_is_tuple=True,\n initializer=orthogonal_initializer)\n lstm_outputs, _ = self.unroll_cell(\n decoder_inputs=decoder_inputs,\n initial_state=lstm_cell.zero_state(self._batch_size, tf.float32),\n loop_function=self.get_input,\n cell=lstm_cell)\n\n with tf.variable_scope('logits'):\n logits_list = [\n tf.expand_dims(self.char_logit(logit, i), dim=1)\n for i, logit in enumerate(lstm_outputs)\n ]\n\n return tf.concat(logits_list, 1)", "def run_LSTM(data):\n # Initialising the RNN\n model = Sequential()\n \"\"\"\n # Adding the first LSTM layer and some Dropout regularisation\n model.add(LSTM(units=256,return_sequences=False, input_shape=(data.x_train.shape[1], data.x_train.shape[2])))\n model.add(Dropout(0.3))\n model.add(Dense(units=1))\n \"\"\"\n\n model = Sequential()\n model.add(LSTM(units=180, return_sequences=False, input_shape=(data.x_train.shape[1], data.x_train.shape[2])))\n #model.add(Dropout(params['dropout']))\n #model.add(LSTM(units=128))\n #model.add(Dropout(params['dropout']))\n #model.add(Dense(units=64))\n model.add(Dense(units=1,activation='relu',kernel_initializer=tf.keras.initializers.Orthogonal()))\n # Compiling the RNN\n opt = Adam(lr=0.0052)\n model.compile(optimizer=opt, loss='mean_squared_error',metrics=['mean_absolute_percentage_error'])\n\n # Fitting the RNN to the Training set\n regressor = model.fit(data.x_train, data.y_train.ravel(), epochs=180,batch_size=410,shuffle=True,validation_data=(data.x_valid,data.y_valid.ravel()))\n\n #Create plots\n 
plt.plot(regressor.history['loss'], label='loss')\n plt.plot(regressor.history['val_loss'], label='val_loss')\n plt.legend()\n plt.show()\n\n\n plt.figure()\n outputs = model.predict(data.x_test)\n print(outputs.shape)\n predictions = outputs[:,0]\n\n print(predictions.shape)\n\n pred_prices = predictions.reshape(-1,1)\n real_prices = data.y_test.reshape(-1,1)\n mape = 0\n\n pred_prices = data.inv.inverse_transform(pred_prices)\n real_prices = data.inv.inverse_transform(real_prices)\n\n #if Returns == False:\n \n #mape = mean_absolute_percentage_error(real_prices, pred_prices)\n\n #pred_prices = [x * (data.mm[1] - data.mm[0]) + data.mm[0] for x in predictions.reshape(-1)]\n #real_prices = [x * (data.mm[1] - data.mm[0]) + data.mm[0] for x in data.y_test.reshape(-1)]\n\n #pred_prices = data.train_sc.inverse_transform(predictions.reshape(-1))\n #real_prices = data.test_sc.inverse_transform(data.y_test.reshape(-1))\n\n #mape = mean_absolute_percentage_error(data.y_test.ravel(), pred_prices.)\n y_true, y_pred = np.array(real_prices).reshape(-1,1), np.array(pred_prices).reshape(-1,1)\n #y_true, y_pred = y_true[:50], y_pred[:50]\n\n mape = mean_absolute_percentage_error(y_true, y_pred)\n pct = PCT(y_true,y_pred)\n mse = mean_squared_error(y_true,y_pred)\n rmse = sqrt(mse)\n amape = AMAPE(y_true,y_pred)\n mae = MAE(y_true,y_pred)\n\n plt.plot(real_prices, label='targets')\n plt.plot(pred_prices, label='predictions')\n plt.legend()\n plt.title('LSTM test data')\n plt.show()\n\n plt.figure()\n outputs = model.predict(data.x_train)\n print(outputs.shape)\n predictions = outputs[:,0]\n\n plt.plot(data.y_train.ravel(), label='targets')\n plt.plot(predictions, label='predictions')\n plt.legend()\n plt.title('LSTM train data')\n plt.show()\n print(y_pred)\n\n print('RMSE= {:.6f}, MAPE = {:.6f}, PCT = {:.6f}, MSE = {:.6f}, MAE = {:.6f}, AMAPE = {:.6f}'.format(rmse, mape, pct, mse, mae, amape))" ]
[ "0.6892743", "0.6710073", "0.6645775", "0.6560571", "0.65253127", "0.6467472", "0.6402133", "0.63405454", "0.6327336", "0.6319975", "0.62931645", "0.62660724", "0.6201569", "0.61974037", "0.61889523", "0.6188214", "0.6185717", "0.61776847", "0.61347216", "0.61347216", "0.61342585", "0.6120478", "0.610608", "0.6079514", "0.60789424", "0.60562885", "0.60407937", "0.6007396", "0.6007396", "0.5986837", "0.59853923", "0.5984463", "0.59828645", "0.5982049", "0.5973913", "0.59557307", "0.59554553", "0.5946882", "0.5943511", "0.5914852", "0.5910636", "0.59094375", "0.5908141", "0.5906957", "0.58992594", "0.58959985", "0.58535314", "0.5850395", "0.58475465", "0.581226", "0.58045775", "0.5786732", "0.57788795", "0.577182", "0.577038", "0.5763523", "0.5749947", "0.5746776", "0.57337594", "0.5727108", "0.57141185", "0.56992304", "0.56948125", "0.5693537", "0.5687394", "0.567982", "0.567324", "0.56701344", "0.5660205", "0.5654713", "0.5652018", "0.56478685", "0.5647462", "0.5643771", "0.56406355", "0.56390435", "0.5633544", "0.5630392", "0.5623084", "0.5611732", "0.56076545", "0.5605041", "0.56040186", "0.5603595", "0.5603507", "0.560264", "0.55973506", "0.5587781", "0.55870885", "0.5586603", "0.5586413", "0.55851024", "0.55759805", "0.55709565", "0.5570894", "0.55694216", "0.5567427", "0.55486876", "0.55402774", "0.5539253", "0.55347145" ]
0.0
-1
neural network model for mapping
def neural_network(z, dim_out):\n    hidden_dim = 15\n    net1 = slim.fully_connected(z, hidden_dim, activation_fn=None)\n    net2 = slim.fully_connected(net1, dim_out, activation_fn=tf.tanh)\n    return net2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def trainNet():", "def predict_from(self, inputs, to_layers):", "def neural_network(xtrain, ytrain, xtest, ytest,labels_mapping, scaled = False):\n if not scaled :\n scaler = StandardScaler()\n xtrain = scaler.fit_transform(xtrain)\n xtest = scaler.transform(xtest)\n\n nn = MLPClassifier() #hidden_layer_sizes=30, alpha=0.0001, early_stopping=True\n nn = __train_and_test(nn, xtrain, ytrain, xtest, ytest,labels_mapping)\n return nn", "def construct_model():\n import lbann\n\n # Layer graph\n input = lbann.Input(target_mode='N/A', name='inp_data')\n # data is 64*64*4 images + 15 scalar + 5 param\n #inp_slice = lbann.Slice(input, axis=0, slice_points=\"0 16399 16404\",name='inp_slice')\n inp_slice = lbann.Slice(input, axis=0, slice_points=str_list([0,args.ydim,args.ydim+5]),name='inp_slice')\n gt_y = lbann.Identity(inp_slice,name='gt_y')\n gt_x = lbann.Identity(inp_slice, name='gt_x') #param not used\n\n zero = lbann.Constant(value=0.0,num_neurons='1',name='zero')\n one = lbann.Constant(value=1.0,num_neurons='1',name='one')\n\n z_dim = 20 #Latent space dim\n\n z = lbann.Gaussian(mean=0.0,stdev=1.0, neuron_dims=\"20\")\n model = macc_models.MACCWAE(args.zdim,args.ydim,cf=args.mcf,use_CNN=args.useCNN)\n d1_real, d1_fake, d_adv, pred_y = model(z,gt_y)\n\n d1_real_bce = lbann.SigmoidBinaryCrossEntropy([d1_real,one],name='d1_real_bce')\n d1_fake_bce = lbann.SigmoidBinaryCrossEntropy([d1_fake,zero],name='d1_fake_bce')\n d_adv_bce = lbann.SigmoidBinaryCrossEntropy([d_adv,one],name='d_adv_bce')\n img_loss = lbann.MeanSquaredError([pred_y,gt_y])\n rec_error = lbann.L2Norm2(lbann.WeightedSum([pred_y,gt_y], scaling_factors=\"1 -1\"))\n\n layers = list(lbann.traverse_layer_graph(input))\n # Setup objective function\n weights = set()\n src_layers = []\n dst_layers = []\n for l in layers:\n if(l.weights and \"disc0\" in l.name and \"instance1\" in l.name):\n src_layers.append(l.name)\n #freeze weights in disc2\n if(l.weights and \"disc1\" in l.name):\n dst_layers.append(l.name)\n for idx in range(len(l.weights)):\n l.weights[idx].optimizer = lbann.NoOptimizer()\n weights.update(l.weights)\n l2_reg = lbann.L2WeightRegularization(weights=weights, scale=1e-4)\n d_adv_bce = lbann.LayerTerm(d_adv_bce,scale=0.01)\n obj = lbann.ObjectiveFunction([d1_real_bce,d1_fake_bce,d_adv_bce,img_loss,rec_error,l2_reg])\n # Initialize check metric callback\n metrics = [lbann.Metric(img_loss, name='recon_error')]\n #pred_y = macc_models.MACCWAE.pred_y_name\n callbacks = [lbann.CallbackPrint(),\n lbann.CallbackTimer(),\n lbann.CallbackSaveModel(dir=args.dump_models),\n lbann.CallbackReplaceWeights(source_layers=list2str(src_layers),\n destination_layers=list2str(dst_layers),\n batch_interval=2)]\n\n if(args.ltfb_batch_interval > 0) :\n callbacks.append(lbann.CallbackLTFB(batch_interval=args.ltfb_batch_interval,metric='recon_error',\n low_score_wins=True,\n exchange_hyperparameters=True))\n\n # Construct model\n return lbann.Model(args.num_epochs,\n serialize_io=True,\n weights=weights,\n layers=layers,\n metrics=metrics,\n objective_function=obj,\n callbacks=callbacks)", "def setupNetwork(self):\n\t\tin_layer = Input(shape=(28, ))\n\t\td1 = Dense(40, activation='relu')(in_layer)\n\t\td2 = Dense(10, activation='relu')(d1)\n\t\tout = Dense(1, activation='sigmoid')(d2)\n\n\t\tself.model = tf.keras.Model(inputs=in_layer, outputs=out)", "def build_mlp(input_data, output_data, n_neurons=[512, 256, 128]):\n input_layer = keras.layers.Input([input_data.shape[-1]], name='input-layer')\n for i, n_unit in enumerate(n_neurons):\n if i == 
0:\n x = keras.layers.Dense(units=n_unit, activation='relu', name='hidden-layer'+str(i+1))(input_layer)\n else:\n x = keras.layers.Dense(units=n_unit, activation='relu', name='hidden-layer'+str(i+1))(x)\n \n output_layer = keras.layers.Dense(units=output_data.shape[-1],activation='softmax' , name='output-layer')(x)\n model = keras.models.Model(inputs=input_layer, outputs=output_layer)\n return model", "def model(input_shape, output_dim, num_hidden_units,num_hidden_units_2,num_hidden_units_3, num_code_units, batch_size=BATCH_SIZE):\n shape = tuple([None]+list(input_shape[1:]))\n print(shape)\n l_in = lasagne.layers.InputLayer(shape=shape)\n\n l_hidden_1 = lasagne.layers.DenseLayer(\n l_in,\n num_units=num_hidden_units,\n nonlinearity=lasagne.nonlinearities.rectify,\n )\n\n l_hidden_2 = lasagne.layers.DenseLayer(\n l_hidden_1,\n num_units=num_hidden_units_2,\n nonlinearity=lasagne.nonlinearities.rectify,\n )\n\n l_hidden_3 = lasagne.layers.DenseLayer(\n l_hidden_2,\n num_units=num_hidden_units_3,\n nonlinearity=lasagne.nonlinearities.rectify,\n )\n\n l_code_layer = lasagne.layers.DenseLayer(\n l_hidden_3,\n num_units=num_code_units,\n nonlinearity=lasagne.nonlinearities.softmax,\n )\n\n l_hidden_3 = lasagne.layers.DenseLayer(\n l_code_layer,\n num_units=num_hidden_units_3,\n nonlinearity=lasagne.nonlinearities.rectify,\n )\n\n l_hidden_4 = lasagne.layers.DenseLayer(\n l_hidden_3,\n num_units=num_hidden_units_2,\n nonlinearity=lasagne.nonlinearities.rectify,\n )\n\n l_hidden_5 = lasagne.layers.DenseLayer(\n l_hidden_4,\n num_units=num_hidden_units,\n nonlinearity=lasagne.nonlinearities.rectify,\n )\n\n l_out = lasagne.layers.DenseLayer(\n l_hidden_5,\n num_units=output_dim,\n nonlinearity=None,\n )\n\n return l_out", "def Network_model(input_data):\n layer1_param={'weights':tf.Variable(tf.random_normal([784, no_neurons_layer1])), \n 'biases': tf.Variable(tf.random_normal([no_neurons_layer1]))}\n \n layer2_param={'weights':tf.Variable(tf.random_normal([no_neurons_layer1, no_neurons_layer2])), \n 'biases': tf.Variable(tf.random_normal([no_neurons_layer2]))}\n \n layer3_param={'weights':tf.Variable(tf.random_normal([no_neurons_layer2, no_neurons_layer3])), \n 'biases': tf.Variable(tf.random_normal([no_neurons_layer3]))}\n \n layer4_param={'weights':tf.Variable(tf.random_normal([no_neurons_layer3, no_neurons_layer4])), \n 'biases': tf.Variable(tf.random_normal([no_neurons_layer4]))}\n \n output_layer_param={'weights':tf.Variable(tf.random_normal([no_neurons_layer4, no_classes])), \n 'biases': tf.Variable(tf.random_normal([no_classes]))}\n \n #so uptill now the weights for each layer is initialized\n \n \"\"\"\n Now what will happened in each layer, I will define next. 
basically the weights are multiplied\n in each layer with the corresponding inputs and then it is passed through activation function \n (relu in this case) and the output is given as input to the other layer.\n sign:B-Jan\n \"\"\"\n \n l1_output= tf.add(tf.matmul(input_data,layer1_param['weights']), layer1_param['biases'])\n l1_output=tf.nn.relu(l1_output)\n \n l2_output= tf.add(tf.matmul(l1_output,layer2_param['weights']), layer2_param['biases'])\n l2_output=tf.nn.relu(l2_output)\n \n \n l3_output= tf.add(tf.matmul(l2_output,layer3_param['weights']), layer3_param['biases'])\n l3_output=tf.nn.relu(l3_output)\n \n l4_output= tf.add(tf.matmul(l3_output,layer4_param['weights']), layer4_param['biases'])\n l4_output=tf.nn.relu(l4_output)\n \n #The final output Layer\n output= tf.matmul(l4_output, output_layer_param['weights'])+output_layer_param['biases']\n \n return output # contains the output of the last output layer", "def model(input_shape, output_dim, num_hidden_units,num_hidden_units_2, num_code_units, filter_size, batch_size=BATCH_SIZE):\n shape = tuple([None]+list(input_shape[1:]))\n print(shape)\n l_in = lasagne.layers.InputLayer(shape=shape)\n\n print(\"Input shape: \",lasagne.layers.get_output_shape(l_in))\n\n # print(shaped_units)\n # shaped_units = shaped_units[0]\n shaped_units = 2800\n\n # print(shape)\n\n l_conv2D_1 = lasagne.layers.Conv2DLayer(\n l_in, \n num_filters=8,\n filter_size=filter_size, \n stride=(1, 1), \n border_mode=\"valid\", \n untie_biases=False, \n nonlinearity=None,\n )\n\n print(\"Conv 2D shape: \",lasagne.layers.get_output_shape(l_conv2D_1))\n\n l_reshape_1 = lasagne.layers.ReshapeLayer(\n l_conv2D_1,\n shape=(([0], -1))\n )\n\n print(\"Reshape 1 shape: \", lasagne.layers.get_output_shape(l_reshape_1))\n\n l_hidden_1 = lasagne.layers.DenseLayer(\n l_reshape_1,\n num_units= num_hidden_units,\n nonlinearity=lasagne.nonlinearities.rectify,\n )\n\n print(\"Hidden 1 shape: \", lasagne.layers.get_output_shape(l_hidden_1))\n\n l_code_layer = lasagne.layers.DenseLayer(\n l_hidden_1,\n num_units=num_code_units,\n nonlinearity=lasagne.nonlinearities.rectify,\n )\n\n print(\"Code layer shape: \",lasagne.layers.get_output_shape(l_code_layer))\n\n l_hidden_2 = lasagne.layers.DenseLayer(\n l_code_layer,\n num_units=num_hidden_units,\n nonlinearity=lasagne.nonlinearities.rectify,\n )\n\n print(\"Hidden 2 shape: \",lasagne.layers.get_output_shape(l_hidden_2))\n\n l_hidden_3 = lasagne.layers.DenseLayer(\n l_hidden_2,\n num_units=shaped_units,\n nonlinearity=lasagne.nonlinearities.rectify,\n )\n\n print(\"Hidden 3 shape: \",lasagne.layers.get_output_shape(l_hidden_3))\n\n l_reshape_2 = lasagne.layers.ReshapeLayer(\n l_hidden_3,\n shape=(([0],8,7,50))\n )\n\n print(\"Reshape 2 shape: \",lasagne.layers.get_output_shape(l_reshape_2))\n\n l_out = lasagne.layers.Conv2DLayer(\n l_reshape_2, \n num_filters=1,\n filter_size=filter_size, \n stride=(1, 1), \n border_mode=\"valid\", \n untie_biases=False, \n nonlinearity=None,\n )\n\n # print(\"Deconv shape: \",lasagne.layers.get_output_shape(l_deconv2D_1))\n\n print(\"Output shape: \",lasagne.layers.get_output_shape(l_out))\n\n return l_out", "def represent():\n\tmodel.eval()\n\twith torch.no_grad():\n\n\t\tall_data = []\n\t\tall_targets = []\n\n\t\tfor batch_idx, (data, labels) in enumerate(nat_test_loader):\n\t\t\tall_data.append(data)\n\t\t\tall_targets.append(labels.float()+50) # +50 for nat data, for distinction between nat and syn\n\t\tfor batch_idx, (data, labels) in 
enumerate(syn_test_loader):\n\t\t\tall_data.append(data)\n\t\t\tall_targets.append(labels.float())\n\n\t\tall_data = torch.cat(all_data, 0) # Merges the list of tensors\n\t\tall_data = all_data.cuda()\n\t\tall_targets = torch.cat(all_targets, 0)\n\n\t\trepresentation = model.representation(all_data)\n\t\t\n\t\ttorch.save(representation, directory + \"/representations/repr\" + str(epoch) + \".pt\")\n\t\twith open(directory + \"/representations/tar\" + str(epoch) + \".log\", \"w\") as f:\n\t\t\tfor t in all_targets:\n\t\t\t\tf.write(str(t.item()) + \"\\n\")\n\n\t\t# Optional: Plotting of the UMAP in each represent()\n\t\t#sns.set(style='white', context='notebook', rc={'figure.figsize': (14, 10)})\n\t\t#reducer = umap.UMAP()\n\t\t#embedding = reducer.fit_transform(representation.cpu())\n\t\t# flatui = [\"#ff0000\", \"#000000\", \"#001800\", \"#003000\", \"#004800\", \"#006000\", \"#007800\", \"#009000\", \"#00a800\", \"#00c000\", \"#00d800\"]\n\t\t# plt.scatter(embedding[:, 0], embedding[:, 1], c=[sns.color_palette(flatui)[x] for x in all_targets.int()])\n\t\t#plt.scatter(embedding[:, 0], embedding[:, 1], c=all_targets.cpu())\n\t\t#plt.gca().set_aspect('equal', 'datalim')\n\t\t#plt.title('UMAP projection of cell data', fontsize=24);\n\t\t#plt.savefig(directory + \"/umap_\" + str(epoch) + \".png\")\n\t\t#plt.clf()", "def init_three_layer_neuralnet(weight_scale=1, bias_scale=0, input_feat_dim=786,\n num_classes=10, num_neurons=(20, 30)):\n \n assert len(num_neurons) == 2, 'You must provide number of neurons for two layers...'\n\n model = {}\n #model['W1'] = np.random.randn((num_neurons[0],(input_feat_dim) * weight_scale) * math.sqrt(2.0/input_feat_dim)) # Initialize from a Gaussian With scaling of sqrt(2.0/fanin)\n \n model['W1'] = (np.random.rand(input_feat_dim,num_neurons[0])*weight_scale) * math.sqrt(2.0/input_feat_dim)\n model['b1'] = np.zeros(num_neurons[0])# Initialize with zeros\n \n #model['W2'] = (np.random.randn(input_feat_dim) * weight_scale) * math.sqrt(2.0/input_feat_dim)# Initialize from a Gaussian With scaling of sqrt(2.0/fanin)\n #print ((model['W1'])[0,:]).shape\n #numcols = len(input[0])\n t=len((model['W1'])[0])\n #print t\n model['W2'] = (np.random.rand(num_neurons[0],num_neurons[1])*weight_scale) * math.sqrt(2.0/t)\n model['b2'] = np.zeros(num_neurons[1])# Initialize with zeros\n\n t=len((model['W2'])[0])\n #model['W3'] = (np.random.randn(input_feat_dim) * weight_scale) * math.sqrt(2.0/input_feat_dim)# Initialize from a Gaussian With scaling of sqrt(2.0/fanin)\n model['W3'] = (np.random.rand(num_neurons[1],num_classes)*weight_scale) * math.sqrt(2.0/t)\n model['b3'] = np.zeros(num_classes)# Initialize with zeros\n\n return model", "def __init__(self):\n super().__init__()\n No_of_Inputs = 2\n No_of_Outputs = 2\n No_of_Hidden_nodes = 12\n \"\"\" Defining the min and max values of the data used in training for normalizing the input values sent by game and denormalize the predicted output values from neural network \"\"\" \n self.max_X_Distance = 636.2896274868181\n self.min_X_Distance = -596.6699167751983\n self.max_Y_Distance = 743.1598623474754\n self.min_Y_Distance = 65.6244797954829\n self.max_X_Velocity = 7.994655683732954\n self.min_X_Velocity = -6.139791884122744\n self.max_Y_Velocity = 6.116163006403942\n self.min_Y_Velocity = -5.779221733928611\n self.Neural_Net = Neuron_Network(No_of_Inputs, No_of_Hidden_nodes, No_of_Outputs)\n self.Neural_Net.Hidden_Layer_Weights = [[-2.4968545286197834, -1.8753602229555426, -0.212544244291629], [-1.7630022249709958, 
-3.6728753504716702, 0.9029212995412115], [-9.92308792895824, 18.605900320220044, 0.6546005968930644], [-2.4482999114771995, -1.517816946765758, -0.9193463164391101], [-2.3427861053090684, -2.4881000020941877, 0.4629152770160724], [-2.1591465483332413, 1.0195709398508257, -3.550975138336682], [-4.121604475036676, 1.2541841992381966, 0.20872225266025077], [-2.794714837157948, -0.6250218903568433, -0.9508382423169754], [-2.171501881731379, -2.860403977932674, 0.45023268515928966], [-7.574606539172206, 5.796893890015888, 0.8325562788065618], [-2.3949093030515787, -1.6691739704587119, -0.8994153916849774], [-2.5057827237537236, -1.833523946060227, -0.15265344756899354]]\n self.Neural_Net.Output_Layer_Weights = [[0.5339576155454724, -7.163855899626589, 4.441573522337238, -0.8487519667092871, 0.194328665944557, -6.253588662045125, 10.355395474689958, -0.5546973711452573, 1.3109277184619805, -2.8628613991153036, -3.4019242278486903, 0.920569758736398, -9.436494568306678], [-1.2778954480096152, 0.7155347068753504, 1.642050336134636, 1.847449069077208, 0.6888835859247565, 1.1005203424912922, 1.8925919549669181, -0.6795836727331039, 0.41572054666867386, 1.2533245105144883, -3.297414893260861, 0.7326422000597372, 0.6620055115639853]]", "def _make_network(self):\n inp = Input(shape = (self.input_dim,))\n x = Dense(256, activation='relu')(inp)\n x = GaussianNoise(1.0)(x)\n #x = Flatten()(x) # I assume this is if the input is a convolutional neural net?\n x = Dense(128, activation='relu')(x)\n x = GaussianNoise(1.0)(x)\n out = Dense(self.output_dim, activation='tanh', kernel_initializer=RandomUniform())(x)\n out = Lambda(lambda i: i * self.act_range)(out)\n return Model(inp, out)", "def build_model(self):\n import tensorflow as tf\n \n y = tf.nn.relu(tf.matmul(self.variables[\"input_observation\"], self.variables[\"W1\"]) + \n self.variables[\"b1\"], name=\"y1\")\n \n for i in range(self.n_hidden-1):\n y = tf.nn.relu(tf.matmul(y, self.variables[\"W\"+str(i+2)]) + \n self.variables[\"b\"+str(i+2)], name=\"y\"+str(i+2))\n \n self.variables[\"y\"] = [tf.matmul(y, self.variables[\"Wo_0\"]) + self.variables[\"bo_0\"]]\n for i in range(1, len(self.output_size)):\n self.variables[\"y\"] += [tf.matmul(y, self.variables[\"Wo_%s\"%i]) + self.variables[\"bo_%s\"%i]]", "def main(tetrode_number=TETRODE_NUMBER,num_hidden_units=500,num_hidden_units_2=300,num_hidden_units_3=200,num_code_units=50):\n \n print(\"Making the model...\")\n network = model((None,200),200,num_hidden_units,num_hidden_units_2,num_hidden_units_3,num_code_units)\n print(\"Done!\")\n\n\n for tetrode_number in [10]:\n\n print(\"Loading the model parameters from {}\".format(MODEL_FILENAME+str(tetrode_number)))\n f = open(MODEL_FILENAME+str(tetrode_number),'r')\n all_param_values = pickle.load(f)\n f.close()\n # print(all_param_values)\n lasagne.layers.set_all_param_values(network, all_param_values)\n\n print(\"Loading the data...\")\n dataset = load_data(tetrode_number)\n print(\"Done!\")\n\n print(dataset['data'].shape)\n\n print(\"Setting up the training functions...\")\n training = funcs(dataset,network)\n print(\"Done!\")\n\n for i in range(NUM_EPOCHS):\n costs = []\n\n for start, end in zip(range(0, dataset['data'].shape[0], BATCH_SIZE), range(BATCH_SIZE, dataset['data'].shape[0], BATCH_SIZE)):\n cost = training['train'](dataset['data'][start:end],dataset['data'][start:end])\n costs.append(cost)\n\n meanTrainCost = np.mean(np.asarray(costs,dtype=np.float32))\n # accuracy = training['accuracy'](dataset['X_test'],dataset['y_test'])\n\n 
print(\"Epoch: {}, Training cost: {}\".format(i+1,meanTrainCost))\n # NUM_POINTS = 5000\n codes = training['code'](dataset['data'][0:NUM_POINTS])\n\n \n\n # y = set(list(d.predict(dataset['data'][0:NUM_POINTS])))\n\n # print(y)\n\n # activations_1 = training['activations_1'](dataset['data'][0:NUM_POINTS])\n # activations_2 = training['activations_2'](dataset['data'][0:NUM_POINTS])\n # codes = training['code'](dataset['data'][0:NUM_POINTS])\n # # print(codes.shape)\n # # codes_2d = bh_sne(codes)\n\n # for k in range(3):\n # print(k)\n\n # codes_2d = bh_sne(np.asarray(codes[:(k+1)*12000],dtype=np.float64))\n\n # # d = DPGMM(n_components=10, covariance_type='full')\n # d = DPGMM(n_components=15,n_iter=100)\n\n # d.fit(codes_2d[:(k+1)*12000])\n\n # hdp = d.predict_proba(codes_2d[:(k+1)*12000])\n\n # hdp_1d = [np.argmax(z) for z in hdp]\n\n # print(set(list(hdp_1d)))\n\n # plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=hdp_1d, alpha=0.8,lw=0)\n # plt.savefig('dbscan_labels/deep/sparse/hdp_{}_{}.png'.format(tetrode_number,k), bbox_inches='tight')\n # plt.close()\n\n # # m = TSNE(n_components=2, random_state=0)\n \n # # codes_2d = m.fit_transform(codes[:NUM_POINTS])\n # # activations_1_2d = bh_sne(activations_1)\n # # activations_2_2d = bh_sne(activations_2)\n\n # plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=dataset['labels'][0:NUM_POINTS][:(k+1)*12000],alpha=0.8,lw=0)\n # plt.savefig('dbscan_labels/deep/sparse/tsne_codes_{}_{}.png'.format(tetrode_number,k), bbox_inches='tight')\n # plt.close()\n\n # # This is where the code for the video will go\n # ##############################################################################\n # # Compute DBSCAN\n # db = None\n # core_samples_mask = None\n # labels = None\n\n # num_labels = 0\n # eps=1.0\n # while(num_labels < 10):\n # db = DBSCAN(eps=eps, min_samples=10).fit(codes_2d)\n # core_samples_mask = np.zeros_like(db.labels_, dtype=bool)\n # core_samples_mask[db.core_sample_indices_] = True\n # labels = db.labels_\n # num_labels = np.amax(labels)\n # eps -= 0.1\n\n # print(\"Num learned labels: {}\".format(num_labels))\n\n # plt.title('Estimated number of clusters: {}'.format(np.amax(labels)))\n # plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=labels[0:NUM_POINTS][:(k+1)*12000],lw=0)\n # plt.savefig('dbscan_labels/deep/sparse/dbscan_codes_{}_{}.png'.format(tetrode_number,k), bbox_inches='tight')\n # plt.close()\n\n # # f=open('dbscan_labels/deep/sparse/tetrode_{}.npy'.format(tetrode_number),'w')\n # # pickle.dump(labels, f)\n # # f.close()\n\n codes_2d = bh_sne(np.asarray(codes,dtype=np.float64),theta=0.4)\n\n # d = DPGMM(n_components=10, covariance_type='full')\n d = DPGMM(n_components=15,n_iter=1000)\n\n d.fit(codes_2d)\n\n hdp = d.predict_proba(codes_2d)\n\n hdp_1d = [np.argmax(z) for z in hdp]\n\n print(set(list(hdp_1d)))\n\n plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=hdp_1d, alpha=0.8,lw=0)\n plt.savefig('dbscan_labels/deep/sparse/hdp_{}.png'.format(tetrode_number), bbox_inches='tight')\n plt.close()\n\n # m = TSNE(n_components=2, random_state=0)\n \n # codes_2d = m.fit_transform(codes[:NUM_POINTS])\n # activations_1_2d = bh_sne(activations_1)\n # activations_2_2d = bh_sne(activations_2)\n\n plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=dataset['labels'][0:NUM_POINTS],alpha=0.8,lw=0)\n plt.savefig('dbscan_labels/deep/sparse/tsne_codes_{}.png'.format(tetrode_number), bbox_inches='tight')\n plt.close()\n\n # This is where the code for the video will go\n ##############################################################################\n # Compute 
DBSCAN\n db = None\n core_samples_mask = None\n labels = None\n\n num_labels = 0\n eps=1.0\n while(num_labels < 10):\n db = DBSCAN(eps=eps, min_samples=10).fit(codes_2d)\n core_samples_mask = np.zeros_like(db.labels_, dtype=bool)\n core_samples_mask[db.core_sample_indices_] = True\n labels = db.labels_\n num_labels = np.amax(labels)\n eps -= 0.1\n\n print(\"Num learned labels: {}\".format(num_labels))\n\n plt.title('Estimated number of clusters: {}'.format(np.amax(labels)))\n plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=labels[0:NUM_POINTS],lw=0)\n plt.savefig('dbscan_labels/deep/sparse/dbscan_codes_{}.png'.format(tetrode_number), bbox_inches='tight')\n plt.close()\n\n # f=open('dbscan_labels/deep/sparse/tetrode_{}.npy'.format(tetrode_number),'w')\n # pickle.dump(labels, f)\n # f.close()", "def model(image_height,image_width,path):\n\n\tdef load_file(path='vgg19.mat'):\n\t\t\"\"\"\n\t\tLoads Weights File & returns Object of Numpy array\n\t\t\"\"\"\n\t\tfile=loadmat(path)\n\t\tfile=file['layers']\n\t\tprint(\"Success load_file\")\n\t\treturn file\n\n\tdef ret_layer_index(file):\n\t\t\"\"\"\n\t\tTakes file as input & returns a dictionary having name of layers with their code\n\t\t\"\"\"\n\t\tnames={}\n\t\tfor i in range(len(file[0])):\n\t\t\tprint(file[0][i][0][0][0])\n\t\t\tnames[file[0][i][0][0][0][0]]=i\n\t\tprint(\"Success layer_index\")\n\t\treturn names\n \n\tdef weight(layer_name):\n\t\t\"\"\" Asks for Layer Name & returns its weights & bias\n\t\t\"\"\"\n\t\tlayer_no=names[layer_name]\n\t\twb =file[0][layer_no][0][0][2]\n\t\tw=wb[0][0]\n\t\tb=wb[0][1]\n\t\tname=file[0][layer_no][0][0][0]\n\t\tassert name==layer_name\n\t\tprint(\"Success weight\")\n\t\treturn w,b\n\n\tdef conv_relu(prev_layer,layer_no,layer_name):\n\t\tW,b=weight(layer_name)\n\t\tW=tf.constant(W)\n\t\tb=tf.constant(np.reshape(b, (b.size)))\n\t\tl=tf.nn.conv2d(prev_layer,filter=W,strides=[1,1,1,1],padding='SAME') +b\n\t\tprint(\"Success convrelu\")\n\t\treturn tf.nn.relu(l)\n\n\tdef avg_pool(prev_layer):\n\t\treturn tf.nn.avg_pool(prev_layer,ksize=[1, 2, 2, 1],strides=[1, 2, 2, 1], padding='SAME')\n\n\tdef load_graph():\n\t\tgraph={}\n\t\tgraph['input'] = tf.Variable(np.zeros((1, image_height, image_width,3)), dtype = 'float32')\n\t\tgraph['conv1_1'] = conv_relu(graph['input'], 0, 'conv1_1')\n\t\tgraph['conv1_2'] = conv_relu(graph['conv1_1'], 2, 'conv1_2')\n\t\tgraph['avgpool1'] = avg_pool(graph['conv1_2'])\n\t\tgraph['conv2_1'] = conv_relu(graph['avgpool1'], 5, 'conv2_1')\n\t\tgraph['conv2_2'] = conv_relu(graph['conv2_1'], 7, 'conv2_2')\n\t\tgraph['avgpool2'] = avg_pool(graph['conv2_2'])\n\t\tgraph['conv3_1'] = conv_relu(graph['avgpool2'], 10, 'conv3_1')\n\t\tgraph['conv3_2'] = conv_relu(graph['conv3_1'], 12, 'conv3_2')\n\t\tgraph['conv3_3'] = conv_relu(graph['conv3_2'], 14, 'conv3_3')\n\t\tgraph['conv3_4'] = conv_relu(graph['conv3_3'], 16, 'conv3_4')\n\t\tgraph['avgpool3'] = avg_pool(graph['conv3_4'])\n\t\tgraph['conv4_1'] = conv_relu(graph['avgpool3'], 19, 'conv4_1')\n\t\tgraph['conv4_2'] = conv_relu(graph['conv4_1'], 21, 'conv4_2')\n\t\tgraph['conv4_3'] = conv_relu(graph['conv4_2'], 23, 'conv4_3')\n\t\tgraph['conv4_4'] = conv_relu(graph['conv4_3'], 25, 'conv4_4')\n\t\tgraph['avgpool4'] = avg_pool(graph['conv4_4'])\n\t\tgraph['conv5_1'] = conv_relu(graph['avgpool4'], 28, 'conv5_1')\n\t\tgraph['conv5_2'] = conv_relu(graph['conv5_1'], 30, 'conv5_2')\n\t\tgraph['conv5_3'] = conv_relu(graph['conv5_2'], 32, 'conv5_3')\n\t\tgraph['conv5_4'] = conv_relu(graph['conv5_3'], 34, 'conv5_4')\n\t\tgraph['avgpool5'] = 
avg_pool(graph['conv5_4'])\n\t\treturn graph\n\n\tfile=load_file(path)\n\tnames=ret_layer_index(file)\n\treturn load_graph()", "def three_layer_neuralnetwork(X, model, y=None, reg=0.0,verbose=0):\n \n # Unpack weights\n W1, b1, W2, b2, W3, b3 = model['W1'], model['b1'], model['W2'], model['b2'],model['W3'],model['b3']\n N,D= X.shape\n\n assert W1.shape[0] == D, ' W1 2nd dimenions must match number of features'\n \n dW1,dW2,dW3,db1,db2,db3=np.zeros_like(W1),np.zeros_like(W2),np.zeros_like(W3),np.zeros_like(b1),np.zeros_like(b2),np.zeros_like(b3)\n # Compute the forward pass\n \n '''\n AffineLayer = X.dot(W1)+b1 \n ReluLayer,_ = relu_forward(AffineLayer)\n AffineLayer2 = ReluLayer.dot(W2) + b2\n ReluLayer2,_ = relu_forward(AffineLayer2)\n AffineLayer3 = ReluLayer2.dot(W3) + b3\n scores = AffineLayer3\n \n print X.shape\n print W1.shape\n print b1.shape\n print W2.shape\n print b2.shape\n print W3.shape\n print b3.shape\n '''\n affine_out1,cache1 = affine_forward(X, W1, b1)\n relu_out1,cache_relu1 = relu_forward(affine_out1)\n \n affine_out2,cache2 = affine_forward(relu_out1, W2, b2)\n relu_out2,cache_relu2 = relu_forward(affine_out2)\n \n affine_out3,cache3 = affine_forward(relu_out2, W3, b3)\n scores = affine_out3\n\n #if verbose:\n #print ['Layer {} Variance = {}'.format(i+1, np.var(l[:])) for i,l in enumerate([a1, a2, cache3[0]])][:]\n if y is None:\n return scores\n data_loss,d_softmax = softmax_loss(scores,y)\n data_loss += reg * (np.sum(W1*W1) + np.sum(W2*W2) + np.sum(W3*W3))\n '''\n max_scores = np.max(scores)\n scores -= max_scores\n correct_class_scores = scores[y,np.arange(N)]\n exp_score = np.exp(scores)\n sumexp = np.sum(exp_score,axis=0)\n loss_i = -correct_class_scores + np.log(sumexp)\n loss = np.sum(loss_i) / N \n ''' \t\n # Compute the backward pass\n \n d_affine_out3, dW3, db3 = affine_backward(d_softmax, cache3) \n d_relu2 = relu_backward(d_affine_out3, cache_relu2)\n \n d_affine_out2, dW2, db2 = affine_backward(d_relu2, cache2) \n d_relu1 = relu_backward(d_affine_out2, cache_relu1)\n \n d_affine_out1, dW1, db1 = affine_backward(d_relu1, cache1) \n \n #\n reg_loss = 0\n\n loss = data_loss + reg_loss\n grads = {'W1': dW1, 'b1': db1, 'W2': dW2, 'b2': db2,'W3':dW3,'b3':db3}\n \n return loss, grads", "def __init__(self): #initializing\n super(Model, self).__init__()\n self.linear = torch.nn.Linear(3, 1) # one input/feature , one output\n # here where other NN layers are added", "def model(self, img, label, bias, filters):\n prediction, z, flat, layers = self.predict(bias, img, filters)\n\n loss = self.categorical_crossentropy(prediction, label)\n\n # backpropagation\n dout = prediction - np.asarray(label).reshape((15, 1))\n dflat, dw8, db8, dw7, db7 = self.dense_layer_backprop(dout, flat, filters[6:8], bias[6:8], z)\n\n dconv6 = dflat.reshape(layers[-1].shape)\n dconv6[layers[-1] <= 0] = 0\n dconv5, df6, db6 = self.conv_layer_backprop(dconv6, layers[-2], filters[5])\n dconv5[layers[-2] <= 0] = 0\n dpool2, df5, db5 = self.conv_layer_backprop(dconv5, layers[-3], filters[4])\n dconv4 = self.pooling_layer_backprop(dpool2, layers[-4])\n dconv4[layers[-4] <= 0] = 0\n dconv3, df4, db4 = self.conv_layer_backprop(dconv4, layers[-5], filters[3])\n dconv3[layers[-5] <= 0] = 0\n dpool1, df3, db3 = self.conv_layer_backprop(dconv3, layers[-6], filters[2])\n dconv2 = self.pooling_layer_backprop(dpool1, layers[-7])\n dconv2[layers[-7] <= 0] = 0\n dconv1, df2, db2 = self.conv_layer_backprop(dconv2, layers[-8], filters[1])\n dconv1[layers[-8] <= 0] = 0\n dimg, df1, db1 = 
self.conv_layer_backprop(dconv1, img[0], filters[0])\n\n weight_gradients = [df1, df2, df3, df4, df5, df6, dw7, dw8]\n bias_gradients = [db1, db2, db3, db4, db5, db6, db7, db8]\n\n return weight_gradients, bias_gradients, loss", "def network(self):\n inp = Input((self.env_dim))\n # #\n # x = Dense(256, activation='relu')(inp)\n # x = GaussianNoise(1.0)(x)\n # #\n # x = Flatten()(x)\n # x = Dense(128, activation='relu')(x)\n # x = GaussianNoise(1.0)(x)\n # #\n # out = Dense(self.act_dim, activation='tanh', kernel_initializer=RandomUniform())(x)\n # out = Lambda(lambda i: i * self.act_range)(out)\n # #\n\n x = conv_block(inp, 32, (2, 2), 8)\n x = conv_block(x, 64, (2, 2), 4)\n x = conv_block(x, 64, (2, 2), 3)\n x = Flatten()(x)\n x = Dense(256, activation='relu')(x)\n\n x = Dense(self.act_dim, activation='tanh', kernel_initializer=RandomUniform())(x)\n out = Lambda(lambda i: i * self.act_range)(x)\n\n return Model(inp, out)", "def model(self, inputs):\n h1 = dense(inputs, self.weights[0], self.biases[0], tf.nn.relu) #hidden layer 1\n h2 = dense(h1, self.weights[1], self.biases[1], tf.nn.relu) #hidden layer 2\n\n out = dense(h2, self.weights[2], self.biases[2])\n\n return out", "def nn(data):\n training_set = SupervisedDataSet*\n\n\n input_nodes = 3\n hidden_layer_1 = 10\n hidden_layer_2 = 10\n output_layer = 5\n\n net = buildNetwork(input_nodes, hidden_layer_1, hidden_layer_2, output_layer, bias=True, hiddenclass=TanhLayer)", "def model_train(x_train, y_train):\n\n global dic\n K = y_train.shape[1] ###10类\n model=[]##保存模型\n for k in range(K):\n data=x_train[y_train[:,k]==1]\n D,N=data.shape##60000,784\n print (D,N)\n\n pai=np.ones(K)/K\n Q=30\n bias=np.exp(-700)\n ##hidden variable Q*1\n # Z=np.array(np.random.normal(loc=0,scale=0.1,size=Q).reshape([Q,1]))##对于隐变量\n ##mean N*1\n miu=np.array([np.mean(data,axis=0)]*K).reshape(K,N,1)\n ##Factor Loading W N*Q\n scale = np.power(np.linalg.det(np.cov(data)), (1 / N))\n W = np.array(np.random.randn(K,N,Q))*np.sqrt(scale/Q)\n W_and_miu_new=np.array(np.zeros(shape=[K,N,Q+1]))\n # for k in range(K):\n # W_and_miu_new[k] = np.column_stack((W[k], miu[k]))\n ##variance D\n psi=np.diag(np.cov(data,rowvar=False))+bias\n print ('dasas',psi.shape)#####维度为(100,)\n ##Beta K##\n beta=np.zeros(shape=[K,Q,N])\n smooth = 0.1 * np.eye(100, M=None, k=0)\n # print (beta)\n const=(2*np.pi)**(-D/2)\n\n # print (scale)\n newloglikelyhood=0\n oldloglikelyhood=1001\n Ez_w_x=np.zeros(shape=[D,K,Q,1])#####60000*10*Q\n Ezz_w_x=np.zeros(shape=[D,K,Q,Q])####Q*10*Q\n Ez_w_x_2 = np.zeros(shape=[D, K, Q+1, 1])\n Ezz_w_x_2 = np.zeros(shape=[D, K, Q+1, Q+1])\n rnk = np.array([np.zeros(K) for i in range(D)])###初始rnk表 60000*10\n # print (rnk.shape)\n # while np.abs(oldloglikelyhood-newloglikelyhood)>0.0001: ###10类\n # while np.abs(oldloglikelyhood-newloglikelyhood)>500:\n for ite in range(10):\n # oldloglikelyhood=newloglikelyhood\n print ('迭代')\n\n ##-----------EEEE-step----------------##\n ##get responsibility of all data##\n for i in range(D):\n for k in range(K):\n # print (np.matmul(W[k],W[k].T).shape,psi.shape)\n cov=np.matmul(W[k],W[k].T)+np.diag(psi)\n\n # print (data[i].reshape(data[i].shape[0],1),miu[k].shape)\n mean=data[i].reshape(data[i].shape[0],1)-miu[k]\n # print(mean.shape)\n Gaussian=stats.norm.pdf(data[i],mean.reshape(-1),cov)\n # print(data[i])\n # print('得出的高斯函数值',Gaussian.pdf(data[i]))\n rnk[i][k]=pai[k]*np.mean(Gaussian)\n ##------------------------------------------##\n ##计算Ez和Ezz\n tem = psi + np.matmul(W[k], W[k].T)\n if np.linalg.det(tem) == 0:\n beta[k] = 
np.matmul(W[k].T, np.linalg.pinv(tem))\n # tem[0][0] = tem[0][0] + bias * 0.01\n else:\n tem = tem\n # print (np.matmul(W[k].T, np.linalg.inv(tem)))\n beta[k] = np.matmul(W[k].T, np.linalg.inv(tem))\n diff = data[i].reshape(data[i].shape[0],1) - miu[k]\n # diff = diff.reshape(diff.shape[0], 1)\n ##calculate E[z|w_k,x_i]\n Ez_w_x[i][k] = np.matmul(beta[k], (diff))\n data_i = data[i]\n # print ('qqqq', data_i.shape)\n data_i = data_i.reshape(data_i.shape[0], 1)\n line_one = np.ones(shape=(1, 1))\n ####Ez-------------------#####\n Ez_w_x_2[i][k] = np.vstack((Ez_w_x[i][k], line_one))\n Ezz_w_x[i][k] = (np.identity(Q) - np.matmul(beta[k], W[k]) + np.matmul(np.matmul(np.matmul(beta[k], diff), diff.T),beta[k].T))\n # print ('E2', Ezz_w_x.shape)\n ####------------Ezz--------------###\n Ezz_w_x_2[i][k] = np.column_stack((np.row_stack((Ezz_w_x[i][k], Ez_w_x[i][k].T)), Ez_w_x_2[i][k]))\n # print('得出',)\n #####------------单独计算W an miu\n W_and_miu_new[k]=np.column_stack((W[k],miu[k]))\n ##计算Q(log_likelihood)--------------------\n # print (rnk)\n sum = 0\n for i in range(D):\n for k in range(K):\n # print (W_and_miu_new[k].T, np.linalg.pinv(np.diag(psi)))\n xx = np.matmul(np.matmul(np.matmul(W_and_miu_new[k].T, np.linalg.pinv(np.diag(psi))),W_and_miu_new[k]), Ezz_w_x_2[i][k])\n p4 = 0.5 * rnk[i][k] * np.trace(xx)\n p2 = 0.5 * rnk[i][k] * np.matmul(np.matmul(data[i].T, np.linalg.pinv(np.diag(psi))),data[i])\n # print ('PPPP2',p2)\n p3 = 1 * rnk[i][k] * np.matmul(\n np.matmul(np.matmul(data[i].T, np.linalg.pinv(np.diag(psi))), W_and_miu_new[k]),Ez_w_x_2[i][k])\n p3 = p3\n sum = p2 - p3 + p4 + sum\n # print (psi)\n # print (np.log(abs(np.linalg.det(np.diag(psi)))))\n p1 = (D / 2) * np.log(abs(np.linalg.det(np.diag(psi))))\n # (2 * np.pi) ** (-D / 2)\n newloglikelyhood = const-p1 - sum\n print('NEWLOG', newloglikelyhood)\n ##现在在一次迭代中我们已经得到###\n ####----Q,Ezz_2,Ez_2,W_and_miu,rnk,psi的矩阵------###\n ##--------M-step----------------########\n for k in range(K):\n ##更新factor loading W and mean miu\n ##跟新pai 对i求和\n W_k_p1_sum = np.zeros(shape=[N,Q+1])\n Mu_k_p1_sum = np.zeros(shape=[Q +1,Q+1])\n pai_new_sum=0\n\n for i in range(D):\n W_k_p1_sum=rnk[i][k]*np.matmul(data[i].reshape(data[i].shape[0],1),Ez_w_x_2[i][k].T)+W_k_p1_sum\n Mu_k_p1_sum=rnk[i][k]*Ezz_w_x_2[i][k]+Mu_k_p1_sum\n ###pai的加和\n # print ('RNK',rnk[i][k])\n pai_new_sum=rnk[i][k]+pai_new_sum\n pai[k]=pai_new_sum/N #####更新PAI\n # print ('PPPAAAAAIII',pai)\n W_and_miu_new[k]=np.matmul(W_k_p1_sum,np.linalg.pinv(Mu_k_p1_sum))\n # print ('一个NEW',W_and_miu_new.shape)\n W[k,:,:]=W_and_miu_new[k,:,:W_and_miu_new[k].shape[1]-1]\n # print ('XIN WWW',W.shape)####更新WWWWW\n miu[k,:]=W_and_miu_new[k,:,-1].T.reshape(100,1) ####更新MIU!!\n ##更新协方差矩阵\n psi_new_p0=np.zeros(shape=[N,N])\n ##对i求和\n for i in range(D):\n ##对 k求和,\n data_i=data[i].reshape(data[i].shape[0],1)\n psi_new_p1=np.zeros(shape=[N,N])\n # print (psi_new_p1.shape)\n for k in range(K):\n pp1=np.matmul(W_and_miu_new[k],Ez_w_x_2[i][k])\n # print ('P111',p1.shape)\n psi_new_p1=rnk[i][k]*np.matmul((data_i-pp1),data_i.T)+psi_new_p1\n # print ('qqqqqqqqqq',psi_new_p1.shape)\n psi_new_p0=psi_new_p1+psi_new_p0\n # print (psi_new_p1.shape)\n ##最后的取对角线得新的协方差矩阵\n # print ('%%%%%%%',psi_new_p0.shape)\n #####见论文\n psi=np.diag(psi_new_p0)/D# 更新方差\n print ('PSI',psi.shape)\n # print ('PPPSSSII',Psi_New,np.trace(psi_new_p0))\n # rnk_=rnk/sumres\n # r.append(np.sum(rnk))##????????????\n # print('每一行数据的和', r)\n # # print('dasdas',len(r))\n # R.append(r)\n # print(np.array(R)[49])\n\n print('save_model')\n 
dic={'miu':miu,'pai':pai,'W':W,'psi':psi}\n # print ()\n # const=-N/2*log(np.linalg.det(psi))\n # part2=0\n # # part3=\n # for i in range(N):\n # for j in range(K):\n # part2=0.5*rnk*data[i].T*np.linalg.inv(psi)*data[i]+part2\n\n submodel = dic\n model.append(submodel)\n model=model\n # You can modify this to save other variables, etc \n # but make sure the name of the file is 'model.npz.\n np.savez_compressed('model.npz', model=model)", "def create_simple_model():\n\n input_shape = (160, 320, 3)\n \n m = Sequential()\n\n # 1. Add Normalization\n m.add(Lambda(lambda x: x/255.0 - 0.5,\n input_shape=input_shape,\n ))\n\n # 2. Flatten + 1 fully connected layer\n m.add(Flatten())\n m.add(Dense(10, activation='relu', init=my_init))\n \n # 3. Output Layer is a Dense layer with no activation function\n m.add(Dense(1))\n \n return m", "def mlp_model2():\n model = Sequential()\n model.add(Dense(256, activation='relu', input_shape=(X_train_scaled.shape[1], )))\n model.add(Dropout(0.2))\n model.add(Dense(128, activation='relu'))\n model.add(Dropout(0.2))\n model.add(Dense(64, activation='relu'))\n model.add(Dropout(0.2))\n model.add(Dense(32, activation='relu'))\n model.add(Dropout(0.2))\n model.add(Dense(FLAGS.nb_classes, activation='softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n model.summary()\n return model", "def __init__(self, layerNeurons, numberOfLayers, initialWeights = None, lowerBound = None, upperBound = None):\r\n \r\n # Ensure that there is at-least one input and one output layer in the network\r\n assert len(layerNeurons) > 1, \"At least one input layer and one output layer is needed\"\r\n \r\n # Get the total number of weights needed in the network\r\n totalWeightCount = NeuralNetwork.getSizeOfWeightVector(layerNeurons)*numberOfLayers\r\n \r\n # Initialise the weights with the initialiser or random values\r\n if initialWeights is None:\r\n if lowerBound is None:\r\n lowerBound=-1/np.sqrt(layerNeurons[0])\r\n if upperBound is None:\r\n upperBound=1/np.sqrt(layerNeurons[0])\r\n self.weights = np.random.uniform(lowerBound, upperBound, totalWeightCount)\r\n else:\r\n assert initialWeights.size == totalWeightCount, (\"Length of initial weight matrix incorrect. You need \"+str(totalWeightCount)+\" weights\")\r\n self.weights = initialWeights.view()\r\n \r\n self.weights.shape = (numberOfLayers, -1)\r\n # create an empty array of layers\r\n self.layers = []\r\n layerBlockStart = 0\r\n \r\n for layerInputDimention, layerOutputDimention in zip(layerNeurons, layerNeurons[1:]):\r\n # initialise each layer with its input and output dimentions and bi-directional pointers to the relivant weights\r\n layerBlockEnd = layerBlockStart+(layerInputDimention*layerOutputDimention)\r\n layerBiasEnd = layerBlockEnd+layerOutputDimention\r\n newLayer = batchNetworkLayer(layerInputDimention, layerOutputDimention, numberOfLayers, \r\n self.weights[..., :, layerBlockStart:layerBlockEnd], \r\n self.weights[..., :, layerBlockEnd:layerBiasEnd])\r\n self.layers.append(newLayer)\r\n \r\n layerBlockStart = layerBiasEnd", "def model(inputs, is_training):\n\n\n if data_format == 'channels_first':\n # Convert the inputs from channels_last (NHWC) to channels_first (NCHW).\n # This provides a large performance boost on GPU. 
See\n # https://www.tensorflow.org/performance/performance_guide#data_formats\n inputs = tf.transpose(inputs, [0, 3, 1, 2])\n\n\n #localize network to generate the transformation parameters\n # raw_inputs = inputs\n\n # inputs = tf.layers.conv2d(inputs = inputs, filters = 32, strides = 2, kernel_size = 5, padding = 'SAME', kernel_initializer=tf.variance_scaling_initializer())\n\n # print(inputs.shape)\n # inputs = tf.layers.max_pooling2d(inputs = inputs, pool_size = 2, strides = 2, padding = 'VALID')\n # print(inputs.shape)\n # inputs = tf.layers.conv2d(inputs = inputs, filters = 64, strides = 2, kernel_size = 5, padding = 'SAME', kernel_initializer = tf.variance_scaling_initializer())\n # print(inputs.shape)\n # inputs = tf.layers.max_pooling2d(inputs = inputs, pool_size = 2, strides = 2, padding = 'VALID')\n # print(inputs.shape)\n # inputs = tf.layers.dropout(inputs = inputs, rate = _DROPOUT_RATE)\n\n # inputs = tf.layers.flatten(inputs = inputs)\n\n # inputs = tf.layers.dense(inputs = inputs, units = 128)\n # print(inputs.shape)\n # trans_parameters = tf.layers.dense(inputs = inputs, units = 6)\n # print(trans_parameters.shape)\n # inputs = stn(input_fmap = raw_inputs, theta = trans_parameters, out_dims = [60, 60])\n\n\n\n #embedding network\n inputs = conv2d_fixed_padding(inputs = inputs, filters = 64, kernel_size = 7, strides = 2, data_format = data_format)\n\n print('height:', inputs.shape[1])\n inputs = tf.identity(inputs, 'initial_conv')\n\n inputs = tf.layers.max_pooling2d(inputs = inputs, pool_size = 3, strides = 2, padding = 'SAME', data_format = data_format)\n\n print('height:', inputs.shape[1])\n inputs = tf.identity(inputs, 'initial_max_pool')\n\n inputs = block_layer(inputs = inputs, filters = 64, block_fn = block_fn, blocks = layers[0], strides = 1, \n is_training = is_training, name = 'blcok_layer1', data_format = data_format)\n print('height:', inputs.shape[1])\n\n #attention module\n #input_fmap = inputs\n # inputs = tf.reshape(inputs, (-1, 64))\n #inputs = tf.layers.dense(inputs = inputs, units = 32, activation = tf.tanh)\n\n #inputs = tf.reshape(inputs, [-1, 32])\n #inputs = tf.layers.dense(inputs = inputs, units = 1, activation = tf.sigmoid)\n\n #attention_para = tf.reshape(inputs, [-1, 21, 21, 1])\n\n \n #inputs = tf.multiply(input_fmap, attention_para)\n\n inputs = block_layer(inputs = inputs, filters = 128, block_fn = block_fn, blocks = layers[1], strides = 2,\n is_training = is_training, name = 'block_layer2', data_format = data_format)\n print('height:', inputs.shape[1])\n inputs = block_layer(inputs = inputs, filters = 256, block_fn = block_fn, blocks = layers[2], strides = 2, \n is_training = is_training, name = 'block_layer3', data_format = data_format)\n print('height:', inputs.shape[1])\n inputs = block_layer(inputs = inputs, filters = 512, block_fn = block_fn, blocks = layers[3], strides = 2, \n is_training = is_training, name = 'block_layer4', data_format = data_format)\n\n print('height:', inputs.shape)\n inputs = batch_norm_relu(inputs, is_training, data_format)\n \n inputs = tf.layers.average_pooling2d(inputs = inputs, pool_size = 3, strides = 2, padding = 'VALID', data_format = data_format)\n\n inputs = tf.layers.dropout(inputs = inputs, rate = _DROPOUT_RATE)\n\n inputs = tf.identity(inputs, 'final_avg_pool')\n\n inputs = tf.layers.flatten(inputs = inputs)\n\n #TODO\n inputs = tf.layers.dense(inputs = inputs, units = num_classes)\n\n print(inputs.shape)\n outputs = tf.identity(inputs, 'final_dense')\n\n return outputs", "def define(self, optimizer = 
Adam(lr=1e-5)): \n \n self.optimizer = optimizer\n\n model = Sequential()\n\n #Layer 1\n model.add(Conv2D( filters = 96, \n kernel_size = (11,11), \n strides = 4, \n padding = 'same', \n activation = 'relu', \n input_shape = (224, 224, 3), \n kernel_initializer = 'he_normal'))\n model.add(MaxPooling2D( pool_size = (3,3), \n strides = (2,2), \n padding= 'same', \n data_format = None)) # overlapping pooling\n #Layer 2\n model.add(Conv2D( filters = 256, \n kernel_size = (5,5), \n strides = 1, \n padding = 'same', \n activation = 'relu', \n kernel_initializer = 'he_normal'))\n model.add(MaxPooling2D( pool_size = (3,3), \n strides = (2,2), \n padding= 'same', \n data_format = None)) \n #Layer 3\n model.add(Conv2D( filters = 384, \n kernel_size = (3,3), \n strides = 1, padding = 'same', \n activation = 'relu', kernel_initializer = 'he_normal'))\n #Layer 4\n model.add(Conv2D( filters = 384, \n kernel_size = (3,3), \n strides = 1, padding = 'same', \n activation = 'relu', \n kernel_initializer = 'he_normal'))\n #Layer 5\n model.add(Conv2D( filters = 256, \n kernel_size = (3,3), \n strides = 1, padding = 'same', \n activation = 'relu', \n kernel_initializer = 'he_normal'))\n #Layer 6\n model.add(MaxPooling2D( pool_size = (3,3), \n strides = (2,2), \n padding= 'same', \n data_format = None))\n \n #Layer 7\n model.add(Flatten())\n \n #Layer 8\n model.add(Dense( units = 4096, activation = 'relu'))\n model.add(Dense( units = 1024, activation = 'relu'))\n model.add(Dense( units = 512, activation = 'relu'))\n model.add(Dense( units = 256, activation = 'relu'))\n model.add(Dense( units = 128, activation = 'relu'))\n \n #Layer end\n model.add(Dense( units = 3, activation = 'softmax'))\n model.summary()\n \n self.model = model", "def sub_model_net(self):\r\n # define input\r\n x = keras.Input(shape=(960,), name='input')\r\n fc_2 = keras.layers.Dense(160, name='fc_2')(x)\r\n add_1 = keras.layers.Activation('relu')(fc_2)\r\n drop = keras.layers.Dropout(0.5)\r\n # output\r\n y_hat = keras.layers.Dense(1283, activation='softmax', name='output')(add_1)\r\n model = keras.Model(inputs=x, outputs=y_hat)\r\n\r\n return model", "def construct_model():\n # model = Sequential()\n # model.add(Dense(units=64, activation='relu', input_dim=100))\n # model.add(Dense(units=10, activation='softmax'))\n # model.compile(loss='categorical_crossentropy',\n # optimizer='sgd',\n # metrics=['accuracy'])\n # return model\n\n model = Sequential()\n # Input Layer\n model.add(Conv2D(64, 3, data_format='channels_last', activation='relu', padding='same',\n input_shape=(img_width, img_height, 3)))\n model.add(MaxPool2D(pool_size=2, strides=2))\n # Hidden Layer 1\n model.add(Conv2D(64, 3, activation='relu', padding='same'))\n model.add(MaxPool2D(pool_size=2, strides=2))\n\n # Hidden Layer 2\n model.add(Conv2D(128, 3, activation='relu', padding='same'))\n model.add(Conv2D(128, 3, activation='relu', padding='same', strides=2))\n model.add(MaxPool2D(pool_size=2, strides=2))\n\n # Hidden Layer 3\n model.add(Conv2D(256, 3, activation='relu', padding='same'))\n model.add(Conv2D(256, 3, activation='relu', padding='same'))\n model.add(Conv2D(256, 3, activation='relu', padding='same', strides=2))\n model.add(MaxPool2D(pool_size=2, strides=2))\n\n\n # Fully Connected Layer\n model.add(Flatten())\n # 512 Neuron Layer\n model.add(Dense(512, activation='relu'))\n model.add(Dropout(0.5))\n # Output Layer\n model.add(Dense(num_of_classes))\n model.add(Activation('softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='adam', 
metrics=['accuracy'])\n return model", "def __init__(self):\n #self.NN = Neural_Network()\n y_vals = pd.read_csv('training_data_y.csv')\n x_vals_original = pd.read_csv('training_data_x.csv')\n x_vals_original.columns = ['R1', 'G1', 'B1', 'W1', 'R2', 'G2', 'B2', 'W2', 'R3', 'G3', 'B3', 'W3']\n total_x_train = self.getNewDF_X(x_vals_original)\n total_y_train = self.getNewDF_Y(y_vals) \n #training data is numpy arrays here\n x_arr = np.asarray(total_x_train,dtype=np.float32)\n y_train = np.asarray(total_y_train,dtype=np.float32)\n #convert training data to tensors and scale it\n x_train = torch.tensor((x_arr), dtype=torch.float)\n self.x_train = self.scaleInputTestData(x_train)\n self.y_train = torch.tensor((y_train), dtype=torch.float) / 100", "def MLP_model(self):\n print(\"Building model..\")\n self.model = Sequential()\n\n # first hidden layer (0)\n self.model.add(Dense(self.h_nodes0, input_dim=self.input_size, use_bias=True))\n self.model.add(Activation(self.activation0))\n self.model.add(Dropout(self.dropout0))\n\n # second hidden layer (1)\n if self.h_nodes1 != None:\n self.model.add(Dense(self.h_nodes1, use_bias=True))\n self.model.add(Activation(self.activation1))\n self.model.add(Dropout(self.dropout1))\n\n # third hidden layer (2)\n if self.h_nodes2 != None:\n self.model.add(Dense(self.h_nodes2, use_bias=True))\n self.model.add(Activation(self.activation2))\n self.model.add(Dropout(self.dropout2))\n\n #output layer\n self.model.add(Dense(self.output_size))\n self.model.add(Activation(self.activation_out))\n\n #compile model\n self.model.compile(loss=self.loss, optimizer=self.optimizer, metrics=[R_squared])\n\n return self.model", "def build_model(self):\n for link in self.links:\n # if from neuron is input to graph, add it to input_neurons set\n if self.is_input_neuron(link.from_neuron_id):\n self.input_neurons.add(link.from_neuron_id)\n # add weight to neuron\n if link.to_neuron_id not in self.weights:\n self.weights[link.to_neuron_id] = []\n self.weights[link.to_neuron_id].append(link.weight)\n # add input to neuron\n if link.to_neuron_id not in self.connections:\n self.connections[link.to_neuron_id] = []\n self.connections[link.to_neuron_id].append(link.from_neuron_id)", "def __init__(self,inputSize,outputSize, *args, **kwds):\n #currently the code is only for 2 hidden layers, apart from in and out\n self._saveFile = kwds.get('saveFile')\n self._inputSize = inputSize\n self._outputSize= outputSize\n self._layer1 = keras.layers.Dense(128,activation='relu')\n self._layer2 = keras.layers.Dense(64,activation='relu') \n self._layer3 = keras.layers.Dense(128,activation='relu')\n self._piLayer = keras.layers.Dense(self._outputSize-1,activation='softmax')\n self._zLayer = keras.layers.Dense(1,activation='tanh')\n self._inputs = keras.Input(shape=(self._inputSize,)) #returns placeholder\n x = self._layer1(self._inputs)\n x = self._layer2(x)\n x = self._layer3(x)\n self._outPi = self._piLayer(x)\n self._outZ = self._zLayer(x)\n self._output = keras.layers.concatenate([self._outPi,self._outZ],axis = -1)\n self._model = keras.Model(inputs=self._inputs,outputs=self._outPi)\n# self._model = keras.Model(inputs=self._inputs,outputs=self._outPi)\n self._model.compile(optimizer=keras.optimizers.Adam(lr=0.001, beta_1=0.99, beta_2=0.999, epsilon=1e-10, decay=0.0001),\n loss=\"categorical_crossentropy\",\n metrics=['accuracy'])\n self._epochSize = 256", "def mlp_model(self):\n\n model = Sequential()\n model.add(Dense(self.dense1, input_shape=(784,)))\n model.add(Activation(self.activation))\n 
model.add(Dropout(self.drop1))\n\n model.add(Dense(self.dense2))\n model.add(Activation(self.activation))\n model.add(Dropout(self.drop2))\n\n model.add(Dense(10))\n model.add(Activation('softmax'))\n\n return model", "def build_model():\n mdl = Sequential()\n\n # normalization\n mdl.add(Lambda(lambda x: x/128. - 1, input_shape=IMAGE_SHAPE, name=\"input\"))\n\n # trim image\n mdl.add(Lambda(lambda x: x[:, 10:-10, :, :]))\n\n # convolutions\n mdl.add(Convolution2D(24, 5, 5, subsample=(2, 2), border_mode='same',))\n mdl.add(MaxPooling2D(pool_size=(2, 2)))\n mdl.add((Dropout(0.5)))\n mdl.add(Activation('relu'))\n\n mdl.add(Convolution2D(36, 5, 5, subsample=(2, 2), border_mode='same',))\n mdl.add(MaxPooling2D(pool_size=(2, 2)))\n mdl.add((Dropout(0.5)))\n mdl.add(Activation('relu'))\n\n mdl.add(Convolution2D(48, 5, 5, subsample=(2, 2), border_mode='same',))\n mdl.add(MaxPooling2D(pool_size=(2, 2)))\n mdl.add((Dropout(0.5)))\n mdl.add(Activation('relu'))\n\n mdl.add(Convolution2D(64, 3, 3, subsample=(1, 1), border_mode='same',))\n mdl.add((Dropout(0.5)))\n mdl.add(Activation('relu'))\n\n mdl.add(Convolution2D(64, 3, 3, subsample=(1, 1), border_mode='same',))\n mdl.add((Dropout(0.5)))\n mdl.add(Activation('relu'))\n\n mdl.add(Flatten())\n\n mdl.add(Dense(128, activation='relu'))\n mdl.add(Dense(64, activation='relu'))\n mdl.add(Dense(1, name=\"output\"))\n\n mdl.summary()\n\n return mdl", "def get_model():\r\n model = Sequential([\r\n\r\n Lambda(normalize, input_shape=(66, 200, 3)),\r\n\r\n Convolution2D(16, (3, 3), padding='same', activation='relu', strides=2),\r\n Convolution2D(16, (3, 3), padding='same', activation='relu', strides=1),\r\n Convolution2D(24, (3, 3), padding='same', activation='relu', strides=1),\r\n MaxPooling2D(pool_size=(3, 3)),\r\n\r\n Convolution2D(24, (3, 3), padding='same', activation='relu', strides=1),\r\n Convolution2D(32, (3, 3), padding='same', activation='relu', strides=1),\r\n MaxPooling2D(pool_size=(3, 3)),\r\n\r\n Convolution2D(32, (3, 3), padding='same', activation='relu', strides=1),\r\n Convolution2D(48, (3, 3), padding='same', activation='relu', strides=1),\r\n MaxPooling2D(pool_size=(3, 3)),\r\n\r\n Convolution2D(48, (3, 3), padding='same', activation='relu', strides=1),\r\n Convolution2D(32, (3, 3), padding='same', activation='relu', strides=1),\r\n Convolution2D(24, (3, 3), padding='same', activation='relu', strides=2),\r\n Convolution2D(1, (3, 3), padding='same', activation='relu', strides=2),\r\n Flatten(),\r\n\r\n\r\n ])\r\n\r\n model.compile(optimizer='adam', loss='mse', metrics=['accuracy'])\r\n return model", "def genModel():\n inp = (160, 320, 3) # initial image size\n oup1 = (160, 320, 1) # gray image size\n oup2 = (80, 320, 1) # cropped image size\n\n model = Sequential()\n model.add(Lambda(color2gray, input_shape = inp, output_shape= oup1))\n # crop top 50 pixels, bottom 30 pixels, left/right 0 pixels\n model.add(Cropping2D(cropping=((50,30), (0,0))))\n # Preprocess incoming data, centered around zero with small standard deviation \n model.add(Lambda(lambda x: x/127.5 - 1., output_shape= oup2))\n model.add(Convolution2D(24,5,5,subsample=(1,2), activation=\"relu\"))\n model.add(Convolution2D(36,5,5,subsample=(2,2), activation=\"relu\"))\n model.add(Convolution2D(48,5,5,subsample=(2,2), activation=\"relu\"))\n model.add(Convolution2D(64,3,3, activation=\"relu\"))\n model.add(Convolution2D(64,3,3, activation=\"relu\"))\n model.add(Flatten())\n model.add(Dropout(0.3))\n model.add(Dense(180, activation=\"relu\"))\n model.add(Dense(60))\n 
model.add(Dense(10, activation=\"relu\"))\n model.add(Dense(1))\n # print layer size for each model layers\n for layer in model.layers:\n print(layer.get_output_at(0).get_shape().as_list())\n return model", "def build(self):\n\n self.W = self.init([self.n_atom_input_feat, self.n_output])\n self.b = model_ops.zeros(shape=[\n self.n_output,\n ])\n\n self.trainable_weights = self.W + self.b", "def BuildModel(ANNSetup,model):\n\n if(isinstance(ANNSetup.Activ,str)):\n model.add(Dense(ANNSetup.Neurons[0], kernel_regularizer=l2(ANNSetup.Regu), activation=ANNSetup.Activ, kernel_initializer=Winit(ANNSetup.Winit), input_dim=ANNSetup.InputDim))\n if(ANNSetup.Dropout != None):\n model.add(Dropout(ANNSetup.Dropout))\n for i in range(1,len(ANNSetup.Neurons)):\n if(i == len(ANNSetup.Neurons)-1):\n model.add(Dense(ANNSetup.Neurons[i], kernel_initializer=Winit(ANNSetup.Winit), activation='sigmoid'))\n else:\n model.add(Dense(ANNSetup.Neurons[i], kernel_initializer=Winit(ANNSetup.Winit), activation=ANNSetup.Activ))\n else:\n model.add(Dense(ANNSetup.Neurons[0], kernel_regularizer=l2(ANNSetup.Regu), kernel_initializer=Winit(ANNSetup.Winit), input_dim=ANNSetup.InputDim))\n model.add(LeakyReLU(alpha=ANNSetup.Activ))\n if(ANNSetup.Dropout != None):\n model.add(Dropout(ANNSetup.Dropout))\n for i in range(1,len(ANNSetup.Neurons)):\n if(i == len(ANNSetup.Neurons)-1):\n model.add(Dense(ANNSetup.Neurons[i], kernel_initializer=Winit(ANNSetup.Winit), activation='sigmoid'))\n else:\n model.add(Dense(ANNSetup.Neurons[i], kernel_initializer=Winit(ANNSetup.Winit)))\n model.add(LeakyReLU(alpha=ANNSetup.Activ))\n\n return model", "def MI_Net_with_DS(dataset):\n # load data and convert type\n train_bags = dataset['train']\n test_bags = dataset['test']\n\n # convert bag to batch\n train_set = convertToBatch(train_bags)\n test_set = convertToBatch(test_bags)\n dimension = train_set[0][0].shape[1]\n weight = [1.0, 1.0, 1.0, 0.0]\n\n # data: instance feature, n*d, n = number of training instance\n data_input = Input(shape=(dimension,), dtype='float32', name='input')\n\n # fully-connected\n fc1 = Dense(256, activation='relu', kernel_regularizer=l2(args.weight_decay))(data_input)\n fc2 = Dense(128, activation='relu', kernel_regularizer=l2(args.weight_decay))(fc1)\n fc3 = Dense(64, activation='relu', kernel_regularizer=l2(args.weight_decay))(fc2)\n\n # dropout\n dropout1 = Dropout(rate=0.5)(fc1)\n dropout2 = Dropout(rate=0.5)(fc2)\n dropout3 = Dropout(rate=0.5)(fc3)\n\n # features pooling\n fp1 = Feature_pooling(output_dim=1, kernel_regularizer=l2(args.weight_decay), pooling_mode=args.pooling_mode, name='fp1')(dropout1)\n fp2 = Feature_pooling(output_dim=1, kernel_regularizer=l2(args.weight_decay), pooling_mode=args.pooling_mode, name='fp2')(dropout2)\n fp3 = Feature_pooling(output_dim=1, kernel_regularizer=l2(args.weight_decay), pooling_mode=args.pooling_mode, name='fp3')(dropout3)\n\n # score average\n mg_ave =average([fp1,fp2,fp3], name='ave')\n\n model = Model(inputs=[data_input], outputs=[fp1, fp2, fp3, mg_ave])\n sgd = SGD(lr=args.init_lr, decay=1e-4, momentum=args.momentum, nesterov=True)\n model.compile(loss={'fp1':bag_loss, 'fp2':bag_loss, 'fp3':bag_loss, 'ave':bag_loss}, loss_weights={'fp1':weight[0], 'fp2':weight[1], 'fp3':weight[2], 'ave':weight[3]}, optimizer=sgd, metrics=[bag_accuracy])\n\n # train model\n t1 = time.time()\n num_batch = len(train_set)\n for epoch in range(args.max_epoch):\n train_loss, train_acc = train_eval(model, train_set)\n test_loss, test_acc = test_eval(model, test_set)\n print('epoch=', epoch, 
' train_loss= {:.3f}'.format(train_loss), ' train_acc= {:.3f}'.format(train_acc), ' test_loss={:.3f}'.format(test_loss), ' test_acc= {:.3f}'.format(test_acc))\n t2 = time.time()\n print('run time:', (t2-t1) / 60, 'min')\n print('test_acc={:.3f}'.format(test_acc))\n\n return test_acc", "def __init__(self, in_features, out_features):\n \n ########################\n # PUT YOUR CODE HERE #\n #######################\n mean = 0\n std_dev = 0.0001\n #print(in_features)\n #print(out_features)\n # create weight matrices\n weight = np.random.normal(mean, std_dev, (out_features, in_features))\n #print(weight.shape)\n grad_weight = np.zeros((in_features, out_features))\n\n # create biases (in batches)\n bias = np.zeros(out_features)\n grad_bias = np.zeros(out_features)\n\n self.params = {'weight': weight, 'bias': bias}\n self.grads = {'weight': bias, 'bias': grad_bias}\n\n ########################\n # END OF YOUR CODE #\n #######################", "def create_neural_network():\n network_input = keras.layers.Input((NETWORK_INPUT_SIZE,))\n network_layer = keras.layers.Dense(100, kernel_initializer='random_uniform', activation='tanh')(network_input)\n network_layer = keras.layers.Dense(100, kernel_initializer='random_uniform', activation='tanh')(network_layer)\n network_output = keras.layers.Dense(NETWORK_OUTPUT_SIZE, kernel_initializer='random_uniform', activation='linear')(network_layer)\n network = keras.models.Model(inputs=network_input, outputs=network_output)\n network.compile(loss=\"mse\", optimizer=\"Adam\")\n return network", "def create_nn(self):\n\n\t\tmodel = Sequential()\n\t\tmodel.add(Dense(32, input_dim=self.state_size, activation='relu'))\n\t\tmodel.add(Dense(32, activation='relu'))\n\t\tmodel.add(Dense(64, activation='relu'))\n\t\tmodel.add(Dense(self.action_size, activation='linear'))\n\t\tmodel.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))\n\t\treturn model", "def build_neuron_network(nb_features_map: Union[Sequence[int], None] = None,\n size_linear_layers: Union[Sequence[int], None] = None,\n dropout_rate: Union[Tuple[float, float], float] = 0.3,\n conv_kernel_size: Union[Sequence[int], int] = 3,\n conv_stride: int = 1,\n conv_padding: int = 1,\n conv_activation: str = \"relu\",\n conv_architecture: str = \"CPD\",\n pool_kernel_size: int = 2,\n pool_stride: int = 2,\n dense_activation: str = \"relu\",\n pretrained: Union[str, None] = None,\n grayscale: bool = True,\n optimizer: str = \"Adam\",\n weight_decay: float = 0.,\n learning_rate: float = 0.001,\n ) -> Tuple[nn.Module, List, torch.optim.Optimizer]:\n # Initializations\n if pretrained is not None:\n grayscale = False\n if grayscale:\n channels = 1\n else:\n channels = 3\n if nb_features_map is None:\n nb_features_map = [8]\n if size_linear_layers is None:\n size_linear_layers = []\n height = 224\n width = 224\n module = nn.Module()\n shapes = [(\"input\", channels, height, width)]\n layers = {\"extractor\": [], \"regressor\": []}\n if not hasattr(dropout_rate, \"__len__\"):\n dropout_rate = (dropout_rate, 0.)\n next_dropout_rate = dropout_rate[0]\n # If a pretrained model is used:\n if pretrained is None:\n # Input checks\n if hasattr(conv_kernel_size, \"__len__\"):\n if len(conv_kernel_size) != len(nb_features_map):\n raise ValueError(\"The length of nb_features_map shall match the length of conv_kernel_size\")\n else:\n conv_kernel_size = [conv_kernel_size] * len(nb_features_map)\n # Feature extractor\n next_layer_type = itertools.cycle(conv_architecture)\n nb_feature_map = None\n i = 0\n while True:\n layer_type 
= next(next_layer_type)\n if layer_type == \"C\":\n # Convolutional layer\n try:\n nb_feature_map = nb_features_map[i]\n except IndexError:\n break\n name = \"conv2d-{:02d}\".format(i+1)\n conv = nn.Conv2d(shapes[-1][1], nb_feature_map, conv_kernel_size[i], stride=conv_stride,\n padding=conv_padding)\n layers[\"extractor\"].append((name, conv))\n h, w = output_shape_conv_and_pool_layer(rows=shapes[-1][2], columns=shapes[-1][3],\n kernel=conv_kernel_size[i], stride=conv_stride,\n padding=conv_padding)\n shapes.append((name, nb_feature_map, h, w))\n i += 1\n # Activation\n if conv_activation == \"relu\":\n activ = nn.ReLU()\n elif conv_activation == \"elu\":\n activ = nn.ELU(alpha=0.1)\n elif conv_activation == \"leaky\":\n activ = nn.LeakyReLU()\n else:\n activ = nn.ReLU()\n name = \"{}-{:02d}\".format(conv_activation, i)\n layers[\"extractor\"].append((name, activ))\n # activation does not change the size\n shapes.append((name, shapes[-1][1], shapes[-1][2], shapes[-1][3]))\n elif layer_type == \"P\":\n # Max-pooling\n name = \"maxpool2d-{:02d}\".format(i)\n pool = nn.MaxPool2d(pool_kernel_size, pool_stride)\n layers[\"extractor\"].append((name, pool))\n h, w = output_shape_conv_and_pool_layer(rows=shapes[-1][2], columns=shapes[-1][3],\n kernel=pool_kernel_size, stride=pool_stride)\n shapes.append((name, nb_feature_map, h, w))\n elif layer_type == \"D\":\n # Dropout\n if next_dropout_rate > 0.:\n name = \"dropout-{:02d}\".format(i)\n dropout = nn.Dropout(p=next_dropout_rate)\n layers[\"extractor\"].append((name, dropout))\n # Dropout does not change the size\n shapes.append((name, shapes[-1][1], shapes[-1][2], shapes[-1][3]))\n next_dropout_rate += dropout_rate[1]\n elif layer_type == \"B\":\n # Batch normalization\n name = \"batchnorm-{:02d}\".format(i)\n batch = nn.BatchNorm2d(shapes[-1][1])\n layers[\"extractor\"].append((name, batch))\n # Batch norm. 
does not change the size\n shapes.append((name, shapes[-1][1], shapes[-1][2], shapes[-1][3]))\n # Add a flatten layer\n name = \"flatten\"\n flatten = nn.Flatten(1)\n layers[\"extractor\"].append((name, flatten))\n shapes.append((name, shapes[-1][1] * shapes[-1][2] * shapes[-1][3]))\n # Create extractor module\n extractor = nn.Sequential(OrderedDict(layers[\"extractor\"]))\n module.add_module(\"extractor\", extractor)\n elif pretrained == \"VGG16\":\n pre_trained = models.vgg16(pretrained=True)\n modules = []\n for _name, _module in pre_trained.named_children():\n if _name != 'classifier':\n modules.append((_name, _module))\n modules.append((\"flatten\", nn.Flatten(1)))\n vgg16 = nn.Sequential(OrderedDict(modules))\n # Freeze all parameters in the pre-trained model\n # So we prevent gradients from being calculated, it will save computation time\n for param in vgg16.parameters():\n param.requires_grad = False\n module.add_module('extractor', vgg16)\n shapes.append((pretrained, 25088))\n else:\n raise ValueError(f\"Unknown pre-trained model '{pretrained}'.\")\n # Regressor\n for i, size_linear_layer in enumerate(size_linear_layers):\n # Add a linear layer\n name = \"linear-{:02d}\".format(i + 1)\n linear = nn.Linear(shapes[-1][1], size_linear_layer)\n layers[\"regressor\"].append((name, linear))\n shapes.append((name, size_linear_layer))\n # Activation\n if dense_activation == \"relu\":\n activ = nn.ReLU()\n elif dense_activation == \"elu\":\n activ = nn.ELU(alpha=0.1)\n elif dense_activation == \"leaky\":\n activ = nn.LeakyReLU()\n else:\n activ = nn.ReLU()\n name = \"{}-{:02d}\".format(dense_activation, i + 1)\n layers[\"regressor\"].append((name, activ))\n shapes.append((name, shapes[-1][1])) # activation does not change the size\n # Dropout\n if next_dropout_rate > 0.:\n name = \"dropout-{:02d}\".format(i + 1)\n dropout = nn.Dropout(p=next_dropout_rate)\n layers[\"regressor\"].append((name, dropout))\n shapes.append((name, shapes[-1][1])) # Dropout does not change the size of array\n next_dropout_rate += dropout_rate[1]\n # Add the final layer, the output size is fixed to 68 x 2 = 136\n name = \"output\"\n linear = nn.Linear(shapes[-1][1], 136)\n layers[\"regressor\"].append((name, linear))\n shapes.append((name, 136))\n # Create regressor module\n regressor = nn.Sequential(OrderedDict(layers[\"regressor\"]))\n module.add_module(\"regressor\", regressor)\n # Weight initialization\n module.apply(weight_initialization)\n # Optimizer\n if optimizer == \"Adam\":\n optim = torch.optim.Adam(module.parameters(), lr=learning_rate, weight_decay=weight_decay)\n elif optimizer == \"AdamW\":\n optim = torch.optim.AdamW(module.parameters(), lr=learning_rate, weight_decay=weight_decay)\n elif optimizer == \"SGD\":\n optim = torch.optim.SGD(module.parameters(), lr=learning_rate, weight_decay=weight_decay, momentum=0.9)\n else:\n raise ValueError(f\"Unknown optimizer {optimizer}.\")\n return module, shapes, optim", "def create_model():\n model = Sequential()\n\n model.add(Lambda(lambda x: x/127.5-1.0, input_shape=INPUT_SHAPE))\n\n # Now we are going to add some Convulation Layers identical to paper\n\n model.add(Conv2D(24, (5, 5), activation='elu', strides=(2, 2)))\n model.add(BatchNormalization()) \n model.add(Conv2D(36, (5, 5), activation='elu', strides=(2, 2)))\n model.add(BatchNormalization()) \n model.add(Conv2D(48, (5, 5), activation='elu', strides=(2, 2)))\n model.add(BatchNormalization()) \n model.add(Conv2D(64, (3, 3), activation='elu'))\n model.add(BatchNormalization()) \n model.add(Conv2D(64, 
(3, 3), activation='elu'))\n\n # And now finally we will Flatten our layers and eventually use Fully Connected Layers to reduce features.\n\n model.add(Dropout(0.4))\n model.add(Flatten())\n\n model.add(Dense(256, activation='elu'))\n model.add(Dropout(0.2))\n model.add(Dense(100, activation='elu'))\n model.add(Dropout(0.2))\n model.add(Dense(25, activation='elu'))\n model.add(Dense(1))\n\n model.summary()\n\n return model", "def make_neural_net_challenging():\n i0 = Input('i0', -1.0) # this input is immutable\n i1 = Input('i1', 0.0)\n i2 = Input('i2', 0.0)\n seed_random()\n wt1 = random_weight()\n wt2 = random_weight()\n wt3 = random_weight()\n wt4 = random_weight()\n wt5 = random_weight()\n wt6 = random_weight()\n wt7 = random_weight()\n wt8 = random_weight()\n wt9 = random_weight()\n wt10 = random_weight()\n\t\n w1A = Weight('w1A', wt1)\n w2A = Weight('w2A', wt2)\n w1B = Weight('w1B', wt3)\n w2B = Weight('w2B', wt4)\n wA = Weight('wA', -1)\n wB = Weight('wB', -1)\n wAC = Weight('wAC', wt5)\n wBC = Weight('wBC', wt6)\n wC = Weight('wC', -1)\n wAD = Weight('wAD', wt7)\n wBD = Weight('wBD', wt8)\n wD = Weight('wD', -1)\n wCE = Weight('wCE', wt9)\n wDE = Weight('wDE', wt10)\n wE = Weight('wE', -1)\n\n # Inputs must be in the same order as their associated weights\n A = Neuron('A', [i1,i2,i0], [w1A,w2A,wA])\n B = Neuron('B', [i1,i2,i0], [w1B,w2B,wB])\n C = Neuron('C', [A,B,i0], [wAC,wBC,wC])\n D = Neuron('D', [A,B,i0], [wAD,wBD,wD])\n E = Neuron('D', [C,D,i0], [wCE,wDE,wE])\n P = PerformanceElem(E, 0.0)\n\n net = Network(P,[A, B, C, D, E])\n return net", "def __init__(self, args):\n \n super(MicroNet, self).__init__()\n self.conv1 = nn.Conv2d(3, 1, kernel_size=1)\n self.conv2 = nn.Conv2d(1, 29, kernel_size=5)\n self.maxpool2 = nn.MaxPool2d(3, stride=2 , ceil_mode=True)\n self.conv3 = nn.Conv2d(29, 59, kernel_size=3)\n self.maxpool3 = nn.MaxPool2d(3, stride=2 , ceil_mode=True)\n self.conv4 = nn.Conv2d(59, 74, kernel_size=3)\n self.maxpool4 = nn.MaxPool2d(3, stride=2 , ceil_mode=True)\n self.conv2_drop = nn.Dropout2d()\n self.conv3_drop = nn.Dropout2d()\n self.fc1 = nn.Linear(1184, 300)\n self.fc2 = nn.Linear(300, args.num_classes)\n self.conv0_bn = nn.BatchNorm2d(3)\n self.conv1_bn = nn.BatchNorm2d(1)\n self.conv2_bn = nn.BatchNorm2d(29)\n self.conv3_bn = nn.BatchNorm2d(59)\n self.conv4_bn = nn.BatchNorm2d(74)\n self.dense1_bn = nn.BatchNorm1d(300)", "def neural_net(self, layers):\n model = nn.Sequential()\n for l in range(0, len(layers) - 1):\n model.add_module(\"layer_\"+str(l), nn.Linear(layers[l],layers[l+1], bias=True))\n if l != len(layers) - 2:\n model.add_module(\"tanh_\"+str(l), nn.Tanh())\n\n return model", "def __init__(self, input, n_in, n_out):\r\n # start-snippet-1\r\n # initialize with 0 the weights W as a matrix of shape (n_in, n_out)\r\n self.W = theano.shared(value=np.zeros((n_in, n_out), dtype=theano.config.floatX), name='W', borrow=True)\r\n # initialize the biases b as a vector of n_out 0s\r\n self.b = theano.shared(value=np.zeros((n_out,), dtype=theano.config.floatX), name='b', borrow=True)\r\n\r\n # symbolic expression for computing the matrix of class-membership\r\n # probabilities\r\n # Where:\r\n # W is a matrix where column-k represent the separation hyperplane for class-k\r\n # x is a matrix where row-j represents input training sample-j\r\n # b is a vector where element-k represent the free parameter of hyperplane-k\r\n # p_y_given_x is a matrix\r\n self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)\r\n\r\n # symbolic description of how to compute 
prediction as class whose\r\n # probability is maximal\r\n self.y_pred = T.argmax(self.p_y_given_x, axis=1)\r\n self.y_pred_prob = T.max(self.p_y_given_x, axis=1)\r\n\r\n # parameters of the model\r\n self.params = [self.W, self.b]\r\n\r\n # keep track of model input\r\n self.input = input", "def __init__(self):\n self.model = Sequential()\n self.model.add(AveragePooling2D(pool_size=(4, 4), input_shape=(224, 224, 3)))\n self.model.add(Conv2D(16, (9, 9)))\n self.model.add(Activation('relu'))\n self.model.add(MaxPooling2D(pool_size=(2, 2)))\n self.model.add(Conv2D(16, (5, 5)))\n self.model.add(Activation('relu'))\n self.model.add(MaxPooling2D(pool_size=(2, 2)))\n self.model.add(Flatten())\n self.model.add(Dropout(0.5))\n self.model.add(Dense(1, activation='sigmoid'))\n self.model.compile(loss=binary_crossentropy,\n optimizer=keras.optimizers.Adadelta(),\n metrics=['accuracy'])", "def neural_network(X, Y, X_test, Y_test, num_layers, activation):\n \n X_n = (X - np.mean(X, axis = 0)) / np.std(X, axis = 0)\n Y_n = (Y - np.mean(Y, axis = 0)) / np.std(Y, axis = 0)\n \n if num_layers == 1:\n num_neurons = 1000\n elif num_layers == 2:\n num_neurons = 200\n elif num_layers == 3:\n num_neurons = 141\n elif num_layers == 4:\n num_neurons = 115\n\n if activation == \"ReLU\":\n activation = ReLUActivation\n elif activation == \"tanh\":\n activation = TanhActivation\n else:\n print('Null activation')\n\n model = Model(X_n.shape[1])\n model.addLayer(DenseLayer(num_neurons,activation()))\n if num_layers >= 2:\n model.addLayer(DenseLayer(num_neurons,activation()))\n if num_layers >= 3:\n model.addLayer(DenseLayer(num_neurons,activation()))\n\n model.addLayer(DenseLayer(Y.shape[1],LinearActivation()))\n model.initialize(QuadraticCost())\n \n Y_pred = model.predict((X_test - np.mean(X, axis = 0)) / np.std(X, axis = 0)) \n Y_pred = Y_pred * np.std(Y, axis = 0) + np.mean(Y, axis = 0)\n mse = np.mean(np.sqrt(np.sum((Y_pred - Y_test) ** 2, axis = 1))) \n return mse", "def _build_model(self):\n\n with tf.variable_scope(\"Matchnet\", reuse=tf.AUTO_REUSE):\n # For determining the runtime shape\n x_shp = tf.shape(self.x_in)\n\n # -------------------- Network archintecture --------------------\n # Build graph\n print(\"Building Graph\")\n self.logits = build_graph(self.x_in, self.is_training, self.config)\n # ---------------------------------------------------------------\n\n # Turn into weights for each sample\n weights = tf.nn.relu(tf.tanh(self.logits))\n\n # Make input data (num_img_pair x num_corr x 4)\n xx = tf.transpose(tf.reshape(\n self.x_in, (x_shp[0], x_shp[2], 4)), (0, 2, 1))\n\n # Create the matrix to be used for the eight-point algorithm\n X = tf.transpose(tf.stack([\n xx[:, 2] * xx[:, 0], xx[:, 2] * xx[:, 1], xx[:, 2],\n xx[:, 3] * xx[:, 0], xx[:, 3] * xx[:, 1], xx[:, 3],\n xx[:, 0], xx[:, 1], tf.ones_like(xx[:, 0])\n ], axis=1), (0, 2, 1))\n print(\"X shape = {}\".format(X.shape))\n wX = tf.reshape(weights, (x_shp[0], x_shp[2], 1)) * X\n print(\"wX shape = {}\".format(wX.shape))\n XwX = tf.matmul(tf.transpose(X, (0, 2, 1)), wX)\n print(\"XwX shape = {}\".format(XwX.shape))\n\n # Recover essential matrix from self-adjoing eigen\n e, v = tf.self_adjoint_eig(XwX)\n self.e_hat = tf.reshape(v[:, :, 0], (x_shp[0], 9))\n # Make unit norm just in case\n self.e_hat /= tf.norm(self.e_hat, axis=1, keep_dims=True)", "def __init__(self, input, n_in, n_out):\r\n # start-snippet-1\r\n\r\n # initialize with 0 the weights W as a matrix of shape (n_in, n_out)\r\n self.W = theano.shared( value=numpy.zeros( (n_in, n_out), 
dtype=theano.config.floatX ), name='W', borrow=True )\r\n\r\n # initialize the baises b as a vector of n_out 0s\r\n self.b = theano.shared(value=numpy.zeros( (n_out,), dtype=theano.config.floatX ), name='b', borrow=True )\r\n\r\n # symbolic expression for computing the matrix of class-membership probabilities where:\r\n # W is a matrix where column-k represent the separation hyper plain for class-k\r\n # x is a matrix where row-j represents input training sample-j\r\n # b is a vector where element-k represent the free parameter of hyper plane-k\r\n self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)\r\n\r\n # symbolic description of how to compute prediction as class whose probability is maximal\r\n self.y_pred = T.argmax(self.p_y_given_x, axis=1)\r\n # end-snippet-1\r\n\r\n # parameters of the model\r\n self.params = [self.W, self.b]\r\n\r\n # keep track of model input\r\n self.input = input", "def neural_network():\n model = Sequential()\n model.add(Conv2D(64, kernel_size=3, activation=\"relu\", input_shape=(28, 28, 1)))\n model.add(Conv2D(64, kernel_size=3, activation=\"relu\"))\n model.add(Flatten())\n model.add(Dense(10, activation=\"softmax\"))\n model.compile(optimizer='adam', loss='categorical_crossentropy')\n\n return model", "def MILP_1(self,xData,uData):\n print \"\\n\",\"*\"*80,\"\\n\",\"MILP 1: Parameter Estimation\\n\",\"*\"*80 \n model=Model(\"parameters\")\n outflow={}\n d={}\n bigM=500\n Q_out={}\n Q_in={}\n N=max(l[1] for l in xData.keys())\n print \"x Data size is\",N\n N=max(l[1] for l in uData.keys())\n print \"u Data size is\",N\n for l in self.links:\n l.d=model.addVar(lb=0,ub=200,obj=0*l.type==\"road\") \n for t in range(1,N):\n d[l,t]=model.addVar(lb=0,ub=200,obj=1)\n for k in l.outgoing:\n outflow[l,k,t]=model.addVar(lb=0,ub=200)\n self.c[l,k]=model.addVar(lb=20,ub=200)\n self.beta[l,k]=model.addVar(lb=0.2,ub=0.8)\n self.alpha[l,k]=model.addVar(lb=0,ub=1)\n self.M[l,k]=model.addVar(lb=0,ub=200)\n d[\"outflow-1\",l,k,t]=model.addVar(vtype=GRB.BINARY) \n d[\"outflow-2\",l,k,t]=model.addVar(vtype=GRB.BINARY) \n model.update()\n for t in range(1,N):\n for l in self.links:\n if True:\n Q_out[l,t]=LinExpr()\n Q_in[l,t]=LinExpr()\n Q_out[l,t].addConstant(0)\n Q_in[l,t].addConstant(0)\n for k in l.outgoing:\n model.addConstr(outflow[l,k,t]<=self.beta[l,k]*uData[l,t]*xData[l,t])\n model.addConstr(outflow[l,k,t]<=self.M[l,k])\n model.addConstr(outflow[l,k,t]<=self.c[l,k]-self.alpha[l,k]*xData[k,t])\n model.addConstr(outflow[l,k,t]>=self.beta[l,k]*uData[l,t]*xData[l,t]+bigM*d[\"outflow-1\",l,k,t]-bigM)\n model.addConstr(outflow[l,k,t]>=self.M[l,k]+bigM*d[\"outflow-2\",l,k,t]-bigM)\n model.addConstr(outflow[l,k,t]>=self.c[l,k]-self.alpha[l,k]*xData[k,t]-bigM*d[\"outflow-1\",l,k,t]-bigM*d[\"outflow-2\",l,k,t])\n Q_out[l,t].add(outflow[l,k,t])\n for k in l.incoming:\n Q_in[l,t].add(outflow[k,l,t])\n if l.type==\"road\":\n model.addConstr(xData[l,t+1]<=xData[l,t]- Q_out[l,t] + Q_in[l,t] + d[l,t] + l.lambda_arrival) \n else:\n model.addConstr(xData[l,t+1]<=xData[l,t]- uData[l,t]*xData[l,t] + Q_in[l,t] + d[l,t] + l.lambda_arrival)\n for l in self.links:\n sum=LinExpr()\n for k in l.outgoing:\n sum.add(self.beta[l,k])\n model.addConstr(sum>=0)\n \n# J=QuadExpr()\n# for l in self.links:\n# for t in range(1,N):\n# if l.type==\"road\":\n# J.add(d[l,t]*d[l,t])\n# model.setObjective(J)\n model.optimize()\n for l in self.links:\n l.d=l.d.X\n for k in l.outgoing:\n self.beta[l,k]=self.beta[l,k].X\n self.c[l,k]=self.c[l,k].X\n self.alpha[l,k]=self.alpha[l,k].X\n 
self.M[l,k]=self.M[l,k].X\n for l in self.links:\n for t in range(1,N):\n l.d=max(d[l,t].X,l.d)\n \n \n\n \n if True:\n for t in range(1,N):\n print \"*\"*80,\"time=\",t\n for l in self.links:\n print \"\\n\",l,\"x is\",xData[l,t],\"u is\",uData[l,t],\"x+ is\",xData[l,t+1]\n for k in l.outgoing:\n print k,\"beta:\",self.beta[l,k],\"outflow\",outflow[l,k,t].X", "def nnObjFunction(params, *args):\r\n \r\n\r\n \r\n n_input, n_hidden, n_class, training_data, training_label, lambdaval = args\r\n w1 = params[0:n_hidden * (n_input + 1)].reshape( (n_hidden, (n_input + 1)))\r\n w2 = params[(n_hidden * (n_input + 1)):].reshape((n_class, (n_hidden + 1)))\r\n\r\n\r\n rowsToChange=xrange(len(training_label))\r\n \r\n oneKEncoding = np.zeros((len(training_label),10))\r\n \r\n for x,y in izip(rowsToChange,training_label):\r\n oneKEncoding[x,int(y)]=1\r\n \r\n training_label=oneKEncoding\r\n\r\n trans_w1=w1.T\r\n trans_w2=w2.T\r\n \r\n #add bias 1\r\n x=np.column_stack((training_data,np.ones(len(training_data))))\r\n #equation1\r\n eq1=np.dot(x,trans_w1)\r\n #equation 2\r\n z=sigmoid(eq1)\r\n #add bias 1\r\n z=np.column_stack((z,np.ones(len(z))))\r\n #equation 3\r\n eq3=np.dot(z,trans_w2)\r\n #equation 4\r\n o=sigmoid(eq3)\r\n\r\n #-----------------------------------------Calculations for gradient weight vector 2---------------------------------------------\r\n \r\n delta=np.subtract(o,training_label)\r\n eq5=np.sum(np.square(delta))\r\n\r\n dabba=(training_label-o)*(1-o)*o\r\n \r\n grad_w2=np.multiply(-1,np.dot(dabba.T,z)) \r\n \r\n\r\n #-----------------------------------------Calculations for gradient weight vector 1---------------------------------------------\r\n\r\n one_minus_z_into_z = (1-z)*z\r\n \r\n \r\n multiply_by_summation = one_minus_z_into_z*np.dot(dabba,w2)\r\n \r\n grad_w1_without_minus_one = np.dot(np.transpose(multiply_by_summation),x)\r\n \r\n\r\n grad_w1=np.multiply(-1,grad_w1_without_minus_one)\r\n \r\n grad_w1 = np.delete(grad_w1, n_hidden,0) \r\n \r\n\r\n #-----------------------------------------Calculations for gradient object value----------------------------------------\r\n\r\n \r\n obj_val=eq5/len(training_data)\r\n \r\n #-----------------------------------------Regularization of gradient val and weight vector-------------------------------\r\n \r\n obj_val = obj_val+ (lambdaval/(2*len(training_data)))*( np.sum(np.square(w1)) + np.sum(np.square(w2)))\r\n grad_w2 = (grad_w2 + lambdaval*w2 )/ len(training_data) \r\n grad_w1 = (grad_w1 + lambdaval*w1 )/ len(training_data) \r\n \r\n \r\n\r\n #-----------------------------------------Concatenate both the weight vectors---------------------------------------------\r\n\r\n obj_grad = np.array([])\r\n obj_grad = np.concatenate((grad_w1.flatten(), grad_w2.flatten()),0)\r\n return (obj_val,obj_grad)", "def build_model_mobilenet(num_classes):", "def mlp_model():\n\tmodel = Sequential()\n\tmodel.add(Dense(256, activation='relu', input_shape=(X_train_scaled.shape[1], )))\n\tmodel.add(Dropout(0.4))\n\tmodel.add(Dense(256, activation='relu'))\n\tmodel.add(Dropout(0.4))\n\tmodel.add(Dense(FLAGS.nb_classes, activation='softmax'))\n\tmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n\tmodel.summary()\n\treturn model", "def build_model(self):\n self.G = Generator(self.g_conv_dim)\n self.D = Discriminator(self.d_conv_dim, self.c_dim)\n self.generator = Generator(self.g_conv_dim).train(False)\n\n self.G = nn.DataParallel(self.G)\n self.D = nn.DataParallel(self.D)\n\n # For Adam (Unofficial)\n # 
self.g_optimizer = torch.optim.Adam(self.G.parameters(), self.g_lr, [self.beta1, self.beta2])\n # self.d_optimizer = torch.optim.Adam(self.D.parameters(), self.d_lr, [self.beta1, self.beta2])\n\n # For RMSprop(Official)\n self.g_optimizer = torch.optim.RMSprop(self.G.parameters(), lr=0.0001)\n self.d_optimizer = torch.optim.RMSprop(self.D.parameters(), lr=0.0001)\n\n self.accumulate(self.generator, self.G.module, 0)\n # self.print_network(self.G, 'G')\n # self.print_network(self.D, 'D')\n \n self.G.to(self.device)\n self.D.to(self.device)\n self.generator.to(self.device)\n\n # weight init\n self.G.apply(self.weights_init)\n self.D.apply(self.weights_init)\n self.generator.apply(self.weights_init)", "def _map(self, p_input:Element, p_output:Element):\r\n \r\n self._sl_model.eval()\r\n\r\n # Input pre processing\r\n input = self.input_preproc(p_input)\r\n\r\n # Make prediction\r\n output = self.forward(input)\r\n\r\n # Output post processing\r\n output = self.output_postproc(output)\r\n\r\n # Set list to Element\r\n p_output.set_values(output)", "def nnPredict(w1,w2,data):\n \n labels = np.array([])\n \n # create bias row\n bias_row =np.ones((np.size(data,0),1))\n \n # concatenate bias with data matrix\n data=np.concatenate((data,bias_row),axis=1)\n \n #Calculate input to hidden layer\n intput_hidden_layer= np.dot(data,w1.transpose()) \n \n #Calculate output of hidden layer using sigmoid function\n output_hidden_layer= sigmoid(intput_hidden_layer)\n \n #Calculate input to output nodes\n input_with_bias = np.concatenate((output_hidden_layer,bias_row),axis=1) \n input_output_node= np.dot(input_with_bias,w2.transpose()) \n \n # Calculate output of output layer\n output_layer= sigmoid(input_output_node) \n \n # get index of maximum from all rows in ouput layer matrix\n labels = np.argmax(output_layer,axis=1) \n \n return labels", "def __init__(self,layers,activations):\n model = utils.buildMLP(layers, activations)\n super().__init__(torch.nn.Sequential(model), nnType='dnn')", "def nnPredict(w1,w2,data): \r\n \r\n labels = np.array([])\r\n #Your code here\r\n #Get bias dimension\r\n bias_dimension = data.shape[0]\r\n\r\n #Fill it all with ones\r\n bias = np.ones((bias_dimension,1))\r\n\r\n #Add bias to weights \r\n data_with_bias = np.concatenate((data,bias),1)\r\n\r\n #Feed Foward Start By Multiplying Training data by weights of w1\r\n z2 = np.dot(data_with_bias,np.transpose(w1))\r\n\r\n #Apply Sigmoid function\r\n a2= sigmoid(z2)\r\n #Apply Another Bias Dimension to the new matrix\r\n\r\n #bias_dimension=a2.shape[0]\r\n #bias = np.ones((bias_dimension,1))\r\n a2_bias= np.concatenate((a2,bias),1)\r\n\r\n #Multiply new matrix by the weights of w2\r\n z3 = np.dot(a2_bias,np.transpose(w2))\r\n \r\n #Apply Sigmoid Function to the new data\r\n y= sigmoid(z3)\r\n\r\n #find max value and add that digit to the labels vector\r\n labels= np.zeros((y.shape[0],1))\r\n count=0\r\n for x in y:\r\n index=0\r\n max=0\r\n inmax=0\r\n for p in x:\r\n if p >= max:\r\n max=p\r\n inmax=index\r\n index+=1\r\n labels[count][0]=inmax\r\n count+=1\r\n \r\n print('results n ', labels)\r\n return labels", "def build_model():\n model = models.Sequential()\n\n # # Anti-overfit methods\n # model.add(layers.BatchNormalization())\n # model.add(layers.Dropout(0.5))\n # regularizers.l1_l2(l1=0.01, l2=0.01)\n\n model.add(layers.Conv2D(200, (3, 3), activation='relu',\n input_shape=nnc.INPUT_SHAPE))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Conv2D(200, (3, 3), 
activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Conv2D(150, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Conv2D(100, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Flatten())\n model.add(layers.Dense(512, activation='relu',))\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(256, activation='relu',))\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(512, activation='relu',))\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(7, activation='sigmoid'))\n model.compile(optimizer=nnc.OPTIMIZER, loss=nnc.LOSS, metrics=nnc.METRICS)\n\n # # Print the model to the console\n model.summary()\n # # Print the model to a png file\n # utils.plot_model(model, show_shapes=True, to_file=nnc.MODEL_PLOT_PATH)\n # # Turn into multi-gpu model\n # model = utils.multi_gpu_model(model, gpus=2)\n\n return model", "def __init__(self, rng, input, n_in, n_hidden, n_out, n_hiddenLayers, binary, stochastic):\n self.binary=binary\n self.stochastic=stochastic\n \n # Since we are dealing with a one hidden layer MLP, this will translate\n # into a HiddenLayer with a tanh activation function connected to the\n # LogisticRegression layer; the activation function can be replaced by\n # sigmoid or any other nonlinear function.\n self.hiddenLayers = []\n self.normLayers=[]\n for i in xrange(n_hiddenLayers):\n h_input = input if i == 0 else self.hiddenLayers[i-1].output\n h_in = n_in if i == 0 else n_hidden\n\n # if binary==True, we append a binary hiddenlayer\n if binary==True:\n self.hiddenLayers.append(\n HiddenLayer(\n rng=rng,\n input=h_input,\n n_in=h_in,\n n_out=n_hidden,\n activation=T.tanh,\n binary=True,\n stochastic=stochastic\n ))\n self.normLayers.append(\n BatchNormLayer(\n input=self.hiddenLayers[i].output,\n n_in=n_hidden,\n n_out=n_hidden\n ))\n else:\n self.hiddenLayers.append(\n HiddenLayer(\n rng=rng,\n input=h_input,\n n_in=h_in,\n n_out=n_hidden,\n activation=T.tanh,\n binary=False,\n stochastic=False\n ))\n\n # The logistic regression layer gets as input the hidden units\n # of the hidden layer\n self.logRegressionLayer = LogisticRegression(\n input=self.hiddenLayers[-1].output,\n n_in=n_hidden,\n n_out=n_out,\n binary=binary,\n stochastic=stochastic\n )\n \n # same holds for the function computing the number of errors\n self.errors = self.logRegressionLayer.errors\n\n # the parameters of the model are the parameters of the two layer it is\n # made out of\n self.params = sum([x.params for x in self.hiddenLayers], []) + self.logRegressionLayer.params\n self.wrt = sum([x.wrt for x in self.hiddenLayers], []) + self.logRegressionLayer.wrt\n self.Ws = sum([x.Ws for x in self.hiddenLayers], []) + self.logRegressionLayer.Ws\n # keep track of model input\n self.input = input", "def __init__(self, attribute_size, output_size, n_hidden_layers=2, n_hidden_neurons=30):\n self.n_hidden_layers = n_hidden_layers\n self.n_hidden_neurons = n_hidden_neurons\n self.attribute_size = attribute_size\n self.output_size = output_size\n\n X = T.fmatrix()\n Y = T.fmatrix()\n\n self.w_h = nnet.init_weights((self.attribute_size, self.n_hidden_neurons))\n self.w_h2 = nnet.init_weights((self.n_hidden_neurons, self.n_hidden_neurons))\n self.w_o = nnet.init_weights((self.n_hidden_neurons, self.output_size))\n\n if self.n_hidden_layers == 2:\n\n noise_py_x = nnet.model_reg(X, 
self.w_h, self.w_h2, self.w_o, 0, 0)\n py_x = nnet.model_reg(X, self.w_h, self.w_h2, self.w_o, 0, 0)\n\n cost = nnet.rmse(noise_py_x, Y)\n params = [self.w_h, self.w_h2, self.w_o]\n updates = nnet.RMSprop(cost, params, lr=0.001)\n\n self.train = theano.function(inputs=[X, Y], outputs=cost, updates=updates, allow_input_downcast=True)\n self.predict_ = theano.function(inputs=[X], outputs=py_x, allow_input_downcast=True)\n\n elif self.n_hidden_layers == 3:\n\n self.w_h3 = nnet.init_weights((self.n_hidden_neurons, self.n_hidden_neurons))\n\n noise_py_x = nnet.model_reg3(X, self.w_h, self.w_h2, self.w_h3, self.w_o, 0, 0)\n py_x = nnet.model_reg3(X, self.w_h, self.w_h2, self.w_h3, self.w_o, 0, 0)\n\n cost = nnet.rmse(noise_py_x, Y)\n params = [self.w_h, self.w_h2, self.w_h3, self.w_o]\n updates = nnet.RMSprop(cost, params, lr=0.001)\n\n self.train = theano.function(inputs=[X, Y], outputs=cost, updates=updates, allow_input_downcast=True)\n self.predict_ = theano.function(inputs=[X], outputs=py_x, allow_input_downcast=True)", "def FeatLinModel(VGG, layername='features_20', type=\"weight\", weight=None, chan=0, pos=(10, 10)):\n layers_all = get_model_layers(VGG)\n if 'features' in layername:\n layeridx = layers_all.index(layername) - 1 + 1 # -1 for the \"features\" layer\n VGGfeat = VGG.features[:layeridx]\n else:\n VGGfeat = VGG\n hooks, feat_dict = hook_model(VGG, layerrequest=(layername,))\n layernames = list(feat_dict.keys())\n print(layernames)\n if type == \"weight\":\n def weight_objective(img, scaler=True):\n VGGfeat.forward(img.cuda())\n feat = hooks(layername)\n if scaler:\n return -(feat * weight.unsqueeze(0)).mean()\n else:\n batch = img.shape[0]\n return -(feat * weight.unsqueeze(0)).view(batch, -1).mean(axis=1)\n\n return weight_objective\n elif type == \"neuron\":\n def neuron_objective(img, scaler=True):\n VGGfeat.forward(img.cuda())\n feat = hooks(layername)\n if len(feat.shape) == 4:\n if scaler:\n return -(feat[:, chan, pos[0], pos[1]]).mean()\n else:\n batch = img.shape[0]\n return -(feat[:, chan, pos[0], pos[1]]).view(batch, -1).mean(axis=1)\n elif len(feat.shape) == 2:\n if scaler:\n return -(feat[:, chan]).mean()\n else:\n batch = img.shape[0]\n return -(feat[:, chan]).view(batch, -1).mean(axis=1)\n return neuron_objective", "def __init__(self, input_nodes, hidden_nodes, hidden_layers, output_nodes):\n # Class members:\n # num_input_nodes\n # num_hidden_nodes\n # num_hidden_layers\n # num_output_nodes\n # weights = [[num_hidden_nodes, num_input_nodes],[num_hidden_nodes, num_hidden_nodes],[]<- for each hl,\n # [num_output_nodes, num_hidden_nodes]]\n # biases\n\n self.num_input_nodes = input_nodes\n self.num_hidden_nodes = hidden_nodes\n self.num_hidden_layers = hidden_layers\n self.num_output_nodes = output_nodes\n\n self.weights = []\n for i in range(self.num_hidden_layers + 1):\n if i is 0:\n # first weights array is input to hidden\n self.weights.append(.5 * np.random.rand(self.num_hidden_nodes, self.num_input_nodes) - .25)\n\n elif i < self.num_hidden_layers:\n # next weight array is hidden nodes to hidden nodes\n self.weights.append(.5 * np.random.rand(self.num_hidden_nodes, self.num_hidden_nodes) - .25)\n else:\n # last weight array is hidden nodes to output nodes\n self.weights.append(.5 * np.random.rand(self.num_output_nodes, self.num_hidden_nodes) - .25)\n\n self.biases = []\n for i in range(self.num_hidden_layers + 1):\n if i < self.num_hidden_layers:\n # for every hidden node there is a bias\n self.biases.append(0.5 * np.random.rand(self.num_hidden_nodes) - .25)\n 
else:\n # for the output node there is a bias as well\n self.biases.append(0.5 * np.random.rand(self.num_output_nodes) - .25)\n\n self.activation = np.vectorize(self.tanh, otypes=[float])", "def build_model(self):\n states = layers.Input(shape=(self.state_size,), name='states')\n actions = layers.Input(shape=(self.action_size,), name='actions')\n\n # Hidden Layers for state pathway\n net_states = layers.Dense(units=320, kernel_regularizer=regularizers.l2(0.01), activation='relu')(states)\n net_states = layers.BatchNormalization()(net_states)\n net_states = layers.Dropout(0.25)(net_states)\n net_states = layers.Dense(units=160, kernel_regularizer=regularizers.l2(0.01), activation='relu')(net_states)\n net_states = layers.BatchNormalization()(net_states)\n net_states = layers.Dropout(0.25)(net_states)\n net_states = layers.Dense(units=80, kernel_regularizer=regularizers.l2(0.01), activation='relu')(net_states)\n net_states = layers.BatchNormalization()(net_states)\n net_states = layers.Dropout(0.25)(net_states)\n net_states = layers.Dense(units=40, kernel_regularizer=regularizers.l2(0.01), activation='relu')(net_states)\n net_states = layers.BatchNormalization()(net_states)\n net_states = layers.Dropout(0.25)(net_states)\n\n # Hidden Layer for action pathway\n net_actions = layers.Dense(units=320, kernel_regularizer=regularizers.l2(0.01), activation='relu')(actions)\n net_actions = layers.BatchNormalization()(net_actions)\n net_actions = layers.Dropout(0.25)(net_actions)\n net_actions = layers.Dense(units=160, kernel_regularizer=regularizers.l2(0.01), activation='relu')(net_actions)\n net_actions = layers.BatchNormalization()(net_actions)\n net_actions = layers.Dropout(0.25)(net_actions)\n net_actions = layers.Dense(units=80, kernel_regularizer=regularizers.l2(0.01), activation='relu')(net_actions)\n net_actions = layers.BatchNormalization()(net_actions)\n net_actions = layers.Dropout(0.25)(net_actions)\n net_actions = layers.Dense(units=40, kernel_regularizer=regularizers.l2(0.01), activation='relu')(net_actions)\n net_actions = layers.BatchNormalization()(net_actions)\n net_actions = layers.Dropout(0.25)(net_actions)\n\n # Combine state and action pathways\n net = layers.Add()([net_states, net_actions])\n net = layers.Activation('relu')(net)\n\n # Final Output layer\n Q_values = layers.Dense(units=1, name='q_values')(net)\n\n # Create a Keras Model\n self.model = models.Model(inputs=[states, actions], outputs=Q_values)\n\n # Define Optimizer and Compile the Model\n optimizer = optimizers.Adam(lr=0.0001)\n self.model.compile(optimizer=optimizer, loss='mse')\n\n # Action Gradients (derivative of Q_Value\n action_gradient = K.gradients(Q_values, actions)\n\n # Function to fetch action gradients\n self.get_action_gradients = K.function(\n inputs=[*self.model.input, K.learning_phase()],\n outputs=action_gradient\n )", "def mlp(self):\n # Model.\n model = Sequential()\n model.add(Flatten(input_shape=self.input_shape))\n model.add(Dense(1024))\n model.add(Dropout(0.6))\n model.add(Dense(512))\n model.add(Dropout(0.6))\n model.add(Dense(self.nb_classes, activation='softmax'))\n\n return model", "def create_model(self): # noqa: D103\n # reference for creation of the model https://yilundu.github.io/2016/12/24/Deep-Q-Learning-on-Space-Invaders.html\n model=Sequential()\n model.add(Flatten( input_shape=(84,84,4)))\n model.add(Dense(self.num_actions)) \n\n return model", "def __init__(self, input, n_in, n_out,binary=True,stochastic=True):\n # initialize with 0 the weights W as a matrix of shape (n_in, 
n_out)\n self.W = theano.shared(\n value=numpy.zeros(\n (n_in, n_out),\n dtype=theano.config.floatX\n ),\n name='W',\n borrow=True\n )\n # initialize the biases b as a vector of n_out 0s\n self.b = theano.shared(\n value=numpy.zeros(\n (n_out,),\n dtype=theano.config.floatX\n ),\n name='b',\n borrow=True\n )\n \n self.Wb = theano.shared(\n value=numpy.zeros(\n (n_in, n_out),\n dtype=theano.config.floatX\n ),\n name='Wb',\n borrow=True\n )\n\n if (binary):\n self.wrt = [self.Wb, self.b]\n self.p_y_given_x = T.nnet.softmax(T.dot(input, self.Wb) + self.b)\n self.output=T.dot(input, self.Wb) + self.b\n else:\n self.wrt = [self.W, self.b]\n self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)\n self.output=self.p_y_given_x\n\n # parameters of the model\n\n # symbolic expression for computing the matrix of class-membership\n # probabilities\n # Where:\n # W is a matrix where column-k represent the separation hyperplane for\n # class-k\n # x is a matrix where row-j represents input training sample-j\n # b is a vector where element-k represent the free parameter of\n # hyperplane-k\n \n\n # symbolic description of how to compute prediction as class whose\n # probability is maximal\n self.y_pred = T.argmax(self.p_y_given_x, axis=1)\n\n # keep track of model input\n self.input = input\n \n # parameters of the model\n self.params = [self.W,self.b]\n self.Ws=[self.W,self.Wb]", "def first_model():\n model=Sequential()\n # model.add(Flatten(input_shape=(160,320,3)))\n model.add(Lambda(lambda x: (x-128.0)/128.0,input_shape=(160, 320, 3)))\n model.add(Cropping2D(cropping=((70,25), (0,0))))\n model.add(Convolution2D(32, 3, 3))\n model.add(MaxPooling2D((2, 2)))\n model.add(Dropout(0.5))\n model.add(Activation('relu'))\n model.add(Flatten())\n model.add(Dense(128))\n model.add(Activation('relu'))\n model.add(Dense(1))\n\n model.compile(loss=\"mse\",optimizer=\"adam\")\n return model", "def init_model(self):\n model = Sequential()\n model.add(Dense(units=24, input_dim=self.input_shape[0],\n activation='relu'))\n model.add(Dense(units=24, activation='relu'))\n # We want rewards instead of probability, so use linear here\n model.add(Dense(units=self.output_num, activation='linear'))\n model.compile(loss='mse', optimizer=Adam(lr=self.eta))\n return model", "def create_model(self, input_state, layer1=450, layer2=350):\n # create the DQN\n self.model = Sequential()\n self.model.add(Dense(units=layer1, input_dim=input_state.nn_input.size))\n self.model.add(Activation('relu'))\n\n self.model.add(Dense(units=layer2))\n self.model.add(Activation('relu'))\n\n self.model.add(Dense(units=(input_state.size_graph+1)))\n self.model.add(Activation('linear'))\n\n self.model.compile(optimizer='rmsprop', loss='mse')\n\n self.model.predict(input_state.nn_input.reshape(1, input_state.nn_input.size), batch_size=1)", "def __init__(self, input_dim: int, hidden_layer: bool) -> None:\n\n # --- PLEASE READ --\n # Use the parameters below to train your feed-forward neural network.\n\n # Number of hidden units if hidden_layer = True.\n self.hidden_units = 25\n\n # This parameter is called the step size, also known as the learning rate (lr).\n # See 18.6.1 in AIMA 3rd edition (page 719).\n # This is the value of α on Line 25 in Figure 18.24.\n self.lr = 1e-3\n\n # Line 6 in Figure 18.24 says \"repeat\".\n # This is the number of times we are going to repeat. 
This is often known as epochs.\n self.epochs = 400\n\n # We are going to store the data here.\n # Since you are only asked to implement training for the feed-forward neural network,\n # only self.x_train and self.y_train need to be used. You will need to use them to implement train().\n # The self.x_test and self.y_test is used by the unit tests. Do not change anything in it.\n self.x_train, self.y_train = None, None\n self.x_test, self.y_test = None, None\n\n np.random.seed(0) # Setting random seed for reproducibility.\n\n self.weights, self.biases = None, None # Initializing weights and biases\n\n self.total_layers = (\n None # Initializing the number of layers in the neural network.\n )\n\n \"\"\"\n I have implemented the neural network as two lists, one with the weight matrices between each layer,\n and the other with the bias vectors.\n \"\"\"\n if hidden_layer:\n self.weights = [\n np.random.randn(self.hidden_units, input_dim),\n np.random.randn(1, self.hidden_units),\n ]\n self.biases = [np.random.randn(self.hidden_units, 1), np.random.randn(1, 1)]\n self.total_layers = 3\n else:\n self.weights = [np.random.randn(1, input_dim)]\n self.biases = [np.random.randn(1, 1)]\n self.total_layers = 2\n\n self.sigmoid = lambda x: 1.0 / (\n 1.0 + np.exp(-x)\n ) # The sigmoid activation function: 1 / (1 + e^(-x))\n\n self.sigmoid_derivative = lambda x: self.sigmoid(x) * (\n 1 - self.sigmoid(x)\n ) # The derivative of the sigmoid activation function to be used in the backpropagation algorithm.", "def __init__(self, n_in, n_hidden, n_layers=2):\n super(GatedGN, self).__init__()\n\n self.E_n = nn.Sequential(nn.Linear(n_in, n_hidden),\n nn.ReLU(),\n nn.Linear(n_hidden, n_layers*n_hidden),\n nn.ReLU())\n self.E_e = nn.Sequential(nn.Linear(2*n_in, n_hidden),\n nn.ReLU(),\n nn.Linear(n_hidden, n_layers*n_hidden),\n nn.ReLU())\n self.E_g = nn.Linear(n_hidden, n_hidden)\n\n # Takes in features from nodes, node states, edge state, and global state.\n self.U_gru = nn.GRU(input_size=n_hidden+n_in,\n hidden_size=n_hidden,\n num_layers=n_layers,\n batch_first=True)\n self.M_gru = nn.GRU(input_size=2*(n_in+n_hidden),\n hidden_size=n_hidden,\n num_layers=n_layers,\n batch_first=True)\n self.G_gru = nn.GRU(input_size=2*n_hidden,\n hidden_size=n_hidden,\n num_layers=n_layers,\n batch_first=True)\n \n # Output function that predicts stability.\n self.O = nn.Linear(n_hidden, 1)\n \n self.n_in, self.n_hidden, self.n_layers = n_in, n_hidden, n_layers", "def compile_model(self, data):\n model=Sequential()\n\n model.add(Conv2D(self.conv_1_mapnum, \n (self.filter_width, self.filter_width),\n input_shape=data.shape[1:], \n strides=self.strides_len,\n padding='same', data_format='channels_last',\n dilation_rate=1, activation=self.acti_1_func, use_bias=True, \n kernel_initializer='glorot_uniform', bias_initializer='zeros', \n kernel_regularizer=l2(0.001), bias_regularizer=None, \n activity_regularizer=None, kernel_constraint=None, \n bias_constraint=None))\n\n if self.batch_norm:\n model.add(BatchNormalization(axis=3, momentum=0.99, epsilon=0.001, \n center=True, scale=True, \n beta_initializer='zeros', gamma_initializer='ones',\n moving_mean_initializer='zeros', \n moving_variance_initializer='ones',\n beta_regularizer=None, gamma_regularizer=None,\n beta_constraint=None, gamma_constraint=None))\n\n if self.spatial_drop:\n model.add(SpatialDropout2D(rate=self.spatial_drop_perc, data_format='channels_last'))\n\n if self.pool_method=='mean':\n model.add(AveragePooling2D(pool_size=(2, 2), strides=None, padding='same', \n 
data_format='channels_last'))\n if self.pool_method=='max':\n model.add(MaxPooling2D(pool_size=(2, 2), strides=None, padding='same', \n data_format='channels_last'))\n \n model.add(Conv2D(self.conv_2_mapnum, \n (self.filter_width, self.filter_width),\n strides=self.strides_len,\n padding='same', data_format='channels_last',\n dilation_rate=1, activation=self.acti_2_func, use_bias=True, \n kernel_initializer='glorot_uniform', bias_initializer='zeros', \n kernel_regularizer=l2(0.001), bias_regularizer=None, \n activity_regularizer=None, kernel_constraint=None, \n bias_constraint=None))\n\n if self.batch_norm:\n model.add(BatchNormalization(axis=3, momentum=0.99, epsilon=0.001, \n center=True, scale=True, \n beta_initializer='zeros', gamma_initializer='ones',\n moving_mean_initializer='zeros', \n moving_variance_initializer='ones',\n beta_regularizer=None, gamma_regularizer=None,\n beta_constraint=None, gamma_constraint=None))\n\n if self.spatial_drop:\n model.add(SpatialDropout2D(rate=self.spatial_drop_perc, data_format='channels_last'))\n\n if self.pool_method=='mean':\n model.add(AveragePooling2D(pool_size=(2, 2), strides=None, padding='same', \n data_format='channels_last'))\n if self.pool_method=='max':\n model.add(MaxPooling2D(pool_size=(2, 2), strides=None, padding='same', \n data_format='channels_last'))\n\n model.add(Conv2D(self.conv_3_mapnum, \n (self.filter_width, self.filter_width),\n strides=self.strides_len,\n padding='same', data_format='channels_last',\n dilation_rate=1, activation=self.acti_3_func, use_bias=True, \n kernel_initializer='glorot_uniform', bias_initializer='zeros', \n kernel_regularizer=l2(0.001), bias_regularizer=None, \n activity_regularizer=None, kernel_constraint=None, \n bias_constraint=None))\n\n if self.batch_norm:\n model.add(BatchNormalization(axis=3, momentum=0.99, epsilon=0.001, \n center=True, scale=True, \n beta_initializer='zeros', gamma_initializer='ones',\n moving_mean_initializer='zeros', \n moving_variance_initializer='ones',\n beta_regularizer=None, gamma_regularizer=None,\n beta_constraint=None, gamma_constraint=None))\n\n if self.spatial_drop:\n model.add(SpatialDropout2D(rate=self.spatial_drop_perc, data_format='channels_last'))\n\n if self.pool_method=='mean':\n model.add(AveragePooling2D(pool_size=(2, 2), strides=None, padding='same', \n data_format='channels_last'))\n if self.pool_method=='max':\n model.add(MaxPooling2D(pool_size=(2, 2), strides=None, padding='same', \n data_format='channels_last'))\n \n model.add(Flatten())\n \n if self.additional_dense:\n model.add(Dense(units=self.additional_dense_units, activation=self.additional_dense_activation))\n\n if self.batch_norm:\n model.add(BatchNormalization(axis=1, momentum=0.99, epsilon=0.001, \n center=True, scale=True, \n beta_initializer='zeros', gamma_initializer='ones',\n moving_mean_initializer='zeros', \n moving_variance_initializer='ones',\n beta_regularizer=None, gamma_regularizer=None,\n beta_constraint=None, gamma_constraint=None))\n if self.spatial_drop:\n model.add(Dropout(rate=self.spatial_drop_perc))\n \n model.add(Dense(units=self.denseshape, activation=self.output_activation))\n\n model.compile(optimizer=Adam(lr=self.learning_rate), loss=self.loss_func, metrics=['accuracy', 'mean_squared_error', 'mean_absolute_error'])\n print(model.summary())\n return model", "def _build_model(self):\n\n with tf.variable_scope(self.name):\n # adds placeholders, data_normalization and data_noise if desired. 
Also adds a placeholder for dropout probability\n self.layer_in_x, self.layer_in_y = self._build_input_layers()\n\n # create core multi-layer perceptron\n mlp_output_dim = 2 * self.ndim_y * self.n_centers + self.n_centers\n core_network = MLP(\n name=\"core_network\",\n input_layer=self.layer_in_x,\n output_dim=mlp_output_dim,\n hidden_sizes=self.hidden_sizes,\n hidden_nonlinearity=self.hidden_nonlinearity,\n output_nonlinearity=None,\n weight_normalization=self.weight_normalization,\n dropout_ph=self.dropout_ph if self.dropout else None\n )\n\n core_output_layer = core_network.output_layer\n\n # slice output of MLP into three equally sized parts for loc, scale and mixture weights\n slice_layer_locs = L.SliceLayer(core_output_layer, indices=slice(0, self.ndim_y * self.n_centers), axis=-1)\n slice_layer_scales = L.SliceLayer(core_output_layer, indices=slice(self.ndim_y * self.n_centers, 2 * self.ndim_y * self.n_centers), axis=-1)\n slice_layer_weights = L.SliceLayer(core_output_layer, indices=slice(2 * self.ndim_y * self.n_centers, mlp_output_dim), axis=-1)\n\n # locations mixture components\n self.reshape_layer_locs = L.ReshapeLayer(slice_layer_locs, (-1, self.n_centers, self.ndim_y))\n self.locs = L.get_output(self.reshape_layer_locs)\n\n # scales of the mixture components\n reshape_layer_scales = L.ReshapeLayer(slice_layer_scales, (-1, self.n_centers, self.ndim_y))\n self.softplus_layer_scales = L.NonlinearityLayer(reshape_layer_scales, nonlinearity=tf.nn.softplus)\n self.scales = L.get_output(self.softplus_layer_scales)\n\n # weights of the mixture components\n self.logits = L.get_output(slice_layer_weights)\n self.softmax_layer_weights = L.NonlinearityLayer(slice_layer_weights, nonlinearity=tf.nn.softmax)\n self.weights = L.get_output(self.softmax_layer_weights)\n\n # # put mixture components together\n self.y_input = L.get_output(self.layer_in_y)\n self.cat = cat = Categorical(logits=self.logits)\n self.components = components = [MultivariateNormalDiag(loc=loc, scale_diag=scale) for loc, scale\n in zip(tf.unstack(self.locs, axis=1), tf.unstack( self.scales, axis=1))]\n self.mixture = mixture = Mixture(cat=cat, components=components, value=tf.zeros_like(self.y_input))\n\n # regularization\n self._add_softmax_entropy_regularization()\n self._add_l1_l2_regularization(core_network)\n\n # tensor to store samples\n self.samples = mixture.sample() #TODO either use it or remove it\n\n # tensor to compute probabilities\n if self.data_normalization:\n self.pdf_ = mixture.prob(self.y_input) / tf.reduce_prod(self.std_y_sym)\n self.log_pdf_ = mixture.log_prob(self.y_input) - tf.reduce_sum(tf.log(self.std_y_sym))\n else:\n self.pdf_ = mixture.prob(self.y_input)\n self.log_pdf_ = mixture.log_prob(self.y_input)\n\n # symbolic tensors for getting the unnormalized mixture components\n if self.data_normalization:\n self.scales_unnormalized = self.scales * self.std_y_sym\n self.locs_unnormalized = self.locs * self.std_y_sym + self.mean_y_sym\n else:\n self.scales_unnormalized = self.scales\n self.locs_unnormalized = self.locs\n\n # initialize LayersPowered --> provides functions for serializing tf models\n LayersPowered.__init__(self, [self.softmax_layer_weights, self.softplus_layer_scales, self.reshape_layer_locs,\n self.layer_in_y])", "def build(self):\n\n self.W_AA = self.init([self.n_atom_input_feat, self.n_hidden_AA])\n self.b_AA = model_ops.zeros(shape=[\n self.n_hidden_AA,\n ])\n\n self.W_PA = self.init([self.n_pair_input_feat, self.n_hidden_PA])\n self.b_PA = model_ops.zeros(shape=[\n 
self.n_hidden_PA,\n ])\n\n self.W_A = self.init([self.n_hidden_A, self.n_atom_output_feat])\n self.b_A = model_ops.zeros(shape=[\n self.n_atom_output_feat,\n ])\n\n self.trainable_weights = [\n self.W_AA, self.b_AA, self.W_PA, self.b_PA, self.W_A, self.b_A\n ]\n if self.update_pair:\n self.W_AP = self.init([self.n_atom_input_feat * 2, self.n_hidden_AP])\n self.b_AP = model_ops.zeros(shape=[\n self.n_hidden_AP,\n ])\n\n self.W_PP = self.init([self.n_pair_input_feat, self.n_hidden_PP])\n self.b_PP = model_ops.zeros(shape=[\n self.n_hidden_PP,\n ])\n\n self.W_P = self.init([self.n_hidden_P, self.n_pair_output_feat])\n self.b_P = model_ops.zeros(shape=[\n self.n_pair_output_feat,\n ])\n\n self.trainable_weights.extend(\n [self.W_AP, self.b_AP, self.W_PP, self.b_PP, self.W_P, self.b_P])", "def model_creator(config):\n return nn.Linear(1, 1)", "def _model(self):\n\t\tmodel = Sequential()\n\t\tmodel.add(Dense(units=64, input_dim=self.state_size, activation=\"relu\"))\n\t\tmodel.add(Dense(units=32, activation=\"relu\"))\n\t\tmodel.add(Dense(units=16, activation=\"relu\"))\n\t\tmodel.add(Dense(units=8, activation=\"relu\"))\n\t\tmodel.add(Dense(self.action_size, activation=\"linear\"))\n\t\tmodel.compile(loss=\"mse\", optimizer=Adam(lr=self.learning_rate))\n\n\t\treturn model", "def model(self,input_map=None,input_spec=None, mode=None,train=False):\n # Input Layer\n # Reshape X to 4-D tensor: [batch_size, width, height, channels]\n \n if self.model_type == 'spatial':\n pool2_flat = self.spatial_model(input_map)\n elif self.model_type == 'spectral':\n pool2_flat = self.spectral_model(input_spec)\n elif self.model_type == 'both':\n pool2_flat_map = self.spatial_model(input_map)\n pool2_flat_spec = self.spectral_model(input_spec)\n pool2_flat = tf.concat([pool2_flat_map,pool2_flat_spec],1)\n\n \n # Dense Layer\n # Densely connected layer with 1024 neurons\n # Input Tensor Shape: [batch_size, 7 * 7 * 64]\n # Output Tensor Shape: [batch_size, 1024]\n dense = tf.layers.dense(inputs=pool2_flat, \n units=self.Ndense, \n activation=tf.nn.relu,\n name='dense')\n \n # Add dropout operation; 0.6 probability that element will be kept\n dropout = tf.layers.dropout(\n inputs=dense, rate=self.dropout_rate, training=train)\n \n # Logits layer\n # Input Tensor Shape: [batch_size, 1024]\n # Output Tensor Shape: [batch_size, 10]\n logits = tf.layers.dense(inputs=dropout, units=self.Nlogit,name='logits')\n return logits", "def create_base_network(NumberOfFeatures, NumberOfClasses,init_mode='glorot_normal'):\n network = Sequential()\n network.add(Dense(44, activation='sigmoid', kernel_initializer=init_mode,input_dim=NumberOfFeatures))\n# network.add(Dense(22, activation='sigmoid',kernel_initializer=init_mode))\n network.add(Dense(NumberOfClasses, activation='softmax',kernel_initializer=init_mode))\n return network", "def get_trans_net():\n return nn.Sequential(nn.Linear(2, 64), nn.LeakyReLU(), nn.Linear(64, 64), nn.LeakyReLU(), nn.Linear(64, 2))", "def __init__(self):\n super(FcNet, self).__init__()\n\n # get size of some layers\n start_num = 48\n max_num = 200\n mid_num = 50\n end_num = 8\n \n # define regressor\n self.regress = nn.Sequential(\n nn.Linear(start_num,max_num,bias=True),\n nn.Sigmoid(),\n nn.Linear(max_num,mid_num,bias = True),\n nn.Sigmoid(),\n nn.Linear(mid_num,end_num, bias = True),\n nn.Sigmoid()\n )", "def SRCNN(input_shape, depth_multiplier=1, multi_output=False):\n inputs = Input(input_shape, name=\"inputs\")\n conv1 = Convolution2D(filters=64*depth_multiplier, kernel_size=9, padding=\"same\", 
name=\"conv1\", activation=\"relu\")(inputs)\n #conv1 = BatchNormalization(name='bn_conv1')(conv1)\n \n mapping = Convolution2D(filters=32*depth_multiplier, kernel_size=1, padding=\"same\", name=\"mapping\", activation=\"relu\")(conv1)\n #mapping = BatchNormalization(name='bn_mapping')(mapping)\n \n if multi_output:\n out = Convolution2D(filters=2, kernel_size=5, padding=\"same\", name=\"output\", activation=\"sigmoid\")(mapping)\n else:\n out = Convolution2D(filters=1, kernel_size=5, padding=\"same\", name=\"output\", activation=\"sigmoid\")(mapping)\n return Model(inputs, out)", "def create_neural_network():\n model = Sequential()\n model.add(LSTM(32, input_shape=(4, 45))) # 4 time-steps and 45 features\n model.add(Dense(64))\n model.add(Activation('tanh'))\n model.add(Dense(units=45)) # 45 is the number of class\n model.add(Activation('softmax')) # Output the density of probability\n\n model.compile(optimizer=adam(lr=0.001, decay=1e-6),\n loss=\"categorical_crossentropy\",\n metrics=['accuracy'])\n\n model.summary()\n print(\"Creation of the Neural Network is finished.\")\n return model", "def __init__(self):\n self.device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n \n self.model = models.resnet101() \n self.model.load_state_dict(torch.load(WEIGHTS_DIR))\n \n self.model.to(self.device)\n \n self.model.eval()\n \n # labels\n with open(LABELS, 'r') as f:\n self.labels = ast.literal_eval(f.read())\n \n # define data transform\n self.transform = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])", "def trainModel( self, featureTrain, classTrain):", "def create_model_net(n_input,n_hidden,n_output):\n net = Sequential(\n L.Linear(n_input, n_hidden), F.relu,\n L.Linear(n_hidden, n_hidden), F.relu,\n L.Linear(n_hidden, n_output), F.softmax)\n return net", "def _onnx_model_to_singa_net(cls, model, init_inputs, device,\n opset_version):\n # init all tensor input and weight as a tensor map\n tensor_map = cls._init_graph_parameter(model.graph, init_inputs, device)\n # only weights tensor\n weights = {x.name: tensor_map[x.name] for x in model.graph.initializer}\n # the parsed operators queue\n singa_ops = []\n singa_op = namedtuple('SingaOps', ['name', 'op', 'handle', 'forward'])\n for node in model.graph.node:\n node = OnnxNode(node)\n # only give the inputs it needs\n # consumed_inputs are the inputs marked as attributes\n # so we remove it here\n inputs = [\n tensor_map[x]\n for x in node.inputs\n if x not in node.consumed_inputs\n ]\n handle, forward = cls._onnx_node_to_singa_op(\n node, inputs, opset_version)\n # if it is Constant, we hanlde it as a weight\n # otherwise, we run it and add its output into map for being used by later operators\n if node.op_type == 'Constant':\n tmp_tensor = tensor.from_numpy(forward)\n tmp_tensor.to_device(device)\n tmp_name = node.outputs.pop(0)\n weights[tmp_name] = tmp_tensor\n tensor_map[tmp_name] = tmp_tensor\n else:\n outputs = cls._run_node(node, inputs, handle, forward)\n for key, val in outputs.items():\n tensor_map[key] = val\n singa_ops.extend([singa_op(node.name, node, handle, forward)])\n return weights, singa_ops", "def train(self):\r\n raw_dataset = pd.read_csv(self.datafile, sep = ',', header = 0,\r\n na_values = '?', comment = '\\t',\r\n skipinitialspace = True)\r\n\r\n dataset = raw_dataset.copy()\r\n dataset.tail()\r\n\r\n # Clear unknown values\r\n dataset.isna().sum()\r\n 
dataset = dataset.dropna()\r\n\r\n # takes a sample of 80% of the data points\r\n train_dataset = dataset.sample(frac = 0.8, random_state = 0)\r\n test_dataset = dataset.drop(train_dataset.index)\r\n\r\n # Split features from labels for training and test datasets\r\n train_features = train_dataset.copy()\r\n test_features = test_dataset.copy()\r\n train_labels = train_features.pop('Quality')\r\n test_labels = test_features.pop('Quality')\r\n\r\n # normalize data\r\n normalizer = preprocessing.Normalization()\r\n normalizer.adapt(np.array(train_features))\r\n\r\n # builds the model\r\n def build_and_compile_model(norm):\r\n model = keras.Sequential([\r\n norm,\r\n layers.Dense(64, activation='relu'),\r\n layers.Dense(64, activation='relu'),\r\n layers.Dense(1)\r\n ])\r\n\r\n model.compile(loss='mean_absolute_error',\r\n optimizer=tf.keras.optimizers.Adam(0.001))\r\n return model\r\n\r\n deep_neural_network_model = build_and_compile_model(normalizer)\r\n\r\n history = deep_neural_network_model.fit(\r\n train_features, train_labels,\r\n validation_split=0.2,\r\n verbose=0, epochs=100)\r\n\r\n deep_neural_network_model.save('deep_neural_network_model')", "def generator():\n\n model = Sequential()\n\n in_shape = 100\n\n depth = 256\n\n model.add(Dense(depth * 7 * 7, input_shape=(in_shape,)))\n model.add(BatchNormalization(momentum=0.9)) # add the momentum\n # model.add(Activation('relu')) # pass the vector through a relu\n model.add(LeakyReLU(alpha=0.2))\n\n model.add(Reshape((7, 7, depth))) # reshape to depth number of 7x7 images\n model.add(Dropout(0.4))\n\n model.add(UpSampling2D())\n model.add(Conv2DTranspose(int(depth / 2), 5, padding='same'))\n model.add(BatchNormalization(momentum=0.9))\n # model.add(Activation('relu'))\n model.add(LeakyReLU(alpha=0.2))\n\n model.add(UpSampling2D())\n model.add(Conv2DTranspose(int(depth / 4), 5, padding='same'))\n model.add(BatchNormalization(momentum=0.9))\n # model.add(Activation('relu'))\n model.add(LeakyReLU(alpha=0.2))\n\n model.add(Conv2DTranspose(int(depth / 8), 5, padding='same'))\n model.add(BatchNormalization(momentum=0.9))\n # model.add(Activation('relu'))\n model.add(LeakyReLU(alpha=0.2))\n\n model.add(Conv2DTranspose(1, 5, padding='same'))\n model.add(Activation('sigmoid'))\n\n # model.summary()\n\n noise = Input(shape=(in_shape,))\n img = model(noise)\n\n return Model(noise, img)\n\n # return model", "def __init__(self, in_units, out_units):\n np.random.seed(42)\n #self.w = None # Declare the Weight matrix\n #self.b = None # Create a placeholder for Bias\n self.x = None # Save the input to forward in this\n self.a = None # Save the output of forward pass in this (without activation)\n\n self.d_x = None # Save the gradient w.r.t x in this\n self.d_w = None # Save the gradient w.r.t w in this\n self.d_b = None # Save the gradient w.r.t b in this\n self.d_v_w = None\n self.d_v_b = None\n\n self.w = np.random.normal(0, np.sqrt(1/in_units), (in_units, out_units))\n self.b = np.zeros((1, out_units))\n self.d_v_w = np.zeros((in_units, out_units))\n self.d_v_b = np.zeros((1, out_units))", "def model(self):\n filePath = self.config['data_path']['train_data']\n data = self.loadCSV(filePath)\n cleandata = self.preprocess(data)\n X, y = self.dataSplit(cleandata)\n X = self.CountVect(X, self.config['transform_path']['transform_model_path'])\n X_train, X_test, y_train, y_test = self.TrainTestSplit(X, y)\n self.MultinomialNB(X_train, X_test, y_train, y_test, self.config['nlp_path']['model_path'])", "def __init__(self, cell_index, stimulus_type, 
conv_layers=[(12, 9, 9), (12, 9, 9)], dense_layer=64,\n loss='poisson_loss', optimizer='adam', weight_init='normal', l2_reg=0., dropout=0.5, mean_adapt=False):\n\n self.stim_shape = (40, 50, 50)\n\n # build the model\n with notify('Building convnet'):\n\n self.model = Sequential()\n\n # convolutional layers\n for ix, layer in enumerate(conv_layers):\n\n # get parameters for this layer\n num_filters, row_size, col_size = layer\n\n # convolutional layer\n if ix == 0:\n self.model.add(Convolution2D(num_filters, row_size, col_size,\n input_shape=self.stim_shape, init=weight_init,\n border_mode='same', subsample=(1,1),\n W_regularizer=l2(l2_reg), activation='relu'))\n\n else:\n self.model.add(Convolution2D(num_filters, row_size, col_size,\n input_shape=self.stim_shape, init=weight_init,\n border_mode='same', subsample=(1,1),\n W_regularizer=l2(l2_reg), activation='relu'))\n\n # max pooling layer\n self.model.add(MaxPooling2D(pool_size=(2, 2), ignore_border=True))\n\n # dropout\n self.model.add(Dropout(dropout))\n\n # flatten\n self.model.add(Flatten())\n\n # Add dense (affine) layer with relu activation\n self.model.add(Dense(dense_layer, init=weight_init, W_regularizer=l2(l2_reg), activation='relu'))\n self.model.add(Dropout(dropout))\n\n # Add a final dense (affine) layer with softplus activation\n self.model.add(Dense(1, init=weight_init, W_regularizer=l2(l2_reg), activation='softplus'))\n\n # save architecture string (for markdown file)\n self.architecture = '\\n'.join(['Convolutional layers {}'.format(conv_layers),\n '{} filters in the second (fully connected) layer'.format(dense_layer),\n 'weight initialization: {}'.format(weight_init),\n 'l2 regularization: {}'.format(l2_reg),\n 'stimulus shape: {}'.format(self.stim_shape)])\n\n # compile\n super().__init__(cell_index, stimulus_type, loss, optimizer, mean_adapt)", "def mlp(\n\t# input_shape: Tuple[int, ...],\n\t# output_shape: Tuple[int, ...],\n\t# layer_size: int = 128,\n\t# dropout_amount: float = 0.2,\n\t# num_layers: int = 3, \n\tnet_config: Dict\n)->Model:\n\tactivation_fn = net_config[\"hyperparams\"][\"activation_fn\"]\n\tinput_s = net_config[\"shapes\"][\"input_shape\"]\n\toutput_s = net_config[\"shapes\"][\"output_shape\"]\n\n\tinputs = keras.Input(shape=(input_s,))\n\tdense = layers.Dense(64, activation=\"relu\")\n\tx = dense(inputs)\n\tlayer1 = layers.Dense(64, activation=activation_fn)(x)\n\tlayer2 = layers.Dense(64, activation=activation_fn)(layer1)\n\toutputs = layers.Dense(output_s)(layer2)\n\tmodel = keras.Model(inputs=inputs, outputs=outputs, name=\"house_pred\")\n\t\n\treturn model", "def generative_model(self):\n activation = tf.nn.relu\n if self.linear:\n activation = None\n\n # p(x | z, s)\n if self.batch is not None:\n h = tf.concat([self.z, self.batch], 1)\n else:\n h = self.z\n \n #h = dense(h, self.n_hidden,\n # activation=tf.nn.relu, bn=True, keep_prob=self.dropout_rate, phase=self.training_phase)\n h = dense(h, self.n_hidden,\n activation=activation, bn=True, keep_prob=None, phase=self.training_phase)\n \n for layer in range(2, self.n_layers + 1):\n if self.batch is not None:\n h = tf.concat([h, self.batch], 1)\n h = dense(h, self.n_hidden, activation=activation, \\\n bn=True, keep_prob=self.dropout_rate, phase=self.training_phase)\n\n if self.batch is not None:\n h = tf.concat([h, self.batch], 1) \n \n #mean gamma\n self.px_scale = dense(h, self.n_input, activation=tf.nn.softmax, \\\n bn=False, keep_prob=None, phase=self.training_phase)\n \n #dispersion\n if self.dispersion == \"gene-cell\":\n self.px_r = 
dense(h, self.n_input, activation=None, \\\n bn=False, keep_prob=None, phase=self.training_phase)\n elif self.dispersion == \"gene\":\n self.px_r = tf.Variable(tf.random_normal([self.n_input]), name=\"r\")\n else:\n if self.dispersion is False:\n self.px_r = tf.ones([self.n_input])\n else:\n if self.batch_ind is None:\n raise ValueError(\"batch dispersion with no batch info\")\n else:\n self.px_r = tf.Variable(tf.random_normal([self.num_batches, self.n_input]), name=\"r\")\n\n \n #mean poisson\n self.px_rate = self.px_scale \n if self.scalings:\n self.px_rate = self.px_scale * tf.exp(self.library)\n\n #dropout\n if self.zi:\n self.px_dropout = dense(h, self.n_input, activation=None, \\\n bn=False, keep_prob=None, phase=self.training_phase)", "def NN(train_df, val_df, test_df, sub_path):\n logging.info('Neural Network preprocessing')\n \n if train_df is not None: \n y_train = train_df['is_attributed'].values\n train_df = train_df.drop('is_attributed', axis = 1)\n train_df = train_df.drop('attributed_time', axis = 1) \n #train_df = train_df.drop('click_time', axis = 1) #only if no preprocessing\n gc.collect()\n if val_df is not None:\n y_val = val_df['is_attributed'].values \n val_df = val_df.drop(['is_attributed'], axis = 1)\n val_df = get_keras_data(val_df)\n \n list_variables = get_values(train_df)\n print(list_variables)\n \n logging.info('Model is creating...') \n \n max_var = []\n if test_df is not None:\n for i, var in enumerate(list_variables):\n max_var.append(np.max([train_df[var].max(), test_df[var].max()])+1) \n train_df = get_keras_data(train_df)\n else:\n for i, var in enumerate(list_variables):\n max_var.append(train_df[var].max()+1) \n train_df = get_keras_data(train_df)\n \n emb_n = 50\n dense_n = 1000\n \n in_var = []\n emb_var = [] \n for i, var in enumerate(list_variables):\n in_var.append(Input(shape=[1], name = var))\n emb_var.append(Embedding(max_var[i], emb_n)(in_var[i]))\n \n fe = concatenate([emb for emb in emb_var])\n s_dout = SpatialDropout1D(0.2)(fe)\n fl1 = Flatten()(s_dout)\n #conv = Conv1D(100, kernel_size=4, strides=1, padding='same')(s_dout)\n dl = Dense(100)(s_dout)\n fl2 = Flatten()(dl)\n concat = concatenate([(fl1), (fl2)])\n x = Dropout(0.2)(Dense(dense_n,activation='relu')(concat))\n x = Dropout(0.2)(Dense(dense_n,activation='relu')(x))\n outp = Dense(1,activation='sigmoid')(x)\n \n model = Model(inputs=[var for var in in_var], outputs=outp)\n \n logging.info('Model is compiling...')\n \n batch_size = 50000\n epochs = 2 #12 for sample_train\n exp_decay = lambda init, fin, steps: (init/fin)**(1/(steps-1)) - 1\n steps = int(len(list(train_df)[0]) / batch_size) * epochs\n lr_init, lr_fin = 0.002, 0.0002\n lr_decay = exp_decay(lr_init, lr_fin, steps)\n optimizer_adam = Adam(lr=lr_init, decay=lr_decay)\n \n model.compile(loss='binary_crossentropy',optimizer=optimizer_adam,metrics=['accuracy'])\n model.summary()\n \n logging.info('Model is training...')\n \n model.fit(train_df, y_train, batch_size=batch_size, epochs=epochs, shuffle=True, verbose=2, validation_split=0.1)\n del train_df, y_train; gc.collect()\n \n if val_df is not None:\n logging.info('Prediction on validation set')\n predictions_NN_prob = model.predict(val_df, batch_size=batch_size, verbose=2)\n del val_df; gc.collect()\n predictions_NN_prob = predictions_NN_prob[:,0]\n \n predictions_NN = np.where(predictions_NN_prob > 0.5, 1, 0)\n acc_NN = accuracy_score(y_val, predictions_NN)\n print('Overall accuracy of Neural Network model:', acc_NN)\n \n if test_df is not None:\n logging.info('Prediction 
on test set')\n sub = pd.DataFrame()\n sub['click_id'] = test_df['click_id'].astype('int')\n test_df = test_df.drop(['click_id'], axis=1)\n test_df = get_keras_data(test_df)\n \n sub['is_attributed'] = model.predict(test_df, batch_size=batch_size, verbose=2)\n del test_df; gc.collect()\n logging.info(\"Writing....\")\n with file_io.FileIO(sub_path, mode='wb') as fout:\n sub.to_csv(fout,index=False)\n logging.info(\"Done...\")\n logging.info(sub.info())" ]
[ "0.7084198", "0.6653931", "0.6629831", "0.6618596", "0.65436137", "0.6513071", "0.64708334", "0.646356", "0.6437267", "0.6412351", "0.6400347", "0.63606924", "0.6356501", "0.6342798", "0.6280546", "0.6239402", "0.6237478", "0.6236198", "0.6233234", "0.62314695", "0.6226379", "0.6224811", "0.6214258", "0.62055975", "0.6194236", "0.6192848", "0.6180378", "0.6179732", "0.6171608", "0.6148452", "0.6145013", "0.61448306", "0.61441374", "0.61379915", "0.61258847", "0.6124928", "0.6108657", "0.6099509", "0.609441", "0.6089835", "0.608666", "0.608323", "0.6082548", "0.6077147", "0.6070932", "0.6070819", "0.6061849", "0.6053621", "0.6053133", "0.6052472", "0.6047818", "0.60412174", "0.6039015", "0.6038149", "0.6037989", "0.6036494", "0.60337615", "0.60335046", "0.60334563", "0.60292476", "0.60232633", "0.6022434", "0.6021865", "0.60201097", "0.6011144", "0.6008292", "0.6004844", "0.5999755", "0.59932536", "0.5981341", "0.59781677", "0.5975917", "0.5967159", "0.59654146", "0.59594095", "0.5953865", "0.5950365", "0.59497905", "0.5948054", "0.5946584", "0.5943255", "0.59423864", "0.594163", "0.594038", "0.5939242", "0.59390384", "0.5934831", "0.59287137", "0.59283465", "0.59277105", "0.5927399", "0.59252894", "0.591956", "0.5918615", "0.5915251", "0.5910845", "0.59105855", "0.59067655", "0.5904406", "0.59040445", "0.5903812" ]
0.0
-1
Find a rotation matrix R such that F_inf.dot(R) ~= F_true
def compute_optimal_rotation(L, L_true, scale=True):
    from scipy.linalg import orthogonal_procrustes
    R = orthogonal_procrustes(L, L_true)[0]
    if scale:
        Lp = L.dot(R)
        s = (L_true*Lp).sum() / (Lp*Lp).sum()
        return R*s
    else:
        return R
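A minimal, hypothetical usage sketch of the function above (not part of the dataset row): F_inf and F_true here are made-up factor matrices, with F_inf a rotated and scaled copy of F_true, so the recovered R (with the optional least-squares scale folded in) should satisfy F_inf.dot(R) ~= F_true. It assumes compute_optimal_rotation as defined above is in scope.

import numpy as np

# Made-up example data: a "true" factor matrix and a rotated, scaled copy of it.
rng = np.random.default_rng(0)
F_true = rng.standard_normal((100, 3))
Q, _ = np.linalg.qr(rng.standard_normal((3, 3)))  # random orthogonal 3x3 matrix
F_inf = 2.0 * F_true.dot(Q.T)                     # inferred factors differ by rotation and scale

R = compute_optimal_rotation(F_inf, F_true, scale=True)
print(np.abs(F_inf.dot(R) - F_true).max())  # near machine precision, i.e. F_inf.dot(R) ~= F_true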
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isRotationMatrix(self, R):\n Rt = np.transpose(R)\n shouldBeIdentity = np.dot(Rt, R)\n I = np.identity(3, dtype=R.dtype)\n n = np.linalg.norm(I - shouldBeIdentity)\n return n < 1e-6", "def test_to_rotation(self):\r\n q = np.array([-1, 1, 3, 2])\r\n q = q / np.linalg.norm(q)\r\n R_gt = np.array([\r\n [-1/3., -14/15., -2/15.],\r\n [2/3., -1/3., 2/3.],\r\n [-2/3., 2/15., 11/15.]]).T\r\n R = to_rotation(q)\r\n\r\n zero_matrix = R - R_gt\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)\r\n\r\n for _ in range(20):\r\n q = np.random.randn(4)\r\n q /= np.linalg.norm(q)\r\n q_inv = quaternion_conjugate(q)\r\n\r\n R = to_rotation(q)\r\n R_inv = to_rotation(q_inv)\r\n\r\n zero_matrix = R @ R_inv - np.identity(3)\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)\r\n\r\n # orthogonal matrix\r\n zero_matrix = R @ R.T - np.identity(3)\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)", "def _is_rotation_matrix(self, R):\n Rt = np.transpose(R)\n shouldBeIdentity = np.dot(Rt, R)\n I = np.identity(3, dtype=R.dtype)\n n = np.linalg.norm(I - shouldBeIdentity)\n return n < 1e-6", "def rotation_inv(R: np.array) -> np.array:\n return R.T", "def similarity_transformation(rot, mat):\n return np.dot(rot, np.dot(mat, np.linalg.inv(rot)))", "def similarity_transformation(rot, mat):\n return np.dot(rot, np.dot(mat, np.linalg.inv(rot)))", "def get_fc_inv(fc):\n return scipy.linalg.pinvh(fc.T @ fc) @ fc.T", "def rotmat(p, q):\n rot = numpy.dot(refmat(q, -p), refmat(p, -p))\n return rot", "def rotate(matrix):\n n, = np.shape(matrix)\n x = np.zeros(n, ) # Column vector of unknown\n\n \"\"\"\n Reduction of the matrix to\n a triangular form\n \"\"\"\n for i in range(0, n):\n for j in range(i + 1, n):\n a = matrix[i, i]\n b = matrix[j, i]\n c = a / m.sqrt(a * a + b * b)\n s = b / m.sqrt(a * a + b * b)\n for k in range(i, n + 1):\n t = matrix[i, k]\n matrix[i, k] = (c * matrix[i, k]) + (s * matrix[j, k])\n matrix[j, k] = (-s * t) + (c * matrix[j, k])\n\n \"\"\"\n Back stroke from the Gauss method\n \"\"\"\n for i in range(n - 1, -1, -1):\n summ = 0\n for j in range(i + 1, n):\n summ += matrix[i, j] * x[j]\n summ = matrix[i, n] - summ\n if matrix[i, i] == 0:\n return False\n x[i] = summ / matrix[i, i]\n\n i = 0\n while i < len(x):\n x[i] = int((x[i] * 10000) + 0.5) / 10000\n i += 1\n\n \"\"\"\n Vector of discrepancy (Ax - B)\n \"\"\"\n a, b = create_matrix_txt(form='normal')\n discrep = np.dot(a, x)\n discrep = discrep - b\n\n print(\"Method of rotation:\\n\")\n print(\"Vector discrepancy: \", discrep)\n print(\"Vector x: \", x, \"\\n\")\n\n return x", "def get_F_matrix(dR, dt, K1, K2):\n E = get_E_matrix(dR, dt)\n F = np.matmul(np.linalg.inv(K2).T, np.matmul(E,np.linalg.inv(K1)))\n\n return F", "def bloch_matrix(self):\n if self.gf_r is None:\n self.gf()\n\n return -self.gf_r.dot(self.lead[1])", "def givens_rotation_matrix(i, j, theta, N):\n R = np.identity(N)\n c = np.cos(theta)\n s = np.sin(theta)\n R[i, i] = c\n R[j, j] = c\n R[i, j] = -s\n R[j, i] = s\n return R", "def deformar_afin(self, F):\n x0 = self.nodos.x0\n x = np.matmul( x0, np.transpose(F))\n self.nodos.x[:] = x", "def get_rotation_matrix(f, t):\n v = np.cross(f, t)\n u = v / np.linalg.norm(v)\n c = np.dot(f, t)\n h = (1 - c) / (1 - (c ** 2))\n vx, vy, vz = v\n rotation_matrix = [\n [c + h*vx**2, h*vx*vy - vz, h*vx*vz + vy],\n [h*vx*vy+vz, c+h*vy**2, h*vy*vz-vx],\n [h*vx*vz - vy, h*vy*vz + vx, c+h*vz**2]\n ]\n return np.array(rotation_matrix)", "def test_d_2():\n rs = 10\n d = 2\n np.random.seed(rs)\n num = 3\n theta = 
np.random.uniform(0, 2 * math.pi)\n rotation = np.identity(d)\n\n rotation[0, 0] = math.cos(theta)\n rotation[0, 1] = - math.sin(theta)\n rotation[1, 0] = math.sin(theta)\n rotation[1, 1] = math.cos(theta)\n\n np.random.seed(rs)\n rotation_function = mt_obj.calculate_rotation_matrix(d, num)\n assert(np.all(rotation == rotation_function))", "def test_givens_matrix(a, b, left):\n\n grot_mat = _givens_matrix(a, b, left)\n assert np.isreal(grot_mat[0, 1]) and np.isreal(grot_mat[1, 1])\n\n rotated_vector = grot_mat @ np.array([a, b]).T\n result_element = b / np.abs(b) * np.hypot(np.abs(a), np.abs(b)) if b else 1.0\n rvec = np.array([0.0, result_element]).T if left else np.array([result_element, 0.0]).T\n assert np.allclose(rotated_vector, rvec)\n\n res1 = np.round(grot_mat @ grot_mat.conj().T, 5)\n res2 = np.round(grot_mat.conj().T @ grot_mat, 5)\n assert np.all(res1 == res2) and np.all(res1 == np.eye(2))", "def getOblateXRotMatrix(aStar1, aStar2):\n aStarDir = aStar2 - a1\n aStarmid = aStar1 + 0.5 * aStarDir\n kath = np.sqrt((aStarDir[0] * aStarDir[0] + aStarDir[1] * aStarDir[1]) / 4.0)\n phi = np.arctan( abs( (aStarDir[2]/2) / kath) )\n octantAStar2 = octant(aStar2)\n if octantAStar2 in [1, 2, 7, 8]: #\n phi = -phi\n print \"phi =\" , np.rad2deg(phi)\n RotX = np.matrix( [ [ 1.0, 0.0 , 0.0 ],\n [ 0.0, np.cos(phi), np.sin(phi)],\n [ 0.0, -np.sin(phi), np.cos(phi)]\n ])\n return np.asarray( RotX )", "def forcing_full(self):\n if not self._fr or not self._frstar:\n raise ValueError('Need to compute Fr, Fr* first.')\n f1 = self._k_ku * Matrix(self.u) + self._f_k\n return -Matrix([f1, self._f_d, self._f_dnh])", "def rot_inv(self):\n if not hasattr(self, '_rot_inv'):\n self._rot_inv=np.linalg.inv(self.rot)\n return self._rot_inv", "def testCalculateRotationDiff(self):\n # Test identity\n transform1 = numpy.eye(4)\n transform2 = numpy.eye(4)\n (_, result) = self.evaluator._calculateDifference(transform1, transform2)\n self.assertEqual(result, 0.0)\n # Test arbitrary rotation\n rot1 = numpy.array(\n [[0.0, 1.0, 0.0], [-1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])\n rot2 = numpy.array(\n [[0.0, 0.0, -1.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]])\n transform1[0:3, 0:3] = numpy.matmul(transform1[0:3, 0:3], rot1)\n transform2[0:3, 0:3] = numpy.matmul(transform2[0:3, 0:3], rot2)\n (_, result) = self.evaluator._calculateDifference(transform1, transform2)\n self.assertAlmostEqual(result, 120.0 * numpy.pi / 180.0, 8)\n # Order shouldn't matter\n (_, result) = self.evaluator._calculateDifference(transform2, transform1)\n self.assertAlmostEqual(result, 120.0 * numpy.pi / 180.0, 8)\n # Test when the angle is pi\n transform1 = numpy.eye(4)\n transform2 = numpy.eye(4)\n transform2[0, 0] = -1.0\n transform2[1, 1] = -1.0\n (_, result) = self.evaluator._calculateDifference(transform1, transform2)\n # It might wrap to -pi, so check the absolute value\n self.assertAlmostEqual(abs(result), numpy.pi, 8)\n # Test an extreme value\n transform2 = -1.0 * numpy.eye(4)\n (_, result) = self.evaluator._calculateDifference(transform2, transform1)\n self.assertAlmostEqual(abs(result), numpy.pi)", "def trf_rotation(_V, _ang, _P):\r\n normFact = 1./sqrt(_V[0]*_V[0] + _V[1]*_V[1] + _V[2]*_V[2]);\r\n axVect = [normFact*_V[0], normFact*_V[1], normFact*_V[2]]\r\n VxVx = axVect[0]*axVect[0]\r\n VyVy = axVect[1]*axVect[1]\r\n VzVz = axVect[2]*axVect[2]\r\n cosAng = cos(_ang)\r\n sinAng = sin(_ang)\r\n one_m_cos = 1. 
- cosAng\r\n one_m_cosVxVy = one_m_cos*axVect[0]*axVect[1]\r\n one_m_cosVxVz = one_m_cos*axVect[0]*axVect[2]\r\n one_m_cosVyVz = one_m_cos*axVect[1]*axVect[2]\r\n sinVx = sinAng*axVect[0]\r\n sinVy = sinAng*axVect[1]\r\n sinVz = sinAng*axVect[2]\r\n st0 = [VxVx + cosAng*(VyVy + VzVz), one_m_cosVxVy - sinVz, one_m_cosVxVz + sinVy]\r\n st1 = [one_m_cosVxVy + sinVz, VyVy + cosAng*(VxVx + VzVz), one_m_cosVyVz - sinVx]\r\n st2 = [one_m_cosVxVz - sinVy, one_m_cosVyVz + sinVx, VzVz + cosAng*(VxVx + VyVy)]\r\n M = [st0, st1, st2]\r\n st00 = [1. - st0[0], -st0[1], -st0[2]]\r\n st01 = [-st1[0], 1. - st1[1], -st1[2]]\r\n st02 = [-st2[0], -st2[0], 1. - st2[2]]\r\n M0 = [st00, st01, st02]\r\n V = matr_prod(M0, _P)\r\n return [M, V]", "def rotate(self,r):\n return r.hprod( self.hprod( r.inv() ) )", "def test_from_two_vectors(self):\r\n for _ in range(20):\r\n v0 = np.random.randn(3)\r\n v1 = np.random.randn(3)\r\n v0 /= np.linalg.norm(v0)\r\n v1 /= np.linalg.norm(v1)\r\n\r\n q = from_two_vectors(v0, v1)\r\n R = to_rotation(q)\r\n\r\n zero_vec = R @ v0 - v1\r\n self.assertAlmostEqual(np.linalg.norm(zero_vec), 0.0)\r\n\r\n q_inv = from_two_vectors(v1, v0)\r\n R_inv = to_rotation(q_inv)\r\n zero_matrix = R @ R_inv - np.identity(3)\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)", "def forcing(self):\n if not self._fr or not self._frstar:\n raise ValueError('Need to compute Fr, Fr* first.')\n return -Matrix([self._f_d, self._f_dnh])", "def compute_rot(v):\n if v[0] >= 0:\n M = nd.eye(len(v))\n else:\n M = - nd.eye(len(v))\n for i in range(1, len(v)):\n if v[i] == 0:\n continue\n rot_minus_theta = nd.eye(len(v))\n temp = nd.dot(M, v)\n\n theta = nd.arctan(temp[i]/temp[0])\n c = nd.cos(theta)\n s = nd.sin(theta)\n\n rot_minus_theta[0,0] = c\n rot_minus_theta[i,i] = c\n rot_minus_theta[0,i] = s\n rot_minus_theta[i,0] = -s\n\n M = nd.dot(rot_minus_theta, M)\n return M", "def get_transform_matrix(theta, phi = None, invert_rot = False, invert_focal = False):\n\n if phi is None:\n phi = const.PHI_IDX * 10.0\n\n #extrinsic x intrinsic\n camera_matrix = np.zeros((4, 4), dtype=np.float32)\n\n intrinsic_matrix = np.eye(4, dtype=np.float32)\n extrinsic_matrix = np.eye(4, dtype=np.float32)\n\n sin_phi = np.sin(float(phi) / 180.0 * np.pi)\n cos_phi = np.cos(float(phi) / 180.0 * np.pi)\n sin_theta = np.sin(float(-theta) / 180.0 * np.pi)\n cos_theta = np.cos(float(-theta) / 180.0 * np.pi)\n\n #theta rotation\n rotation_azimuth = np.zeros((3, 3), dtype=np.float32)\n rotation_azimuth[0, 0] = cos_theta\n rotation_azimuth[2, 2] = cos_theta\n rotation_azimuth[0, 2] = -sin_theta\n rotation_azimuth[2, 0] = sin_theta\n rotation_azimuth[1, 1] = 1.0\n\n #phi rotation\n rotation_elevation = np.zeros((3, 3), dtype=np.float32)\n rotation_elevation[0, 0] = cos_phi\n rotation_elevation[0, 1] = sin_phi\n rotation_elevation[1, 0] = -sin_phi\n rotation_elevation[1, 1] = cos_phi\n rotation_elevation[2, 2] = 1.0\n\n #rotate phi, then theta\n rotation_matrix = np.matmul(rotation_azimuth, rotation_elevation)\n if invert_rot:\n rotation_matrix = np.linalg.inv(rotation_matrix)\n\n displacement = np.zeros((3, 1), dtype=np.float32)\n displacement[0, 0] = const.DIST_TO_CAM\n displacement = np.matmul(rotation_matrix, displacement)\n\n #assembling 4x4 from R + T\n extrinsic_matrix[0:3, 0:3] = rotation_matrix\n extrinsic_matrix[0:3, 3:4] = -displacement\n\n if invert_focal:\n intrinsic_matrix[2, 2] = float(const.focal_length)\n intrinsic_matrix[1, 1] = float(const.focal_length)\n else:\n intrinsic_matrix[2, 2] = 1.0 / 
float(const.focal_length)\n intrinsic_matrix[1, 1] = 1.0 / float(const.focal_length)\n\n camera_matrix = np.matmul(extrinsic_matrix, intrinsic_matrix)\n return camera_matrix", "def regular(P):\n try:\n dim = P.shape[0]\n q = (P - np.eye(dim))\n ones = np.ones(dim)\n q = np.c_[q, ones]\n QTQ = np.dot(q, q.T)\n bQT = np.ones(dim)\n answer = np.linalg.solve(QTQ, bQT)\n if np.all(answer > 0):\n return answer\n else:\n return None\n except Exception as e:\n return None", "def qr_factorization_projections(A, m, n, orth_tol, max_refin, tol):\n # QRFactorization\n Q, R, P = scipy.linalg.qr(A.T, pivoting=True, mode='economic')\n\n if np.linalg.norm(R[-1, :], np.inf) < tol:\n warn('Singular Jacobian matrix. Using SVD decomposition to ' +\n 'perform the factorizations.')\n return svd_factorization_projections(A, m, n,\n orth_tol,\n max_refin,\n tol)\n\n # z = x - A.T inv(A A.T) A x\n def null_space(x):\n # v = P inv(R) Q.T x\n aux1 = Q.T.dot(x)\n aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False)\n v = np.zeros(m)\n v[P] = aux2\n z = x - A.T.dot(v)\n\n # Iterative refinement to improve roundoff\n # errors described in [2]_, algorithm 5.1.\n k = 0\n while orthogonality(A, z) > orth_tol:\n if k >= max_refin:\n break\n # v = P inv(R) Q.T x\n aux1 = Q.T.dot(z)\n aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False)\n v[P] = aux2\n # z_next = z - A.T v\n z = z - A.T.dot(v)\n k += 1\n\n return z\n\n # z = inv(A A.T) A x\n def least_squares(x):\n # z = P inv(R) Q.T x\n aux1 = Q.T.dot(x)\n aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False)\n z = np.zeros(m)\n z[P] = aux2\n return z\n\n # z = A.T inv(A A.T) x\n def row_space(x):\n # z = Q inv(R.T) P.T x\n aux1 = x[P]\n aux2 = scipy.linalg.solve_triangular(R, aux1,\n lower=False,\n trans='T')\n z = Q.dot(aux2)\n return z\n\n return null_space, least_squares, row_space", "def find_rotation(a, b):\n a.shape = (3,)\n b.shape = (3,)\n\n a /= np.linalg.norm(a)\n b /= np.linalg.norm(b)\n \n v = np.cross(a, b)\n \n angle_AB = -1*vector_angle(a, b) \n \n print(angle_AB)\n s = np.linalg.norm(v) * np.sin(angle_AB)\n \n c = np.dot(a, b) * np.cos(angle_AB)\n \n # Rotation matrix, R = I + Vx + Vx^2 * (1-c)/s^2\n I = np.identity(3)\n Vx = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])\n \n R = I + Vx + np.linalg.matrix_power(Vx, 2) / (1+c)\n return R", "def lstsq_transforms_3D(I, F):\n # Make sure there are the same number of point coordinates to work on\n assert len(I) == len(F)\n\n # Do not assume you were given matrix objects (could be nupy arrays)\n I = np.asmatrix(I)\n F = np.asmatrix(F)\n\n # Determine the centroid for the intial and final positions\n centroid_I = np.mean(I, axis=0)\n centroid_F = np.mean(F, axis=0)\n\n # Center both sets of points\n cI = I - centroid_I\n cF = F - centroid_F\n\n # Caluculate the covariance matrix and use Singular Value Decomposition\n # (SVD) to get the U, S, and V matrices\n H = cI.T * cF\n U, S, V = np.linalg.svd(H)\n\n # Compute the rotation matrix (R) between the two sets of points\n R = V.T * U.T\n\n # This should not happen in real life but here for testing with random\n # input values\n #\n # Detect the special reflection case with det(R) = -1\n if np.linalg.det(R) < 0:\n print(\"Reflection detected\")\n V[2, :] *= -1\n R = V.T * U.T\n\n # Compute the translation vector\n t = -R * centroid_I.T + centroid_F.T\n\n return R, t", "def mirror(f, j=0):\n return f.per(dmp_mirror_in(f.rep, j, f.lev, f.dom))", "def generate_boolean_vector(f,q,r,DIMS):\n b = None\n for i in range(DIMS):\n if b is 
None:\n b = (f[:,i]<q[i]+r[i]) & (f[:,i]>q[i])\n else :\n b = b & (f[:,i]<q[i]+r[i]) & (f[:,i]>q[i])\n return b", "def JN_rad(self, F):\n P = bb_source.h * F / (math.exp(bb_source.h * F / (bb_source.k * self.T))-1)\n return P", "def test_rotation_isometry(self):\n import numpy\n\n # test for all kinds of curvatures K\n for k in (0, 1, -1, 1/11, -1/11, 11, -2):\n \n s = space(curvature=k)\n\n # use a small enough magnitude to not break math for very negative K\n magic = 0.33377777373737737777\n # 1/sqrt(2)\n s2_ref = 0.707106781186547524400844362104785\n\n o = s.make_origin(2)\n p = s.make_point((1, 0), magic)\n q = s.make_point((s2_ref, s2_ref), magic)\n\n rot = space_point_transform(\n numpy.array([[1,0,0],[0,s2_ref,-s2_ref],[0,s2_ref,s2_ref]]),\n curvature=k,\n math = common_math\n )\n\n f, g, i = map(space_point_transform, (p, q, o))\n\n def check_transform_eq(t1, t2, invert=False):\n for ref in (\n s.make_point((5/13, 12/13), magic),\n s.make_point((-3/5, 4/5), magic)\n ):\n self.assertTrue(invert ^ point_isclose(\n t1(ref),\n t2(ref),\n abs_tol = 1e-12\n ))\n\n # 1/8 turn, times 8\n check_transform_eq(rot*8, i)\n\n # rotate, shift, rotate\n check_transform_eq(g, rot + f + rot * -1)\n\n # the other way\n check_transform_eq(f, rot * -1 + g + rot)", "def reciprocal_mat(l_g_go):\n InMat = np.copy(l_g_go)\n InMat = InMat.astype(float)\n\n L3 = np.cross(InMat[:, 0], InMat[:, 1]) / np.linalg.det(InMat)\n L1 = np.cross(InMat[:, 1], InMat[:, 2]) / np.linalg.det(InMat)\n L2 = np.cross(InMat[:, 2], InMat[:, 0]) / np.linalg.det(InMat)\n rl_g_go = np.vstack((L1, L2, L3)).T\n # rl_g_go = Matrix(rl_g_go)\n\n\n return rl_g_go", "def transformation_matrix(self):\n t = np.array([[0.0], [0.0], [0.0]])\n Rt = np.hstack([self.rotation_matrix, t])\n return np.vstack([Rt, np.array([0.0, 0.0, 0.0, 1.0])])", "def get_transformation_matrix(theta=45):\n\n theta = theta/360 * 2 * np.pi # in radians\n hx = np.cos(theta)\n sy = np.sin(theta)\n\n S = np.array([[1, hx, 0],\n [0, sy, 0],\n [0, 0, 1]])\n #S_inv = np.linalg.inv(S)\n #old_coords = np.array([[2, 2, 1], [6, 6, 1]]).T\n #new_coords = np.matmul(S, old_coords)\n #recovered_coords = np.matmul(S_inv, new_coords)\n #print('new coords: ', new_coords)\n #print('recovered coords: ', recovered_coords)\n return S", "def R(op, phi = None):\n if phi == None:\n return None\n else:\n return scipy.linalg.expm(-1j * phi / 2 * op)", "def rotation_matrix(self):\n self._normalise()\n product_matrix = np.dot(self._q_matrix(), self._q_bar_matrix().conj().transpose())\n return product_matrix[1:][:,1:]", "def get_rotation_vector(R):\n v = np.array([R[1,2] - R[2,1],\n R[2,0] - R[0,1],\n R[0,1] - R[1,0]]) # eq. 
3.12 in [1], pp.66\n return v", "def regular(P):\n try:\n cols = P.shape[0]\n ans = np.ones((1, cols))\n # eq = np.matmul(ans, P)\n # s = np.array(np.arange(1, cols + 1))\n eq = np.vstack([P.T - np.identity(cols), ans])\n # va, vec = np.linalg .eig(P)\n results = np.zeros((cols, 1))\n results = np.vstack([results, np.array([1])])\n statetionary = np.linalg.solve(eq.T.dot(eq), eq.T.dot(results)).T\n # print(statetionary)\n # print(np.argwhere(statetionary < 0))\n if len(np.argwhere(statetionary < 0)) > 0:\n return None\n return statetionary\n except Exception as e:\n return None", "def matSys(A_r, Tx, x):\n B = np.linalg.pinv(A_r).T\n Xi = np.diagonal(B.T @ Tx @ B)\n return Xi / np.tensordot(A_r, x, axes= [0,0])\n # Original\n # B = tf.transpose(tf.linalg.pinv(A_r))\n # xi = tf.linalg.diag_part(tf.linalg.matmul(tf.linalg.matmul(tf.transpose(B), Tx), B))\n # Xi = tf.divide(xi, tf.tensordot(tf.transpose(A_r), x, axes= [[1],[0]]))\n # return Xi\n\n # TODO: debug this method\n # With Khatri-Rao and without pinv\n # _, r = tf.shape(A_r)\n # A = tf.reshape(tf.expand_dims(A_r,1) * tf.expand_dims(A_r,0), [-1, r])\n # Xi = tf.linalg.lstsq(A, tf.reshape(Tx,[-1,1]))\n # return Xi/tf.tensordot(A_r, x, axes = [0,0])\n\n # # With Khatri-Rao\n # _, r = tf.shape(A_r)\n # A = tf.reshape(tf.expand_dims(A_r,1) * tf.expand_dims(A_r,0), [-1, r])\n # Xi = tf.matmul(tf.linalg.pinv(A), tf.reshape(Tx,[-1,1]))\n # return Xi/tf.tensordot(A_r, x, axes = [0,0])", "def rotation(self):\n\t\treturn self.piv.a.rotate.v", "def solve_rotation_ap(u, v):\n # TODO: Assert vectors are non-zero and non-parallel aka exterior\n # product is non-zero\n N = u.size # the number of dimensions\n uv = np.stack([u, v], axis=1) # the plane of rotation\n M = np.identity(N) # stores the rotations for rorienting reference frame\n # ensure u has positive basis0 component\n if uv[0, 0] < 0:\n M[0, 0] = -1\n M[1, 1] = -1\n uv = M.dot(uv)\n # align uv plane with the basis01 plane and u with basis0.\n for c in range(2):\n for r in range(N - 1, c, -1):\n if uv[r, c] != 0: # skip rotations when theta will be zero\n theta = np.arctan2(uv[r, c], uv[r - 1, c])\n Mk = givens_rotation_matrix(r, r - 1, theta, N)\n uv = Mk.dot(uv)\n M = Mk.dot(M)\n # rotate u onto v\n theta = 2 * np.arctan2(uv[1, 1], uv[0, 1])\n logger.debug(\n \"solve_rotation_ap: {d} degree rotation\".format(\n d=180 * theta / np.pi))\n R = givens_rotation_matrix(0, 1, theta, N)\n # perform M rotations in reverse order\n M_inverse = M.T\n R = M_inverse.dot(R.dot(M))\n return R", "def rotation_matrix_to_euler(R):\n sy = math.sqrt(R[0,0] * R[0,0] + R[1,0] * R[1,0])\n \n singular = sy < 1e-6\n \n if not singular :\n x = math.atan2(R[2,1] , R[2,2])\n y = math.atan2(-R[2,0], sy)\n z = math.atan2(R[1,0], R[0,0])\n else :\n x = math.atan2(-R[1,2], R[1,1])\n y = math.atan2(-R[2,0], sy)\n z = 0\n \n return np.array([x, y, z])", "def test_revolute(self):\n # Rotate around the z axis\n r = Joint.revolute(np.array([0, 0, 1]))\n t_mat = r(np.array([np.pi / 2]))\n rot_vec = np.dot(t_mat, np.array([1, 0, 0, 1]))[:3]\n self.assertTrue(np.allclose(\n rot_vec, np.array([0, 1, 0]), rtol=1e-5, atol=1e-5))", "def base_projection_matrix(self, fiber):\n return matrix(ZZ, fiber.vertices()).right_kernel_matrix()", "def _r_inv(self):\n raise NotImplementedError", "def deformar_afin_frontera(self, F):\n xf0 = self.nodos.get_coors0_fr()\n xf = np.matmul( xf0, np.transpose(F) )\n self.nodos.set_coors_fr(xf)", "def test_rotation(self):\n quat_rotated = rowan.rotate(input1, vector_inputs)\n\n matrices = 
rowan.to_matrix(input1)\n matrix_rotated = np.einsum(\"ijk,ki->ij\", matrices, vector_inputs.T)\n self.assertTrue(np.allclose(matrix_rotated, quat_rotated))", "def op_mirror():\n mir = np.array([[1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, -1, 0],\n [0, 0, 0, -1]])\n return mir", "def getRotationMatrix(gravity, geomagnetic):\n Ax = gravity[0]\n Ay = gravity[1]\n Az = gravity[2]\n normsqA = (Ax * Ax + Ay * Ay + Az * Az)\n g = 9.81\n freeFallGravitySquared = 0.01 * g * g\n if normsqA < freeFallGravitySquared:\n # gravity less than 10% of normal value\n return None\n\n Ex = geomagnetic[0]\n Ey = geomagnetic[1]\n Ez = geomagnetic[2]\n Hx = Ey * Az - Ez * Ay\n Hy = Ez * Ax - Ex * Az\n Hz = Ex * Ay - Ey * Ax\n normH = np.sqrt(Hx * Hx + Hy * Hy + Hz * Hz)\n if normH < 0.1:\n # device is close to free fall (or in space?), or close to\n # magnetic north pole. Typical values are > 100.\n return None\n\n invH = 1.0 / normH\n Hx *= invH\n Hy *= invH\n Hz *= invH\n invA = 1.0 / np.sqrt(Ax * Ax + Ay * Ay + Az * Az)\n Ax *= invA\n Ay *= invA\n Az *= invA\n Mx = Ay * Hz - Az * Hy\n My = Az * Hx - Ax * Hz\n Mz = Ax * Hy - Ay * Hx\n\n R = np.matrix([[Hx, Hy, Hz],\n [Mx, My, Mz],\n [Ax, Ay, Az]])\n\n # compute the inclination matrix by projecting the geomagnetic\n # vector onto the Z (gravity) and X (horizontal component\n # of geomagnetic vector) axes.\n #invE = 1.0 / np.sqrt(Ex * Ex + Ey * Ey + Ez * Ez);\n #c = (Ex * Mx + Ey * My + Ez * Mz) * invE;\n #s = (Ex * Ax + Ey * Ay + Ez * Az) * invE;\n # I = np.matrix([[ 1.0, 0.0, 0.0],\n # [ 0.0, c, s ],\n # [0.0, -s, -c]])\n\n return R", "def get_w_matrix(fc_inv, s):\n return fc_inv @ s", "def rotation_to_transformation_matrix(R):\n R = Matrix(R)\n T = R.col_insert(3, Matrix([0., 0., 0.]))\n T = T.row_insert(3, Matrix([[0., 0., 0., 1.]]))\n return T", "def solve_rigid_transform(X, Y, debug=True):\n assert X.shape[0] == Y.shape[0] >= 3\n assert X.shape[1] == Y.shape[1] == 3\n A = X.T # (3,N)\n B = Y.T # (3,N)\n\n # Look for Inge Soderkvist's solution online if confused.\n meanA = np.mean(A, axis=1, keepdims=True)\n meanB = np.mean(B, axis=1, keepdims=True)\n A = A - meanA\n B = B - meanB\n covariance = B.dot(A.T)\n U, sigma, VH = np.linalg.svd(covariance) # VH = V.T, i.e. numpy transposes it for us.\n\n V = VH.T\n D = np.eye(3)\n D[2,2] = np.linalg.det( U.dot(V.T) )\n R = U.dot(D).dot(V.T)\n t = meanB - R.dot(meanA)\n RB_matrix = np.concatenate((R, t), axis=1)\n\n #################\n # SANITY CHECKS #\n #################\n\n print(\"\\nBegin debug prints for rigid transformation from A to B:\")\n print(\"meanA:\\n{}\\nmeanB:\\n{}\".format(meanA, meanB))\n print(\"Rotation R:\\n{}\\nand R^TR (should be identity):\\n{}\".format(R, (R.T).dot(R)))\n print(\"translation t:\\n{}\".format(t))\n print(\"RB_matrix:\\n{}\".format(RB_matrix))\n\n # Get residual to inspect quality of solution. Use homogeneous coordinates for A.\n # Also, recall that we're dealing with (3,N) matrices, not (N,3).\n # In addition, we don't want to zero-mean for real applications.\n A = X.T # (3,N)\n B = Y.T # (3,N)\n\n ones_vec = np.ones((1, A.shape[1]))\n A_h = np.concatenate((A, ones_vec), axis=0)\n B_pred = RB_matrix.dot(A_h)\n assert B_pred.shape == B.shape\n\n # Careful! 
Use raw_errors for the RF, but it will depend on pred-targ or targ-pred.\n raw_errors = B_pred - B # Use pred-targ, of shape (3,N)\n l2_per_example = np.sum((B-B_pred)*(B-B_pred), axis=0)\n frobenius_loss = np.mean(l2_per_example)\n\n if debug:\n print(\"\\nInput, A.T:\\n{}\".format(A.T))\n print(\"Target, B.T:\\n{}\".format(B.T))\n print(\"Predicted points:\\n{}\".format(B_pred.T))\n print(\"Raw errors, B-B_pred:\\n{}\".format((B-B_pred).T))\n print(\"Mean abs error per dim: {}\".format( (np.mean(np.abs(B-B_pred), axis=1))) )\n print(\"Residual (L2) for each:\\n{}\".format(l2_per_example.T))\n print(\"loss on data: {}\".format(frobenius_loss))\n print(\"End of debug prints for rigid transformation.\\n\")\n\n assert RB_matrix.shape == (3,4)\n return RB_matrix", "def fR(a, b, c, N, S, I, R, vital=False, vaccine=False, combined=False):\n\n if vital:\n temp = b*I - c*R - d*R\n elif vaccine:\n R = N - S - I\n temp = b*I - c*R + f*S\n elif combined:\n temp = b*I - c*R - d*R + f*S\n else:\n temp = 0\n\n return temp", "def modulus_fidelity(u: Matrix, v: Matrix) -> float:\n u_dag = np.transpose(np.conjugate(u))\n f = np.trace(np.dot(abs(u_dag), abs(v)))/u.shape[0]\n if isinstance(f, complex):\n return f.real\n else:\n return f", "def test_matrix_orientation():\n\n # the \"step\" kind generate heavyside-like signals for each voxel.\n # all signals being identical, standardizing along the wrong axis\n # would leave a null signal. Along the correct axis, the step remains.\n fmri, mask = generate_fake_fmri(shape=(40, 41, 42), kind=\"step\")\n masker = NiftiMasker(mask=mask, standardize=True, detrend=True)\n masker.fit()\n timeseries = masker.transform(fmri)\n assert(timeseries.shape[0] == fmri.shape[3])\n assert(timeseries.shape[1] == mask.get_data().sum())\n std = timeseries.std(axis=0)\n assert(std.shape[0] == timeseries.shape[1]) # paranoid\n assert(not np.any(std < 0.1))\n\n # Test inverse transform\n masker = NiftiMasker(mask=mask, standardize=False, detrend=False)\n masker.fit()\n timeseries = masker.transform(fmri)\n recovered = masker.inverse_transform(timeseries)\n np.testing.assert_array_almost_equal(recovered.get_data(), fmri.get_data())", "def test_givens_rotate(shape, indices, row, left):\n matrix = np.random.rand(*shape) * 1j + np.random.rand(*shape)\n unitary, (i, j) = matrix.copy(), indices\n if row:\n a, b = matrix[indices, j - 1]\n grot_mat = _givens_matrix(a, b, left)\n unitary[indices] = grot_mat @ unitary[indices]\n res = b / np.abs(b) * np.hypot(np.abs(a), np.abs(b)) if b else 1.0\n if left:\n assert np.isclose(unitary[i, j - 1], 0.0) and np.isclose(unitary[j, j - 1], res)\n else:\n assert np.isclose(unitary[i, j - 1], res) and np.isclose(unitary[j, j - 1], 0.0)\n else:\n a, b = matrix[j - 1, indices].T\n grot_mat = _givens_matrix(a, b, left)\n unitary[:, indices] = unitary[:, indices] @ grot_mat.T\n res = b / np.abs(b) * np.hypot(np.abs(a), np.abs(b)) if b else 1.0\n if left:\n assert np.isclose(unitary[j - 1, i], 0.0) and np.isclose(unitary[j - 1, j], res)\n else:\n assert np.isclose(unitary[j - 1, indices[0]], res) and np.isclose(\n unitary[j - 1, indices[1]], 0.0\n )", "def rotate(mat,angle):\n return np.dot(Jones.rotator(angle), np.dot(mat, Jones.rotator(-angle)))", "def Rotation_GAL_EQJ():\n # This rotation matrix was calculated by the following script\n # in this same source code repository:\n # demo/python/galeqj_matrix.py\n return RotationMatrix([\n [-0.0548624779711344, -0.8734572784246782, -0.4838000529948520],\n [+0.4941095946388765, -0.4447938112296831, 
+0.7470034631630423],\n [-0.8676668813529025, -0.1980677870294097, +0.4559861124470794]\n ])", "def test_check_matrix_threshold():\n R = np.array([\n [-9.15361835e-01, 4.01808328e-01, 2.57475872e-02],\n [5.15480570e-02, 1.80374088e-01, -9.82246499e-01],\n [-3.99318925e-01, -8.97783496e-01, -1.85819250e-01]])\n pr.assert_rotation_matrix(R)\n pr.check_matrix(R)", "def test_antisymmetric_projection_3_3_true():\n res = antisymmetric_projection(3, 3, True).todense()\n np.testing.assert_equal(np.isclose(res[5].item(), -0.40824829), True)", "def rotate(mat,angle):\n return np.dot(Mueller.rotator(angle), np.dot(mat, Mueller.rotator(-angle)))", "def Zstar(self):\n RV = self.Ystar().copy()\n for term_i in range(self.n_terms):\n if self.identity_trick and self.A_identity[term_i]:\n RV-=np.dot(self.Fstar()[term_i],self.B_hat()[term_i])\n else:\n RV-=np.dot(self.Fstar()[term_i],np.dot(self.B_hat()[term_i],self.Astar()[term_i]))\n self.clear_cache('DLZ')\n return RV", "def test_antisymmetric_projection_2():\n res = antisymmetric_projection(2).todense()\n expected_res = np.array([[0, 0, 0, 0], [0, 0.5, -0.5, 0], [0, -0.5, 0.5, 0], [0, 0, 0, 0]])\n\n bool_mat = np.isclose(res, expected_res)\n np.testing.assert_equal(np.all(bool_mat), True)", "def matI(a):\n shape=matShape(a)\n if shape[0]!=shape[1]: raise ValueError\n n=shape[0]\n ret=matZeros((n,n*2))\n for i in range(n):\n for j in range(n):\n matSet(ret,i,j,matGet(a,i,j))\n for i in range(n):\n matSet(ret,i,i+n,1)\n for row in range(n):\n rm=row\n ap=abs(matGet(ret,rm,row))\n for rint in range(row+1,n):\n p=abs(matGet(ret,rint,row))\n if ap<p:\n ap=p\n rm=rint\n if 0.000000001 > ap:\n return matCopy(a) # Not invertible\n di=matGet(ret,rm,row)\n if rm!=row:\n for i in range(n*2):\n t=matGet(ret,rm,i)\n matSet(ret,rm,i,matGet(ret,row,i))\n matSet(ret,row,i,t)\n idi=1.0/di\n for rint in range(row+1,n):\n f=idi*matGet(ret,rint,row)\n if f!=0:\n for co in range(row,n*2):\n matSet(ret,rint,co,matGet(ret,rint,co)-f*matGet(ret,row,co))\n row=n-1\n while row>=0:\n ic=1.0/matGet(ret,row,row)\n for rint in range(row):\n icx=ic*matGet(ret,rint,row)\n if icx!=0:\n for co in range(row, n*2):\n matSet(ret,rint,co,matGet(ret,rint,co)-icx*matGet(ret,row,co))\n matSet(ret,row,row,ic*matGet(ret,row,row))\n for co in range(n,n*2):\n matSet(ret,row,co,ic*matGet(ret,row,co))\n row-=1\n return matPart(ret,0,n,n,n*2)", "def Rotation_EQJ_GAL():\n # This rotation matrix was calculated by the following script\n # in this same source code repository:\n # demo/python/galeqj_matrix.py\n return RotationMatrix([\n [-0.0548624779711344, +0.4941095946388765, -0.8676668813529025],\n [-0.8734572784246782, -0.4447938112296831, -0.1980677870294097],\n [-0.4838000529948520, +0.7470034631630423, +0.4559861124470794]\n ])", "def test_reciprocal_operations(ph_zr3n4: Phonopy):\n ptg = ph_zr3n4.symmetry.pointgroup_operations\n rops = ph_zr3n4.symmetry.reciprocal_operations\n matches = []\n for r in ptg:\n for i, rec_r in enumerate(rops):\n if (r == rec_r).all():\n matches.append(i)\n break\n assert len(np.unique(matches)) == len(ptg)\n found_inv = False\n for rec_r in rops:\n if (rec_r == -np.eye(3, dtype=int)).all():\n found_inv = True\n break\n assert found_inv", "def trans_matrix_inv(m:numpy.ndarray):\n was2d = False\n if m.shape[1] == 3:\n was2d = True\n m = numpy.asarray([\n [1.0, 0.0, 0.0, 0.0],\n [0.0, m[0,0], m[0,1], m[0,2]],\n [0.0, m[1,0], m[1,1], m[1,2]],\n [0.0, 0.0, 0.0, 1.0]], numpy.float64)\n trans = m[0:3,3]\n rotate = numpy.zeros(3, numpy.float64)\n r = m[0:3,0:3]\n rc = 
numpy.linalg.cholesky(numpy.matmul(r.T, r)).T\n scale = numpy.diagonal(rc)\n if numpy.linalg.det(r) < 0.0:\n scale[0] *= -1.0\n rcd = rc * numpy.eye(3, dtype=numpy.float64)\n rc = numpy.linalg.solve(rcd, rc)\n shear = numpy.asarray([rc[0,1], rc[0,2], rc[1,2]], numpy.float64)\n r0 = trans_matrix({'rotate': rotate, 'scale': scale, 'shear': shear})[0:3,0:3]\n r0 = numpy.linalg.solve(numpy.linalg.inv(r), numpy.linalg.inv(r0))\n rotate[1] = numpy.arcsin(_frone(r0[0,2]))\n if numpy.abs((numpy.abs(rotate[1]) - (numpy.pi / 2.0))) < 1.0e-6:\n rotate[0] = 0.0\n rotate[2] = numpy.arctan2(-_frone(r0[1,0]), _frone(-r0[2,0] / r0[0,2]))\n else:\n rc = numpy.cos(rotate[1])\n rotate[0] = numpy.arctan2(_frone(r0[1,2] / rc), _frone(r0[2,2] / rc))\n rotate[2] = numpy.arctan2(_frone(r0[0,1] / rc), _frone(r0[0,0] / rc))\n if was2d:\n trans = trans[1:]\n rotate = rotate[0:1]\n scale = scale[1:]\n shear = shear[2:3]\n return (trans, rotate, scale, shear)", "def f_nor(*args):\n f = Nor(*args).factor()\n return f if f in B else f.factor()", "def pseudoInversa(J):\n\tJinv = np.linalg.pinv(J)\n\treturn Jinv", "def test_flip_vectors(self):\r\n m_matrix = array([[1.0, 0.0, 1.0], [2.0, 4.0, 4.0]])\r\n jn_matrix = array([[1.2, 0.1, -1.2], [2.5, 4.0, -4.5]])\r\n new_matrix = _flip_vectors(jn_matrix, m_matrix)\r\n assert_almost_equal(new_matrix, array([[1.2, 0.1, 1.2], [2.5, 4.0, 4.5]]))", "def est_rot_mat(pos, ref_pos, weights=None, allow_reflection=False):\n assert pos.shape == ref_pos.shape\n\n if weights is not None:\n centroid = \\\n np.sum(np.expand_dims(weights, 0) * pos, keepdims=True, axis=-1)\n centroid /= np.sum(weights) + 1e-9\n centroid_ref = np.sum(\n np.expand_dims(weights, 0) * ref_pos, keepdims=True, axis=-1\n )\n centroid_ref /= np.sum(weights) + 1e-9\n h = (pos - centroid) @ np.diag(weights) @ (ref_pos - centroid_ref).T\n else:\n centroid = np.mean(pos, keepdims=True, axis=-1)\n centroid_ref = np.mean(ref_pos, keepdims=True, axis=-1)\n h = (pos - centroid) @ (ref_pos - centroid_ref).T\n u, _, vh = np.linalg.svd(h)\n rot_mat = (u @ vh).T\n # Compensate reflections if they are not allowed.\n if np.linalg.det(rot_mat) < 0 and not allow_reflection:\n vh[:, -1] *= -1\n rot_mat = (u @ vh).T\n return rot_mat", "def RNull(R):\n return -S*R*(r1*(K1**B1/(K1**B1 + (A/R)**B1)) - r2*(K2**B2/(K2**B2 + (A)**B2)) \\\n *(R*M)/(K3 + R*M))/((-gwt*A + R*gc)*(R+1)) + S/(R+1)", "def solve_R(R, b):\n n = b.size\n assert R.shape == (n,n)\n x = zeros(n, dtype=R.dtype)\n for i in range(n-1,-1,-1):\n x[i] = (b[i] - dot(x[i+1:], R[i,i+1:])) / R[i,i]\n if not numpy.isfinite(x[i]):\n x[i] = 0.0\n return x", "def erfcinv(a):", "def operator(self, params: Tensor) -> Tensor:\n theta, phi = params\n # calculate entries\n a: Tensor = exp(1j * phi) * cos(theta / 2)\n b: Tensor = sin(theta / 2)\n c: Tensor = -b\n d: Tensor = exp(-1j * phi) * cos(theta / 2)\n # construct the rows of the rotation matrix\n r1: Tensor = cat((a.view(1), b.view(1)))\n r2: Tensor = cat((c.view(1), d.view(1)))\n # build and return the rotation matrix\n rot: Tensor = cat((r1, r2)).view(2, 2)\n return rot", "def test_flip_vectors(self):\n m_matrix = array([[1.0, 0.0, 1.0], [2.0, 4.0, 4.0]])\n jn_matrix = array([[1.2, 0.1, -1.2], [2.5, 4.0, -4.5]])\n new_matrix = _flip_vectors(jn_matrix, m_matrix)\n assert_almost_equal(new_matrix, array([[1.2, 0.1, 1.2], [2.5, 4.0, 4.5]]))", "def test_tensor_can_be_canonicalized(free_alg):\n\n dr = free_alg\n p = dr.names\n i, j = p.R_dumms[:2]\n r = p.R\n m = p.m\n h = p.h\n v = p.v\n\n # Anti-symmetric real matrix.\n tensor = (\n 
dr.sum((i, r), (j, r), m[i, j] * v[i] * v[j]) +\n dr.sum((i, r), (j, r), m[j, i] * v[i] * v[j])\n )\n assert tensor.n_terms == 2\n res = tensor.simplify()\n assert res == 0\n\n # With wrapping under an even function.\n tensor = (\n dr.sum((i, r), (j, r), m[i, j] ** 2 * v[i] * v[j]) +\n dr.sum((i, r), (j, r), m[j, i] ** 2 * v[i] * v[j])\n )\n assert tensor.n_terms == 2\n res = tensor.simplify()\n assert res.n_terms == 1\n term = res.local_terms[0]\n assert term.sums == ((i, r), (j, r))\n assert term.amp == 2 * m[i, j] ** 2\n assert term.vecs == (v[i], v[j])\n\n # With wrapping under an odd function.\n tensor = (\n dr.sum((i, r), (j, r), m[i, j] ** 3 * v[i] * v[j]) +\n dr.sum((i, r), (j, r), m[j, i] ** 3 * v[i] * v[j])\n )\n assert tensor.n_terms == 2\n res = tensor.simplify()\n assert res.n_terms == 0\n\n # Hermitian matrix.\n tensor = dr.einst(\n h[i, j] * v[i] * v[j] + conjugate(h[j, i]) * v[i] * v[j]\n )\n assert tensor.n_terms == 2\n res = tensor.simplify()\n assert res == 0", "def rotationMatrix(self):\n\n R = Compute3DRotationMatrix(self.exteriorOrientationParameters[3], self.exteriorOrientationParameters[4],\n self.exteriorOrientationParameters[5])\n\n return R", "def lorentzian(f, A, fc):\n return A/(1+(2*np.pi*f/fc)**2)", "def inv(X):\n R, t = Rt(X)\n Ri = R.T\n return np.concatenate((\n np.concatenate((Ri, -Ri.dot(t)[:,np.newaxis]), axis=1),\n np.array([[0, 0, 1]])))", "def get_scan_rotation_matrix(mount_angle, base_angle):\n alpha = 0 # rotation about y (roll)\n # negate mount_angle\n beta = np.deg2rad(-mount_angle) # rotation about x (pitch, mount_angle)\n gamma = np.deg2rad(base_angle) # rotation about z (yaw, base_angle)\n return tf.euler_matrix(alpha, beta, gamma, 'sxyz')", "def inverse(self):\n return Rotation(self.matrix.transposed())", "def expmap2rotmat(r):\n theta = np.linalg.norm( r )\n r0 = np.divide( r, max(theta, np.finfo(np.float32).eps) )\n r0x = np.array([0, -r0[2], r0[1], 0, 0, -r0[0], 0, 0, 0]).reshape(3,3)\n r0x = r0x - r0x.T\n R = np.eye(3,3) + np.sin(theta)*r0x + (1-np.cos(theta))*(r0x).dot(r0x)\n return R", "def _r_inv(self):\n # [output_dim, output_dim]\n return tf.linalg.cholesky_solve(\n self._chol_obs_covariance,\n tf.eye(self.emission.output_dim, dtype=self._chol_obs_covariance.dtype),\n )", "def opspace_matrices(mass_matrix, J_full, J_pos, J_ori):\n mass_matrix_inv = np.linalg.inv(mass_matrix)\n\n # J M^-1 J^T\n lambda_full_inv = np.dot(\n np.dot(J_full, mass_matrix_inv),\n J_full.transpose())\n\n # Jx M^-1 Jx^T\n lambda_pos_inv = np.dot(\n np.dot(J_pos, mass_matrix_inv),\n J_pos.transpose())\n\n # Jr M^-1 Jr^T\n lambda_ori_inv = np.dot(\n np.dot(J_ori, mass_matrix_inv),\n J_ori.transpose())\n\n # take the inverses, but zero out small singular values for stability\n lambda_full = np.linalg.pinv(lambda_full_inv)\n lambda_pos = np.linalg.pinv(lambda_pos_inv)\n lambda_ori = np.linalg.pinv(lambda_ori_inv)\n\n # nullspace\n Jbar = np.dot(mass_matrix_inv, J_full.transpose()).dot(lambda_full)\n nullspace_matrix = np.eye(J_full.shape[-1], J_full.shape[-1]) - np.dot(Jbar, J_full)\n\n return lambda_full, lambda_pos, lambda_ori, nullspace_matrix", "def find_plane_angles(self, roof_motor_position):\n\n # Calcolo il punto mediano tra i vertici 2 e 3\n pc_x = (self.roof_vertex_x[1] + self.roof_vertex_x[2]) / 2\n pc_y = (self.roof_vertex_y[1] + self.roof_vertex_y[2]) / 2\n pc_z = (self.roof_vertex_z[1] + self.roof_vertex_z[2]) / 2\n\n # Questa non so cosa sia\n base_r = [[self.roof_vertex_x[0] - pc_x, self.roof_vertex_y[0] - pc_y, self.roof_vertex_z[0] - pc_z],\n 
[self.roof_vertex_x[1] - pc_x, self.roof_vertex_y[1] - pc_y, self.roof_vertex_z[1] - pc_z],\n [0.0, 0.0, 0.0]]\n\n # Questa e' la costruzione di una matrice\n mat_rot = [[0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0]]\n\n # Non so quale operazione è implementata, ma a me servono solo tre elementi, j=2, i=0,1, j=1, i=0\n # Primo elemento, j=1, i=0\n mr = math.sqrt((base_r[0][0] ** 2) + (base_r[0][1] ** 2) + (base_r[0][2] ** 2))\n mat_rot[1][0] = base_r[0][1] / mr\n # Secondo elemento, j=2, i=0\n mat_rot[2][0] = base_r[0][2] / mr\n # Terzo elemento, j=2, i=1\n mr = math.sqrt((base_r[1][0] ** 2) + (base_r[1][1] ** 2) + (base_r[1][2] ** 2))\n mat_rot[2][1] = base_r[1][2] / mr\n\n # In alternativa posso calcolare tutti gli elementi della matrice\n # for i in range(2):\n # mr = math.sqrt((base_r[i][0] ** 2) + (base_r[i][1] ** 2) + (base_r[i][2] ** 2))\n # for j in range(3):\n # base_r[i][j] /= mr\n # mat_rot[j][i] = base_r[i][j]\n\n # Sono elementi della matrice non utilizzati\n # base_r[2][0] = +base_r[1][1] * base_r[0][2] - base_r[0][1] * base_r[1][2]\n # base_r[2][1] = -base_r[1][0] * base_r[0][2] + base_r[0][0] * base_r[1][2]\n # base_r[2][2] = +base_r[1][0] * base_r[0][1] - base_r[0][0] * base_r[1][1]\n # for i in range(3):\n # mat_rot[i][2] = base_r[2][i]\n\n # Qui estraggo la terna di Tait-Bryan angles usata internamente, la Z1Y2X3\n k17 = mat_rot[2][0]\n k16 = mat_rot[1][0]\n l17 = mat_rot[2][1]\n m20 = math.asin(k17)\n i23 = math.cos(m20)\n i24 = k16 / i23\n i25 = l17 / i23\n m19 = math.asin(i24)\n self.zyx1_r = m19 + roof_motor_position\n self.zyx2_r = math.asin(k17)\n self.zyx3_r = math.asin(i25)\n self.zyx3 = self.zyx3_r / Kinematic.M_TO_RAD\n self.zyx2 = self.zyx2_r / Kinematic.M_TO_RAD\n self.zyx1 = self.zyx1_r / Kinematic.M_TO_RAD\n angles = self.zyx_r_to_xyz(self.zyx3_r, self.zyx2_r, self.zyx1_r)\n self.xyz1 = angles[2]\n self.xyz2 = angles[0]\n self.xyz3 = angles[1]\n self.xyz1_r = angles[5]\n self.xyz2_r = angles[3]\n self.xyz3_r = angles[4]", "def compute_T_matrix(coordinates, p, reference=[[1,0,0],[0,1,0],[0,0,1]], origin=[0,0,0]):\n e_b_x = coordinates[0]\n e_b_y = coordinates[1]\n e_b_z = coordinates[2]\n \n e_a_x = reference[0] \n e_a_y = reference[1]\n e_a_z = reference[2]\n \n # Compute the rotation matrix\n x_b_a = [np.dot(e_b_x, e_a_x), np.dot(e_b_x, e_a_y), np.dot(e_b_x, e_a_z)]\n y_b_a = [np.dot(e_b_y, e_a_x), np.dot(e_b_y, e_a_y), np.dot(e_b_y, e_a_z)]\n z_b_a = [np.dot(e_b_z, e_a_x), np.dot(e_b_z, e_a_y), np.dot(e_b_z, e_a_z)]\n \n R_b_a = [[x_b_a[0], y_b_a[0], z_b_a[0]],[x_b_a[1], y_b_a[1], z_b_a[1]],x_b_a[2], y_b_a[2], z_b_a[2]]\n \n # Compute the displacement \n displacement = [p[0]-origin[0], p[1]-origin[1], p[2]-origin[2]]\n \n # Make it into a transform matrix\n T_b_a = [[x_b_a[0], y_b_a[0], z_b_a[0], displacement[0]],\n [x_b_a[1], y_b_a[1], z_b_a[1], displacement[1]],\n [x_b_a[2], y_b_a[2], z_b_a[2], displacement[2]],\n [0, 0, 0, 1]]\n \n T_a_b = np.linalg.inv(T_b_a).tolist()\n \n return T_b_a, T_a_b", "def compute_T_matrix(coordinates, p, reference=[[1,0,0],[0,1,0],[0,0,1]], origin=[0,0,0]):\n e_b_x = coordinates[0]\n e_b_y = coordinates[1]\n e_b_z = coordinates[2]\n \n e_a_x = reference[0] \n e_a_y = reference[1]\n e_a_z = reference[2]\n \n # Compute the rotation matrix\n x_b_a = [np.dot(e_b_x, e_a_x), np.dot(e_b_x, e_a_y), np.dot(e_b_x, e_a_z)]\n y_b_a = [np.dot(e_b_y, e_a_x), np.dot(e_b_y, e_a_y), np.dot(e_b_y, e_a_z)]\n z_b_a = [np.dot(e_b_z, e_a_x), np.dot(e_b_z, e_a_y), np.dot(e_b_z, e_a_z)]\n \n R_b_a = [[x_b_a[0], y_b_a[0], 
z_b_a[0]],[x_b_a[1], y_b_a[1], z_b_a[1]],x_b_a[2], y_b_a[2], z_b_a[2]]\n \n # Compute the displacement \n displacement = [p[0]-origin[0], p[1]-origin[1], p[2]-origin[2]]\n \n # Make it into a transform matrix\n T_b_a = [[x_b_a[0], y_b_a[0], z_b_a[0], displacement[0]],\n [x_b_a[1], y_b_a[1], z_b_a[1], displacement[1]],\n [x_b_a[2], y_b_a[2], z_b_a[2], displacement[2]],\n [0, 0, 0, 1]]\n \n T_a_b = np.linalg.inv(T_b_a).tolist()\n \n return T_b_a, T_a_b", "def f41(R):\n return f3(f3(R, R), R) % MOD", "def f_xnor(*args):\n f = Xnor(*args).factor()\n return f if f in B else f.factor()", "def F(self, distances):\n return np.sum(self.F_mat(distances), 1)", "def _get_repr_matrix_impl( # pylint: disable=too-many-locals\n *, orbitals, real_space_operator, rotation_matrix_cartesian,\n spin_rot_function, numeric,\n position_tolerance\n):\n\n orbitals = list(orbitals)\n\n positions_mapping = _get_positions_mapping(\n orbitals=orbitals,\n real_space_operator=real_space_operator,\n position_tolerance=position_tolerance\n )\n repr_matrix = sp.zeros(len(orbitals))\n if not numeric:\n rotation_matrix_cartesian = sp.Matrix(rotation_matrix_cartesian)\n\n expr_substitution = _get_substitution(rotation_matrix_cartesian)\n for i, orb in enumerate(orbitals):\n res_pos_idx = positions_mapping[i]\n spin_res = spin_rot_function(\n rotation_matrix_cartesian=rotation_matrix_cartesian,\n spin=orb.spin,\n numeric=numeric\n )\n\n new_func = orb.function.subs(expr_substitution, simultaneous=True)\n for new_spin, spin_value in spin_res.items():\n res_pos_idx_reduced = [\n idx for idx in res_pos_idx if orbitals[idx].spin == new_spin\n ]\n func_basis_reduced = [\n orbitals[idx].function for idx in res_pos_idx_reduced\n ]\n func_vec = _expr_to_vector(\n new_func, basis=func_basis_reduced, numeric=numeric\n )\n func_vec_norm = la.norm(np.array(func_vec).astype(complex))\n if not np.isclose(func_vec_norm, 1):\n raise ValueError(\n 'Norm {} of vector {} for expression {} created from orbital {} is not one.\\nCartesian rotation matrix: {}'\n .format(\n func_vec_norm, func_vec, new_func, orb,\n rotation_matrix_cartesian\n )\n )\n for idx, func_value in zip(res_pos_idx_reduced, func_vec):\n repr_matrix[idx, i] += func_value * spin_value\n # check that the matrix is unitary\n repr_matrix_numeric = np.array(repr_matrix).astype(complex)\n if not np.allclose(\n repr_matrix_numeric @ repr_matrix_numeric.conj().T,\n np.eye(*repr_matrix_numeric.shape) # pylint: disable=not-an-iterable\n ):\n max_mismatch = np.max(\n np.abs(\n repr_matrix_numeric @ repr_matrix_numeric.conj().T -\n np.eye(*repr_matrix_numeric.shape) # pylint: disable=not-an-iterable\n )\n )\n raise ValueError(\n 'Representation matrix is not unitary. 
Maximum mismatch to unity: {}'\n .format(max_mismatch)\n )\n if numeric:\n return repr_matrix_numeric\n else:\n repr_matrix.simplify()\n return repr_matrix", "def __neg__(self):\r\n return mat4(map(lambda x: -x, self.mlist))", "def test_rotation(self, tol):\n theta = 0.98\n S = symplectic.rotation(theta)\n expected = np.block([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])\n np.allclose(S, expected, atol=tol, rtol=0)", "def generate_rotation_matrix(phi: float, the: float, psi: float) -> np.matrix:\n # Transfer the angle to Euclidean\n phi = -float(phi) * np.pi / 180.0\n the = -float(the) * np.pi / 180.0\n psi = -float(psi) * np.pi / 180.0\n sin_alpha = np.sin(phi)\n cos_alpha = np.cos(phi)\n sin_beta = np.sin(the)\n cos_beta = np.cos(the)\n sin_gamma = np.sin(psi)\n cos_gamma = np.cos(psi)\n\n # Calculate inverse rotation matrix\n Inv_R = np.zeros((3, 3), dtype='float32')\n\n Inv_R[0, 0] = cos_alpha * cos_gamma - cos_beta * sin_alpha \\\n * sin_gamma\n Inv_R[0, 1] = -cos_alpha * sin_gamma - cos_beta * sin_alpha \\\n * cos_gamma\n Inv_R[0, 2] = sin_beta * sin_alpha\n\n Inv_R[1, 0] = sin_alpha * cos_gamma + cos_beta * cos_alpha \\\n * sin_gamma\n Inv_R[1, 1] = -sin_alpha * sin_gamma + cos_beta * cos_alpha \\\n * cos_gamma\n Inv_R[1, 2] = -sin_beta * cos_alpha\n\n Inv_R[2, 0] = sin_beta * sin_gamma\n Inv_R[2, 1] = sin_beta * cos_gamma\n Inv_R[2, 2] = cos_beta\n #Inv_R[3, 3] = 1\n\n return np.matrix(Inv_R)", "def relu(self):\n return self * self.ge(0)", "def match(cube):\n \n #M1'\n M1 = (cube[1,1,0] & cube[1,1,1] & \n (not cube[0,0,2]) & (not cube[1,0,2]) & (not cube[2,0,2]) &\n (not cube[0,1,2]) & (not cube[1,1,2]) & (not cube[2,1,2]) &\n (not cube[0,2,2]) & (not cube[1,2,2]) & (not cube[2,2,2]));\n if M1:\n return True;\n \n # gerate rotations around z/vertical axis\n cuberots = [rotate(cube, axis = 2, steps = rot) for rot in range(4)];\n #print('Cube rotations:');\n #[printCube(c) for c in cuberots] \n \n # M2' and all rotations\n for curo in cuberots:\n M2 = (curo[1,1,0] & curo[1,1,1] & curo[1,2,1] &\n (not curo[0,0,2]) & (not curo[1,0,2]) & (not curo[2,0,2]) &\n (not curo[0,1,2]) & (not curo[1,1,2]) & (not curo[2,1,2]));\n if M2:\n return True;\n \n # M3' and all rotations\n for curo in cuberots:\n M3 = (curo[1,1,0] & curo[1,1,1] & curo[1,2,1] & curo[2,1,1] &\n (not curo[0,0,2]) & (not curo[1,0,2]) &\n (not curo[0,1,2]) & (not curo[1,1,2]));\n if M3:\n return True;\n \n # M4' and all rotations\n for curo in cuberots:\n M4 = (curo[1,1,0] & curo[1,1,1] & curo[2,2,1] & curo[2,2,2] &\n (not curo[0,0,2]) & (not curo[1,0,2]) & (not curo[2,0,2]) &\n (not curo[0,1,2]) & (not curo[1,1,2]) & (not curo[2,1,2]) &\n (not curo[0,2,2]) & (not curo[1,2,2]));\n if M4:\n return True;\n \n # M5' and all rotations\n for curo in cuberots:\n M5 = (curo[1,2,0] & curo[1,1,1] & \n (not curo[0,0,0]) & (not curo[1,0,0]) & (not curo[2,0,0]) &\n (not curo[1,1,0]) &\n (not curo[0,0,1]) & (not curo[1,0,1]) & (not curo[2,0,1]) &\n (not curo[0,0,2]) & (not curo[1,0,2]) & (not curo[2,0,2]) &\n (not curo[0,1,2]) & (not curo[1,1,2]) & (not curo[2,1,2]) &\n (not curo[0,2,2]) & (not curo[1,2,2]) & (not curo[2,2,2]));\n if M5:\n return True;\n \n # M6' and all rotations\n for curo in cuberots:\n M6 = (curo[2,1,0] & curo[1,2,0] & curo[1,1,1] &\n (not curo[0,0,0]) & (not curo[1,0,0]) &\n (not curo[0,1,0]) & (not curo[1,1,0]) &\n (not curo[0,0,1]) & (not curo[1,0,1]) &\n (not curo[0,1,1]) &\n (not curo[0,0,2]) & (not curo[1,0,2]) & (not curo[2,0,2]) &\n (not curo[0,1,2]) & (not curo[1,1,2]) & (not curo[2,1,2]) 
&\n (not curo[0,2,2]) & (not curo[1,2,2]) & (not curo[2,2,2]));\n if M6:\n return True;\n \n # M7' and all rotations\n for curo in cuberots:\n M7 = (curo[2,2,0] & curo[1,1,1] &\n (not curo[0,0,0]) & (not curo[1,0,0]) & (not curo[2,0,0]) &\n (not curo[0,1,0]) & (not curo[1,1,0]) & (not curo[2,1,0]) &\n (not curo[0,2,0]) & (not curo[1,2,0]) &\n (not curo[0,0,1]) & (not curo[1,0,1]) & (not curo[2,0,1]) &\n (not curo[0,1,1]) & (not curo[2,1,1]) &\n (not curo[0,2,1]) & (not curo[1,2,1]) & (not curo[2,2,1]) &\n (not curo[0,0,2]) & (not curo[1,0,2]) & (not curo[2,0,2]) &\n (not curo[0,1,2]) & (not curo[1,1,2]) & (not curo[2,1,2]) &\n (not curo[0,2,2]) & (not curo[1,2,2]) & (not curo[2,2,2]));\n if M7:\n return True;\n \n return False;" ]
[ "0.60769", "0.59607196", "0.59561664", "0.58100045", "0.5771714", "0.5771714", "0.5648924", "0.562697", "0.5613362", "0.5532923", "0.5497425", "0.5491512", "0.5427582", "0.5386786", "0.53646296", "0.53314674", "0.5323371", "0.5293043", "0.5282815", "0.5280129", "0.5269544", "0.5263146", "0.525797", "0.52557415", "0.5254084", "0.5239351", "0.5206202", "0.51792383", "0.51653636", "0.51643366", "0.51634973", "0.51562935", "0.5142141", "0.513586", "0.5129901", "0.511951", "0.51032096", "0.50972193", "0.5094665", "0.50891423", "0.50687844", "0.50608", "0.5049909", "0.50444686", "0.50427175", "0.5029062", "0.5021858", "0.50217247", "0.5008952", "0.5003346", "0.49938732", "0.499304", "0.49819863", "0.49804097", "0.4976385", "0.49751005", "0.49689686", "0.49686658", "0.49548656", "0.49500024", "0.494241", "0.494158", "0.49393433", "0.49380112", "0.4926901", "0.49251527", "0.4919763", "0.49153206", "0.4913956", "0.49049917", "0.49000114", "0.4897087", "0.48837164", "0.48822376", "0.48759687", "0.48753414", "0.4873215", "0.48679462", "0.48662493", "0.48642522", "0.48611236", "0.48609564", "0.4858172", "0.4855176", "0.4853529", "0.48406553", "0.48370624", "0.48368278", "0.483427", "0.48314315", "0.48314315", "0.48297262", "0.4827706", "0.4822861", "0.48209125", "0.48208568", "0.48204792", "0.48194742", "0.4819131", "0.48079228" ]
0.4913564
69
Testing the working of xia workflow celery task queue
def test_execute_xia_automated_workflow(self, mock_run): self.assert_(execute_xia_automated_workflow.run()) self.assert_(execute_xia_automated_workflow.run()) self.assertEqual(mock_run.call_count, 2) self.assert_(execute_xia_automated_workflow.run()) self.assertEqual(mock_run.call_count, 3)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_celery_tasks(self):\n from celery_runner import celery\n task = celery.send_task(\n 'tasks.log', args=['Hello from the other side!'], kwargs={}\n )\n self.assertTrue(task.id)\n task_state = celery.AsyncResult(task.id).state\n while task_state == PENDING:\n task_state = celery.AsyncResult(task.id).state\n res = celery.AsyncResult(task.id)\n self.assertEqual(res.result, 'Hello from the other side!')", "def test_task(self, mocker):\n\n tid = 289466\n site = \"mysite\"\n json = self.generate_task_dictionary(tid, state=\"error\")\n url = (\n \"https://cloudapi.acquia.com/v1/\"\n \"sites/prod:{site}/tasks/{tid}.json\".format(tid=tid, site=site)\n )\n\n mocker.register_uri(\"GET\", url, json=json)\n\n task = self.client.site(site).task(tid)\n self.assertEqual(task[\"id\"], tid)\n self.assertEqual(task[\"state\"], \"error\")", "def test_reenqueue_job1(client):\n with client.application.app_context():\n pytest.skip(\"Not implemented\")", "def test_wait(self, mocker):\n\n tid = 289466\n site = \"mysite\"\n first_response = self.generate_task_dictionary(\n tid, state=\"waiting\", completed=False\n )\n\n responses = [\n {\"json\": first_response},\n {\"json\": self.generate_task_dictionary(tid)},\n ]\n url = (\n \"https://cloudapi.acquia.com/v1/\"\n \"sites/prod:{site}/tasks/{tid}.json\".format(tid=tid, site=site)\n )\n\n mocker.register_uri(\"GET\", url, responses)\n\n task = self.client.site(site).task(tid).wait()\n self.assertEqual(task[\"id\"], tid)\n self.assertEqual(task[\"state\"], \"done\")", "async def test_get_tasks(self):\n await self.populate_test_data() # Populate data in foglamp.scheduled_processes\n\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n # declare _scheduler task\n interval_schedule = IntervalSchedule()\n interval_schedule.name = 'get_tasks'\n interval_schedule.process_name = \"sleep5\"\n interval_schedule.repeat = datetime.timedelta(seconds=1)\n interval_schedule.exclusive = False\n\n await scheduler.save_schedule(interval_schedule)\n\n await asyncio.sleep(15)\n\n # Assert running tasks\n tasks = await scheduler.get_tasks(\n where=[\"state\", \"=\", int(Task.State.INTERRUPTED)])\n assert not tasks\n\n tasks = await scheduler.get_tasks(\n where=[\"end_time\", \"=\", 'NULL'])\n assert tasks\n\n tasks = await scheduler.get_tasks(limit=50)\n states = [int(task.state) for task in tasks]\n\n assert len(tasks) > 1\n assert int(Task.State.RUNNING) in states\n assert int(Task.State.COMPLETE) in states\n\n tasks = await scheduler.get_tasks(1)\n assert len(tasks) == 1\n\n tasks = await scheduler.get_tasks(\n where=[\"state\", \"=\", int(Task.State.RUNNING)],\n sort=[[\"state\", \"desc\"]], offset=50)\n assert not tasks\n\n tasks = await scheduler.get_tasks(\n where=[\"state\", \"=\", int(Task.State.RUNNING)],\n sort=[[\"state\", \"desc\"], [\"start_time\", \"asc\"]])\n assert tasks\n\n tasks = await scheduler.get_tasks(or_where_list=[[\"state\", \"=\", int(Task.State.RUNNING)], \\\n [\"state\", \"=\", int(Task.State.RUNNING)]])\n assert tasks\n\n tasks = await scheduler.get_tasks(and_where_list=[[\"state\", \"=\", int(Task.State.RUNNING)], \\\n [\"state\", \"=\", int(Task.State.RUNNING)]])\n assert tasks\n\n await self.stop_scheduler(scheduler)", "def test_run_job1(client):\n with client.application.app_context():\n app = client.application\n app.redis.flushall()\n\n task_id = str(uuid4())\n t = Task.create_task(task_id)\n j = t.create_job()\n job_id = j.job_id\n t.save()\n\n exec_mock = MagicMock()\n 
exec_mock.validate_max_running_executions.return_value = True\n client.application.executor = exec_mock\n\n queue = Queue(\"jobs\", is_async=False, connection=client.application.redis)\n result = queue.enqueue(job_mod.run_job, t.task_id, job_id, \"image\", \"command\")\n\n worker = SimpleWorker([queue], connection=queue.connection)\n worker.work(burst=True)\n\n t.reload()\n expect(t.jobs).to_length(1)\n\n job = t.jobs[0]\n expect(job.executions).to_length(1)\n\n execution = job.executions[0]\n expect(execution.image).to_equal(\"image\")\n expect(execution.command).to_equal(\"command\")\n\n hash_key = f\"rq:job:{result.id}\"\n\n res = app.redis.exists(hash_key)\n expect(res).to_be_true()\n\n res = app.redis.hget(hash_key, \"status\")\n expect(res).to_equal(\"finished\")\n\n res = app.redis.hexists(hash_key, \"data\")\n expect(res).to_be_true()\n\n keys = app.redis.keys()\n next_job_id = [\n key\n\n for key in keys\n\n if key.decode(\"utf-8\").startswith(\"rq:job\")\n and not key.decode(\"utf-8\").endswith(result.id)\n ]\n expect(next_job_id).to_length(1)\n next_job_id = next_job_id[0]\n\n res = app.redis.exists(next_job_id)\n expect(res).to_be_true()\n\n res = app.redis.hget(next_job_id, \"status\")\n expect(res).to_equal(\"queued\")\n\n res = app.redis.hexists(next_job_id, \"data\")\n expect(res).to_be_true()\n\n res = app.redis.hget(next_job_id, \"origin\")\n expect(res).to_equal(\"monitor\")\n\n res = app.redis.hget(next_job_id, \"description\")\n expect(res).to_equal(\n f\"fastlane.worker.job.monitor_job('{task_id}', '{job_id}', '{execution.execution_id}')\"\n )\n\n res = app.redis.hget(next_job_id, \"timeout\")\n expect(res).to_equal(\"-1\")\n\n t.reload()\n expect(t.jobs[0].executions[0].status).to_equal(JobExecution.Status.running)", "def start_task():\n get_results_from_message_queue()\n test_all_servers_connection()", "def test_dag_tasks_present(self):\n self.assertEqual(self.tasks, [\n \"harvest_oai\",\n \"create_collection\",\n \"combine_index\",\n \"solr_alias_swap\",\n \"success_slack_trigger\"\n ])", "def test_dag_tasks_present(self):\n self.assertEqual(self.tasks, [\n \"harvest_oai\",\n \"create_collection\",\n \"combine_index\",\n \"solr_alias_swap\",\n \"success_slack_trigger\"\n ])", "def test_execute(self):\n context = dict()\n cmd = pycell.python_cell(\n source='print(2+2)',\n validate=True\n )\n controller = FakeWorkflowController()\n self.backend.execute_async(\n task=TaskHandle(\n task_id='000',\n project_id=self.PROJECT_ID,\n controller=controller\n ),\n command=cmd,\n artifacts=context\n )\n time.sleep(3)\n self.assertEqual(controller.task_id, '000')\n self.assertEqual(controller.state, 'SUCCESS')\n self.assertEqual(controller.outputs.stdout[0].value, '4')", "def test_contain_tasks(self):\n dag = self.dagbag.get_dag(self.dag_id)\n tasks = dag.tasks\n task_ids = list(map(lambda task: task.task_id, tasks))\n assert sorted(task_ids) == sorted([\n 'start', 'gcs_to_bq', 'stop'\n ])", "def test_get_job_queue(self):\n response = self.client.open(\n '/tx-queue/2/scheduler/job',\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def run_task(self) -> Task:", "def executeTask (self, queue='default'):\n tasks = self.taskqueue_stub.GetTasks(queue)\n if tasks:\n task = tasks[0]\n self.taskqueue_stub.DeleteTask (queue, task['name'])\n params = base64.b64decode(task[\"body\"])\n if dict(task['headers']).get('Content-Type') == 'application/json':\n return self.testapp.post_json(task[\"url\"], json.loads(params))\n else:\n return 
self.testapp.post(task[\"url\"], params)", "def tasks():", "def test_stuff_in_queues_integrated(integrated_ff):\n search_res = ff_utils.search_metadata('search/?limit=all&type=File', key=integrated_ff['ff_key'])\n # just take the first handful\n for item in search_res[:8]:\n ff_utils.patch_metadata({}, obj_id=item['uuid'], key=integrated_ff['ff_key'])\n time.sleep(3) # let queues catch up\n stuff_in_queue = ff_utils.stuff_in_queues(integrated_ff['ff_env_index_namespace'], check_secondary=True)\n assert stuff_in_queue\n with pytest.raises(Exception) as exec_info:\n ff_utils.stuff_in_queues(None, check_secondary=True) # fail if no env specified\n assert 'Must provide a full fourfront environment name' in str(exec_info.value)", "def test_task_add():\n pytest.fail('Not implemented yet.')", "def test_celery_beat(init):\n\n ini_file = os.path.join(os.path.dirname(__file__), \"task-test.ini\")\n worker, beat = run_worker_and_beat(ini_file)\n\n try:\n # Reset test database\n redis = get_redis(init.config.registry)\n redis.delete(\"foo\", \"bar\")\n\n foo = \"no read\"\n deadline = time.time() + 20\n while time.time() < deadline:\n redis = get_redis(init.config.registry)\n # scheduledtasks.ticker should beat every second and reset values in Redis\n foo = redis.get(\"foo\")\n if foo:\n break\n time.sleep(0.5)\n\n if worker:\n assert worker.returncode is None\n\n if beat:\n assert beat.returncode is None\n\n if foo == b\"xoo\":\n # TravisCI headless debugging\n print(worker.stdout.read().decode(\"utf-8\"))\n print(worker.stderr.read().decode(\"utf-8\"))\n print(beat.stdout.read().decode(\"utf-8\"))\n print(beat.stderr.read().decode(\"utf-8\"))\n\n assert foo == b\"xoo\" # Set back by its original value by 1 second beat\n\n finally:\n try:\n if worker:\n worker.terminate()\n except ProcessLookupError:\n pass\n\n try:\n beat and beat.terminate()\n except ProcessLookupError:\n pass", "def task():", "def test_executes_single_task(self):\n g = TaskDependencyGraph(MockWorkflowContext())\n task = tasks.NOPLocalWorkflowTask(mock.Mock())\n g.add_task(task)\n with limited_sleep_mock(limit=1):\n g.execute()\n self.assertTrue(task.is_terminated)", "def test_solve_task(self):\n pass", "def test_task_list():\n # Fake pyramid request, useful for testing.\n request = testing.DummyRequest()\n\n pytest.fail('Not implemented yet.')", "def test_task_preloading(self):\r\n # Del previous TaskRuns\r\n self.create()\r\n self.del_task_runs()\r\n\r\n # Register\r\n self.register()\r\n self.signin()\r\n\r\n assigned_tasks = []\r\n # Get Task until scheduler returns None\r\n res = self.app.get('api/app/1/newtask')\r\n task1 = json.loads(res.data)\r\n # Check that we received a Task\r\n assert task1.get('info'), task1\r\n # Pre-load the next task for the user\r\n res = self.app.get('api/app/1/newtask?offset=1')\r\n task2 = json.loads(res.data)\r\n # Check that we received a Task\r\n assert task2.get('info'), task2\r\n # Check that both tasks are different\r\n assert task1.get('id') != task2.get('id'), \"Tasks should be different\"\r\n ## Save the assigned task\r\n assigned_tasks.append(task1)\r\n assigned_tasks.append(task2)\r\n\r\n # Submit an Answer for the assigned and pre-loaded task\r\n for t in assigned_tasks:\r\n tr = dict(app_id=t['app_id'], task_id=t['id'], info={'answer': 'No'})\r\n tr = json.dumps(tr)\r\n\r\n self.app.post('/api/taskrun', data=tr)\r\n # Get two tasks again\r\n res = self.app.get('api/app/1/newtask')\r\n task3 = json.loads(res.data)\r\n # Check that we received a Task\r\n assert task3.get('info'), 
task1\r\n # Pre-load the next task for the user\r\n res = self.app.get('api/app/1/newtask?offset=1')\r\n task4 = json.loads(res.data)\r\n # Check that we received a Task\r\n assert task4.get('info'), task2\r\n # Check that both tasks are different\r\n assert task3.get('id') != task4.get('id'), \"Tasks should be different\"\r\n assert task1.get('id') != task3.get('id'), \"Tasks should be different\"\r\n assert task2.get('id') != task4.get('id'), \"Tasks should be different\"\r\n # Check that a big offset returns None\r\n res = self.app.get('api/app/1/newtask?offset=11')\r\n assert json.loads(res.data) == {}, res.data", "def test_enqueueWorkDone(self):\n # TODO: this exact test should run against LocalQueuer as well.\n def operation(txn):\n # TODO: how does \"enqueue\" get associated with the transaction?\n # This is not the fact with a raw t.w.enterprise transaction.\n # Should probably do something with components.\n return txn.enqueue(DummyWorkItem, a=3, b=4, jobID=100, workID=1,\n notBefore=datetime.datetime.utcnow())\n yield inTransaction(self.store.newTransaction, operation)\n\n # Wait for it to be executed. Hopefully this does not time out :-\\.\n yield JobItem.waitEmpty(self.store.newTransaction, reactor, 60)\n\n self.assertEquals(DummyWorkItem.results, {100: 7})", "def assertScheduled(self, tasks):\n\n actual_task_count = self.redis.llen('celery')\n self.assertEqual(actual_task_count, len(tasks))\n\n for t in tasks:\n raw_message = self.redis.lpop('celery')\n message = json.loads(raw_message)\n raw_body = base64.b64decode(message[u'body'])\n body = pickle.loads(raw_body)\n\n expected_name, expected_args, expected_kwargs = t\n self.assertEqual(body['task'], expected_name)\n if expected_args is not None:\n self.assertEqual(tuple(body['args']), tuple(expected_args))\n else:\n # args was not provided, compare with expected default\n self.assertEqual(tuple(body['args']), ())\n if expected_kwargs is not None:\n self.assertEqual(body['kwargs'], expected_kwargs)\n else:\n # kwargs was not provided, compare with expected default\n self.assertEqual(body['kwargs'], {})", "def setUp(self):\n self.pcp = ControllerQueue(None, None)\n DummyWorkItem.results = {}", "def setUp(self):\n self.t = Task()", "def setUp(self):\n self.t = Task()", "def task():\n pass", "def task():\n pass", "def run(request, resulthash):\n try:\n stored = models.HBTask.objects.get(resulthash=resulthash)\n except:\n stored = None\n thisone = {'Error', 'not found in database'}\n \n # Finished, and reported back\n if stored.status == models.HBTask.OK_STATUS:\n thisone = True\n\n # Submitted, have not heard from since\n elif stored.status == models.HBTask.PENDING_STATUS:\n obj = HbObject(hash=resulthash)\n status,fullstatus = check_stored_status(obj)\n thisone = fullstatus or True \n\n # resulted in error\n elif stored.status == models.HBTask.ERROR_STATUS:\n thisone = {'Error','something'}\n\n # no status: submit now\n else:\n # print 'Now status : ',stored.status\n # print 'Now submit task : ',stored.celery_taskname\n\n # to submit hb task\n todo = getattr(tasks,stored.hb_taskname)\n # celery_result = todo.delay(**json.loads(stored.parameters))\n parameters = json.loads(stored.parameters)\n \n action = todo(**parameters)\n\n if not action.ready_to_go:\n thisone = {'Warning':'Not all dependencies are met',\n 'dependency_status':action.dependency_status()}\n\n # Add me as waiting for a few\n todo = [d.split(':')[1] for d in action.dependencies_todo]\n dep = models.HBTask.objects.filter(resulthash__in=todo)\n for d in dep:\n 
w,isnew = models.Waiting.objects.get_or_create(todo=stored,dependency=d)\n # print 'Created ? ',w,isnew\n # submit dependency to run\n run(None,resulthash=d.resulthash)\n else:\n action.submit()\n time.sleep(0.5)\n obj = HbObject(hash=resulthash)\n status,fullstatus = check_stored_status(obj)\n thisone = fullstatus or True \n\n return JsonResponse({'result':thisone})\n # return JsonResponse(thisone)", "def test_incremental_tasks(self):\r\n self.create_2(sched='incremental')\r\n\r\n # Del previous TaskRuns\r\n self.del_task_runs()\r\n\r\n # Register\r\n self.register(fullname=self.user.fullname, name=self.user.username,\r\n password=self.user.password)\r\n self.register(fullname=\"Marie Doe\", name=\"mariedoe\", password=\"dr0wss4p\")\r\n self.signin()\r\n\r\n # Get the only task with no runs!\r\n res = self.app.get('api/app/1/newtask')\r\n data = json.loads(res.data)\r\n # Check that we received a clean Task\r\n assert data.get('info'), data\r\n assert not data.get('info').get('last_answer')\r\n\r\n # Submit an Answer for the assigned task\r\n tr = dict(app_id=data['app_id'], task_id=data['id'], info={'answer': 'No'})\r\n tr = json.dumps(tr)\r\n\r\n self.app.post('/api/taskrun', data=tr)\r\n # No more tasks available for this user!\r\n res = self.app.get('api/app/1/newtask')\r\n data = json.loads(res.data)\r\n assert not data\r\n\r\n #### Get the only task now with an answer as Anonimous!\r\n self.signout()\r\n res = self.app.get('api/app/1/newtask')\r\n data = json.loads(res.data)\r\n\r\n # Check that we received a Task with answer\r\n assert data.get('info'), data\r\n assert data.get('info').get('last_answer').get('answer') == 'No'\r\n\r\n # Submit a second Answer as Anonimous\r\n tr = dict(app_id=data['app_id'], task_id=data['id'],\r\n info={'answer': 'No No'})\r\n tr = json.dumps(tr)\r\n\r\n self.app.post('/api/taskrun', data=tr)\r\n\r\n #### Get the only task now with an answer as User2!\r\n self.signin(email=\"mariedoe@example.com\", password=\"dr0wss4p\")\r\n res = self.app.get('api/app/1/newtask')\r\n data = json.loads(res.data)\r\n\r\n # Check that we received a Task with answer\r\n assert data.get('info'), data\r\n assert data.get('info').get('last_answer').get('answer') == 'No No'", "def test_executes_multiple_concurrent(self):\n g = TaskDependencyGraph(MockWorkflowContext())\n task1 = tasks.NOPLocalWorkflowTask(mock.Mock())\n task2 = tasks.NOPLocalWorkflowTask(mock.Mock())\n g.add_task(task1)\n g.add_task(task2)\n with limited_sleep_mock(limit=1):\n g.execute()\n self.assertTrue(task1.is_terminated)\n self.assertTrue(task2.is_terminated)", "def local_celery():\n click.echo('Start Celery on Machine')\n ret = subprocess.call(\n ['celery', 'worker', '-A', 'celery_worker.celery', '--loglevel=info', '-P', 'eventlet'])\n sys.exit(ret)", "def test_exceptionWhenWorking(self):\n dbpool, _ignore_qpool, clock, _ignore_performerChosen = self._setupPools()\n fakeNow = datetime.datetime(2012, 12, 12, 12, 12, 12)\n\n # Let's create a couple of work items directly, not via the enqueue\n # method, so that they exist but nobody will try to immediately execute\n # them.\n\n @transactionally(dbpool.pool.connection)\n @inlineCallbacks\n def setup(txn):\n # OK\n yield DummyWorkItem.makeJob(\n txn, a=1, b=0, notBefore=fakeNow - datetime.timedelta(20 * 60)\n )\n\n # Error\n yield DummyWorkItem.makeJob(\n txn, a=-1, b=1, notBefore=fakeNow - datetime.timedelta(20 * 60)\n )\n\n # OK\n yield DummyWorkItem.makeJob(\n txn, a=2, b=0, notBefore=fakeNow - datetime.timedelta(20 * 60)\n )\n yield setup\n 
clock.advance(20 - 12)\n\n # Work item complete\n self.assertTrue(DummyWorkItem.results == {1: 1, 3: 2})", "def test_create(self):\n assert self.worker.connection is None or self.worker.connection.is_alive()\n # TODO(orlade): Mock this stuff.\n # assert_queue_size({TEST_REQUEST_QUEUE: 0, TEST_RESULT_QUEUE: 0})", "def test_workflows_get(self):\n pass", "def test_workflows_get(self):\n pass", "def test_priority_task():\n task_id = uuid.uuid4().hex\n\n high_priority_task.apply_async(queue=\"high_priority\")\n normal_task.apply_async(queue=\"default\")\n\n task1 = high_priority_task.apply_async(\n args=[\"high task 1\"], queue=\"default\", task_id=task_id\n )\n time.sleep(1)\n high_priority_task.apply_async(\n args=[\"high task 2\"], queue=\"default\", task_id=task_id\n )", "def test_cron_workflow_service_create_cron_workflow(self):\n pass", "def create_task():", "def test_cron_workflow_service_get_cron_workflow(self):\n pass", "def setUp(self):\n self.t = Task()\n self.t(\"add one\")", "def task():\n\n\tprint('Example task executed.')", "def task(self):", "def task(self):", "def running_celery_tasks(request):\n active_dict = CELERY_INSPECT.active()\n active_tasks = []\n if active_dict:\n for task_list in active_dict.values():\n active_tasks.extend(task_list)\n if active_tasks:\n active_tasks = [dikt.get(\"id\", \"\") for dikt in active_tasks]\n return Response({\"active_tasks\": active_tasks})", "def test_queue_subtasks_for_query3(self):\r\n\r\n mock_create_subtask_fcn = Mock()\r\n self._queue_subtasks(mock_create_subtask_fcn, 6, 3, 11, 3)\r\n\r\n # Check number of items for each subtask\r\n mock_create_subtask_fcn_args = mock_create_subtask_fcn.call_args_list\r\n self.assertEqual(len(mock_create_subtask_fcn_args[0][0][0]), 3)\r\n self.assertEqual(len(mock_create_subtask_fcn_args[1][0][0]), 3)\r\n self.assertEqual(len(mock_create_subtask_fcn_args[2][0][0]), 4)\r\n self.assertEqual(len(mock_create_subtask_fcn_args[3][0][0]), 4)", "def test_task_sequence(self):\n\n class Task(tasks.WorkflowTask):\n name = 'task'\n\n def apply_async(self):\n record.append(self.i)\n self.set_state(tasks.TASK_SUCCEEDED)\n self.async_result.result = None\n return self.async_result\n\n task_count = 10\n\n # prepare the task seuqence\n seq_tasks = []\n for i in range(task_count):\n t = Task(mock.Mock())\n seq_tasks.append(t)\n t.i = i\n g = TaskDependencyGraph(MockWorkflowContext())\n seq = g.sequence()\n seq.add(*seq_tasks)\n\n record = []\n\n with limited_sleep_mock():\n g.execute()\n\n expected = list(range(task_count))\n self.assertEqual(expected, record)", "def test_queue_subtasks_for_query1(self):\r\n\r\n mock_create_subtask_fcn = Mock()\r\n self._queue_subtasks(mock_create_subtask_fcn, 6, 3, 8, 1)\r\n\r\n # Check number of items for each subtask\r\n mock_create_subtask_fcn_args = mock_create_subtask_fcn.call_args_list\r\n self.assertEqual(len(mock_create_subtask_fcn_args[0][0][0]), 3)\r\n self.assertEqual(len(mock_create_subtask_fcn_args[1][0][0]), 3)\r\n self.assertEqual(len(mock_create_subtask_fcn_args[2][0][0]), 3)", "def test_16_task_status_completed(self, mock):\r\n with self.flask_app.app_context():\r\n self.register()\r\n self.new_application()\r\n\r\n app = db.session.query(App).first()\r\n # We use a string here to check that it works too\r\n task = Task(app_id=app.id, info={'n_answers': '10'})\r\n db.session.add(task)\r\n db.session.commit()\r\n\r\n res = self.app.get('app/%s/tasks/browse' % (app.short_name),\r\n follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n assert \"Sample App\" in 
res.data, res.data\r\n assert '0 of 10' in res.data, res.data\r\n err_msg = \"Download button should be disabled\"\r\n assert dom.find(id='nothingtodownload') is not None, err_msg\r\n\r\n for i in range(5):\r\n task_run = TaskRun(app_id=app.id, task_id=1,\r\n info={'answer': 1})\r\n db.session.add(task_run)\r\n db.session.commit()\r\n self.app.get('api/app/%s/newtask' % app.id)\r\n\r\n res = self.app.get('app/%s/tasks/browse' % (app.short_name),\r\n follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n assert \"Sample App\" in res.data, res.data\r\n assert '5 of 10' in res.data, res.data\r\n err_msg = \"Download Partial results button should be shown\"\r\n assert dom.find(id='partialdownload') is not None, err_msg\r\n\r\n for i in range(5):\r\n task_run = TaskRun(app_id=app.id, task_id=1,\r\n info={'answer': 1})\r\n db.session.add(task_run)\r\n db.session.commit()\r\n self.app.get('api/app/%s/newtask' % app.id)\r\n\r\n self.signout()\r\n\r\n app = db.session.query(App).first()\r\n\r\n res = self.app.get('app/%s/tasks/browse' % (app.short_name),\r\n follow_redirects=True)\r\n assert \"Sample App\" in res.data, res.data\r\n msg = 'Task <span class=\"label label-success\">#1</span>'\r\n assert msg in res.data, res.data\r\n assert '10 of 10' in res.data, res.data\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Download Full results button should be shown\"\r\n assert dom.find(id='fulldownload') is not None, err_msg\r\n\r\n app.hidden = 1\r\n db.session.add(app)\r\n db.session.commit()\r\n res = self.app.get('app/%s/tasks/browse' % (app.short_name),\r\n follow_redirects=True)\r\n assert res.status_code == 403, res.status_code\r\n\r\n self.create()\r\n self.signin(email=Fixtures.email_addr2, password=Fixtures.password)\r\n res = self.app.get('app/%s/tasks/browse' % (app.short_name),\r\n follow_redirects=True)\r\n assert res.status_code == 403, res.status_code", "def ExecuteTaskQueueTasks(self, handler_name, task_queue_name):\n taskq = self.testbed.get_stub(testbed.TASKQUEUE_SERVICE_NAME)\n tasks = taskq.GetTasks(task_queue_name)\n taskq.FlushQueue(task_queue_name)\n for task in tasks:\n self.testapp.post(handler_name,\n urllib.unquote_plus(base64.b64decode(task['body'])))\n self.ExecuteTaskQueueTasks(handler_name, task_queue_name)", "def test_ipc_queues():\n IPCComm.ipc_queues()", "def runner() -> None:\n # obtain our configuration from the environment\n config = from_environment(EXPECTED_CONFIG)\n # configure logging for the application\n log_level = getattr(logging, str(config[\"LOG_LEVEL\"]).upper())\n logging.basicConfig(\n format=\"{asctime} [{threadName}] {levelname:5} ({filename}:{lineno}) - {message}\",\n level=log_level,\n stream=sys.stdout,\n style=\"{\",\n )\n # create our TransferRequestFinisher service\n transfer_request_finisher = TransferRequestFinisher(config, LOG) # type: ignore[arg-type]\n # let's get to work\n transfer_request_finisher.logger.info(\"Adding tasks to asyncio loop\")\n loop = asyncio.get_event_loop()\n loop.create_task(work_loop(transfer_request_finisher))", "def testJobOneTask(databases):\n\n class CustomGenerator(DataGenerator):\n numTasks = 1\n numInputs = [ 0 ]\n numOutputs = [ 0 ]\n\n gen = CustomGenerator(databases)\n gen.createDefinitions()\n config = gen.createConfiguration()\n\n def checkOne(config):\n taskName = gen.tasks[0]\n #assert config.getProduct('') is None\n assert len(config.getInputs()) == 0\n assert len(config.getInputsGrouped()) == 0\n assert len(config.getTasks()) == 1\n task, = config.getTasks()\n assert task is not None\n assert 
task.getName() == taskName\n assert len(config.getTaskSequence()) == 1\n\n runWithReload(databases, config, checkOne)", "def queue_tasks(self, layer, tasks) -> int:\n pass", "def setUp(self):\n self.tasks = list(map(lambda t: t.task_id, FCDAGPROD.tasks))", "def test_cron_workflow_service_update_cron_workflow(self):\n pass", "def run_tasks(self, url=None, queue_name=None, method='POST', response_status_code=200, **kwargs):\n from google.appengine.api import namespace_manager\n tasks = self.taskqueue_stub.get_filtered_tasks(url=url,\n queue_names=[queue_name])\n for task in tasks:\n namespace = task.headers.get('X-AppEngine-Current-Namespace', '')\n previous_namespace = namespace_manager.get_namespace()\n try:\n namespace_manager.set_namespace(namespace)\n headers = {\n k: v for k, v in task.headers.iteritems()\n if k.startswith('X-AppEngine')}\n if method == 'PUT':\n response = self.testapp.put(url, task.payload, headers=headers, status='*')\n else:\n response = self.testapp.post(url, task.payload, headers=headers, status='*')\n finally:\n namespace_manager.set_namespace(previous_namespace)", "def test_task_creation(self):\n Task.objects.filter(status=Task.Status.AWAITING_PROCESSING).delete()\n\n project = self.projects['test_human_and_machine']\n self.assertEqual(Task.objects.filter(project=project).count(),\n 0)\n create_subsequent_tasks(project)\n\n # Human Task was created\n self.assertEqual(Task.objects.filter(project=project).count(),\n 1)\n\n human_step = self.workflow_steps['test_workflow_2']['step4']\n task = Task.objects.get(step=human_step, project=project)\n data = {'submit_key1': 'submit_val1'}\n assign_task(self.workers[0].id, task.id)\n\n # user 0 submits a task\n response = self._submit_assignment(self.clients[0], task.id, data=data)\n self.assertEqual(response.status_code, 200)\n\n # Machine Task was created\n self.assertEqual(Task.objects.filter(project=project).count(),\n 2)\n machine_step = self.workflow_steps['test_workflow_2']['simple_machine']\n machine_task_assignment = (\n TaskAssignment.objects\n .filter(task__step=machine_step,\n task__project=project)[0])\n\n self.assertEqual(machine_task_assignment.status,\n TaskAssignment.Status.SUBMITTED)\n\n self.assertEqual(machine_task_assignment.in_progress_task_data,\n {'json': 'simple'})\n\n self.assertEqual(machine_task_assignment.task.status,\n Task.Status.COMPLETE)", "def test_queue_subtasks_for_query2(self):\r\n\r\n mock_create_subtask_fcn = Mock()\r\n self._queue_subtasks(mock_create_subtask_fcn, 6, 3, 8, 3)\r\n\r\n # Check number of items for each subtask\r\n mock_create_subtask_fcn_args = mock_create_subtask_fcn.call_args_list\r\n self.assertEqual(len(mock_create_subtask_fcn_args[0][0][0]), 3)\r\n self.assertEqual(len(mock_create_subtask_fcn_args[1][0][0]), 3)\r\n self.assertEqual(len(mock_create_subtask_fcn_args[2][0][0]), 5)", "def test_queue_integration(self):\n from solariat_bottle.settings import LOGGER\n from solariat_bottle.db.channel.twitter import TwitterServiceChannel\n from solariat_bottle.db.historic_data import QueuedHistoricData\n from solariat_bottle.db.post.base import Post\n from solariat_bottle.daemons.twitter.historics.timeline_request import \\\n DirectMessagesRequest, SentDirectMessagesRequest, SearchRequest, UserTimelineRequest\n from solariat_bottle.db.user_profiles.user_profile import UserProfile\n\n # reduce amount of data for long-running integration test\n FakeTwitterApi.SEARCH_DATA_LENGTH = 50\n FakeTwitterApi.TIMELINE_DATA_LENGTH = 50\n FakeTwitterApi.DM_DATA_LENGTH = 50\n 
FakeTwitterApi.DM_SENT_DATA_LENGTH = 50\n FakeTwitterApi.ALL_DATA_LENGTH = 200\n FakeTwitterApi.CREATED_FROM = FakeTwitterApi.CREATED_TO - timedelta(days=1)\n FakeTwitterApi.init_next_params()\n SearchRequest.SEARCH_LIMIT = 10\n UserTimelineRequest.FETCH_LIMIT = 20\n DirectMessagesRequest.DIRECT_MESSAGES_LIMIT = 20\n SentDirectMessagesRequest.DIRECT_MESSAGES_LIMIT = 20\n\n profile = UserProfile.objects.upsert('Twitter', profile_data=dict(user_name='jarvis', user_id='99188210'))\n channel = TwitterServiceChannel.objects.create_by_user(self.user, title='SC')\n channel.add_username(profile.user_name)\n channel.add_keyword(u'keywörd')\n\n def get_id_date_pair(post_data):\n if 'twitter' in post_data:\n post_data = post_data['twitter']\n return int(post_data['id']), post_data['created_at']\n\n fetched_data = []\n def _save_tweets(fn):\n def decorated(tweets, *args, **kwargs):\n LOGGER.debug('PUSH_POSTS, len:%s', len(tweets))\n fetched_data.extend([get_id_date_pair(t) for t in tweets])\n return fn(tweets, *args, **kwargs)\n return decorated\n\n queued_data = []\n def _save_queued_data(method):\n def _method(*args, **kwargs):\n queued_data[:] = [\n get_id_date_pair(i.solariat_post_data) for i in\n QueuedHistoricData.objects(subscription=subscription)\n ]\n LOGGER.debug('QUEUED_POSTS, len: %s', len(queued_data))\n self.assertTrue(len(queued_data) == FakeTwitterApi.ALL_DATA_LENGTH,\n msg=\"len=%d %s\" % (len(queued_data), queued_data))\n self.assertEqual(set(queued_data), set(fetched_data),\n msg=u\"\\nqueued =%s\\nfetched=%s\" % (queued_data, fetched_data))\n return method(*args, **kwargs)\n return _method\n\n subscription = TwitterRestHistoricalSubscription.objects.create(\n created_by=self.user,\n channel_id=channel.id,\n from_date=FakeTwitterApi.CREATED_FROM,\n to_date=FakeTwitterApi.CREATED_TO\n )\n subscriber = TwitterHistoricsSubscriber(subscription)\n subscriber.push_posts = _save_tweets(subscriber.push_posts)\n subscriber.historic_loader.load = _save_queued_data(subscriber.historic_loader.load)\n\n subscriber.start_historic_load()\n self.assertEqual(subscriber.get_status(), SUBSCRIPTION_FINISHED)\n\n self.assertEqual(Post.objects(channels__in=[\n subscription.channel.inbound,\n subscription.channel.outbound]).count(), FakeTwitterApi.ALL_DATA_LENGTH)\n\n SearchRequest.SEARCH_LIMIT = 100\n UserTimelineRequest.FETCH_LIMIT = 200\n DirectMessagesRequest.DIRECT_MESSAGES_LIMIT = 200\n SentDirectMessagesRequest.DIRECT_MESSAGES_LIMIT = 200", "def test_callback_calls_celery_task(self, rf):\n product = product_factory()\n request = rf.post('/')\n\n url = request.build_absolute_uri(product.get_absolute_url())\n\n with patch('remindme.tasks.send_notification_email.delay') as task:\n product_in_stock_callback(\n self.__class__, product=product, request=request\n )\n task.assert_called_with(product.pk, product.title, url)", "def test_call_multiple_tasks_same_id():\n task_id = uuid.uuid4().hex\n for i in range(0, 15):\n high_priority_task.apply_async(\n args=[i], queue=\"default\", task_id=task_id\n )", "def test_worker_stop_queue(self):\n task = 'STOP'\n input_queue = Queue()\n output_queue = Queue()\n input_queue.put(task)\n self.assertFalse(sshpoller.worker(input_queue, output_queue))", "def test_check_existing_enqueues_tasks(self):\n collection = handlers_endpoints_v1.DigestCollection(\n namespace=handlers_endpoints_v1.Namespace())\n collection.items.append(\n generate_digest(collection.namespace.namespace, 'some content'))\n key = model.get_entry_key(\n collection.namespace.namespace, 
collection.items[0].digest)\n\n # guarantee that one digest already exists in the datastore\n model.new_content_entry(key).put()\n self.call_api('preupload', self.message_to_dict(collection), 200)\n\n # find enqueued tasks\n self.assertEqual(1, self.execute_tasks())", "def task(*args, **kwargs):\n print(f\"task declared, args: {args}, kwargs:{kwargs}\")\n return FalseCeleryApp", "def test_get_all(self):\n self.my_task.key = self.task_storage.add(self.my_task)\n task_list = self.task_storage.get_all()\n\n self.assertEqual(task_list[0], self.my_task)", "def testJobKilling(self):\n change = ChangeState(self.config, \"changestate_t\")\n\n locationAction = self.daoFactory(classname=\"Locations.New\")\n locationAction.execute(\"site1\", pnn=\"T2_CH_CERN\")\n\n testWorkflow = Workflow(spec=self.specUrl, owner=\"Steve\",\n name=\"wf001\", task=self.taskName)\n testWorkflow.create()\n testFileset = Fileset(name=\"TestFileset\")\n testFileset.create()\n\n for i in range(4):\n newFile = File(lfn=\"File%s\" % i, locations=set([\"T2_CH_CERN\"]))\n newFile.create()\n testFileset.addFile(newFile)\n\n testFileset.commit()\n testSubscription = Subscription(fileset=testFileset,\n workflow=testWorkflow,\n split_algo=\"FileBased\")\n testSubscription.create()\n\n splitter = SplitterFactory()\n jobFactory = splitter(package=\"WMCore.WMBS\",\n subscription=testSubscription)\n jobGroup = jobFactory(files_per_job=1)[0]\n\n assert len(jobGroup.jobs) == 4, \\\n \"Error: Splitting should have created four jobs.\"\n\n testJobA = jobGroup.jobs[0]\n testJobA[\"user\"] = \"sfoulkes\"\n testJobA[\"group\"] = \"DMWM\"\n testJobA[\"taskType\"] = \"Processing\"\n testJobB = jobGroup.jobs[1]\n testJobB[\"user\"] = \"sfoulkes\"\n testJobB[\"group\"] = \"DMWM\"\n testJobB[\"taskType\"] = \"Processing\"\n testJobC = jobGroup.jobs[2]\n testJobC[\"user\"] = \"sfoulkes\"\n testJobC[\"group\"] = \"DMWM\"\n testJobC[\"taskType\"] = \"Processing\"\n testJobD = jobGroup.jobs[3]\n testJobD[\"user\"] = \"sfoulkes\"\n testJobD[\"group\"] = \"DMWM\"\n testJobD[\"taskType\"] = \"Processing\"\n\n change.persist([testJobA], \"created\", \"new\")\n change.persist([testJobB], \"jobfailed\", \"executing\")\n change.persist([testJobC, testJobD], \"executing\", \"created\")\n\n change.persist([testJobA], \"killed\", \"created\")\n change.persist([testJobB], \"killed\", \"jobfailed\")\n change.persist([testJobC, testJobD], \"killed\", \"executing\")\n\n for job in [testJobA, testJobB, testJobC, testJobD]:\n job.load()\n self.assertEqual(job['retry_count'], 99999)\n self.assertEqual(job['state'], 'killed')\n\n return", "def cb_test( self, ):\r\n # this shows how to run stuff in the helper -- call thru queue, post to queue\r\n self.post_to_queue( \"call\", self.helper_thread.test_test_ports , ( ) )", "def test_notBeforeWhenCheckingForWork(self):\n dbpool, _ignore_qpool, clock, _ignore_performerChosen = self._setupPools()\n fakeNow = datetime.datetime(2012, 12, 12, 12, 12, 12)\n\n # Let's create a couple of work items directly, not via the enqueue\n # method, so that they exist but nobody will try to immediately execute\n # them.\n\n @transactionally(dbpool.pool.connection)\n @inlineCallbacks\n def setup(txn):\n # First, one that's right now.\n yield DummyWorkItem.makeJob(txn, a=1, b=2, notBefore=fakeNow)\n\n # Next, create one that's actually far enough into the past to run.\n yield DummyWorkItem.makeJob(\n txn, a=3, b=4, notBefore=(\n # Schedule it in the past so that it should have already\n # run.\n fakeNow - datetime.timedelta(seconds=20)\n )\n 
)\n\n # Finally, one that's actually scheduled for the future.\n yield DummyWorkItem.makeJob(\n txn, a=10, b=20, notBefore=fakeNow + datetime.timedelta(1000)\n )\n yield setup\n\n # Wait for job\n while len(DummyWorkItem.results) != 2:\n clock.advance(1)\n\n # Work item complete\n self.assertTrue(DummyWorkItem.results == {1: 3, 2: 7})", "def testJobTRLostWhileRunning(databases):\n gen = DataGenerator(databases)\n fwName = gen.createFramework('testfw1')\n taskName = gen.createTask('task1', fwName)\n trName = gen.createTaskRunner(capabilities=[fwName])\n config = gen.createConfiguration()\n\n sanityCheck(gen, config)\n job, = config.createJobs(gen.owner)\n runner = databases.resourceDB[trName]\n task = job.assignTask(runner)\n assert task is not None\n assert task.isRunning()\n runner.markLost()\n assert not task.isRunning()\n assert task.result == ResultCode.ERROR", "def test_anonymous_01_newtask(self):\r\n # Del previous TaskRuns\r\n self.create()\r\n self.del_task_runs()\r\n\r\n res = self.app.get('api/app/1/newtask')\r\n print res.data\r\n data = json.loads(res.data)\r\n assert data['info'], data", "def test_open_ended_flow_with_xqueue_failure(self):\r\n assessment = [1, 1]\r\n\r\n # Simulate a student saving an answer\r\n self._handle_ajax(\"save_answer\", {\"student_answer\": self.answer})\r\n status = self._handle_ajax(\"get_status\", {})\r\n self.assertIsInstance(status, basestring)\r\n\r\n # Mock a student submitting an assessment\r\n assessment_dict = MultiDict({'assessment': sum(assessment)})\r\n assessment_dict.extend(('score_list[]', val) for val in assessment)\r\n\r\n mock_xqueue_interface = Mock(\r\n send_to_queue=Mock(return_value=(1, \"Not Queued\"))\r\n )\r\n\r\n # Call handle_ajax on the module with xqueue down\r\n module = self._module()\r\n with patch.dict(module.xmodule_runtime.xqueue, {'interface': mock_xqueue_interface}):\r\n module.handle_ajax(\"save_assessment\", assessment_dict)\r\n self.assertEqual(module.current_task_number, 1)\r\n self.assertTrue((module.child_module.get_task_number(1).child_created))\r\n module.save()\r\n\r\n # Check that next time the OpenEndedModule is loaded it calls send_to_grader\r\n with patch.object(OpenEndedModule, 'send_to_grader') as mock_send_to_grader:\r\n mock_send_to_grader.return_value = (False, \"Not Queued\")\r\n module = self._module().child_module.get_score()\r\n self.assertTrue(mock_send_to_grader.called)\r\n self.assertTrue((self._module().child_module.get_task_number(1).child_created))\r\n\r\n # Loading it this time should send submission to xqueue correctly\r\n self.assertFalse((self._module().child_module.get_task_number(1).child_created))\r\n self.assertEqual(self._module().current_task_number, 1)\r\n self.assertEqual(self._module().state, OpenEndedChild.ASSESSING)\r\n\r\n task_one_json = json.loads(self._module().task_states[0])\r\n self.assertEqual(json.loads(task_one_json['child_history'][0]['post_assessment']), assessment)\r\n\r\n # Move to the next step in the problem\r\n self._handle_ajax(\"next_problem\", {})\r\n self.assertEqual(self._module().current_task_number, 1)\r\n self._module().render('student_view')\r\n\r\n # Try to get the rubric from the module\r\n self._handle_ajax(\"get_combined_rubric\", {})\r\n\r\n self.assertEqual(self._module().state, OpenEndedChild.ASSESSING)\r\n\r\n # Make a fake reply from the queue\r\n queue_reply = {\r\n 'queuekey': \"\",\r\n 'xqueue_body': json.dumps({\r\n 'score': 0,\r\n 'feedback': json.dumps({\"spelling\": \"Spelling: Ok.\", \"grammar\": \"Grammar: Ok.\",\r\n 
\"markup-text\": \" all of us can think of a book that we hope none of our children or any other children have taken off the shelf . but if i have the right to remove that book from the shelf that work i abhor then you also have exactly the same right and so does everyone else . and then we <bg>have no books left</bg> on the shelf for any of us . <bs>katherine</bs> <bs>paterson</bs> , author write a persuasive essay to a newspaper reflecting your vies on censorship <bg>in libraries . do</bg> you believe that certain materials , such as books , music , movies , magazines , <bg>etc . , should be</bg> removed from the shelves if they are found <bg>offensive ? support your</bg> position with convincing arguments from your own experience , observations <bg>, and or reading .</bg> \"}),\r\n 'grader_type': \"ML\",\r\n 'success': True,\r\n 'grader_id': 1,\r\n 'submission_id': 1,\r\n 'rubric_xml': \"<rubric><category><description>Writing Applications</description><score>0</score><option points='0'> The essay loses focus, has little information or supporting details, and the organization makes it difficult to follow.</option><option points='1'> The essay presents a mostly unified theme, includes sufficient information to convey the theme, and is generally organized well.</option></category><category><description> Language Conventions </description><score>0</score><option points='0'> The essay demonstrates a reasonable command of proper spelling and grammar. </option><option points='1'> The essay demonstrates superior command of proper spelling and grammar.</option></category></rubric>\",\r\n 'rubric_scores_complete': True,\r\n })\r\n }\r\n\r\n self._handle_ajax(\"check_for_score\", {})\r\n\r\n # Update the module with the fake queue reply\r\n self._handle_ajax(\"score_update\", queue_reply)\r\n\r\n module = self._module()\r\n self.assertFalse(module.ready_to_reset)\r\n self.assertEqual(module.current_task_number, 1)\r\n\r\n # Get html and other data client will request\r\n module.render('student_view')\r\n\r\n self._handle_ajax(\"skip_post_assessment\", {})\r\n\r\n # Get all results\r\n self._handle_ajax(\"get_combined_rubric\", {})\r\n\r\n # reset the problem\r\n self._handle_ajax(\"reset\", {})\r\n self.assertEqual(self._module().state, \"initial\")", "def setUp(self):\n self.t = Task()\n self.t(\"add one mississippi\")\n self.t(\"add two mississippi\")", "def test_notBeforeWhenEnqueueing(self):\n\n dbpool, qpool, clock, performerChosen = self._setupPools()\n\n @transactionally(dbpool.pool.connection)\n def check(txn):\n return qpool.enqueueWork(\n txn, DummyWorkItem, a=3, b=9,\n notBefore=datetime.datetime(2012, 12, 12, 12, 12, 20)\n )\n\n yield check\n\n # This is going to schedule the work to happen with some asynchronous\n # I/O in the middle; this is a problem because how do we know when it's\n # time to check to see if the work has started? 
We need to intercept\n # the thing that kicks off the work; we can then wait for the work\n # itself.\n\n self.assertEquals(performerChosen, [])\n\n # Advance to exactly the appointed second.\n clock.advance(20 - 12)\n self.assertEquals(performerChosen, [True])\n\n # Wait for job\n while (yield inTransaction(dbpool.pool.connection, lambda txn: JobItem.all(txn))):\n clock.advance(1)\n\n # Work item complete\n self.assertTrue(DummyWorkItem.results == {1: 12})", "def setup_task(self, *args, **kwargs):\n pass", "def test_anonymous_02_gets_different_tasks(self):\r\n # Del previous TaskRuns\r\n self.del_task_runs()\r\n\r\n assigned_tasks = []\r\n # Get a Task until scheduler returns None\r\n res = self.app.get('api/app/1/newtask')\r\n data = json.loads(res.data)\r\n while data.get('info') is not None:\r\n # Check that we have received a Task\r\n assert data.get('info'), data\r\n\r\n # Save the assigned task\r\n assigned_tasks.append(data)\r\n\r\n # Submit an Answer for the assigned task\r\n tr = TaskRun(app_id=data['app_id'], task_id=data['id'],\r\n user_ip=\"127.0.0.1\",\r\n info={'answer': 'Yes'})\r\n db.session.add(tr)\r\n db.session.commit()\r\n res = self.app.get('api/app/1/newtask')\r\n data = json.loads(res.data)\r\n\r\n # Check if we received the same number of tasks that the available ones\r\n tasks = db.session.query(Task).filter_by(app_id=1).all()\r\n assert len(assigned_tasks) == len(tasks), len(assigned_tasks)\r\n # Check if all the assigned Task.id are equal to the available ones\r\n tasks = db.session.query(Task).filter_by(app_id=1).all()\r\n err_msg = \"Assigned Task not found in DB Tasks\"\r\n for at in assigned_tasks:\r\n assert self.is_task(at['id'], tasks), err_msg\r\n # Check that there are no duplicated tasks\r\n err_msg = \"One Assigned Task is duplicated\"\r\n for at in assigned_tasks:\r\n assert self.is_unique(at['id'], assigned_tasks), err_msg", "def dumb_task():\n return True", "def test_list_background_email_tasks(self, act):\r\n act.return_value = self.tasks\r\n url = reverse('list_background_email_tasks', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n mock_factory = MockCompletionInfo()\r\n with patch('instructor.views.api.get_task_completion_info') as mock_completion_info:\r\n mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info\r\n response = self.client.get(url, {})\r\n self.assertEqual(response.status_code, 200)\r\n\r\n # check response\r\n self.assertTrue(act.called)\r\n expected_tasks = [ftask.to_dict() for ftask in self.tasks]\r\n actual_tasks = json.loads(response.content)['tasks']\r\n for exp_task, act_task in zip(expected_tasks, actual_tasks):\r\n self.assertDictEqual(exp_task, act_task)\r\n self.assertEqual(actual_tasks, expected_tasks)", "def run_tasks(request):\r\n import os\r\n if not os.environ['SERVER_SOFTWARE'].startswith('Development'):\r\n logging.error(\"This URL is only valid in a development environment.\")\r\n raise Http404\r\n else:\r\n from datetime import datetime\r\n from google.appengine.api import apiproxy_stub_map\r\n stub = apiproxy_stub_map.apiproxy.GetStub('taskqueue')\r\n \r\n #get all the tasks for all the queues\r\n tasks = []\r\n for queue in stub.GetQueues():\r\n tasks.extend( stub.GetTasks(queue['name']) )\r\n \r\n #keep only tasks that need to be executed\r\n now = datetime.now()\r\n fn = lambda t: datetime.strptime(t['eta'],'%Y/%m/%d %H:%M:%S') < now\r\n tasks = filter(fn, tasks)\r\n\r\n from django.utils import simplejson as json\r\n result = '\\n'.join([json.dumps(t) for t in 
tasks])\r\n \r\n #remove tasks from queues\r\n for queue in stub.GetQueues():\r\n stub.FlushQueue(queue['name'])\r\n \r\n return HttpResponse(result)", "def test_items_to_tasks(self):\n count = task_utils.items_to_tasks(\n items=[1, 2, 3],\n queue=Queue('adapter-update'),\n task_generator=lambda item: Task(url='/something/{}'.format(item))\n )\n\n self.assertEqual(count, 3)\n task_count = len(self.taskqueue.get_filtered_tasks())\n self.assertEqual(task_count, 3)", "def test_user_02_gets_different_tasks(self):\r\n # Del previous TaskRuns\r\n self.create()\r\n self.del_task_runs()\r\n\r\n # Register\r\n self.register()\r\n self.signin()\r\n\r\n assigned_tasks = []\r\n # Get Task until scheduler returns None\r\n res = self.app.get('api/app/1/newtask')\r\n data = json.loads(res.data)\r\n while data.get('info') is not None:\r\n # Check that we received a Task\r\n assert data.get('info'), data\r\n\r\n # Save the assigned task\r\n assigned_tasks.append(data)\r\n\r\n # Submit an Answer for the assigned task\r\n tr = dict(app_id=data['app_id'], task_id=data['id'],\r\n info={'answer': 'No'})\r\n tr = json.dumps(tr)\r\n\r\n self.app.post('/api/taskrun', data=tr)\r\n res = self.app.get('api/app/1/newtask')\r\n data = json.loads(res.data)\r\n\r\n # Check if we received the same number of tasks that the available ones\r\n tasks = db.session.query(Task).filter_by(app_id=1).all()\r\n assert len(assigned_tasks) == len(tasks), assigned_tasks\r\n # Check if all the assigned Task.id are equal to the available ones\r\n tasks = db.session.query(Task).filter_by(app_id=1).all()\r\n err_msg = \"Assigned Task not found in DB Tasks\"\r\n for at in assigned_tasks:\r\n assert self.is_task(at['id'], tasks), err_msg\r\n # Check that there are no duplicated tasks\r\n err_msg = \"One Assigned Task is duplicated\"\r\n for at in assigned_tasks:\r\n assert self.is_unique(at['id'], assigned_tasks), err_msg", "def test_queue_simple(self):\n events = self.run_and_get_events('fixtures/queue/queue_simple.json')\n\n expected_events = self.events_from_tuples((\n ('on_b_dial', {\n 'call_id': 'e83df36bebbe-1507019160.61',\n 'caller': CallerId(code=15001, number='+31150010002', is_public=True),\n 'to_number': '+31150010004',\n 'targets': [CallerId(code=150010001, number='+31150010004', is_public=True)],\n }),\n ('on_up', {\n 'call_id': 'e83df36bebbe-1507019160.61',\n 'caller': CallerId(code=15001, number='+31150010002', is_public=True),\n 'to_number': '+31150010004',\n 'callee': CallerId(code=150010001, number='+31150010004', is_public=True),\n }),\n ('on_hangup', {\n 'call_id': 'e83df36bebbe-1507019160.61',\n 'caller': CallerId(code=15001, number='+31150010002', is_public=True),\n 'to_number': '+31150010004',\n 'reason': 'completed',\n }),\n ))\n\n self.assertEqual(expected_events, events)", "def test_workflows_restart(self):\n pass", "def test_submit_jobs(self):\r\n\r\n submit_jobs([self.command], prefix=\"test_job\")\r\n # Try and wait ten times, could be made nicer with alarm()\r\n for i in range(10):\r\n if exists(self.tmp_result_file):\r\n observed_text = \"\".join(list(open(self.tmp_result_file)))\r\n self.assertEqual(observed_text, \"hello\\n\")\r\n return\r\n else:\r\n sleep(10)\r\n # if we get here we failed\r\n self.fail(\"The test job apparently never finished.\\n\"\r\n + \"check the jobs error log and check the queue status\\n.\")", "def process_task(params):\n params['task'](params)", "def setUp(self):\n self.tasks = list(map(lambda t: t.task_id, FCDAGDEV.tasks))", "def test_send_queued_mail(self):\n # Make sure that 
send_queued_mail with empty queue does not raise error\n call_command('send_queued_mail', processes=1)\n\n Email.objects.create(from_email='from@example.com',\n to=['to@example.com'], status=STATUS.queued)\n Email.objects.create(from_email='from@example.com',\n to=['to@example.com'], status=STATUS.queued)\n call_command('send_queued_mail', processes=1)\n self.assertEqual(Email.objects.filter(status=STATUS.sent).count(), 2)\n self.assertEqual(Email.objects.filter(status=STATUS.queued).count(), 0)", "def test_queue_enqueue_command(runner, tmpworkdir, queue, target_factory): # pylint: disable=unused-argument\n\n atarget = target_factory.build(queue=queue)\n apath = Path('ips.txt')\n apath.write_text(f'{atarget.target}\\n \\n ', encoding='utf-8')\n\n result = runner.invoke(command, ['queue-enqueue', 'notexist', atarget.target])\n assert result.exit_code == 1\n\n result = runner.invoke(command, ['queue-enqueue', queue.name, atarget.target])\n assert result.exit_code == 0\n assert Queue.query.get(queue.id).targets[0].target == atarget.target\n\n result = runner.invoke(command, ['queue-enqueue', queue.name, '--file', apath])\n assert result.exit_code == 0\n assert len(Queue.query.get(queue.id).targets) == 2", "async def test_queue(aresponses):\n aresponses.add(\n MATCH_HOST,\n \"/api/queue\",\n \"GET\",\n aresponses.Response(\n status=200,\n headers={\"Content-Type\": \"application/json\"},\n text=load_fixture(\"queue.json\"),\n ),\n )\n\n async with ClientSession() as session:\n client = Sonarr(HOST, API_KEY, session=session)\n response = await client.queue()\n\n assert response\n assert isinstance(response, List)\n\n assert response[0]\n assert isinstance(response[0], models.QueueItem)\n assert response[0].episode\n assert isinstance(response[0].episode, models.Episode)", "def task(ctx, config):\n pass", "def test_run_started(self):", "def test_workers(self):\n wr = WorkflowRuner(4)\n try:\n wr.init_workers()\n assert wr.workers_available() == 4\n wr.acquire_worker()\n assert wr.workers_available() == 3\n wr.acquire_worker()\n assert wr.active_workers()\n wr.acquire_worker()\n assert wr.active_workers()\n wr.acquire_worker()\n assert not wr.active_workers()\n wr.release_worker()\n assert wr.active_workers()\n wr.release_worker()\n assert wr.workers_available() == 2\n wr.terminate_workers_and_clean_subprocesses()\n except:\n wr.terminate_workers_and_clean_subprocesses()", "def celery_task_invocation_started(self, logger):", "def check_queue():\n while True:\n logging.info( 'Awaiting task ' )\n yield from asyncio.sleep( 5 )\n loop.create_task( (start_background_tasks()) )", "def octopus_tasks(self, msg, args):\r\n self.tasks.send_tasks(msg, args)", "def testJobPostponedInspection(databases):\n\n class CustomGenerator(DataGenerator):\n pass\n gen = CustomGenerator(databases)\n\n image = gen.createProduct('image')\n buildFw = gen.createFramework('build', [], [ image ])\n testFw = gen.createFramework('test', [ image ], [])\n buildTask = gen.createTask('build', buildFw)\n testTask1 = gen.createTask('test1', testFw)\n testTask2 = gen.createTask('test2', testFw)\n testTask3 = gen.createTask('test3', testFw)\n\n tr = gen.createTaskRunner(name='tr_build',\n capabilities=['build', 'test'])\n\n def simulate(config):\n sanityCheck(gen, config)\n\n job, = config.createJobs(gen.owner)\n # TODO: The toXML functionality should probably be tested\n # in a separate test case.\n\n # Verify execution:\n # Successfully complete first build task.\n task = job.assignTask(databases.resourceDB[tr])\n assert task is not 
None\n assert task.getName() == buildTask\n taskDone(job, buildTask)\n assert job.result == ResultCode.OK\n assert job.getFinalResult() == None\n # Successfully complete first test task, without result.\n task = job.assignTask(databases.resourceDB[tr])\n assert (task is not None) is not None\n assert task.getName() == testTask1\n taskDone(job, testTask1, ResultCode.INSPECT)\n assert not job.isExecutionFinished()\n assert not job.hasFinalResult()\n assert job.result == ResultCode.INSPECT\n assert job.getFinalResult() is None\n # Successfully complete second test task, with result.\n task = job.assignTask(databases.resourceDB[tr])\n assert task is not None\n assert task.getName() == testTask2\n taskDone(job, testTask2, ResultCode.OK)\n assert not job.isExecutionFinished()\n assert not job.hasFinalResult()\n assert job.result == ResultCode.INSPECT\n assert job.getFinalResult() is None\n # Successfully complete third test task, without result.\n task = job.assignTask(databases.resourceDB[tr])\n assert task is not None\n assert task.getName() == testTask3\n taskDone(job, testTask3, ResultCode.INSPECT)\n assert job.isExecutionFinished()\n assert not job.hasFinalResult()\n assert job.result == ResultCode.INSPECT\n assert job.getFinalResult() is None\n # Attempt to set invalid inspection result.\n with raises(ValueError):\n job.inspectDone(testTask1, ResultCode.CANCELLED, 'invalid')\n # Complete inspection of first task.\n job.inspectDone(testTask1, ResultCode.WARNING, 'inspect 1')\n assert job.isExecutionFinished()\n assert not job.hasFinalResult()\n assert job.result == ResultCode.INSPECT\n assert job.getFinalResult() is None\n # Attempt to change inspection result.\n with raises(IllegalStateError):\n job.inspectDone(testTask1, ResultCode.OK, 'invalid')\n # Complete inspection of third task.\n job.inspectDone(testTask3, ResultCode.OK, 'inspect 3')\n assert job.isExecutionFinished()\n assert job.hasFinalResult()\n assert job.result == ResultCode.WARNING\n assert job.getFinalResult() == ResultCode.WARNING\n\n runWithReload(databases, gen.createConfiguration(), simulate)", "def test_enqueue(self):\n dbpool = buildConnectionPool(self, jobSchema + schemaText)\n yield self._enqueue(dbpool, 1, 2)\n\n # Make sure we have one JOB and one DUMMY_WORK_ITEM\n @transactionally(dbpool.connection)\n def checkJob(txn):\n return JobItem.all(txn)\n\n jobs = yield checkJob\n self.assertTrue(len(jobs) == 1)\n self.assertTrue(jobs[0].workType == \"DUMMY_WORK_ITEM\")\n self.assertTrue(jobs[0].assigned is None)\n self.assertEqual(jobs[0].isAssigned, 0)\n\n @transactionally(dbpool.connection)\n def checkWork(txn):\n return DummyWorkItem.all(txn)\n\n work = yield checkWork\n self.assertTrue(len(work) == 1)\n self.assertTrue(work[0].jobID == jobs[0].jobID)", "def test_user_03_respects_limit_tasks(self):\r\n # Del previous TaskRuns\r\n self.create()\r\n self.del_task_runs()\r\n\r\n assigned_tasks = []\r\n # We need one extra loop to allow the scheduler to mark a task as completed\r\n for i in range(11):\r\n self.register(fullname=self.user.username + str(i),\r\n name=self.user.username + str(i),\r\n password=self.user.username + str(i))\r\n self.signin()\r\n # Get Task until scheduler returns None\r\n res = self.app.get('api/app/1/newtask')\r\n data = json.loads(res.data)\r\n\r\n while data.get('info') is not None:\r\n # Check that we received a Task\r\n assert data.get('info'), data\r\n\r\n # Save the assigned task\r\n assigned_tasks.append(data)\r\n\r\n # Submit an Answer for the assigned task\r\n tr = 
dict(app_id=data['app_id'], task_id=data['id'],\r\n info={'answer': 'No'})\r\n tr = json.dumps(tr)\r\n self.app.post('/api/taskrun', data=tr)\r\n\r\n res = self.app.get('api/app/1/newtask')\r\n data = json.loads(res.data)\r\n self.signout()\r\n\r\n # Check if there are 30 TaskRuns per Task\r\n tasks = db.session.query(Task).filter_by(app_id=1).all()\r\n for t in tasks:\r\n assert len(t.task_runs) == 10, t.task_runs\r\n # Check that all the answers are from different IPs\r\n err_msg = \"There are two or more Answers from same User\"\r\n for t in tasks:\r\n for tr in t.task_runs:\r\n assert self.is_unique(tr.user_id, t.task_runs), err_msg\r\n # Check that task.state is updated to completed\r\n for t in tasks:\r\n assert t.state == \"completed\", t.state" ]
[ "0.7558448", "0.7010462", "0.685679", "0.6836651", "0.6805605", "0.6643382", "0.6535218", "0.640248", "0.640248", "0.63725996", "0.63460475", "0.6286987", "0.62630576", "0.6254249", "0.62299985", "0.6221732", "0.6209035", "0.62022513", "0.6200722", "0.617855", "0.61699396", "0.6168474", "0.61627895", "0.6152956", "0.60914433", "0.60860205", "0.60807854", "0.60807854", "0.60770714", "0.60770714", "0.60545814", "0.60516095", "0.60370713", "0.60276335", "0.6025271", "0.6019333", "0.60189915", "0.60189915", "0.6012671", "0.60098773", "0.60075384", "0.6007245", "0.6004084", "0.60031205", "0.60016316", "0.60016316", "0.5998642", "0.5995375", "0.5984772", "0.59817046", "0.59806794", "0.59780794", "0.5972058", "0.5958331", "0.59415793", "0.5941521", "0.59345615", "0.59328884", "0.5914004", "0.5906119", "0.5898069", "0.58937645", "0.58852315", "0.5882568", "0.58788", "0.5873474", "0.58734584", "0.5866945", "0.5861512", "0.58596313", "0.58585656", "0.5857669", "0.5855822", "0.58539635", "0.58507246", "0.58488154", "0.5837862", "0.5837351", "0.58341825", "0.58317643", "0.5830892", "0.58171564", "0.58119845", "0.58067554", "0.58027077", "0.58015245", "0.58007437", "0.57995665", "0.57945967", "0.57945335", "0.57930326", "0.57906866", "0.57784736", "0.5774251", "0.5772753", "0.5767948", "0.5765237", "0.5762453", "0.5761792", "0.5753875" ]
0.581855
81
Forward method for your Runner. Should not be called directly outside of the runner. If your model has a specific interface, override this method to use it
def forward(self, batch: Mapping[str, Any], **kwargs) -> Mapping[str, Any]:\n        output = self._process_input(batch, **kwargs)\n        output = self._process_output(output)\n        return output
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _doRun(self, model: Model):\n raise Exception(\"Not implemented\")", "def __call__(self):\n raise NotImplementedError", "def RunModel(self):\n raise UnimplementedMethodException()", "def _run(self):\n raise NotImplementedError", "def _run(self):\n raise NotImplementedError", "def __call__(self):\n raise NotImplementedError()", "def run(self):\n raise NotImplementedError # implement in subclass", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError()", "def run(self):\n raise NotImplementedError()", "def _run(self):\n raise NotImplementedError", "def _run(self):\n raise NotImplemented(\"Abstract method '_run' need to be defined\")", "def __call__(self):\r\n raise NotImplementedError('override me')", "def _run(self, *args, **kwargs):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError(\"Subclasses mut override run()\")", "def __call__(self, **kwargs):\n raise NotImplementedError", "def run(self) -> None:\n raise NotImplementedError()", "def __call__( self ):\n pass", "def __call__(self, *args, **kwargs):\n raise NotImplementedError", "def __call__(self, *args, **kwargs):\n raise NotImplementedError", "def run(self):\n raise Exception('derived class should redefine this function')", "def run(self):\n raise NotImplementedError(\"Subclass must implement abstract method\")", "def run(self):\n raise NotImplementedError('Run method not implemented in %s' % type(self).__name__)", "def perform(self):\n raise NotImplementedError", "def __call__(self, *args, **kwargs):\r\n raise NotImplementedError", "def __call__(self, *args, **kwargs):\r\n raise NotImplementedError", "def __call__(self, *args, **kwargs):\r\n raise NotImplementedError", "def __call__(self):\n pass", "def __call__(self):\n pass", "def run(self):\n raise NotImplemented(\"Inheriting classes should implement this\")", "def _run(self):\n # We usually override this in __init__\n # pylint: disable=method-hidden\n return", "def forward(self, *args: Any, **kwargs: Any) -> Any:\n return self.model(*args, **kwargs)", "def __call__(self, *args, **kwargs) -> None:\n raise NotImplementedError()", "def perform(self) -> None:\n raise NotImplementedError()", "def on_train_forward(self, runner):\n self.on_iter_forward(runner)", "def _proceed(self):\n raise NotImplementedError", "def perform(self):\n pass", "def train(self, ):\n raise NotImplementedError", "def run(self):\r\n pass", "def forward(self, *args, **kwargs):\n raise NotImplementedError", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def train(self):\n raise NotImplementedError()", "def train(self):\n\t\traise NotImplementedError", "def __call__(self) -> None:", "def __call__(self):\n\t\treturn", "def _execute(self, model_obj):", "def perform(self):\n raise TypeError(\"Derived class must implement\")", "def run(self): \r\n return", "def 
forward(self, *args):\n raise NotImplementedError", "def forward(self, *args):\n raise NotImplementedError", "def run(self, **kwargs) -> None:\n raise NotImplementedError()", "def run(self, **kwargs) -> None:\n raise NotImplementedError()", "def execute(self):\n raise NotImplementedError(\"Subclasses should override this method.\")", "def forward(self)->None:", "def run(self, **kwargs: Any) -> None:\n raise NotImplementedError", "def run(self):\n \n pass", "def test(self):\n raise NotImplementedError", "def __call__(self, args, kwargs):\n raise NotImplementedError", "def step(self, model):\n pass", "def step(self, model):\n pass", "def process(self):\n raise NotImplementedError", "def forward(self, *args, **kwargs):\n\n raise NotImplementedError()", "def run(self):\n\t\t\n\t\tpass", "def run(self) -> None:\n log.critical('Not implemented')", "def _train_model(self):\n raise NotImplementedError()", "def Run(self):\n pass", "def train(self):\n raise NotImplementedError", "def perform(self):\n return", "def perform(self):\n return", "def run(self, **kwargs):", "def forward_test(self, *args, **kwargs):\n pass", "def apply(self): # pragma: no cover\n raise NotImplementedError", "def get_object_to_run(self):", "def run(self, *args, **kwargs):\n pass", "def handle(self):\n raise NotImplementedError", "def act(self):\n raise NotImplementedError", "def run(self, *args, **kwargs) -> typing.Any:\n pass", "def process(self):\n raise NotImplementedError('Method must be implemented by subclass.')", "def run_autostig(self):\n raise NotImplementedError" ]
[ "0.72703433", "0.7202122", "0.7161987", "0.7095755", "0.7095755", "0.70892507", "0.7041553", "0.7017677", "0.7017677", "0.7017677", "0.7017677", "0.7017677", "0.7017677", "0.7017677", "0.7017677", "0.7017677", "0.6987295", "0.6987295", "0.69869214", "0.69604117", "0.6940787", "0.69100726", "0.682403", "0.677481", "0.6774137", "0.67679876", "0.66991514", "0.66991514", "0.669514", "0.6693241", "0.6665453", "0.6661472", "0.66054654", "0.66054654", "0.66054654", "0.6595659", "0.6595659", "0.6582435", "0.6569072", "0.65282476", "0.65117633", "0.64855844", "0.6382837", "0.63764334", "0.63389176", "0.63375556", "0.6332409", "0.6322233", "0.6312563", "0.6312563", "0.6312563", "0.6312563", "0.6312563", "0.6312563", "0.6312563", "0.6312563", "0.6312563", "0.6312563", "0.6312563", "0.6312563", "0.6312563", "0.6312563", "0.6312563", "0.6307838", "0.6305249", "0.6301651", "0.62995034", "0.6299186", "0.6281884", "0.6260266", "0.6252222", "0.6252222", "0.62369627", "0.62369627", "0.6231052", "0.6224191", "0.6218607", "0.6212719", "0.6196357", "0.61916804", "0.61916316", "0.61916316", "0.6191546", "0.61914784", "0.61764234", "0.61544454", "0.6150702", "0.6146326", "0.61404735", "0.61181426", "0.61181426", "0.6116764", "0.61099005", "0.60936946", "0.6089301", "0.60754395", "0.60656613", "0.60629714", "0.60570914", "0.6053816", "0.6041048" ]
0.0
-1
Inner method to handle the specified data batch. Used to run a train/valid/infer stage during an Experiment run.
def handle_batch(self, batch: Mapping[str, Any]) -> None:\n        self.batch = {**batch, **self.forward(batch)}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process(self, data_batch: Any, data_samples: Sequence[dict]) -> None:", "def train(self, batch):\n pass", "def _pipeline_general(self,\n data_batch: Dict[str, Any],\n levels: Sequence[int],\n training: bool = False,\n do_eval: bool = False,\n optim_mode: str = 'full',\n latent_code_type: str = 'train',\n eval_data_mode: str = None,\n flags: Dict[str, Any] = None):\n if self._num_point_dim == 2:\n _, dim_h, dim_w, _ = data_batch['input'].shape\n spatial_dims = [dim_h, dim_w]\n elif self._num_point_dim == 3:\n _, dim_d, dim_h, dim_w, _ = data_batch['input'].shape\n spatial_dims = [dim_d, dim_h, dim_w]\n\n gt_map = data_batch['gt']\n gt_map_for_input = gt_map\n if self._num_point_dim == 2:\n gt_data_for_label = {'sdf_map': gt_map}\n elif self._num_point_dim == 3:\n gt_data_for_label = {\n 'grid_samples': data_batch['grid_samples'],\n 'uniform_samples': data_batch['uniform_samples'],\n 'near_surface_samples': data_batch['near_surface_samples']\n }\n if 'uniform_samples_per_camera' in data_batch:\n gt_data_for_label['uniform_samples_per_camera'] = data_batch[\n 'uniform_samples_per_camera']\n if 'near_surface_samples_per_camera' in data_batch:\n gt_data_for_label['near_surface_samples_per_camera'] = data_batch[\n 'near_surface_samples_per_camera']\n if 'depth_xyzn_per_camera' in data_batch:\n gt_data_for_label['depth_xyzn_per_camera'] = data_batch[\n 'depth_xyzn_per_camera']\n\n # Unified processing on input.\n if self._input_config_unified['clip'][0]:\n clip_min_max = self._input_config_unified['clip'][1]\n gt_map_for_input = tf.clip_by_value(gt_map_for_input, clip_min_max[0],\n clip_min_max[1])\n\n # Unified processing on label.\n if self._label_config_unified['clip'][0]:\n clip_min_max = self._label_config_unified['clip'][1]\n if self._num_point_dim == 2:\n gt_data_for_label['sdf_map'] = tf.clip_by_value(\n gt_data_for_label['sdf_map'], clip_min_max[0], clip_min_max[1])\n else:\n for key in gt_data_for_label:\n if gt_data_for_label[key].shape[-1] > 3 and key not in [\n 'depth_xyzn_per_camera'\n ]:\n gt_data_for_label[key] = tf.concat([\n gt_data_for_label[key][..., :3],\n tf.clip_by_value(gt_data_for_label[key][..., 3:4],\n clip_min_max[0], clip_min_max[1])\n ],\n axis=-1)\n\n # Initialize dictionaries to store outputs.\n model_outputs_and_targets = {\n 'training': training,\n 'image_size': spatial_dims\n }\n image_summaries = {}\n\n # Sample points for training/latent optimization.\n if optim_mode == 'full':\n points_data = self._sample_points(\n spatial_dims,\n gt_data_for_label,\n self._train_point_sampler,\n params=flags)\n optim_sampler = self._train_point_sampler\n elif optim_mode == 'latent_optim':\n points_data = self._sample_points(\n spatial_dims,\n gt_data_for_label,\n self._latent_optim_point_sampler,\n params=flags)\n optim_sampler = self._latent_optim_point_sampler\n\n # Sample points for evaluation.\n params_for_eval = self._eval_sampling_params_init\n if 'mask' in optim_sampler.default_params:\n params_for_eval['mask'] = optim_sampler.default_params['mask']\n if eval_data_mode == 'all':\n params_for_eval['all_pixels/mode'] = 'all'\n points_eval_data = self._sample_points(spatial_dims, gt_data_for_label,\n self._eval_point_sampler,\n params_for_eval)\n model_outputs_and_targets['mask_for_point'] = points_eval_data[\n 'mask_for_point']\n\n # Initialize GT residual and GT cumulative.\n keys = list(points_data.keys())\n for key in keys:\n if key.startswith('points/'):\n points_data['points_sdf_gt_residual/' +\n key[7:]] = points_data['points_sdf_gt/' + key[7:]]\n 
points_data['points_sdf_gt_cumulative/' + key[7:]] = tf.zeros_like(\n points_data['points_sdf_gt/' + key[7:]])\n keys = list(points_eval_data.keys())\n for key in keys:\n if key.startswith('points/'):\n points_eval_data['points_sdf_gt_residual/' + \\\n key[7:]] = points_eval_data['points_sdf_gt/' + key[7:]]\n points_eval_data['points_sdf_gt_cumulative/' + key[7:]] = tf.zeros_like(\n points_eval_data['points_sdf_gt/' + key[7:]])\n\n # Iterate over all levels.\n for ith_level in range(len(levels)):\n i = levels[ith_level]\n debug_data = {}\n\n # Set input.\n input_data = gt_map_for_input\n\n if self._mode == 'fully_multi_level':\n if self._encoder_mode == 'input_enc+f2c':\n # Get root feature from input encoder.\n if ith_level == 0:\n if optim_mode == 'full':\n root_feature = self._input_encoder[0](\n input_data, training=training)\n debug_data['root_feature'] = root_feature\n model_outputs_and_targets['root_feature'] = root_feature\n elif optim_mode == 'latent_optim':\n pass\n else:\n raise ValueError('Unknown optim_mode: %s' % optim_mode)\n\n # Get latent code grid from feature to code net.\n if 'code_grid/level' + str(i) in model_outputs_and_targets:\n codes_this_level = model_outputs_and_targets['code_grid/level' +\n str(i)][0]\n else:\n if self._feature_to_code_net.mode == 'separate_branch':\n if optim_mode != 'latent_optim':\n codes_this_level = self._feature_to_code_net(\n root_feature, levels=[i], training=training)[i]\n model_outputs_and_targets['code_grid/level' +\n str(i)] = (codes_this_level, i)\n else:\n codes_all_level = self._gather_latent_codes(\n latent_code_type=latent_code_type)\n codes_this_level = codes_all_level[i]\n codes_this_level = self._feature_to_code_net(\n codes_this_level,\n levels=[i],\n training=training,\n decoder_only=True)[i]\n elif self._feature_to_code_net.mode in [\n 'single_branch', 'single_dec_branch'\n ]:\n if optim_mode != 'latent_optim':\n codes_all_level = self._feature_to_code_net(\n root_feature, training=training)\n else:\n codes_all_level = self._gather_latent_codes(\n latent_code_type=latent_code_type)\n codes_all_level = self._feature_to_code_net(\n codes_all_level, training=training, decoder_only=True)\n codes_this_level = codes_all_level[i]\n for level_idx, codes_all_level_i in enumerate(codes_all_level):\n model_outputs_and_targets['code_grid/level' +\n str(level_idx)] = (codes_all_level_i,\n level_idx)\n else:\n raise ValueError('mode: %s does not support encoder_mode: %s' % \\\n (self._mode, self._encoder_mode))\n\n # Forward decoder on training points.\n if training:\n model_outputs_and_targets_train, _ = \\\n self._forward_decoder_on_points_data(\n codes_this_level, points_data, i, training=True,\n save_key_prefix='')\n model_outputs_and_targets.update(model_outputs_and_targets_train)\n\n # Forward decoder on evaluation points.\n if self._do_eval_every_iter or do_eval:\n model_outputs_and_targets_eval, debug_data_temp = \\\n self._forward_decoder_on_points_data(\n codes_this_level, points_eval_data, i, training=False,\n save_key_prefix='eval_')\n model_outputs_and_targets.update(model_outputs_and_targets_eval)\n debug_data.update(debug_data_temp)\n\n # Add summaries for this level.\n if do_eval:\n # Input data.\n summary_key = 'input_data/level' + str(i)\n summary_data = tf.stack(\n [input_data[..., i_channel] for i_channel in\n range(input_data.shape[-1])], axis=1)[..., None] / \\\n self.summary_config['sdf_range'] * 0.5 + 0.5\n image_summaries_update = misc_utils.get_image_summary(\n summary_key,\n summary_data,\n 
channels_use='first',\n spatial_dims=None,\n normalize=False,\n summary_config=self.summary_config,\n extra_axis=1)\n image_summaries.update(image_summaries_update)\n\n # Root feature.\n if i == 0:\n data_key = 'root_feature'\n summary_key = 'misc/root_feature/level' + str(i)\n if data_key in debug_data:\n image_summaries_update = misc_utils.get_image_summary(\n summary_key,\n debug_data[data_key],\n channels_use='first',\n spatial_dims=None,\n normalize=True,\n summary_config=self.summary_config)\n image_summaries.update(image_summaries_update)\n\n # Latent code grid.\n summary_key = 'misc/code_grid/level' + str(i)\n image_summaries_update = misc_utils.get_image_summary(\n summary_key,\n codes_this_level,\n channels_use='first',\n spatial_dims=None,\n normalize=True,\n summary_config=self.summary_config)\n image_summaries.update(image_summaries_update)\n\n summary_key = 'misc/code_grid_last/level' + str(i)\n image_summaries_update = misc_utils.get_image_summary(\n summary_key,\n codes_this_level,\n channels_use='last',\n spatial_dims=None,\n normalize=True,\n summary_config=self.summary_config)\n image_summaries.update(image_summaries_update)\n\n # Latent codes for points.\n data_key = 'latent_codes'\n if data_key in debug_data:\n summary_key = 'misc/latent_codes/level' + str(i)\n image_summaries_update = misc_utils.get_image_summary(\n summary_key,\n debug_data[data_key],\n channels_use='first',\n spatial_dims=spatial_dims,\n normalize=True,\n summary_config=self.summary_config,\n data_mode=eval_data_mode)\n image_summaries.update(image_summaries_update)\n\n summary_key = 'misc/latent_codes_last/level' + str(i)\n image_summaries_update = misc_utils.get_image_summary(\n summary_key,\n debug_data[data_key],\n channels_use='last',\n spatial_dims=spatial_dims,\n normalize=True,\n summary_config=self.summary_config,\n data_mode=eval_data_mode)\n image_summaries.update(image_summaries_update)\n else:\n raise ValueError('Unknown mode: %s' % self._mode)\n\n return model_outputs_and_targets, image_summaries", "def process(self, data_batch: Sequence[dict],\n data_samples: Sequence[dict]) -> None:\n for data_sample in data_samples:\n # predicted keypoints coordinates, [1, K, D]\n pred_coords = data_sample['pred_instances']['keypoints']\n # ground truth data_info\n gt = data_sample['gt_instances']\n # ground truth keypoints coordinates, [1, K, D]\n gt_coords = gt['lifting_target']\n # ground truth keypoints_visible, [1, K, 1]\n mask = gt['lifting_target_visible'].astype(bool).reshape(1, -1)\n # instance action\n img_path = data_sample['target_img_path']\n _, rest = osp.basename(img_path).split('_', 1)\n action, _ = rest.split('.', 1)\n\n result = {\n 'pred_coords': pred_coords,\n 'gt_coords': gt_coords,\n 'mask': mask,\n 'action': action\n }\n\n self.results.append(result)", "def step(self, sess, batch_data, is_training):\n\n # Input feed\n input_feed = {}\n input_feed[self.images] = batch_data['images']\n input_feed[self.bbox_true_13] = batch_data['bbox_true_13']\n input_feed[self.bbox_true_26] = batch_data['bbox_true_26']\n input_feed[self.bbox_true_52] = batch_data['bbox_true_52']\n\n # Output feed: depends on training or test\n output_feed = [self.loss] # Loss for this batch.\n if is_training:\n output_feed.append(self.train_op) # Gradient updates\n\n outputs = sess.run(output_feed, input_feed)\n return outputs[0] # loss", "def _data_generation(self, batch_data):\n # Initialization\n batch_x = []\n batch_y = defaultdict(list)\n\n for ind, item_data in batch_data.iterrows():\n img_path = 
os.path.join(self.img_dir, \"images\", \"rgb\", item_data[\"name\"])\n img = cv2.imread(img_path)\n try:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n except Exception as error:\n print(img_path)\n print(error)\n not_valid_mask = self.read_masks_borders(item_data[\"name\"])\n img[not_valid_mask] = 0\n\n # getmasks\n targets = np.zeros((img.shape[0], img.shape[1], len(self.classes)))\n for i, c in enumerate(self.classes):\n mask_path = os.path.join(self.img_dir, \"labels\", c, item_data[\"name\"])\n mask = cv2.imread(\n mask_path.replace(\".jpg\", \".png\"), cv2.IMREAD_GRAYSCALE\n )\n mask[not_valid_mask[:, :, 0]] = 0\n mask = mask > 0\n targets[:, :, i] = mask\n\n res = self.reshape_func(image=img, mask=targets)\n img, targets = res['image'], res['mask']\n if self.do_aug:\n res = self.aug(image=img, mask=targets)\n img, targets = res['image'], res['mask']\n\n for i, c in enumerate(self.classes):\n batch_y[c].append(targets[:, :, i])\n\n batch_x.append(img)\n\n batch_x = np.array(batch_x, np.float32)\n batch_y = {k: np.array(v, np.float32) for k, v in batch_y.items()}\n batch_y = {k: np.expand_dims(v, axis=-1) for k, v in batch_y.items()}\n\n return (\n imagenet_utils.preprocess_input(batch_x, \"channels_last\", mode=\"tf\"),\n batch_y\n )", "def _train_model(\n self,\n dataset: DatasetEntity,\n ):\n logger.info(\"init data cfg.\")\n self._data_cfg = ConfigDict(data=ConfigDict())\n\n for cfg_key, subset in zip(\n [\"train\", \"val\", \"unlabeled\"],\n [Subset.TRAINING, Subset.VALIDATION, Subset.UNLABELED],\n ):\n subset = get_dataset(dataset, subset)\n if subset and self._data_cfg is not None:\n self._data_cfg.data[cfg_key] = ConfigDict(\n otx_dataset=subset,\n labels=self._labels,\n )\n\n self._is_training = True\n\n self._init_task()\n\n cfg = self.configure(True, None)\n logger.info(\"train!\")\n\n timestamp = time.strftime(\"%Y%m%d_%H%M%S\", time.localtime())\n\n # Environment\n logger.info(f\"cfg.gpu_ids = {cfg.gpu_ids}, distributed = {cfg.distributed}\")\n env_info_dict = collect_env()\n env_info = \"\\n\".join([(f\"{k}: {v}\") for k, v in env_info_dict.items()])\n dash_line = \"-\" * 60 + \"\\n\"\n logger.info(f\"Environment info:\\n{dash_line}{env_info}\\n{dash_line}\")\n\n # Data\n datasets = [build_dataset(cfg.data.train)]\n\n if self._train_type == TrainType.Semisupervised:\n # forward the knowledge of num iters per epoch to model for filter loss\n bs_per_gpu = cfg.data.train_dataloader[\"samples_per_gpu\"]\n actual_bs = bs_per_gpu * torch.distributed.get_world_size() if cfg.distributed else bs_per_gpu\n cfg.model.num_iters_per_epoch = math.ceil(len(datasets[0]) / actual_bs)\n\n # FIXME: Currently segmentor does not support multi batch evaluation.\n # For the Self-SL case, there is no val data. 
So, need to check the\n\n if \"val\" in cfg.data and \"val_dataloader\" in cfg.data:\n cfg.data.val_dataloader[\"samples_per_gpu\"] = 1\n\n # Target classes\n if \"task_adapt\" in cfg:\n target_classes = cfg.task_adapt.final\n else:\n target_classes = datasets[0].CLASSES\n\n # Metadata\n meta = dict()\n meta[\"env_info\"] = env_info\n meta[\"seed\"] = cfg.seed\n meta[\"exp_name\"] = cfg.work_dir\n if cfg.checkpoint_config is not None:\n cfg.checkpoint_config.meta = dict(\n mmseg_version=__version__ + get_git_hash()[:7],\n CLASSES=target_classes,\n )\n\n # Model\n model = self.build_model(cfg, fp16=cfg.get(\"fp16\", False), is_training=self._is_training)\n model.train()\n model.CLASSES = target_classes\n\n if cfg.distributed:\n convert_sync_batchnorm(model)\n\n validate = bool(cfg.data.get(\"val\", None))\n\n if self._hyperparams.learning_parameters.auto_adapt_batch_size != BatchSizeAdaptType.NONE:\n train_func = partial(train_segmentor, meta=deepcopy(meta), model=deepcopy(model), distributed=False)\n adapt_batch_size(\n train_func,\n cfg,\n datasets,\n isinstance(self, NNCFBaseTask), # nncf needs eval hooks\n not_increase=(self._hyperparams.learning_parameters.auto_adapt_batch_size == BatchSizeAdaptType.SAFE),\n )\n\n train_segmentor(\n model,\n datasets,\n cfg,\n distributed=cfg.distributed,\n validate=validate,\n timestamp=timestamp,\n meta=meta,\n )\n\n # Save outputs\n output_ckpt_path = os.path.join(cfg.work_dir, \"latest.pth\")\n best_ckpt_path = glob.glob(os.path.join(cfg.work_dir, \"best_mDice_*.pth\"))\n if len(best_ckpt_path) > 0:\n output_ckpt_path = best_ckpt_path[0]\n best_ckpt_path = glob.glob(os.path.join(cfg.work_dir, \"best_mIoU_*.pth\"))\n if len(best_ckpt_path) > 0:\n output_ckpt_path = best_ckpt_path[0]\n return dict(\n final_ckpt=output_ckpt_path,\n )", "def train_step(self, data_batch, optimizer, **kwargs):\n losses = self.forward(**data_batch)\n loss, log_vars = self._parse_losses(losses)\n if 'img' in data_batch:\n batch_size = data_batch['img'][0].shape[0]\n else:\n assert 'input_heatmaps' in data_batch\n batch_size = data_batch['input_heatmaps'][0][0].shape[0]\n outputs = dict(loss=loss, log_vars=log_vars, num_samples=batch_size)\n return outputs", "def train_step(self, batch_sample, epoch_it):\n batch_x = batch_sample['waveform']\n data_type = batch_sample['data_type']\n batch_target = {\n 'ov': batch_sample['ov'],\n 'sed': batch_sample['sed_label'],\n 'doa': batch_sample['doa_label'],\n }\n if self.cuda:\n batch_x = batch_x.cuda(non_blocking=True)\n batch_target['sed'] = batch_target['sed'].cuda(non_blocking=True)\n batch_target['doa'] = batch_target['doa'].cuda(non_blocking=True)\n\n\n self.optimizer.zero_grad()\n self.af_extractor.train()\n self.model.train()\n\n (batch_x, batch_target) = self.af_extractor((batch_x, batch_target,'train', data_type))\n batch_x = (batch_x - self.mean) / self.std\n if self.cfg['training']['model'] == 'SELD_ATT' or self.cfg['training']['model'] == 'SELD_ATT_LIGHT':\n pred, pred_constraint = self.model(batch_x)\n if self.cfg['training']['model'] == 'EINV2':\n pred = self.model(batch_x)\n if self.cfg['training']['model'] == 'SELD_ATT' or self.cfg['training']['model'] == 'SELD_ATT_LIGHT':\n loss_dict = self.losses.calculate_attention(pred, pred_constraint,batch_target, epoch_it,self.model)\n if self.cfg['training']['model'] == 'EINV2':\n loss_dict = self.losses.calculate(pred, batch_target, epoch_it, self.model)\n\n loss_dict[self.cfg['training']['loss_type']].backward(retain_graph=False)\n self.optimizer.step()\n\n 
self.train_losses['train_loss_all'] += loss_dict['all'].item()\n self.train_losses['train_loss_sed'] += loss_dict['sed'].item()\n self.train_losses['train_loss_doa'] += loss_dict['doa'].item()\n\n if self.cfg['training']['weight_constraints']:\n self.train_losses['train_loss_weight_orthogonal'] += loss_dict['loss_weight_orthogonal'].item()\n\n if self.cfg['training']['weight_constraints_1']:\n self.train_losses['train_loss_weight_orthogonal_1'] += loss_dict['loss_weight_orthogonal_1'].item()\n\n if self.cfg['training']['layer_constraints']:\n self.train_losses['train_loss_layer_orthogonal'] += loss_dict['loss_layer_orthogonal'].item()\n\n if self.cfg['training']['layer_constraints_1']:\n self.train_losses['train_loss_layer_orthogonal_1'] += loss_dict['loss_layer_orthogonal_1'].item()\n\n if self.cfg['training']['smoothness_loss']:\n self.train_losses['train_loss_doa_smoothness'] += loss_dict['loss_doa_smoothness'].item()", "def _do_training(self, iteration, batch):\n\n feed_dict = self._get_feed_dict(iteration, batch)\n\n self._session.run(self._training_ops, feed_dict)\n\n if iteration % self._target_update_interval == 0:\n # Run target ops here.\n self._update_target()", "def on_train_batch_begin(self, step, logs=None):", "def validation_step(self, *args: Any, **kwargs: Any) -> None:\n batch = args[0]\n batch_idx = args[1]\n x = batch[\"image\"]\n y = batch[\"mask\"]\n y_hat = self(x)\n y_hat_hard = y_hat.argmax(dim=1)\n\n loss = self.loss(y_hat, y)\n\n self.log(\"val_loss\", loss, on_step=False, on_epoch=True)\n self.val_metrics(y_hat_hard, y)\n\n if (\n batch_idx < 10\n and hasattr(self.trainer, \"datamodule\")\n and self.logger\n and hasattr(self.logger, \"experiment\")\n and hasattr(self.logger.experiment, \"add_figure\")\n ):\n try:\n datamodule = self.trainer.datamodule\n batch[\"prediction\"] = y_hat_hard\n for key in [\"image\", \"mask\", \"prediction\"]:\n batch[key] = batch[key].cpu()\n sample = unbind_samples(batch)[0]\n fig = datamodule.plot(sample)\n summary_writer = self.logger.experiment\n summary_writer.add_figure(\n f\"image/{batch_idx}\", fig, global_step=self.global_step\n )\n plt.close()\n except ValueError:\n pass", "def _train_epoch(self, train_batches, data, max_metric_value, metric_save, patience, step_pbar):\n evaluate = True\n exit_tag = False\n num_steps = self.args.num_steps\n check_point, batch_size = self.args.check_point, self.args.batch_size\n save_dir, save_prefix = self.args.save_dir, self.args.algo\n\n for bitx, batch in enumerate(train_batches):\n if evaluate and self.global_step % self.eval_freq == 0:\n if data.dev_set is not None:\n dev_batches = data.gen_mini_batches('dev', 31928, shuffle=False)\n dev_loss, dev_perplexity, dev_perplexity_at_rank = self.evaluate(dev_batches, data)\n #print('dev loss=%s' % dev_loss, 'dev ppl=%s' % dev_perplexity, 'dev ppl at rank=', dev_perplexity_at_rank)\n\n test_batches = data.gen_mini_batches('test', 41405, shuffle=False)\n test_loss, test_perplexity, test_perplexity_at_rank = self.evaluate(test_batches, data)\n #print('test loss=%s' % test_loss, 'dev ppl=%s' % test_perplexity, 'dev ppl at rank=' , test_perplexity_at_rank)\n\n self.writer.add_scalar(\"dev/loss\", dev_loss, self.global_step)\n self.writer.add_scalar(\"dev/perplexity\", dev_perplexity, self.global_step)\n self.writer.add_scalar(\"test/loss\", test_loss, self.global_step)\n self.writer.add_scalar(\"test/perplexity\", test_perplexity, self.global_step)\n\n for trunc_level in self.trunc_levels:\n ndcg_version1, ndcg_version2 = 
self.relevance_estimator.evaluate(self, data, self.relevance_queries, trunc_level)\n self.writer.add_scalar(\"NDCG_version1/{}\".format(trunc_level), ndcg_version1, self.global_step)\n self.writer.add_scalar(\"NDCG_version2/{}\".format(trunc_level), ndcg_version2, self.global_step)\n\n if dev_loss < metric_save:\n metric_save = dev_loss\n patience = 0\n else:\n patience += 1\n # Trick: do not decay d_lr help convergence\n if patience >= self.patience:\n #self.adjust_learning_rate(self.discrim_optimizer, self.args.lr_decay)\n self.adjust_learning_rate(self.policy_optimizer, self.args.lr_decay)\n self.g_lr *= self.args.lr_decay\n #self.d_lr *= self.args.lr_decay\n self.writer.add_scalar('train/g_lr', self.g_lr, self.global_step)\n #self.writer.add_scalar('train/d_lr', self.d_lr, self.global_step)\n metric_save = dev_loss\n patience = 0\n self.patience += 1\n else:\n self.logger.warning('No dev set is loaded for evaluation in the dataset!')\n\n self.global_step += 1\n step_pbar.update(1)\n QIDS = Variable(torch.from_numpy(np.array(batch['qids'], dtype=np.int64)))\n UIDS = Variable(torch.from_numpy(np.array(batch['uids'], dtype=np.int64)))\n VIDS = Variable(torch.from_numpy(np.array(batch['vids'], dtype=np.int64)))\n PRE_CLICKS = Variable(torch.from_numpy(np.array(batch['clicks'], dtype=np.int64)[:, :-1]))\n CLICKS = Variable(torch.from_numpy(np.array(batch['clicks'], dtype=np.int64)[:, 1:]))\n\n # generate trajectories\n for __ in range(self.args.d_step):\n actor_rnn_state = Variable(torch.zeros(1, QIDS.shape[0], self.gru_hidden_size))\n critic_rnn_state = Variable(torch.zeros(1, QIDS.shape[0], self.gru_hidden_size))\n CLICK_ = torch.zeros(QIDS.shape[0], 1, dtype=CLICKS.dtype)\n logits = torch.zeros(QIDS.shape[0], 0, 2)\n values = torch.zeros(QIDS.shape[0], 0)\n CLICKS_ = Variable(torch.zeros((QIDS.shape[0], 0), dtype=CLICKS.dtype))\n if self.use_cuda:\n QIDS, UIDS, VIDS, PRE_CLICKS, CLICKS = QIDS.cuda(), UIDS.cuda(), VIDS.cuda(), PRE_CLICKS.cuda(), CLICKS.cuda()\n actor_rnn_state, critic_rnn_state, CLICK_ = actor_rnn_state.cuda(), critic_rnn_state.cuda(), CLICK_.cuda()\n logits, values, CLICKS_ = logits.cuda(), values.cuda(), CLICKS_.cuda()\n self.policy.eval()\n for i in range(self.max_d_num + 1):\n logit, value, actor_rnn_state, critic_rnn_state = self.policy(QIDS[:, i:i+1], \n UIDS[:, i:i+1], \n VIDS[:, i:i+1], \n CLICK_, \n actor_rnn_state, \n critic_rnn_state)\n if i > 0:\n CLICK_ = torch.distributions.Categorical(logit).sample()\n logits = torch.cat([logits, logit], dim=1)\n values = torch.cat([values, value], dim=1)\n CLICKS_ = torch.cat([CLICKS_, CLICK_], dim=1)\n\n if self.use_cuda:\n CLICKS_ = torch.cat((torch.zeros((CLICKS_.shape[0], 1), dtype=CLICKS_.dtype, device=torch.device('cuda')), CLICKS_), dim=1)\n else:\n CLICKS_ = torch.cat((torch.zeros((CLICKS_.shape[0], 1), dtype=CLICKS_.dtype), CLICKS_), dim=1)\n\n '''update discriminator'''\n for _ in range(self.args.k):\n self.discrim.train()\n self.discrim_optimizer.zero_grad()\n g_o, _ = self.discrim(QIDS, UIDS, VIDS, CLICKS_)\n g_o_target = torch.ones((QIDS.shape[0], g_o.shape[1]))\n e_o, _ = self.discrim(QIDS, UIDS, VIDS, CLICKS)\n e_o_target = torch.zeros((QIDS.shape[0], e_o.shape[1]))\n if self.use_cuda:\n g_o_target, e_o_target = g_o_target.cuda(), e_o_target.cuda()\n \n discrim_loss = self.discrim_criterion(g_o, g_o_target) + self.discrim_criterion(e_o, e_o_target)\n discrim_loss.backward()\n self.discrim_optimizer.step()\n self.writer.add_scalar('train/d_loss', discrim_loss.data, self.global_step)\n\n '''estimate 
advantage'''\n with torch.no_grad():\n self.discrim.eval()\n rewards = -torch.log(self.discrim(QIDS, UIDS, VIDS, CLICKS_)[0])\n # print(rewards.shape, values.shape)\n #print(tensor_type)\n #exit(0)\n deltas = torch.zeros(rewards.shape)\n advantages = torch.zeros(rewards.shape)\n prev_value = torch.zeros(rewards.shape[0])\n prev_advantage = torch.zeros(rewards.shape[0])\n if self.use_cuda:\n deltas, advantages = deltas.cuda(), advantages.cuda()\n prev_value, prev_advantage = prev_value.cuda(), prev_advantage.cuda()\n '''print(deltas)\n print(advantages)\n print(prev_value)\n print(prev_advantage)\n exit(0)'''\n\n for i in reversed(range(rewards.size(1))):\n deltas[:, i] = rewards[:, i] + self.gamma * prev_value - values[:, i]\n advantages[:, i] = deltas[:, i] + self.gamma * self.tau * prev_advantage\n prev_value = values[:, i]\n prev_advantage = advantages[:, i]\n\n returns = values + advantages\n advantages = (advantages - advantages.mean()) / (advantages.std() + MINF)\n # advantages = (returns - returns.mean())/returns.std()\n\n fixed_log_probs = torch.distributions.Categorical(logits).log_prob(CLICKS_[:, 1:])\n\n '''PPO update'''\n self.policy.train()\n optim_batchsize = 512\n optim_iter_num = int(math.ceil(QIDS.shape[0] / optim_batchsize))\n if self.use_cuda:\n CLICKS_ = torch.cat((torch.zeros((CLICKS_.shape[0], 1), dtype=CLICKS_.dtype, device=torch.device('cuda')), CLICKS_), dim=1)\n else:\n CLICKS_ = torch.cat((torch.zeros((CLICKS_.shape[0], 1), dtype=CLICKS_.dtype), CLICKS_), dim=1)\n for _ in range(self.args.g_step):\n perm = np.arange(QIDS.shape[0])\n np.random.shuffle(perm)\n\n QIDS, UIDS, VIDS, PRE_CLICKS, CLICKS, CLICKS_, advantages, returns, fixed_log_probs = \\\n QIDS[perm].clone(), UIDS[perm].clone(), VIDS[perm].clone(), PRE_CLICKS[perm].clone(), \\\n CLICKS[perm].clone(), CLICKS_[perm].clone(), advantages[perm].clone(), returns[perm].clone(), fixed_log_probs[perm].clone()\n\n #print(QIDS)\n #exit(0)\n\n for i in range(optim_iter_num):\n ind = slice(i * optim_batchsize, min((i + 1) * optim_batchsize, QIDS.shape[0]))\n qids_b, uids_b, vids_b, pclicks_b, clicks_b, clicks__b, advantage_b, returns_b, fixed_log_probs_b = \\\n QIDS[ind], UIDS[ind], VIDS[ind], CLICKS_[ind, :-1], CLICKS[ind], CLICKS_[ind, 2:], \\\n advantages[ind], returns[ind], fixed_log_probs[ind]\n\n logits, values_pred, _, _ = self.policy(qids_b, uids_b, vids_b, pclicks_b)\n dist = torch.distributions.Categorical(logits)\n\n\n '''update critic'''\n value_loss = (values_pred - returns_b).pow(2).mean()\n '''optimizer policy'''\n log_probs_b = dist.log_prob(clicks__b)\n ratio = torch.exp(log_probs_b - fixed_log_probs_b)\n surr1 = ratio * advantage_b\n surr2 = torch.clamp(ratio, 1.0 - self.clip_epsilon, 1.0 + self.clip_epsilon) * advantage_b\n policy_surr = -torch.min(surr1, surr2).mean()\n pe = dist.entropy().mean()\n loss = value_loss + self.alpha * policy_surr - self.beta * pe\n\n self.policy_optimizer.zero_grad()\n loss.backward()\n torch.nn.utils.clip_grad_norm_(self.policy.parameters(), 40)\n self.policy_optimizer.step()\n g_loss, _ = self.compute_loss(logits, clicks_b)\n\n self.writer.add_scalar('train/g_loss', g_loss.data, self.global_step)\n self.writer.add_scalar('train/g_valueloss', value_loss.data, self.global_step)\n self.writer.add_scalar('train/g_policysurr', policy_surr.data, self.global_step)\n self.writer.add_scalar('train/g_entropy', pe.data, self.global_step)\n\n if check_point > 0 and self.global_step % check_point == 0:\n self.save_model(save_dir, save_prefix)\n if self.global_step >= 
num_steps:\n exit_tag = True\n\n return max_metric_value, exit_tag, metric_save, patience", "def train(self, data_iterator):\n \n if self.config['sequence_input']:\n if self.config['net_input_add_onehot']:\n input_data_ph = tf.placeholder(tf.uint8, shape=(self.config['batch_size'], self.config['timesteps']))\n else:\n input_data_ph = tf.placeholder(tf.float32, shape=(self.config['batch_size'], self.config['timesteps'], self.config['num_input']))\n else:\n input_data_ph = tf.placeholder(tf.float32, shape=(self.config['batch_size'], self.config['num_input']))\n \n if self.config['sequence_output']:\n if self.config['net_target_add_onehot']:\n target_ph = tf.placeholder(tf.uint8, shape=(self.config['batch_size'], self.config['timesteps']))\n else:\n target_ph = tf.placeholder(tf.float32, shape=(self.config['batch_size'], self.config['timesteps'], self.config['num_output']))\n else:\n target_ph = tf.placeholder(tf.float32, shape=(self.config['batch_size'], self.config['num_output']))\n \n training, loss_avg_t = self.setup_train(input_data_ph, target_ph)\n \n session = tf.Session()\n session.run(tf.global_variables_initializer())\n \n self.analyze_config()\n \n for epoch in range(self.config['epochs']):\n starttime = time.time()\n for step in range(self.config['epoch_steps']):\n input_data, target = next(data_iterator)\n tmp, loss_avg_value = session.run([training, loss_avg_t], {input_data_ph:input_data, target_ph:target})\n print(\"Epoch: {} Loss: {} Elapsed:{}s\".format(epoch, loss_avg_value, (time.time() - starttime)))", "def train_single_batch(self, batch_data):\n assert hasattr(self, \"model\"), \"Please specify the exact model !\"\n self.optimizer.zero_grad()\n norm_adj = self.norm_adj\n ua_embeddings, ia_embeddings = self.model.forward(norm_adj)\n\n batch_users, pos_items, neg_items = batch_data\n\n u_g_embeddings = ua_embeddings[batch_users]\n pos_i_g_embeddings = ia_embeddings[pos_items]\n neg_i_g_embeddings = ia_embeddings[neg_items]\n\n batch_mf_loss, batch_reg_loss = self.loss_comput(\n u_g_embeddings,\n pos_i_g_embeddings,\n neg_i_g_embeddings,\n batch_users,\n pos_items,\n neg_items,\n )\n\n batch_loss = batch_mf_loss + batch_reg_loss\n\n batch_loss.backward()\n self.optimizer.step()\n loss = batch_loss.item()\n return loss", "def prepare_data(self):\n data = self._get_dataset(self.hparams.dataset_path)\n label_encoder = data[\"label_encoder\"]\n del data[\"label_encoder\"]\n\n click.secho(\"Building inputs and labels.\", fg=\"yellow\")\n datasets = {\n \"train\": defaultdict(list),\n \"valid\": defaultdict(list),\n \"test\": defaultdict(list),\n }\n for dataset_name, dataset in data.items():\n for sample in dataset:\n instance = self.build_input(\n self.tokenizer, sample[\"text\"], label_encoder, sample[\"label\"]\n )\n for input_name, input_array in instance.items():\n datasets[dataset_name][input_name].append(input_array)\n\n click.secho(\"Padding inputs and building tensors.\", fg=\"yellow\")\n tensor_datasets = {\"train\": [], \"valid\": [], \"test\": []}\n for dataset_name, dataset in datasets.items():\n dataset = self.pad_dataset(dataset, padding=self.tokenizer.pad_index)\n for input_name in MODEL_INPUTS:\n if input_name == \"labels\":\n tensor = torch.tensor(dataset[input_name], dtype=torch.float32)\n else:\n tensor = torch.tensor(dataset[input_name])\n tensor_datasets[dataset_name].append(tensor)\n\n self.train_dataset = TensorDataset(*tensor_datasets[\"train\"])\n self.valid_dataset = TensorDataset(*tensor_datasets[\"valid\"])\n self.test_dataset = 
TensorDataset(*tensor_datasets[\"test\"])\n click.secho(\n \"Train dataset (Batch, Candidates, Seq length): {}\".format(\n self.train_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )\n click.secho(\n \"Valid dataset (Batch, Candidates, Seq length): {}\".format(\n self.valid_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )\n click.secho(\n \"Test dataset (Batch, Candidates, Seq length): {}\".format(\n self.test_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )", "def run_batch(self, batch_x, batch_y):\n raise NotImplementedError()", "def _batch_train(self, batch, training_step, step):\n lstm_size = (self.batch_size, self.Qmain.h_size)\n batch_mem = np.zeros(lstm_size)\n batch_carry = np.zeros(lstm_size)\n input_shape = (self.batch_size,\n self.trace_length,\n self.observation_size)\n m_data = np.vstack(batch[:, 0])\n m_data = m_data.reshape(input_shape)\n t_data = np.vstack(batch[:, 4])\n t_data = t_data.reshape(input_shape)\n q_input = [np.copy(batch_mem), np.copy(batch_carry), np.copy(m_data)]\n q1_input = [np.copy(batch_mem), np.copy(batch_carry), np.copy(t_data)]\n\n # Batch predict\n self.Qmain.trace_length.assign(self.trace_length)\n self.Qmain.dropout_rate.assign(0.0)\n self.Qtarget.trace_length.assign(self.trace_length)\n self.Qtarget.dropout_rate.assign(0.0)\n\n # Save the graph just the first time\n if training_step == 0:\n tf.summary.trace_on()\n\n # T batch predict\n pred = self.Qmain.model.predict(q_input,\n batch_size=self.batch_size)\n Q = pred[0]\n batch_bus = pred[1]\n batch_line = pred[2]\n batch_disp = pred[3]\n\n ## Log graph once and disable graph logging\n if training_step == 0:\n with self.tf_writer.as_default():\n tf.summary.trace_export(self.name + \"-graph\", step)\n\n # T+1 batch predict\n Qn, *_ = self.Qtarget.model.predict(q1_input,\n batch_size=self.batch_size)\n \n # Compute batch Q update to Qtarget\n for i in range(self.batch_size):\n idx = i * (self.trace_length - 1)\n a = batch[idx][1]\n grid = a[0]\n batch_bus[i][:] = a[1][:]\n batch_line[i][:] = a[2][:]\n batch_disp[i][:] = a[3][:]\n r = batch[idx][2]\n d = batch[idx][3]\n Q[i][grid] = r\n if d == False:\n Q[i][grid] += DISCOUNT_FACTOR * Qn[i][grid]\n\n # Batch train\n batch_x = [batch_mem, batch_carry, m_data]\n batch_y = [\n Q,\n batch_bus, batch_line, batch_disp,\n batch_mem, batch_carry\n ]\n loss = self.Qmain.model.train_on_batch(batch_x, batch_y)\n loss = loss[0]\n\n # Log to tensorboard\n self._tf_log_summary(loss, step)", "def train_step(self, data_batch, optimizer, **kwargs):\n losses = self.forward(**data_batch)\n loss, log_vars = self._parse_losses(losses)\n outputs = dict(loss=loss, log_vars=log_vars, num_samples=len(next(iter(data_batch.values()))))\n return outputs", "def _process_batch_data(\n self, tf_batch_data: Dict[Text, Dict[Text, List[tf.Tensor]]]\n ) -> Tuple[tf.Tensor, Optional[tf.Tensor], Optional[tf.Tensor]]:\n # encode each attribute present in tf_batch_data\n text_output = None\n text_sequence_lengths = None\n batch_encoded = {}\n for attribute in tf_batch_data.keys():\n if attribute in SENTENCE_FEATURES_TO_ENCODE + STATE_LEVEL_FEATURES:\n (\n attribute_features,\n _text_output,\n _text_sequence_lengths,\n ) = self._encode_features_per_attribute(tf_batch_data, attribute)\n\n batch_encoded[attribute] = attribute_features\n if attribute == TEXT:\n text_output = _text_output\n text_sequence_lengths = _text_sequence_lengths\n\n # if both action text and action name are present, combine them; otherwise,\n # return the one which is present\n\n if (\n 
batch_encoded.get(ACTION_TEXT) is not None\n and batch_encoded.get(ACTION_NAME) is not None\n ):\n batch_action = batch_encoded.pop(ACTION_TEXT) + batch_encoded.pop(\n ACTION_NAME\n )\n elif batch_encoded.get(ACTION_TEXT) is not None:\n batch_action = batch_encoded.pop(ACTION_TEXT)\n else:\n batch_action = batch_encoded.pop(ACTION_NAME)\n # same for user input\n if (\n batch_encoded.get(INTENT) is not None\n and batch_encoded.get(TEXT) is not None\n ):\n batch_user = batch_encoded.pop(INTENT) + batch_encoded.pop(TEXT)\n elif batch_encoded.get(TEXT) is not None:\n batch_user = batch_encoded.pop(TEXT)\n else:\n batch_user = batch_encoded.pop(INTENT)\n\n batch_features = [batch_user, batch_action]\n # once we have user input and previous action,\n # add all other attributes (SLOTS, ACTIVE_LOOP, etc.) to batch_features;\n for key in batch_encoded.keys():\n batch_features.append(batch_encoded.get(key))\n\n batch_features = tf.concat(batch_features, axis=-1)\n\n return batch_features, text_output, text_sequence_lengths", "def train_step(self, data_batch, optimizer, **kwargs):\n losses = self(**data_batch)\n loss, log_vars = self._parse_losses(losses)\n\n outputs = dict(\n loss=loss,\n log_vars=log_vars,\n num_samples=len(data_batch['img_metas']))\n\n return outputs", "def train_batch(\n self, batch: TorchData, model: nn.Module, epoch_idx: int, batch_idx: int\n ) -> Union[torch.Tensor, Dict[str, Any]]:\n pass", "def train(self, data):\n pass", "def on_predict_batch_begin(self, batch, logs=None):", "def train(self, training_data):\n pass", "def train_batch(self, batch_info: BatchInfo) -> None:\n # Each DQN batch is\n # 1. Roll out environment and store out experience in the buffer\n self.model.eval()\n\n # Helper variables for rollouts\n episode_information = []\n frames = 0\n\n with torch.no_grad():\n if not self.env_roller.is_ready_for_sampling():\n while not self.env_roller.is_ready_for_sampling():\n rollout = self.env_roller.rollout(batch_info, self.model)\n\n episode_information.extend(rollout.episode_information())\n frames += rollout.frames()\n else:\n for i in range(self.settings.batch_rollout_rounds):\n rollout = self.env_roller.rollout(batch_info, self.model)\n\n episode_information.extend(rollout.episode_information())\n frames += rollout.frames()\n\n batch_info['frames'] = frames\n batch_info['episode_infos'] = episode_information\n\n # 2. 
Sample the buffer and train the algo on sample batch\n self.model.train()\n\n # Algo will aggregate data into this list:\n batch_info['sub_batch_data'] = []\n\n for i in range(self.settings.batch_training_rounds):\n sampled_rollout = self.env_roller.sample(batch_info, self.model)\n\n batch_result = self.algo.optimizer_step(\n batch_info=batch_info,\n device=self.device,\n model=self.model,\n rollout=sampled_rollout\n )\n\n self.env_roller.update(rollout=sampled_rollout, batch_info=batch_result)\n\n batch_info['sub_batch_data'].append(batch_result)\n\n batch_info.aggregate_key('sub_batch_data')", "def __init__(self, data_, vocab, bert_vocab, hps, device_id, single_pass,data_format):\n\t\tself._data = data_\n\t\tself._vocab = vocab\n\t\tself._hps = hps\n\t\tself._device_id = device_id\n\t\tself._single_pass = single_pass\n\t\tself._data_as_tf_example = data_format\n\t\tself.bert_vocab = bert_vocab\n\t\t# Initialize a queue of Batches waiting to be used, and a queue of Examples waiting to be batched\n\t\tself._batch_queue = Queue.Queue(self.BATCH_QUEUE_MAX)\n\t\tself._example_queue = Queue.Queue(self.BATCH_QUEUE_MAX * self._hps.batch_size.value)\n\t\t#\tself._data = pickle.load(open(data_path,'rb'))\n\t\t#\ttf.logging.info(len(self._data))\n\n\t\t# Different settings depending on whether we're in single_pass mode or not\n\t\tif single_pass:\n\t\t\tself._num_example_q_threads = 1 # just one thread, so we read through the dataset just once\n\t\t\tself._num_batch_q_threads = 1 # just one thread to batch examples\n\t\t\tself._bucketing_cache_size = 1 # only load one batch's worth of examples before bucketing; this essentially means no bucketing\n\t\t\tself._finished_reading = False # this will tell us when we're finished reading the dataset\n\t\telse:\n\t\t\tself._num_example_q_threads = 1 # num threads to fill example queue\n\t\t\tself._num_batch_q_threads = 1 # num threads to fill batch queue\n\t\t\tself._bucketing_cache_size = 1 # how many batches-worth of examples to load into cache before bucketing\n\n\t\t# Start the threads that load the queues\n\t\tself._example_q_threads = []\n\t\tfor k in xrange(self._num_example_q_threads):\n\t\t\tself._example_q_threads.append(Thread(name=str(k), target=self.fill_example_queue))\n\t\t\tself._example_q_threads[-1].daemon = True\n\t\t\tself._example_q_threads[-1].start()\n\n\t\tself._batch_q_threads = []\n\t\tfor _ in xrange(self._num_batch_q_threads):\n\t\t\tself._batch_q_threads.append(Thread(target=self.fill_batch_queue))\n\t\t\tself._batch_q_threads[-1].daemon = True\n\t\t\tself._batch_q_threads[-1].start()\n\n\t\t# Start a thread that watches the other threads and restarts them if they're dead\n\t\tif not single_pass: # We don't want a watcher in single_pass mode because the threads shouldn't run forever\n\t\t\tself._watch_thread = Thread(target=self.watch_threads)\n\t\t\tself._watch_thread.daemon = True\n\t\t\tself._watch_thread.start()", "def on_test_batch_begin(self, batch, logs=None):", "def run_train_iter(self, session, batch, summary_writer):\n # Match up our input data with the placeholders\n input_feed = {}\n input_feed[self.context_ids] = batch.context_ids\n input_feed[self.context_mask] = batch.context_mask\n input_feed[self.qn_ids] = batch.qn_ids\n input_feed[self.qn_mask] = batch.qn_mask\n input_feed[self.ans_ids] = batch.ans_ids\n input_feed[self.ans_mask] = batch.ans_mask\n input_feed[self.keep_prob] = 1.0 - self.FLAGS.dropout # apply dropout\n\n # if not use raw graph tokens\n if not self.FLAGS.use_raw_graph:\n 
input_feed[self.context_embedding] = batch.context_embeddings\n\n # output_feed contains the things we want to fetch.\n output_feed = [self.updates, self.summaries, self.loss, self.global_step, self.param_norm, self.gradient_norm, self.dev_loss]\n\n # Run the model\n [_, summaries, loss, global_step, param_norm, gradient_norm, dev_loss] = session.run(output_feed, input_feed)\n\n # All summaries in the graph are added to Tensorboard\n summary_writer.add_summary(summaries, global_step)\n\n return loss, global_step, param_norm, gradient_norm, dev_loss", "def evaluate_batch(self, batch: TorchData, model: nn.Module) -> Dict[str, Any]:\n pass", "def on_predict_batch_begin(self, step, logs=None):", "def train(self, batch_training=False):\n raise NotImplementedError", "def train(self, trainData):\n pass", "def __call__(\n self, data_batch: Dict[str, List[str]]\n ) -> Tuple[\n BatchEncoding,\n List[Dict[str, Union[int, str]]],\n List[SquadExample],\n List[SquadFeatures],\n ]:\n self._check_values_len(data_batch)\n concatenated_batch, evidences = self._concatenate_batch(data_batch)\n dataset, examples, features = load_examples(\n concatenated_batch, self.tokenizer, evaluate=True, output_examples=True\n )\n\n input_ids = [torch.unsqueeze(instance[0], 0) for instance in dataset]\n attention_mask = [torch.unsqueeze(instance[1], 0) for instance in dataset]\n token_type_ids = [torch.unsqueeze(instance[2], 0) for instance in dataset]\n\n output = {\n \"input_ids\": torch.cat(input_ids, axis=0),\n \"attention_mask\": torch.cat(attention_mask, axis=0),\n \"token_type_ids\": torch.cat(token_type_ids, axis=0),\n }\n output = BatchEncoding(output)\n\n return output, evidences, examples, features", "def train(self, session, train_dataset, val_dataset, train_dir):\n\n #self.saver=saver\n tic = time.time()\n params = tf.trainable_variables()\n num_params = sum(map(lambda t: np.prod(tf.shape(t.value()).eval()), params))\n toc = time.time()\n logging.info(\"Number of params: %d (retreival took %f secs)\" % (num_params, toc - tic))\n\n # context_ids, question_ids, answer_spans, ctx_mask ,q_mask, train_context = dataset\n # train_dataset = [context_ids, question_ids, answer_spans, ctx_mask ,q_mask]\n\n # val_context_ids, val_question_ids, val_answer_spans, val_ctx_mask, val_q_mask, val_context = val_dataset\n # val_dataset = [val_context_ids, val_question_ids, val_answer_spans, val_ctx_mask, val_q_mask]\n\n \n num_epochs = self.flags.epochs\n\n # print train_dataset[0].shape,train_dataset[1].shape\n # print val_dataset[0].shape,val_dataset[1].shape\n\n #if self.flags.debug:\n # train_dataset = [elem[:self.flags.batch_size*1] for elem in train_dataset]\n # val_dataset = [elem[:self.flags.batch_size*1] for elem in val_dataset]\n # num_epochs = 100\n \n # print train_dataset[0].shape,train_dataset[1].shape\n # print val_dataset[0].shape,val_dataset[1].shape\n # assert False\n\n for epoch in range(num_epochs):\n logging.info(\"Epoch %d out of %d\", epoch + 1, self.flags.epochs)\n self.run_epoch(sess=session,\n train_set=train_dataset, \n val_set=val_dataset)\n logging.info(\"Saving model in %s\", train_dir)\n self.saver.save(session, train_dir+\"/\"+self.flags.run_name+\".ckpt\")", "def input_fn(is_training, data_dir, reid_data_dir= None,batch_size=32, num_epochs=1):\n dataset = tf.data.Dataset.from_tensor_slices(get_filenames(is_training, data_dir))\n dataset_seg = dataset.flat_map(tf.data.TFRecordDataset)\n\n # dataset_reid = tf.data.Dataset.from_tensor_slices(get_filenames_reid(is_training, reid_data_dir))\n # 
dataset_reid = dataset_reid.flat_map(tf.data.TFRecordDataset)\n\n\n if is_training:\n # When choosing shuffle buffer sizes, larger sizes result in better\n # randomness, while smaller sizes have better performance.\n # is a relatively small dataset, we choose to shuffle the full epoch.\n dataset_seg = dataset_seg.shuffle(buffer_size=_NUM_IMAGES['train'])\n # dataset_reid = dataset_reid.shuffle(buffer_size=30248)\n\n\n dataset_seg = dataset_seg.map(parse_record)\n dataset_seg = dataset_seg.map(lambda image, label: preprocess_image(image, label, is_training))\n dataset_seg = dataset_seg.prefetch(batch_size)\n dataset_seg = dataset_seg.repeat(num_epochs)\n dataset_seg = dataset_seg.batch(batch_size)\n\n # dataset_reid = dataset_reid.map(parse_record_reid)\n # dataset_reid = dataset_reid.map(lambda image, label: preprocess_image_reid(image, label, is_training))\n # dataset_reid = dataset_reid.prefetch(batch_size)\n # dataset_reid = dataset_reid.repeat(num_epochs)\n # dataset_reid = dataset_reid.batch(batch_size)\n\n # iterator = dataset_reid.make_one_shot_iterator()\n # images_reid, label_reid = iterator.get_next()\n\n train_record_file = os.path.join(reid_data_dir, 'train-512-170.tfrecords')\n val_record_file = os.path.join(reid_data_dir, 'val-512-170.tfrecords')\n\n train_images, train_labels = read_records(train_record_file, _HEIGHT, _WIDTH, type='normalization')\n train_images_batch, train_labels_batch = get_batch_images(train_images, train_labels,\n batch_size=batch_size, labels_nums=labels_nums,\n one_hot=True, shuffle=True)\n print(\"reid2222222\", train_images_batch.shape, train_labels_batch.shape)\n val_images, val_labels = read_records(val_record_file, _HEIGHT, _WIDTH, type='normalization')\n val_images_batch, val_labels_batch = get_batch_images(val_images, val_labels,\n batch_size=batch_size, labels_nums=labels_nums,\n one_hot=True, shuffle=False)\n images_reid = train_images_batch\n label_reid = train_labels_batch\n # if is_training:\n # images_reid = train_images_batch\n # label_reid = train_labels_batch\n # else:\n # images_reid = val_images_batch\n # label_reid = val_labels_batch\n iterator = dataset_seg.make_one_shot_iterator()\n images_seg, label_seg = iterator.get_next()\n\n images = {\"seg\": images_seg, \"reid\": images_reid}\n labels = {\"seg\": label_seg, \"reid\": label_reid}\n\n # labels_seg_reid = tf.zeros(shape=[batch_size, labels_nums], dtype=tf.int32)\n # labels_reid_seg = tf.zeros(shape=[batch_size, 512, 170, 1], dtype=tf.int32)\n\n # images = tf.concat([images_seg, images_reid], 0)\n # labels_seg_all = tf.concat([label_seg, labels_reid_seg], 0)\n # labels_reid_all = tf.concat([labels_seg_reid, label_reid], 0)\n # labels = {\"seg\": labels_seg_all, \"reid\": labels_reid_all}\n # batch_out= 1\n\n return images, labels", "def training_step(self, batch):\n return {}", "def process_sample_train(self):\n raise NotImplementedError", "def train(self, num_batches: int):", "def run_train_step(self, sess, batch):\n feed_dict = self._make_feed_dict(batch)\n to_return = {\n 'train_op': self._train_op,\n 'summaries': self._summaries,\n 'loss': self._loss,\n 'logits': self._logits,\n 'global_step': self.global_step,\n }\n\n return sess.run(to_return, feed_dict)", "def on_train_batch_end(\n self,\n trainer: 'pl.Trainer',\n pl_module: 'pl.LightningModule',\n outputs: Optional[STEP_OUTPUT],\n batch: Any,\n batch_idx: int,\n dataloader_idx: int,\n ) -> None:\n if trainer.global_step % self.every_n_steps == 0:\n if self.multi_optim:\n x = outputs[0]['x']\n xrec = outputs[0]['xrec']\n 
else:\n x = outputs['x']\n xrec = outputs['xrec']\n\n x_grid = torchvision.utils.make_grid(\n tensor=x,\n nrow=self.nrow,\n padding=self.padding,\n normalize=self.normalize,\n value_range=self.norm_range,\n scale_each=self.scale_each,\n pad_value=self.pad_value,\n ) \n xrec_grid = torchvision.utils.make_grid(\n tensor=xrec,\n nrow=self.nrow,\n padding=self.padding,\n normalize=self.normalize,\n value_range=self.norm_range,\n scale_each=self.scale_each,\n pad_value=self.pad_value,\n ) \n if self.use_wandb:\n trainer.logger.experiment.log({\n \"train/input\": wandb.Image(x_grid),\n \"train/reconstruction\": wandb.Image(xrec_grid), \n \"global_step\": trainer.global_step\n })\n else: \n x_title = \"train/input\"\n trainer.logger.experiment.add_image(x_title, x_grid, global_step=trainer.global_step)\n xrec_title = \"train/reconstruction\"\n trainer.logger.experiment.add_image(xrec_title, xrec_grid, global_step=trainer.global_step)", "def _train_batch(self, review_fwd, review_bwd, summary):\n # feed in the data for forward model\n feed_dict_fwd = {self.enc_inp_fwd[t]: review_fwd[t] for t in range(self.seq_length)}\n feed_dict_fwd.update({self.labels[t]: summary[t] for t in range(self.seq_length)})\n\n # feed in the data for the backward model\n feed_dict_bwd = {self.enc_inp_bwd[t]: review_bwd[t] for t in range(self.seq_length)}\n feed_dict_bwd.update({self.labels[t]: summary[t] for t in range(self.seq_length)})\n\n # train forward model\n print 'Forward Batch Training.......'\n _, loss_t_forward = self.sess.run([self.train_op_fwd, self.loss_fwd], feed_dict_fwd)\n\n # train backward model\n print 'Backward Batch Training.......'\n _, loss_t_backward = self.sess.run([self.train_op_bwd, self.loss_bwd], feed_dict_bwd)\n\n return loss_t_forward, loss_t_backward", "def setup(self, stage: Optional[str] = None):\n if stage in (None, 'fit'):\n # Get a 20% of the train data for validation in a stratified way.\n _x = [i[1] for i in self.splits['train']]\n _y = [i[0] for i in self.splits['train']]\n\n _train_x, _val_x, _train_y, _val_y = train_test_split(_x, _y, test_size=0.2,\n stratify=_y)\n #print(np.unique(_train_y, return_counts=True))\n #print(np.unique(_val_y, return_counts=True))\n\n self.splits['train'] = [[i, j] for i,j in zip(_train_y, _train_x)]\n self.splits['valid'] = [[i, j] for i,j in zip(_val_y, _val_x)]\n\n self.datasets['train'] = FewShotDataset(self.splits['train'], self.ops)\n self.datasets['valid'] = FewShotDataset(self.splits['valid'], self.ops)\n\n if stage in (None, 'test'):\n self.datasets['test'] = FewShotDataset(self.splits['test'], self.ops)", "def train(self, data_source, batch_size=4096):\n self.mean, self.std_dev = stats_batchwise(data_source, batch_size)", "def train_batch(self, data, num_iteration, verbose=False):\n self.train(data, num_iteration, random_order=False, verbose=verbose)", "def set_batch_data():\r\n if not os.path.exists(filepath):\r\n download_data()\r\n for n in range(0,6):\r\n d = read(filepath + flist[n])\r\n metadata = read(filepath + flist[-1])\r\n ndata = metadata['num_cases_per_batch']\r\n ndim = metadata['num_vis']\r\n\r\n data, trts = {}, {}\r\n data['labels'] = metadata['label_names']\r\n data['ntraindata'] = metadata['num_cases_per_batch'] * (len(flist) - 2)\r\n data['ntestdata'] = metadata['num_cases_per_batch']\r\n data['ndim'] = metadata['num_vis']\r\n trts['x'], trts['y'] = d['data'], d['labels']\r\n trtsflag = ['train', 'train', 'train', 'train', 'train', 'test']\r\n\r\n data['flag'] = trtsflag[n]\r\n data[trtsflag[n]] = trts\r\n 
save_pkl(data, savename=flist[n]+'.pkl')", "def on_validation_batch_end(\n self,\n trainer: 'pl.Trainer',\n pl_module: 'pl.LightningModule',\n outputs: Optional[STEP_OUTPUT],\n batch: Any,\n batch_idx: int,\n dataloader_idx: int,\n ) -> None:\n if trainer.global_step % self.every_n_steps == 0:\n if self.multi_optim:\n x = outputs[0]['x']\n xrec = outputs[0]['xrec']\n else:\n x = outputs['x']\n xrec = outputs['xrec']\n x_grid = torchvision.utils.make_grid(\n tensor=x,\n nrow=self.nrow,\n padding=self.padding,\n normalize=self.normalize,\n value_range=self.norm_range,\n scale_each=self.scale_each,\n pad_value=self.pad_value,\n ) \n xrec_grid = torchvision.utils.make_grid(\n tensor=xrec,\n nrow=self.nrow,\n padding=self.padding,\n normalize=self.normalize,\n value_range=self.norm_range,\n scale_each=self.scale_each,\n pad_value=self.pad_value,\n ) \n if self.use_wandb:\n trainer.logger.experiment.log({\n \"val/input\": wandb.Image(x_grid),\n \"val/reconstruction\": wandb.Image(xrec_grid), \n \"global_step\": trainer.global_step\n })\n else: \n x_title = \"val/input\"\n trainer.logger.experiment.add_image(x_title, x_grid, global_step=trainer.global_step)\n xrec_title = \"val/reconstruction\"\n trainer.logger.experiment.add_image(xrec_title, xrec_grid, global_step=trainer.global_step)", "def make_batch(self, batch_size):\n filenames = self.get_filenames()\n if self.mode == tf.estimator.ModeKeys.PREDICT and self.imagenet_train_predict_partial:\n # Sort and shuffle with seed to randomize deterministically.\n random.seed(self.imagenet_train_predict_shuffle_seed)\n random.shuffle(filenames)\n dataset = tf.contrib.data.TFRecordDataset(filenames)\n\n # Parse records.\n dataset = dataset.map(self.parser,\n num_threads=batch_size,\n output_buffer_size=2 * batch_size)\n\n # If training, shuffle and repeat indefinitely.\n if self.mode == tf.estimator.ModeKeys.TRAIN:\n dataset = dataset.shuffle(buffer_size=50000 + 3 * batch_size)\n dataset = dataset.repeat(-1)\n elif self.mode == tf.estimator.ModeKeys.PREDICT:\n if self.predict_split == 'train':\n if self.imagenet_train_predict_partial:\n MAX_EXAMPLES = 50000\n # Skip to start at a random spot in the first TFRecord.\n random.seed(self.imagenet_train_predict_shuffle_seed)\n skip_examples = random.randint(0, 1251)\n dataset = dataset.skip(skip_examples)\n # Continue shuffling amongst at least as many examples\n # as it could see in 3 cross validations.\n dataset.shuffle(buffer_size=3 * MAX_EXAMPLES,\n seed=self.imagenet_train_predict_shuffle_seed)\n num_examples = MAX_EXAMPLES\n else:\n # Take whole training set.\n num_examples = self.num_examples_per_epoch(tf.estimator.ModeKeys.TRAIN)\n else:\n # Take whole validation set.\n num_examples = self.num_examples_per_epoch(tf.estimator.ModeKeys.EVAL)\n # Take as much of the dataset as possible that can be evenly\n # divided by batch_size.\n while True:\n if num_examples % batch_size == 0:\n break\n else:\n num_examples -= 1\n dataset = dataset.take(num_examples)\n dataset = dataset.repeat(1)\n\n # dataset = dataset.take(1000) # For fast debugging!\n else:\n dataset = dataset.repeat(1)\n\n # Batch it up.\n dataset = dataset.batch(batch_size)\n iterator = dataset.make_one_shot_iterator()\n image_batch, label_batch = iterator.get_next()\n\n return image_batch, label_batch", "def _evaluate_during_fit(self, test_loader, epoch):", "def _epoch_step(self, dataset, epoch):\n dataloader = DataLoader(dataset, batch_size=self.batch_size,\n shuffle=True, num_workers=64)\n\n num_batchs = len(dataset) // self.batch_size\n\n # 
observe the training progress\n if self.verbose:\n bar = progressbar.ProgressBar(max_value=num_batchs)\n\n running_loss = 0\n for i, sample in enumerate(dataloader):\n input_batch, label_batch = sample['lr'], sample['hr']\n # Wrap with torch Variable\n input_batch, label_batch = self._wrap_variable(input_batch,\n label_batch,\n self.use_gpu)\n # zero the grad\n self.optimizer.zero_grad()\n\n # Forward\n\n if self.model_name in ['TDAN']:\n output_batch, lrs = self.model(input_batch)\n num = input_batch.size(1)\n center = num // 2\n x = input_batch[:, center, :, :, :].unsqueeze(1).repeat(1, num, 1, 1, 1)\n loss = self.loss_fn(output_batch, label_batch) + 0.25 * self.loss_fn(lrs, x)\n else:\n output_batch = self.model(input_batch)\n loss = self.loss_fn(output_batch, label_batch)\n\n running_loss += loss.data[0]\n\n # Backward + update\n loss.backward()\n #nn.utils.clip_grad_norm(self.model.parameters(), 0.4)\n self.optimizer.step()\n\n if self.verbose:\n bar.update(i, force=True)\n\n average_loss = running_loss / num_batchs\n self.hist_loss.append(average_loss)\n if self.verbose:\n print('Epoch %5d, loss %.5f' \\\n % (epoch, average_loss))", "def train(self, data_loader):\n step = 0\n train_data, valid_data = data_loader()\n\n # Allow to call `next` builtin indefinitely.\n valid_data = iter(valid_data.repeat())\n\n for epoch in range(self.hparams.num_epochs):\n for x, y in train_data:\n\n with tf.GradientTape() as g:\n train_loss = self.loss(y, self(x))\n\n grads = g.gradient(train_loss, self.trainable_variables)\n self.optimizer.apply_gradients(zip(grads, self.trainable_variables))\n\n # Validate every 1000 training steps.\n if step % 1000 == 0:\n x, y = next(valid_data)\n valid_loss = self.loss(y, self(x))\n print(\n f\"step {step} (train_loss={train_loss} valid_loss={valid_loss})\"\n )\n step += 1\n\n print(f\"epoch {epoch} finished\")\n self.save()", "def on_eval_batch_begin(self, step, logs=None):", "def step_batch(self, x_batch, W, Y, beta, energy_batch, wx, idx_batch, rand_batch):\n raise NotImplementedError()", "def _yield_training_validation(self, batch_index):\n # print(f'requested batch with index: {batch_index}') # DEBUG\n num_identities = len(self.identities)\n num_ids_to_resample = 0\n # manage identities in a circular way \n ids_start = (batch_index*self.batch_size)%num_identities # identities' batch start\n ids_end = ((batch_index+1)*self.batch_size)%num_identities # identities' batch end\n # Manage the indetities array in a circular manner\n #batch_identities = self.identities[ids_start:ids_end] if ids_start < ids_end else self.identities[ids_start:].append(self.identities[:ids_end])\n if ids_start < ids_end:\n batch_identities = self.identities[ids_start:ids_end]\n else:\n batch_identities = self.identities[ids_start:]\n batch_identities.extend(self.identities[:ids_end])\n samples_batch = []\n labels_batch = []\n roi_batch = []\n for identity in batch_identities:\n identity_data = self.groundtruth_metadata[identity]\n # if there are images available for that identity\n if identity_data['index'] < len(identity_data['metadata']):\n # read the image and the necessary metadata\n img_info = identity_data['metadata'][identity_data['index']]\n img_path = os.path.join(self.dataset_root_path, img_info['path'])\n img = cv2.imread(img_path) # watch out for slashes (/)\n # if OpenCV is unable to read an image, it returns None\n if img is None:\n print('[DATA LOADER ERROR] cannot find image at path: ', img_path)\n # increase the index, in order to avoid this path when building subsequent 
batches with this identity\n identity_data['index'] += 1\n # sample another image from another identity to replace this one in the batch\n num_ids_to_resample += 1\n continue\n #batch.append(AgeEstimationSample(img, img_info['roi'], img_info['age'], 'BGR')) # cv2 reads as BGR\n img = img.astype('float32')\n samples_batch.append(img)\n labels_batch.append(img_info['age'])\n roi_batch.append(img_info['roi'])\n # increase the index, because another sample for that identity has been used\n identity_data['index'] += 1\n else:\n num_ids_to_resample += 1\n\n # if for some identities there weren't available images, take them from other identities\n # note that this mechanism solves also the problems arising when less than batch_size identities are available, by\n # picking multiple images from the available entities\n # the __len__ method in the data generator associated to this data loader is responsible for avoiding that this\n # method is called when less than batch_size \"fresh\" images are available\n last_taken_identity_index = ids_end \n num_samples_when_last_taken = num_ids_to_resample\n while(num_ids_to_resample > 0):\n identity = self.identities[ids_end] # remeber that slicing at previous step excludes upper limit\n identity_data = self.groundtruth_metadata[identity]\n if identity_data['index'] < len(identity_data['metadata']):\n last_taken_identity_index = ids_end\n num_samples_when_last_taken = num_ids_to_resample\n # read the image and the necessary metadata\n img_info = identity_data['metadata'][identity_data['index']]\n img_path = os.path.join(self.dataset_root_path, img_info['path'])\n img = cv2.imread(img_path) # watch out for slashes (/)\n # if the path does not exist or there are problems while reading the image\n if img is None:\n print('[DATA LOADER ERROR] cannot find image at path: ', img_path)\n # increase the index, in order to avoid this path when building subsequent batches with this identity\n identity_data['index'] += 1\n continue\n #batch.append(AgeEstimationSample(img, img_info['roi'], img_info['age'], 'BGR')) # cv2 reads as BGR\n img = img.astype('float32')\n samples_batch.append(img)\n labels_batch.append(img_info['age'])\n roi_batch.append(img_info['roi'])\n\n num_ids_to_resample -= 1\n identity_data['index'] += 1\n \n ids_end = ((ids_end+1)%num_identities)\n if ids_end == last_taken_identity_index and num_ids_to_resample == num_samples_when_last_taken and identity_data['index'] == len(identity_data['metadata']):\n raise Exception(f'No more images available, missing {num_ids_to_resample} images!')\n\n # cannot return numpy arrays since images in batch have different sizes\n return samples_batch, labels_batch, roi_batch\n #return batch", "def feed_batch(self, generated_batch, generated_labels):\n _, self.act2, _ = self.inference_net(generated_batch.cuda(self.gpu_id))\n self.g_labels = generated_labels", "def on_train_batch_end(self, step, logs=None):", "def load_data(self, batch_idx, args, mode=\"train\"):\n dat = self.dat\n if mode == \"train\":\n pfx = \"train\"\n elif mode == \"test\":\n pfx = \"test\"\n else:\n pfx = \"valid\"\n #pfx = \"train\" if train else \"valid\"\n loc = dat[\"%s_location\" % pfx] # nexamples x 3\n bsz = min(args.bsz, loc.size(0)-batch_idx)\n max_ctx_len = min(args.maxseqlen, loc[batch_idx:batch_idx+bsz, 1].max())\n self.word_ctx.resize_(max_ctx_len, bsz).zero_()\n self.answers.resize_(bsz).zero_()\n self.linenos.resize_(bsz).zero_()\n\n if args.std_feats or self.mt_loss != \"\":\n self.feats.resize_(max_ctx_len, bsz, 3).zero_()\n 
self.extr.resize_(max_ctx_len, bsz, self.extra_size).zero_()\n if args.speaker_feats:\n self.spee_feats.resize_(max_ctx_len, bsz, 2).zero_()\n\n if args.use_choices or (args.use_test_choices and mode != \"train\"):\n self.choicemask.resize_(bsz, max_ctx_len).zero_()\n\n if self.use_qidx:\n self.query_pos.resize_(bsz).fill_(-1) # assuming these always go together\n\n for b in xrange(bsz):\n ex_idx = batch_idx + b\n offset, ctx_len, self.linenos[b] = loc[ex_idx]\n capped_len = min(args.maxseqlen, ctx_len)\n answer_idx = offset + ctx_len\n self.answers[b] = dat[\"%s_data\" % pfx][answer_idx]\n\n self.word_ctx[-capped_len:, b].copy_(\n dat[\"%s_data\" % pfx][answer_idx-capped_len:answer_idx])\n if args.std_feats or self.mt_loss != \"\":\n self.feats[-capped_len:, b, 0].copy_(\n dat[\"%s_post\" % pfx][answer_idx-capped_len:answer_idx])\n self.feats[-capped_len:, b, 1].copy_(\n dat[\"%s_ner\" % pfx][answer_idx-capped_len:answer_idx])\n self.feats[-capped_len:, b, 2].copy_(\n dat[\"%s_sentence\" % pfx][answer_idx-capped_len:answer_idx])\n self.extr[-capped_len:, b, :].copy_(\n dat[\"%s_extr\" % pfx][answer_idx-capped_len:answer_idx])\n if args.speaker_feats:\n self.spee_feats[-capped_len:, b, 0].copy_(\n dat[\"%s_speech\" % pfx][answer_idx-capped_len:answer_idx])\n self.spee_feats[-capped_len:, b, 1].copy_(\n dat[\"%s_sid\" % pfx][answer_idx-capped_len:answer_idx])\n\n if args.use_choices or (args.use_test_choices and mode != \"train\"):\n bchoices = set(dat[\"%s_choices\" % pfx][ex_idx])\n [self.choicemask[b].__setitem__(jj, 1) for jj in xrange(max_ctx_len)\n if self.word_ctx[jj, b] in bchoices]\n\n if self.use_qidx:\n qpos = torch.nonzero(self.word_ctx[:, b] == self.query_idx)[0][0]\n self.query_pos[b] = qpos*bsz + b\n\n # if args.use_choices:\n # # get bsz x 2 tensor of idxs (need to transpose below to be right)\n # poss = torch.nonzero(self.word_ctx.t() == self.query_idx)\n # self.query_pos.copy_(poss[:, 1]) # 2nd col has nz col in transpose\n\n batch = {\"words\": self.word_ctx, \"answers\": self.answers}\n if args.std_feats or self.mt_loss != \"\":\n batch[\"feats\"] = self.feats\n batch[\"extr\"] = self.extr\n if args.speaker_feats:\n batch[\"spee_feats\"] = self.spee_feats\n if args.use_choices or (args.use_test_choices and mode != \"train\"):\n batch[\"choicemask\"] = self.choicemask\n if self.use_qidx:\n batch[\"qpos\"] = self.query_pos\n\n if self.mt_loss == \"idx-loss\":\n if batch_idx not in self.cache:\n targs = make_mt1_targ_idxs(batch, args.max_entities,\n args.max_mentions, self.per_idx)\n self.cache[batch_idx] = targs\n batch[\"mt1_targs\"] = self.cache[batch_idx]\n elif self.mt_loss == \"ant-loss\":\n if batch_idx not in self.cache:\n targs = make_mt2_targs(batch, args.max_entities,\n args.max_mentions, self.per_idx)\n self.cache[batch_idx] = targs\n batch[\"mt2_targs\"] = self.cache[batch_idx]\n\n return batch", "def batch_inputs(dataset, batch_size, train, semantic_level_settings, num_preprocess_threads=16):\n # Force all input processing onto CPU in order to reserve the GPU for the forward and backward.\n with tf.device('/cpu:0'):\n with tf.name_scope('batch_processing'):\n data_files = dataset.data_files()\n if data_files is None:\n raise ValueError('No data files found for this dataset')\n\n examples_per_shard = 1024\n # Create filename_queue\n if train:\n filename_queue = tf.train.string_input_producer(data_files, shuffle=True, capacity=16)\n input_queue_memory_factor = 16\n num_readers = 4\n else:\n filename_queue = tf.train.string_input_producer(data_files, 
shuffle=False, capacity=1)\n input_queue_memory_factor = 1\n num_readers = 1\n if num_preprocess_threads % 4:\n raise ValueError('Please make num_preprocess_threads a multiple '\n 'of 4 (%d % 4 != 0).', num_preprocess_threads)\n\n min_queue_examples = examples_per_shard * input_queue_memory_factor\n if train:\n examples_queue = tf.RandomShuffleQueue(\n capacity=min_queue_examples + 3 * batch_size,\n min_after_dequeue=min_queue_examples,\n dtypes=[tf.string])\n # Create multiple readers to populate the queue of examples.\n enqueue_ops = []\n for _ in range(num_readers):\n reader = dataset.reader()\n _, value = reader.read(filename_queue)\n enqueue_ops.append(examples_queue.enqueue([value]))\n\n tf.train.queue_runner.add_queue_runner(\n tf.train.queue_runner.QueueRunner(examples_queue, enqueue_ops))\n example_serialized = examples_queue.dequeue()\n else:\n examples_queue = tf.FIFOQueue(\n capacity=examples_per_shard + 3 * batch_size,\n dtypes=[tf.string])\n # Create multiple readers to populate the queue of examples.\n reader = dataset.reader()\n _, example_serialized = reader.read(filename_queue)\n\n images_and_labels = []\n for thread_id in range(num_preprocess_threads):\n # Parse a serialized Example proto to extract the image and metadata.\n image_buffer, labels, filename = parse_example_proto(example_serialized,\n semantic_level_settings)\n image = decode_jpeg(image_buffer)\n if train:\n image = distort_image(image, dataset.height, dataset.width, thread_id)\n else:\n image = eval_image(image, dataset.height, dataset.width)\n\n # Finally, rescale to [-1,1] instead of [0, 1)\n image = tf.subtract(image, 0.5)\n image = tf.multiply(image, 2.0)\n images_and_labels.append([image, filename] + labels)\n\n batch_data = tf.train.batch_join(\n images_and_labels,\n batch_size=batch_size,\n capacity=2 * num_preprocess_threads * batch_size)\n\n # Get image data, filenames, level_labels separately.\n images = batch_data[0]\n images = tf.cast(images, tf.float32)\n images = tf.reshape(images, shape=[batch_size, dataset.height, dataset.width, 3])\n\n filenames = tf.reshape(batch_data[1], [batch_size])\n level_labels = {}\n for idx, settings in enumerate(semantic_level_settings):\n level_labels[settings[0]] = tf.reshape(batch_data[2 + idx], [batch_size, -1])\n\n return (images, level_labels, filenames)", "def _main(\n get_data: callable,\n EPOCHS: int = 10,\n PERIOD: int = 5,\n BATCH_SIZE: int = 256,\n LR: float = 1e-5,\n NEURONS: list = [128, 128],\n forecast: bool = False,\n tuning: bool = True,\n) -> None:\n @tf.function\n def train_step(x, y):\n with tf.GradientTape() as tape:\n pred = model(x)\n loss = loss_object(y, pred)\n grad = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(zip(grad, model.trainable_variables))\n train_loss.update_state(loss)\n train_accuracy.update_state(y, pred)\n\n\n @tf.function\n def test_step(x, y):\n # Test and validation step have the same operation.\n pred = model(x)\n loss = loss_object(y, pred)\n dev_loss.update_state(loss)\n dev_accuracy.update_state(y, pred)\n\n print(\"Reading data...\")\n X_train, X_dev, y_train, y_dev, X_test = get_data()\n print(\"X_train@{}, X_dev@{}\".format(X_train.shape, X_dev.shape))\n train_ds = tf.data.Dataset.from_tensor_slices(\n (X_train, y_train)).shuffle(int(1e6)).batch(BATCH_SIZE)\n\n dev_ds = tf.data.Dataset.from_tensor_slices(\n (X_dev, y_dev)).batch(BATCH_SIZE)\n\n num_fea = X_train.shape[1]\n model = NN(num_neurons=NEURONS)\n\n loss_object = tf.keras.losses.BinaryCrossentropy(from_logits=False)\n 
optimizer = tf.keras.optimizers.Adam(learning_rate=LR)\n\n train_loss = tf.keras.metrics.Mean(name=\"train_loss\")\n train_accuracy = tf.keras.metrics.BinaryAccuracy(\n name=\"train_accuracy\")\n\n dev_loss = tf.keras.metrics.Mean(name=\"dev_loss\")\n dev_accuracy = tf.keras.metrics.BinaryAccuracy(\n name=\"dev_accuracy\")\n\n trace = {\"train\": [], \"val\": []}\n for epoch in range(EPOCHS):\n train_loss.reset_states()\n train_accuracy.reset_states()\n dev_loss.reset_states()\n dev_accuracy.reset_states()\n # Loop over batches.\n for x, y in train_ds:\n # x @ (batch_size, num_features)\n # y @ (batch_size, 1) --> probit\n train_step(x, y)\n\n for t_x, t_y in dev_ds:\n test_step(t_x, t_y)\n\n if (epoch+1) % PERIOD == 0:\n report = \"Epoch {:d}, Loss: {:0.6f}, Accuracy: {:0.6f}, Validation Loss: {:0.6f}, Validation Accuracy: {:0.6f}\"\n print(report.format(\n epoch+1,\n train_loss.result(),\n train_accuracy.result()*100,\n dev_loss.result(),\n dev_accuracy.result()*100))\n\n # Record loss\n trace[\"train\"].append(train_loss.result())\n trace[\"val\"].append(dev_loss.result())\n\n # AUC\n pred_train = model(X_train).numpy()\n pred_dev = model(X_dev).numpy()\n\n auc_train = metrics.roc_auc_score(y_true=y_train, y_score=pred_train)\n auc_dev = metrics.roc_auc_score(y_true=y_dev, y_score=pred_dev)\n\n print(\"AUC on Training Set: {: 0.6f}\".format(auc_train))\n print(\"AUC on Developing Set: {: 0.6f}\".format(auc_dev))\n\n if forecast:\n pred = model(X_test)\n return pred.numpy()\n if tuning:\n return {\n \"EPOCHS\": EPOCHS,\n \"BATCH_SIZE\": BATCH_SIZE,\n \"LR\": LR,\n \"NEURONS\": NEURONS,\n \"AUC_TRAIN\": auc_train,\n \"AUC_DEV\": auc_dev,\n \"LOSS_TRAIN\": train_loss.result().numpy(),\n \"LOSS_DEV\": dev_loss.result().numpy(),\n \"ACCURACY_TRAIN\": train_accuracy.result().numpy(),\n \"ACCURACY_DEV\": dev_accuracy.result().numpy(),\n }\n\n plt.plot(np.log(trace[\"train\"]))\n plt.plot(np.log(trace[\"val\"]))\n plt.xlabel(\"Epochs\")\n plt.ylabel(\"Log Cross Entropy Loss\")\n plt.legend([\"Training\", \"Validation\"])\n plt.title(\"LR={}, AUC_train={:0.3f}, AUC_dev={:0.3f}\".format(LR, auc_train, auc_dev))\n plt.show()", "def run_data (arguments):\n if arguments.define_labels:\n data.define_labels()\n elif arguments.preprocess:\n # Preprocess from data_raw --> data_preprocessed\n data.preprocess()\n elif arguments.annotate:\n # Annotate from data_preprocessed --> data_annotated\n reverse = False # DEBUG\n annotator.annotate(reverse)\n elif arguments.split:\n # Split from data_annotated --> train.txt/valid.txt\n restrict = 100 # Default: Keep 100% of all files\n splitter.train_valid(restrict_to=restrict)", "def process_batch(self, data):\n [embedding_batch] = self._sess.run([self._embedding_tensor],\n feed_dict={self._features_tensor: data})\n return embedding_batch", "def _process_single(data):\n \n # Read data from tfrecord\n frame = open_dataset.Frame()\n frame.ParseFromString(bytearray(data.numpy()))\n scan_name = frame.context.name.replace('_','X') + 'FRAMENUM{}'.format(str(uuid4())) \n # process frame into data format we want\n # mesh_vertices, instance_labels, semantic_labels, instance_bboxes = prep_data(frame, 150000)\n\n for result in prep_data(frame, 150000):\n tag, data = result\n mesh_vertices, instance_labels, semantic_labels, instance_bboxes = data \n scan_name = frame.context.name.replace('_','X') + 'FRAMENUM{}'.format(str(uuid4())) \n if tag != 'FRUSTUM':\n FILENAME_TEMPLATE = BASE_OUTPUT_DIR + DEFAULT_OUTPUT_FILE_TEMPLATE\n ## Write mesh verticies\n with 
open(FILENAME_TEMPLATE.format(scan_name=scan_name, data_type=\"vert\"), 'wb+') as f:\n np.save(f, mesh_vertices)\n \n ## Write instance labels\n with open(FILENAME_TEMPLATE.format(scan_name=scan_name, data_type=\"ins_label\"), 'wb+') as f:\n np.save(f, instance_labels)\n \n ## Write semantic labels\n with open(FILENAME_TEMPLATE.format(scan_name=scan_name, data_type=\"sem_label\"), 'wb+') as f:\n np.save(f, semantic_labels)\n \n ## Write instance_bboxes labels\n with open(FILENAME_TEMPLATE.format(scan_name=scan_name, data_type=\"bbox\"), 'wb+') as f:\n np.save(f, instance_bboxes)\n else:\n FILENAME_TEMPLATE = BASE_OUTPUT_DIR + FRUSTUM_OUTPUT_FILE_TEMPLATE\n ## Write mesh verticies\n with open(FILENAME_TEMPLATE.format(scan_name=scan_name, data_type=\"vert\"), 'wb+') as f:\n np.save(f, mesh_vertices)\n \n ## Write instance labels\n with open(FILENAME_TEMPLATE.format(scan_name=scan_name, data_type=\"ins_label\"), 'wb+') as f:\n np.save(f, instance_labels)\n \n ## Write semantic labels\n with open(FILENAME_TEMPLATE.format(scan_name=scan_name, data_type=\"sem_label\"), 'wb+') as f:\n np.save(f, semantic_labels)\n \n ## Write instance_bboxes labels\n with open(FILENAME_TEMPLATE.format(scan_name=scan_name, data_type=\"bbox\"), 'wb+') as f:\n np.save(f, instance_bboxes)", "def train_step(self, batch, generator):\n ##\n # Split into inputs and outputs\n ##\n\n input_frames = batch[:, :, :, :-3]\n gt_output_frames = batch[:, :, :, -3:]\n\n ##\n # Train\n ##\n\n feed_dict = self.build_feed_dict(input_frames, gt_output_frames, generator)\n\n _, global_loss, global_step, summaries = self.sess.run(\n [self.train_op, self.global_loss, self.global_step, self.summaries],\n feed_dict=feed_dict)\n\n ##\n # User output\n ##\n\n if global_step % c.STATS_FREQ == 0:\n print 'DiscriminatorModel: step %d | global loss: %f' % (global_step, global_loss)\n if global_step % c.SUMMARY_FREQ == 0:\n print 'DiscriminatorModel: saved summaries'\n self.summary_writer.add_summary(summaries, global_step)\n\n return global_step", "def process(self, data_batch: Any, predictions: Sequence[dict]) -> None:\n self.results.extend(_to_cpu(predictions))", "def _train_epoch(self, train_batches, dropout_keep_prob, data, batch_size, save_dir, save_prefix):\n pad_id = self.vocab.get_id(self.vocab.pad_token)\n total_num, total_loss = 0, 0\n log_every_n_batch, n_batch_loss = 50, 0\n eval_every_n_batch = (len(data.train_set) - 1) / (8 * batch_size)\n for bitx, batch in enumerate(train_batches, 1): \n feed_dict = {self.p: batch['passage_token_ids'],\n self.q: batch['question_token_ids'],\n self.pc: batch['passage_char_ids'],\n self.qc: batch['question_char_ids'],\n self.p_em: batch['passage_em'],\n self.p_pos: batch['passage_pos'],\n self.q_pos: batch['question_pos'],\n self.p_length: batch['passage_length'],\n self.q_length: batch['question_length'],\n self.start_label: batch['start_id'],\n self.end_label: batch['end_id'],\n self.pr: batch['passage_rank'],\n self.dropout_keep_prob: dropout_keep_prob}\n\n _, loss = self.sess.run([self.train_op, self.loss], \n feed_dict=feed_dict)\n\n total_loss += loss * len(batch['raw_data'])\n total_num += len(batch['raw_data'])\n n_batch_loss += loss\n if log_every_n_batch > 0 and bitx % log_every_n_batch == 0:\n self.logger.info('Average loss from batch {} to {} is {}'.format(\n bitx - log_every_n_batch + 1, bitx, n_batch_loss / log_every_n_batch))\n n_batch_loss = 0\n \n if eval_every_n_batch > 0 and bitx % eval_every_n_batch == 0:\n self.logger.info('Evaluating the model ...')\n if data.dev_set is not 
None:\n eval_batches = data.gen_mini_batches('dev', batch_size, pad_id, shuffle=False)\n eval_loss, bleu_rouge = self.evaluate(eval_batches)\n self.logger.info('Dev eval loss {}'.format(eval_loss))\n self.logger.info('Dev eval result: {}'.format(bleu_rouge))\n\n if bleu_rouge['ROUGE-L'] > self.max_rouge_l:\n self.save(save_dir, save_prefix)\n self.max_rouge_l = bleu_rouge['ROUGE-L']\n else:\n self.logger.warning('No dev set is loaded for evaluation in the dataset!')\n\n return 1.0 * total_loss / total_num", "def __init__(self,\n batch_size,\n min_num_context,\n max_num_context,\n data,\n num_inst,\n testing=False):\n self._batch_size = batch_size\n self._min_num_context = min_num_context\n self._max_num_context = max_num_context\n self._data = data\n # Hardcoded for right now\n self._x_data = self._data[:,1:-1]\n self._y_data = self._data[:,-1:]\n self._testing = testing\n self._num_inst = num_inst\n self._num_pts_per_inst = tf.cast(self._data.get_shape().as_list()[0]/self._num_inst,tf.int32)\n self._x_uniq = self._x_data[:self._num_pts_per_inst]", "def run_step(self):\n self.hooked_sess.run(self.train_op)", "def predict_batch(self, model, context, data=None):\n pass", "def train(self, training_data, training_labels, validation_data, validation_labels):\n abstract", "def process_data(datapath, batch_sizes=[64, 32, 32],\n shuffles=[True, False, False]):\n train_dir = os.path.join(datapath, 'train')\n valid_dir = os.path.join(datapath, 'valid')\n test_dir = os.path.join(datapath, 'test')\n\n data_transforms = {'train': transforms.Compose([\n transforms.Resize(256),\n transforms.RandomHorizontalFlip(0.3),\n transforms.RandomRotation(30),\n transforms.RandomResizedCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([.485, .456, .406],\n [.229, .224, .225])\n ]),\n 'valid': transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([.485, .456, .406],\n [.229, .224, .225])\n ]),\n 'test': transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([.485, .456, .406],\n [.229, .224, .225])\n ])}\n\n image_dataset = {\n 'train': datasets.ImageFolder(\n train_dir, transform=data_transforms['train']\n ),\n 'valid': datasets.ImageFolder(\n valid_dir, transform=data_transforms['valid']\n ),\n 'test': datasets.ImageFolder(\n test_dir, transform=data_transforms['test']\n )\n }\n\n dataloaders = {\n 'train': torch.utils.data.DataLoader(\n image_dataset['train'],\n batch_size=batch_sizes[0],\n shuffle=shuffles[0]\n ),\n 'valid': torch.utils.data.DataLoader(\n image_dataset['valid'],\n batch_size=batch_sizes[1],\n shuffle=shuffles[1]\n ),\n 'test': torch.utils.data.DataLoader(\n image_dataset['test'],\n batch_size=batch_sizes[2],\n shuffle=shuffles[2]\n )\n }\n\n return image_dataset, dataloaders", "def on_validation_batch_end(\n self,\n trainer: 'pl.Trainer',\n pl_module: 'pl.LightningModule',\n outputs: Optional[STEP_OUTPUT],\n batch: Any,\n batch_idx: int,\n dataloader_idx: int,\n ) -> None:\n if trainer.global_step % self.every_n_steps == 0: \n text, x = batch\n sample_text = text[:1]\n token_list = sample_text.masked_select(sample_text != 0).tolist()\n decoded_text = self.tokenizer.decode(token_list) \n text = text.to(pl_module.device)\n x = x.to(pl_module.device) \n with torch.no_grad():\n pl_module.eval()\n #generate sample with image provided\n x_rec = pl_module.generate_images(text[:1], img = x[:1], filter_thres=0.9) # topk sampling at 0.9\n\n #generate sample 
without image\n x_gen = pl_module.generate_images(text[:1], filter_thres=0.9) # topk sampling at 0.9\n\n pl_module.train() \n\n\n x_grid = torchvision.utils.make_grid(\n tensor=x,\n nrow=self.nrow,\n padding=self.padding,\n normalize=self.normalize,\n value_range=self.norm_range,\n scale_each=self.scale_each,\n pad_value=self.pad_value,\n ) \n xrec_grid = torchvision.utils.make_grid(\n tensor=x_rec,\n nrow=self.nrow,\n padding=self.padding,\n normalize=self.normalize,\n value_range=self.norm_range,\n scale_each=self.scale_each,\n pad_value=self.pad_value,\n ) \n xgen_grid = torchvision.utils.make_grid(\n tensor=x_gen,\n nrow=self.nrow,\n padding=self.padding,\n normalize=self.normalize,\n value_range=self.norm_range,\n scale_each=self.scale_each,\n pad_value=self.pad_value,\n ) \n text_title = \"val/text\"\n trainer.logger.experiment.add_text(text_title, decoded_text, global_step=trainer.global_step)\n x_title = \"val/input\"\n trainer.logger.experiment.add_image(x_title, x_grid, global_step=trainer.global_step)\n xrec_title = \"val/half_reconstruction\"\n trainer.logger.experiment.add_image(xrec_title, xrec_grid, global_step=trainer.global_step)\n xgen_title = \"val/generation\"\n trainer.logger.experiment.add_image(xgen_title, xgen_grid, global_step=trainer.global_step)", "def __init__(self,\n batch_size,\n max_num_context,\n data,\n num_inst,\n testing=False):\n self._batch_size = batch_size\n self._max_num_context = max_num_context\n self._data = data\n self._x_data = self._data[:,1:-1]\n self._y_data = self._data[:,-1]\n self._testing = testing\n self._num_inst = num_inst\n self._num_pts_per_inst = tf.cast(self._data.get_shape().as_list()[0]/self._num_inst,tf.int32)\n self._x_uniq = self._x_data[:self._num_pts_per_inst] #tf.unique(self._x_data)", "def train_step(x_batch_content, x_batch_question, x_batch_option, y_batch):\n feed_dict = {\n tarnn.input_x_content: x_batch_content,\n tarnn.input_x_question: x_batch_question,\n tarnn.input_x_option: x_batch_option,\n tarnn.input_y: y_batch,\n tarnn.dropout_keep_prob: args.dropout_rate,\n tarnn.is_training: True\n }\n _, step, summaries, loss = sess.run(\n [train_op, tarnn.global_step, train_summary_op, tarnn.loss], feed_dict)\n logger.info(\"step {0}: loss {1:g}\".format(step, loss))\n train_summary_writer.add_summary(summaries, step)", "def process(self, data_batch: Sequence[Dict],\n data_samples: Sequence[Dict]) -> None:\n for data_sample in data_samples:\n pred_labels = data_sample.get('pred_instances').get(self.key).cpu()\n gt_labels = data_sample.get('gt_instances').get(self.key).cpu()\n\n result = dict(\n pred_labels=pred_labels.flatten(),\n gt_labels=gt_labels.flatten())\n self.results.append(result)", "def train_step(self):\r\n batch_images = next(self.data_loader.next_batch())\r\n _, loss, summary, ea = self.sess.run([self.model.train_op, self.model.total_loss, self.model.merged, self.model.euclidean_a_p],\r\n feed_dict={self.model.input: batch_images, self.model.is_training: True})\r\n \r\n return loss, summary", "def val_step(self, data_batch, **kwargs):\n output = self.forward_test(**data_batch, **kwargs)\n return output", "def evaluate(args, model, tokenizer, eval_dataset, eval_dataloader, task_name, model_type, split, step):\n model.eval()\n processor = MoralStoriesProcessor()\n results = dict()\n softmax = torch.nn.Softmax(dim=1)\n\n # Eval!\n logger.info('***** Running evaluation on the validation / test set *****')\n logger.info(' Num examples = %d', len(eval_dataset))\n logger.info(' Batch size = %d', 
args.eval_batch_size)\n batch_losses = list()\n eval_loss = 0.0\n micro_loss, macro_loss = 0.0, 0.0\n num_batches, num_tokens = 0, 0\n preds = None\n soft_preds = None\n out_label_ids = None\n # Perform a single evaluation step\n for batch in tqdm(eval_dataloader, desc='Evaluating', mininterval=10, ncols=100):\n batch = tuple(t.to(args.device) for t in batch)\n with torch.no_grad():\n if 'gen' not in task_name:\n inputs = {'input_ids': batch[0],\n 'attention_mask': batch[1],\n 'token_type_ids': batch[2] if model_type == 'bert' else None,\n 'labels': batch[3]}\n else:\n inputs = {'input_ids': batch[0],\n 'attention_mask': batch[1],\n 'labels': batch[3]}\n if 'gpt2' not in model_type:\n # Prepare decoder inputs and labels for enc-dec models\n inputs['labels'] = batch[3][:, 1:].contiguous() # shift\n decoder_input_ids = batch[3][:, :-1].clone() # shift\n decoder_input_ids[decoder_input_ids == -100] = tokenizer.pad_token_id # remove masking\n inputs['decoder_input_ids'] = decoder_input_ids.contiguous()\n\n outputs = model(**inputs)\n\n tmp_eval_loss, logits = outputs[:2]\n soft_logits = softmax(logits)\n eval_loss += tmp_eval_loss.mean().item()\n batch_losses.append(tmp_eval_loss.item())\n\n if 'gen' not in task_name:\n if preds is None:\n preds = logits.detach().cpu().numpy()\n soft_preds = soft_logits.detach().cpu().numpy()\n out_label_ids = inputs['labels'].detach().cpu().numpy()\n else:\n preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)\n soft_preds = np.append(soft_preds, soft_logits.detach().cpu().numpy(), axis=0)\n out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)\n else:\n # Obtain per-token loss for perplexity computation\n batch_loss = get_token_loss(args, logits, batch[3], batch[4], model_type=model_type)\n macro_loss += batch_loss.mean().item()\n micro_loss += batch_loss.sum().item()\n num_batches += 1\n num_tokens += batch_loss.view(-1).shape[0]\n\n # Compute and update evaluation metric values\n if 'gen' not in task_name:\n # Isolate model predictions\n preds = np.argmax(preds, axis=1)\n soft_preds = soft_preds.tolist()\n curr_result = compute_cls_metrics(preds, out_label_ids)\n else:\n macro_perplexity = torch.exp(torch.tensor(macro_loss / num_batches)).item()\n micro_perplexity = torch.exp(torch.tensor(micro_loss / num_tokens)).item()\n curr_result = {'macro_perplexity': macro_perplexity,\n 'micro_perplexity': micro_perplexity}\n\n if len(results.keys()) == 0:\n for k, v in curr_result.items():\n results[k] = [v]\n else:\n for k, v in curr_result.items():\n results[k].append(v)\n\n # Log metrics\n output_eval_file = os.path.join(args.output_dir, 'results_{}_{}.txt'.format(task_name, split))\n with open(output_eval_file, 'a') as writer:\n logger.info('***** Eval results *****')\n writer.write('STEP: {:s}\\n'.format(str(step)))\n for key in sorted(curr_result.keys()):\n logger.info(' %s = %s', key, str(curr_result[key]))\n writer.write('%s = %s\\n' % (key, str(curr_result[key])))\n\n # Log predictions\n if 'gen' not in task_name:\n output_pred_file = \\\n os.path.join(args.output_dir, 'predictions_{}_{}_{}.lst'.format(task_name, split, step))\n with open(output_pred_file, 'w') as writer:\n logger.info('***** Write predictions *****')\n for pred in preds:\n writer.write('{}\\n'.format(processor.get_labels()[pred]))\n\n # Maintain a single metrics file\n if os.path.exists(args.output_dir):\n with open(os.path.join(args.output_dir, 'metrics_{}_{}.json'.format(task_name, split)), 'w') as f:\n f.write(json.dumps(results))\n 
f.close()\n\n # Report mean dev loss\n mean_eval_loss = eval_loss / len(eval_dataloader)\n logging.info('\\n' + '*' * 10)\n logging.info('Mean development loss: {:.4f}'.format(mean_eval_loss))\n logging.info('*' * 10 + '\\n')\n\n return results, mean_eval_loss, preds, soft_preds", "def train(self, training_data, cfg, **kwargs):\n pass", "def train(self, data_dict, label_dict):\n loaders = self.init_loaders(data_dict, label_dict)\n best_performance = 1e18\n loss_dict = self.init_loss_dict()\n performance_dict = self.init_performance_dict()\n\n for epoch in range(self.config_dict[\"num_epochs\"]):\n print(\"Epoch {}/{}\".format(epoch, self.config_dict[\"num_epochs\"] - 1))\n print(\"-\" * 10)\n\n if self.scheduler is not None:\n self.scheduler.step()\n\n for phase in [\"train\", \"val\"]:\n self.model.train(phase == \"train\")\n running_loss_dict = self.init_running_loss_dict(\n list(loss_dict[phase].keys())\n )\n output_dict = self.init_output_dict()\n i = 0\n for the_data in loaders[phase]:\n i += 1\n batch_loss_dict = {}\n inputs, labels = self.transform_batch(the_data)\n\n # zero parameter gradients\n self.optimizer.zero_grad()\n\n # forward\n outputs = self.model(inputs)\n\n output_dict = self.update_output_dict(output_dict, outputs, labels)\n\n batch_loss_dict[\"loss\"] = self.criterion(outputs, labels)\n if phase == \"train\":\n batch_loss_dict[\"loss\"].backward()\n self.optimizer.step()\n\n for key in batch_loss_dict.keys():\n running_loss_dict[key] += batch_loss_dict[key].item()\n\n # Compute epoch losses and update loss dict\n epoch_loss_dict = {\n key: running_loss_dict[key] / i for key in running_loss_dict.keys()\n }\n loss_dict[phase] = self.update_metric_dict(\n loss_dict[phase], epoch_loss_dict\n )\n\n # Compute epoch performance and update performance dict\n epoch_statistics = self.compute_epoch_statistics(output_dict)\n performance_dict[phase] = self.update_metric_dict(\n performance_dict[phase], epoch_statistics\n )\n\n print(\"Phase: {}:\".format(phase))\n self.print_metric_dict(epoch_loss_dict)\n self.print_metric_dict(epoch_statistics)\n\n if phase == \"val\":\n best_model_condition = epoch_loss_dict[\"loss\"] < best_performance\n if best_model_condition:\n print(\"Best model updated\")\n best_performance = epoch_loss_dict[\"loss\"]\n best_model_wts = copy.deepcopy(self.model.state_dict())\n\n print(\"Best val performance: {:4f}\".format(best_performance))\n self.model.load_state_dict(best_model_wts)\n result_dict = {\n phase: {**performance_dict[phase], **loss_dict[phase]}\n for phase in performance_dict.keys()\n }\n return result_dict", "def run_valid(self, dataset, attribution, logger, other=None):\n shuffle = self.model_config['shuffle'] if 'shuffle' in self.model_config else True\n\n model_class = self.model_config.model\n optim_class = self.model_config.optimizer\n stopper_class = self.model_config.early_stopper\n clipping = self.model_config.gradient_clipping\n\n loss_fn = get_loss_func(self.dataset_config['task_type'], self.model_config.exp_name)\n shuffle = self.model_config['shuffle'] if 'shuffle' in self.model_config else True\n\n train_loader, valid_loader, features_scaler, scaler = dataset.get_train_loader(self.model_config['batch_size'],\n shuffle=shuffle)\n\n model = model_class(dim_features=dataset.dim_features, dim_target=dataset.dim_target, model_configs=self.model_config, dataset_configs=self.dataset_config)\n net = ExplainerNetWrapper(model, attribution, dataset_configs=self.dataset_config, model_config=self.model_config,\n loss_function=loss_fn)\n 
optimizer = optim_class(model.parameters(),\n lr=self.model_config['learning_rate'], weight_decay=self.model_config['l2'])\n scheduler = build_lr_scheduler(optimizer, model_configs=self.model_config, num_samples=dataset.num_samples)\n\n train_loss, train_metric, val_loss, val_metric, _, _, _ = net.train(train_loader=train_loader, valid_loader=valid_loader,\n optimizer=optimizer, scheduler=scheduler,\n clipping=clipping, scaler=scaler,\n early_stopping=stopper_class,\n logger=logger)\n\n if other is not None and 'model_path' in other.keys():\n save_checkpoint(path=other['model_path'], model=model, scaler=scaler)\n\n return train_metric, val_metric", "def run_step(self):\n assert self.model.training, \"[SimpleTrainer] model was changed to eval mode!\"\n start = time.perf_counter()\n \"\"\"\n If your want to do something with the data, you can wrap the dataloader.\n \"\"\"\n data = next(self._data_loader_iter)\n data_time = time.perf_counter() - start\n\n \"\"\"\n If your want to do something with the losses, you can wrap the model.\n \"\"\"\n loss_dict = self.model(data)\n losses = sum(loss for loss in loss_dict.values())\n self._detect_anomaly(losses, loss_dict)\n\n metrics_dict = loss_dict\n metrics_dict[\"data_time\"] = data_time\n self._write_metrics(metrics_dict)\n \n validation_data = next(self.validation_data_loader_iter)\n val_losses_dict = self.model(validation_data)\n val_losses = sum(loss for loss in val_losses_dict.values())\n self._detect_anomaly(val_losses, val_losses_dict)\n\n val_metrics_dict = val_losses_dict\n val_metrics_dict[\"data_time\"] = data_time\n self._write_validation_metrics(val_metrics_dict)\n\n \"\"\"\n If you need accumulate gradients or something similar, you can\n wrap the optimizer with your custom `zero_grad()` method.\n \"\"\"\n self.optimizer.zero_grad()\n losses.backward()\n\n \"\"\"\n If you need gradient clipping/scaling or other processing, you can\n wrap the optimizer with your custom `step()` method.\n \"\"\"\n self.optimizer.step()", "def prepare_data(self, train_data, **kwargs):\n data_len = len(train_data[\"done\"])\n for index in range(data_len):\n if self.multi_step == 1:\n self.buff.add(train_data[\"cur_state\"][index],\n train_data[\"action\"][index],\n train_data[\"reward\"][index],\n train_data[\"next_state\"][index],\n float(train_data[\"done\"][index])) # Add replay buffer", "def train(self, dataset) -> None:\n raise NotImplementedError()", "def inputs_balanced(batch_size, fake_data=False, one_hot=False, dtype=tf.float32, eval_data=False):\n class DataSets(object):\n pass\n data_sets = DataSets()\n if fake_data:\n def fake():\n return DataSetBalanced([], [], batch_size, fake_data=True, one_hot=one_hot, dtype=dtype, eval_data=eval_data)\n data_sets.train = fake()\n data_sets.validation = fake()\n data_sets.test = fake()\n return data_sets\n\n #testing = dict()\n validation = dict()\n training = dict()\n validation_labels = dict()\n #testing_labels = dict()\n training_labels = dict()\n if USE_MULTIPLE_FILES:\n validation, validation_labels = create_data_set(VALIDATION_FILE_LOCATION, eval_data)\n if not eval_data:\n training, training_labels = create_data_set(FILE_LOCATION, eval_data)\n #### HACK: I needed to do this so there would be some strange eosinophil in the validation data ####\n validation['strange_eosinophils'] = training['strange_eosinophils'][0:10]\n validation_labels['strange_eosinophils'] = training_labels['strange_eosinophils'][0:10]\n training['strange_eosinophils'] = training['strange_eosinophils'][10:]\n 
training_labels['strange_eosinophils'] = training_labels['strange_eosinophils'][10:]\n else:\n VALIDATION_SIZE = 20\n #TESTING_SIZE = 1\n data_examples = np.load(os.path.join(DATA_LOCATION, FILE_LOCATION))\n for name in cell_names:\n print(\"data_examples\")\n print(name+\":\"+str(data_examples[name].shape[0]))\n for i, name in enumerate(cell_names):\n if not eval_data:\n # make the random data consistent across runs\n np.random.seed(1)\n # Shuffle the data\n perm = np.arange(data_examples[name].shape[0])\n np.random.shuffle(perm)\n randomized_data = data_examples[name][perm]\n else:\n randomized_data = data_examples[name]\n validation[name] = randomized_data[:VALIDATION_SIZE]\n #testing[name] = randomized_data[VALIDATION_SIZE:VALIDATION_SIZE+TESTING_SIZE]\n if not eval_data:\n training[name] = randomized_data[VALIDATION_SIZE:]\n #training[name] = randomized_data[VALIDATION_SIZE+TESTING_SIZE:]\n training_labels[name] = to_categorical(np.full((training[name].shape[0], 1), i, dtype=int), NUM_CLASSES)\n validation_labels[name] = to_categorical(np.full((validation[name].shape[0], 1), i, dtype=int), NUM_CLASSES)\n #testing_labels[name] = to_categorical(np.full((testing[name].shape[0], 1), i, dtype=int), NUM_CLASSES)\n\n data_sets.validation = DataSetBalanced(validation, validation_labels, batch_size, fake_data=False, one_hot=True,\n dtype=tf.uint8, eval_data=eval_data)\n #data_sets.testing = DataSetBalanced(testing, testing_labels, batch_size, fake_data=False, one_hot=True, dtype=tf.uint8, eval_data=eval_data)\n if not eval_data:\n data_sets.train = DataSetBalanced(training, training_labels, batch_size, fake_data=False, one_hot=True,\n dtype=tf.uint8, eval_data=eval_data)\n\n return data_sets", "def batch_end(self, batch_data, train_step_data):\n pass", "def on_train_batch_begin(\n self, batch: int, logs: tp.Optional[tp.Dict[str, np.ndarray]] = None\n ):\n pass", "def fit(self, data_loader):\n train_data, valid_data = data_loader.load()\n\n self.compile(self.optimizer, self.loss)\n super().fit(\n x=train_data,\n validation_data=valid_data,\n validation_steps=32, # validate 32 batches at a time\n validation_freq=1, # validate every 1 epoch\n epochs=self.hparams.num_epochs,\n shuffle=False, # dataset instances already handle shuffling\n )\n self.save()", "def training_step(self, batch, batch_nb):\n # batch\n input_ids, attention_mask, token_type_ids, labels, emph_probs = batch\n inputs = {\n 'input_ids': input_ids,\n 'attention_mask': attention_mask,\n 'labels': labels,\n }\n\n # XLM and RoBERTa don't use segment_ids\n if self.hparams.model_type != 'distilbert':\n inputs['token_type_ids'] = (\n token_type_ids if self.hparams.model_type in ['bert', 'xlnet'] else None\n )\n\n # forward and loss\n loss, _ = self.forward(**inputs)\n\n # logs\n logs = {\n 'train_loss': loss,\n 'lr': self.lr_scheduler.get_last_lr()[-1],\n }\n\n # output dict\n output = {\n 'loss': loss,\n 'progress_bar': logs,\n 'log': logs\n }\n return output", "def _Step(batch: py_utils.NestedMap):\n with tf.name_scope('tpu_train'):\n with py_utils.GradientTape(persistent=True):\n batch.Update(\n tpu_embedding_layers_v2.TPU_EMBEDDING_MANAGER.Dequeue(batch)\n )\n metrics_dict, _ = self.task.FPropDefaultTheta(batch)\n # py_utils.ComputeGradientsSimple() needs to access the tape, so BProp\n # needs to be within the GradientTape context.\n self.task.BProp()\n\n self._metrics_dict_structure = metrics_dict\n self._metrics_mgr.AccumulateStepMetrics(metrics_dict)", "def train_step(x_batch, y_batch, x_batch_lex):\n feed_dict = {\n 
cnn.input_x: x_batch,\n cnn.input_y: y_batch,\n # lexicon\n cnn.input_x_lexicon: x_batch_lex,\n cnn.dropout_keep_prob: FLAGS.dropout_keep_prob\n }\n _, step, summaries, loss, accuracy, neg_r, neg_p, f1_neg, f1_pos, avg_f1 = sess.run(\n [train_op, global_step, train_summary_op, cnn.loss, cnn.accuracy,\n cnn.neg_r, cnn.neg_p, cnn.f1_neg, cnn.f1_pos, cnn.avg_f1],\n feed_dict)\n time_str = datetime.datetime.now().isoformat()\n # print(\"{}: step {}, loss {:g}, acc {:g}\".format(time_str, step, loss, accuracy))\n #print(\"{}: step {}, loss {:g}, acc {:g}, neg_r {:g} neg_p {:g} f1_neg {:g}, f1_pos {:g}, f1 {:g}\".\n # format(time_str, step, loss, accuracy, neg_r, neg_p, f1_neg, f1_pos, avg_f1))\n train_summary_writer.add_summary(summaries, step)", "def train(self, dataset):\n if not self.model:\n self._build()\n\n samples_per_batch = self.dataset.number_of_examples_train() // self.config.batch_size\n\n # Train over multiple epochs\n with tf.Session() as sess:\n best_loss = float('inf')\n best_val_epoch = 0\n sess.run(self.init)\n\n # train until we reach the maximum number of epochs\n for epoch in range(self.config.max_epochs):\n total_training_loss = 0\n num_correct = 0\n prev_prediction = 0\n\n print(\" \")\n print('Epoch {}'.format(epoch))\n# start = time.time()\n\n for i in range(samples_per_batch):\n tr_elems, answers, i_seq_len, q_seq_len, imask = self.dataset.next_batch(self.config.batch_size)\n tr_elems, answers, imask = self.preprocess_batch(tr_elems[0], tr_elems[1], answers, imask)\n ans = np.zeros((self.config.batch_size, self.dataset.vocab_size))\n for i in np.arange(self.config.batch_size):\n ans[i][answers[i]] = 1.\n # ans[np.arange(self.config.batch_size), answers] = 1.0\n print(\"ans\", ans)\n print(\"answers\", answers)\n print(\"ans shape\", ans.shape)\n\n # For debugging:\n # Input module: _input_tensor - self.input_only_for_testing\n # Question module: _question_representation - self.question_representation\n # Episode module: _e_i - self.e_i / _e_m_s - self.episodic_memory_state\n loss, _, pred_prob, _projections = sess.run(\n [self.cost, self.optimizer, self.prediction, self.projections],\n feed_dict={self.input_placeholder: tr_elems[0],\n self.input_length_placeholder: i_seq_len,\n self.end_of_sentences_placeholder: imask,\n self.question_placeholder: tr_elems[1],\n self.question_length_placeholder: q_seq_len,\n self.labels_placeholder: ans,\n #self.gate_placeholder: [float(self.train_gate[i])]\n })\n\n total_training_loss += loss\n\n if np.argmax(pred_prob) == np.argmax(ans):\n num_correct += 1\n\n if i % self.config.update_length == 0:\n print \"Current average training loss: {}\".format(total_training_loss / (i + 1))\n print \"Current training accuracy: {}\".format(float(num_correct) / (i + 1))\n print(\"Ans: \" + str(self.dataset.ivocab[np.argmax(ans)]))\n print(\"Pred: \" + str(self.dataset.ivocab[np.argmax(pred_prob)]))", "def validation_step(self, batch, batch_idx: int):\n\n # move metric to model device\n self.val_fbeta.to(self.model.device)\n\n X, y = batch[\"X\"], batch[\"y\"]\n # X = (batch_size, num_channels, num_samples)\n # y = (batch_size, num_frames, num_classes)\n\n y_pred = self.model(X)\n # y_pred = (batch_size, num_frames, num_classes)\n\n val_fbeta = self.val_fbeta(y_pred[:, ::10].squeeze(), y[:, ::10].squeeze())\n self.model.log(\n f\"{self.ACRONYM}@val_fbeta\",\n val_fbeta,\n on_step=False,\n on_epoch=True,\n prog_bar=True,\n logger=True,\n )\n\n if batch_idx > 0:\n return\n\n # visualize first 9 validation samples of first batch in Tensorboard\n X = 
X.cpu().numpy()\n y = y.float().cpu().numpy()\n y_pred = y_pred.cpu().numpy()\n\n # prepare 3 x 3 grid (or smaller if batch size is smaller)\n num_samples = min(self.batch_size, 9)\n nrows = math.ceil(math.sqrt(num_samples))\n ncols = math.ceil(num_samples / nrows)\n fig, axes = plt.subplots(\n nrows=3 * nrows,\n ncols=ncols,\n figsize=(15, 10),\n )\n\n # reshape target so that there is one line per class when plottingit\n y[y == 0] = np.NaN\n if len(y.shape) == 2:\n y = y[:, :, np.newaxis]\n y *= np.arange(y.shape[2])\n\n # plot each sample\n for sample_idx in range(num_samples):\n\n # find where in the grid it should be plotted\n row_idx = sample_idx // nrows\n col_idx = sample_idx % ncols\n\n # plot waveform\n ax_wav = axes[row_idx * 3 + 0, col_idx]\n sample_X = np.mean(X[sample_idx], axis=0)\n ax_wav.plot(sample_X)\n ax_wav.set_xlim(0, len(sample_X))\n ax_wav.get_xaxis().set_visible(False)\n ax_wav.get_yaxis().set_visible(False)\n\n # plot target\n ax_ref = axes[row_idx * 3 + 1, col_idx]\n sample_y = y[sample_idx]\n ax_ref.plot(sample_y)\n ax_ref.set_xlim(0, len(sample_y))\n ax_ref.set_ylim(-1, sample_y.shape[1])\n ax_ref.get_xaxis().set_visible(False)\n ax_ref.get_yaxis().set_visible(False)\n\n # plot prediction\n ax_hyp = axes[row_idx * 3 + 2, col_idx]\n sample_y_pred = y_pred[sample_idx]\n ax_hyp.plot(sample_y_pred)\n ax_hyp.set_ylim(-0.1, 1.1)\n ax_hyp.set_xlim(0, len(sample_y))\n ax_hyp.get_xaxis().set_visible(False)\n\n plt.tight_layout()\n\n self.model.logger.experiment.add_figure(\n f\"{self.ACRONYM}@val_samples\", fig, self.model.current_epoch\n )", "def __init__(self, data_loader,\n data_train,\n data_test,\n dataset_name,\n model_kind,\n transaction_cost=0.0,\n BATCH_SIZE=30,\n GAMMA=0.7,\n ReplayMemorySize=50,\n TARGET_UPDATE=5,\n n_step=10,\n window_size=20):\n self.data_train = data_train\n self.data_test = data_test\n self.DATASET_NAME = dataset_name\n self.BATCH_SIZE = BATCH_SIZE\n self.GAMMA = GAMMA\n self.ReplayMemorySize = ReplayMemorySize\n self.window_size = window_size\n self.model_kind = model_kind\n\n self.split_point = data_loader.split_point\n self.begin_date = data_loader.begin_date\n self.end_date = data_loader.end_date\n\n self.TARGET_UPDATE = TARGET_UPDATE\n self.n_step = n_step\n self.transaction_cost = transaction_cost\n\n self.memory = ReplayMemory(ReplayMemorySize)\n\n self.train_test_split = True if data_test is not None else False\n\n self.EPS_START = 0.9\n self.EPS_END = 0.05\n self.EPS_DECAY = 500\n\n self.steps_done = 0\n\n self.PATH = os.path.join(Path(os.path.abspath(os.path.dirname(__file__))).parent,\n f'Results/{self.DATASET_NAME}/'\n f'{self.model_kind}; '\n f'DATA_KIND({self.data_train.data_kind}); '\n f'BEGIN_DATE({self.begin_date}); '\n f'END_DATE({self.end_date}); '\n f'SPLIT_POINT({self.split_point}); '\n f'WindowSize({self.window_size}); '\n f'BATCH_SIZE{self.BATCH_SIZE}; '\n f'GAMMA{self.GAMMA}; '\n f'REPLAY_MEMORY_SIZE{self.ReplayMemorySize}; '\n f'TARGET_UPDATE{self.TARGET_UPDATE}; '\n f'N_STEP{self.n_step}')\n\n if not os.path.exists(self.PATH):\n os.makedirs(self.PATH)\n\n self.model_dir = os.path.join(self.PATH, f'model.pkl')", "def train(self):\n acc_time = []\n data_test = self.val_data[0][0][0]\n labels_test = self.val_data[0][0][1]\n for i, train_batch in enumerate(self.dataset):\n \n writerDIM = SummaryWriter('runs/experiment_DIM'+str(i))\n data,labels, t = train_batch\n\n index_tr,index_cv,coreset = data_split(data.shape[0],777)\n\n # adding eventual replay patterns to the current batch\n if i == 0:\n ext_mem = 
[data[coreset], labels[coreset]]\n dataC = np.concatenate((data[index_tr], data[index_cv]),axis=0)\n labC = np.concatenate((labels[index_tr],labels[index_cv]),axis=0)\n else:\n dataP = ext_mem[0]\n labP = ext_mem[1]\n\n ext_mem = [\n np.concatenate((data[coreset], ext_mem[0])),\n np.concatenate((labels[coreset], ext_mem[1]))]\n if self.replay:\n dataC = np.concatenate((data[index_tr], data[index_cv],dataP),axis=0)\n labC = np.concatenate((labels[index_tr],labels[index_cv],labP),axis=0)\n else:\n dataC = np.concatenate((data[index_tr], data[index_cv]),axis=0)\n labC = np.concatenate((labels[index_tr],labels[index_cv]),axis=0)\n\n\n\n print(\"----------- batch {0} -------------\".format(i))\n print(\"Task Label: \", t)\n trC,cvC = data_split_Tr_CV(dataC.shape[0],777)\n\n train_set = LoadDataset(dataC,labC,transform=self.tr,indices=trC)\n val_set = LoadDataset(dataC,labC,transform=self.tr,indices=cvC)\n print('Training set: {0} \\nValidation Set {1}'.format(train_set.__len__(),val_set.__len__()))\n batch_size=32\n train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)\n valid_loader = DataLoader(val_set, batch_size=batch_size, shuffle=False)\n dataloaders = {'train':train_loader,'val':valid_loader}\n \n ####### Set hyperparameters for the training\n if i ==0: \n prior = False\n ep=40\n dim_model = DIM_model(batch_s=32,num_classes =128,feature=True) \n dim_model.to(self.device)\n classifierM = _classifier(n_input=128,n_class=50,n_neurons=[256,256,128])\n classifierM = classifierM.to(self.device)\n writer = SummaryWriter('runs/experiment_C'+str(i))\n lr_new = 0.00001\n lrC=0.0001\n \n else:\n prior = True\n ep=6\n \n lr_new =0.000005\n lrC = 0.00005\n\n optimizer = torch.optim.Adam(dim_model.parameters(),lr=lr_new)\n scheduler = lr_scheduler.StepLR(optimizer,step_size=40,gamma=0.1) #there is also MultiStepLR\n\n tr_dict_enc = {'ep':ep,'writer':writerDIM,'best_loss':1e10,'t_board':True,\n 'gamma':.5,'beta':.5,'Prior_Flag':prior,'discriminator':classifierM} \n tr_dict_cl = {'ep':40,'writer':writer,'best_loss':1e10,'t_board':True,'gamma':1}#40\n\n if i==0 and self.load:\n print('Load DIM model weights first step')\n dim_model.load_state_dict(torch.load(self.path + 'weights/weightsDIM_T0.pt'))\n else:\n ############################## Train Encoder########################################\n dim_model,self.stats = trainEnc_MI(self.stats,dim_model, optimizer, scheduler,dataloaders,self.device,tr_dict_enc)\n ####################################################################################\n if i==0:\n torch.save(dim_model.state_dict(), self.path + 'weights/weightsDIM_T'+str(i)+'.pt')\n\n ####\n #Conversion of image into latent space representation for classifier training\n ####\n dim_model.requires_grad_(False)\n for phase in ['train','val']:\n dataF = None\n labF = None\n for inputs, labels in dataloaders[phase]:\n torch.cuda.empty_cache()\n if len(inputs.shape)==5:\n\n inputs = inputs[:,:,:,:,0].to(self.device)\n else:\n inputs = inputs.to(self.device)\n\n _,_,pred = dim_model(inputs)\n pred_l = pred.data.cpu().numpy()\n if dataF is None:\n dataF = pred_l\n labF = labels.data.cpu().numpy()\n else:\n dataF = np.concatenate((dataF,pred_l),axis=0)\n labF = np.concatenate((labF,labels.data.cpu().numpy()),axis=0)\n\n if phase == 'train':\n dataTr_f = dataF\n labTr_f = labF\n else:\n dataCv_f = dataF\n labCv_f = labF\n \n dim_model.requires_grad_(True)\n train_set = LoadFeat(dataTr_f,labTr_f)\n val_set = LoadFeat(dataCv_f,labCv_f)\n batch_size=32\n\n train_loader = 
DataLoader(train_set, batch_size=batch_size, shuffle=True)\n valid_loader = DataLoader(val_set, batch_size=batch_size, shuffle=False)\n dataloaderC = {'train':train_loader,'val':valid_loader}\n\n optimizerC = torch.optim.Adam(classifierM.parameters(),lr=lrC)\n schedulerC = lr_scheduler.StepLR(optimizerC,step_size=40,gamma=0.1)\n classifierM.requires_grad_(True)\n\n ############################## Train Classifier ########################################\n classifierM,self.stats = train_classifier(self.stats,classifierM, optimizerC, schedulerC,dataloaderC,self.device,tr_dict_cl) \n #################################### #################################### ##############\n\n torch.save(classifierM.state_dict(), self.path + 'weights/weightsC_T'+str(i)+'.pt')\n dim_model.eval()\n classifierM.eval()\n #### Cross_val Testing\n \n test_set = LoadDataset(data_test,labels_test,transform=self.trT)\n batch_size=32\n test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False)\n score= []\n\n for inputs, labels in test_loader:\n torch.cuda.empty_cache()\n inputs = inputs.to(self.device)\n labels = labels.to(self.device) \n _,_,ww =dim_model(inputs)\n pred = classifierM(ww)\n pred_l = pred.data.cpu().numpy()\n score.append(np.sum(np.argmax(pred_l,axis=1)==labels.data.cpu().numpy())/pred_l.shape[0])\n print('TEST PERFORMANCES:', np.asarray(score).mean())\n acc_time.append(np.asarray(score).mean())\n del test_set,test_loader\n \n self.dim_model = dim_model\n self.classifierM = classifierM\n acc_time = np.asarray(acc_time)\n return self.stats,acc_time", "def run_epoch(self):\n print(\"Training\")\n self.set_train()\n\n for batch_idx in range(0, self.num_total_batch):\n\n before_op_time = time.time()\n # Choosing the dataloader for training model\n if self.choosing_dataset_to_train_with(batch_idx):\n # Synthetic dataset\n self.syn_or_real = 'syn'\n try:\n inputs = self.syn_train_iter.__next__()\n except StopIteration:\n print('Stopped as the iteration has reached to the END, and reloading the synthetic dataloader')\n self.syn_train_iter = iter(self.syn_train_loader)\n inputs = self.syn_train_iter.__next__()\n else:\n # Real dataset\n self.syn_or_real = 'real'\n try:\n inputs = self.real_train_iter.__next__()\n except StopIteration:\n print('Stopped as the iteration has reached to the END, and reloading the real dataloader')\n self.real_train_iter = iter(self.real_train_loader)\n inputs = self.real_train_iter.__next__()\n\n # Move all available tensors to GPU memory\n for key, ipt in inputs.items():\n if type(key) == tuple or key == \"depth_gt\":\n inputs[key] = ipt.to(self.device)\n\n # log less frequently after the first 2000 steps to save time & disk space\n self.step += 1\n self.early_phase = batch_idx % self.opt.log_frequency == 0\n self.mid_phase = False and self.step % self.opt.save_frequency == 0\n self.late_phase = self.num_total_batch - 1 == batch_idx\n\n outputs, losses = {}, {}\n # Depth estimation\n outputs_d, losses_d = self.process_batch(inputs)\n outputs.update(outputs_d)\n losses.update(losses_d)\n\n # No more if else conditions, just combine all losses based on availability of gradients\n final_loss = torch.tensor(0.).to(self.device)\n for k, v in losses.items():\n if ('d_' not in k) and v.requires_grad and ('/' not in k):\n final_loss += v\n final_loss.backward()\n losses[\"loss\"] = final_loss\n\n if (batch_idx + 1) % 2 == 0:\n self.model_optimizer.step()\n self.model_optimizer.zero_grad()\n self.zero_grad()\n\n duration = time.time() - before_op_time\n self.log_time(batch_idx, 
duration, losses[\"loss\"].cpu().data)\n\n if \"depth_gt\" in inputs:\n self.compute_depth_losses(inputs, outputs, losses)\n\n if self.early_phase or self.mid_phase or self.late_phase:\n self.log(\"train\", inputs, outputs, losses)\n self.val(\"real\")\n self.val(\"syn\")\n\n if (batch_idx + 1) % 2 == 0:\n current_lr = self.update_learning_rate(self.model_optimizer, self.opt.learning_rate)", "def on_train_batch_end(\n self,\n trainer: 'pl.Trainer',\n pl_module: 'pl.LightningModule',\n outputs: Optional[STEP_OUTPUT],\n batch: Any,\n batch_idx: int,\n dataloader_idx: int,\n ) -> None:\n if trainer.global_step % self.every_n_steps == 0: \n text, x = batch\n sample_text = text[:1]\n token_list = sample_text.masked_select(sample_text != 0).tolist()\n decoded_text = self.tokenizer.decode(token_list) \n text = text.to(pl_module.device)\n x = x.to(pl_module.device) \n with torch.no_grad():\n pl_module.eval()\n #generate sample with image provided\n x_rec = pl_module.generate_images(text[:1], img = x[:1], filter_thres=0.9) # topk sampling at 0.9\n\n #generate sample without image\n x_gen = pl_module.generate_images(text[:1], filter_thres=0.9) # topk sampling at 0.9\n\n pl_module.train() \n\n\n x_grid = torchvision.utils.make_grid(\n tensor=x,\n nrow=self.nrow,\n padding=self.padding,\n normalize=self.normalize,\n value_range=self.norm_range,\n scale_each=self.scale_each,\n pad_value=self.pad_value,\n ) \n xrec_grid = torchvision.utils.make_grid(\n tensor=x_rec,\n nrow=self.nrow,\n padding=self.padding,\n normalize=self.normalize,\n value_range=self.norm_range,\n scale_each=self.scale_each,\n pad_value=self.pad_value,\n ) \n xgen_grid = torchvision.utils.make_grid(\n tensor=x_gen,\n nrow=self.nrow,\n padding=self.padding,\n normalize=self.normalize,\n value_range=self.norm_range,\n scale_each=self.scale_each,\n pad_value=self.pad_value,\n ) \n text_title = \"train/text\"\n trainer.logger.experiment.add_text(text_title, decoded_text, global_step=trainer.global_step)\n x_title = \"train/input\"\n trainer.logger.experiment.add_image(x_title, x_grid, global_step=trainer.global_step)\n xrec_title = \"train/half_reconstruction\"\n trainer.logger.experiment.add_image(xrec_title, xrec_grid, global_step=trainer.global_step)\n xgen_title = \"train/generation\"\n trainer.logger.experiment.add_image(xgen_title, xgen_grid, global_step=trainer.global_step)", "def fit(self):\n # Iterate and train.\n step_file = self.checkpointer.get_step_file()\n start_step = Pickle.load(open(step_file, 'rb'))\n for step in xrange(start_step, self.train_size // self.train_batch_size):\n print 'Step No.:', step\n # Checkpoint tensorflow variables for recovery\n if step % self.checkpointer.get_checkpoint_steps() == 0:\n print 'Checkpointing: Saving Tensorflow variables'\n self.saver.save(self.sess, self.checkpointer.get_save_address())\n Pickle.dump(step + 1, open(step_file, 'wb'))\n print 'Checkpointing Complete. Deleting historical checkpoints....'\n self.checkpointer.delete_previous_checkpoints(num_previous=2)\n print 'Deleted.. 
Moving forward...'\n\n offset = (step * self.train_batch_size) % self.train_size\n batch_data_fwd = self.X_trn_fwd[offset:(offset + self.train_batch_size), :].T\n batch_data_bwd = self.X_trn_bwd[offset:(offset + self.train_batch_size), :].T\n batch_labels = self.Y_trn[offset:(offset + self.train_batch_size), :].T\n\n loss_t_forward, loss_t_backward = self._train_batch(batch_data_fwd, batch_data_bwd, batch_labels)\n print \"Present Loss Forward:\", loss_t_forward\n print \"Present Loss Backward:\", loss_t_backward\n\n # check results on 2 tasks - Visual Validation\n print 'Train Data Validation\\n'\n self._visual_validate(self.X_trn_fwd[301, :], self.X_trn_bwd[301, :], self.Y_trn[301, :])\n print\n print\n print 'Test Data Validation\\n'\n self._visual_validate(self.X_tst_fwd[56, :], self.X_tst_bwd[56, :], self.Y_tst[56, :])\n print\n print\n\n # Store prediction after certain number of steps #############\n # This will be useful for the graph construction\n '''\n if(step % self.checkpointer.get_prediction_checkpoint_steps() == 0):\n self.predict()\n self.store_test_predictions('_' + str(step))\n '''", "def fit(self, data):\n\n \"\"\"YOUR CODE HERE \"\"\"\n # unormalized data\n un_st = np.concatenate([datum[\"observations\"] for datum in data])\n un_stp1 = np.concatenate([datum[\"next_observations\"] for datum in data])\n un_at = np.concatenate([datum[\"actions\"] for datum in data])\n \n # normalize data\n n_st = (un_st-self.mean_obs)/(self.std_obs+self.epsilon)\n n_at = (un_at-self.mean_action)/(self.std_action+self.epsilon)\n n_stat = np.concatenate([n_st,n_at],axis=1)\n \n n_delta = ((un_stp1-un_st)-self.mean_deltas)/(self.std_deltas+self.epsilon)\n\n # make a shuffle row of whole data to be used\n N = n_delta.shape[0]\n train_indicies = np.arange(N)\n np.random.shuffle(train_indicies)\n # train over the whole data set for the number of iterations\n for i in range(self.iterations):\n for i in range(int(math.ceil(N/self.batch_size))):\n # index for the batch points from a random row\n start_idx = i*self.batch_size%N\n idx = train_indicies[start_idx:start_idx+self.batch_size]\n # choose the batch\n feed_dict = {self.st_at : n_stat[idx,:], self.delta_ : n_delta[idx,:]}\n # train the data\n self.sess.run(self.update_op, feed_dict=feed_dict)", "def on_test_batch_begin(\n self, batch: int, logs: tp.Optional[tp.Dict[str, np.ndarray]] = None\n ):\n pass", "def batch_fit(self, data_loader: torch.utils.data.DataLoader, epochs: int):\n pass" ]
[ "0.71952105", "0.7118524", "0.66174924", "0.6597654", "0.6585971", "0.65415645", "0.65149236", "0.6504006", "0.64783216", "0.6470306", "0.6465952", "0.64400965", "0.6413714", "0.6394377", "0.6394115", "0.6370589", "0.63683504", "0.63560295", "0.63350075", "0.6323913", "0.6297011", "0.6293274", "0.62880224", "0.6269447", "0.6267269", "0.6263693", "0.6261251", "0.62585866", "0.6245933", "0.6244427", "0.62231666", "0.62212986", "0.6202415", "0.6197431", "0.6191721", "0.6190991", "0.61870587", "0.6173142", "0.6166043", "0.6156772", "0.6155266", "0.61392474", "0.611928", "0.611501", "0.6099474", "0.60944796", "0.60941374", "0.60932195", "0.60924745", "0.60863966", "0.6085734", "0.6076175", "0.60733926", "0.60714656", "0.60649186", "0.6050466", "0.60497445", "0.6046125", "0.6043452", "0.60433465", "0.6033626", "0.602999", "0.6029838", "0.60289586", "0.6024967", "0.6022942", "0.60213655", "0.6019663", "0.60188407", "0.601824", "0.6015964", "0.60096097", "0.6006903", "0.60040665", "0.59997493", "0.5995336", "0.5994437", "0.59901035", "0.59884644", "0.59840035", "0.5983272", "0.59733576", "0.5971474", "0.59667903", "0.59611994", "0.59592617", "0.59566444", "0.5953491", "0.59531516", "0.5946177", "0.5942575", "0.59410363", "0.59343606", "0.5932946", "0.592367", "0.59173656", "0.59114516", "0.5910568", "0.5906162", "0.5905113" ]
0.6063903
55
Checks that X is transposed to [Lat, Lon, Sample, Feature] order
def check_transposed(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):
    assert list(X.dims).index(x_lat_dim) == 0, 'XCast requires a dataset to be transposed to LAT x LON x SAMPLE x FEATURE'
    assert list(X.dims).index(x_lon_dim) == 1, 'XCast requires a dataset to be transposed to LAT x LON x SAMPLE x FEATURE'
    assert list(X.dims).index(x_sample_dim) == 2, 'XCast requires a dataset to be transposed to LAT x LON x SAMPLE x FEATURE'
    assert list(X.dims).index(x_feature_dim) == 3, 'XCast requires a dataset to be transposed to LAT x LON x SAMPLE x FEATURE'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_input_transposed_vector(multiple_linear_regression_data):\n X, y = multiple_linear_regression_data\n x = X.copy().T\n y = pd.DataFrame(y)\n\n # There is a difference with a transposed array\n with pytest.raises(\n AssertionError, match=r\"N >= K: You need at least as many rows .*\"\n ):\n _ = multiple_linear_regression(x, y)", "def check_all(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):\n\tcheck_dimensions(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\tcheck_coords(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\tcheck_consistent(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\tcheck_type(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\t#check_transposed(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)", "def check_type(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):\n\tassert type(X) == xr.DataArray, 'XCast requires a dataset to be of type \"Xarray.DataArray\"'", "def __check_2d_and_reshape(X):\n if len(X.shape) == 1:\n X = np.reshape(X, (-1, X.shape[0]))\n return X", "def _validate_X(X):\n return X if not isinstance(X, pd.DataFrame) else X.as_matrix()", "def test_feature_format(X):\r\n print(\"test_feature_format()...\", end = \"\")\r\n for row in range(len(X)):\r\n for col in range(len(X[0])):\r\n assert (isinstance(X[row][col], float) == True)\r\n print(\"Passed!\")", "def _validate_X_predict(self, X):\n # X = check_array(X, ensure_2d=False)\n X = np.atleast_2d(X)\n n_features = X.shape[1]\n if self.n_features_in_ != n_features:\n raise ValueError(\n f\"Number of features of the model must match the input. Model n_features_in_ is {self.n_features_in_} and input n_features is {n_features}. Reshape your data.\"\n )", "def check_coords(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):\n\tassert x_lat_dim in X.coords.keys(), 'XCast requires a dataset_lat_dim to be a coordinate on X'\n\tassert x_lon_dim in X.coords.keys(), 'XCast requires a dataset_lon_dim to be a coordinate on X'\n\tassert x_sample_dim in X.coords.keys(), 'XCast requires a dataset_sample_dim to be a coordinate on X'\n\tassert x_feature_dim in X.coords.keys(), 'XCast requires a dataset_feature_dim to be a coordinate on X'", "def check_conv_transpose(extract):\n call = extract\n if isinstance(call, tvm.relay.expr.TupleGetItem):\n call = call.tuple_value\n elif call.op.name == \"nn.relu\":\n call = call.args[0]\n if isinstance(call, tvm.relay.expr.TupleGetItem):\n call = call.tuple_value\n elif call.op.name == \"clip\":\n if call.attrs[\"a_min\"] != 0.0 or call.attrs[\"a_max\"] != 6.0:\n return False\n call = call.args[0]\n if isinstance(call, tvm.relay.expr.TupleGetItem):\n call = call.tuple_value\n\n while call.op.name != \"nn.conv2d_transpose\":\n call = call.args[0]\n\n attrs = call.attrs\n if attrs.data_layout != \"NCHW\":\n return False\n\n return True", "def transpose(X):\n if len(X.shape) == 1:\n return X\n else:\n Xt = zeros((X.shape[1], X.shape[0]))\n for i in range(X.shape[0]):\n for j in range(X.shape[1]):\n Xt[j][i] = X[i][j]\n\n\n return Xt", "def transform(self, X):\n X = np.asarray(X)\n if X.shape[1:] != self.features_shape_:\n raise ValueError(\"Shape of X used in fit and transform must be \" \"same\")\n return X.reshape(len(X), -1)", "def _need_transpose(expr_matrix, adj_matrix):\n return expr_matrix.shape[1] != adj_matrix.shape[0]", "def _check_input(self, X):\n symbols = np.concatenate(X)\n if len(symbols) == 1: # not enough data\n raise ValueError(\"expected at least 1 observation \"\n \"but none found.\")\n elif (symbols < 0).any(): 
# contains negative integers\n raise ValueError(\"expected non-negative features \"\n \"for each observation.\")\n elif X.shape[1] > 1: # contains to many features\n raise ValueError(\"expected only 1 feature but got {0} \"\n \"for each observation.\".format(X.shape[1]))\n else:\n return True", "def _validate_X_predict(self, X, check_input=True):\n if check_input:\n X = check_array(X, dtype=DTYPE, accept_sparse=\"csr\")\n if issparse(X) and (X.indices.dtype != np.intc or\n X.indptr.dtype != np.intc):\n raise ValueError(\"No support for np.int64 index based \"\n \"sparse matrices\")\n\n n_features = X.shape[1]\n # if self.n_features_ != n_features:\n # raise ValueError(\"Number of features of the model must \"\n # \"match the input. Model n_features is %s and \"\n # \"input n_features is %s \"\n # % (self.n_features_, n_features))\n\n return X", "def check_consistent_X(self, X):\n # X must be ndarray-type\n if not isinstance(X, np.ndarray):\n X = np.array(X)\n\n return X", "def _validate_XY(X, Y):\n try:\n for inp in [X, Y]:\n assert isinstance(inp, torch.Tensor)\n assert inp.dtype is torch.float or inp.dtype is torch.double\n assert len(inp.shape) == 2\n assert X.dtype is Y.dtype\n assert X.shape[0] == Y.shape[0]\n except AssertionError:\n raise AttributeError(\n \"invalid inputs: X and Y should be float/double tensors of shape \"\n \"(n, d) and (n, m) respectively, where n is the number of samples, \"\n \"d is the number of features, and m is the number of outputs\"\n )", "def _validate_input(self, x, y):\n\n x, y = check_X_y(x, y, accept_sparse=[\"csr\", \"csc\", \"coo\"],\n multi_output=True, y_numeric=True)\n return x, y.ravel()", "def test_xy(self):\n x = np.array([[1,3], [2,8], [1,3]])\n y = np.array([1,1,-1])\n lro = LogisticRegressionOptimiser(x,y)\n expected = np.array([[1,3], [2,8], [-1,-3]])\n for i in 0,1,2:\n for j in 0,1:\n self.assertEqual(lro.xy[i][j], expected[i][j])", "def check_transformations(*args):\n assert args[0].shape == (21,21)\n assert args[0].dtype == np.float64\n if len(args) == 2:\n assert args[1].shape == (2,2)\n assert args[1].dtype == np.float64", "def check_consistent(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):\n\tassert X.shape[list(X.dims).index(x_lat_dim)] == len(X.coords[x_lat_dim].values), \"XCast requires a dataset's x_lat_dim coordinate to be the same length as its x_lat_dim dimension\"\n\tassert X.shape[list(X.dims).index(x_lon_dim)] == len(X.coords[x_lon_dim].values), \"XCast requires a dataset's x_lon_dim coordinate to be the same length as its x_lon_dim dimension\"\n\tassert X.shape[list(X.dims).index(x_sample_dim)] == len(X.coords[x_sample_dim].values), \"XCast requires a dataset's x_sample_dim coordinate to be the same length as its x_sample_dim dimension\"\n\tassert X.shape[list(X.dims).index(x_feature_dim)] == len(X.coords[x_feature_dim].values), \"XCast requires a dataset's x_feature_dim coordinate to be the same length as its x_feature_dim dimension\"", "def test_roundtrip_from_transpose1(self):\n transposed_array = np.array([[0, 1, 2], [2, 1, 0]]).T\n assert_array_equal(transposed_array, carray(transposed_array, dtype=None))", "def transform(\n self,\n X: FEATURES | None = None,\n y: TARGET | None = None,\n ) -> PANDAS | tuple[DATAFRAME, PANDAS]:\n check_is_fitted(self)\n X, y = self._prepare_input(X, y, columns=getattr(self, \"feature_names_in_\", None))\n\n self.log(\"Cleaning the data...\", 1)\n\n if X is not None:\n # Replace all missing values with NaN\n X = X.replace(self.missing + [np.inf, -np.inf], np.NaN)\n\n for name, 
column in X.items():\n # Drop features with invalid data type\n if column.dtype.name in lst(self.drop_types):\n self.log(\n f\" --> Dropping feature {name} for having a \"\n f\"prohibited type: {column.dtype.name}.\", 2\n )\n X = X.drop(name, axis=1)\n continue\n\n elif column.dtype.name in (\"object\", \"category\"):\n if self.strip_categorical:\n # Strip strings from blank spaces\n X[name] = column.apply(\n lambda val: val.strip() if isinstance(val, str) else val\n )\n\n # Drop prohibited chars from column names\n if self.drop_chars:\n X = X.rename(columns=lambda x: re.sub(self.drop_chars, \"\", str(x)))\n\n # Drop duplicate samples\n if self.drop_duplicates:\n X = X.drop_duplicates(ignore_index=True)\n\n if y is not None:\n if self.drop_chars:\n if isinstance(y, SERIES_TYPES):\n y.name = re.sub(self.drop_chars, \"\", y.name)\n else:\n y = y.rename(columns=lambda x: re.sub(self.drop_chars, \"\", str(x)))\n\n # Delete samples with NaN in target\n if self.drop_missing_target:\n length = len(y) # Save original length to count deleted rows later\n y = y.replace(self.missing + [np.inf, -np.inf], np.NaN).dropna()\n\n if X is not None:\n X = X[X.index.isin(y.index)] # Select only indices that remain\n\n if (d := length - len(y)) > 0:\n self.log(f\" --> Dropping {d} rows with missing values in target.\", 2)\n\n if self.encode_target:\n y_transformed = y.__class__(dtype=\"object\")\n for col, est in self._estimators.items():\n if est:\n if n_cols(out := est.transform(bk.DataFrame(y)[col])) == 1:\n self.log(f\" --> Label-encoding column {col}.\", 2)\n out = to_series(out, y.index, col)\n\n else:\n self.log(f\" --> Label-binarizing column {col}.\", 2)\n out = to_df(\n data=out,\n index=y.index,\n columns=[f\"{col}_{c}\" for c in est.classes_],\n )\n\n # Replace target with encoded column(s)\n if isinstance(y, SERIES_TYPES):\n y_transformed = out\n else:\n y_transformed = merge(y_transformed, out)\n\n else: # Add unchanged column\n y_transformed = merge(y_transformed, bk.DataFrame(y)[col])\n\n y = y_transformed\n\n return variable_return(X, y)", "def transform(self, X, y=None):\n\n check_is_fitted(self, ('n_features_', ))\n X = check_array(X, accept_sparse=True)\n\n if X.shape[1] != self.n_features_:\n raise ValueError('num_features differ between fit and transform!')\n\n return X # dummy pass-through, doing nothing except for shape checks.", "def check_data_type_column_data(X):\n if type(X) is not numpy.ndarray:\n raise TypeError(\"X should be type numpy.ndarray\")\n\n if len(X.shape) == 2 and X.shape[1] > 1:\n raise TypeError(\"X should have a single column.\")", "def _check_array(self, X):\n x = np.copy(X)\n if np.isfortran(x) is False:\n # print (\"Array must be in Fortran-order. 
Converting now.\")\n x = np.asfortranarray(x)\n if self.sampling > x.shape:\n raise ValueError(\"'sampling' is greater than the dimensions of X\")\n return x", "def DatasetToTuple(sample):\n \n X_elem = []\n Y_elem = []\n for x,y in sample:\n X_elem.append(x if x.dim() > 0 else x.item())\n Y_elem.append(y if y.dim() > 0 else y.item()) \n return (torch.stack(X_elem),torch.stack(Y_elem))", "def DatasetToTuple(sample):\n \n X_elem = []\n Y_elem = []\n for x,y in sample:\n X_elem.append(x if x.dim() > 0 else x.item())\n Y_elem.append(y if y.dim() > 0 else y.item()) \n return (torch.stack(X_elem),torch.stack(Y_elem))", "def data_as_timesteps(X):\n if isinstance(X, list):\n # Make tabular data from a list of examples\n X = np.vstack(X)\n # Stack data so that features are the 3rd dimension\n return np.dstack([X])", "def transform(self, X, details=False):\n Xm = ma.masked_equal(X, self.missing_values)\n p_y_given_x, log_z = self.calculate_latent(self.theta, Xm)\n labels = self.label(p_y_given_x)\n if details == 'surprise':\n # Totally experimental\n log_marg_x = self.calculate_marginals_on_samples(self.theta, Xm, return_ratio=False)\n n_samples = Xm.shape[0]\n surprise = []\n for l in range(n_samples):\n q = - sum([max([log_marg_x[j,l,i,labels[l, j]]\n for j in range(self.n_hidden)])\n for i in range(self.n_visible)])\n surprise.append(q)\n return p_y_given_x, log_z, np.array(surprise)\n elif details:\n return p_y_given_x, log_z\n else:\n return labels", "def separate_feature_class(data):\n data_c = data.copy()\n y = data_c.reindex(columns=['class'])\n X = data_c.drop(columns='class')\n return X,y", "def inverse_transform(self, X):\n X = np.asarray(X)\n if X.ndim not in (2, 3):\n raise ValueError(\n \"X should be of 2 or 3 dimensions but has shape \" \"%s\" % (X.shape,)\n )\n return X.reshape(X.shape[:-1] + self.features_shape_)", "def transform(self, X: pd.DataFrame):\n return self.feature_transformer.transform(X)", "def _check_X_y(self, X, column):\n column_idx = None\n if isinstance(X, pd.core.frame.DataFrame):\n if isinstance(column, str):\n # get index of current column\n column_idx = X.columns.get_loc(column)\n else:\n column_idx = column\n X = X.as_matrix()\n else:\n column_idx = column\n return X, column_idx", "def _validate_X(X):\n if not isinstance(X, pd.DataFrame):\n raise ValueError(\"X should be a dataframe.\")", "def test_roundtrip_from_transpose2(self):\n transposed_array = np.array([[0, 1, 2], [2, 1, 0]]).T\n assert_array_equal(transposed_array, carray(transposed_array, dtype=transposed_array.dtype))", "def prepare_input_data(self, X):\n X = np.asarray(X)\n if X.dtype != \"f\" and X.dtype != \"d\":\n X = X.astype(float)\n\n self._check_input(X)\n missing_mask = np.isnan(X)\n self._check_missing_value_mask(missing_mask)\n return X, missing_mask", "def _transform(self, test_features, test_confounds):\n\n check_is_fitted(self, ('n_features_', ))\n test_features = check_array(test_features, accept_sparse=True)\n\n if test_features.shape[1] != self.n_features_:\n raise ValueError('number of features must be {}. 
Given {}'\n ''.format(self.n_features_, test_features.shape[1]))\n\n if test_confounds is None: # during estimator checks\n return test_features # do nothing\n\n test_confounds = check_array(test_confounds, ensure_2d=False)\n check_consistent_length(test_features, test_confounds)\n\n return np.column_stack((test_features, test_confounds))", "def preprocess_features(X):\n\t# Initialize new output DataFrame\n\toutput = pd.DataFrame(index = X.index)\n\n\t# Investigate new output DataFrame\n\tfor col, col_data in X.iteritems():\n\t\t# If data type is categorical, convert to dummy variables\n\t\tif col_data.dtype == object:\n\t\t\tcol_data = pd.get_dummies(col_data, prefix = col)\n\n\t\t\t# Collect the revised columns\n\t\t\toutput - output.join(col_data)\n\treturn output", "def _check_shape(self, X):\n return all([X.shape[i] == self.train_shape[i] for i in range(2)])", "def _transform(self, X, y=None):\n # lazy imports to avoid hard dependency\n from tsfresh import extract_features\n\n Xt = extract_features(\n X,\n column_id=X.columns[0],\n column_value=X.columns[3],\n column_kind=X.columns[2],\n column_sort=X.columns[1],\n **self.default_fc_parameters_,\n )\n\n # When using the long input format, tsfresh seems to sort the index,\n # here we make sure we return the dataframe in the sort order as the\n # input data\n instances = X.iloc[:, 0].unique()\n Xt = Xt.reindex(instances)\n return Xt", "def transpose(x):\n return x[:, np.newaxis]", "def transform(self, X):\n extracted = []\n for columns, transformers in self.features:\n # columns could be a string or list of\n # strings; we don't care because pandas\n # will handle either.\n Xt = self._get_col_subset(X, columns)\n if transformers is not None:\n Xt = transformers.transform(Xt)\n extracted.append(_handle_feature(Xt))\n\n # handle features not explicitly selected\n if self.default is not False:\n Xt = self._get_col_subset(X, self._unselected_columns(X))\n if self.default is not None:\n Xt = self.default.transform(Xt)\n extracted.append(_handle_feature(Xt))\n\n\n # combine the feature outputs into one array.\n # at this point we lose track of which features\n # were created from which input columns, so it's\n # assumed that that doesn't matter to the model.\n\n # If any of the extracted features is sparse, combine sparsely.\n # Otherwise, combine as normal arrays.\n if any(sparse.issparse(fea) for fea in extracted):\n stacked = sparse.hstack(extracted).tocsr()\n # return a sparse matrix only if the mapper was initialized\n # with sparse=True\n if not self.sparse:\n stacked = stacked.toarray()\n else:\n stacked = np.hstack(extracted)\n\n return stacked", "def transform( self, X, y = None ):\n matrix = np.zeros((len(X),len(self.feature_names)))\n for i,bag in enumerate(X):\n for test in bag:\n try:\n matrix[i,self.feature_names.index(test)] = 1\n except ValueError:\n pass\n return matrix", "def test_transform(self):\n t = OneHotEncode(3)\n assert numpy.all(t.transform(0) == numpy.array((1.0, 0.0, 0.0)))\n assert numpy.all(t.transform(1) == numpy.array((0.0, 1.0, 0.0)))\n assert numpy.all(t.transform(2) == numpy.array((0.0, 0.0, 1.0)))\n with pytest.raises(AssertionError):\n t.transform(4)\n with pytest.raises(AssertionError):\n t.transform(-1)\n with pytest.raises(AssertionError):\n t.transform(2.2)\n assert numpy.all(\n t.transform([[2, 1], [0, 2]])\n == numpy.array(\n [[(0.0, 0.0, 1.0), (0.0, 1.0, 0.0)], [(1.0, 0.0, 0.0), (0.0, 0.0, 1.0)]]\n )\n )\n\n t = OneHotEncode(2)\n assert t.transform(0) == 0.0\n assert t.transform(1) == 1.0\n with 
pytest.raises(TypeError):\n t.transform(\"ipsi\")\n assert numpy.all(\n t.transform([[1, 1], [0, 1]]) == numpy.array([[1.0, 1.0], [0.0, 1.0]])\n )\n\n # for the crazy enough\n t = OneHotEncode(1)\n assert t.transform(0) == 0.0\n with pytest.raises(TypeError):\n t.transform(\"ipsi\")\n assert numpy.all(t.transform([[0, 0], [0, 0]]) == [[0.0, 0.0], [0.0, 0.0]])", "def split_by_feature(tX, y=None, feature22=None):\n # Split up the dataset by feature 22 by 0, 1 and >1\n tX_0 = tX[feature22 == 0]\n tX_1 = tX[feature22 == 1]\n tX_2 = tX[feature22 > 1]\n # Drop the undefined features\n #tX_0 = np.delete(tX_0, drop_0, axis=1)\n #tX_1 = np.delete(tX_1, drop_1, axis=1)\n print(\"Shape 0: {}, Shape 1: {}, Shape 2: {}\".format(tX_0.shape, tX_1.shape, tX_2.shape))\n if y is not None:\n y_0 = y[feature22 == 0]\n y_1 = y[feature22 == 1]\n y_2 = y[feature22 > 1]\n print(\"Shape 0: {}, Shape 1: {}, Shape 2: {}\".format(y_0.shape, y_1.shape, y_2.shape)) \n return tX_0, tX_1, tX_2, y_0, y_1, y_2\n else:\n return tX_0, tX_1, tX_2", "def validate_xy(x_train, y_train):\n try:\n x_train = x_train.astype('float64')\n except ValueError:\n raise ValueError('x_train should only contain numerical data.')\n\n if len(x_train.shape) < 2:\n raise ValueError('x_train should at least has 2 dimensions.')\n\n if x_train.shape[0] != y_train.shape[0]:\n raise ValueError('x_train and y_train should have the same number of instances.')", "def get_X_y(data, column):\n if not is_string_type(data[0, :]):\n columns_titles = [column_title.decode('UTF-8') for column_title in data[0, :]]\n else:\n columns_titles = [column_title for column_title in data[0, :]]\n X = numpy.empty((data.shape[0] - 1, data.shape[1] - 1), dtype=object)\n y = numpy.empty((data.shape[0] - 1, 1), dtype=object)\n X_index = 0\n for index, column_title in enumerate(columns_titles):\n if column_title == column:\n y = numpy.asarray(data[1:len(data), index], dtype=numpy.float32)\n else:\n if is_numerical_type(data[1:len(data), index]):\n X[:, X_index] = numpy.asarray(data[1:len(data), index], dtype=numpy.float32)\n X_index += 1\n else:\n raise AttributeError(f'Error! 
Need one-hot encoding before getting X and y data.')\n return numpy.asarray(X, dtype=numpy.float32), y.reshape(-1, 1)", "def transform(self,X):\n X=np.array(X)\n if(X.ndim==1):\n return self.transform_1d(X) \n elif(X.ndim==2):\n X_tran=self.transform_1d(X[0])\n for i in range(1,X.shape[0]):\n X_tran=np.vstack((X_tran,self.transform_1d(X[i])))\n return X_tran \n else:\n print(\"Warning: The input array is not Transformed since its greater than 2 dimension\")\n print(\"Its dimension is:{} required is 2\".format(X.ndim))\n return X", "def preprocess_features(features):\r\n rowsum = np.array(features.sum(1))\r\n r_inv = np.power(rowsum, -1).flatten()\r\n r_inv[np.isinf(r_inv)] = 0.\r\n r_mat_inv = sp.diags(r_inv)\r\n features = r_mat_inv.dot(features)\r\n return sparse_to_tuple(features)", "def test_transform_interface_repr(example_tsds: TSDataset) -> None:\n trend_transform = TrendTransform(in_column=\"target\", detrend_model=LinearRegression(), model=\"rbf\")\n out_column = f\"regressor_{trend_transform.__repr__()}\"\n result = trend_transform.fit_transform(example_tsds.df)\n for seg in result.columns.get_level_values(0).unique():\n assert out_column in result[seg].columns", "def test_transform_interface_out_column(example_tsds: TSDataset) -> None:\n out_column = \"regressor_test\"\n trend_transform = TrendTransform(\n in_column=\"target\", detrend_model=LinearRegression(), model=\"rbf\", out_column=out_column\n )\n result = trend_transform.fit_transform(example_tsds.df)\n for seg in result.columns.get_level_values(0).unique():\n assert out_column in result[seg].columns", "def preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return features.todense(), sparse_to_tuple(features)", "def preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return features.todense(), sparse_to_tuple(features)", "def verify_transformed(self, data):\n data_dim = data.shape[-1]\n if data_dim != self.dimension:\n error(\n \"{} result dimension {} does not match the prescribed input dimension {}\"\n .format(self.name, data_dim, self.dimension))\n nans, _ = np.where(np.isnan(data))\n if np.size(nans) != 0:\n error(\"{} result contains nan elements in :{}\".format(\n self.name, nans))", "def preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return sparse_to_tuple(features)", "def preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return sparse_to_tuple(features)", "def corresponding_ravel(X,axis=0):\n\n t1=np.c_[tuple([di.ravel() for di in X])]\n\n if axis==1:\n t1=t1.T\n\n return t1", "def normalize_feature_data(feature, X_train, X_valid, X_test):\r\n if type(feature) == list:\r\n for i, f in enumerate(feature):\r\n \r\n if f in __normalizing_features__:\r\n stds = np.std(X_train[i], axis=0)\r\n stds[stds==0.0] = 1.0\r\n means = np.mean(X_train[i], axis=0)\r\n X_train[i] = (X_train[i]-means)/stds\r\n X_valid[i] = (X_valid[i]-means)/stds\r\n X_test[i] = (X_test[i]-means)/stds\r\n else:\r\n if feature in 
__normalizing_features__:\r\n stds = np.std(X_train, axis=0)\r\n stds[stds==0.0] = 1.0\r\n means = np.mean(X_train, axis=0)\r\n X_train = (X_train-means)/stds\r\n X_valid = (X_valid-means)/stds\r\n X_test = (X_test-means)/stds\r\n \r\n return X_train, X_valid, X_test", "def transform(self, X):\n check_array(X)\n X = _check_X(X)\n if not (0 <= len(self._k_features) <= X.shape[1]):\n raise ValueError(\"Cannot transform Data\")\n df_out = self._feat.transform(X)\n # x_train = pd.DataFrame(df_out, columns=self._k_features)\n return df_out", "def test_transpose():\n x = np.array([\n [1, 2],\n [3, 4]\n ])\n y = np.array([\n [5, 6],\n [7, 8]\n ])\n z = np.dstack([x, y])\n return z, z.transpose(1, 0, 2)", "def load_characteristics(self):\r\n data = self.data\r\n X = data[:, :-1]\r\n Y = data[:, -1]\r\n return X, Y", "def _shape_check(self, X, y):\n if not len(y.shape) > 1:\n raise RuntimeError(\"The shape of y is incorrect.\")\n if y.shape != X.shape[:-1]:\n raise RuntimeError(\"X and y must have the same number of \" +\n \"samples and microstructure shape.\")\n if X.shape[-1] != 3:\n raise RuntimeError(\"X must have 3 continuous local states \" +\n \"(euler angles)\")", "def transform(self, X):\n featurizers = [self.featurizer1, self.featurizer2, self.featurizer3, self.featurizer4, self.featurizer5,\n self.featurizer6, self.featurizer7, self.featurizer8, self.featurizer9, self.featurizer10]\n fvs = []\n for datum in X:\n fv = [f(datum) for f in featurizers if f is not None]\n fvs.append(fv)\n return np.array(fvs).astype(float)", "def _check_matrix(self, x, *args):\n if self._special and x.determinant() != 1:\n raise TypeError('matrix must have determinant one')\n F = self.invariant_bilinear_form()\n if x * F * x.transpose() != F:\n raise TypeError('matrix must be orthogonal with respect to the invariant form')\n # TODO: check that quadratic form is preserved in characteristic two", "def transform(self, data):\n data = self._common_preprocess(data)\n data = data.loc[:, self.include_cols]\n X = data.drop('ckd', axis = 1)\n y = data['ckd']\n\n return X, y", "def transform(self, X: FEATURES, y: TARGET | None = None) -> DATAFRAME:\n check_is_fitted(self)\n X, y = self._prepare_input(X, y, columns=self.feature_names_in_)\n\n self.log(\"Normalizing features...\", 1)\n X_transformed = self._estimator.transform(X[self._num_cols])\n\n # If all columns were transformed, just swap sets\n if len(self._num_cols) != X.shape[1]:\n # Replace the numerical columns with the transformed values\n for i, col in enumerate(self._num_cols):\n X[col] = X_transformed[:, i]\n else:\n X = to_df(X_transformed, X.index, X.columns)\n\n return X", "def transform(self, X):\n featurizers = [self.featurizer1, self.featurizer2, self.featurizer3, self.featurizer4, self.featurizer5,\n self.featurizer6, self.featurizer7, self.featurizer8, self.featurizer9, self.featurizer10]\n fvs = []\n for datum in X:\n [fv] = [f(datum) for f in featurizers if f is not None]\n fvs.append(fv)\n return np.array(fvs).astype(float)", "def transform(self, X):\n # check if fit is called prior\n check_is_fitted(self)\n\n # check input dataframe\n X = _is_dataframe(X)\n\n # check for input consistency\n _check_input_matches_training_df(X, self.input_shape_[1])\n\n X = X.drop(columns=self.features_to_drop)\n\n return X", "def _check_X(X, columns=None):\n if isinstance(X, (pd.DataFrame)):\n return X\n elif isinstance(X, (np.ndarray)):\n if columns is None:\n return pd.DataFrame(X)\n else:\n return pd.DataFrame(X, columns=columns)\n elif isinstance(X, 
pd.Series):\n return pd.DataFrame(X, columns=X.name)\n elif isinstance(X, (list, tuple)):\n X = np.array(X)\n return pd.DataFrame(X, columns=[str(i) for i in range(X.shape[1])])\n elif hasattr(X, (\"__array__\")):\n data = X.__array__()\n return pd.DataFrame(data, columns=[str(i) for i in range(data.shape[1])])\n return X", "def test_to_from_matrix(self):\n # The equality is only guaranteed up to a sign\n converted = rowan.from_matrix(rowan.to_matrix(input1))\n self.assertTrue(\n np.all(\n np.logical_or(\n np.isclose(input1 - converted, 0),\n np.isclose(input1 + converted, 0),\n )\n )\n )", "def _untransform(self, X: Tensor) -> Tensor:\n pass # pragma: no cover", "def to_design(transformed_data) -> Tuple[jnp.ndarray, Optional[jnp.ndarray]]:\n X = np.concatenate(\n [transformed_data.X_cat, transformed_data.X_num], axis=1\n )\n y = transformed_data.y\n if y is not None:\n assert X.shape[0] == len(transformed_data.y)\n return X, y", "def check_dimensions(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):\n\tassert 4 <= len(X.dims) <= 5, 'XCast requires a dataset to be 4-Dimensional'\n\tassert x_lat_dim in X.dims, 'XCast requires a dataset_lat_dim to be a dimension on X'\n\tassert x_lon_dim in X.dims, 'XCast requires a dataset_lon_dim to be a dimension on X'\n\tassert x_sample_dim in X.dims, 'XCast requires a dataset_sample_dim to be a dimension on X'\n\tassert x_feature_dim in X.dims, 'XCast requires a dataset_feature_dim to be a dimension on X'", "def test_example(self, example_dataset, expected_result):\n\n transformer = PreprocessFeatures()\n result = transformer.fit_transform(example_dataset)\n\n assert (result == expected_result).all()", "def _check_column_or_1d(y, context=\"\"):\n CHANGE = False\n try:\n s = tuple(np.shape(y))\n except Exception as e:\n raise ValueError(\"%sCould not get shape of y. \"\n \"y should be an ndarray or scipy sparse csr \"\n \"/csc matrix of shape (n_samples, ). Got %s.\"\n \"Details:\\n%r\" % (context, type(y), e))\n\n if len(s) == 0:\n raise ValueError(\"%sy is empty: y = %r.\" % (context, y))\n\n if len(s) == 2 and s[1] == 1:\n CHANGE = True\n warnings.warn(\"%sA column-vector y was passed when a 1d array was\"\n \" expected. Change the shape of y to \"\n \"(n_samples, ), for example using ravel().\" % context,\n InputDataWarning)\n\n if len(s) == 2 and s[1] > 1:\n CHANGE = True\n warnings.warn(\"%sA matrix y was passed for as for labels. 
\"\n \"Most estimators expect a one dimensional label vector.\"\n \"Consider changing the shape of y to (n_samples, ).\" %\n context, InputDataWarning)\n\n return CHANGE", "def __extract_xy_test(self, colmns):\n self.X_test_raw = self.test_data[colmns].as_matrix()\n n = self.X_test_raw.shape[0]\n self.X_test_raw = np.hstack((self.X_test_raw,\n np.ones(shape=(n, 1))))", "def fit_transform(self, x: Array2D) -> Array2D:", "def test_triangular_checks(self):\n A = np.random.rand(10, 10)\n MA = to_matrix(A)\n L, U = MA.decomposeLU()\n self.assertTrue(L.is_lower_triangular())\n self.assertTrue(U.is_upper_triangular())", "def transform(self, X: Tensor) -> Tensor:\n return X[..., self.feature_indices]", "def _check_transformation_matrix_homogeneity(self):\n transformation_matrices_similar = True # assume they are all similar\n first = True\n rows = None\n cols = None\n for transform in self:\n if first:\n rows = transform.rows\n cols = transform.cols\n first = False\n else:\n if transform.rows != rows or transform.cols != cols:\n transformation_matrices_similar = False\n break\n return transformation_matrices_similar, rows, cols", "def _check_support(X: np.ndarray, **kwargs) -> None:\n\n assert ((X == 0) | (X == 1)).all(), \"x should be equal to 0 or 1.\"", "def transpose():", "def test_interface(transform, example_tsds: TSDataset):\n start_columnns = example_tsds.columns\n example_tsds.fit_transform(transforms=[transform])\n assert np.all(start_columnns == example_tsds.columns)", "def test_x(self):\n g = gca()\n lines = g.get_lines() \n self.assertEquals(lines[0].get_xdata().tolist(), [3, 5, 5, 3, 3])", "def inverse_transform(self, X):\n check_is_fitted(self)\n n_feature = X.shape[1]\n if n_feature != self.n_total_features_:\n raise ValueError(\n \"The number of features in the input array ({}) does not match\"\n \" the total number of features in the multiview dataset\"\n \" ({})\".format(n_feature, self.n_total_features_)\n )\n\n return np.split(X, np.cumsum(self.n_features_)[:-1], axis=1)", "def fit_andor_transform(X_train, X_test):\n fit_transformed_df = mapper.fit_transform(X_train)\n\n X_np = np.asarray(fit_transformed_df)\n X_np_test = np.asarray(mapper.transform(X_test))\n\n return X_np, X_np_test", "def preprocess_data(X, Y):\n X = K.applications.xception.preprocess_input(X)\n Y = K.utils.to_categorical(Y, 10)\n return X, Y", "def transpose(self):\n pass", "def getxsys(xy):\n X_train = xy[0]\n X_test = xy[1]\n y_train = xy[2]\n y_test = xy[3]\n return X_train, X_test, y_train, y_test # Outside the for loop to optimise processing", "def getxsys(xy):\n X_train = xy[0]\n X_test = xy[1]\n y_train = xy[2]\n y_test = xy[3]\n return X_train, X_test, y_train, y_test # Outside the for loop to optimise processing", "def transp(self, x1, x2, d):\n raise NotImplementedError", "def check_wf_2d(ψ: ndarray):\n pass", "def _swap_xy_data(data_obj):\n swaps = [\n (\"x\", \"y\"),\n (\"x0\", \"y0\"),\n (\"dx\", \"dy\"),\n (\"xbins\", \"ybins\"),\n (\"nbinsx\", \"nbinsy\"),\n (\"autobinx\", \"autobiny\"),\n (\"error_x\", \"error_y\"),\n ]\n for swap in swaps:\n _swap_keys(data_obj, swap[0], swap[1])\n try:\n rows = len(data_obj[\"z\"])\n cols = len(data_obj[\"z\"][0])\n for row in data_obj[\"z\"]:\n if len(row) != cols:\n raise TypeError\n\n # if we can't do transpose, we hit an exception before here\n z = data_obj.pop(\"z\")\n data_obj[\"z\"] = [[0 for rrr in range(rows)] for ccc in range(cols)]\n for iii in range(rows):\n for jjj in range(cols):\n data_obj[\"z\"][jjj][iii] = z[iii][jjj]\n except (KeyError, 
TypeError, IndexError) as err:\n warn = False\n try:\n if data_obj[\"z\"] is not None:\n warn = True\n if len(data_obj[\"z\"]) == 0:\n warn = False\n except (KeyError, TypeError):\n pass\n if warn:\n warnings.warn(\n \"Data in this file required an 'xy' swap but the 'z' matrix \"\n \"in one of the data objects could not be transposed. Here's \"\n \"why:\\n\\n{}\".format(repr(err))\n )", "def _validate_features_in_predict_input(self, result):\n pass", "def _check_and_transform_input(self, data):\n if isinstance(data, list):\n if np.array(data).shape == (len(data),):\n if len(data) == 1:\n data = np.array(data).reshape(1, 1)\n data = np.array(data).reshape(len(data), 1)\n else:\n data = np.concatenate(data).reshape(len(data), -1)\n else:\n raise TypeError('Input data should be of type list, but found type {}'.format(type(data)))\n\n return data", "def transform(self, X):\n X = check_array(X, accept_sparse='csc', dtype=np.object, copy=True)\n n_samples, n_features = X.shape\n X_int = np.zeros_like(X, dtype=np.int)\n X_mask = np.ones_like(X, dtype=np.bool)\n\n for i in range(n_features):\n valid_mask = np.in1d(X[:, i], self.categories_[i])\n\n if not np.all(valid_mask):\n if self.handle_unknown == 'error':\n diff = np.unique(X[~valid_mask, i])\n msg = (\"Found unknown categories {0} in column {1}\"\n \" during transform\".format(diff, i))\n raise ValueError(msg)\n else:\n # Set the problematic rows to an acceptable value and\n # continue `The rows are marked `X_mask` and will be\n # removed later.\n X_mask[:, i] = valid_mask\n X[:, i][~valid_mask] = self.categories_[i][0]\n X_int[:, i] = self._label_encoders_[i].transform(X[:, i])\n\n if self.encoding == 'ordinal':\n return X_int.astype(self.dtype, copy=False)\n\n mask = X_mask.ravel()\n n_values = [cats.shape[0] for cats in self.categories_]\n n_values = np.array([0] + n_values)\n indices = np.cumsum(n_values)\n\n column_indices = (X_int + indices[:-1]).ravel()[mask]\n row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),\n n_features)[mask]\n data = np.ones(n_samples * n_features)[mask]\n\n out = sparse.csc_matrix((data, (row_indices, column_indices)),\n shape=(n_samples, indices[-1]),\n dtype=self.dtype).tocsr()\n if self.encoding == 'onehot-dense':\n return out.toarray()\n else:\n return out", "def transform(self, X):\n X = check_array(X, accept_sparse='csc', dtype=np.object, copy=True)\n n_samples, n_features = X.shape\n X_int = np.zeros_like(X, dtype=np.int)\n X_mask = np.ones_like(X, dtype=np.bool)\n\n for i in range(n_features):\n valid_mask = np.in1d(X[:, i], self.categories_[i])\n\n if not np.all(valid_mask):\n if self.handle_unknown == 'error':\n diff = np.unique(X[~valid_mask, i])\n msg = (\"Found unknown categories {0} in column {1}\"\n \" during transform\".format(diff, i))\n raise ValueError(msg)\n else:\n # Set the problematic rows to an acceptable value and\n # continue `The rows are marked `X_mask` and will be\n # removed later.\n X_mask[:, i] = valid_mask\n X[:, i][~valid_mask] = self.categories_[i][0]\n X_int[:, i] = self._label_encoders_[i].transform(X[:, i])\n\n if self.encoding == 'ordinal':\n return X_int.astype(self.dtype, copy=False)\n\n mask = X_mask.ravel()\n n_values = [cats.shape[0] for cats in self.categories_]\n n_values = np.array([0] + n_values)\n indices = np.cumsum(n_values)\n\n column_indices = (X_int + indices[:-1]).ravel()[mask]\n row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),\n n_features)[mask]\n data = np.ones(n_samples * n_features)[mask]\n\n out = sparse.csc_matrix((data, 
(row_indices, column_indices)),\n shape=(n_samples, indices[-1]),\n dtype=self.dtype).tocsr()\n if self.encoding == 'onehot-dense':\n return out.toarray()\n else:\n return out", "def transform(self, X):\n X = check_array(X, accept_sparse='csc', dtype=np.object, copy=True)\n n_samples, n_features = X.shape\n X_int = np.zeros_like(X, dtype=np.int)\n X_mask = np.ones_like(X, dtype=np.bool)\n\n for i in range(n_features):\n valid_mask = np.in1d(X[:, i], self.categories_[i])\n\n if not np.all(valid_mask):\n if self.handle_unknown == 'error':\n diff = np.unique(X[~valid_mask, i])\n msg = (\"Found unknown categories {0} in column {1}\"\n \" during transform\".format(diff, i))\n raise ValueError(msg)\n else:\n # Set the problematic rows to an acceptable value and\n # continue `The rows are marked `X_mask` and will be\n # removed later.\n X_mask[:, i] = valid_mask\n X[:, i][~valid_mask] = self.categories_[i][0]\n X_int[:, i] = self._label_encoders_[i].transform(X[:, i])\n\n if self.encoding == 'ordinal':\n return X_int.astype(self.dtype, copy=False)\n\n mask = X_mask.ravel()\n n_values = [cats.shape[0] for cats in self.categories_]\n n_values = np.array([0] + n_values)\n indices = np.cumsum(n_values)\n\n column_indices = (X_int + indices[:-1]).ravel()[mask]\n row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),\n n_features)[mask]\n data = np.ones(n_samples * n_features)[mask]\n\n out = sparse.csc_matrix((data, (row_indices, column_indices)),\n shape=(n_samples, indices[-1]),\n dtype=self.dtype).tocsr()\n if self.encoding == 'onehot-dense':\n return out.toarray()\n else:\n return out", "def transform(self, X):\n X = check_array(X, accept_sparse='csc', dtype=np.object, copy=True)\n n_samples, n_features = X.shape\n X_int = np.zeros_like(X, dtype=np.int)\n X_mask = np.ones_like(X, dtype=np.bool)\n\n for i in range(n_features):\n valid_mask = np.in1d(X[:, i], self.categories_[i])\n\n if not np.all(valid_mask):\n if self.handle_unknown == 'error':\n diff = np.unique(X[~valid_mask, i])\n msg = (\"Found unknown categories {0} in column {1}\"\n \" during transform\".format(diff, i))\n raise ValueError(msg)\n else:\n # Set the problematic rows to an acceptable value and\n # continue `The rows are marked `X_mask` and will be\n # removed later.\n X_mask[:, i] = valid_mask\n X[:, i][~valid_mask] = self.categories_[i][0]\n X_int[:, i] = self._label_encoders_[i].transform(X[:, i])\n\n if self.encoding == 'ordinal':\n return X_int.astype(self.dtype, copy=False)\n\n mask = X_mask.ravel()\n n_values = [cats.shape[0] for cats in self.categories_]\n n_values = np.array([0] + n_values)\n indices = np.cumsum(n_values)\n\n column_indices = (X_int + indices[:-1]).ravel()[mask]\n row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),\n n_features)[mask]\n data = np.ones(n_samples * n_features)[mask]\n\n out = sparse.csc_matrix((data, (row_indices, column_indices)),\n shape=(n_samples, indices[-1]),\n dtype=self.dtype).tocsr()\n if self.encoding == 'onehot-dense':\n return out.toarray()\n else:\n return out", "def predict(self, X: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:\n raise NotImplementedError" ]
[ "0.60901403", "0.6069965", "0.57867116", "0.57052827", "0.56909966", "0.5635825", "0.5586665", "0.54974854", "0.54878813", "0.5452838", "0.5424291", "0.54226655", "0.54185116", "0.5416405", "0.5388056", "0.5387666", "0.53499806", "0.5334254", "0.5315664", "0.5304115", "0.52881277", "0.52755845", "0.52741414", "0.521712", "0.51826435", "0.5177065", "0.5177065", "0.5163846", "0.51504445", "0.5141156", "0.5139852", "0.5139784", "0.5133639", "0.5130892", "0.5129782", "0.51247984", "0.5120001", "0.5118638", "0.5088717", "0.5086587", "0.5079924", "0.5074171", "0.5073036", "0.50700885", "0.50608516", "0.5058675", "0.50561446", "0.5052608", "0.50410813", "0.50404644", "0.50292903", "0.5027261", "0.5027261", "0.5021798", "0.50122184", "0.50122184", "0.5011049", "0.5005311", "0.49994195", "0.49890393", "0.4977358", "0.497626", "0.49719197", "0.49692068", "0.49687898", "0.49460182", "0.49423754", "0.4924287", "0.49188864", "0.49165833", "0.49064088", "0.49062818", "0.4883835", "0.48749086", "0.48691392", "0.48634878", "0.48560932", "0.48556224", "0.48521167", "0.4848957", "0.4847124", "0.48449358", "0.48400646", "0.48315948", "0.4828285", "0.48257765", "0.48249957", "0.48134655", "0.48027796", "0.48027796", "0.47983992", "0.47923508", "0.47858834", "0.47849607", "0.4784842", "0.47828028", "0.47828028", "0.47828028", "0.47828028", "0.4779232" ]
0.7853445
0
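A minimal, runnable sketch of the sparse one-hot trick that appears in the encoder snippets of the record above (each feature's integer codes are shifted into their own block of columns before building the sparse matrix). The toy codes and category cardinalities below are illustrative assumptions, not values taken from this record.

import numpy as np
from scipy import sparse

# Toy integer-encoded input: 4 samples, 2 categorical features
X_int = np.array([[0, 1],
                  [2, 0],
                  [1, 1],
                  [0, 2]])
n_samples, n_features = X_int.shape

n_values = np.array([0, 3, 3])      # assumed cardinalities: 3 categories per feature
indices = np.cumsum(n_values)       # column offset of each feature's block: [0, 3, 6]

# Shift each feature's codes by its offset so every feature owns a distinct column block
column_indices = (X_int + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32), n_features)
data = np.ones(n_samples * n_features)

out = sparse.csc_matrix((data, (row_indices, column_indices)),
                        shape=(n_samples, indices[-1])).tocsr()
print(out.toarray())                # dense 4 x 6 one-hot matrix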
Checks that X is 4D, with Dimension Names as specified by x_lat_dim, x_lon_dim, x_sample_dim, and x_feature_dim
def check_dimensions(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim): assert 4 <= len(X.dims) <= 5, 'XCast requires a dataset to be 4-Dimensional' assert x_lat_dim in X.dims, 'XCast requires a dataset_lat_dim to be a dimension on X' assert x_lon_dim in X.dims, 'XCast requires a dataset_lon_dim to be a dimension on X' assert x_sample_dim in X.dims, 'XCast requires a dataset_sample_dim to be a dimension on X' assert x_feature_dim in X.dims, 'XCast requires a dataset_feature_dim to be a dimension on X'
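A usage sketch for the dimension check in the document above, assuming xarray and numpy are installed; the dimension names ('latitude', 'longitude', 'time', 'feature') and array sizes are illustrative assumptions, not part of the record.

import numpy as np
import xarray as xr

def check_dimensions(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):
    # Same checks as the record's document: X must be 4- or 5-dimensional
    # and carry all four named dimensions.
    assert 4 <= len(X.dims) <= 5, 'XCast requires a dataset to be 4-Dimensional'
    assert x_lat_dim in X.dims, 'XCast requires a dataset_lat_dim to be a dimension on X'
    assert x_lon_dim in X.dims, 'XCast requires a dataset_lon_dim to be a dimension on X'
    assert x_sample_dim in X.dims, 'XCast requires a dataset_sample_dim to be a dimension on X'
    assert x_feature_dim in X.dims, 'XCast requires a dataset_feature_dim to be a dimension on X'

# Illustrative 4-D DataArray: 3 latitudes x 4 longitudes x 10 samples x 2 features
X = xr.DataArray(
    np.random.randn(3, 4, 10, 2),
    dims=('latitude', 'longitude', 'time', 'feature'),
    coords={'latitude': [10., 20., 30.],
            'longitude': [0., 90., 180., 270.],
            'time': np.arange(10),
            'feature': ['precip', 'temp']},
)
check_dimensions(X, 'latitude', 'longitude', 'time', 'feature')   # passes silently
# A 3-D array (e.g. with the sample dimension dropped) fails the first assertion.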
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_coords(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):\n\tassert x_lat_dim in X.coords.keys(), 'XCast requires a dataset_lat_dim to be a coordinate on X'\n\tassert x_lon_dim in X.coords.keys(), 'XCast requires a dataset_lon_dim to be a coordinate on X'\n\tassert x_sample_dim in X.coords.keys(), 'XCast requires a dataset_sample_dim to be a coordinate on X'\n\tassert x_feature_dim in X.coords.keys(), 'XCast requires a dataset_feature_dim to be a coordinate on X'", "def check_all(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):\n\tcheck_dimensions(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\tcheck_coords(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\tcheck_consistent(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\tcheck_type(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\t#check_transposed(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)", "def dimension_check():\n print(\"### DIMENSION CHECK ###\")\n print(X.shape,\n y.shape,\n X_train.shape,\n y_train.shape,\n X_test.shape,\n y_test.shape,\n weights.shape)\n print(\"### END ###\")", "def check_consistent(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):\n\tassert X.shape[list(X.dims).index(x_lat_dim)] == len(X.coords[x_lat_dim].values), \"XCast requires a dataset's x_lat_dim coordinate to be the same length as its x_lat_dim dimension\"\n\tassert X.shape[list(X.dims).index(x_lon_dim)] == len(X.coords[x_lon_dim].values), \"XCast requires a dataset's x_lon_dim coordinate to be the same length as its x_lon_dim dimension\"\n\tassert X.shape[list(X.dims).index(x_sample_dim)] == len(X.coords[x_sample_dim].values), \"XCast requires a dataset's x_sample_dim coordinate to be the same length as its x_sample_dim dimension\"\n\tassert X.shape[list(X.dims).index(x_feature_dim)] == len(X.coords[x_feature_dim].values), \"XCast requires a dataset's x_feature_dim coordinate to be the same length as its x_feature_dim dimension\"", "def _check_shape(input_shape):\n msg = ('Input to SpatialExpansion must be 4D with dimensions: '\n '(n_observations, n_spatial_0, n_spatial_1, n_features), '\n 'but received shape: {}'.format(input_shape))\n assert len(input_shape) == 4, msg", "def _check_dimensions(self) -> None:\n dims = (self.y_dim, self.x_dim)\n da = self._obj[self.vars[0]] if isinstance(self._obj, xr.Dataset) else self._obj\n extra_dims = [dim for dim in da.dims if dim not in dims]\n if len(extra_dims) == 1:\n dims = tuple(extra_dims) + dims\n self.set_attrs(dim0=extra_dims[0])\n elif len(extra_dims) == 0:\n self._obj.coords[GEO_MAP_COORD].attrs.pop(\"dim0\", None)\n elif len(extra_dims) > 1:\n raise ValueError(\"Only 2D and 3D data arrays supported.\")\n if isinstance(self._obj, xr.Dataset):\n check = np.all([self._obj[name].dims == dims for name in self.vars])\n else:\n check = self._obj.dims == dims\n if check == False:\n raise ValueError(\n f\"Invalid dimension order ({da.dims}). 
\"\n f\"You can use `obj.transpose({dims}) to reorder your dimensions.\"\n )", "def check_input_dimension(self, data):\n if len(data[0]) != self.input_dimension:\n raise ValueError(\"Received {} features, expected {}.\".format(self.input_dimension, len(data[0])))", "def check_transposed(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):\n\tassert list(X.dims).index(x_lat_dim) == 0, 'XCast requires a dataset to be transposed to LAT x LON x SAMPLE x FEATURE'\n\tassert list(X.dims).index(x_lon_dim) == 1, 'XCast requires a dataset to be transposed to LAT x LON x SAMPLE x FEATURE'\n\tassert list(X.dims).index(x_sample_dim) == 2, 'XCast requires a dataset to be transposed to LAT x LON x SAMPLE x FEATURE'\n\tassert list(X.dims).index(x_feature_dim) == 3, 'XCast requires a dataset to be transposed to LAT x LON x SAMPLE x FEATURE'", "def check_type(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):\n\tassert type(X) == xr.DataArray, 'XCast requires a dataset to be of type \"Xarray.DataArray\"'", "def _check_shape(self, X):\n return all([X.shape[i] == self.train_shape[i] for i in range(2)])", "def _check_shape(input_shape):\n msg = ('Input to FlattenAxis must be 5D with dimensions: '\n '(n_observations, n_spatial_0, n_spatial_1, n_temporal, '\n 'n_features), but received shape: {}'.format(input_shape))\n assert len(input_shape) == 5, msg", "def test_dimension_mapping(self):\n fh = NetCDF4()\n\n with tempfile.TemporaryDirectory() as tdir:\n tfile = os.path.join(tdir, 'testfile')\n before = xr.Dataset({\n \"var1\": (\"dim1\", np.arange(5)),\n \"group1/var1\": (\"group1/dim1\", np.arange(5)),\n \"group1/var2\": (\"group1/dim2\", np.arange(5)),\n \"group1/subgroup1/var1\":\n (\"group1/subgroup1/dim1\", np.arange(5)),\n \"group1/subgroup1/var2\":\n (\"group1/subgroup1/dim2\", np.arange(5)),\n \"group2/var1\": (\"group2/dim1\", np.arange(5)),\n \"group2/subgroup1/var1\":\n (\"group1/subgroup1/dim1\", np.arange(5)),\n \"group3/var1\": (\"group3/dim1\", np.arange(10)),\n }, coords={\n \"dim1\": (\"dim1\", np.arange(5)),\n \"group1/dim1\": (\"group1/dim1\", np.arange(5))\n })\n\n # Save the dataset and load it again:\n fh.write(before, tfile)\n after = fh.read(tfile)\n\n # How it should be after loading:\n check = xr.Dataset({\n \"var1\": (\"dim1\", np.arange(5)),\n \"group1/var1\": (\"group1/dim1\", np.arange(5)),\n \"group1/var2\": (\"group1/dim2\", np.arange(5)),\n \"group1/subgroup1/var1\": (\"group1/dim1\", np.arange(5)),\n \"group1/subgroup1/var2\": (\"group1/dim2\", np.arange(5)),\n \"group2/var1\": (\"dim1\", np.arange(5)),\n \"group2/subgroup1/var1\": (\"dim1\", np.arange(5)),\n \"group3/var1\": (\"group3/dim1\", np.arange(10)),\n }, coords={\n \"dim1\": (\"dim1\", np.arange(5)),\n \"group1/dim1\": (\"group1/dim1\", np.arange(5))\n })\n\n assert after.equals(check)", "def has_dims(xobj, dims, kind):\n if isinstance(dims, str):\n dims = [dims]\n\n if not all(dim in xobj.dims for dim in dims):\n raise DimensionError(\n f'Your {kind} object must contain the '\n f'following dimensions at the minimum: {dims}'\n )\n return True", "def test_check_X_too_many_dims():\n with pytest.raises(ValueError):\n check_X(np.ones((5,4,3)))", "def test_shapes(self):\n\n # Creates a raw layer\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.raw.var.drop(\"feature_is_filtered\", axis=1, inplace=True)\n self.validator.adata.X = examples.adata_non_raw.X.copy()\n self.validator.adata.uns[\"X_normalization\"] = \"CPM\"\n\n # remove one gene\n self.validator.adata = self.validator.adata[:, 1:]\n 
self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\"ERROR: Number of genes in X (3) is different than raw.X (4).\"],\n )", "def check_dimensions(d_real, d_fake, d_real_logits, d_fake_logits):\n def _check_pair(a, b):\n if a != b:\n raise ValueError(\"Shape mismatch: %s vs %s.\" % (a, b))\n if len(a) != 2 or len(b) != 2:\n raise ValueError(\"Rank: expected 2, got %s and %s\" % (len(a), len(b)))\n\n if (d_real is not None) and (d_fake is not None):\n _check_pair(d_real.shape.as_list(), d_fake.shape.as_list())\n if (d_real_logits is not None) and (d_fake_logits is not None):\n _check_pair(d_real_logits.shape.as_list(), d_fake_logits.shape.as_list())\n if (d_real is not None) and (d_real_logits is not None):\n _check_pair(d_real.shape.as_list(), d_real_logits.shape.as_list())", "def _check_dimensions(self, workspace_to_check):\n for i in range(self._raw_ws.getNumDims()):\n if self._raw_ws.getDimension(i).getNBins() != workspace_to_check._raw_ws.getDimension(i).getNBins():\n return False\n return True", "def dims(x):\n return len(x.shape)", "def check_has_dims(hdr):\n try:\n return (hdr['startX'], hdr['startY'])\n except KeyError:\n return False", "def _validate_dimensionality(self):\r\n\r\n if self.time.ndim != 1:\r\n raise ValueError(\"time array must be one-dimensional\")\r\n npoints = self.data.shape[-1]\r\n if npoints != len(self.time):\r\n raise ValueError(\"mismatch of time and data dimensions\")", "def to_4d(x):\n xsh = np.asarray(x.get_shape().as_list())\n return tf.reshape(x, [xsh[0], xsh[1], xsh[2], np.prod(xsh[3:])])", "def nd_shape_checking(x, y, mvaxis, traxis):\n assert x.ndim == y.ndim\n dims = np.delete(np.arange(x.ndim), -2)\n assert all([x.shape[k] == y.shape[k] for k in dims])", "def assertIsNifti4D(*args):\n for f in args:\n assertIsNifti(f)\n d = ensure.ensureIsImage(f)\n assert len(d.shape) == 4, \\\n 'incorrect shape for 4D nifti: {}:{}'.format(d.shape, f)", "def test_get_dimension(self):\n\n v = Vector({ 'x': 1 })\n self.assertEqual(1, v.dimensions['x'])", "def do_grid_check(self,):\n self.ydim, self.xdim = self.data_fcst.shape \n if self.data_obs.shape != (self.ydim,self.xdim):\n raise FormatError(\"Obs and forecast data not same size.\")\n return", "def feature_dim(self):\n raise NotImplementedError", "def test_size_check(self):\n [x1, y1, s1, g1] = self.data.diffusion_data.shape\n [x2, y2, s2, g2] = module_05.run_module(self.data).diffusion_data.shape\n self.assertEqual(x1, x2)\n self.assertEqual(y1, y2)\n self.assertEqual(s1, s2)\n self.assertEqual(g1, g2)", "def x_dim(self) -> str:\n if self.get_attrs(\"x_dim\") not in self._obj.dims:\n self.set_spatial_dims()\n return self.attrs[\"x_dim\"]", "def is_float4x4(items):\n return len(items) == 4 and all(len(item) == 4 and all(isinstance(i, float) for i in item) for item in items)", "def test_slice_other_dimension(self):\n for i, shape in enumerate([(3, 0), (1, 2, 0), (2, 0, 1)]):\n dset = self.f.create_dataset('x%d'%i, shape, dtype=int, maxshape=(None,)*len(shape))\n self.assertEqual(dset.shape, shape)\n out = dset[:1]\n self.assertIsInstance(out, np.ndarray)\n self.assertEqual(out.shape, (1,)+shape[1:])", "def check_dim(gr, DIM):\n l = len(gr)\n if(l != DIM):\n return False\n\n for i in range(0, DIM):\n if(len(gr[i]) != l):\n return False \n return True", "def valid_ndim_assertion(expected_dimentions, actual_dimention, name):\n\tassert (actual_dimention in expected_dimentions), \"Invalid ndim of {} should be {}\".format(name, str(expected_dimentions))", "def _validate_X_predict(self, 
X):\n # X = check_array(X, ensure_2d=False)\n X = np.atleast_2d(X)\n n_features = X.shape[1]\n if self.n_features_in_ != n_features:\n raise ValueError(\n f\"Number of features of the model must match the input. Model n_features_in_ is {self.n_features_in_} and input n_features is {n_features}. Reshape your data.\"\n )", "def check_dims(self, data):\n if np.ndim(data) != 2:\n raise ValueError('Input data must be a two dimensional numpy array. '\n 'Data received has shape (%g, %g).' % data.shape)", "def convert_1d_to_3d(data_X, data_Y):\n\n data_X = data_X.tocsr()\n \n data_dim_x = [] # slices along x-axis (has shape of (total_trials * dim_x, dim_z, dim_y))\n data_dim_x_label = [] # contains (total_trials * dim_x) labels\n data_dim_y = [] # slices along y-axis (has shape of (total_trials * dim_y, dim_z, dim_x))\n data_dim_y_label = [] # contains (total_trials * dim_y) labels\n data_dim_z = [] # slices along z-axis (has shape of (total_trials * dim_z, dim_y, dim_x))\n data_dim_z_label = [] # contains (total_trials * dim_z) labels\n\n for num_trial in range(data_X.shape[0]):\n label = data_Y[num_trial]\n data_1d = data_X[num_trial]\n data_3d = np.squeeze(np.asarray(data_1d.todense())).reshape((dim_z, dim_y, dim_x))\n for x in range(dim_x):\n x_slice = data_3d[:,:,x]\n # append only if the slice is not empty \n if x_slice.sum() != 0:\n data_dim_x.append(data_3d[:, :, x])\n data_dim_x_label.append(label)\n for y in range(dim_y):\n y_slice = data_3d[:, y, :]\n if y_slice.sum() != 0:\n data_dim_y.append(data_3d[:, y, :])\n data_dim_y_label.append(label)\n for z in range(dim_z):\n z_slice = data_3d[:, :, z]\n if z_slice.sum() != 0:\n data_dim_z.append(data_3d[z, :, :])\n data_dim_z_label.append(label)\n\n return np.array(data_dim_x), np.array(data_dim_x_label), \\\n np.array(data_dim_y), np.array(data_dim_y_label), \\\n np.array(data_dim_z), np.array(data_dim_z_label)", "def xdim(self):\n return len(self._x)", "def is_dimension_error(self):\n return self._tag == 'dimension_error'", "def _check_shape(input_shape):\n msg = ('Input to SpatioTemporalExpansion must be 5D with dimensions: '\n '(n_observations, n_spatial_0, n_spatial_1, n_temporal, '\n 'n_features), but received shape: {}'.format(input_shape))\n assert len(input_shape) == 5, msg", "def _check_shape(placeholder_shape, data_shape):\n\n return True", "def dims(self) -> tuple[str, str]:\n # if self.dim0 is not None:\n return self.y_dim, self.x_dim", "def test_slice_other_dimension(setup_teardown_file):\n f = setup_teardown_file[3]\n\n for i, shape in enumerate([(3, 0), (1, 2, 0), (2, 0, 1)]):\n dset = f.create_dataset('x%d'%i, shape, dtype=np.int32)\n assert dset.shape == shape\n out = dset[:1]\n assert isinstance(out, np.ndarray)\n assert out.shape == (1,)+shape[1:]", "def spatial_dimension(self):\r\n pass", "def is3D(data):\n return data.find(\"x3\") != -1 and data.find(\"y3\") != -1 and data.find(\"z3\") != -1", "def _verify_data(inputs, targets):\n check_value_type('inputs', inputs, Tensor)\n if len(inputs.shape) != 4:\n raise ValueError(f'Argument inputs must be 4D Tensor, but got {len(inputs.shape)}D Tensor.')\n check_value_type('targets', targets, (Tensor, int, tuple, list))\n if isinstance(targets, Tensor):\n if len(targets.shape) > 2:\n raise ValueError('Dimension invalid. If `targets` is a Tensor, it should be 0D, 1D or 2D. '\n 'But got {}D.'.format(len(targets.shape)))\n if targets.shape and len(targets) != len(inputs):\n raise ValueError(\n 'If `targets` is a 2D, 1D Tensor, it should have the same length as inputs {}. 
But got {}.'.format(\n len(inputs), len(targets)))", "def _autocheck_dimensions(self):\n # W dimensions check list\n assert len(self.W.shape) == 2, f\"W shape should be (N, N) but is {self.W.shape}.\"\n assert self.W.shape[0] == self.W.shape[1], f\"W shape should be (N, N) but is {self.W.shape}.\"\n\n # Win dimensions check list\n assert len(self.Win.shape) == 2, f\"Win shape should be (N, input) but is {self.Win.shape}.\"\n err = f\"Win shape should be ({self.W.shape[1]}, input) but is {self.Win.shape}.\"\n assert self.Win.shape[0] == self.W.shape[0], err\n\n # Wout dimensions check list\n assert len(self.Wout.shape) == 2, f\"Wout shape should be (output, nb_states) but is {self.Wout.shape}.\"\n nb_states = self.Win.shape[1] + self.W.shape[0] + 1 if self.use_raw_inp else self.W.shape[0] + 1\n err = f\"Wout shape should be (output, {nb_states}) but is {self.Wout.shape}.\"\n assert self.Wout.shape[1] == nb_states, err\n\n # Wfb dimensions check list\n if self.Wfb is not None:\n assert len(self.Wfb.shape) == 2, f\"Wfb shape should be (input, output) but is {self.Wfb.shape}.\"\n err = f\"Wfb shape should be ({self.Win.shape[0]}, {self.Wout.shape[0]}) but is {self.Wfb.shape}.\"\n assert (self.Win.shape[0],self.Wout.shape[0]) == self.Wfb.shape, err", "def test_sample_from_extra_bounds_good(self):\n dim = Real(\"yolo\", \"norm\", 0, 2, low=-5, high=+5, shape=(4, 4))\n for _ in range(8):\n samples = dim.sample(8)\n for sample in samples:\n assert sample in dim", "def test_feature_format(X):\r\n print(\"test_feature_format()...\", end = \"\")\r\n for row in range(len(X)):\r\n for col in range(len(X[0])):\r\n assert (isinstance(X[row][col], float) == True)\r\n print(\"Passed!\")", "def required_input_dim(space: gym.Space, **kwargs) -> int:", "def __check_2d_and_reshape(X):\n if len(X.shape) == 1:\n X = np.reshape(X, (-1, X.shape[0]))\n return X", "def ndims(x):\n return len(x.get_shape())", "def check_param(self):\n check_tuple = (\"float16\", \"float32\", \"int32\")\n check_shape(self.shape_x, param_name=\"x\")\n check_shape(self.shape_indices, param_name=\"indices\")\n check_shape(self.shape_v, param_name=\"v\")\n check_dtype(self.dtype_x, check_tuple, param_name=\"x\")\n check_dtype(self.dtype_indices, (\"int32\",), param_name=\"indices\")\n check_dtype(self.dtype_v, check_tuple, param_name=\"v\")\n if len(self.shape_x) != len(self.shape_v):\n raise RuntimeError(\"The number of dimension x must\"\n \" be same as dimension v\")\n\n if self.shape_v[0] != self.shape_indices[0]:\n raise RuntimeError(\"The length of rank 0 of tensor v must\"\n \" be the same as length of indices\")\n\n if len(self.shape_indices) != 1:\n raise RuntimeError(\"The length of indices only support 1\")\n for i in range(1, len(self.shape_v)):\n if self.shape_x[i] != self.shape_v[i]:\n if not self.check_special():\n raise RuntimeError(\"The length of each rank of tensor x\"\n \" must be the same as length of\"\n \" each or next rank of tensor v\")", "def __DimSiz_restriction_correct_ndarray_ndarray4(self):\n\n strTestName = 'The size of a dimension of a Numpy array higher or equal to the size of a dimension of another Numpy array (4) (correct)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('aParameter1', 'Numpy array parameter')\n RxCSObject.paramType('aParameter1', np.ndarray)\n\n # Now, let us define a Numpy Array\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramDimHE('parameter1', 
'aParameter1', 'pages', 'columns', mul=2)\n\n RxCSObject.parameter1 = np.random.randn(2, 3, 4) # * 5 pages, 3 rows, 4 columns\n RxCSObject.aParameter1 = np.random.randn(4, 3, 1) # 4 pages, 3 rows, * 1 column\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def correct_dims(self, X):\n N = tf.shape(X)[0]\n N_slice = tf.reduce_prod(self.coord_feed.dims[1:])\n return tf.concat([[tf.math.floordiv(N, N_slice)], self.coord_feed.dims[1:]], axis=0)", "def _sanity_check_datasource(ds):\n if len(ds) != 1:\n raise SanityCheckError('GeoJSON should have only 1 layer.')\n # TODO: add more checks", "def validate_ndarray(ndarray, expected_dtypes, expected_dimentions, name):\n\tvalid_dtype_assertion(expected_dtypes, ndarray.dtype, name)\n\tvalid_ndim_assertion(expected_dimentions, ndarray.ndim, name)", "def _4d_tensor(self, name, tensor, **kwargs):\r\n h, w, c, num_tensor = tensor.shape.as_list()\r\n\r\n # Try to visualise convolutional filters or feature maps\r\n # See: https://gist.github.com/kukuruza/03731dc494603ceab0c5\r\n # input shape: (Y, X, C, N)\r\n if c != 1 and c != 3:\r\n tensor = tf.reduce_mean(tensor, axis=2, keep_dims=True)\r\n c = 1\r\n # shape is now: (Y, X, 1|C, N)\r\n v_min = tf.reduce_min(tensor)\r\n v_max = tf.reduce_max(tensor)\r\n tensor -= v_min\r\n tensor *= 1.0 / (v_max - v_min)\r\n tensor = tf.pad(tensor, [[1, 0], [1, 0], [0, 0], [0, 0]], 'CONSTANT')\r\n tensor = tf.transpose(tensor, perm=(3, 0, 1, 2))\r\n # shape is now: (N, Y, X, C)\r\n # place tensor on grid\r\n num_tensor_x = int(np.round(np.sqrt(num_tensor)))\r\n num_tensor_y = num_tensor / num_tensor_x\r\n while not num_tensor_y.is_integer():\r\n num_tensor_x += 1\r\n num_tensor_y = num_tensor / num_tensor_x\r\n num_tensor_y = int(num_tensor_y)\r\n h += 1\r\n w += 1\r\n tensor = tf.reshape(tensor, (num_tensor_x, h * num_tensor_y, w, c))\r\n # shape is now: (N_x, Y * N_y, X, c)\r\n tensor = tf.transpose(tensor, (0, 2, 1, 3))\r\n # shape is now: (N_x, X, Y * N_y, c)\r\n tensor = tf.reshape(tensor, (1, w * num_tensor_x, h * num_tensor_y, c))\r\n # shape is now: (1, X * N_x, Y * N_y, c)\r\n tensor = tf.transpose(tensor, (0, 2, 1, 3))\r\n # shape is now: (1, Y * N_y, X * N_x, c)\r\n tensor = tf.pad(tensor, [[0, 0], [0, 1], [0, 1], [0, 0]], 'CONSTANT')\r\n\r\n self.image(name, tensor, **kwargs)", "def _squeeze_dims(ds):\n ds = ds.squeeze()\n for dim in ['lon', 'lat', 'bnds', 'depth', 'depth_2', 'depth_3']:\n if dim in ds:\n if ds[dim].size <= 1:\n del ds[dim]\n drop = []\n for dim in [\n 'hyai', 'hybi', 'hyam', 'hybm', 'time_bnds', 'lat_bnds', 'lon_bnds'\n ]:\n if dim in ds:\n drop.append(dim)\n ds = ds.drop(drop)\n return ds.squeeze()", "def is_4d(self):\n return self._4d", "def _validate_dimensions(config):\n logging.info(\"Checking provided dimensions are valid\")\n for feature in config.get(\"test-suites\").values():\n for test_name, test in feature.items():\n for dimensions_config in test[\"dimensions\"]:\n _validate_schedulers(config, dimensions_config.get(\"schedulers\", []))\n if [] in dimensions_config.values():\n logging.error(\"Values assigned to dimensions in test %s cannot be empty\", test_name)\n raise AssertionError", "def _check_no_nonzero_features(self,\n features: np.ndarray,\n dropped_idx: np.ndarray,\n axis: Optional[int] = None):\n if axis is not None:\n # Dropped node.\n features_of_dropped_nodes = np.take(features, dropped_idx, axis=axis)\n self.assertEqual(0, np.nonzero(features_of_dropped_nodes)[0].shape[0])\n self.assertEqual(0, np.nonzero(features_of_dropped_nodes)[1].shape[0])\n 
else:\n # Dropped edge.\n features_of_dropped_edge = features[dropped_idx[0], dropped_idx[1]]\n self.assertEqual(0, np.nonzero(features_of_dropped_edge)[0].shape[0])", "def getDimensions():", "def check_niimg_4d(niimg, return_iterator=False, dtype=None):\n return check_niimg(\n niimg, ensure_ndim=4, return_iterator=return_iterator, dtype=dtype\n )", "def ndim(self):\n return self.X.ndim", "def ensure_dims(array: xr.DataArray, *dimensions: Hashable) -> xr.DataArray:\n missing_dims = set(dimensions) - set(array.dims)\n\n new_dims = defaultdict(list)\n for coord in missing_dims:\n cdim_tuple = array.coords[coord].dims\n\n if len(cdim_tuple) > 1:\n raise ValueError('Multi dimensional coordinates are not supported')\n\n cdim = cdim_tuple[0]\n\n new_dims[cdim].append(coord)\n\n for dim, coords in new_dims.items():\n array = array.set_index({cdim: tuple(coords)}) # type: ignore[assignment]\n\n if len(coords) > 1:\n array = array.unstack(dim)\n\n return array.drop_vars(array.coords.keys() - set(array.dims))", "def InferSpatialDimension(self):\n\n assert self.points is not None\n # if self.points.shape[1] == 3:\n # if self.element_type == \"tri\" or self.element_type == \"quad\":\n # print(\"3D surface mesh of \", self.element_type)\n\n return self.points.shape[1]", "def test_return_shape(self):\n print(\"Testing that get_region_data return values are the correct shape\")\n\n test = get_region_data(self.wmo_boxes, self.float_name, self.config,\n self.index, self.pres)\n\n self.assertTrue(test[0].shape == test[1].shape == test[2].shape,\n \"salinity, pressure, and potential temperature \"\n \"arrays should be same shape\")\n self.assertTrue(test[3].shape == test[4].shape == test[5].shape,\n \"longitude, latitude, and date arrays should be the same shape\")\n self.assertTrue(test[0].shape[1] == test[1].shape[1] == test[2].shape[1] ==\n test[3].shape[0] == test[4].shape[0] == test[5].shape[0] ==\n self.index.__len__(),\n \"Should get the same number of casts as we have asked for\")", "def data_info(data):\n filename = data[\"filename\"]\n X_var = data[\"X_var\"]\n Y_var = data[\"Y_var\"]\n X,Y = read_file(filename,X_var,Y_var)\n input_dim = len(X_var)\n output_dim = len(Y_var)\n return X,Y,input_dim,output_dim", "def has_dimension(self, dim):\n\n return self.units.dimensions == dim", "def __DimSiz_restriction_incorrect_ndarray_ndarray4(self):\n\n strTestName = 'The size of a dimension of a Numpy array higher than the size of a dimension of another Numpy array (4) (incorrect)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('aParameter1', 'Numpy array parameter')\n RxCSObject.paramType('aParameter1', np.ndarray)\n\n # Now, let us define a Numpy Array\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramDimH('parameter1', 'aParameter1', 'pages', 'columns', mul=2)\n\n RxCSObject.parameter1 = np.random.randn(2, 3, 4) # * 5 pages, 3 rows, 4 columns\n RxCSObject.aParameter1 = np.random.randn(4, 3, 1) # 4 pages, 3 rows, * 1 column\n\n self.__parametersCheck_error(RxCSObject, DimSizError, strTestName)", "def _validate_input_data(self):\n\n if type(self.data) in [np.ndarray, da.Array]:\n if not self.data.dtype.names:\n raise ValueError('QuadTree: numpy array provided for data, but no names were found, array must be a structured array')\n if 'x' not in self.data.dtype.names or 'y' not in self.data.dtype.names:\n raise ValueError('QuadTree: numpy structured array provided for 
data, but \"x\" or \"y\" not found in variable names')\n self.layernames = [self.rev_layer_lookup[var] for var in self.data.dtype.names if var in ['z', 'tvu']]\n elif type(self.data) == xr.Dataset:\n if 'x' not in self.data:\n raise ValueError('QuadTree: xarray Dataset provided for data, but \"x\" or \"y\" not found in variable names')\n if len(self.data.dims) > 1:\n raise ValueError('QuadTree: xarray Dataset provided for data, but found multiple dimensions, must be one dimensional: {}'.format(self.data.dims))\n self.layernames = [self.rev_layer_lookup[var] for var in self.data if var in ['z', 'tvu']]\n self._convert_dataset() # internally we just convert xarray dataset to numpy for ease of use\n else:\n raise ValueError('QuadTree: numpy structured array or dask array with \"x\" and \"y\" as variable must be provided')", "def set_spatial_dims(self, x_dim=None, y_dim=None) -> None:\n _dims = list(self._obj.dims)\n # Switch to lower case to compare to XDIMS and YDIMS\n _dimslow = [d.lower() for d in _dims]\n if x_dim is None:\n for dim in XDIMS:\n if dim in _dimslow:\n idim = _dimslow.index(dim)\n x_dim = _dims[idim]\n break\n if x_dim and x_dim in _dims:\n self.set_attrs(x_dim=x_dim)\n else:\n raise ValueError(\n \"x dimension not found. Use 'set_spatial_dims'\"\n + \" functions with correct x_dim argument provided.\"\n )\n\n if y_dim is None:\n for dim in YDIMS:\n if dim in _dimslow:\n idim = _dimslow.index(dim)\n y_dim = _dims[idim]\n break\n if y_dim and y_dim in _dims:\n self.set_attrs(y_dim=y_dim)\n else:\n raise ValueError(\n \"y dimension not found. Use 'set_spatial_dims'\"\n + \" functions with correct y_dim argument provided.\"\n )\n\n check_x = np.all(np.isclose(np.diff(np.diff(self._obj[x_dim])), 0, atol=1e-4))\n check_y = np.all(np.isclose(np.diff(np.diff(self._obj[y_dim])), 0, atol=1e-4))\n if check_x == False or check_y == False:\n raise ValueError(\"raster only applies to regular grids\")", "def test_11_dataset(self, example):\n example.zonenumbers_i = np.array([100, 200, 300])\n example.groupnames_g = np.array(['Female', 'Male'], dtype='O')\n example.create_ds()\n assert isinstance(example.ds, xr.Dataset)\n for attr, dtype in example.dtypes.items():\n data_array = example.ds[attr]\n # test if the shapes are correct\n if dtype.shape:\n np.testing.assert_array_equal(dtype.get_shape(example),\n data_array.shape,\n 'shape not correct')\n else:\n # not initialized array\n assert not np.any(data_array.shape), 'shape not initialized'\n #test if the datatypes are correct\n assert np.dtype(dtype.dtype) == data_array.dtype, 'dtype not correct'\n print(example.ds)", "def testgradsdim(self):\r\n assert self.data.grads.shape == (len(self.data.geovalues),self.data.natom,3)", "def get_feature_multitask_dim(X, y, sample_dim):\n # check if y has a multitask dim, i.e., y(sample, multitask)\n mt_dim = [x for x in y.dims if x != sample_dim]\n # check if X has a feature dim, i.e., X(sample, regressors)\n feature_dim = [x for x in X.dims if x != sample_dim]\n if mt_dim:\n mt_dim = mt_dim[0]\n if feature_dim:\n feature_dim = feature_dim[0]\n return feature_dim, mt_dim", "def test_vector_dimensions(self):\r\n # crear una lista 1-D (Horizontal, Entradas). 
\r\n Z = [1, 2, 3, 4, 5]\r\n # crear una lista 1-D (Vertical, Pesos de la red).\r\n W = [10, 20, 30]\r\n # Notemos que las dimensiones de Z y W son diferentes.\r\n try:\r\n neuron = rhonn(W, Z)\r\n except ValueError as e:\r\n # Comprobamos que efectivamente hay un error en las dimensiones.\r\n self.assertEqual(type(e), ValueError)\r\n else:\r\n self.fail('El error no fue lanzado.')", "def check_qim_dim_match(cls, qim, dim):\n return len(qim) == len(dim)", "def dimensions():", "def test_dimensions(self):\n\t\t\n\t\t# default\t\n\t\tdetails = self.watcher.describe()\n\t\tprint(details)\n\t\t\n\t\t# default\t\n\t\tdetails = self.watcher.describe(layers=[self.first_layer])\n\t\tprint(details)\n\t\tN = details.N.to_numpy()[0]\n\t\tM = details.M.to_numpy()[0]\n\t\trf = details.rf.to_numpy()[0]\n\t\tnum_evals = details.num_evals.to_numpy()[0]\n\t\tprint(N,M,rf,num_evals)\n\t\t\n\t\tself.assertEqual(N,64)\n\t\tself.assertEqual(M,3)\n\t\tself.assertEqual(rf,9)\n\t\tself.assertEqual(num_evals,M*rf)", "def _validate_raster_attributes(self, x):\r\n if self.extent != x.extent:\r\n raise ValueError(\"Extents do not match.\")\r\n if self.resolution != x.resolution:\r\n raise ValueError(\"Resolutions do not match.\")\r\n if not np.array_equal(self.x, x.x):\r\n raise ValueError(\"x attributes do not match.\")\r\n if not np.array_equal(self.y, x.y):\r\n raise ValueError(\"y attributes do not match.\")\r\n if len(self.layers) != len(x.layers):\r\n raise ValueError(\"layers lengths do not match.\")\r\n if self.crs != x.crs:\r\n raise ValueError(\"crs attributes do not match.\")", "def is_full_dimensional(self):\n\n return self.affine_dimension() == self.space_dimension()", "def _handle_input_data(data):\n data = np.asarray(data)\n if np.ndim(data) == 1:\n d_rows = 1\n d_cols = len(data)\n data = data.reshape((1, data.shape[0]))\n elif np.ndim(data) == 2:\n d_rows = data.shape[0]\n d_cols = data.shape[1]\n else:\n raise ValueError(\"Incorrect dimensionality of data. 
Must be <= 2\")\n return data, d_rows, d_cols", "def test_get_non_existent_dimension(self):\n\n v = Vector({ })\n self.assertEqual(0, v.dimensions['x'])", "def test_reduce_dimensionality(base_bertopic, embeddings, shape):\n umap_embeddings = base_bertopic._reduce_dimensionality(embeddings)\n assert umap_embeddings.shape == (shape, 5)", "def enforce_2D(self,):\n for data in (self.data_obs,self.data_fcst):\n shp = data.shape\n if len(shp) == 2:\n pass \n elif len(shp) == 3:\n if shp[0] == 0:\n data = data[0,:,:]\n elif len(shp) == 4:\n if (shp[0] == 0) and (shp[1] == 0):\n data = data[0,0,:,:]\n else:\n raise FormatError(\"Data needs to be 2D.\")\n return", "def test_load_return_shape(size):\n X_train, X_test, y_train, y_test = src.load(train_size=size)\n num_samples = 1797\n assert X_train.shape == (int(num_samples * size), 64)\n assert X_test.shape == (int(num_samples * (1 - size)) + 1, 64)", "def test_nrows_gtiff_array(self):\n self.assertEqual(_test_array(landsat_gtiff).shape[1], 224)", "def test_point_within_dimensions_true():\n point = np.array([10, 20])\n image_dimensions = np.array([100, 100])\n assert point_within_dimensions(point, image_dimensions)", "def is_dimensionless(value):\n ureg = get_unit_registry()\n\n # Ensure the provided value is in the right format\n value = ureg.Quantity(value)\n\n if value.units == ureg.dimensionless:\n return True\n\n if value.to_base_units().units == ureg.dimensionless:\n return True\n\n # At this point, the value is not dimensionless\n return False", "def compute_dims_from_values(self, x):\n return ((),)", "def _is_ragged_in_1st_dim_only(value: Union[np.ndarray, list]) -> bool:\n if isinstance(value, np.ndarray) and value.dtype != np.dtype(\"O\"):\n return False\n else:\n\n def extract_dims(v):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n s = np.shape(v)\n return s[0], s[1:]\n\n dim1, dim_other = zip(*map(extract_dims, value))\n return len(set(dim1)) > 1 and len(set(dim_other)) == 1", "def feature_dim(self):\n return feature_dim_from_test_system(self)", "def dataDimensions(data):\n logging.info('Number of rows of data: %s' % len(data))\n logging.info('Number of columns of data: %s' % len(data[1]))", "def _verify_space(self) -> None:\n\n for dimension in self.space.values():\n\n if dimension.type != \"fidelity\" and dimension.prior_name not in [\n \"uniform\",\n \"reciprocal\",\n \"int_uniform\",\n \"int_reciprocal\",\n \"choices\",\n ]:\n raise ValueError(\n \"TPE now only supports uniform, loguniform, uniform discrete \"\n f\"and choices as prior: {dimension.prior_name}\"\n )\n\n shape = dimension.shape\n if shape and len(shape) != 1:\n raise ValueError(\"TPE now only supports 1D shape.\")", "def extract_features(self, preprocessed_inputs):\n preprocessed_inputs.get_shape().assert_has_rank(4)\n shape_assert = tf.Assert(\n tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33),\n tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)),\n ['image size must at least be 33 in both height and width.'])\n\n feature_map_layout = {\n 'from_layer': ['conv4', '', '', '', '', '', ''],\n 'layer_depth': [-1, 1024, 1024, 512, 256, 256, 256],\n }\n\n with tf.control_dependencies([shape_assert]):\n with slim.arg_scope(self._conv_hyperparams):\n with tf.variable_scope('vgg_16',\n reuse=self._reuse_weights) as scope:\n net, image_features = vgg.vgg_16_base(\n preprocessed_inputs,\n final_endpoint='pool5',\n trainable=False,\n scope=scope)\n feature_maps = feature_map_generators.multi_resolution_feature_maps(\n 
feature_map_layout=feature_map_layout,\n depth_multiplier=self._depth_multiplier,\n min_depth=self._min_depth,\n insert_1x1_conv=True,\n image_features=image_features)\n\n return feature_maps.values()", "def check_dimension(dim, meta, trace=False):\n if dim == \"..\":\n meta[\"dimension\"] = declast.AssumedRank()\n meta[\"assumed-rank\"] = True\n else:\n meta[\"dimension\"] = declast.ExprParser(dim, trace=trace).dimension_shape()", "def _validate_XY(X, Y):\n try:\n for inp in [X, Y]:\n assert isinstance(inp, torch.Tensor)\n assert inp.dtype is torch.float or inp.dtype is torch.double\n assert len(inp.shape) == 2\n assert X.dtype is Y.dtype\n assert X.shape[0] == Y.shape[0]\n except AssertionError:\n raise AttributeError(\n \"invalid inputs: X and Y should be float/double tensors of shape \"\n \"(n, d) and (n, m) respectively, where n is the number of samples, \"\n \"d is the number of features, and m is the number of outputs\"\n )", "def dims(self):\n return self.v.dims() # TODO: check (empty? etc)\n #return self.t.shape # TODO: check (empty? etc)\n # TODO: convert to tuple? here / in varset?", "def num_flat_features(self, x):\n\n size = x.size()[1:] # All dimensions except batch dimension\n num_features = 1\n for s in size:\n num_features *= s\n\n return num_features", "def reshape_tensor3d(self, x):\n if self.dim_ordering == 'th':\n tx = K.reshape(x, (-1, self.nb_filter, self.cols * self.rows))\n else:\n tx = K.reshape(x, (-1, self.cols * self.rows, self.nb_filter))\n tx = K.transpose(tx, (0,2,1))\n if self.cov_mode == 'channel' or self.cov_mode =='mean' or self.cov_mode =='pmean':\n return tx\n else:\n return K.transpose(tx, (0,2,1))", "def graph_x_dimensionality(self) -> int:\n return int(self.graph_tuple_stats.graph_x_dimensionality or 0)" ]
[ "0.6943855", "0.6864585", "0.6622508", "0.6591821", "0.647431", "0.63964295", "0.6374549", "0.6134668", "0.6122223", "0.5947658", "0.583848", "0.57911134", "0.57625794", "0.565753", "0.56400055", "0.5597365", "0.5592346", "0.5584553", "0.55217546", "0.5503743", "0.54942316", "0.54282486", "0.54202354", "0.5416537", "0.54095894", "0.53979105", "0.5296823", "0.5296293", "0.5294601", "0.5280257", "0.5275774", "0.525948", "0.5255702", "0.5248666", "0.52479786", "0.52447927", "0.5239938", "0.5237693", "0.52326185", "0.5221572", "0.52167714", "0.51924205", "0.51915824", "0.51897764", "0.51884186", "0.5175526", "0.51725227", "0.5168001", "0.5152627", "0.5148296", "0.51481515", "0.51422393", "0.5140656", "0.5125904", "0.51190066", "0.51134723", "0.5109444", "0.5109009", "0.5108309", "0.51066744", "0.50996214", "0.50926805", "0.50794566", "0.5077341", "0.5074515", "0.5069468", "0.5066421", "0.5049463", "0.50479126", "0.50476456", "0.5038031", "0.5033236", "0.50250417", "0.50234425", "0.50041026", "0.5001055", "0.49932718", "0.49931994", "0.49879134", "0.49838728", "0.4982043", "0.49812627", "0.49782857", "0.49721366", "0.49623793", "0.49620947", "0.49536392", "0.49511153", "0.49468622", "0.49425", "0.49413726", "0.49376738", "0.49347225", "0.49206644", "0.4918907", "0.49180654", "0.4908697", "0.49080676", "0.4906908", "0.49063003" ]
0.7832379
0
Checks that X has coordinates named as specified by x_lat_dim, x_lon_dim, x_sample_dim, and x_feature_dim
def check_coords(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim): assert x_lat_dim in X.coords.keys(), 'XCast requires a dataset_lat_dim to be a coordinate on X' assert x_lon_dim in X.coords.keys(), 'XCast requires a dataset_lon_dim to be a coordinate on X' assert x_sample_dim in X.coords.keys(), 'XCast requires a dataset_sample_dim to be a coordinate on X' assert x_feature_dim in X.coords.keys(), 'XCast requires a dataset_feature_dim to be a coordinate on X'
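A short usage sketch for the coordinate check above, reusing the illustrative layout from the previous sketch; the dimension names and the unlabelled test array are assumptions for demonstration only.

import numpy as np
import xarray as xr

def check_coords(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):
    # Same checks as the record's document: every named dimension must also
    # be present as a coordinate on X.
    assert x_lat_dim in X.coords.keys(), 'XCast requires a dataset_lat_dim to be a coordinate on X'
    assert x_lon_dim in X.coords.keys(), 'XCast requires a dataset_lon_dim to be a coordinate on X'
    assert x_sample_dim in X.coords.keys(), 'XCast requires a dataset_sample_dim to be a coordinate on X'
    assert x_feature_dim in X.coords.keys(), 'XCast requires a dataset_feature_dim to be a coordinate on X'

# A DataArray that has the dimensions but no coordinate labels fails this check,
# while the fully-labelled array from the previous sketch passes it.
X_unlabelled = xr.DataArray(np.zeros((2, 2, 2, 2)),
                            dims=('lat', 'lon', 'sample', 'feature'))
try:
    check_coords(X_unlabelled, 'lat', 'lon', 'sample', 'feature')
except AssertionError as err:
    print('coordinate check failed:', err)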
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_all(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):\n\tcheck_dimensions(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\tcheck_coords(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\tcheck_consistent(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\tcheck_type(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\t#check_transposed(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)", "def check_dimensions(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):\n\tassert 4 <= len(X.dims) <= 5, 'XCast requires a dataset to be 4-Dimensional'\n\tassert x_lat_dim in X.dims, 'XCast requires a dataset_lat_dim to be a dimension on X'\n\tassert x_lon_dim in X.dims, 'XCast requires a dataset_lon_dim to be a dimension on X'\n\tassert x_sample_dim in X.dims, 'XCast requires a dataset_sample_dim to be a dimension on X'\n\tassert x_feature_dim in X.dims, 'XCast requires a dataset_feature_dim to be a dimension on X'", "def check_consistent(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):\n\tassert X.shape[list(X.dims).index(x_lat_dim)] == len(X.coords[x_lat_dim].values), \"XCast requires a dataset's x_lat_dim coordinate to be the same length as its x_lat_dim dimension\"\n\tassert X.shape[list(X.dims).index(x_lon_dim)] == len(X.coords[x_lon_dim].values), \"XCast requires a dataset's x_lon_dim coordinate to be the same length as its x_lon_dim dimension\"\n\tassert X.shape[list(X.dims).index(x_sample_dim)] == len(X.coords[x_sample_dim].values), \"XCast requires a dataset's x_sample_dim coordinate to be the same length as its x_sample_dim dimension\"\n\tassert X.shape[list(X.dims).index(x_feature_dim)] == len(X.coords[x_feature_dim].values), \"XCast requires a dataset's x_feature_dim coordinate to be the same length as its x_feature_dim dimension\"", "def check_type(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):\n\tassert type(X) == xr.DataArray, 'XCast requires a dataset to be of type \"Xarray.DataArray\"'", "def check_input_dimension(self, data):\n if len(data[0]) != self.input_dimension:\n raise ValueError(\"Received {} features, expected {}.\".format(self.input_dimension, len(data[0])))", "def is_x(self, var):\n x_list = ['lon', 'longitude', 'LONGITUDE', 'Longitude', 'x']\n\n if self.get_units(var) == 'degrees_east':\n return True\n if self.get_name(var) in x_list:\n return True\n if self.get_description(var) in x_list:\n return True\n else:\n return False", "def _check_dimensions(self) -> None:\n dims = (self.y_dim, self.x_dim)\n da = self._obj[self.vars[0]] if isinstance(self._obj, xr.Dataset) else self._obj\n extra_dims = [dim for dim in da.dims if dim not in dims]\n if len(extra_dims) == 1:\n dims = tuple(extra_dims) + dims\n self.set_attrs(dim0=extra_dims[0])\n elif len(extra_dims) == 0:\n self._obj.coords[GEO_MAP_COORD].attrs.pop(\"dim0\", None)\n elif len(extra_dims) > 1:\n raise ValueError(\"Only 2D and 3D data arrays supported.\")\n if isinstance(self._obj, xr.Dataset):\n check = np.all([self._obj[name].dims == dims for name in self.vars])\n else:\n check = self._obj.dims == dims\n if check == False:\n raise ValueError(\n f\"Invalid dimension order ({da.dims}). 
\"\n f\"You can use `obj.transpose({dims}) to reorder your dimensions.\"\n )", "def check_transposed(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):\n\tassert list(X.dims).index(x_lat_dim) == 0, 'XCast requires a dataset to be transposed to LAT x LON x SAMPLE x FEATURE'\n\tassert list(X.dims).index(x_lon_dim) == 1, 'XCast requires a dataset to be transposed to LAT x LON x SAMPLE x FEATURE'\n\tassert list(X.dims).index(x_sample_dim) == 2, 'XCast requires a dataset to be transposed to LAT x LON x SAMPLE x FEATURE'\n\tassert list(X.dims).index(x_feature_dim) == 3, 'XCast requires a dataset to be transposed to LAT x LON x SAMPLE x FEATURE'", "def _check_input(self, X):\n symbols = np.concatenate(X)\n if len(symbols) == 1: # not enough data\n raise ValueError(\"expected at least 1 observation \"\n \"but none found.\")\n elif (symbols < 0).any(): # contains negative integers\n raise ValueError(\"expected non-negative features \"\n \"for each observation.\")\n elif X.shape[1] > 1: # contains to many features\n raise ValueError(\"expected only 1 feature but got {0} \"\n \"for each observation.\".format(X.shape[1]))\n else:\n return True", "def _check_shape(self, X):\n return all([X.shape[i] == self.train_shape[i] for i in range(2)])", "def check_has_dims(hdr):\n try:\n return (hdr['startX'], hdr['startY'])\n except KeyError:\n return False", "def _validate_raster_attributes(self, x):\r\n if self.extent != x.extent:\r\n raise ValueError(\"Extents do not match.\")\r\n if self.resolution != x.resolution:\r\n raise ValueError(\"Resolutions do not match.\")\r\n if not np.array_equal(self.x, x.x):\r\n raise ValueError(\"x attributes do not match.\")\r\n if not np.array_equal(self.y, x.y):\r\n raise ValueError(\"y attributes do not match.\")\r\n if len(self.layers) != len(x.layers):\r\n raise ValueError(\"layers lengths do not match.\")\r\n if self.crs != x.crs:\r\n raise ValueError(\"crs attributes do not match.\")", "def _check_shape(input_shape):\n msg = ('Input to SpatialExpansion must be 4D with dimensions: '\n '(n_observations, n_spatial_0, n_spatial_1, n_features), '\n 'but received shape: {}'.format(input_shape))\n assert len(input_shape) == 4, msg", "def has_dims(xobj, dims, kind):\n if isinstance(dims, str):\n dims = [dims]\n\n if not all(dim in xobj.dims for dim in dims):\n raise DimensionError(\n f'Your {kind} object must contain the '\n f'following dimensions at the minimum: {dims}'\n )\n return True", "def check_inputs(x_unlabeled, x_labeled, y_labeled, y_true):\n if x_unlabeled is None:\n if x_labeled is None:\n raise Exception(\"No data, labeled or unlabeled, passed to check_inputs!\")\n x_unlabeled = x_labeled[0:0]\n if x_labeled is not None and y_labeled is not None:\n pass\n elif x_labeled is None and y_labeled is None:\n x_labeled = x_unlabeled[0:0]\n y_shape = y_true.get_shape()[1 : K.ndim(y_true)].as_list()\n y_labeled = np.empty([0] + y_shape)\n else:\n raise Exception(\"x_labeled and y_labeled must both be None or have a value\")\n return x_unlabeled, x_labeled, y_labeled", "def _sanity_check_datasource(ds):\n if len(ds) != 1:\n raise SanityCheckError('GeoJSON should have only 1 layer.')\n # TODO: add more checks", "def test_feature_values(iris, name, x_feature, y_feature, x_vals, y_vals):\n iris.x_feature = x_feature\n iris.y_feature = y_feature\n assert iris.title == \"{} x {}\".format(x_feature, y_feature)\n data = iris.sources[name].data\n np.testing.assert_array_almost_equal(data[\"x\"][:2], x_vals)\n np.testing.assert_array_almost_equal(data[\"y\"][:2], y_vals)", "def 
test_check_X_too_many_dims():\n with pytest.raises(ValueError):\n check_X(np.ones((5,4,3)))", "def validate_xy(x_train, y_train):\n try:\n x_train = x_train.astype('float64')\n except ValueError:\n raise ValueError('x_train should only contain numerical data.')\n\n if len(x_train.shape) < 2:\n raise ValueError('x_train should at least has 2 dimensions.')\n\n if x_train.shape[0] != y_train.shape[0]:\n raise ValueError('x_train and y_train should have the same number of instances.')", "def dimension_check():\n print(\"### DIMENSION CHECK ###\")\n print(X.shape,\n y.shape,\n X_train.shape,\n y_train.shape,\n X_test.shape,\n y_test.shape,\n weights.shape)\n print(\"### END ###\")", "def _check_shape(input_shape):\n msg = ('Input to FlattenAxis must be 5D with dimensions: '\n '(n_observations, n_spatial_0, n_spatial_1, n_temporal, '\n 'n_features), but received shape: {}'.format(input_shape))\n assert len(input_shape) == 5, msg", "def set_spatial_dims(self, x_dim=None, y_dim=None) -> None:\n _dims = list(self._obj.dims)\n # Switch to lower case to compare to XDIMS and YDIMS\n _dimslow = [d.lower() for d in _dims]\n if x_dim is None:\n for dim in XDIMS:\n if dim in _dimslow:\n idim = _dimslow.index(dim)\n x_dim = _dims[idim]\n break\n if x_dim and x_dim in _dims:\n self.set_attrs(x_dim=x_dim)\n else:\n raise ValueError(\n \"x dimension not found. Use 'set_spatial_dims'\"\n + \" functions with correct x_dim argument provided.\"\n )\n\n if y_dim is None:\n for dim in YDIMS:\n if dim in _dimslow:\n idim = _dimslow.index(dim)\n y_dim = _dims[idim]\n break\n if y_dim and y_dim in _dims:\n self.set_attrs(y_dim=y_dim)\n else:\n raise ValueError(\n \"y dimension not found. Use 'set_spatial_dims'\"\n + \" functions with correct y_dim argument provided.\"\n )\n\n check_x = np.all(np.isclose(np.diff(np.diff(self._obj[x_dim])), 0, atol=1e-4))\n check_y = np.all(np.isclose(np.diff(np.diff(self._obj[y_dim])), 0, atol=1e-4))\n if check_x == False or check_y == False:\n raise ValueError(\"raster only applies to regular grids\")", "def xcoords(self) -> xr.IndexVariable:\n xcoords = self._obj[self.x_dim]\n if self.x_dim not in self._obj.coords:\n for key in list(self._obj.coords.keys()):\n if key.startswith(self.x_dim):\n xcoords = self._obj.coords[key]\n break\n if xcoords.ndim == 2 and list(xcoords.dims).index(self.x_dim) != 1:\n raise ValueError(\n \"Invalid raster: dimension order wrong. 
Fix using\"\n f'\".transpose(..., {self.y_dim}, {self.x_dim})\"'\n )\n if xcoords.size < 2 or (xcoords.ndim == 2 and xcoords.shape[1] < 2):\n raise ValueError(f\"Invalid raster: less than 2 cells in x_dim {self.x_dim}\")\n return xcoords", "def is_resolution_and_offset_str(x):\n if x.count('x') == 1 and x.count('+') == 2:\n return True\n return False", "def _check_data_point(cube, metadata):\n point_index = []\n\n for dim_length in cube.shape:\n point_index.append(int(random.random() * dim_length))\n\n point_index = tuple(point_index)\n\n try:\n point_cube = cube[point_index]\n _data_point = point_cube.data\n except Exception:\n msg = 'Unable to extract data point {} from file: {}'.format(\n point_index, metadata['basename'])\n raise FileValidationError(msg)\n else:\n return True", "def _check_dimensions(self, workspace_to_check):\n for i in range(self._raw_ws.getNumDims()):\n if self._raw_ws.getDimension(i).getNBins() != workspace_to_check._raw_ws.getDimension(i).getNBins():\n return False\n return True", "def validate_data_format(\n Xs: List[Tensor], Ys: List[Tensor], Yvars: List[Tensor], metric_names: List[str]\n) -> None:\n if len({len(Xs), len(Ys), len(Yvars), len(metric_names)}) > 1:\n raise ValueError( # pragma: no cover\n \"Lengths of Xs, Ys, Yvars, and metric_names must match. Your \"\n f\"inputs have lengths {len(Xs)}, {len(Ys)}, {len(Yvars)}, and \"\n f\"{len(metric_names)}, respectively.\"\n )", "def validate_ndarray(ndarray, expected_dtypes, expected_dimentions, name):\n\tvalid_dtype_assertion(expected_dtypes, ndarray.dtype, name)\n\tvalid_ndim_assertion(expected_dimentions, ndarray.ndim, name)", "def check_bounds(x, param_name):\n for i in range(len(x)):\n if ((xmin[param_name][i] is not None and x[i] < xmin[param_name][i]) or\n (xmax[param_name][i] is not None and x[i] > xmax[param_name][i])):\n return False\n return True", "def _check_shape(placeholder_shape, data_shape):\n\n return True", "def do_grid_check(self,):\n self.ydim, self.xdim = self.data_fcst.shape \n if self.data_obs.shape != (self.ydim,self.xdim):\n raise FormatError(\"Obs and forecast data not same size.\")\n return", "def _check_no_nonzero_features(self,\n features: np.ndarray,\n dropped_idx: np.ndarray,\n axis: Optional[int] = None):\n if axis is not None:\n # Dropped node.\n features_of_dropped_nodes = np.take(features, dropped_idx, axis=axis)\n self.assertEqual(0, np.nonzero(features_of_dropped_nodes)[0].shape[0])\n self.assertEqual(0, np.nonzero(features_of_dropped_nodes)[1].shape[0])\n else:\n # Dropped edge.\n features_of_dropped_edge = features[dropped_idx[0], dropped_idx[1]]\n self.assertEqual(0, np.nonzero(features_of_dropped_edge)[0].shape[0])", "def check_feature(feature, expected_name='Adriatic Sea',\n expected_type='Polygon'):\n assert feature['properties']['name'] == expected_name\n assert feature['properties']['component'] == 'ocean'\n assert feature['geometry']['type'] == expected_type", "def check_metadata(layer_name, neuron_indices, ideal_activation,\n multiply_by_input):\n\n error_checking.assert_is_string(layer_name)\n error_checking.assert_is_integer_numpy_array(neuron_indices)\n error_checking.assert_is_geq_numpy_array(neuron_indices, 0)\n error_checking.assert_is_numpy_array(neuron_indices, num_dimensions=1)\n error_checking.assert_is_not_nan(ideal_activation)\n error_checking.assert_is_boolean(multiply_by_input)", "def getxsys(xy):\n X_train = xy[0]\n X_test = xy[1]\n y_train = xy[2]\n y_test = xy[3]\n return X_train, X_test, y_train, y_test # Outside the for loop to optimise 
processing", "def getxsys(xy):\n X_train = xy[0]\n X_test = xy[1]\n y_train = xy[2]\n y_test = xy[3]\n return X_train, X_test, y_train, y_test # Outside the for loop to optimise processing", "def _has_coordinates_and_gradient(self) -> bool:\n return self._coords is not None and self._coords.g is not None", "def has_datapoint_with_dim(fake_ingest, key, value):\n return has_datapoint_with_all_dims(fake_ingest, {key: value})", "def test_shapes(self):\n\n # Creates a raw layer\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.raw.var.drop(\"feature_is_filtered\", axis=1, inplace=True)\n self.validator.adata.X = examples.adata_non_raw.X.copy()\n self.validator.adata.uns[\"X_normalization\"] = \"CPM\"\n\n # remove one gene\n self.validator.adata = self.validator.adata[:, 1:]\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\"ERROR: Number of genes in X (3) is different than raw.X (4).\"],\n )", "def _validate_columns(data, ip_column, lat_column, long_column, other_columns):\n if not ip_column and not (lat_column and long_column):\n raise ValueError(\n \"Data must have either an IpAddress ('ip_column')\",\n \"or latitude ('lat_column') and longitude ('long_column')\",\n )\n param_cols: List[str] = []\n for param in other_columns:\n if not param:\n continue\n if isinstance(param, list):\n param_cols.extend(param)\n else:\n param_cols.append(param)\n missing_columns = {col for col in param_cols if col not in data.columns}\n if missing_columns:\n raise LookupError(\n \"The following columns are not in the supplied DataFrame\",\n \",\".join(f\"'{col}'\" for col in missing_columns),\n )", "def has_datapoint_with_all_dims(fake_ingest, dims):\n for datapoint in fake_ingest.datapoints:\n if has_all_dims(datapoint, dims):\n return True\n return False", "def test_point_within_dimensions_true():\n point = np.array([10, 20])\n image_dimensions = np.array([100, 100])\n assert point_within_dimensions(point, image_dimensions)", "def test_sanity_check (self):\n X, Y = self.dm.get_data(std=True, lag_indicator=True)\n\n # Ensure number of rows between what we expect.\n row_bound = (800, 1000)\n actual_rows = X.shape[0]\n msg = 'Number of rows not within expected bounds.'\n self.assertTrue(row_bound[0] < actual_rows < row_bound[1], msg)\n\n msg = 'X and Y have different number of rows.'\n self.assertEqual(X.shape[0], Y.shape[0], msg)\n\n # Ensure X columns match.\n expected_x_cols = ['SP500', 'ltc_px_std', 'xrp_px_std', 'xlm_px_std',\n 'eth_px_std', 'btc_px_std', 'ltc_volume_std',\n 'xrp_volume_std', 'xlm_volume_std', 'eth_volume_std',\n 'btc_volume_std', 'lagged_others']\n actual_x_cols = X.columns.tolist()\n msg = 'Number of X columns different than expected.'\n self.assertEqual(len(actual_x_cols), len(expected_x_cols), msg)\n\n for col in expected_x_cols:\n msg = 'Expected column not found: {}'.format(col)\n self.assertTrue(col in actual_x_cols, msg)", "def __contains__(self, point):\n for component, dim in zip(point, self.dimensions):\n if component not in dim:\n return False\n return True", "def _check_shape(input_shape):\n msg = ('Input to SpatioTemporalExpansion must be 5D with dimensions: '\n '(n_observations, n_spatial_0, n_spatial_1, n_temporal, '\n 'n_features), but received shape: {}'.format(input_shape))\n assert len(input_shape) == 5, msg", "def contains(self, x):\n # need more to assure its a real SSP - ie on right torus\n return (len(x) == self._shape[0])", "def _validate_X_predict(self, X):\n # X = check_array(X, ensure_2d=False)\n X = 
np.atleast_2d(X)\n n_features = X.shape[1]\n if self.n_features_in_ != n_features:\n raise ValueError(\n f\"Number of features of the model must match the input. Model n_features_in_ is {self.n_features_in_} and input n_features is {n_features}. Reshape your data.\"\n )", "def has_x(self):\n return any(map(lambda s: s.is_x, self))", "def test_sample_from_extra_bounds_good(self):\n dim = Real(\"yolo\", \"norm\", 0, 2, low=-5, high=+5, shape=(4, 4))\n for _ in range(8):\n samples = dim.sample(8)\n for sample in samples:\n assert sample in dim", "def is_dimension_error(self):\n return self._tag == 'dimension_error'", "def _validate_XY(X, Y):\n try:\n for inp in [X, Y]:\n assert isinstance(inp, torch.Tensor)\n assert inp.dtype is torch.float or inp.dtype is torch.double\n assert len(inp.shape) == 2\n assert X.dtype is Y.dtype\n assert X.shape[0] == Y.shape[0]\n except AssertionError:\n raise AttributeError(\n \"invalid inputs: X and Y should be float/double tensors of shape \"\n \"(n, d) and (n, m) respectively, where n is the number of samples, \"\n \"d is the number of features, and m is the number of outputs\"\n )", "def __init__(self, x_dimname='lon', y_dimname='lat', time_dimname='time'):\n self.x_dimname = x_dimname\n self.y_dimname = y_dimname\n self.time_dimname = time_dimname", "def is_geo_sample(sample):\n biosample_node = ET.fromstring(sample)\n ids = biosample_node.find('Ids')\n if ids is not None:\n for id in ids:\n\n db = id.get('db')\n if db == 'GEO':\n return True\n return False", "def _check_support(X: np.ndarray, **kwargs) -> None:\n\n assert ((X == 0) | (X == 1)).all(), \"x should be equal to 0 or 1.\"", "def validate_source(features):\n click.echo(f\"Validating features\", err=True)\n\n for feature in features:\n utils.validate_geojson(feature)\n\n click.echo(\"✔ valid\")", "def prepare_input_data(self, X):\n X = np.asarray(X)\n if X.dtype != \"f\" and X.dtype != \"d\":\n X = X.astype(float)\n\n self._check_input(X)\n missing_mask = np.isnan(X)\n self._check_missing_value_mask(missing_mask)\n return X, missing_mask", "def _check_variables(datasets, necessary_short_names):\n dataset_name = datasets[0]['dataset']\n necessary_short_names = set(necessary_short_names)\n short_names = set(group_metadata(datasets, 'short_name').keys())\n if short_names != necessary_short_names:\n raise ValueError(\n f\"Expected variables {necessary_short_names} for dataset \"\n f\"'{dataset_name}', got {short_names}\")", "def load_x(self):\n self.x = self.read_var(self.xvar)\n self.test_shape(self.xvar, self.x.shape, 1)", "def test_feature_format(X):\r\n print(\"test_feature_format()...\", end = \"\")\r\n for row in range(len(X)):\r\n for col in range(len(X[0])):\r\n assert (isinstance(X[row][col], float) == True)\r\n print(\"Passed!\")", "def check_1d(x, name):\n\n x = asarray(x)\n if size(x) == 1:\n x = asarray([x])\n if x.ndim == 2:\n raise Exception(\"Property: %s must be one-dimensional\" % name)\n x = x.flatten()\n\n return x", "def is_dataset(X, require_attrs=None):\n\n if require_attrs is None:\n require_attrs = [\"data_vars\", \"coords\", \"dims\", \"to_array\"]\n\n return all([hasattr(X, name) for name in require_attrs])", "def ispoint(x):\n if isvect(x) and x[3] > 0.0:\n return True\n return False", "def _validate_input_data(self):\n\n if type(self.data) in [np.ndarray, da.Array]:\n if not self.data.dtype.names:\n raise ValueError('QuadTree: numpy array provided for data, but no names were found, array must be a structured array')\n if 'x' not in self.data.dtype.names or 'y' not in 
self.data.dtype.names:\n raise ValueError('QuadTree: numpy structured array provided for data, but \"x\" or \"y\" not found in variable names')\n self.layernames = [self.rev_layer_lookup[var] for var in self.data.dtype.names if var in ['z', 'tvu']]\n elif type(self.data) == xr.Dataset:\n if 'x' not in self.data:\n raise ValueError('QuadTree: xarray Dataset provided for data, but \"x\" or \"y\" not found in variable names')\n if len(self.data.dims) > 1:\n raise ValueError('QuadTree: xarray Dataset provided for data, but found multiple dimensions, must be one dimensional: {}'.format(self.data.dims))\n self.layernames = [self.rev_layer_lookup[var] for var in self.data if var in ['z', 'tvu']]\n self._convert_dataset() # internally we just convert xarray dataset to numpy for ease of use\n else:\n raise ValueError('QuadTree: numpy structured array or dask array with \"x\" and \"y\" as variable must be provided')", "def test_contains(self):\n dim = Fidelity(\"epoch\", 1, 10)\n\n assert 0 not in dim\n assert 1 in dim\n assert 5 in dim\n assert 10 in dim\n assert 20 not in dim", "def check_param(self):\n check_tuple = (\"float16\", \"float32\", \"int32\")\n check_shape(self.shape_x, param_name=\"x\")\n check_shape(self.shape_indices, param_name=\"indices\")\n check_shape(self.shape_v, param_name=\"v\")\n check_dtype(self.dtype_x, check_tuple, param_name=\"x\")\n check_dtype(self.dtype_indices, (\"int32\",), param_name=\"indices\")\n check_dtype(self.dtype_v, check_tuple, param_name=\"v\")\n if len(self.shape_x) != len(self.shape_v):\n raise RuntimeError(\"The number of dimension x must\"\n \" be same as dimension v\")\n\n if self.shape_v[0] != self.shape_indices[0]:\n raise RuntimeError(\"The length of rank 0 of tensor v must\"\n \" be the same as length of indices\")\n\n if len(self.shape_indices) != 1:\n raise RuntimeError(\"The length of indices only support 1\")\n for i in range(1, len(self.shape_v)):\n if self.shape_x[i] != self.shape_v[i]:\n if not self.check_special():\n raise RuntimeError(\"The length of each rank of tensor x\"\n \" must be the same as length of\"\n \" each or next rank of tensor v\")", "def x_dim(self) -> str:\n if self.get_attrs(\"x_dim\") not in self._obj.dims:\n self.set_spatial_dims()\n return self.attrs[\"x_dim\"]", "def test_contains_extra_bounds(self):\n dim = Real(\"yolo\", \"norm\", 0, 3, low=-3, high=+3)\n assert dists.uniform.rvs(-3, 3) in dim\n assert -4 not in dim\n assert +4 not in dim\n assert (1, 2) not in dim", "def validate_in(self, xcoord, ycoord):\r\n x = int(xcoord/(self.tr.bd.TILE_WIDTH + self.tr.bd.LINE_WIDTH))\r\n y = int(ycoord/(self.tr.bd.TILE_WIDTH + self.tr.bd.LINE_WIDTH))\r\n if not self.tr.turn_tracker and self.tr.bd.disks[x][y].halo_tag:\r\n return True, x, y\r\n else:\r\n return False, x, y", "def test_check_x(self):\n r1 = Rectangle(10, 2)\n self.assertEqual(r1.x, 0)\n\n r2 = Rectangle(2, 10, 6)\n self.assertEqual(r2.x, 6)\n\n r3 = Rectangle(5, 2, 3, 9, 12)\n self.assertEqual(r3.x, 3)\n\n r4 = Rectangle(5, 2, 0, 3, 12)\n self.assertEqual(r4.x, 0)", "def test_contains(self):\n dim = Integer(\"yolo\", \"uniform\", -3, 6)\n\n assert 0.1 not in dim\n assert (0.1, -0.2) not in dim\n assert 0 in dim\n assert (1, 2) not in dim\n assert 6 not in dim\n assert -3 in dim\n assert -4 not in dim", "def check_consistent_X(self, X):\n # X must be ndarray-type\n if not isinstance(X, np.ndarray):\n X = np.array(X)\n\n return X", "def valid_ndim_assertion(expected_dimentions, actual_dimention, name):\n\tassert (actual_dimention in expected_dimentions), 
\"Invalid ndim of {} should be {}\".format(name, str(expected_dimentions))", "def contains_xy(geom, x, y=None, **kwargs):\n if y is None:\n coords = np.asarray(x)\n x, y = coords[:, 0], coords[:, 1]\n return lib.contains_xy(geom, x, y, **kwargs)", "def nd_shape_checking(x, y, mvaxis, traxis):\n assert x.ndim == y.ndim\n dims = np.delete(np.arange(x.ndim), -2)\n assert all([x.shape[k] == y.shape[k] for k in dims])", "def _validate_dimensionality(self):\r\n\r\n if self.time.ndim != 1:\r\n raise ValueError(\"time array must be one-dimensional\")\r\n npoints = self.data.shape[-1]\r\n if npoints != len(self.time):\r\n raise ValueError(\"mismatch of time and data dimensions\")", "def param_scale_check(shape_x, shape_scale):\n\n length_x = len(shape_x)\n length_scale = len(shape_scale)\n\n if not(length_scale == 1 and shape_scale[0] == 1):\n if length_x != length_scale:\n raise RuntimeError(\n \"length_x and length_scale must be equal\")\n for i in range(length_scale):\n if shape_scale[i] != shape_x[i] and shape_scale[i] != 1:\n raise RuntimeError(\n \"shape_scale is not match to broadcast\")", "def test_columns_not_in_X_error(self):\n\n df = d.create_df_1()\n\n x = BaseTransformer(columns=[\"a\", \"z\"])\n\n with pytest.raises(ValueError):\n\n x.columns_check(X=df)", "def obs_with_data(x):\n num_toks = np.sum(x,axis=1)\n has_data = num_toks > 0\n return has_data", "def test_coords():\n x = np.array([8, 67, 79, 10, 52, 53, 98, 34, 15, 58], dtype=float)\n y = np.array([24, 87, 48, 94, 98, 66, 14, 24, 60, 16], dtype=float)\n\n return x, y", "def test_x(self):\n g = gca()\n lines = g.get_lines() \n self.assertEquals(lines[0].get_xdata().tolist(), [3, 5, 5, 3, 3])", "def _shape_check(self, X, y):\n if not len(y.shape) > 1:\n raise RuntimeError(\"The shape of y is incorrect.\")\n if y.shape != X.shape[:-1]:\n raise RuntimeError(\"X and y must have the same number of \" +\n \"samples and microstructure shape.\")\n if X.shape[-1] != 3:\n raise RuntimeError(\"X must have 3 continuous local states \" +\n \"(euler angles)\")", "def testgeovalues_atomcoords(self):\r\n count_geovalues = len(self.data.geovalues)\r\n count_coords = len(self.data.atomcoords) - self.extracoords\r\n msg = f\"len(atomcoords) is {int(count_coords)} but len(geovalues) is {int(count_geovalues)}\"\r\n assert count_geovalues == count_coords, msg", "def _check_support(X: np.ndarray, **kwargs) -> None:\n\n assert (X > 0).all() & isinteger(X), \"x should be greater then 0 and integer.\"", "def valid_coordinate(self,row,column):\r\n if row >= 0 and row < len(self.wordsearch):\r\n if column >= 0 and column < len(self.wordsearch[0]):\r\n return True\r\n return False", "def test_get_coord_by_attr_valid():\n pass", "def test_point_with_zero_value_is_good():\n point = np.array([0, 20])\n image_dimensions = np.array([100, 100])\n assert point_within_dimensions(point, image_dimensions)", "def check_n_features(n_features, cols):\n\n if n_features is None:\n raise Exception(\"n_features must be specified.\")\n\n if n_features <= 0:\n raise Exception(\"n_features cannot be 0 or a negative number.\")\n\n if n_features > 1 and not isinstance(n_features, int):\n raise Exception(\"n_features must be integer if > 1.\")\n\n if n_features > cols:\n raise Exception(\"n_features must be less than columns in X.\")\n\n return", "def testgeotargets(self):\r\n dim_geotargets = self.data.geotargets.shape\r\n dim_geovalues = (len(self.data.geovalues[0]), )\r\n assert dim_geotargets == dim_geovalues", "def test_valid_tensor_op_name_inputs(self, data, 
description):\n name_a, name_b = data\n self.assertEqual(name_a, name_b, msg=description)", "def validate_coords(coords):\n coords = list(coords)\n if coords != sorted(coords):\n raise ValueError(\"The argument 'coords' must be sorted.\")\n if any(c not in \"xyz\" for c in coords):\n raise ValueError(\"The argument 'coords' may only contain \" \"'x', 'y', or 'z'.\")\n return coords", "def _check_cell_measure_point(cfreader, metadata):\n cell_measure = None\n\n for cf_group in cfreader.cf_group.values():\n if cf_group.cf_name == metadata['cmor_name']:\n cell_measure = cf_group\n break\n\n if not cell_measure:\n msg = ('Unable to find cell measure in cfreader for variable {}'.\n format(metadata['cmor_name']))\n raise FileValidationError(msg)\n\n point_index = []\n for dim_length in cell_measure.cf_data.shape:\n point_index.append(random.randint(0, dim_length - 1))\n\n try:\n _data_point = cell_measure.cf_data[tuple(point_index)]\n except Exception:\n msg = 'Unable to extract data point {} from file: {}'.format(\n point_index, metadata['basename'])\n raise FileValidationError(msg)\n else:\n return True", "def test_point_positive_init_x_keyword_argument(self):\n p = Point(x=4)\n self.assertEqual(p.x, 4.0, 'Test of Point(x=4).x failed. Returned value != 4.0')\n self.assertEqual(p.y, 0.0, 'Test of Point(x=4).y failed. Returned value != 0.0')", "def is_coord_empty(self, data):\n check = False\n if data[\"topic\"] in DRONE_POS_TOPICS:\n check = self.drone.check_if_pos(data[\"coord\"])\n elif data[\"topic\"] in DRONE_VEL_TOPICS:\n check = self.drone.check_if_vel(data[\"coord\"])\n elif data[\"topic\"] in DRONE_ACC_TOPICS:\n check = self.drone.check_if_acc(data[\"coord\"])\n elif data[\"topic\"] in SUBJECT_TOPICS:\n check = self.subject.check_if_pos(data[\"coord\"])\n elif data[\"topic\"] in self.PEDESTRIAN_TOPICS:\n check = self.peds[data[\"pid\"]].check_if_pos(data[\"coord\"])\n return check", "def check_data_type_column_data(X):\n if type(X) is not numpy.ndarray:\n raise TypeError(\"X should be type numpy.ndarray\")\n\n if len(X.shape) == 2 and X.shape[1] > 1:\n raise TypeError(\"X should have a single column.\")", "def _check_features_df(df, features):\n # check columns\n if not set(features).issubset(df.columns):\n raise ValueError(\"The dataframe does not seem to have the right \"\n \"features. {0} instead of {1}\"\n .format(df.columns, features))\n\n return", "def _check_parameters(self, X):\n _, n_features = X.shape\n\n if self.weights_init is not None:\n self.weights_init = _check_weights(self.weights_init,\n self.n_components)", "def _validate_X(X):\n if not isinstance(X, pd.DataFrame):\n raise ValueError(\"X should be a dataframe.\")", "def _verify_input_args(\n spatial_dimension: Optional[Union[str, int]] = None,\n tag: Optional[str] = None,\n tabulate: bool = True,\n) -> None:\n # --- Parse 'spatial_dimension'\n if not isinstance(spatial_dimension, (int, str, type(None))):\n raise TypeError(\n f\"Invalid type for spatial dimension! \"\n f\"Expected either an integer or a string. \"\n f\"Got instead {type(spatial_dimension)}.\"\n )\n if spatial_dimension is not None and isinstance(spatial_dimension, str):\n if spatial_dimension.lower() != \"m\":\n raise ValueError(\n f\"Invalid value ({spatial_dimension}) for spatial dimension! 
\"\n f\"Either a positive integer or 'M' to indicate \"\n f\"a variable-dimension test function.\"\n )\n if spatial_dimension is not None and isinstance(spatial_dimension, int):\n if spatial_dimension <= 0:\n raise ValueError(\n f\"Invalid value ({spatial_dimension}) for spatial dimension! \"\n f\"Either a positive integer or 'M' to indicate \"\n f\"a variable-dimension test function.\"\n )\n\n # --- Parse 'tag'\n if not isinstance(tag, (str, type(None))):\n raise TypeError(f\"Tag argument must be of str type! Got {type(tag)}.\")\n if tag is not None and tag not in SUPPORTED_TAGS:\n raise ValueError(\n f\"Tag {tag!r} is not supported. Use one of {SUPPORTED_TAGS}!\"\n )\n\n # --- Parse 'tabulate'\n if not isinstance(tabulate, (bool, type(None))):\n raise TypeError(\n f\"'tabulate' argument must be of bool type! Got {type(tabulate)}.\"\n )", "def is_valid_input(geometry, **kwargs):\n return lib.is_valid_input(geometry, **kwargs)", "def is3D(data):\n return data.find(\"x3\") != -1 and data.find(\"y3\") != -1 and data.find(\"z3\") != -1" ]
[ "0.77260077", "0.7620336", "0.7221872", "0.6576378", "0.6310555", "0.6308388", "0.6186717", "0.608041", "0.58673334", "0.5852697", "0.5782202", "0.5687635", "0.5607326", "0.5584896", "0.55696183", "0.5523855", "0.5504535", "0.5483882", "0.54531854", "0.54332227", "0.5432066", "0.5403734", "0.53723615", "0.5370909", "0.5358261", "0.53399915", "0.5332978", "0.53298247", "0.5316847", "0.52930784", "0.529001", "0.52843463", "0.5247847", "0.5237847", "0.5234042", "0.5234042", "0.52138144", "0.5204255", "0.5199316", "0.51989746", "0.5196009", "0.5186233", "0.51845354", "0.5180493", "0.51676124", "0.5150306", "0.5127444", "0.51255834", "0.512243", "0.5111505", "0.51067054", "0.5101084", "0.50946504", "0.50675005", "0.5061853", "0.50608337", "0.5050874", "0.50421524", "0.50404215", "0.5038578", "0.50360274", "0.50323987", "0.5021024", "0.50189173", "0.5016522", "0.5012769", "0.5007064", "0.5002667", "0.49919707", "0.4983434", "0.4971311", "0.49709576", "0.496708", "0.49653265", "0.4954432", "0.49530554", "0.49510074", "0.49505723", "0.49484512", "0.4947422", "0.49395066", "0.49277183", "0.49274585", "0.4927383", "0.49229744", "0.49138728", "0.49090374", "0.4907938", "0.49030346", "0.4901253", "0.48955446", "0.48914608", "0.4884208", "0.48780945", "0.4877585", "0.4873994", "0.48729825", "0.48703194", "0.48686677", "0.4868499" ]
0.8039974
0
Checks that X's Coordinates are the same length as X's Dimensions
def check_consistent(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim): assert X.shape[list(X.dims).index(x_lat_dim)] == len(X.coords[x_lat_dim].values), "XCast requires a dataset's x_lat_dim coordinate to be the same length as its x_lat_dim dimension" assert X.shape[list(X.dims).index(x_lon_dim)] == len(X.coords[x_lon_dim].values), "XCast requires a dataset's x_lon_dim coordinate to be the same length as its x_lon_dim dimension" assert X.shape[list(X.dims).index(x_sample_dim)] == len(X.coords[x_sample_dim].values), "XCast requires a dataset's x_sample_dim coordinate to be the same length as its x_sample_dim dimension" assert X.shape[list(X.dims).index(x_feature_dim)] == len(X.coords[x_feature_dim].values), "XCast requires a dataset's x_feature_dim coordinate to be the same length as its x_feature_dim dimension"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_dimensions(self) -> None:\n dims = (self.y_dim, self.x_dim)\n da = self._obj[self.vars[0]] if isinstance(self._obj, xr.Dataset) else self._obj\n extra_dims = [dim for dim in da.dims if dim not in dims]\n if len(extra_dims) == 1:\n dims = tuple(extra_dims) + dims\n self.set_attrs(dim0=extra_dims[0])\n elif len(extra_dims) == 0:\n self._obj.coords[GEO_MAP_COORD].attrs.pop(\"dim0\", None)\n elif len(extra_dims) > 1:\n raise ValueError(\"Only 2D and 3D data arrays supported.\")\n if isinstance(self._obj, xr.Dataset):\n check = np.all([self._obj[name].dims == dims for name in self.vars])\n else:\n check = self._obj.dims == dims\n if check == False:\n raise ValueError(\n f\"Invalid dimension order ({da.dims}). \"\n f\"You can use `obj.transpose({dims}) to reorder your dimensions.\"\n )", "def _check_shape(self, X):\n return all([X.shape[i] == self.train_shape[i] for i in range(2)])", "def check_dimensions(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):\n\tassert 4 <= len(X.dims) <= 5, 'XCast requires a dataset to be 4-Dimensional'\n\tassert x_lat_dim in X.dims, 'XCast requires a dataset_lat_dim to be a dimension on X'\n\tassert x_lon_dim in X.dims, 'XCast requires a dataset_lon_dim to be a dimension on X'\n\tassert x_sample_dim in X.dims, 'XCast requires a dataset_sample_dim to be a dimension on X'\n\tassert x_feature_dim in X.dims, 'XCast requires a dataset_feature_dim to be a dimension on X'", "def check_coords(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):\n\tassert x_lat_dim in X.coords.keys(), 'XCast requires a dataset_lat_dim to be a coordinate on X'\n\tassert x_lon_dim in X.coords.keys(), 'XCast requires a dataset_lon_dim to be a coordinate on X'\n\tassert x_sample_dim in X.coords.keys(), 'XCast requires a dataset_sample_dim to be a coordinate on X'\n\tassert x_feature_dim in X.coords.keys(), 'XCast requires a dataset_feature_dim to be a coordinate on X'", "def check_size(self,x,y):\n assert(x <= 10**3), 'Width larger than 1000' \n assert(y <= 10**3), 'Height larger than 1000' \n assert(x*y <= 3*(10**5)), 'Resolution larger than 300000'", "def _check_dimensions(self, workspace_to_check):\n for i in range(self._raw_ws.getNumDims()):\n if self._raw_ws.getDimension(i).getNBins() != workspace_to_check._raw_ws.getDimension(i).getNBins():\n return False\n return True", "def nd_shape_checking(x, y, mvaxis, traxis):\n assert x.ndim == y.ndim\n dims = np.delete(np.arange(x.ndim), -2)\n assert all([x.shape[k] == y.shape[k] for k in dims])", "def _checkSize(X1,X2):\n \n if len(X1) != len(X2):\n raise ValueError, 'Lists are differnt lengths'", "def test_size_check(self):\n [x1, y1, s1, g1] = self.data.diffusion_data.shape\n [x2, y2, s2, g2] = module_05.run_module(self.data).diffusion_data.shape\n self.assertEqual(x1, x2)\n self.assertEqual(y1, y2)\n self.assertEqual(s1, s2)\n self.assertEqual(g1, g2)", "def test_check_X_too_many_dims():\n with pytest.raises(ValueError):\n check_X(np.ones((5,4,3)))", "def verify_coords(self, piece_coords):\n if piece_coords[0] >= self.size or piece_coords[0] < 0:\n return False\n if piece_coords[1] >= self.size or piece_coords[1] < 0:\n return False\n return True", "def validate_dimensions(self, dimensions):\n\n #safety checking\n if len(dimensions) != self.dimensionality:\n raise ValueError(f\"The number of dimensions provided {len(dimensions)}\"\n f\"do not match that of this coordinate system \"\n f\"{self.dimensionality}.\")\n\n if not all(isinstance(elem, int) for elem in dimensions):\n raise ValueError(f\"Not all dimensions are ints 
{dimensions}\")\n\n if not all(elem > 0 for elem in dimensions):\n raise ValueError(f\"Dimensions must be greater than 1 {dimensions}\")\n\n if not checkallequal(dimensions):\n raise ValueError(f\"Not all dimensions are equal {dimensions}. They \"\n f\"must be equal. This will be changed in a future version\")", "def __len__(self):\n\t\treturn len(self._coords)", "def test_point_within_dimensions_invalid_sizes():\n point = np.array([20, 20, 20])\n image_dimensions = np.array([100, 100])\n\n with pytest.raises(AssertionError):\n assert not point_within_dimensions(point, image_dimensions)\n\n point = np.array([20, 20])\n image_dimensions = np.array([100, 100, 100])\n\n with pytest.raises(AssertionError):\n assert not point_within_dimensions(point, image_dimensions)", "def __len__(self):\n return len(self._coords)", "def _shape_check(self, X, y):\n if not len(y.shape) > 1:\n raise RuntimeError(\"The shape of y is incorrect.\")\n if y.shape != X.shape[:-1]:\n raise RuntimeError(\"X and y must have the same number of \" +\n \"samples and microstructure shape.\")\n if X.shape[-1] != 3:\n raise RuntimeError(\"X must have 3 continuous local states \" +\n \"(euler angles)\")", "def _validate_dimensionality(self):\r\n\r\n if self.time.ndim != 1:\r\n raise ValueError(\"time array must be one-dimensional\")\r\n npoints = self.data.shape[-1]\r\n if npoints != len(self.time):\r\n raise ValueError(\"mismatch of time and data dimensions\")", "def _autocheck_dimensions(self):\n # W dimensions check list\n assert len(self.W.shape) == 2, f\"W shape should be (N, N) but is {self.W.shape}.\"\n assert self.W.shape[0] == self.W.shape[1], f\"W shape should be (N, N) but is {self.W.shape}.\"\n\n # Win dimensions check list\n assert len(self.Win.shape) == 2, f\"Win shape should be (N, input) but is {self.Win.shape}.\"\n err = f\"Win shape should be ({self.W.shape[1]}, input) but is {self.Win.shape}.\"\n assert self.Win.shape[0] == self.W.shape[0], err\n\n # Wout dimensions check list\n assert len(self.Wout.shape) == 2, f\"Wout shape should be (output, nb_states) but is {self.Wout.shape}.\"\n nb_states = self.Win.shape[1] + self.W.shape[0] + 1 if self.use_raw_inp else self.W.shape[0] + 1\n err = f\"Wout shape should be (output, {nb_states}) but is {self.Wout.shape}.\"\n assert self.Wout.shape[1] == nb_states, err\n\n # Wfb dimensions check list\n if self.Wfb is not None:\n assert len(self.Wfb.shape) == 2, f\"Wfb shape should be (input, output) but is {self.Wfb.shape}.\"\n err = f\"Wfb shape should be ({self.Win.shape[0]}, {self.Wout.shape[0]}) but is {self.Wfb.shape}.\"\n assert (self.Win.shape[0],self.Wout.shape[0]) == self.Wfb.shape, err", "def test_point_within_dimensions_true():\n point = np.array([10, 20])\n image_dimensions = np.array([100, 100])\n assert point_within_dimensions(point, image_dimensions)", "def testgeovalues_atomcoords(self):\r\n count_geovalues = len(self.data.geovalues)\r\n count_coords = len(self.data.atomcoords) - self.extracoords\r\n msg = f\"len(atomcoords) is {int(count_coords)} but len(geovalues) is {int(count_geovalues)}\"\r\n assert count_geovalues == count_coords, msg", "def _check_inputlengths(self):\n # Check x and y have more than 1 item, and x and y are equal length\n if not len(self.x) > 1:\n raise ValueError(\"Route input 'x' must contain more than 1 item\")\n\n if not (len(self.y) > 1):\n raise ValueError(\"Route input 'y' must contain more than 1 item\")\n\n if not (len(self.x) == len(self.y)):\n raise ValueError(\"Route inputs 'x' and 'y' must be of equal length\")\n\n # 
Performs checks on z if not empty\n if self.z is not None:\n for v in self.z.values():\n if not (len(v) == len(self.x)):\n raise ValueError(\"Route input 'z' must be of equal length to 'x' and 'y'\")", "def do_grid_check(self,):\n self.ydim, self.xdim = self.data_fcst.shape \n if self.data_obs.shape != (self.ydim,self.xdim):\n raise FormatError(\"Obs and forecast data not same size.\")\n return", "def _validate_raster_attributes(self, x):\r\n if self.extent != x.extent:\r\n raise ValueError(\"Extents do not match.\")\r\n if self.resolution != x.resolution:\r\n raise ValueError(\"Resolutions do not match.\")\r\n if not np.array_equal(self.x, x.x):\r\n raise ValueError(\"x attributes do not match.\")\r\n if not np.array_equal(self.y, x.y):\r\n raise ValueError(\"y attributes do not match.\")\r\n if len(self.layers) != len(x.layers):\r\n raise ValueError(\"layers lengths do not match.\")\r\n if self.crs != x.crs:\r\n raise ValueError(\"crs attributes do not match.\")", "def dimension_check():\n print(\"### DIMENSION CHECK ###\")\n print(X.shape,\n y.shape,\n X_train.shape,\n y_train.shape,\n X_test.shape,\n y_test.shape,\n weights.shape)\n print(\"### END ###\")", "def test_check_x(self):\n r1 = Rectangle(10, 2)\n self.assertEqual(r1.x, 0)\n\n r2 = Rectangle(2, 10, 6)\n self.assertEqual(r2.x, 6)\n\n r3 = Rectangle(5, 2, 3, 9, 12)\n self.assertEqual(r3.x, 3)\n\n r4 = Rectangle(5, 2, 0, 3, 12)\n self.assertEqual(r4.x, 0)", "def isValidCoord(coord, size):\n return coord[0] >= 0 and coord[0] < size and \\\n coord[1] >= 0 and coord[1] < size", "def issquare(self):\r\n if self.width == self.height:\r\n return True\r\n else:\r\n return False", "def check_all(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):\n\tcheck_dimensions(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\tcheck_coords(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\tcheck_consistent(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\tcheck_type(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\t#check_transposed(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)", "def check_dims(self, data):\n if np.ndim(data) != 2:\n raise ValueError('Input data must be a two dimensional numpy array. '\n 'Data received has shape (%g, %g).' 
% data.shape)", "def check_dim(gr, DIM):\n l = len(gr)\n if(l != DIM):\n return False\n\n for i in range(0, DIM):\n if(len(gr[i]) != l):\n return False \n return True", "def _verify_integrity(self):\n if len(self.data.shape) != 1:\n raise ValueError(\n \"Data array must be one dimensional \"\n \"(is {})\".format(len(self.data.shape))\n )\n\n if len(self.shape.shape) != 2:\n raise ValueError(\n \"Shape array must be two dimensional \"\n \"(is {})\".format(len(self.shape.shape))\n )\n\n shape_size, data_size = self._cumsum[-1], self.data.size\n\n if not shape_size == data_size:\n raise ValueError(\n \"Size of data ({data_size}) does not match that \"\n \"of the given shapes ({shape_size}).\".format(\n data_size=data_size, shape_size=shape_size\n )\n )", "def _check_shape(placeholder_shape, data_shape):\n\n return True", "def check_dimensions(d_real, d_fake, d_real_logits, d_fake_logits):\n def _check_pair(a, b):\n if a != b:\n raise ValueError(\"Shape mismatch: %s vs %s.\" % (a, b))\n if len(a) != 2 or len(b) != 2:\n raise ValueError(\"Rank: expected 2, got %s and %s\" % (len(a), len(b)))\n\n if (d_real is not None) and (d_fake is not None):\n _check_pair(d_real.shape.as_list(), d_fake.shape.as_list())\n if (d_real_logits is not None) and (d_fake_logits is not None):\n _check_pair(d_real_logits.shape.as_list(), d_fake_logits.shape.as_list())\n if (d_real is not None) and (d_real_logits is not None):\n _check_pair(d_real.shape.as_list(), d_real_logits.shape.as_list())", "def check_consistent_X(self, X):\n # X must be ndarray-type\n if not isinstance(X, np.ndarray):\n X = np.array(X)\n\n return X", "def test_shape_fail():\n lons, lats = np.arange(10), np.arange(10).reshape(5, 2)\n emsg = \"Require longitudes and latitudes with same shape\"\n with pytest.raises(ValueError, match=emsg):\n _ = to_cartesian(lons, lats)", "def test_point_within_dimensions_border():\n point = np.array([100, 20])\n image_dimensions = np.array([100, 100])\n assert not point_within_dimensions(point, image_dimensions)", "def test_check_width(self):\n r1 = Rectangle(10, 2)\n self.assertEqual(r1.width, 10)\n\n r2 = Rectangle(2, 10)\n self.assertEqual(r2.width, 2)\n\n r3 = Rectangle(5, 2, 0, 0, 12)\n self.assertEqual(r3.width, 5)", "def is_square(self):\n lines, columns = self.get_size()\n return lines == columns", "def _acceptable_dimensions(self, box):\n return self._min_width < box.x1-box.x0 < self._max_width and\\\n self._min_height < box.y1-box.y0 < self._max_height", "def validate_xy(x_train, y_train):\n try:\n x_train = x_train.astype('float64')\n except ValueError:\n raise ValueError('x_train should only contain numerical data.')\n\n if len(x_train.shape) < 2:\n raise ValueError('x_train should at least has 2 dimensions.')\n\n if x_train.shape[0] != y_train.shape[0]:\n raise ValueError('x_train and y_train should have the same number of instances.')", "def check_x_and_y_axis_len(self, x_axis, y_axis):\n if x_axis ==0: \n raise ValueError(\"Error! SOM X-Axis is 0!\")\n if y_axis==0:\n raise ValueError(\"Error! 
SOM Y-Axis is 0!\")", "def is_square(self):\n return self.shape[0] == self.shape[1]", "def is_square(self):\n return self.shape[0] == self.shape[1]", "def check_lengths(self, length: Expr) -> bool:\n for point1 in self.points:\n for point2 in self.points - {point1}:\n if abs(point2 - point1) == length:\n print(f'Length {length} found between points: {point1} and {point2}')\n return True\n return False", "def _check_shape(input_shape):\n msg = ('Input to SpatialExpansion must be 4D with dimensions: '\n '(n_observations, n_spatial_0, n_spatial_1, n_features), '\n 'but received shape: {}'.format(input_shape))\n assert len(input_shape) == 4, msg", "def validate_coords(coords, delta):\n global KEYPAD\n coord_x, coord_y = coords\n delta_x, delta_y = delta\n if any([(coord_x + delta_x) > 2,\n (coord_y + delta_y) > 2]):\n return False\n return True", "def contains(self, x):\n # need more to assure its a real SSP - ie on right torus\n return (len(x) == self._shape[0])", "def _check_xy(x: DataFrame, y: DataFrame) -> None:\n if x.shape[1] != y.shape[1]:\n raise ValueError(\n \"not compatible:\\n\"\n f\"- different number of columns: {x.shape[1]} vs {y.shape[1]}\"\n )\n\n in_y_not_x = setdiff(\n y.columns, x.columns, __calling_env=CallingEnvs.REGULAR\n )\n in_x_not_y = setdiff(\n x.columns, y.columns, __calling_env=CallingEnvs.REGULAR\n )\n if in_y_not_x or in_x_not_y:\n msg = [\"not compatible:\"]\n if in_y_not_x:\n msg.append(f\"- Cols in `y` but not `x`: {in_y_not_x}.\")\n if in_x_not_y:\n msg.append(f\"- Cols in `x` but not `y`: {in_x_not_y}.\")\n raise ValueError(\"\\n\".join(msg))", "def testatomcoords(self):\r\n natom = len(self.data.atomcoords[0])\r\n ref = self.data.natom\r\n msg = f\"natom is {int(ref)} but len(atomcoords[0]) is {int(natom)}\"\r\n assert natom == ref, msg", "def validate_dimensions(self, dimensions):\n #safety checking\n if len(dimensions) != self.dimensionality:\n raise ValueError(f\"The number of dimensions provided {len(dimensions)}\"\n f\"do not match that of this coordinate system \"\n f\"{self.dimensionality}.\")\n\n if not all(isinstance(elem, int) for elem in dimensions):\n raise ValueError(f\"Not all dimensions are ints {dimensions}\")", "def valid_coordinates(self, x, y):\n return ((x >= 0) and (x < self.width) and\n (y >= 0) and (y < self.height))", "def _check_sizes(self, space):\n my_dimension = self.get_total_dimension()\n other_dimension = space.get_total_dimension()\n if my_dimension != other_dimension:\n if isinstance(space, Conv2DSpace):\n if my_dimension * space.shape[0] !=\\\n other_dimension:\n raise ValueError(str(self)+\" with total dimension \" +\n str(my_dimension) +\n \" can't format a batch into \" +\n str(space) + \"because its total dimension\\\n is \" +\n str(other_dimension))", "def test_patch_grid_dimensions_are_consistent(self):\n with self.assertRaises(AssertionError):\n PatchGrid((1,), (1., 2.), (1., 2.))\n with self.assertRaises(AssertionError):\n PatchGrid((1, 2), (1., 2.), (1.,))", "def check_param(self):\n check_tuple = (\"float16\", \"float32\", \"int32\")\n check_shape(self.shape_x, param_name=\"x\")\n check_shape(self.shape_indices, param_name=\"indices\")\n check_shape(self.shape_v, param_name=\"v\")\n check_dtype(self.dtype_x, check_tuple, param_name=\"x\")\n check_dtype(self.dtype_indices, (\"int32\",), param_name=\"indices\")\n check_dtype(self.dtype_v, check_tuple, param_name=\"v\")\n if len(self.shape_x) != len(self.shape_v):\n raise RuntimeError(\"The number of dimension x must\"\n \" be same as dimension v\")\n\n if 
self.shape_v[0] != self.shape_indices[0]:\n raise RuntimeError(\"The length of rank 0 of tensor v must\"\n \" be the same as length of indices\")\n\n if len(self.shape_indices) != 1:\n raise RuntimeError(\"The length of indices only support 1\")\n for i in range(1, len(self.shape_v)):\n if self.shape_x[i] != self.shape_v[i]:\n if not self.check_special():\n raise RuntimeError(\"The length of each rank of tensor x\"\n \" must be the same as length of\"\n \" each or next rank of tensor v\")", "def is_full_dimensional(self):\n\n return self.affine_dimension() == self.space_dimension()", "def check_consistent_length(arrays: Sequence[npt.ArrayLike]) -> None:\n lengths = [_num_samples(X) for X in arrays if X is not None]\n uniques = np.unique(lengths)\n if len(uniques) > 1:\n raise ValueError(\n \"Found input variables with inconsistent numbers of\" \" samples: %r\" % [int(length) for length in lengths]\n )", "def has_dims(xobj, dims, kind):\n if isinstance(dims, str):\n dims = [dims]\n\n if not all(dim in xobj.dims for dim in dims):\n raise DimensionError(\n f'Your {kind} object must contain the '\n f'following dimensions at the minimum: {dims}'\n )\n return True", "def is_valid_size(self, dot_width, dot_height, distance, screen_width, screen_height):\n if dot_width * distance > screen_width or dot_height * distance > screen_height:\n return False\n return True", "def _assert_valid(self, y: int, x: int) -> None:\n if not (0 <= y < self.size[0] and 0 <= x < self.size[1]):\n raise ValueError('Coordinates out of image boundary, {}'.format(self.size))", "def _position_validity_checker(position, start, n_elements):\n _assert_shape(position, (MaxDimension.value(), n_elements + 1), \"position\")\n\n # Check if the start position of the rod and first entry of position array are the same\n assert_allclose(\n position[..., 0],\n start,\n atol=Tolerance.atol(),\n err_msg=str(\n \"First entry of position\" + \" (\" + str(position[..., 0]) + \" ) \"\n \" is different than start \" + \" (\" + str(start) + \" ) \"\n ),\n )", "def _check_dimensions(self, a, b):\n units_a = self._get_units(a)\n units_b = self._get_units(b)\n dim_a = units_a.dimensions\n dim_b = units_b.dimensions\n if dim_a != dim_b:\n raise UnitConversionError(units_a, dim_a, units_b, dim_b)", "def _validate_elem_length(max_num_levels, elems_flat, axis):\n assertions = []\n\n elem_length = ps.shape(elems_flat[0])[axis]\n\n # The default size limit will overflow a 32-bit int, so make sure we're\n # using 64-bit.\n size_limit = 2**(ps.cast(max_num_levels, np.int64) + 1)\n enough_levels = ps.less(ps.cast(elem_length, np.int64), size_limit)\n enough_levels_ = tf.get_static_value(enough_levels)\n if enough_levels_ is None:\n assertions.append(\n tf.debugging.assert_equal(\n enough_levels, True,\n message='Input `Tensor`s must have dimension less than'\n ' `2**(max_num_levels + 1)` along `axis=={}`.'\n ' (saw: {} which is not less than 2**{} == {})'.format(\n axis,\n elem_length,\n max_num_levels,\n size_limit)))\n elif not enough_levels_:\n raise ValueError(\n 'Input `Tensor`s must have dimension less than'\n ' `2**(max_num_levels + 1)` along `axis == {}`'\n ' (saw: {} which is not less than 2**{} == {})'.format(\n axis,\n elem_length,\n max_num_levels,\n size_limit))\n\n is_consistent = ps.reduce_all([ps.equal(ps.shape(elem)[axis], elem_length)\n for elem in elems_flat[1:]])\n\n is_consistent_ = tf.get_static_value(is_consistent)\n if is_consistent_ is None:\n assertions.append(\n tf.debugging.assert_equal(\n is_consistent, True,\n message='Inputs 
must have the same size along the given axis.'\n ' (saw: {})'.format([elem.shape for elem in elems_flat])))\n elif not is_consistent_:\n raise ValueError(\n 'Inputs must have the same size along the given axis.'\n ' (saw: {})'.format([elem.shape for elem in elems_flat]))\n return elem_length, assertions", "def __len__(self):\n return len(self.X)", "def Check(self):\n return self.x, self.y, self.length, self.index", "def is_square (self):\n return self.width == self.height", "def check_input_dimension(self, data):\n if len(data[0]) != self.input_dimension:\n raise ValueError(\"Received {} features, expected {}.\".format(self.input_dimension, len(data[0])))", "def _validate_XY(X, Y):\n try:\n for inp in [X, Y]:\n assert isinstance(inp, torch.Tensor)\n assert inp.dtype is torch.float or inp.dtype is torch.double\n assert len(inp.shape) == 2\n assert X.dtype is Y.dtype\n assert X.shape[0] == Y.shape[0]\n except AssertionError:\n raise AttributeError(\n \"invalid inputs: X and Y should be float/double tensors of shape \"\n \"(n, d) and (n, m) respectively, where n is the number of samples, \"\n \"d is the number of features, and m is the number of outputs\"\n )", "def __len__(self):\n return len(self.__x)", "def check_extent(self):\n if self.lower_left.x > self.upper_right.x:\n dlx = self.lower_left.x\n self.lower_left.x = self.upper_right.x\n self.upper_right.y = dlx\n\n if self.lower_left.y > self.upper_right.y:\n dly = self.lower_left.y\n self.lower_left.y = self.upper_right.y\n self.upper_right.y = dly", "def square_check(self):\n return len(self.matrix) == len(self.matrix[0])", "def check_has_dims(hdr):\n try:\n return (hdr['startX'], hdr['startY'])\n except KeyError:\n return False", "def check_image_size(self, x):\n _, _, h, w = x.size()\n mod_pad_h = (self.window_size -\n h % self.window_size) % self.window_size\n mod_pad_w = (self.window_size -\n w % self.window_size) % self.window_size\n x = F.pad(x, (0, mod_pad_w, 0, mod_pad_h), 'reflect')\n return x", "def __len__(self):\n return self.xyz.shape[0]", "def __len__(self):\n return self.xyz.shape[0]", "def test_has_correct_length(self) -> None:\n assert len(list(self._dataset)) == 7168", "def check_qim_dim_match(cls, qim, dim):\n return len(qim) == len(dim)", "def test_ndim_fail():\n lons = lats = np.array([0]).reshape(-1, 1, 1, 1)\n emsg = \"Require at most 3-D\"\n with pytest.raises(ValueError, match=emsg):\n _ = to_cartesian(lons, lats)", "def xdim(self):\n return len(self._x)", "def check_coordinates(X, Y):\n\n # Accounting for elliptical Jupiter disk\n Y *= 1.071374\n\n return sqrt(X ** 2 + Y ** 2)", "def check_evaluation_points(x, y):\n assert x.ndim == y.ndim == 1\n assert x.shape == y.shape\n assert x.dtype == y.dtype == np.float64", "def _verify_data(inputs, targets):\n check_value_type('inputs', inputs, Tensor)\n if len(inputs.shape) != 4:\n raise ValueError(f'Argument inputs must be 4D Tensor, but got {len(inputs.shape)}D Tensor.')\n check_value_type('targets', targets, (Tensor, int, tuple, list))\n if isinstance(targets, Tensor):\n if len(targets.shape) > 2:\n raise ValueError('Dimension invalid. If `targets` is a Tensor, it should be 0D, 1D or 2D. '\n 'But got {}D.'.format(len(targets.shape)))\n if targets.shape and len(targets) != len(inputs):\n raise ValueError(\n 'If `targets` is a 2D, 1D Tensor, it should have the same length as inputs {}. 
But got {}.'.format(\n len(inputs), len(targets)))", "def has_dyadic_length(x):\n n = x.shape[0]\n j = dyadic_length(x)\n return n == 2**j", "def dims(x):\n return len(x.shape)", "def _check_size(size):\r\n\r\n if not isinstance(size, (list, tuple)):\r\n raise ValueError(\"Size must be a tuple\")\r\n if len(size) != 2:\r\n raise ValueError(\"Size must be a tuple of length 2\")\r\n if size[0] < 0 or size[1] < 0:\r\n raise ValueError(\"Width and height must be >= 0\")\r\n\r\n return True", "def _check_ensembles_are_same_size(p, q):\n if p.npdf != q.npdf:\n raise ValueError(\"Input ensembles should have the same number of distributions\")", "def validate_in(self, xcoord, ycoord):\r\n x = int(xcoord/(self.tr.bd.TILE_WIDTH + self.tr.bd.LINE_WIDTH))\r\n y = int(ycoord/(self.tr.bd.TILE_WIDTH + self.tr.bd.LINE_WIDTH))\r\n if not self.tr.turn_tracker and self.tr.bd.disks[x][y].halo_tag:\r\n return True, x, y\r\n else:\r\n return False, x, y", "def check_param(self):\n if scipy.ndim(self.param['initial_heading'].shape) > 1:\n raise(ValueError, 'initial_heading must have ndim=1')\n\n equal_shape_list = ['x_start_position','y_start_position','flight_speed','release_time']\n for item in equal_shape_list:\n if self.param[item].shape != self.param['initial_heading'].shape:\n raise(ValueError, '{0}.shape must equal initial_heading.shape'.format(item))", "def validate_X_y(X: List[str], y: List[Any]):\n if len(X) != len(y):\n raise ValueError(\n f\"X and y must have the same length; X has length {len(X)}, and y has length {len(y)}\"\n )", "def test_sanity_check (self):\n X, Y = self.dm.get_data(std=True, lag_indicator=True)\n\n # Ensure number of rows between what we expect.\n row_bound = (800, 1000)\n actual_rows = X.shape[0]\n msg = 'Number of rows not within expected bounds.'\n self.assertTrue(row_bound[0] < actual_rows < row_bound[1], msg)\n\n msg = 'X and Y have different number of rows.'\n self.assertEqual(X.shape[0], Y.shape[0], msg)\n\n # Ensure X columns match.\n expected_x_cols = ['SP500', 'ltc_px_std', 'xrp_px_std', 'xlm_px_std',\n 'eth_px_std', 'btc_px_std', 'ltc_volume_std',\n 'xrp_volume_std', 'xlm_volume_std', 'eth_volume_std',\n 'btc_volume_std', 'lagged_others']\n actual_x_cols = X.columns.tolist()\n msg = 'Number of X columns different than expected.'\n self.assertEqual(len(actual_x_cols), len(expected_x_cols), msg)\n\n for col in expected_x_cols:\n msg = 'Expected column not found: {}'.format(col)\n self.assertTrue(col in actual_x_cols, msg)", "def check_type(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):\n\tassert type(X) == xr.DataArray, 'XCast requires a dataset to be of type \"Xarray.DataArray\"'", "def valid_ndim_assertion(expected_dimentions, actual_dimention, name):\n\tassert (actual_dimention in expected_dimentions), \"Invalid ndim of {} should be {}\".format(name, str(expected_dimentions))", "def __check_2d_and_reshape(X):\n if len(X.shape) == 1:\n X = np.reshape(X, (-1, X.shape[0]))\n return X", "def get_dimensions(self):\n return self.lon_arr.shape", "def test_convolve_input_dim_check(self, case, fn, x_shape, y_shape):\n x = torch.rand(*x_shape, dtype=self.dtype, device=self.device)\n y = torch.rand(*y_shape, dtype=self.dtype, device=self.device)\n\n message = [\n \"The operands must be the same dimension\",\n \"Leading dimensions of x and y are not broadcastable\",\n ][case]\n with self.assertRaisesRegex(ValueError, message):\n fn(x, y)", "def _check_shape(input_shape):\n msg = ('Input to FlattenAxis must be 5D with dimensions: '\n '(n_observations, n_spatial_0, 
n_spatial_1, n_temporal, '\n 'n_features), but received shape: {}'.format(input_shape))\n assert len(input_shape) == 5, msg", "def test_shapes(self):\n\n # Creates a raw layer\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.raw.var.drop(\"feature_is_filtered\", axis=1, inplace=True)\n self.validator.adata.X = examples.adata_non_raw.X.copy()\n self.validator.adata.uns[\"X_normalization\"] = \"CPM\"\n\n # remove one gene\n self.validator.adata = self.validator.adata[:, 1:]\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\"ERROR: Number of genes in X (3) is different than raw.X (4).\"],\n )", "def verify_x_data(self, x_data):\n testx = deepcopy(x_data).flatten()\n xmin, xmax = self.print_verification_report(testx, 'X_data')\n\n if xmax > CLIP_VALUE or xmin < -CLIP_VALUE:\n n_clipped_elements = np.sum(CLIP_VALUE < np.abs(testx))\n n_elements = len(testx)\n x_data = np.clip(x_data, a_min=-CLIP_VALUE, a_max=CLIP_VALUE)\n logger.warning(\"Large inputs detected: clip values exceeding {}\".format(CLIP_VALUE))\n logger.info(\"{} of {} elements were clipped.\".format(n_clipped_elements, n_elements))\n\n return x_data", "def validatePosition(boardsize, pos):\n return pos.x in range(0, boardsize) and pos.y in range(0,boardsize)", "def have_same_shapes(array1, array2):\n return array1.shape == array2.shape", "def check_place(self, positions):\n return self.size == len(set(positions[i] + i for i in range(self.size))) == len(\n set(positions[i] - i for i in range(self.size)))" ]
[ "0.7469548", "0.7054452", "0.7046964", "0.6944492", "0.66742504", "0.6647212", "0.6620178", "0.6618185", "0.6572067", "0.6569208", "0.65156883", "0.6446175", "0.6434718", "0.64269143", "0.64210254", "0.6420011", "0.64147735", "0.6348928", "0.6343616", "0.6322013", "0.63205206", "0.6301553", "0.6300499", "0.62992257", "0.62875843", "0.6274657", "0.62700593", "0.6265241", "0.62646836", "0.6264012", "0.62548816", "0.62459934", "0.6243415", "0.62279415", "0.622718", "0.6221947", "0.62156767", "0.620166", "0.6173698", "0.6169092", "0.6166721", "0.614912", "0.614912", "0.61404926", "0.61323744", "0.6124416", "0.6107491", "0.61029524", "0.61009276", "0.60960466", "0.6083665", "0.6074194", "0.6072961", "0.6039758", "0.60344374", "0.60296535", "0.60244656", "0.60065734", "0.5962599", "0.59596384", "0.59591055", "0.5952267", "0.59486794", "0.5933353", "0.59279025", "0.59243435", "0.58979726", "0.5893977", "0.58896154", "0.58791596", "0.5876141", "0.5869291", "0.58656377", "0.58656377", "0.58580405", "0.58491796", "0.5845911", "0.58388764", "0.5835992", "0.58344936", "0.582764", "0.5822418", "0.5821315", "0.5806439", "0.57972294", "0.5797136", "0.5793466", "0.5786965", "0.57728255", "0.5771077", "0.5767716", "0.57650673", "0.57613677", "0.57612574", "0.57611424", "0.57570535", "0.57530034", "0.5751443", "0.5745312", "0.57435995" ]
0.7488213
0
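A minimal usage sketch for the check_consistent record above, assuming the function from that record's document field is in scope; the dimension names and toy coordinates below are illustrative assumptions, not values taken from the dataset:

import numpy as np
import xarray as xr

# Build a toy 4-D DataArray whose coordinate arrays match their dimension
# lengths, so every assertion in check_consistent holds.
X = xr.DataArray(
    np.random.rand(2, 3, 4, 5),
    dims=['lat', 'lon', 'sample', 'feature'],
    coords={
        'lat': [10.0, 20.0],
        'lon': [0.0, 1.0, 2.0],
        'sample': np.arange(4),
        'feature': ['f0', 'f1', 'f2', 'f3', 'f4'],
    },
)

# Passes silently: each coordinate is exactly as long as its dimension.
check_consistent(X, 'lat', 'lon', 'sample', 'feature')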
Checks that X is an Xarray.DataArray
def check_type(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim): assert type(X) == xr.DataArray, 'XCast requires a dataset to be of type "Xarray.DataArray"'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __check_is_xarray(self, data):\n if type(data) is xr.core.dataarray.DataArray or \\\n type(data) is xr.core.dataarray.Dataset:\n\n return True\n else:\n msg = \"Variable {data} is not an xarray DataArray/Dataset\"\n raise Exception(msg)", "def is_dataarray(X, require_attrs=None):\n\n if require_attrs is None:\n require_attrs = [\"values\", \"coords\", \"dims\", \"to_dataset\"]\n\n return all([hasattr(X, name) for name in require_attrs])", "def _validate_X(X):\n if not isinstance(X, pd.DataFrame):\n raise ValueError(\"X should be a dataframe.\")", "def check_data_type_column_data(X):\n if type(X) is not numpy.ndarray:\n raise TypeError(\"X should be type numpy.ndarray\")\n\n if len(X.shape) == 2 and X.shape[1] > 1:\n raise TypeError(\"X should have a single column.\")", "def is_xarray(func, *dec_args):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n try:\n ds_da_locs = dec_args[0]\n if not isinstance(ds_da_locs, list):\n ds_da_locs = [ds_da_locs]\n\n for loc in ds_da_locs:\n if isinstance(loc, int):\n ds_da = args[loc]\n elif isinstance(loc, str):\n ds_da = kwargs[loc]\n\n is_ds_da = isinstance(ds_da, (xr.Dataset, xr.DataArray))\n if not is_ds_da:\n typecheck = type(ds_da)\n raise IOError(\n f\"\"\"The input data is not an xarray DataArray or\n Dataset. climpred is built to wrap xarray to make\n use of its awesome features. Please input an xarray\n object and retry the function.\n\n Your input was of type: {typecheck}\"\"\"\n )\n except IndexError:\n pass\n # this is outside of the try/except so that the traceback is relevant\n # to the actual function call rather than showing a simple Exception\n # (probably IndexError from trying to subselect an empty dec_args list)\n return func(*args, **kwargs)\n\n return wrapper", "def _validate_X(X):\n return X if not isinstance(X, pd.DataFrame) else X.as_matrix()", "def check_consistent_X(self, X):\n # X must be ndarray-type\n if not isinstance(X, np.ndarray):\n X = np.array(X)\n\n return X", "def _is_DataArrays(data):\n if isinstance(data, (Dataset, DataArray)):\n return True\n if isinstance(data, Mapping):\n for da in data.values():\n if not isinstance(da, DataArray):\n raise TypeError(\"Please provide List/Mapping of DataArrays\")\n return True\n if isinstance(data, Iterable):\n for da in data:\n if not isinstance(da, DataArray):\n return False\n # raise TypeError(\"Please provide List/Mapping of DataArrays\")\n return True\n return False", "def check_is_numpy(X):\n if isinstance(X, list):\n return np.asarray(X)\n if isinstance(X, np.ndarray):\n return X\n raise ValueError(\n \"Expected an python list or numpy array as input \"\n \"but got {}\".format(str(type(X)))\n )", "def check_is_numpy_or_pd(X):\n if isinstance(X, list):\n return np.asarray(X)\n if isinstance(X, pd.DataFrame):\n return X\n if isinstance(X, np.ndarray):\n return X\n \n raise ValueError(\n \"Expected an pandas DataFrame or numpy array or python list as input \"\n \"but got {}\".format(str(type(X)))\n )", "def _check_array(self, X):\n x = np.copy(X)\n if np.isfortran(x) is False:\n # print (\"Array must be in Fortran-order. 
Converting now.\")\n x = np.asfortranarray(x)\n if self.sampling > x.shape:\n raise ValueError(\"'sampling' is greater than the dimensions of X\")\n return x", "def check_array_1D(X):\n X = check_is_numpy(X)\n if X.ndim != 1:\n raise ValueError(\n \"If passed as a np.array, X must be a 1-dimensional \"\n \"array, but found shape: {}\".format(X.shape)\n )\n if X.size == 0:\n raise ValueError(\n \"Input is empty or have a dimension of size 0\"\n \", found shape: {}\".format(X.shape)\n )\n \n return X", "def check_coords(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):\n\tassert x_lat_dim in X.coords.keys(), 'XCast requires a dataset_lat_dim to be a coordinate on X'\n\tassert x_lon_dim in X.coords.keys(), 'XCast requires a dataset_lon_dim to be a coordinate on X'\n\tassert x_sample_dim in X.coords.keys(), 'XCast requires a dataset_sample_dim to be a coordinate on X'\n\tassert x_feature_dim in X.coords.keys(), 'XCast requires a dataset_feature_dim to be a coordinate on X'", "def is_dataset(X, require_attrs=None):\n\n if require_attrs is None:\n require_attrs = [\"data_vars\", \"coords\", \"dims\", \"to_array\"]\n\n return all([hasattr(X, name) for name in require_attrs])", "def isscalar(x):\n arrayed_x = asarray(x)\n return asarray(x).ndim == 0 and arrayed_x.dtype != 'object'", "def _check_input(self, X):\n symbols = np.concatenate(X)\n if len(symbols) == 1: # not enough data\n raise ValueError(\"expected at least 1 observation \"\n \"but none found.\")\n elif (symbols < 0).any(): # contains negative integers\n raise ValueError(\"expected non-negative features \"\n \"for each observation.\")\n elif X.shape[1] > 1: # contains to many features\n raise ValueError(\"expected only 1 feature but got {0} \"\n \"for each observation.\".format(X.shape[1]))\n else:\n return True", "def _check_X(X, columns=None):\n if isinstance(X, (pd.DataFrame)):\n return X\n elif isinstance(X, (np.ndarray)):\n if columns is None:\n return pd.DataFrame(X)\n else:\n return pd.DataFrame(X, columns=columns)\n elif isinstance(X, pd.Series):\n return pd.DataFrame(X, columns=X.name)\n elif isinstance(X, (list, tuple)):\n X = np.array(X)\n return pd.DataFrame(X, columns=[str(i) for i in range(X.shape[1])])\n elif hasattr(X, (\"__array__\")):\n data = X.__array__()\n return pd.DataFrame(data, columns=[str(i) for i in range(data.shape[1])])\n return X", "def check_dimensions(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):\n\tassert 4 <= len(X.dims) <= 5, 'XCast requires a dataset to be 4-Dimensional'\n\tassert x_lat_dim in X.dims, 'XCast requires a dataset_lat_dim to be a dimension on X'\n\tassert x_lon_dim in X.dims, 'XCast requires a dataset_lon_dim to be a dimension on X'\n\tassert x_sample_dim in X.dims, 'XCast requires a dataset_sample_dim to be a dimension on X'\n\tassert x_feature_dim in X.dims, 'XCast requires a dataset_feature_dim to be a dimension on X'", "def _check_array(X):\n return check_array(X,\n accept_sparse=['csr', 'csc'], # Accept sparse csr, csc\n order=None, # Do not enforce C or Fortran\n copy=False, # Do not trigger copying\n force_all_finite=True, # Raise error on np.inf/np.nan\n ensure_2d=True, # Force 'X' do be a matrix\n allow_nd=True, # Allow 'X.ndim' > 2\n warn_on_dtype=False # Mute as 'dtype' is 'None'\n )", "def _check_input_timeseries(x: np.ndarray) -> np.ndarray:\n if not isinstance(x, np.ndarray):\n raise ValueError(\"The input time series must be a numpy array.\")\n if x.ndim <= 0 or x.ndim >= 4:\n raise ValueError(\n \"The input time series must have more than 0 dimensions 
and\"\n \"less than 4 dimensions.\"\n )\n if x.ndim == 3:\n return x[0]\n return x", "def is_array(self, arr):\n return isinstance(arr, np.ndarray)", "def _validate_input_data(self):\n\n if type(self.data) in [np.ndarray, da.Array]:\n if not self.data.dtype.names:\n raise ValueError('QuadTree: numpy array provided for data, but no names were found, array must be a structured array')\n if 'x' not in self.data.dtype.names or 'y' not in self.data.dtype.names:\n raise ValueError('QuadTree: numpy structured array provided for data, but \"x\" or \"y\" not found in variable names')\n self.layernames = [self.rev_layer_lookup[var] for var in self.data.dtype.names if var in ['z', 'tvu']]\n elif type(self.data) == xr.Dataset:\n if 'x' not in self.data:\n raise ValueError('QuadTree: xarray Dataset provided for data, but \"x\" or \"y\" not found in variable names')\n if len(self.data.dims) > 1:\n raise ValueError('QuadTree: xarray Dataset provided for data, but found multiple dimensions, must be one dimensional: {}'.format(self.data.dims))\n self.layernames = [self.rev_layer_lookup[var] for var in self.data if var in ['z', 'tvu']]\n self._convert_dataset() # internally we just convert xarray dataset to numpy for ease of use\n else:\n raise ValueError('QuadTree: numpy structured array or dask array with \"x\" and \"y\" as variable must be provided')", "def _check_input_for_asarray(array_like):\n if isinstance(array_like, (Tensor, list, tuple, int, float, bool, onp.ndarray)):\n return True\n raise TypeError(\"input data must be `int`, `float`, `bool`, `Tensor`, `list`, `tuple`\" + \\\n f\"or numpy.ndarray, but got {type(array_like)}\")", "def isarray(a):\n try:\n validity=isinstance(a,ndarray)\n except:\n validity=False\n\n return validity", "def isarray(a):\r\n try:\r\n validity = isinstance(a, ndarray)\r\n except:\r\n validity = False\r\n\r\n return validity", "def _check_support(X: np.ndarray, **kwargs) -> None:\n\n assert (X > 0).all() & isinteger(X), \"x should be greater then 0 and integer.\"", "def _check_support(X: np.ndarray, **kwargs) -> None:\n\n assert (X >= 0).all() & isinteger(\n X\n ), \"x should be greater or equal to 0 and integer.\"", "def test_data(self):\n\n self.assertIsInstance(self.image.data, np.ndarray)", "def check_array_3D(X, coerce_to_numpy=True, is_univariate=False, min_timestamps=2):\n X = check_is_numpy_or_pd(X)\n if X.ndim != 3:\n raise ValueError(\n \"If passed as a np.array, X must be a 3-dimensional \"\n \"array, but found shape: {}\".format(X.shape)\n )\n if X.size == 0:\n raise ValueError(\n \"Input is empty or have a dimension of size 0\"\n \", found shape: {}\".format(X.shape)\n )\n if X.shape[2] <= min_timestamps:\n raise ValueError(\n \"Input should have more than {} timestamp\"\n \", found only: {}\".format(min_timestamps,X.shape[2])\n )\n if isinstance(X, pd.DataFrame):\n raise ValueError('Only accepting numpy array as inputs for 3D')\n if is_univariate:\n if X.shape[1] != 1:\n raise ValueError(\n \"X must be a 3-dimensional array with dimension 1 equal to 1\"\n )\n return X", "def is_array(self):\n return False", "def check_consistent(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):\n\tassert X.shape[list(X.dims).index(x_lat_dim)] == len(X.coords[x_lat_dim].values), \"XCast requires a dataset's x_lat_dim coordinate to be the same length as its x_lat_dim dimension\"\n\tassert X.shape[list(X.dims).index(x_lon_dim)] == len(X.coords[x_lon_dim].values), \"XCast requires a dataset's x_lon_dim coordinate to be the same length as its x_lon_dim 
dimension\"\n\tassert X.shape[list(X.dims).index(x_sample_dim)] == len(X.coords[x_sample_dim].values), \"XCast requires a dataset's x_sample_dim coordinate to be the same length as its x_sample_dim dimension\"\n\tassert X.shape[list(X.dims).index(x_feature_dim)] == len(X.coords[x_feature_dim].values), \"XCast requires a dataset's x_feature_dim coordinate to be the same length as its x_feature_dim dimension\"", "def obs_with_data(x):\n num_toks = np.sum(x,axis=1)\n has_data = num_toks > 0\n return has_data", "def test_11_dataset(self, example):\n example.zonenumbers_i = np.array([100, 200, 300])\n example.groupnames_g = np.array(['Female', 'Male'], dtype='O')\n example.create_ds()\n assert isinstance(example.ds, xr.Dataset)\n for attr, dtype in example.dtypes.items():\n data_array = example.ds[attr]\n # test if the shapes are correct\n if dtype.shape:\n np.testing.assert_array_equal(dtype.get_shape(example),\n data_array.shape,\n 'shape not correct')\n else:\n # not initialized array\n assert not np.any(data_array.shape), 'shape not initialized'\n #test if the datatypes are correct\n assert np.dtype(dtype.dtype) == data_array.dtype, 'dtype not correct'\n print(example.ds)", "def do_type_checking(self, node):\r\n\r\n if not isinstance(node.inputs[0].type, CudaNdarrayType):\r\n raise NotImplementedError()", "def check_numpy(x):\n if isinstance(x, torch.Tensor):\n x = x.detach().cpu().numpy()\n x = np.asarray(x)\n assert isinstance(x, np.ndarray)\n return x", "def _type_check(data):\n if data.__class__.__name__ != \"Matrix3\":\n return False\n return True", "def __check_array__(self):\n if self.data_array is None:\n mess = 'No data array is present, please load before attempting to create XYZ array.'\n if isinstance(self.mess_inst, MessagesGUI):\n self.mess_inst.message(mess)\n else:\n print(mess)\n return False\n else:\n return True", "def validate_data(self, y, x=None, verbose=True):\n # Check dimensions\n if not self.ODE_order:\n if not y.ndim == 2:\n raise ValueError(\"y-array is not 2 dimensional, if ODE and you didn't provide y then x is one dim\")\n\n if verbose and y.shape[0] < y.shape[1]:\n print(\"Warning: y-array has more series (columns) than samples (rows). 
Check if this is correct\")\n\n # Checks for x\n if self.ODE_order and x is None:\n assert False\n if not x is None:\n\n # Check dimensions\n if not x.ndim == 2:\n raise ValueError(\"x-array is not 2 dimensional\")\n\n # Check shape equality\n if x.shape[0] != y.shape[0]:\n raise ValueError(\"y-array and x-array have different number of samples (rows)\")", "def _isvalid(self, data):\n if data is None:\n return False\n elif isinstance(data, (list,tuple)):\n if len(data) <= 0:\n return False\n else:\n return True\n elif isinstance(data, (np.ndarray)):\n if data.size <= 0:\n return False\n else:\n return True\n elif not data:\n return False\n else:\n return True", "def check_transposed(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):\n\tassert list(X.dims).index(x_lat_dim) == 0, 'XCast requires a dataset to be transposed to LAT x LON x SAMPLE x FEATURE'\n\tassert list(X.dims).index(x_lon_dim) == 1, 'XCast requires a dataset to be transposed to LAT x LON x SAMPLE x FEATURE'\n\tassert list(X.dims).index(x_sample_dim) == 2, 'XCast requires a dataset to be transposed to LAT x LON x SAMPLE x FEATURE'\n\tassert list(X.dims).index(x_feature_dim) == 3, 'XCast requires a dataset to be transposed to LAT x LON x SAMPLE x FEATURE'", "def check_1d(x, name):\n\n x = asarray(x)\n if size(x) == 1:\n x = asarray([x])\n if x.ndim == 2:\n raise Exception(\"Property: %s must be one-dimensional\" % name)\n x = x.flatten()\n\n return x", "def is_dataset(obj):\n return isinstance(obj, (DictDataset, ImageDataset, LabeledImageDataset,\n TupleDataset, DatasetMixin))", "def test_obsm_values_ara_numpy(self):\n\n self.validator.adata.obsm[\"X_tsne\"] = pd.DataFrame(\n self.validator.adata.obsm[\"X_umap\"], index=self.validator.adata.obs_names\n )\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, \"\n \"'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ],\n )", "def can_insert(data):\n if not isinstance(data, np.ndarray):\n return False\n if data.dtype.char in UNSUPPORTED_NUMERIC_TYPE_CODES:\n return False\n return np.issubdtype(data.dtype, np.number)", "def _check_support(X: np.ndarray, **kwargs) -> None:\n\n assert ((X == 0) | (X == 1)).all(), \"x should be equal to 0 or 1.\"", "def prepare_input_data(self, X):\n X = np.asarray(X)\n if X.dtype != \"f\" and X.dtype != \"d\":\n X = X.astype(float)\n\n self._check_input(X)\n missing_mask = np.isnan(X)\n self._check_missing_value_mask(missing_mask)\n return X, missing_mask", "def __check_signal(self, signal: np.ndarray):\n # if signal is a list, convert it to nparray\n if isinstance(signal, list):\n signal = np.array(signal)\n # if signal is still not nparray, raise error\n if not (isinstance(signal, np.ndarray)):\n print(\"1\") \n # unknown type of input signal\n raise TypeError()\n # if signal is not a vector of shape(nFeature, )\n if len(signal.shape) != 1:\n print(\"2\") \n # input signal has to be a vector\n raise TypeError()\n # set self.dim\n self.dim = signal.shape[0]\n # if self still doesn't has the attribute 'dim', set it.\n if not (hasattr(self, 'dim')):\n self.dim = signal.shape[0]\n else:\n # if dim of signal doesn't match self.dim, raise error\n if signal.shape[0] != self.dim:\n print(\"3\")\n raise TypeError()\n return signal", "def _is_number(data):\n return len(data) and np.issubdtype(_to_ndarray(data).dtype, np.number)", "def is_pyvista_dataset(obj):\n return isinstance(obj, (pyvista.DataSet, pyvista.MultiBlock))", "def __init__(self, 
xarray_obj):\n super(RasterDataArray, self).__init__(xarray_obj)", "def isscalar(array):\n arr = ma.array(array)\n if not hasattr(arr, '__len__') or arr.shape == () or len(arr) == 1:\n return True\n return False", "def test_data_loaders(dataset):\n checks = _CHECKS[dataset]\n y = checks[\"data\"][0]\n X = checks[\"data\"][1]\n\n if y is not None:\n assert isinstance(y, pd.Series)\n assert len(y) == checks[\"len_y\"]\n assert y.dtype == checks[\"data_type_y\"]\n\n if X is not None:\n if len(checks[\"data_types_X\"]) > 1:\n assert isinstance(X, pd.DataFrame)\n else:\n assert isinstance(X, pd.Series)\n\n assert X.columns.values.tolist() == checks[\"columns\"]\n\n for col, dt in checks[\"data_types_X\"].items():\n assert X[col].dtype == dt\n\n assert len(X) == checks[\"len_X\"]", "def check_all(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):\n\tcheck_dimensions(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\tcheck_coords(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\tcheck_consistent(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\tcheck_type(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\t#check_transposed(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)", "def __check_signal(self, signal):\n if not(isinstance(signal, np.ndarray)):\n raise TypeError()\n if len(signal.shape) != 1:\n raise TypeError()\n if not(hasattr(self, 'dim')):\n self.dim = signal.shape[0]\n else:\n if signal.shape[0] != self.dim:\n raise TypeError()", "def make_test_dataArray():\n x = np.zeros(shape=(3,31))\n x[0,:] = np.nan\n x[1,[1,2,3,4,5,6,15,23,24,25]] = [np.nan,np.nan,0.1,0.5,2.,2.,2.,2.,0.9,2.]\n x[2,[3,4,5,6,15,23,24,25]] = [0.1,0.5,2.,2.,2.,2.,0.9,2.]\n da = xr.DataArray(x, dims=['x','time'])\n da.coords['time'] = pd.date_range('19790101', freq='D', periods=31)\n return da", "def check_array_2D(X, coerce_to_numpy=True):\n X = check_is_numpy_or_pd(X)\n if X.ndim != 2:\n raise ValueError(\n \"If passed as a np.array, X must be a 2-dimensional \"\n \"array, but found shape: {}\".format(X.shape)\n )\n if X.size == 0:\n raise ValueError(\n \"Input is empty or have a dimension of size 0\"\n \", found shape: {}\".format(X.shape)\n )\n if isinstance(X, pd.DataFrame):\n if coerce_to_numpy:\n X = X.values\n return X", "def test_get_adata_asarray():\n # test getting a dense matrix\n import scnym\n\n adata = anndata.AnnData(X=np.random.random((100, 100)))\n X = scnym.utils.get_adata_asarray(adata=adata)\n assert type(X) == np.ndarray\n\n # test getting a sparse matrix\n A = np.zeros((100, 100))\n ridx = np.random.choice(A.size, size=1000, replace=True)\n A.flat[ridx] = 1\n A = sparse.csr_matrix(A)\n adata = anndata.AnnData(X=A)\n X = scnym.utils.get_adata_asarray(adata=adata)\n assert sparse.issparse(X)\n return", "def IsArray(obj):\n return isinstance(obj, (list, tuple))", "def _checkData(data: Sequence[HistoryElement]):\r\n if not all(x.timeStamp for x in data):\r\n raise ValueError(\"At least one element in data doesn't have a TimeStamp\")", "def _check_dimensions(self) -> None:\n dims = (self.y_dim, self.x_dim)\n da = self._obj[self.vars[0]] if isinstance(self._obj, xr.Dataset) else self._obj\n extra_dims = [dim for dim in da.dims if dim not in dims]\n if len(extra_dims) == 1:\n dims = tuple(extra_dims) + dims\n self.set_attrs(dim0=extra_dims[0])\n elif len(extra_dims) == 0:\n self._obj.coords[GEO_MAP_COORD].attrs.pop(\"dim0\", None)\n elif len(extra_dims) > 1:\n raise ValueError(\"Only 2D and 3D data arrays supported.\")\n if isinstance(self._obj, xr.Dataset):\n check = 
np.all([self._obj[name].dims == dims for name in self.vars])\n else:\n check = self._obj.dims == dims\n if check == False:\n raise ValueError(\n f\"Invalid dimension order ({da.dims}). \"\n f\"You can use `obj.transpose({dims}) to reorder your dimensions.\"\n )", "def test_hk_getdata_field_array_type(self):\n fields, _ = load_data(self._file)\n assert isinstance(fields['position'], np.ndarray)", "def verify_transformed(self, data):\n data_dim = data.shape[-1]\n if data_dim != self.dimension:\n error(\n \"{} result dimension {} does not match the prescribed input dimension {}\"\n .format(self.name, data_dim, self.dimension))\n nans, _ = np.where(np.isnan(data))\n if np.size(nans) != 0:\n error(\"{} result contains nan elements in :{}\".format(\n self.name, nans))", "def is_array(a):\n try:\n shape = a.shape\n return len(shape) >= 1\n except AttributeError:\n return False", "def test_univariate(self):\n data = load_dataset(data_format=\"numpy\", standardize=True)\n assert data.shape[0] > 131, \"Imported time series collection has the wrong shape\"\n assert data.shape[-1] == 1000, \"Imported time series collection has the wrong shape\"", "def check_array(arr: Arrayable) -> np.ndarray:\n if isinstance(arr, np.ndarray):\n return arr\n return np.array(arr)", "def verify_x_data(self, x_data):\n testx = deepcopy(x_data).flatten()\n xmin, xmax = self.print_verification_report(testx, 'X_data')\n\n if xmax > CLIP_VALUE or xmin < -CLIP_VALUE:\n n_clipped_elements = np.sum(CLIP_VALUE < np.abs(testx))\n n_elements = len(testx)\n x_data = np.clip(x_data, a_min=-CLIP_VALUE, a_max=CLIP_VALUE)\n logger.warning(\"Large inputs detected: clip values exceeding {}\".format(CLIP_VALUE))\n logger.info(\"{} of {} elements were clipped.\".format(n_clipped_elements, n_elements))\n\n return x_data", "def validate_dataset(self):\n pass", "def test_single_null(self):\n dset = self.f.create_dataset('x', (1,), dtype='i1')\n out = dset[()]\n self.assertIsInstance(out, np.ndarray)\n self.assertEqual(out.shape, (1,))", "def epics_data_plot(data):\n if isinstance(data, (xr.DataArray, xr.Dataset)):\n data = data.to_dataframe()", "def test_check_X_too_many_dims():\n with pytest.raises(ValueError):\n check_X(np.ones((5,4,3)))", "def is_scalar(x: Any) -> bool:\r\n return np.isscalar(x) or (isinstance(x, np.ndarray) and x.ndim == 0)", "def is_arraylike(obj):\n if isinstance(obj, list):\n return True\n elif isinstance(obj, np.ndarray):\n return True\n elif isinstance(obj, pd.Series):\n return True\n elif isinstance(obj, pd.DataFrame):\n return True\n return False", "def is_array(self):\n return len(self.descriptor) > 1", "def validate_xy(x_train, y_train):\n try:\n x_train = x_train.astype('float64')\n except ValueError:\n raise ValueError('x_train should only contain numerical data.')\n\n if len(x_train.shape) < 2:\n raise ValueError('x_train should at least has 2 dimensions.')\n\n if x_train.shape[0] != y_train.shape[0]:\n raise ValueError('x_train and y_train should have the same number of instances.')", "def _check_data(data, dset_attrs):\n dtype = dset_attrs['dtype']\n float_to_int = (np.issubdtype(dtype, np.integer)\n and np.issubdtype(data.dtype, np.floating))\n if float_to_int:\n attrs = dset_attrs['attrs']\n scale_factor = [c for c in attrs if 'scale_factor' in c][0]\n scale_factor = attrs[scale_factor]\n\n # apply scale factor and dtype\n data = np.multiply(data, scale_factor)\n if np.issubdtype(dtype, np.integer):\n data = np.round(data)\n\n data = data.astype(dtype)\n\n return data", "def _check_data_valid(self):\n\n 
is_valid = (sum(~np.isnan(self.data).flatten()) > 0 and self.data.flatten().sum() != 0)\n if not is_valid:\n raise FITSException(f\"No data in {self.survey}\")", "def is_valid(self, dataset):\n pass", "def _isscalar(x):\n return np.isscalar(x) or hasattr(x, \"shape\") and x.shape == ()", "def f_supports(self, data):\n dtype = type(data)\n if dtype is tuple or dtype is list and len(data) == 0:\n return True # ArrayParameter does support empty tuples\n elif dtype is np.ndarray and data.size == 0 and data.ndim == 1:\n return True # ArrayParameter supports empty numpy arrays\n else:\n return super(ArrayParameter, self).f_supports(data)", "def test__chk_asarray(self):\r\n\r\n exp = (array([[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]]), 0)\r\n obs = _chk_asarray([[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]], 0)\r\n assert_almost_equal(obs[0], exp[0])\r\n self.assertEqual(obs[1], exp[1])", "def check_input_dimension(self, data):\n if len(data[0]) != self.input_dimension:\n raise ValueError(\"Received {} features, expected {}.\".format(self.input_dimension, len(data[0])))", "def validate_ndarray(ndarray, expected_dtypes, expected_dimentions, name):\n\tvalid_dtype_assertion(expected_dtypes, ndarray.dtype, name)\n\tvalid_ndim_assertion(expected_dimentions, ndarray.ndim, name)", "def test_isarray_vrt(self):\n self.assertIsInstance(_test_array(landsat_vrt), np.ndarray)", "def verify_pandas(self):\n self.check_dataset_duplicate_ids(self.vertices)\n # self.check_dataset_children_ids()\n self.check_dataset_litter_ids()\n self.check_dataset_dates()", "def is_array(t):\n return isinstance(t, ast.Array)", "def check_finite_Dataset(ds):\n # Check is a Dataset\n if not isinstance(ds, xr.Dataset):\n raise TypeError(\"'ds' must be an xarray Dataset.\")\n # Check no NaN values\n ds_isnan = xr.ufuncs.isnan(ds)\n list_vars_with_nan = []\n flag_raise_error = False\n for var in list(ds_isnan.data_vars.keys()):\n if ds_isnan[var].sum().values != 0:\n list_vars_with_nan.append(var)\n flag_raise_error = True\n if flag_raise_error:\n raise ValueError(\n \"The variables {} contain NaN values\".format(list_vars_with_nan)\n )\n # Check no Inf values\n ds_isinf = xr.ufuncs.isinf(ds)\n list_vars_with_inf = []\n flag_raise_error = False\n for var in list(ds_isinf.data_vars.keys()):\n if ds_isinf[var].sum().values != 0:\n list_vars_with_inf.append(var)\n flag_raise_error = True\n if flag_raise_error:\n raise ValueError(\n \"The variables {} contain Inf values.\".format(list_vars_with_inf)\n )", "def _validate_XY(X, Y):\n try:\n for inp in [X, Y]:\n assert isinstance(inp, torch.Tensor)\n assert inp.dtype is torch.float or inp.dtype is torch.double\n assert len(inp.shape) == 2\n assert X.dtype is Y.dtype\n assert X.shape[0] == Y.shape[0]\n except AssertionError:\n raise AttributeError(\n \"invalid inputs: X and Y should be float/double tensors of shape \"\n \"(n, d) and (n, m) respectively, where n is the number of samples, \"\n \"d is the number of features, and m is the number of outputs\"\n )", "def _prep_data(self, data, func_input_dtype):\n if func_input_dtype in (None, 'DataArray'):\n return data\n if func_input_dtype == 'Dataset':\n # TODO: add logic that creates a single Dataset comprising all of\n # the DataArray objects in `data`.\n raise NotImplementedError(\"func_input_dtype of `Dataset` not yet \"\n \"implemented.\")\n if func_input_dtype == 'numpy':\n self.coords = data[0].coords\n return [d.values for d in data]", "def _linear_predictor_typecheck(x, coeffs):\n if type(x) == ndarray:\n if x.ndim == 1:\n if x.shape == 
coeffs.shape:\n pass\n else:\n raise RuntimeError(\"Got array of %d elements; wanted %d\"\n % (shape(x)[0], shape(coeffs)[0]))\n else:\n raise RuntimeError(\"Bulk predict not yet supported.\")\n elif (type(x) == RDD):\n raise RuntimeError(\"Bulk predict not yet supported.\")\n else:\n raise TypeError(\"Argument of type \" + type(x).__name__ + \" unsupported\")", "def test_isarray_gtiff(self):\n self.assertIsInstance(_test_array(landsat_gtiff), np.ndarray)", "def checkTrainData(cls, data):\n\n if data == None or len(data) == 0:\n raise Exception(\"No data\")\n\n if type(data[0]) != tuple:\n raise Exception(\"Not a list of tuples\")\n\n if len(data[0]) != 2 and type(data[0][0]) != str and type(data[0][1]) != list:\n raise Exception(\"Not a tuple of (String, [data])\")\n\n length = len(data[0][1])\n\n for tup in data:\n if len(tup) != 2 and type(tup[0]) != str and type(tup[1]) != list:\n raise Exception(\"Not a tuple of (String, [data])\")\n\n if len(tup[1]) != length:\n raise Exception(\"Not all elements have the same amount of data\")", "def check_stack(arrs):\r\n err1 = \"Object, structured arrays not supported, current type...\"\r\n err2 = \"3D arrays supported current ndim...\"\r\n if isinstance(arrs, (list, tuple)):\r\n arrs = np.array(arrs)\r\n if arrs.dtype.kind in ('O', 'V'):\r\n raise ValueError(\"{} {}\".format(err1, arrs.dtype.kind))\r\n if arrs.ndim != 3:\r\n raise ValueError(\"{} {}\".format(err2, arrs.ndim))\r\n return arrs", "def _validate_dimensionality(self):\r\n\r\n if self.time.ndim != 1:\r\n raise ValueError(\"time array must be one-dimensional\")\r\n npoints = self.data.shape[-1]\r\n if npoints != len(self.time):\r\n raise ValueError(\"mismatch of time and data dimensions\")", "def _validate_input(self, data: Union[np.ndarray, pd.DataFrame, pd.Series],\n expected_dim: int, inference: bool = False) -> np.ndarray:\n allowed_types = (\n np.ndarray,\n pd.core.frame.DataFrame,\n pd.core.frame.Series\n )\n\n if type(data) not in allowed_types:\n raise TypeError('Supported input types: np.ndarray, '\n 'pd.core.frame.DataFrame, pd.core.frame.Series got'\n ' {}'.format(type(data)))\n\n if isinstance(data, pd.DataFrame) or isinstance(data, pd.Series):\n data = data.values\n\n if data.size == 0:\n raise ValueError('Empty array passed to fit() or predict()')\n\n if data.ndim > expected_dim:\n raise ValueError('Data with incorrect number of dimensions '\n 'passed to fit() or predict(). Max dim is '\n '{}, got {}'.format(expected_dim, data.ndim))\n\n if not np.issubdtype(data.dtype, np.number):\n raise ValueError('Non numeric value found in data')\n\n if not np.isfinite(data).all():\n raise ValueError('Data contains nan or inf')\n\n if inference:\n # additional checks on prediction time\n if not self._fitted:\n raise ValueError('Fit the model first.')\n\n if self._ndim == 2 and data.shape[-1] != self._shape[-1]:\n raise ValueError('Number of features does not match'\n ' data model was trained on. 
Expected'\n ' {}, got {}'\n .format(self._shape[-1], data.shape[-1]))\n\n return data", "def DataArray(self, array, **kwargs):\n #import xarray as xr\n from ..utils import get_DA\n da = get_DA(array, simulation=self, **kwargs)\n return da", "def DataArray(self, array, **kwargs):\n #import xarray as xr\n from ..utils import get_DA\n da = get_DA(array, simulation=self, **kwargs)\n return da", "def DataArray(self, array, **kwargs):\n #import xarray as xr\n from ..utils import get_DA\n da = get_DA(array, simulation=self, **kwargs)\n return da", "def is_a_numpy_array(obj):\n return type(obj).__module__ == np.__name__", "def ExamineData(x):\r\n print(\"Data shape:\", x.shape)\r\n print(\"\\nColumns:\", x.columns)\r\n print(\"\\nData types\\n\", x.dtypes)\r\n print(\"\\nDescribe data\\n\", x.describe())\r\n print(\"\\nData\\n\", x.head(2))\r\n print (\"\\nSize of data:\", np.sum(x.memory_usage())) # Get size of dataframes\r\n print(\"\\nAre there any NULLS\\n\", np.sum(x.isnull()))", "def _data_validation(data):\n if isinstance(data, (list, tuple, type(None))) is not True:\n raise ValueError(f\"data must be tuple, list, or None, \"\n f\"data type is '{type(data).__name__}'. \"\n f\"Iterable data cannot be empty.\")" ]
[ "0.86942047", "0.71866673", "0.7029208", "0.69669193", "0.6887847", "0.67700577", "0.66643125", "0.6535942", "0.63918483", "0.63843584", "0.636618", "0.63450086", "0.63026375", "0.6252001", "0.6233888", "0.6226298", "0.6150564", "0.61219764", "0.6118666", "0.6112856", "0.6090837", "0.60685945", "0.60465264", "0.60236305", "0.59703064", "0.59597814", "0.5922164", "0.59058094", "0.59038925", "0.5892678", "0.58668333", "0.5841358", "0.583953", "0.58066654", "0.5792095", "0.5786583", "0.57668775", "0.57666624", "0.57346743", "0.5682452", "0.56149435", "0.56130767", "0.5579066", "0.5577453", "0.557293", "0.5553439", "0.5549447", "0.5545864", "0.55430526", "0.55412984", "0.5538976", "0.5508871", "0.55063474", "0.55002695", "0.54900724", "0.5481458", "0.5481406", "0.54711086", "0.54673856", "0.546126", "0.54495454", "0.5437268", "0.5428974", "0.54231685", "0.54142964", "0.539852", "0.5398151", "0.53869265", "0.5383383", "0.5381261", "0.5379786", "0.53726196", "0.5367527", "0.53609216", "0.53567964", "0.5343412", "0.5341379", "0.53370756", "0.53358746", "0.53264433", "0.53244996", "0.5318711", "0.5314028", "0.5311214", "0.5310543", "0.53096557", "0.53051376", "0.52975446", "0.5296257", "0.5284276", "0.5267724", "0.52596116", "0.52591735", "0.52493376", "0.52449965", "0.52449965", "0.52449965", "0.5241377", "0.5240617", "0.5236437" ]
0.8198799
1
Checks that X satisfies all conditions for XCAST
def check_all(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):
	check_dimensions(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)
	check_coords(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)
	check_consistent(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)
	check_type(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)
	#check_transposed(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_type(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):\n\tassert type(X) == xr.DataArray, 'XCast requires a dataset to be of type \"Xarray.DataArray\"'", "def _check_support(X: np.ndarray, **kwargs) -> None:\n\n assert (X > 0).all() & isinteger(X), \"x should be greater then 0 and integer.\"", "def _check_support(X: np.ndarray, **kwargs) -> None:\n\n assert (X >= 0).all() & isinteger(\n X\n ), \"x should be greater or equal to 0 and integer.\"", "def _check_support(X: np.ndarray, **kwargs) -> None:\n\n assert ((X == 0) | (X == 1)).all(), \"x should be equal to 0 or 1.\"", "def check_coords(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):\n\tassert x_lat_dim in X.coords.keys(), 'XCast requires a dataset_lat_dim to be a coordinate on X'\n\tassert x_lon_dim in X.coords.keys(), 'XCast requires a dataset_lon_dim to be a coordinate on X'\n\tassert x_sample_dim in X.coords.keys(), 'XCast requires a dataset_sample_dim to be a coordinate on X'\n\tassert x_feature_dim in X.coords.keys(), 'XCast requires a dataset_feature_dim to be a coordinate on X'", "def check_dimensions(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):\n\tassert 4 <= len(X.dims) <= 5, 'XCast requires a dataset to be 4-Dimensional'\n\tassert x_lat_dim in X.dims, 'XCast requires a dataset_lat_dim to be a dimension on X'\n\tassert x_lon_dim in X.dims, 'XCast requires a dataset_lon_dim to be a dimension on X'\n\tassert x_sample_dim in X.dims, 'XCast requires a dataset_sample_dim to be a dimension on X'\n\tassert x_feature_dim in X.dims, 'XCast requires a dataset_feature_dim to be a dimension on X'", "def check_consistent(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):\n\tassert X.shape[list(X.dims).index(x_lat_dim)] == len(X.coords[x_lat_dim].values), \"XCast requires a dataset's x_lat_dim coordinate to be the same length as its x_lat_dim dimension\"\n\tassert X.shape[list(X.dims).index(x_lon_dim)] == len(X.coords[x_lon_dim].values), \"XCast requires a dataset's x_lon_dim coordinate to be the same length as its x_lon_dim dimension\"\n\tassert X.shape[list(X.dims).index(x_sample_dim)] == len(X.coords[x_sample_dim].values), \"XCast requires a dataset's x_sample_dim coordinate to be the same length as its x_sample_dim dimension\"\n\tassert X.shape[list(X.dims).index(x_feature_dim)] == len(X.coords[x_feature_dim].values), \"XCast requires a dataset's x_feature_dim coordinate to be the same length as its x_feature_dim dimension\"", "def has_x(self):\n return any(map(lambda s: s.is_x, self))", "def _check_support(X: np.ndarray, **kwargs) -> None:\n\n X_union = (X == 0) | (X == 1)\n for k in range(2, kwargs[\"k\"]):\n X_union = X_union | (X == k)\n\n assert (\n X_union.all()\n ), f\"x should be equal to integer from 0 to {kwargs['k']} (exclusive).\"", "def _check_support(X: np.ndarray, **kwargs) -> None:\n\n X_union = (X == 0) | (X == 1)\n for k in range(2, kwargs[\"n\"] + 1):\n X_union = X_union | (X == k)\n\n assert (\n X_union.all()\n ), f\"x should be equal to integer from 0 to {kwargs['n']} (inclusive).\"", "def supports(self, x):\n return 0.0 < x", "def supports(self, x):\n return 0.0 < x", "def check_force_cast(FROM, TO, operations, value):\n import re\n r = re.compile('(\\w+) \\%i\\d, \\$(-?\\d+)')\n r2 = re.compile('(\\w+) \\%i\\d')\n #\n value = rffi.cast(FROM, value)\n value = rffi.cast(lltype.Signed, value)\n #\n expected_value = rffi.cast(TO, value)\n expected_value = rffi.cast(lltype.Signed, expected_value)\n #\n for op in operations:\n match = r.match(op)\n if match is None:\n match = 
r2.match(op)\n assert match, \"line %r does not match regexp\" % (op,)\n opname = match.group(1)\n if opname == 'int_and':\n value &= int(match.group(2))\n elif opname == 'int_signext':\n numbytes = int(match.group(2))\n value = int_signext(value, numbytes)\n elif opname == 'int_is_true':\n value = bool(value)\n else:\n assert 0, opname\n #\n assert rffi.cast(lltype.Signed, value) == expected_value", "def check_transposed(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):\n\tassert list(X.dims).index(x_lat_dim) == 0, 'XCast requires a dataset to be transposed to LAT x LON x SAMPLE x FEATURE'\n\tassert list(X.dims).index(x_lon_dim) == 1, 'XCast requires a dataset to be transposed to LAT x LON x SAMPLE x FEATURE'\n\tassert list(X.dims).index(x_sample_dim) == 2, 'XCast requires a dataset to be transposed to LAT x LON x SAMPLE x FEATURE'\n\tassert list(X.dims).index(x_feature_dim) == 3, 'XCast requires a dataset to be transposed to LAT x LON x SAMPLE x FEATURE'", "def _check_special_conversion(self, expr):\n lhs_units = expr.eq.lhs.get_units()\n rhs_units = expr.eq.rhs.get_units()\n if lhs_units.dimensionally_equivalent(rhs_units):\n return\n for from_units, to_units in self.special_conversions.iterkeys():\n if (from_units.dimensionally_equivalent(rhs_units)\n and to_units.dimensionally_equivalent(lhs_units)):\n # We can apply this conversion\n self.special_conversions[(from_units, to_units)](expr)\n DEBUG('units-converter', \"Used special conversion from\", repr(from_units), \"to\", repr(to_units))#, \"giving\", expr.xml())\n break", "def assert_positive(x):\n \n assert(all(x) >= 0)", "def _check_input(self, X):\n symbols = np.concatenate(X)\n if len(symbols) == 1: # not enough data\n raise ValueError(\"expected at least 1 observation \"\n \"but none found.\")\n elif (symbols < 0).any(): # contains negative integers\n raise ValueError(\"expected non-negative features \"\n \"for each observation.\")\n elif X.shape[1] > 1: # contains to many features\n raise ValueError(\"expected only 1 feature but got {0} \"\n \"for each observation.\".format(X.shape[1]))\n else:\n return True", "def _validate_X(X):\n if not isinstance(X, pd.DataFrame):\n raise ValueError(\"X should be a dataframe.\")", "def verify_x_data(self, x_data):\n testx = deepcopy(x_data).flatten()\n xmin, xmax = self.print_verification_report(testx, 'X_data')\n\n if xmax > CLIP_VALUE or xmin < -CLIP_VALUE:\n n_clipped_elements = np.sum(CLIP_VALUE < np.abs(testx))\n n_elements = len(testx)\n x_data = np.clip(x_data, a_min=-CLIP_VALUE, a_max=CLIP_VALUE)\n logger.warning(\"Large inputs detected: clip values exceeding {}\".format(CLIP_VALUE))\n logger.info(\"{} of {} elements were clipped.\".format(n_clipped_elements, n_elements))\n\n return x_data", "def check_consistent_X(self, X):\n # X must be ndarray-type\n if not isinstance(X, np.ndarray):\n X = np.array(X)\n\n return X", "def _pairwise_table_x_check(self, X, var_name=\"X\"):\n X_valid = check_is_scitype(X, \"Table\", return_metadata=False, var_name=var_name)\n\n if not X_valid:\n msg = (\n \"X and X2 must be in an sktime compatible format, of scitype Table, \"\n \"for instance a pandas.DataFrame or a 2D numpy.ndarray. 
\"\n \"See the data format tutorial examples/AA_datatypes_and_datasets.ipynb\"\n )\n raise TypeError(msg)\n\n X_inner_mtype = self.get_tag(\"X_inner_mtype\")\n X_coerced = convert_to(X, to_type=X_inner_mtype, as_scitype=\"Table\")\n\n return X_coerced", "def check_all_finite(X):\n return _check_all_finite(X.data if sp.issparse(X) else X)", "def is_castxml(self):\n return self._is_castxml", "def test_utils_to_bool(self, tcex, input_, expected):\n result = tcex.utils.to_bool(input_)\n assert result == expected, f'Input {input_} result of {result} != {expected}'", "def test_evaluate_cast_expression(self):\n value = self.evaluate_common(\"cast(2D,'Edm.Double')\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Double, \"Expected Double\")\n self.assertTrue(value.value == 2.0, \"Expected 2.0\")\n value = self.evaluate_common(\"cast(2L,'Edm.Single')\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Single, \"Expected Single\")\n self.assertTrue(value.value == 2.0, \"Expected 2.0\")\n value = self.evaluate_common(\"cast(2,'Edm.Int64')\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Int64, \"Expected Int64\")\n self.assertTrue(value.value == 2, \"Expected 2\")\n try:\n value = self.evaluate_common(\"cast(2.0D,'Edm.Single')\")\n self.fail(\"Double cast to Single\")\n except:\n pass\n value = self.evaluate_common(\"cast('x','Edm.String')\")\n self.assertTrue(value.value == 'x', \"Expected 'x'\")\n try:\n value = self.evaluate_common(\"cast(X'DEAD','Edm.String')\")\n self.fail(\"Binary cast to String\")\n except:\n pass\n try:\n value = self.evaluate_common(\"cast(1,'Edm.Boolean')\")\n self.fail(\"1 cast to Boolean\")\n except:\n pass\n value = self.evaluate_common(\"cast(null,'Edm.String')\")\n self.assertTrue(\n value.type_code == edm.SimpleType.String, \"Expected String\")\n self.assertTrue(value.value is None, \"Expected None\")\n value = self.evaluate_common(\"cast('Edm.Int16')\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Int16, \"Expected Int16\")\n self.assertTrue(value.value is None, \"Expected None\")", "def check_for_float_and_int(check):", "def isavalidstate(self , x ):\n ans = False\n for i in range(self.n):\n ans = ans or ( x[i] < self.x_lb[i] )\n ans = ans or ( x[i] > self.x_ub[i] )\n \n return not(ans)", "def test_of_with_args(self) -> None:\n assert Result.of(lambda x: bool(x > 0), 1).unwrap() is True", "def check_deterministic_constraints(self, x):\n return True", "def check_deterministic_constraints(self, x):\n return True", "def _Check(self, cn0, num_obs, type_bits):\n\n if num_obs == 0:\n return\n\n if self._for_log:\n avg_cn0, max_cn0 = self.GetAvgAndMaxCn0FromTimeSeries(\n cn0, num_obs, type_bits)\n else:\n avg_cn0, max_cn0, _ = self.GetAvgAndMaxCn0(cn0, num_obs, type_bits)\n\n avg_ranges = check_range.Interval([40.0, None])\n max_ranges = check_range.Interval([45.0, None])\n all_inclusive = check_range.AllInclusiveRange()\n self._CheckByRange('%s (Avg)' % self._name, avg_cn0, avg_ranges,\n all_inclusive)\n self._CheckByRange('%s (Max)' % self._name, max_cn0, max_ranges,\n all_inclusive)", "def _input_checks(\n true_values: Union[np.ndarray, dask.array.core.Array], pred_values: Union[np.ndarray, dask.array.core.Array]\n):\n\n def _cast(data: Union[np.ndarray, dask.array.core.Array]) -> Tuple[np.ndarray, np.ndarray]:\n if isinstance(data, dask.array.core.Array):\n to_return = data.compute()\n elif isinstance(data, np.ndarray):\n to_return = data\n else:\n raise TypeError(f\"Type {type(data)} is not recognized for true/pred values.\")\n return 
to_return\n\n true_vals = _cast(true_values)\n pred_vals = _cast(pred_values)\n\n assert len(true_values.shape) == len(pred_values.shape), \"true_values must have same dimensions as pred_values\"\n assert np.all(true_values.shape == pred_values.shape), \"true_values must have same dimensions as pred_values\"\n\n return true_vals, pred_vals", "def test_upcast(self):\r\n if config.cast_policy == 'custom':\r\n assert arange(iscalar()).dtype == iscalar().dtype\r\n assert arange(fscalar()).dtype == fscalar().dtype\r\n assert arange(dscalar()).dtype == dscalar().dtype\r\n\r\n # int32 + float32 -> float64\r\n assert arange(iscalar(), fscalar()).dtype == dscalar().dtype\r\n assert arange(iscalar(), dscalar()).dtype == dscalar().dtype\r\n assert arange(fscalar(), dscalar()).dtype == dscalar().dtype\r\n\r\n assert arange(iscalar(), fscalar(), dscalar()).dtype == \\\r\n dscalar().dtype\r\n elif config.cast_policy in ('numpy', 'numpy+floatX'):\r\n for dtype in get_numeric_types():\r\n # Test with a single argument.\r\n arange_dtype = arange(scalar(dtype=str(dtype))).dtype\r\n numpy_dtype = numpy.arange(numpy.array(1, dtype=dtype)).dtype\r\n if (dtype != 'float64' and\r\n numpy_dtype == 'float64' and\r\n config.cast_policy == 'numpy+floatX' and\r\n config.floatX == 'float32'):\r\n # We want a float32 arange.\r\n assert arange_dtype == 'float32'\r\n else:\r\n # Follow numpy.\r\n assert arange_dtype == numpy_dtype\r\n\r\n # Test with two arguments.\r\n for stop_dtype in get_numeric_types():\r\n arange_dtype = arange(\r\n start=scalar(dtype=str(dtype)),\r\n stop=scalar(dtype=str(stop_dtype))).dtype\r\n numpy_dtype = numpy.arange(\r\n start=numpy.array(0, dtype=dtype),\r\n stop=numpy.array(1, dtype=stop_dtype)).dtype\r\n if (dtype != 'float64' and\r\n stop_dtype != 'float64' and\r\n numpy_dtype == 'float64' and\r\n config.cast_policy == 'numpy+floatX' and\r\n config.floatX == 'float32'):\r\n # We want a float32 arange.\r\n assert arange_dtype == 'float32'\r\n else:\r\n # Follow numpy.\r\n assert arange_dtype == numpy_dtype\r\n\r\n # Test with three arguments.\r\n for step_dtype in get_numeric_types():\r\n arange_dtype = arange(\r\n start=scalar(dtype=str(dtype)),\r\n stop=scalar(dtype=str(stop_dtype)),\r\n step=scalar(dtype=str(step_dtype))).dtype\r\n numpy_dtype = numpy.arange(\r\n start=numpy.array(0, dtype=dtype),\r\n stop=numpy.array(1, dtype=stop_dtype),\r\n step=numpy.array(1, dtype=step_dtype)).dtype\r\n if (dtype != 'float64' and\r\n stop_dtype != 'float64' and\r\n step_dtype != 'float64' and\r\n numpy_dtype == 'float64' and\r\n config.cast_policy == 'numpy+floatX' and\r\n config.floatX == 'float32'):\r\n # We want a float32 arange.\r\n assert arange_dtype == 'float32'\r\n else:\r\n # Follow numpy.\r\n assert arange_dtype == numpy_dtype\r\n else:\r\n raise NotImplementedError(config.cast_policy)", "def check_data_validity(X, y, query, task):\n # ADD IMPLEMENTATION HERE", "def is_x(self, var):\n x_list = ['lon', 'longitude', 'LONGITUDE', 'Longitude', 'x']\n\n if self.get_units(var) == 'degrees_east':\n return True\n if self.get_name(var) in x_list:\n return True\n if self.get_description(var) in x_list:\n return True\n else:\n return False", "def ok(self, point):\n [x1, x2, x3, x4, x5, x6] = point.decisions\n if x1 + x2 -2 < 0:\n return False\n if 6 - x1 - x2 < 0:\n return False\n if 2 - x2 + x1 < 0:\n return False\n if 2 - x1 + 3*x2 < 0:\n return False\n if 4 - (x3 - 3)**2 - x4 < 0:\n return False\n if (x5 - 3)**3 + x6 - 4 < 0:\n return False\n for i, d in enumerate(point.decisions):\n if d < 
self.decisions[i].low or d > self.decisions[i].high:\n print i, d, self.decisions[i].low, self.decisions[i].high\n return False\n return True", "def _check_if_event_event_or_event_timex(self):\n if (type(self.source) is Timex and type(self.target) is Event) or (type(self.source) is Event and type(self.target) is Timex):\n self._is_event_timex = True\n elif type(self.source) is Event and type(self.target) is Event:\n self._is_event_event = True\n elif type(self.source) is Timex and type(self.target) is Timex:\n self._is_timex_timex = True", "def is_castxml1(self):\n return self._is_castxml1", "def test_convert_logical():", "def data_checker(xml):\n if not xml or 'response code=\"102\"' in xml:\n LOGGER.debug(\"The service 'oclc' is temporarily down!\")\n return False\n return True", "def check_binning_parameter_range(x_min, x_max, ws_unit):\n if ws_unit == 'dSpacing' and not 0 < x_min < x_max < 20:\n # dspacing within (0, 20)\n x_range_is_wrong = True\n elif ws_unit == 'TOF' and not 1000 < x_min < x_max < 1000000:\n # TOF within (1000, 1000000)\n x_range_is_wrong = True\n elif ws_unit != 'dSpacing' and ws_unit != 'TOF':\n raise NotImplementedError('Impossible case for unit {}'.format(ws_unit))\n else:\n # good cases\n x_range_is_wrong = False\n\n if x_range_is_wrong:\n ero_msg = 'For {0}, X range ({1}, {2}) does not make sense' \\\n ''.format(ws_unit, x_min, x_max)\n print('[ERROR CAUSING CRASH] {}'.format(ero_msg))\n raise RuntimeError(ero_msg)\n\n return", "def _pairwise_panel_x_check(self, X, var_name=\"X\"):\n check_res = check_is_scitype(\n X, [\"Series\", \"Panel\"], return_metadata=True, var_name=var_name\n )\n X_valid = check_res[0]\n metadata = check_res[2]\n\n X_scitype = metadata[\"scitype\"]\n\n if not X_valid:\n msg = (\n \"X and X2 must be in an sktime compatible format, \"\n \"of scitype Series or Panel, \"\n \"for instance a pandas.DataFrame with sktime compatible time indices, \"\n \"or with MultiIndex and last(-1) level an sktime compatible time index.\"\n \" See the data format tutorial examples/AA_datatypes_and_datasets.ipynb\"\n )\n raise TypeError(msg)\n\n # if the input is a single series, convert it to a Panel\n if X_scitype == \"Series\":\n X = convert_Series_to_Panel(X)\n\n # can't be anything else if check_is_scitype is working properly\n elif X_scitype != \"Panel\":\n raise RuntimeError(\"Unexpected error in check_is_scitype, check validity\")\n\n X_inner_mtype = self.get_tag(\"X_inner_mtype\")\n X_coerced = convert_to(X, to_type=X_inner_mtype, as_scitype=\"Panel\")\n\n return X_coerced", "def sanity_checks(df: pd.DataFrame) -> None:\n df_temp = df.copy()\n # checks that the max date is less than tomorrow's date.\n assert datetime.datetime.strptime(df_temp['Date'].max(), '%Y-%m-%d') < (datetime.datetime.utcnow() + datetime.timedelta(days=1))\n # checks that there are no duplicate dates\n assert df_temp['Date'].duplicated().sum() == 0, 'One or more rows share the same date.'\n if 'Cumulative total' not in df_temp.columns:\n df_temp['Cumulative total'] = df_temp['Daily change in cumulative total'].cumsum()\n # checks that the cumulative number of tests on date t is always greater than the figure for t-1:\n assert (df_temp['Cumulative total'].iloc[1:] >= df_temp['Cumulative total'].shift(1).iloc[1:]).all(), \"On one or more dates, `Cumulative total` is greater on date t-1.\"\n # df.iloc[1:][df['Cumulative total'].iloc[1:] < df['Cumulative total'].shift(1).iloc[1:]]\n # cross-checks a sample of scraped figures against the expected result.\n assert 
len(sample_official_data) > 0\n for dt, d in sample_official_data:\n val = df_temp.loc[df_temp['Date'] == dt, SERIES_TYPE].squeeze().sum()\n assert val == d[SERIES_TYPE], f\"scraped value ({val:,d}) != official value ({d[SERIES_TYPE]:,d}) on {dt}\"\n return None", "def __call__(self, x):\n out = self.evaluate_filter(x)\n if isinstance(out, np.ndarray):\n assert(out.dtype == bool)\n out = bool(out.all())\n elif isinstance(out, np.bool_):\n out = bool(out)\n try:\n assert(isinstance(out, bool))\n except AssertionError: # pragma: debug\n print(out, type(out))\n raise\n return out", "def _check_inputvalues(self):\n # Check x, y and z are int or float dtypes\n # ie do not contain any unusable values like strings\n if not (self.x.dtype in [np.int, np.float]):\n raise TypeError(\"Route input 'x' must be either int or float dtypes\")\n\n if not (self.y.dtype in [np.int, np.float]):\n raise TypeError(\"Route input 'x' must be either int or float dtypes\")\n\n # Performs checks on z if not empty\n if self.z is not None:\n for v in self.z.values():\n if not (v.dtype in [np.int, np.float]):\n raise TypeError(\"Route input 'x' must be either int or float dtypes\")", "def _isvalid(self, x):\n return (x <= self.n) & (x > 0)", "def test_sanity_check (self):\n X, Y = self.dm.get_data(std=True, lag_indicator=True)\n\n # Ensure number of rows between what we expect.\n row_bound = (800, 1000)\n actual_rows = X.shape[0]\n msg = 'Number of rows not within expected bounds.'\n self.assertTrue(row_bound[0] < actual_rows < row_bound[1], msg)\n\n msg = 'X and Y have different number of rows.'\n self.assertEqual(X.shape[0], Y.shape[0], msg)\n\n # Ensure X columns match.\n expected_x_cols = ['SP500', 'ltc_px_std', 'xrp_px_std', 'xlm_px_std',\n 'eth_px_std', 'btc_px_std', 'ltc_volume_std',\n 'xrp_volume_std', 'xlm_volume_std', 'eth_volume_std',\n 'btc_volume_std', 'lagged_others']\n actual_x_cols = X.columns.tolist()\n msg = 'Number of X columns different than expected.'\n self.assertEqual(len(actual_x_cols), len(expected_x_cols), msg)\n\n for col in expected_x_cols:\n msg = 'Expected column not found: {}'.format(col)\n self.assertTrue(col in actual_x_cols, msg)", "def _validate_XY(X, Y):\n try:\n for inp in [X, Y]:\n assert isinstance(inp, torch.Tensor)\n assert inp.dtype is torch.float or inp.dtype is torch.double\n assert len(inp.shape) == 2\n assert X.dtype is Y.dtype\n assert X.shape[0] == Y.shape[0]\n except AssertionError:\n raise AttributeError(\n \"invalid inputs: X and Y should be float/double tensors of shape \"\n \"(n, d) and (n, m) respectively, where n is the number of samples, \"\n \"d is the number of features, and m is the number of outputs\"\n )", "def is_FriCASElement(x):\n return isinstance(x, FriCASElement)", "def _validate_raster_attributes(self, x):\r\n if self.extent != x.extent:\r\n raise ValueError(\"Extents do not match.\")\r\n if self.resolution != x.resolution:\r\n raise ValueError(\"Resolutions do not match.\")\r\n if not np.array_equal(self.x, x.x):\r\n raise ValueError(\"x attributes do not match.\")\r\n if not np.array_equal(self.y, x.y):\r\n raise ValueError(\"y attributes do not match.\")\r\n if len(self.layers) != len(x.layers):\r\n raise ValueError(\"layers lengths do not match.\")\r\n if self.crs != x.crs:\r\n raise ValueError(\"crs attributes do not match.\")", "def test_valid_function_cast(self):\n\t\tsource = \"\"\"\n\t\t\tpragma solidity ^0.4.22;\n\t\t\tcontract testContract {\n\t\t\t\tevent TestEvent(uint t);\n\t\t\t\tfunction testFunction () public returns (string) 
{\n\t\t\t\t\tuint foo = 5;\n\t\t\t\t\tuint bar = uint8(foo);\n\t\t\t\t\trequire(foo > 5);\n\t\t\t\t\treturn 'helloWorld';\n\t\t\t\t}\n\t\t\t}\n\t\t\"\"\"\n\t\tevents, statements = self.before_test(source)\n\t\tself.assertTrue(len(statements) > 0)\n\t\tstatement_under_test = filter_statements(events, statements)\n\t\tself.assertTrue(len(statement_under_test) == 3)", "def CPX(self, value):\n self._compare(value, 'X')", "def check_all(self, package=None, _true=lambda *a: True):\n if self.element_constraint:\n check1 = self.element_constraint.apply_to\n else:\n check1 = _true\n\n my_view = {}\n if self.content_mimetype is not None:\n my_view[\"mimetype\"] = self.content_mimetype\n if self.content_model is not None:\n my_view[\"model\"] = self.content_model\n if my_view:\n check2 = lambda e: apply_to(my_view, e)\n else:\n check2 = _true\n\n r = True\n for e in self.iter_elements(package):\n r = r & check1(e) & check2(e)\n return r", "def supports(self, x):\n return True", "def test_check_x(self):\n r1 = Rectangle(10, 2)\n self.assertEqual(r1.x, 0)\n\n r2 = Rectangle(2, 10, 6)\n self.assertEqual(r2.x, 6)\n\n r3 = Rectangle(5, 2, 3, 9, 12)\n self.assertEqual(r3.x, 3)\n\n r4 = Rectangle(5, 2, 0, 3, 12)\n self.assertEqual(r4.x, 0)", "def prepare_input_data(self, X):\n X = np.asarray(X)\n if X.dtype != \"f\" and X.dtype != \"d\":\n X = X.astype(float)\n\n self._check_input(X)\n missing_mask = np.isnan(X)\n self._check_missing_value_mask(missing_mask)\n return X, missing_mask", "def ispoint(x):\n if isvect(x) and x[3] > 0.0:\n return True\n return False", "def check_nodes(self) -> bool:\n # check the input-output consistency\n for op_name in self.__ops:\n op = cast(Operator, self.__ops[op_name])\n inputs: Dict[str, Operator] = op.input_ops\n for i in inputs.values():\n if op not in i.output_op_list:\n return False\n\n return True", "def check(self, input, ast):\n assert False # Must be redefined", "def isavalidinput(self , x , u):\n ans = False\n for i in range(self.m):\n ans = ans or ( u[i] < self.u_lb[i] )\n ans = ans or ( u[i] > self.u_ub[i] )\n \n return not(ans)", "def is_in_interval(self, x):\r\n \r\n bool_intval = False\r\n\r\n if np.logical_and(x > self.intval[0], x < self.intval[1]):\r\n bool_intval = True\r\n \r\n return bool_intval", "def _isproperdist(X):\n X = np.asarray(X)\n if not np.allclose(np.sum(X), 1) or not np.all(X>=0) or not np.all(X<=1):\n return False\n else:\n return True", "def evaluate_filter(self, x):\n raise NotImplementedError", "def _check(self):\n assert isinstance(self._price, int)\n assert self._price >= 0\n assert isinstance(self._units, int)\n assert self._units > 0\n assert self._side == OrderSide.BUY or self._side == OrderSide.SELL\n assert self._type == OrderType.LIMIT or self._type == OrderType.CANCEL\n assert isinstance(self._market, int)\n assert self._market > 0", "def __check_is_xarray(self, data):\n if type(data) is xr.core.dataarray.DataArray or \\\n type(data) is xr.core.dataarray.Dataset:\n\n return True\n else:\n msg = \"Variable {data} is not an xarray DataArray/Dataset\"\n raise Exception(msg)", "def check_x_won(self, opp_sign = -1, empty_sign_num = 0): \n # Checks if X won horizontally\n if opp_sign not in self.board[0:3] and empty_sign_num not in self.board[0:3]:\n self.change_button_img_to_x(123)\n return True\n elif opp_sign not in self.board[3:6] and empty_sign_num not in self.board[3:6]:\n self.change_button_img_to_x(456)\n return True\n elif opp_sign not in self.board[6:9] and empty_sign_num not in self.board[6:9]:\n 
self.change_button_img_to_x(789)\n return True\n\n # Checks if X won vertically\n elif opp_sign not in self.board[0:7:3] and empty_sign_num not in self.board[0:7:3]:\n self.change_button_img_to_x(147)\n return True\n elif opp_sign not in self.board[1:8:3] and empty_sign_num not in self.board[1:8:3]:\n self.change_button_img_to_x(258)\n return True\n elif opp_sign not in self.board[2:9:3] and empty_sign_num not in self.board[2:9:3]:\n self.change_button_img_to_x(369)\n return True\n\n # Checks if X won diagonally\n elif opp_sign not in self.board[0:9:4] and empty_sign_num not in self.board[0:9:4]:\n self.change_button_img_to_x(159)\n return True\n elif opp_sign not in self.board[2:7:2] and empty_sign_num not in self.board[2:7:2]:\n self.change_button_img_to_x(357)\n return True\n\n # if X didn't win yet, return None \n else:\n return None", "def evaluate(self, X):\n\n\t\tpass", "def all(x) -> bool:\n pass", "def _can_handle_query(cls, *query):\n chkattr = [\"Time\", \"Instrument\", \"SatelliteNumber\"]\n chklist = [x.__class__.__name__ in chkattr for x in query]\n for x in query:\n if x.__class__.__name__ == \"Instrument\" and x.value.lower() in (\n \"xrs\",\n \"goes\",\n ):\n return all(chklist)\n return False", "def process_deductions_check(self, R_c_x, R_c_x_inv):\n p = self.p\n while len(self.deduction_stack) > 0:\n alpha, x = self.deduction_stack.pop()\n for w in R_c_x:\n if not self.scan_check(alpha, w):\n return False\n beta = self.table[alpha][self.A_dict[x]]\n if beta is not None:\n for w in R_c_x_inv:\n if not self.scan_check(beta, w):\n return False\n return True", "def checkSStx(tx):\n # Check to make sure there aren't too many inputs.\n # CheckTransactionSanity already makes sure that number of inputs is\n # greater than 0, so no need to check that.\n if len(tx.txIn) > MaxInputsPerSStx:\n raise DecredError(\"SStx has too many inputs\")\n\n # Check to make sure there aren't too many outputs.\n if len(tx.txOut) > MaxOutputsPerSStx:\n raise DecredError(\"SStx has too many outputs\")\n\n # Check to make sure there are some outputs.\n if len(tx.txOut) == 0:\n raise DecredError(\"SStx has no outputs\")\n\n # Check to make sure that all output scripts are the consensus version.\n for idx, txOut in enumerate(tx.txOut):\n if txOut.version != consensusVersion:\n raise DecredError(\"invalid script version found in txOut idx %d\" % idx)\n\n # Ensure that the first output is tagged OP_SSTX.\n if getScriptClass(tx.txOut[0].version, tx.txOut[0].pkScript) != StakeSubmissionTy:\n raise DecredError(\n \"First SStx output should have been OP_SSTX tagged, but it was not\"\n )\n\n # Ensure that the number of outputs is equal to the number of inputs\n # + 1.\n if (len(tx.txIn) * 2 + 1) != len(tx.txOut):\n raise DecredError(\n \"The number of inputs in the SStx tx was not the number of outputs/2 - 1\"\n )\n\n # Ensure that the rest of the odd outputs are 28-byte OP_RETURN pushes that\n # contain putative pubkeyhashes, and that the rest of the odd outputs are\n # OP_SSTXCHANGE tagged.\n for outTxIndex in range(1, len(tx.txOut)):\n scrVersion = tx.txOut[outTxIndex].version\n rawScript = tx.txOut[outTxIndex].pkScript\n\n # Check change outputs.\n if outTxIndex % 2 == 0:\n if getScriptClass(scrVersion, rawScript) != StakeSubChangeTy:\n raise DecredError(\n \"SStx output at output index %d was not an sstx change output\",\n outTxIndex,\n )\n continue\n\n # Else (odd) check commitment outputs. 
The script should be a\n # NullDataTy output.\n if getScriptClass(scrVersion, rawScript) != NullDataTy:\n raise DecredError(\n \"SStx output at output index %d was not a NullData (OP_RETURN) push\",\n outTxIndex,\n )\n\n # The length of the output script should be between 32 and 77 bytes long.\n if len(rawScript) < SStxPKHMinOutSize or len(rawScript) > SStxPKHMaxOutSize:\n raise DecredError(\n \"SStx output at output index %d was a NullData (OP_RETURN) push\"\n \" of the wrong size\",\n outTxIndex,\n )", "def _can_cast(from_dtype, to_dtype):\n if cudf.utils.utils.is_na_like(from_dtype):\n return True\n if isinstance(from_dtype, type):\n from_dtype = cudf.dtype(from_dtype)\n if isinstance(to_dtype, type):\n to_dtype = cudf.dtype(to_dtype)\n\n # TODO : Add precision & scale checking for\n # decimal types in future\n\n if isinstance(from_dtype, cudf.core.dtypes.DecimalDtype):\n if isinstance(to_dtype, cudf.core.dtypes.DecimalDtype):\n return True\n elif isinstance(to_dtype, np.dtype):\n if to_dtype.kind in {\"i\", \"f\", \"u\", \"U\", \"O\"}:\n return True\n else:\n return False\n elif isinstance(from_dtype, np.dtype):\n if isinstance(to_dtype, np.dtype):\n return np.can_cast(from_dtype, to_dtype)\n elif isinstance(to_dtype, cudf.core.dtypes.DecimalDtype):\n if from_dtype.kind in {\"i\", \"f\", \"u\", \"U\", \"O\"}:\n return True\n else:\n return False\n elif isinstance(to_dtype, cudf.core.types.CategoricalDtype):\n return True\n else:\n return False\n elif isinstance(from_dtype, cudf.core.dtypes.ListDtype):\n # TODO: Add level based checks too once casting of\n # list columns is supported\n if isinstance(to_dtype, cudf.core.dtypes.ListDtype):\n return np.can_cast(from_dtype.leaf_type, to_dtype.leaf_type)\n else:\n return False\n elif isinstance(from_dtype, cudf.core.dtypes.CategoricalDtype):\n if isinstance(to_dtype, cudf.core.dtypes.CategoricalDtype):\n return True\n elif isinstance(to_dtype, np.dtype):\n return np.can_cast(from_dtype._categories.dtype, to_dtype)\n else:\n return False\n else:\n return np.can_cast(from_dtype, to_dtype)", "def condition_forward_checking(csp, var) :\n return False", "def condition_forward_checking(csp, var) :\n return False", "def _checkData(data: Sequence[HistoryElement]):\r\n if not all(x.timeStamp for x in data):\r\n raise ValueError(\"At least one element in data doesn't have a TimeStamp\")", "def check_valid(x2,x1):\r\n if x2 < x1 or x2==x1:\r\n return True", "def check_predicate(self, predicate, x):\n if isinstance(predicate, dtype.TypeMeta):\n return issubclass(x, predicate)\n elif isinstance(predicate, type):\n return isinstance(x, predicate)\n elif callable(predicate):\n return predicate(self, x)\n else:\n raise ValueError(predicate) # pragma: no cover", "def check(self, X):\n if (np.min(X) < -90.) 
or (np.max(X) > 90.):\n print \"Warning: X may be defined in degrees instead of radians\"", "def validate_oee_error_14(self):\n sql = \"\"\"\n SELECT * FROM bdeview \n WHERE CAST(f11 AS INTEGER)<1000000 OR CAST(f11 AS INTEGER)>999000000\n \"\"\"\n lines = self.c.execute(sql).fetchall()\n return lines==[], lines", "def _can_cast_to(self, value, cast_type):\n try:\n _ = cast_type(value)\n return True\n except ValueError:\n return False", "def check_bounds(x, param_name):\n for i in range(len(x)):\n if ((xmin[param_name][i] is not None and x[i] < xmin[param_name][i]) or\n (xmax[param_name][i] is not None and x[i] > xmax[param_name][i])):\n return False\n return True", "def isx(tree, x, accept_attr=True):\n # WTF, so sometimes there **is** a Captured node, while sometimes there isn't (letdoutil.islet)? At which point are these removed?\n # Captured nodes only come from unpythonic.syntax, and we use from-imports\n # and bare names for anything hq[]'d; but any references that appear\n # explicitly in the user code may use either bare names or somemodule.f.\n ismatch = x if callable(x) else lambda s: s == x\n return ((type(tree) is Name and ismatch(tree.id)) or\n (type(tree) is Captured and ismatch(tree.name)) or\n (accept_attr and type(tree) is Attribute and ismatch(tree.attr)))", "def __eq__(self, x):\n assert isinstance(x, AxisDistance), 'incorrect type of arg x: should be type AxisDistance, is type {}'.format(type(x))\n return self.__cmp__(x) == 0", "def _check_domain_additional(cls, domain: D) -> bool:\n action_space = domain.get_action_space().unwrapped()\n observation_space = domain.get_observation_space().unwrapped()\n\n if not isinstance(action_space, Iterable) and not isinstance(action_space, gym.spaces.Tuple):\n action_space = [action_space]\n if not isinstance(observation_space, Iterable) and not isinstance(observation_space, gym.spaces.Tuple):\n observation_space = [observation_space]\n\n flat_action_space = list(flatten(action_space))\n flat_observation_space = list(flatten(observation_space))\n\n print(flat_action_space)\n print(flat_observation_space)\n\n valide_action_space = True\n for x in flat_action_space:\n valide_action_space = isinstance(x,(gym.spaces.Tuple, gym.spaces.Discrete, gym.spaces.Box))\n \n validate_observation_space = True\n for x in flat_observation_space:\n validate_observation_space = isinstance(x,(gym.spaces.Tuple, gym.spaces.Discrete, gym.spaces.Box))\n \n return valide_action_space and validate_observation_space", "def check_to_clause(shot, k, dic):\n if ('_to' in k and isinstance(shot,(list, tuple, ndarray)) \n and dic[k][1] == 0):\n print('******** Warning - valid shot of 0 in to clause?')", "def process_generic(x, lb, ub):\n x = x.abs()\n if x.dtype == 'float64':\n #print('float')\n x.loc[x.apply(lambda x: not x.is_integer())] = np.nan\n x.loc[(x <= lb ) | (x > ub)] = np.nan\n\n return x", "def check_precondition(self, code_of_source: str) -> bool:\n sections = metamorph.get_sections(code_of_source)\n source_code_circuit = sections[\"CIRCUIT\"]\n instructions = metamorph.get_instructions(source_code_circuit)\n self.instruction_x_gate = [\n i for i in instructions if i[\"gate\"] == \"XGate\"]\n self.tot_n_x_gates = len(self.instruction_x_gate)\n return self.tot_n_x_gates > 0", "def check():", "def __validateInput(self, fx: str, ux:str, lx:str) -> bool:\r\n # validate the input fields\r\n if fx == \"\" or ux == \"\" or lx == \"\":\r\n self.errorMessage = self.errorMessageMissingFields\r\n self.__showErrorMessage()\r\n return False\r\n\r\n # validate the 
limits\r\n self.lowerX = lx\r\n self.upperX = ux\r\n # check if numeric\r\n try:\r\n self.upperX = float(self.upperX)\r\n self.lowerX = float(self.lowerX)\r\n except:\r\n self.errorMessage = self.errorMessageLimitsNotNumeric\r\n self.__showErrorMessage()\r\n return False\r\n \r\n # check for inquality\r\n if self.lowerX > self.upperX:\r\n self.errorMessage = self.errorMessageLimitsNotOrdered\r\n self.upperXField.setText(str(self.lowerX))\r\n self.lowerXField.setText(str(self.upperX))\r\n self.lowerX, self.upperX = self.upperX, self.lowerX\r\n ##################################\r\n # validate and process the input function\r\n self.inputFunction = fx\r\n try:\r\n self.inputFunction = self.inputFunction.replace(\" \", \"\").replace(\"^\", \"**\").replace(\"sqrt\", \"np.sqrt\")\r\n self.inputFunction = self.inputFunction.replace(\"e**\", \"np.exp\").replace(\"log\", \"np.log\") \r\n self.inputFunction = self.inputFunction.replace(\"sin\", \"np.sin\").replace(\"cos\", \"np.cos\").replace(\"tan\", \"np.tan\")\r\n\r\n except:\r\n self.errorMessage = self.errorMessageNonValidFunction\r\n self.__showErrorMessage()\r\n return True", "def filter_variant(self, x):\n return True", "def tuples_2_bool(tuples, x):\n if np.ndim(tuples) == 1:\n tuples = [tuples]\n\n out = np.zeros(x.size, dtype=bool)\n for l, u in tuples:\n out[(x > l) & (x < u)] = True\n return out", "def check(self, input, node):\n assert False # Must be redefined", "def check_all(c):", "def _check_array(self, X):\n x = np.copy(X)\n if np.isfortran(x) is False:\n # print (\"Array must be in Fortran-order. Converting now.\")\n x = np.asfortranarray(x)\n if self.sampling > x.shape:\n raise ValueError(\"'sampling' is greater than the dimensions of X\")\n return x", "def an_check(self):\n\t\tfor filles in self.xelt:\n\t\t\t# parcours rapide des branches niveau 1\n\t\t\tif search(r'analytic$', filles.tag):\n\t\t\t\treturn True\n\t\treturn False", "def _assert_valid_value_and_cast(self, value):\n raise NotImplementedError()", "def sanity_check(self):\n res = True\n res = res and self.detected\n res = res and np.sum(self.diffs) < 30000 # experimental value\n return res", "def test_badxvaluewithbools(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, False, 2, 3)\n self.assertEqual(str(e.exception), 'x must be an integer')", "def check_for_x_over_absX(numerators, denominators):\r\n # TODO: this function should dig/search through dimshuffles\r\n # This won't catch a dimshuffled absolute value\r\n for den in list(denominators):\r\n if (den.owner and den.owner.op == T.abs_\r\n and den.owner.inputs[0] in numerators):\r\n if den.owner.inputs[0].type.dtype.startswith('complex'):\r\n #TODO: Make an Op that projects a complex number to\r\n # have unit length but projects 0 to 0. That\r\n # would be a weird Op, but consistent with the\r\n # special case below. I heard there's some\r\n # convention in Matlab that is similar to\r\n # this... but not sure.\r\n pass\r\n else:\r\n denominators.remove(den)\r\n numerators.remove(den.owner.inputs[0])\r\n numerators.append(T.sgn(den.owner.inputs[0]))\r\n return numerators, denominators", "def _validate_X(X):\n return X if not isinstance(X, pd.DataFrame) else X.as_matrix()" ]
[ "0.6669929", "0.65580815", "0.65137964", "0.6363444", "0.6190184", "0.5916384", "0.5903115", "0.58242035", "0.5805368", "0.5690557", "0.5617207", "0.5617207", "0.5495422", "0.54181933", "0.541543", "0.53723425", "0.52777237", "0.52643865", "0.52449125", "0.52307457", "0.51835376", "0.5146674", "0.5128955", "0.51271445", "0.50803035", "0.50787634", "0.50783247", "0.50665796", "0.50445026", "0.50445026", "0.50297296", "0.50263286", "0.5013843", "0.5007927", "0.5007811", "0.50059694", "0.4996283", "0.49938112", "0.4987207", "0.4986369", "0.4984517", "0.49833086", "0.49719766", "0.4952199", "0.49494448", "0.49361962", "0.49315143", "0.4931303", "0.49168086", "0.49160388", "0.49089396", "0.4902823", "0.49011034", "0.48976934", "0.4878868", "0.48770636", "0.48734796", "0.48724788", "0.48616016", "0.4861283", "0.48577055", "0.48540327", "0.48446247", "0.4841725", "0.48399645", "0.48358592", "0.48330334", "0.48310563", "0.48289046", "0.48115405", "0.48101315", "0.480861", "0.48030028", "0.48030028", "0.48026678", "0.4800941", "0.47999325", "0.4799604", "0.4794032", "0.47939196", "0.47683677", "0.47662675", "0.47567445", "0.47447348", "0.4738077", "0.4737864", "0.47267485", "0.47263792", "0.47125593", "0.4712185", "0.47105524", "0.4703862", "0.46960664", "0.46941906", "0.46936592", "0.46919456", "0.468106", "0.4676695", "0.46684408", "0.46638548" ]
0.59676224
5
Test for t(tau,x) = tau(1+asin(bx))
def flattenTest():
  x = rampfloat(0,0,1,n1,n2)
  t = rampfloat(0,1,0,n1,n2)
  smax = 5.0
  a = smax/(n1-1)
  b = 2*PI/(n2-1)
  bx = mul(b,x)
  bt = mul(b,t)
  cosbx = cos(bx)
  sinbx = sin(bx)
  acosbx = mul(a,cosbx)
  asinbx = mul(a,sinbx)
  p2 = div(mul(bt,acosbx),add(1,asinbx))
  el = fillfloat(1,n1,n2)
  fl = FlattenerCg(8.0,0.01)
  sf = fl.findShifts(p2,el) # found shifts
  se = neg(mul(t,asinbx)) # exact shifts
  plot(sf,jet,-smax,smax)
  plot(se,jet,-smax,smax)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def akendalltau(x,y):\r\n n1 = 0\r\n n2 = 0\r\n iss = 0\r\n for j in range(len(x)-1):\r\n for k in range(j,len(y)):\r\n a1 = x[j] - x[k]\r\n a2 = y[j] - y[k]\r\n aa = a1 * a2\r\n if (aa): # neither array has a tie\r\n n1 = n1 + 1\r\n n2 = n2 + 1\r\n if aa > 0:\r\n iss = iss + 1\r\n else:\r\n iss = iss -1\r\n else:\r\n if (a1):\r\n n1 = n1 + 1\r\n else:\r\n n2 = n2 + 1\r\n tau = iss / math.sqrt(n1*n2)\r\n svar = (4.0*len(x)+10.0) / (9.0*len(x)*(len(x)-1))\r\n z = tau / math.sqrt(svar)\r\n prob = erfcc(abs(z)/1.4142136)\r\n return tau, prob", "def test_tau_score(sample_weight):\n np.testing.assert_almost_equal(\n tau_score(Y_true, Y_pred, sample_weight),\n _tau_score(Y_true, Y_pred, sample_weight))", "def Tc (x,infin, a, nu):\r\n return infin + a* (x ** (-1/nu))", "def phi_tau(u, lambda_, b, m, a, t, A0=0.5):\n constant = np.power(lambda_ * b / (lambda_ * b - i_ * u), a * t)\n\n second = i_ * (m * t + (A0 - m) * (1 - np.exp(-lambda_ * t)) / lambda_) * u\n\n third = a / lambda_ * (Li2(1 - i_ * u / (i_ * u - lambda_ * b)) -\n Li2(1 - i_ * np.exp(-lambda_ * t) * u / (i_ * u - lambda_ * b)))\n\n return constant * np.exp(second + third)", "def estimate_tau(t, y):\n dt = np.min(np.diff(t))\n tt = np.arange(t.min(), t.max(), dt)\n yy = np.interp(tt, t, y, 1)\n f = acor_fn(yy)\n fs = gaussian_filter(f, 50)\n w = dt * np.arange(len(f))\n m = np.arange(1, len(fs)-1)[(fs[1:-1] > fs[2:]) & (fs[1:-1] > fs[:-2])]\n if len(m):\n return w[m[np.argmax(fs[m])]]\n return w[-1]", "def estimate_tau(t, y):\n dt = np.min(np.diff(t))\n tt = np.arange(t.min(), t.max(), dt)\n yy = np.interp(tt, t, y, 1)\n f = acor_fn(yy)\n fs = gaussian_filter(f, 50)\n w = dt * np.arange(len(f))\n m = np.arange(1, len(fs)-1)[(fs[1:-1] > fs[2:]) & (fs[1:-1] > fs[:-2])]\n if len(m):\n return w[m[np.argmax(fs[m])]]\n return w[-1]", "def calc_trig(self, tau):\n if self.A[self.k,self.p] != 0.0:\n if tau > 0:\n t = -tau + np.sqrt(tau**2 + 1.0)\n else:\n t = -tau - np.sqrt(tau**2 + 1.0)\n \n c = 1.0/(1.0 + t**2)\n s = t*c\n else:\n c = 1.0\n s = 0.0\n return c, s", "def isqrt( a, b ):\n return a*a - b", "def tau(self,x,n=50):\n sigma = np.zeros(n,dtype=np.int8)\n for k in range(n):\n if x<=self.rho:\n sigma[k] = 0\n x = self.f0(x)\n else:\n sigma[k] = 1\n x = self.f1(x)\n return sigma", "def rhs(x, t):\n\n return - np.sin(np.pi * x) * (np.sin(t) - 1 * np.pi ** 2 * np.cos(t))", "def is_tn(self, y, t):\n return t != 0 and y == 0", "def rt_isothermal(lam, T, I_0, tau):\n B_lam = planck_w(lam, T)\n return I_0 * np.exp(-tau) + B_lam * (1 - np.exp(-tau))", "def u_exact(t):\n return a * t + b", "def lkendalltau(x,y):\r\n n1 = 0\r\n n2 = 0\r\n iss = 0\r\n for j in range(len(x)-1):\r\n for k in range(j,len(y)):\r\n a1 = x[j] - x[k]\r\n a2 = y[j] - y[k]\r\n aa = a1 * a2\r\n if (aa): # neither list has a tie\r\n n1 = n1 + 1\r\n n2 = n2 + 1\r\n if aa > 0:\r\n iss = iss + 1\r\n else:\r\n iss = iss -1\r\n else:\r\n if (a1):\r\n n1 = n1 + 1\r\n else:\r\n n2 = n2 + 1\r\n tau = iss / math.sqrt(n1*n2)\r\n svar = (4.0*len(x)+10.0) / (9.0*len(x)*(len(x)-1))\r\n z = tau / math.sqrt(svar)\r\n prob = erfcc(abs(z)/1.4142136)\r\n return tau, prob", "def boltzman(x, xmid, tau):\n return 1. / (1. + nx.exp(-(x-xmid)/tau))", "def boltzman(x, xmid, tau):\n return 1. / (1. + nx.exp(-(x-xmid)/tau))", "def _ig_tsz(self, x, b):\n return self.P(x*self.r500) * (x / np.sqrt(x**2. 
- b**2.))", "def exactsolution(x, t, u):\n if 0 <= (x - u*t) and (x - u*t) <= 0.2:\n temp = 1 - (10 * (x - u*t) -1)**2\n else:\n temp = 0\n return temp", "def ar1_t_like(t, y, tau, sigma=1.0):\n phi = np.exp(-np.abs(np.diff(t)) / tau)\n sigma_e = np.sqrt(sigma ** 2 * (1 - phi ** 2))\n yim1 = y[:-1]\n yi = y[1:]\n lnp = normal_like(yi, phi * yim1, sigma=sigma_e)\n lnp += normal_like(y[0], mu=0.0, sigma=sigma)\n return lnp", "def vi1(t):\n u_t = 1*(t>0)\n return (np.sin(2000*np.pi*t)+np.cos(2e6*np.pi*t)) * u_t", "def t(l3,Ei,Et,Et_axis):\n Ef=Ei-Et\n T=(-(l3/vFrmE(Ef))+(l3/np.sqrt(vFrmE(Ei)**2-vsq_from_E(Et_axis))))*1e6\n return (T)", "def tau_plus_minus(self):\n ct = self.cartan_type()\n L,R = ct.index_set_bipartition()\n return self.tau_epsilon_operator_on_almost_positive_roots(L), self.tau_epsilon_operator_on_almost_positive_roots(R)", "def fitzhugh_nagumo(x, t, a, b, tau, I):\n return np.array([x[0] - x[0]**3 - x[1] + I, \n (x[0] - a - b * x[1])/tau])", "def asin(x):\n return 0.0", "def ThetaFunc(self, x):\n return 0.5 * (np.sign(x) + 1)", "def tau_calculation(self, det, gt):\n return np.round((self.area_of_intersection(det, gt) / self.area(det)), 2)", "def safe_asin(x):\n safe_x = torch.where(x < 1, x, torch.zeros_like(x))\n return torch.where(x < 1, torch.asin(safe_x), (math.pi/2) * torch.ones_like(x))", "def state_eq(t, x, u, tu):\n # u = u[0, :]\n u_interp = np.interp(t, tu, u[0, :])\n # print(f'u: {u}')\n return np.vstack((x[1], -x[1] + u_interp))", "def test_t(self):\n assert np.isclose(self.stepper.t, self.final_t)", "def is_tr(self, y, t):\n return t != 0 and y != 0", "def curly_F_tau(Teff, tau):\n\n return 2*np.pi*(trapezoidal(lambda t: integrated_planck(Teff*(0.5+ 3/4*t)**(1/4))*sc.expn(2, t-tau), tau, 20, 5000)-trapezoidal(lambda t: integrated_planck(Teff*(0.5+ 3/4*t)**(1/4))*sc.expn(2, tau-t), 0, tau, 5000))", "def tau_V(self, value):\n if not (self.tau_V_range[0] <= value <= self.tau_V_range[1]):\n raise InputParameterError(\n \"parameter tau_V must be between \"\n + str(self.tau_V_range[0])\n + \" and \"\n + str(self.tau_V_range[1])\n )", "def test_tau(self):\n tau_values = [5.0, 15.0, 25.0]\n \n tmax = 50.0\n dt = 0.1\n N = 3\n\n ini_rate = 80.0\n\n nsteps = int_r(tmax/dt)\n\n # reproducible arbitrariness\n np.random.seed(34342)\n\n tutor_out_trace = ini_rate + 20.0*np.random.randn(nsteps, N)\n # have some correlation between reward trace and tutor.out trace\n rho = 0.2\n reward_trace = (rho*(tutor_out_trace[:, 0] - ini_rate)/20.0 +\n (1-rho)*np.random.randn(nsteps))\n \n scaling = None\n\n for crt_tau in tau_values:\n tutor = SimpleNeurons(N, out_fct=lambda i: tutor_out_trace[i])\n reward = MockReward(lambda t: reward_trace[int_r(t/dt)])\n tutor_rule = ReinforcementTutorRule(tutor, reward, tau=crt_tau,\n constrain_rates=False, ini_rate=ini_rate, learning_rate=1.0,\n use_tutor_baseline=False)\n\n sim = simulation.Simulation(tutor, reward, tutor_rule, dt=dt)\n sim.run(tmax)\n\n drates = tutor_rule.rates - ini_rate\n\n # this should be a convolution of tutor_out_trace*reward_trace with an\n # exponential with time constant crt_tau\n # that means that tau*(d/dt)drates + drates must be proportional to it\n expected_rhs = (tutor_out_trace - ini_rate)*np.reshape(reward_trace,\n (-1, 1))\n\n lhs = np.vstack((float(crt_tau)*np.reshape(drates[0, :], (1, -1))/dt,\n (crt_tau/dt)*np.diff(drates, axis=0) + drates[:-1, :]))\n \n # allow scaling to be arbitrary, but *independent of tau*\n if scaling is None:\n mask = (expected_rhs != 0)\n scaling = 
np.mean(lhs[mask]/expected_rhs[mask])\n\n # scaling shouldn't be negative or zero!\n self.assertGreater(scaling, 1e-9)\n\n mag = np.mean(np.abs(expected_rhs))\n\n self.assertLess(np.max(np.abs(lhs - scaling*expected_rhs)), 1e-6*mag)", "def _tand(v):\n return math.tan(math.radians(v))", "def get_tau_gap(vals,tau): \n ans = 0.0\n m = 0.0\n N = len(vals)\n for i in range(N):\n ai = 2.0*vals[i]-1\n if ai > 0:\n m += 1\n tmp = 0.0\n if tau >= 0: # votes close to 50% are weighed more (\"traditional\")\n if ai >= 0: \n tmp = pow(ai,tau+1)\n else:\n tmp = -pow(-ai,tau+1)\n else: # votes close to 50% are weighed less \n if ai >= 0:\n tmp = pow((1-ai),-tau+1)\n else:\n tmp = -pow((1+ai),-tau+1)\n ans += tmp\n \n if tau >= 0:\n return 2.0*(ans/N + 0.5 - m/N)\n else:\n return -2.0*(ans/N + 0.5 - m/N)", "def __etaBin(self,eta):\n if len(self._etabins)>0:\n return reduce(lambda x,y:x+y,map(lambda x:abs(eta)>x,self._etabins))\n else:\n return 0", "def est_am(t,y):\n ch=t>t_eq\n y1=y[ch]\n return y1[-1]-y1[0]", "def exp(t,tau):\n return np.exp(-t/tau)", "def t_test_(x):\n assert np.ndim(x) == 1 and (not np.any(np.isnan(x)))\n\n if (len(x) <= 1) or (not np.all(np.isfinite(x))):\n return 1.0 # Can't say anything about scale => p=1\n\n _, pval = sst.ttest_1samp(x, 0.0)\n if np.isnan(pval):\n # Should only be possible if scale underflowed to zero:\n assert np.var(x, ddof=1) <= 1e-100\n # It is debatable if the condition should be ``np.mean(x) == 0.0`` or\n # ``np.all(x == 0.0)``. Should not matter in practice.\n pval = np.float(np.mean(x) == 0.0)\n assert 0.0 <= pval and pval <= 1.0\n return pval", "def basis_function(t, i, knots):\n t_this = knots[i]\n t_next = knots[i+1]\n out = 1. if (t>=t_this and t<t_next) else 0.\n return out", "def get_kendell_tau(ys):\n\n from scipy.stats import kendalltau\n\n # calculate Kendall tau\n tau, p = kendalltau(range(len(ys)), ys)\n\n return tau, p", "def _get_tb(I, nu, beam):\n from astropy import units as u\n return (1222.0*I/(nu**2*(beam.minor/1.0).to(u.arcsecond)*(beam.major/1.0).to(u.arcsecond))).value", "def phi(t, *args):\n # Unpacking data\n mu_1, pi_mu_2, distance, affine_transfo = args\n A, b = get_Ab(t)\n N = len(mu_1)\n assert len(mu_1) == len(pi_mu_2)\n # Computing value of objective function\n r = 0.\n for i in np.arange(N):\n r += distance(affine_transfo(A, b, mu_1[i]), pi_mu_2[i]) ** 2\n return r", "def test_timescale_beta(self):\n param_pairs = [(0, 1, self.rule.tau2)]\n\n nsteps = 10\n self.conductor.out_fct = lambda i: 10*np.ones(self.Nc) if i == 0 \\\n else np.zeros(self.Nc)\n\n sim = simulation.Simulation(self.conductor, self.student, self.tutor,\n self.syns, self.rule, dt=self.dt)\n W0 = np.copy(self.syns.W)\n\n for params in param_pairs:\n self.rule.alpha = params[0]\n self.rule.beta = params[1]\n tau = params[2]\n\n self.tutor.out_fct = lambda i: (self.rule.theta + (10 if i == 0 else 0))*\\\n np.ones(self.Ns)\n\n self.syns.W = np.copy(W0)\n sim.run(self.dt)\n\n change0 = self.syns.W - W0\n\n self.assertGreater(np.linalg.norm(change0), 1e-10)\n \n self.tutor.out_fct = lambda i: (self.rule.theta + (10\n if i == nsteps-1 else 0))*np.ones(self.Ns)\n\n self.syns.W = np.copy(W0)\n sim.run(nsteps*self.dt)\n\n change1 = self.syns.W - W0\n\n change1_exp = change0*(1 - float(self.dt)/tau)**(nsteps-1)\n\n self.assertTrue(np.allclose(change1, change1_exp),\n msg=\"Timescale not verified, alpha={}, beta={}.\".format(*params[:2]))", "def _eqns(self, x: np.ndarray, t):\n # Could in principle sanity check the args but speed is probably paramount\n\n return 
np.array([-(self._angular_freq ** 2) * np.sin(x[1]) - x[0], x[0]])", "def tau_plus(self,x,n=50):\n sigma = np.zeros(n,dtype=np.int8)\n for k in range(n):\n if x>=self.rho:\n sigma[k] = 1\n x = self.f1(x)\n else:\n sigma[k] = 0\n x = self.f0(x)\n return sigma", "def NacaEquation(self,x,t):\n a0 = 0.2969\n a1 = 0.1260\n a2 = 0.3516\n a3 = 0.2843\n a4 = 0.1036\n\n y_t = 5*t * (a0*x**0.5 - a1*x - a2*x**2 + a3*x**3 - a4*x**4)\n\n return y_t", "def ap2t(pval,df):\r\n pval = N.array(pval)\r\n signs = N.sign(pval)\r\n pval = abs(pval)\r\n t = N.ones(pval.shape,N.float_)*50\r\n step = N.ones(pval.shape,N.float_)*25\r\n print \"Initial ap2t() prob calc\"\r\n prob = abetai(0.5*df,0.5,float(df)/(df+t*t))\r\n print 'ap2t() iter: ',\r\n for i in range(10):\r\n print i,' ',\r\n t = N.where(pval<prob,t+step,t-step)\r\n prob = abetai(0.5*df,0.5,float(df)/(df+t*t))\r\n step = step/2\r\n print\r\n # since this is an ugly hack, we get ugly boundaries\r\n t = N.where(t>99.9,1000,t) # hit upper-boundary\r\n t = t+signs\r\n return t #, prob, pval\r", "def check_for_eta(self, *args, **kwargs):\r\n pass", "def quadratic_source(tau, a):\n\n return a[0] + a[1]*tau + a[2]*tau**2", "def tand(A):\n Arad = np.deg2rad(A)\n x = np.tan(Arad) \n return x", "def tau(n,mu,P):\n if (not isinstance(n, int)): # Python 3.\n #if (not isinstance(n, (int, long))): # Python 2.\n print(\"Error! n must be an integer: \", n)\n exit(1)\n tau = 0\n for jj in range(len(mu)):\n tau += np.math.exp(-mu[jj]) * mu[jj]**n * P[jj]\n tau = tau / np.math.factorial(n)\n return tau", "def test_negative_electrode_potential_profile(self):\n np.testing.assert_array_almost_equal(self.phi_s_n(self.t, x=0), 0, decimal=5)", "def tnuc_region_in_exon(np, beg, end):\n\n if beg.tpos != 0: return False\n if end.tpos != 0: return False\n for i in range(beg.pos, end.pos-1):\n if abs(np[i] - np[i+1]) != 1:\n return False\n return True", "def test_atan2_special_case_pinf_and_ninf(dt):\n q = get_queue_or_skip()\n skip_if_dtype_not_supported(dt, q)\n\n x1 = dpt.asarray(dpt.inf, dtype=dt)\n x2 = dpt.asarray([-dpt.inf], dtype=dt)\n\n actual = dpt.atan2(x1, x2)\n expected = dpt.asarray(3 * dpt.pi / 4, dtype=dt)\n\n diff = dpt.abs(dpt.subtract(actual, expected))\n atol = 8 * dpt.finfo(diff.dtype).eps\n assert dpt.all(dpt.less_equal(diff, atol))", "def rect_xi(t, T=1):\n return (t>=-T/2) & (t <= T/2), 0", "def __phi(x):\n return max([1 - abs(x), 0])", "def test_one_qubit_hard_thetas_all_basis(self, basis, simp_tol):\n for gate in HARD_THETA_ONEQS:\n self.check_one_qubit_euler_angles(\n Operator(gate), basis, simplify=simp_tol[0], tolerance=simp_tol[1]\n )", "def f(t, T=2*np.pi):\n if t==0:\n return(0)\n if 0<t<T/2:\n return(1)\n if -T/2<t<0:\n return (-1)", "def test_atan2_special_case_ninf_and_pinf(dt):\n q = get_queue_or_skip()\n skip_if_dtype_not_supported(dt, q)\n\n x1 = dpt.asarray(-dpt.inf, dtype=dt)\n x2 = dpt.asarray([dpt.inf], dtype=dt)\n\n actual = dpt.atan2(x1, x2)\n expected = dpt.asarray(-dpt.pi / 4, dtype=dt)\n\n diff = dpt.abs(dpt.subtract(actual, expected))\n atol = 8 * dpt.finfo(diff.dtype).eps\n assert dpt.all(dpt.less_equal(diff, atol))", "def test_dynamics_with_tau_ref(self):\n n = 10\n t_max = 100.0\n dt = 0.1\n\n G = StudentLayer(n)\n\n i_values = np.linspace(0.02, 0.4, 28)\n\n different = 0\n for i_ext in i_values:\n # start with different initial voltages to take advantage of averaging\n # effects\n G.v_init = np.linspace(G.vR, G.v_th, n, endpoint=False)\n G.i_ext_init = i_ext\n\n M = simulation.EventMonitor(G)\n\n sim = simulation.Simulation(G, M, 
dt=dt)\n sim.run(t_max)\n\n rate = float(len(M.t))/n/t_max*1000.0\n # first source of uncertainty: a spike might not fit before the end of a\n # simulation\n uncertainty1 = 1.0/np.sqrt(n)/t_max*1000.0\n \n expected0 = 0.0\n expected = 0.0\n if G.R*i_ext > G.v_th - G.vR:\n expected0 = 1000.0/(G.tau_m*np.log(G.R*i_ext/(G.vR-G.v_th+G.R*i_ext)))\n expected = expected0/(1 + expected0*G.tau_ref/1000.0)\n\n # second source of uncertainty: spikes might move due to the granularity\n # of the simulation\n uncertainty2 = dt*expected*rate/1000.0\n uncertainty = uncertainty1 + uncertainty2\n\n self.assertLess(np.abs(rate - expected), uncertainty)\n\n if np.abs(expected - expected0) >= uncertainty:\n different += 1\n else:\n self.assertAlmostEqual(rate, 0.0)\n \n # make sure that in most cases the firing rate using the refractory period\n # was significantly different from the case without refractory period\n self.assertGreater(different, len(i_values)*2/3)", "def J_over_JUV_inside_slab(tau, tau_SF):\n # if not np.all(np.abs(tau) <= 0.5*tau_SF):\n # raise ValueError(\"tau must be smaller than or equal to tau_SF/2\")\n\n return 0.5/tau_SF*(2.0 - expn(2,0.5*tau_SF - tau) - expn(2,0.5*tau_SF + tau))", "def isAcute(trpl):\n vd = vectorFormat(trpl)\n if angle_between(*vd) < np.pi/2:\n return True\n else:\n return False", "def tand(x):\n if isinstance(x, numpy.ndarray):\n return numpy.tan(math.pi * x / 180.0)\n return math.cos(math.radians(x))", "def taucurveder(self, p, x):\n y = -(p[1] * numpy.exp((p[2] + x) / p[3]) / p[3] - p[4] * numpy.exp(-(p[5] + x) / p[6]) / p[6]) / (\n p[1] * numpy.exp((p[2] + x) / p[3]) +\n p[4] * numpy.exp(-(p[5] + x) / p[6])) ** 2.0\n # print 'dy: ', y\n return y", "def test_atan2_special_case_pinf_and_pinf(dt):\n q = get_queue_or_skip()\n skip_if_dtype_not_supported(dt, q)\n\n x1 = dpt.asarray(dpt.inf, dtype=dt)\n x2 = dpt.asarray([dpt.inf], dtype=dt)\n\n actual = dpt.atan2(x1, x2)\n expected = dpt.asarray(dpt.pi / 4, dtype=dt)\n\n diff = dpt.abs(dpt.subtract(actual, expected))\n atol = 8 * dpt.finfo(diff.dtype).eps\n assert dpt.all(dpt.less_equal(diff, atol))", "def qrst_tm(x):\n return 0.2228*x - 0.6685", "def branin_with_params(x, a, b, c, r, s, t):\n x1 = x[0]\n x2 = x[1]\n neg_ret = float(a * (x2 - b*x1**2 + c*x1 - r)**2 + s*(1-t)*np.cos(x1) + s)\n return - neg_ret", "def test_atan2_special_case_pinf_and_finite(dt):\n q = get_queue_or_skip()\n skip_if_dtype_not_supported(dt, q)\n\n x1 = dpt.asarray(dpt.inf, dtype=dt)\n x2 = dpt.asarray([-2, -0.0, 0.0, 2], dtype=dt)\n\n actual = dpt.atan2(x1, x2)\n expected = dpt.asarray(dpt.pi / 2, dtype=dt)\n\n diff = dpt.abs(dpt.subtract(actual, expected))\n atol = 8 * dpt.finfo(diff.dtype).eps\n assert dpt.all(dpt.less_equal(diff, atol))", "def t99(B, alpha):\n\n Q = 2*scc.pi*(1+alpha**2)/scc.physical_constants['electron gyromag. 
ratio'][0]\n return (((Q/B)**2)/(1-0.99**2))**0.5", "def test_atan2_special_case_nfinite_and_pinf(dt):\n q = get_queue_or_skip()\n skip_if_dtype_not_supported(dt, q)\n\n x1 = dpt.asarray([-0.5, -1, -2, -5], dtype=dt)\n x2 = dpt.asarray(dpt.inf, dtype=dt)\n\n actual = dpt.atan2(x1, x2)\n expected = dpt.asarray(-0.0, dtype=dt)\n assert dpt.all(dpt.equal(actual, expected))\n assert dpt.all(dpt.signbit(actual))", "def test_atan2_special_case_positive_and_nzero(dt):\n q = get_queue_or_skip()\n skip_if_dtype_not_supported(dt, q)\n\n x1 = dpt.asarray([0.5, 1, 2, dpt.inf], dtype=dt)\n x2 = dpt.asarray([-0.0], dtype=dt)\n\n actual = dpt.atan2(x1, x2)\n expected = dpt.asarray(dpt.pi / 2, dtype=dt)\n\n diff = dpt.abs(dpt.subtract(actual, expected))\n atol = 8 * dpt.finfo(diff.dtype).eps\n assert dpt.all(dpt.less_equal(diff, atol))", "def Optimize_tau(x,dt):\n\n xdot = central_diff(x,dt)\n var_xdot = np.var(xdot)\n tau_target = CorrelationTime(x,dt=dt)\n\n \n k = var_xdot\n beta0 = 0.1\n alpha = 1.0/k # beta/D ratio\n\n @jit\n def f1(x): # spring force\n return k*x\n \n MySys = Stochastic_Oscillator(f1,beta0,beta0 / alpha)\n R = 5000 # how many taus required for integration\n MySys.Match_Correlation(tau_target,np.array([1e-2,20]),alpha,T=R*tau_target,N=int(R*100))\n MySys.k=k\n\n return MySys", "def test_timescales(self):\n np.random.seed(2312321)\n param_pairs = [(1, 0, self.rule.tau1), (0, 1, self.rule.tau2)]\n\n nsteps = 10\n self.conductor.out_fct = lambda i: 10*np.ones(self.Nc) if i == 0 \\\n else np.zeros(self.Nc)\n\n sim = simulation.Simulation(self.conductor, self.student, self.tutor,\n self.syns, self.rule, dt=self.dt)\n W0 = np.copy(self.syns.W)\n\n for params in param_pairs:\n self.rule.alpha = params[0]\n self.rule.beta = params[1]\n tau = params[2]\n\n self.tutor.out_fct = lambda i: (self.rule.theta + (10 if i == 0 else 0))*\\\n np.ones(self.Ns)\n\n self.syns.W = np.copy(W0)\n sim.run(self.dt)\n\n change0 = self.syns.W - W0\n\n self.assertGreater(np.linalg.norm(change0), 1e-10)\n \n self.tutor.out_fct = lambda i: (self.rule.theta + (10\n if i == nsteps-1 else 0))*np.ones(self.Ns)\n\n self.syns.W = np.copy(W0)\n sim.run(nsteps*self.dt)\n\n change1 = self.syns.W - W0\n\n change1_exp = change0*(1 - float(self.dt)/tau)**(nsteps-1)\n\n self.assertTrue(np.allclose(change1, change1_exp),\n msg=\"Timescale not verified, alpha={}, beta={}.\".format(*params[:2]))", "def test_atan2_special_case_ninf_and_ninf(dt):\n q = get_queue_or_skip()\n skip_if_dtype_not_supported(dt, q)\n\n x1 = dpt.asarray(-dpt.inf, dtype=dt)\n x2 = dpt.asarray([-dpt.inf], dtype=dt)\n\n actual = dpt.atan2(x1, x2)\n expected = dpt.asarray(-3 * dpt.pi / 4, dtype=dt)\n\n diff = dpt.abs(dpt.subtract(actual, expected))\n atol = 8 * dpt.finfo(diff.dtype).eps\n assert dpt.all(dpt.less_equal(diff, atol))", "def isInProlate(sample, alpha, beta): \n E = sample[0] * sample[0] / (alpha * alpha)\n E += (sample[1] * sample[1] + sample[2] * sample[2] ) / (beta * beta)\n if E > 1.0:\n return False\n else:\n return True", "def timeintegrand(z,omegalambda,omegam,omegak):\n\n return 1./((1+z)*adotovera(z,omegalambda,omegam,omegak))", "def rt_general(lam, T, I_0, tau):\n\n B_lam = planck_w(lam, T)\n tau_total = tau[-1]\n delta_tau = tau[1:] - tau[0:-1]\n integrate_radiation = B_lam[1:] * np.exp(- (tau_total - tau[:])) * delta_tau\n return I_0 * np.exp(-tau) + B_lam * (1 - np.exp(-tau))", "def tau(self, R, m, z):\n result = self.ne2d(R, m, z)\n \n # multiply by Thompson cross section (physical)\n sigma_T = 6.6524e-29 # Thomson cross section in 
m^2\n mpc = 3.08567758e16*1.e6 # 1Mpc in m\n sigma_T *= (self.U.bg.h/mpc)**2 # in (Mpc/h)^2\n \n result *= sigma_T # dimensionless\n return result", "def probit_phi(x):\n mu = 0;sd = 1;\n return 0.5 * (1 + tsr.erf((x - mu) / (sd * tsr.sqrt(2))))", "def init_tau(self, type: str = 'safest', weight: float = 1.5):\n\n P = self.toeplitz_op.P\n weighted_gram = 2 * self.linear_op.gram\n if self.beta is not None:\n beta = self.beta\n else:\n try:\n beta = eigs(weighted_gram, k=1, which='LM', return_eigenvectors=False, tol=self.eig_tol)\n beta *= (1 + self.eig_tol)\n except Exception('Eigs solver did not converge, trying again with small tolerance...'):\n beta = eigs(weighted_gram, k=1, which='LM', return_eigenvectors=False, tol=1e-3)\n beta *= (1 + 1e-3)\n ub = 1 / beta * (1 + 1 / np.sqrt(P + 1))\n lb = 1 / beta * (1 - 1 / np.sqrt(P + 1))\n if type == 'fastest':\n try:\n alpha = eigs(weighted_gram, k=1, which='SM', return_eigenvectors=False, tol=self.eig_tol)\n alpha *= (1 + self.eig_tol)\n except Exception('Eigs solver did not converge. Alpha is set to zero.'):\n alpha = 0\n tau_opt = 2 / (beta + alpha)\n if (tau_opt <= ub) & (tau_opt >= lb):\n self.tau = tau_opt\n else:\n min_lb = np.fmin(np.abs(1 - lb * alpha), np.abs(1 - lb * beta))\n min_ub = np.fmin(np.abs(1 - ub * alpha), np.abs(1 - ub * beta))\n if np.argmin([min_lb, min_ub]) == 0:\n self.tau = lb\n else:\n self.tau = ub\n elif type == 'safest':\n self.tau = 1 / beta\n elif type == 'largest':\n self.tau = ub\n else:\n self.tau = weight / beta", "def func_exact_sol(x, case):\n TB, TA, L, q, k = get_valdict(case, 'TB,TA,L,q,k')\n return ((TB-TA)/L + (q/(2*k)*(L-x)))*x + TA", "def f(self,un,tn):\n return -self.a(tn)*un + self.b(tn)", "def runge_integrator(self, t, y, dt, tau):\n\n k1 = self.plant.rhs(t, y, tau)\n k2 = self.plant.rhs(t + 0.5 * dt, y + 0.5 * dt * k1, tau)\n k3 = self.plant.rhs(t + 0.5 * dt, y + 0.5 * dt * k2, tau)\n k4 = self.plant.rhs(t + dt, y + dt * k3, tau)\n return (k1 + 2 * (k2 + k3) + k4) / 6.0", "def test_atan2_special_case_pfinite_and_pinf(dt):\n q = get_queue_or_skip()\n skip_if_dtype_not_supported(dt, q)\n\n x1 = dpt.asarray([0.5, 1, 2, 5], dtype=dt)\n x2 = dpt.asarray(dpt.inf, dtype=dt)\n\n actual = dpt.atan2(x1, x2)\n expected = dpt.asarray(+0.0, dtype=dt)\n assert dpt.all(dpt.equal(actual, expected))\n assert not dpt.any(dpt.signbit(actual))", "def attest_ind (a, b, dimension=None, printit=0, name1='Samp1', name2='Samp2',writemode='a'):\r\n if dimension == None:\r\n a = N.ravel(a)\r\n b = N.ravel(b)\r\n dimension = 0\r\n x1 = amean(a,dimension)\r\n x2 = amean(b,dimension)\r\n v1 = avar(a,dimension)\r\n v2 = avar(b,dimension)\r\n n1 = a.shape[dimension]\r\n n2 = b.shape[dimension]\r\n df = n1+n2-2\r\n svar = ((n1-1)*v1+(n2-1)*v2) / float(df)\r\n zerodivproblem = N.equal(svar,0)\r\n svar = N.where(zerodivproblem,1,svar) # avoid zero-division in 1st place\r\n t = (x1-x2)/N.sqrt(svar*(1.0/n1 + 1.0/n2)) # N-D COMPUTATION HERE!!!!!!\r\n t = N.where(zerodivproblem,1.0,t) # replace NaN/wrong t-values with 1.0\r\n probs = abetai(0.5*df,0.5,float(df)/(df+t*t))\r\n\r\n if type(t) == N.ndarray:\r\n probs = N.reshape(probs,t.shape)\r\n if probs.shape == (1,):\r\n probs = probs[0]\r\n \r\n if printit <> 0:\r\n if type(t) == N.ndarray:\r\n t = t[0]\r\n if type(probs) == N.ndarray:\r\n probs = probs[0]\r\n statname = 'Independent samples T-test.'\r\n outputpairedstats(printit,writemode,\r\n name1,n1,x1,v1,N.minimum.reduce(N.ravel(a)),\r\n N.maximum.reduce(N.ravel(a)),\r\n name2,n2,x2,v2,N.minimum.reduce(N.ravel(b)),\r\n 
N.maximum.reduce(N.ravel(b)),\r\n statname,t,probs)\r\n return\r\n return t, probs", "def A_TB(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = self.f_TB(l1, l2, phi) * self.F_TB(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n\n # if first time, initialize integrator\n if not hasattr(self.A_TB.__func__, \"integ\"):\n self.A_TB.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.A_TB.integ(integrand, nitn=8, neval=1000)\n result = self.A_TB.integ(integrand, nitn=1, neval=5000)\n result = L**2 / result.mean\n if not np.isfinite(result):\n result = 0.\n return result", "def iou(bb_test, bb_gt):\n xx1 = np.maximum(bb_test[0], bb_gt[0])\n yy1 = np.maximum(bb_test[1], bb_gt[1])\n xx2 = np.minimum(bb_test[2], bb_gt[2])\n yy2 = np.minimum(bb_test[3], bb_gt[3])\n w = np.maximum(0., xx2 - xx1)\n h = np.maximum(0., yy2 - yy1)\n wh = w * h\n o = wh / ((bb_test[2]-bb_test[0])*(bb_test[3]-bb_test[1])\n + (bb_gt[2]-bb_gt[0])*(bb_gt[3]-bb_gt[1]) - wh)\n\n return o", "def test_atan(doctest):", "def test_atan2_special_case_nfinite_and_ninf(dt):\n q = get_queue_or_skip()\n skip_if_dtype_not_supported(dt, q)\n\n x1 = dpt.asarray([-0.5, -1, -2, -5], dtype=dt)\n x2 = dpt.asarray(-dpt.inf, dtype=dt)\n\n actual = dpt.atan2(x1, x2)\n expected = dpt.asarray(-dpt.pi, dtype=dt)\n\n diff = dpt.abs(dpt.subtract(actual, expected))\n atol = 8 * dpt.finfo(diff.dtype).eps\n assert dpt.all(dpt.less_equal(diff, atol))", "def test_inverse_b(self):\n for q in self.all:\n self.assertTrue(\n (q*q.inverse()).almost_equal(Quaternion(1, 0, 0, 0)))", "def phi_xt(u, alpha, theta, mu, sigma, lambda_, b, m, a, t, A0=0.5):\n first = mu * u\n\n second = (2 * alpha - 2 * i_ * theta * u + sigma**2 * u**2) / (2 * alpha)\n\n return phi_tau(first + i_ * alpha * np.log(second), lambda_, b, m, a, t, A0)", "def test_atan2_special_case_nzero_and_nzero(dt):\n q = get_queue_or_skip()\n skip_if_dtype_not_supported(dt, q)\n\n x1 = dpt.asarray([-0.0], dtype=dt)\n x2 = dpt.asarray([-0.0], dtype=dt)\n\n actual = dpt.atan2(x1, x2)\n expected = dpt.asarray(-dpt.pi, dtype=dt)\n\n diff = dpt.abs(dpt.subtract(actual, expected))\n atol = 8 * dpt.finfo(diff.dtype).eps\n assert dpt.all(dpt.less_equal(diff, atol))", "def sample_ar1_t(t, tau, sigma=1.0, size=1):\n dt = np.diff(t)\n x = sigma * np.random.randn(len(t), size)\n for i in range(1, len(t)):\n a = np.exp(-dt[i - 1] / tau)\n s = np.sqrt((1 - a ** 2))\n x[i] = x[i] * s + x[i - 1] * a\n return x.T.squeeze()", "def test_tma(self):\n periods = 200\n tma = qufilab.tma(self.close, periods)\n tma_talib = talib.TRIMA(self.close, periods)\n np.testing.assert_allclose(tma, tma_talib, rtol = self.tolerance)", "def test_atan2_special_case_pzero_and_negatvie(dt):\n q = get_queue_or_skip()\n skip_if_dtype_not_supported(dt, q)\n\n x1 = dpt.asarray(+0.0, dtype=dt)\n x2 = dpt.asarray([-0.5, -1, -2, -dpt.inf], dtype=dt)\n\n actual = dpt.atan2(x1, x2)\n expected = dpt.asarray(dpt.pi, dtype=dt)\n\n diff = dpt.abs(dpt.subtract(actual, expected))\n atol = 8 * dpt.finfo(diff.dtype).eps\n assert dpt.all(dpt.less_equal(diff, atol))", "def find_subsidy_rate(self, tau):\n \n #a. 
find residual with lower and upper guesses\n tauv_0 = np.array([-self.tau_s_0, 0, tau]) #subsidy rate, excempt rate, tax rate \n self.set_tax_system(tauv_0) \n \n Kss_d = self.solve_stationary_equilibrium()[1]\n residual0 = Kss_d / self.Kss_b - 1\n \n tauv_1 = np.array([-self.tau_s_1, 0, tau]) #subsidy rate, excempt rate, tax rate \n self.set_tax_system(tauv_1) \n \n Kss_d = self.solve_stationary_equilibrium()[1]\n residual1 = Kss_d / self.Kss_b - 1\n \n assert residual0*residual1 < 0, 'find_subsidy_rate -- WARNING: No equilibrium tau exists'\n \n #b. bisection to find the subsidy rate that genereates the same ss capital as in benchmark case\n \n tau_s_0 = self.tau_s_0 \n tau_s_1 = self.tau_s_1\n \n for i_t in range(self.maxit):\n taus = (tau_s_0 + tau_s_1)/2\n tauv = np.array([-taus, 0, tau]) #subsidy rate, excempt rate, tax rate \n self.set_tax_system(tauv) \n \n Kss_d = self.solve_stationary_equilibrium()[1]\n residual = Kss_d / self.Kss_b - 1\n \n if np.abs(residual) < self.tol:\n \n taus_star = taus\n break\n \n else:\n if residual1 * residual>0 :\n tau_s_1 = taus\n else :\n tau_s_0 = taus\n \n assert i_t+1 < self.maxit, 'find_subsidy_rate -- taus has not converged'\n \n return taus_star", "def tnuc_region_in_intron(np, beg, end):\n\n if beg.tpos == 0 or end.tpos == 0: return False\n if beg.pos == end.pos and beg.tpos*end.tpos > 0:\n return True\n if beg.pos+1 == end.pos and beg.tpos>0 and end.tpos<0:\n return True\n if end.pos+1 == beg.pos and beg.tpos<0 and end.tpos>0:\n return True\n\n return False", "def test_dynamics_no_tau_ref(self):\n n = 50\n t_max = 100.0\n dt = 0.1\n\n G = StudentLayer(n)\n G.tau_ref = 0.0\n\n i_values = np.linspace(0.01, 0.4, 50)\n\n for i_ext in i_values:\n # start with different initial voltages to take advantage of averaging\n # effects\n G.v_init = np.linspace(G.vR, G.v_th, n, endpoint=False)\n G.i_ext_init = i_ext\n\n M = simulation.EventMonitor(G)\n\n sim = simulation.Simulation(G, M, dt=dt)\n sim.run(t_max)\n \n rate = float(len(M.t))/n/t_max*1000.0\n # first source of uncertainty: a spike might not fit before the end of a\n # simulation\n uncertainty1 = 1.0/np.sqrt(n)/t_max*1000.0\n \n expected = 0.0\n uncertainty = uncertainty1\n if G.R*i_ext > G.v_th - G.vR:\n expected = 1000.0/(G.tau_m*np.log(G.R*i_ext/(G.vR-G.v_th+G.R*i_ext)))\n # second source of uncertainty: spikes might move due to the granularity\n # of the simulation\n uncertainty2 = dt*expected*rate/1000.0\n uncertainty = uncertainty1 + uncertainty2\n uncertainty *= 1.5\n self.assertLess(np.abs(rate - expected), uncertainty)\n else:\n self.assertAlmostEqual(rate, 0.0)", "def rhs(y,t):\n return math.cos(t)", "def test_atan2_special_case_nzero_and_negative(dt):\n q = get_queue_or_skip()\n skip_if_dtype_not_supported(dt, q)\n\n x1 = dpt.asarray([-0.0], dtype=dt)\n x2 = dpt.asarray([-dpt.inf, -2, -1, -0.5], dtype=dt)\n\n actual = dpt.atan2(x1, x2)\n expected = dpt.asarray(-dpt.pi, dtype=dt)\n\n diff = dpt.abs(dpt.subtract(actual, expected))\n atol = 8 * dpt.finfo(diff.dtype).eps\n assert dpt.all(dpt.less_equal(diff, atol))" ]
[ "0.6248144", "0.6197467", "0.6141509", "0.6100883", "0.60800886", "0.60800886", "0.6000496", "0.59086967", "0.58352697", "0.58224106", "0.5792972", "0.5762635", "0.5752603", "0.57476383", "0.5732413", "0.5732413", "0.57315695", "0.5711668", "0.57024705", "0.5690728", "0.56630313", "0.5659238", "0.5657875", "0.56201094", "0.5616487", "0.56158674", "0.5591441", "0.55712545", "0.55624795", "0.55070686", "0.55033106", "0.5491075", "0.54750746", "0.5472691", "0.542861", "0.542139", "0.5413068", "0.5410943", "0.5410281", "0.539841", "0.5393477", "0.5386879", "0.5382221", "0.53653276", "0.5344761", "0.533946", "0.5327736", "0.53186387", "0.5301972", "0.53001595", "0.52970874", "0.5294739", "0.52879614", "0.5286289", "0.52862203", "0.5278691", "0.52744484", "0.52673125", "0.52598226", "0.5255288", "0.5249366", "0.5244126", "0.52435267", "0.523126", "0.52272546", "0.5216604", "0.52142423", "0.5212853", "0.52123237", "0.5211248", "0.5185776", "0.5178463", "0.51750684", "0.51724136", "0.51683164", "0.51662713", "0.5166231", "0.51652247", "0.51577866", "0.5144124", "0.51379806", "0.5127926", "0.51164126", "0.51160693", "0.51120687", "0.51086736", "0.5108051", "0.5106056", "0.5100847", "0.509771", "0.5096855", "0.50962615", "0.5095751", "0.50935394", "0.5091956", "0.50885487", "0.50885165", "0.50869036", "0.5084873", "0.508332", "0.5082855" ]
0.0
-1
Return a new ObjectChange representing a change made to this object. This will typically be called automatically by ChangeLoggingMiddleware.
def to_objectchange(self, action):
    return ObjectChange(
        changed_object=self,
        object_repr=str(self),
        action=action,
        object_data=serialize_object(self),
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def updated_object(self):\n o = deepcopy(self.object)\n o[\"name\"] += \"-copy\"\n return o", "def change_object(self, new_object):\n raise NotImplementedError", "def __call__(self, change: ChangeDict) -> None:\n old = None\n new = None\n ctype = change[\"type\"]\n if ctype == \"create\":\n new = change[\"value\"]\n elif ctype == \"update\":\n old = change[\"oldvalue\"]\n new = change[\"value\"]\n elif ctype == \"delete\":\n old = change[\"value\"]\n attr = self.attr\n owner = change[\"object\"]\n handler = getattr(owner, self.funcname)\n if isinstance(old, Atom):\n old.unobserve(attr, handler)\n if isinstance(new, Atom):\n new.observe(attr, handler)\n elif new is not None:\n msg = \"cannot attach observer '%s' to non-Atom %s\"\n raise TypeError(msg % (attr, new))", "def modified_object(obj, event):\n now = datetime.now(tz=_zone)\n obj.modification_date = now", "def modified(self, **attributes):\n new_obj = deepcopy(self)\n new_obj.__dict__.update(attributes)\n new_obj.initialize()\n return new_obj", "def exogenous_change(self):\n pass", "def exogenous_change(self):\n pass", "def exogenous_change(self):\n pass", "def save(self):\n if config is None: self.objects.reload_cache()\n # If this is a new object, we save it with config.item_add()\n if self.is_new is True or self._meta['filename'] is None:\n if not self._meta['filename']:\n 'discover a new filename'\n self._meta['filename'] = self.get_suggested_filename()\n for k,v in self._changes.items():\n self._defined_attributes[k] = v\n self._original_attributes[k] = v\n del self._changes[k]\n config.item_add(self._original_attributes, self._meta['filename'])\n return\n # If we get here, we are making modifications to an object\n number_of_changes = 0\n for field_name, new_value in self._changes.items():\n save_result = config.item_edit_field(item=self._original_attributes, field_name=field_name, new_value=new_value)\n if save_result == True:\n del self._changes[field_name]\n self._event(level='write', message=\"%s changed from '%s' to '%s'\" % (field_name, self[field_name], new_value))\n if not new_value:\n if self._defined_attributes.has_key(field_name):\n del self._defined_attributes[field_name]\n if self._original_attributes.has_key(field_name):\n del self._original_attributes[field_name]\n else:\n self._defined_attributes[field_name] = new_value\n self._original_attributes[field_name] = new_value\n number_of_changes += 1\n else:\n raise Exception(\"Failure saving object. 
filename=%s, object=%s\" % (self['meta']['filename'], self['shortname']) )\n self.objects.clean_cache()\n return number_of_changes", "def orig_obj(self):\n return self._orig_obj", "def orig_obj(self):\n return self._orig_obj", "def orig_obj(self):\n return self._orig_obj", "def patch(self, obj, field, value):\n return Patch(obj, field, value)", "def __copy__(self):\n copy = self.__class__(self.param)\n copy.last_string2object_failed = self.last_string2object_failed\n copy.msg_handler = self.msg_handler\n return copy", "def modified(self):\n raise NotImplementedError", "def make_immutable(self):\n # just set the flag to make object immutable and hashable\n self.immutable = True", "def _update_model_instance(self, obj_filter_dict, new_data_dict):\n obj = self._get_or_create_model_instance(obj_filter_dict)\n obj.modify(**new_data_dict)", "def get_change(self, ):\n return self.get_parameter('change')", "def add_change(self, action, name, type, ttl=600, alias_hosted_zone_id=None, alias_dns_name=None):\r\n change = Record(name, type, ttl, alias_hosted_zone_id=alias_hosted_zone_id, alias_dns_name=alias_dns_name)\r\n self.changes.append([action, change])\r\n return change", "def GetChanges(self):\n return self._changes", "def get_direct_change(self):\n return self._direct_change", "def created_changed(cls): # noqa\n\n def _set_created_and_changed(mapper, connection, instance):\n instance.created = instance.changed = datetime.utcnow()\n\n def _set_changed(mapper, connection, instance):\n instance.changed = datetime.utcnow()\n\n event.listen(cls, \"before_insert\", _set_created_and_changed)\n event.listen(cls, \"before_update\", _set_changed)\n return cls", "def __init__(self, changed_by=None, id=None, organization_id=None, entity=None, entity_id=None, audit_action=None, field_changes=None, new_entity=None, state=None, created=None):\n self.swagger_types = {\n 'changed_by': 'str',\n 'id': 'str',\n 'organization_id': 'str',\n 'entity': 'str',\n 'entity_id': 'str',\n 'audit_action': 'str',\n 'field_changes': 'list[str]',\n 'new_entity': 'list[str]',\n 'state': 'str',\n 'created': 'datetime'\n }\n\n self.attribute_map = {\n 'changed_by': 'changedBy',\n 'id': 'id',\n 'organization_id': 'organizationID',\n 'entity': 'entity',\n 'entity_id': 'entityID',\n 'audit_action': 'auditAction',\n 'field_changes': 'fieldChanges',\n 'new_entity': 'newEntity',\n 'state': 'state',\n 'created': 'created'\n }\n\n self._changed_by = changed_by\n self._id = id\n self._organization_id = organization_id\n self._entity = entity\n self._entity_id = entity_id\n self._audit_action = audit_action\n self._field_changes = field_changes\n self._new_entity = new_entity\n self._state = state\n self._created = created", "def diff(self):\n if self.event == 'Create':\n old = ''\n else:\n # Get the Change just ahead of _this_ change because that has the\n # state of the Resource before this Change occurred.\n # TODO(nickpegg): Get rid of this if we change the behavior of\n # Change to store the previous version of the object\n old_change = Change.objects.filter(\n change_at__lt=self.change_at,\n resource_id=self.resource_id,\n resource_name=self.resource_name\n ).order_by(\n '-change_at'\n ).first()\n old = json.dumps(old_change._resource, indent=2, sort_keys=True)\n\n if self.event == 'Delete':\n current = ''\n else:\n resource = apps.get_model(self._meta.app_label, self.resource_name)\n obj = resource.objects.get(pk=self.resource_id)\n\n serializer_class = self.get_serializer_for_resource(\n self.resource_name)\n serializer = 
serializer_class(obj)\n current = json.dumps(serializer.data, indent=2, sort_keys=True)\n\n diff = \"\\n\".join(difflib.ndiff(\n old.splitlines(),\n current.splitlines()\n ))\n\n return diff", "def getChanges():", "def assertLoggedChange(self, obj, **kwargs):\n self.assertLogged(obj, CHANGE, **kwargs)", "def make_change_plan_action(self) -> ChangePlanAction:\r\n return ChangePlanAction(\r\n change_plan_id=self.change_plan_id,\r\n step=self.step,\r\n action=self.action,\r\n original_asset_number=self.original_asset_number,\r\n new_record=self.new_record,\r\n )", "def modified(self):\n return self.__modified", "def modified(self):\n return self.__modified", "def new_object(cls):\n return cls.for_value([])", "def addChange(change):", "def addChange(change):", "def handle(self, object, name, old, new):\n raise NotImplementedError", "def _add_change_log(self):\n wiz = self.machine_email_id\n change_log = self.env['machine.instance.change_log'].create({\n 'name': wiz.sub_subject,\n 'date': wiz.date,\n 'duration': wiz.duration,\n 'user_id': wiz.user_id.id,\n 'priority': wiz.priority,\n 'machine_instance_id': self.machine_instance_id.id,\n\n })\n self.change_log_id = change_log.id", "def _mkObject(self):\n return ImmutableObject(\n store=self.store,\n hash=u'somehash',\n contentDigest=u'quux',\n content=self.store.newFilePath('foo'),\n contentType=u'application/octet-stream')", "def changed(self, *args, **kwargs): # real signature unknown\n pass", "def new(self, obj):\n new_key = \"{}.{}\".format(obj.__class__.__name__, obj.id)\n self.__objects[new_key] = obj", "def log_change(request, object, message):\n from django.contrib.admin.models import LogEntry, CHANGE\n from django.contrib.contenttypes.models import ContentType\n from django.utils.encoding import force_unicode\n\n LogEntry.objects.log_action(\n user_id=request.user.pk,\n content_type_id=ContentType.objects.get_for_model(object).pk,\n object_id=object.pk,\n object_repr=force_unicode(object),\n action_flag=CHANGE,\n change_message=message\n )", "def touch_object(self, bucket_name, object_name, last_access=-1, last_modification=-1):\n\n return h3lib.touch_object(self._handle, bucket_name, object_name, last_access, last_modification, self._user_id)", "def changed_version(self):\r\n try:\r\n return CampaignChange.objects.get(campaign__pk=self.pk)\r\n except CampaignChange.DoesNotExist:\r\n return None", "def new(self, obj):\n self.__objects.update({\"{}.{}\".format(obj.__class__.__name__,\n obj.id): obj})", "def modified(self):\n return self._modified", "def modified(self):\n return self._modified", "def modified(self):\n return self._modified", "def modified(self):\n return self._modified", "def changeAdded(change):", "def alter(self, instance, activity, **kwargs):\n return activity", "def new_entity(self):\n return self._new_entity", "def Modified(self):\r\n\t\treturn self._p_changed", "def clone( self ):\n new = copy( self )\n try: del new.input_changed\n except AttributeError: pass\n return new", "def __init__(self, record, modifications):\n self.modifications = modifications\n super(ModifyAction, self).__init__(record)", "def process_object(self, new, old=None):\n new = super().process_object(new, old)\n\n # Remove internal and auto-assigned fields.\n internal_fields = (self.model.modified_field, self.model.permissions_field)\n validate_from_bucket_schema_or_400(\n new,\n resource_name=\"group\",\n request=self.request,\n ignore_fields=internal_fields,\n id_field=self.model.id_field,\n )\n\n return new", "def to_legacy(self) -> object:\n 
pass", "def clone(self):\n return attr.evolve(self)", "def copy(self):\n return self.__class__(dict(self))", "def update(self):\n return self", "def original_dict(self):\n return self.obj.__dict__", "def _comm_changed(self, change):\n if change['new'] is None:\n return\n self._model_id = self.model_id\n\n self.comm.on_msg(self._handle_msg)\n _instances[self.model_id] = self", "def copy(self):\n return Level(repr=self.as_dict())", "def changed(self):\n\t\tpass", "def __copy__(self):\n\t\tcopy_paster = Log()\n\t\tcopy_paster.__dict__.update(self.__dict__)\n\t\tcopy_paster.cur_tensor = self.cur_tensor.clone()\n\t\treturn copy_paster", "def clean_copy(self) -> \"StorableObject\":\n return StorableObject(\n id=self.id, data=self.data, tags=self.tags, description=self.description\n )", "def copy(self):\n return self.mutate().simple_copy()", "def save(self, *args, **kwargs):\n changesDict = self.diff\n\n # Let's construct the history record entry and add it to the dict.\n # Each record is further going to be dictionary stored in the DB as a string.\n\n # We need to keep track of which user updated the data. Check if the user has\n # assigned a callable to fetch it. Else we will initialize it to be empty.\n who = None\n if self.history__get_user_hook is not None:\n if callable(self.history__get_user_hook):\n who = self.history__get_user_hook()\n else:\n logger.error(\"history_get_user initialized with a non-callable\")\n\n # only add the 'who' field if we have a string value.\n if who is not None and isinstance(who,str):\n entry = {\n \"who\" : str(who),\n \"when\" : timezone.now().isoformat(),\n \"what\" : changesDict,\n }\n else:\n entry = {\n \"when\" : timezone.now().isoformat(),\n \"what\" : changesDict,\n }\n\n # prepend this entry into the list\n if isinstance(self.history, list):\n self.history.insert(0,entry)\n if isinstance(self.history__max_entry_count, int):\n if self.history__max_entry_count >= 0:\n # Value has been specified. Trim the list. Value of 0 deletes the whole list.\n del self.history[self.history__max_entry_count: ]\n logger.debug('Added the following record to history:\\n %s'%(str(entry),))\n else:\n logger.error(\"History field not saved initially as list!!\")\n\n # Let's not hold up the save. 
Save and reset the initial state.\n super(ChangeHistoryMixin, self).save(*args, **kwargs)\n self.__initial = self._dict", "def copy(self):\n cls = self.__class__\n result = cls.__new__(cls)\n result.__dict__.update(self.__dict__)\n return result", "def copy(self):\n\t\tassert ltrace_func(TRACE_BASE)\n\t\treturn NamedObject.copy(self)", "def copy(self):\n return self.from_builder(self)", "def new(self, obj):\n pass", "def clone(self):\n return attr.evolve(self, env=dict(self._env))", "def copy(self):\n return self.__class__(self)", "def copy(self):\n return self.__class__(self)", "def field_changes(self):\n return self._field_changes", "def new_object(self):\r\n\t\tpass", "def get_history_changeset(self, instance):\n # Load user from instance or request\n user = self.get_history_user(instance)\n assert user, 'History User is required'\n\n try:\n # Load manually applied changeset\n changeset = instance._history_changeset\n except AttributeError:\n # Load existing changeset\n changeset = getattr(self.thread.request, 'changeset', None)\n if changeset is None:\n # Create new, auto-closing changeset\n changeset = Changeset.objects.create(user=user)\n self.thread.request.changeset = changeset\n self.thread.request.close_changeset = True\n\n # These should be verified in the middleware before the instance is\n # saved, but let's be sure\n assert user == changeset.user, 'User must match changeset user'\n assert not changeset.closed, 'Changeset is closed'\n return changeset", "def save_model(self, request, obj, form, *args, **kwargs):\n if form.changed_data:\n obj.last_change = now()\n super().save_model(request, obj, form, *args, **kwargs)", "def GetDirty(self, *args, **kwargs):\n pass", "def stream_changes(self) -> Iterator[Change]:\n raise NotImplementedError", "def update(self, collection_id, parent_id, object_id, object,\n unique_fields=None, id_field=DEFAULT_ID_FIELD,\n modified_field=DEFAULT_MODIFIED_FIELD,\n auth=None):\n obj = Session.query(self.collection).get(object_id)\n # TODO: verify permissions\n if obj is None:\n obj = self.create(collection_id=collection_id, parent_id=parent_id,\n record=object, unique_fields=unique_fields,\n id_field=id_field, modified_field=modified_field,\n auth=None)\n else:\n for k, v in object.items():\n setattr(obj, k, v)\n return obj.deserialize()", "def is_change(self) -> bool:\n return self._change", "def copy(self):\n\t\tassert ltrace_func(TRACE_BASE)\n\t\ttemp = self.__class__()\n\t\ttemp.copy_from(self)\n\t\treturn temp", "def copy(self):\n new = self.__class__(integration=None, data=None)\n for attribute, value in self.__dict__.items():\n if attribute in self.referenced_attributes:\n setattr(new, attribute, value)\n elif hasattr(value, 'copy'):\n setattr(new, attribute, value.copy())\n else:\n setattr(new, attribute, deepcopy(value))\n return new", "def __copy__(self):\n return self.copy()", "def obj(self):\n if not self._obj:\n self._get()\n return self._obj", "def copy(self, new_name):\n new_model = dill.loads(dill.dumps(self.model))\n model_fn = lambda: new_model\n return self.__class__(new_name, model_fn)", "def copy(log_entry):\n\n\t\tassert(isinstance(log_entry, LogEntry))\n\t\treturn LogEntry.from_data(log_entry.data, log_entry.intrusion)", "def modify(self, context: Context):\n data = attr.asdict(self)\n\n yield self\n self.last_author = context.user.nickname\n self.updated_at = pendulum.now()\n\n try:\n attr.validate(self)\n except TypeError:\n logger.warning(\"failed to validate {}\", self)\n for key, value in data.items():\n # since we can't 
overwrite self we will need to roll back one by one\n setattr(self, key, value)\n raise", "def obj(self) -> object:\n pass", "def contribute_to_object(self, obj):\n pass", "def copy(self):\n return object.__new__(type(self))", "def audit(cls):\n old_save = cls.save\n old_delete = cls.delete\n def save(self, *arg, **kw):\n from middleware import get_current_user\n user = get_current_user()\n if user is not None:\n self.last_user_id = user.id\n return old_save(self, *arg, **kw)\n\n\n def delete(self, *arg, **kw):\n from middleware import get_current_user\n user = get_current_user()\n if user is not None:\n self.last_user_id = user.id\n cls.save(self)\n return old_delete(self, *arg, **kw)\n cls.save = save\n cls.delete = delete\n cls.last_user_id = models.IntegerField(null=True, blank=True, editable=False)\n return cls", "def __copy__(self):\n new = FSMTransition(self.from_state, self.to_state,\n self.word_in, self.word_out)\n if hasattr(self, 'hook'):\n new.hook = self.hook\n return new", "def new(self, obj):\n key = '{}.{}'.format(obj.__class__.__name__, obj.id)\n self.__objects[key] = obj", "def copy(self):\n return self.from_dict(self.to_dict(True))", "def __init__(self):\n this = _diff.new_svn_prop_patch_t()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def add_change(self, change: Change) -> bool:\n raise NotImplementedError", "def copy_as_new(self) -> \"Individual\":\n return Individual(self.main_node.copy(), to_pipeline=self._to_pipeline)", "def step_changes(self) -> pd.Series:\n return self._get_deltas().copy()", "def __init__(self):\n this = _diff.new_svn_patch_t()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def make_mutable(obj):\n _mutable_objs.append(obj)", "def copy(self):\n new = self\n return new" ]
[ "0.61876494", "0.5870512", "0.56965613", "0.5530178", "0.55052364", "0.54316956", "0.54316956", "0.54316956", "0.53980166", "0.5397867", "0.5397867", "0.5397867", "0.5333922", "0.53334", "0.531325", "0.5294916", "0.5289745", "0.528219", "0.5259374", "0.5217978", "0.52005905", "0.51784784", "0.5172461", "0.513816", "0.51311094", "0.5122082", "0.5120521", "0.5096946", "0.5096946", "0.5077663", "0.5040269", "0.5040269", "0.50320464", "0.5025893", "0.5024482", "0.5019332", "0.50145596", "0.50101215", "0.49845847", "0.49732882", "0.49674803", "0.49631506", "0.49631506", "0.49631506", "0.49631506", "0.49607533", "0.49589676", "0.49470994", "0.49366325", "0.4936423", "0.49344045", "0.49312708", "0.49280575", "0.49245256", "0.4916613", "0.49072123", "0.489616", "0.4895084", "0.48909304", "0.48872465", "0.48717746", "0.48470432", "0.48415262", "0.48402587", "0.4834445", "0.48343974", "0.48295578", "0.4824627", "0.48204583", "0.48189294", "0.48189294", "0.481489", "0.4801706", "0.4795912", "0.47889736", "0.47842008", "0.47788388", "0.47761318", "0.4772601", "0.47714487", "0.47708812", "0.47685298", "0.47323334", "0.47303697", "0.4727649", "0.4718814", "0.47151285", "0.47123048", "0.47107345", "0.4707157", "0.47049922", "0.4703709", "0.4698737", "0.4697067", "0.46947408", "0.4684449", "0.46833926", "0.4679496", "0.46767068", "0.46723884" ]
0.73542356
0
initialize your data structure here.
def __init__(self): self.stack = []
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _init_empty(self):\n self._data = []", "def __init__(self):\n self.data = []\n self.record = {}", "def __init__(self):\n self.structure = {}", "def __init__(self):\n self._data = []", "def __init__(self):\n self._data = []", "def __init__(self):\n self._data = []", "def __init__(self):\n self._data = []", "def __init__(self):\n self._data = []", "def __init__(self):\n self._data = []", "def initialize(self):\n\t\tpass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\r\n pass", "def initialize(self):\r\n pass", "def __init__(self):\n self.data = []\n self.idx = {}", "def initialize(self):\n self.data = None\n self.errors = []", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def __init__(self):\n self.d = {}\n self.l = []", "def __init__(self):\n self.l = {}\n self.s = {}", "def __init__(self):\n self.data = []", "def __init__(self):\n self.data = []", "def __init__(self):\n self._distance_data = []\n self._location_data = []\n self._package_data = []", "def init(self) -> None:", "def initialize(self):\n self.muondEdx = []\n self.muondNdx = []\n self.muonmomentum = []\n self.piondEdx = []\n self.piondNdx = []\n self.pionmomentum = []\n self.kaondEdx = []\n self.kaondNdx = []\n self.kaonmomentum = []\n self.protdEdx = []\n self.protdNdx = []\n self.protmomentum = []\n self.elecdEdx = []\n self.elecdNdx = []\n self.elecmomentum = []", "def __init__(self):\n self._data = [] # non-public underlying Python list as storage", "def __init__(self):\n self._dict = {}\n self._array = []", "def initialise(self):", "def __init__(self):\n\n # initialise the empty mappings dictionary\n self.data = {\n 'loan_id': None,\n 'product': None,\n 'origination_date': None,\n 'reversion_date': None,\n 'rate_term': None,\n 'loan_amount': None,\n 'initial_rate': None,\n 'reversion_rate': None,\n 'term': None,\n 'interest_only_amount': None,\n 'upfront_fees': None,\n 'upfront_costs': None,\n 'entity_eir': None\n }", "def initialize(self):", "def initialize(self):", "def initialize(self):", "def initialize(self):", "def __init__(self):\n self.d = {}\n self.h = []", "def initialize(self):\n\n db = dict()\n\n db['meta'] = Meta(None)\n db['race'] = Race(None, None, None, None, None)\n db['track'] = Track(None, None)\n db['classes'] = set([])\n db['teams'] = set([])\n db['drivers'] = set([])\n\n self.db = db", "def __init__(self):\n self._data=[]", "def __init__(self):\n self.relation = ''\n self.attributes = []\n self.attribute_types = dict()\n self.attribute_data = dict()\n self.comment = []\n self.data = []\n pass", "def __init__(self):\n self.dic={}\n self.data=[]", "def initialize(self):\n return", "def __init__(self):\n self.keys = []\n self.values = []", "def __init__(self, initial_data=[]):\n hdict.__init__(self)\n\n for elt in initial_data:\n self.add(elt)", "def _init(self):\n pass", "def memb_init(self):\n self.initialize()", "def __init__(self):\n self.d = {}", "def __init__(self):\n self.d = {}", "def __init__(self):\n self.d = {}", "def __init__(self):\n self.d = {}", "def initialize(self):\n pass # pragma: no cover", "def __init__(self):\n\t\tsuper().__init__()\n\t\t\n\t\t# Typically a list of data here\n\t\t# Typically a dict of header keys and values here", "def _initialize(self):\n pass", "def 
_initialize(self):\n pass", "def _initialize(self):\n pass", "def initialize(self) -> None:\n pass", "def __init__(self):\n self.table = {}\n self.ls = []", "def initialize(self):\n raise NotImplementedError", "def initialize(self):\n raise NotImplementedError", "def initialize(self):\n raise NotImplementedError", "def init(self) -> None:\n ...", "def __init__(self):\n self.metadata = {}\n self.geometry = {'array': None, \n 'geom': None, \n 'wkt': None}", "def __init__(self):\n self.root = [None, dict(), False] # val, sons, end-able", "def init(self):", "def init(self):", "def __init__(self):\n dict.__init__(self)\n self.datatype = None", "def _init_data(self) -> None:\n self.dtype = dict()\n self.shape = dict()\n self.size = dict()\n self.attrs = dict()\n self.data_ptr = dict()\n\n if self.mode == 'r':\n for k in self.fp.keys():\n self.dtype[k] = self.fp[k].dtype\n self.shape[k] = self.fp[k].shape\n self.size[k] = self.fp[k].shape[0]\n self.data_ptr[k] = 0", "def __init__(self):\r\n self.indices = {}\r\n self.data = []\r\n self.len = 0", "def init(self):\n raise NotImplementedError", "def init(self):\n raise NotImplementedError", "def __init__(self):\n self.data = {}\n self.refresh()", "def __init__(self):\n self._data = PositionalList() # list of _Item instances", "def initialize(self): \r\n pass", "def __init__(self):\n self.key_dict = {}\n self.value_dict = {}\n self.head, self.last = None, None", "def initialize(self):\n self.keys = [None] * BUCKET_SIZE\n self.values = [None] * BUCKET_SIZE", "def initialize(self):\r\n self.bucket_array.initialize()", "def __init__(self):\n # Dict of minecraft object in form of \"dict[id] = name\"\n self.data_values = dict()\n self.parser = self.setup_parser()", "def __init__(self):\n self.table = {}", "def initialize(self):\n self.voteskips = []\n self.response = {}\n self.route = {}\n self.userlist = []\n self.poll = []\n self.media = []\n self.init = False\n self.question = None\n self.jumble = None\n self.imgur = None", "def __init__(self):\n self._data = set()", "def __init__(self, data={}):\n self._update_(data)", "def _init(self):\n raise NotImplementedError", "def __init__(self):\n self.x = {}\n self.len = 0\n self.annotations = {}", "def _init_data(self, data):\n assert type(data) is dict, \"dict expected: %r\" % type(data)\n assert len(data) is 1, \"size of dict should be 1: %r\" % len(data)\n self._name = data.keys()[0]\n self._data = np.asarray(data[self._name])\n self._set = True", "def initialise(self):\r\n return", "def initialise(self):\r\n return", "def __init__(self):\n self._list = []\n self._dict = {}", "def __init__(self):\n self.elements = {}", "def _initialize_data(self):\n self.reset_count = 0\n self._idn_no_firmware = \"KEPCO,BOP 50-20,E1234,\"\n self._firmware = 2.6\n self._init_data()", "def __init__(self):\n self.map = {}\n self.array = []", "def __init__(self):\n self._data = []", "def __init__(self):\n self._data = []", "def __init__(self):\n\n self.nodes = {}", "def __init__(self):\n self._data = PositionalList() # list of Item instances", "def initialize(self):\n self.initialize_edges()\n self.initialize_prob()\n self.initialize_total_input_dict()\n\n self.initialize_fpmusigv_dict()" ]
[ "0.7761043", "0.76102185", "0.7555967", "0.7549892", "0.7549892", "0.7549892", "0.7549892", "0.7549892", "0.7549892", "0.752797", "0.7446006", "0.7446006", "0.7446006", "0.7446006", "0.7446006", "0.743338", "0.743338", "0.7408609", "0.7385719", "0.737986", "0.737986", "0.737986", "0.737986", "0.737986", "0.737986", "0.737986", "0.737986", "0.736901", "0.7362467", "0.7338006", "0.7338006", "0.73374635", "0.7331393", "0.73284763", "0.7322671", "0.73200834", "0.73099154", "0.72804767", "0.72795427", "0.72795427", "0.72795427", "0.72795427", "0.7271782", "0.727051", "0.72517675", "0.723993", "0.7239426", "0.72144645", "0.72107", "0.7197864", "0.71817285", "0.7179268", "0.71732044", "0.71732044", "0.71732044", "0.71732044", "0.71713084", "0.7168557", "0.7164963", "0.7164963", "0.7164963", "0.7150857", "0.7138392", "0.7127341", "0.7127341", "0.7127341", "0.71187997", "0.7115647", "0.71066", "0.71065164", "0.71065164", "0.7088567", "0.70847344", "0.70709914", "0.7046784", "0.7046784", "0.7046169", "0.70175964", "0.7016285", "0.70112973", "0.69934696", "0.6990926", "0.6983394", "0.6975254", "0.69616264", "0.69537103", "0.6949236", "0.69455796", "0.6944689", "0.6937002", "0.69317675", "0.69317675", "0.69307524", "0.69156027", "0.6909471", "0.6908198", "0.6902257", "0.6902257", "0.69008106", "0.6895019", "0.687953" ]
0.0
-1
Resets the array to its original configuration and return it.
def reset(self) -> List[int]: return self.nums
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset(self) -> List[int]:\n self.array = deepcopy(self.original)\n return self.array", "def reset(self) -> np.array:\n raise NotImplementedError", "def reset_original(self):\n self._original = [] # Empty out self._originals", "def reset(self):\n b = [0] * self.n\n for i in xrange(self.n):\n b[self.to[i]] = self.a[i]\n self.a = b\n self.to = range(self.n)\n return self.a", "def reset(self):\r\n self.A = np.zeros(self.A.shape)", "def reset(self):\n\n # Implement your reset method here\n # return observation\n self._state = np.random.uniform(-1, 1, size=(2,))\n observation = np.copy(self._state)\n return observation", "def reset(self) -> np.ndarray:\n self._instrument = self.np_random.choice(list(self._prices.keys()))\n prices = self._prices[self._instrument]\n bars = self._state.bars\n if self.do_random_offsets_on_reset:\n offset = self.np_random.choice(prices.high.shape[0] - bars * 10) + bars\n else:\n offset = bars\n self._state.reset(prices=prices, offset=offset)\n self_encoded_state = self._state.encode_self()\n return self_encoded_state", "def reset(self):\n self.observation = self.initial_observation.copy()\n self.sim = copy.deepcopy(self.initial_sim)\n return self.observation", "def reset(self):\n self._instrument = self.np_random.choice(list(self._prices.keys()))\n prices = self._prices[self._instrument]\n bars = self._state.bars_count\n if self.offset_close:\n offset = self.np_random.choice(prices.high.shape[0] - bars * 10) + bars\n else:\n offset = bars\n self._state.reset(prices, offset)\n return self._state.to_numpy_array()", "def reset(self):\n rich_obs = super(ColoredCostmapRandomAisleTurnEnv, self).reset()\n obs = rich_obs.costmap.get_data() # pylint: disable=no-member\n obs = np.expand_dims(obs, -1)\n return obs", "def reset(self):\n \n # start with all zeros\n self.buffer = np.zeros_like(self.observation_space.low, dtype=self.dtype)\n\n return self.observation(self.env.reset())", "def reset_values(self):\n\n self.values = np.array([])", "def reset(self):\n self.init_nums = copy.deepcopy(self.orig)\n return(self.init_nums)", "def reset_config():\n return _set_config(_gen_config())", "def reset(self):\n self.mat = np.zeros(9).reshape(3,3).astype(np.int32)\n return self.mat", "def reset(self) -> np.ndarray:\n # Initialize a new gamefield\n self.game.reset_game()\n self.turns_count = 0\n\n self.payoff_fields = constants.PAYOFF_LOCATIONS\n self.special_payoff_fields = random.sample(self.payoff_fields, 1)\n\n return self.make_alternative_states()", "def reset(self):\n self._data = []", "def clear(self):\n self.array = np.zeros(shape=(0, 2))", "def reset(self):\n self.observation = self.initial_observation.copy()\n # self.observation = self.observation_space.sample()\n return self.observation", "def reset(self):\n self.sim.reset()\n state = np.concatenate([self.sim_to_state()] * self.action_repeat)\n return state", "def clear(self):\n self._length = 0 # \"Erase\" values by ignoring them\n self._resize_arr(1) # Shrink array to original size", "def reset_state(self):\n self.s = np.copy(self.s_i)", "def reset(self):\n self.values = None\n self.keys = None\n self.mask = None", "def reset(self):\n self.sample['masked'] = [False]*len(self.sample.index)\n self.sample['colour'] = ['undefined']*len(self.sample.index)", "def reset(self):\n self.data = self._defaults", "def reset(self) -> List[int]:\n return self.orignial #Return the original copy.", "def reset(self):\n self.curstr = self.orig[:]\n return self.orig", "def restore(self):\n raise NotImplementedError", "def reset(self):\n 
return self.aux", "def reset(self):\n self.rand_start = int(np.random.rand()*25000)+self.offset\n state = np.array(np.zeros(self.obs))\n self.time = self.rand_start\n self.residual = 0\n self.cum_r = 0\n return state", "def reset(self):\n for i in range(self.k):\n self.list[i] = self.dic[i]\n return self.list", "def copy(self):\n copy_arr = DynamicArray(self._growth_factor) # Create new array to store values\n for i in range(self._length): # Append all values from original\n copy_arr.append(self._arr[i])\n return copy_arr", "def reset(self):\n self.state = CartPoleState(self.env.reset())\n return self.state.copy()", "def reset(self):\n newPerm = randomUtils.randomPermutation(self.xArray.tolist(),self)\n self.pot = np.asarray(newPerm)", "def reset(self):\n self.board.reset()\n return self.obs()", "def reset(self):\n self.sim.reset()\n state = np.concatenate([self.sim.pose] * self.action_repeat) \n return state", "def reset(self):\n self.sim.reset()\n state = np.concatenate([self.sim.pose] * self.action_repeat) \n return state", "def reset(self):\n self.sim.reset()\n state = np.concatenate([self.sim.pose] * self.action_repeat) \n return state", "def reset(self):\n self.items = np.arange(self.ratings.shape[1])", "def _reset(self):\n self._values = {}", "def restore_data(self):\n self.R = self._Ro\n del self._Ro", "def _resize_array(self, capacity):\n old_array = self._array\n self._array = [None] * capacity\n for index in range(self._size):\n self._array[index] = old_array[index]", "def reset_values(self):\n\n self.values = []", "def reset(self):\n\t\tself.buf = []", "def reset(self):\n self.sim.reset() \n state = np.tile(np.concatenate((self.sim.pose,self.sim.v,self.sim.angular_v),axis=0)/self.state_scale,self.action_repeat)\n\n return state", "def reset_data(self):\n self.data = []", "def _reset(self):\n self.obs_buffer.clear()\n obs = self._convert(self.env.reset())\n self.buffer.clear()\n self.counter = 0\n for _ in range(self.n - 1):\n self.buffer.append(np.zeros_like(obs))\n self.buffer.append(obs)\n obsNew = np.stack(self.buffer, axis=self.ch_axis)\n return obsNew.astype(np.float32) * self.scale", "def reset(self):\n self.board = np.zeros(shape = self.dim, dtype = int)\n self.current_player = 1\n self.steps = 0\n self.invalid_moves = 0\n self._is_done = False\n return self.board", "def reset( self ):\n self.conf = self.defaults", "def reset(self, **kwargs):\n observation = self.env.reset(**kwargs)\n observation = cv2.resize(observation, (self.size, self.size))\n observation = np.array(observation, dtype=np.uint8)\n observation = observation.transpose(2, 0, 1)\n return observation", "def reset_reservoir(self):\n self.state = np.zeros((self.state_size,1),dtype=self.typefloat)", "def clone(self, *args, **kwargs):\n return self.copy().reset(*args, **kwargs)", "def reset(self):\n self.sim.reset()\n # state = np.concatenate([self.sim.pose] * self.action_repeat) \n state = self.sim.pose\n return state", "def reset(self):\n return self._reset", "def reset(self):\n return self._reset", "def reset(self):\n \n self._obs_buffer.clear()\n obs = self.env.reset()\n self._obs_buffer.append(obs)\n \n return obs", "def reset(self) -> None:", "def reset(self) -> None:", "def reset(self) -> None:", "def restore(self):\n\n self.dispersion = self.raw_dispersion\n self.flux = self.raw_flux\n self.flux_err = self.raw_flux_err\n self.reset_mask()", "def reset_R(self):\n self.R = [np.ones((1, 1)) for _ in range(self.L + 2)]\n self.R[-1] = None", "def reset(self):\n raise NotImplementedError", "def 
reset(self):\n raise NotImplementedError", "def reset(self):\n raise NotImplementedError", "def reset(self):\n raise NotImplementedError", "def reset(self):\n raise NotImplementedError", "def Reset(self):\n n = len(self.Values)\n for i in range(0, n):\n self.Values.pop(i)", "def reset(self):\n self.elements = [0] * len(self)", "def reset(self):\n self.grid = np.array([0] * 9) # grid\n self.turn = 1 # whose turn it is\n self.done = False # whether game is doneT\n return self.grid", "def reset_board(self):\n\n self.board = np.array(self.initial_board)", "def reset(self):\n self.grid = np.array([0] * 9) # grid\n self.turn = 1 # whose turn it is\n self.done = False # whether game is done\n return self.grid", "def restore_via_init(objt: _ty.Type[MyArray]) -> Restorer[BaseArray, MyArray]:\n return objt", "def reset(self):\n self.my_board = np.ones((self.board_size, self.board_size), dtype=int) * CLOSED\n self.board = place_mines(self.board_size, self.num_mines)\n self.num_actions = 0\n self.valid_actions = np.ones((self.board_size * self.board_size), dtype=bool)\n\n return self.my_board", "def clear(self) -> None:\n self.da = DynamicArray()", "def reset(self):\r\n self.env.reset()\r\n return self.env.get_obs()", "def clear(self):\n length = len(self.data)\n self.data = [[] for j in range(length)]", "def reset(self):\r\n return self._api.reset()", "def reset(self):\n self.sim.reset()\n self.success=0\n state = np.concatenate([self.sim.pose] * self.action_repeat)\n return state", "def reset(self):\r\n self.results = []\r\n return self.results", "def reset_state(self):\n for name in self._buffers:\n self._buffers[name] = self._defaults[name]", "def reset(self) -> List[int]:", "def reset(self):\n for k in self.data_keys:\n setattr(self, k, [])\n self.size = 0", "def reset(self):\n self.state = [\n ['R', 'N', 'B', 'Q', 'K', 'B', 'N', 'R'],\n ['P'] * 8,\n [' '] * 8,\n [' '] * 8,\n [' '] * 8,\n [' '] * 8,\n ['p'] * 8,\n ['r', 'n', 'b', 'q', 'k', 'b', 'n', 'r']\n ]", "def reset(self) -> None:\n raise NotImplementedError", "def reset(self) -> list:\n self.x1 = 0\n self.y1 = -1\n self.z = 3\n self.x2 = 0\n self.y2 = 0\n self.frame = 0\n return self.get_state()", "def reset(self) -> List[int]:\n pass", "def reset(self) -> List[int]:\n return self.origin", "def reset(self):\n raise NotImplementedError", "def reset(self):", "def reset(self):", "def reset(self):", "def reset(self):", "def reset(self):\n raise NotImplementedError()", "def reset(self):\n raise NotImplementedError()", "def reset(self):\n raise NotImplementedError()", "def reset(self):\n raise NotImplementedError()", "def original(self):\n return self._original", "def original(self):\n return self._original", "def clone(self) -> Any:\n clone = super().clone()\n clone.clear()\n return clone", "def reset(self):\n self.z = rand(*self.z.shape)\n self.c = ones_like(self.c)", "def reset(self):\n self.board = place_mines(self.board_size, self.num_mines)\n self.my_board = np.ones((self.board_size, self.board_size), dtype=int) * CLOSED\n self.valid_actions = np.ones((self.board_size, self.board_size), dtype=np.bool)\n return self.my_board" ]
[ "0.77292603", "0.74091315", "0.680525", "0.661002", "0.65122974", "0.6499901", "0.64658946", "0.6412889", "0.64038277", "0.63267505", "0.6321126", "0.6252074", "0.6242088", "0.6207423", "0.62034124", "0.620192", "0.6169583", "0.61650425", "0.6147524", "0.6144126", "0.61250526", "0.6121113", "0.6061123", "0.60557353", "0.60134983", "0.6000699", "0.59824777", "0.5974402", "0.597249", "0.5962037", "0.59550965", "0.5949704", "0.59417707", "0.59212595", "0.5893384", "0.5887674", "0.5887674", "0.5887674", "0.58840865", "0.5869901", "0.58669055", "0.58650243", "0.58579034", "0.58573264", "0.5845939", "0.5841268", "0.5809664", "0.58071446", "0.580029", "0.58000004", "0.57902205", "0.57833916", "0.5780132", "0.57671154", "0.57671154", "0.57625407", "0.5759307", "0.5759307", "0.5759307", "0.5741484", "0.5735835", "0.57202077", "0.57202077", "0.57202077", "0.57202077", "0.57202077", "0.57156736", "0.57040817", "0.5702756", "0.570128", "0.5701162", "0.56985986", "0.56903255", "0.5679178", "0.56773853", "0.5670306", "0.5664071", "0.5659097", "0.5655024", "0.56491214", "0.5646527", "0.56420976", "0.5638266", "0.5631824", "0.56171817", "0.5616696", "0.5615467", "0.56148154", "0.5613481", "0.5613481", "0.5613481", "0.5613481", "0.5613387", "0.5613387", "0.5613387", "0.5613387", "0.56044954", "0.56044954", "0.56023955", "0.56003416", "0.5598247" ]
0.0
-1
Returns a random shuffling of the array.
def shuffle(self) -> List[int]:
    index_set = set()
    index_list = []
    while len(index_set) < len(self.nums):
        index = random.randint(0, len(self.nums) - 1)
        if index not in index_set:
            index_set.add(index)
            index_list.append(index)
    return [self.nums[i] for i in index_list]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shuffle(self) -> List[int]:\n for i in range(len(self.array)):\n tmp_idx = randrange(i, len(self.array))\n self.array[i], self.array[tmp_idx] = self.array[tmp_idx], self.array[i]\n return self.array", "def shuffle(self):\n for i in xrange(self.n - 1):\n pos = random.randint(i, self.n - 1)\n self.to[i], self.to[pos] = self.to[pos], self.to[i]\n self.a[i], self.a[pos] = self.a[pos], self.a[i]\n return self.a", "def randomize(data):\r\n permutation = np.random.permutation(data.shape[0])\r\n shuffled_data = data[permutation, :]\r\n # shuffled_y = y[permutation]\r\n return shuffled_data", "def shuffle_T(self):\n np.random.shuffle(self.T)", "def shuffle(arr):\n for i in range(len(arr)):\n j = int(math.floor(random.random() * len(arr)))\n arr[i], arr[j] = arr[j], arr[i]", "def shuffle(self):\n new_nums = self.nums[:]\n n = len(new_nums)\n import random\n for i in range(n):\n rand_num = random.randint(0, n - 1)\n # Swap nums[i] with nums[randint]\n temp = new_nums[i]\n new_nums[i] = new_nums[rand_num]\n new_nums[rand_num] = temp\n return new_nums", "def shuffle(self):\n x = len(self.org)\n result = self.org[:]\n var = x\n for i in range(x):\n id = random.randrange(0, var)\n result[id], result[var - 1] = result[var - 1], result[id]\n var -= 1\n\n return result", "def shuffle(self):\n order = np.arange(len(self.data))\n np.random.seed(0xA5EED)\n np.random.shuffle(order)\n self.data = self.data.iloc[order]\n return order", "def shuffle(self):\n self.np_random.shuffle(self.deck)", "def create_array_shuffler(rng):\n def nruter(in_array):\n return jax.random.permutation(rng, jnp.asarray(in_array))\n\n return nruter", "def shuffle_data(data):\n idx = np.arange(len(data))\n np.random.shuffle(idx)\n return data[idx, ...]", "def shuffle_question(self):\n r = random.SystemRandom()\n r.shuffle(self.question_list)", "def shuffle(self) -> List[int]:\n runs = self.nums.copy()\n # Fisher-Yates Algorithm\n n = len(runs)\n for i in range(n):\n j = random.randint(i, n - 1)\n runs[i], runs[j] = runs[j], runs[i]\n return runs", "def shuffle_data(data):\n indices = list(range(data.shape[0]))\n np.random.shuffle(indices)\n return data[indices]", "def array_shuffle(length, *arrays):\n p = np.random.RandomState(42).permutation(length)\n return [entry[p] for entry in arrays]", "def shuffle(data):\n\n shuffle = np.arange(data.shape[0])\n np.random.shuffle(shuffle)\n undo_shuffle = np.argsort(shuffle)\n\n return (data[shuffle], undo_shuffle)", "def shuffle(self):\n return self._shuffle", "def shuffle(self):\n return self._shuffle", "def shuffle(self):\n return self._shuffle", "def shuffle(self):\n random_list = list(self.nums)\n shuffle(random_list)\n # Alternative would be to loop over every item and randomly shuffle it:\n # for i in xrange(len(self.now) - 1):\n # idx = random.randint(i,len(self.now) - 1)\n # self.now[i],self.now[idx] = self.now[idx],self.now[i]\n return random_list", "def shuffle( self ):\n random.shuffle(self.__deck)", "def shuffle(self) -> List[int]:", "def shuffle(self):\n random.SystemRandom().shuffle(self.deck)", "def shuffle(self):\n import random\n random.shuffle(self.cards)", "def _shuffle():\n\n random.shuffle(deck)", "def shuffle(self):\n random.shuffle(self.cards)", "def shuffle(self):\n random.shuffle(self.cards)", "def shuffle(self):\n random.shuffle(self.cards)", "def shuffle(self):\n random.shuffle(self.cards)", "def shuffle(self):\n random.shuffle(self.cards)", "def Shuffle(self):\r\n random.shuffle(self.cards_list)", "def shuffle(self):\r\n random.shuffle(self.deck)", "def 
randomOrder(n):\n shu = np.arange(n)\n np.random.shuffle(shu)\n return shu", "def _shuffle(self, r, idx_to_shuffle = None):\n n_features = r.shape[0]\n n_trials = r.shape[1]\n r_sh = r\n if idx_to_shuffle is None:\n idx_to_shuffle = range(n_features)\n for i in range(n_features):\n if i in idx_to_shuffle:\n r_sh[i,:] = r[i,random.permutation(range(n_trials))]\n return r_sh", "def shuffle(self):\n random.shuffle(self.get_cards())", "def shuffle_mask(x):\n ix_i = np.random.sample(x.shape).argsort(axis=0)\n ix_j = np.tile(np.arange(x.shape[1]), (x.shape[0], 1))\n return x[ix_i, ix_j]", "def shuffle(self):\n return self._state.get(\"random\", False)", "def shuffle(list_, random_seed=123):\n random.Random(random_seed).shuffle(list_)", "def shuffle(self):\r\n random.shuffle(self.deck_of_cards)\r\n return self.deck_of_cards", "def shuffle(self):\n self.__c_elem().melange()", "def shuffle(self, random_state=None): \n if random_state is None:\n random_state = self.random_state\n perm_ids = random_state.permutation(self.n_examples)\n self.u = self.u[perm_ids]\n self.v = self.v[perm_ids]\n self.rating = self.rating[perm_ids]", "def shuffle(self):\n lens = len(self.curstr)\n for i in xrange(lens-1, 0, -1):\n j = random.randint(0, i)\n self.curstr[i], self.curstr[j] = self.curstr[j], self.curstr[i]\n return self.curstr", "def shuffle(self):\n self.shuffle_range(len(self.cards))", "def shuffle(self):\n shuffle(self.cards)", "def shuffle(self):\n shuffle(self.cards)", "def shuffle(self) -> None:\r\n random.shuffle(self._deck)", "def main():\n input_1 = [7, 6, 5, 4, 3, 2, 1]\n print shuffle(input_1)\n print input_1", "def shuffle(values):\n num_values = len(values)\n for v in range(num_values):\n # Get a random, different index\n s = v + int(random() * (num_values - v))\n # Swap values\n values[s], values[v] = values[v], values[s]\n return values", "def stable_shuffle(self, seq):\n seq = numpy.asarray(seq)\n if len(seq) != len(self._argshuf):\n # Reset the rng using seq length as the seed.\n # Why not just use the same seed every time? Dunno.\n rng = numpy.random.default_rng(len(seq))\n # Save the first permutation generated thereby.\n self._argshuf = rng.permutation(len(seq))\n return seq[self._argshuf]", "def shuffle(self):\r\n puzzle = self\r\n for _ in range(1000):\r\n puzzle = random.choice(puzzle.actions)[0]()\r\n return puzzle", "def shuffle(self):\n new_X = np.empty(self.X_data.shape, dtype=self.X_data.dtype)\n new_Y = np.empty(self.Y_data.shape, dtype=self.Y_data.dtype)\n perm = np.random.permutation(self.X_data.shape[0])\n for old_idx, new_idx in enumerate(perm):\n new_X[new_idx] = self.X_data[old_idx]\n new_Y[new_idx] = self.Y_data[old_idx]\n self.X_data = new_X\n self.Y_data = new_Y", "def shuffle(self) -> List[int]:\n n = len(self.q)\n \n for i in range(n):\n j = random.randrange(i, n)\n self.q[i], self.q[j] = self.q[j], self.q[i]\n return self.q", "def shuffle(self):\n random.shuffle(self.deckcards)", "def sort_random(i):\n return np.random.rand()", "def shuffle_cards(self):\r\n\r\n cards = np.arange(self.total_cards) # [0, 1, 2, ... 
49, 50, 51]\r\n np.random.shuffle(cards) # shuffle this\r\n initial_state = cards.reshape((4, int(self.total_cards / 4))) # reshape this to 4x13\r\n return initial_state", "def test_shuffle(self):\n r = self.RNA(\"UUUUCCCCAAAAGGGG\")\n s = r.shuffle()\n self.assertNotEqual(r, s)\n self.assertEqualItems(r, s)", "def shuffle(L):\n return [L[i] for i in permutation(len(L))]", "def shuffle(self) -> List[int]:\n for i in range(len(self.nums)): #Traverse nums.\n r = randint(i, len(self.nums) - 1) #Generate a random int in [i, len(self.nums) - 1].\n self.nums[i], self.nums[r] = self.nums[r], self.nums[i] #Swap self.nums[i] and self.nums[r].\n return self.nums #Return self.nums.", "def shuffle(self):\n perm = self.rng.permutation(self.inputs.shape[0])\n self._current_order = self._current_order[perm]\n self.inputs = self.inputs[perm]\n self.targets = self.targets[perm]", "def shuffle(self) -> List[int]:\n res = self.q[::]\n random.shuffle(res)\n #res = random.sample(self.q, len(self.q))\n return res", "def shuffle_1d_nb(a, seed=None):\n if seed is not None:\n np.random.seed(seed)\n return np.random.permutation(a)", "def __shuffle_cards(self):\n random.shuffle(self.cards)", "def shuffled(iterable):\n items = list(iterable)\n random.shuffle(items)\n return items", "def shuffle(inputData):\n\n x = inputData[0]\n y = inputData[1]\n m = inputData[2]\n \n N = np.arange(0,x.shape[0])\n np.random.shuffle(N)\n \n x = x[N,:]\n y = y[N]\n m = m[N,:]\n \n return [x,y,m]", "def shuffle(self):\n self.edges = np.random.permutation(self.edges)\n self.batch_num = 0", "def shuffle(self) -> None:\n shuffle(self.cards)", "def shuffle():\n deck = []\n # By Baccarat rules, there are 4 aces worth 1 point, 16 face cards and tens\n # worth 0 point, and 32 other cards worth their numerical value.\n # 8 decks are suffled together to create a shoe.\n for n in range(8):\n for i in range (32):\n deck.append((i % 8) + 2)\n \n for i in range (16):\n deck.append(0)\n \n for i in range (4):\n deck.append(1)\n \n random.shuffle(deck)\n\n return deck", "def shuffle(data, shuffle_size=10000):\n buf = []\n for sample in data:\n buf.append(sample)\n if len(buf) >= shuffle_size:\n random.shuffle(buf)\n for x in buf:\n yield x\n buf = []\n # The sample left over\n random.shuffle(buf)\n for x in buf:\n yield x", "def shuffle(self):\r\n self._current = 0\r\n random.shuffle(self._cards)", "def _shuffle(x, y):\n ind = np.arange(y.shape[0])\n np.random.shuffle(ind)\n\n return x[:, ind, :], y[ind]", "def shuffle(self) -> 'List':\n copy = self.copy()\n shuffle(copy)\n\n return copy", "def shuffle(self) -> List[int]:\n for i in range(len(self.nums) - 1, 0, -1):\n pivot = random.randint(0, i) # 前闭后闭\n self.nums[i], self.nums[pivot] = self.nums[pivot], self.nums[i]\n return self.nums", "def shuffle(self):\n perm = self.rng.permutation(self.inputs.shape[0])\n self._current_order = self._current_order[perm]\n self.inputs = self.inputs[perm]\n self.targets = self.targets[perm]\n self.target_ids = self.target_ids[perm]", "def shuffle(self):\n self.train_nodes = np.random.permutation(self.train_nodes)\n self.batch_num = 0", "def shuffle(self):\n self.train_nodes = np.random.permutation(self.train_nodes)\n self.batch_num = 0", "def shuffle(self):\n new_list = [] \n while True:\n if len(self.init_nums) == 0 :\n pass\n break\n else: \n while self.init_nums is not None: \n if len(self.init_nums) is 0: \n break\n else :\n ks = random.choice(self.init_nums) \n new_list.append(ks)\n self.init_nums.remove(ks)\n\n if self.orig == new_list:\n continue\n else:\n 
print(new_list)\n break \n self.init_nums = new_list\n return(new_list)", "def calc_granger_shuffle(self):\n if not hasattr(self, 'input_data'):\n self.preprocess_and_check_stationarity()\n temp_series = [np.stack([np.random.permutation(x)\n for x in self.input_data.T]).T\n for i in trange(self.n_shuffles)]\n\n outs_temp = parallelize(self.calc_granger, temp_series, n_jobs=30)\n outs_temp = [x[0] for x in outs_temp]\n self.shuffle_outs = np.array(outs_temp)", "def get_random_samplers(self, n):\n if not self.has_samplers():\n self.draw_samplers()\n \n def get_shuffledcopy(x):\n x_ = x.copy()\n np.random.shuffle(x_)\n return x_\n \n return get_shuffledcopy(self.samplers)[:n]", "def shuffle(self, seed=0):\n if isinstance(seed, numbers.Integral): # also allow for np.int8(n) and the like\n if seed != -1:\n if seed == 0:\n random.seed()\n else:\n random.seed(seed)\n random.shuffle(self.idxs)\n else: # not an integer seed: None or some other type\n # same as seed 0\n random.seed()\n random.shuffle(self.idxs)", "def shuffle(self, random_state=None):\n if random_state is not None:\n self.df = self.df.sample(frac=1, random_state=random_state)\n else:\n self.df = self.df.sample(frac=1)", "def shuffle_nb(a, seed=None):\n if seed is not None:\n np.random.seed(seed)\n out = np.empty_like(a, dtype=a.dtype)\n\n for col in range(a.shape[1]):\n out[:, col] = np.random.permutation(a[:, col])\n return out", "def shuffle(self):\n index = list(range(self.k))\n random.shuffle(index)\n for i, j in enumerate(index):\n self.list[i] = self.dic[j]\n return self.list", "def test_shuffle(self):\n random.shuffle(self.seq)\n self.seq.sort()\n self.assertEqual(self.seq, range(10))", "def random(self, af=False):\n rank = randrange(self.order())\n return self.coset_unrank(rank, af)", "def knuth_shuffle_forward(arr):\n for i in range(0, len(arr)-2): # i from 0..n-2 \n j = random.randint(i, len(arr)-1) # Pick randomly i <= j < n\n arr[i], arr[j] = arr[j], arr[i]", "def flat_shuffle(tensor):\n shape_ = tensor.size()\n flat_tensor = tensor.view(-1)\n shuffled_flat_tensor = shuffle(flat_tensor)\n return shuffled_flat_tensor.view(shape_)", "def sample(self):\n u = np.asarray(np.random.uniform())\n return self.invert(u)", "def shuffle(self):\n reorder(self.cards) #importing shuffle as reorder", "def shuffle(self, random_seed: int = 0):\n # shuffle feature model clauses (used for div. 
promotion)\n random.seed(random_seed)\n clauses_ = []\n for clause in self.clauses_raw:\n clause_ = random.sample(clause, len(clause))\n clauses_.append(clause_)\n clauses = random.sample(clauses_, len(clauses_))\n \n self.clauses, self.target = FeatureModel.__convert_dimacs_to_bitvec(clauses, len(self.feature_dict))", "def shuffle_dataset(self, seed=None):\n stacked = np.r_[self.X_train,\n self.y_train]\n shuffle(stacked.T)\n X_shuffled = stacked[:self.nfeatures,:]\n y_shuffled = stacked[self.nfeatures:,:]\n return X_shuffled, y_shuffled", "def sample(n, seed= 0):\n data = list(range(n))\n while True:\n np.random.seed(seed)\n np.random.shuffle(data)\n yield from data", "def _get_rand_array(self):\n pass", "def random():\n np.random.seed(0)", "def shuffle_points(batch_data):\n idx = np.arange(batch_data.shape[1])\n np.random.shuffle(idx)\n return batch_data[:, idx, :]", "def random():\n np.random.seed(1939)", "def shuffle(lol, seed):\n for l in lol:\n random.seed(seed)\n random.shuffle(l)", "def shuffle_points(batch_data):\n idx = np.arange(batch_data.shape[1])\n np.random.shuffle(idx)\n return batch_data[:,idx,:]", "def shuffle(self, return_perm=False):\n perm = torch.randperm(len(self))\n dataset = self.index_select(perm)\n return (dataset, perm) if return_perm is True else dataset", "def generate_shuffle(self):\n self._shuffle_list = range(len(self._playlist))\n random.shuffle(self._shuffle_list)\n if self._active_index in self._shuffle_list:\n self._shuffle_list.remove(self._active_index)", "def shuffle_list(a):\n if isinstance(a, int):\n a = range(a)\n a = copy.copy(a)\n try:\n random.shuffle(a)\n except TypeError:\n a = list(a)\n random.shuffle(a)\n return a", "def shuffle_list(self):\n eight_pic = get_image_list(self.folder)\n if len(eight_pic) > 8:\n random.shuffle(eight_pic)\n full_list = eight_pic[:9] * 2\n random.shuffle(full_list)\n return full_list" ]
[ "0.74818784", "0.7243617", "0.72220534", "0.72162074", "0.7185313", "0.7155056", "0.71378976", "0.7131264", "0.7099804", "0.70315886", "0.7027391", "0.70198023", "0.7006563", "0.6986513", "0.69661355", "0.69302654", "0.6921108", "0.6921108", "0.6921108", "0.69205916", "0.690033", "0.68126494", "0.6800903", "0.67991215", "0.6792264", "0.67703545", "0.67703545", "0.67703545", "0.67703545", "0.67703545", "0.67561954", "0.6754142", "0.6753444", "0.6751369", "0.67484033", "0.6738139", "0.67081594", "0.6671086", "0.6655608", "0.66490984", "0.6635859", "0.65697545", "0.655332", "0.65285814", "0.65285814", "0.6527256", "0.6513981", "0.65124786", "0.65033925", "0.64914757", "0.6484507", "0.64838064", "0.6468496", "0.645145", "0.6449737", "0.6440766", "0.64402395", "0.6435978", "0.6366854", "0.6362749", "0.6360608", "0.63542277", "0.6349204", "0.63429904", "0.6328835", "0.63286364", "0.6320642", "0.63196343", "0.631877", "0.63044727", "0.62994677", "0.62897265", "0.62779677", "0.6269147", "0.6269147", "0.62643015", "0.62573904", "0.6256897", "0.62566566", "0.6251349", "0.62502676", "0.62486523", "0.6244671", "0.62390184", "0.6235017", "0.62275034", "0.62137306", "0.61964095", "0.6182604", "0.618071", "0.6179021", "0.61769646", "0.61766326", "0.61654425", "0.61626184", "0.61565846", "0.6155639", "0.6155581", "0.61493653", "0.6135942", "0.6127567" ]
0.0
-1
Initialize ProjectForm, adding crispy_forms helper and layout.
def __init__(self, *args, editable=False, request=None, **kwargs):
    super().__init__(*args, **kwargs)
    self.helper = FormHelper(self)
    user_authenticated = request and request.user.is_authenticated

    if hasattr(self.instance, 'pk') and self.instance.pk is not None:
        new = False
        slug_field = HTML("""
            <div id="div_slug" class="row form-group">
                <label for="id_url" class="col-md-2 col-form-label form-control-label">
                    URL
                </label>
                <input type="text" name="slug" maxlength="50" id="id_slug"
                       class="textinput textInput form-control" hidden value="{{object.slug}}">
                <div class="col-md w-100">
                    <div class="input-group">
                        <input type="text" name="slug" maxlength="50" id="id_url"
                               class="textinput textInput form-control" disabled
                               value="http://cobwebarchive.org{{object.get_absolute_url}}">
                    </div>
                </div>
            </div>
        """)
        form_buttons_kwargs = {
            'confirm_title': 'Save changes',
            'confirm_text': 'Click the submit button to save changes to this project or click on cancel to return to Cobweb without saving.',
        }
    else:
        new = True
        self.fields['slug'].label = "Choose a Cobweb URL"
        slug_field = PrependedAppendedText('slug', prepended_text='http://cobwebarchive.org/proj/')
        form_buttons_kwargs = {
            'confirm_title': 'Add new project',
            'confirm_text': 'Click the submit button to add this project to Cobweb or click on cancel to return to Cobweb without saving.',
        }

    self.helper.layout = Layout(
        slug_field,
        HField('title', edit=editable),
        FormSection(
            Row(Column(HField('description', edit=editable))),
            HField('collecting_scope', edit=editable),
            Row(
                Field('status', edit=editable, wrapper_class='col-md-5'),
                Field('administrators', edit=editable, wrapper_class='col-md-7', show=user_authenticated),
            ),
        ),
        FormSection(
            Row(
                Column(Field('nomination_policy', edit=editable), css_class='col-md-5'),
                Column(
                    Field('nominators', edit=editable, show=user_authenticated),
                    Field('nominator_blacklist', edit=editable, show=editable),
                    css_class='col-md-7'
                ),
            ),
        ),
        FormSection(select_field('tags', edit=editable)),
        form_buttons(**form_buttons_kwargs) if editable else HTML(''),
        TIMESTAMPS,
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, parent=None):\n super(Form, self).__init__(parent)\n self.setupUi(self)", "def __init__(self, parent=None):\n super(Form, self).__init__(parent)\n self.setupUi(self)", "def get_add_project_form():\n\n return render_template(\"project_add.html\")", "def get_project_add_form():\n\n return render_template(\"project_add.html\")", "def initForm(self):\n\t\tcontrol_path = tools.getFileInSameDirectory(__file__, \"textInput.ui\")\n\t\tself._form = uic.loadUi(control_path)\n\t\tself.form.label.setText(self._label)\n\t\tself.form.lineEdit.setText(self._value)\n\t\tself.form.setToolTip(self.help)\n\n\t\t# self.form.lineEdit.editingFinished.connect(self.finishEditing)", "def get_project_form():\n\n return render_template(\"project_search.html\")", "def init():\n defaults = _project_defaults()\n\n if Project.prompt:\n defaults['name'] = prompt(\"Enter the project's name:\", defaults['name'])\n defaults['package'] = prompt(\"Enter the project's package:\", defaults['package'])\n defaults['author'] = prompt(\"Enter the project's author:\", defaults['author'])\n defaults['author_email'] = prompt(\"Enter the project's author's email:\", defaults['author_email'])\n defaults['description'] = prompt(\"Enter the project's description:\", defaults['description'])\n\n # print(\"defaults:\\n{defaults}\".format(defaults=pformat(defaults)))\n\n if Project.use_templates:\n\n template = Template()\n\n for template_dir in [os.path.abspath(os.path.join(herringlib, 'herringlib', 'templates'))\n for herringlib in HerringFile.herringlib_paths]:\n\n info(\"template directory: %s\" % template_dir)\n # noinspection PyArgumentEqualDefault\n template.generate(template_dir, defaults, overwrite=False)", "def _setup_ui(self):\n from functools import partial\n\n self.setStyleSheet(\n \"\"\"\n QLabel[labelField=\"true\"] {\n font-weight: bold;\n }\n \"\"\"\n )\n\n # The main layout\n self.main_layout = QtWidgets.QVBoxLayout(self)\n self.main_layout.setContentsMargins(0, 0, 0, 0)\n\n # the form layout\n self.form_layout = QtWidgets.QFormLayout()\n self.form_layout.setLabelAlignment(\n QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter\n )\n\n # store roles\n label_role = QtWidgets.QFormLayout.LabelRole\n field_role = QtWidgets.QFormLayout.FieldRole\n\n self.main_layout.addLayout(self.form_layout)\n\n i = -1\n\n # Reviewer\n i += 1\n reviewer_name_label = QtWidgets.QLabel(self)\n reviewer_name_label.setText(\"Reviewer\")\n self.form_layout.setWidget(i, label_role, reviewer_name_label)\n\n self.reviewer_name_widget = QtWidgets.QLabel(self)\n self.form_layout.setWidget(i, field_role, self.reviewer_name_widget)\n\n # Task Name field\n i += 1\n task_name_label = QtWidgets.QLabel(self)\n task_name_label.setText(\"Task\")\n self.form_layout.setWidget(i, label_role, task_name_label)\n\n self.task_name_widget = QtWidgets.QLabel(self)\n self.form_layout.setWidget(i, field_role, self.task_name_widget)\n\n # # Version Info field\n # from anima.ui.widgets.version import VersionDetailsWidget\n # self.latest_version_widget = VersionDetailsWidget(parent=self)\n # self.main_layout.insertWidget(0, self.latest_version_widget)\n\n # Review Type Field\n i += 1\n review_type_label = QtWidgets.QLabel(self)\n review_type_label.setText(\"Review Type\")\n self.form_layout.setWidget(i, label_role, review_type_label)\n\n self.review_type_widget = ReviewTypeWidget(self)\n self.review_type_widget.currentIndexChanged.connect(\n partial(self.review_type_changed_callback)\n )\n\n self.form_layout.setWidget(i, field_role, 
self.review_type_widget)\n\n # Timing Field\n i += 1\n effort_label = QtWidgets.QLabel(self)\n effort_label.setText(\"Timing\")\n self.form_layout.setWidget(i, label_role, effort_label)\n\n effort_layout = QtWidgets.QHBoxLayout()\n self.form_layout.setLayout(i, field_role, effort_layout)\n\n from anima.ui.widgets.timing import ScheduleTimingWidget\n from anima import defaults\n\n self.timing_widget = ScheduleTimingWidget(\n self, timing_resolution=defaults.timing_resolution\n )\n self.timing_widget.setEnabled(False)\n # set the default to 1 hour\n self.timing_widget.set_schedule_info(timing=1, unit=\"h\")\n effort_layout.addWidget(self.timing_widget)\n\n # Description Field\n i += 1\n description_label = QtWidgets.QLabel(self)\n description_label.setText(\"Description\")\n self.form_layout.setWidget(i, label_role, description_label)\n\n self.description_widget = QtWidgets.QTextEdit(self)\n self.form_layout.setWidget(i, field_role, self.description_widget)", "def _initialize_f0(self, lang='en-US'):\n #left column\n self.f0_ttp = Label(self.f0, text=LOCALIZED_TEXT[lang]['f0_ttp'], \\\n anchor='w', justify='left', wraplength=600)\n self.f0_ttp.grid(column=0, row=0, columnspan=3, padx=5, pady=5, \\\n sticky='ew')\n\n self.lblCurProject = Label(self.f0, \\\n text=LOCALIZED_TEXT[lang]['Current Project>'], \\\n anchor='w', justify='right')\n self.lblCurProject.grid(column=0, row=1, columnspan=2, \\\n padx=5, pady=5, sticky='e')\n\n self.ddnCurProject = Combobox(self.f0, validate='focusout', \\\n validatecommand=(self._check_project, '%s', '%P'), \\\n textvariable=self.current_project)\n# self.current_project.trace('w', self._load_project)\n self.ddnCurProject.grid(column=2, row=1, columnspan=2, \\\n padx=5, pady=5, sticky='news')\n self.ddnCurProject.bind('<<ComboboxSelected>>', self._change_lang)\n self.ddnCurProject['text'] = 'Current Project:'\n self.ddnCurProject['justify'] = 'left'\n\n self.btnDelProject = Button(self.f0, \\\n text=LOCALIZED_TEXT[lang]['Delete Project'], \\\n command=self._on_del_project)\n self.btnDelProject.grid(column=4, row=1, padx=5, pady=5, sticky='news')\n self.btnDelProject_ttp = CreateToolTip(self.btnDelProject, \\\n LOCALIZED_TEXT[lang]['Delete Project_ttp'])\n\n self.btnNewProject = Button(self.f0, \\\n text=LOCALIZED_TEXT[lang]['New Project'], \\\n command=self._on_new_project)\n self.btnNewProject.grid(column=5, row=1, padx=5, pady=5, sticky='news')\n self.btnNewProject_ttp = CreateToolTip(self.btnNewProject, \\\n LOCALIZED_TEXT[lang]['New Project_ttp'])\n\n #browse to the source/fallback dictionary exported from Map Creator\n self.lblDictIn = Label(self.f0, \\\n text=LOCALIZED_TEXT[lang]['Source/Fallback dictionary'], \\\n anchor='w', justify='left')\n self.lblDictIn.grid(column=0, row=2, columnspan=2, padx=5, pady=5, \\\n sticky='ew')\n self.etrDictIn = Entry(self.f0, \\\n textvariable=self.dict_in, width=70)\n self.etrDictIn.grid(column=2, row=2, \\\n columnspan=3, padx=5, pady=5, sticky='news')\n self.btnDictIn = Button(self.f0, \\\n text=LOCALIZED_TEXT[lang]['...'], \\\n command=self._browse_to_dict)\n self.btnDictIn.grid(column=5, row=2, padx=5, pady=5, sticky='news')\n self.btnDictIn_ttp = CreateToolTip(self.btnDictIn, \\\n LOCALIZED_TEXT[lang]['DictIn_ttp'])\n\n #browse to the html file exported from Paratext Bilical Terms tool\n self.lblTermsIn = Label(self.f0, \\\n text=LOCALIZED_TEXT[lang]['Biblical terms list'], \\\n anchor='w', justify='left')\n self.lblTermsIn.grid(column=0, row=3, columnspan=2, padx=5, pady=5, \\\n sticky='ew')\n self.etrTermsIn = 
Entry(self.f0, \\\n textvariable=self.terms_in, width=70)\n self.etrTermsIn.grid(column=2, row=3, \\\n columnspan=3, padx=5, pady=5, sticky='news')\n self.btnTermsIn = Button(self.f0, \\\n text=LOCALIZED_TEXT[lang]['...'], \\\n command=self._browse_to_terms)\n self.btnTermsIn.grid(column=5, row=3, padx=5, pady=5, sticky='news')\n self.btnTermsIn_ttp = CreateToolTip(self.btnTermsIn, \\\n LOCALIZED_TEXT[lang]['TermsIn_ttp'])\n\n #browse to the existing source/target dict file exported from Paratext\n #Bilical Terms tool, if any.\n self.lblOldDict = Label(self.f0, \\\n text=LOCALIZED_TEXT[lang]['Old Source/Target dictionary'], \\\n anchor='w', justify='left')\n self.lblOldDict.grid(column=0, row=4, columnspan=2, padx=5, pady=5, \\\n sticky='ew')\n self.etrOldDict = Entry(self.f0, \\\n textvariable=self.old_dict, width=70)\n self.etrOldDict.grid(column=2, row=4, \\\n columnspan=3, padx=5, pady=5, sticky='news')\n self.btnOldDict = Button(self.f0, \\\n text=LOCALIZED_TEXT[lang]['...'], \\\n command=self._browse_to_old_dict)\n self.btnOldDict.grid(column=5, row=4, padx=5, pady=5, sticky='news')\n self.btnOldDict_ttp = CreateToolTip(self.btnDelProject, \\\n LOCALIZED_TEXT[lang]['OldDict_ttp'])\n\n self.chkAddCLDRfields = Checkbutton(self.f0, \\\n text=LOCALIZED_TEXT[lang][\"AddCLDRfields\"], \\\n variable=self.add_cldr_fields)\n self.chkAddCLDRfields.grid(column=0, row=6, columnspan=3, \\\n padx=5, pady=5, \\\n sticky='w')\n self.chkAcceptRegionalDigits = Checkbutton(self.f0, \\\n text=LOCALIZED_TEXT[lang][\"AcceptRegionalDigits\"], \\\n variable=self.accept_regional_digits)\n self.chkAcceptRegionalDigits.grid(column=0, row=5, columnspan=3, \\\n padx=5, pady=5, \\\n sticky='w')\n self.accept_regional_digits.set(1)\n\n self.btnF0Next = Button(self.f0, \\\n text=LOCALIZED_TEXT[lang]['Next'], \\\n command=self._on_click_f0_next)\n self.btnF0Next.grid(column=4, row=7, padx=5, pady=5, sticky='news')\n self.btnF0Next_ttp = CreateToolTip(self.btnF0Next, \\\n LOCALIZED_TEXT[lang]['F0Next_ttp'])\n\n # list projects in BibTerms2Dict and load to ddnCurProject\n# self.BibTerm = os.path.normpath(os.path.expanduser('~') + '/BibTerm')\n# print(self.BibTerm)\n self.BibTerm = os.path.normpath(\\\n os.environ['HomeDrive'] + os.environ['HomePath'] + '\\\\BibTerm')\n if not os.path.isdir(self.BibTerm):\n os.makedirs(self.BibTerm)\n \n# messagebox.showwarning('self.BibTerm', \">{}<\".format(self.BibTerm))\n\n self.list_projects = sorted([f[:-4] for f in os.listdir(self.BibTerm) \\\n if f.endswith('.prj')])\n\n self.ddnCurProject['values'] = self.list_projects", "def create_project_form(request):\n \n # First we check to see the site has been set up, otherwise we throw the user to the config screen\n if not bool(os.path.isdir(Project.project_options.repository_directory)):\n request.user.message_set.create(message=\"The site has not been set up yet. 
Log in as your admin user and create your settings!\")\n return HttpResponseRedirect(reverse('site-config'))\n \n if request.is_ajax():\n template ='project/project_create_ajax.html'\n else:\n template = 'project/project_create.html'\n \n # Lets check if this form is being shown or processed\n if request.method == \"POST\":\n # We're processing the form, so lets create the instance\n form = NewProjectForm(request.POST, auto_id=False)\n # The form is correct, lets proceeed.\n if form.is_valid():\n # Lets check the user has conformed to a sites T&C's\n if form.cleaned_data['t_and_c'] == True:\n # Create the project instance\n project = Project(\n project_id = string.lower(form.cleaned_data['project_id']),\n project_name = form.cleaned_data['project_name'],\n short_description = form.cleaned_data['short_description'],\n full_description = form.cleaned_data['full_description'],\n project_manager = request.user,\n hgweb_style = form.cleaned_data.get('hgweb_style', ''),\n project_icon = form.cleaned_data['project_icon'],\n )\n # Ok, we're all good, so lets save.\n project.save()\n # We'll tell the user that there site has been saved\n request.user.message_set.create(message=_(\"The project \" + form.cleaned_data['project_name'] + \" has been created\"))\n if request.is_ajax():\n return HttpResponse(\n \"{'success': 'true', 'url': '\" + reverse('project-detail', kwargs={'slug':form.cleaned_data['project_id']}) + \"', 'project': \" + json_encode(project) + \"}\"\n , mimetype=\"application/json\")\n else:\n return HttpResponseRedirect(reverse('project-detail', kwargs={'slug': form.cleaned_data['project_id']}))\n else:\n return render_to_response(template,\n {\n 'form':form.as_table(),\n }, context_instance=RequestContext(request)\n )\n #return HttpResponseRedirect(reverse('project-detail', kwargs={'slug':form.cleaned_data['name_short']}))\n else:\n form = NewProjectForm()\n is_auth = request.user.is_authenticated()\n \n return render_to_response(template,\n {\n 'form':form.as_table(),\n 'is_auth': is_auth\n }, context_instance=RequestContext(request)\n )", "def __init__(self):\n self.model = self.load_model()\n self.form_html = self.create_form_html()", "def build_gui(self):\n # Build header\n layout = [[sg.Text(f\"Welcome to {self.app_name}\")], [sg.Text('')]]\n\n # Build form\n for (field_name, field) in (self.config.get(\"fields\")).items():\n # By default we will use str as type\n if \"type\" not in field:\n field.update({\"type\": \"str\"})\n\n # Make sure we have a default value\n if \"default\" not in field:\n field.update({\"default\": \"\"})\n\n if field.get(\"type\") == \"str\" or field.get(\"type\") == \"int\":\n layout.append(self.build_string_field(field_name, field))\n elif field.get(\"type\") == \"date\":\n layout.append(self.build_date_field(field_name, field))\n elif field.get(\"type\") == \"list\":\n layout.append(self.build_list_field(field_name, field))\n elif field.get(\"type\") == \"textarea\":\n layout.append(self.build_textarea_field(field_name, field))\n else: # If not identified, just treat it as a str\n layout.append(self.build_string_field(field_name, field))\n\n # Build footer\n layout.append([sg.Text('')])\n layout.append([sg.Text('* Mandatory fields', text_color=\"Red\")])\n layout.append([sg.Button('Build'), sg.Button('Cancel')])\n layout.append([sg.Text('')])\n return layout", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {\n \"first_name\": \"First Name\",\n \"last_name\": \"Last Name\",\n \"default_phone_num\": \"Phone 
Number\",\n \"default_passport_num\": \"Passport Number\",\n }\n\n self.fields[\"default_phone_num\"].widget.attrs[\"autofocus\"] = True\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs[\"placeholder\"] = placeholder\n self.fields[field].widget.attrs[\n \"class\"\n ] = \"border-black rounded-0 \\\n all-form-input\"\n self.fields[field].label = False\n self.helper = FormHelper()\n self.helper.form_tag = True\n self.helper.layout = Layout(\n Div(\n Field(\n \"first_name\",\n ),\n Field(\n \"last_name\",\n ),\n Field(\n \"default_phone_num\",\n ),\n Field(\n \"default_passport_num\",\n ),\n ),\n ButtonHolder(\n Submit(\"submit\", \"Save\", css_class=\"m-0 btn btn-outline\"),\n ),\n )", "def initializeUI(self):\n self.setGeometry(100, 100, 450, 300)\n self.setWindowTitle('Model and View Example')\n\n self.setupModelView()\n\n self.show()", "def initUI(self, context):\n self.formlayout = QtWidgets.QFormLayout(self)\n\n self.username_le = QtWidgets.QLineEdit(self)\n self.username_le.returnPressed.connect(self.accept)\n\n if self.askpassword:\n self.formlayout.addRow(\"Användarnamn:\", self.username_le)\n self.password_le = QtWidgets.QLineEdit(self)\n self.password_le.setEchoMode(QtWidgets.QLineEdit.Password)\n self.password_le.returnPressed.connect(self.accept)\n self.formlayout.addRow(\"Lösenord:\", self.password_le)\n else:\n self.formlayout.addRow(context, self.username_le)\n\n self.okbutton = QtWidgets.QPushButton('OK', self)\n self.okbutton.clicked.connect(self.accept)\n\n self.cancelbutton = QtWidgets.QPushButton('Avbryt', self)\n self.cancelbutton.clicked.connect(self.reject)\n\n self.buttonrow = QtWidgets.QHBoxLayout()\n self.buttonrow.addWidget(self.okbutton)\n self.buttonrow.addWidget(self.cancelbutton)\n self.formlayout.addRow(\"\", self.buttonrow)\n\n self.setWindowTitle(context)\n\n self.show()", "def loadForm(self):\n\n formUI = os.path.join(sys.path[0], 'views/acquisition.ui')\n file = QtCore.QFile(formUI)\n file.open(QtCore.QFile.ReadOnly)\n loader = QtUiTools.QUiLoader()\n self.window = loader.load(file)\n layout = QtWidgets.QVBoxLayout()\n layout.addWidget(self.window)\n self.setLayout(layout)", "def _init_ui(self):\n\n hlayout = QtWidgets.QHBoxLayout()\n\n label = QtWidgets.QLabel('Kies een normtraject:')\n\n hlayout.addWidget(label)\n\n self.section_combobox = QtWidgets.QComboBox()\n self.section_combobox.setFixedWidth(60)\n self.section_ids = sorted([''] + io.geometry.import_section_ids(self.datadir))\n self.section_combobox.addItems(self.section_ids)\n\n hlayout.addWidget(self.section_combobox)\n\n self.add_button = QtWidgets.QPushButton('Toevoegen', clicked=self._add_flooddefence)\n\n hlayout.addWidget(self.add_button)\n\n vlayout = QtWidgets.QVBoxLayout()\n vlayout.addLayout(hlayout)\n\n line = QtWidgets.QFrame()\n line.setFrameShape(QtWidgets.QFrame.HLine)\n line.setFrameShadow(QtWidgets.QFrame.Sunken)\n\n vlayout.addWidget(line)\n\n self.close_button = QtWidgets.QPushButton('Sluiten', clicked=self.close)\n vlayout.addWidget(self.close_button, 0, QtCore.Qt.AlignRight)\n\n self.setLayout(vlayout)\n\n self.setWindowTitle(\"HB Havens: normtrajecten\")\n self.setWindowFlags(self.windowFlags() & ~QtCore.Qt.WindowContextHelpButtonHint)", "def __init__(self, *args, **kwargs):\n\n super(FlirCameraWidget, self).__init__(*args, **kwargs)\n #self.loadForm()\n self.window = Ui_MainWindow()\n self.window.setupUi(self)\n\n self.initUI()\n Styles(self)", "def __init__(self):\n super(QTUIProject, self).__init__()\n self.setupUi(self)\n 
self.assignWidgets()\n self.show()\n self.SlotsJsonName = \"Slots Assets Folder\"\n self.BingoJsonName = \"Bingo Assets Folder\"", "def setup_ui(self):\n #super(Dialog, self).__init__()\n self.createFormGroupBox()\n\n buttonBox = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)\n buttonBox.accepted.connect(self.check_input)\n buttonBox.rejected.connect(self.reject)\n\n mainLayout = QVBoxLayout()\n mainLayout.addWidget(self.formGroupBox)\n mainLayout.addWidget(buttonBox)\n self.setLayout(mainLayout)\n\n self.make_connections()\n\n self.setWindowTitle(\"Load Data ...\")", "def _init_ui(self):\n hlayout = QtWidgets.QHBoxLayout()\n\n hlayout.addWidget(QtWidgets.QLabel('Kies een normtraject:'))\n\n self.section_combobox = QtWidgets.QComboBox()\n self.section_combobox.setFixedWidth(60)\n self._update_combobox()\n\n hlayout.addWidget(self.section_combobox)\n\n self.remove_button = QtWidgets.QPushButton('Verwijderen', clicked=self._del_flooddefence)\n hlayout.addWidget(self.remove_button)\n\n vlayout = QtWidgets.QVBoxLayout()\n vlayout.addLayout(hlayout)\n\n line = QtWidgets.QFrame()\n line.setFrameShape(QtWidgets.QFrame.HLine)\n line.setFrameShadow(QtWidgets.QFrame.Sunken)\n\n vlayout.addWidget(line)\n\n self.close_button = QtWidgets.QPushButton('Sluiten', clicked=self.close)\n vlayout.addWidget(self.close_button, 0, QtCore.Qt.AlignRight)\n\n self.setLayout(vlayout)\n\n self.setWindowTitle(\"HB Havens: normtrajecten\")\n self.setWindowFlags(self.windowFlags() & ~QtCore.Qt.WindowContextHelpButtonHint)", "def initialize_new_project(self, flag_new=True):\n logging.debug(\"initialize new project...\")\n\n # self.lbLogoUnito.setVisible(not flag_new)\n self.lbLogoeMOC.setVisible(not flag_new)\n self.dwEthogram.setVisible(flag_new)\n self.dwSubjects.setVisible(flag_new)", "def __init__(self, *args, **kwargs):\n project = kwargs.pop('project', None)\n super(self.__class__, self).__init__(*args, **kwargs)\n # self.fields['parent'].queryset = Task.tree.filter(project = project)\n\n self.fields['parent'].queryset = Task.objects.filter(project=project)\n\n self.fields['type'].queryset = Type.objects.filter(\n is_project_type=True)\n self.fields['owner'].queryset = User.objects.filter(\n is_active=True, is_staff=False).order_by('username')\n\n try:\n self.fields['milestone'].queryset = project.milestone.filter(\n category=Milestone.ENGINEERING)\n\n self.fields['assigned_resources'].queryset = project.team.filter(\n is_active=True, is_staff=False).order_by('username')\n except BaseException:\n self.fields['assigned_resources'].queryset = User.objects.filter(\n is_active=True, is_staff=False).order_by('username')", "def initUI(self):\n # Setting the main layout as Vertical.\n self.mainLayout = QHBoxLayout()\n\n # Create title.\n self.title = QLabel(self.__name + \" : \")\n\n # Add description as tooltip.\n self.title.setToolTip(self.__description)\n\n # Add title to main layout.\n self.mainLayout.addWidget(self.title)\n\n # Create ComboBox.\n self.dropDown = QComboBox()\n\n # Add datas to drop down.\n self.dropDown.addItems(self.__datas)\n\n # Set default index to dropdown.\n self.dropDown.setCurrentIndex(self.__currentValue)\n\n # Connect dropdown with update method.\n self.dropDown.currentIndexChanged.connect(self.changeCurrentValue)\n\n # Add ComboBox to main layout.\n self.mainLayout.addWidget(self.dropDown)\n\n # Add the main layout to the window.\n self.setLayout(self.mainLayout)", "def init_ui(self):\n\n self.master.title(\"Upload file\")\n self.master.geometry(\"300x200\")\n\n 
self.pack(fill=BOTH, expand=1)\n\n self.btn_select_file = Button(self, text=\"Select file\", command=self.on_open)\n self.btn_select_file.place(x=80, y=50)\n\n self.selected_file_name = Label(self, text=\"<Selected file name>\")\n self.selected_file_name.place(x=60, y=90)\n\n self.btn_upload_file = Button(self, text=\"Upload file\", command=self.upload_file)\n self.btn_upload_file.place(x=80, y=130)\n\n self.btn_back = Button(self, text=\"Back\", command=self.go_back)\n self.btn_back.place(x=10, y=10)", "def initGUI(self):\n\n\t\t# Set main frame's location \n\t\tself.grid(row=0, column=0, sticky=\"nsew\")\n\n\t\t# Set path entry frame and its location\n\t\tself.entryFrame = Frame(self, relief = RAISED, borderwidth = 1)\n\t\tself.entryFrame.pack(fill = BOTH, expand = False)\n\t\t# Make label\n\t\tif self.message:\n\t\t\tmessageLabel = Label(self.entryFrame, text = self.message, font=(\"Bradley\", 10))\n\t\t\tmessageLabel.pack(anchor=W, padx=0, pady=0)\n\n\t\t# Set path entry and its location\n\t\tself.filePathEntry = Entry(self.entryFrame, bd = 4, width = 50)\n\t\tself.filePathEntry.pack(side = LEFT, padx=2, pady=1)", "def initcomponentform():\n form = ComponentsForm()\n form.name.value = []\n form.id.value = []\n form.currentstock.value = []\n form.reorderlevel.value = []\n form.unitprice.value = []\n form.supplier.value = []\n form.location.value = []\n form.datasheet.value = []\n return form", "def __init__(self, parent=None):\n super(Dialog, self).__init__(parent)\n self.setupUi(self)", "def init_ui(self):\n self.master.title(\"Backbone\")\n self.master.geometry(\"300x150\")\n\n self.pack(fill=BOTH, expand=1)\n\n self.btn_upload_file = Button(self, text=\"Upload file\", command=self.upload_file)\n self.btn_upload_file.place(x=90, y=10)\n\n self.btn_create_training_file = Button(self, text=\"Create & upload training file\",\n command=self.create_training_file)\n self.btn_create_training_file.place(x=30, y=40)\n\n self.btn_run_algorithm = Button(self, text=\"Run algorithm\", command=self.run_algorithm)\n self.btn_run_algorithm.place(x=80, y=70)\n\n self.btn_view_results = Button(self, text=\"View Results\", command=self.view_results)\n self.btn_view_results.place(x=85, y=100)", "def initialize(self):\n self._ui.img_name.setText('No files selected')\n self._ui.bt_next.setEnabled(False)\n self._ui.bt_prev.setEnabled(False)\n self._ui.bt_right.setEnabled(False)\n self._ui.bt_left.setEnabled(False)\n self._ui.gps_button.setEnabled(False)\n\n self._open_btn = QPushButton('Open File', self._ui.img_label)\n self.adjustSize()", "def init_layout(self):\n\t\tself.pack_start(self.edit, expand=True)\n\t\tself.pack_start(self.button, expand=False)\n\t\tself.show_all()", "def fill_ui(self):\n self.review_type_widget.set_review_type(self.review_type)\n\n if self.reviewer:\n self.reviewer_name_widget.setText(self.reviewer.name)\n\n if self.task:\n self.task_name_widget.setText(\n \"%s (%s) (%s)\"\n % (\n self.task.name,\n \" | \".join(\n [self.task.project.name]\n + [parent_task.name for parent_task in self.task.parents]\n ),\n self.task.id,\n )\n )\n\n # from stalker import Version\n # version = Version.query.filter(Version.task == self.task).order_by(Version.date_created.desc()).first()\n #\n # if version:\n # self.latest_version_widget.version = version", "def _setup_ui(self):\n self.resize(750, 180)\n self.vertical_layout = QtWidgets.QVBoxLayout(self)\n\n # Dialog Label\n self.dialog_label = QtWidgets.QLabel(self)\n self.dialog_label.setText(\"%s Filename Template\" % self.mode)\n 
self.dialog_label.setStyleSheet(\"color: rgb(71, 143, 202);font: 18pt;\")\n self.vertical_layout.addWidget(self.dialog_label)\n\n # Title Line\n line = QtWidgets.QFrame(self)\n line.setFrameShape(QtWidgets.QFrame.HLine)\n line.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.vertical_layout.addWidget(line)\n\n # Form Layout\n self.form_layout = QtWidgets.QFormLayout()\n self.form_layout.setLabelAlignment(\n QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter\n )\n self.vertical_layout.addLayout(self.form_layout)\n\n # ------------------------------------------------\n # Target Entity Type Field\n\n # label\n self.target_entity_type_label = QtWidgets.QLabel(\"Target Entity Type\", self)\n self.form_layout.setWidget(\n 0, QtWidgets.QFormLayout.LabelRole, self.target_entity_type_label\n )\n\n # field\n self.target_entity_type_combo_box = QtWidgets.QComboBox(self)\n self.form_layout.setWidget(\n 0, QtWidgets.QFormLayout.FieldRole, self.target_entity_type_combo_box\n )\n\n # ------------------------------------------------\n # Name Field\n self.name_label = QtWidgets.QLabel(\"Name\", self)\n self.form_layout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.name_label)\n self.name_fields_vertical_layout = QtWidgets.QVBoxLayout()\n self.name_validator_label = QtWidgets.QLabel(self)\n self.name_validator_label.setStyleSheet(\"color: rgb(255, 0, 0);\")\n\n from anima.ui.widgets import ValidatedLineEdit\n\n self.name_line_edit = ValidatedLineEdit(\n self, message_field=self.name_validator_label\n )\n\n self.name_fields_vertical_layout.addWidget(self.name_line_edit)\n self.name_fields_vertical_layout.addWidget(self.name_validator_label)\n self.form_layout.setLayout(\n 1, QtWidgets.QFormLayout.FieldRole, self.name_fields_vertical_layout\n )\n\n # ------------------------------------------------\n # Path Code Field\n self.path_label = QtWidgets.QLabel(\"Path\", self)\n self.form_layout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.path_label)\n\n self.path_line_edit = QtWidgets.QLineEdit(self)\n # set the default value to something useful\n self.form_layout.setWidget(\n 2, QtWidgets.QFormLayout.FieldRole, self.path_line_edit\n )\n\n # ------------------------------------------------\n # Filename Code Field\n self.filename_label = QtWidgets.QLabel(\"Filename\", self)\n self.form_layout.setWidget(\n 3, QtWidgets.QFormLayout.LabelRole, self.filename_label\n )\n\n self.filename_line_edit = QtWidgets.QLineEdit(self)\n self.form_layout.setWidget(\n 3, QtWidgets.QFormLayout.FieldRole, self.filename_line_edit\n )\n\n # ------------------------------------------------\n # Button Box\n self.button_box = QtWidgets.QDialogButtonBox(self)\n self.button_box.setOrientation(QtCore.Qt.Horizontal)\n self.button_box.setStandardButtons(\n QtWidgets.QDialogButtonBox.Cancel | QtWidgets.QDialogButtonBox.Ok\n )\n self.vertical_layout.addWidget(self.button_box)\n self.vertical_layout.setStretch(2, 1)\n\n # ------------------------------------------------\n # Default values\n self.target_entity_type_combo_box.addItems(\n [\"Task\", \"Asset\", \"Shot\", \"Sequence\"]\n )\n self.name_line_edit.set_invalid() # Empty field is not valid\n self.path_line_edit.setText(\n \"$REPO{{project.repository.code}}/{{project.code}}/\"\n \"{%- for parent_task in parent_tasks -%}{{parent_task.nice_name}}\"\n \"/{%- endfor -%}\"\n )\n self.filename_line_edit.setText(\n '{{version.nice_name}}_v{{\"%03d\"|format(version.version_number)}}'\n )\n\n # ------------------------------------------------\n # Disable Fields\n if 
self.mode == \"Update\":\n self.target_entity_type_combo_box.setEnabled(False)\n\n # ------------------------------------------------\n # Signals\n # Name\n QtCore.QObject.connect(\n self.name_line_edit,\n QtCore.SIGNAL(\"textChanged(QString)\"),\n self.name_line_edit_changed,\n )\n\n # Button box\n QtCore.QObject.connect(\n self.button_box, QtCore.SIGNAL(\"accepted()\"), self.accept\n )\n QtCore.QObject.connect(\n self.button_box, QtCore.SIGNAL(\"rejected()\"), self.reject\n )", "def initializeUI(self):\n self.setGeometry(100, 100, 300, 200)\n self.setWindowTitle('Event Handling Example')\n\n self.show()", "def init_ui(self):\n self.parent.title(\"Roku Player Controller\")\n self.style.theme_use(\"default\")", "def __init__(self, *args, **kwargs):\n\n # Les lignes suivantes permettent de modifier les label d'un champ dans la page\n super(ModelForm, self).__init__(*args, **kwargs)\n self.fields[\"nom_de_l_evenement\"].label = \"Nom de l'évènement\"\n self.fields[\"date_de_l_evenement\"].label = \"Date de l'évènement\" # utiliser plutôt l'attribut label comme pour AbonnementEvenementForm\n self.fields[\"fichier\"].label = \"Photo(s)\"", "def init_widget(self):\n self._build_config()\n self._raw_toolbar.initToolbar(self.config)", "def _setup_general_ui(self):\n\n layout = QFormLayout()\n layout.setSpacing(20)\n layout.setVerticalSpacing(20)\n\n self.device_dialog_checkbox = QCheckBox(self)\n device_dialog_label = QLabel(\"Open device dialog on start:\")\n layout.addRow(device_dialog_label,\n self.device_dialog_checkbox)\n\n self.reopen_device_checkbox = QCheckBox(self)\n reopen_device_label = QLabel(\"Reopen device on start(ignores device dialog):\", self)\n layout.addRow(reopen_device_label,\n self.reopen_device_checkbox)\n\n self.use_dutils_checkbox = QCheckBox(self)\n self.use_dutils_label = QLabel(\"Use tiscamera dutils, if present:\", self)\n layout.addRow(self.use_dutils_label,\n self.use_dutils_checkbox)\n\n if not self.enabled_dutils:\n self.use_dutils_label.setToolTip(\"Enabled when tiscamera-dutils are installed\")\n self.use_dutils_label.setEnabled(False)\n self.use_dutils_checkbox.setToolTip(\"Enabled when tiscamera-dutils are installed\")\n self.use_dutils_checkbox.setEnabled(False)\n\n self.general_widget.setLayout(layout)", "def _init_widgets(self):\n # Container frame\n self.container = Frame(self)\n # Workspace block\n self.main_container = Frame(self.container)\n\n self.text = Label(self.main_container)\n self.text.config(text=\"PyEventLogViewer is a timeline-based tool used to simplify the way\\n\"\n \"a user can view and explore Windows EVTX files. 
To begin using this\\n\"\n \"software you must do the following:\\n\\n\"\n \"\\t1) File → New → 'Create a new project'\\n\"\n \"\\t2) Tools → Import Log File → 'Open a specified EVTX file'\\n\"\n \"\\t3) Explore the presented timeline.\\n\"\n \"\\t4) Double-click a specific record to view the XML data for that record.\\n\"\n \"\\t5) File → Export → 'Generate a CSV or HTML file for timeline presentation.'\\n\\n\"\n \"At this point, only System and Security EVTX files are parsable with this software.\")\n\n self.show_var = BooleanVar()\n self.show_check = Checkbutton(self.main_container, text=\"Don't Show on Startup\", variable=self.show_var)\n\n # Action block\n self.button_ok = Button(self.main_container, text='Ok', underline=0, command=self.callback_close)\n self.bind('<Return>', self.callback_close)\n self.bind('<Escape>', self.callback_close)\n\n # Focus on window - required for binds to work.\n self.focus_set()", "def build(self):\n self.main_layout = MainLayout()\n self.main_layout.settings_panel.load_settings()\n return self.main_layout", "def onStart(self):\n self.register_form(JobManager.id, JobManager,\n title=\"JobManager\", shortcut=\"^D\", name=\"JobManager\")\n self.register_form(SystemOverviewForm.id, SystemOverviewForm,\n title=\"System Overview\", shortcut=\"^O\", name=\"System Overview\")\n self.register_form(StateManagerForm.id, StateManagerForm,\n title=\"State Manager\", shortcut=\"^T\", name=\"State Manager\")\n self.register_form(ModuleRunnerForm.id, ModuleRunnerForm,\n title=\"Module Runner\", shortcut=\"^T\", name=\"Module Runner\")\n self.init_forms()", "def show_create_form(self):\n # if there is no add permission then does not show the form\n if not self.has_add_permissions(): return\n\n params = {\n 'title':'Create',\n 'model':self.model,\n 'parent_model':self.parent_model,\n 'parent_pk':self.parent_pk,\n 'parent_win': self\n }\n\n if self.INLINES: params.update({'inlines':self.INLINES})\n if self.FIELDSETS: params.update({'fieldsets':self.FIELDSETS})\n if self.READ_ONLY: params.update({'readonly':self.READ_ONLY})\n\n createform = self.addmodel_class(**params)\n\n if hasattr(self, '_details') and self.USE_DETAILS_TO_ADD:\n self._list.hide()\n self._details.show()\n self._details.value = createform\n toolbar = [self.toolbar] if isinstance(self.toolbar, str) else self.toolbar\n if toolbar:\n for o in toolbar:\n if o and hasattr(self, o):\n getattr(self, o).hide()\n else:\n self._list.show()\n if hasattr(self, '_details'):\n self._details.hide()", "def init_ui(self):\n # Create GUI elements, set them in dict structure\n labelwidth = 150\n\n # Add parameter line edit for Factor Tm to Tp\n self.input_elements['factor Tm Tp'] = widgets.ParameterInputLine(\n label='Factor Tm naar Tp:',\n labelwidth=labelwidth,\n unitlabel='(NVT: Tp aanwezig)' if 'Tp' in self.hydraulic_loads.columns else '',\n validator=QtGui.QDoubleValidator(0.01, 99.99, 20),\n )\n\n if 'Tp' in self.hydraulic_loads.columns or self.parent_tab.step != 'I1':\n self.input_elements['factor Tm Tp'].set_enabled(False)\n\n # Add line edit with browsebutton for Master template\n self.input_elements['mastertemplate'] = widgets.ExtendedLineEdit(\n label='Master template bestand:',\n labelwidth=labelwidth,\n browsebutton=QtWidgets.QPushButton('...', clicked=self.select_master_template)\n )\n\n # Add line edit with browsebutton for depth file\n self.input_elements['depthfile'] = widgets.ExtendedLineEdit(\n label='Bathymetry bestand:',\n labelwidth=labelwidth,\n browsebutton=QtWidgets.QPushButton('...', 
clicked=self.select_bathymetry_file)\n )\n\n # Add line edit with browsebutton for swan result folder\n self.input_elements['swanfolder'] = widgets.ExtendedLineEdit(\n label='SWAN uitvoer folder:',\n labelwidth=labelwidth,\n browsebutton=QtWidgets.QPushButton('...', clicked=self.select_swan_folder)\n )\n\n\n self.setLayout(QtWidgets.QVBoxLayout())\n self.layout().setSpacing(10)\n\n for _, item in self.input_elements.items():\n self.layout().addWidget(item)\n\n line = QtWidgets.QFrame()\n line.setFrameShape(QtWidgets.QFrame.HLine)\n line.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.layout().addWidget(line)\n\n # OK and Cancel buttons\n self.generateButton = QtWidgets.QPushButton('Genereer invoer')\n self.generateButton.setDefault(True)\n self.generateButton.clicked.connect(self.generate)\n\n self.cancelButton = QtWidgets.QPushButton('Annuleren')\n self.cancelButton.setAutoDefault(False)\n self.cancelButton.clicked.connect(self.cancel)\n\n button_box = QtWidgets.QDialogButtonBox(QtCore.Qt.Horizontal, self)\n button_box.addButton(self.generateButton, QtWidgets.QDialogButtonBox.ActionRole)\n button_box.addButton(self.cancelButton, QtWidgets.QDialogButtonBox.RejectRole)\n button_box.accepted.connect(QtWidgets.QDialog.accept)\n\n self.layout().addWidget(button_box)", "def iniciaUI(self):\n\n self.setGeometry(100,100, 300, 200)\n self.setWindowTitle(\"Formulario\")\n self.displayWidgets()\n\n self.show()", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_method = 'post'\n self.helper.layout = Layout(\n 'condition_1',\n 'condition_2',\n 'condition_3',\n 'condition_4',\n 'condition_5',\n 'condition_6',\n 'condition_7',\n 'send_study_results',\n 'condition_8',\n Div(\n HTML(\n '<a class=\"btn btn-secondary\" href={% url \"research:home\" %}>'\n 'I do not agree'\n '</a>'\n ),\n Submit('submit', 'I agree', css_class='btn-success'),\n css_class='d-flex justify-content-around my-5',\n ),\n )", "def init_ui(self):\n # Create GUI elements, set them in dict structure\n labelwidth = 150\n\n # Add parameter line edit for Factor Tm to Tp\n\n # Add line edit with browsebutton for swan result folder\n self.input_elements['hares folder'] = widgets.ExtendedLineEdit(\n label='HARES uitvoerbestanden folder:',\n labelwidth=labelwidth,\n browsebutton=QtWidgets.QPushButton('...', clicked=self.select_hares_folder)\n )\n\n\n self.setLayout(QtWidgets.QVBoxLayout())\n self.layout().setSpacing(10)\n\n for _, item in self.input_elements.items():\n self.layout().addWidget(item)\n\n line = QtWidgets.QFrame()\n line.setFrameShape(QtWidgets.QFrame.HLine)\n line.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.layout().addWidget(line)\n\n # OK and Cancel buttons\n self.generateButton = QtWidgets.QPushButton('Start lezen uitvoerbestanden')\n self.generateButton.setDefault(True)\n self.generateButton.clicked.connect(self.generate)\n\n self.cancelButton = QtWidgets.QPushButton('Annuleren')\n self.cancelButton.setAutoDefault(False)\n self.cancelButton.clicked.connect(self.cancel)\n\n button_box = QtWidgets.QDialogButtonBox(QtCore.Qt.Horizontal, self)\n button_box.addButton(self.generateButton, QtWidgets.QDialogButtonBox.ActionRole)\n button_box.addButton(self.cancelButton, QtWidgets.QDialogButtonBox.RejectRole)\n button_box.accepted.connect(QtWidgets.QDialog.accept)\n\n self.layout().addWidget(button_box)", "def __init__(self, parent=None):\n super(Representative, self).__init__(parent)\n self.setupUi(self)", "def initializeUI(self):\n 
self.setStyleSheet(abstyle)\n self.setGeometry(140, 100, 860, 484)\n self.setWindowTitle('Emotions Data View')\n self.setupModelView()", "def form(self):\n proj = self.project\n if not proj:\n return\n return RPEventCreateForm(proj, owner=self.caller.Dominion)", "def initUI(self):\n self.logger.debug('Setting up the Measurement GUI')\n self.setWindowTitle(self.title)\n\n self.show()\n\n self.make_combobox_scanner()\n self.make_combobox_movements()\n self.make_combobox_configurate()\n self.make_combobox_basic()", "def init_ui(self):\n raise NotImplementedError", "def init_ui(self):\n raise NotImplementedError", "def add_project(request):\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, only store owners can do that.')\n return redirect(reverse('home'))\n\n if request.method == 'POST':\n form = ProjectForm(request.POST, request.FILES)\n if form.is_valid():\n project = form.save()\n messages.success(request, 'Project added successfully!')\n return redirect(reverse('portfolio'))\n else:\n messages.error(request, 'Failed to add project.\\\n # Please ensure the form is valid')\n else:\n form = ProjectForm()\n\n form = ProjectForm()\n template = 'portfolio/add_project.html'\n context = {\n 'form': form,\n }\n\n return render(request, template, context)", "def __init__(self, *args, **kwargs):\n kwargs.pop('widget_syntax')\n\n super(TemplateForm, self).__init__( *args, **kwargs)\n print self.fields", "def advanced_gui(self, master):\r\n\r\n # Turn off polling function\r\n self.newProj.isQuickGenerate = False\r\n self._retLoop = None\r\n\r\n #Remove active widgets from the screen and then clear widget list out\r\n if self.widgetList:\r\n for w in self.widgetList:\r\n w.grid_remove()\r\n del self.widgetList[:]\r\n\r\n osName = platform.system()\r\n\r\n if osName != 'Darwin':\r\n labelFont = 'Arial 9 bold'\r\n else:\r\n labelFont = 'bold'\r\n\r\n ### Widget 0 is a label for padding column 0\r\n self.widgetList.append(Label(self, text=''))\r\n self.widgetList[0].grid(row=0, column=0, sticky=E+W, padx=5)\r\n\r\n ### Widget 1 is a button to return to simple menu\r\n self.widgetList.append(Button(self, text='Return', \\\r\n command=lambda: self.launch_basic(master)))\r\n self.widgetList[1].grid(row=16, column=1, sticky=W)\r\n\r\n ### Widget 2 is a label for the project name text field\r\n self.widgetList.append(Label(self, text='Project Name: ', font=labelFont))\r\n self.widgetList[2].grid(row=0, column=1, sticky=W, pady=(5, 0))\r\n\r\n ### Widget 3 is the text field for project name entry\r\n self.widgetList.append(Entry(self, width=25))\r\n self.widgetList[3].insert(0, self.newProj.name)\r\n self.widgetList[3].grid(row=1, column=1, sticky=W, pady=(0, 0))\r\n\r\n ### Widget 4 is the label for project type\r\n self.widgetList.append(Label(self, text='Project Type:', font=labelFont))\r\n self.widgetList[4].grid(row=2, column=1, sticky=W, pady=(5, 0))\r\n\r\n ### Widget 5 is a radio button for configuring a new project\r\n self.widgetList.append(Radiobutton(self, text='New', variable=self.advancedProjType, \\\r\n value=0))\r\n self.widgetList[5].grid(row=3, column=1, sticky=W)\r\n\r\n ### Widget 6 is a radio button for configuring a cloned project\r\n self.widgetList.append(Radiobutton(self, text='Clone', variable=self.advancedProjType, \\\r\n value=1))\r\n self.widgetList[6].grid(row=3, column=1, sticky=E)\r\n\r\n ### Widget 7 is the label for the device drop down menu\r\n self.widgetList.append(Label(self, text='Device:', font=labelFont))\r\n self.widgetList[7].grid(row=0, column=3, 
sticky=W, pady=(5, 0))\r\n\r\n ### Widget 8 is te drop down menu for the devices\r\n self.pop_adv_devices()\r\n #self.widgetList.append(OptionMenu(self, userDev, *self.localSDK.devList))\r\n self.widgetList.append(Combobox(self, state='readonly'))\r\n self.widgetList[8].config(textvariable=self.advDevSelect)\r\n self.widgetList[8]['values'] = self.localSDK.devList\r\n self.widgetList[8].grid(row=1, column=3, sticky=W, pady=(0, 0))\r\n try:\r\n self.newProj.add_board(self.currBoard, self.localSDK.brdList)\r\n self.widgetList[8].current(self.localSDK.devList.index(self.newProj.device[0]))\r\n except IOError: ## Catch the case where the user hasn't selected anything\r\n self.widgetList[8].current(0)\r\n except ValueError: ## Catch the case where there is no device given in manifest\r\n self.widgetList[8].current(0)\r\n\r\n ### Widget 9 is a label for the library configuration radio buttons\r\n libraryConfigurationWidget = Label(self, text='Library Configuration:', font=labelFont)\r\n self.widgetList.append(libraryConfigurationWidget)\r\n self.widgetList[9].grid(row=4, column=1, sticky=W, columnspan=2)\r\n\r\n ### Widget 10 is a radio button for the library configuration\r\n halOnlyWidget = Radiobutton(self, text='HAL only', variable=self.advancedLibType,value=0)\r\n self.widgetList.append(halOnlyWidget)\r\n self.widgetList[10].grid(row=6, column=1, sticky=W)\r\n\r\n ### Widget 11 is a radio button for the library configuration\r\n platformWidget = Radiobutton(self, text='Platform', variable=self.advancedLibType, value=1)\r\n self.widgetList.append(platformWidget)\r\n self.widgetList[11].grid(row=5, column=1, sticky=W)\r\n\r\n # Set default to select platform library\r\n self.advancedLibType.set(1)\r\n \r\n # in new version there is not hal vs. platform\r\n if self.localSDK.isNewVersion():\r\n libraryConfigurationWidget.grid_remove()\r\n halOnlyWidget.grid_remove()\r\n platformWidget.grid_remove()\r\n\r\n ### Widget 12 is a label for the library configuration radio buttons\r\n self.widgetList.append(Label(self, text='RTOS Configuration:', font=labelFont))\r\n self.widgetList[12].grid(row=7, column=1, sticky=W, columnspan=2)\r\n\r\n ### Widget 13 is a radio button for the library configuration\r\n self.widgetList.append(Radiobutton(self, text='None', variable=self.advancedRtosType, \\\r\n value=0))\r\n self.widgetList[13].grid(row=8, column=1, sticky=W)\r\n\r\n ### Widget 14 is a radio button for the library configuration\r\n mqxWidget = Radiobutton(self, text='MQX', variable=self.advancedRtosType, value=1)\r\n self.widgetList.append(mqxWidget)\r\n mqxWidget.grid(row=9, column=1, sticky=W)\r\n\r\n # in KSDK 2.0 and newer version there is no MQX support so the MQX option has to be removed\r\n # in some older version of KSDK (1.2, 1.3) MQX support is missing so this option has to be removed\r\n if not self.localSDK.isMQXSupported():\r\n mqxWidget.grid_remove()\r\n\r\n\r\n ### Widget 15 is a radio button for the library configuration\r\n freeRTOSWidget = Radiobutton(self, text='FreeRTOS', variable=self.advancedRtosType, value=2)\r\n self.widgetList.append(freeRTOSWidget)\r\n freeRTOSWidget.grid(row=10, column=1, sticky=W)\r\n # if FreeRTOS is not supported in KSDK option should be removed\r\n if not self.localSDK.isFreeRTOSSupported():\r\n freeRTOSWidget.grid_remove()\r\n\r\n ### Widget 16 is a radio button for the library configuration\r\n uCOSIIWidget = Radiobutton(self, text='uC/OS-II', variable=self.advancedRtosType, value=3)\r\n self.widgetList.append(uCOSIIWidget)\r\n 
uCOSIIWidget.grid(row=11, column=1, sticky=W)\r\n if not self.localSDK.isuCOSIISupported():\r\n uCOSIIWidget.grid_remove()\r\n\r\n ### Widget 17 is a radio button for the library configuration\r\n uCOSIIIWidget = Radiobutton(self, text='uC/OS-III', variable=self.advancedRtosType, value=4)\r\n self.widgetList.append(uCOSIIIWidget)\r\n uCOSIIIWidget.grid(row=12, column=1, sticky=W)\r\n if not self.localSDK.isuCOSIIISupported():\r\n uCOSIIIWidget.grid_remove()\r\n\r\n ### Widget 18 is a label for the toolchain check boxes\r\n self.widgetList.append(Label(self, text='Choose Supported Toolchain(s):', font=labelFont))\r\n self.widgetList[18].grid(row=4, column=3, sticky=W, columnspan=2)\r\n\r\n ### Widget 19 is a check box for KDS\r\n kdsOptionWidget = Checkbutton(self, text=kSdk.KDSname, variable=self.advIsKds)\r\n self.widgetList.append(kdsOptionWidget)\r\n self.widgetList[19].grid(row=5, column=3, sticky=W, columnspan=2)\r\n\r\n ### Widget 20 is a check box for IAR\r\n iarOptionWidget = Checkbutton(self, text=kSdk.IARname, variable=self.advIsIar)\r\n self.widgetList.append(iarOptionWidget)\r\n self.widgetList[20].grid(row=6, column=3, sticky=W, columnspan=2)\r\n\r\n ### Widget 21 is a check box for MDK\r\n keilMdkOptionWidget = Checkbutton(self, text=kSdk.keilMDKARMname, variable=self.advIsMdk)\r\n self.widgetList.append(keilMdkOptionWidget)\r\n self.widgetList[21].grid(row=7, column=3, sticky=W, columnspan=2)\r\n\r\n ### Widget 22 is a check box for ATS\r\n atollicOptionWidget = Checkbutton(self, text=kSdk.AtollicStudio, variable=self.advIsAts)\r\n self.widgetList.append(atollicOptionWidget)\r\n self.widgetList[22].grid(row=8, column=3, sticky=W, columnspan=2)\r\n\r\n if not self.localSDK.isToolchainTypeSupported(ToolchainType.KinetisDesignStudio):\r\n kdsOptionWidget.grid_remove()\r\n if not self.localSDK.isToolchainTypeSupported(ToolchainType.IARname):\r\n iarOptionWidget.grid_remove()\r\n if not self.localSDK.isToolchainTypeSupported(ToolchainType.KeilMDK):\r\n keilMdkOptionWidget.grid_remove()\r\n if not self.localSDK.isToolchainTypeSupported(ToolchainType.AtollicStudio):\r\n atollicOptionWidget.grid_remove()\r\n\r\n ### Widget 23 is a check box for GCC\r\n self.widgetList.append(Checkbutton(self, text='GCC Command Line', variable=self.advIsGcc))\r\n self.widgetList[23].grid(row=9, column=3, sticky=W, columnspan=2)\r\n self.widgetList[23].state([\"disabled\"])\r\n self.widgetList[23].grid_remove()\r\n\r\n ### Widget 24 is a label for adding BSP\r\n #self.widgetList.append(Label(self, text='USB and Board Support:', font=labelFont))\r\n boardSupportLabel = Label(self, text='Board Support:', font=labelFont)\r\n self.widgetList.append(boardSupportLabel)\r\n self.widgetList[24].grid(row=10, column=3, sticky=W, columnspan=2, pady=(5, 0))\r\n\r\n ### Widget 25 is a checkbox for adding BSP\r\n includeBSPFilesOption = Checkbutton(self, text='Include BSP files', variable=self.advIsBsp)\r\n self.widgetList.append(includeBSPFilesOption)\r\n self.widgetList[25].grid(row=11, column=3, sticky=W, columnspan=2)\r\n self.widgetList[25].state([\"!disabled\"])\r\n \r\n if self.localSDK.isNewVersion():\r\n boardSupportLabel.grid_remove()\r\n includeBSPFilesOption.grid_remove()\r\n\r\n ### Widget 26 is a label for the output path entry\r\n self.widgetList.append(Label(self, text='Project Parent Directory:', \\\r\n font=labelFont))\r\n self.widgetList[26].grid(row=13, column=1, sticky=W, columnspan=4, pady=(5, 0))\r\n\r\n ### Widget 27 is a text entry for the output path\r\n if self.newProj.osType == 
'Windows':\r\n entryWidth = int(77.0 / WIN_SCALE)\r\n self.widgetList.append(Entry(self, width=entryWidth))\r\n else:\r\n self.widgetList.append(Entry(self, width=71))\r\n self.newProj.workSpace = self.newProj.sdkPath \r\n if self.newProj.osType == 'Windows':\r\n self.newProj.workSpace = kT.string_replace(self.newProj.workSpace, '/', '\\\\')\r\n self.widgetList[27].insert(0, self.newProj.workSpace)\r\n self.widgetList[27].grid(row=14, column=1, sticky=W, columnspan=4)\r\n\r\n ### Widget 28 is a button for browsing to a directory\r\n self.dir_opt['title'] = 'Select the directory you want the project to be generated into. '\r\n self.widgetList.append(Button(self, text='Browse', \\\r\n command=lambda: self.proj_set_directory(False, 27)))\r\n if self.newProj.osType == 'Windows':\r\n self.widgetList[28].grid(row=14, column=5, sticky=E)\r\n else:\r\n self.widgetList[28].grid(row=14, column=4, sticky=E)\r\n\r\n self.widgetList[28].state([\"disabled\"])\r\n\r\n ### Widget 29 is a checkbox for making a standalone project\r\n self.widgetList.append(Checkbutton(self, text='Generate standalone project', \\\r\n variable=self.advIsStandalone))\r\n self.widgetList[29].grid(row=15, column=1, sticky=W, columnspan=2, pady=5)\r\n\r\n ### Widget 30 is a help button\r\n self.widgetList.append(Button(self, text='Help', \\\r\n command=lambda: self.advanced_help(master, (Constants.ADV_HELP if self.localSDK.isNewVersion() else ADV_HELP))))\r\n if self.newProj.osType == 'Windows':\r\n self.widgetList[30].grid(row=1, column=5, sticky=E, pady=(0, 0))\r\n else:\r\n self.widgetList[30].grid(row=1, column=4, sticky=E, pady=(0, 0))\r\n #self.widgetList[30].state([\"disabled\"])\r\n\r\n ### Widget 31 is a button to generate the project\r\n if self.newProj.osType == 'Windows':\r\n style = Style()\r\n style.configure(\"Bold.TButton\", font='system 8 bold')\r\n self.widgetList.append(Button(self, text='Advanced Generate!', style=\"Bold.TButton\", \\\r\n command=lambda: self.package_select(master)))\r\n self.widgetList[31].grid(row=16, column=4, sticky=E+W+N+S, rowspan=2, columnspan=2)\r\n else:\r\n self.widgetList.append(Button(self, text='Advanced Generate!',\\\r\n command=lambda: self.package_select(master)))\r\n self.widgetList[31].grid(row=16, column=3, sticky=E+N+S, rowspan=2, columnspan=2)\r\n self.widgetList[31].state([\"!disabled\"])\r\n\r\n ### Widget 32 is a label for padding row 13\r\n self.widgetList.append(Label(self, text='', font=labelFont))\r\n self.widgetList[32].grid(row=0, column=6, sticky=E+W, padx=5)\r\n\r\n ### Widget 33 is a label for explaining the return button\r\n self.widgetList.append(Label(self, text='Click here to return to previous menu.'))\r\n self.widgetList[33].grid(row=17, column=1, columnspan=3, sticky=W)\r\n\r\n ### Widget 34 is a checkbox for adding USB\r\n self.widgetList.append(Checkbutton(self, text='Include USB', variable=self.advIsUsb))\r\n self.widgetList[34].grid(row=12, column=3, sticky=W, columnspan=2)\r\n self.widgetList[34].state([\"disabled\"])\r\n self.widgetList[34].grid_remove()\r\n\r\n ### Widget 35 is a radio button for configuring a new project\r\n self.widgetList.append(Radiobutton(self, text='Device', variable=self.advancedDevType, \\\r\n value=0))\r\n self.widgetList[35].grid(row=3, column=3, sticky=W)\r\n\r\n ### Widget 36 is a radio button for configuring a cloned project\r\n self.widgetList.append(Radiobutton(self, text='Board', variable=self.advancedDevType, \\\r\n value=1))\r\n self.widgetList[36].grid(row=3, column=3, sticky=E)\r\n\r\n ### Widget 37 is the 
label for project type\r\n self.widgetList.append(Label(self, text='Device or Board:', font=labelFont))\r\n self.widgetList[37].grid(row=2, column=3, sticky=W, pady=(5, 0))\r\n\r\n self.poll_advanced()\r\n \r\n #update project to set correct supported tools, path etc.\r\n self.update_proj()\r\n return", "def initUI(self):\n \n self.setWindowTitle(\"Intecol Flir camera\")\n self.setGeometry(300, 100, 1012, 622)", "def get_form(self):\n # setup request layer\n self.request = TestRequest()\n # get add view\n form = getMultiAdapter((self.experiments, self.request),\n name=\"newProjection\")\n # update the form once to initialise all widgets\n form.update()\n # go through all widgets on the form and update the request with default values\n data = {}\n for widget in form.widgets.values():\n data[widget.name] = widget.value\n data.update({\n 'form.widgets.IDublinCore.title': u\"My CC Experiment\",\n 'form.widgets.IDublinCore.description': u'This is my experiment description',\n 'form.widgets.species_distribution_models': unicode(self.sdmexp.UID()),\n 'form.widgets.species_distribution_models.model': [unicode(self.sdmmodel.UID())],\n 'form.widgets.future_climate_datasets': [unicode(self.future.UID())]\n })\n self.request.form.update(data)\n form = getMultiAdapter((self.experiments, self.request),\n name=\"newProjection\")\n return form", "def _form(self, r, widget, **attr):\n\n widget_get = widget.get\n\n label = widget_get(\"label\", \"\")\n # Activate if-required\n #if label and isinstance(label, str):\n if label:\n label = current.T(label)\n icon = widget_get(\"icon\", \"\")\n if icon:\n icon = ICON(icon)\n\n context = widget_get(\"context\", None)\n tablename = widget_get(\"tablename\", None)\n resource, context = self._resolve_context(r, tablename, context)\n\n # Widget filter option\n widget_filter = widget_get(\"filter\", None)\n if widget_filter:\n resource.add_filter(widget_filter)\n\n record = resource.select([\"id\"], limit=1, as_rows=True).first()\n if record:\n record_id = record.id\n else:\n record_id = None\n\n if record_id:\n readonly = not current.auth.s3_has_permission(\"update\", tablename, record_id)\n else:\n readonly = not current.auth.s3_has_permission(\"create\", tablename)\n\n sqlform = widget.get(\"sqlform\", None)\n if not sqlform:\n sqlform = resource.get_config(\"crud_form\")\n if not sqlform:\n from ..ui import S3SQLDefaultForm\n sqlform = S3SQLDefaultForm()\n\n get_config = current.s3db.get_config\n if record_id:\n # Update form\n onvalidation = get_config(tablename, \"create_onvalidation\") or \\\n get_config(tablename, \"onvalidation\")\n onaccept = get_config(tablename, \"create_onaccept\") or \\\n get_config(tablename, \"onaccept\")\n else:\n # Create form\n onvalidation = get_config(tablename, \"create_onvalidation\") or \\\n get_config(tablename, \"onvalidation\")\n onaccept = get_config(tablename, \"create_onaccept\") or \\\n get_config(tablename, \"onaccept\")\n\n form = sqlform(request = r,\n resource = resource,\n record_id = record_id,\n readonly = readonly,\n format = \"html\",\n onvalidation = onvalidation,\n onaccept = onaccept,\n )\n _class = self._lookup_class(r, widget)\n\n # Render the widget\n output = DIV(H4(icon,\n label,\n _class = \"profile-sub-header\",\n ),\n DIV(form,\n _class = \"form-container thumbnail\",\n ),\n _class = _class,\n )\n\n return output", "def ui_setup(self):\n loader = QUiLoader()\n file = QFile('./user_interface/form/main_window.ui')\n file.open(QFile.ReadOnly)\n self._window = loader.load(file)\n file.close()\n\n 
status_bar = QStatusBar(self._window)\n status_bar.showMessage(__copyright__)\n self._window.setStatusBar(status_bar)\n self._window.setWindowIcon(QIcon('./user_interface/media/bucketing_icon.jpeg'))\n self._window.setWindowTitle('PySide2 Project - Basic UI Framework')\n\n self._option_panel = OptionPanel()\n self._option_panel.add_button('DekBan', './user_interface/media/dekban.png')\n self._option_panel.add_button('Charlie', './user_interface/media/charlie.jpeg')\n self._option_panel.add_button('Simon', './user_interface/media/Simon.jpeg')\n\n # Add widget to main layout\n main_layout = self._window.main_layout\n main_layout.itemAtPosition(0, 0).setAlignment(QtCore.Qt.AlignCenter)\n main_layout.itemAtPosition(0, 1).setAlignment(QtCore.Qt.AlignVCenter)\n main_layout.addWidget(self._option_panel, 2, 0, 1, 1)\n\n # Add page widget to stack\n self._pages['item'] = ItemWidget()\n self._pages['text1'] = TextPage(text=PAUSE_TEXT)\n self._pages['text2'] = TextPage(text=STOP_TEXT)\n\n for index, name in enumerate(self._pages):\n print('pages {} : {} page'.format(index, name))\n self._window.widget_stack.addWidget(self._pages[name].widget)\n\n self._window.widget_stack.setCurrentIndex(0)\n\n # Build up signal / slot\n self._option_panel.currentItemChanged.connect(self.set_page)", "def __init__(self, *args, **kwargs):\n super(AppswellSimpleModelForm, self).__init__(*args, **kwargs)\n\n # override labels\n self.fields['message'].label = 'log message'", "def init_UI(self):\n\n self.master.title(\"Search for different companies\")\n self.master.geometry(\"400x400\")\n\n self.label_combobox = Label(self, text=\"Search by\")\n self.label_combobox.pack()\n\n self.combo_searching_options = Combobox(self, state=\"readonly\")\n self.combo_searching_options['values'] = self.combobox_values\n self.combo_searching_options.pack()\n\n self.label_input = Label(self, text=\"Entry the value\")\n self.label_input.pack()\n\n self.user_input = Entry(self, width=40)\n self.user_input.pack()\n\n self.btn_submit = Button(self, text=\"Submit\", command=self.submit)\n self.btn_submit.pack()\n\n self.text_area = scrolledtext.ScrolledText(self)\n self.text_area.pack()\n\n sys.stdout = RedirectOutputText(self.text_area)\n\n self.btn_back = Button(self, text=\"Back\", command=self.go_back)\n self.btn_back.pack()", "def buildPage(self):\n Users = [(u['name']) for u in driver.nodes.match(\"User\")]\n Tissues = [(t['name']) for t in driver.nodes.match(\"Tissue\")]\n Diseases = [(d['name']) for d in driver.nodes.match(\"Disease\")]\n self.add_basic_layout()\n layout = [html.Div([\n html.Div([html.H4('Project information', style={'width': '15.5%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.H4('', id='update_project_id', style={'width': '15%', 'verticalAlign': 'top', 'display': 'none'}),\n html.Br(),\n html.Div(children=[html.Label('Project name:*', style={'marginTop': 15}),\n dcc.Input(id='project name', placeholder='Insert name...', type='text', style={'width': '100%', 'height': '35px'})],\n style={'width': '100%'}),\n html.Br(),\n html.Div(children=[html.Label('Project Acronym:', style={'marginTop': 15}),\n dcc.Input(id='project acronym', placeholder='Insert name...', type='text', style={'width': '100%', 'height': '35px'})],\n style={'width': '100%'}),\n html.Br(),\n html.Div(children=[html.Label('Project Responsible:*', style={'marginTop': 15})],\n style={'width': '49%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Div(children=[html.Label('Project Participants:*', style={'marginTop': 15})],\n 
style={'width': '49%', 'marginLeft': '2%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Div(children=[dcc.Dropdown(id='responsible-picker', options=[{'label': i, 'value': i} for i in Users], value=[], multi=True, searchable=True, style={'width': '100%'})],\n style={'width': '49%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Div(children=[dcc.Dropdown(id='participant-picker', options=[{'label': i, 'value': i} for i in Users], value=[], multi=True, searchable=True, style={'width': '100%'})],\n style={'width': '49%', 'marginLeft': '2%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Br(),\n html.Br(),\n html.Div(children=[html.Label('Project Data Types:*', style={'marginTop': 10})],\n style={'width': '49%', 'marginLeft': '0%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Div(children=[html.Label('Project Disease:*', style={'marginTop': 10})],\n style={'width': '49%', 'marginLeft': '2%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Div(children=[dcc.Dropdown(id='data-types-picker', options=[{'label': i, 'value': i} for i in DataTypes], value=[], multi=True, searchable=True, style={'width': '100%'})],\n style={'width': '49%', 'marginLeft': '0%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Div(children=[dcc.Dropdown(id='disease-picker', options=[{'label': i, 'value': i} for i in Diseases], value=[], multi=True, searchable=True, style={'width': '100%'})],\n style={'width': '49%', 'marginLeft': '2%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Br(),\n html.Br(),\n html.Div(children=[html.Label('Project Tissue:*', style={'marginTop': 10})],\n style={'width': '49%', 'marginLeft': '0%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Div(children=[html.Label('Project Intervention:', style={'marginTop': 10})],\n style={'width': '49%', 'marginLeft': '2%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Div(children=[dcc.Dropdown(id='tissue-picker', options=[{'label': i, 'value': i} for i in Tissues], value=[], multi=True, searchable=True, style={'width': '100%'})],\n style={'width': '49%', 'marginLeft': '0%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Div(children=[dcc.Input(id='intervention-picker', placeholder='E.g. SNOMED identifier|SNOMED identifier|...', type='text', style={'width': '100%', 'height': '54px'})],\n style={'width': '49%', 'marginLeft': '2%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Br(),\n html.Br(),\n html.Div(children=[html.Label('Number of subjects:*', style={'marginTop': 15})],\n style={'width': '49%', 'marginLeft': '0%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Div(children=[html.Label('Timepoints:', style={'marginTop': 15})],\n style={'width': '49%', 'marginLeft': '2%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Div(children=[dcc.Input(id='number_subjects', placeholder='E.g. 77 (each unique patient counts as 1 subject)', type='text', style={'width': '100%', 'height': '35px'})],\n style={'width': '49%', 'marginLeft': '0%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Div(children=[dcc.Input(id='number_timepoints', placeholder='E.g. 
2 months|15 days|24 hours...', type='text', style={'width': '100%', 'height': '35px'})],\n style={'width': '49%', 'marginLeft': '2%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Br(),\n html.Br(),\n html.Div(children=[html.Label('Follows up project:', style={'marginTop': 15}),\n dcc.Input(id='related_to', placeholder='Use the Project Identifier (P000000X)', type='text', style={'width': '100%', 'height': '35px'})],\n style={'width': '49%', 'marginLeft': '0%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Br(),\n html.Br(),\n html.Div(children=[html.Label('Project Description:', style={'marginTop': 15}),\n dcc.Textarea(id='project description', placeholder='Enter description...', style={'width': '100%', 'height': '100px'})]),\n html.Br(),\n html.Div(children=[html.Label('Starting Date:', style={'marginTop': 10}),\n dcc.DatePickerSingle(id='date-picker-start', placeholder='Select date...', clearable=True)],\n style={'width': '30%', 'verticalAlign': 'top', 'marginTop': 10, 'display': 'inline-block'}),\n html.Div(children=[html.Label('Ending Date:', style={'marginTop': 10}),\n dcc.DatePickerSingle(id='date-picker-end', placeholder='Select date...', clearable=True)],\n style={'width': '30%', 'verticalAlign': 'top', 'marginTop': 10, 'display': 'inline-block'}),\n html.Div(children=html.Button('Create Project', id='project_button', n_clicks=0, className=\"button_link\",\n style={'fontSize': '25px'}), style={'width': '100%', 'padding-left': '87%', 'padding-right': '0%'}),\n html.Br(),\n html.Div(children=[html.A(children=html.Button('Download Clinical Data template', id='download_button', n_clicks=0,\n style={'fontSize': '16px', 'display': 'block'}),\n id='download_link', href='', n_clicks=0)], style={'width': '100%', 'padding-left': '87%', 'padding-right': '0%'}),\n html.Br(),\n html.Div(id='project-creation', style={'fontSize': '20px', 'marginLeft': '70%'}),\n html.Br()]),\n html.Hr()])]\n\n self.extend_layout(layout)", "def _init_ui(self):\r\n\t\t\r\n\t\tself.input_frame = Input(self)\r\n\t\tself.input_frame.pack()\r\n\t\t\r\n\t\tbutton_ok = Button(self, text = \"Ping\", command = self._go)\r\n\t\tbutton_ok.pack()\r\n\t\t\r\n\t\tself.result_frame = Result(self)\r\n\t\tself.result_frame.pack()", "def init(self):\n sg.theme(gui.app_theme)\n self.window = sg.Window(\n gui.app_title,\n gui.create_layout(),\n **gui.window_config,\n )\n gui.after_window_init(self.window)", "def init_gui(self):\n # Choose a layout.\n main_vb = QtGui.QVBoxLayout(self)\n\n # Add a list or tree view.\n self.list_view = QtGui.QListWidget()\n\n # Add the buttons.\n load_btn = QtGui.QPushButton('Load Selected')\n cancel_btn = QtGui.QPushButton('Cancel')\n load_btn.clicked.connect(self.update_list_view)\n cancel_btn.clicked.connect(self.close)\n\n # Connect the list/tree view with a method appropriate for user interaction.\n self.list_view.currentItemChanged['QListWidgetItem*', 'QListWidgetItem*'].connect(self.set_current_name)\n self.list_view.itemChanged['QListWidgetItem*'].connect(self.change_name)\n\n # Add the widgets to the layout.\n btn_hb = QtGui.QHBoxLayout()\n btn_hb.addWidget(load_btn)\n btn_hb.addWidget(cancel_btn)\n main_vb.addWidget(self.list_view)\n main_vb.addLayout(btn_hb)\n\n # Show the GUI.\n self.setGeometry(300, 300, 450, 300)\n self.setWindowTitle('Hello World')\n img_icon = 'C:/Users/caj150430/code/so_much_win.png'\n self.setWindowIcon(QtGui.QIcon(img_icon))\n self.show()", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {\n 
'business_name': 'Please enter your business name',\n 'user_type': 'Please select the type of user',\n 'phone': 'Phone Number',\n 'postcode': 'Postcode',\n 'city': 'City',\n 'street_address': 'Street Address',\n 'street_address2': 'Street Address 2',\n 'county': 'County',\n 'country': 'Country'\n }\n\n # to force cursor to start in business name field\n self.fields['business_name'].widget.attrs['autofocus'] = True\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = False\n self.fields[field].label = placeholder\n self.fields[field].widget.attrs['class'] = 'form-control'", "def __init__(self, parent=None):\n super(Inj, self).__init__(parent)\n self.setupUi(self)", "def init_layout(self):\n pass", "def InitUI(self):\n\t\tself._initMenuBar()\n\t\tself._initLayout()\n\t\t\n\t\t# Bindings\n\t\tself.Bind(wx.EVT_BUTTON, self.OnButtonClicked)\n\t\t\n\t\t# We can't even start without an input file\n\t\tself.OnOpen(None)", "def _show_project_list_panel(self):\n self.project_list_panel.show()\n self.project_list_panel.load_config()\n # set geometry\n _button_pos = self.project_frame.projectlist_button.pos()\n _button_pos = self.project_frame.mapTo(self, _button_pos)\n _button_height = self.project_frame.projectlist_button.height()\n _glo_pos = self.mapTo(tomaya.GetMayaMainWindowPoint(), _button_pos)\n self.project_list_panel.setGeometry(_glo_pos.x(), _glo_pos.y() + _button_height, self.width()*1/2.0, tomaya.GetMayaMainWindowPoint().height()*1/2.0)", "def show(cls, context: DataContext, project: ResearchProject, parent):\n dialog = cls(context, project, parent)\n dialog.exec_()", "def __init__(self):\n Form, Window = uic.loadUiType(\"Visuals/QtFiles/ConfirmationMenu.ui\")\n self.window = Window()\n self.form = Form()\n self.form.setupUi(self.window)\n self.centre = self.window.findChild(QWidget, \"centralwidget\")", "def __init__(self, *args):\n super(DefaultSystem, self).__init__(*args)\n self.use_strong_form = True\n self.main = self.cavity_grid.main\n self.cavities = self.cavity_grid.cavities", "def setup(self):\n self.ui.setup_window()", "def populateUI():\n \n # Main form layout\n form = cmds.formLayout()\n\n # Tab Layout\n tabs = cmds.tabLayout(innerMarginWidth=5, innerMarginHeight=5)\n # Form attachment config\n cmds.formLayout( form, edit=True, attachForm=((tabs, 'top', 0), (tabs, 'left', 0), (tabs, 'bottom', 0), (tabs, 'right', 0)) )\n\n # The different Tabs on the window\n spawnTab = SpawnObjectsTab()\n roadTab = RoadRiverTab()\n environmentTab = EnvironmentTab()\n\n # Tab creation\n cmds.tabLayout( tabs, edit=True, tabLabel=((spawnTab, 'Spawn Buildings'), (roadTab, 'Create Roads / Rivers'), (environmentTab, \"Create Environment\") ))", "def __init__(self, parent, rokucontrol=None, is_error=False, error_message=''):\n tk.Frame.__init__(self, parent, background=\"black\")\n self.style = ttk.Style()\n self.parent = parent\n self.rokucontrol = rokucontrol\n\n self.init_ui()\n\n if is_error:\n self.place_error_message(error_message)\n else:\n self.place_buttons()\n self.place_app_buttons()", "def __init__(self):\n self.view = GuiView(self)\n return", "def set_ui(self):\r\n\r\n self.canvas = tk.Canvas(self)\r\n self.canvas.pack()\r\n\r\n self.entry = ttk.Entry(self.canvas, justify=\"center\", font=(\"Calibri\", 12))\r\n\r\n self.grid = Grid(self.canvas)", "def make_form(self):", "def __init__(self, *args, **kwargs):\n\n\t\tsuper(AssonaForm, self).__init__(*args, **kwargs)\n\n\t\tfor field in self.fields:\n\t\t\tif field == 
'versicherungskarte':\n\t\t\t\tself.fields[field].widget.attrs.update({\n\t\t\t\t\t'class': 'form-control-file'\n\t\t\t\t})\n\t\t\telse:\n\t\t\t\tself.fields[field].widget.attrs.update({\n\t\t\t\t\t'class': 'form-control'\n\t\t\t\t})", "def initUI(self):\n\n self.wid = RosGenWidget()\n self.setCentralWidget(self.wid)\n menubar = self.menuBar()\n fileMenu = menubar.addMenu('&Файл')\n editMenu = menubar.addMenu('&Редактирование')\n self.create_menu_par('Менеджер подписчиков и издателей', self.wid.show_manager, fileMenu, 'Ctrl+M')\n self.create_menu_par('Очистить', self.wid.clear_all_lines, editMenu, 'Ctrl+D')\n self.create_menu_par('Загрузить данные из...', self.wid.open_fileDialog, fileMenu, 'Ctrl+F')\n self.create_menu_par('Сохранить как...', self.wid.save_fileDialog, fileMenu, 'Ctrl+S')\n self.create_menu_par('Выход', self.exit_app, fileMenu, 'Esc')\n self.statusbar = self.statusBar()\n self.statusbar.showMessage('Ожидание данных')\n self.wid.msg2Statusbar[str].connect(self.statusbar.showMessage)\n self.setGeometry(600, 200, 700, 400)\n self.setWindowTitle('Генератор шаблонов ROS-приложения')\n self.show()", "def _initDialog(self):\n\n # ===== Configure focus policy ===== #\n self.setFocusPolicy(Qt.NoFocus)\n self.setFocus(False)\n\n # ===== Create movie parameters ===== #\n movieLabel = QLabel()\n self._movie = QMovie('./icons/loading.gif')\n movieLabel.setMovie(self._movie)\n\n # ===== Create layout ===== #\n layout = QVBoxLayout()\n layout.setContentsMargins(0, 0, 0, 0)\n layout.setSpacing(0)\n layout.addWidget(movieLabel)\n self.setLayout(layout)", "def _initialize_project_variables(self):\n self.Source = ''\n self.Regional = ''\n self.Vernacular = ''\n self.Fallback = dict()\n self.New_Target = dict()\n self.Biblical_Terms = dict()\n self.Old_Target = dict()\n\n# self.list_projects = []\n# self.project_lines = []\n# self.indent = 0\n# self.Treed = False\n self.root = etree.Element('root')\n# #add child 'settings', all user configurable bits under here\n self.settings = etree.SubElement(self.root, \"settings\")\n# self.old_mode = dict()\n# self.spreferred = etree.SubElement(self.settings, \"preferred\")\n# self.smode = etree.SubElement(self.settings, \"mode\")\n# self.stemp = etree.SubElement(self.settings, \"template\")\n self.sf0 = etree.SubElement(self.settings, \"f0\")\n self.sf1 = etree.SubElement(self.settings, \"f1\")\n self.sf2 = etree.SubElement(self.settings, \"f2\")\n self.trout = etree.SubElement(self.root, \"tree\")", "def __init__(self, *args, **kwargs):\n super(JobSearchForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_class = 'form-inline'\n self.helper.form_method = 'get'\n self.helper.add_input(Submit('submit', 'Search'))", "def __init__(__self__, *,\n project: Optional[pulumi.Input[str]] = None):\n if project is not None:\n pulumi.set(__self__, \"project\", project)", "def project():\n\n ADMIN = current.session.s3.system_roles.ADMIN\n\n menu = M(c=\"project\")(\n M(\"Projects\", f=\"project\", m=\"summary\")(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Locations\", f=\"location\")(\n M(\"Map\", m=\"map\"),\n M(\"Contacts\", f=\"location_contact\"),\n ),\n M(\"Reports\", f=\"location\", m=\"report\")(\n M(\"3W\", f=\"location\", m=\"report\"),\n M(\"Beneficiaries\", f=\"beneficiary\", m=\"report\"),\n #M(\"Indicators\", f=\"indicator\", m=\"report\",\n # check=indicators,\n # ),\n #M(\"Indicators over Time\", f=\"indicator\", m=\"timeplot\",\n # check=indicators,\n # ),\n M(\"Funding\", f=\"organisation\", m=\"report\"),\n ),\n 
M(\"Import\", f=\"project\", m=\"import\", p=\"create\", restrict=[ADMIN])(\n M(\"Import Projects\", m=\"import\", p=\"create\"),\n M(\"Import Project Organizations\", f=\"organisation\",\n m=\"import\", p=\"create\"),\n M(\"Import Project Communities\", f=\"location\",\n m=\"import\", p=\"create\"),\n ),\n M(\"Activity Types\", f=\"activity_type\", restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Beneficiary Types\", f=\"beneficiary_type\", restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Sectors\", f=\"sector\", restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Themes\", f=\"theme\", restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n )\n\n return menu", "def _construct_form(self, i, **kwargs):\n defaults = {'auto_id': self.auto_id, 'prefix': self.add_prefix(i)}\n if self.is_bound:\n defaults['data'] = self.data\n defaults['files'] = self.files\n if self.initial:\n try:\n defaults['initial'] = self.initial[i]\n except IndexError:\n pass\n # Allow extra forms to be empty.\n if i >= self.initial_form_count():\n defaults['empty_permitted'] = True\n defaults.update(kwargs)\n form = self.form(self.params[len(self.params) - i - 1][1], self.params[len(self.params) - i - 1][0], i, **defaults) #passando o params[i] para o form[i]\n self.add_fields(form, i)\n return form", "def __init__(self):\r\n super().__init__()\r\n self.init_ui()", "def __init__(self, *args, **kwargs):\n super(SignupForm, self).__init__(*args, **kwargs)\n self.fields['email'].required = True\n self.fields['first_name'].required = True\n self.fields['password'].widget = forms.PasswordInput() \n\n for field in self.fields:\n self.fields[field].widget.attrs.update(\n {\n 'class': 'form-control',\n }\n )", "def setup_ui(self):\n # Creation du layout\n self.layout = QtWidgets.QVBoxLayout(self)\n # Creation des widgets\n self.le_movieTitle = QtWidgets.QLineEdit()\n self.btn_addMovie = QtWidgets.QPushButton(\"Ajouter un film\")\n self.lw_movies = QtWidgets.QListWidget()\n self.lw_movies.setSelectionMode(QtWidgets.QListWidget.ExtendedSelection)\n self.btn_removeMovies = QtWidgets.QPushButton(\"Supprimer le(s) film(s)\")\n # Ajout des widgets dans le layout\n self.layout.addWidget(self.le_movieTitle)\n self.layout.addWidget(self.btn_addMovie)\n self.layout.addWidget(self.lw_movies)\n self.layout.addWidget(self.btn_removeMovies)", "def __init__(self, *args, **kwargs):\n\n\t\tsuper(CustomStatusFormset, self).__init__(*args, **kwargs)\n\n\t\tfor form in self.forms:\n\t\t\tfor field in form.fields:\n\t\t\t\tform.fields[field].widget.attrs.update({'class': 'form-control'})", "def __init__(self, *args, **kwargs):\n super(UserJobSearchForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_class = 'form-horizontal'\n self.helper.form_method = 'get'\n self.helper.add_input(Submit('submit', 'Search'))", "def build(self):\n self.title = 'Processamento Digital de Imagens'\n self.main_layout = MainLayout()\n return self.main_layout", "def _initialize_main_window(self, lang='en-US'):\n\n self._initialize_main_window_menu(lang)\n self.f_1 = Frame(self)\n self.f_1.grid(column=0, row=0, sticky='news')\n self.f_1.grid_rowconfigure(0, weight=0)\n self.f_1.grid_columnconfigure(0, weight=0)\n\n #in top of window\n self.btnSaveProject = Button(self.f_1, \\\n text=LOCALIZED_TEXT[lang][\"Save\"], \\\n command=self._on_save_project)\n self.btnSaveProject.grid(column=0, row=0, padx=5, pady=5, sticky='e')\n self.btnSaveProject['state'] = 'disabled'\n self.btnSaveProject_ttp = 
CreateToolTip(self.btnSaveProject, \\\n LOCALIZED_TEXT[lang]['Save_ttp'])\n self.lblProject = Label(self.f_1, text=\\\n LOCALIZED_TEXT[lang]['Current Project>'], \\\n width=50)\n self.lblProject.grid(column=1, row=0, columnspan=2, padx=5, pady=5, \\\n sticky='ew')\n self.lblProject['justify'] = 'left'\n\n self.lblGuiLanguage = Label(self.f_1, \\\n text=LOCALIZED_TEXT[lang]['Interface language>'])\n self.lblGuiLanguage.grid(column=4, row=0, padx=5, pady=5, sticky='e')\n self.lblGuiLanguage['justify'] = 'right'\n # Create and fill the dropdown ComboBox.\n self.ddnGuiLanguage = Combobox(self.f_1, \\\n textvariable=self.selected_lang)\n self.ddnGuiLanguage.grid(column=5, columnspan=1, row=0, \\\n padx=5, pady=5, sticky='w')\n self.ddnGuiLanguage['text'] = 'Interface language:'\n self.ddnGuiLanguage['justify'] = 'left'\n self.ddnGuiLanguage.bind('<<ComboboxSelected>>', self._change_lang)\n self.ddnGuiLanguage['values'] = [INTERFACE_LANGS['langs'][k] \\\n for k in sorted(INTERFACE_LANGS['langs'])]\n self.ddnGuiLanguage.set(INTERFACE_LANGS['langs']['0'])\n\n #assumes tab based interface\n #main frame holds gui interface lange pull down, lists current project,\n #and save settings button\n self._initialize_main_window_notebook(lang)\n self.progbar = Progressbar(self, maximum=100, variable=self.int_var)\n self.progbar.grid(column=0, row=6, columnspan=8, padx=5, pady=5, \\\n sticky='news')\n self.status = Label(self, text=LOCALIZED_TEXT[lang]['empty string'], \\\n anchor='w', justify='left')\n self.status.grid(column=0, row=7, columnspan=8, padx=5, pady=5, \\\n sticky='news')", "def initUI(self):\n centralwidget = QtWidgets.QWidget()\n self.setCentralWidget(centralwidget)\n\n person_table = PersonTable(self)\n person_table.embed(self.sql)\n\n # Box Layout to organize our GUI\n lay = QtWidgets.QVBoxLayout(centralwidget)\n lay.addWidget(person_table)\n self.setGeometry(0, 0, person_table.width() + 20, person_table.height() + 20)\n self.person_table = person_table\n self.show()", "def _initUI(self):\n\n vlayout = QtWidgets.QVBoxLayout()\n\n # Description\n #----------------------------------------------------------------\n hlayout = QtWidgets.QHBoxLayout()\n\n label = QtWidgets.QLabel()\n label.setText('Locatie:')\n label.setFixedWidth(100)\n hlayout.addWidget(label)\n\n label = QtWidgets.QLabel()\n label.setText(self.name)\n hlayout.addWidget(label)\n hlayout.setSpacing(10)\n\n vlayout.addLayout(hlayout)\n\n # Exportnaam\n #----------------------------------------------------------------\n self.exportname = ParameterInputLine(label='Exportnaam:', labelwidth=100)\n self.exportname.LineEdit.setMinimumWidth(200)\n vlayout.addLayout(self.exportname.layout)\n\n # Exportdatabase\n #----------------------------------------------------------------\n self.exportpath = ExtendedLineEdit(label='SQLite-database:', labelwidth=100, browsebutton=True)\n self.exportpath.BrowseButton.clicked.connect(self._get_path_database)\n vlayout.addLayout(self.exportpath.layout)\n\n # Line\n line = QtWidgets.QFrame()\n line.setFrameShape(QtWidgets.QFrame.HLine)\n line.setFrameShadow(QtWidgets.QFrame.Sunken)\n\n vlayout.addWidget(line)\n\n # Buttons\n #----------------------------------------------------------------\n hbox = QtWidgets.QHBoxLayout()\n hbox.addItem(QtWidgets.QSpacerItem(0, 0, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Maximum))\n # Add ok/close\n self.closebutton = QtWidgets.QPushButton('Sluiten')\n self.closebutton.clicked.connect(self.close)\n hbox.addWidget(self.closebutton)\n # Add ok/close\n 
self.savebutton = QtWidgets.QPushButton('Opslaan')\n self.savebutton.clicked.connect(self._save)\n hbox.addWidget(self.savebutton)\n\n vlayout.addLayout(hbox)\n\n # Add layout to widget\n self.setLayout(vlayout)", "def __init__(self, parent=None):\n super(union_Dialog, self).__init__(parent)\n self.setupUi(self)", "def __init__(self, *args, **kwargs):\n\n\t\tsuper(SchadensmeldungFileForm, self).__init__(*args, **kwargs)\n\n\t\tfor field in self.fields:\n\t\t\tself.fields[field].widget.attrs.update({\n\t\t\t\t'class': 'form-control'\n\t\t\t\t})", "def __init__(self, form_name, request, relative_forms):\n super(RelativeView, self).__init__(form_name, request)\n\n # set relative forms's names\n self.relative_forms = relative_forms\n self.relative_data = {}", "def template_CustomisedFormLayout(request):" ]
[ "0.6280024", "0.6280024", "0.6275415", "0.61467266", "0.5942895", "0.5872091", "0.5814412", "0.5774404", "0.57378775", "0.56497526", "0.56302845", "0.56079954", "0.56048656", "0.56025636", "0.5512409", "0.5445713", "0.5410989", "0.53841966", "0.5374642", "0.5372924", "0.5369817", "0.53354776", "0.5305461", "0.5302981", "0.52633333", "0.5258817", "0.5253237", "0.52424204", "0.52414775", "0.52412647", "0.5209187", "0.52030104", "0.5201786", "0.5199394", "0.5169933", "0.5157658", "0.51441556", "0.51426655", "0.51405126", "0.5129459", "0.5118461", "0.51146346", "0.51128983", "0.51047885", "0.5103896", "0.5088928", "0.5085342", "0.5077606", "0.5075546", "0.507205", "0.50698996", "0.50698996", "0.5063568", "0.5060074", "0.50585335", "0.5055189", "0.50528395", "0.5041757", "0.5035188", "0.50245106", "0.5023226", "0.5022979", "0.50187016", "0.5017814", "0.50101507", "0.50094306", "0.500272", "0.5001636", "0.49978188", "0.4996458", "0.4994971", "0.49914944", "0.49914095", "0.49834782", "0.49788377", "0.49763635", "0.4969914", "0.49688762", "0.4962213", "0.49607268", "0.49600494", "0.49557745", "0.49472415", "0.4946359", "0.4931845", "0.4923931", "0.4922596", "0.4918412", "0.4907212", "0.4905391", "0.49048156", "0.49031422", "0.48954427", "0.4895313", "0.4895038", "0.48943093", "0.48921686", "0.48870143", "0.48847052", "0.48834592" ]
0.54038054
17
Publisher unique numerical id. Getter only
def id(self): return self._id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unique_id(self) -> str:\n return pulumi.get(self, \"unique_id\")", "def unique_id(self):\n return self._id", "def unique_id(self):\n return self._id", "def id(self):\n\n return sha256(self.pub.export()).digest()", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self) -> str:\n return self._unique_id", "def unique_id(self) -> str:\n return self._unique_id", "def unique_id(self) -> str:\n return self._unique_id", "def unique_id(self) -> str:\n return self._unique_id", "def unique_id(self) -> str:\n return self._unique_id", "def unique_id(self) -> str:\n return self._unique_id", "def unique_id(self) -> str:\n return self._unique_id", "def unique_id(self) -> str:\n return self._unique_id", "def unique_id() -> str:", "def unique_id(self) -> str:\n return \"{}-{}-{}\".format(*self._id)", "def unique_id(self):\n return (\n \"a80f3d5b-df3d-4e38-bbb7-1025276830cd\"\n )", "def getUniqueID(self):\n return self.unique_id", "def unique_id(self) -> \"long\":\n return _beamforming_swig.beamformer_sptr_unique_id(self)", "def unique_id(self) -> \"long\":\n return _beamforming_swig.randomsampler_sptr_unique_id(self)", "def getID():", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return 
self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def get_unique_id(self):\n if not self.unique_id:\n self.unique_id = uuid.uuid4().hex\n return self.unique_id", "def unique_id(self):\n return self.properties.get(\"UniqueId\", None)", "def get_id(self):\n pass", "def get_id(self):\n pass", "def get_id(self):\n pass", "def get_id(self):\n pass", "def get_id(self):\n \"\"\"Requires use of Python 3\"\"\"\n return str(self.id)", "def id(self):\n # Might also be a first 12-characters shortcut.\n return self._id", "def unique_id(self):\n id = \"{}{}{}\".format(\n DOMAIN, self._account, self.sensorName.lower().replace(\" \", \"\")\n )\n return id", "def unique_id(self):\n return self._uuid", "def unique_id(self):\n return self._uuid", "def unique_id(self) -> str:\n return self._uid", "def unique_id(self):\n return self._uid", "def unique_id(self):\n return self._uid", "def unique_id(self):\n return self._uid", "def gen_id(self) -> str:\n self._id += 1\n return str(self._id)", "def get_unique_id():\n global unique_id_increment\n if unique_id_increment is None:\n unique_id_increment = 0\n unique_id_increment += 1\n return '%d%d' % (int(time.time()), unique_id_increment)", "def get_id(self): # pragma: no cover\n pass", "def get_id(self): # real signature unknown; restored from __doc__\n return \"\"", "def unique_id(self):\n return f\"{self.device.id}-{self.key}\"", "def unique_identifier(self) -> str:\n return pulumi.get(self, \"unique_identifier\")" ]
[ "0.74543864", "0.74381155", "0.74381155", "0.74232537", "0.73660684", "0.73660684", "0.73660684", "0.73660684", "0.73660684", "0.73660684", "0.73660684", "0.73660684", "0.73660684", "0.73660684", "0.73660684", "0.73660684", "0.73660684", "0.73660684", "0.73660684", "0.73660684", "0.73660684", "0.73660684", "0.73660684", "0.73660684", "0.73660684", "0.73660684", "0.73660684", "0.73660684", "0.73660684", "0.73634946", "0.73634946", "0.73634946", "0.73634946", "0.73634946", "0.73634946", "0.73634946", "0.73634946", "0.73162866", "0.7292706", "0.7282958", "0.72741467", "0.7268692", "0.7212011", "0.7210504", "0.71774876", "0.71774876", "0.71774876", "0.71774876", "0.71774876", "0.71774876", "0.71774876", "0.71774876", "0.71774876", "0.71774876", "0.71774876", "0.71774876", "0.71774876", "0.71774876", "0.71774876", "0.71774876", "0.71774876", "0.71774876", "0.71774876", "0.71774876", "0.71774876", "0.71774876", "0.71774876", "0.71774876", "0.71774876", "0.71774876", "0.71774876", "0.71774876", "0.71774876", "0.71774876", "0.71774876", "0.71774876", "0.71774876", "0.71774876", "0.71774876", "0.71774876", "0.7161427", "0.7131676", "0.7131595", "0.7131595", "0.7131595", "0.7131595", "0.71251047", "0.7122834", "0.7102584", "0.7097789", "0.7097789", "0.70763516", "0.70404136", "0.70404136", "0.70404136", "0.70328546", "0.7024491", "0.70138675", "0.6998804", "0.69933695", "0.6988101" ]
0.0
-1
Publisher unique service advertise identifier. Getter only
def advertise_id(self): return self._advertise_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def advertisement_id(self):\n return uuid.uuid4()", "def service_id(self) -> str:\n return pulumi.get(self, \"service_id\")", "def service_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"service_id\")", "def service_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"service_id\")", "def advertised_id(self):\n namespace = '0x' + self.uuid[:8] + self.uuid[-12:]\n major, minor = map(int, (self.major, self.minor))\n temp_instance = self._append_hex(major, minor)\n instance = self._add_padding(temp_instance)\n beacon_id = self._append_hex(int(namespace, 16), instance)\n return base64.b64encode(self.long_to_bytes(beacon_id))", "def id(self):\n return self.service_id", "def get_id(self):\n from ranger_performance_tool import perf_globals\n enabled_services = perf_globals.CONFIG_READER.get_config_value(\"secondary\", \"enabled_services\")\n service = random.choice(enabled_services)\n policy_list = self.remote_store.get_policy_list()[service]\n return random.choice(policy_list).id", "def get_uuid(self):\n uuid = self.request.GET.get('id')\n\n self.publisher = get_object_or_404(Publisher, verbose_id=uuid)\n\n if not self.publisher.is_active:\n raise Http404(\"Link already used or expired.\")\n\n self.publisher.session_start = timezone.now()\n self.publisher.save()\n\n return uuid", "def service_signal(self, service):\n signal = \"{}_{}_{}\".format(DOMAIN, service, self.unique_id.replace(\".\", \"_\"))\n return signal", "def sapd_id(self) -> str:\n return self._sapd_id", "def service_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"service_id\")", "def service_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_id\")", "def service_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_id\")", "def unique_id(self):\n return f\"sma-{self._sensor.key}-{self._sensor.name}\"", "def client_id(self) -> str:", "def getSerpentId(self):\n raise NotImplementedError", "def unique_id(self):\n id = \"{}{}{}\".format(\n DOMAIN, self._account, self.sensorName.lower().replace(\" \", \"\")\n )\n return id", "def id(self):\n\n return sha256(self.pub.export()).digest()", "def DeploymentId(self) -> _n_0_t_0:", "def get_primary_id(self):", "def identifier(self):\n return self._client.identifier", "def unique_id(self) -> str:\n return str(self.coordinator.gios.station_id)", "def identifier(self):\r\n return self.id", "def build_article_ap_id(self, author, article):\n if article.ap_id:\n return article.ap_id\n # Local article, build ID manually\n normalised_host = self.normalise_hostname(author.host)\n if normalised_host is None or author.host == \"\":\n normalised_host = self.normalise_hostname(self._host_name)\n return f'{normalised_host}/ap/@{author.handle}/{article.global_id}'", "async def uuid(self) -> str:\n if not hasattr(self, \"_uuid\"):\n result = await self.app.sparql.query(\"\"\"\n SELECT DISTINCT ?o\n WHERE {\n <{{uri}}> <http://mu.semte.ch/vocabularies/core/uuid> ?o .\n }\n \"\"\", uri=self.uri)\n self._uuid = result['results']['bindings'][0]['o']['value']\n return self._uuid", "def get_id(self) -> str:\n return self._register_id", "def unique_id(self):\n return f\"{self.device.id}-{self.key}\"", "def uuid(self):\n try:\n return self.keystore['id']\n except KeyError:\n return None", "def publisher(self) -> str:\n return pulumi.get(self, \"publisher\")", "def publisher(self) -> str:\n return pulumi.get(self, \"publisher\")", "def publisher(self) -> str:\n return pulumi.get(self, \"publisher\")", "def unique_id(self) -> str:\n 
return f\"{self._mac}_tracker\"", "def unique_id(self):\n return f\"bhyve:program:{self._program_id}\"", "def publisher(self):\n return self.get(\"publisher\")", "def unique_id(self) -> str:\n return \"cid_listener\"", "def unique_id(self):\n return self.config_entry.entry_id + \"lsa\"", "def service(self) -> str:\n return pulumi.get(self, \"service\")", "def get_id(self):\n return self.get_sitename()", "def link_id(self):\n return uuid.uuid4().hex", "def identifier(self):\n return self.__id", "def unique_identifier(self) -> str:\n return pulumi.get(self, \"unique_identifier\")", "def get_id(self): # real signature unknown; restored from __doc__\n return \"\"", "def unique_id(self) -> str:\n return f\"{self._host}_{self._name}_{self._unique_id}\"", "def get_aaguid(self)->UUID:\n return DICEKey.DICEKEY_AUTHENTICATOR_AAGUID", "def identifier(self):\n return self._id", "def internal2spice(self,asteroid):\n return(str(2444444+asteroid.id))", "def get_id(self):\n pass", "def get_id(self):\n pass", "def get_id(self):\n pass", "def get_id(self):\n pass", "def getID():", "def get_id(self): # pragma: no cover\n pass", "def AdvertiseSRMSPreference(self):\r\n\t\treturn self._get_attribute('advertiseSRMSPreference')", "def ApplicationId(self) -> _n_0_t_0:", "def hardware_id(self):\n return uuid.uuid4()", "def service_code(self):\n return self._service_code", "def identifier(self) -> str:\n return self.doc['id']", "def id(self) -> str:\n pass", "def autoid(self) -> str:", "def autoid(self) -> str:", "def uuid(self):\n raise NotImplementedError", "def uuid(self):\n raise NotImplementedError", "def uuid(self):\n raise NotImplementedError", "def id(self) -> str:\n return self.properties[DBUS_ATTR_ID]", "def unique_id(self):\n return self.device_id", "def get_id(self):\n try:\n return self.inst.query('*IDN?')[:36]\n except errors.VisaIOError as e:\n logger.warning(e)\n return 'Device not connected.'", "def unique_id(self) -> str:\n return pulumi.get(self, \"unique_id\")", "def get_identifier(self):", "def id(self):\n # Might also be a first 12-characters shortcut.\n return self._id", "def unique_id(self) -> str:\n return f\"{self._inst.lower()}-enable_switch-{self._data['port-mac-address']}_{self._data['default-name']}\"", "def _ClientID(cls): # pragma: no cover\n return ''", "def party_id(self):\n pass", "def _id(self):\n pass", "def uuid(self):\n return self.__uuid", "def uuid(self):\n return self.__uuid", "def get_publisher(self):\n return self.publisher", "def device_id(self):\n return self.unique_id", "def unique_id(self) -> str:\n return '_'.join(['wavin', self._controller_id, str(self._name), 'battery'])", "def identifier(self):\n raise NotImplementedError", "def uuid(self) -> str:\n return self.__uuid", "def unique_id(self):\n return self.device_id + '_' + self._sensor_type", "def unique_id(self):\n return _spacegrant_swig.ax25_udp_pdu_gen_sptr_unique_id(self)", "def unique_id(self):\n return self._device_id", "def unique_id(self) -> str:\n return self.tahoma_device.url", "def makeid(cls):\n return str(uuid.uuid4().hex)", "def generate_public_ID(self, mapping):\n raise exception.NotImplemented() # pragma: no cover", "def VendorId(self):\n\t\treturn self._get_attribute('vendorId')", "def get_service(self):", "def unique_id(self) -> str:\n return self.get_unique_id(wallet=self.wallet_id, nonce=self.nonce)", "def unique_id(self) -> str | None:\n return self._config[CONF_ID]", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def 
_get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id" ]
[ "0.6714376", "0.67001706", "0.65665245", "0.65665245", "0.6548375", "0.65236896", "0.6251711", "0.6107556", "0.60676295", "0.60120785", "0.5996947", "0.59193593", "0.59193593", "0.5900588", "0.5853006", "0.5787707", "0.57624674", "0.57600904", "0.5751242", "0.5715866", "0.57096285", "0.56892776", "0.56833345", "0.566769", "0.56661624", "0.5646505", "0.56326485", "0.5629219", "0.56247574", "0.56247574", "0.56247574", "0.5592649", "0.55926013", "0.5591628", "0.5577784", "0.5563461", "0.55407673", "0.55343455", "0.5534316", "0.5532451", "0.5518677", "0.5518616", "0.5514174", "0.5511468", "0.55113137", "0.5505268", "0.5488871", "0.5488871", "0.5488871", "0.5488871", "0.54876584", "0.548733", "0.5468154", "0.54676664", "0.5460754", "0.54487056", "0.5447271", "0.5445492", "0.5444152", "0.5444152", "0.5439527", "0.5439527", "0.5439527", "0.54376495", "0.5434355", "0.5427314", "0.5424366", "0.5417743", "0.5417672", "0.54104936", "0.5395819", "0.5394619", "0.5392874", "0.5392346", "0.5392346", "0.5390827", "0.538824", "0.53801936", "0.5375193", "0.53714657", "0.53643864", "0.53633916", "0.5362828", "0.5361584", "0.53610754", "0.5360579", "0.5358614", "0.53574675", "0.5357375", "0.5355595", "0.53538835", "0.53538835", "0.53538835", "0.53538835", "0.53538835", "0.53538835", "0.53538835", "0.53538835", "0.53538835", "0.53538835" ]
0.73240006
0
Queue size of the ROS Topic
def queue_size(self): return self._queue_size
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _getqueuesize(self):\n return self._queuesize", "def queue_size(self) -> ConfigNodePropertyInteger:\n return self._queue_size", "def ctrlqueue_queue_size(self) -> int:\n return self.dss_obj.CtrlQueueI(ctypes.c_int32(10), ctypes.c_int32(0))", "def queue_size(self) -> int:\n return self._queue.qsize()", "def qsize(self):\r\n return len(self._queue)", "def qsize(self) -> int:\n return len(self._queue)", "def size(self):\r\n return len(self.queue)", "def size(self):\r\n return len(self.queue)", "def size(self):\n return len(self.queue)", "def size(self):\n return len(self.queue)", "def size(self):\n return len(self.queue)", "def size(self):\n return len(self.queue)", "def queue_size(self):\n # pylint: disable=protected-access\n if self._handler._received_messages:\n return self._handler._received_messages.qsize()\n return 0", "def qsize(self) -> int:\n return self._queue.qsize()", "def record_queue_size():\n statsd.gauge('rabbitmq.size', rabbitmq_queue_size())", "def size(self):\n return len(self.__queue)", "def qsize(self):\n if not self.connected:\n raise QueueNotConnectedError(\"Queue is not Connected\")\n\n try:\n size = self.__db.llen(self._key)\n except redis.ConnectionError as e:\n raise redis.ConnectionError(repr(e))\n return size", "def test_the_queue_size(the_queue):\n the_queue.enqueue(1)\n the_queue.enqueue(2)\n the_queue.enqueue(3)\n assert the_queue.size() == 3", "def test_size_increments_with_enqueue():\n queue = Queue()\n queue.enqueue('val')\n assert queue.size() == 1", "def get_message_queue_size(self):\n with self.lock:\n return len(self.message_queue)", "def queue_size(self):\n return len(self.groups)", "def _numQueuedTotal(self):\n queueSize = len(self.__queue) + len(self.__clientQueue)\n return queueSize", "def queue_size(self):\n return self.sql_queue.qsize()", "def test_size(self):\n queue = Queue()\n self.assertEqual(queue.size(), 0)\n queue.enqueue(1)\n self.assertEqual(queue.size(), 1)", "def __len__(self):\n\n return len(self._queue)", "def __len__(self):\n # replica + max wait + min bytes + len(topics)\n size = self.HEADER_LEN + 4 + 4 + 4 + 4\n for topic, parts in iteritems(self._reqs):\n # topic name + len(parts)\n size += 2 + len(topic) + 4\n # partition + fetch offset + max bytes => for each partition\n size += (4 + 8 + 4) * len(parts)\n return size", "def size(self):\n return len(self._queue_items)", "def size(self):\n return self.url_queue.qsize()", "def queue_size(self, queue_size: ConfigNodePropertyInteger):\n\n self._queue_size = queue_size", "def test_size_stays_same_with_peak():\n queue = Queue()\n queue.enqueue('val')\n assert queue.size() == 1\n queue.peak()\n assert queue.size() == 1", "def get_size(self, node: int):\n return self._servers[node].size + self._queues[node].size", "def qsize(self) -> int:\n pass", "def __len__(self) -> int:\n return len(self._data_queue)", "def __len__(self):\n # Header + replicaId + len(topics)\n size = self.HEADER_LEN + 4 + 4\n for topic, parts in iteritems(self._reqs):\n # topic name + len(parts)\n size += 2 + len(topic) + 4\n # partition + fetch offset + max bytes => for each partition\n size += (4 + 8 + 4) * len(parts)\n return size", "def queue_count(self):\n with self.mutex:\n return len(self.queues)", "def __len__(self):\n size = self.HEADER_LEN + 2 + 4 + 4 # acks + timeout + len(topics)\n for topic, parts in iteritems(self.msets):\n # topic name\n size += 2 + len(topic) + 4 # topic name + len(parts)\n # partition + mset size + len(mset)\n size += sum(4 + 4 + len(mset) for mset in itervalues(parts))\n 
return size", "def qsize(self): \n return self.__db.llen(self.key)", "def test_size_of_new_queue():\n queue = Queue()\n assert queue.size() == 0", "def redis_size(self):\n def func(server):\n return server.size()\n self.__run_redis_cmd(func, dbs=range(0,8))", "def __len__(self):\n # Header + consumer group + len(topics)\n size = self.HEADER_LEN + 2 + len(self.consumer_group) + 4\n for topic, parts in iteritems(self._reqs):\n # topic name + len(parts)\n size += 2 + len(topic) + 4\n # partition => for each partition\n size += 4 * len(parts)\n return size", "def _queue_len(queue_path: str) -> int:\n if not os.path.exists(queue_path):\n return 0\n return len([path for path in os.listdir(queue_path)])", "def test_get_queue_msg_count1(self):\n self.queue.direct_declare(TEST_QUEUE)\n self.queue.publish(TEST_QUEUE, 'this is a test msg')\n\n msg_count = self.queue.get_queue_msg_count(TEST_QUEUE)\n assert isinstance(msg_count, int)", "def size(self): \n return self.qSize", "def Length(self):\n return len(self.jobQueue)", "def celery_queue_lengths(request):\n queue_len = collect_queue_metrics()\n LOG.info(f\"Celery queue backlog info: {queue_len}\")\n return Response(queue_len)", "def test_size_decrements_with_dequeue():\n queue = Queue()\n queue.enqueue('val')\n assert queue.size() == 1\n queue.dequeue()\n assert queue.size() == 0", "def __len__(self):\n\t\treturn self.qsize()", "def new_messages_number(self, tag):\n return len(self._message_queue.setdefault(tag,[]))", "def queue_message_count(self, queue_name):\n queue_list = self.__session.getObjects(_class=\"queue\", _name=queue_name)\n if len(queue_list):\n return queue_list[0].msgDepth", "def __len__(self):\n # Header + string size + consumer group size\n size = self.HEADER_LEN + 2 + len(self.consumer_group)\n # + generation id + string size + consumer_id size + array length\n size += 4 + 2 + len(self.consumer_id) + 4\n for topic, parts in iteritems(self._reqs):\n # topic name + len(parts)\n size += 2 + len(topic) + 4\n # partition + offset + timestamp => for each partition\n size += (4 + 8 + 8) * len(parts)\n # metadata => for each partition\n for partition, (_, _, metadata) in iteritems(parts):\n size += 2 + len(metadata)\n return size", "def api_get_queue_len():\n try:\n ret = AppStatus.check_manager_status(brief=False)\n if ret is not None:\n return jsonify({\"total_queue_len\": ret.get(\"total_queue_len\", 0)})\n except Exception as e:\n logger.error(\"Traceback:\\n%s\", traceback.format_exc())\n abort(500, \"failed to send message or invalid manager response\")", "def qsize(self):\n return self.q_size.current_value", "def __payload_size(self):\n return (\n self.SIZE_LINEUP_ID + self.players_per_lineup * self.SIZE_PLAYER) * self.entries.count()", "def __len__(self):\n return self.HEADER_LEN + 4 + sum(len(t) + 2 for t in self.topics)", "def get_size(self):\n return len(self.get_payload()) + 4", "def len(self) -> int:\n\n return int(self.q)", "def __repr__(self):\n return 'Queue({})'.format(self.length())", "def __init__(self, size):\n self.size = size\n self.queue = []", "def get_queue_size(queue):\n with current_celery_app.pool.acquire(block=True) as connection:\n bound_queue = queue.bind(connection)\n _, size, _ = bound_queue.queue_declare(passive=True)\n return size", "def ctrlqueue_num_actions(self) -> int:\n try:\n return self.dss_obj.CtrlQueueI(ctypes.c_int32(2), ctypes.c_int32(0))\n except Exception as e:\n Base.warn_msg(\"An error occur when tried to get *Num Actions of CrlQueue* check if *Queue* is NOT empty\", e)", "def 
n_worker(self):\n return self.redis.pubsub_numsub(MSG)[0][-1]", "async def size(self) -> int:", "def get_Q_size(self):\n return len(self.qTable)", "def size():\r\n qry = ImportQueue.query.filter(or_(\r\n ImportQueue.status != COMPLETE,\r\n ImportQueue.status != ERROR))\r\n return qry.count()", "def testQueueMultiMsg(self):\n for i in range(10):\n self.mgr.queueMsg(i)\n\n self.assertEqual( self.mgr.msgQueue.qsize(), 9)", "def test_size_empty(the_queue):\n assert the_queue.size() == 0", "def pre_qos_queue_read(self, resource_id):\n pass", "def post_qos_queue_read(self, resource_id, resource_dict):\n pass", "def output_queue_size(self):\r\n results_dirname = get_param('results_dir')\r\n filename = os.path.join(results_dirname,\r\n '%s_%s' % (get_param('file_prefix'),\r\n 'queued_tasks'))\r\n queued_tasks_file = open(filename, 'w')\r\n queued_tasks_file.write('time\\ttotal_queued_tasks\\n')\r\n for time, queued_tasks in self.enqueued_tasks:\r\n queued_tasks_file.write('%s\\t%s\\n' % (time, queued_tasks))\r\n queued_tasks_file.close()", "def __init__(self, size):\n self.size = size\n self.queue = []\n self.sum = 0", "def __len__(self, schema):\r\n return self.server.llen(PROXIES_REDIS_FORMATTER.format(schema))", "def _get_qos(self):\n return self.__qos", "def _get_qos(self):\n return self.__qos", "def _get_qos(self):\n return self.__qos", "def __init__(self, size):\n self.queue = collections.deque(maxlen = size)", "def __init__(self, size):\n self.queue = collections.deque(maxlen=size)", "def __len__(self):\n\n return len(self._block_queue)", "def __len__(self) -> int:\n\n return len(self._space.CHANNELS) + 1", "def get_text_queue_count(self):\n return self.db.llen(\"soq_texts\")", "def show_queue(Q):\n print(\"(Size of the queue:\", Q.qsize(), \")\", end=\" \")\n for n in list(Q.queue):\n print(n, end=\" \")\n print()", "def getQueueCount(self, force=False):\n if self.queuecount == -1 or force:\n self.queuecount = self.db.queuecount()\n\n return self.queuecount", "def get_size(self):\n ...", "def SendPacketsSendSize(self) -> int:", "def getSize(self) -> int:\n ...", "def getSize(self) -> int:\n ...", "def join(self, queue_name):\n while True:\n size = 0\n for name in (queue_name, dq_name(queue_name)):\n size += self.client.hlen(self._add_namespace(name + \".msgs\"))\n\n if size == 0:\n return\n\n time.sleep(1)", "def post_qos_queue_create(self, resource_dict):\n pass", "def __init__(self, queueLength):\r\n self.queueLength = queueLength\r\n self.data = []\r\n return", "def get_amount_queued(self):\n return self._num_queued", "def get_number_of_messages(queue_name):\n queue = sqs.get_queue_by_name(QueueName=queue_name)\n return queue.attributes.get('ApproximateNumberOfMessages')", "def test_message_integrity_across_sizes(\n size, start_result_q_publisher, start_result_q_subscriber, default_endpoint_id\n):\n result_pub = start_result_q_publisher()\n data = \"x\" * size\n message = {\"task_id\": str(uuid.uuid4()), \"result\": data}\n b_message = json.dumps(message).encode()\n result_pub.publish(b_message)\n\n results_q = multiprocessing.Queue()\n start_result_q_subscriber(queue=results_q)\n\n result_message = results_q.get(timeout=2)\n assert result_message == (result_pub.queue_info[\"test_routing_key\"], b_message)", "def snmpqosqos_bytes_rx(self) :\n\t\ttry :\n\t\t\treturn self._snmpqosqos_bytes_rx\n\t\texcept Exception as e:\n\t\t\traise e", "def __len__(self):\n count = 0\n topics = set(six.iterkeys(self._topics))\n while topics:\n event_type = topics.pop()\n try:\n listeners = 
self._topics[event_type]\n count += len(listeners)\n except KeyError:\n pass\n return count", "def getSize(self) -> long:\n ...", "def __len__(self):\n if not hasattr(self.limitedstream, \"limit\"):\n return 0\n return self.limitedstream.limit", "def GetTotalQueueCount(handler, query):\n # pylint: disable=unused-argument\n\n json_config = {}\n json_config['count'] = 0\n\n with active_tivos_lock:\n for tivoIP in active_tivos:\n with active_tivos[tivoIP]['lock']:\n json_config['count'] += len(active_tivos[tivoIP]['queue'])\n\n handler.send_json(json.dumps(json_config))", "def get_size(self):", "def snmpqosqos_real_bytesrate(self) :\n\t\ttry :\n\t\t\treturn self._snmpqosqos_real_bytesrate\n\t\texcept Exception as e:\n\t\t\traise e", "def test_enqueue(self):\n queue = Queue()\n self.assertEqual(queue.size(), 0)\n queue.enqueue(1)\n queue.enqueue(2)\n queue.enqueue(3)\n self.assertEqual(queue.size(), 3)", "def assert_queue_size(sizes):\n for queue in sizes:\n assert_that(count_messages(queue), is_(sizes[queue]))" ]
[ "0.7185033", "0.7173836", "0.71359736", "0.71171594", "0.7078327", "0.6965806", "0.6958423", "0.6958423", "0.69455695", "0.69455695", "0.69455695", "0.69455695", "0.69437385", "0.6941902", "0.68795055", "0.68710214", "0.68147635", "0.6792856", "0.674943", "0.6711253", "0.67072815", "0.6685156", "0.66571504", "0.6591745", "0.6576509", "0.65705884", "0.6559014", "0.65024793", "0.64889663", "0.64658725", "0.642738", "0.6411738", "0.6398867", "0.638871", "0.63291174", "0.6284452", "0.6258315", "0.62580514", "0.62426376", "0.62197196", "0.6192602", "0.6183597", "0.6170945", "0.61410594", "0.6128898", "0.61091644", "0.61005276", "0.6055949", "0.6009146", "0.59895414", "0.5987177", "0.5971691", "0.5937582", "0.59295744", "0.5929371", "0.5914853", "0.58978444", "0.58968025", "0.5886057", "0.5876125", "0.5876088", "0.5862438", "0.58605903", "0.585035", "0.5833823", "0.5830816", "0.5826282", "0.5821103", "0.581159", "0.5780172", "0.5773943", "0.57564664", "0.57564664", "0.57564664", "0.57449025", "0.57447916", "0.5744713", "0.5744408", "0.5738209", "0.5730914", "0.5722163", "0.5713746", "0.5707776", "0.56933296", "0.56933296", "0.5691554", "0.56860983", "0.5678434", "0.5673617", "0.56686497", "0.56617945", "0.565969", "0.56579477", "0.56442285", "0.5640728", "0.5635934", "0.56359184", "0.5632276", "0.56285495", "0.5618397" ]
0.70792633
4
Publishing ROS message type property. e.g. 'std_msgs/String'
def message_type(self): return self._message_type
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _publish_msg(self, msg_type, data=None, metadata=None, buffers=None, **keys):\n data = {} if data is None else data\n metadata = {} if metadata is None else metadata\n content = json_clean(dict(data=data, comm_id=self.comm_id, **keys))\n self.kernel.session.send(\n self.kernel.iopub_socket,\n msg_type,\n content,\n metadata=json_clean(metadata),\n parent=self.kernel._parent_header,\n ident=self.topic,\n buffers=buffers,\n )", "def _publish_msg(self, msg_type, data=None, metadata=None, buffers=None, **keys):\n data = {} if data is None else data\n metadata = {} if metadata is None else metadata\n content = json_clean(dict(data=data, comm_id=self.comm_id, **keys))\n msg = self.kernel_client.session.msg(msg_type, content=content, parent=self.parent_header, metadata=metadata)\n self.kernel_client.shell_channel.send(msg)", "def __init__(self, topic, message_type): \n self.topic = topic\n \n # find message type\n package, message = message_type.split('/')\n m = load_pkg_module(package)\n\n m2 = getattr(m, 'msg')\n self.message = getattr(m2, message)\n self.publisher = rospy.Publisher(topic, self.message)", "def test_topic_type(self):\n self.failureResultOf(self.producer.send_messages(1234, msgs=[b\"\"]), TypeError)", "def get_msg_type(self):\n return self._msg_type", "def message_type(self):\n return self.type", "def publish(self, message: str) -> None:", "def message_type(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"message_type\")", "def getPropsAsOSCMsg(self):\r\n if BLENDER_MODE == 'BGE':\r\n world_tranform = self.obj.worldTransform\r\n # obj_type_id = self.obj[self.type] # based on game property\r\n obj_type_id = 1 # hardcoded for now (limited to a unique self.type then)\r\n elif BLENDER_MODE == 'BPY':\r\n # self._checkForUndoMess()\r\n world_tranform = self.obj.matrix_world\r\n # obj_type_id = self.obj.game.properties[self.type].value\r\n obj_type_id = 1 # hardcoded for now (limited to a unique self.type then)\r\n\r\n msg = self._shapeOSCMsg('/' + self.type, self.type + '_' + str(obj_type_id), world_tranform)\r\n return msg", "def comsume_msg(self, msg_type):", "def resource_type(self):\n return 'message'", "def type(self) -> MessageType:\n raise NotImplementedError", "def publish(self, data=None):\n rospy.loginfo(\"Message published on topic %s\", self.topic)", "def register_msg(self, path, msgtype, msg):", "def publish_message(producer_instance, topic_name, key, value):\n key_serializer = repr(key).encode()\n value_serializer = repr(value).encode()\n\n producer_instance.send(topic_name, key=key_serializer, value=value_serializer)\n producer_instance.flush()\n print('Message published successfully.')", "def encode(self, rosMsg):\r\n if not isinstance(rosMsg, Message):\r\n raise TypeError('Given rosMsg object is not an instance of '\r\n 'genpy.message.Message.')\r\n\r\n for converter, cls in self._customTypes.itervalues():\r\n if isinstance(rosMsg, cls):\r\n return converter().encode(rosMsg)\r\n\r\n return self._encode(rosMsg)", "def encode(rosMsg): #@NoSelf", "def _publish(self, topic_name, message):\n msg = {\n 'op': 'publish',\n 'topic': topic_name,\n 'msg': message\n }\n json_msg = json.dumps(msg)\n self.ws.send(json_msg)", "def test_key_type(self):\n self.failureResultOf(self.producer.send_messages(\"topic\", key=\"key\", msgs=[b\"msg\"]), TypeError)", "def message_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"message_type\")", "def message_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"message_type\")", 
"def get_message_type(self):\n return self.message_type", "def test_message_type(self):\n self.failureResultOf(self.producer.send_messages(\"topic\", msgs=[1, 2, 3]), TypeError)\n self.failureResultOf(self.producer.send_messages(\"topic\", msgs=[\"asdf\"]), TypeError)", "def publish_to_simulation(self, topic, message, **kwargs):\n pass", "def type_as_string(self):\n return self.properties.get('TypeAsString', None)", "def publish(self, topic, msg):\n formatted_msg = json.dumps(msg)\n self.client.publish(topic, formatted_msg) # json converting cause of mqtt's data transfer limit.", "def mogrify(topic, msg):\n return topic + ' ' + json.dumps(msg)", "def command(self, msg):\n self.cmd_pub.publish(msg)", "def publish(self, topic, value):\n msg = self.topics[topic]['msg']\n msg.data = value\n self.topics[topic]['publisher'].publish(msg)\n print(\"published \\t{} \\t{}\".format(topic, value))", "def publish():\n pub = rospy.Publisher('/turtle/quality', Quality, queue_size=10)\n rospy.init_node('turtle_publisher')\n rate = rospy.Rate(5) # 5hz\n i = 0\n while not rospy.is_shutdown():\n msg = Quality()\n i += 1\n\tmsg.index = i\n\tmsg.value = random.randint(1,10)\n pub.publish(msg)\n rate.sleep()", "def _type(self) -> str:\n ...", "def typename(self, irc, msg, args, typeID):\n try:\n name = self._get_type(typeID)['typeName']\n irc.reply(name, prefixNick=False)\n except:\n irc.error('Unknown type')", "def publish(self, topic, msg):\n\t\tself.topic = topic\n\t\tself.msg = msg \n\t\tself.client.publish(self.topic, self.msg)", "def get_type(self) -> str:\n # Note: this name conflicts with existing python builtins\n return self[\"Sns\"][\"Type\"]", "def gen_message(msg: Message) -> str:\n msg_dict = msg._asdict()\n msg_dict.update({MSG_TYPE_NAME: type(msg).__name__})\n\n return json.dumps(msg_dict)", "def publish(node, payload, settings):\n entry = dict2node(payload)\n iq = build_iq(node, entry, settings)\n send_message(iq, settings)", "def _create_pub(name, rostype, *args, **kwargs):\n # counting publisher instance per topic name\n if name in TopicBack.pub_instance_count.keys():\n TopicBack.pub_instance_count[name] += 1\n else:\n TopicBack.pub_instance_count[name] = 1\n\n return rospy.Publisher(name, rostype, *args, **kwargs)", "def PublishMessage(device_note_message, event_type):\n if not env_config.CONFIG.use_google_api:\n logging.warning(\n \"Unabled to send device note message to pubsub: use_google_api=False\"\n )\n return\n device_note_message.publish_timestamp = _Now()\n encoded_message = protojson.encode_message(device_note_message) # pytype: disable=module-attr\n data = common.UrlSafeB64Encode(encoded_message)\n if event_type == common.PublishEventType.DEVICE_NOTE_EVENT:\n data_type = \"deviceNote\"\n topic = DEVICE_NOTE_PUBSUB_TOPIC\n else:\n data_type = \"hostNote\"\n topic = HOST_NOTE_PUBSUB_TOPIC\n _CreatePubsubClient().PublishMessages(topic, [{\n \"data\": data,\n \"attributes\": {\n \"type\": data_type,\n }\n }])", "def input_message_type(self) -> type:\n raise NotImplementedError()", "def _get_python_prop_type(prop_type: Type[Variable]) -> str:\n if prop_type is VariableBool:\n return \"bool\"\n if prop_type in (VariableInt, VariableUInt):\n return \"int\"\n if prop_type is VariableFloat:\n return \"float\"\n if prop_type is VariableString:\n return \"bytes\"\n if prop_type is VariableVec2:\n return \"(float, float)\"\n if prop_type is VariableStruct:\n return \"dict[str, Variable]\"\n if prop_type is VariableArray:\n return \"MutableSequence\"\n raise TypeError(\"unexpected variable 
type\")", "def add_widgets(msg_instance, widget_dict, widget_list, prefix=''):\n # import only here so non ros env doesn't block installation\n #from genpy import Message\n \"\"\"\n if msg_instance._type.split('/')[-1] == 'Image':\n w = widgets.Text()\n widget_dict['img'] = w\n w_box = widgets.HBox([widgets.Label(value='Image path:'), w])\n widget_list.append(w_box)\n return widget_dict, widget_list\n \"\"\"\n if(rut.is_message(msg_instance) == False):\n return 0\n ## Type of msg\n #msg_class = msg_instance._fields_and_field_types\n\n \n \n for idx, slot in enumerate(msg_instance._fields_and_field_types):\n \n \n \n try:\n msg_inst = msg_instance()\n except:\n msg_inst = msg_instance\n \n attr = getattr(msg_inst, slot)\n\n \n \n## Determine if the Message is the a basic form of Messages such as Point, String etc \n if(type(attr) in [int, str, float]):\n msg_attr = msg_inst.get_fields_and_field_types()\n basic_flag = 1\n if(idx != 0):\n continue\n \n else:\n msg_attr = attr.get_fields_and_field_types()\n basic_flag = 0\n \n \n w = None\n \n if(rut.is_message(msg_instance)):\n if(basic_flag == 1):\n slot = type(msg_inst).__name__\n widget_list.append(widgets.Label(value=slot))\n\n widget_dict[slot] = {}\n \n for s_t in msg_attr:\n \n \n if msg_attr[s_t] in ['float', 'float32', 'float64', 'double']:\n w = widgets.FloatText()\n\n if msg_attr[s_t] in ['int', 'int8', 'uint8', 'int32', 'uint32', 'int64', 'uint64']:\n w = widgets.IntText()\n\n if msg_attr[s_t] in ['string']:\n w = widgets.Text()\n \n\n \n if(w):\n\n widget_dict[slot] = w\n w_box = widgets.HBox([widgets.Label(value=s_t, layout=widgets.Layout(width=\"100px\")), w])\n widget_list.append(w_box)\n\n return widget_dict, widget_list", "def construct_message(self):\n msg_type = self.msg_type\n if msg_type == \"PUBMSG\":\n msg_type = \"PRIVMSG\"\n ret = \"{} {}\".format(msg_type, self.target)\n if self.content:\n ret += \" :{}\".format(self.content)\n return ret + \"\\r\\n\"", "def type(self):\n return self.properties.get('type')", "def send_mqtt(self, data_type, data):\n try:\n client = mqtt.Client(\"rpi1_qnas\")\n client.on_connect = self.on_connect\n client.on_message = self.on_message\n client.connect(MQTT_BROKER_ADDRESS)\n client.loop_start()\n client.publish(MQTT_TOPIC + \"/{}\".format(data_type), data)\n client.disconnect()\n client.loop_stop()\n except Exception:\n msg = \"{} \\nMQTT error\".format(time.strftime(\"%Y-%m-%d %H:%M:%S\"))\n self.loggers[\"log_stdout\"].warning(msg)\n self.loggers[\"log_errors\"].warning(msg)\n self.verbose(msg)", "def publish(self, topic_name, ros_message):\n # First check if we already advertised the topic\n d = self._advertise_dict\n for k in d:\n if d[k]['topic_name'] == topic_name:\n # Already advertised, do nothing\n break\n else:\n # Not advertised, so we advertise\n topic_type = ros_message._type\n self._advertise(topic_name, topic_type)\n # Converting ROS message to a dictionary thru YAML\n ros_message_as_dict = yaml.load(ros_message.__str__())\n # Publishing\n self._publish(topic_name, ros_message_as_dict)", "def ToPpapiType(self, type_, array=False, optional=False):\n if isinstance(type_, model.Function) or type_.property_type in (\n model.PropertyType.OBJECT, model.PropertyType.ENUM):\n return self._FormatPpapiTypeName(\n array, optional, '_'.join(\n cpp_util.Classname(s) for s in self._NameComponents(type_)),\n namespace=cpp_util.Classname(self._namespace.name))\n elif type_.property_type == model.PropertyType.REF:\n return self.ToPpapiType(self._namespace.types[type_.ref_type],\n 
optional=optional, array=array)\n elif type_.property_type == model.PropertyType.ARRAY:\n return self.ToPpapiType(type_.item_type, array=True,\n optional=optional)\n elif type_.property_type == model.PropertyType.STRING and not array:\n return 'PP_Var'\n elif array or optional:\n if type_.property_type in self._PPAPI_COMPOUND_PRIMITIVE_TYPE_MAP:\n return self._FormatPpapiTypeName(\n array, optional,\n self._PPAPI_COMPOUND_PRIMITIVE_TYPE_MAP[type_.property_type], '')\n return self._PPAPI_PRIMITIVE_TYPE_MAP.get(type_.property_type, 'PP_Var')", "def msg_type(self) -> ResponseMessageType:\n return self._msg_type", "def publish(self, message, topic=''):\n if type(message) != types.ListType:\n message = [message]\n if topic:\n message = [topic] + message\n self.send(message)", "def protocol_type(self, value):\n self._write(MX_PROTOCOL_TYPE, value)", "def get_stream_type(self) -> str:", "def register_msgtype_callback(self, path, msg_type, callback_func):", "def typestr(self) -> str:\n return self.type if not self.subtype else f\"{self.type}:{self.subtype}\"", "def notification_type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"notification_type\")", "def publishEvent(eventName,publisher, msg):", "def __init__(self, msgtype,precision,maxFreqError,originateNanos,receiveNanos,transmitNanos,originalOriginate=None):\n super(WCMessage,self).__init__()\n self.log = logging.getLogger(\"dvbcss.protocol.wc.WCMessage\")\n\n self.msgtype = msgtype #: (read/write :class:`int`) Type of message. 0=request, 1=response, 2=response-with-followup, 3=followup\n self.precision = precision #: (read/write :class:`int`) Precision encoded in log base 2 seconds between -128 and +127 inclusive. For example: -10 encodes a precision value of roughly 0.001 seconds.\n self.maxFreqError = maxFreqError #: (read/write :class:`int`) Maximum frequency error in units of 1/256ths ppm. For example: 12800 encodes a max freq error of 50ppm.\n self.originateNanos = originateNanos #: (read/write :class:`int`) Originate timevalue in integer number of nanoseconds\n self.receiveNanos = receiveNanos #: (read/write :class:`int`) Receive timevalue in integer number of nanosecond\n self.transmitNanos = transmitNanos #: (read/write :class:`int`) Transmit timevalue in integer number of nanosecond\n self.originalOriginate = originalOriginate #: (read/write :obj:`None` or (:class:`int`, :class:`int`)) Optional original encoding of the originate timevalue as (seconds, nanos). Overrides `originateNanos` when the message is packed if the value is not `None`. 
", "def test_pod_type(self):\n pod = Pod('1')\n self.assertEqual(pod.get_type(), 'pod')", "def register_topic(self, name, command):\n topic_name = command['topic_name']\n try:\n topic_type = self.get_interface_type(command['interface_type'], '.msg')\n self.pubs[topic_name] = self.create_publisher(topic_type, topic_name, 1)\n except JoyTeleopException as e:\n self.get_logger().error(\n 'could not register topic for command {}: {}'.format(name, str(e)))", "def publish(self, message: model.MQTTMessage):\n self.client.publish(message.topic, payload=message.get_payload())", "def publish(self, data):\n # [START pubsub_quickstart_publisher]\n # [START pubsub_publish]\n # Data must be a bytestring\n logger.info(\"publishing message %s\" % data)\n data = data.encode('utf-8')\n self.publisher.publish(self.topic_path, data=data)\n\n logger.info('Published messages: {}'.format(data))\n # [END pubsub_quickstart_publisher]\n # [END pubsub_publish]", "def send(self, mtype, **kwargs):\n\n self.transport.write(Message(mtype, **kwargs))\n\n if mtype == 'interested':\n self.am_interested = True\n elif mtype == 'not_interested':\n self.am_interested = False\n elif mtype == 'choke':\n self.am_choking = True\n elif mtype == 'unchoke':\n self.am_choking = False", "def receive(self) -> Message[ValueType]:", "def publish(self, node, topic, data={}, on_publish=None, on_response=None):\n pass", "def set_message(msg, m_type):\n cherrypy.session.acquire_lock()\n cherrypy.session[KEY_MESSAGE] = msg\n cherrypy.session[KEY_MESSAGE_TYPE] = m_type\n cherrypy.session.release_lock()", "def rosbridge_outgoing(self,message):\n\n message = json.loads(message)\n\n op=message.get(\"op\")\n if op == \"publish\":\n topic_name = message[\"topic\"]\n message[\"msg\"] = self.compress(topic_name,message.get(\"msg\"))\n message = self.serialize(topic_name,message)\n elif op == \"call_service\":\n message[\"service\"] = self.inv_remap_service(message[\"service\"])\n\n \n if isinstance(message, bytes):\n self.outgoing(message,isBinary=True,identifier=topic_name)\n elif isinstance(message,str):\n self.outgoing(message)\n else:\n message = json.dumps(message)\n self.outgoing(message)", "def type(self, string):\n\n\t\tself._interface.type(string)", "def on_publish(client, userdata, mid):\n print(\"Message Published.\")", "def recv_type(self, type_):\n msg = self.recv()\n assert msg and msg['type'] == type_, msg\n return msg", "def themify(topic,msg):\n return topic + ' ' + json.dumps(msg)", "def themify(topic,msg):\n return topic + ' ' + json.dumps(msg)", "async def publish(self, body, routing_key=None):\n pass # pragma: no cover", "def generate_node_topic(self, node, topic, type='commands'):\n return '/nodes/%s/%s/%s' % (node.node_id, type, topic)", "def send_msg(self, type, data):\n data = json.dumps(\n {\n \"job\": self._job_id,\n \"idx\": self._job_idx,\n \"tool\": self._tool,\n \"type\": type,\n \"data\": data\n },\n\n # use this so that users don't run into errors with ObjectIds not being\n # able to be encodable. 
If using bson.json_util.dumps was strictly used\n # everywhere, could just use that dumps method, but it's not, and I'd rather\n # keep it simple for now\n cls=FriendlyJSONEncoder\n )\n\n self._connected.wait(2 ** 31)\n\n data_len = struct.pack(\">L\", len(data))\n if not self._dev:\n try:\n with self._send_recv_lock:\n self._sock.send(data_len + data)\n except:\n # yes, just silently fail I think???\n pass", "def mqtt_publish_lwt(status):\n if not mqtt.connected:\n return\n cfg_option = Script.lwt\n cfg_section = mqtt.GROUP_TOPICS\n message = iot.get_status(status)\n try:\n mqtt.publish(message, cfg_option, cfg_section)\n logger.debug(\n 'Published to LWT MQTT topic %s: %s',\n mqtt.topic_name(cfg_option, cfg_section),\n message\n )\n except Exception as errmsg:\n logger.error(\n 'Publishing %s to LWT MQTT topic %s failed: %s',\n message,\n mqtt.topic_name(cfg_option, cfg_section),\n errmsg,\n )", "def publishstats(self, topic=None, stats=None):\n ltopic = self.msgtopic\n if topic:\n ltopic = topic\n if not self.echotopic:\n self.echotopic = True\n logging.info(\"Selecting MQTT topic {}\".format(ltopic))\n\n self.mqttc.publish(ltopic, json.dumps(stats))", "def getType(self, *args):\n return _libsbml.ConversionProperties_getType(self, *args)", "def publish_metric(name, value, type):\n t = time.time()\n m = json.dumps({'monitor':name, type:value, 'time':t})\n r = redis.StrictRedis(host='localhost', port=6379, db=0) \n r.lpush('sensor_readings',m)", "def create_publisher():\n pub = rospy.Publisher(\"/number\", Int64, queue_size=10)\n rospy.set_param(\"/number_publish_frequency\", 2)\n pub_freq = rospy.get_param(\"/number_publish_frequency\")\n rate = rospy.Rate(pub_freq)\n \n rospy.set_param(\"/number_to_publish\",3)\n number = rospy.get_param(\"/number_to_publish\")\n rospy.set_param(\"/try_param\", \"what's up\")\n \n while not rospy.is_shutdown():\n msg = Int64()\n msg.data = number\n pub.publish(msg)\n rate.sleep()", "def publish_status(client):\n client.publish(config.topic_get, payload=getlight())", "def publisher(self, iTag, msgType, addr):\r\n return ROSPublisher(self, iTag, msgType, addr)", "def notification_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"notification_type\")", "def process_MESSAGE_TYPE_EMG(self, raw):\n\n pass", "def test_create_policy_type(mock_send_message):\n A1sim.create_policy_type(BASE_URL, \"test_id\", {})\n mock_send_message.assert_called_once_with('PUT',\n 'Create Policy Type',\n (f\"{BASE_URL}/policytype?id=test_id\"),\n data={},\n headers=HEADER)", "def build_message(self, src, dst, typee, msg):\n my_dict = {\n SRCE: src,\n DEST: dst,\n TYPE: typee,\n MESG: msg\n }\n\n return json.dumps(my_dict).encode()", "def publish(topic, message):\n if DEBUG:\n print(\"Publish: '\" + message + \"' (topic: '\" + topic + \"')\")\n DATA[\"client\"].publish(topic, message)", "def __str__(self):\n\n if self._msg_type in self.msg_types:\n msg_type_name = self.msg_types[self._msg_type]\n else:\n msg_type_name = '?(%u)' % (self._msg_type,)\n\n return 'CCPMessage: ver %u msg_type %s conn_id %s data [%s]' % (\n self._version, msg_type_name, self._conn_id, self._data)", "def test_type_attribute(self):\n\n self._create_string()\n self.assertEquals(\"%s:%s\" % (\"xs\",\"string\"), self.string.schema_node.get(\"type\"))", "def publish(self, message: Union[SubmissionMessage, CommentMessage]) -> int:\n self.publisher.publish(self.topic, message.serialize().encode(\"utf-8\")).result()", "def create_pubsub_notification(context, depends_on, status_string):\n\n return [{\n 
'name': 'pubsub-notification-{}'.format(status_string),\n 'action': 'gcp-types/pubsub-v1:pubsub.projects.topics.publish',\n 'properties': {\n 'topic':\n context.properties['pubsubTopic'],\n 'messages': [{\n 'attributes': {\n 'projectId': context.properties['projectId'],\n 'status': status_string,\n }\n }]\n },\n 'metadata': {\n # The notification should only run after *all* project-related\n # resources have been deployed.\n 'dependsOn': depends_on,\n # Only trigger the pubsub message when the deployment is created (not on\n # update or delete).\n 'runtimePolicy': ['UPDATE_ALWAYS'],\n },\n }]", "def frequency_publisher():\n \n # Create and Initialize a ROS Node with name\n rospy.init_node('freq_pub_node', anonymous=True)\n\n\n # Create publisher to publish random floating point value\n pub = rospy.Publisher('frequency', Float32, queue_size=10)\n rate = rospy.Rate(1) # 1 Hz = 1 message per second\n \n while not rospy.is_shutdown():\n # Generate random floating point value\n frequency = random.random()\n rospy.loginfo(f\"SENSOR FREQUNCY: {frequency}\")\n\n # Publish the ROS Message\n pub.publish(frequency)\n \n rate.sleep()", "def publish_message(message: str, broker_ip: str, exchange_name: str, exchange_type: str):\n connection = pika.BlockingConnection(\n pika.ConnectionParameters(host=broker_ip))\n channel = connection.channel()\n channel.exchange_declare(exchange=exchange_name, exchange_type=exchange_type, durable=True)\n channel.basic_publish(exchange=exchange_name, routing_key='', body=message)\n print(f'Published {message} to the exchange')\n connection.close()", "def test_publish(self):\n target_arn = 'testing'\n supercuboid_key = 'acd123'\n message_id = '123456'\n receipt_handle = 'a1b2c3d4'\n message = serializer.encodeIngestMessage(supercuboid_key, message_id, receipt_handle)\n self.sns.publish(self.topic_arn, message)\n message = self.sns.subscribe(self.topic_arn)", "def typeString(self):\n return Parameter.string_dict[self._field.type]", "def deserialize(self, str):\n codecs.lookup_error(\"rosmsg\").msg_type = self._type\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.type is None:\n self.type = std_msgs.msg.String()\n if self.parent_name is None:\n self.parent_name = std_msgs.msg.String()\n if self.name is None:\n self.name = std_msgs.msg.String()\n if self.pose is None:\n self.pose = geometry_msgs.msg.Pose()\n if self.sensed_objects is None:\n self.sensed_objects = None\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.sim_step,) = _get_struct_I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.type.data = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.type.data = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.parent_name.data = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.parent_name.data = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.name.data = str[start:end].decode('utf-8', 
'rosmsg')\n else:\n self.name.data = str[start:end]\n _x = self\n start = end\n end += 68\n (_x.wall_time, _x.sim_time, _x.pose.position.x, _x.pose.position.y, _x.pose.position.z, _x.pose.orientation.x, _x.pose.orientation.y, _x.pose.orientation.z, _x.pose.orientation.w, _x.count,) = _get_struct_2f7dI().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sB'%length\n start = end\n s = struct.Struct(pattern)\n end += s.size\n self.triggered = s.unpack(str[start:end])\n self.triggered = list(map(bool, self.triggered))\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n s = struct.Struct(pattern)\n end += s.size\n self.range = s.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n s = struct.Struct(pattern)\n end += s.size\n self.measurement = s.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.sensed_objects = []\n for i in range(0, length):\n val1 = std_msgs.msg.String()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.data = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val1.data = str[start:end]\n self.sensed_objects.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n self.sensed_objects_map = str[start:end]\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) # most likely buffer underfill", "def deserialize(self, str):\n if python3:\n codecs.lookup_error(\"rosmsg\").msg_type = self._type\n try:\n if self.graspable_objects is None:\n self.graspable_objects = None\n if self.image is None:\n self.image = sensor_msgs.msg.Image()\n if self.camera_info is None:\n self.camera_info = sensor_msgs.msg.CameraInfo()\n if self.meshes is None:\n self.meshes = None\n if self.reference_to_camera is None:\n self.reference_to_camera = geometry_msgs.msg.Pose()\n end = 0\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.graspable_objects = []\n for i in range(0, length):\n val1 = manipulation_msgs.msg.GraspableObject()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.reference_frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val1.reference_frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.potential_models = []\n for i in range(0, length):\n val2 = household_objects_database_msgs.msg.DatabaseModelPose()\n start = end\n end += 4\n (val2.model_id,) = _get_struct_i().unpack(str[start:end])\n _v32 = val2.type\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v32.key = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v32.key = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v32.db = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v32.db = str[start:end]\n _v33 = val2.pose\n _v34 = _v33.header\n start = end\n end += 4\n (_v34.seq,) = _get_struct_I().unpack(str[start:end])\n _v35 = _v34.stamp\n _x = _v35\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n 
start = end\n end += length\n if python3:\n _v34.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v34.frame_id = str[start:end]\n _v36 = _v33.pose\n _v37 = _v36.position\n _x = _v37\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v38 = _v36.orientation\n _x = _v38\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])\n start = end\n end += 4\n (val2.confidence,) = _get_struct_f().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.detector_name = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val2.detector_name = str[start:end]\n val1.potential_models.append(val2)\n _v39 = val1.cluster\n _v40 = _v39.header\n start = end\n end += 4\n (_v40.seq,) = _get_struct_I().unpack(str[start:end])\n _v41 = _v40.stamp\n _x = _v41\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v40.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v40.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v39.points = []\n for i in range(0, length):\n val3 = geometry_msgs.msg.Point32()\n _x = val3\n start = end\n end += 12\n (_x.x, _x.y, _x.z,) = _get_struct_3f().unpack(str[start:end])\n _v39.points.append(val3)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v39.channels = []\n for i in range(0, length):\n val3 = sensor_msgs.msg.ChannelFloat32()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val3.name = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val3.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n s = struct.Struct(pattern)\n end += s.size\n val3.values = s.unpack(str[start:end])\n _v39.channels.append(val3)\n _v42 = val1.region\n _v43 = _v42.cloud\n _v44 = _v43.header\n start = end\n end += 4\n (_v44.seq,) = _get_struct_I().unpack(str[start:end])\n _v45 = _v44.stamp\n _x = _v45\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v44.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v44.frame_id = str[start:end]\n _x = _v43\n start = end\n end += 8\n (_x.height, _x.width,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v43.fields = []\n for i in range(0, length):\n val4 = sensor_msgs.msg.PointField()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val4.name = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val4.name = str[start:end]\n _x = val4\n start = end\n end += 9\n (_x.offset, _x.datatype, _x.count,) = _get_struct_IBI().unpack(str[start:end])\n _v43.fields.append(val4)\n _x = _v43\n start = end\n end += 9\n (_x.is_bigendian, _x.point_step, _x.row_step,) = _get_struct_B2I().unpack(str[start:end])\n _v43.is_bigendian = bool(_v43.is_bigendian)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n _v43.data = str[start:end]\n start = end\n end += 1\n 
(_v43.is_dense,) = _get_struct_B().unpack(str[start:end])\n _v43.is_dense = bool(_v43.is_dense)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%si'%length\n start = end\n s = struct.Struct(pattern)\n end += s.size\n _v42.mask = s.unpack(str[start:end])\n _v46 = _v42.image\n _v47 = _v46.header\n start = end\n end += 4\n (_v47.seq,) = _get_struct_I().unpack(str[start:end])\n _v48 = _v47.stamp\n _x = _v48\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v47.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v47.frame_id = str[start:end]\n _x = _v46\n start = end\n end += 8\n (_x.height, _x.width,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v46.encoding = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v46.encoding = str[start:end]\n _x = _v46\n start = end\n end += 5\n (_x.is_bigendian, _x.step,) = _get_struct_BI().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n _v46.data = str[start:end]\n _v49 = _v42.disparity_image\n _v50 = _v49.header\n start = end\n end += 4\n (_v50.seq,) = _get_struct_I().unpack(str[start:end])\n _v51 = _v50.stamp\n _x = _v51\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v50.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v50.frame_id = str[start:end]\n _x = _v49\n start = end\n end += 8\n (_x.height, _x.width,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v49.encoding = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v49.encoding = str[start:end]\n _x = _v49\n start = end\n end += 5\n (_x.is_bigendian, _x.step,) = _get_struct_BI().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n _v49.data = str[start:end]\n _v52 = _v42.cam_info\n _v53 = _v52.header\n start = end\n end += 4\n (_v53.seq,) = _get_struct_I().unpack(str[start:end])\n _v54 = _v53.stamp\n _x = _v54\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v53.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v53.frame_id = str[start:end]\n _x = _v52\n start = end\n end += 8\n (_x.height, _x.width,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v52.distortion_model = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v52.distortion_model = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n s = struct.Struct(pattern)\n end += s.size\n _v52.D = s.unpack(str[start:end])\n start = end\n end += 72\n _v52.K = _get_struct_9d().unpack(str[start:end])\n start = end\n end += 72\n _v52.R = _get_struct_9d().unpack(str[start:end])\n start = end\n end += 96\n _v52.P = 
_get_struct_12d().unpack(str[start:end])\n _x = _v52\n start = end\n end += 8\n (_x.binning_x, _x.binning_y,) = _get_struct_2I().unpack(str[start:end])\n _v55 = _v52.roi\n _x = _v55\n start = end\n end += 17\n (_x.x_offset, _x.y_offset, _x.height, _x.width, _x.do_rectify,) = _get_struct_4IB().unpack(str[start:end])\n _v55.do_rectify = bool(_v55.do_rectify)\n _v56 = _v42.roi_box_pose\n _v57 = _v56.header\n start = end\n end += 4\n (_v57.seq,) = _get_struct_I().unpack(str[start:end])\n _v58 = _v57.stamp\n _x = _v58\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v57.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v57.frame_id = str[start:end]\n _v59 = _v56.pose\n _v60 = _v59.position\n _x = _v60\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v61 = _v59.orientation\n _x = _v61\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])\n _v62 = _v42.roi_box_dims\n _x = _v62\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.collision_name = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val1.collision_name = str[start:end]\n self.graspable_objects.append(val1)\n _x = self\n start = end\n end += 12\n (_x.image.header.seq, _x.image.header.stamp.secs, _x.image.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.image.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.image.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 8\n (_x.image.height, _x.image.width,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.image.encoding = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.image.encoding = str[start:end]\n _x = self\n start = end\n end += 5\n (_x.image.is_bigendian, _x.image.step,) = _get_struct_BI().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n self.image.data = str[start:end]\n _x = self\n start = end\n end += 12\n (_x.camera_info.header.seq, _x.camera_info.header.stamp.secs, _x.camera_info.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.camera_info.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.camera_info.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 8\n (_x.camera_info.height, _x.camera_info.width,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.camera_info.distortion_model = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.camera_info.distortion_model = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n s = struct.Struct(pattern)\n end += s.size\n self.camera_info.D = s.unpack(str[start:end])\n start = end\n end += 72\n 
self.camera_info.K = _get_struct_9d().unpack(str[start:end])\n start = end\n end += 72\n self.camera_info.R = _get_struct_9d().unpack(str[start:end])\n start = end\n end += 96\n self.camera_info.P = _get_struct_12d().unpack(str[start:end])\n _x = self\n start = end\n end += 25\n (_x.camera_info.binning_x, _x.camera_info.binning_y, _x.camera_info.roi.x_offset, _x.camera_info.roi.y_offset, _x.camera_info.roi.height, _x.camera_info.roi.width, _x.camera_info.roi.do_rectify,) = _get_struct_6IB().unpack(str[start:end])\n self.camera_info.roi.do_rectify = bool(self.camera_info.roi.do_rectify)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.meshes = []\n for i in range(0, length):\n val1 = shape_msgs.msg.Mesh()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.triangles = []\n for i in range(0, length):\n val2 = shape_msgs.msg.MeshTriangle()\n start = end\n end += 12\n val2.vertex_indices = _get_struct_3I().unpack(str[start:end])\n val1.triangles.append(val2)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.vertices = []\n for i in range(0, length):\n val2 = geometry_msgs.msg.Point()\n _x = val2\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n val1.vertices.append(val2)\n self.meshes.append(val1)\n _x = self\n start = end\n end += 56\n (_x.reference_to_camera.position.x, _x.reference_to_camera.position.y, _x.reference_to_camera.position.z, _x.reference_to_camera.orientation.x, _x.reference_to_camera.orientation.y, _x.reference_to_camera.orientation.z, _x.reference_to_camera.orientation.w,) = _get_struct_7d().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) # most likely buffer underfill", "def type(self):\n if self.__type is None:\n message_type = find_definition(\n self.__type_name, self.message_definition())\n if not (message_type is not Message and\n isinstance(message_type, type) and\n issubclass(message_type, Message)):\n raise FieldDefinitionError(\n 'Invalid message class: %s' % message_type)\n self.__type = message_type\n return self.__type", "def getType(self): #$NON-NLS-1$\r", "def serialize_property(self, value):\r\n if isinstance(value, self.LITERAL_PROPERTY_TYPES):\r\n return value\r\n\r\n return port.to_u(value)", "def message(self, value: str):\n self._properties[\"message\"] = value", "def publish(self, data, properties):\n logger.debug(\"Publisher: Sending a message to MQ...\")\n rqueue = Queue(\n properties['reply_to'],\n Exchange(\n properties[\"replyToExchange\"], 'direct',\n durable=True, no_declare=self.no_declare),\n routing_key=properties['reply_to'],\n no_declare=self.no_declare\n )\n if properties.get(\"encode\", True):\n rsp_body = (base64.b64encode(data.encode('utf-8'))).decode()\n else:\n rsp_body = (base64.b64encode(data)).decode() # raw data\n rsp_msg = {\n 'id': properties.get('id', None),\n 'headers': {\n 'Content-Type': properties.get(\n \"Content-Type\", \"application/*+json;version=31.0\" # default\n ),\n 'Content-Length': len(data)\n },\n 'statusCode': properties.get(\"statusCode\", 200),\n 'body': rsp_body\n }\n try:\n self.connection.Producer().publish(\n rsp_msg,\n correlation_id=properties['correlation_id'],\n routing_key=rqueue.routing_key,\n exchange=rqueue.exchange,\n retry = True,\n expiration = 10000\n )\n logger.info(\"Publisher: Response sent to MQ\")\n except ConnectionResetError:\n logger.error(\"Publisher: ConnectionResetError: message may be not sent...\")" ]
[ "0.6201101", "0.6103247", "0.6074486", "0.5716341", "0.56712085", "0.5609717", "0.56032944", "0.55876714", "0.5475563", "0.5447158", "0.54009306", "0.5368012", "0.5363983", "0.5361609", "0.53507215", "0.5346041", "0.5328286", "0.5312357", "0.5275769", "0.5265659", "0.5265659", "0.52576226", "0.5248328", "0.5243847", "0.52359825", "0.52304", "0.5224806", "0.5219242", "0.52094847", "0.5205575", "0.51861763", "0.5185484", "0.51763356", "0.5167021", "0.516111", "0.5148872", "0.5140999", "0.5140657", "0.51072794", "0.5104788", "0.5080337", "0.5069047", "0.5063273", "0.5058472", "0.5047979", "0.50477636", "0.50437826", "0.50340825", "0.50338876", "0.50314647", "0.5026352", "0.5017997", "0.4999784", "0.49968055", "0.4988503", "0.4982484", "0.49818158", "0.4972291", "0.49706855", "0.49658376", "0.49612632", "0.49601862", "0.49562696", "0.49356207", "0.49346247", "0.49304017", "0.49118346", "0.4899604", "0.4899604", "0.48947346", "0.48946208", "0.48920003", "0.4888129", "0.48853806", "0.48727852", "0.4856639", "0.48499808", "0.48481265", "0.48455518", "0.4840287", "0.48301515", "0.48051786", "0.4805057", "0.48043436", "0.48031262", "0.4801865", "0.4778857", "0.4778857", "0.4776487", "0.47761977", "0.47751454", "0.4774468", "0.47729748", "0.4768032", "0.47679508", "0.4765426", "0.47615787", "0.47580516", "0.4750869" ]
0.52689123
19
Publishing latch status. Getter only property
def latch(self): return self._latch
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def monitor(self):", "def _get_status(self):\n return self.__status", "def status(self):\n raise NotImplementedError()", "def power_status(self):\n raise NotImplementedError(\"ERROR: Unimplemented function.\")", "def set_On(self):\n if not(self._locked):\n self.__dict__['statusOn']=True\n self._do_action()\n else:\n self._log.info('The JobProperty %s is blocked', self.__name__)", "def status(self):", "def get_status(self):\n return self._refreshed", "def status(self):\n pass", "def status(self):\n pass", "def GetStatus(self):\r\n return self.status", "def publish_actuator_status(self, reference):\n state, value = self.actuator_status_provider.get_actuator_status(reference)\n actuator_status = ActuatorStatus.ActuatorStatus(reference, state, value)\n outbound_message = self.outbound_message_factory.make_from_actuator_status(actuator_status)\n if not self.connectivity_service.publish(outbound_message):\n self.outbound_message_queue.put(outbound_message)", "def _ready(cls):\n sync_call(cls.ready)", "def get_raw_status(self):\n self.__param_lock.acquire()\n status = self.__status\n self.__param_lock.release()\n return status", "def get_status(self, state):\n raise NotImplementedError", "def status(self):\n return None", "def monitor(self, s):\n raise NotImplementedError()", "def poll(self):\n raise NotImplementedError()", "def status(self):\n\t\treturn self._status", "def publish_status(client):\n client.publish(config.topic_get, payload=getlight())", "def state(self):\n return self.status", "async def get_status():", "def status(self):\n return self.state", "def __await__(self):\n return self.waiter.__await__()", "def __await__(self):\n return self.waiter.__await__()", "def monitored(self):\n return self.monitor", "def getStatus():", "def status(self):\n with self.__lock:\n assert(self.__complete)\n return self.__status", "def get_Value(self):\n obj_p=object.__getattribute__(self, 'StoredValue')\n if isinstance(obj_p, type(True)):\n return obj_p & self.statusOn \n else: \n if self.statusOn: \n return obj_p \n else: \n self._log.warning('You are attempting to access the job'+ \n 'property %s which has not been set', self._context_name) \n return None", "def status(self):\r\n return self._status", "def status(self):\r\n return self._status", "def __self__(self, GPIO_LED):\n # GPIO.setup()\n # if error raise exception \"Device Not Ready\"\n self.status = false\n return self.status", "def rest_api_status(self):\n with self.resource_lock:\n pass", "def __init__(self):\n self._monitor_lock = threading.Lock() # type: threading.Lock", "def status(self):\n return self.__status", "def status(self):\n return self.__status", "def lock_control(self):\n raise NotImplementedError('PlatformService: Implementation incomplete')", "def status():\n pass", "def get_status(self):\n return self.status", "def get_status(self):\n return self.status", "def get_status(self):\n return self.status", "def status(self):\n if hasattr(self, \"_status\"):\n return self._status\n else:\n return None", "def status(self, value):\n if self._status != value:\n self._status = value\n self._last_changed = now()\n self.status_events.notify(self.status_feedback)\n return self._status", "def getStatus(self):\n return self.__status", "def isVolatile(self) -> bool:\n ...", "def getstatus(self):\n return self.__status", "async def async_turn_on(self, **kwargs: Any) -> None:\n await self.async_publish(\n self._config[CONF_COMMAND_TOPIC],\n self._config[CONF_PAYLOAD_ON],\n self._config[CONF_QOS],\n self._config[CONF_RETAIN],\n 
self._config[CONF_ENCODING],\n )\n if self._optimistic:\n # Optimistically assume that switch has changed state.\n self._attr_is_on = True\n self.async_write_ha_state()", "def monitor_behavior_status(self):\n self._flexbe_status_subscriber = rospy.Subscriber('/flexbe/status', BEStatus, self.callback_flexbe_status)", "def is_on(self):\n return self._sensor_state()", "def get_status(self):\n return self._status", "def status(self):\n return self.status", "def readable(self):\n return self.interface.in_waiting", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def CaptureStatus(self):\n raise NotImplementedError(\n \"abstract method -- subclass %s must override\" % self.__class__)", "def poll_instruction(self):\n raise NotImplementedError()", "def indicateReady(self):\n self.Data.Sync.IsWritten = 0", "def getStatus(self):\n return self._status", "def __init__(self):\n # Global attributes\n self.ON = {\"RED\":[0], \"GREEN\":[2], \"YELLOW\":[4], \"BLINK\":[6], \"NORMAL\":[2], \"WARNING\":[2,6], \"CRITICAL\":[4], \"ERROR\":[0]}\n self.OFF = {\"RED\":[1], \"GREEN\":[3], \"YELLOW\":[5], \"BLINK\":[5], \"NORMAL\":[3], \"WARNING\":[3,5], \"CRITICAL\":[5], \"ERROR\":[1]}\n\n # Indicator topic\n topic = rospy.get_param(rospy.get_name() + \"/indicator_topic\", \"/tower_lights_cmd\")\n # Namespace fixing\n if (topic[0] != '/'): topic = rospy.get_name() + \"/\" + topic\n\n # Starting publisher\n self.indicator_publisher = rospy.Publisher(topic, Int32, queue_size=100)\n rospy.sleep(0.8) # Publisher initialization tiom\n\n # Turn off all indications\n for state in self.OFF:\n for cmd in self.OFF[state]:\n self.publish_cmd(cmd)\n \n # Start indicator thread\n self.event = threading.Condition()\n thread = threading.Thread(target=self.indicator_thread)\n thread.start()\n\n # Initialize default indication\n self.current_indication = \"NORMAL\"\n self.indication = \"NORMAL\"\n for i in self.ON[self.current_indication]:\n self.publish_cmd(i)", "def get_status(self):\n return self.o.read_register(self.dev_id, STATUS)", "def is_ready(cls):\n\n return SUB_NOTIFY_READY", "def status(self) -> Status:\n return self._status", "def task_status():\n pass", "def getStatus(self) -> MultithreadedTableWriterStatus:\n status=MultithreadedTableWriterStatus()\n status.update(self.writer.getStatus())\n return status", "def check_status(self):\n return self.status", "def check_status(self):\n return self.status", "def status(self):\n return self.microblaze.state", "def state(self):\n raise NotImplementedError", "def is_on(self):\r\n return self._state", "def 
is_on(self):\r\n return self._state", "def is_on(self):\r\n return self._state", "def check_status(self):", "def in_waiting(self) -> int:\n pass", "def ready(self):\n return _cantera.wall_ready(self.__wall_id)", "def get_status(self):\n return self.read_register(259, 0, 3)", "def ready(cls):\n return cls.started_hungry, cls.started", "def output_status(self):\n raise NotImplementedError", "def state(self):\n raise NotImplementedError()", "def __init__(self, versioned=False):\n self._state = None\n state_topic = 'robot/state'\n self._state_sub = rospy.Subscriber(state_topic,\n AssemblyState,\n self._state_callback\n )\n if versioned and not self.version_check():\n sys.exit(1)\n\n baxter_dataflow.wait_for(\n lambda: not self._state is None,\n timeout=2.0,\n timeout_msg=(\"Failed to get robot state on %s\" %\n (state_topic,)),\n )" ]
[ "0.60354245", "0.5962349", "0.5958736", "0.59539264", "0.5925358", "0.59236515", "0.5900095", "0.57918376", "0.57918376", "0.5746601", "0.5694383", "0.56685543", "0.56662333", "0.5662871", "0.56223184", "0.5591586", "0.5569208", "0.5568028", "0.556712", "0.5565975", "0.556073", "0.5548806", "0.5547178", "0.5547178", "0.55319947", "0.5516137", "0.55028504", "0.550205", "0.5498743", "0.5498743", "0.54894346", "0.5477368", "0.54748946", "0.5471247", "0.5471247", "0.5463234", "0.5458933", "0.5449732", "0.5449732", "0.5449732", "0.54423034", "0.54422754", "0.5439428", "0.5438607", "0.543659", "0.5428837", "0.54261976", "0.54240936", "0.542151", "0.5398628", "0.5392826", "0.538703", "0.538703", "0.538703", "0.538703", "0.538703", "0.538703", "0.538703", "0.538703", "0.538703", "0.538703", "0.538703", "0.538703", "0.538703", "0.538703", "0.538703", "0.538703", "0.538703", "0.538703", "0.538703", "0.538703", "0.538703", "0.538703", "0.538703", "0.538703", "0.53631604", "0.53468233", "0.53422755", "0.53331393", "0.5327379", "0.53193325", "0.5316781", "0.52839977", "0.5277097", "0.5276624", "0.5276175", "0.5276175", "0.5269795", "0.5263182", "0.52623105", "0.52623105", "0.52623105", "0.525627", "0.52545834", "0.5253151", "0.52490133", "0.52423567", "0.52409106", "0.5226726", "0.5226611" ]
0.73604715
0
Getter only property. Returns publishing topic name.
def topic(self): return self._topic_name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def topic(self) -> str:\n return self._topic", "def pubsub_topic(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"pubsub_topic\")", "def topic(self):\n return self.config.get('topic', f'{NAMESPACE}/{self.id}')", "def get_topic(self):\n return self.topic", "def pubsub_topic(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"pubsub_topic\")", "def kafka_topic(self) -> str:\n return self._kafka_topic", "def topic(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"topic\")", "def __get_topic(self) -> str:\n\t\treturn os.getenv('MQTT_DRIVEN_TOPIC', 'app/event')", "def __get_topic(self) -> str:\n\t\treturn os.getenv('MQTT_DRIVER_TOPIC', 'app/book/#')", "def __str__(self):\n return f\"topic: {self.__topic__}\"", "def response_kafka_topic_name(self) -> str:\n return self._response_kafka_topic_name", "def GetTopicName(args):\n if args.add_topic:\n topic_ref = args.CONCEPTS.add_topic.Parse()\n elif args.remove_topic:\n topic_ref = args.CONCEPTS.remove_topic.Parse()\n else:\n topic_ref = args.CONCEPTS.update_topic.Parse()\n\n return topic_ref.RelativeName()", "def publisher_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"publisher_name\")", "def partner_topic_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"partner_topic_name\")", "def getTopicName(nd_proj):\n # does not line &\n return '-'.join(nd_proj.generateProjectInfo())", "def kafka_topic(self):\n from corehq.apps.change_feed.topics import get_topic_for_doc_type\n return get_topic_for_doc_type(self.document_class().to_json()['doc_type'])", "def display_topic(self):\n return ', '.join(topic.name for topic in self.topic.all()[:3])", "def _topic(self, topic):\n base = \"engine.%s\" % self.engine_id\n\n return f\"{base}.{topic}\".encode()", "def topic(self, topic):\n self.connection.topic(str(self), topic)", "def topic_arn(self) -> Optional[str]:\n return pulumi.get(self, \"topic_arn\")", "def topic_name_from_path(path, project):\n return _name_from_project_path(path, project, _TOPIC_TEMPLATE)", "def publisher_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"publisher_name\")", "def publisher_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"publisher_name\")", "def topic_arn(self) -> str:\n return pulumi.get(self, \"topic_arn\")", "def sns_topic_arn(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"sns_topic_arn\")", "def create_name (self):\n return self.create_topic().create_name('Name')", "def topic_arn(self) -> str:\n return self[\"Sns\"][\"TopicArn\"]", "def publisher(self) -> str:\n return pulumi.get(self, \"publisher\")", "def publisher(self) -> str:\n return pulumi.get(self, \"publisher\")", "def publisher(self) -> str:\n return pulumi.get(self, \"publisher\")", "def topic_id(self) -> TopicId:\n return self._topic_id", "def topic(self, topic_id):\r\n return topics.Topic(self, topic_id)", "def topic(kafka_broker, request):\n topic_name = '%s_%s' % (request.node.name, random_string(10))\n kafka_broker.create_topics([topic_name])\n return topic_name", "def publisher(self):\n return self.get(\"publisher\")", "def sns_topic_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"sns_topic_arn\")", "def sns_topic_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"sns_topic_arn\")", "def _get_generic_topic(self):\n\n content_type = ContentType.objects.get_for_model(self.__class__)\n app_label = content_type.app_label\n\n return super(ProducerModel, 
self)._get_generic_topic(identifier=app_label)", "def get_topic_identifier(self):\n if 'topic_identifier' in self.options:\n return self.options['topic_identifier']\n else:\n raise DiscourseError('No :topic_identifier: option found in ::discourse: directive.')", "def topic_name( self ):\n return Constants.TopicNames.TMP36Volts2CentigradeStep", "def get_kafka_source_topic(self) -> str:\n if self.source and self.source.source_type == \"Kafka\":\n return self.source.topic\n raise Exception(\"Source type could not be identified\")", "def _get_topic_for_response():\n return _get_topic_base() + \"res/\"", "def describe_topic(self, index):\n assert(self.has_topics)\n assert(0 <= index < self.K)\n return self.topics[index]", "def topic(self, topic_id):\r\n return contents.Topic(self, topic_id)", "def get_topicname ( base_name, object_type, condition ) :\n return base_name + '-' + object_type.upper( ) + '-' + condition.upper( )", "def topic(phenny, input):\n if not input.admin:\n return\n text = input.group().split()\n topic = ' '.join(text[1:])\n if topic == '':\n return\n channel = input.sender\n phenny.write(['PRIVMSG', 'ChanServ'], 'TOPIC %s %s' % (input.sender, topic))\n return", "def create_topic (self):\n return self.tm.create_topic()", "def broker_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"broker_name\")", "def topics(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"topics\")", "def get_name(self):\n return self.load_name(self.subject)", "def topic(self, channel, topic=None):\n if topic:\n channel += ' :' + topic\n self.send_line('TOPIC %s' % channel)", "def get_unified_publisher_name(publisher):\n return mappings.PUBLISHER_MAPPINGS.get(publisher, publisher)", "def topic(self, topic_id):\n return topics.Topic(self, topic_id)", "def topic_identity(self, name=None):\n identity = self._wc2identity.get(name)\n if not identity:\n identity = self.next_topic_identity(name)\n if name:\n self._wc2identity[name] = identity\n return identity", "def get_topic(self, label):\n\n for attr in self.parm_list:\n if attr.label == label:\n return attr.topic\n\n return \"\"", "def get_full_topicarn ( base_topicarn, topicname ) :\n return base_topicarn + ':' + topicname", "def subject_property_name(self):\n subject_property_name = 'subject'\n if 'participant' in self.schemas.keys():\n subject_property_name = 'participant'\n return subject_property_name", "def Publisher(self, default=None):\n return self.data.get('publisher', default)", "def get_channel_topic(channel_name):\n return get_cmd_arg('spring.cloud.stream.bindings.{}.destination'.format(channel_name))", "def get_name(self):\n return self.soup.find('div', id = 'zh-topic-title').h1\\\n .get_text(strip = True).encode(CODE)", "def get_name(self):\n return self.settings.get(\"name\", None)", "def _get_topic_base():\n return \"$dps/registrations/\"", "def topics(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"topics\")", "def create_pubsub_topic(client, project, name):\n full_name = pubsub.topic_name(project, name)\n if client.get_topic(full_name):\n return\n\n client.create_topic(full_name)", "def get_pod_name(self) -> Text:\n return self._pod_name", "def get_topic_controller(self, topic, project=None):\n target = self.lookup(topic, project)\n return target and target.topic_controller", "def publisher(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"publisher\")", "def publisher(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, 
\"publisher\")", "def get_publisher(self):\n return self.publisher", "def get_document_distribution_over_topic(self):\n return self.document_distribution_over_topic", "def get_topic(title):\n return Topic.get(Topic.title == title)", "def name(self):\n return self.properties.get('name', None)", "def property_name(self) -> str:\n return str(self.prop_name)", "def base_topicarn ( self ) :\n if not self.topic_arn_prefix :\n self.topic_arn_prefix = get_base_topicarn( self.sns_conn( ) )\n return self.topic_arn_prefix", "def _create_topic(self):\n topic_name = self.generate_name()\n try:\n topic = self.sns.create_topic(Name=topic_name)\n except Exception as e:\n raise RuntimeError('SNS could create topic: %s' % e)\n self.topic_name, self.topic = topic_name, topic", "def get_subscription_owner(request, topic):\n return topic.profile.km_user.user", "def test_schema_name_for_topic(self):\n self.assertEqual(\n eventlogging.topic.schema_name_for_topic('test_topic'),\n 'TestSchema'\n )\n with self.assertRaises(eventlogging.topic.TopicNotConfigured):\n eventlogging.topic.schema_name_for_topic('not a schema')", "def get_topics(self):\r\n return [x[0] for x in get_published_topics()]", "def promote_name(self):\n return self._promote_name", "def name(self):\n return self.properties.get('name')", "def name(self):\n return self.properties.get('name')", "def ftopickeys(self):\n return self.prefix + 'topickeys.txt'", "def getHelpTopic(self, topicName):\n if topicName == \"\":\n return self.defaultTopic\n if self.helpTopics.containsKey(topicName):\n return self.helpTopics.get(topicName)\n return None", "def get_topic_distribution_over_term(self):\n return self.topic_distribution_over_term", "def create_key_name(topic):\n\t\treturn utils.get_hash_key_name(topic)", "def subject(self):\n return self.properties.get(\"subject\", None)", "def name(self):\n return self.properties.get(\"Name\", None)", "def kafka_configuration_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"kafka_configuration_name\")", "def service_bus_topic_endpoint_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"service_bus_topic_endpoint_id\")", "def stream_name(self):\n return self._stream_name", "def stream_name(self):\n return self._stream_name", "def topic(self, msg):\n self.make_topic(msg, new_topic=msg.args[0])\n self.bot.log.info(\"Topic changed by \" + msg.user)", "def generate_node_topic(self, node, topic, type='commands'):\n return '/nodes/%s/%s/%s' % (node.node_id, type, topic)", "def broker_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"broker_name\")", "def broker_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"broker_name\")", "def parent(self):\r\n return self.topic", "def subject(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"subject\")", "def create_key_name(callback, topic):\n\t\treturn utils.get_hash_key_name(u'%s\\n%s' % (callback, topic))", "def getSubject(self):\r\n return self.msg[\"Subject\"]", "def get_publish_link(self):\n return self.get_link(PUBLISH_LINK_REL)", "def kafka_topic(self, kafka_topic: str):\n \n self._kafka_topic = kafka_topic" ]
[ "0.7873912", "0.77815044", "0.7672942", "0.7589836", "0.7427634", "0.73527247", "0.7141051", "0.7079695", "0.70687497", "0.70312816", "0.7003432", "0.6849306", "0.6837315", "0.6716", "0.66638047", "0.6615674", "0.65900433", "0.6494008", "0.6449045", "0.6389532", "0.6370327", "0.62724817", "0.6259532", "0.62492216", "0.6246504", "0.6154337", "0.6149471", "0.6116299", "0.6116299", "0.6116299", "0.6073897", "0.6057286", "0.60533625", "0.60525167", "0.6051903", "0.6051903", "0.60326946", "0.6011447", "0.59931505", "0.5986942", "0.59854", "0.5980873", "0.5970363", "0.59384674", "0.5927605", "0.5896577", "0.58940744", "0.5885851", "0.58725667", "0.5866937", "0.5835305", "0.5832149", "0.5826626", "0.58143127", "0.5811834", "0.57697505", "0.5759601", "0.5750718", "0.57457936", "0.57197964", "0.57132816", "0.5701456", "0.56873566", "0.56855696", "0.56665814", "0.5665969", "0.5665969", "0.56627274", "0.56398463", "0.5638578", "0.5634937", "0.561887", "0.56033814", "0.5601772", "0.5586128", "0.5580123", "0.5578936", "0.5577778", "0.55493057", "0.55493057", "0.5546531", "0.55167437", "0.5514569", "0.5497407", "0.54935884", "0.5471444", "0.54707825", "0.54549557", "0.54463667", "0.54463667", "0.54436123", "0.5432298", "0.54269254", "0.54269254", "0.5414371", "0.5398693", "0.5392046", "0.5383878", "0.5382191", "0.5372978" ]
0.8160925
0
Publish a ROS message
def publish(self, message):
        logger.info("Publishing to topic [{0}]: {1}".format(self._topic_name, message))
        self._executor.send(json.dumps({
            'op': 'publish',
            'id': 'publish:{0}:{1}'.format(self._topic_name, self._id),
            'topic': self._topic_name,
            'msg': message
        }))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def publish(self, message: str) -> None:", "def publish(self, data=None):\n rospy.loginfo(\"Message published on topic %s\", self.topic)", "async def publish(self, body, routing_key=None):\n pass # pragma: no cover", "def _publish(self, topic_name, message):\n msg = {\n 'op': 'publish',\n 'topic': topic_name,\n 'msg': message\n }\n json_msg = json.dumps(msg)\n self.ws.send(json_msg)", "def test_publish(self):\n target_arn = 'testing'\n supercuboid_key = 'acd123'\n message_id = '123456'\n receipt_handle = 'a1b2c3d4'\n message = serializer.encodeIngestMessage(supercuboid_key, message_id, receipt_handle)\n self.sns.publish(self.topic_arn, message)\n message = self.sns.subscribe(self.topic_arn)", "def publish(self, message: model.MQTTMessage):\n self.client.publish(message.topic, payload=message.get_payload())", "def command(self, msg):\n self.cmd_pub.publish(msg)", "def publish(self, data):\n # [START pubsub_quickstart_publisher]\n # [START pubsub_publish]\n # Data must be a bytestring\n logger.info(\"publishing message %s\" % data)\n data = data.encode('utf-8')\n self.publisher.publish(self.topic_path, data=data)\n\n logger.info('Published messages: {}'.format(data))\n # [END pubsub_quickstart_publisher]\n # [END pubsub_publish]", "def on_publish(client, userdata, mid):\n print(\"Message Published.\")", "def publish(self, topic_name, ros_message):\n # First check if we already advertised the topic\n d = self._advertise_dict\n for k in d:\n if d[k]['topic_name'] == topic_name:\n # Already advertised, do nothing\n break\n else:\n # Not advertised, so we advertise\n topic_type = ros_message._type\n self._advertise(topic_name, topic_type)\n # Converting ROS message to a dictionary thru YAML\n ros_message_as_dict = yaml.load(ros_message.__str__())\n # Publishing\n self._publish(topic_name, ros_message_as_dict)", "def publish(self, topic, msg):\n\t\tself.topic = topic\n\t\tself.msg = msg \n\t\tself.client.publish(self.topic, self.msg)", "def publish(self, message: None):\n response = self.client.publish(TopicArn=self.params['topic_arn'], Message=message)\n return response", "def publish(topic, message):\n if DEBUG:\n print(\"Publish: '\" + message + \"' (topic: '\" + topic + \"')\")\n DATA[\"client\"].publish(topic, message)", "def publish(node, payload, settings):\n entry = dict2node(payload)\n iq = build_iq(node, entry, settings)\n send_message(iq, settings)", "def publish(self, node, topic, data={}, on_publish=None, on_response=None):\n pass", "def publishEvent(eventName,publisher, msg):", "def publish():\n pass", "def publish_to_simulation(self, topic, message, **kwargs):\n pass", "def publish_messages(message):\n\n publisher = pubsub_v1.PublisherClient()\n topic_path = publisher.topic_path(PROJECT, TOPIC)\n\n message = message.encode('utf-8')\n publisher.publish(topic_path, data=message)\n\n print('Message published\\n')", "def _publish_msg(self, msg_type, data=None, metadata=None, buffers=None, **keys):\n data = {} if data is None else data\n metadata = {} if metadata is None else metadata\n content = json_clean(dict(data=data, comm_id=self.comm_id, **keys))\n msg = self.kernel_client.session.msg(msg_type, content=content, parent=self.parent_header, metadata=metadata)\n self.kernel_client.shell_channel.send(msg)", "def publish(self, message):\n pika_message = message.to_pika_message()\n self._channel.basic_publish(exchange='',\n routing_key=self.name,\n properties=pika_message.properties,\n body=message.body)", "def publish( self, topic, data, qos = 1, retain = False ):\n logging.info( 
\"Publishing to topic %s\" %topic )\n self.client.publish( topic, data, qos = qos, retain = retain )", "def _publish_msg(self, msg_type, data=None, metadata=None, buffers=None, **keys):\n data = {} if data is None else data\n metadata = {} if metadata is None else metadata\n content = json_clean(dict(data=data, comm_id=self.comm_id, **keys))\n self.kernel.session.send(\n self.kernel.iopub_socket,\n msg_type,\n content,\n metadata=json_clean(metadata),\n parent=self.kernel._parent_header,\n ident=self.topic,\n buffers=buffers,\n )", "def publish(self, message: str, message_id: int) -> None:\n payload: str = self._create_payload(message, message_id)\n max_payload_bytes = 268435455\n if size(payload) > max_payload_bytes:\n msg = Message.status_message('Message too large.')\n self.client.queue.put(msg)\n return\n return_value: mqtt.MQTTMessageInfo = self.client.publish(self.client.topic, payload, qos=2)\n if return_value.rc == 0: # Publication successful\n return\n else:\n raise SubscriptionError(f'MQTTMessageInfo error code: {return_value.rc}')", "def process(self, message):\n if self.debug:\n self.log(\"Publishing: \" + str(message.data))\n self.channel.basic.publish(\n AMQPMessage(str(message.data)),\n self.exchange, self.routing_key)", "def process(self, message):\n if self.debug:\n self.log(\"Publishing: \" + str(message.data))\n self.channel.basic.publish(\n AMQPMessage(str(message.data)),\n self.exchange, self.routing_key)", "def rosbridge_outgoing(self,message):\n\n message = json.loads(message)\n\n op=message.get(\"op\")\n if op == \"publish\":\n topic_name = message[\"topic\"]\n message[\"msg\"] = self.compress(topic_name,message.get(\"msg\"))\n message = self.serialize(topic_name,message)\n elif op == \"call_service\":\n message[\"service\"] = self.inv_remap_service(message[\"service\"])\n\n \n if isinstance(message, bytes):\n self.outgoing(message,isBinary=True,identifier=topic_name)\n elif isinstance(message,str):\n self.outgoing(message)\n else:\n message = json.dumps(message)\n self.outgoing(message)", "def publish(self, topic, msg):\n formatted_msg = json.dumps(msg)\n self.client.publish(topic, formatted_msg) # json converting cause of mqtt's data transfer limit.", "def publish(self, message: Union[SubmissionMessage, CommentMessage]) -> int:\n self.publisher.publish(self.topic, message.serialize().encode(\"utf-8\")).result()", "async def publish(self, body, routing_key=None):\n properties = pika.BasicProperties(\n app_id='example-publisher',\n content_type='application/json'\n )\n self.log.debug(\"Publish to %s:%s\", self.exchange,\n routing_key or self.routing_key)\n channel = await self._backend.channel('publish')\n try:\n channel.basic_publish(\n self.exchange,\n routing_key or self.routing_key or '',\n # pylint: disable=c-extension-no-member\n ujson.dumps(body, ensure_ascii=False),\n properties)\n except pika.exceptions.ChannelClosed: # pragma: no cover\n self.log.error(\n 'Message not delivered (%s): %s',\n routing_key, body\n )", "def publish(self, event):\n self.pubsub_router.send(event)", "def on_publish( client, userdata, mid ):\n logging.info( \"Data published successfully.\" )", "def sendMessage(self):\n #print('sendMessage\\r')\n self.pub.publish(Twist(linear=self.linearVector, angular=self.angularVector))", "async def publish(self, message):\n try:\n self.write('data: {}\\n\\n'.format(message))\n await self.flush()\n except StreamClosedError:\n self.finished = True", "def sendMessage(self):\n print(\"sendMessage\")\n self.pub.publish(Twist(linear=self.linearVector, 
angular=self.angularVector))", "def sendMessage(self):\n print('sendMessage')\n self.pub.publish(Twist(linear=self.linearVector, angular=self.angularVector))", "async def publish(self, msgDict):\n try:\n msgId = msgDict.get(\"id\", None)\n msgType = msgDict.get(\"type\", None)\n msgRetry = msgDict.get(\"retry\", None)\n if msgId:\n self.write('id: {}\\n'.format(msgId))\n if msgType:\n self.write('event: {}\\n'.format(msgType))\n if msgRetry:\n self.write('retry: {}\\n'.format(msgRetry))\n self.write('data: {}\\n\\n'.format(msgDict[\"data\"]))\n await self.flush()\n return True\n except StreamClosedError:\n return False", "def publish(self, message, routing_key, mandatory=True):\n\n log.debug(\"Publishing message via exchange %s: %r\", self, message)\n if self.internal:\n # Caught on the client side to prevent channel closure\n raise ValueError(\"cannot publish to internal exchange: '%s'!\" % self.name)\n\n raise gen.Return((yield self.__publish_method(\n self.name,\n routing_key,\n message.body,\n properties=message.properties,\n mandatory=mandatory)))", "def publish(self, topic, payload):\n complete_topic = \"{}/{}\".format(self._base_topic, topic)\n self._client.publish(complete_topic, payload, qos=2)\n logger.info(\"On topic %s published: %s\", complete_topic, payload)", "def publish(self, channel: str, message):\n raise TypeError(\"{} - publish not implemented!\")", "def publish(self, topic, value):\n msg = self.topics[topic]['msg']\n msg.data = value\n self.topics[topic]['publisher'].publish(msg)\n print(\"published \\t{} \\t{}\".format(topic, value))", "def publish(host, mqtt_port, rest_port, topic, payload):\n\n click.echo(\"Publishing the following message: \" + payload + \".\")\n service = Service(host, mqtt_port, rest_port)\n if service.mqtt_client.is_connected:\n if service.publish(topic, payload):\n click.secho(\"Message successfully published on topic: \" + topic +\n \".\", fg=\"green\", bold=True)\n else:\n click.secho(\"There was an error publishing this message!\",\n fg=\"red\", bold=True)\n else:\n click.secho(\"The client was unable to connect to the mqtt broker!\",\n fg=\"red\", bold=True)", "def publish(self, publisher):\n publisher._send(self.payload.event, self.info, *self.payload.args,\n **self.payload.kwargs)", "def publish(self, topic:str, data:bytes) -> None:\n\t\tself.mqttClient.publish(topic, data)", "def publish(self, message_body, routing_key, exchange=None):\n\n publish_exchange = exchange or self.producer.exchange\n\n self.producer.publish(\n body=message_body,\n exchange=publish_exchange,\n routing_key=routing_key,\n retry=settings.PUBLISH_RETRY,\n retry_policy={\n # First retry immediately,\n 'interval_start': settings.PUBLISH_RETRY_INTERVAL_START,\n # then increase by 2s for every retry.\n 'interval_step': settings.PUBLISH_RETRY_INTERVAL_STEP,\n # but don't exceed 30s between retries.\n 'interval_max': settings.PUBLISH_RETRY_INTERVAL_MAX,\n # give up after 30 tries.\n 'max_retries': settings.PUBLISH_RETRY_MAX_RETRIES,\n # callback for logging\n 'errback': self.on_publish_error,\n 'on_revive': self.on_connection_revival\n },\n # declare exchange and queue and bind them\n declare=list(self.queues.values())) # queues is a dict.\n log.info(f'Published '\n f'message: {self.producer.exchange.name}::{routing_key}')\n log.debug(f'Published '\n f'message_body: {message_body}')", "def sendNotification(self, message):\n if self.topicArn is None:\n print 'ERROR: Notification topic not set!'\n return\n\n publishResponse = self.snsClient.publish(\n 
TopicArn=self.topicArn,\n Message=message\n )", "def on_publish(mqttc, obj, mid):\n logger.debug(\"MQTT PUBLISH: mid: \" + str(mid))", "def publish(self, data, properties):\n logger.debug(\"Publisher: Sending a message to MQ...\")\n rqueue = Queue(\n properties['reply_to'],\n Exchange(\n properties[\"replyToExchange\"], 'direct',\n durable=True, no_declare=self.no_declare),\n routing_key=properties['reply_to'],\n no_declare=self.no_declare\n )\n if properties.get(\"encode\", True):\n rsp_body = (base64.b64encode(data.encode('utf-8'))).decode()\n else:\n rsp_body = (base64.b64encode(data)).decode() # raw data\n rsp_msg = {\n 'id': properties.get('id', None),\n 'headers': {\n 'Content-Type': properties.get(\n \"Content-Type\", \"application/*+json;version=31.0\" # default\n ),\n 'Content-Length': len(data)\n },\n 'statusCode': properties.get(\"statusCode\", 200),\n 'body': rsp_body\n }\n try:\n self.connection.Producer().publish(\n rsp_msg,\n correlation_id=properties['correlation_id'],\n routing_key=rqueue.routing_key,\n exchange=rqueue.exchange,\n retry = True,\n expiration = 10000\n )\n logger.info(\"Publisher: Response sent to MQ\")\n except ConnectionResetError:\n logger.error(\"Publisher: ConnectionResetError: message may be not sent...\")", "def send_notification(data):\n red = Redis(dd.REDIS_HOST, int(dd.REDIS_PORT))\n red.publish(\"all\", ['publish', data])", "def send_notification(data):\n red = Redis(dd.REDIS_HOST, int(dd.REDIS_PORT))\n red.publish(\"all\", ['publish', data])", "def publish(self, name, data, timeout=None):\n\n message = Message(name, data)\n\n if self.encrypted:\n message.encrypt(self.__cipher)\n\n if self.ably.options.use_text_protocol:\n request_body = message.as_json()\n else:\n request_body = message.as_thrift()\n\n path = '/channels/%s/publish' % self.__name\n headers = HttpUtils.default_post_headers(not self.ably.options.use_text_protocol)\n return self.ably.http.post(\n path,\n headers=headers,\n body=request_body,\n timeout=timeout\n ).json()", "def publish_messages(line): \n command = \"gcloud beta pubsub topics publish \"+ topic_name+\" --message \"+'\"'+str(line)+'\"'\n os.system(command)", "def kafka_publish_message(self, message):\n self.kf_sender = self.kf_producer.send(self.kf_topic, value=message.encode('utf-8'));", "def publish(self, payload, **kwargs):\n if self.opts.get(\"ipc_mode\", \"\") == \"tcp\":\n pull_uri = int(self.opts.get(\"tcp_master_publish_pull\", 4514))\n else:\n pull_uri = os.path.join(self.opts[\"sock_dir\"], \"publish_pull.ipc\")\n if not self.pub_sock:\n self.pub_sock = salt.utils.asynchronous.SyncWrapper(\n salt.transport.ipc.IPCMessageClient,\n (pull_uri,),\n loop_kwarg=\"io_loop\",\n )\n self.pub_sock.connect()\n self.pub_sock.send(payload)", "def publish(self, message: str) -> None:\n if __debug__:\n logger.warning(\n \"WARN: Unnecessary call on publish on FileDistroStream\"\n )", "def publish_message(producer_instance, topic_name, key, value):\n key_serializer = repr(key).encode()\n value_serializer = repr(value).encode()\n\n producer_instance.send(topic_name, key=key_serializer, value=value_serializer)\n producer_instance.flush()\n print('Message published successfully.')", "def publish(self, topic, content):\n # check if ACKed\n if not self.connack_rec:\n return 1\n\n # compose frame\n frame = Message.PublishFrame().compose(topic, content)\n\n # send frame\n self.send_q.put(frame.encode())", "def mqtt_publish(image):\n logging.debug('publishing image to mqtt broker topic %s', \n config['mqtt']['publish_topic'])\n 
mqtt_client.publish(config['mqtt']['publish_topic'], image)", "def publish_sensor_message(self):\n try:\n sensor_msg = EvoSensorMsg()\n if self.lidar_configuration[constants.LidarConfigurationKeys.USE_LIDAR]:\n self.lidar_buffer.get()\n self.camera_buffer.get()\n if self.lidar_configuration[constants.LidarConfigurationKeys.USE_LIDAR]:\n try:\n lidar_data = self.lidar_buffer.get_nowait()\n except Exception as ex:\n self.get_logger().error(f\"Error while reading lidar data: {ex}\")\n try:\n camera_images = self.camera_buffer.get_nowait()\n except Exception as ex:\n self.get_logger().error(f\"Error while reading camera data: {ex}\")\n\n sensor_msg.images = camera_images\n if self.lidar_configuration[constants.LidarConfigurationKeys.USE_LIDAR]:\n sensor_msg.lidar_data = lidar_data\n else:\n sensor_msg.lidar_data = \\\n [self.max_lidar_dist] * \\\n self.lidar_configuration[constants.LidarConfigurationKeys.NUM_LIDAR_VALUES]\n self.sensor_message_publisher.publish(sensor_msg)\n except Exception as ex:\n self.get_logger().error(f\"Error in publishing sensor message: {ex}\")", "def send_notification (event):\n Publisher.sendMessage (event)", "def publish(self, message, topic=''):\n if type(message) != types.ListType:\n message = [message]\n if topic:\n message = [topic] + message\n self.send(message)", "def on_publish(client: mqtt.Client, userdata: Any, mid: int) -> None:\n logging.info(f\"Successfully published a message: mid={mid}\")", "def publish(self):\n return", "def on_pubmsg(self, raw_msg, source, msg, **kwargs):", "def publish_mqtt(self, topic, data={}, on_publish=None, on_response=None, inject_rid=True):\n payload = data\n\n # If this is a dict and we're allowed to inject a request ID, do so\n # Injecting a request ID allows the nodes to respond and us to execute callbacks\n if (type(data) is dict) and inject_rid:\n data['rid'] = str(shortuuid.uuid())\n\n # JSON encode dicts, lists and stuff\n if type(data) in [dict, list, tuple]:\n payload = json.dumps(data)\n\n result, mid = self.mqtt.publish(topic, payload, qos=1)\n\n if on_publish:\n self.publish_callbacks[mid] = on_publish\n\n if on_response and data and data.get('rid', None):\n self.response_callbacks[data['rid']] = on_response\n\n self.publishes.append(mid)\n\n while mid in self.publishes:\n self.wait()", "def on_publish(client, userdata, mid):\n print('on_publish')\n print(\" userdata:\" + str(userdata))\n print(\" mid:\" + str(mid))\n print()", "def publish(self):\n msg_imu1, msg_mag1, msg_imu2, msg_mag2, msg_imu, msg_mag= self._create_msg()\n self.pub_imu.publish(msg_imu)\n self.pub_mag.publish(msg_mag)\n #------Uncomment to publish IMUs data separately------", "def publish_message(self):\n\n message_count = 0\n while message_count < self._messages:\n message_count += 1\n message_body = \"task number %i\" %(message_count)\n self._channel.basic_publish(exchange='',\n routing_key=self._queue_name,\n body=message_body,\n properties=pika.BasicProperties(\n delivery_mode=2 # make message persistant\n ))\n print(\"Published message %i\" %(message_count))\n time.sleep(self._message_interval)", "def produce(self, message):\n self.producer.send(self.topic, message)", "def create_publisher():\n pub = rospy.Publisher(\"/number\", Int64, queue_size=10)\n rospy.set_param(\"/number_publish_frequency\", 2)\n pub_freq = rospy.get_param(\"/number_publish_frequency\")\n rate = rospy.Rate(pub_freq)\n \n rospy.set_param(\"/number_to_publish\",3)\n number = rospy.get_param(\"/number_to_publish\")\n rospy.set_param(\"/try_param\", \"what's up\")\n \n 
while not rospy.is_shutdown():\n msg = Int64()\n msg.data = number\n pub.publish(msg)\n rate.sleep()", "def publish(self, name, **params):\n if self.__streaming:\n raise ImplementationError(\"Cannot publish event during stream.\")\n with self.stream() as publish:\n event = DTO(\n urn=\"%s:%s\" % (self.namespace, name.lower()),\n name=name,\n params=ImmutableDTO(params).as_dto(),\n version=self.__version,\n timestamp=sq.timezone.now()\n )\n publish(event)", "def pub(payload):\n print(payload)\n sys.stdout.flush()\n\n corr_id = pub.send_request(payload)\n r.lpushx(\"payload\", payload)\n\n\n while pub.queue[corr_id] is None:\n time.sleep(0.1)\n\n return pub.queue[corr_id]", "def on_reply(self, msg: str):\n self._logger.debug(f\"Got msg: {msg}\")\n self._rabbit_channel.basic_publish(exchange='', routing_key=QueueName.MSG_REPLY, body=str(msg))", "def publish_status(client):\n client.publish(config.topic_get, payload=getlight())", "def send_message(self, text):\n self.redis.publish('message_to_user', json.dumps((self.operator_token, text)))", "def on_publish(unused_client, unused_userdata, unused_mid):\n\tprint('on_publish')", "def publish(\n hass: HomeAssistant,\n topic: str,\n payload: PublishPayloadType,\n qos: int | None = 0,\n retain: bool | None = False,\n encoding: str | None = DEFAULT_ENCODING,\n) -> None:\n hass.add_job(async_publish, hass, topic, payload, qos, retain, encoding)", "def publish_messages(project_id, topic_id, message):\n # [START pubsub_quickstart_publisher]\n # [START pubsub_publish]\n from google.cloud import pubsub_v1\n\n # TODO(developer)\n #project_id = \"covid-canada-dashboard\"\n #topic_id = \"test_topic\"\n\n publisher = pubsub_v1.PublisherClient()\n # The `topic_path` method creates a fully qualified identifier\n # in the form `projects/{project_id}/topics/{topic_id}`\n topic_path = publisher.topic_path(project_id, topic_id)\n print(topic_path)\n\n #data = f\"Message number {n}\"\n #data = \"THIS WORKS! 
Awesome job Braveenth!\"\n data = message\n # Data must be a bytestring\n data = data.encode(\"utf-8\")\n # When you publish a message, the client returns a future.\n future = publisher.publish(topic_path, data)\n print(future.result())\n\n print(f\"Published messages to {topic_path}.\")\n # [END pubsub_quickstart_publisher]\n # [END pubsub_publish]", "def publish():\n pub = rospy.Publisher('/turtle/quality', Quality, queue_size=10)\n rospy.init_node('turtle_publisher')\n rate = rospy.Rate(5) # 5hz\n i = 0\n while not rospy.is_shutdown():\n msg = Quality()\n i += 1\n\tmsg.index = i\n\tmsg.value = random.randint(1,10)\n pub.publish(msg)\n rate.sleep()", "def publish(self, path, document, content_type='text/plain'):\n path = ensure_slash(path)\n self.send_request('__pub', document, {\n 'path': path,\n 'type': content_type\n })\n return self.uri(path)", "def __answer(self, msg: str):\n self.channel.basic_publish(\n exchange='main', routing_key='answer', body=msg)", "def _publish(self, data):\n json_data = json.dumps(data)\n self._udp_socket.sendto(json_data, (self._hsflowd_addr, self._hsflowd_port))", "def send(self):\n if self._stopping:\n return\n\n mytype = 'text/plain'\n\n try:\n if isinstance(json.loads(self.message),dict):\n mytype = 'application/json'\n except (TypeError,json.JSONDecodeError):\n if (isinstance(self.message,dict)):\n mytype = 'application/json'\n self.message = json.dumps(self.message)\n else:\n self.message = str(self.message)\n\n properties = pika.BasicProperties(app_id='sender',\n content_type=mytype)\n\n self._channel.basic_publish(self.exchange, self.routing_key, self.message, properties)\n self._message_number += 1\n self._deliveries.append(self._message_number)\n self.logger.info('published message # %i', self._message_number)", "def send_message(msg, exchange, key=None):\n print(msg)\n connection = pika.BlockingConnection(pika.ConnectionParameters(host='rabbitmq'))\n channel = connection.channel()\n exchange_type = 'direct' if exchange == 'other' else 'topic'\n channel.exchange_declare(exchange=exchange, exchange_type=exchange_type)\n if key is not None and exchange == 'logs':\n routing_key = f'scheduler.{key}'\n else:\n routing_key = ''\n channel.basic_publish(exchange=exchange, routing_key=routing_key, body=msg)\n connection.close()", "def do_pocs_message(self, cmd):\n try:\n self.msg_publisher.send_message('POCS-SHELL', cmd)\n except AttributeError:\n print_info('Messaging not started')", "def publish(self, topic, message, subject=None):\r\n params = {'ContentType' : 'JSON',\r\n 'TopicArn' : topic,\r\n 'Message' : message}\r\n if subject:\r\n params['Subject'] = subject\r\n response = self.make_request('Publish', params, '/', 'GET')\r\n body = response.read()\r\n if response.status == 200:\r\n return json.loads(body)\r\n else:\r\n boto.log.error('%s %s' % (response.status, response.reason))\r\n boto.log.error('%s' % body)\r\n raise self.ResponseError(response.status, response.reason, body)", "def writer():\n \n # initiate a publisher node called \"writer_node\"\n # anonymous=True ensure every node is unique by adding random to end of a name \n rospy.init_node('writer_node', anonymous=True)\n\n # ensures the publisher publish to topic \"/print_topic\" with message type \"String\"\n # queue_size is the limit to the number of messages in queued messages \n pub = rospy.Publisher('/print_topic', String, queue_size=10)\n\n # looping at a desired rate, the number of times per second to go through the while loop\n rate = rospy.Rate(1)\n \n # ensure the rospy is 
running until the program is shutdown \n while not rospy.is_shutdown():\n \n writer_str = \"Hello world, this is the message published %s\" % rospy.get_time()\n \n\t# Print message to screen, write message to node log file\n\t# write message to rosout\n rospy.loginfo(writer_str)\n\n\t# publish \"writer_str\" to topic \"/print_topic\"\n pub.publish(writer_str)\n\n # ensures the loop sleep for some seconds just like time.sleep \n rate.sleep()", "def publish_receive(message):\n topic, content = message # \"femag_log\" + text\n # topics: femag_log, progress, file_modified,\n # model_image, calc_image, field_image, babs_image, demag_image, color_scale\n if topic == 'femag_log' or topic == 'progress':\n logger.info(\"%s: %s\", topic, content.strip())\n else:\n logger.info('%s: len %d', topic, len(content.strip()))", "def publish(self, kpi_dict):\n pass", "def publish_to_bus(\n context: Any, payload: dict, extra_attributes: dict = {}, subject: Union[str, None] = None\n) -> int:\n message_bus_arn = os.environ['MESSAGE_BUS_ARN']\n return publish(message_bus_arn, context, payload, extra_attributes, subject)", "async def publish(self, topic: str, *args: aiowamp.WAMPType,\n kwargs: aiowamp.WAMPDict = None,\n acknowledge: bool = None,\n blackwhitelist: aiowamp.BlackWhiteList = None,\n exclude_me: bool = None,\n disclose_me: bool = None,\n resource_key: str = None,\n options: aiowamp.WAMPDict = None) -> None:\n ...", "def publish_messages(self, project_id, topic_name, orderNumber):\r\n\r\n publisher = pubsub_v1.PublisherClient()\r\n topic_path = publisher.topic_path(project_id, topic_name)\r\n\r\n data = u'Message: {}'.format('test')\r\n # Data must be a bytestring\r\n data = data.encode('utf-8')\r\n # Add attribute to the message\r\n future = publisher.publish(\r\n topic_path, data, OrderNumber=orderNumber)\r\n print(future.result())\r\n\r\n print('Published messages with custom attributes.')", "def publish(self, channel: str, content: str) -> None:\n print(f\"{self._name} publishes message '{content}' to \"\n f\"channel-[{channel}]\")\n self._server.route(channel, content)", "def publishCmd(self, cmd):\n cmd_to_publish = Twist()\n cmd_to_publish.linear.x = cmd[0]\n cmd_to_publish.angular.z = cmd[1]\n self.cmd_pub.publish(cmd_to_publish)", "def test_publish1(self):\n publish = self.queue.publish(TEST_QUEUE, 'this is a test msg')\n assert publish", "def publish_message(message: str, broker_ip: str, exchange_name: str, exchange_type: str):\n connection = pika.BlockingConnection(\n pika.ConnectionParameters(host=broker_ip))\n channel = connection.channel()\n channel.exchange_declare(exchange=exchange_name, exchange_type=exchange_type, durable=True)\n channel.basic_publish(exchange=exchange_name, routing_key='', body=message)\n print(f'Published {message} to the exchange')\n connection.close()", "def publish(self):\n data = self.read_all_values()\n logger.info(data)\n if self.mqtt:\n self.mqtt.publish_json(data)", "def publish(self, id: uplink.Path):\n pass", "def trace_callback(msg):\n # Construct topic\n msg_topic = 'modbus/msg/trace/{}/{}/{}'.format(node_id, msg.address, msg.function)\n # Send message as JSON\n logging.debug('Publishing message on {}, address={}, function={}'.format(msg_topic, msg.address, msg.function))\n client.publish(topic = msg_topic, payload = msg.to_JSON())", "def encode(rosMsg): #@NoSelf" ]
[ "0.79041046", "0.7305027", "0.7107372", "0.7052109", "0.7042832", "0.70376146", "0.69866174", "0.69642437", "0.69358927", "0.69347024", "0.6902832", "0.68914086", "0.6865876", "0.68259144", "0.67583984", "0.6745768", "0.674358", "0.67271394", "0.6675353", "0.6673596", "0.6660116", "0.66282445", "0.6610533", "0.6571911", "0.6560032", "0.6560032", "0.6549331", "0.6489394", "0.6461715", "0.6433216", "0.6421332", "0.6413372", "0.64122546", "0.63807577", "0.6349249", "0.6337618", "0.6319858", "0.62945515", "0.6270091", "0.62679297", "0.62552524", "0.62482345", "0.62300354", "0.61649436", "0.61573535", "0.6139316", "0.6127186", "0.61271137", "0.61224014", "0.61224014", "0.61127937", "0.6111858", "0.61056864", "0.6087809", "0.60877", "0.6070043", "0.6063076", "0.60607207", "0.6054646", "0.6042055", "0.6023209", "0.5995852", "0.59836966", "0.5969316", "0.59677297", "0.5951271", "0.5948842", "0.59343535", "0.5925827", "0.5920326", "0.59174323", "0.59040475", "0.5893906", "0.5891163", "0.5886309", "0.5884667", "0.58845013", "0.58837366", "0.5869236", "0.58468556", "0.5840976", "0.58284175", "0.582526", "0.5824215", "0.5811826", "0.5803206", "0.5801706", "0.57928824", "0.5790006", "0.57842374", "0.57824624", "0.5765357", "0.57574594", "0.5750492", "0.57439506", "0.5741969", "0.57365865", "0.57193834", "0.5719277", "0.5718109" ]
0.7000611
6
Reduce the usage of the publisher. If the usage is 0, unadvertise this topic.
def unregister(self): self._executor.unregister_publisher(self)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _remove_pub(pub):\n # counting publisher instance per topic name\n TopicBack.pub_instance_count[pub.name] -= 1\n\n # Be aware of https://github.com/ros/ros_comm/issues/111\n return pub.unregister()", "def suppress_topics ( *topics ) :\n if topics and 1 == len( topics ) :\n t = str ( topics [ 0 ] ).lower()\n if 'config' == t : return suppress_topics() \n\n if not topics :\n newtopics = [] \n import ostap.core.config as CONFIG\n if 'RooFit' in CONFIG.config :\n import string\n ws = string.whitespace \n node = CONFIG.config [ 'RooFit' ]\n data = node.get('RemoveTopics','(,)' )\n topics = tuple ( i.strip ( ws ) for i in data.split ( ',' ) if i.strip ( ws ) ) \n \n if topics : \n svc = ROOT.RooMsgService.instance()\n svc.saveState () \n topic = msg_topic ( *topics ) \n num = svc.numStreams()\n for i in range ( num ) : ok = Ostap.Utils.remove_topic ( i , topic )", "def publisher():\r\n pub = rospy.Publisher('tibbling', Int16, queue_size=1)\r\n rospy.init_node('publisher', anonymous = True)\r\n rate = rospy.Rate(20) #Sets rate at 20 Hz\r\n k = 0 #Number to be sent\r\n n = 4 #The increment for k with each loop\r\n\r\n while not rospy.is_shutdown():\r\n k += n #After 20 ms, send k + n to topic\r\n rospy.loginfo(k) #Used for troubleshooting\r\n pub.publish(k)\r\n rate.sleep()", "def starup(self, sender, **kwargs):\n self._initialize_devices()\n for device_topic in device_topic_dict:\n _log.debug('Subscribing to ' + device_topic)\n self.vip.pubsub.subscribe(peer='pubsub',\n prefix=device_topic,\n callback=self.on_analysis_message)", "def unregister_publisher(self, hostname):", "def _remove_sub(sub):\n # counting publisher instance per topic name\n TopicBack.sub_instance_count[sub.name] -= 1\n\n # Be aware of https://github.com/ros/ros_comm/issues/111\n return sub.unregister()", "def set_qos(self, on_ok):\n self._channel.basic_qos(\n prefetch_count=self._prefetch_count, callback=on_ok)", "def _update_usage(self, usage, write_weights):\n with torch.no_grad():\n write_weights = 1 - torch.prod(1 - write_weights, 1)\n updated_usage = usage + (1 - usage) * write_weights\n return updated_usage", "def reset(self):\n self._topics.clear()", "def on_subscribe( client, userdata, mid, granted_qos ):\n logging.info( \"Topic successfully subcribed with QoS: %s\" %granted_qos )", "def purge(self):\n self.remaining = 0", "def cleanup(self):\n # Removing the ROS system wide advert about which topic are interfaced with this process\n # TODO : lock this for concurrent access\n if_topics = rospy.get_param('~' + TopicBack.IF_TOPIC_PARAM, [])\n if_topics.remove(self.fullname)\n rospy.set_param('~' + TopicBack.IF_TOPIC_PARAM, if_topics)\n\n # cleanup pub and sub, so we can go through another create / remove cycle properly\n self._remove_pub(self.pub)\n self._remove_sub(self.sub)", "def unpublishService(self, name):\n self.published[name].Reset()\n del self.published[name]", "def suppress(self, t, w=None):\n return super(SmartCentroidPublisher, self).suppress(t, w)", "def onUnsubscribed(self, connection:MQTTConnection, topic:str) -> bool:\n\t\tconnection.subscribedCount -= 1\n\t\treturn True", "async def throttle_pubs(self):\n while True:\n ch, msg = await self._pub_throttle.get() # Blocks until we get an item\n resp = await self._publish_to_channel(ch, msg)\n print(resp)\n print(\"[ *] Published to channel {ch} message: \\n{msg}\\n\".format(ch=ch, msg=msg))\n if resp:\n await asyncio.sleep(self.pub_rate)", "def remove_topic ( topics , level = ROOT.RooFit.INFO , stream = -1 ) :\n return Ostap.Utils.RemoveTopic ( 
topics , level , stream )", "def _registerPublisher(self, callerId, topic, topicType, callerApi):\n if topic not in self.FilterPublishedTopic:\n self.__docWriter.addPub(callerId, topic, topicType)", "async def publish(self, topic: str, *args: aiowamp.WAMPType,\n kwargs: aiowamp.WAMPDict = None,\n acknowledge: bool = None,\n blackwhitelist: aiowamp.BlackWhiteList = None,\n exclude_me: bool = None,\n disclose_me: bool = None,\n resource_key: str = None,\n options: aiowamp.WAMPDict = None) -> None:\n ...", "def unsubscribe_values(self, req):\n \n rospy.loginfo(\"Unsubscribing values for \" + str(req.component) +\" \" + str(req.field))\n \n resp = UnsubscribeValuesResponse()\n resp.success = False\n \n if (req.component, req.field, req.datatype) in self.publishers.keys():\n rospy.loginfo(\"Removing publisher thread for \" + str((req.component, req.field)) + \"...\")\n t = self.publishers[(req.component, req.field, req.datatype)]\n if t.running:\n t.stop()\n \n timeout = 0\n while t.running and timeout <=5:\n time.sleep(1) #waiting\n timeout += 1\n if not t.running:\n t.join()\n with self.lock: \n del self.publishers[req.component, req.field, req.datatype]\n resp.success = True\n self.set_max_rate()\n rospy.loginfo(\"..done!\")\n else:\n rospy.logerr(\"Something went wrong, publisher not removed\")\n else:\n rospy.loginfo(\"publisher does not exist, nothing to delete...\")\n return resp", "def _create_pub(name, rostype, *args, **kwargs):\n # counting publisher instance per topic name\n if name in TopicBack.pub_instance_count.keys():\n TopicBack.pub_instance_count[name] += 1\n else:\n TopicBack.pub_instance_count[name] = 1\n\n return rospy.Publisher(name, rostype, *args, **kwargs)", "def suppression_article_ref(self, reference) :\n if self.get_Ref == reference:\n del self\n print(\" article supprimer\")", "def get_document_distribution_over_topic(self):\n return self.document_distribution_over_topic", "def maybe_notify_lessee(request, response):\n if request.get('pubsub_topic'):\n pubsub.publish(\n pubsub.full_topic_name(\n request['pubsub_project'], request['pubsub_topic']),\n json.dumps(response),\n {},\n )\n metrics.pubsub_messages_sent.increment(fields={'target': 'lessee'})", "def create_publisher():\n pub = rospy.Publisher(\"/number\", Int64, queue_size=10)\n rospy.set_param(\"/number_publish_frequency\", 2)\n pub_freq = rospy.get_param(\"/number_publish_frequency\")\n rate = rospy.Rate(pub_freq)\n \n rospy.set_param(\"/number_to_publish\",3)\n number = rospy.get_param(\"/number_to_publish\")\n rospy.set_param(\"/try_param\", \"what's up\")\n \n while not rospy.is_shutdown():\n msg = Int64()\n msg.data = number\n pub.publish(msg)\n rate.sleep()", "def _advertise(self, topic_name, topic_type):\n new_uuid = str(uuid4())\n self._advertise_dict[new_uuid] = {'topic_name': topic_name,\n 'topic_type': topic_type}\n advertise_msg = {\"op\": \"advertise\",\n \"id\": new_uuid,\n \"topic\": topic_name,\n \"type\": topic_type\n }\n self.ws.send(json.dumps(advertise_msg))\n return new_uuid", "def test_normal_pub_sub(self):\n # Create bus and foo topic\n bus = pubsub.PubSubBus()\n bus.create_topic('foo')\n\n # Create two initial subscribers not subscribed to any topics yet\n exp_data = {'payload': 'test'}\n foo_sub = SingleSubscriber(self, bus, 'foo', exp_data)\n bar_sub = SingleSubscriber(self, bus, 'bar', 5)\n self.assertEquals(foo_sub.num_cb, 0)\n self.assertEquals(bar_sub.num_cb, 0)\n\n # Publish on topic foo and ensure that no subscribers see it (since\n # nothing is subscribed yet)\n 
bus.publish('foo', exp_data)\n self.assertEquals(foo_sub.num_cb, 0)\n self.assertEquals(bar_sub.num_cb, 0)\n\n # Have both subscribers subscribe to topics foo and bar, respectively\n foo_sub.subscribe_good()\n bar_sub.subscribe_good()\n\n # Publish on topic foo again and ensure that only the foo subscriber\n # saw it\n bus.publish('foo', exp_data)\n self.assertEquals(foo_sub.num_cb, 1)\n self.assertEquals(bar_sub.num_cb, 0)\n\n # Create a new foo subscriber and immediately subscribe\n foo_sub2 = SingleSubscriber(self, bus, 'foo', exp_data)\n foo_sub2.subscribe_good()\n self.assertEquals(foo_sub2.num_cb, 0)\n\n # Publish on topic foo again and make sure that the first foo sub got\n # both messages (this and last) and the second subscriber only received\n # the message just published\n bus.publish('foo', exp_data)\n self.assertEquals(foo_sub.num_cb, 2)\n self.assertEquals(bar_sub.num_cb, 0)\n self.assertEquals(foo_sub2.num_cb, 1)\n\n # Publish on topic bar and make sure that only the bar subscriber\n # received it\n bus.publish('bar', 5)\n self.assertEquals(foo_sub.num_cb, 2)\n self.assertEquals(bar_sub.num_cb, 1)\n self.assertEquals(foo_sub2.num_cb, 1)", "def publish():\n rating = int(ratings[sorted(ratings, reverse=True)[0]])\n global pub_count\n print \"Publishing rating: \", rating\n print \"Pub count\" , pub_count\n if (pub_count == flag_count):\n print \"publish count == flag_count\", pub_count\n data = { 'Rating' : rating*6} # rating for final value\n global flag\n flag = False# to end the program\n else:\n data = { 'Rating' : rating}\n deviceCli.publishEvent(\"Rating\", \"json\", data)\n pub_count += 1", "def _async_unsubscribe(self, topic: str) -> None:\n if self._is_active_subscription(topic):\n if self._max_qos[topic] == 0:\n return\n subs = self._matching_subscriptions(topic)\n self._max_qos[topic] = max(sub.qos for sub in subs)\n # Other subscriptions on topic remaining - don't unsubscribe.\n return\n if topic in self._max_qos:\n del self._max_qos[topic]\n if topic in self._pending_subscriptions:\n # Avoid any pending subscription to be executed\n del self._pending_subscriptions[topic]\n\n self._pending_unsubscribes.add(topic)\n self._unsubscribe_debouncer.async_schedule()", "def leak(self, value):\n\t\t\n\t\t#Charge buffer(bucket)\n\t\tif self.buff + value < self.BUCKET_SIZE:\n\t\t\tself.buff += value\n\t\t\tself.queue_package.put(value)\n\t\t#When buffer is full - \n\t\t#started sending packages under rate via Thread\n\t\telif not self.rate_thread.isAlive():\n\t\t\tself.rate_thread.start()", "def killSubscribers(self):\n if (self.off_TB1_Viewer.isChecked()):\n pass", "def publisher(self, publisher):\n self._publisher = publisher", "def suppresses(self, other_describer):\n return False", "async def create_unconsumed_topics():\n # ################################################ #\n # TODO: remove these once there is someone consuming the topics\n unconsumed_topics = ['dummy']\n\n logger.warning(\n f'Creating topics on the publisher: {unconsumed_topics} due to lack of consumers. '\n 'Remove them once there are consumers'\n )\n for topic in unconsumed_topics:\n await kafka.topic(topic).maybe_declare()\n\n # ################################################ #", "def count_topic_dist(self):\n if len(self.representants) == 0:\n self.log_writer(\"Representants not set. 
Cannot make topic dist.\")\n return\n for key, value in self.representants.items():\n self.topic_distributions.append(len(value)/len(self.training_docs))\n self.topic_numbers.append(key)", "def publish( self, topic, data, qos = 1, retain = False ):\n logging.info( \"Publishing to topic %s\" %topic )\n self.client.publish( topic, data, qos = qos, retain = retain )", "def publish(self, node, topic, data={}, on_publish=None, on_response=None):\n pass", "def on_publish(unused_client, unused_userdata, unused_mid):\n\tprint('on_publish')", "def reduceTTL(self):\n self.TTL -= 1\n return self.TTL <= 0", "def test_topic_reduction(reduced_topics):\n base_bertopic = BERTopic(bert_model='distilbert-base-nli-mean-tokens', verbose=False)\n nr_topics = reduced_topics + 2\n base_bertopic.nr_topics = reduced_topics\n old_documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n base_bertopic._update_topic_size(old_documents)\n c_tf_idf = base_bertopic._extract_topics(old_documents.copy(), topic_reduction=True)\n old_freq = base_bertopic.get_topics_freq()\n\n new_documents = base_bertopic._reduce_topics(old_documents.copy(), c_tf_idf)\n new_freq = base_bertopic.get_topics_freq()\n\n assert old_freq.Count.sum() == new_freq.Count.sum()\n assert len(old_freq.Topic.unique()) == len(old_freq)\n assert len(new_freq.Topic.unique()) == len(new_freq)\n assert isinstance(base_bertopic.mapped_topics, dict)\n assert not set(base_bertopic.get_topics_freq().Topic).difference(set(new_documents.Topic))\n assert base_bertopic.mapped_topics", "def fast_publish(self, request):\n self.__connection.fast_publish(request)", "def unregisterProducer():", "def on_next(self, msg):\n # publish the message to the topics\n retain = msg.retain if hasattr(msg, 'retain') else False\n for (topic, qos) in self.topics:\n self.client.publish(topic, msg, qos, retain)", "def on_subscribe(self, client, userdata, mid, granted_qos):\n\t\tprint (\"[{}] Client subscribed to {}\".format(\n\t\t\tint(time.time()),\n\t\t\tself.topic\n\t\t))\n\t\t#the following lines are here and not in on_connect() only for printing purpose\n\t\tif not self.printed_sub:\n\t\t\tself.printed_sub = True\n\t\t\tself.subscribe(\"measure/people\")", "def output_topic_callback(self, msg):\n with self.callback_lock:\n if self._time_received_input != 0:\n # Get actual time from ROS\n time_now = self.node.get_clock().now().nanoseconds\n\n # Compute the amount of time elapsed from receiving the last\n # message in the input topic\n measure = time_now - self._time_received_input\n\n # Transform from nanoseconds to milliseconds\n measure = measure / (1000 * 1000)\n\n publish_msg = Int64()\n publish_msg.data = int(measure)\n\n # Publish the measurement\n self._publisher.publish(publish_msg)\n\n self._time_received_input = 0", "def useWater(self, used):\n self.amount = max(0, self.amount - used)", "def __producer__(self):\n import time\n i = 0\n while True:\n self.publish( i )\n i += 1\n time.sleep(1)", "def set_subscription(self, value):\n self.pub_socket.setsockopt(zmq.SUBSCRIBE, value)", "def __init__(self,sub_topic=\"\",pub_topic=\"\",data_type=None,tag=\"\",alt_type=None):\n self.sub_topic=sub_topic;\n self.pub_topic=pub_topic;\n self.data_type=data_type;\n self.alt_type=alt_type;\n self.tag=tag;\n self.subscriber=rospy.Subscriber(self.sub_topic+self.tag,self.data_type, self.callback_function,queue_size=20);\n self.message_publisher=None;", "def publish():\n pass", "def 
test_topic_reduction(reduced_topics):\n model = BERTopic()\n nr_topics = reduced_topics + 2\n model.nr_topics = reduced_topics\n old_documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n model._update_topic_size(old_documents)\n model._extract_topics(old_documents.copy())\n old_freq = model.get_topic_freq()\n\n new_documents = model._reduce_topics(old_documents.copy())\n new_freq = model.get_topic_freq()\n\n assert old_freq.Count.sum() == new_freq.Count.sum()\n assert len(old_freq.Topic.unique()) == len(old_freq)\n assert len(new_freq.Topic.unique()) == len(new_freq)\n assert isinstance(model.mapped_topics, dict)\n assert not set(model.get_topic_freq().Topic).difference(set(new_documents.Topic))\n assert model.mapped_topics", "def wipe_all_topics(self):\n # doc_count = self.posts_read.find({'subreddit':self.subreddit, 'postwise.topic_assignment':{'$exists':True}}).count()\n doc_count = self.posts_write.update({'subreddit':self.subreddit, 'postwise.topic_assignment':{'$exists':True}},\n {'$unset':{'postwise.topic_distro':True,'postwise.topic_assignment':True}}, multi=True)\n\n print 'wiped topics from %i documents' % doc_count['nModified']", "def desubscribe(self, subscription):\n try:\n if isinstance(subscription, Subscription):\n sub = DeSubscribe(subscription, self.__pool, self.myAddress)\n self.send(self.__pool, sub)\n except Exception:\n handle_actor_system_fail()", "def metrics_publisher(self):\n return self._metrics_publisher", "def _get_multicastWeight(self):\n return self.__multicastWeight", "def publish_goal(self):\n goal_publisher = rospy.Publisher(self.goal_distance_topic, Float64, queue_size=10)\n while not rospy.is_shutdown() and (goal_publisher.get_num_connections() == 0):\n rospy.sleep(1)\n msg = Float64()\n msg.data = self.distance\n goal_publisher.publish(msg)", "def __init__(self, topic, address, group):\n super().__init__(\n topic=topic,\n address=address,\n group=group,\n message_type=MessageFlagType.ALL_LINK_CLEANUP,\n )", "def test_normal_sub_pub(self):\n # Create bus\n bus = pubsub.PubSubBus()\n\n # Create two initial subscribers not subscribed to any topics yet\n exp_data = {'payload': 'test'}\n foo_sub = SingleSubscriber(self, bus, 'foo', exp_data)\n bar_sub = SingleSubscriber(self, bus, 'bar', 5)\n self.assertEquals(foo_sub.num_cb, 0)\n self.assertEquals(bar_sub.num_cb, 0)\n\n foo_sub.subscribe_good()\n bar_sub.subscribe_good()\n\n bus.publish('foo', exp_data)\n self.assertEquals(foo_sub.num_cb, 1)\n self.assertEquals(bar_sub.num_cb, 0)", "def publisher(self, publisher):\r\n return publishers.Publisher(self, publisher)", "def purge() -> None:\r\n _purge_func(False)", "def set_publisher (self, publisher):\n self.publisher = publisher", "def publish_and_wait(self, node, topic, data={}):\n pass", "def dpub(self, topic, delay_ms, msg, callback=None):\n return self._pub('dpub', topic, msg, delay_ms, callback=callback)", "def get_topic_distribution_over_term(self):\n return self.topic_distribution_over_term", "def bumperCallback(self,data):\n if data.state == 1 :\n rospy.loginfo(\"Bumper enfonce\") \n self.soundPub.publish(0)", "def truncate(x, y):\n if comm.get().get_world_size() != 3:\n raise NotImplementedError(\n \"RSS truncation is only implemented for world_size == 3.\"\n )\n\n rank = x.rank\n\n if rank == 0:\n x.share = x.share.div(y, rounding_mode=\"trunc\")\n elif rank == 1:\n x2 = comm.get().recv(x.share, 2)\n x.share = x.share.add(x2).div(y, 
rounding_mode=\"trunc\")\n elif rank == 2:\n comm.get().send(x.share, 1)\n x.share -= x.share\n\n # Add PRZS - this takes the place of r\n x.share += x.PRZS(x.size(), device=x.device).share\n\n return x", "async def qos(\n self,\n prefetch_size: int = 0,\n prefetch_count: int = 0,\n connection_global: bool = False,\n ):\n await self.channel.basic_qos(\n prefetch_size=prefetch_size,\n prefetch_count=prefetch_count,\n connection_global=connection_global,\n )", "def louder(self):\n self._prepare()\n vol = self._eng.getProperty(\"volume\")\n newvol = vol + 0.25\n logging.debug(\"louder %f => %f\" %(vol, newvol))\n self._eng.setProperty(\"volume\", newvol)\n self._eng.runAndWait()\n self.say(\"louder\")", "def delete_topic():\n return dict()", "def total_volume(self):\n del self._total_volume", "def publish():\n while True:\n mqttClient.reconnect()\n\n energy_data = getEnergyUsage()\n wats = float(energy_data['power_mw']) / 1000\n wat_hours = float(energy_data['total_wh'])\n\n sentPayload(name=\"power\", site=\"bathroom\", value=wats)\n sentPayload(name=\"energy_total\", site=\"bathroom\", value=wat_hours)\n\n time.sleep(updateInterval)", "def enable_subscription():\n client = KConsumer(config=subscriber_config)\n counter = 0\n while 1:\n data = client.consume()\n if data:\n print(\"Received Data\", counter)\n class_label = inference_on_data(data.value)\n publish_response(class_label)", "def oversampling(self):\n return self._oversampling", "def unsubscribe(self, tag):\n self.socket.setsockopt(constants.UNSUBSCRIBE, tag)", "def unregisterEvent(eventName, publisher):", "def _off_received(self):\n self._call_subscribers(on_level=0x00)\n publish_topic(self._off_subscriber_topic, on_level=0x00)", "def _off_received(self):\n self._call_subscribers(on_level=0x00)\n publish_topic(self._off_subscriber_topic, on_level=0x00)", "def send_free_space(self, distance):\n self.client.publish('free_space', str(distance))", "def publisher(self, iTag, msgType, addr):\r\n return ROSPublisher(self, iTag, msgType, addr)", "def deregister_event(self, event_type):\n return len(self._topics.pop(event_type, []))", "def nack(self, visibility_timeout_seconds=None):\n self.consumer.nack(self, visibility_timeout_seconds)", "def test_publish(self):\n target_arn = 'testing'\n supercuboid_key = 'acd123'\n message_id = '123456'\n receipt_handle = 'a1b2c3d4'\n message = serializer.encodeIngestMessage(supercuboid_key, message_id, receipt_handle)\n self.sns.publish(self.topic_arn, message)\n message = self.sns.subscribe(self.topic_arn)", "def on_unsubscribe(self, mqtt_client, userdata, mid ):\n logging.debug(\"DEBUG - unsubscribe ack received\")", "def cleartopics(self):\n\n # Clear previous topics, if any\n if self.topics:\n for uid in self.scan():\n self.removeattribute(uid, \"topic\")\n self.removeattribute(uid, \"topicrank\")\n\n if self.categories:\n self.removeattribute(uid, \"category\")\n\n self.topics, self.categories = None, None", "def test_enable_retainunsent_default_age():\n clean_tables()\n set_configuration()\n config = update_configuration(age=72, retain_unsent=True) \n assert config['age']['value'] == \"72\"\n assert config['retainUnsent']['value'] == \"True\" \n\n insert_into_reading()\n row_count = get_count() \n min_id, max_id = min_max_id() \n update_last_object(min_id=min_id, max_id=max_id)\n \n total_purged, unsent_purged = purge(config, _READING_TABLE)\n log = get_log() \n\n assert total_purged == 0\n assert total_purged == log['rowsRemoved']\n assert unsent_purged == 0 \n assert unsent_purged == 
log['unsentRowsRemoved'] \n assert log['failedRemovals'] == 0 \n assert log['rowsRemaining'] == row_count - total_purged \n clean_tables()", "def unsubscribe(receiver):", "def unsubscribe(receiver):", "def unsubscribe(receiver):", "def unsubscribe(receiver):", "def unsubscribe(receiver):", "def surge_purge(self):\n\n # Sends all packets!\n for x in self.packet_pool:\n self.accept(x)\n\n pool_len = len(self.packet_pool)\n\n self.print_clear()\n self.print(\"[!] Packets sent: {} - Surge Interval: {:.2f}s\".format(pool_len, self.collection_period), end='\\r')\n\n self.packet_pool = []\n self.start_purge_monitor()", "def depart_topic(self, node):\n\n return BaseTranslator.depart_admonition(self, node)", "def purge_req(req):\n if req.key in best:\n del best[req.key]\n [dependency.remove_from_ws(w, req._chosen_dist)\n for w in (ws, setup_dists) if req._chosen_dist in w]", "def terminate_publisher(self, log_path):\n if log_path in self.publishers:\n self.publishers[log_path].terminate()", "def enable_purging() -> None:\r\n global _purge, _purge_timer\r\n if not purging():\r\n _purge = True\r\n _purge_timer = threading.Timer(5.0, _purge_func)\r\n _purge_timer.start()", "def publish():\n pub = rospy.Publisher('/turtle/quality', Quality, queue_size=10)\n rospy.init_node('turtle_publisher')\n rate = rospy.Rate(5) # 5hz\n i = 0\n while not rospy.is_shutdown():\n msg = Quality()\n i += 1\n\tmsg.index = i\n\tmsg.value = random.randint(1,10)\n pub.publish(msg)\n rate.sleep()", "def _registerSubscriber(self, callerId, topic, topicType, callerApi):\n if topic not in self.FilterSubscribedTopics:\n self.__docWriter.addSub(callerId, topic, topicType)", "def test_producer_dr_only_error():\n p = confluent_kafka.Producer({\"bootstrap.servers\": bootstrap_servers,\n 'broker.address.family': 'v4',\n \"delivery.report.only.error\": True})\n\n class DrOnlyTestErr(object):\n def __init__(self):\n self.remaining = 1\n\n def handle_err(self, err, msg):\n \"\"\" This delivery handler should only get called for errored msgs \"\"\"\n assert \"BAD:\" in msg.value().decode('utf-8')\n assert err is not None\n self.remaining -= 1\n\n class DrOnlyTestSuccess(object):\n def handle_success(self, err, msg):\n \"\"\" This delivery handler should never get called \"\"\"\n # FIXME: Can we verify that it is actually garbage collected?\n assert \"GOOD:\" in msg.value().decode('utf-8')\n assert err is None\n assert False, \"should never come here\"\n\n def __del__(self):\n # Indicate that gc has hit this object.\n global DrOnlyTestSuccess_gced\n DrOnlyTestSuccess_gced = 1\n\n print('only.error: Verifying delivery.report.only.error')\n\n state = DrOnlyTestErr()\n p.produce(topic, \"BAD: This message will make not make it\".encode('utf-8'),\n partition=99, on_delivery=state.handle_err)\n\n not_called_state = DrOnlyTestSuccess()\n p.produce(topic, \"GOOD: This message will make make it\".encode('utf-8'),\n on_delivery=not_called_state.handle_success)\n\n # Garbage collection should not kick in yet for not_called_state\n # since there is a on_delivery reference to it.\n not_called_state = None\n gc.collect()\n global DrOnlyTestSuccess_gced\n assert DrOnlyTestSuccess_gced == 0\n\n print('only.error: Waiting for flush of %d messages' % len(p))\n p.flush(10000)\n\n print('only.error: Remaining messages now %d' % state.remaining)\n assert state.remaining == 0\n\n # Now with all messages flushed the reference to not_called_state should be gone.\n gc.collect()\n assert DrOnlyTestSuccess_gced == 1", "def purge(self):\n while self.bus.inWaiting() 
> 0:\n self.bus.read(self.bus.inWaiting())", "def samples_keep(self,index):\n\n\t\tif isinstance(index, (int, long)): index = range(self.samples)[-index:]\n\n\t\tself.sampled_topics = np.take(self.sampled_topics,index,axis=0)\n\t\tself.tt = np.take(self.tt,index,axis=2)\n\t\tself.dt = np.take(self.dt,index,axis=2)\n\n\t\tself.samples = len(index)" ]
[ "0.59047306", "0.5432222", "0.520002", "0.51949817", "0.51929474", "0.5141639", "0.5133911", "0.5067576", "0.5030349", "0.49800456", "0.49592015", "0.4953709", "0.49524015", "0.49356797", "0.49280757", "0.4905593", "0.48718688", "0.4860922", "0.48507154", "0.48441455", "0.48342484", "0.48125046", "0.48067188", "0.47813278", "0.47430122", "0.47303694", "0.472951", "0.46746418", "0.46745428", "0.46724477", "0.4671004", "0.46625644", "0.46545166", "0.46497533", "0.46344376", "0.46317852", "0.46210444", "0.46004957", "0.45993698", "0.4584268", "0.45559666", "0.45517215", "0.45446357", "0.45362708", "0.45335403", "0.45328993", "0.4531186", "0.4527187", "0.4512985", "0.4511513", "0.45082152", "0.4505291", "0.44955236", "0.44940972", "0.44938406", "0.44920468", "0.44706595", "0.44652313", "0.4447128", "0.4417398", "0.44121477", "0.44118118", "0.44117877", "0.44098026", "0.4408456", "0.4395288", "0.4391605", "0.43888643", "0.43875077", "0.4386862", "0.4386364", "0.43817613", "0.43599966", "0.43595165", "0.43424666", "0.43422756", "0.43422756", "0.4342266", "0.43408024", "0.43282977", "0.4322961", "0.43176115", "0.43135202", "0.4305033", "0.43043974", "0.42989054", "0.42989054", "0.42989054", "0.42989054", "0.42989054", "0.4298756", "0.42943358", "0.42919996", "0.42873174", "0.42863864", "0.42826393", "0.42813745", "0.4280616", "0.4279983", "0.42754057" ]
0.43292674
79
Creates estimator for predicting hashtag based on graph construction
def __init__(self, minimal_random_walk_change_difference_value: float, damping_factor: float, max_iterations: int, verbose: bool = False): self.graph: nx.Graph = None self._hashtags_tf_idf_vectorizer: TfidfVectorizer = None self._hashtags_tf_idf_representation: np.ndarray = None self._hashtag_labels: Union[set, np.ndarray] = None self._users_labels: Union[set, np.ndarray] = None self._tweet_labels: Union[set, np.ndarray] = None self._transition_matrix: np.ndarray = None self._hashtag_encoder: ModifiedOneHotEncoder = ModifiedOneHotEncoder() self.minimal_random_walk_change_difference_value = minimal_random_walk_change_difference_value self.damping_factor = damping_factor self.max_iterations = max_iterations self.verbose = verbose
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_estimator(self):\n # Hyperparameters to create the Estimator\n hparams = tf.contrib.training.HParams(\n job_dir='test_dir',\n save_checkpoints_steps=1,\n keep_checkpoint_max=1,\n num_layers=2,\n dnn_dropout=0.7,\n dnn_optimizer='test_optimizer',\n linear_optimizer='test_optimizer',\n first_layer_size=10)\n estimator = model.create_estimator(hparams)\n self.assertIsInstance(estimator, tf.estimator.Estimator)", "def _make_model(self):\n self._model = tf.estimator.Estimator(model_fn=self.model_fn,\n model_dir=self.model_dir,\n config=self._config,\n params=self._params,\n )", "def _build_graph(self, hparams, scope=None):\n\n sample = self.iterator.get_next()\n if self.mode != tf.contrib.learn.ModeKeys.INFER:\n enc_inputs, dec_inputs, dec_outputs, seq_len = sample\n else:\n # At inference, only two inputs are given\n enc_inputs, seq_len, dec_start = sample\n #indices = (hparams.num_labels-1)*tf.ones([enc_inputs.shape[0]], tf.int32)\n #depth = hparams.num_labels\n #dec_start = tf.one_hot(indices, depth, axis=-1)\n\n with tf.variable_scope(scope or \"dynamic_seq2seq\", dtype=tf.float32):\n # create encoder\n dense_input_layer = tf.layers.Dense(hparams.num_units)\n\n if hparams.dense_input:\n enc_inputs = dense_input_layer(enc_inputs)\n\n enc_cells = mdl_help.create_rnn_cell(unit_type=hparams.unit_type,\n num_units=hparams.num_units,\n num_layers=hparams.num_layers,\n depth=hparams.depth,\n num_residual_layers=hparams.num_residual_layers,\n forget_bias=hparams.forget_bias,\n dropout=hparams.dropout,\n mode=self.mode)\n\n # run encoder\n enc_outputs, enc_state = tf.nn.dynamic_rnn(cell=enc_cells,\n inputs=enc_inputs,\n sequence_length=seq_len,\n dtype=tf.float32,\n scope=\"encoder\")\n\n tgt_seq_len = tf.add(seq_len, tf.constant(1, tf.int32))\n\n # create decoder\n dec_cells = mdl_help.create_rnn_cell(unit_type=hparams.unit_type,\n num_units=hparams.num_units,\n num_layers=hparams.num_layers,\n depth=hparams.depth,\n num_residual_layers=hparams.num_residual_layers,\n forget_bias=hparams.forget_bias,\n dropout=hparams.dropout,\n mode=self.mode)\n\n # output project layer\n projection_layer = tf.layers.Dense(hparams.num_labels, use_bias=False)\n\n if self.mode == tf.contrib.learn.ModeKeys.TRAIN:\n if hparams.train_helper == \"teacher\":\n # teacher forcing\n helper = tf.contrib.seq2seq.TrainingHelper(inputs=dec_inputs,\n sequence_length=tgt_seq_len)\n elif hparams.train_helper == \"sched\":\n # scheduled sampling\n helper = tf.contrib.seq2seq.\\\n ScheduledOutputTrainingHelper(inputs=dec_inputs,\n sequence_length=tgt_seq_len,\n sampling_probability=self.sample_probability,\n next_inputs_fn=lambda x: mdl_help.multiclass_sample(x),\n )\n elif self.mode == tf.contrib.learn.ModeKeys.EVAL:\n helper = tf.contrib.seq2seq.\\\n ScheduledOutputTrainingHelper(inputs=dec_inputs,\n sequence_length=tgt_seq_len,\n sampling_probability=tf.constant(1.0),\n next_inputs_fn=lambda x: mdl_help.multiclass_sample(x))\n\n else: # running inference\n def end_fn(sample_ids):\n are_eq = tf.equal(dec_start, sample_ids)\n reduce_eq = tf.reduce_all(are_eq, axis=-1)\n return reduce_eq\n helper = tf.contrib.seq2seq.\\\n InferenceHelper(sample_fn=lambda x: mdl_help.multiclass_sample(x),\n sample_shape=[hparams.num_labels],\n sample_dtype=tf.float32,\n start_inputs=dec_start,\n end_fn=lambda x: end_fn(x))\n\n max_len = tf.reduce_max(tgt_seq_len)\n\n decoder = tf.contrib.seq2seq.BasicDecoder(cell=dec_cells,\n helper=helper,\n initial_state=enc_state,\n output_layer=projection_layer)\n\n # run decoder\n 
final_outputs, final_states, final_lengths = tf.contrib.seq2seq.dynamic_decode(\n decoder=decoder,\n impute_finished=True,\n maximum_iterations=tf.constant(2)*max_len,\n scope=\"decoder\")\n\n logits = final_outputs.rnn_output\n sample_ids = final_outputs.sample_id\n\n if self.mode == tf.contrib.learn.ModeKeys.INFER:\n return enc_inputs, sample_ids\n\n # mask out entries longer than target sequence length\n mask = tf.expand_dims(tf.sequence_mask(tgt_seq_len, dtype=tf.float32), axis=-1)\n\n #stop gradient thru labels by crossent op\n labels = tf.stop_gradient(dec_outputs)\n\n crossent = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits,\n labels=labels,\n name=\"crossent\")\n\n\n loss = tf.reduce_sum((crossent * mask) / tf.expand_dims(\n tf.expand_dims(tf.cast(tgt_seq_len, tf.float32), -1), -1))/hparams.batch_size\n\n# loss = tf.reduce_sum(crossent*mask)/(hparams.batch_size*tf.reduce_mean(tf.cast(tgt_seq_len,\n# tf.float32)))\n\n metrics = []\n update_ops = []\n if self.mode == tf.contrib.learn.ModeKeys.EVAL:\n # for predictions, we will scale the logits and then count each class as\n # active if it is over .5\n predictions = mdl_help.multiclass_prediction(logits)\n targets = dec_outputs\n acc, acc_update = tf.metrics.accuracy(predictions=predictions,\n labels=targets,\n weights=mask)\n metrics = [acc]\n update_ops = [acc_update]\n\n return logits, loss, metrics, update_ops", "def build_model():\n #\n pipeline = Pipeline([\n ('features', FeatureUnion([\n ('text_pipeline', Pipeline([('vect', CountVectorizer(tokenizer=tokenize)),('tfidf', TfidfTransformer())])),\n ('starting_verb', StartingVerbExtractor())])),\n ('clf', RandomForestClassifier())\n ])\n \n # hyerparameters for grid to search within\n# parameters = [{'clf__bootstrap': [False, True],\n# 'clf__bootstrap': [False, True],\n# 'clf__n_estimators': [80,90, 100, 110, 130],\n# 'clf__max_features': [0.6, 0.65, 0.7, 0.73, 0.7500000000000001, 0.78, 0.8],\n# 'clf__min_samples_leaf': [10, 12, 14],\n# 'clf__min_samples_split': [3, 5, 7]\n# }\n# ]\n\n parameters = {\n 'features__text_pipeline__vect__ngram_range': ((1, 1), (1, 2)),\n 'features__text_pipeline__vect__max_df': (0.5, 0.75, 1.0),\n 'features__text_pipeline__vect__max_features': (None, 5000, 10000),\n 'features__text_pipeline__tfidf__use_idf': (True, False),\n 'clf__n_estimators': [50, 80, 90, 100, 200],\n 'clf__min_samples_split': [2, 3, 4, 5, 7],\n 'features__transformer_weights': (\n {'text_pipeline': 1, 'starting_verb': 0.5},\n {'text_pipeline': 0.5, 'starting_verb': 1},\n {'text_pipeline': 0.8, 'starting_verb': 1},\n )\n }\n\n\n # Final model ready to be applied on dataset\n model = GridSearchCV(pipeline, param_grid=parameters)\n \n return model", "def __init__(\n self,\n estimator = SGDClassifier(),\n ):\n self.estimator = estimator", "def build(self, graph, name_scopes, training):\n raise NotImplementedError('Must be overridden by concrete subclass')", "def __init__(self, hparams: Namespace):\n super(TransformerTagger, self).__init__()\n self.hparams = hparams\n self.labels = ['O', 'I']\n config_class, model_class, tokenizer_class = MODEL_CLASSES[self.hparams.model_type]\n\n config = config_class.from_pretrained(\n self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,\n num_labels=self.hparams.num_labels,\n cache_dir=self.hparams.cache_dir if self.hparams.cache_dir else None,\n )\n tokenizer = tokenizer_class.from_pretrained(\n self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,\n 
do_lower_case=self.hparams.do_lower_case,\n cache_dir=self.hparams.cache_dir if self.hparams.cache_dir else None,\n )\n model = model_class.from_pretrained(\n self.hparams.model_name_or_path,\n from_tf=bool('.ckpt' in self.hparams.model_name_or_path),\n config=config,\n cache_dir=self.hparams.cache_dir if self.hparams.cache_dir else None,\n )\n\n self.config, self.tokenizer, self.model = config, tokenizer, model\n self.pad_token_label_id = nn.CrossEntropyLoss().ignore_index\n\n self.prepare_data()\n\n # get total train steps - for lr scheduler, idk how to do better for now without double loading\n self.total_train_steps = self.get_total_train_steps()\n logger.info('Total training steps: %s', self.total_train_steps)\n\n # init predictions\n self.preds = {\n 'val': defaultdict(dict),\n 'test': defaultdict(dict),\n }\n\n self.model_id_name = (f'{hparams.model_name_or_path}_bs-{hparams.train_batch_size}'\n f'_accum-{hparams.accumulate_grad_batches}'\n f'_lr-{hparams.lr}_labmode-{hparams.label_mode}'\n f'_maxep-{hparams.max_epochs}')", "def __init__(self, estimator, target_language='java',\n target_method='predict', **kwargs):\n super(DecisionTreeClassifier, self).__init__(\n estimator, target_language=target_language,\n target_method=target_method, **kwargs)\n self.estimator = estimator", "def build_model():\n nltk.download(['punkt', 'wordnet', 'averaged_perceptron_tagger', 'stopwords'])\n pipeline = Pipeline([\n ('features', FeatureUnion([\n \n ('text_pipeline', Pipeline([\n ('vect', CountVectorizer()),\n ('tfidf', TfidfTransformer())\n ])),\n \n ('strarting_verb', StartingVerbExtractor())\n \n ])),\n\n ('clf', MultiOutputClassifier(estimator = AdaBoostClassifier(random_state = 42)))\n\n ])\n \n parameters = {\"clf__estimator__learning_rate\": [0.1, 0.5, 1.0],\n \"clf__estimator__n_estimators\": [25, 50, 75]\n }\n \n from sklearn.model_selection import GridSearchCV\n cv = GridSearchCV(pipeline, param_grid = parameters) \n \n return cv", "def _build_model(self, architecture):\n estimator = NN_estimator(architecture)\n weight_file = architecture[\"WEIGHT_FILE\"]\n if weight_file is None:\n pass\n else:\n estimator.load_weights(weight_file)\n return estimator", "def _construct_graph(self):\n raise NotImplementedError", "def build_model():\n pipeline = Pipeline([\n ('features', FeatureUnion([\n \n ('text_pipeline', Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer())\n ])),\n\n ('starting_verb', StartingVerbExtractor())\n ])),\n \n ('clf', MultiOutputClassifier(AdaBoostClassifier()))\n ])\n\n # specify parameters for grid search\n parameters = {\n 'clf__estimator__n_estimators': [50],\n 'clf__estimator__learning_rate': [1]\n }\n\n # create grid search object\n cv = GridSearchCV(pipeline, param_grid=parameters)\n \n return cv", "def build_tf_graph(self):\n raise NotImplementedError", "def define_estimator(mode, features, labels, model_fn, config, params):\r\n\r\n assert mode in _ALLOWED_MODES, (\r\n 'mode should be TRAIN, EVAL or PREDICT from tf.estimator.ModeKeys.')\r\n assert params.name_feature_extractor in {'resnet_v1_50', 'resnet_v1_101'}, (\r\n 'params must have name_feature_extractor attribute in resnet_v1_{50,101}.')\r\n if params.name_feature_extractor == 'resnet_v1_101':\r\n raise NotImplementedError(\r\n 'Use of resnet_v1_101 as base feature extractor is not yet implemented.')\r\n\r\n # unpack features\r\n rawimages = features['rawimages'] if 'rawimages' in features.keys() else None\r\n rawimagespaths = features['rawimagespaths'] if 
'rawimagespaths' in features.keys() else None\r\n proimages = features['proimages']\r\n prolabels = labels if labels else None\r\n\r\n ## build a fully convolutional model for semantic segmentation\r\n # predictions refer to the training class ids\r\n # for plotting of results (inference) or assessment, predictions should be transformed\r\n # using `{inference, evaluation}_problem_def`s\r\n _, _, predictions = model_fn(mode, proimages, prolabels, config, params)\r\n\r\n # TODO(panos): assert that proimages and predictions have same spatial size\r\n\r\n if mode == tf.estimator.ModeKeys.TRAIN:\r\n\r\n # global step\r\n global_step = tf.train.get_or_create_global_step()\r\n\r\n # losses\r\n with tf.variable_scope('losses'):\r\n losses = define_losses(mode, predictions, prolabels, config, params)\r\n\r\n # exponential moving averages\r\n # creates variables in checkpoint with name: 'emas/' + <variable_name> +\r\n # {'ExponentialMovingAverage,Momentum}\r\n # ex.: for 'classifier/logits/Conv/biases' it saves also\r\n # 'emas/classifier/logits/Conv/biases/ExponentialMovingAverage'\r\n # and 'emas/classifier/logits/Conv/biases/Momentum'\r\n # create_train_op guarantees to run GraphKeys.UPDATE_OPS collection\r\n # before total_loss in every step, but doesn't give any guarantee\r\n # for running after some other op, and since ema need to be run\r\n # after applying the gradients maybe this code needs checking\r\n if params.ema_decay > 0:\r\n with tf.variable_scope('exponential_moving_averages'):\r\n #for mv in slim.get_model_variables():\r\n # print('slim.model_vars:', mv.op.name)\r\n ema = tf.train.ExponentialMovingAverage(params.ema_decay,\r\n num_updates=global_step,\r\n zero_debias=True)\r\n variables_to_ema = []\r\n for mv in tf.model_variables():\r\n if 'BatchNorm/moving' not in mv.name:\r\n variables_to_ema.append(mv)\r\n print(\r\n f\"\\nFound {len(tf.model_variables())} variables, saving exponential \"\r\n f\"moving averages for {len(variables_to_ema)} of them.\\n\")\r\n maintain_ema_op = ema.apply(var_list=variables_to_ema)\r\n tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, maintain_ema_op)\r\n\r\n # create training operation\r\n with tf.variable_scope('train_ops'):\r\n\r\n # optimizer\r\n optimizer = define_optimizer(global_step, params)\r\n\r\n # training op\r\n train_op = create_train_op(\r\n losses['total'],\r\n optimizer,\r\n global_step=global_step,\r\n # update_ops=tf.get_collection(tf.GraphKeys.UPDATE_OPS),\r\n summarize_gradients=False,\r\n # transform_grads_fn=,\r\n # gradient_multipliers=gradient_multipliers,\r\n check_numerics=False,\r\n )\r\n\r\n # TODO: maybe parameterize it\r\n training_hooks = [\r\n _RunMetadataHook(params.log_dir,\r\n every_n_iter=max(params.num_training_steps//50,\r\n params.save_checkpoints_steps))]\r\n\r\n # next two lines were added for distributed debugging\r\n if params.distribute:\r\n tower_context = tf.contrib.distribute.get_tower_context()\r\n assert tower_context\r\n print(f\"Tower {tower_context.tower_id}: _RunMetadataHook is not supported \"\r\n \"yet for distributed training.\")\r\n training_hooks = []\r\n\r\n replace_initializers(config, params)\r\n\r\n summaries_data = {'features': features,\r\n 'labels': labels,\r\n 'predictions': predictions,\r\n 'losses': losses,\r\n 'learning_rate': optimizer._learning_rate} #pylint: disable=protected-access\r\n\r\n scaffold = _define_scaffold(mode, config, params, summaries_data)\r\n estimator_spec = tf.estimator.EstimatorSpec(mode,\r\n predictions=predictions,\r\n loss=losses['total'],\r\n 
train_op=train_op,\r\n training_hooks=training_hooks,\r\n scaffold=scaffold)\r\n\r\n if mode == tf.estimator.ModeKeys.EVAL:\r\n with tf.variable_scope('losses'):\r\n losses = define_losses(mode, predictions, prolabels, config, params)\r\n\r\n # returns (variable, update_op)\r\n # TF internal error/problem: _streaming_confusion_matrix internally casts\r\n # labels and predictions to int64, and since we feed a dictionary, tensors are\r\n # passed by reference leading them to change type, thus we send an identity\r\n # confusion_matrix = metrics_impl._streaming_confusion_matrix( # pylint: disable=protected-access\r\n # tf.identity(prolabels),\r\n # tf.identity(predictions['decisions']),\r\n # params.output_Nclasses)\r\n # l1_probs, decs = itemgetter('l1_probabilities', 'decisions')(predictions)\r\n # create a new dict with the supported keys only\r\n predictions = _map_predictions_to_new_cids(predictions, params.training_cids2evaluation_cids)\r\n if params.replace_voids:\r\n predictions = _replace_voids(predictions, params)\r\n # TODO(panos): confusion matrix expects prolabels and predictions to have the same shape\r\n # this may not the case when preserve_aspect_ratio is set and this will give an error\r\n if hasattr(params, 'preserve_aspect_ratio'):\r\n if params.preserve_aspect_ratio:\r\n raise NotImplementedError('evaluation with preserving aspect ratio is not implemented.')\r\n predictions = _resize_predictions(predictions, tf.shape(labels['prolabels'])[1:3], params)\r\n tcids2ecids = _replacevoids(params.training_cids2evaluation_cids)\r\n confusion_matrix = metrics_impl._streaming_confusion_matrix( # pylint: disable=protected-access\r\n labels['prolabels'],\r\n predictions['decisions'],\r\n # +1 due to convention of starting counting at 0\r\n max(tcids2ecids) + 1)\r\n\r\n # dict of metrics keyed by name with values tuples of (metric_tensor, update_op)\r\n # TODO: add more semantic segmentation metrics\r\n eval_metric_ops = {'confusion_matrix': (\r\n tf.to_int32(confusion_matrix[0]), confusion_matrix[1])}\r\n\r\n scaffold = _define_scaffold(mode, config, params)\r\n estimator_spec = tf.estimator.EstimatorSpec(\r\n mode,\r\n predictions=predictions,\r\n loss=losses['total'],\r\n eval_metric_ops=eval_metric_ops,\r\n scaffold=scaffold)\r\n\r\n if mode == tf.estimator.ModeKeys.PREDICT:\r\n # create a new dict with the supported keys only\r\n l1_probs, l2_vehicle_probs, l2_human_probs, decs = itemgetter(\r\n 'l1_probabilities', 'l2_vehicle_probabilities', 'l2_human_probabilities', 'decisions')(\r\n predictions)\r\n predictions = {'l1_probabilities': l1_probs,\r\n 'l2_vehicle_probabilities': l2_vehicle_probs,\r\n 'l2_human_probabilities': l2_human_probs,\r\n 'decisions': decs}\r\n # workaround for connecting input pipeline outputs to system output\r\n # TODO(panos): maybe from a system perspective makes more sense to have mapping and\r\n # resizing in the system_factory\r\n # since these are functions of the system and not the network/estimator\r\n # new size defaults to provided values\r\n # if at least one is None then new size is the arbitrary size of rawimage in each step\r\n new_size = (params.height_system, params.width_system)\r\n is_arbitrary = not all(new_size)\r\n if is_arbitrary:\r\n if rawimages is not None:\r\n predictions['rawimages'] = rawimages\r\n if rawimagespaths is not None:\r\n predictions['rawimagespaths'] = rawimagespaths\r\n new_size = tf.shape(predictions['rawimages'])[1:3]\r\n predictions = _resize_predictions(predictions, new_size, params)\r\n 
tf.logging.warn('Mapping of predictions to new cids is not implemented for now.')\r\n # predictions = _map_predictions_to_new_cids(predictions, params.training_cids2inference_cids)\r\n if params.replace_voids:\r\n predictions = _replace_voids(predictions, params)\r\n\r\n scaffold = _define_scaffold(mode, config, params)\r\n estimator_spec = tf.estimator.EstimatorSpec(\r\n mode,\r\n predictions=predictions,\r\n scaffold=scaffold)\r\n\r\n return estimator_spec", "def __init__(self, estimator = LogisticRegression()): \n\t self.estimator = estimator", "def __init__(self, estimator, name='multi-label-classifier'):\n \n self.estimator = estimator\n self.name = name\n self.last_scores = None\n self.last_hypes = {}\n self.best_hypes = {}\n self.last_splits = []", "def _build(self, # pylint: disable=arguments-differ\n features, labels, params=None, config=None):\n # Pre-process features and labels\n features, labels = self._preprocess(features, labels)\n results = self._call_graph_fn(features=features, labels=labels)\n\n loss = None\n train_op = None\n eval_metrics = None\n if Modes.is_infer(self.mode):\n predictions = self._build_predictions(results=results, features=features, labels=labels)\n extra_ops = self._build_extra_ops(results=results, features=features, labels=labels)\n else:\n _, loss = self._build_loss(results, features, labels)\n eval_metrics = self._build_eval_metrics(results, features, labels)\n\n if Modes.is_train(self.mode):\n train_op = self._build_train_op(loss)\n self._build_summary_op(results=results, features=features, labels=labels)\n\n predictions = self._build_predictions(results=results, features=features, labels=labels)\n extra_ops = self._build_extra_ops(results=results, features=features, labels=labels)\n\n track(predictions, tf.GraphKeys.PREDICTIONS)\n\n return EstimatorSpec(mode=self.mode,\n predictions=predictions,\n loss=loss,\n extra_ops=extra_ops,\n train_op=train_op,\n eval_metric_ops=eval_metrics)", "def build_estimator(config,\n hidden_units=None,\n learning_rate=1e-4,\n num_classes=3):\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n return tf.estimator.DNNClassifier(\n optimizer=optimizer,\n config=config,\n feature_columns=get_feature_columns(),\n hidden_units=hidden_units,\n n_classes=num_classes)", "def __init__(self, estimator=LogisticRegression(), theta=0.1, demote=True):\n # TODO: assert that estimator has a predict_proba method.\n self.estimator = estimator\n self.theta = theta\n self.demote = demote", "def build_model(X_train, Y_train):\n #Choosing a straighforward single tree model to make training tractable in terms of time\n DTC = DecisionTreeClassifier(random_state = 11)\n\n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(estimator=DTC))\n ])\n\n parameters = {'clf__estimator__criterion': [\"gini\", \"entropy\"],\n 'clf__estimator__splitter': [\"best\", \"random\"],\n 'clf__estimator__max_depth': randint(3, 6),\n 'clf__estimator__min_samples_split': randint(2,6)}\n\n grid_obj = RandomizedSearchCV(pipeline,parameters,n_iter=5, cv=5 )\n grid_obj.fit(X_train, Y_train)\n\n return grid_obj.best_estimator_", "def create_model(self):\n self.classifier = DecisionTreeClassifier(max_depth=1)", "def _build_graph(self, train_data, test_data):\n\n # Network for testing / evaluation\n # As before, we define placeholders for the input. These here now can be fed\n # directly, e.g. 
with a feed_dict created by _evaluation_food\n self.expert_outputs = {m: test_pipeline(test_data[m], self.config['prefixes'][m],\n **self.config)\n for m in self.modalities}\n self.prediction = self._fusion(self.expert_outputs)", "def predict(self, instances):\r\n raise NotImplementedError", "def __init__(self,\n ckpt: Text,\n max_embedding_batch_size: Optional[int] = 128,\n max_score_batch_size: Optional[int] = 128) -> None:\n super(HolparamPredictor, self).__init__(\n max_embedding_batch_size=max_embedding_batch_size,\n max_score_batch_size=max_score_batch_size)\n self._graph = tf.Graph()\n self._sess = tf.Session(graph=self._graph)\n with self._graph.as_default():\n saved_model_path = get_saved_model_path(ckpt)\n if saved_model_path:\n self._training_meta = False\n tf.logging.info('Importing from metagraph in %s.', saved_model_path)\n saved_model = saved_model_pb2.SavedModel()\n with tf.gfile.GFile(saved_model_path, 'rb') as f:\n saved_model.ParseFromString(f.read())\n metagraph = saved_model.meta_graphs[0]\n else:\n self._training_meta = True\n metagraph = ckpt + '.meta'\n tf.logging.warn(\n 'No exported eval graph found. Using training metagraph %s',\n metagraph)\n # Load metagraph from proto or filepath\n saver = tf.train.import_meta_graph(metagraph, clear_devices=True)\n saver.restore(self._sess, ckpt)\n table_init_op = tf.tables_initializer()\n self._sess.run(table_init_op)\n self.pairwise_score = tf.squeeze(\n self._graph.get_collection('pairwise_score'), axis=[2])", "def __create_estimator(self, hidden_units, feature_columns, nlabels, model_dir, **kwargs):\n return tf.estimator.DNNClassifier(hidden_units, feature_columns,\n n_classes=nlabels, model_dir=model_dir, **kwargs)", "def build_estimator(config, embedding_size=8, hidden_units=None):\n (time, v1, v2, v3, v4,\n v5, v6, v7, v8, v9, \n v10, v11, v12, v13, v14, \n v15,v16,v17,v18,v19,v20,v21,\n v22,v23,v24,v25,v26,v27,v28, amount) = INPUT_COLUMNS\n \"\"\"Build an estimator.\"\"\"\n \n # Reused Transformations.\n # Continuous columns can be converted to categorical via bucketization\n # We use the (bucketized) amount column in the Wide part\n amount_buckets = tf.feature_column.bucketized_column(amount, boundaries=[4,8,12,15,35,75,100, 200, 300, 1000])\n\n # Wide columns and deep columns.\n wide_columns = [amount_buckets]\n\n # All the other CCF features will be used in the deep part\n deep_columns = [\n time, v1, v2, v3, v4,\n v5, v6, v7, v8, v9, \n v10, v11, v12, v13, v14, \n v15,v16,v17,v18,v19,v20,v21,\n v22,v23,v24,v25,v26,v27,v28\n ]\n \n # We hardcode here the models in order to avoid the exponential decaying model which is already implemented\n hidden_units = [20,15]\n\n # We can try either Wide and Deep models or Deep Neural Networks (DNN)\n #\"\"\"\n return tf.contrib.learn.DNNLinearCombinedClassifier(\n config=config,\n linear_feature_columns=wide_columns,\n dnn_feature_columns=deep_columns,\n dnn_hidden_units=hidden_units or [100, 70, 50, 25],\n dnn_optimizer=tf.train.AdamOptimizer(),\n fix_global_step_increment_bug=True\n )\n\n \"\"\"\n deep_columns = deep_columns + [amount]\n return tf.contrib.learn.DNNClassifier(\n config=config,\n feature_columns=deep_columns,\n hidden_units=hidden_units or [100, 70, 50, 25],\n optimizer=tf.train.AdamOptimizer()\n #optimizer=tf.train.ProximalAdagradOptimizer(\n #learning_rate=0.1,\n #l2_regularization_strength=0.001\n\t #)\n )\n #\"\"\"", "def build_train_model(hparams,\n scope=\"train\"):\n src_lang = hparams.src_lang\n src_vocab_file_name = hparams.src_vocab_file_name\n 
tgt_lang = hparams.tgt_lang\n tgt_vocab_file_name = hparams.tgt_vocab_file_name\n\n\n\n tf.reset_default_graph()\n\n train_graph = tf.Graph()\n with train_graph.as_default() as g:\n with tf.container(scope):\n src_vocab, tgt_vocab = load_vocabs(src_lang, src_vocab_file_name,\n tgt_lang, tgt_vocab_file_name)\n src_dataset_file_name = tf.placeholder(tf.string, name=\"src_dataset_file_name\")\n tgt_dataset_file_name = tf.placeholder(tf.string, name=\"tgt_dataset_file_name\")\n\n src_dataset = tf.data.TextLineDataset(src_dataset_file_name)\n tgt_dataset = tf.data.TextLineDataset(tgt_dataset_file_name)\n\n batch_size = tf.placeholder(tf.int64, name=\"batch_size\")\n\n # maximum sequence length for training example\n max_len = tf.placeholder(tf.int64, name=\"max_len\")\n\n iterator = Iterator(src_dataset, src_vocab,\n tgt_dataset, tgt_vocab, batch_size=batch_size, max_len=max_len)\n\n # actual TensorFlow Dataset Iterator\n iterator_tf = iterator.create_iterator()\n\n model_class = _get_model_from_str_type(hparams.model_name)\n\n model = model_class(hparams, src_vocab, tgt_vocab)\n\n model_graph = model.build_graph(iterator_tf,\n tf.contrib.learn.ModeKeys.TRAIN, batch_size, g)\n\n return NTModel(src_vocab=src_vocab,\n tgt_vocab=tgt_vocab,\n iterator_tf=iterator_tf,\n model_graph=model_graph,\n model=model,\n hparams=hparams,\n mode=tf.contrib.learn.ModeKeys.TRAIN)", "def __init__(self, algorithm, iters, **params):\n self.algorithm=algorithm\n self.iters=iters\n if self.iters <= 0:\n raise ValueError(\"the number of iterations must be greater than zero\")\n if self.algorithm=='dtree':\n self.depth = params.pop('depth')\n if self.algorithm=='ann':\n self.gamma = params.pop('gamma')\n\n self.estimators_ = []\n self.estimator_weights_ = np.zeros(self.iters, dtype=np.float) \n self.clf=object", "def __init__(self,estimator, param = None):\n self.estimator=estimator", "def gen_graph(self):", "def build_model():\n \n pipeline = Pipeline([\n ('features', FeatureUnion([\n\n ('text_pipeline', Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer())\n ])),\n\n ('starting_verb', StartingVerbExtractor())\n ])),\n\n ('clf', MultiOutputClassifier(AdaBoostClassifier()))\n])\n \n # specify parameters for grid search\n parameters = {\n 'features__text_pipeline__vect__ngram_range': ((1, 1), (1, 2)),\n 'features__text_pipeline__vect__max_df': (0.75, 1.0)\n }\n\n # create grid search object\n cv = GridSearchCV(pipeline, param_grid = parameters, n_jobs= 8, cv = 3, verbose = 2)\n\n return cv", "def __init__(self, edge_idx, edge_vals, U, I, K, hparams, ground_truth=None, simple_graph=False, GPU=False,\n fix_item_params=False, comp_rem=True, edge_param_splits=1, seed=None, sess=None, device='/cpu:0',\n ppm=False):\n\n self.ppm = ppm\n # Launch the session\n if sess:\n self.sess = sess\n else:\n if GPU:\n # For GPU mode\n config = tf.ConfigProto(allow_soft_placement=True)\n config.gpu_options.allow_growth = True\n config.gpu_options.allocator_type = 'BFC'\n self.sess = tf.Session(config=config)\n else:\n config = tf.ConfigProto(allow_soft_placement=True)\n self.sess = tf.Session(config=config)\n\n self.device = device\n self.comp_rem = comp_rem\n self.seed = seed\n self.K = K\n self.ground_truth = ground_truth\n self.simple_graph = simple_graph\n self.U, self.I = U, I\n self.fix_item_params = fix_item_params\n self.hparams = hparams\n self.edge_param_splits = edge_param_splits # Splitting GPU parameters to fit according to GPU size\n self.GPU = GPU\n\n # store the data here:\n 
self.edge_idx_d = edge_idx\n\n if self.simple_graph:\n self.edge_vals_d = np.ones(edge_vals.shape[0], dtype=np.float32)\n else:\n self.edge_vals_d = edge_vals.astype(np.float32)\n\n # create placeholders for the computational graph\n with tf.name_scope(\"placeholders\"):\n with tf.device(self.device):\n self.edge_idx = tf.placeholder(dtype=tf.int32,shape=(edge_idx.shape[0], edge_idx.shape[1]))\n self.edge_vals = tf.placeholder(dtype=tf.float32,shape=(edge_idx.shape[0]))\n\n if simple_graph:\n # Degree computation without tensorflow. Only works for simple graphs\n _,self.user_degree = np.unique(self.edge_idx_d[:,0],return_counts=True)\n _,self.item_degree = np.unique(self.edge_idx_d[:,1],return_counts=True)\n self.user_degree = self.user_degree.astype(np.float32)\n self.item_degree = self.item_degree.astype(np.float32)\n else:\n with tf.name_scope(\"init_deg_comp\"):\n with tf.device(self.device):\n user_degree, item_degree = compute_degrees2(tf.expand_dims(self.edge_vals, axis=1), self.edge_idx,\n self.U, self.I)\n user_degree = tf.squeeze(user_degree)\n item_degree = tf.squeeze(item_degree)\n\n with tf.Session(config=config) as sess:\n self.user_degree, self.item_degree = sess.run([user_degree, item_degree],\n feed_dict={self.edge_vals: self.edge_vals_d,\n self.edge_idx: self.edge_idx_d})\n\n \n print repr(np.sum(self.user_degree))\n print repr(np.sum(self.item_degree))\n\n self.occupied_pairs = edge_idx.shape[0] # oc_pa\n\n self._initialize_parameters(hparams, ppm)\n\n # random sample for diagnostics\n np.random.seed(self.seed)\n self.included_sample = self.edge_idx_d[np.random.choice(self.edge_idx_d.shape[0], 1000, replace=False)]\n user_sample = np.random.choice(self.U, 1000)\n item_sample = np.random.choice(self.I, 1000)\n self.pair_sample = np.vstack((user_sample, item_sample)).T\n\n # appx llhd for assessing convergence\n with tf.name_scope(\"appx_llhd\"):\n self._build_appx_elbo()\n\n # computational graph for coordinate ascent\n with tf.name_scope(\"coordinate_ascent\"):\n self._build_computation_graph()\n\n with tf.name_scope(\"evaluation\"):\n with tf.device(self.device):\n self._build_predict_edges()\n self.edge_mean_summary = tf.reduce_mean(self.q_e_aux_vals.mean(), axis=0)\n\n with tf.name_scope(\"recommendation\"), tf.device(self.device):\n self._build_rec_uncensored_edge_pops()\n\n self._censored_edge_pops = tf.placeholder(dtype=tf.float32)\n self._num_rec = tf.placeholder(dtype=tf.int32, shape=())\n self._top_k = tf.nn.top_k(self._censored_edge_pops, self._num_rec)\n\n # logging\n self.summary_writer = tf.summary.FileWriter('../logs', graph=self.sess.graph)\n\n # Initializing the tensor flow variables\n with tf.device(self.device):\n init = tf.global_variables_initializer()\n self.sess.run(init)\n\n # qm_du, qm_di were initialized arbitrarily, and are thus inconsistent w initialize value of the edge params\n # this line fixes that\n if not(ppm):\n self.sess.run(self.deg_update, feed_dict={self.edge_vals: self.edge_vals_d, self.edge_idx: self.edge_idx_d})", "def _create_inference_model(pl_module):\n new_model = get_model(pl_module.hparams)\n new_model.normalization_metrics = AnomalyScoreDistribution().cpu()\n new_model.load_state_dict(pl_module.state_dict())\n return new_model", "def build_model():\n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(RandomForestClassifier()))\n ])\n \n parameters = {'clf__estimator__n_estimators': [50, 100],\n 'clf__estimator__min_samples_split': [2, 3, 
5],\n 'clf__estimator__criterion': ['entropy', 'gini']\n }\n \n cv = GridSearchCV(pipeline, param_grid=parameters)\n \n return cv", "def predict(self, params, exog=None, *args, **kwargs):\n raise NotImplementedError # pragma: no cover", "def init_estimator(self):\n raise NotImplementedError()", "def trainModel( self, featureTrain, classTrain):", "def make_predict_step(self):\n return self.make_eval_step()", "def build_model():\n\n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(DecisionTreeClassifier()))\n ])\n\n \n parameters = {'clf__estimator__min_samples_split':[2, 4, 6],\n 'clf__estimator__max_depth': [2, 4]}\n\n #parameters = {'clf__estimator__min_samples_split':[2]}\n cv = GridSearchCV(pipeline, parameters)\n\n return(cv)", "def __init__(self, hparams):\n super(ImagenetTransferLearning, self).__init__()\n self.hparams = hparams\n self.feature_extractor = models.mobilenet_v2(pretrained=True)\n self.feature_extractor.eval()\n\n # Establish classifier\n # self.layer_1 = torch.nn.Linear(hparams[\"input_size\"], 128)\n self.layer_1 = torch.nn.Linear(1000, 128)\n self.layer_2 = torch.nn.Linear(128, 256)\n self.layer_3 = torch.nn.Linear(256, hparams[\"targets\"])", "def __init__(self, tokens):\n self.mdl = self.train(tokens)", "def __init__(self, tokens):\n self.mdl = self.train(tokens)", "def create_hparams(hparam_string=None):\n hparams = tf.contrib.training.HParams(\n # The name of the architecture to use.\n arch='resnet',\n lrelu_leakiness=0.2,\n batch_norm_decay=0.9,\n weight_decay=1e-5,\n normal_init_std=0.02,\n generator_kernel_size=3,\n discriminator_kernel_size=3,\n\n # Stop training after this many examples are processed\n # If none, train indefinitely\n num_training_examples=0,\n\n # Apply data augmentation to datasets\n # Applies only in training job\n augment_source_images=False,\n augment_target_images=False,\n\n # Discriminator\n # Number of filters in first layer of discriminator\n num_discriminator_filters=64,\n discriminator_conv_block_size=1, # How many convs to have at each size\n discriminator_filter_factor=2.0, # Multiply # filters by this each layer\n # Add gaussian noise with this stddev to every hidden layer of D\n discriminator_noise_stddev=0.2, # lmetz: Start seeing results at >= 0.1\n # If true, add this gaussian noise to input images to D as well\n discriminator_image_noise=False,\n discriminator_first_stride=1, # Stride in first conv of discriminator\n discriminator_do_pooling=False, # If true, replace stride 2 with avg pool\n discriminator_dropout_keep_prob=0.9, # keep probability for dropout\n\n # DCGAN Generator\n # Number of filters in generator decoder last layer (repeatedly halved\n # from 1st layer)\n num_decoder_filters=64,\n # Number of filters in generator encoder 1st layer (repeatedly doubled\n # after 1st layer)\n num_encoder_filters=64,\n\n # This is the shape to which the noise vector is projected (if we're\n # transferring from noise).\n # Write this way instead of [4, 4, 64] for hparam search flexibility\n projection_shape_size=4,\n projection_shape_channels=64,\n\n # Indicates the method by which we enlarge the spatial representation\n # of an image. 
Possible values include:\n # - resize_conv: Performs a nearest neighbor resize followed by a conv.\n # - conv2d_transpose: Performs a conv2d_transpose.\n upsample_method='resize_conv',\n\n # Visualization\n summary_steps=500, # Output image summary every N steps\n\n ###################################\n # Task Classifier Hyperparameters #\n ###################################\n\n # Which task-specific prediction tower to use. Possible choices are:\n # none: No task tower.\n # doubling_pose_estimator: classifier + quaternion regressor.\n # [conv + pool]* + FC\n # Classifiers used in DSN paper:\n # gtsrb: Classifier used for GTSRB\n # svhn: Classifier used for SVHN\n # mnist: Classifier used for MNIST\n # pose_mini: Classifier + regressor used for pose_mini\n task_tower='doubling_pose_estimator',\n weight_decay_task_classifier=1e-5,\n source_task_loss_weight=1.0,\n transferred_task_loss_weight=1.0,\n\n # Number of private layers in doubling_pose_estimator task tower\n num_private_layers=2,\n\n # The weight for the log quaternion loss we use for source and transferred\n # samples of the cropped_linemod dataset.\n # In the DSN work, 1/8 of the classifier weight worked well for our log\n # quaternion loss\n source_pose_weight=0.125 * 2.0,\n transferred_pose_weight=0.125 * 1.0,\n\n # If set to True, the style transfer network also attempts to change its\n # weights to maximize the performance of the task tower. If set to False,\n # then the style transfer network only attempts to change its weights to\n # make the transferred images more likely according to the domain\n # classifier.\n task_tower_in_g_step=True,\n task_loss_in_g_weight=1.0, # Weight of task loss in G\n\n #########################################\n # 'simple` generator arch model hparams #\n #########################################\n simple_num_conv_layers=1,\n simple_conv_filters=8,\n\n #########################\n # Resnet Hyperparameters#\n #########################\n resnet_blocks=6, # Number of resnet blocks\n resnet_filters=64, # Number of filters per conv in resnet blocks\n # If true, add original input back to result of convolutions inside the\n # resnet arch. If false, it turns into a simple stack of conv/relu/BN\n # layers.\n resnet_residuals=True,\n\n #######################################\n # The residual / interpretable model. #\n #######################################\n res_int_blocks=2, # The number of residual blocks.\n res_int_convs=2, # The number of conv calls inside each block.\n res_int_filters=64, # The number of filters used by each convolution.\n\n ####################\n # Latent variables #\n ####################\n # if true, then generate random noise and project to input for generator\n noise_channel=True,\n # The number of dimensions in the input noise vector.\n noise_dims=10,\n\n # If true, then one hot encode source image class and project as an\n # additional channel for the input to generator. This gives the generator\n # access to the class, which may help generation performance.\n condition_on_source_class=False,\n\n ########################\n # Loss Hyperparameters #\n ########################\n domain_loss_weight=1.0,\n style_transfer_loss_weight=1.0,\n\n ########################################################################\n # Encourages the transferred images to be similar to the source images #\n # using a configurable metric. 
#\n ########################################################################\n\n # The weight of the loss function encouraging the source and transferred\n # images to be similar. If set to 0, then the loss function is not used.\n transferred_similarity_loss_weight=0.0,\n\n # The type of loss used to encourage transferred and source image\n # similarity. Valid values include:\n # mpse: Mean Pairwise Squared Error\n # mse: Mean Squared Error\n # hinged_mse: Computes the mean squared error using squared differences\n # greater than hparams.transferred_similarity_max_diff\n # hinged_mae: Computes the mean absolute error using absolute\n # differences greater than hparams.transferred_similarity_max_diff.\n transferred_similarity_loss='mpse',\n\n # The maximum allowable difference between the source and target images.\n # This value is used, in effect, to produce a hinge loss. Note that the\n # range of values should be between 0 and 1.\n transferred_similarity_max_diff=0.4,\n\n ################################\n # Optimization Hyperparameters #\n ################################\n learning_rate=0.001,\n batch_size=32,\n lr_decay_steps=20000,\n lr_decay_rate=0.95,\n\n # Recomendation from the DCGAN paper:\n adam_beta1=0.5,\n clip_gradient_norm=5.0,\n\n # The number of times we run the discriminator train_op in a row.\n discriminator_steps=1,\n\n # The number of times we run the generator train_op in a row.\n generator_steps=1)\n\n if hparam_string:\n tf.logging.info('Parsing command line hparams: %s', hparam_string)\n hparams.parse(hparam_string)\n\n tf.logging.info('Final parsed hparams: %s', hparams.values())\n return hparams", "def get_estimator_fn(num_gpus,\n variable_strategy,\n run_config,\n hparams):\n estimator = tf.estimator.Estimator(\n model_fn=get_model_fn(num_gpus, variable_strategy,\n run_config.num_worker_replicas or 1),\n config=run_config,\n params=hparams)\n\n return estimator", "def model_build(self, estimators):\n return RandomForestClassifier(n_estimators=estimators, n_jobs=-1)", "def predict_api():\n pass", "def __init__(self, tagger):\n self.tagger = tagger\n self.classifier = Perceptron()", "def __init__(self, the_model, lrp_exponent=1, beta=.5, epsilon=1e-6,\n method=\"e-rule\"):\n super(InnvestigateModel, self).__init__()\n self.model = the_model\n self.device = torch.device(\"cpu\", 0)\n self.prediction = None\n self.r_values_per_layer = None\n self.only_max_score = None\n # Initialize the 'Relevance Propagator' with the chosen rule.\n # This will be used to back-propagate the relevance values\n # through the layers in the innvestigate method.\n self.inverter = RelevancePropagator(lrp_exponent=lrp_exponent,\n beta=beta, method=method, epsilon=epsilon,\n device=self.device)\n\n # Parsing the individual model layers\n self.register_hooks(self.model)\n if method == \"b-rule\" and float(beta) in (-1., 0):\n which = \"positive\" if beta == -1 else \"negative\"\n which_opp = \"negative\" if beta == -1 else \"positive\"\n print(\"WARNING: With the chosen beta value, \"\n \"only \" + which + \" contributions \"\n \"will be taken into account.\\nHence, \"\n \"if in any layer only \" + which_opp +\n \" contributions exist, the \"\n \"overall relevance will not be conserved.\\n\")", "def build_model():\n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer = tokenize, min_df = 5)),\n ('tfidf', TfidfTransformer(use_idf = True)),\n ('clf', MultiOutputClassifier(RandomForestClassifier(n_estimators = 10,\n min_samples_split = 10)))\n ])\n\n # Create parameters dictionary\n 
parameters = {'vect__min_df': [1, 5],\n 'tfidf__use_idf':[True, False],\n 'clf__estimator__n_estimators':[10, 25],\n 'clf__estimator__min_samples_split':[2, 5, 10]}\n\n # create grid search\n cv = GridSearchCV(pipeline, param_grid=parameters)\n\n return cv", "def create_fit_model(features_df):\n model = NearestNeighbors(n_neighbors=10,\n n_jobs=-1)\n knn_spotify = model.fit(features_df)\n return knn_spotify", "def build_model():\n pipeline = Pipeline([\n ('features', FeatureUnion([\n\n ('text_pipeline', Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer())\n ])),\n ('starting_verb', StartingVerbExtractor())\n ])),\n ('clf', DecisionTreeClassifier())\n ])\n\n parameters = [\n {\n 'features__text_pipeline__vect__max_df': (0.5, 1.0),\n 'features__text_pipeline__vect__min_df': (1, 0.01),\n 'features__text_pipeline__vect__max_features': (None, 5000),\n 'features__text_pipeline__tfidf__use_idf': (True, False),\n 'clf': (DecisionTreeClassifier(min_samples_split=3),),\n 'clf__max_depth': (None, 4)\n }, {\n 'features__text_pipeline__vect__max_df': (0.5, 1.0),\n 'features__text_pipeline__vect__min_df': (1, 0.01),\n 'features__text_pipeline__vect__max_features': (None, 5000),\n 'features__text_pipeline__tfidf__use_idf': (True, False),\n 'clf': (MultiOutputClassifier(LinearSVC(multi_class='ovr')),)\n }, {\n 'features__text_pipeline__vect__max_df': (0.5, 1.0),\n 'features__text_pipeline__vect__min_df': (1, 0.01),\n 'features__text_pipeline__vect__max_features': (None, 5000),\n 'features__text_pipeline__tfidf__use_idf': (True, False),\n 'clf': (MLPClassifier(),),\n 'clf__hidden_layer_sizes': ((100, 10), (50,), (50, 10))\n }\n ]\n\n cv = GridSearchCV(pipeline, parameters, cv=3, n_jobs=4, verbose=10)\n \n return cv", "def train_model(evidence, labels):\n model = sklearn.neighbors.KNeighborsClassifier(n_neighbors = 1)\n model.fit(evidence,labels)\n return model", "def model_fn(features,labels,mode,params):\n input_q = features[\"input_q\"] # query feature vector\n input_K = features[\"input_K\"] # Key set Matrix\n input_v = features[\"input_v\"] # image visual feature vector\n input_labels = features[\"input_labels\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n model = modeling.AMT(\n config = config,\n is_trainging = is_training, \n scope = \"AMT\",\n input_q = input_q,\n input_K = input_K,\n input_v = input_v\n )\n loss = model.loss\n q_doc_rank = model.get_predict()\n output_spec = None\n scaffold_fn = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer()\n output_spec = tf.estimator.EstimatorSpec(\n mode = mode,\n loss = loss,\n train_op = train_op,\n scaffold_fn = scaffold_fn)\n elif mode == tf.estimator.ModeKeys.EVAL:\n def metric_fn():\n return 0\n else:\n output_spec = tf.estimator.EstimatorSpec(\n mode = mode,\n predictions = q_doc_rank,\n scaffold_fn = scaffold_fn)\n return output_spec", "def generate_estimator(\n mode_feature_cols_map,\n params,\n config):\n model_fn = generate_model_fn(mode_feature_cols_map)\n\n return tf.estimator.Estimator(\n model_fn,\n model_dir=config.model_dir,\n params=params,\n config=config\n )", "def __init__(self) :\n self.prediction_ = None", "def __init__(self) :\n self.prediction_ = None", "def __init__(self) :\n self.prediction_ = None", "def __init__(self):\n self.clf = DummyClassifier(strategy='most_frequent')", "def build_model():\n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', 
MultiOutputClassifier(AdaBoostClassifier()))\n ])\n\n parameters = {\n 'vect__stop_words': ['english',None],\n 'tfidf__smooth_idf': [True, False],\n 'tfidf__norm': ['l2','l1'],\n 'clf__estimator__learning_rate': [0.5, 1, 2],\n 'clf__estimator__n_estimators': [20, 60, 100]\n }\n\n clf_grid_model = RandomizedSearchCV(pipeline,\n parameters,\n cv=3,\n refit=True,\n verbose=10,\n n_jobs=-1)\n return clf_grid_model", "def __init__(self, params, device_assigner=None, model_dir=None,\n graph_builder_class=tensor_forest.RandomForestGraphs,\n config=None, weights_name=None, keys_name=None,\n feature_engineering_fn=None,\n early_stopping_rounds=100,\n num_trainers=1, trainer_id=0,\n report_feature_importances=False,\n local_eval=False):\n super(TensorForestEstimator, self).__init__(\n model_fn=get_model_fn(\n params.fill(),\n graph_builder_class,\n device_assigner,\n weights_name=weights_name,\n keys_name=keys_name,\n early_stopping_rounds=early_stopping_rounds,\n num_trainers=num_trainers,\n trainer_id=trainer_id,\n report_feature_importances=report_feature_importances,\n model_dir=model_dir,\n local_eval=local_eval),\n model_dir=model_dir,\n config=config,\n feature_engineering_fn=feature_engineering_fn)", "def config1() :\n data_name = \"titanic\" ### in data/input/\n model_class = 'AutoML' ### ACTUAL Class name for model_sklearn.py\n n_sample = 1000\n\n def post_process_fun(y): ### After prediction is done\n return int(y)\n\n def pre_process_fun(y): ### Before the prediction is done\n return int(y)\n\n\n model_dict = {'model_pars': {\n ### LightGBM API model #######################################\n 'model_class': model_class\n ,'model_pars' : {\n 'total_time_limit' : 20,\n 'algorithms' : 'auto',\n 'results_path' : root_repo + f'/data/output/{data_name}/{os_get_function_name()}/automl_1',\n 'eval_metric' : 'auto'\n\n # mode='Explain',\n # ml_task='auto', model_time_limit=None, algorithms='auto', train_ensemble=True,\n # stack_models='auto', eval_metric='auto', validation_strategy='auto', explain_level='auto',\n # golden_features='auto', features_selection='auto', start_random_models='auto',\n # hill_climbing_steps='auto', top_models_to_improve='auto', verbose=1, random_state=1234)\n }\n\n , 'post_process_fun' : post_process_fun ### After prediction ##########################################\n , 'pre_process_pars' : {'y_norm_fun' : pre_process_fun , ### Before training ##########################\n\n\n ### Pipeline for data processing ##############################\n 'pipe_list': [\n #### coly target prorcessing\n {'uri': 'source/prepro.py::pd_coly', 'pars': {}, 'cols_family': 'coly', 'cols_out': 'coly', 'type': 'coly' },\n\n\n {'uri': 'source/prepro.py::pd_colnum_bin', 'pars': {}, 'cols_family': 'colnum', 'cols_out': 'colnum_bin', 'type': '' },\n {'uri': 'source/prepro.py::pd_colnum_binto_onehot', 'pars': {}, 'cols_family': 'colnum_bin', 'cols_out': 'colnum_onehot', 'type': '' },\n\n #### catcol INTO integer, colcat into OneHot\n {'uri': 'source/prepro.py::pd_colcat_bin', 'pars': {}, 'cols_family': 'colcat', 'cols_out': 'colcat_bin', 'type': '' },\n # {'uri': 'source/prepro.py::pd_colcat_to_onehot', 'pars': {}, 'cols_family': 'colcat_bin', 'cols_out': 'colcat_onehot', 'type': '' },\n\n\n ### Cross_feat = feat1 X feat2\n # {'uri': 'source/prepro.py::pd_colcross', 'pars': {}, 'cols_family': 'colcross', 'cols_out': 'colcross_pair', 'type': 'cross'},\n\n\n #### Example of Custom processor\n #{'uri': THIS_FILEPATH + '::pd_col_myfun', 'pars': {}, 'cols_family': 'colnum', 'cols_out': 'col_myfun', 
'type': '' }, \n\n\n ],\n }\n },\n\n 'compute_pars': { 'metric_list': ['accuracy_score','average_precision_score']\n\n ,'mlflow_pars' : None # {} ### Not empty --> use mlflow\n },\n\n 'data_pars': { 'n_sample' : n_sample,\n\n 'download_pars' : None,\n\n\n 'cols_input_type' : cols_input_type_1,\n ### family of columns for MODEL #########################################################\n # \"colnum\", \"colnum_bin\", \"colnum_onehot\", \"colnum_binmap\", #### Colnum columns\n # \"colcat\", \"colcat_bin\", \"colcat_onehot\", \"colcat_bin_map\", #### colcat columns\n # 'colcross_single_onehot_select', \"colcross_pair_onehot\", 'colcross_pair', #### colcross columns 'coldate', 'coltext',\n 'cols_model_group': [ 'colnum_bin',\n 'colcat_bin',\n # 'coltext',\n # 'coldate',\n #'colcross_pair',\n \n ### example of custom\n # 'col_myfun'\n ]\n\n ### Filter data rows ##################################################################\n ,'filter_pars': { 'ymax' : 2 ,'ymin' : -1 }\n\n }\n }\n\n ##### Filling Global parameters ############################################################\n model_dict = global_pars_update(model_dict, data_name, config_name=os_get_function_name() )\n return model_dict", "def build_model(train_inputs,train_labels,model_params,model_mode='classification',\n model_type='naive_bayes'):\n if model_mode == \"classification\":\n if model_type == \"naive_bayes\":\n model = GaussianNB()\n if model_type == \"knn\":\n model = KNeighborsClassifier(n_neighbors=50)\n if model_type == \"svm\":\n model = SVC(kernel='poly', degree =27, coef0 =1, C=5)\n if model_type == \"decision_tree\":\n model = DecisionTreeClassifier(min_samples_split=45,min_samples_leaf=45,criterion=\"gini\")\n #model = RandomForestClassifier(n_estimators=500, n_jobs=-1)\n\n if model_mode == \"regression\":\n if model_type == \"knn\":\n model = KNeighborsRegressor()\n if model_type == \"svm\":\n model = SVR()\n if model_type == \"decision_tree\":\n model = DecisionTreeRegressor()\n\n\n model.fit(train_inputs, train_labels)\n # for name, score in zip(train_inputs.columns,model.feature_importances_):\n # print(name, score)\n\n return model", "def build_model():\n \n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(RandomForestClassifier()))\n ])\n \n parameters = {\n 'vect__ngram_range': ((1, 1), (1, 2)),\n 'clf__estimator__min_samples_split': [2, 4],\n }\n \n cv = GridSearchCV(pipeline, param_grid=parameters)\n\n return cv", "def _train_model(self):\n raise NotImplementedError()", "def _build_predict_edges(self, N=100):\n with tf.device(self.device):\n self.pred_edges_ph = tf.placeholder(dtype=tf.int32)\n # MC estimate\n self.predict_edges = tf.reduce_mean(self._edge_prob_samples(self.pred_edges_ph, N=N), axis=0)", "def build_estimator(\n data_dir,\n num_gpus,\n variable_strategy,\n run_config,\n hparams,\n use_distortion_for_training=True,\n ws=None,\n):\n\n # Create estimator.\n train_input_fn = functools.partial(\n input_fn,\n data_dir,\n subset=\"train\",\n num_shards=num_gpus,\n batch_size=hparams.train_batch_size,\n use_distortion_for_training=use_distortion_for_training,\n )\n\n eval_input_fn = functools.partial(\n input_fn,\n data_dir,\n subset=\"validation\",\n batch_size=hparams.eval_batch_size,\n num_shards=num_gpus,\n )\n\n # validation: 5000, eval:10000\n num_eval_examples = cifar10.Cifar10DataSet.num_examples_per_epoch(\n \"validation\"\n )\n\n if num_eval_examples % hparams.eval_batch_size != 0:\n raise ValueError(\n 
\"validation set size must be multiple of eval_batch_size\"\n )\n\n classifier = tf.estimator.Estimator(\n model_fn=get_model_fn(\n num_gpus, variable_strategy, run_config.num_worker_replicas or 1\n ),\n config=run_config,\n params=hparams,\n warm_start_from=ws,\n )\n\n return train_input_fn, eval_input_fn, classifier", "def __init__(self, uniform_variables, knn=50, iterations=10,\n base_estimator=DecisionTreeClassifier(max_depth=6),\n train_variables=None, learning_rate=10, efficiencies_as_sum=True):\n self.base_estimator = base_estimator\n self.uniform_variables = uniform_variables\n self.knn = knn\n self.iterations = iterations\n self.train_variables = train_variables\n self.learning_rate = learning_rate\n self.efficiencies_as_sum = efficiencies_as_sum", "def create_eval_model(model_creator, hparams, scope=None, extra_args=None):\n vocab_file = hparams.vocab_file\n graph = tf.Graph()\n\n with graph.as_default(), tf.container(scope or \"eval\"):\n vocab_table = vocab_utils.create_vocab_tables(vocab_file)[0]\n data_file_placeholder = tf.placeholder(shape=(), dtype=tf.string)\n kb_file_placeholder = tf.placeholder(shape=(), dtype=tf.string)\n data_dataset = tf.data.TextLineDataset(data_file_placeholder)\n kb_dataset = tf.data.TextLineDataset(kb_file_placeholder)\n # this is the eval_actual iterator\n eval_iterator = iterator_utils.get_iterator(\n data_dataset,\n kb_dataset,\n vocab_table,\n batch_size=hparams.batch_size,\n t1=hparams.t1.encode(),\n t2=hparams.t2.encode(),\n eod=hparams.eod,\n len_action=hparams.len_action,\n random_seed=hparams.random_seed,\n num_buckets=hparams.num_buckets,\n max_dialogue_len=hparams.max_dialogue_len)\n # this is the placeholder iterator\n handle = tf.placeholder(tf.string, shape=[])\n iterator = tf.data.Iterator.from_string_handle(\n handle, eval_iterator.output_types, eval_iterator.output_shapes)\n batched_iterator = iterator_utils.get_batched_iterator(iterator)\n\n model = model_creator(\n hparams,\n iterator=batched_iterator,\n handle=handle,\n mode=tf.estimator.ModeKeys.EVAL,\n vocab_table=vocab_table,\n scope=scope,\n extra_args=extra_args)\n\n return EvalModel(\n graph=graph,\n model=model,\n placeholder_iterator=iterator,\n placeholder_handle=handle,\n eval_iterator=eval_iterator,\n data_file_placeholder=data_file_placeholder,\n kb_file_placeholder=kb_file_placeholder)", "def predict(self):\n raise NotImplementedError", "def __init__(self, estimator, **kwargs):\n super(LogisticRegression, self).__init__(\n estimator, **kwargs)\n\n self.estimator = estimator", "def build_model(cls, args):\n base_architecture(args) \n return StyleGANGeneratorPretrain(args)", "def create_training(logits):\r\n \r\n\r\n return train_op, loss, label_ph", "def train_model(evidence, labels):\n\n model = KNeighborsClassifier(n_neighbors=1)\n model.fit(evidence, labels)\n \n return model", "def _build_predictor(self):\n try: \n predict_fn = tf.contrib.predictor.from_saved_model(self.saved_path)\n except OSError as err: \n print(f\"OSError: {err}\")\n self._predict_fn = predict_fn", "def nizza_model_fn(self, features, mode, params):\n precomputed = self.precompute(features, mode, params)\n loss = self.compute_loss(features, mode, params, precomputed)\n train_op = tf.contrib.layers.optimize_loss(\n loss=loss,\n global_step=tf.train.get_global_step(),\n optimizer=tf.train.AdamOptimizer,\n learning_rate=params.learning_rate\n )\n return tf.estimator.EstimatorSpec(\n mode=mode,\n loss=loss,\n train_op=train_op)", "def __init__(\n self,\n estimator: Estimator,\n num_deep_models: 
5,\n **kwargs,\n ) -> None:\n super().__init__(**kwargs)\n self.estimator = estimator\n self.num_deep_models = num_deep_models", "def _hg_model_fn(features, labels, mode, params):\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n weight_decay = params.weight_decay\n momentum = params.momentum\n decay_factor = params.decay_factor\n decay_step = params.decay_step\n init_learning_rate = params.init_learning_rate\n num_stacks = params.num_stacks\n num_joints = params.num_joints\n\n tower_features = features\n if mode == tf.estimator.ModeKeys.PREDICT:\n if num_gpus < 1:\n tower_labels = [None]\n else:\n tower_labels = [None for i in range(num_gpus)]\n else:\n tower_labels = labels\n\n tower_losses = []\n tower_gradvars = []\n tower_preds = []\n\n # channels first (NCHW) is normally optimal on GPU and channels last (NHWC)\n # on CPU. The exception is Intel MKL on CPU which is optimal with\n # channels_last.\n data_format = params.data_format\n if not data_format:\n if num_gpus == 0:\n data_format = 'channels_last'\n else:\n data_format = 'channels_first'\n\n if num_gpus == 0:\n num_devices = 1\n device_type = 'cpu'\n else:\n num_devices = num_gpus\n device_type = 'gpu'\n\n for i in range(num_devices):\n worker_device = '/{}:{}'.format(device_type, i)\n if variable_strategy == 'CPU':\n device_setter = utils.local_device_setter(\n worker_device=worker_device)\n elif variable_strategy == 'GPU':\n device_setter = utils.local_device_setter(\n ps_device_type='gpu',\n worker_device=worker_device,\n ps_strategy=tf.contrib.training.GreedyLoadBalancingStrategy(\n num_gpus, tf.contrib.training.byte_size_load_fn))\n if mode == tf.estimator.ModeKeys.TRAIN:\n batch_size = params.train_batch_size / num_devices\n else:\n batch_size = params.eval_batch_size / num_devices\n\n with tf.variable_scope('hg', reuse=bool(i != 0)):\n with tf.name_scope('tower_%d' % i) as name_scope:\n with tf.device(device_setter):\n loss, gradvars, preds = _tower_fn(\n mode, weight_decay, tower_features[i][0], tower_labels[i],\n data_format, params.batch_norm_decay,\n params.batch_norm_epsilon, params.num_stacks, params.num_out, params.n_low, params.num_joints, batch_size,params.seq_length)\n tower_losses.append(loss)\n tower_gradvars.append(gradvars)\n tower_preds.append(preds)\n if i == 0:\n # Only trigger batch_norm moving mean and variance update from\n # the 1st tower. Ideally, we should grab the updates from all\n # towers but these stats accumulate extremely fast so we can\n # ignore the other stats from the other towers without\n # significant detriment.\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,\n name_scope)\n\n if mode == tf.estimator.ModeKeys.TRAIN or mode == tf.estimator.ModeKeys.EVAL:\n\n # Now compute global loss and gradients.\n gradvars = []\n with tf.name_scope('gradient_averaging'):\n all_grads = {}\n for grad, var in itertools.chain(*tower_gradvars):\n if grad is not None:\n all_grads.setdefault(var, []).append(grad)\n for var, grads in six.iteritems(all_grads):\n # Average gradients on the same device as the variables\n # to which they apply.\n with tf.device(var.device):\n if len(grads) == 1:\n avg_grad = grads[0]\n else:\n avg_grad = tf.multiply(tf.add_n(grads), 1. 
/ len(grads))\n gradvars.append((avg_grad, var))\n\n # Device that runs the ops to apply global gradient updates.\n consolidation_device = '/gpu:0' if variable_strategy == 'GPU' else '/cpu:0'\n with tf.device(consolidation_device):\n\n learning_rate = tf.train.exponential_decay(init_learning_rate, tf.train.get_global_step(), decay_step, decay_factor, staircase=True, name= 'learning_rate')\n\n loss = tf.reduce_mean(tower_losses, name='loss')\n\n examples_sec_hook = utils.ExamplesPerSecondHook(\n params.train_batch_size, every_n_steps=10)\n\n tensors_to_log = {'learning_rate': learning_rate, 'loss': loss}\n\n logging_hook = tf.train.LoggingTensorHook(\n tensors=tensors_to_log, every_n_iter=100)\n\n train_hooks = [logging_hook, examples_sec_hook]\n\n optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate)\n\n if params.sync:\n optimizer = tf.train.SyncReplicasOptimizer(\n optimizer, replicas_to_aggregate=num_workers)\n sync_replicas_hook = optimizer.make_session_run_hook(params.is_chief)\n train_hooks.append(sync_replicas_hook)\n\n # Create single grouped train op\n train_op = [\n optimizer.apply_gradients(\n gradvars, global_step=tf.train.get_global_step())\n ]\n \n train_op.extend(update_ops)\n train_op = tf.group(*train_op)\n\n predictions = {\n 'heatmaps':\n tf.concat([p['heatmaps'] for p in tower_preds], axis=0),\n 'images':\n tf.concat([i for i in tower_features], axis=0)\n }\n if mode==tf.estimator.ModeKeys.EVAL:\n hm = predictions['heatmaps']\n stacked_labels = tf.concat(labels[0][0][0], axis=0)\n \n gt_labels = tf.transpose(stacked_labels,[1,0,3,4,2])\n\n joint_accur = []\n for j in range(params.seq_length):\n for i in range(params.num_joints):\n joint_accur.append(_pck_hm(hm[j,:,-1, :, :,i], gt_labels[j,:, :, :, i], params.eval_batch_size/num_devices))\n accuracy = tf.stack(joint_accur)\n metrics = {'Mean Pixel Error': tf.metrics.mean(accuracy)}\n tf.logging.info('Accuracy op computed')\n else:\n metrics = None\n \n else:\n train_op = None\n loss = None\n train_hooks = None\n metrics = None\n predictions = {\n 'heatmaps':\n tf.concat([p['heatmaps'] for p in tower_preds], axis=0),\n 'images':\n tf.concat([i for i in tower_features], axis=0)\n }\n \n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions,\n loss=loss,\n train_op=train_op,\n training_hooks=train_hooks,\n eval_metric_ops=metrics)", "def _make_estimator(self, append=True, random_state=None):\n estimator = clone(self.base_estimator_)\n estimator.set_params(**dict((p, getattr(self, p))\n for p in self.estimator_params))\n # print estimator.get_params()\n\n if random_state is not None:\n _set_random_states(estimator, random_state)\n\n if append:\n self.estimators_.append(estimator)\n\n return estimator", "def populate_graph(self):", "def dec_model(params):\n\n if (params['random']):\n print(\"Random Decision Tree Parameters.\")\n params['criterion'] = random.choice([\"gini\", \"entropy\"])\n params['splitter'] = random.choice([\"best\", \"random\"])\n params['max_features'] = random.choice(['auto', 'sqrt', 'log2', random.randrange(50, 1000, step=25), None])\n params['max_depth'] = random.choice([None, random.randrange(5, 1000, step=5)])\n params['min_samples_split'] = random.choice([2, random.randrange(1, 50, step=1)])\n params['max_leaf_nodes'] = random.choice([None, random.randrange(2, 50, step=1)])\n params['min_samples_leaf'] = random.choice([1, random.randrange(5, 100, step=5)])\n print(params)\n \n model = tree.DecisionTreeClassifier(\n criterion=params['criterion'],\n 
splitter=params['splitter'],\n max_features=params['max_features'],\n max_depth=params['max_depth'],\n min_samples_split=params['min_samples_split'],\n max_leaf_nodes=params['max_leaf_nodes'],\n min_samples_leaf=params['min_samples_leaf']\n )\n\n return model", "def build_model():\n # Pipeline of CountVextorizer, TfdifTransformer and MultiOutputClassifier\n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(RandomForestClassifier()))\n ])\n \n parameters = {'clf__estimator__n_estimators': [50, 30],\n 'clf__estimator__min_samples_split': [3, 2] \n }\n \n cv = GridSearchCV(pipeline, param_grid= parameters, verbose=2, n_jobs=4)\n return cv", "def build_inference_graph(self):\n self.build_train_graph()", "def build_graph(self):\n edge_data_by_type, all_edges, all_nodes = self.load_training_data(\n self.train_edges_file,\n slf_loop=self.config['slf_loop'],\n symmetry_edge=self.config['symmetry_edge'])\n\n num_nodes = len(all_nodes)\n node_features = {\n 'index': np.array(\n [i for i in range(num_nodes)], dtype=np.int64).reshape(-1, 1)\n }\n\n self.graph = heter_graph.HeterGraph(\n num_nodes=num_nodes,\n edges=edge_data_by_type,\n node_types=None,\n node_feat=node_features)\n\n self.edge_types = sorted(self.graph.edge_types_info())\n logging.info('total %d nodes are loaded' % (self.graph.num_nodes))", "def test_classifier_age_estimator(x, class_model):\n return class_model.predict(x)", "def model_fn(features,labels,mode,params):\n tf.logging.info('*** Features ***')\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s ,shape = %s\" % (name,features[name].shape))\n\n input_ids = features['input_ids']\n input_mask = features['input_mask']\n segment_ids = features['segment_ids']\n label_ids = features['label_ids']\n if 'is_real_example' in features:\n is_real_example = tf.cast(features['is_real_example'],dtype=tf.float32)\n else:\n is_real_example = tf.ones(tf.shape(label_ids),dtype=tf.float32)\n\n is_training = (mode == tf_estimator.estimator.ModeKeys.TRAIN)\n\n (total_loss,per_example_loss,probabilities,predictions) = \\\n create_model(albert_config,is_training,input_ids,input_mask,\n segment_ids,label_ids,num_labels,\n use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n if init_checkpoint:\n (assignment_map,initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars,init_checkpoint)\n tf.train.init_from_checkpoint(init_checkpoint,assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf_estimator.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer(total_loss,\n learning_rate,\n num_train_steps,\n num_warmup_steps,use_tpu=False)\n output_spec = tf_estimator.estimator.EstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n )\n elif mode == tf_estimator.estimator.ModeKeys.EVAL:\n def metric_fn(per_example_loss,label_ids,logits,is_real_example):\n accuracy = tf.metrics.accuracy(\n labels=label_ids,predictions=predictions,\n weights=is_real_example\n )\n loss = tf.metrics.mean(\n values=per_example_loss,weights=is_real_example\n )\n return {\n 'eval_accuracy':accuracy,\n 'eval_loss':loss,\n }\n\n eval_metrics = 
metric_fn(per_example_loss,label_ids,predictions,is_real_example)\n output_spec = tf_estimator.estimator.EstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metric_ops=eval_metrics\n )\n else:\n output_spec = tf_estimator.estimator.EstimatorSpec(\n mode=mode,\n predictions={\n 'probabilities':probabilities,\n 'predictions':predictions,\n },\n )\n\n return output_spec", "def _build_model(self):\n\n with tf.variable_scope(\"Matchnet\", reuse=tf.AUTO_REUSE):\n # For determining the runtime shape\n x_shp = tf.shape(self.x_in)\n\n # -------------------- Network archintecture --------------------\n # Build graph\n print(\"Building Graph\")\n self.logits = build_graph(self.x_in, self.is_training, self.config)\n # ---------------------------------------------------------------\n\n # Turn into weights for each sample\n weights = tf.nn.relu(tf.tanh(self.logits))\n\n # Make input data (num_img_pair x num_corr x 4)\n xx = tf.transpose(tf.reshape(\n self.x_in, (x_shp[0], x_shp[2], 4)), (0, 2, 1))\n\n # Create the matrix to be used for the eight-point algorithm\n X = tf.transpose(tf.stack([\n xx[:, 2] * xx[:, 0], xx[:, 2] * xx[:, 1], xx[:, 2],\n xx[:, 3] * xx[:, 0], xx[:, 3] * xx[:, 1], xx[:, 3],\n xx[:, 0], xx[:, 1], tf.ones_like(xx[:, 0])\n ], axis=1), (0, 2, 1))\n print(\"X shape = {}\".format(X.shape))\n wX = tf.reshape(weights, (x_shp[0], x_shp[2], 1)) * X\n print(\"wX shape = {}\".format(wX.shape))\n XwX = tf.matmul(tf.transpose(X, (0, 2, 1)), wX)\n print(\"XwX shape = {}\".format(XwX.shape))\n\n # Recover essential matrix from self-adjoing eigen\n e, v = tf.self_adjoint_eig(XwX)\n self.e_hat = tf.reshape(v[:, :, 0], (x_shp[0], 9))\n # Make unit norm just in case\n self.e_hat /= tf.norm(self.e_hat, axis=1, keep_dims=True)", "def build(self):\n self.global_step = tf.train.get_or_create_global_step()\n self.build_network()\n if self.mode != tf.estimator.ModeKeys.PREDICT:\n self.build_losses()", "def build(self):\n self.global_step = tf.train.get_or_create_global_step()\n self.build_network()\n if self.mode != tf.estimator.ModeKeys.PREDICT:\n self.build_losses()", "def model_fn_builder(bert_model_hub, num_labels, learning_rate,\n num_train_steps, num_warmup_steps):\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)\n\n # TRAIN and EVAL\n if not is_predicting:\n\n (loss, predicted_labels,\n log_probs) = create_model(bert_model_hub, is_predicting,\n input_ids, input_mask, segment_ids,\n label_ids, num_labels)\n\n train_op = bert.optimization.create_optimizer(loss,\n learning_rate,\n num_train_steps,\n num_warmup_steps,\n use_tpu=False)\n\n # Calculate evaluation metrics.\n def metric_fn(label_ids, predicted_labels):\n accuracy = tf.metrics.accuracy(label_ids, predicted_labels)\n f1_score = tf.contrib.metrics.f1_score(label_ids,\n predicted_labels)\n auc = tf.metrics.auc(label_ids, predicted_labels)\n recall = tf.metrics.recall(label_ids, predicted_labels)\n precision = tf.metrics.precision(label_ids, predicted_labels)\n true_pos = tf.metrics.true_positives(label_ids,\n predicted_labels)\n true_neg = tf.metrics.true_negatives(label_ids,\n predicted_labels)\n false_pos = tf.metrics.false_positives(label_ids,\n predicted_labels)\n false_neg = tf.metrics.false_negatives(label_ids,\n predicted_labels)\n return 
{\n \"eval_accuracy\": accuracy,\n \"f1_score\": f1_score,\n \"auc\": auc,\n \"precision\": precision,\n \"recall\": recall,\n \"true_positives\": true_pos,\n \"true_negatives\": true_neg,\n \"false_positives\": false_pos,\n \"false_negatives\": false_neg\n }\n\n eval_metrics = metric_fn(label_ids, predicted_labels)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n train_op=train_op)\n else:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n eval_metric_ops=eval_metrics)\n else:\n (predicted_labels,\n log_probs) = create_model(bert_model_hub, is_predicting,\n input_ids, input_mask, segment_ids,\n label_ids, num_labels)\n\n predictions = {\n 'probabilities': log_probs,\n 'labels': predicted_labels\n }\n return tf.estimator.EstimatorSpec(mode, predictions=predictions)\n\n # Return the actual model function in the closure\n return model_fn", "def model_fn_builder(config):\n def model_fn(features,labels,mode,params):\n \"\"\"The model_fn for Estimator\"\"\"\n input_q = features[\"input_q\"] # query feature vector\n input_K = features[\"input_K\"] # Key set Matrix\n input_v = features[\"input_v\"] # image visual feature vector\n input_labels = features[\"input_labels\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n model = modeling.AMT(\n config = config,\n is_trainging = is_training, \n scope = \"AMT\",\n input_q = input_q,\n input_K = input_K,\n input_v = input_v\n )\n loss = model.loss\n q_doc_rank = model.get_predict()\n output_spec = None\n scaffold_fn = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer()\n output_spec = tf.estimator.EstimatorSpec(\n mode = mode,\n loss = loss,\n train_op = train_op,\n scaffold_fn = scaffold_fn)\n elif mode == tf.estimator.ModeKeys.EVAL:\n def metric_fn():\n return 0\n else:\n output_spec = tf.estimator.EstimatorSpec(\n mode = mode,\n predictions = q_doc_rank,\n scaffold_fn = scaffold_fn)\n return output_spec\n return model_fn", "def predict(self, *args, **kwargs):\n return self(*args, **kwargs)", "def build_eval_graph(input_fn, model_fn, hparams):\n dataset = input_fn(None)\n batch = dataset.make_one_shot_iterator().get_next()\n\n batch_holder = {\n \"transform\":\n tf.placeholder(\n tf.float32,\n [1, 1, hparams.n_parts, hparams.n_dims + 1, hparams.n_dims + 1]),\n \"joint\":\n tf.placeholder(tf.float32, [1, 1, hparams.n_parts, hparams.n_dims]),\n \"point\":\n tf.placeholder(tf.float32, [1, 1, None, hparams.n_dims]),\n \"label\":\n tf.placeholder(tf.float32, [1, 1, None, 1]),\n }\n latent_holder, latent, occ = model_fn(batch_holder, None, None, \"gen_mesh\")\n\n # Eval Summary\n iou_holder = tf.placeholder(tf.float32, [])\n best_holder = tf.placeholder(tf.float32, [])\n tf.summary.scalar(\"IoU\", iou_holder)\n tf.summary.scalar(\"Best_IoU\", best_holder)\n\n return {\n \"batch_holder\": batch_holder,\n \"latent_holder\": latent_holder,\n \"latent\": latent,\n \"occ\": occ,\n \"batch\": batch,\n \"iou_holder\": iou_holder,\n \"best_holder\": best_holder,\n \"merged_summary\": tf.summary.merge_all(),\n }", "def train_model(evidence, labels):\n model = KNeighborsClassifier(n_neighbors=1)\n model.fit(evidence, labels)\n return model", "def train_model(evidence, labels):\n model = KNeighborsClassifier(n_neighbors=1)\n model.fit(evidence, labels)\n return model", "def train_model(evidence, labels):\n model = KNeighborsClassifier(n_neighbors=1)\n model.fit(evidence, labels)\n return model", "def predict(self, **kwargs):\n raise 
NotImplementedError", "def create_tracker(dynamic_model, track_id, label, P_init = None, Q = None, R = None):\n\n # Constant Velocity Model:\n if dynamic_model == \"CV\":\n\n P_init_def, Q_def, R_def = EKF_CV.EKF_CV_track.get_default_param();\n if P_init is None:\n P_init = P_init_def;\n if Q is None:\n Q = Q_def;\n if R is None:\n R = R_def;\n\n\n # Create new traker:\n tracker = EKF_CV.EKF_CV_track(Q, R, P_init, track_id, label);\n\n # Constant Velocity Constant Yaw Rate Model:\n elif dynamic_model == \"CVCYR\":\n\n P_init_def, Q_def, R_def = EKF_CVCYR.EKF_CVCYR_track.get_default_param();\n if P_init is None:\n P_init = P_init_def;\n if Q is None:\n Q = Q_def;\n if R is None:\n R = R_def;\n\n # Create new traker:\n tracker = EKF_CVCYR.EKF_CVCYR_track( Q, R, P_init, track_id, label);\n\n # Bicycle model\n elif dynamic_model == \"BM2\":\n\n P_init_def, Q_def, R_def = EKF_BM2.EKF_BM2_track.get_default_param();\n if P_init is None:\n P_init = P_init_def;\n if Q is None:\n Q = Q_def;\n if R is None:\n R = R_def;\n\n # Create new traker:\n tracker = EKF_BM2.EKF_BM2_track( Q, R, P_init, track_id, label);\n\n # Raise Error if the model is not recognized\n else:\n raise ValueError('Dynamic Model not handled: {}'.format(dynamic_model));\n\n\n return tracker;", "def _train_and_evaluate(estimator, output_dir):\n \n \"\"\"X_train, y_train =utils._feature_label_split(df_train,\"is_churn\",\"msno\")\n df_val = utils.read_from_bigquery(\"amiable-octane-267022.kkbox.output_val_1\",\"amiable-octane-267022\")\n X_val, y_val =utils._feature_label_split(df_val,\"is_churn\",\"msno\")\"\"\"\n \n df_train=utils.over_sample(\"amiable-octane-267022.kkbox.output_train_1\",\"amiable-octane-267022\")\n X_train, y_train =utils._feature_label_split(df_train,\"is_churn\",\"msno\")\n df_val=utils.over_sample(\"amiable-octane-267022.kkbox.output_val_1\",\"amiable-octane-267022\")\n X_val, y_val =utils._feature_label_split(df_val,\"is_churn\",\"msno\")\n\n estimator.fit(X_train, y_train)\n f1_scorer = make_scorer(f1_score)\n accuracy_scorer =make_scorer(accuracy_score)\n\n if metadata.HYPERPARAMTER_TUNING:\n scores=model_selection.cross_val_score(estimator, X_val, y_val, cv=3,scoring=f1_scorer)\n #,scoring=f1_scorer\n\n logging.info('Score: %s', scores)\n\n #tune hyper\n hpt = hypertune.HyperTune()\n hpt.report_hyperparameter_tuning_metric(\n hyperparameter_metric_tag='F1_SCORE',\n metric_value=np.mean(scores),\n global_step=10000)\n \n#joblib.dump(estimator, 'model.joblib')\n\n # Write model and eval metrics to `output_dir`\n model_output_path = os.path.join(output_dir, 'model',metadata.MODEL_FILE_NAME)\n \n utils.dump_object(estimator, model_output_path)", "def __init__(self, estimator, target_language='java',\n target_method='predict', **kwargs):\n super(RandomForestClassifier, self).__init__(\n estimator, target_language=target_language,\n target_method=target_method, **kwargs)\n\n # Check type of base estimators:\n if not isinstance(estimator.base_estimator, DecisionTreeClassifier):\n msg = \"The classifier doesn't support the given base estimator %s.\"\n raise ValueError(msg, estimator.base_estimator)\n\n # Check number of base estimators:\n if not estimator.n_estimators > 0:\n msg = \"The classifier hasn't any base estimators.\"\n raise ValueError(msg)\n\n self.estimator = estimator", "def __init__(self, graph: ghidra.graph.GImplicitDirectedGraph, metric: ghidra.graph.GEdgeWeightMetric):\n ...", "def __init__(self, parameters: ParametersList, algorithm: ClassVar, algorithm_data: AlgorithmData):\n super(GreedyTrain, 
self).__init__(parameters, algorithm, algorithm_data)" ]
[ "0.57602215", "0.56859493", "0.5646092", "0.5621122", "0.5566403", "0.55499375", "0.55365247", "0.5525229", "0.547958", "0.54549783", "0.5403307", "0.5403233", "0.5356686", "0.53558606", "0.53558564", "0.53543764", "0.53396976", "0.5337955", "0.5331275", "0.53096443", "0.5309373", "0.52815545", "0.5277724", "0.5277445", "0.5256219", "0.52543944", "0.5240419", "0.5240196", "0.5237329", "0.5234087", "0.52261204", "0.5211918", "0.5203461", "0.51965255", "0.51880586", "0.5186999", "0.51628596", "0.516278", "0.51577765", "0.51492167", "0.51414424", "0.51414424", "0.51385415", "0.51263016", "0.5118376", "0.51162636", "0.5115886", "0.51129425", "0.51061195", "0.51025873", "0.51012725", "0.5098086", "0.50968856", "0.5091396", "0.50908816", "0.50908816", "0.50908816", "0.5088398", "0.50754803", "0.50740397", "0.5070888", "0.50650173", "0.50605375", "0.5055019", "0.50542325", "0.50536436", "0.50535643", "0.5048763", "0.5045361", "0.5043083", "0.50413865", "0.5041188", "0.5032251", "0.5031815", "0.5031239", "0.5030943", "0.5026315", "0.50254774", "0.50178224", "0.5016232", "0.50134313", "0.50132895", "0.50032824", "0.50009805", "0.4999057", "0.49854994", "0.4980512", "0.4980512", "0.49785718", "0.49749783", "0.4974702", "0.49716106", "0.4965974", "0.4965974", "0.4965974", "0.49651378", "0.4963611", "0.49628994", "0.49627826", "0.49599177", "0.49589312" ]
0.0
-1
Transforms single row of pandas `original_tweets_with_lemmas.p` to graph. Suffixes in node names are needed due to intersection between hashtags and user names.
def _transform_single_row(self, hashtag_agg: Dict, row: pd.Series): user_name = row["username"] + "_user" tweet_id = str(row["id"]) + "_tweet" tags = row["hashtags"] self._users_labels.add(user_name) self._tweet_labels.add(tweet_id) if not self.graph.has_node(user_name): self.graph.add_node(user_name, type="username") if not self.graph.has_node(tweet_id): self.graph.add_node(tweet_id, type="tweet_id") for hashtag_index in tags: tag = hashtag_index["text"] + "_tag" hashtag_agg[tag] += row["lemmas"] if not self.graph.has_node(tag): self.graph.add_node(tag, type="hashtag") if not self.graph.has_edge(tag, user_name): self.graph.add_edge(tag, user_name) if not self.graph.has_edge(tag, tweet_id): self.graph.add_edge(tag, tweet_id) self._hashtag_labels.add(tag)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset_node_ids(df):\n le = LabelEncoder()\n all_node_names = list(set(df['from_name'].values.tolist() + df['to_name'].values.tolist()))\n le.fit(all_node_names)\n df['from_id'] = le.transform(df['from_name'])\n df['to_id'] = le.transform(df['to_name'])\n return df, le", "def lemmatize(data: pd.Series) -> pd.Series:\n lemmatizer = WordNetLemmatizer()\n return data.apply(lambda row: re.sub(\n r'\\b\\w+\\b', lambda match: lemmatizer.lemmatize(\n match.group(), pos=to_pos([match.group()])), row))", "def addToGraph(tid,uid,mentions) :\n global G,found\n\n user = r.get(int(uid))\n \n if user == None :\n return\n\n user = re.findall('\"((?:(?!(?:\",\")).)*)\"', user)\n \n # lower the hashtags\n mentions = [t.lower() for t in mentions if t not in [\"\"]]\n \n usern = user[1].lower()\n\n G.add_node(usern)\n\n found = found + 1\n\n # iterate through mentions\n for m in mentions :\n # add hashtag to graph\n G.add_node(m)\n \n # update edge weight for every hashtag 2-permutation of the tweet\n if G.has_edge(usern,m) :\n G[usern][m]['weight'] += 1\n else :\n G.add_edge(usern,m,weight=1)", "def get_unlabelled_tweets_reannotation():\n conn = get_connection()\n c = conn.cursor()\n #res = c.execute('SELECT * FROM tweets WHERE tweets.is_about_depression is null AND tweets.username IN (SELECT username FROM users WHERE mentions_depr=1)').fetchall()\n res = c.execute('SELECT * FROM tweets WHERE tweets.is_about_depression IN (0, 1, 2) AND tweets.is_about_depression2 IS NULL ORDER BY random()').fetchall()\n conn.close()\n return np.array(res)", "def tweetDump2Neo(db, user, tweet_dump):\n \n # user->[tweeted/RTed/quoted]->(tweet/RT/quoteTweet)\n for label in ['tweet', 'retweet', 'quotetweet']:\n if tweet_dump[label]:\n tweets2Neo(db, tweet_dump[label], label=label)\n tweetActions(db, user, tweet_dump[label], label=label)\n \n # push original tweets from RTs/quotes\n for label in ['retweet', 'quotetweet']:\n tweets = [(tw[0],) for tw in tweet_dump[label]]\n if tweets:\n tweets2Neo(db, tweets, label='tweet')\n \n # (RT/quote)-[RETWEET_OF/QUOTE_OF]->(tweet)\n if tweet_dump['retweet']:\n tweetLinks(db, tweet_dump['retweet'], 'retweet', 'tweet', 'RETWEET_OF')\n if tweet_dump['quotetweet']:\n tweetLinks(db, tweet_dump['quotetweet'], 'quotetweet', 'tweet', 'QUOTE_OF')\n\n # push users of original tweets.\n if tweet_dump['users']:\n users2Neo(db, tweet_dump['users'].values())\n multi_user_tweet_actions(db, tweet_dump['users'])\n \n # mentions\n for label in ['tweet', 'retweet', 'quotetweet']:\n mentions = [m[1] for m in tweet_dump['entities'][label]['user_mentions']]\n if mentions:\n users2Neo(db, mentions)\n entities = tweet_dump['entities'][label]['user_mentions']\n entity_links(db, entities, 'MENTIONS', label, 'twitter_user', 'id_str', 'screen_name')\n\n # hashtags, urls and media\n for label in ['tweet', 'retweet', 'quotetweet']:\n for entity_type in ['hashtags', 'urls', 'media']:\n entities = [e[1] for e in tweet_dump['entities'][label][entity_type]]\n if entities:\n entities2neo(db, entities, entity_type)\n\n if tweet_dump['entities'][label]['hashtags']:\n entity_links(db, tweet_dump['entities'][label]['hashtags'],\n 'TAGGED', label, 'hashtag', 'id_str', 'text')\n \n if tweet_dump['entities'][label]['urls']:\n entity_links(db, tweet_dump['entities'][label]['urls'],\n 'LINKS_TO', label, 'url', 'id_str', 'expanded_url')\n \n if tweet_dump['entities'][label]['media']:\n entity_links(db, tweet_dump['entities'][label]['media'],\n 'EMBEDS', label, 'media', 'id_str', 'id_str')", "def preprocess_tweet(tweet):\n 
clean_tweet = tp.clean(tweet)\n\n # perform lemmatization\n tokenizer = TweetTokenizer()\n tweet_tokens = tokenizer.tokenize(clean_tweet)\n\n lemmatized_tweet = lemmatize_tweet(tweet_tokens)\n\n # remove stopwords\n preprocessed_tweet = remove_stopwords(lemmatized_tweet)\n return preprocessed_tweet", "def preprocess_tweet(tweet):\n\n\n clean_tweet, hashtags = separate_hastags_mentions_urls(tweet)\n clean_tweet = remove_emoji_punc(clean_tweet)\n return clean_tweet, hashtags", "def multiUserTweetDump2Neo(db, tweet_dump):\n\n # user->[tweeted/RTed/quoted]->(tweet/RT/quoteTweet)\n for label in ['tweet', 'retweet', 'quotetweet']:\n if tweet_dump[label]:\n tweets2Neo(db, tweet_dump[label], label=label)\n multi_user_labelled_tweet_actions(db, tweet_dump[label], label=label)\n\n # push original tweets from RTs/quotes\n for label in ['retweet', 'quotetweet']:\n tweets = [(tw[0],) for tw in tweet_dump[label]]\n if tweets:\n tweets2Neo(db, tweets, label='tweet')\n\n # (RT/quote)-[RETWEET_OF/QUOTE_OF]->(tweet)\n if tweet_dump['retweet']:\n tweetLinks(db, tweet_dump['retweet'], 'retweet', 'tweet', 'RETWEET_OF')\n if tweet_dump['quotetweet']:\n tweetLinks(db, tweet_dump['quotetweet'], 'quotetweet', 'tweet', 'QUOTE_OF')\n\n # push users of original tweets.\n if tweet_dump['users']:\n users2Neo(db, tweet_dump['users'].values())\n multi_user_tweet_actions(db, tweet_dump['users'])\n\n # mentions\n for label in ['tweet', 'retweet', 'quotetweet']:\n mentions = [m[1] for m in tweet_dump['entities'][label]['user_mentions']]\n if mentions:\n users2Neo(db, mentions)\n entities = tweet_dump['entities'][label]['user_mentions']\n entity_links(db, entities, 'MENTIONS', label, 'twitter_user', 'id_str', 'screen_name')\n\n # hashtags, urls and media\n for label in ['tweet', 'retweet', 'quotetweet']:\n for entity_type in ['hashtags', 'urls', 'media']:\n entities = [e[1] for e in tweet_dump['entities'][label][entity_type]]\n if entities:\n entities2neo(db, entities, entity_type)\n\n if tweet_dump['entities'][label]['hashtags']:\n entity_links(db, tweet_dump['entities'][label]['hashtags'],\n 'TAGGED', label, 'hashtag', 'id_str', 'text')\n\n if tweet_dump['entities'][label]['urls']:\n entity_links(db, tweet_dump['entities'][label]['urls'],\n 'LINKS_TO', label, 'url', 'id_str', 'expanded_url')\n\n if tweet_dump['entities'][label]['media']:\n entity_links(db, tweet_dump['entities'][label]['media'],\n 'EMBEDS', label, 'media', 'id_str', 'id_str')", "def simplify_directed_as_dataframe(df: gpd.GeoDataFrame) -> gpd.GeoDataFrame:\n df.reset_index(inplace=True)\n\n g = gt.Graph(directed=True)\n osm_id = g.new_edge_property('string')\n highway = g.new_edge_property('string')\n level = g.new_edge_property('int')\n lanes = g.new_edge_property('int')\n width = g.new_edge_property('float')\n bicycle = g.new_edge_property('bool')\n bicycle_safety = g.new_edge_property('int')\n foot = g.new_edge_property('bool')\n foot_safety = g.new_edge_property('int')\n max_speed = g.new_edge_property('int')\n motorcar = g.new_edge_property('bool')\n linestring = g.new_edge_property('python::object')\n\n edgelist = df[\n ['u', 'v', 'osm_id', 'highway', 'level', 'lanes', 'width', 'bicycle', 'bicycle_safety', 'foot', 'foot_safety',\n 'max_speed', 'motorcar', 'geometry']].values\n\n nodes_id = g.add_edge_list(edgelist, hashed=True,\n eprops=[osm_id, highway, level, lanes, width, bicycle, bicycle_safety, foot, foot_safety,\n max_speed, motorcar, linestring])\n\n # we are gonna replace the original repeated nodes with a linestring\n e_path = 
g.new_ep('vector<int64_t>')\n for e in g.edges():\n e_path[e] = []\n\n vs = g.get_vertices()\n in_out_deg_2 = (g.get_in_degrees(vs) == 2) & (g.get_out_degrees(vs) == 2)\n\n logging.debug('selecting degree 4 candidates')\n candidates = set()\n for i, v in enumerate(vs):\n if in_out_deg_2[i]:\n ns = list(set(g.get_all_neighbors(v)))\n if len(ns) == 2:\n u, w = ns[0], ns[1]\n uv, vw, wv, vu = g.edge(u, v), g.edge(v, w), g.edge(w, v), g.edge(v, u)\n if highway[uv] == highway[vw] and highway[wv] == highway[vu]:\n candidates.add(v)\n logging.debug('found {} degree 4 candidates to simplify'.format(len(candidates)))\n\n seen = set()\n unregister_candidates = set()\n\n for i, candidate in enumerate(candidates):\n if i == 100000:\n logging.debug('100000 degree 4 candidates')\n if candidate in seen:\n continue\n\n seen.add(candidate)\n\n u, w = g.get_out_neighbors(candidate)\n is_u_fringe, is_w_fringe = u not in candidates, w not in candidates\n\n cu, cw = g.edge(candidate, u), g.edge(candidate, w)\n\n us = []\n ws = []\n\n while not is_u_fringe:\n seen.add(u)\n us.append(u)\n neighbors = set(g.get_out_neighbors(u))\n neighbors -= seen\n if len(neighbors) > 0:\n u = neighbors.pop()\n is_u_fringe = u not in candidates\n elif u == w:\n us.pop(-1)\n u = us.pop(-1)\n unregister_candidates.add(u)\n unregister_candidates.add(w)\n is_u_fringe = True\n is_w_fringe = True\n g.remove_edge(g.edge(s=u, t=w))\n g.remove_edge(g.edge(s=w, t=u))\n else:\n logging.debug('degree 2: we got here somehow {} {} {} {}', candidate, u, v,\n g.get_all_neighbors(candidate))\n break\n\n while not is_w_fringe:\n seen.add(w)\n ws.append(w)\n neighbors = set(g.get_out_neighbors(w))\n neighbors -= seen\n if len(neighbors) > 0:\n w = neighbors.pop()\n is_w_fringe = w not in candidates\n else:\n logging.debug('degree 2: we got here somehow {} {} {} {}', candidate, u, v,\n g.get_all_neighbors(candidate))\n break\n\n if is_u_fringe and is_w_fringe:\n e = g.add_edge(source=u, target=w)\n path = [u] + list(reversed(us)) + [candidate] + ws + [w]\n e_path[e] = [int(nodes_id[node]) for node in path]\n linestrings = [linestring[g.edge(a, b)] for a, b in pairwise(path)]\n linestring[e] = join_linestrings(linestrings)\n osm_id[e], highway[e], level[e], lanes[e], width[e], bicycle[e], bicycle_safety[e], foot[e], foot_safety[e], \\\n max_speed[e], motorcar[e] = osm_id[cw], highway[cw], level[cw], lanes[cw], width[cw], bicycle[cw], \\\n bicycle_safety[cw], \\\n foot[cw], foot_safety[cw], max_speed[cw], motorcar[cw]\n\n e = g.add_edge(source=w, target=u)\n path = [w] + list(reversed(ws)) + [candidate] + us + [u]\n e_path[e] = [int(nodes_id[node]) for node in path]\n linestrings = [linestring[g.edge(a, b)] for a, b in pairwise(path)]\n linestring[e] = join_linestrings(linestrings)\n osm_id[e], highway[e], level[e], lanes[e], width[e], bicycle[e], bicycle_safety[e], foot[e], foot_safety[e], \\\n max_speed[e], motorcar[e] = osm_id[cu], highway[cu], level[cu], lanes[cu], width[cu], bicycle[cu], \\\n bicycle_safety[cu], \\\n foot[cu], foot_safety[cu], max_speed[cu], motorcar[cu]\n\n else:\n logging.debug(\n 'unexpected behavior, source={0}, target={1}, candidate={2}, us={3}, ws={4}'.format(u, w, candidate, us,\n ws))\n\n unseen = candidates - seen\n if len(unseen) > 0:\n logging.debug(\n 'Network scan after degree 4 simplification uncomplete: candidates {0} have not been examined'.format(\n unseen))\n\n candidates -= unregister_candidates\n g.remove_vertex(list(candidates))\n\n vs = g.get_vertices()\n in_out_deg_1 = (g.get_in_degrees(vs) == 1) & 
(g.get_out_degrees(vs) == 1)\n\n logging.debug('selecting degree 2 candidates')\n candidates = set()\n for i, v in enumerate(vs):\n if in_out_deg_1[i]:\n u = g.get_in_neighbors(v)[0]\n w = g.get_out_neighbors(v)[0]\n\n if u != w:\n uv, vw = g.edge(u, v), g.edge(v, w)\n if highway[uv] == highway[vw]:\n candidates.add(v)\n logging.debug('found {} degree 2 candidates to simplify'.format(len(candidates)))\n\n seen = set()\n unregister_candidates = set()\n\n for candidate in candidates:\n if candidate in seen:\n continue\n\n seen.add(candidate)\n\n u = g.get_in_neighbors(candidate)[0]\n w = g.get_out_neighbors(candidate)[0]\n\n uc = g.edge(u, candidate)\n\n is_u_fringe, is_w_fringe = u not in candidates, w not in candidates\n\n us = []\n ws = []\n\n while not is_u_fringe:\n seen.add(u)\n us.append(u)\n neighbors = set(g.get_in_neighbors(u))\n neighbors -= seen\n if len(neighbors) > 0:\n u = neighbors.pop()\n is_u_fringe = u not in candidates\n elif u == w:\n us.pop(-1)\n u = us.pop(-1)\n unregister_candidates.add(u)\n unregister_candidates.add(w)\n is_u_fringe = True\n is_w_fringe = True\n g.remove_edge(g.edge(s=w, t=u))\n else:\n logging.debug('degree 1: we got here somehow {} {} {} {}', candidate, u, v,\n g.get_all_neighbors(candidate))\n break\n\n while not is_w_fringe:\n seen.add(w)\n ws.append(w)\n neighbors = set(g.get_out_neighbors(w))\n neighbors -= seen\n if len(neighbors) > 0:\n w = neighbors.pop()\n is_w_fringe = w not in candidates\n else:\n logging.debug('degree 1: we got here somehow {} {} {} {}', candidate, u, v,\n g.get_all_neighbors(candidate))\n break\n\n if is_u_fringe and is_w_fringe:\n e = g.add_edge(source=u, target=w)\n path = [u] + list(reversed(us)) + [candidate] + ws + [w]\n e_path[e] = [int(nodes_id[node]) for node in path]\n linestrings = [linestring[g.edge(a, b)] for a, b in pairwise(path)]\n linestring[e] = join_linestrings(linestrings)\n osm_id[e], highway[e], level[e], lanes[e], width[e], bicycle[e], bicycle_safety[e], foot[e], foot_safety[e], \\\n max_speed[e], motorcar[e] = osm_id[uc], highway[uc], level[uc], lanes[uc], width[uc], bicycle[uc], \\\n bicycle_safety[uc], \\\n foot[uc], foot_safety[uc], max_speed[uc], motorcar[uc]\n else:\n logging.error('unexpected behavior, source={0}, target={1}, candidate={2}, us={3}, ws={4}', u, w, us, ws)\n\n unseen = candidates - seen\n if len(unseen) > 0:\n logging.debug(\n 'Network scan after degree 2 simplification not finished: candidates {0} have not been examined'.format(\n unseen))\n\n candidates -= unregister_candidates\n g.remove_vertex(list(candidates))\n\n logging.debug(' linestring path')\n edges_tuples = []\n for e in g.edges():\n source, target, path = nodes_id[e.source()], nodes_id[e.target()], e_path[e]\n if len(path) == 0:\n path = [source, target]\n else:\n path = [int(i) for i in path]\n\n e_tuples = (g.edge_index[e], source, target, path,\n osm_id[e], highway[e], level[e], lanes[e], width[e], bicycle[e], bicycle_safety[e], foot[e],\n foot_safety[e], max_speed[e], motorcar[e], linestring[e])\n edges_tuples.append(e_tuples)\n\n df_edges_simplified = pd.DataFrame.from_records(edges_tuples, index='edge_id',\n columns=['edge_id', 'u', 'v', 'path', 'osm_id', 'highway',\n 'level', 'lanes', 'width', 'bicycle', 'bicycle_safety',\n 'foot', 'foot_safety', 'max_speed', 'motorcar',\n 'geometry'])\n\n df_edges_simplified.osm_id = df_edges_simplified.osm_id.str.split('-').str[0]\n df_edges_simplified = gpd.GeoDataFrame(df_edges_simplified, geometry='geometry')\n df_edges_simplified.crs = df.crs\n return 
df_edges_simplified", "def annotate_tm_edge_label(self):\n if self._mapping is not None:\n kw = dict(fontsize=6, color='black', ha='center')\n m = self._mapping\n n_rows = m.metadata['n_rows']\n f_tm_top = lambda g: m.ix[m.ix[g.index]['row'].idxmax(), 'slot']\n f_tm_bottom = lambda g: m.ix[m.ix[g.index]['row'].idxmin(), 'slot']\n tm_top = np.unique(m.groupby('col').agg(f_tm_top)['slot'])\n tm_bottom = np.unique(m.groupby('col').agg(f_tm_bottom)['slot'])\n for tm in tm_top:\n df = m.loc[m['slot'] == tm]\n row = df['row'].max() + n_rows/48 + 0.4\n col = df['col'].mean()\n tm_txt = \"TM{:02d}\".format(tm)\n self.ax.text(col, row, tm_txt, va='bottom', **kw)\n for tm in tm_bottom:\n df = m.loc[m['slot'] == tm]\n row = df['row'].min() - n_rows/48 - 0.4\n col = df['col'].mean()\n tm_txt = \"TM{:02d}\".format(tm)\n self.ax.text(col, row, tm_txt, va='top', **kw)\n else:\n print(\"Cannot annotate, no mapping attached to class\")", "def sents(path):\n\n data = pd.read_csv( path , sep = \"\\t\", index_col=False, encoding='latin-1', low_memory=False)\n df = DataFrame(data)\n# print(df['Sentiment'])\n labelCount = df.groupby(df['Sentiment']).count()\n #print(labelCount)\n x = df['SentimentText'].str.replace('http\\S+|www.\\S+', '', case=False)\n y = df['Sentiment']\n x = x.str.replace('[^a-zA-Z]', ' ') #\n x_check = [\" \".join([lemmatize(word) for word in sentence.split(\" \")]) for sentence in x]\n stopset = set(stopwords.words('English'))\n x_check = [' '.join(w for w in sentence.split() if w.lower() not in stopset)\n for sentence in x\n ]\n #print(x_check)\n return x_check, y", "def flatten_row(row):\n # [u'hop_survey.node_id', u'hop_survey.created', u'hop_survey',\n # u'case.node_id', u'case',\n # u'experiment.node_id', u'experiment',\n # u'project.node_id', u'project',\n # u'program.node_id', u'program']\n label_created_col_name = [k for k in row.keys() if 'created' in k][0]\n label_object_col_name = label_created_col_name.split('.')[0]\n label_node_id_col_name = '{}.node_id'.format(label_object_col_name)\n\n flattened = row[label_object_col_name]\n flattened['_created'] = row[label_created_col_name]\n flattened['_node_id'] = row[label_node_id_col_name]\n\n path_members = [k for k in row.keys() if 'node_id' not in k and label_object_col_name not in k]\n for path_member in path_members:\n for k in row[path_member]:\n flattened['{}_{}'.format(path_member, k)] = row[path_member][k]\n flattened['{}_{}'.format(path_member, 'node_id')] = row['{}.node_id'.format(path_member)]\n\n return flattened", "def preprocess_tweet(self, tweet):\n\n\t\treplaced_urls = [] # Create an empty list\n\t\treplaced_mentions = [] # Create an empty list\n\n\t\t# Replace emojis\n\t\ttweet = emoji.demojize(tweet)\n\n\t\t# Tokenize using NLTK\n\t\ttokenizer = TweetTokenizer(preserve_case=False, reduce_len=True)\n\t\ttokens = tokenizer.tokenize(tweet)\n\n\t\t# Iterate over tokens\n\t\tfor index, token in enumerate(tokens):\n\t\t\t# Replace URLs\n\t\t\tif token[0:4] == \"http\":\n\t\t\t\treplaced_urls.append(token)\n\t\t\t\ttokens[index] = \"<URLURL>\"\n\t\t\t\t# ↳ *tokens[index]* will directly modify *tokens*, whereas any changes to *token* will be lost.\n\n\t\t\t# Replace mentions (Twitter handles; usernames)\n\t\t\telif token[0] == \"@\" and len(token) > 1:\n\t\t\t\t# ↳ Skip the single '@' tokens\n\t\t\t\treplaced_mentions.append(token)\n\t\t\t\ttokens[index] = \"<UsernameMention>\"\n\n\t\t# Detokenize using NLTK's Treebank Word Detokenizer\n\t\tdetokenizer = TreebankWordDetokenizer()\n\t\tprocessed_tweet = 
detokenizer.detokenize(tokens)\n\n\t\t# *replaced_urls* and *replaced_mentions* will contain all of the replaced URLs and Mentions of the input string.\n\t\treturn processed_tweet", "def process_pm_row(row):\n out = row.copy()\n new_input = insert_postmod_token(row['sentence'], row['target'])\n out['input'] = new_input\n del out['sentence']\n return out", "def cleaninto_df(frame:pd) -> pd:\n # remove repeated characters EXAMPLE: DIMPLLLLEEEEE -> DIMPLE\n # nopunc = word_tokenize(nopunc) this might not work. find something else\n\n stop = stopwords.words('english')\n newStopWords = ['get', 'http','there','and','i','t','it','d']\n stop.extend(newStopWords)\n lemmatizer = WordNetLemmatizer()\n clean = []\n new_col = []\n frame['Cleaned'] = None\n for tweet in frame.content:\n if 'RT' in tweet:\n if tweet.index('RT')>5:\n tweet = tweet[:tweet.index('RT')]\n else:\n tweet = tweet[2:]\n # WHAT ARE WE TRYING TO CLEAN HERE?\n # cleaning with preprocessor library https://pypi.org/project/tweet-preprocessor/\n tweet = ' '.join(re.sub(\"(@\\w+)|([^A-Za-z]+)|(\\w+:\\/\\/\\S+)\", \" \", tweet).split())\n # changes #November1 -> November: need to remove full hashtag?\n # changes @poetweatherford: -> poetweatherford\n # changes don’t -> don t, children's -> children s\n print(\"after regex:\" + str(tweet))\n clean.append(tweet.lower())\n for clean_tweet in clean:\n word_tokens = word_tokenize(clean_tweet)\n clean_tokens = [word for word in word_tokens if word not in stop]\n stems = []\n for item in clean_tokens:\n stems.append(lemmatizer.lemmatize(item))\n new_sentence = ' '.join(stems)\n new_col.append(new_sentence.lower())\n frame['Cleaned'] = new_col\n return frame", "def sanitize_labels(df):\n df['labels'] = df['query'].apply(lambda x: x.replace(' ', '<br>')) # linebreaks\n df['ranking_label'] = df.ranking.replace({'top': f'Evergreens - updated {df.date[0]}',\n 'rising': f'Trending - updated {df.date[0]}'})\n return df", "def convert_mel_spectrogram_to_linear(self, mel: 'torch.tensor', **kwargs) -> 'torch.tensor':", "def addTweetHashtagEdges(self):\n self.hashtagGraph.add_edges_from(self.builtEdgeList)", "def plot_reason(tweets, sentiment_scores):\n\n \"\"\"\n Calculate the sentiment of each individual token in the tweets.\n \"\"\"\n\n # list tokens, keeping only unique tokens (e.g. remove repeated words).\n all_toks = []\n for tweet in tweets:\n toks = tweet.lower().split()\n all_toks.extend(toks)\n all_toks = [tok for tok in set(all_toks)] # remove duplicates.\n\n # calculate sentiment of each token.\n sm = VaderSentimentModel()\n toks_sentiment = [sm.classify_sentiment(tok) for tok in all_toks]\n\n \"\"\" \n sort tokens by sentiment.\n if overall valence is negative, sort negative to postitive.\n if overall valence is positive, sort positive to negative.\n thus, in any case, the earliest elements in the list are the most informative words.\n \"\"\"\n\n nwords = 20\n\n # negative? sort neg -> positive.\n if np.mean(sentiment_scores) < 0:\n sorted_indices = np.argsort(toks_sentiment)\n # else (positive)? 
sort positive -> negative\n else:\n sorted_indices = np.argsort(toks_sentiment)[::-1]\n\n # toks_to_plot: shape (nwords, ) list of informative tokens.\n # sentiment_to_plot: shape (nwords, ) list of sentiment of these tokens.\n toks_to_plot = np.array(all_toks)[sorted_indices][:nwords]\n sentiment_to_plot = np.array(toks_sentiment)[sorted_indices][:nwords]\n\n # convert all sentiment scores to positive values.\n # this is for DISPLAY only, to make all plots go from left to right.\n # we still retain the correct tokens and sorting order.\n sentiment_to_plot = np.array([abs(v) for v in sentiment_to_plot])\n\n \"\"\"\n Set up plot.\n - create data source object.\n - define formatting variables. \n \"\"\"\n text_offset = 0.1\n\n source = ColumnDataSource(data={\n \"token\": toks_to_plot,\n \"sentiment\": sentiment_to_plot,\n \"x\": np.arange(len(toks_to_plot))[::-1],\n \"label_x\": sentiment_to_plot + text_offset\n })\n\n \"\"\"\n Make plot.\n \"\"\"\n\n # Create initial plot.\n width = 0.9\n xrange = [0, max(sentiment_to_plot) + 1]\n p2 = figure(x_axis_label=\"Sentiment\", y_axis_label=\"Word\", x_range=xrange)\n p2.hbar(source=source, y=\"x\", right=\"sentiment\", height=width)\n\n \"\"\"\n Format plot.\n \"\"\"\n\n # Annotate each bar with the word being represented.\n glyph = Text(x=\"label_x\", y=\"x\", text=\"token\")\n p2.add_glyph(source, glyph)\n\n # Axis labels.\n p2.xaxis.axis_label_text_font_size = \"15pt\"\n p2.yaxis.axis_label_text_font_size = \"15pt\"\n\n # Remove ticks.\n p2.xaxis.major_tick_line_color = None\n p2.xaxis.minor_tick_line_color = None\n p2.yaxis.major_tick_line_color = None\n p2.yaxis.minor_tick_line_color = None\n\n # Remove y axis tick labels.\n p2.yaxis.major_label_text_font_size = '0pt'\n\n # Plot width, height.\n scale = 1.5\n p2.plot_height = int(250 * scale)\n p2.plot_width = int(250 * scale)\n\n # remove toolbar (e.g. 
move, resize, etc) from right of plot.\n p2.toolbar.logo = None\n p2.toolbar_location = None\n\n # remove gridlines\n p2.xgrid.visible = False\n p2.ygrid.visible = False\n\n # remove x axis tick labels (set font to 0pt)\n p2.xaxis.major_label_text_font_size = '0pt'\n\n # get bokeh component for plot 2.\n script2, div2 = components(p2)\n\n return (script2, div2)", "def preprocess_data(data, tweet_column, column_label):\n\n le = LabelEncoder()\n ohe = OneHotEncoder(sparse=False)\n\n ints = le.fit_transform(data.loc[:, column_label].astype(str))\n labels = ohe.fit_transform(ints.reshape(len(ints), 1))\n\n lst = np.array(data.values.tolist())\n tweets = [clean_str(twt) for twt in list(lst[:, tweet_column])]\n padded = pad_tweets(tweets)\n vocabulary, vocabulary_inv = build_vocab(padded)\n x, y = build_input_data(padded, labels, vocabulary)\n return [x, y, vocabulary, vocabulary_inv, le, ohe]", "def extract_emo_relations(self):\n for tweet_idx, tweet in enumerate(self.tweets):\n tweet_tokens = []\n idx2word, child2parent = {}, {}\n for word in tweet.rstrip().split('\\n'):\n if not word:\n sys.stderr.write(\"wat\")\n continue\n curr_word = Word(word.rstrip().split('\\t'), tweet_idx)\n idx2word[curr_word.idx] = curr_word\n child2parent[curr_word] = curr_word.parent\n\n # Isolate emotion words that are Verbs or Adjectives\n if curr_word.text in self.emo_kws and curr_word.pos in self.POS_LIST:\n self.tweet2emo[tweet_idx].append(curr_word)\n curr_word.is_emotion_word = True\n\n tweet_tokens.append(curr_word.text)\n\n # update tweet dictionary and add children to words\n self.add_relatives(child2parent, idx2word)\n tweet_text = \" \".join(tweet_tokens)\n self.idx2tweet[tweet_idx] = tweet_text\n\n # Create Tweet object\n self.add_tweet(tweet_idx, tweet_text, tweet_tokens, list(idx2word.values()))", "def lemmatization(dataset):\n\n lemmatizer = WordNetLemmatizer()\n new_dataset = np.array([])\n for sent in dataset:\n new_sent = [lemmatizer.lemmatize(w) for w in sent]\n new_dataset = np.append(new_dataset, list([new_sent]))\n return new_dataset", "def extract_municipality_hashtags(df):\n mun_dict = {\n '@CityofCTAlerts' : 'Cape Town',\n '@CityPowerJhb' : 'Johannesburg',\n '@eThekwiniM' : 'eThekwini' ,\n '@EMMInfo' : 'Ekurhuleni',\n '@centlecutility' : 'Mangaung',\n '@NMBmunicipality' : 'Nelson Mandela Bay',\n '@CityTshwane' : 'Tshwane'\n }\n municipality = []\n hashtags = [] #creates two empty lists\n\n tweets = [i.split(\" \") for i in df['Tweets']] #creates a list from datframe column\n\n new_munic_list = []\n new_tag_list = [] #final set of lists that will be added into the dataframe\n\n for tweet in tweets: #appends the initial set of lists to extract words starting with # and key values of mun dict\n municipality.append([mun_dict[word] for word in tweet if word in list(mun_dict.keys())])\n hashtags.append([tag.lower() for tag in tweet if tag.startswith('#')])\n\n for item in municipality:\n if item == []: \n item = np.nan #if list is empty, retunr NaN\n new_munic_list.append(item) \n\n for tag in hashtags:\n if tag == []:\n tag = np.nan\n new_tag_list.append(tag)\n \n df['municipality'] = new_munic_list #creates two new columns in dataframe with #'s and key values from mun_dict dictionary\n df['hashtags'] = new_tag_list\n \n return df", "def _refine_matrix_with_additional_connections(self):\n new_graph = self.graph.copy()\n for node in tqdm.tqdm(self.graph.nodes(), disable=not self.verbose):\n if self.graph.node[node][\"type\"] == \"hashtag\":\n for neighbour in self.graph.neighbors(node):\n if 
self.graph.node[neighbour][\"type\"] == \"username\":\n for other_node in self.graph.neighbors(neighbour):\n if self.graph.node[other_node][\"type\"] == \"hashtag\" \\\n and not self.graph.has_edge(node, other_node) \\\n and not node == other_node:\n new_graph.add_edge(node, other_node)\n self.graph = new_graph", "def df_lda_preprocessing(df, col_name, remove_stopwords=True, add_features=False):\n df['text'] = df[col_name] # Create a copy of the input col_name: text\n \n # df_clean_sting(df, 'text') # Clean the text from col_name # TEST FJERN RENGØRING\n\n # Test other way of handling strings\n df_simple_clean_string(df, 'text')\n\n if add_features:\n df_make_features_from_string(df, 'text') # Add features\n\n # This is a hack soly for the scope of this project to concat ThreadSubject\n # When the message is initiated by the Member\n if col_name == 'SignalMessageBodyClean':\n df_aka = df.copy(deep=True)\n # df_aka['text_1'] = df_aka['ThreadSubject']\n # df_clean_sting(df_aka, 'ThreadTopic')\n df_simple_clean_string(df_aka, 'ThreadTopic')\n\n df['text'] = (df['text'] +' '+df_aka['ThreadTopic']).where(df['IsFirstMessageInthread']==1,df['text'])\n\n df_get_tokens(df, 'text') # Returns col: tokenized_text\n\n # df_stem_words(df, 'tokenized_text') # Returns col: stemmed_text\n\n df_bigrams(df, 'tokenized_text') # Returns bigrams\n df_trigrams(df, 'tokenized_text') # Returns trigrams\n\n df['ngrams'] = df['tokenized_text'] + df['bigrams'] + df['trigrams']\n\n if remove_stopwords:\n df_remove_stopwords(df, 'ngrams') # returns stopwords_removed", "def get_hashtags_df(graph_path: str = '/data/graphs/train_graph.p') -> pd.DataFrame:\n with open(PATH + graph_path, 'rb') as f:\n G = pickle.load(f)\n\n hashtags = [{'hashtag': node, **G.nodes[node]}\n for node in G.nodes\n if G.nodes[node]['node_type'] == 'hashtag']\n hashtags = pd.DataFrame(hashtags)\n return hashtags", "def extract_lemmas(lines_raw, mystem):\n lines_lemmatized = []\n for line_raw in lines_raw:\n words_analyses = mystem.analyze(line_raw)\n line_lemmas = \" \".join([parse[\"analysis\"][0][\"lex\"] for parse in words_analyses if parse.get(\"analysis\")])\n lines_lemmatized.append(line_lemmas)\n return lines_lemmatized", "def flattenAnnotations(transcriptAnnotations):\n root = StatNode()\n root.name = 'not_ok'\n for ta in transcriptAnnotations:\n pos = root\n # descend the implicit heirarchy of the labels and build a tree\n for label in ta.labels:\n label = cleanLabel(label)\n if label not in pos.childrenNames:\n n = StatNode()\n n.name = label\n n.parent = pos\n n.heirarchy = pos.heirarchy[:]\n n.heirarchy.append(n.name)\n pos.childrenNames[label] = n\n pos.children.append(n)\n pos = pos.childrenNames[label]\n return root", "def map_tweepy_array (self, tweet):\n new_tweet = [tweet.created_at,\n tweet.id,\n tweet.id_str,\n tweet.truncated,\n tweet.text,\n str(constants.TRACKS),\n tweet.source,\n tweet.source_url,\n tweet.in_reply_to_status_id,\n tweet.in_reply_to_status_id_str,\n tweet.in_reply_to_user_id,\n tweet.in_reply_to_user_id_str,\n tweet.in_reply_to_screen_name,\n tweet.user.screen_name,\n tweet.user.location,\n tweet.geo,\n tweet.coordinates,\n tweet.place,\n tweet.contributors,\n tweet.is_quote_status,\n tweet.retweet_count,\n tweet.favorite_count,\n tweet.favorited,\n tweet.retweeted,\n tweet.lang ]\n\n return new_tweet", "def tweet_preprocess(tweet, append_hashtag, test_file=False):\n if test_file:\n tweet = tweet.split(',', 1)[1]\n tweet = re.sub(r'<3|< 3', '<heart>', tweet)\n res = []\n for word in tweet.split(' '):\n w = 
re.sub(r'[\\.\\*,%/\\\\\"\\-\\_]+', ' ', word)\n w = tweet_grammar_rules(w)\n w = tweet_clean_numbers(w)\n w = tweet_clean_dup_chars(w)\n w = tweet_split_hashtags(w, append_hashtag)\n res.append(w)\n tweet = ' '.join(res).strip()\n tweet = re.sub(r'[ ]+', ' ', tweet)\n return tweet", "def get_tw_edgelist(path_tw, path_connection, path_tw_edgelist, path_tw_core_edgelist, csv):\n # read connection info\n connect = pd.read_csv(path_connection, index_col=0)\n\n core_names = []\n edge_list = pd.DataFrame(columns=['source', 'target'])\n\n # only keep files that are in connect\n filenames = [x for x in os.listdir(path_tw) if clear_filename2(x) in connect['twitterusername'].values]\n\n # iterate through all twitter follow files\n for filename in filenames:\n\n # each file name is a core node name\n core_names.append(clear_filename2(filename))\n\n df = pd.read_csv(path_tw + filename, index_col=0)\n if not df.empty:\n\n # name of file is source node\n source_id = pd.Series(clear_filename2(filename))\n\n # create df containing all edges of respective file\n inter_edge_list = pd.DataFrame(columns=['source', 'target'])\n\n # repeat source node to length of df\n inter_edge_list['source'] = source_id.repeat(len(df)).reset_index(drop=True)\n\n # add content of df as target column\n inter_edge_list['target'] = df['screen_name']\n\n edge_list = edge_list.append(inter_edge_list)\n\n # create core edgelist by selecting all rows where target node is a core node\n core_edgelist = edge_list[edge_list['target'].isin(core_names)]\n\n if csv:\n #edge_list.to_csv(path_tw_edgelist, index=False)\n core_edgelist.to_csv(path_tw_core_edgelist, index=False)", "def _translate_dataset(source_stream, target_lang_iso_1, config):\n translated_mentries = []\n for line in source_stream:\n splits = line.strip().split()\n mentry = MENtry(splits[0], splits[1], splits[2])\n translated_mentry = _translate_mentry(mentry,\n target_lang_iso_1,\n config)\n logger.info('Translated MENtry: {} {}'\n .format(translated_mentry.pair.first,\n translated_mentry.pair.last))\n translated_mentries.append(translated_mentry)\n return translated_mentries", "def analyze_tweets(tweets, model, w2v_model):\n # TODO DO EVERYTHING HERE\n #tweets = [(\"StarWars\", tc.query_tweets(\"StarWars\"))]\n \n #tweets = tc.query_tweets('starwars')\n df = pd.DataFrame(columns=['pos', 'neu', 'neg'])\n if not os.path.isdir('results'):\n os.mkdir('results')\n for topic, topic_tweets in tweets:\n tokenized_tweets = tp.process_raw_tweets(topic_tweets)\n df.loc[topic], dummy = classify_tweets(tokenized_tweets, model, w2v_model)\n vis.word_cloud_from_frequencies(tp.count_tokens(tokenized_tweets), f\"results/{topic}_cloud.png\", width=800, height=400,)\n \n vis.bar_plot_from_dataframe(df, 'results/results.png')\n print(\"\\n\")\n print(df)", "def hms_to_ml(hms):\n mlhms = [];\n for hm in tqdm(hms):\n mlhm = np.transpose(hm,(1,2,0));\n mlhms.append(np.expand_dims(mlhm,3));\n mlhms = matlab.single(np.concatenate(mlhms,axis=3).tolist());\n return mlhms;", "def transform_article(article):\n tokens = clean_article(article)\n lemmatizer = WordNetLemmatizer()\n lemmatized = [lemmatizer.lemmatize(token) for token in tokens]\n transformed_article = \" \".join(lemmatized)\n return transformed_article", "def _proc(dat):\n def lemma(text):\n lemmatizer = WordNetLemmatizer()\n w_tokenizer = WhitespaceTokenizer()\n return [lemmatizer.lemmatize(w) for w in w_tokenizer.tokenize(text)]\n\n dat['text_lemmatized'] = dat['clean_comments'].apply(lemma)\n dat['text_lemmatized'] = 
dat['text_lemmatized'].apply(' '.join)", "def _preprocess_feed(tweet: str):\n t = tweet.lower()\n t = re.sub(url_re, \" <URL> \", t)\n t = t.replace(\"\\n\", \"\")\n t = t.replace(\"#\", \" <HASHTAG> \")\n t = re.sub(mention_re, \" <USER> \", t)\n t = re.sub(smile_re, \" <EMOTICON> \", t)\n t = re.sub(emoji_re, \" <EMOJI> \", t)\n t = re.sub(time_re, \" <TIME> \", t)\n t = re.sub(numbers_re, \" <NUMBER> \", t)\n t = re.sub(not_ascii_re, \"\", t)\n t = re.sub(space_collapse_re, \" \", t)\n t = t.strip()\n return t", "def _preprocess_feed(tweet: str):\n t = tweet.lower()\n t = re.sub(url_re, \" <URL> \", t)\n t = t.replace(\"\\n\", \"\")\n t = t.replace(\"#\", \" <HASHTAG> \")\n t = re.sub(mention_re, \" <USER> \", t)\n t = re.sub(smile_re, \" <EMOTICON> \", t)\n t = re.sub(emoji_re, \" <EMOJI> \", t)\n t = re.sub(time_re, \" <TIME> \", t)\n t = re.sub(numbers_re, \" <NUMBER> \", t)\n t = re.sub(not_ascii_re, \"\", t)\n t = re.sub(space_collapse_re, \" \", t)\n t = t.strip()\n return t", "def sent2lm(sent, word_dict):\n UNK = word_dict['<unk>']\n ids = [word_dict.get(w, UNK) for w in sent.strip().lower().split()] + [word_dict['<e>']]\n return ids[:-1], ids[1:]", "def __encode_genre_transform(self, df):\n\t\treturn df.join(\n\t\t\tpd.DataFrame(self.genre_mlb.transform(df.pop(\"genres\")), columns=self.genre_mlb.classes_, index=df.index),\n\t\t\tlsuffix='l')", "def transform(self, x: Union[Tuple[Tuple[str, ...], ...], Tuple[str, ...]], **kwargs) -> np.ndarray:\n lemmatised = list(x[:])\n if isinstance(lemmatised[0], str):\n for i, xi in enumerate(lemmatised):\n lemmatised[i] = get_wcrft2_results_for_text(xi)\n if isinstance(lemmatised[0], list):\n for i, xi in enumerate(lemmatised):\n lemmatised[i] = ' '.join(xi)\n\n query_hashtags = kwargs.get(\"query\", None)\n if query_hashtags is not None:\n assert len(query_hashtags) == len(x), \\\n \"If at least 1 query is given, the array should have the same dimension as input `x`\"\n if isinstance(query_hashtags, str):\n query_hashtags = [query_hashtags] * len(lemmatised)\n\n # as in fit, vectorizer has normalization inside ...\n tf_idf_vectors = self._hashtags_tf_idf_vectorizer.transform(lemmatised)\n\n # ... 
so this simplifies to cosine similarity - no normalisation required\n similarities = self._hashtags_tf_idf_representation.dot(tf_idf_vectors.T).T.toarray()\n preference_vectors = self._get_preference_vectors(similarities, query_hashtags)\n similarity_rank_vertices = self._random_walk(preference_vectors)\n\n best_indices = np.argsort(-similarities * similarity_rank_vertices, axis=1)\n result = self._hashtag_labels[best_indices].tolist()\n return self.post_process_result(result)", "def preprocess_query(query):\r\n # stop = set(stopwords.words('english'))\r\n tags = {'NN', 'NNS', 'NNP', 'NNP', 'NNPS', 'JJ', 'JJR', 'JJS'}\r\n wordnet_lemmatizer = WordNetLemmatizer()\r\n # for i in range(len(query)):\r\n query = [(word.lower(), convert(tag)) for (word, tag) in nltk.pos_tag(nltk.word_tokenize(query)) if tag in tags]\r\n query = [wordnet_lemmatizer.lemmatize(w, t) for (w, t) in query ]\r\n return query", "def make_graph_dict(data, dates):\n with open(data, 'rb') as tweet_file:\n graph_dict = {}\n RTpattern = re.compile(\"RT @\\w+\")\n tweet_reader = csv.reader(tweet_file, delimiter=',')\n for row in tweet_reader:\n # Extract retweets\n retweet = RTpattern.match(row[2])\n # Extract correct dates \n if dates == 'all':\n date_match = True\n else: \n for date in dates:\n date_match = re.compile('.*' + date + '.*').match(row[3])\n if date_match != None:\n break\n \n if (retweet != None) & (date_match != None):\n retweeter = '@' + row[1]\n tweeter = retweet.group()[3:]\n\n #build graph\n if retweeter in graph_dict.keys():\n graph_dict[retweeter] = graph_dict[retweeter] + [tweeter]\n else:\n graph_dict[retweeter] = [tweeter]\n if graph_dict == {}:\n return 'NaN'\n else:\n return graph_dict", "def map_tweet(tweet, is_self_training):\n\n feature_set_code = globals.FEATURE_SET_CODE\n\n vector = []\n\n preprocessed_tweet = ppros.pre_process_tweet(tweet)\n postag_tweet = postag.pos_tag_string(preprocessed_tweet)\n\n # Score obtaining phase these are common for selftraining except obtaining unigram and\n # postag unigram score\n\n if not is_self_training:\n unigram_score = ngram.score(preprocessed_tweet, ds.POS_UNI_GRAM, ds.NEG_UNI_GRAM, ds.NEU_UNI_GRAM, 1)\n post_unigram_score = ngram.score(postag_tweet, ds.POS_POST_UNI_GRAM, ds.NEG_POST_UNI_GRAM, ds.NEU_POST_UNI_GRAM,\n 1)\n else:\n unigram_score = ngram.score(preprocessed_tweet, ds.POS_UNI_GRAM_SELF, ds.NEG_UNI_GRAM_SELF,\n ds.NEU_UNI_GRAM_SELF, 1)\n post_unigram_score = ngram.score(postag_tweet, ds.POS_POST_UNI_GRAM_SELF, ds.NEG_POST_UNI_GRAM_SELF,\n ds.NEU_POST_UNI_GRAM_SELF, 1)\n\n lexicon_score_gen = lexicon_score.get_lexicon_score(preprocessed_tweet)\n afinn_score_96 = lexicon_score.get_afinn_99_score(preprocessed_tweet)\n afinn_score_111 = lexicon_score.get_afinn_111_score(preprocessed_tweet)\n senti_140_score = lexicon_score.get_senti140_score(preprocessed_tweet)\n NRC_score = lexicon_score.get_NRC_score(preprocessed_tweet)\n binliu_score = lexicon_score.get_senti_word_net_score(preprocessed_tweet)\n sentiword_score = lexicon_score.get_binliu_score(preprocessed_tweet)\n\n emoticon_score = micro_blog_score.emoticon_score(tweet)\n unicode_emoticon_score = micro_blog_score.unicode_emoticon_score(tweet)\n\n writing_style = ws.writing_style_vector(tweet)\n\n # These classification are just for ease of division in general practice\n # Generally we use default feature code 15 which takes all the feature\n # You can evaluate that by analysing below code blocks :)\n\n if feature_set_code % 2 == 1:\n vector.append(afinn_score_96)\n 
vector.append(afinn_score_111)\n vector.append(lexicon_score_gen)\n vector.append(senti_140_score)\n vector.extend(NRC_score)\n vector.append(binliu_score)\n vector.append(sentiword_score)\n if feature_set_code % 4 >= 2:\n vector.extend(writing_style)\n if feature_set_code % 8 >= 4:\n vector.append(emoticon_score)\n vector.append(unicode_emoticon_score)\n if feature_set_code % 16 >= 8:\n vector.extend(post_unigram_score)\n vector.extend(unigram_score)\n return vector", "def preprocessing(raw_text_df):\r\n \r\n stemmer = nltk.stem.porter.PorterStemmer()\r\n tokenizer = RegexpTokenizer(r'\\w+')\r\n # iterate over all lines for preprocessing\r\n for index, line in enumerate(raw_text_df):\r\n \r\n # if there is mention of stars from 1-5, change the integer into\r\n # text and combine the number and the word \"star\" to make a new word\r\n # example: \"I give this product 1 star\" is now \"I give this product onestar\"\r\n # why? numbers are removed as part of preprocessing\r\n if \"1 star\" in line:\r\n line = line.replace(\"1 star\", \"onestar\")\r\n if \"1 stars\" in line:\r\n line = line.replace(\"1 stars\", \"onestar\")\r\n if \"2 star\" in line:\r\n line = line.replace(\"2 star\", \"twostars\")\r\n if \"2 stars\" in line:\r\n line = line.replace(\"2 stars\", \"twostars\")\r\n if \"3 star\" in line:\r\n line = line.replace(\"3 star\", \"threestars\")\r\n if \"3 stars\" in line:\r\n line = line.replace(\"3 stars\", \"threestars\")\r\n if \"4 star\" in line:\r\n line = line.replace(\"4 star\", \"fourstars\")\r\n if \"4 stars\" in line:\r\n line = line.replace(\"4 stars\", \"fourstars\")\r\n if \"5 star\" in line:\r\n line = line.replace(\"5 star\", \"fivestars\")\r\n if \"5 stars\" in line:\r\n line = line.replace(\"5 stars\", \"fivestars\")\r\n \r\n # tokenize lines\r\n tokens = re.split('(\\d+)',line)\r\n # remove numbers\r\n no_digits = [w for w in tokens if not w.isdigit()]\r\n # join tokens\r\n joined_text = \" \".join(no_digits)\r\n # re tokenize\r\n tokens = tokenizer.tokenize(joined_text)\r\n # make tokens lowercase\r\n lower_tokens = [w.lower() for w in tokens if type(w) == str] \r\n # remove stopwords\r\n stopped_tokens = [w for w in lower_tokens if not w in stopwords.words('english')]\r\n # stem words\r\n clean_tokens = [stemmer.stem(w) for w in stopped_tokens]\r\n # join text\r\n joined_text = \" \".join(clean_tokens)\r\n # replace line with preprocessed line\r\n raw_text_df[index] = joined_text\r\n print(index)", "def compute_flatten_retweeted_status_attribute(row):\n retweeted_status_original_field_names = [\n 'created_at', 'id', 'full_text', 'in_reply_to_status_id', 'in_reply_to_user_id', 'in_reply_to_screen_name',\n 'retweet_count', 'favorite_count', 'lang', 'entities', 'user', 'coordinates', 'place']\n\n if not pd.isnull(row[\"retweeted_status\"]):\n series = pd.read_json(json.dumps(row[\"retweeted_status\"]), typ='series')\n return series[retweeted_status_original_field_names]\n row[retweeted_status_original_field_names] = np.NaN\n return row[retweeted_status_original_field_names]", "def _make_links(tweet):\n for pattern, repl in (USER_SUB, KEYWORD_SUB):\n tweet = re.sub(pattern, repl, tweet)\n return tweet", "def annotate_tm_edge_label(self):\n if self._mapping is not None:\n kw = dict(fontsize=6, color='black', ha='center')\n m = self._mapping\n pix_size = self._mapping.metadata['size']\n f_tm_top = lambda g: m.ix[m.ix[g.index]['row'].idxmax(), 'slot']\n f_tm_bottom = lambda g: m.ix[m.ix[g.index]['row'].idxmin(), 'slot']\n tm_top = 
np.unique(m.groupby('col').agg(f_tm_top)['slot'])\n tm_bottom = np.unique(m.groupby('col').agg(f_tm_bottom)['slot'])\n for tm in tm_top:\n df = m.loc[m['slot'] == tm]\n ypix = df['ypix'].max() + pix_size * 0.7\n xpix = df['xpix'].mean()\n tm_txt = \"TM{:02d}\".format(tm)\n self.ax.text(xpix, ypix, tm_txt, va='bottom', **kw)\n for tm in tm_bottom:\n df = m.loc[m['slot'] == tm]\n ypix = df['ypix'].min() - pix_size * 0.7\n xpix = df['xpix'].mean()\n tm_txt = \"TM{:02d}\".format(tm)\n self.ax.text(xpix, ypix, tm_txt, va='top', **kw)\n else:\n print(\"Cannot annotate, no mapping attached to class\")", "def extract_labels(tweets):\n result = extract_retweet_counts(tweets)\n return result", "def analyze_data(df, sentiment_col, tweet_col, path):\n\n # create empty dictionaries to store all encountered words and their frequencies\n all_dict = {}\n pos_dict = {}\n neg_dict = {}\n neu_dict = {}\n # initialize counters to counter total number of tweets based on their emotion\n pos_count = 0\n neg_count = 0\n neu_count = 0\n\n # iterate through each row of the df\n for index, row in df.iterrows():\n if row[sentiment_col] == \"positive\":\n pos_count = iterate_words(\n pos_count, row[tweet_col], all_dict, pos_dict)\n\n if row[sentiment_col] == \"negative\":\n neg_count = iterate_words(\n neg_count, row[tweet_col], all_dict, neg_dict)\n\n if row[sentiment_col] == \"neutral\":\n neu_count = iterate_words(\n neu_count, row[tweet_col], all_dict, neu_dict)\n\n # visualize statistics\n visualize_stats(all_dict, 'all_plot.png', 'all_cloud.png',\n 'Word frequency in all tweets', path)\n visualize_stats(pos_dict, 'pos_plot.png', 'pos_cloud.png',\n 'Word frequency in positive tweets', path)\n visualize_stats(neg_dict, 'neg_plot.png', 'neg_cloud.png',\n 'Word frequency in negative tweets', path)\n visualize_stats(neu_dict, 'neu_plot.png', 'neu_cloud.png',\n 'Word frequency in neutral tweets', path)\n\n # make plot for emotion frequency\n emotions = ('Positive', 'Negative', 'Neutral')\n freq = [pos_count, neg_count, neu_count]\n sns.set_style(\"darkgrid\")\n ax = plt.figure().gca()\n ax.xaxis.grid(False)\n ax.yaxis.set_major_locator(MaxNLocator(integer=True))\n plt.bar(range(len(emotions)), freq, align='center',\n color=['forestgreen', 'firebrick', 'goldenrod'])\n plt.xticks(range(len(emotions)), emotions)\n plt.title('Tweet frequency based on emotion')\n plt.savefig(path + 'emotion_plot.png')\n plt.close()\n\n # make pie for emotion frequency\n sizes = [pos_count / len(df.index), neg_count /\n len(df.index), neu_count / len(df.index)]\n colors = ['forestgreen', 'firebrick', 'goldenrod']\n plt.pie(sizes, labels=emotions, colors=colors,\n autopct='%1.1f%%', startangle=140)\n plt.title('Tweet frequency based on emotion')\n plt.axis('equal')\n plt.savefig(path + 'emotion_pie.png')\n plt.close()", "def mapRow(row):\n commentsRow = row.comments\n captionRow = row.caption\n comments = commentsRow.data # select comments\n textComments = \" \".join([x.text for x in comments]) # remove metadata from comments\n if hasattr(captionRow, \"edges\"):\n captions = captionRow.edges\n textCaptions = \" \".join([x.node.text for x in captions])\n if hasattr(captionRow, \"text\"):\n textCaptions = captionRow.text\n if not row.tags is None:\n tags = \" \".join([x for x in row.tags])\n else:\n tags = \"\"\n textComments = textComments.replace(\"\\n\", \" \")\n textComments = textComments.replace(\"\\t\", \" \")\n textComments = textComments.replace(\",\", \" \")\n textCaptions = textCaptions.replace(\"\\n\", \" \")\n textCaptions = 
textCaptions.replace(\"\\t\", \" \")\n textCaptions = textCaptions.replace(\",\", \" \")\n tags = tags.replace(\"\\n\", \" \")\n tags = tags.replace(\"\\t\", \" \")\n tags = tags.replace(\",\", \" \")\n if len(row.urls) > 0:\n url = row.urls[0]\n else:\n url = \"missing-url\"\n id = row.id\n return pyspark.sql.Row(comments=textComments, caption=textCaptions, tags=tags, id=id, url=url)", "def lemma_texts(raw_texts):\r\n tokens = pos_tag(word_tokenize(raw_texts))\r\n res = [lemmatize(pos_res[0], get_wordnet_pos(pos_res[1])) for pos_res in tokens]\r\n return \" \".join(res)", "def label_7m(predictor, zhibo7m):\n for item_ in zhibo7m.find():\n try:\n live_texts_ = item_[\"content\"][\"textFeed\"]\n except Exception as e:\n zhibo7m.delete_one({\"_id\": item_['_id']})\n print(\"delete error id: {}\".format(item_[\"_id\"]))\n print(e)\n for l_index_, l_item_ in enumerate(live_texts_):\n l_item_[\"p_label\"] = predictor.predict(l_item_[\"msg\"])[0]\n live_texts_[l_index_] = l_item_\n # print(l_item_)\n zhibo7m.update_one({\"_id\": item_['_id']}, {\"$set\": {\"textFeed\": live_texts_}})", "def parse_rttm_for_ms_targets(self, sample):\n rttm_lines = open(sample.rttm_file).readlines()\n uniq_id = self.get_uniq_id_with_range(sample)\n rttm_timestamps = extract_seg_info_from_rttm(uniq_id, rttm_lines)\n fr_level_target = assign_frame_level_spk_vector(\n rttm_timestamps, self.round_digits, self.frame_per_sec, target_spks=sample.target_spks\n )\n seg_target, base_clus_label = self.get_diar_target_labels(uniq_id, sample, fr_level_target)\n clus_label_index, scale_mapping = self.assign_labels_to_longer_segs(uniq_id, base_clus_label)\n return clus_label_index, seg_target, scale_mapping", "def build_edges(self, train_ratings: pd.DataFrame) -> torch.LongTensor:\n edges = np.hstack(\n (\n np.stack(\n [\n train_ratings[\"user_id\"].values,\n train_ratings[\"movie_id\"].values,\n ]\n ),\n np.stack(\n [\n train_ratings[\"movie_id\"].values,\n train_ratings[\"user_id\"].values,\n ]\n ),\n )\n )\n edges = torch.LongTensor(edges)\n return edges", "def _annotations_to_targets(self, labels):\n roots = ['A','B','C','D','E','F','G']\n natural = zip(roots, [0, 2, 3, 5, 7, 8, 10])\n root_note_map = {}\n for chord, num in natural:\n root_note_map[chord] = num\n root_note_map[chord + '#'] = (num + 1) % 12\n root_note_map[chord + 'b'] = (num - 1) % 12\n\n root_note_map['N'] = 24\n root_note_map['X'] = 24\n \n labels = [c.decode('UTF-8') for c in labels]\n chord_root_notes = [c.split(':')[0].split('/')[0] for c in labels]\n chord_root_note_ids = np.array([root_note_map[crn] for crn in chord_root_notes])\n \n chord_type = [c.split(':')[1] if ':' in c else '' for c in labels]\n chord_type_shift = np.array([12 if 'min' in chord_t or 'dim' in chord_t else 0 for chord_t in chord_type])\n return one_hot(chord_root_note_ids + chord_type_shift, self.num_classes)", "def get_tw_nodelist(path_tw, path_connection, path_tw_nodelist, path_tw_core_nodelist, csv):\n names = pd.Series()\n core_names = []\n\n # iterate all .csv 'following' files, each file belongs to one core user\n for filename in os.listdir(path_tw):\n\n # append id to core user list\n core_names.append(clear_filename2(filename))\n\n # also append core user to complete ids\n names = names.append(pd.Series(clear_filename2(filename)))\n\n # read following info\n df = pd.read_csv(path_tw + filename, index_col=0)\n\n if not df.empty:\n # append friend (following) contacts to complete id series\n names = names.append(df['screen_name'])\n\n names = names.unique()\n\n # create 
nodelist\n nodelist = pd.DataFrame(columns=['id', 'label', 'timeset', 'relevant'])\n\n # fill complete names\n nodelist['id'] = names\n\n # read connection info\n connect = pd.read_csv(path_connection, index_col=0).drop(['twitterid'], axis=1)\n\n # label complete list as core or follow node\n nodelist.loc[nodelist['id'].isin(core_names), 'relevant'] = 'core'\n nodelist['relevant'].fillna('follow', inplace=True)\n nodelist['label'] = nodelist['relevant']\n\n # rename screen_name for merge\n connect.rename(columns={'twitterusername': 'id'}, inplace=True)\n\n # create core nodelist by merging complete nodelist with connection df\n core_nodelist = pd.merge(nodelist, connect, on='id')\n core_nodelist['label'] = core_nodelist['id']\n\n if csv:\n #nodelist.to_csv(path_tw_nodelist, index=False)\n core_nodelist.to_csv(path_tw_core_nodelist, index=False)", "def extract_municipality_hashtags(df):\n # dictionary mapping official municipality twitter handles to the municipality name\n mun_dict = {\n '@CityofCTAlerts' : 'Cape Town',\n '@CityPowerJhb' : 'Johannesburg',\n '@eThekwiniM' : 'eThekwini' ,\n '@EMMInfo' : 'Ekurhuleni',\n '@centlecutility' : 'Mangaung',\n '@NMBmunicipality' : 'Nelson Mandela Bay',\n '@CityTshwane' : 'Tshwane'\n }\n\n\n # Create 'municipality' column: Monica\n df['municipality'] = df['Tweets']\n\n # Extract municipality from Tweets: Mikael\n l = 0\n\n for tweet in df['Tweets']:\n tweet = tweet.split(' ')\n for key in mun_dict.keys():\n if key in tweet:\n df.loc[l, 'municipality'] = mun_dict[key]\n else: \n #Fill empty values in 'municipality' columns with np.nan: Courtney\n df.loc[l, 'municipality'] = np.NaN\n\n l += 1\n\n # Create 'hashtags' column: Mikael\n df['hashtags'] = df['Tweets'].str.lower().str.split()\n\n # Extract hashtags from Tweets: Monica\n i = 0\n\n for tweet in df['hashtags']:\n hashtags = []\n for word in tweet:\n if word.startswith('#'):\n hashtags.append(word)\n df.loc[i, 'hashtags'] = hashtags\n # Fill empty values in 'hashtags' columns with np.nan: Courtney\n if len(hashtags) == 0:\n df.loc[i, 'hashtags'] = np.NaN\n i += 1\n\n return df", "def remove_promotional_tweets(tweets):\n clean = cleaner()\n n_tweets = {}\n for tweet in tweets:\n if not clean.linkChecker(tweets[tweet][\"text\"]):\n n_tweets[tweet] = tweets[tweet]\n return n_tweets", "def associate_renames(df_renames, tr, node):\n # If any part of the node string is in the index of the rename dataframe\n # then build the original name.\n if any(new_nm.lower() in node.lower() for new_nm in df_renames.index):\n row_index = list(\n filter(lambda x: x.lower() in node, df_renames.index)\n )\n old_name = df_renames.loc[row_index].to_numpy()\n row_index = [x.lower() for x in row_index]\n old_name = [x.lower() for x in chain(*old_name)]\n new_old_tup = zip(row_index, old_name)\n # take the original name and the current name and use the current name\n # as a template to build up the old name.\n original_name = reduce(\n lambda new, kv: new.replace(*kv), new_old_tup, node\n )\n if node == original_name:\n row_index = list(filter(lambda x: x in node, df_renames.index))\n old_name = df_renames.loc[row_index].to_numpy()\n new_old_tup = zip(row_index, chain(*old_name))\n original_name = reduce(\n lambda new, kv: new.replace(*kv), new_old_tup, node\n )\n\n # Get the ID of node and the ID of the original node name that was\n # generated above.\n original_id = tr.get_uml_id(name=original_name)\n tr.uml_id.update({node: original_id})\n return {\"original_name\": original_name, \"original_id\": original_id}\n else:\n 
return {\"original_name\": None, \"original_id\": None}", "def convert_parse2lda_input(parsed):\r\n res = []\r\n for sent in parsed[\"sentences\"]:\r\n for w in sent[\"tokens\"]:\r\n if w[\"originalText\"] in punctuation: # avoid corenlp trans \"(\" to \"-lrb-\"\r\n res.append(w[\"originalText\"])\r\n else:\r\n res.append(w[\"lemma\"])\r\n return \" \".join(res)", "def parse_tweet(line):\n # The following regex just strips of an URL (not just http), any punctuations,\n # or Any non alphanumeric characters\n # http://goo.gl/J8ZxDT\n text = re.sub(\"(@[A-Za-z0-9]+)|([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)\",\" \",json.loads(line[1])[\"value\"]).strip()\n # remove terms <= 2 characters\n text = ' '.join(filter(lambda x: len(x) > 2, text.split(\" \")))\n\n return (line[0], text)", "def preprocess_tweets(tweets):\n tweets = clean_html(tweets)\n tweets = detweettify(tweets)\n tweets = remove_numbers(tweets)\n tweets = remove_punctuation(tweets)\n tweets = remove_links(tweets)\n return tweets", "def compute_flatten_retweeted_status_user_attributes(row):\n retweeted_status_original_user_field_names = [\n 'id', 'name', 'screen_name', 'location', 'description', 'followers_count', 'friends_count',\n 'listed_count', 'favourites_count', 'statuses_count', 'created_at', 'time_zone', 'lang']\n\n if not pd.isnull(row[\"retweeted_status_user\"]):\n series = pd.read_json(json.dumps(row[\"retweeted_status_user\"]), typ='series')\n return series[retweeted_status_original_user_field_names]\n # So, context-sensitive menus will give us available function calls.\n # row = pd.Series(row)\n # row.append(pd.Series(retweeted_status_user_object_fields), ignore_index=True)\n # print(f\"{row}\")\n row[retweeted_status_original_user_field_names] = np.NaN\n return row[retweeted_status_original_user_field_names]", "def transform(self, nodelist, word_list, labels):\n word_ids = []\n node_ids = []\n entry_ids = []\n # first entry is pad so skipping index 0\n for index in range(1,len(nodelist)):\n node_ids.append(self._node_vocab.token_to_id(nodelist[index]))\n word_ids.append(torch.tensor(self._word_vocab.doc2id(word_list[index])))\n # if self._use_char:\n # char_ids.append([self._char_vocab.doc2id(w) for w in input_data[1]])\n\n entry_ = [self._label_vocab.doc2id(label) for label in labels[index]]\n unique_entry = []\n for i in entry_:\n unique_entry.extend(i)\n entry_ids.append(list(set(unique_entry)))\n\n word_ids = nn.utils.rnn.pad_sequence(word_ids, batch_first=True)\n return [node_ids, word_ids, entry_ids]", "def preproc_db(ldc):\n for dc in ldc:\n if (isinstance(dc['hashtags'], str)):\n print(\"Already PreProcessed\")\n return ldc\n if 'retweeted_status_url' in dc:\n dc['retweeted_status_url'] = \",\".join(dc['retweeted_status_url'])\n if 'hashtags' in dc and dc['hashtags']:\n # print(dc['hashtags'])\n dc['hashtags'] = \",\".join(dc['hashtags'])\n else:\n dc['hashtags'] = 'NULL'\n\n if 'urls' in dc:\n dc['urls'] = \",\".join(dc['urls'])\n else:\n dc['urls'] = 'NULL'\n\n if 'media' in dc:\n dc['media'] = \",\".join(dc['media'])\n else:\n dc['media'] = 'NULL'\n if 'user_mentions_id' in dc:\n dc['user_mentions_id'] = ','.join(\n str(x) for x in (dc['user_mentions_id']))\n else:\n dc['user_mentions_id'] = 'NULL'\n if 'user_mentions_name' in dc:\n dc['user_mentions_name'] = \",\".join(dc['user_mentions_name'])\n else:\n dc['user_mentions_name'] = 'NULL'\n return (ldc)", "def translate_graph(lines: List[str]) -> Graph:\n nodes = []\n edges = []\n is_node = True\n\n # first_value used to offset node/edge ids to start from 0\n 
first = True\n first_value = 0\n\n for line in lines:\n a = line.split(\",\")\n\n if \"edge\" in line:\n is_node = False\n elif is_node:\n if first:\n first = False\n first_value = int(a[0])\n\n # Create node with Node(id, label, size)\n pos_x = float(a[4])\n pos_y = float(a[5])\n n = Node(int(a[0]),\n str(a[1]).replace(\"\\\"\", \"\"),\n int(float(a[2])),\n kp=KeyPoint(pos_x, pos_y, float(a[2])),\n pos=(float(a[4]),float(a[5])))\n\n if (a[1] == \"\"):\n print(\"Node \" + a[0] + \" missing label.\")\n exit(1)\n nodes.append(n)\n else:\n n1 = None\n n2 = None\n for node in nodes:\n if node.node_id == int(a[0]):\n n1 = node\n for node in nodes:\n if node.node_id == int(a[1]):\n n2 = node\n e = Edge(n1, n2, distance(n1.pos,n2.pos))\n edges.append(e)\n #all_edges.append(e)\n #edges.append((int(a[0]) - first_value, int(a[1]) - first_value))\n\n\n return Graph(nodes, edges, 1)", "def preprocess(self, arr):\n t1 = 'CLS ' + arr[0].lower()\n t2 = 'CLS ' + arr[1].lower()\n t3 = 'CLS ' + arr[2].lower()\n\n # print(\"preprocess deepmoji=\", self.deepmoji)\n\n if self.elmo:\n t1 = self.tt.tokenize(clean_sentence(t1))\n t2 = self.tt.tokenize(clean_sentence(t2))\n t3 = self.tt.tokenize(clean_sentence(t3))\n\n if self.hier:\n return t1, t2, t3\n else:\n return np.concatenate((t1, t2, t3))\n elif self.deepmoji:\n t1, _, _ = self.deepmoji_tt.tokenize_sentences([t1]) #vectorize\n t2, _, _ = self.deepmoji_tt.tokenize_sentences([t2])\n t3, _, _ = self.deepmoji_tt.tokenize_sentences([t3])\n\n t1 = np.trim_zeros(t1.astype(np.int32)[0])\n t2 = np.trim_zeros(t2.astype(np.int32)[0])\n t3 = np.trim_zeros(t3.astype(np.int32)[0])\n\n if self.hier:\n return torch.LongTensor(t1),torch.LongTensor(t2),torch.LongTensor(t3)\n else:\n return torch.LongTensor(t1+t2+t3)\n else:\n t1 = self.vectorize(t1)\n t2 = self.vectorize(t2)\n t3 = self.vectorize(t3)\n \n if self.hier:\n return torch.LongTensor(t1),torch.LongTensor(t2),torch.LongTensor(t3)\n else:\n return torch.LongTensor(t1+t2+t3)", "def convert(i, t, o):\n\n df = pd.read_csv(i)\n\n topics_col = t\n df[topics_col] = df[topics_col].astype(str)\n\n df[topics_col] = df[topics_col].apply(lambda x: x.split(','))\n mlb = MultiLabelBinarizer()\n one_hot_encoded_train = mlb.fit_transform(df[topics_col])\n df = df.join(pd.DataFrame(one_hot_encoded_train, columns=mlb.classes_, index=df.index))\n print(\"Classes: \", mlb.classes_)\n\n df = df.rename(columns={'label': 'label_tag'})\n df = df.rename(columns={'text': 'text_tag'})\n\n df['labels'] = pd.Series(list(one_hot_encoded_train), index=df.index)\n\n df.to_csv(o, index=False)", "def Preprocess_MR(path=\"datasets/raw/rt10662\"):\n\n output_path = \"datasets/preprocessed/MR_Data\"\n\n # load positive and negative data\n with io.open(os.path.join(path, \"rt-polarity.pos\"), encoding='latin-1') as f:\n pos_data = f.readlines()\n pos_data = [sentence.strip() for sentence in pos_data]\n with io.open(os.path.join(path, \"rt-polarity.neg\"), encoding='latin-1') as f:\n neg_data = f.readlines()\n neg_data = [sentence.strip() for sentence in neg_data]\n\n labels = compute_labels(pos_data, neg_data)\n text, labels = shuffle_data(pos_data + neg_data, labels)\n\n # split data in 70%/20%/10% train/test/dev split\n train_len = ((len(text) / 10) * 7) + (len(text) % 10)\n test_len = (len(text) / 10) * 2\n dev_len = len(text) / 10\n\n trX = text[0:train_len]\n teX = text[train_len:train_len + test_len]\n vaX = text[train_len + test_len: train_len + test_len + dev_len]\n\n trY = labels[0:train_len]\n teY = labels[train_len:train_len + test_len]\n 
vaY = labels[train_len + test_len: train_len + test_len + dev_len]\n\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n\n dat1 = pd.DataFrame({'label': trY})\n dat2 = pd.DataFrame({'sentence': trX})\n df = dat1.join(dat2)\n df.to_csv(os.path.join(output_path, \"train_binary_sent.csv\"), encoding='utf-8', index=False)\n\n\n dat1 = pd.DataFrame({'label': teY})\n dat2 = pd.DataFrame({'sentence': teX})\n df = dat1.join(dat2)\n df.to_csv(os.path.join(output_path, \"test_binary_sent.csv\"), encoding='utf-8', index=False)\n\n dat1 = pd.DataFrame({'label': vaY})\n dat2 = pd.DataFrame({'sentence': vaX})\n df = dat1.join(dat2)\n df.to_csv(os.path.join(output_path, \"dev_binary_sent.csv\"), encoding='utf-8', index=False)", "def parse_ltag_from_dict(self, tree_dict):\n\n spine = re.sub('[()]', '', tree_dict['spine'])\n node_labels = spine.split()\n nodes = [SpinalLTAG(label, children=[], tree_type=tree_dict['type']) for label in node_labels]\n nodes.append(tree_dict['terminal'])\n\n for current, next in pairwise(nodes):\n current.append(next)\n\n root = nodes[0]\n root.predicate = tree_dict['predicate']\n root.roleset_id = tree_dict['roleset_id']\n root.num_args = tree_dict['num_args']\n root.tree_id = tree_dict['tree_id']\n root.parent_id = tree_dict['parent_id']\n root.parent_attach_id = tuple(tree_dict['parent_attach_id']) if tree_dict['parent_attach_id'] is not None else None\n\n # Create rules and assign them to nodes in tree\n for rule_dict in tree_dict['rules']:\n rule = Rule.from_dict(rule_dict)\n root = self.add_rule_to_tree(root, rule)\n\n return root", "def prepare_graph(label, nodes, edges, graphID):\n features = {'label': label}\n\n G = nx.DiGraph()\n nodes[\"id\"] = nodes[\"id\"].apply(lambda x : str(x))\n features['num_nodes'] = nodes.shape[0]\n op_node = None\n times = []\n friends = []\n followers = []\n for index, row in nodes.iterrows():\n G.add_node(row['id'], time=row['time'], friends=row['friends'], followers = row['followers'])\n times.append(row['time'])\n friends.append(2**row['friends'])\n followers.append(2**row['followers'])\n if row['time'] == 0:\n features['poster_friend_cnt'] = 2**row['friends']\n features['poster_follower_cnt'] = 2**row['followers']\n tweeter_id = row['id']\n op_node = row['id']\n features['avg_time'] = np.mean(times)\n features['avg_friends'] = np.mean(friends)\n features['avg_followers'] = np.mean(followers)\n features['max_followers'] = max(followers)\n features['max_friends'] = max(friends)\n features['friends_25th_percentile'] = np.percentile(friends, 25)\n features['friends_75th_percentile'] = np.percentile(friends, 75)\n features['followers_25th_percentile'] = np.percentile(followers, 25)\n features['followers_75th_percentile'] = np.percentile(followers, 75)\n node_list = []\n edge_count = 0\n for pair in edges:\n node1, node2 = pair.split()[0], pair.split()[1]\n node_list.append(node1)\n node_list.append(node2)\n G.add_edge(node1, node2)\n edge_count += 1\n features['num_edges'] = edge_count\n sum_users_without_followers = sum([1 for (node, val) in G.in_degree() if val==0])\n features['ratio_users_w/out_followers'] = sum_users_without_followers / len(G.nodes)\n features['num_connected_components'] = nx.number_strongly_connected_components(G)\n features['number_of_OPs_followers_who_retweeted'] = G.in_degree(tweeter_id)\n features['percentage_of_OPs_followers_who_retweeted'] = G.in_degree(tweeter_id) / features['poster_follower_cnt']\n features['avg_clustering'] = nx.average_clustering(G)\n features['op_clustering'] = 
nx.clustering(G,op_node)\n features['transitivity'] = nx.transitivity(G)\n node_list = list(set(node_list))\n features['nodeID_list'] = np.array(node_list)\n features['graph_id'] = graphID\n return features, G", "def flatten_transcript(line):\n if line:\n transcript_text = \" \".join(map(lambda x: x[\"text\"], line[\"transcript\"]))\n transcript_text = \" \".join(transcript_text.split())\n line[\"transcript_text\"] = transcript_text\n return line", "def html_ann_tweet(tweets):\r\n for tweet in tweets:\r\n\r\n # Fairly efficient way of dealing with the fact that these keys might not exist\r\n try:\r\n text = tweet['text']\r\n except:\r\n pass\r\n\r\n try:\r\n text = tweet['full_text']\r\n except:\r\n pass\r\n\r\n try:\r\n text = tweet['extended_tweet']['full_text']\r\n except:\r\n pass\r\n\r\n\r\n # Hashtags\r\n tweet['text_html_annotated'] = re.sub(r'\\B#\\w\\w+',\r\n '<span class=\"hashtag\">\\g<0></span>',\r\n text)\r\n\r\n # Usernames\r\n tweet['text_html_annotated'] = re.sub(r'(?<=^|(?<=[^a-zA-Z0-9-_\\.]))@'\r\n r'([A-Za-z]+[A-Za-z0-9]+)',\r\n '<span class=\"user\">\\g<0></span>',\r\n tweet['text_html_annotated'])\r\n\r\n # Links\r\n tweet['text_html_annotated'] = re.sub(\r\n r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|'\r\n r'(?:%[0-9a-fA-F][0-9a-fA-F]))+', '<a href=\"\\g<0>\">\\g<0></a>',\r\n tweet['text_html_annotated'])\r\n\r\n return tweets", "def LABELED_DIGRAPH_DATA(Digraph, edges=None, nodes=None):\n digraph = Digraph([\n ('John', 'Mary', 'likes'),\n ('Mary', 'Peter', 'likes'),\n ('Peter', 'John', 'likes'),\n ('Peter', 'Sarah', 'likes')\n ], ['Rob'])\n return digraph", "def inverse_transform_lemmas(self, predictions):\n pred_lemmas = []\n if self.include_lemma == 'generate':\n for pred in predictions:\n pred_lem = ''\n for positions in pred:\n top_idx = np.argmax(positions) # winning position\n c = self.lemma_char_idx[top_idx] # look up corresponding char\n if c in ('$', '%'):\n continue\n if c == '|':\n break\n else:\n pred_lem += c # add character\n pred_lemmas.append(pred_lem)\n\n elif self.include_lemma == 'label':\n predictions = np.argmax(predictions, axis=1)\n pred_lemmas = self.lemma_encoder.inverse_transform(predictions) \n \n return pred_lemmas", "def lemma(self, docs):\n \n new_docs = []\n for text in docs:\n new_doc = []\n for token in word_tokenize(text):\n token = lemmatizer.lemmatize(token)\n new_doc.append(token)\n new_docs.append(new_doc)\n \n return pd.Series(new_docs)", "def _nodes_to_nml_nodes(nodes):\n\n nml_nodes = []\n for idx, row in nodes.iterrows():\n nml_node = wknml.Node(\n id=int(row.id),\n position=tuple(row.position.values),\n radius=float(row.radius),\n rotation=tuple(row.rotation.values),\n inVp=int(row.inVp),\n inMag=int(row.inMag),\n bitDepth=int(row.bitDepth),\n interpolation=bool(row.interpolation.values),\n time=int(row.time)\n )\n nml_nodes.append(nml_node)\n\n return nml_nodes", "def diffuse(graph, metapath, source_node_weights, column_damping=0, row_damping=1):\n\n # Initialize node weights\n source_metanode = metapath.source()\n source_node_to_position = get_node_to_position(graph, source_metanode)\n node_scores = numpy.zeros(len(source_node_to_position))\n for source_node, weight in source_node_weights.items():\n i = source_node_to_position[source_node]\n node_scores[i] = weight\n\n for metaedge in metapath:\n row_names, column_names, adjacency_matrix = metaedge_to_adjacency_matrix(\n graph, metaedge\n )\n\n # Row/column normalization with degree damping\n adjacency_matrix = diffusion_step(adjacency_matrix, row_damping, 
column_damping)\n\n node_scores = node_scores @ adjacency_matrix\n\n node_to_score = OrderedDict(zip(column_names, node_scores))\n return node_to_score", "def make_adversarial(df, altered):\n new_df = df.copy()\n botnets = new_df[new_df['label'] == 'Botnet'] # keep the botnet flows\n new_df = new_df[new_df['label'] != 'Botnet'] # and remove them from the original dataset\n\n # alter the packets or (and) the bytes according ot the values of the altered dictionary\n botnets['packets'] = botnets['packets'].apply(lambda z: z + (0 if 'packets' not in altered.keys() else altered['packets']))\n botnets['bytes'] = botnets['bytes'].apply(lambda z: z + (0 if 'bytes' not in altered.keys() else altered['bytes']))\n\n # and concatenate the new botnet flows with the original dataset with the original dataset\n fin_df = pd.concat([new_df, botnets])\n return fin_df", "def preprocess_lines(movie_line):\n\ttokens = tokenizer.tokenize(movie_line)\n\twords = [word for word in tokens if word not in stopwords_set]\n\tstemmed_terms = [porter_stemmer.stem(word) for word in words]\n\tlemmatized_terms = [wordnet_lemmatizer.lemmatize(word) for word in stemmed_terms]\n\treturn lemmatized_terms", "def adapt_tweet(feedpost):\n tweet = feedpost['title']\n for action in (_make_links, _clean_name, urlize):\n tweet = action(tweet)\n feedpost['title'] = _get_tweet_number(feedpost['link'])\n feedpost['body'] = u'<p>%s</p>' % tweet\n return feedpost", "def translate_row(row):\n # extract the display name\n display_input = utils.cleanUnicodeFractions(row['input'])\n tokens = tokenizer.tokenize(display_input)\n\n labels = _row_to_labels(row)\n label_data = _addPrefixes([(t, _matchUp(t, labels)) for t in tokens])\n\n translated = ''\n for i, (token, tags) in enumerate(label_data):\n features = utils.getFeatures(token, i + 1, tokens)\n translated += utils.joinLine(\n [token] + features + [_bestTag(tags)]) + '\\n'\n return translated", "def data_preprocessing(dataframe):\n\n dataframe = dataframe.loc[:, 0:]\n dataframe.columns = [\"url\", \"date\", \"personurl\", \"name\"]\n dataframe[\"date_new\"] = [i.replace(\"Düzenlendi\", \"\").replace(\"•\", \"\").strip() for i in dataframe[\"date\"]]\n dataframe[\"date_new\"] = [str(int(i.replace(\"yıl\", \"\").strip()) * 48) if \"yıl\" in i else i for i in\n dataframe[\"date_new\"]]\n dataframe[\"date_new\"] = [str(int(i.replace(\"ay\", \"\").strip()) * 4) if \"ay\" in i else i for i in\n dataframe[\"date_new\"]]\n dataframe[\"date_new\"] = [i.replace(\"hafta\", \"\").strip() if \"hafta\" in i else i for i in dataframe[\"date_new\"]]\n\n return dataframe", "def parse_ltags_from_dict(self, tree_dict):\n\n spine = re.sub('[()]', '', tree_dict['spine'])\n node_labels = spine.split()\n nodes = [SpinalLTAG(label, children=[], tree_type=tree_dict['tree_type']) for label in node_labels]\n\n for current, next in pairwise(nodes):\n current.append(next)\n\n root = nodes[0]\n root.predicate = tree_dict['predicate']\n root.roleset_id = tree_dict['roleset_id']\n root.num_args = tree_dict['num_args']\n root.tree_id = tree_dict['tree_id']\n\n # Create rules and assign them to nodes in tree\n for rule_dict in tree_dict['rules']:\n rule_dict['treeposition'] = \".\".join(['0'] + [str(i) for i in rule_dict['treeposition']])\n rule = Rule.from_dict(rule_dict)\n\n attach_key = str((rule_dict['treeposition'], str(rule.action_location.slot)))\n if attach_key in tree_dict['attach_counts']:\n rule.attach_counts = tree_dict['attach_counts'][attach_key]\n\n root = self.add_rule_to_tree(root, rule)\n\n trees = 
self.lexicalize_tree(root, tree_dict['lexicalization'])\n return trees", "def tweet_to_df(tweet):\r\n\r\n count = helper(\"./data\")\r\n\r\n dict_ = {}\r\n dict_[\"text\"] = tweet.text\r\n dict_[\"user\"] = tweet.user.description\r\n dict_[\"user_location\"] = tweet.user.location\r\n dict_[\"screem_name\"] = tweet.user.screen_name\r\n dict_[\"account_date_cr\"] = tweet.user.created_at\r\n dict_[\"nb_followers\"] = tweet.user.followers_count\r\n dict_[\"profile_color\"] = tweet.user.profile_background_color\r\n dict_[\"tweet_id\"] = tweet.id_str\r\n dict_[\"tweet_date\"] = tweet.created_at\r\n dict_[\"nb_retweeted\"] = tweet.retweet_count\r\n dict_[\"tweet coordinates\"] = tweet.coordinates\r\n\r\n tweet_data = pd.DataFrame(dict_, index=[0])\r\n return tweet_data.to_csv(f\"C:/Users/info/Desktop/projects/tweetanalyser/data/{count+1}.csv\")", "def _label_encode_metadata(metadata: pd.DataFrame) -> pd.DataFrame:\n for col in metadata.columns:\n # Skip over filepath and filename columns\n if \"file\" not in col:\n # Add a new column containing the label-encoded data\n metadata[f\"{col}_le\"] = metadata[col].factorize()[0]\n return metadata", "def clean_tweet(tweet):\r\n reply_pattern = re.compile(\"^@([a-zA-Z0-9]*) (.*)\")\r\n regexhandler = regex.RegexHandler()\r\n # add mark if tweets starts with a mention (@user)\r\n if reply_pattern.match(tweet.tweet[\"text\"]) is not None:\r\n temp = \"MarkReply \" + tweet.tweet[\"text\"]\r\n else:\r\n temp = tweet.tweet[\"text\"]\r\n # language dependent\r\n\r\n if floodtags.core.statics.StaticData.locations:\r\n for location in floodtags.core.statics.StaticData.locations:\r\n if location in temp:\r\n temp += \" MarkLocation\"\r\n\r\n try:\r\n stemmer = SnowballStemmer(floodtags.core.statics.StaticData.language.lower())\r\n # stem words\r\n temp = \" \".join(\r\n [stemmer.stem(x) if x not in tweet.tweet[\r\n \"keywords\"] and \"MarkReply\" not in x and \"MarkLocation\" not in x else x for x in temp.split()])\r\n except ValueError:\r\n print(\"language not found:\", floodtags.core.statics.StaticData.language)\r\n # pass\r\n\r\n # store language\r\n temp = \"Mark\" + tweet.language + \" \" + temp\r\n\r\n # store keyword\r\n\r\n # replace each website with 'MarkWebsite' to create more similarity\r\n temp = regexhandler.replace(temp, 'MarkWebsite', regex.Expressions.website)\r\n # replace each photo url with 'MarkPhoto' to create more similarity\r\n for i in range(len(tweet.tweet[\"photos\"])):\r\n temp = Vectorizer.rreplace(temp, \"MarkWebsite\", \"MarkPhoto\", 1)\r\n # replace each height with 'MarkHeight' to create more similarity\r\n temp = regexhandler.replace(temp, \"MarkHeight\", regex.Expressions.waterheight)\r\n # replace each time with 'MarkTime' to create more similarity\r\n temp = regexhandler.replace(temp, \"MarkTime\", regex.Expressions.time)\r\n # replace each date with 'MarkDate' to create more similarity\r\n temp = regexhandler.replace(temp, \"MarkDate\", regex.Expressions.date)\r\n # replace each number with 'MarkNumber' to create more similarity\r\n temp = regexhandler.replace(temp, \"MarkNumber\", regex.Expressions.number)\r\n temp = re.sub('\\n', ' ', temp)\r\n results = re.findall(\"(^|[^@\\w])@(\\w{1,15})\", temp)\r\n # add mark for each user name\r\n if results is not None:\r\n for i in range(len(results)):\r\n temp += \" MarkUserName\"\r\n results = re.findall(\"#(\\S*)\", temp)\r\n # add mark for each hashtag\r\n if results is not None:\r\n for i in range(len(results)):\r\n temp += \" MarkHashTag\"\r\n # add sender as feature\r\n temp 
= \"Sender\" + tweet.tweet[\"source\"][\"username\"] + \" \" + temp\r\n # remove unnecessary characters and chance text to lower case\r\n return re.sub('[#\\.,:]', '', temp)", "def multiply_trtr_diagram(data):\n\n names = data.index.names\n data = pd.concat([ data.xs('rere', level=2), \\\n data.xs('reim', level=2) + data.xs('imre', level=2)], \\\n keys=['re', 'im']).reorder_levels([1,2,0]).\\\n sort_index(level=[0,1])\n data.index.names = names\n return data", "def join(tw_df, rtt_df):\n original_tw_id = []\n author_ids = []\n rtt_dates = []\n groups = rtt_df.groupby('original_tweet_id').groups\n for k in groups.keys():\n l_a = []\n l_r = []\n original_tw_id.append(k)\n for index in groups[k]:\n line = rtt_df.iloc[[index]]\n l_a.append(int(line['author_id']))\n l_r.append(str(line['retweet_date']))\n author_ids.append(l_a)\n rtt_dates.append(l_r)\n \n df_temp = pd.DataFrame()\n df_temp['natural_key'] = original_tw_id\n df_temp['rtt_author_ids'] = author_ids\n df_temp['retweet_dates'] = rtt_dates\n df_temp = df_temp.set_index('natural_key')\n tw_df = tw_df.set_index('natural_key')\n return tw_df.join(df_temp)", "def merge_nodes_to_one(node_list):\n \"\"\"\n 16092020: return a dictionary of colon separated entries\n \"\"\"\n colon_separated_entry_dict = list()\n\n # no need to set new id, it is handled in the next part of the workflow\n new_left = Node(node_list[0]).left.__int__(node_list[0].left)\n new_top = min([node.top for node in node_list])\n new_word = \"\"\n new_width = 0\n new_height = 0\n new_label = str()\n new_node_colon_splited = Node(node_list[0]).isEntryColonSplited\n\n # word_num: number of characters\n if len(node_list) > 0:\n new_label = node_list[0].label\n for index, node in enumerate(node_list):\n new_word += str(node.word) + \" \"\n # width is a bit tricky\n if index == 0:\n new_width += Node(node).width.__int__(node.width)\n else:\n # because you need to consider the gap!\n # sometimes there is no left (the margin)\n # please, don't mess up init with int\n # cannot add width like this,need to minus left\n new_width += Node(node).width.__int__(node.width) + \\\n Node(node).left.__int__(node.left) - \\\n Node(node_list[index - 1]).left.__int__(node_list[index - 1].left) - \\\n Node(node_list[index - 1]).width.__int__(node_list[index - 1].width)\n\n # determine height is a bit tricky.\n # top point is new_top\n # I just check max top for now\n new_height = max(Node(node).height.__int__(node.height), new_height)\n\n new_word_num = len(new_word)\n new_node_final = Node(new_word, new_left, new_top, new_width, new_height, new_word_num)\n # print(new_node_final.word)\n # set the label for the word\n new_node_final.label = new_label\n \"\"\"\n 16092020: also consider entry?\n \"\"\"\n new_node_final.isEntryColonSplited = new_node_colon_splited\n return new_node_final\n\n # for node in node_list:", "def visualize_plotly(self, topics):\r\n \r\n df_palette = pd.DataFrame([\r\n [0, '#C03028'],\r\n [1, '#F08030'],\r\n [2, '#6890F0'],\r\n [3, '#78C850'],\r\n [4, '#A890F0'],\r\n [5, '#B22222'],\r\n [6, '#F8D030'],\r\n [7, '#D3D3D3'],\r\n [8, '#F85888'],\r\n [9, '#7FFFD4']])\r\n #[10, '#98D8D8']])\r\n \r\n #[11, '#A8B820'],\r\n #[12, '#7038F8'],\r\n #[13, '#705898'],\r\n #[14, '#705848'],\r\n #[15, '#B8B8D0'],\r\n #[16, '#A8A878'],\r\n #[17, '#EE99AC']])\r\n\r\n df_palette.columns = ['labels', 'typecolor']\r\n self.tweet_dataframe.merge(df_palette, on = 'labels')\r\n\r\n #Divide up the tsne data\r\n\r\n plot_list = []\r\n\r\n for idx, (label, color) in df_palette.iterrows():\r\n\r\n 
df_filter = self.tweet_dataframe[self.tweet_dataframe['labels'] == label]\r\n \r\n df_filter['custom_text'] = df_filter[['username', 'text']].apply('<br />'.join, axis = 1) \r\n sentiment_boxplot = go.Box(\r\n x = df_filter['vader_polarity'],\r\n name = \"{}\".format(topics[label]),\r\n #text = pd.Series(self.tweet_dataframe['text']),\r\n boxmean = True,\r\n jitter = .5,\r\n boxpoints = 'all',\r\n hoverinfo = 'x+text',\r\n text = df_filter['custom_text'],\r\n marker = dict(color = color) \r\n )\r\n plot_list.append(sentiment_boxplot) \r\n\r\n # Override plotly \r\n axis_layout = dict(zeroline=False, showaxeslabels=False, autotick = True, ticks = '', showticklabels=False, title='')\r\n\r\n layout = go.Layout(\r\n yaxis = axis_layout,\r\n hovermode = \"closest\",\r\n title = \"Sentiment distribution per topic\",\r\n showlegend = True)\r\n\r\n fig = dict(data=plot_list, layout=layout)\r\n #plot_url = py.plot(fig)\r\n offline_plot.plot(fig, filename='data/sentiment_boxplot.html', auto_open = False)\r\n\r\n return plot_list, layout", "def compute_bigrams(line: str) -> Sequence[str]:\n return DatasetLSTM.pairwise(chain(line, [\"</s>\"]))", "def test_graph_histogram_of_sentiment_scores_on_link_ids():\n graph_histogram_of_sentiment_scores_on_link_ids('politics_30_months_comments_cleaned_standardized_vader_flair.csv')", "def convert(df: pd.DataFrame, outpath):\n labels = df['source'].apply(lambda x: LABEL_XINHUA if x == '新华社' else LABEL_NON_XINHUA)\n labels.to_pickle(outpath)", "def tokenize_and_encode_pandas(dataframe,stopindex=1e9,max_length=128):\n encoded_tweets = []\n token_type_ids = []\n attention_mask = []\n labels_6_types = []\n labels_4_types = []\n interaction_types = []\n \n counter = 0\n for i in range(len(dataframe)):\n try:\n #tokenized_parent= tokenizer.tokenize(dataframe.iloc[i]['clean_target_text'])\n #tokenized_tweet = tokenizer.tokenize(dataframe.iloc[i]['clean_response_text'])\n text_parent= dataframe.iloc[i]['clean_target_text']\n text_tweet = dataframe.iloc[i]['clean_response_text']\n except Exception:\n #tokenized_parent= tokenizer.tokenize(dataframe.iloc[i]['target_text'])\n #tokenized_tweet = tokenizer.tokenize(dataframe.iloc[i]['response_text'])\n text_parent= dataframe.iloc[i]['target_text']\n text_tweet = dataframe.iloc[i]['response_text']\n \n interaction = dataframe.iloc[i]['interaction_type'] # reply or quote\n topic = dataframe.iloc[i]['event'] # get event\n topic = topic.replace('_', ' ') # replace underscore with space\n sep_token = ' [SEP] ' \n text1 = interaction + sep_token + topic + sep_token + text_parent\n text2 = text_tweet\n encoded_dict = tokenizer.__call__(text=text1,\n text_pair=text2,\n padding='max_length',\n truncation=True,\n is_split_into_words=False,\n max_length=max_length,\n return_tensors='pt')\n \n '''\n encoded_dict = tokenizer.__call__(text=tokenized_parent,\n text_pair=tokenized_tweet,\n padding='max_length',\n truncation=True,\n is_split_into_words=True,\n max_length=max_length,\n return_tensors='pt')\n \n '''\n '''\n encoded_dict = tokenizer.encode_plus(text=tokenized_tweet,\n text_pair=tokenized_parent,\n max_length=max_length,\n pad_to_max_length=True)\n '''\n encoded_tweets.append(encoded_dict['input_ids'])\n token_type_ids.append(encoded_dict['token_type_ids'])\n attention_mask.append(encoded_dict['attention_mask'])\n \n label = dataframe.iloc[i]['label']\n labels_6_types.append(convert_label_string2num(label, num_types=6))\n labels_4_types.append(convert_label_string2num(label, num_types=4))\n \n interaction_type = 
dataframe.iloc[i]['interaction_type']\n interaction_types.append(convert_interaction_type_string2num(interaction_type))\n \n if counter % 100 == 0:\n print('Tokenizing comment: %00000d' % counter)\n if counter > stopindex:\n break\n counter = counter + 1\n \n width = dataframe.shape[1]\n dataframe.insert(width+0,'encoded_tweets', encoded_tweets)\n dataframe.insert(width+1,'token_type_ids', token_type_ids)\n dataframe.insert(width+2,'attention_mask', attention_mask)\n dataframe.insert(width+3,'number_labels_6_types', labels_6_types)\n dataframe.insert(width+4,'number_labels_4_types', labels_4_types)\n dataframe.insert(width+5,'interaction_type_num', interaction_types)\n return dataframe", "def generate_graph_feature(self):\n traj_graph_feature = [traj.get_graph_feature() for traj in self.trajectories]\n self.df_graph_feature = pd.DataFrame(traj_graph_feature)\n self.df_graph_feature[\"LABEL\"] = self.df[\"LABEL\"]\n return self.df_graph_feature", "def transform_line(line):\n f = line.split('\\t')\n # escape parenthesis\n token = f[1]\n if token == '(':\n token = '-OP-'\n elif token == ')':\n token = '-CP-'\n # The nltk v3 implementation of dependency graphs needs an explicit\n # root relation label. Mate's output uses '--' as a label for relations\n # to the root, but also for punctuations. We thus translate the\n # relation label to 'ROOT'.\n if f[9] == '0':\n f[11] = TOP_RELATION_LABEL\n return '\\t'.join([f[0], token, f[3], f[5], f[5], f[7], f[9], f[11],\n '_', '_'])", "def preprocess(tweet): \n #Remove URL\n tweet = re.sub('\\w+:\\/{2}[\\d\\w-]+(\\.[\\d\\w-]+)*(?:(?:\\/[^\\s/]*))*', '', tweet) \n #Remove user\n tweet = re.sub('@[^\\s]+','',tweet)\n #Remove not alphanumeric symbols white spaces\n tweet = re.sub(r'[^\\w]', ' ', tweet)\n #Replace #word with word\n tweet = re.sub(r'#([^\\s]+)', ' ', tweet) \n tweet = re.sub('[:;>?<=*+()/,\\-#!$%\\{˜|\\}\\[^_\\\\@\\]1234567890’‘]',' ', tweet)\n #Remove :( or :)\n tweet = tweet.replace(':)','')\n tweet = tweet.replace(':(','') \n #Remove additional white spaces\n tweet = re.sub('[\\s]+', ' ', tweet)\n tweet = re.sub('[\\n]+', ' ', tweet)\n return tweet", "def transform(self):\n # df = pd.DataFrame(self.nodelist, columns = ['LPsol'])\n df = self.df['LPsol'].apply(pd.Series).fillna(value=0)\n if self.transformation == 'mds':\n mf = manifold.MDS(n_components=2)\n elif self.transformation == 'tsne':\n mf = manifold.TSNE(n_components=2)\n self.xy = mf.fit_transform(df)\n self.stress = mf.stress_\n\n self.df['x'] = self.xy[:,0]\n self.df['y'] = self.xy[:,1]\n self._generateEdges()" ]
[ "0.53392446", "0.53363657", "0.53016007", "0.52973044", "0.5226778", "0.5155888", "0.51276714", "0.5085444", "0.4898099", "0.4861826", "0.48439053", "0.481717", "0.48156443", "0.4814153", "0.47771806", "0.4760283", "0.475947", "0.47401556", "0.47281075", "0.46995685", "0.46956778", "0.46571055", "0.46563962", "0.4650436", "0.46453455", "0.46387392", "0.46333063", "0.461716", "0.4615685", "0.4600078", "0.45857406", "0.45841596", "0.45741442", "0.45705777", "0.45697254", "0.4567185", "0.4562107", "0.4562107", "0.45474568", "0.45286494", "0.45192254", "0.4517404", "0.45054522", "0.45047724", "0.45042276", "0.45006895", "0.44997764", "0.44968978", "0.44964725", "0.448672", "0.4482912", "0.4482445", "0.44815293", "0.4474863", "0.44742423", "0.44716507", "0.44674397", "0.44553933", "0.44550183", "0.44506037", "0.44486734", "0.444288", "0.44420654", "0.4437356", "0.44367892", "0.443282", "0.44284293", "0.44262946", "0.4424631", "0.44160166", "0.44105294", "0.44093794", "0.44059086", "0.44053724", "0.43996358", "0.43905032", "0.4382855", "0.437521", "0.4366448", "0.4363146", "0.4361744", "0.43602931", "0.4359072", "0.43529683", "0.43525305", "0.43523806", "0.4347756", "0.43459874", "0.4344826", "0.43425965", "0.43416223", "0.43411383", "0.43383387", "0.4331273", "0.43266883", "0.4324565", "0.43223035", "0.43207934", "0.43117443", "0.43115896" ]
0.58514464
0
Adds edges between hashtag nodes if they share the same user.
def _refine_matrix_with_additional_connections(self):
        new_graph = self.graph.copy()
        for node in tqdm.tqdm(self.graph.nodes(), disable=not self.verbose):
            if self.graph.node[node]["type"] == "hashtag":
                for neighbour in self.graph.neighbors(node):
                    if self.graph.node[neighbour]["type"] == "username":
                        for other_node in self.graph.neighbors(neighbour):
                            if self.graph.node[other_node]["type"] == "hashtag" \
                                    and not self.graph.has_edge(node, other_node) \
                                    and not node == other_node:
                                new_graph.add_edge(node, other_node)
        self.graph = new_graph
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def addToGraph(tid,uid,mentions) :\n global G,found\n\n user = r.get(int(uid))\n \n if user == None :\n return\n\n user = re.findall('\"((?:(?!(?:\",\")).)*)\"', user)\n \n # lower the hashtags\n mentions = [t.lower() for t in mentions if t not in [\"\"]]\n \n usern = user[1].lower()\n\n G.add_node(usern)\n\n found = found + 1\n\n # iterate through mentions\n for m in mentions :\n # add hashtag to graph\n G.add_node(m)\n \n # update edge weight for every hashtag 2-permutation of the tweet\n if G.has_edge(usern,m) :\n G[usern][m]['weight'] += 1\n else :\n G.add_edge(usern,m,weight=1)", "def addTweetHashtagEdges(self):\n self.hashtagGraph.add_edges_from(self.builtEdgeList)", "def add_edge(self, u, v):\r\n keys = self.d.keys()\r\n #if nodes are not in graph, add them\r\n if u not in keys:\r\n self.add_node(u)\r\n if v not in keys:\r\n self.add_node(v)\r\n #add each node to the value set of each other\r\n u_old = self.d[u]\r\n u_new = u_old.union(set(str(v)))\r\n v_old = self.d[v]\r\n v_new = v_old.union(set(str(u)))\r\n self.d.update({u:u_new, v:v_new})", "def add_edge_between(self, a: tuple, b: tuple):\n if a not in self.graph:\n self.graph[a] = set()\n if b not in self.graph:\n self.graph[b] = set()\n self.graph[a].add(b)\n self.graph[b].add(a)", "def addEdge(this, a, b):\n if not a in this.m:\n this.m[a]=set()\n this.m[a].add(b)", "def update(self):\n\n for node in self.nodes:\n for edge in node.edges:\n for i, edge_node in enumerate(edge.nodes):\n if edge_node.id != node.id:\n edge_node.add_edge(edge)\n\n return self", "def add_edges(self):\n for u in self.G.nodes():\n for v in self.G.nodes():\n if u != v and u != \"Sink\" and v != \"Source\":\n self.G.add_edge(\n u, v, cost=self.manhattan(u, v), time=self.manhattan(u, v)\n )", "def add_edge(self, node1, node2):\n if node1 not in self._neighbors:\n self._neighbors[node1] = []\n self._neighbors[node1].append(node2)\n if node2 not in self._neighbors:\n self._neighbors[node2] = []", "def add_all_edges(self):\n for n1 in self.vertices():\n for n2 in self.vertices():\n if n1 != n2:\n self.add_edge((n1, n2))", "def add_edge(self, edge):\r\n edge = set(edge)\r\n (vertex1, vertex2) = tuple(edge)\r\n \r\n if vertex1 in self.__graph_dict.keys() and vertex2 in self.__graph_dict.keys():\r\n if vertex2 in self.__graph_dict[vertex1] and vertex1 in self.__graph_dict[vertex2]:\r\n return\r\n self.__graph_dict[vertex1].add(vertex2)\r\n self.__graph_dict[vertex2].add(vertex1)\r\n elif vertex1 not in self.__graph_dict.keys() and vertex2 in self.__graph_dict.keys():\r\n self.__graph_dict[vertex1] = {vertex2}\r\n self.__graph_dict[vertex2].add(vertex1)\r\n elif vertex1 in self.__graph_dict.keys() and vertex2 not in self.__graph_dict.keys():\r\n self.__graph_dict[vertex2] = {vertex1}\r\n self.__graph_dict[vertex1].add(vertex2)\r\n else:\r\n self.__graph_dict[vertex1] = {vertex2}\r\n self.__graph_dict[vertex2] = {vertex1}", "def add_edge(self, edge):\n edge = set(edge)\n (vertex1, vertex2) = tuple(edge)\n if vertex1 in self.graph_dict:\n self.graph_dict[vertex1].append(vertex2)\n else:\n self.graph_dict[vertex1] = [vertex2]", "def create_graph(users, friend_counts):\n ###TODO-- Completed\n G = nx.Graph()\n\n #For Filtering the Nodes\n #print(friend_counts)\n friend_nodes = [friend for friend in friend_counts if friend_counts[friend] > 1]\n candidate_nodes = [user['screen_name'] for user in users]\n\n #print(\"Nodes: \",len(friend_nodes), len(candidate_nodes))\n #Adding Nodes to graph\n G.add_nodes_from(friend_nodes + candidate_nodes)\n\n #Connecting the 
Nodes with Edges\n for candidate in users:\n for friend in friend_nodes:\n if friend in candidate['friends']:\n G.add_edge(candidate['screen_name'], friend)\n\n return G", "def add_node_pairs(self, node_a,node_b):\r\n \r\n if node_b is not None : \r\n self.nodes[node_a].append(node_b)", "def add_edge(self, v1, v2):\n pass # TODO", "def add_edge(self, edge):\n edge = set(edge)\n (vertex1, vertex2) = tuple(edge)\n if vertex1 in self.graph_dict:\n self.graph_dict[vertex1].append(vertex2)\n else:\n self.graph_dict[vertex1] = [vertex2]\n return edge", "def add_edge(self,node1,node2):\n # add nodes if not already in graph\n if node1 not in self.nodes():\n self.add_node(node1)\n if node2 not in self.nodes():\n self.add_node(node2)\n\n # make connections to nodes\n self.__graph[node1].append(node2)\n self.__graph[node2].append(node1)", "def add_edge(self, edge):\n edge = set(edge)\n (vertex1, vertex2) = tuple(edge)\n if vertex1 in self.__graph_dict:\n self.__graph_dict[vertex1].append(vertex2)\n else:\n self.__graph_dict[vertex1] = [vertex2]", "def add_edge(self, nodes, X, U, V):\n assert(nodes[0] in self.nodes)\n assert(nodes[1] in self.nodes)\n\n if nodes[0] != nodes[1]:\n\n self.edges[nodes] = Graph.new_path([X, U, V])\n self.nodes[nodes[0]].linked_to.append(nodes[1])\n self.join_connex_groups(self.connex_elements[nodes[0]],\n self.connex_elements[nodes[1]])", "def add_edge(self, v1, v2):\n # add the 2nd node to the list of edges for the first node\n if v1 in self.vertices and v2 in self.vertices:\n\n self.vertices[v1].add(v2)", "def _transform_single_row(self, hashtag_agg: Dict, row: pd.Series):\n user_name = row[\"username\"] + \"_user\"\n tweet_id = str(row[\"id\"]) + \"_tweet\"\n tags = row[\"hashtags\"]\n\n self._users_labels.add(user_name)\n self._tweet_labels.add(tweet_id)\n\n if not self.graph.has_node(user_name):\n self.graph.add_node(user_name, type=\"username\")\n\n if not self.graph.has_node(tweet_id):\n self.graph.add_node(tweet_id, type=\"tweet_id\")\n\n for hashtag_index in tags:\n tag = hashtag_index[\"text\"] + \"_tag\"\n hashtag_agg[tag] += row[\"lemmas\"]\n\n if not self.graph.has_node(tag):\n self.graph.add_node(tag, type=\"hashtag\")\n\n if not self.graph.has_edge(tag, user_name):\n self.graph.add_edge(tag, user_name)\n\n if not self.graph.has_edge(tag, tweet_id):\n self.graph.add_edge(tag, tweet_id)\n\n self._hashtag_labels.add(tag)", "def add_edge(self, val1, val2):\n if val1 not in self._g:\n self.add_node(val1)\n if val2 not in self._g:\n self.add_node(val2)\n if val2 == val1:\n raise ValueError('Cannot have a self-referential edge.')\n if val2 in self._g[val1]:\n self._g[val1].remove(val2)\n self._g[val1].append(val2)", "def get_hashtags(self):\n\t\t# Only first level comments should be checked for hashtag. 
Maybe.\n\t\tpassl", "def addNode(self, node):\n if node in self.edges:\n raise ValueError('Duplicate node')\n else:\n self.edges[node] = []", "def _add_to_graph(self, user_profiles, new_infected_users):\n self.diffusion_tree.add_nodes_from(new_infected_users)\n parents = self._find_parents(user_profiles, new_infected_users)\n # connect parent(s) and child(ren)\n if parents is not None:\n edges = np.vstack((parents, new_infected_users)).T\n self.diffusion_tree.add_edges_from(edges)", "def add_edge(u, v):\n adj[u].append(v)\n adj[v].append(u)", "def add_edge(u, v):\n adj[u].append(v)\n adj[v].append(u)", "def add_edge(self, u, v):\n self.graph[u].append(v)", "def merge_graph(self, other):\n self.add_nodes( (nLabel,nInfo) for nLabel,nInfo in other.nodes() )\n \n for nLabel,nInfo in other.nodes():\n for edgeLabel,edgeInfo in other.edgesFrom(nLabel):\n self.add_edge(edgeLabel,edgeInfo)", "def addEdge(self,u,v):\r\n self.graph[u].append(v)", "def get_all_social_paths(self, user_id):\n visited = {} # Note that this is a dictionary, not a set\n # !!!! IMPLEMENT ME\n # graphs=Graph()\n # for i in self.users:\n # graphs.add_vertex(i)\n \n # for i in self.users:\n # for x in self.friendships[i]:\n # graphs.add_edge(i,x)\n\n # for i in graphs.vertices:\n # if graphs.bfs(i,user_id):\n # visited[i]=graphs.bfs(i,user_id)\n queue=Queue()\n queue.enqueue([user_id])\n while queue.size()>0:\n path=queue.dequeue()\n current_user = path[-1]\n if current_user not in visited:\n visited[current_user]=path\n for ID in self.friendships[current_user]:\n new_path=list(path)\n new_path.append(ID)\n queue.enqueue(new_path)\n return visited", "def _connect_neighbours(self):\n for prev in self.unvisited:\n for next in self.unvisited:\n if (next[0] == prev[0] and next[1] == prev[1] + 1) or (next[0] == prev[0] + 1 and next[1] == prev[1]):\n self.graph.addEdge((prev, next))\n self.visited.add(prev)\n self.visited.add(next)\n if self._find_intersection():\n self.intersection.append(prev)\n self.intersection.append(next)", "def connect_nodes(self):\n for src_id, trg_id in itertools.product(self.selected_nodes, repeat=2):\n if src_id != trg_id:\n app.edges[src_id].add(trg_id)\n self.mark_as_unsaved()\n self.update()", "def add_edge(self, from_website, to_link):\n self.graph[to_link - 1, from_website - 1] = 1", "def add_edge(self, val1, val2):\n self.setdefault(val1, [])\n self.setdefault(val2, [])\n if val2 not in self[val1]:\n self[val1].append(val2)", "def addEdge2(graph, NodeA, NodeB):\n graph[NodeA][NodeB] = 1", "def edges(self, uid):\n\n raise NotImplementedError", "def add_tweet(self,hash_tag_tuple,epoch_time):\n # Check if tweet is in order, inside the window duration, or outside\n t_diff = self.latest_time - epoch_time > self.t_window\n\n if t_diff <= self.t_window:\n self.latest_time = max(epoch_time,self.latest_time)\n\n current_vertices = self.graph.vs._name_index\n if self.verbose:\n print('Graph name index: '+str(current_vertices))\n print('Graph name index type: '+str(type(current_vertices)))\n\n # current vertivces will have none type when it is initilazed empty\n if current_vertices is not None:\n\n # Add hashtag to graph as vertex, if its already exists, nothing happens\n for hash_tag in hash_tag_tuple:\n # only add hashtag if it isn't already in the graph\n if hash_tag not in current_vertices:\n if self.verbose: print(\"Adding Vertex: \"+str(hash_tag))\n self.graph.add_vertex(name=hash_tag)\n else:\n # Add hashtag to graph as vertex, if its already exists, nothing happens\n for hash_tag in hash_tag_tuple:\n 
if self.verbose: print(\"Adding Vertex: \"+str(hash_tag))\n self.graph.add_vertex(name=hash_tag)\n\n\n\n # Add edges with associated epoch time\n for edge in combinations(hash_tag_tuple,r=2):\n if self.verbose: print('Adding Edge Pair:'+str(edge)+\" Time:\"+str(epoch_time))\n\n self.graph.add_edge(source=edge[0],target=edge[1],time=epoch_time)\n\n self.trim()\n\n # if tweet is outside of the time window than toss it\n else:\n return\n\n return", "def add_edge(self, vertex1, vertex2):\n self._numedges += 1\n self._adjacents[vertex1].append(vertex2)\n if not self._directed:\n self._adjacents[vertex2].append(vertex1)", "def add_edge(self, edge):\n edge = set(edge)\n (vertex1, vertex2) = tuple(edge)\n if vertex1 not in self.__graph_dict:\n self.__graph_dict[vertex1] = []\n dbg_str = \"Vertex being initialized ..\" + str(vertex1)\n # logging.debug(dbg_str)\n if vertex2 not in self.__graph_dict:\n self.__graph_dict[vertex2] = []\n dbg_str = \"Vertex being initialized ..\" + str(vertex2)\n # logging.debug(dbg_str)\n if vertex2 not in self.__graph_dict[vertex1]:\n self.__graph_dict[vertex1].append(vertex2)\n dbg_str = \"Appending .. \" + str(vertex2), \"to ->\" +str(vertex1)\n # logging.debug(dbg_str)\n\n if vertex1 not in self.__graph_dict[vertex2]:\n self.__graph_dict[vertex2].append(vertex1)\n dbg_str = \"Appending .. \" + str(vertex1), \"to ->\" +str(vertex2)\n # logging.debug(dbg_str)", "def add(self, node1, node2):\r\n if not(node1 in self._graph):\r\n # if it's the first time we see this node\r\n self._graph[node1] = [node2]\r\n else:\r\n if not(node2 in self._graph[node1]):\r\n # if node2 is not already in the connections of node1 \r\n # self._graph[node1].add(node2)\r\n self._graph[node1].append(node2)\r\n\r\n \r\n # if undirected graph\r\n if not self._directed:\r\n if not(node2 in self._graph):\r\n # if it's the first time we see node2\r\n self._graph[node2] = [node1]\r\n else:\r\n if not(node1 in self._graph[node2]):\r\n # if node1 is not already in the connections of node1 \r\n # self._graph[node2].add(node1)\r\n self._graph[node2].append(node1)", "def add_connection(network, user_A, user_B):\n if user_A not in network or user_B not in network:\n return False\n if user_B not in network[user_A][0]:\n network[user_A][0].append(user_B)\n return network[user_A][0]", "def add_connection(network, user_A, user_B):\n if user_A not in network or user_B not in network:\n return False\n if not user_B in network[user_A]['connections']:\n network[user_A]['connections'].append(user_B)\n return network", "def addEdge(source, target):\n\n\t\t# append the edge which contain source and target to the graph defaultdict\n\t\tgraph[source].append(target)\n\n\t\t\"\"\"initialize reference dictionary for each node\"\"\"\n\n\t\t# append the source as key and 0 as value to the reference dictionary\n\t\treference[source] = [0, 0, False, False]", "def add_edge(self, v1, v2):\n if v1 in self.vertices and v2 in self.vertices:\n self.vertices[v1].edges.add(v2)\n self.vertices[v2].edges.add(v1)\n else:\n raise IndexError(\"That vertex does not exist!\")", "def add_edges_from(self, edges_to_add, **attr):\n for e in edges_to_add:\n if len(e) == 3:\n u, v, d = e\n else:\n u, v = e\n d = {}\n u, v = sorted([e[0], e[1]])\n d = {**attr, **d}\n self.add_edge(u, v, **d)", "def add_edge(self, n1, n2, weight):\n self.edges[n1.identifier][n2.identifier] = weight\n self.edges[n2.identifier][n1.identifier] = weight", "def add_edge(self, v1, v2):\n if v1 in self.vertices and v2 in self.vertices:\n self.vertices[v1].add(v2)\n 
else:\n print(\"ERROR ADDING EDGE: Vrtes not found\")", "def connections2Neo(db, user, renderedTwits, friends=True):\n started = datetime.now()\n right_now = started.isoformat()\n \n users2Neo(db, renderedTwits)\n \n match = (\"MATCH (t:twitter_user {{screen_name: '{}'}}),\" +\n \" (f:twitter_user {{screen_name: d.screen_name}})\").format(user)\n\n if friends:\n merge = \"MERGE (t)-[:FOLLOWS]->(f)\"\n update = \"SET {}.friends_last_scraped = '{}'\".format('t'+user, right_now)\n else:\n merge = \"MERGE (t)<-[:FOLLOWS]-(f)\"\n update = \"SET {}.followers_last_scraped = '{}'\".format('t'+user, right_now)\n \n query = '\\n'.join(['UNWIND $data AS d', match, merge])\n \n data = [{'screen_name': twit.get('screen_name', False)}\n for twit in renderedTwits if twit.get('screen_name', False)]\n\n userNode = nodeRef(user, 'twitter_user', {'screen_name': user})\n update_query = '\\n'.join([mergeNode(userNode, match=True), update])\n\n neo_tx(db, update_query)\n neo_tx(db, query, data=data)\n\n how_long = (datetime.now() - started).seconds\n logging.info(\n '*** PUSHED %d CONNECTIONS FOR %s TO NEO IN %ds ***' %\n (len(renderedTwits), user, how_long))", "def test_node_neighbors(graph_no_edges):\n graph_no_edges.add_edge('BB', 82, 5)\n assert graph_no_edges.neighbors('BB') == {82: 5}", "def _add_edge(self, actor, target):\n nodes = (actor, target)\n for i in (0, 1):\n self._vertices.setdefault(nodes[i], _Vertex()).add_neighbor(\n nodes[(i + 1) % 2])", "def add_edge(self, u: Hashable, v: Hashable, metadata: dict):\n # If u doesn't exist:\n if self.has_node(u):\n x = self._names.get_id(u)\n else:\n x = self.add_node(u, None)\n\n if self.has_node(v):\n y = self._names.get_id(v)\n else:\n y = self.add_node(v, None)\n\n # Insert metadata for this edge, replacing the previous metadata:\n self._meta.add_edge(u, v, metadata)\n\n # TODO: Support multigraphs, and allow duplicate edges.\n if self.has_edge(u, v):\n return\n return self._nk_graph.addEdge(x, y)", "def add_edge(self, n1, n2, weight=0):\n self.add_node(n1)\n self.add_node(n2)\n if n2 in self.node_dict[n1]:\n raise ValueError(\"Edge already exists\")\n self.node_dict[n1][n2] = weight", "def add(self, a, b):\n a, b = (a, b) if a in self.node_id else (b, a)\n target_id = self.node_id[a]\n self.node_id[b] = target_id\n self.groups[target_id] |= set([b])", "def get_node_neighbors(\n self, u: Hashable, include_metadata: bool = False\n ) -> Generator:\n if include_metadata:\n return {\n e[\"target\"]: _node_to_metadata(e[\"properties\"])\n for e in (\n self._g.V()\n .has(ID, u)\n .outE()\n .project(\"target\", \"source\", \"properties\")\n .by(__.inV().values(ID))\n .by(__.outV().values(ID))\n .by(__.valueMap(True))\n .toList()\n )\n }\n return self._g.V().has(ID, u).out().values(ID).toList()", "def add_node(self, node):\n if node in self.edges:\n raise ValueError('Duplicate node')\n else:\n self.edges[node]=[]\n self.nodes.add(node)", "def multi_edge():\n from networkx.readwrite import json_graph\n import networkx as nx\n import autonetkit\n # returns a house graph\n data = {'directed': False,\n 'graph': [],\n 'links': [{'_ports': {'r4': 2, 'r5': 1},\n 'raw_interfaces': {},\n 'source': 0,\n 'target': 1},\n {'_ports': {'r2': 3, 'r4': 1},\n 'raw_interfaces': {},\n 'source': 0,\n 'target': 3},\n {'_ports': {'r2': 4, 'r4': 3},\n 'raw_interfaces': {},\n 'source': 0,\n 'target': 3},\n {'_ports': {'r3': 3, 'r5': 2},\n 'raw_interfaces': {},\n 'source': 1,\n 'target': 4},\n {'_ports': {'r1': 1, 'r2': 1},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 3},\n 
{'_ports': {'r1': 3, 'r2': 5},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 3},\n {'_ports': {'r1': 2, 'r3': 1},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 4},\n {'_ports': {'r1': 4, 'r3': 4},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 4},\n {'_ports': {'r1': 5, 'r3': 5},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 4},\n {'_ports': {'r2': 2, 'r3': 2},\n 'raw_interfaces': {},\n 'source': 3,\n 'target': 4}],\n 'multigraph': True,\n 'nodes': [{'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r4 to r2', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r4 to r5', 'id': 'eth1'},\n 3: {'category': 'physical', 'description': 'r4 to r2', 'id': 'eth2'}},\n 'asn': 2,\n 'device_type': 'router',\n 'id': 'r4',\n 'label': 'r4',\n 'x': 675,\n 'y': 300},\n {'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r5 to r4', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r5 to r3', 'id': 'eth1'}},\n 'asn': 2,\n 'device_type': 'router',\n 'id': 'r5',\n 'label': 'r5',\n 'x': 675,\n 'y': 500},\n {'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r1 to r2', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r1 to r3', 'id': 'eth1'},\n 3: {'category': 'physical', 'description': 'r1 to r2', 'id': 'eth2'},\n 4: {'category': 'physical', 'description': 'r1 to r3', 'id': 'eth3'},\n 5: {'category': 'physical', 'description': 'r1 to r3', 'id': 'eth4'}},\n 'asn': 1,\n 'device_type': 'router',\n 'id': 'r1',\n 'label': 'r1',\n 'x': 350,\n 'y': 400},\n {'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r2 to r1', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r2 to r3', 'id': 'eth1'},\n 3: {'category': 'physical', 'description': 'r2 to r4', 'id': 'eth2'},\n 4: {'category': 'physical', 'description': 'r2 to r4', 'id': 'eth3'},\n 5: {'category': 'physical', 'description': 'r2 to r1', 'id': 'eth4'}},\n 'asn': 1,\n 'device_type': 'router',\n 'id': 'r2',\n 'label': 'r2',\n 'x': 500,\n 'y': 300},\n {'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r3 to r1', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r3 to r2', 'id': 'eth1'},\n 3: {'category': 'physical', 'description': 'r3 to r5', 'id': 'eth2'},\n 4: {'category': 'physical', 'description': 'r3 to r1', 'id': 'eth3'},\n 5: {'category': 'physical', 'description': 'r3 to r1', 'id': 'eth4'}},\n 'asn': 1,\n 'device_type': 'router',\n 'id': 'r3',\n 'label': 'r3',\n 'x': 500,\n 'y': 500}]}\n graph = json_graph.node_link_graph(data)\n anm = autonetkit.anm.NetworkModel()\n g_in = anm.add_overlay(\"input\")\n g_in._replace_graph(nx.MultiGraph(graph))\n # TODO: check if should build overlays here rather than clone in?\n g_phy = anm[\"phy\"]\n g_phy._replace_graph(graph)\n return anm", "def add_edge(self, n1, n2, weight):\n self.edges[n1.identifier][n2.identifier] = weight", "def connect_all(graph, nodeset):\n for element in nodeset:\n graph.add_node(element)\n for element1 in nodeset:\n for element2 in nodeset:\n if not element1 == element2:\n graph.add_edge(element1, element2)\n return graph", "def add_edge(self, u, v, **kwargs):\n if u == v:\n raise ValueError(\"Self loops are not allowed.\")\n if u in self.nodes() and v in self.nodes() and nx.has_path(self, v, u):\n raise ValueError(\n \"Loops are not allowed. 
Adding the edge from (%s->%s) forms a loop.\"\n % (u, v)\n )\n else:\n super(BayesianModel, self).add_edge(u, v, **kwargs)", "def add_cell_and_edges(self,nodes,**kws): \n for a,b in circular_pairs(nodes):\n j=self.nodes_to_edge(a,b)\n if j is None:\n self.add_edge(nodes=[a,b])\n return self.add_cell(nodes=nodes,**kws)", "def add_node(self, n):\r\n keys = self.d.keys()\r\n #check for node in graph\r\n if n not in keys:\r\n self.d.update({str(n): set()})", "def add_bidirectional_edges( self, node1, node2, distance):\n n1, n2 = self.__create_node(node1.lower(), node2.lower())\n\n #Set neighbour between edges\n #Check if 2 edges oredi a neihbor exist\n if n1 in n2.getNeighbors() and n2 in n1.getNeighbors():\n print(n1.getId()+\" and \"+n2.getId()+\" already a neighbour.\")\n else:\n n1.setNeighbor(n2,distance) # n1 ----> n2\n n2.setNeighbor(n1,distance) # n2 ----> n1", "def merge_nodes(self,n0,n1):\n # -- Sanity checks - does not yet allow for collapsing edges.\n\n # if they share any cells, would update the cells, but for now\n # just signal failure.\n n0_cells=list(self.node_to_cells(n0))\n n1_cells=list(self.node_to_cells(n1))\n cell_to_edge_cache={}\n\n for c in n1_cells:\n if c in n0_cells:\n print(\"cell %d common to both nodes\"%c)\n raise GridException(\"Not ready for merging nodes in the same cell\")\n # otherwise record and fix up below\n\n # while we're looping, cache the edges as they will\n # be mutated along the way.\n cell_to_edge_cache[c]=self.cell_to_edges(c).copy()\n\n # do they share an edge, but not already fixed in the above stanza?\n j=self.nodes_to_edge(n0,n1)\n if j is not None:\n raise GridException(\"Not ready for merging endpoints of an edge\")\n\n edge_map={} # index of superceded edge => superceding edge\n\n # Update edges of n1 to point to n0\n # if that would cause a duplicate edge, then the n1 version is deleted\n n1_edges=list(self.node_to_edges(n1)) # make copy since we'll mutate it\n for j in n1_edges:\n if self.edges['nodes'][j,0]==n1:\n nj=0\n elif self.edges['nodes'][j,1]==n1:\n nj=1\n else:\n assert False # sanity check\n newnodes=self.edges[j]['nodes'].copy()\n newnodes[nj]=n0\n # it's possible that this is an edge which already exists\n jother=self.nodes_to_edge(*newnodes)\n if jother is not None:\n # want to keep jother, delete j. but is there info on\n # cells which should be brought over?\n edge_map[j]=jother\n # wait to delete j until after cells have been moved to jother.\n else:\n self.log.debug(\"Modifying edge j=%d\"%j)\n self.modify_edge(j,nodes=newnodes)\n\n # -- Transition any cells. \n for c in n1_cells:\n # update the node list:\n cnodes=self.cell_to_nodes(c).copy()\n nc=list(cnodes).index(n1)\n cnodes[nc]=n0\n\n # Dangerous to use cell_to_edges, since it may\n # have to consult the edge topology, which is disrupted\n # in the above code. 
\n # cell_to_edges: first checks cells['edges'], may \n # go to cell_to_nodes(c): that's safe.\n # and nodes_to_edge\n # -> node_to_edges, which in turn may consult self.edges['nodes']\n\n #cedges=self.cell_to_edges(c).copy()\n cedges=cell_to_edge_cache[c]\n\n for ji,j in enumerate(cedges):\n if j in edge_map:\n # is there were edges['cells'] should be updated?\n\n # sever the edge=>cell pointer, to p\n # could just set to [-1,-1], but this keeps things very explicit\n # for debugging\n j_cells=list(self.edges['cells'][j])\n j_cells_side=j_cells.index(c)\n j_cells[ j_cells_side ] = -1\n self.modify_edge(j,cells=j_cells)\n\n # and modify the receiving edge, too\n jo=edge_map[j]\n jo_cells=list(self.edges['cells'][jo])\n # which side of jo? a bit tedious...\n if list(self.edges['nodes'][j]).index(n1) == list(self.edges['nodes'][jo]).index(n0):\n # same orientation\n jo_cells_side=j_cells_side\n elif list( self.edges['nodes'][j]).index(n1) == 1-list(self.edges['nodes'][jo]).index(n0):\n jo_cells_side=1-j_cells_side\n else:\n raise Exception(\"Failed in some tedium\")\n assert jo_cells[jo_cells_side]<0\n jo_cells[jo_cells_side]=c\n self.modify_edge(edge_map[j],cells=jo_cells)\n # yikes. any chance that worked?\n\n cedges[ji]=edge_map[j]\n\n # maybe this is where we'd update cells['edges'] too?\n self.modify_cell(c,nodes=cnodes,edges=cedges)\n\n for dead_edge in edge_map:\n self.delete_edge(dead_edge)\n\n self.delete_node(n1)", "def add_edge(self, node1: Node, node2: Node):\n self.__add_edge(node1, node2)\n self.__add_edge(node2, node1)", "def add_edge(self, v1, v2):\n if v1 in self.vertices and v2 in self.vertices:\n self.vertices[v1].add(v2)\n else:\n raise IndexError('nonexistent vertex/node')", "def link_edges(strong_edges, weak_edges):\n\n H, W = strong_edges.shape\n indices = np.stack(np.nonzero(strong_edges)).T\n edges = np.zeros((H, W), dtype=np.bool)\n\n #####################################\n # START YOUR CODE HERE #\n #####################################\n weak_indices = [(i, j) for i, j in np.stack(np.nonzero(weak_edges)).T]\n complete = set()\n for x, y in indices:\n weak_neighbors = [(x, y)]\n while len(weak_neighbors) > 0:\n i, j = weak_neighbors.pop(0)\n if ((i, j) in complete):\n continue\n edges[i, j] = 1\n complete.add((i, j))\n weak_neighbors += [(i, j) for i, j in get_neighbors(i, j, H, W) if (i, j) in weak_indices]\n ######################################\n # END OF YOUR CODE #\n ######################################\n\n return edges", "def add_edge(self, edge):\n\n add_egde = True\n for edge_this in self.edges:\n if edge_this == edge:\n add_egde = False\n\n if add_egde:\n self.edges.append(edge)\n\n return self", "def get_node_neighbors(\n self, u: Hashable, include_metadata: bool = False\n ) -> Generator:\n my_id = self._names.get_id(u)\n if include_metadata:\n val = {}\n for vid in self._nk_graph.iterNeighbors(my_id):\n v = self._names.get_name(vid)\n if self.is_directed():\n val[v] = self._meta.get_edge(u, v)\n else:\n try:\n val[v] = self._meta.get_edge(u, v)\n except KeyError:\n val[v] = self._meta.get_edge(v, u)\n return val\n\n return iter(\n [self._names.get_name(i) for i in self._nk_graph.iterNeighbors(my_id)]\n )", "def make_conn_graph(interaction_logs):\n G = pgv.AGraph(directed=True)\n\n for module_id in interaction_logs['module_id'].unique():\n G.add_node(module_id, label='module')\n\n grouped = interaction_logs.groupby('user_id')\n for user_id, group in grouped:\n G.add_node(user_id, label='student')\n for module_id in set(group['module_id'].values):\n 
G.add_edge(user_id, module_id)\n\n return G", "def test_graph_adds_nodes_and_edges(graph_no_edges):\n graph_no_edges.add_edge('Louisiana Crawfish', 'WA Invasive Species', 3)\n assert graph_no_edges.edges() == [(\n 'Louisiana Crawfish', 'WA Invasive Species', 3)]", "def add_edge(self, v1, v2): # O(1) time complexity\n if v1 in self.vertices and v2 in self.vertices: # check to see if v1 & v2 exists already\n self.vertices[v1].add(v2) # # add connection from v1 to v2 \n else: # else \n print(\"That vertex does not exist\")\n\n # additional options (class)\n \"\"\"\n if (v1 or v2) not in self.vertices:\n return \"vertex does exist\"\n self.vertices[v1].add(v2)\n ###\n if v1 in self.vertices and v2 in self.vertices:\n self.vertices{v1}.add(v2)\n else:\n print(\"One of these vertices does not exist)\n \"\"\"", "def _append_source_and_target(self, graph):\n graph.add_node( \"source\" )\n graph.add_node( \"target\" )\n \n for leave in (n for n,d in graph.out_degree_iter() if d==0):\n if leave is not \"source\" and leave is not \"target\":\n graph.add_edge( leave, \"target\" )\n \n for root in (n for n,d in graph.in_degree_iter() if d==0):\n if root is not \"source\" and root is not \"target\": \n graph.add_edge( \"source\", root )", "def buildEdgeList(self, hashtagsList):\n hashtagLen = len(hashtagsList)\n if hashtagLen > 1:\n for index in range(1, hashtagLen):\n self.builtEdgeList.append((hashtagsList[FIRST], hashtagsList[index]))\n hashtagsList.pop(FIRST)\n self.buildEdgeList(hashtagsList)", "def add_edges_from(self, edges: Iterable):\n for i, j in edges:\n self.add_edge(i, j)", "def add_edge(self, v1, v2):\n\n (x1, y1) = v1\n (x2, y2) = v2\n\n if not self.has_vertex(x1, y1) or not self.has_vertex(x2, y2): return\n if v1 not in self.get_neighbors(x2, y2): return\n\n self._reachable[v1].add(v2)\n self._reachable[v2].add(v1)", "def add_node(self, node, graph=None):\n if not graph:\n graph = self.graph\n if node.name in graph:\n raise KeyError(\"node %s already exists\" % node.name)\n\n graph[node.name] = set()", "def get_edge_query(from_id, rel_type, to_id):\n # TODO: what to do with labels here.\n\n return ((\"MERGE (a:user {id: %s}) \"\n \"MERGE (b:user {id: %s}) \"\n \"MERGE a-[:%s]->b \"\n \"RETURN *\") % (from_id, to_id, rel_type))", "def add_edge(self, id1: int, id2: int, weight: float) -> bool:\n if id1 in self.Nodes and id2 in self.Nodes and id2 not in self.Edges[id1]:\n if self.Edges[id1] is None:\n self.Edges[id1] = {}\n self.Edges[id1][id2] = weight\n else:\n self.Edges[id1][id2] = weight\n self.edgesize += 1\n self.MC += 1\n return True\n else:\n return False", "def add_edge(self, u: Hashable, v: Hashable, metadata: dict):\n try:\n self.get_edge_by_id(u, v)\n e = self._g.V().has(ID, u).outE().as_(\"e\").inV().has(ID, v).select(\"e\")\n except IndexError:\n if not self.has_node(u):\n self.add_node(u, {})\n if not self.has_node(v):\n self.add_node(v, {})\n e = (\n self._g.V()\n .has(ID, u)\n .addE(EDGE_NAME)\n .as_(\"e\")\n .to(__.V().has(ID, v))\n .select(\"e\")\n )\n for key, val in metadata.items():\n e = e.property(key, val)\n return e.toList()", "def create_node2edges_on2freq_grid(self):\n trip_id2model = pickle.load(open('pickles/trip_id2model.pickle','rb'))\n old_trip_id = -1\n model = trip_id2model[1]\n sub_x = 5\n sub_y = 5\n node2edges_on2sub_grid2points = {}\n for line in self.lines:\n trip_id,lat,lon = normalize_simple(line)\n if trip_id != old_trip_id:\n #print trip_id\n model = trip_id2model[trip_id]\n old_trip_id = trip_id\n node = self.gps_to_node(lat,lon)\n if node == 
-1:\n continue\n #print \"pushed through\"\n incident_edges = self.incident_edges(node)\n edges_on = []\n for edge in incident_edges:\n if model[edge] == 1:\n edges_on.append(edge)\n edges_on.sort()\n edges_on = tuple(edges_on)\n min_lat,max_lat,min_lon,max_lon = self.coords_to_min_max_lat_lon(self.node_to_coords(node))\n\n sub_row,sub_col = gen_gps_to_coords(lat,lon,sub_x,sub_y,min_lat,max_lat,min_lon,max_lon)\n sub_tuple = (sub_row,sub_col)\n if node not in node2edges_on2sub_grid2points:\n node2edges_on2sub_grid2points[node] = {}\n edges_on2sub_grid2points = node2edges_on2sub_grid2points[node]\n if edges_on not in edges_on2sub_grid2points:\n edges_on2sub_grid2points[edges_on] = defaultdict(list)\n sub_grid2points = edges_on2sub_grid2points[edges_on]\n points = sub_grid2points[sub_tuple]\n node2edges_on2sub_grid2points[node][edges_on][sub_tuple].append([lat,lon])\n #points.append([lat,lon])\n\n print node2edges_on2sub_grid2points.keys()\n print node2edges_on2sub_grid2points[2].keys()\n print node2edges_on2sub_grid2points[2][(2,3)].keys()\n \n node2edges_on2median = {}\n for node in node2edges_on2sub_grid2points:\n print node\n edges_on2sub_grid2points = node2edges_on2sub_grid2points[node]\n node2edges_on2median[node] = {}\n for edges_on in edges_on2sub_grid2points:\n sub_grid2points = edges_on2sub_grid2points[edges_on]\n best_spot = (-1,-1)\n best_score = 0\n for spot in sub_grid2points:\n score = len(sub_grid2points[spot])\n if score > best_score:\n best_score = score\n best_spot = spot\n node2edges_on2median[node][edges_on] = list_median(sub_grid2points[spot])\n \n with open('pickles/node2edges_on2median-%d-%d.pickle' % (self.rows,self.cols),'wb') as output:\n pickle.dump(node2edges_on2median,output)", "def flow_graph_hash(graph):\n assert isinstance(graph, BasicGraph)\n sources = graph.nodes(in_degree=0)\n\n original_hash = 'original hash'\n new_hash = 'new_hash'\n cls = type(graph)\n hash_graph = cls() # new graph with hashes.\n visited = set()\n\n while sources:\n source = sources[0]\n sources = sources[1:]\n\n suppliers = graph.nodes(to_node=source)\n\n hash_func = hashlib.sha3_256()\n hash_func.update(bytes(str(source), 'utf-8'))\n for supplier in suppliers:\n if graph.depth_first_search(start=source, end=supplier):\n continue # it's a cycle.\n d = hash_graph.node(supplier)\n hash_func.update(bytes(d[new_hash], 'utf-8'))\n source_hash = hash_func.hexdigest()\n\n if source not in hash_graph:\n obj = {original_hash: source, new_hash: source_hash}\n hash_graph.add_node(source, obj=obj)\n else:\n n = hash_graph.node(source)\n n[new_hash] = source_hash\n\n receivers = graph.nodes(from_node=source)\n for receiver in receivers:\n if receiver in visited:\n continue\n visited.add(receiver)\n\n if receiver not in hash_graph:\n obj = {original_hash: receiver, new_hash: None}\n hash_graph.add_node(node_id=receiver, obj=obj)\n hash_graph.add_edge(source, receiver)\n if receiver not in sources:\n sources.append(receiver)\n\n for sink in graph.nodes(out_degree=0):\n n = hash_graph.node(sink)\n assert n[new_hash] is not None, n\n\n return hash_graph", "def add_unidirectional_edges(self, from_node_id, to_node_id, distance):\n n1, n2 = self.__create_node(from_node_id.lower(), to_node_id.lower())\n\n # add neighbour\n if n2 in n1.getNeighbors():\n print(\"Node \"+n1.getId()+\" is already neighbour with node \"+n2.getId())\n else:\n n1.setNeighbor(n2,distance) # n1 ----> n2", "def add_edge_directed(u, v):\n adj[u].append(v)", "def add_edge_directed(u, v):\n adj[u].append(v)", "def addInLink(source, 
target):\n if inlinkGraph.has_key(source):\n # if target not in inlinkGraph[source]:# uncomment to remove repetitives\n inlinkGraph[source].append(target)\n inlinkGraphDegree[source] = inlinkGraphDegree[source] + 1\n else:\n inlinkGraph[source].append(target)\n inlinkGraphDegree[source] = 1", "def add_edge(i, j):\n if (i, j) in edges or (j, i) in edges:\n # Si ya esta agregado en la lista no agrega nada\n return\n edges.add( (i, j) )\n edge_points.append(points[ [i, j] ])", "def add_edge(self, v1, v2):\n # Check if they exist\n # if v1 in self.vertices and v2 in self.vertices:\n if v1 in self.vertices:\n # Add the edge\n self.vertices[v1].add(v2)\n else:\n print(f\"ERROR ADDING EDGE between {v1} and {v2} : Vertex not found\")", "def merge(self, a, b):\n old_id, target_id = sorted((self.node_id[a], self.node_id[b]), key = lambda id: len(self.groups[id]))\n for node in self.groups[old_id]:\n self.node_id[node] = target_id\n self.groups[target_id] |= self.groups[old_id]\n del self.groups[old_id]", "def get_all_social_paths(self, user_id): #each user v v each path V V\n # output example- {1: [1], 8: [1, 8], 10: [1, 10], 5: [1, 5], 2: [1, 10, 2], 6: [1, 10, 6], 7: [1, 10, 2, 7]}\n visited = {} # Note that this is a dictionary, not a set\n # Need to do a bfs using the user id\n # first step is to traverse the graph and record all the vertices as keys in visited using bft\n # then take those keys and use bfs on each, using user_id as the starting node and and the key as\n # the destination node\n\n # Modification of BFT\n # create an empty dict\n # q = Queue()\n q = []\n\n # init enqueue the starting node\n q.append(user_id)\n\n while len(q) > 0:\n # Dequeue the first item\n v = q.pop(0)\n # If it's not been visited:\n if v not in visited:\n # Mark as visited (i.e. add to the visited set)\n visited[v] = []\n\n # Do something with the node\n print(f\"Visited {v}\")\n\n # Add all neighbors to the queue\n for next_vert in self.friendships[v]:\n q.append(next_vert)\n\n # once visited is filled, then we start the bfs\n #print('vv',visited)\n possible_paths = {}\n #run a bfs for each key in visited\n for v in visited:\n possible_paths[v] = []\n \n if v == user_id:\n visited[v] = [user_id]\n\n path = []\n while len(path) < len(visited):\n\n # Add all neighbors to the queue\n for next_vert in self.friendships[v]:\n print(possible_paths[v])\n # copy the path\n # temp_path = list(path)\n # temp_path.append(next_vert)\n # add path to possible_paths\n path.append(next_vert)\n\n possible_paths[v].append(path) # HAVE TO USE QUEUE OR STACK, THEY ENSURE THE NEIGHBORS\n # FOLLOW THE CORRECT ORDER WHEN LOOPING \n\n if v == path[-1]:\n \n # IF SO, RETURN PATH\n visited[v] = path\n break\n\n # for x in visited:\n # bfs(user_id, x)\n # visited[x].add(path)\n \n print('pct of total users in network', len(visited[1])/len(visited))\n print('degrees of separation', len(visited[1]) - 1)\n return visited", "def add_edge(self, u: str, v: str) -> None:\n if (u == v):\n return\n else:\n # add_vertex handles the checks\n # for if the vertices already\n # exist and if they already do,\n # nothing happens. 
Else it adds them\n self.add_vertex(u)\n self.add_vertex(v)\n\n # check if the edge already exists\n if self.contains_edge(u, v):\n return\n\n # create the edge\n self.adj_list[u].append(v)\n self.adj_list[v].append(u)", "def add_edge(self, name_from, name_to):\n self.add_node(name_from)\n self.add_node(name_to)\n self._main_dictionary[name_from].add(name_to)", "def addEdges(self, edges):\n for edge in edges:\n self.addEdge(edge[0], edge[1], edge[2])", "def make_link(Graph, node1, node2):\n if node1 not in Graph:\n Graph[node1] = {}\n (Graph[node1])[node2] = 1\n if node2 not in Graph:\n Graph[node2] = {}\n (Graph[node2])[node1] = 1\n return Graph", "def merge(self, g1, g2):\n logger = logging.getLogger(__name__)\n \n \n g = BaseGraph()\n g.copy_graph_from(g1)\n\n plwn2sumo_dict = defaultdict(set)\n plwn2sumo_dict = self.get_plwn2sumo_dict()\n\n synset_on_vertex_dict = {}\n for node in g.all_nodes():\n synset_id = node.synset.synset_id\n if synset_id in synset_on_vertex_dict:\n logger.warning(\"ID of some synset is not unique.\")\n continue\n synset_on_vertex_dict[synset_id] = node\n\n num_of_edge = 0\n for edge in g2.all_edges():\n num_of_edge += 1\n logger.info(\"%d/%d\", num_of_edge, g2.num_edges())\n\n parent_sumo_concept = edge.source().sumo\n child_sumo_concept = edge.target().sumo\n\n if parent_sumo_concept not in plwn2sumo_dict:\n logger.warning(\"The mapping file doesn't contain sumo concept '%s'.\", parent_sumo_concept)\n continue\n if child_sumo_concept not in plwn2sumo_dict:\n logger.warning(\"The mapping file doesn't contain sumo concept '%s'.\", child_sumo_concept)\n continue\n\n for parent_syn_id in plwn2sumo_dict[parent_sumo_concept]:\n if parent_syn_id not in synset_on_vertex_dict:\n logger.warning(\"The mapping file contains synset '%d' that is not in the graph.\", parent_syn_id)\n continue\n p_node = synset_on_vertex_dict[parent_syn_id]\n for child_syn_id in plwn2sumo_dict[child_sumo_concept]:\n if child_syn_id not in synset_on_vertex_dict:\n logger.warning(\"The mapping file contains synset '%d' that is not in the graph.\", child_syn_id)\n continue\n ch_node = synset_on_vertex_dict[child_syn_id]\n \n g.add_edge(p_node,\n ch_node,\n [(\"rel\", edge.rel)],\n simply=True)\n \n\n return g", "def process_graph_data(graph_data):\n graph_data = deepcopy(graph_data)\n for username, info in six.iteritems(graph_data):\n # Leave only users with followers info (do not draw huge amount\n # of isolated nodes).\n info['followers'] = [\n f\n for f in info.get('followers', [])\n if 'followers' in graph_data.get(f, {})\n ]\n info['following'] = [\n f\n for f in info.get('following', [])\n if 'followers' in graph_data.get(f, {})\n ]\n return graph_data", "def add_edges(self, edges):\n if self.is_weighted():\n for vertex1, vertex2, weight in edges:\n self.add_edge(vertex1, vertex2, weight)\n else:\n for vertex1, vertex2 in edges:\n self.add_edge(vertex1, vertex2)", "def sub_graph_merging(self):", "def get_node_merge_query(user):\n labels = \"user\"\n\n statement = ((\"MERGE (n {id: {node_id}}) \"\n \"ON MATCH SET n={props}, n :%s \"\n \"ON CREATE SET n={props}, n :%s \"\n \"RETURN (n)\") % (labels, labels))\n\n props = {\"node_id\": user[\"id\"], \"props\": user}\n\n return statement, props", "def add_edge(self, vertex1, vertex2):\n\n vertex1.add_outgoing_node(vertex2)\n vertex2.add_incoming_node(vertex1)", "def merge_edge(graph, u, v):\n # Attach v's adjacency list to u's adjacency list\n graph[u].extend(graph[v])\n\n # Go through all of the vertices in v's adjacency list and replace all 
instances of v in those adjacency lists with\n # u\n for i in graph[v]:\n adjacency_list = graph[i]\n\n for j in range(0, len(adjacency_list)):\n if adjacency_list[j] == v:\n adjacency_list[j] = u" ]
[ "0.6894521", "0.68589467", "0.5790534", "0.571554", "0.5655163", "0.5619311", "0.56077766", "0.5606925", "0.5577704", "0.5519251", "0.5487911", "0.54744655", "0.54540205", "0.5451284", "0.5424921", "0.53971547", "0.53903186", "0.53803366", "0.5375972", "0.53607416", "0.5350522", "0.53194726", "0.53191626", "0.53126264", "0.53114814", "0.53114814", "0.5280574", "0.5275292", "0.5267292", "0.5256628", "0.52463245", "0.52447826", "0.523779", "0.52301294", "0.52137357", "0.5212994", "0.52121174", "0.5200449", "0.52002615", "0.5197951", "0.51879025", "0.5174625", "0.51669997", "0.51534206", "0.5145105", "0.5133492", "0.5129313", "0.51287407", "0.5128703", "0.5110946", "0.51105773", "0.51060176", "0.51021725", "0.50944436", "0.50935", "0.5093115", "0.50924903", "0.5091038", "0.5089712", "0.5084353", "0.50719446", "0.50702304", "0.50701344", "0.50675356", "0.5064464", "0.50614756", "0.5058554", "0.50558925", "0.5051311", "0.5050583", "0.50454706", "0.5042811", "0.50376564", "0.5036834", "0.50348616", "0.5026274", "0.5025581", "0.50178236", "0.50176287", "0.50128573", "0.501261", "0.5004012", "0.4999556", "0.4999556", "0.49965018", "0.4995161", "0.499147", "0.49913317", "0.49894258", "0.4981664", "0.49757713", "0.49754936", "0.49700248", "0.4964106", "0.49637663", "0.4961906", "0.49576432", "0.49539983", "0.4945709", "0.4942907" ]
0.6445627
2
Builds a tri-partite graph of Users, Hashtags and Tweets. Hashtags are connected if they share the same user.
def fit(self, x: pd.DataFrame, y=None, **fit_params) -> "Method":
        self.graph = nx.Graph()
        minimal_hashtag_occurence = fit_params["minimal_hashtag_occurence"]
        x = self.drop_tweets_with_hashtags_that_occurred_less_than(x, minimal_hashtag_occurence)

        hashtag_agg = defaultdict(list)

        self._hashtag_labels = set()
        self._users_labels = set()
        self._tweet_labels = set()

        if self.verbose:
            print("Building graph ...")
            tqdm.tqdm.pandas()
            x.progress_apply(lambda r: self._transform_single_row(hashtag_agg, r), axis=1)
        else:
            x.apply(lambda r: self._transform_single_row(hashtag_agg, r), axis=1)

        self._refine_matrix_with_additional_connections()

        self._hashtag_labels = np.asarray(list(sorted(self._hashtag_labels)))
        self._users_labels = np.asarray(list(sorted(self._users_labels)))
        self._tweet_labels = np.asarray(list(sorted(self._tweet_labels)))

        if self.verbose:
            print("Building incidence matrix ...")
        incidence_matrix = self._get_binary_incidence_matrix()[
            :len(self._hashtag_labels), len(self._hashtag_labels):]
        weighted_adjacency_matrix_of_tags = incidence_matrix.dot(incidence_matrix.T)
        weighted_adjacency_matrix_of_tags.setdiag(0)

        if self.verbose:
            print("Building hashtag graph ...")
        hashtag_graph = nx.from_scipy_sparse_matrix(weighted_adjacency_matrix_of_tags)

        weighted_degree = np.asarray(
            list(map(itemgetter(1), hashtag_graph.degree(weight="weight"))))
        matrix_weighted_degree = sps.diags([1 / (weighted_degree + 1e-8)], [0])
        self._transition_matrix = weighted_adjacency_matrix_of_tags.dot(
            matrix_weighted_degree)

        if self.verbose:
            print("Calculating tf idf ...")
        document_list = [' '.join(hashtag_agg[key]) for key in self._hashtag_labels]
        # it has normalization inside, so no L2 is necessary
        self._hashtags_tf_idf_vectorizer = TfidfVectorizer(norm="l2")
        self._hashtags_tf_idf_representation = self._hashtags_tf_idf_vectorizer.fit_transform(
            document_list)

        if self.verbose:
            print("Fitting hashtag encoders ...")
        # [:-4] because each hashtag has "_tag" postfix to distinguish it in the graph
        self._hashtag_encoder.fit([lab[:-4] for lab in self._hashtag_labels])

        return self
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def addToGraph(tid,uid,mentions) :\n global G,found\n\n user = r.get(int(uid))\n \n if user == None :\n return\n\n user = re.findall('\"((?:(?!(?:\",\")).)*)\"', user)\n \n # lower the hashtags\n mentions = [t.lower() for t in mentions if t not in [\"\"]]\n \n usern = user[1].lower()\n\n G.add_node(usern)\n\n found = found + 1\n\n # iterate through mentions\n for m in mentions :\n # add hashtag to graph\n G.add_node(m)\n \n # update edge weight for every hashtag 2-permutation of the tweet\n if G.has_edge(usern,m) :\n G[usern][m]['weight'] += 1\n else :\n G.add_edge(usern,m,weight=1)", "def addTweetHashtagEdges(self):\n self.hashtagGraph.add_edges_from(self.builtEdgeList)", "def buildHashtagsDict(tweets):\n hashtags = {}\n for tweet in tweets:\n if tweet['entities']['hashtags']:\n for hashtag in tweet['entities']['hashtags']:\n tag = hashtag['text'].lower().strip()\n if tag not in hashtags:\n hashtags[tag] = 1\n else:\n hashtags[tag] += 1\n return hashtags", "def add_tweet(self,hash_tag_tuple,epoch_time):\n # Check if tweet is in order, inside the window duration, or outside\n t_diff = self.latest_time - epoch_time > self.t_window\n\n if t_diff <= self.t_window:\n self.latest_time = max(epoch_time,self.latest_time)\n\n current_vertices = self.graph.vs._name_index\n if self.verbose:\n print('Graph name index: '+str(current_vertices))\n print('Graph name index type: '+str(type(current_vertices)))\n\n # current vertivces will have none type when it is initilazed empty\n if current_vertices is not None:\n\n # Add hashtag to graph as vertex, if its already exists, nothing happens\n for hash_tag in hash_tag_tuple:\n # only add hashtag if it isn't already in the graph\n if hash_tag not in current_vertices:\n if self.verbose: print(\"Adding Vertex: \"+str(hash_tag))\n self.graph.add_vertex(name=hash_tag)\n else:\n # Add hashtag to graph as vertex, if its already exists, nothing happens\n for hash_tag in hash_tag_tuple:\n if self.verbose: print(\"Adding Vertex: \"+str(hash_tag))\n self.graph.add_vertex(name=hash_tag)\n\n\n\n # Add edges with associated epoch time\n for edge in combinations(hash_tag_tuple,r=2):\n if self.verbose: print('Adding Edge Pair:'+str(edge)+\" Time:\"+str(epoch_time))\n\n self.graph.add_edge(source=edge[0],target=edge[1],time=epoch_time)\n\n self.trim()\n\n # if tweet is outside of the time window than toss it\n else:\n return\n\n return", "def __init__(self):\n self.users = defaultdict(set)\n self.users_tweet = {}\n self.uid = 0", "def get_hashtags_by_user(cls, userid):\n\n QUERY = \"\"\"\n SELECT name FROM hashtags\n WHERE hashtag_id IN (SELECT hashtag_id\n FROM recipe_hashtags\n WHERE recipe_id IN (select recipe_id\n FROM recipes\n WHERE user_id= :userid))\n \"\"\"\n\n cursor = db.session.execute(QUERY, {'userid': userid})\n hashtag_data = cursor.fetchall()\n\n return hashtag_data", "def multiUserTweetDump2Neo(db, tweet_dump):\n\n # user->[tweeted/RTed/quoted]->(tweet/RT/quoteTweet)\n for label in ['tweet', 'retweet', 'quotetweet']:\n if tweet_dump[label]:\n tweets2Neo(db, tweet_dump[label], label=label)\n multi_user_labelled_tweet_actions(db, tweet_dump[label], label=label)\n\n # push original tweets from RTs/quotes\n for label in ['retweet', 'quotetweet']:\n tweets = [(tw[0],) for tw in tweet_dump[label]]\n if tweets:\n tweets2Neo(db, tweets, label='tweet')\n\n # (RT/quote)-[RETWEET_OF/QUOTE_OF]->(tweet)\n if tweet_dump['retweet']:\n tweetLinks(db, tweet_dump['retweet'], 'retweet', 'tweet', 'RETWEET_OF')\n if tweet_dump['quotetweet']:\n tweetLinks(db, 
tweet_dump['quotetweet'], 'quotetweet', 'tweet', 'QUOTE_OF')\n\n # push users of original tweets.\n if tweet_dump['users']:\n users2Neo(db, tweet_dump['users'].values())\n multi_user_tweet_actions(db, tweet_dump['users'])\n\n # mentions\n for label in ['tweet', 'retweet', 'quotetweet']:\n mentions = [m[1] for m in tweet_dump['entities'][label]['user_mentions']]\n if mentions:\n users2Neo(db, mentions)\n entities = tweet_dump['entities'][label]['user_mentions']\n entity_links(db, entities, 'MENTIONS', label, 'twitter_user', 'id_str', 'screen_name')\n\n # hashtags, urls and media\n for label in ['tweet', 'retweet', 'quotetweet']:\n for entity_type in ['hashtags', 'urls', 'media']:\n entities = [e[1] for e in tweet_dump['entities'][label][entity_type]]\n if entities:\n entities2neo(db, entities, entity_type)\n\n if tweet_dump['entities'][label]['hashtags']:\n entity_links(db, tweet_dump['entities'][label]['hashtags'],\n 'TAGGED', label, 'hashtag', 'id_str', 'text')\n\n if tweet_dump['entities'][label]['urls']:\n entity_links(db, tweet_dump['entities'][label]['urls'],\n 'LINKS_TO', label, 'url', 'id_str', 'expanded_url')\n\n if tweet_dump['entities'][label]['media']:\n entity_links(db, tweet_dump['entities'][label]['media'],\n 'EMBEDS', label, 'media', 'id_str', 'id_str')", "def get_hashtags(self):\n\t\t# Only first level comments should be checked for hashtag. Maybe.\n\t\tpassl", "def getHashtagsAndMentions(tweets):\n hashtags = Counter()\n mentions = Counter()\n plain = Counter()\n\n pattern = re.compile(r\"[^#@\\w'-]+\")\n\n for t in tweets:\n words = pattern.split(t.message)\n for word in words:\n # Ignore null strings caused by split characters at the end of a\n # message and remove standalone hyphens.\n if word and not word.startswith(\"-\"):\n # Increment count for the word in the Counter.\n if word.startswith(\"#\"):\n hashtags.update({word: 1})\n elif word.startswith(\"@\"):\n mentions.update({word: 1})\n else:\n # TODO: apply nltk.corpus.stopwords.words() here,\n # across languages. 
Consider that the stopwords cut off\n # before apostrophe, therefore check if the word\n # starts with the stopword.\n plain.update({word: 1})\n\n return hashtags, mentions, plain", "def connections2Neo(db, user, renderedTwits, friends=True):\n started = datetime.now()\n right_now = started.isoformat()\n \n users2Neo(db, renderedTwits)\n \n match = (\"MATCH (t:twitter_user {{screen_name: '{}'}}),\" +\n \" (f:twitter_user {{screen_name: d.screen_name}})\").format(user)\n\n if friends:\n merge = \"MERGE (t)-[:FOLLOWS]->(f)\"\n update = \"SET {}.friends_last_scraped = '{}'\".format('t'+user, right_now)\n else:\n merge = \"MERGE (t)<-[:FOLLOWS]-(f)\"\n update = \"SET {}.followers_last_scraped = '{}'\".format('t'+user, right_now)\n \n query = '\\n'.join(['UNWIND $data AS d', match, merge])\n \n data = [{'screen_name': twit.get('screen_name', False)}\n for twit in renderedTwits if twit.get('screen_name', False)]\n\n userNode = nodeRef(user, 'twitter_user', {'screen_name': user})\n update_query = '\\n'.join([mergeNode(userNode, match=True), update])\n\n neo_tx(db, update_query)\n neo_tx(db, query, data=data)\n\n how_long = (datetime.now() - started).seconds\n logging.info(\n '*** PUSHED %d CONNECTIONS FOR %s TO NEO IN %ds ***' %\n (len(renderedTwits), user, how_long))", "def process_tweet(tweet):\n d = {}\n d['hastags'] = [hashtag['text'] for hashtag in tweet['entities']['hashtags']]\n d['text'] = tweet['text']\n d['user'] = tweet['user']['screen_name']\n d['user_loc'] = tweet['user']['location']\n return d", "def __init__(self):\r\n self.tweets_by_user = collections.defaultdict(list)\r\n self.follows = collections.defaultdict(set)\r\n self.timestamp = 0", "def extract_hashtags(self):\n most_common_words = get_most_common_words(\n self.get_full_text_from_source())\n queryset = Hashtag.objects.all()\n for word in most_common_words:\n tag = queryset.get_or_create(word=word.lower())[0]\n tag.documents.add(self)", "def users2Neo(db, renderedTwits):\n started = datetime.now()\n right_now = started.isoformat()\n \n for twit in renderedTwits:\n twit['last_scraped'] = right_now\n \n data = [{'screen_name': twit.get('screen_name', False), 'props':twit}\n for twit in renderedTwits if twit.get('screen_name', False)]\n \n unwind_tx(db, data, 'MERGE (x:twitter_user {screen_name: d.screen_name})',\n 'SET x += d.props')\n\n how_long = (datetime.now() - started).seconds\n logging.info(\n '*** PUSHED %d USERS TO NEO IN %ds ***' %\n (len(renderedTwits), how_long))", "def buildEdgeList(self, hashtagsList):\n hashtagLen = len(hashtagsList)\n if hashtagLen > 1:\n for index in range(1, hashtagLen):\n self.builtEdgeList.append((hashtagsList[FIRST], hashtagsList[index]))\n hashtagsList.pop(FIRST)\n self.buildEdgeList(hashtagsList)", "def separate_hastags_mentions_urls(tweet):\n \n text = tweet.lower()\n hashtag_list = re.findall(\"#([a-zA-Z0-9_]{1,50})\", text)\n \n text = re.sub(r'http\\S+', '', text)\n clean_tweet = re.sub(\"@[A-Za-z0-9_]+\",\"\", text)\n clean_tweet = re.sub(\"#[A-Za-z0-9_]+\",\"\", clean_tweet)\n \n return clean_tweet, hashtag_list", "def hashtags(self, users_list, hashtags_list, feature_size=None, relative_freq=True):\n # get the counts of each hashtag shared\n # Collapse the list of lists: hashtags_list\n hashtag_counts = sorted_count([h for l in hashtags_list for h in l if h])\n\n # fitler against feature_size, Default is None=Selects all.\n hashtag_counts = hashtag_counts[:feature_size]\n hashtag_vector = tuple([h for h,_ in hashtag_counts])\n\n # zip users,hastags\n users_hashtags_zip = 
list(zip(users_list, hashtags_list))\n\n # findng hashtag feature for each user\n hashtag_features = {}\n for user in tqdm(set(users_list), desc=\"hashtag_features\", leave=LEAVE_BAR):\n user_hashtags = [h for u,hts in users_hashtags_zip for h in hts if u==user]\n hashtag_features[user] = np.array( [ user_hashtags.count(h) for h in hashtag_vector ] )\n if relative_freq and np.sum(hashtag_features[user])>0:\n hashtag_features[user] = hashtag_features[user]/np.sum(hashtag_features[user])\n \n return hashtag_features", "def fill_tweet(self, t, data):\n t.text=data[\"text\"]\n #\n # update the hashtags cache\n #\n try:\n t.hashtags=data[\"entities\"][\"hashtags\"] \n for htag in t.hashtags:\n #print(\"adding to hashtags: {} to cache:\".format(htag[\"text\"], ))\n if htag[\"text\"] in hash_cache:\n hash_cache[htag[\"text\"]] += 1\n else:\n hash_cache[htag[\"text\"]] = 1\n except:\n t.hashtags=[]\n #\n # update the country cache\n #\n try:\n # see: https://bitbucket.org/richardpenman/reverse_geocode/src/default/\n #country = reverse_geocode.search(data[\"coordinates\"][\"coordinates\"][0])[\"country\"]\n country = data[\"place\"][\"country_code\"]\n if country in country_cache:\n country_cache[country] += 1\n else:\n country_cache[country] = 1\n except:\n print(\" .... Could not identify county by coordinates\")\n \n #\n # update the user cache\n #\n try:\n user_id = \"@\" + data[\"user\"][\"screen_name\"]\n if user_id in user_cache:\n user_cache[user_id] += 1\n else:\n user_cache[user_id] = 1\n except:\n print(\" ERR No User: should never happen\")\n #\n # update the tweets per minute cache\n # \n\n #tweets_descending = OrderedDict(sorted(self.application.tweet_cache.items(), key=lambda kv: kv[1], reverse=True))\n #hash_descending = OrderedDict(sorted(hash_cache.items(), key=lambda kv: kv[1], reverse=True))\n #for counter, elem in enumerate(hash_descending):\n # if counter < 9:\n # print(\"hash top #{} : {} : {}\".format(counter, elem, str(hash_descending[elem])))\n # else:\n # break\n try:\n t.user_screenname=data[\"user\"][\"screen_name\"]\n except:\n t.user_screenname=\"\"\n try:\n t.profile_image_url_https = data[\"user\"][\"profile_image_url_https\"]\n except:\n t.profile_image_url_https = \"\"\n #\n # update the tweets cache\n #\n try:\n t.timestamp = dateutil.parser.parse(data[\"created_at\"])\n except:\n t.timestamp = datetime.datetime.utcnow()\n return t", "def tweetDump2Neo(db, user, tweet_dump):\n \n # user->[tweeted/RTed/quoted]->(tweet/RT/quoteTweet)\n for label in ['tweet', 'retweet', 'quotetweet']:\n if tweet_dump[label]:\n tweets2Neo(db, tweet_dump[label], label=label)\n tweetActions(db, user, tweet_dump[label], label=label)\n \n # push original tweets from RTs/quotes\n for label in ['retweet', 'quotetweet']:\n tweets = [(tw[0],) for tw in tweet_dump[label]]\n if tweets:\n tweets2Neo(db, tweets, label='tweet')\n \n # (RT/quote)-[RETWEET_OF/QUOTE_OF]->(tweet)\n if tweet_dump['retweet']:\n tweetLinks(db, tweet_dump['retweet'], 'retweet', 'tweet', 'RETWEET_OF')\n if tweet_dump['quotetweet']:\n tweetLinks(db, tweet_dump['quotetweet'], 'quotetweet', 'tweet', 'QUOTE_OF')\n\n # push users of original tweets.\n if tweet_dump['users']:\n users2Neo(db, tweet_dump['users'].values())\n multi_user_tweet_actions(db, tweet_dump['users'])\n \n # mentions\n for label in ['tweet', 'retweet', 'quotetweet']:\n mentions = [m[1] for m in tweet_dump['entities'][label]['user_mentions']]\n if mentions:\n users2Neo(db, mentions)\n entities = tweet_dump['entities'][label]['user_mentions']\n 
entity_links(db, entities, 'MENTIONS', label, 'twitter_user', 'id_str', 'screen_name')\n\n # hashtags, urls and media\n for label in ['tweet', 'retweet', 'quotetweet']:\n for entity_type in ['hashtags', 'urls', 'media']:\n entities = [e[1] for e in tweet_dump['entities'][label][entity_type]]\n if entities:\n entities2neo(db, entities, entity_type)\n\n if tweet_dump['entities'][label]['hashtags']:\n entity_links(db, tweet_dump['entities'][label]['hashtags'],\n 'TAGGED', label, 'hashtag', 'id_str', 'text')\n \n if tweet_dump['entities'][label]['urls']:\n entity_links(db, tweet_dump['entities'][label]['urls'],\n 'LINKS_TO', label, 'url', 'id_str', 'expanded_url')\n \n if tweet_dump['entities'][label]['media']:\n entity_links(db, tweet_dump['entities'][label]['media'],\n 'EMBEDS', label, 'media', 'id_str', 'id_str')", "def create_user_links(verbose=False):\n rg = global_ratings_graph()\n if verbose:\n print \"Ratings graph loaded.\"\n uids = rg.users()\n links = []\n for user in User.select():\n uid1 = \"u%s\" % user.user_id\n m1 = set(rg.user_movies(uid1))\n\n buddies = {}\n\n for uid2 in uids:\n if uid1 == uid2:\n continue\n\n m2 = set(rg.user_movies(uid2))\n\n intersection = m1.intersection(m2)\n if not intersection:\n continue\n\n union = m1.union(m2)\n\n buddies[uid2] = dict(\n # Jaccard index\n j=len(intersection)/float(len(union)),\n # Common movies count\n c=len(intersection),\n )\n\n links.append(dict(user=user, buddies=buddies))\n\n chunked_insert(UserLink, links)", "def __init__(self):\n self.timer = itertools.count(0, -1)\n self.tweets = collections.defaultdict(collections.deque) # map userId to tweets\n self.followees = collections.defaultdict(set) # map userId to its followees", "def countHashtags(self, hashtags):\n for hashtag in hashtags:\n if hashtag not in self.hashDict.keys():\n self.hashDict.update({hashtag: 1})\n else:\n self.hashDict.update({hashtag: self.hashDict[hashtag] + 1})", "def readHashtags():\n next_max_id = True\n reader = HashtagReader()\n while next_max_id:\n if next_max_id is True:\n next_max_id = ''\n _ = api.getUserFeed(usernameId=userId, maxid=next_max_id)\n reader.items.extend(api.LastJson.get('items', []))\n next_max_id = api.LastJson.get('next_max_id', '')\n reader.checkBannedTags()\n reader.printHashtagsDict()", "def get_tweets(self, user, count):\n topTweetsList = self.api.user_timeline(screen_name=user, count=count, tweet_mode='extended')\n clnTweets = {}\n for tweet in topTweetsList:\n clnTweets[processTweet(getNonRetweet(tweet))] = ({'like':getFavoriteCount(tweet),'RT':getNumRetweet(tweet),'follower':getNumFollowers(tweet)}) \n\n tweetTxt = [twt for twt in clnTweets.keys()]\n \n if user in self.userTweetsStat:\n self.userTweetsStat[user].append(clnTweets)\n else:\n tmp = []\n tmp.append(clnTweets)\n self.userTweetsStat[user] = tmp\n return tweetTxt, self.userTweetsStat", "def create_graph(users, friend_counts):\n ###TODO-- Completed\n G = nx.Graph()\n\n #For Filtering the Nodes\n #print(friend_counts)\n friend_nodes = [friend for friend in friend_counts if friend_counts[friend] > 1]\n candidate_nodes = [user['screen_name'] for user in users]\n\n #print(\"Nodes: \",len(friend_nodes), len(candidate_nodes))\n #Adding Nodes to graph\n G.add_nodes_from(friend_nodes + candidate_nodes)\n\n #Connecting the Nodes with Edges\n for candidate in users:\n for friend in friend_nodes:\n if friend in candidate['friends']:\n G.add_edge(candidate['screen_name'], friend)\n\n return G", "def get_top_hashtags_from_twitter_api(country='Japan', extended_search=True, 
debug=False):\n trends = get_top_trends_from_twitter(country=country, exclude_hashtags=False)\n trends = json.loads(trends)\n\n trending_hashtags = [t['label'] for t in trends]\n\n #print(json.dumps(trends, indent=4, ensure_ascii=False))\n\n queries = [t['query'] for t in trends]\n\n if debug:\n #[print(x) for x in trends]\n #[print(x) for x in queries]\n queries = [queries[0]]\n\n full_hashtags_list = []\n for query in queries:\n #print(query)\n # there is no country filter, but there is language filter at least\n if country == 'Japan':\n responses = api.GetSearch(term=query, locale='ja', return_json=True)\n try: responses = responses['statuses']\n except: print(responses)\n else:\n responses = api.GetSearch(term=query, return_json=True)\n try: responses = responses['statuses']\n except: print(responses)\n\n #print(json.dumps(responses, indent=4, ensure_ascii=False))\n\n trend_hashtags_list = []\n for response in responses:\n if debug: print(json.dumps(response, indent=4, ensure_ascii=False))\n text = response['text']\n\n hashtags_list = response['entities']['hashtags']\n\n if len(hashtags_list) > 0:\n hashtags_list = [h['text'] for h in hashtags_list]\n [trend_hashtags_list.append(h) for h in hashtags_list]\n\n full_hashtags_list.append(trend_hashtags_list)\n\n flat_hashtags_list = [item for sublist in full_hashtags_list for item in sublist]\n\n # turn it into a set to clear duplicates, then append #\n flat_hashtags_list = list(set(flat_hashtags_list))\n flat_hashtags_list = ['#'+h for h in flat_hashtags_list]\n\n flat_tier_list = []\n for h in flat_hashtags_list:\n if h in trending_hashtags:\n flat_tier_list.append(1)\n else:\n flat_tier_list.append(2)\n\n output = []\n for hashtag, tier in zip(flat_hashtags_list, flat_tier_list):\n output.append({\n \"label\": hashtag,\n \"tier\": tier\n })\n\n sorted_output = sorted(output, key=lambda x: x['tier'])\n\n output_json = json.dumps(sorted_output, ensure_ascii=False)\n return output_json", "def _transform_single_row(self, hashtag_agg: Dict, row: pd.Series):\n user_name = row[\"username\"] + \"_user\"\n tweet_id = str(row[\"id\"]) + \"_tweet\"\n tags = row[\"hashtags\"]\n\n self._users_labels.add(user_name)\n self._tweet_labels.add(tweet_id)\n\n if not self.graph.has_node(user_name):\n self.graph.add_node(user_name, type=\"username\")\n\n if not self.graph.has_node(tweet_id):\n self.graph.add_node(tweet_id, type=\"tweet_id\")\n\n for hashtag_index in tags:\n tag = hashtag_index[\"text\"] + \"_tag\"\n hashtag_agg[tag] += row[\"lemmas\"]\n\n if not self.graph.has_node(tag):\n self.graph.add_node(tag, type=\"hashtag\")\n\n if not self.graph.has_edge(tag, user_name):\n self.graph.add_edge(tag, user_name)\n\n if not self.graph.has_edge(tag, tweet_id):\n self.graph.add_edge(tag, tweet_id)\n\n self._hashtag_labels.add(tag)", "def __init__(self):\n self.users = defaultdict(set) # userId -> followeeIds\n self.posts = defaultdict(list) # userId -> posts, posts = (timestamp, postId)", "def extract_hashtags(tweet):\n tknzr = TweetTokenizer()\n hashtags = [token.lower() for token in tknzr.tokenize(tweet) if re.match(hashtag_re, token)]\n return hashtags", "def get_all_social_paths(self, user_id): #each user v v each path V V\n # output example- {1: [1], 8: [1, 8], 10: [1, 10], 5: [1, 5], 2: [1, 10, 2], 6: [1, 10, 6], 7: [1, 10, 2, 7]}\n visited = {} # Note that this is a dictionary, not a set\n # Need to do a bfs using the user id\n # first step is to traverse the graph and record all the vertices as keys in visited using bft\n # then take those keys and 
use bfs on each, using user_id as the starting node and and the key as\n # the destination node\n\n # Modification of BFT\n # create an empty dict\n # q = Queue()\n q = []\n\n # init enqueue the starting node\n q.append(user_id)\n\n while len(q) > 0:\n # Dequeue the first item\n v = q.pop(0)\n # If it's not been visited:\n if v not in visited:\n # Mark as visited (i.e. add to the visited set)\n visited[v] = []\n\n # Do something with the node\n print(f\"Visited {v}\")\n\n # Add all neighbors to the queue\n for next_vert in self.friendships[v]:\n q.append(next_vert)\n\n # once visited is filled, then we start the bfs\n #print('vv',visited)\n possible_paths = {}\n #run a bfs for each key in visited\n for v in visited:\n possible_paths[v] = []\n \n if v == user_id:\n visited[v] = [user_id]\n\n path = []\n while len(path) < len(visited):\n\n # Add all neighbors to the queue\n for next_vert in self.friendships[v]:\n print(possible_paths[v])\n # copy the path\n # temp_path = list(path)\n # temp_path.append(next_vert)\n # add path to possible_paths\n path.append(next_vert)\n\n possible_paths[v].append(path) # HAVE TO USE QUEUE OR STACK, THEY ENSURE THE NEIGHBORS\n # FOLLOW THE CORRECT ORDER WHEN LOOPING \n\n if v == path[-1]:\n \n # IF SO, RETURN PATH\n visited[v] = path\n break\n\n # for x in visited:\n # bfs(user_id, x)\n # visited[x].add(path)\n \n print('pct of total users in network', len(visited[1])/len(visited))\n print('degrees of separation', len(visited[1]) - 1)\n return visited", "def get_hashtags():\r\n hashtags_list = cache.get('hashtags-list')\r\n if hashtags_list is None:\r\n pipeline = [\r\n {\"$unwind\": \"$entities\"},\r\n {\"$unwind\": \"$entities.hashtags\"},\r\n {\"$unwind\": \"$entities.hashtags.text\"},\r\n {\"$group\": {\"_id\": \"$entities.hashtags.text\", \"count\":\r\n {\"$sum\": 1}}},\r\n {\"$sort\": SON([(\"count\", -1), (\"_id\", -1)])}]\r\n\r\n hashtags = mongo_coll_tweets.aggregate(pipeline)\r\n hashtags_list = []\r\n for hashtag in hashtags:\r\n hashtags_list.append((list(hashtag.values())[1], list(hashtag.values())[0]))\r\n\r\n cache.set('hashtags-list', hashtags_list,\r\n cfg['flask_cache_timeout'] * 60)\r\n\r\n return hashtags_list", "def get_all_social_paths(self, user_id):\n visited = {} # Note that this is a dictionary, not a set\n # !!!! 
IMPLEMENT ME\n # graphs=Graph()\n # for i in self.users:\n # graphs.add_vertex(i)\n \n # for i in self.users:\n # for x in self.friendships[i]:\n # graphs.add_edge(i,x)\n\n # for i in graphs.vertices:\n # if graphs.bfs(i,user_id):\n # visited[i]=graphs.bfs(i,user_id)\n queue=Queue()\n queue.enqueue([user_id])\n while queue.size()>0:\n path=queue.dequeue()\n current_user = path[-1]\n if current_user not in visited:\n visited[current_user]=path\n for ID in self.friendships[current_user]:\n new_path=list(path)\n new_path.append(ID)\n queue.enqueue(new_path)\n return visited", "def pushTwitterConnections(twits,user,friends=True,cacheKey=False):\n\n if friends:\n job = ' FRIENDS'\n else:\n job = ' FOLLOWERS'\n \n if twits:\n renderedTwits = [ renderTwitterUser(twit) for twit in twits ]\n pushRenderedConnections2Neo.delay(user,renderedTwits,friends=friends)\n pushRenderedConnections2Cass.delay(user,renderedTwits,friends=friends)\n# These are the last Tweets, tell the scaper we're done.\n if cacheKey: # These are the last connections, tell the scaper we're done.\n cache.set(cacheKey,'done')\n print '*** '+user+': DONE WITH'+job+' ***'", "def __init__(self):\n self.users = {}\n self.tweetTime = {}\n self.recentMax = 0\n self.time = 0", "def clean_tweet(tweet):\n word_out, hashtags = [], []\n for word in tweet.split():\n if word[0] == '#':\n hashtags.append(word)\n elif ((len(word) != 0) and (word[0] != '@')) and (\n len(word) < 4 or ((len(word) > - 4) and (word[:4] != 'http'))):\n word_out.append(word)\n return word_out, hashtags", "def hashtags(self):\n return [tag[\"text\"] for tag in self.status.hashtags]", "def handle_trending_hashtags(api_pipeline, trending_hashtags_from_db):\n\n trending_hashtags = api_pipeline.get_top_hashtags_worldwide()\n trending_hashtags.sort(key=lambda hashtag: hashtag.tweet_volume, reverse=True)\n trending_hashtags_chart = PlotPainter.plot_hashtags(trending_hashtags)\n trending_hashtags_from_db_today = trending_hashtags_from_db.filter(save_date=datetime.datetime.today().date())\n for trending_hashtag in trending_hashtags:\n if trending_hashtag not in trending_hashtags_from_db_today:\n trending_hashtag.save()\n return trending_hashtags, trending_hashtags_chart", "def get_popularHashtags_unitedairlines():\n fields = ['hashtags'] # files to read\n # get any desird day as you want\n date_Day = ['04', '05', '06', '07', '08', '09', '10', '11', '12', '13']\n #\n popularity_cols=[[] for i in range(10)]\n dateCol=[]\n for date_DD in xrange(10):\n csv_input = pd.read_csv('United_Airlines_'+ \"2017-04-\" + \n date_Day[date_DD] + '.csv', skipinitialspace=True, usecols=fields)\n dateCol.append(\"2017-04-\" + date_Day[date_DD])\n print 'processing file: '+ 'United_Airlines_'+ \"2017-04-\" + date_Day[date_DD] + '.csv'\n \n # add all hashtags of all tweets from all files to list\n hashtags = []\n for htext in csv_input.hashtags:\n hstr = htext.translate(None, string.punctuation)\n for hashtag in hstr.split():\n hashtags.append(hashtag)\n \n c=Counter(hashtags)\n\n # take most popular 10 per day\n for i in range(10):\n popularity_cols[i].append(c.most_common(10)[i])\n\n # add dates\n popularity_cols.insert(0,dateCol)\n # headers\n headers=['date']\n for i in range(9,-1,-1):\n headers.append(i)\n \n # to dataframe and csv\n df = pd.DataFrame(popularity_cols)\n df=df.transpose()\n df.columns = headers\n\n df.to_csv('United_Airlines_Popular_Hashtags_'+ \"2017-04-\" + \n date_Day[0] +\"_to_2017-04-\"+date_Day[len(date_Day)-1] + '.csv', index=False)", "def filter_by_hashtag(tweets: list, 
hashtag: str) -> list:\n tweets_with_hashtag = {} # findall(): Kui tekstis on rohkem kui üks regulaaravaldisele vastav alamsõne saab kõikide vastete järjendi moodustada funktsiooniga findall()\n pattern = r\"#\\w+\" # \\w : tähed, numbrid, alakriips, + : 1 või rohkem\n for tweet in tweets: # r\"string\" on \"raw\" tüüpi string, mis tähendab, et kurakaldkriipsud(\"\\\") jäetakse teksti alles.\n find_hashtag = re.findall(pattern, tweet.content) # word:\\w\\w\\w. Regulaaravaldisele vastab täpne sõne \"word:\" ning sellele järgnevad 3 suvalist tähte.\n if find_hashtag:\n tweets_with_hashtag.setdefault(ht, []).append(tweet)\n return tweets_with_hashtag[hashtag]", "def get_user_recipes_given_hashtag(cls, userid, hashtag):\n\n QUERY = \"\"\"\n SELECT recipe_title, recipe_id FROM recipes\n WHERE user_id= :userid AND recipe_id IN\n (SELECT recipe_id FROM recipe_hashtags WHERE hashtag_id IN\n (SELECT hashtag_id FROM hashtags WHERE name= :hashtag))\n \"\"\"\n\n cursor = db.session.execute(QUERY, {'userid': userid, 'hashtag': hashtag})\n hashtag_recipes = cursor.fetchall()\n\n return hashtag_recipes", "def handle_hashtags_tweets_for_date(current_date, current_hashtag):\n\n hashtags_tweets = current_hashtag.tweets.filter(save_date=current_date).distinct()\n hashtags_tweets_list = [hashtags_tweet for hashtags_tweet in hashtags_tweets]\n hashtags_tweets_list.sort(key=lambda tweet: (tweet.retweets, tweet.likes), reverse=True)\n hashtags_tweets_list = hashtags_tweets_list[:10]\n hashtags_tweets_chart = PlotPainter.plot_tweets(hashtags_tweets_list) if hashtags_tweets else None\n return hashtags_tweets_chart, hashtags_tweets_list", "def getNewsFeed(self, userId):\n tweets = self.tweets\n star = self.followstar.get(userId, set()) | set([userId])\n tw = []\n for people in star:\n if people in tweets:\n tw.append((tweets[people][-1][0], tweets[people][-1][1], people, len(tweets[people])-1))\n heapq.heapify(tw)\n \n ans = []\n while len(ans) < 10 and len(tw) != 0:\n u = heapq.heappop(tw)\n ans.append(u[1])\n if u[3] > 0:\n heapq.heappush(tw, (tweets[u[2]][u[3]-1][0], tweets[u[2]][u[3]-1][1], u[2], u[3]-1))\n return ans", "def main():\n\n for hashtag in HASHTAGS:\n start = time.time()\n print(\"Getting profile info for #\" + hashtag)\n bot = InstagramBot(headless=HEADLESS, proxy=PROXY, threads=THREADS)\n profile_links = bot.get_users(num_pictures=NUM_PICTURES, hashtag=hashtag)\n\n profile_links_divided = list(numpy.array_split(numpy.array(list(profile_links)), THREADS))\n profile_links_divided = [numpy_arr.tolist() for numpy_arr in profile_links_divided]\n\n with concurrent.futures.ThreadPoolExecutor(max_workers=THREADS) as executor:\n user_info_future = {executor.submit(bot.get_user_info, profile_links) for profile_links in\n profile_links_divided}\n\n user_info_divided = [future.result() for future in user_info_future]\n user_info = [info for sublist in user_info_divided for info in sublist]\n for info in user_info:\n info.append(hashtag)\n\n users_df = pd.DataFrame(user_info,\n columns=['User', 'Followers', 'Following', 'Posts', 'Email', 'URL', 'Hashtag'])\n end = time.time()\n users_df.loc[len(users_df)] = ['RUNTIME', str(end-start), 0, 0, 0, 0, 0]\n\n csv_name = FOLDER_PATH + '/users_' + hashtag + '.csv'\n try:\n users_df.to_csv(csv_name, index=False)\n except Exception as e:\n print('Unable to save to csv. 
It is probably open on your machine')\n print(e)\n\n get_influencer_csv(csv_name)\n\n print(\"#\" + hashtag + \" took \" + str(end-start) + \"s to run\")", "def _refine_matrix_with_additional_connections(self):\n new_graph = self.graph.copy()\n for node in tqdm.tqdm(self.graph.nodes(), disable=not self.verbose):\n if self.graph.node[node][\"type\"] == \"hashtag\":\n for neighbour in self.graph.neighbors(node):\n if self.graph.node[neighbour][\"type\"] == \"username\":\n for other_node in self.graph.neighbors(neighbour):\n if self.graph.node[other_node][\"type\"] == \"hashtag\" \\\n and not self.graph.has_edge(node, other_node) \\\n and not node == other_node:\n new_graph.add_edge(node, other_node)\n self.graph = new_graph", "def hashtags(self, candidate_list):\n if Tweet.hashtagre is None:\n Tweet.hashtagre = re.compile('|'.join(map(re.escape, candidate_list)))\n return [\n [m.group(0).replace('#', '', 1), m.span()]\n for m in Tweet.hashtagre.finditer(self.text)\n ]", "def build_trigrams(words):\n trigrams = {}\n for i in range(len(words) - 2):\n pair = words[i:i + 2]\n follower = words[i + 2]\n # add tuple type to make it immutable / use append to add more options to the follower selections\n trigrams.setdefault(tuple(pair), []).append(follower)\n #print(trigrams) - for testing\n return trigrams", "def handle_current_hashtag(api_pipeline, current_hashtag):\n\n current_hashtag_saved_tweets = current_hashtag.tweets.all()\n hashtags_tweets = api_pipeline.get_recent_tweets_for_hashtag(current_hashtag.text, how_many=5)\n for hashtags_tweet in hashtags_tweets:\n if hashtags_tweet not in current_hashtag_saved_tweets.filter(save_date=datetime.datetime.today().date()):\n hashtags_tweet.save()\n current_hashtag.tweets.add(hashtags_tweet)\n current_hashtag.save()\n hashtags_tweets.sort(key=lambda tweet: (tweet.retweets, tweet.likes), reverse=True)\n hashtags_tweets_chart = PlotPainter.plot_tweets(hashtags_tweets) if hashtags_tweets else None\n return hashtags_tweets, hashtags_tweets_chart", "def create_context(host, port):\n spark_context = SparkContext(master=\"local[2]\", appName=\"TwitterStreamApp\")\n spark_context.setLogLevel(\"ERROR\")\n # create the Streaming Context from the above spark context with interval size 2 seconds\n streaming_context = StreamingContext(spark_context, 2)\n # setting a checkpoint to allow RDD recovery\n streaming_context.checkpoint(CHECKPOINT)\n # read data from port 9009\n data_stream = streaming_context.socketTextStream(ADDRESS, PORT,\n storageLevel=StorageLevel(True, True, False, False, 2))\n # split each tweet into words\n words = data_stream.flatMap(lambda line: line.split(\" \"))\n # filter the words to get only hashtags, then map each hashtag to be a pair of (hashtag,1)\n hashtags = words.filter(lambda w: '#' in w).map(lambda x: (x, 1))\n # adding the count of each hashtag to its last count\n tags_totals = hashtags.updateStateByKey(aggregate_tags_count)\n\n def process_rdd(time, rdd):\n \"\"\"\n Function that processes an RDD.\n :param time: Time stamp of the process.\n :param rdd: The RDD to be processed.\n \"\"\"\n print(\"----------- %s -----------\" % str(time))\n try:\n if rdd:\n # Get spark sql singleton context from the current context\n sql_context = get_sql_context_instance(rdd.context.getConf())\n # convert the RDD to Row RDD\n row_rdd = rdd.map(lambda w: Row(hashtag=w[0], hashtag_count=w[1]))\n # create a DF from the Row RDD\n hashtags_dataframe = sql_context.createDataFrame(row_rdd)\n # Register the dataframe as table\n 
hashtags_dataframe.createOrReplaceTempView(\"hashtags\")\n # get the top 10 hashtags from the table using SQL and print them\n hashtag_counts_dataframe = sql_context.sql(\n \"select hashtag, hashtag_count from hashtags order by hashtag_count desc limit 10\")\n hashtag_counts_dataframe.show()\n\n # call this method to prepare top 10 hashtags DF and send them\n\n def send_dataframe_to_dashboard(dataframe):\n \"\"\"\n Function to send DataFrame to the dashboard for visualization.\n :param dataframe: Spark DataFrame created by process_rdd().\n \"\"\"\n # extract the hashtags from dataframe and convert them into array\n top_tags = [str(t.hashtag) for t in dataframe.select(\"hashtag\").collect()]\n # extract the counts from dataframe and convert them into array\n tags_count = [p.hashtag_count for p in dataframe.select(\"hashtag_count\").collect()]\n # initialize and send the data through REST API\n request_data = {'label': str(top_tags), 'data': str(tags_count)}\n response = post(dashboard_url, data=request_data)\n\n send_dataframe_to_dashboard(hashtag_counts_dataframe)\n except:\n pass\n\n # do processing for each RDD generated in each interval\n tags_totals.foreachRDD(process_rdd)\n return streaming_context", "def proximity(user_a: TwitscanUser, user_b: TwitscanUser) -> tuple[float, ...]:\n global cache\n for user in (user_a, user_b):\n assert check_user_id(user.user_id) is not None, f\"User {user} not in db\"\n\n if cache.get(user_a.user_id) is None:\n entourage_a = set(map(lambda ent: ent.friend_follower_id, user_a.entourage))\n hashtags_a = hashtags_used(user_a)\n cache[user_a.user_id] = CacheRecord(entourage=entourage_a, hashtags=hashtags_a)\n else:\n cr = cache[user_a.user_id]\n entourage_a = cr[\"entourage\"]\n hashtags_a = cr[\"hashtags\"]\n\n a_mentions_b, a_mentions_counter = n_mentions(user_a, user_b.user_id)\n a_favs_b, a_rt_b, a_cmt_b = n_interactions(user_a, user_b.user_id)\n\n entourage_b = set([ent.friend_follower_id for ent in user_b.entourage])\n hashtags_b = hashtags_used(user_b)\n b_mentions_a, b_mentions_counter = n_mentions(user_b, user_a.user_id)\n b_favs_a, b_rt_a, b_cmt_a = n_interactions(user_b, user_a.user_id)\n\n ent_a_len = len(entourage_a)\n ent_b_len = len(entourage_b)\n ent_len = ent_b_len + ent_a_len\n\n hash_a_len = len(hashtags_a)\n hash_b_len = len(hashtags_b)\n hash_len = hash_b_len + hash_a_len\n # weigh common entourage / hashtags by number of entourage acquired / hashtags used\n common_entourage = len(entourage_a.intersection(entourage_b)) / ent_len if ent_len != 0 else 0\n common_hashtags = len(hashtags_a.intersection(hashtags_b)) / hash_len if hash_len != 0 else 0\n\n total_mentions = a_mentions_b + b_mentions_a\n total_favs = a_favs_b + b_favs_a\n total_rts = a_rt_b + b_rt_a\n total_cmts = a_cmt_b + b_cmt_a\n\n return (\n a_mentions_b,\n b_mentions_a,\n a_favs_b,\n b_favs_a,\n a_rt_b,\n b_rt_a,\n a_cmt_b,\n b_cmt_a,\n ent_a_len,\n ent_len,\n hash_a_len,\n hash_b_len,\n hash_len,\n common_entourage,\n len(entourage_a),\n len(entourage_b),\n common_hashtags,\n len(hashtags_a),\n len(hashtags_b),\n a_mentions_b,\n b_mentions_a,\n a_mentions_counter,\n b_mentions_counter,\n a_favs_b,\n b_favs_a,\n user_a.favorites_count,\n user_b.favorites_count,\n a_rt_b,\n b_rt_a,\n a_cmt_b,\n b_cmt_a,\n )", "def analyze_hashtag(self, hashtag, count=200):\n tweets = []\n\n for x in xrange(0, int(count / 100)):\n tweets.extend(self.tweet_fetcher.get_tweets(hashtag))\n\n analyzed_tweets = sort_tweets(self.sa.classify(tweets))\n\n self.analyzed_tweets = analyzed_tweets\n\n 
return analyzed_tweets", "def create_similarity_graph(user_combined_reviews):\n similarity_graph = {}\n for curr_user_id, review in user_combined_reviews.items():\n similarity_graph[curr_user_id] = []\n for other_user_id, others_review in user_combined_reviews.items():\n if other_user_id != curr_user_id:\n similarity_graph[curr_user_id].append({\n other_user_id: similarity(review, others_review)\n })\n return similarity_graph", "def clean_hashtags(self, tweet):\n self.hashtags = [tag.strip('#') for tag in tweet.split()\n if tag.startswith('#')]\n\n for hashtag in self.hashtags:\n tweet = tweet.replace('#'+hashtag, '')\n\n tweet = self.clean_unnecessary_whitespaces(tweet)\n return tweet", "def like_retweet(self):\n \n bot = self.bot \n logging.debug('Get Hashtag')\n hashtag = Twitterbot.getHashtags(self)\n logging.debug('Bot for Like_retweet initalized')\n # fetches the latest tweets with the provided hashtag \n bot.get( \n 'https://twitter.com/search?q=%23'+hashtag+'&src=trend_click&vertical=trends'\n ) \n \n time.sleep(3) \n \n # using set so that only unique links \n # are present and to avoid unnecessary repetition \n links = set() \n \n # obtaining the links of the tweets \n for _ in range(3): \n # executing javascript code \n # to scroll the webpage \n bot.execute_script( \n 'window.scrollTo(0, document.body.scrollHeight)'\n ) \n \n time.sleep(4) \n \n # using list comprehension \n # for adding all the tweets link to the set \n # this particular piece of code might \n # look very complicated but the only reason \n # I opted for list comprehension because is \n # lot faster than traditional loops \n [ \n links.add(elem.get_attribute('href')) \n for elem in bot.find_elements_by_xpath(\"//a[@dir ='auto']\") \n ] \n i = 0\n # traversing through the generated links \n for link in links:\n #Nothing for the Hashtag was found, another run is required\n #if len(links ==6):\n # break\n # Twitterbot.like_retweet(self)\n # opens individual links \n #print(len(links))\n bot.get(link) \n time.sleep(4)\n if i == 3:\n break\n i += 1\n try: \n # retweet button selector \n bot.find_element_by_css_selector( \n '.css-18t94o4[data-testid =\"retweet\"]'\n ).click() \n # initializes action chain \n actions = ActionChains(bot) \n # sends RETURN key to retweet without comment \n actions.send_keys(Keys.RETURN).perform() \n \n # like button selector \n bot.find_element_by_css_selector( \n '.css-18t94o4[data-testid =\"like\"]'\n ).click() \n # adding higher sleep time to avoid \n # getting detected as bot by twitter \n logging.info(f'Liked and retweeted:\"{link}\"')\n time.sleep(10) \n except: \n time.sleep(2) \n \n # fetches the main homepage \n bot.get('https://twitter.com/')", "def preprocess_tweet(tweet):\n\n\n clean_tweet, hashtags = separate_hastags_mentions_urls(tweet)\n clean_tweet = remove_emoji_punc(clean_tweet)\n return clean_tweet, hashtags", "def __init__(self):\n self.timer = itertools.count(step=-1)\n self.tweets = collections.defaultdict(collections.deque)\n self.followees = collections.defaultdict(set)", "def make_graph(self):\n\n for i in range(0, self.num_words):\n self.graph.append([])\n\n for word in self.words:\n for pos in range(0, 4):\n for let in range(0, 26):\n temp = ''\n if pos > 0:\n temp = word[0:pos] + (chr(let + ord('a'))) + word[pos +\n 1:4]\n else:\n temp = chr(let + ord('a')) + word[pos + 1:4]\n if temp[pos] != word[pos]:\n if temp in self.index.keys():\n i = self.index[word]\n j = self.index[temp]\n self.graph[i].append(j)", "def __init__(self):\n self.time = 0\n 
self.user_followed = collections.defaultdict(list)\n self.user_post = collections.defaultdict(list)\n self.users = set()", "def get_hashtags_df(graph_path: str = '/data/graphs/train_graph.p') -> pd.DataFrame:\n with open(PATH + graph_path, 'rb') as f:\n G = pickle.load(f)\n\n hashtags = [{'hashtag': node, **G.nodes[node]}\n for node in G.nodes\n if G.nodes[node]['node_type'] == 'hashtag']\n hashtags = pd.DataFrame(hashtags)\n return hashtags", "def user_table(lines, context):\n new_list = context[0:lines]\n total_sent = 0\n user_obj = {\n \"user_id\": \"\",\n \"business_type\": \"\",\n \"created\": \"\",\n \"devices\": \"\",\n \"first_name\": \"\",\n \"first_seen\": \"\",\n \"email\": \"\",\n \"email_subscription\": \"\",\n \"is_tracked\": \"\",\n \"last_name\": \"\",\n \"last_seen\": \"\",\n \"notifications\": [],\n \"picture\": \"\",\n \"role\": \"\",\n \"role_expiration\": \"\",\n \"social\": [],\n \"top_tags\": [],\n \"total_logins\": [],\n \"unsubscribe_token\": [],\n \"updated\": \"\"\n }\n\n for line in new_list:\n \"\"\"pushes to top_tag array\"\"\"\n # for ids in line[\"category_details_id\"]:\n # user_obj[\"tag_ids\"].append(ids)\n\n \"\"\"adding to object that is pushed to firebase\"\"\"\n user_obj[\"user_id\"] = line[\"id\"]\n user_obj[\"business_type\"] = line[\"businesstype\"]\n user_obj[\"created\"] = line[\"created\"]\n user_obj[\"devices\"] = line[\"devices\"]\n user_obj[\"first_name\"] = line[\"first_name\"]\n user_obj[\"first_seen\"] = line[\"first_seen\"]\n user_obj[\"email\"] = line[\"email\"]\n user_obj[\"email_subscription\"] = line[\"email_subscription\"]\n user_obj[\"is_tracked\"] = line[\"istracked\"]\n user_obj[\"last_name\"] = line[\"last_name\"]\n user_obj[\"last_seen\"] = line[\"last_seen\"]\n # user_obj[\"notifications\"] = line[\"notifications\"]\n user_obj[\"picture\"] = line[\"picture\"]\n user_obj[\"role\"] = line[\"user_role\"]\n # user_obj[\"role_expiration\"] = line[\"role_expiration\"]\n user_obj[\"social\"] = line[\"social\"]\n # user_obj[\"top_tags\"] = line[\"top_tags\"]\n user_obj[\"total_logins\"] = line[\"total_logins\"]\n user_obj[\"unsubscribe_token\"] = line[\"unsubscribe_token\"]\n user_obj[\"updated\"] = line[\"_updated_ts\"]\n\n # pushes to firebase db\n db.child(\"users\").child(line['id']).set(user_obj)\n # print (line)\n total_sent = total_sent + 1\n # print(str(total_sent) + \" sent\")\n\n return", "def __init__(self):\n self.follow_map = {}\n self.user_tweets = {}\n \n self.sort_id = 0", "def get_posts(self, userid, username):\n dict_json = {}\n x = 0\n outfile_name = \"tweetsFrom\" + username + \".json\"\n posts = api.GetUserTimeline(user_id=userid, count=200)\n text_list = [p.text for p in posts]\n for text in text_list:\n dict_json[x] = text\n x += 1\n with open(outfile_name, \"w\") as outfile:\n json.dump(dict_json, outfile)\n outfile.close()", "def format_tweet(tweet):\n user = tweet['user']\n return {\n 'tweet_id': tweet['id'],\n 'hashtag': HASHTAG,\n 'text': tweet['text'],\n 'created_at': tweet['created_at'],\n 'user': {\n 'user_id': user['id'],\n 'name': user['name'],\n 'handle': user['screen_name'],\n 'profile_image_url': user['profile_image_url'],\n 'profile_url': f\"https://twitter.com/{user['screen_name']}\"\n }\n }", "def pushTwitterUsers(twits):\n rightNow = datetime.now().isoformat()\n for twit in twits:\n twit['last_scraped'] = rightNow\n \n renderedTwits = [ renderTwitterUser(twit) for twit in twits ]\n pushRenderedTwits2Neo.delay(renderedTwits)\n pushRenderedTwits2Cass.delay(renderedTwits)\n #return True", "def 
extract_hashtags(text: str) -> List[str]:\n final = []\n result = list(text.split())\n \n \n for word in result: \n if word[0] == HASH_SYMBOL and alnum_prefix(word[1:]) not in final:\n final.append(alnum_prefix(word[1:]))\n if alnum_prefix(word[1:]) == '':\n final.remove(alnum_prefix(word[1:]))\n return final", "def compute_hashtags(row):\n entity_series = pd.read_json(json.dumps(row['entities']), typ='series')\n hashtags = list(map(lambda entry: entry['text'], entity_series['hashtags']))\n return ','.join(hashtags)", "def get_top_tweets():\n Tweet.top_tweets = [(k, v) for k, v in sorted(Tweet.hashtag_counter.items(), key=lambda item: item[1], reverse=True)]\n top_10_tweets = {}\n top_10_tweets['top_tweets'] = []\n for tweet in Tweet.top_tweets[:10]:\n top_10_tweets['top_tweets'].append({'hashtag': \"#\"+tweet[0], 'count': tweet[1]})\n return top_10_tweets", "def populate_graph(self, num_users, avg_friendships):\n # Reset graph\n self.last_id = 0\n self.users = {}\n self.friendships = {}\n # !!!! IMPLEMENT ME\n\n # Add users\n for i in range(num_users):\n self.add_user(f'User {i+1}')\n\n # Create friendships\n # Create a list with all possible friendships\n possible_friendships = []\n for user_id in self.users:\n for other_id in range(user_id +1, self.last_id + 1):\n possible_friendships.append((user_id, other_id))\n\n # Shuffle the list\n random.shuffle(possible_friendships)\n # Grab the first N pairs for the list and create those friendships\n for i in range(num_users * avg_friendships // 2):\n friendship = possible_friendships[i]\n self.add_friendship(friendship[0], friendship[1])", "def hashtags(max: int = None):\n for hashtag in client.hashtags(max=max):\n print(json.dumps(hashtag))", "async def tweet_feeder(self): \n try:\n data=json.loads(self.request.body.decode('utf-8'))\n except: \n print(\"No data body!\")\n\n t=Tweet()\n t.tweet_id = data[\"tweet_id\"]\n t.text=data[\"text\"]\n #\n # update the hashtags cache\n #\n try:\n t.hashtags=data[\"hashtags\"] \n for htag in t.hashtags:\n #print(\"adding to hashtags: {} to cache:\".format(htag[\"text\"], ))\n if htag[\"text\"] in hash_cache:\n hash_cache[htag[\"text\"]] += 1\n else:\n hash_cache[htag[\"text\"]] = 1\n except:\n t.hashtags=[]\n \n #\n # update the user cache\n #\n try:\n user_id = \"@\" + data[\"user_screenname\"]\n if user_id in user_cache:\n user_cache[user_id] += 1\n else:\n user_cache[user_id] = 1\n except:\n print(\" ERR No User: should never happen\")\n\n try:\n t.user_screenname=data[\"user_screenname\"]\n except:\n t.user_screenname=\"\"\n try:\n t.profile_image_url_https = data[\"profile_image_url_https\"]\n except:\n t.profile_image_url_https = \"\"\n #\n # update the tweets cache\n #\n try:\n t.timestamp = data[\"timestamp\"]\n except:\n t.timestamp = datetime.datetime.utcnow()\n tweet_cache.append(t.to_dict())\n \n #\n # get the embed html from twitter oembed API\n #\n r=requests.get(\"https://publish.twitter.com/oembed?url=https://twitter.com/Interior/status/\"+ t.tweet_id )\n #print(r.json())\n \n #print(self.__class__.callbacks)\n await self.fire_callbacks(r.json())\n #self.success(message=\"Added tweet id: {} \".format(str(id)), data=t.to_json(), format=\"json\", pure=True)", "def tweet_split_hashtags(word, append_hashtag):\n if word.startswith('#') and len(word) > 1:\n res = ''\n res += '<hashtag> '\n res += infer_spaces(word[1:])\n if append_hashtag:\n res += ' '\n res += word\n return res\n else:\n return word", "def count_unique_users(data, n=10000):\n user_matrix = []\n user_array = []\n for key, val 
in data.items():\n user_list = []\n count = 0\n user_count = 0\n # print key\n print len(val)\n for ind, tweet in enumerate(val):\n # print user_count\n if user_count >= 110:\n # print \"MORE THAN A THOUSAND!!!\"\n continue\n if count == 0:\n this_user = tweet[1]\n our_string = \"\"\n if (tweet[1] == this_user) and (count < 200):\n our_string += tweet[2].lower()\n count += 1\n elif (tweet[1] != this_user): # and len(our_string) >= 14000:\n count = 0\n user_count += 1\n print ind, tweet[1],this_user\n user_matrix.append(our_string)\n user_array.append(key)\n user_list.append(this_user)\n # elif tweet[1] != this_user:\n # count = 0\n # print len(user_matrix)\n # print len(user_array)\n # print \"----------Break---------\"\n # last_user = None\n # unique_users = []\n # for user in user_list:\n # if user != last_user:\n # unique_users.append(user)\n # last_user = user\n # print len(unique_users)\n\n # user_list = []\n return user_matrix, user_array, n", "def _make_links(tweet):\n for pattern, repl in (USER_SUB, KEYWORD_SUB):\n tweet = re.sub(pattern, repl, tweet)\n return tweet", "def get_user_relationships(user):\n transactions = {}\n\n for transaction in Transaction.ready.filter(Q(created_by=user) | Q(sent_to=user)):\n other_user_handle = transaction.created_by.handle\n\n if user == transaction.created_by:\n other_user_handle = transaction.sent_to.handle\n\n if other_user_handle not in transactions:\n transactions[other_user_handle] = []\n\n transactions[other_user_handle].append(transaction)\n\n return transactions", "def populate_graph(self, num_users, avg_friendships):\n # Reset graph\n self.last_id = 0\n self.users = {}\n self.friendships = {}\n # !!!! IMPLEMENT ME\n # Add users\n # numberOfUsers=num_users\n # while num_users:\n # self.add_user(num_users)\n # num_users-=1\n \n # DistributionOfFriendships=[]\n\n # while len(DistributionOfFriendships)<len(self.users):\n # if DistributionOfFriendships!=True:\n # DistributionOfFriendships.append(random.randrange(0,5))\n # if len(DistributionOfFriendships)==len(self.users):\n # break\n # while sum(DistributionOfFriendships)/len(DistributionOfFriendships)!=avg_friendships:\n # randomNum=random.randrange(0,5)\n # if (sum(DistributionOfFriendships)+randomNum)/(len(DistributionOfFriendships)+1) == avg_friendships:\n # DistributionOfFriendships.append(randomNum)\n # # Create friendships\n\n # print(DistributionOfFriendships)\n # for i in self.users:\n # # count=len(self.friendships[i])\n # # print(\"HELLLO\",i,self.friendships[i])\n # # print(\"COUNT\", count, self.friendships[i], len(self.friendships[i]),i) \n # if DistributionOfFriendships[i-1]>0: \n # if len(self.friendships[i]) <DistributionOfFriendships[i-1]:\n # while len(self.friendships[i])!=DistributionOfFriendships[i-1]:\n # randomNum=random.randrange(1,numberOfUsers+1)\n # print(DistributionOfFriendships,randomNum, i,len(self.friendships[randomNum]), DistributionOfFriendships[randomNum-1],self.friendships)\n # if len(self.friendships[randomNum])<DistributionOfFriendships[randomNum-1] and randomNum!=i:\n # # print(\"randomNum\",randomNum ,\"i\",i,\"friendships\",self.friendships[i], \"count\", count,DistributionOfFriendships[i-1], self.friendships)\n # if randomNum not in self.friendships[i]:\n # # count+=1\n # # print(\"III\",i,\"Distribution\",DistributionOfFriendships[i-1],\"randomNum\",randomNum,\"count\",count)\n # self.add_friendship(i,randomNum)\n # # print(\"after\")\n for i in range(num_users):\n self.add_user(i)\n\n possible_friendships=[]\n for user_id in self.users:\n for 
friend_id in range(user_id+1, self.last_id+1):\n possible_friendships.append((user_id, friend_id))\n\n random.shuffle(possible_friendships)\n\n for i in range(num_users*avg_friendships//2):\n friendship= possible_friendships[i]\n self.add_friendship(friendship[0],friendship[1])", "def get_user_specifics(request):\n\n appuser = request.user.appuser\n users_hashtags = appuser.hashtag_set.all()\n users_profiles = appuser.twitterprofile_set.all()\n trending_hashtags_from_db = Hashtag.objects.all()\n dates = list(set([hashtag.save_date for hashtag in trending_hashtags_from_db]))\n return dates, trending_hashtags_from_db, users_hashtags, users_profiles", "def build_corpus(username, api):\n print('getting tweets for user: ', username)\n timeline = api.GetUserTimeline(screen_name=username, count=200)\n tweets = [t.text for t in timeline]\n corpus = ' '.join(tweets)\n return corpus", "def populateGraph(self, num_users, avg_friendships):\n # Reset graph\n self.lastID = 0\n self.users = {}\n self.friendships = {}\n\n # Add users\n for user in range(num_users):\n self.addUser(\"User %s\" % user) # user 0, user 1, user 2\n\n # Create friendships\n\n # Generate all possible friendship combinations\n possible_friendships = []\n # avoid dups by making sure that the first id is higher than the 2nd id\n for user_id in self.users:\n for other_id in range(user_id + 1, self.lastID + 1):\n possible_friendships.append((user_id, other_id))\n\n random.shuffle(possible_friendships)\n\n # Create friendships(edges) for the first X tuples of the list\n # X is determined by the formula: numUsers * avgFriendships // 2\n # Need to divide by 2 since each addFriendship() creates 2 friendships\n for i in range((num_users * avg_friendships) // 2):\n friendship = possible_friendships[i]\n self.addFriendship(friendship[0], friendship[1])", "def get_tweets():\r\n tweets = models.Tweet.query.all()\r\n output = []\r\n\r\n for tweet in tweets:\r\n tweet_data = {'id': tweet.id,\r\n 'content': tweet.text_content,\r\n 'username': tweet.username,\r\n 'timestamp': tweet.timestamp.isoformat(),\r\n 'likes_count': models.Like.query.filter(models.Like.post_id == tweet.id).count(),\r\n 'retweets_count': models.Retweet.query.filter(models.Retweet.post_id == tweet.id).count()}\r\n\r\n output.append(tweet_data)\r\n\r\n return {\"tweets\": output}", "def build_trigrams(words):\n trigrams = {}\n for i in range(len(words) - 2): # why -2 ?\n pair = ()\n pair = words[i:i + 2]\n key = \" \"\n key = key.join(pair)\n follower = []\n follower.append(words[i + 2])\n follower = words[i + 2]\n if key in trigrams.keys():\n existing_value = trigrams[key]\n list(existing_value)\n existing_value.append(follower)\n trigrams[key] = existing_value\n else:\n trigrams.update({key: list(follower.split(\" \"))})\n return trigrams", "def getNewsFeed(self, userId):\n users = [userId]\n if userId in self.follow_map:\n users.extend(self.follow_map[userId])\n \n heap = MinHeap(10)\n for uid in set(users):\n for tweet in self.user_tweets.get(uid, []):\n heap.push(tweet)\n return [e[0] for e in heap.items()]", "def pushTweets(tweets,user,cacheKey=False):\n \n tweetDump = filterTweets(tweets) # Extract mentions, URLs, replies hashtags etc...\n\n pushRenderedTweets2Neo.delay(user,tweetDump) \n pushRenderedTweets2Cass.delay(user,tweetDump)\n pushRenderedTweets2Solr.delay(tweetDump['tweets']+tweetDump['retweets'])\n\n if cacheKey: # These are the last Tweets, tell the scaper we're done.\n cache.set(cacheKey,'done')\n print '*** '+user+': DONE WITH TWEETS ***' \n \n #return 
True", "def populate_graph(self, num_users, avg_friendships):\n # Reset graph\n self.last_id = 0\n self.users = {}\n self.friendships = {}\n # !!!! IMPLEMENT ME\n\n # add users\n for i in range(num_users):\n self.add_user(f'User {i}')\n\n # create friendships list\n possible_friendships = []\n\n # for each friend-user combination, place a tuple in possible_friendships \n for user_id in self.users:\n for friend_id in range(user_id + 1,self.last_id + 1):\n possible_friendships.append([user_id, friend_id])\n\n import random\n # shuffle the possible friendships so the order isn't biased towards towards the early users\n random.shuffle(possible_friendships)\n\n # add friendships\n # designate a certain number of friendships determined by avg friendships per total users\n for i in range(num_users * avg_friendships // 2):\n # this sets the friendship variable to a premade possible friendship\n friendship = possible_friendships[i]\n # then we add this friendship to the friendship dictionary that defines the graph\n # with the first tuple value as the key and the second as the value\n self.add_friendship(friendship[0],friendship[1])", "def __init__(self):\n self.tweets = {}\n self.followees = {}\n self.timestamp = 0", "def gettweets(request):\n temp = json.loads(request.body)\n print (temp['hashtags'])\n return Response(tw_fetcher.gethashes(temp['hashtags']), status=status.HTTP_201_CREATED)", "def get_tweets():\n\n\tuser ='kaiserkumars'\n\t# api = twitter.Api(consumer_key='iJoZZuV7etVrJfE4K9ir8sIqa',\n\t# consumer_secret='uyJyWoP05z2MUKnggW7vHnIG2sckmM1aHRMgGveZLyrz8401Xs',\n\t# access_token_key='622588040-TYDgG1UlGUvA1hW8PA7mOG5CiMw0WiuPZlkoP8cc',\n\t# access_token_secret='laAmFjeLhWzOK7Y524VevdMdeLeNpnmCUmjee1AQU7osj')\n\tapi = twitter.Api(consumer_key=get_secret('consumer_key'),\n\t consumer_secret=get_secret('consumer_secret'),\n\t access_token_key=get_secret('access_token_key'),\n\t access_token_secret=get_secret('access_token_secret'))\n\n\tstatuses = api.GetUserTimeline(user_id=622588040,count=0)\n\t# print(statuses)\n\t# duplicate='UNIQUE constraint failed: mtwitter_weatherdata.location, core_weatherdata.metric, core_weatherdata.date'\n\tbulk_insert=[]\n\t# print(dir(TwitterData))\n\tfor s in statuses:\n\t\t# print(s)\n\t\tdt = parse(s.created_at)\n\t\t# print(dt)\n\t\tdata = TwitterData(org_name=s.user.name,profile_url=s.user.profile_image_url,tweet_id =s.id,screen_name=s.user.screen_name, tweet = s.text, date= dt, favCount =0)\n\t\tbulk_insert.append(data)\n\ttry:\n\t\tTwitterData.objects.bulk_create(bulk_insert)\n\t\tprint(\"Success.\")\n\texcept Exception as e:\n\t\t# if(str(e)==duplicate):\n\t\t# \tprint('Duplicate Data')\n\t\t# else:\n\t\tprint(str(e))\n\n\treturn statuses", "def download_and_prepare():\n # set source twitter IDS\n user = 759251 # @CNN\n news1 = 807095 # @nytimes\n news2 = 1367531 # @FoxNews\n news3 = 1652541 # @Reuters\n news4 = 3108351 # @WSJ\n news5 = 2467791 # @washingtonpost\n\n # grab all tweets from user\n userHistory = []\n tu = threading.Thread(target=get_all_tweets, args=(user, userHistory))\n # get all tweets from context users\n news1History = []\n t1 = threading.Thread(target=get_all_tweets, args=(news1, news1History))\n news2History = []\n t2 = threading.Thread(target=get_all_tweets, args=(news2, news2History))\n news3History = []\n t3 = threading.Thread(target=get_all_tweets, args=(news3, news3History))\n news4History = []\n t4 = threading.Thread(target=get_all_tweets, args=(news4, news4History))\n news5History = []\n t5 = 
threading.Thread(target=get_all_tweets, args=(news5, news5History))\n\n # run threads\n threads = [tu, t1, t2, t3, t4, t5]\n for th in threads:\n th.start()\n for th in threads:\n th.join()\n\n # clean urls of all tweets\n allTweets = [userHistory, news1History, news2History, news3History, news4History, news5History]\n for i in range(len(allTweets)):\n allTweets[i] = cleanse_tweets(allTweets[i])\n\n # construct context dict for train and test\n context_dict, context_dict_valid = group_by_date(allTweets)\n\n ##############################################################################\n # some of the following code adapted from tensorflow example file data_utils #\n ##############################################################################\n\n # set paths for storing data\n data_dir = \"tweet_data\"\n train_dir = \"train_dir\"\n train_path = os.path.join(train_dir, \"train\")\n dev_path = os.path.join(train_dir, \"test1\")\n\n # paths for storing initial data\n user_file_path = os.path.join(data_dir, \"data.user\")\n context_file_path = os.path.join(data_dir, \"data.context\")\n\n # move data into expected directories/make data available\n data_to_file(context_dict, context_dict_valid, allTweets, user_file_path, context_file_path, dev_path + \".user\", dev_path + \".context\")\n\n user_path = os.path.join(data_dir, \"vocab%d.user\" % vocab_size)\n context_path = os.path.join(data_dir, \"vocab%d.context\" % vocab_size)\n create_vocabulary(context_path, context_file_path, vocab_size, None) # None: user default tokenizer\n create_vocabulary(user_path, user_file_path, vocab_size, None)\n\n # Create token ids for the training data.\n user_train_ids_path = train_path + (\".ids%d.user\" % vocab_size)\n context_train_ids_path = train_path + (\".ids%d.context\" % vocab_size)\n data_to_token_ids(user_file_path, user_train_ids_path, user_path, None)\n data_to_token_ids(context_file_path, context_train_ids_path, context_path, None)\n\n print(\"made it\")\n\n # Create token ids for the development data.\n user_dev_ids_path = dev_path + (\".ids%d.user\" % vocab_size)\n context_dev_ids_path = dev_path + (\".ids%d.context\" % vocab_size)\n data_to_token_ids(dev_path + \".user\", user_dev_ids_path, user_path, None)\n data_to_token_ids(dev_path + \".context\", context_dev_ids_path, context_path, None)\n\n # TODO return paths to directories of input and output\n return (user_train_ids_path, context_train_ids_path,\n context_dev_ids_path, user_dev_ids_path,\n context_path, user_path)", "def getAllSocialPaths(self, userID):\n visited = {} # Note that this is a dictionary, not a set\n print(f\"user ID {userID}\")\n\n for i in range(1, len(self.users)):\n visited[i] = self.bfs(userID, i)\n\n return visited", "def remove_hashtags(text):\n return ' '.join(re.sub(r'[#][^\\s#@]+', ' ', text).split())", "def __init__(self, tweet_data):\n _hashtags = tweet_data['entities']['hashtags']\n _str_date = tweet_data['created_at']\n self.account = Account(tweet_data['user'])\n self.date = self.format_date(_str_date)\n self.hashtags = [\"#%s\" % (tag['text']) for tag in _hashtags]\n self.likes = tweet_data['favorite_count']\n # Note: replies number is only available with\n # the Premium and Enterprise tier products.\n # https://developer.twitter.com/en/docs/tweets/data-dictionary/overview/tweet-object # noqa\n self.replies = 0\n self.retweets = tweet_data['retweet_count']\n self.text = tweet_data['text']", "def generate_weighted_users_traces(self):\n def generate_weighted_user_trace():\n towers_ids = 
np.arange(self.number_towers)\n\n trace = []\n direction = []\n for cycle in range(self.number_cycles):\n if cycle == 0:\n # For the first towers the chance of selecting a tower is equally distributed\n tower = np.random.choice(towers_ids)\n trace.append(tower)\n direction.append(self.towers[tower])\n elif cycle == 1:\n last_tower = trace[cycle - 1]\n tower = np.random.choice(\n towers_ids, p=self.probabilities[last_tower])\n trace.append(tower)\n direction.append(self.towers[tower])\n else:\n new_point = self.towers_manager.get_new_point(direction)\n nearest_tower = \\\n self.towers_manager.get_nearest_tower(new_point)\n tower = np.random.choice(\n towers_ids, p=self.probabilities[nearest_tower])\n trace.append(tower)\n direction = [direction[1], self.towers[tower]]\n\n return trace\n\n return np.array([\n generate_weighted_user_trace()\n for _ in range(self.number_users)\n ])", "def list_user_tweets(username):\n userdata = query_db('select * from user where username = ?',\n [username], one=True)\n if userdata is None:\n abort(404)\n else:\n user_details = {\"username\": userdata['username'],\"user_id\":userdata['user_id']}\n\n followed = False\n if request.json.get('user_id') is not None:\n followed = query_db('''select 1 from follower where\n follower.who_id = ? and follower.whom_id = ?''',\n [request.json.get('user_id'), user_details.get('user_id')],\n one=True) is not None\n\n user_tweets = []\n if user_details is None:\n return jsonify({'message': 'User not found'}), 404\n tuples = query_db('''\n select message.*, user.* from message, user where\n user.user_id = message.author_id and user.user_id = ?\n order by message.pub_date desc limit ?''',\n [user_details['user_id'], PER_PAGE])\n\n for tuple in tuples:\n user_tweet = {}\n user_tweet[\"username\"] = tuple['username']\n user_tweet[\"email\"] = tuple['email']\n user_tweet[\"text\"] = tuple['text']\n user_tweet[\"pub_date\"] = tuple['pub_date']\n user_tweets.append(user_tweet)\n\n return jsonify({'user_tweets':user_tweets, 'followed' : followed, 'user_details':user_details}),200", "def make_graph_dict(data, dates):\n with open(data, 'rb') as tweet_file:\n graph_dict = {}\n RTpattern = re.compile(\"RT @\\w+\")\n tweet_reader = csv.reader(tweet_file, delimiter=',')\n for row in tweet_reader:\n # Extract retweets\n retweet = RTpattern.match(row[2])\n # Extract correct dates \n if dates == 'all':\n date_match = True\n else: \n for date in dates:\n date_match = re.compile('.*' + date + '.*').match(row[3])\n if date_match != None:\n break\n \n if (retweet != None) & (date_match != None):\n retweeter = '@' + row[1]\n tweeter = retweet.group()[3:]\n\n #build graph\n if retweeter in graph_dict.keys():\n graph_dict[retweeter] = graph_dict[retweeter] + [tweeter]\n else:\n graph_dict[retweeter] = [tweeter]\n if graph_dict == {}:\n return 'NaN'\n else:\n return graph_dict", "def hashtagSegment(text_string):\n # For example, we update wordsegment dict so it recognises altright as \"alt right\" rather than salt right\n ws.BIGRAMS['alt right'] = 1.17e8 \n\n ws.BIGRAMS['white supremacists'] = 3.86e6\n ws.BIGRAMS['tweets'] = 6.26e10\n ws.BIGRAMS['independece day'] = 6.21e7\n \n #We target hashtags so that we only segment the hashtag strings.\n #Otherwise the segment function may operate on misspelled words also; which\n #often appear in hate speech tweets owing to the ill education of those spewing it\n temp_str = []\n for word in text_string.split(' '):\n if word.startswith('#') == False:\n temp_str.append(word)\n else:\n temp_str = temp_str + 
segment(word)\n \n text_string = ' '.join(temp_str) \n\n return text_string", "def getHashTags(self, fromDate, toDate):\n return self.session.query(Tweet.text).\\\n filter('text like \\'%#%\\'').\\\n filter(Tweet.created_at > fromDate).\\\n filter(Tweet.created_at < toDate).all()", "def __init__(self):\n self.time = 0\n self.tweets = {}\n self.follows = {}", "def populateGraph(self, numUsers, avgFriendships):\n # Reset graph\n self.lastID = 0\n self.users = {}\n self.friendships = {}\n # !!!! IMPLEMENT ME\n # Add users\n #for i in range numUsers, call addUser and implement a random name\n # add their ID to a list, for building friendhips\n for i in range(1, numUsers + 1):\n self.addUser(\"Bob\") \n # Create friendships\n possible_friends = []\n\n total_friendships = numUsers * avgFriendships\n num_friendships = 0\n while num_friendships < total_friendships:\n userID = random.randint(1, self.lastID)\n friendID = random.randint(1, self.lastID)\n if self.addFriendship(userID, friendID):\n num_friendships += 2\n ''' \n for userID in self.users:\n for friendID in range(userID + 1, self.lastID + 1):\n possible_friends.append((userID, friendID))\n\n print(possible_friends)\n num_friendships = (numUsers * avgFriendships) // 2\n random.shuffle(possible_friends)\n friendships_to_create = possible_friends[:num_friendships]\n # or could use friendships_to_create = random.sample(possibleFriendships, (numUsers * avgFriendships) // 2)\n\n for friendship in friendships_to_create:\n self.addFriendship(friendship[0], friendship[1])\n\n '''", "def update_users_data(self) -> None:\n users_utts = defaultdict(list)\n users_convos = defaultdict(list)\n\n for utt in self.iter_utterances():\n users_utts[utt.user].append(utt)\n\n for convo in self.iter_conversations():\n for utt in convo.iter_utterances():\n users_convos[utt.user].append(convo)\n\n for user in self.iter_users():\n user.utterances = {utt.id: utt for utt in users_utts[user]}\n user.conversations = {convo.id: convo for convo in users_convos[user]}", "def TRACKED_USERS(tg_user: TgUser) -> ReplyKeyboardMarkup:\n markup = ReplyKeyboardMarkup(row_width=2)\n user_w = User.get(User.user_id == tg_user.id)\n\n if not user_w.get_tracking_users():\n raise ValueError(\"No users tracking for {}\".format(USER(tg_user)))\n\n for user_t in user_w.get_tracking_users():\n markup.add(user_t.username if user_t.username else str(user_t.user_id))\n \n return markup", "def populateGraph(self, numUsers, avgFriendships):\n # Reset graph\n self.lastID = 0\n self.users = {}\n self.friendships = {}\n\n # Add users\n for i in range(0, numUsers):\n self.addUser(\"Brian-\"+str(i+1))\n\n # Create friendships\n for i in range(0, avgFriendships):\n for u in self.users:\n self.friendships[u].add(random.choice([user for user in self.users]))", "def get_hashtag_tweets(self, hashtag,\n count=settings.TWITTER_DEFAULT_LIMIT):\n url = urljoin(self.base_url, \"/search/tweets.json\")\n response = self.session.get(\n url,\n params={\n \"q\": hashtag,\n \"count\": count,\n \"include_entities\": True\n },\n auth=self.__auth,\n )\n data = response.json()\n if response.ok:\n data = [Tweet(tweet_data) for tweet_data in data['statuses']]\n else:\n if 'error' in data:\n raise TwitterException(data['error'], code=response.status_code)\n elif 'errors' in data:\n error = data['errors'][0]\n raise TwitterException(error['message'], code=response.status_code)\n return data" ]
[ "0.7264354", "0.67232686", "0.65345025", "0.6294691", "0.60873413", "0.5904825", "0.5853261", "0.5806474", "0.57820344", "0.57578456", "0.5725518", "0.57033443", "0.5695641", "0.5693837", "0.56878614", "0.5613401", "0.5573096", "0.5560261", "0.55522144", "0.5523659", "0.54931504", "0.5492921", "0.5481418", "0.5471871", "0.5469077", "0.540402", "0.5388667", "0.5383445", "0.5383229", "0.53293353", "0.5285005", "0.5275286", "0.5267177", "0.52598786", "0.522838", "0.5225588", "0.5219893", "0.52167845", "0.5211929", "0.5143248", "0.5142489", "0.51409906", "0.512967", "0.5119103", "0.51133746", "0.5106512", "0.5102434", "0.5060128", "0.5052501", "0.50381595", "0.5028943", "0.50254166", "0.502076", "0.5015107", "0.50136286", "0.50116146", "0.5009023", "0.50056064", "0.4992784", "0.49892765", "0.49867976", "0.49629024", "0.49612388", "0.49603614", "0.4958663", "0.49560067", "0.49544838", "0.49439722", "0.49338052", "0.4926831", "0.49209288", "0.4920753", "0.49200484", "0.49104142", "0.49067673", "0.48960605", "0.4893223", "0.48925027", "0.4892087", "0.48880345", "0.48815894", "0.48703486", "0.4866734", "0.4864084", "0.48573747", "0.4845993", "0.48399952", "0.48378205", "0.48294672", "0.48254812", "0.48243564", "0.48166394", "0.4813641", "0.48012152", "0.48010483", "0.4797243", "0.4796313", "0.4790038", "0.47862762", "0.47670814" ]
0.56399894
15
For a given tweet represented as a list of lemmas, recommends hashtags.
def transform(self, x: Union[Tuple[Tuple[str, ...], ...], Tuple[str, ...]], **kwargs) -> np.ndarray: lemmatised = list(x[:]) if isinstance(lemmatised[0], str): for i, xi in enumerate(lemmatised): lemmatised[i] = get_wcrft2_results_for_text(xi) if isinstance(lemmatised[0], list): for i, xi in enumerate(lemmatised): lemmatised[i] = ' '.join(xi) query_hashtags = kwargs.get("query", None) if query_hashtags is not None: assert len(query_hashtags) == len(x), \ "If at least 1 query is given, the array should have the same dimension as input `x`" if isinstance(query_hashtags, str): query_hashtags = [query_hashtags] * len(lemmatised) # as in fit, vectorizer has normalization inside ... tf_idf_vectors = self._hashtags_tf_idf_vectorizer.transform(lemmatised) # ... so this simplifies to cosine similarity - no normalisation required similarities = self._hashtags_tf_idf_representation.dot(tf_idf_vectors.T).T.toarray() preference_vectors = self._get_preference_vectors(similarities, query_hashtags) similarity_rank_vertices = self._random_walk(preference_vectors) best_indices = np.argsort(-similarities * similarity_rank_vertices, axis=1) result = self._hashtag_labels[best_indices].tolist() return self.post_process_result(result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hashtags(self, candidate_list):\n if Tweet.hashtagre is None:\n Tweet.hashtagre = re.compile('|'.join(map(re.escape, candidate_list)))\n return [\n [m.group(0).replace('#', '', 1), m.span()]\n for m in Tweet.hashtagre.finditer(self.text)\n ]", "def getHashtagsAndMentions(tweets):\n hashtags = Counter()\n mentions = Counter()\n plain = Counter()\n\n pattern = re.compile(r\"[^#@\\w'-]+\")\n\n for t in tweets:\n words = pattern.split(t.message)\n for word in words:\n # Ignore null strings caused by split characters at the end of a\n # message and remove standalone hyphens.\n if word and not word.startswith(\"-\"):\n # Increment count for the word in the Counter.\n if word.startswith(\"#\"):\n hashtags.update({word: 1})\n elif word.startswith(\"@\"):\n mentions.update({word: 1})\n else:\n # TODO: apply nltk.corpus.stopwords.words() here,\n # across languages. Consider that the stopwords cut off\n # before apostrophe, therefore check if the word\n # starts with the stopword.\n plain.update({word: 1})\n\n return hashtags, mentions, plain", "def separate_hastags_mentions_urls(tweet):\n \n text = tweet.lower()\n hashtag_list = re.findall(\"#([a-zA-Z0-9_]{1,50})\", text)\n \n text = re.sub(r'http\\S+', '', text)\n clean_tweet = re.sub(\"@[A-Za-z0-9_]+\",\"\", text)\n clean_tweet = re.sub(\"#[A-Za-z0-9_]+\",\"\", clean_tweet)\n \n return clean_tweet, hashtag_list", "def filter_by_hashtag(tweets: list, hashtag: str) -> list:\n tweets_with_hashtag = {} # findall(): Kui tekstis on rohkem kui üks regulaaravaldisele vastav alamsõne saab kõikide vastete järjendi moodustada funktsiooniga findall()\n pattern = r\"#\\w+\" # \\w : tähed, numbrid, alakriips, + : 1 või rohkem\n for tweet in tweets: # r\"string\" on \"raw\" tüüpi string, mis tähendab, et kurakaldkriipsud(\"\\\") jäetakse teksti alles.\n find_hashtag = re.findall(pattern, tweet.content) # word:\\w\\w\\w. 
Regulaaravaldisele vastab täpne sõne \"word:\" ning sellele järgnevad 3 suvalist tähte.\n if find_hashtag:\n tweets_with_hashtag.setdefault(ht, []).append(tweet)\n return tweets_with_hashtag[hashtag]", "def extract_hashtags(tweet):\n tknzr = TweetTokenizer()\n hashtags = [token.lower() for token in tknzr.tokenize(tweet) if re.match(hashtag_re, token)]\n return hashtags", "def preprocess_tweet(tweet):\n\n\n clean_tweet, hashtags = separate_hastags_mentions_urls(tweet)\n clean_tweet = remove_emoji_punc(clean_tweet)\n return clean_tweet, hashtags", "def buildHashtagsDict(tweets):\n hashtags = {}\n for tweet in tweets:\n if tweet['entities']['hashtags']:\n for hashtag in tweet['entities']['hashtags']:\n tag = hashtag['text'].lower().strip()\n if tag not in hashtags:\n hashtags[tag] = 1\n else:\n hashtags[tag] += 1\n return hashtags", "def get_hashtags(list):\n hashtags = []\n for h in list:\n hashtags.append(h['text'])\n return hashtags", "def tweet_split_hashtags(word, append_hashtag):\n if word.startswith('#') and len(word) > 1:\n res = ''\n res += '<hashtag> '\n res += infer_spaces(word[1:])\n if append_hashtag:\n res += ' '\n res += word\n return res\n else:\n return word", "def clean_tweet(tweet):\n word_out, hashtags = [], []\n for word in tweet.split():\n if word[0] == '#':\n hashtags.append(word)\n elif ((len(word) != 0) and (word[0] != '@')) and (\n len(word) < 4 or ((len(word) > - 4) and (word[:4] != 'http'))):\n word_out.append(word)\n return word_out, hashtags", "def analyze_hashtag(self, hashtag, count=200):\n tweets = []\n\n for x in xrange(0, int(count / 100)):\n tweets.extend(self.tweet_fetcher.get_tweets(hashtag))\n\n analyzed_tweets = sort_tweets(self.sa.classify(tweets))\n\n self.analyzed_tweets = analyzed_tweets\n\n return analyzed_tweets", "def remove_hashtag(lista_tweets):\n\n novos_tweets = []\n\n for tweet in lista_tweets:\n texto = re.sub(r\"#\\S+\", \"\", tweet)\n novos_tweets.append(texto)\n\n return novos_tweets", "def get_hashtags(self):\n\t\t# Only first level comments should be checked for hashtag. Maybe.\n\t\tpassl", "def original_three_tweets():\n test_tweets = [\n \"is #bigdata finally the answer to end poverty? \\\n @lavanyarathnam http://ow.ly/o8gt3 #analytics\",\n \"interview: xia wang, astrazeneca on #bigdata and the promise of effective \\\n healthcare #kdn http://ow.ly/ot2uj\",\n \"big data is not just for big business. 
on how #bigdata is being deployed for \\\n small businesses: http://bddy.me/1bzukb3 @cxotodayalerts #smb\"\n ]\n return test_tweets", "def get_tweet_list(user_handle):\n client = language.LanguageServiceClient()\n\n tweet_list = twitter.get_tweets(handle=user_handle)\n\n if tweet_list[0] == \"34\":\n return tweet_list\n\n for i in range(len(tweet_list)):\n\n content = tweet_list[i].get(\"text\")\n\n document = types.Document(\n content=content, type=enums.Document.Type.PLAIN_TEXT)\n annotations = client.analyze_sentiment(document=document)\n\n # Print the results\n # print_result(annotations)\n\n score = annotations.document_sentiment.score\n magnitude = annotations.document_sentiment.magnitude\n\n tweet_list[i][\"score\"] = score\n tweet_list[i][\"magnitude\"] = magnitude\n\n # print(tweet_list[i])\n\n return tweet_list", "def addToGraph(tid,uid,mentions) :\n global G,found\n\n user = r.get(int(uid))\n \n if user == None :\n return\n\n user = re.findall('\"((?:(?!(?:\",\")).)*)\"', user)\n \n # lower the hashtags\n mentions = [t.lower() for t in mentions if t not in [\"\"]]\n \n usern = user[1].lower()\n\n G.add_node(usern)\n\n found = found + 1\n\n # iterate through mentions\n for m in mentions :\n # add hashtag to graph\n G.add_node(m)\n \n # update edge weight for every hashtag 2-permutation of the tweet\n if G.has_edge(usern,m) :\n G[usern][m]['weight'] += 1\n else :\n G.add_edge(usern,m,weight=1)", "def like_tweet(self, tag):\n self.bot.get('https://twitter.com/search?q=' + tag + '&src=typed')\n self.__wait(3, 3)\n for i in range(1, 3):\n self.bot.execute_script('window.scrollTo(0,document.body.scrollHeight)')\n self.__wait(2, 3)\n tweets = self.bot.find_elements_by_tag_name('article')\n\n links = []\n for tweet in tweets:\n sub_links = tweet.find_elements_by_tag_name('a')\n links += [sub_link.get_attribute('href')\n for sub_link in sub_links if 'status' in sub_link.get_attribute('href')]\n\n print('Started to like {} tweets'.format(len(links)))\n\n for link in links:\n self.bot.get(link)\n self.__wait(3, 5)\n likes = self.bot.find_elements_by_css_selector('div[data-testid=\"like\"')\n for like in likes:\n like.click()\n self.__wait(3, 5)", "def extract_hashtags(text: str) -> List[str]:\n final = []\n result = list(text.split())\n \n \n for word in result: \n if word[0] == HASH_SYMBOL and alnum_prefix(word[1:]) not in final:\n final.append(alnum_prefix(word[1:]))\n if alnum_prefix(word[1:]) == '':\n final.remove(alnum_prefix(word[1:]))\n return final", "def get_readable_hashtags(cls, hashtag_list):\n\n list_of_readable_hashtags = []\n\n for hashtag in hashtag_list:\n hashtag = '#' + hashtag + \" \"\n list_of_readable_hashtags.append(hashtag)\n\n return list_of_readable_hashtags", "def extract_hashtags(self):\n most_common_words = get_most_common_words(\n self.get_full_text_from_source())\n queryset = Hashtag.objects.all()\n for word in most_common_words:\n tag = queryset.get_or_create(word=word.lower())[0]\n tag.documents.add(self)", "def sents_2(list_of_tweets):\n stopwords = nltk.corpus.stopwords.words('english')\n contextTerms = []\n for sent in list_of_tweets:\n for word in sent.split():\n word_lemmatizer = WordNetLemmatizer()\n word = word_lemmatizer.lemmatize(word.lower())\n if wordnet.synsets(word) and word not in stopwords and len(word)>2:\n contextTerms.append(word)\n\n #print( contextTerms)\n return contextTerms", "def hashtags(string):\r\n\r\n lst = string.split(' ');\r\n result = [li for li in lst if li.startswith('#')];\r\n\r\n return result", "def hashtags(self, 
users_list, hashtags_list, feature_size=None, relative_freq=True):\n # get the counts of each hashtag shared\n # Collapse the list of lists: hashtags_list\n hashtag_counts = sorted_count([h for l in hashtags_list for h in l if h])\n\n # fitler against feature_size, Default is None=Selects all.\n hashtag_counts = hashtag_counts[:feature_size]\n hashtag_vector = tuple([h for h,_ in hashtag_counts])\n\n # zip users,hastags\n users_hashtags_zip = list(zip(users_list, hashtags_list))\n\n # findng hashtag feature for each user\n hashtag_features = {}\n for user in tqdm(set(users_list), desc=\"hashtag_features\", leave=LEAVE_BAR):\n user_hashtags = [h for u,hts in users_hashtags_zip for h in hts if u==user]\n hashtag_features[user] = np.array( [ user_hashtags.count(h) for h in hashtag_vector ] )\n if relative_freq and np.sum(hashtag_features[user])>0:\n hashtag_features[user] = hashtag_features[user]/np.sum(hashtag_features[user])\n \n return hashtag_features", "def get_mentions(texts, word):\r\n mentions = {}\r\n for text, label, text_id in texts:\r\n if word in text.lower():\r\n if label not in mentions:\r\n mentions[label] = [text_id]\r\n else:\r\n if text_id not in mentions[label]:\r\n mentions[label].append(text_id)\r\n return mentions", "def tokenize(tweet):\n tweet = \" \".join(re.split(\"[^a-zA-Z]*\", tweet.lower())).strip()\n tokens = [stemmer.stem(t) for t in tweet.split()]\n return tokens", "def tokenize(tweet):\n tweet = \" \".join(re.split(\"[^a-zA-Z]*\", tweet.lower())).strip()\n tokens = [stemmer.stem(t) for t in tweet.split()]\n return tokens", "def like_retweet(self):\n \n bot = self.bot \n logging.debug('Get Hashtag')\n hashtag = Twitterbot.getHashtags(self)\n logging.debug('Bot for Like_retweet initalized')\n # fetches the latest tweets with the provided hashtag \n bot.get( \n 'https://twitter.com/search?q=%23'+hashtag+'&src=trend_click&vertical=trends'\n ) \n \n time.sleep(3) \n \n # using set so that only unique links \n # are present and to avoid unnecessary repetition \n links = set() \n \n # obtaining the links of the tweets \n for _ in range(3): \n # executing javascript code \n # to scroll the webpage \n bot.execute_script( \n 'window.scrollTo(0, document.body.scrollHeight)'\n ) \n \n time.sleep(4) \n \n # using list comprehension \n # for adding all the tweets link to the set \n # this particular piece of code might \n # look very complicated but the only reason \n # I opted for list comprehension because is \n # lot faster than traditional loops \n [ \n links.add(elem.get_attribute('href')) \n for elem in bot.find_elements_by_xpath(\"//a[@dir ='auto']\") \n ] \n i = 0\n # traversing through the generated links \n for link in links:\n #Nothing for the Hashtag was found, another run is required\n #if len(links ==6):\n # break\n # Twitterbot.like_retweet(self)\n # opens individual links \n #print(len(links))\n bot.get(link) \n time.sleep(4)\n if i == 3:\n break\n i += 1\n try: \n # retweet button selector \n bot.find_element_by_css_selector( \n '.css-18t94o4[data-testid =\"retweet\"]'\n ).click() \n # initializes action chain \n actions = ActionChains(bot) \n # sends RETURN key to retweet without comment \n actions.send_keys(Keys.RETURN).perform() \n \n # like button selector \n bot.find_element_by_css_selector( \n '.css-18t94o4[data-testid =\"like\"]'\n ).click() \n # adding higher sleep time to avoid \n # getting detected as bot by twitter \n logging.info(f'Liked and retweeted:\"{link}\"')\n time.sleep(10) \n except: \n time.sleep(2) \n \n # fetches the main homepage \n 
bot.get('https://twitter.com/')", "def gettweets(request):\n temp = json.loads(request.body)\n print (temp['hashtags'])\n return Response(tw_fetcher.gethashes(temp['hashtags']), status=status.HTTP_201_CREATED)", "def hashtags(self):\n return [tag[\"text\"] for tag in self.status.hashtags]", "def sort_hashtags_by_popularity(tweets: list) -> list:\n hashtags_by_popularity = {}\n pattern = r\"#\\w+\"\n for tweet in tweets:\n find_hashtag = re.findall(pattern, tweet.content)\n if not find_hashtag:\n continue\n else:\n for ht in find_hashtag:\n hashtags_by_popularity.setdefault(ht, []).append(tweet.retweets)\n print(hashtags_by_popularity)\n for k, v in hashtags_by_popularity.items():\n hashtags_by_popularity[k] = sum(v)\n print(hashtags_by_popularity)\n sorted_ht = sorted(hashtags_by_popularity.items(), key=lambda x: x[-1], reverse=True)\n print(hashtags_by_popularity)\n return [ht[0] for ht in sorted_ht]", "def html_ann_tweet(tweets):\r\n for tweet in tweets:\r\n\r\n # Fairly efficient way of dealing with the fact that these keys might not exist\r\n try:\r\n text = tweet['text']\r\n except:\r\n pass\r\n\r\n try:\r\n text = tweet['full_text']\r\n except:\r\n pass\r\n\r\n try:\r\n text = tweet['extended_tweet']['full_text']\r\n except:\r\n pass\r\n\r\n\r\n # Hashtags\r\n tweet['text_html_annotated'] = re.sub(r'\\B#\\w\\w+',\r\n '<span class=\"hashtag\">\\g<0></span>',\r\n text)\r\n\r\n # Usernames\r\n tweet['text_html_annotated'] = re.sub(r'(?<=^|(?<=[^a-zA-Z0-9-_\\.]))@'\r\n r'([A-Za-z]+[A-Za-z0-9]+)',\r\n '<span class=\"user\">\\g<0></span>',\r\n tweet['text_html_annotated'])\r\n\r\n # Links\r\n tweet['text_html_annotated'] = re.sub(\r\n r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|'\r\n r'(?:%[0-9a-fA-F][0-9a-fA-F]))+', '<a href=\"\\g<0>\">\\g<0></a>',\r\n tweet['text_html_annotated'])\r\n\r\n return tweets", "def searchbrown_phrase(tags):\n l = len(tags)\n brown_tagged_words = brown.tagged_words(categories='news')\n hitwords = []\n for i in range(len(brown_tagged_words)-l+1):\n searchtags = [tag for _,tag in brown_tagged_words[i:i+l]]\n if tags == searchtags:\n hitwords.append(tuple([w.lower()\n for w,_ in brown_tagged_words[i:i+l]]))\n return hitwords", "def suggest_tags(data):\r\n tag_set = set()\r\n if not data:\r\n return tag_set\r\n\r\n # The string might be a url that needs some cleanup before we parse for\r\n # suggestions\r\n parsed = urlparse(data)\r\n\r\n # Check if title is url. 
If title is not a string, url, and title will be\r\n # the same so no need to consider tags from url.\r\n if parsed.hostname:\r\n tag_set.update(_generate_nouns_from_url(parsed.path))\r\n else:\r\n # If the title is not a url extract nouns from title and the url.\r\n tag_set.update(_generate_nouns_from_url(data))\r\n\r\n return tag_set", "def extract_hashtag(text):\n if text is not None:\n text = text.replace('\\n', ' ').replace('\\r', '')\n text = text.split(\" \")\n text = [word for word in text if \"#\" in word]\n if len(text) == 0:\n text = [\"no tags\"]\n else:\n text = [\"no tags\"]\n return text", "def clean_tweet(tweet):\r\n reply_pattern = re.compile(\"^@([a-zA-Z0-9]*) (.*)\")\r\n regexhandler = regex.RegexHandler()\r\n # add mark if tweets starts with a mention (@user)\r\n if reply_pattern.match(tweet.tweet[\"text\"]) is not None:\r\n temp = \"MarkReply \" + tweet.tweet[\"text\"]\r\n else:\r\n temp = tweet.tweet[\"text\"]\r\n # language dependent\r\n\r\n if floodtags.core.statics.StaticData.locations:\r\n for location in floodtags.core.statics.StaticData.locations:\r\n if location in temp:\r\n temp += \" MarkLocation\"\r\n\r\n try:\r\n stemmer = SnowballStemmer(floodtags.core.statics.StaticData.language.lower())\r\n # stem words\r\n temp = \" \".join(\r\n [stemmer.stem(x) if x not in tweet.tweet[\r\n \"keywords\"] and \"MarkReply\" not in x and \"MarkLocation\" not in x else x for x in temp.split()])\r\n except ValueError:\r\n print(\"language not found:\", floodtags.core.statics.StaticData.language)\r\n # pass\r\n\r\n # store language\r\n temp = \"Mark\" + tweet.language + \" \" + temp\r\n\r\n # store keyword\r\n\r\n # replace each website with 'MarkWebsite' to create more similarity\r\n temp = regexhandler.replace(temp, 'MarkWebsite', regex.Expressions.website)\r\n # replace each photo url with 'MarkPhoto' to create more similarity\r\n for i in range(len(tweet.tweet[\"photos\"])):\r\n temp = Vectorizer.rreplace(temp, \"MarkWebsite\", \"MarkPhoto\", 1)\r\n # replace each height with 'MarkHeight' to create more similarity\r\n temp = regexhandler.replace(temp, \"MarkHeight\", regex.Expressions.waterheight)\r\n # replace each time with 'MarkTime' to create more similarity\r\n temp = regexhandler.replace(temp, \"MarkTime\", regex.Expressions.time)\r\n # replace each date with 'MarkDate' to create more similarity\r\n temp = regexhandler.replace(temp, \"MarkDate\", regex.Expressions.date)\r\n # replace each number with 'MarkNumber' to create more similarity\r\n temp = regexhandler.replace(temp, \"MarkNumber\", regex.Expressions.number)\r\n temp = re.sub('\\n', ' ', temp)\r\n results = re.findall(\"(^|[^@\\w])@(\\w{1,15})\", temp)\r\n # add mark for each user name\r\n if results is not None:\r\n for i in range(len(results)):\r\n temp += \" MarkUserName\"\r\n results = re.findall(\"#(\\S*)\", temp)\r\n # add mark for each hashtag\r\n if results is not None:\r\n for i in range(len(results)):\r\n temp += \" MarkHashTag\"\r\n # add sender as feature\r\n temp = \"Sender\" + tweet.tweet[\"source\"][\"username\"] + \" \" + temp\r\n # remove unnecessary characters and chance text to lower case\r\n return re.sub('[#\\.,:]', '', temp)", "def clean_hashtags(self, tweet):\n self.hashtags = [tag.strip('#') for tag in tweet.split()\n if tag.startswith('#')]\n\n for hashtag in self.hashtags:\n tweet = tweet.replace('#'+hashtag, '')\n\n tweet = self.clean_unnecessary_whitespaces(tweet)\n return tweet", "def like_tweets(pos_tweets):\n\n for tweet in pos_tweets:\n 
twitter.CreateFavorite(status_id=tweet.id)\n\n return", "def tokenize_tweets_by_word(self):\n sent_tok_list = []\n all_tok_list = []\n\n for t in range(len(self.all_tweets) - 1):\n sent_list = sent_tokenize(self.all_tweets[t][0][1]) # all_tweets[tweet][look at tuple][look at text]\n all_tok_list.append([])\n for s in sent_list: # for each sentence\n word_tok_list = word_tokenize(s)\n pos_tag_list = pos_tag(word_tok_list)\n all_tok_list[t].append(pos_tag_list)\n print(pos_tag_list)\n\n return all_tok_list", "def get_hashtag_names_for_recipe(cls, recipe_hashtags):\n\n list_of_hashtags = []\n for recipe_hashtag in recipe_hashtags:\n hashtag_name = recipe_hashtag.hashtags.name\n list_of_hashtags.append(hashtag_name)\n return list_of_hashtags", "def expand_tweet(tweet):\r\n return hashtag_regex.sub(lambda hashtag: expand_hashtag(hashtag), tweet)", "def update_word_stats(self, tweet):\n\n if not self.text:\n return\n\n words = self.text.split()\n\n # process single words\n for word in words:\n self.update_stats('words', word)\n\n # process 2 word lists\n pairs = self.get_phrase_list(words, 2)\n if pairs is not None:\n for word_pair in pairs:\n self.update_stats('word_pairs', self.get_index_from_list(word_pair))\n\n # process 3 word lists\n triples = self.get_phrase_list(words, 3)\n if triples is not None:\n for word_triple in triples:\n self.update_stats('word_triples', self.get_index_from_list(word_triple))", "def process_tweet(tweet):\n d = {}\n d['hastags'] = [hashtag['text'] for hashtag in tweet['entities']['hashtags']]\n d['text'] = tweet['text']\n d['user'] = tweet['user']['screen_name']\n d['user_loc'] = tweet['user']['location']\n return d", "def keep(tweet):\n text = tweet[\"Text\"].lower()\n hashtags = \" \".join(tweet[\"Hashtags\"]).lower()\n for word in keep_words:\n if word in text:\n # print(\"{} in text {}\".format(word, text))\n return True\n if word in hashtags:\n # print(\"{} in hashtags {}\".format(word, hastags))\n return True\n return False", "def set_analyzed_tweets(self, tweets):\n slim_tweets = [SlimTweet(tweet) for tweet in tweets]\n self.analyzed_tweets = sort_tweets(slim_tweets)", "def tokenize(t):\n tweet_tok = TweetTokenizer(strip_handles=True, reduce_len=True)\n tokens = tweet_tok.tokenize(t)\n wnl = WordNetLemmatizer()\n stems = []\n for item in tokens:\n stems.append(wnl.lemmatize(item))\n return stems", "def expand_hashtags (self, sentences):\r\n return sentences.apply (lambda x: regex.sub (self.hashtag_pattern, lambda match: ' ' + self.camel_case_split (match.group (1)), x))", "def get_suggestions(\n user: 'User',\n hashtag: str,\n valid_user: Callable[['User'], bool],\n since: str,\n max_suggestions: int\n ) -> List[tweepy.models.User]:\n api = get_api(user)\n suggestions = []\n seen = set()\n max_iters = 5000\n\n for tweet in tweepy.Cursor(api.search, q=hashtag, lang=\"en\", since=since).items():\n if tweet.user.screen_name not in seen and valid_user(tweet.user):\n suggestions.append(tweet.user)\n seen.add(tweet.user.screen_name)\n if len(suggestions) >= max_suggestions or len(seen) > max_iters:\n break\n\n return suggestions", "def perform_bing_sentiment_lexicon_lookup(tweets):\n words = []\n for t in tweets:\n for phrase in t.tagged_words:\n for word in phrase:\n try:\n if word[\"pos\"] in TYPECRAFT_SENTIWORDNET:\n words.append(word['word'])\n except KeyError:\n continue \n \n \n lex = Lexicon(BingTranslater(words), SentiWordNetLexicon())\n words_with_sentimentvalues=[]#list of dicts\n print \"Getting sentiment values\"\n for t in tweets:\n sentiwords =[]\n 
sentiwords_with_values={}\n for phrase in t.tagged_words:\n for word in phrase:\n try:\n if word[\"pos\"] in TYPECRAFT_SENTIWORDNET:\n sentiwords.append(word['word'])\n except KeyError:\n continue\n for sentiword in sentiwords:\n sentivalues = lex.translate_and_get_lexicon_sentiment(sentiword)\n if sentivalues!=None:\n print \"Adding sentivalues: \",sentivalues\n sentiwords_with_values[sentiword] = sentivalues\n words_with_sentimentvalues.append(sentiwords_with_values)\n \n return words_with_sentimentvalues", "def correct_spell(tweet):\n\n\n tweet = tweet.split()\n for i in range(len(tweet)):\n if tweet[i] in downloaded_dictionary.keys():\n tweet[i] = downloaded_dictionary[tweet[i]]\n tweet = ' '.join(tweet)\n return tweet", "def is_likely_pokemon_related(tweet, hashtags):\n # check hash tags\n for tag in hashtags:\n if tag.text in TAGS_WE_CARE_ABOUT:\n return True\n # check tweet content\n for word in TEXT_WE_CARE_ABOUT:\n if word in tweet:\n return True\n return False", "def get_hashtags(text):\n # alternatives\n \"#[^ :\\n\\t\\.,\\?\\/’'!]+\"\n \"#[a-zA-Z1-9]+\"\n\n # frankly I\"m happy with this as it's simple and I will go down a rabbit hole on these other ones.\n # it seems to do a decent job\n htag = re.compile(r'#[a-zA-Z0-9\\U0001f3c0]+')\n # tested it on all of these: https://top-hashtags.com/hashtag/basketball/\n # got all of them (the unicode one is the basketball emoji)\n\n return list(set(re.findall(htag, text)))", "def tweet_preprocess(tweet, append_hashtag, test_file=False):\n if test_file:\n tweet = tweet.split(',', 1)[1]\n tweet = re.sub(r'<3|< 3', '<heart>', tweet)\n res = []\n for word in tweet.split(' '):\n w = re.sub(r'[\\.\\*,%/\\\\\"\\-\\_]+', ' ', word)\n w = tweet_grammar_rules(w)\n w = tweet_clean_numbers(w)\n w = tweet_clean_dup_chars(w)\n w = tweet_split_hashtags(w, append_hashtag)\n res.append(w)\n tweet = ' '.join(res).strip()\n tweet = re.sub(r'[ ]+', ' ', tweet)\n return tweet", "def extract_hashtags(self, transform = lambda x: x):\r\n return [transform(hashtag['hashtag']) for hashtag in self.extract_hashtags_with_indices()]", "def get_tweets_by_hashtag_route(hashtag):\n response, code = get_tweets_by_hashtag(\n hashtag, request.args.get('limit', 30))\n return jsonify(response), code", "def lemmatization(tokenized_word_list):\n porter=nltk.stem.PorterStemmer()\n filtered_tokens = [porter.stem(word) for word in tokenized_word_list]\n return filtered_tokens", "def extract_hashtags(caption):\n\thashtags = []\n\tif caption is None:\n\t\treturn hashtags\n\telse:\n\t\tfor tag in re.findall(\"#[a-zA-Z0-9]+\", caption):\n\t\t\thashtags.append(tag)\n\treturn hashtags", "def preprocess_tweet(self, tweet):\n\n\t\treplaced_urls = [] # Create an empty list\n\t\treplaced_mentions = [] # Create an empty list\n\n\t\t# Replace emojis\n\t\ttweet = emoji.demojize(tweet)\n\n\t\t# Tokenize using NLTK\n\t\ttokenizer = TweetTokenizer(preserve_case=False, reduce_len=True)\n\t\ttokens = tokenizer.tokenize(tweet)\n\n\t\t# Iterate over tokens\n\t\tfor index, token in enumerate(tokens):\n\t\t\t# Replace URLs\n\t\t\tif token[0:4] == \"http\":\n\t\t\t\treplaced_urls.append(token)\n\t\t\t\ttokens[index] = \"<URLURL>\"\n\t\t\t\t# ↳ *tokens[index]* will directly modify *tokens*, whereas any changes to *token* will be lost.\n\n\t\t\t# Replace mentions (Twitter handles; usernames)\n\t\t\telif token[0] == \"@\" and len(token) > 1:\n\t\t\t\t# ↳ Skip the single '@' tokens\n\t\t\t\treplaced_mentions.append(token)\n\t\t\t\ttokens[index] = \"<UsernameMention>\"\n\n\t\t# Detokenize using NLTK's 
Treebank Word Detokenizer\n\t\tdetokenizer = TreebankWordDetokenizer()\n\t\tprocessed_tweet = detokenizer.detokenize(tokens)\n\n\t\t# *replaced_urls* and *replaced_mentions* will contain all of the replaced URLs and Mentions of the input string.\n\t\treturn processed_tweet", "def preprocess_tweet(tweet):\n clean_tweet = tp.clean(tweet)\n\n # perform lemmatization\n tokenizer = TweetTokenizer()\n tweet_tokens = tokenizer.tokenize(clean_tweet)\n\n lemmatized_tweet = lemmatize_tweet(tweet_tokens)\n\n # remove stopwords\n preprocessed_tweet = remove_stopwords(lemmatized_tweet)\n return preprocessed_tweet", "def build_trigrams(words):\n trigrams = {}\n for i in range(len(words) - 2):\n pair = words[i:i + 2]\n follower = words[i + 2]\n # add tuple type to make it immutable / use append to add more options to the follower selections\n trigrams.setdefault(tuple(pair), []).append(follower)\n #print(trigrams) - for testing\n return trigrams", "def hamming_distance(words: Iterator[str], vocabulary: Dict[str, int]):\n\n for word in words:\n distances = []\n suggestions = []\n vocab_list = list(vocabulary)\n for (i,vocab) in enumerate(vocab_list):\n if len(vocab) == len(word):\n distances.append(hamming(word, vocab))\n else:\n distances.append(120)\n \n idx = np.array(distances).argsort()[:5]\n \n for i in range(5):\n for j in range(i+1,5):\n if distances[idx[i]] == distances[idx[j]]:\n if vocabulary.get(vocab_list[idx[i]]) < vocabulary.get(vocab_list[idx[j]]):\n temp = idx[i] \n idx[i] = idx[j]\n idx[j] = temp \n\n for i in idx:\n suggestions.append(vocab_list[i])\n\n output(\"{misspelled}\\t{corrections}\".format(\n misspelled=word,\n corrections=\"\\t\".join(suggestions)\n )) # may cause IO bottleneck", "def split_hashtags(hashtag):\n matches = re.findall('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', hashtag)\n return matches", "def get_tweets(hashtag):\n api = twitter.Api(consumer_key=TWITTER_API_CONSUMER_KEY,\n consumer_secret=TWITTER_API_CONSUMER_SECRET,\n access_token_key=TWITTER_API_ACCESS_TOKEN_KEY,\n access_token_secret=TWITTER_API_ACCESS_TOKEN_SECRET)\n\n query = (f\"q=%23{HASHTAG}%20-RT\"\n f\"&result_type=recent&since=2019-01-01&count={NUM_TWEETS}\")\n results = api.GetSearch(raw_query=query)\n\n return [\n format_tweet(tweet.AsDict())\n for tweet in results\n ]", "def get_top_hashtags_from_twitter_api(country='Japan', extended_search=True, debug=False):\n trends = get_top_trends_from_twitter(country=country, exclude_hashtags=False)\n trends = json.loads(trends)\n\n trending_hashtags = [t['label'] for t in trends]\n\n #print(json.dumps(trends, indent=4, ensure_ascii=False))\n\n queries = [t['query'] for t in trends]\n\n if debug:\n #[print(x) for x in trends]\n #[print(x) for x in queries]\n queries = [queries[0]]\n\n full_hashtags_list = []\n for query in queries:\n #print(query)\n # there is no country filter, but there is language filter at least\n if country == 'Japan':\n responses = api.GetSearch(term=query, locale='ja', return_json=True)\n try: responses = responses['statuses']\n except: print(responses)\n else:\n responses = api.GetSearch(term=query, return_json=True)\n try: responses = responses['statuses']\n except: print(responses)\n\n #print(json.dumps(responses, indent=4, ensure_ascii=False))\n\n trend_hashtags_list = []\n for response in responses:\n if debug: print(json.dumps(response, indent=4, ensure_ascii=False))\n text = response['text']\n\n hashtags_list = response['entities']['hashtags']\n\n if len(hashtags_list) > 0:\n hashtags_list = [h['text'] for h in 
hashtags_list]\n [trend_hashtags_list.append(h) for h in hashtags_list]\n\n full_hashtags_list.append(trend_hashtags_list)\n\n flat_hashtags_list = [item for sublist in full_hashtags_list for item in sublist]\n\n # turn it into a set to clear duplicates, then append #\n flat_hashtags_list = list(set(flat_hashtags_list))\n flat_hashtags_list = ['#'+h for h in flat_hashtags_list]\n\n flat_tier_list = []\n for h in flat_hashtags_list:\n if h in trending_hashtags:\n flat_tier_list.append(1)\n else:\n flat_tier_list.append(2)\n\n output = []\n for hashtag, tier in zip(flat_hashtags_list, flat_tier_list):\n output.append({\n \"label\": hashtag,\n \"tier\": tier\n })\n\n sorted_output = sorted(output, key=lambda x: x['tier'])\n\n output_json = json.dumps(sorted_output, ensure_ascii=False)\n return output_json", "def hashtag_seperator(s: List[tuple]) -> List[str]:\n result = []\n for tups in s:\n text = tups[0]\n result = result + extract_hashtags(text)\n return result", "def importantLemmas(self, ignoreSemanticTagList=[]):\n lemmas = set([])\n for token in self.importantTokenList(ignoreSemanticTagList=ignoreSemanticTagList):\n lemmas.add(token.lemma.lower())\n return lemmas", "def has_hashtag(self, tag_list, **kwargs):\n lowlist = [tag.lower() for tag in tag_list]\n alllower = ('case_sensitive' in kwargs and not kwargs['case_sensitive'])\n for ht in self.original.entities['hashtags']:\n lowht = ht['text'].lower()\n if alllower and lowht in lowlist or '#' + lowht in lowlist:\n return True\n if ht['text'] in tag_list or '#' + ht['text'] in tag_list:\n return True\n return False", "def recreate_hashtag_input(cls, list_of_readable_hashtags):\n\n complete_input = ''\n\n for hashtag in list_of_readable_hashtags:\n complete_input += hashtag\n\n return complete_input", "def extract_tags_and_mentions(s: str) -> typing.Tuple[set, set]:\n if not s:\n return set(), set()\n\n # Build a set after stripping the # or @ sign before every item found with\n # the regular expression.\n tags = set(t for t in find_hashtags_re.findall(s))\n mentions = set(m[1:] for m in find_mentions_re.findall(s))\n mentioned_user = set(\n User.objects.get(username=username)\n for username in mentions\n if User.objects.filter(username=username).exists()\n )\n\n # mentions = set(m for m in mentions)\n\n return tags, mentioned_user", "def index_new_tweet(self, id_str, tweet_tokens: list):\n self.tweet_count += 1\n unique_words = set(tweet_tokens)\n unique_word_pairs = set()\n for i in unique_words:\n for j in unique_words - {i}:\n # To us [a, b] = [b, a], and sorting gives us a distinct representation.\n unique_word_pairs.add(tuple(sorted([i, j])))\n for w in unique_words | unique_word_pairs:\n self.index[self.epoch][w] = id_str\n current_freq = self.frequency_map.get(w, 0)\n self.frequency_map[w] = current_freq + 1\n # Get word statistics from hash table\n statistics_present = w in self.stats_map\n if not statistics_present:\n (mu, sigma) = (math.inf, math.inf)\n for h in self.hash_functions:\n c = get_hash(h(), repr(w)) % 2 ** self.bit_count\n if self.buckets[c][\"ewma\"] < mu:\n mu = self.buckets[c][\"ewma\"]\n sigma = self.buckets[c][\"ewmvar\"]\n self.stats_map[w] = (mu, sigma)\n (mu, sigma) = self.stats_map[w]\n # Test for significance threshold\n x = self.frequency_map[w]\n if self._is_frequency_significant(mu, sigma, x):\n self.refinement.append((w, self._get_significance(mu, sigma, x)))\n # if self.refinement:\n # r = self.refinement\n # self.refinement = []\n # return r", "def clean_tokens_and_lemmetize(self, tweet_tokens: 
list) -> list:\n\n cleaned_tokens = []\n\n pos_dict = {'V': 'v', 'N': 'n'}\n for token, tag in pos_tag(tweet_tokens):\n token = re.sub('(https?:\\/\\/(?:www\\.|(?!www))[a-zA-Z0-9][a-zA-Z0-9-]\\\n +[a-zA-Z0-9]\\.[^\\s]{2,}|www\\.[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\\\n \\.[^\\s]{2,}|https?:\\/\\/(?:www\\.|(?!www))[a-zA-Z0-9]+\\.[^\\s]\\\n {2,}|www\\.[a-zA-Z0-9]+\\.[^\\s]{2,})', '', token)\n token = re.sub('(@[A-Za-z0-9_]+)', '', token)\n\n pos = pos_dict.get(tag[0], 'a')\n\n token = self.lemmatizer.lemmatize(token, pos)\n\n if len(token) > 1 and token.lower() not in self.stop_words:\n cleaned_tokens.append(token.lower())\n\n return cleaned_tokens", "def tweet_enrichment(self, tweet):\n d = {}\n slangs = []\n definitions = []\n used_slang_ids = []\n text = tweet['cleaned'].split()\n for word in text:\n if word not in wordnet.words():\n try:\n enriched, definition, insert_slang_id = self.lesk(tweet['cleaned'], word, tweet[\"created_at\"],\n tweet['tweet_id'])\n d[word] = definition\n slangs.append(word)\n used_slang_ids.append(insert_slang_id)\n definitions.append(definition)\n except (IndexError, ValueError) as e:\n pass\n for word, definition in d.items():\n ind = text.index(word)\n text[ind] = definition\n enriched_tweet = ' '.join(text)\n enriched_tweet = self.spell_correction(enriched_tweet)\n tweet['enriched_tweet'], tweet['used_slang_ids'] = enriched_tweet, used_slang_ids\n cleaner = TweetTransformer([tweet['enriched_tweet']], self.num_of_tokens)\n cleaner.preprocess()\n tweet['enriched_tweet'] = cleaner.sentences[0]\n tweet.pop('cleaned')\n if not self.database.get(self.collection_processed, \"enriched_tweet\", tweet['enriched_tweet']).count() > 0:\n self.database.insert(self.collection_processed, tweet)\n return {tweet['tweet_id']: tweet['enriched_tweet']}\n else:\n return None", "def RelevantTweet(Tweet, Termlist, DateRange=None):\n TweetText = Tweet[\"full_text\"].lower() #ignore case. 
# Changed to full_text now, instead of (previously) truncated.\n\n if Tweet[\"is_quote_status\"] is True: #Append, since we're just matching terms.\n # print(\"Tweet = \", Tweet.keys())\n if \"quoted_status\" in Tweet:\n TweetText = TweetText + \" \" + Tweet[\"quoted_status\"][\"full_text\"].lower()\n\n # From: https://stackoverflow.com/questions/7703865/going-from-twitter-date-to-python-datetime-date\n if DateRange is not None:\n ts = time.strptime(Tweet['created_at'], '%a %b %d %H:%M:%S +0000 %Y')\n\n if ts < time.strptime(DateRange[0], '%b %d %H:%M:%S %Y'):\n return False\n elif ts > time.strptime(DateRange[1], '%b %d %H:%M:%S %Y'):\n return False\n if Termlist is None:\n return True\n else:\n for word in Termlist:\n if word.lower() in TweetText:\n return True\n return False", "def feat_eng(self, tweets):\n self.tweets['emojis'] = get_emojis(self.tweets['text']) # get emojis as text\n self.tweets['polarity'] = self.tweets['text'].map(\n lambda x: TextBlob(x).sentiment.polarity)\n self.tweets['word_count'] = self.tweets['text'].map(lambda x: len(str(x).split()))", "def tokenize(self):\n tknzr = TweetTokenizer()\n tkn = []\n for tweet in self.tweets:\n for word in tknzr.tokenize(tweet):\n tkn.append(word)\n return tkn", "def auto_link_hashtags(self, options = {}):\r\n return self.auto_link_entities(self.extractor.extract_hashtags_with_indices(), options)", "def get_hashtags():\r\n hashtags_list = cache.get('hashtags-list')\r\n if hashtags_list is None:\r\n pipeline = [\r\n {\"$unwind\": \"$entities\"},\r\n {\"$unwind\": \"$entities.hashtags\"},\r\n {\"$unwind\": \"$entities.hashtags.text\"},\r\n {\"$group\": {\"_id\": \"$entities.hashtags.text\", \"count\":\r\n {\"$sum\": 1}}},\r\n {\"$sort\": SON([(\"count\", -1), (\"_id\", -1)])}]\r\n\r\n hashtags = mongo_coll_tweets.aggregate(pipeline)\r\n hashtags_list = []\r\n for hashtag in hashtags:\r\n hashtags_list.append((list(hashtag.values())[1], list(hashtag.values())[0]))\r\n\r\n cache.set('hashtags-list', hashtags_list,\r\n cfg['flask_cache_timeout'] * 60)\r\n\r\n return hashtags_list", "def trendingTweets():\n api = twitter.Api()\n trending_topics = api.GetTrendsWoeid(PHILA_WOEID)\n for topic in trending_topics:\n topicSearchTerm = topic.name\n trending_tweets = api.GetSearch(topicSearchTerm)\n for tweet in trending_tweets:\n util.safe_print(tweet.GetText())\n # pass", "def searchphrases(query):\n query_nostopwords = removestopwords(query)\n query_lemmatized = lemmatize(query_nostopwords) #look like\n phraseids = []\n ngramids=[]\n words=query_lemmatized.split()\n query_ngram = \"select id from ngrams where lower(lemmangrams) like lower('%{}%')\".format(query_lemmatized)+\" or lower(lemmangrams) like lower('%{}%')\".format(words[0])\n for word in words[1:]:\n query_ngram=query_ngram+\" or lower(lemmangrams) like lower('%{}%')\".format(word)\n con = it.engine.execute(query_ngram)\n rows_phrase = con.fetchall()\n if rows_phrase:\n ngramids = list(set([str(i[0]) for i in rows_phrase]))\n phraseids.extend(ngramids)\n phraseids = list(set(phraseids))\n results=categorize(phraseids)\n return results", "def hashtagSegment(text_string):\n # For example, we update wordsegment dict so it recognises altright as \"alt right\" rather than salt right\n ws.BIGRAMS['alt right'] = 1.17e8 \n\n ws.BIGRAMS['white supremacists'] = 3.86e6\n ws.BIGRAMS['tweets'] = 6.26e10\n ws.BIGRAMS['independece day'] = 6.21e7\n \n #We target hashtags so that we only segment the hashtag strings.\n #Otherwise the segment function may operate on misspelled words also; which\n 
#often appear in hate speech tweets owing to the ill education of those spewing it\n temp_str = []\n for word in text_string.split(' '):\n if word.startswith('#') == False:\n temp_str.append(word)\n else:\n temp_str = temp_str + segment(word)\n \n text_string = ' '.join(temp_str) \n\n return text_string", "def linkify_tags_and_mentions(value):\n value = find_hashtags_re.sub(tag_match_to_url, sanitize(value))\n value = find_mentions_re.sub(mention_match_to_url, value)\n # value = link_tags_parse(value)\n return mark_safe(value)", "def printHashtagsAndMentions(searchText=None, filterTerms=False, tweetLimit=0):\n tweets = db.Tweet.select()\n if searchText is not None:\n tweets = tweets.filter(db.Tweet.q.message.contains(searchText))\n tweets = tweets.limit(tweetLimit)\n\n hashtags, mentions, plain = getHashtagsAndMentions(tweets)\n\n if searchText and filterTerms:\n hashtags = Counter(\n {k: v for k, v in hashtags.items() if searchText.lower() in k.lower()}\n )\n mentions = Counter(\n {k: v for k, v in mentions.items() if searchText.lower() in k.lower()}\n )\n plain = Counter(\n {k: v for k, v in plain.items() if searchText.lower() in k.lower()}\n )\n\n # Unique word count for each area.\n hashtagWC = len(hashtags)\n mentionWC = len(mentions)\n plainWC = len(plain)\n\n print(\"Summary\")\n print(\"==============\")\n # Count items in the sliced selection since .count() does not work with\n # a limit.\n count = len(list(tweets)) if tweetLimit else tweets.count()\n print(\"{0:7,d} tweets\".format(count))\n print(\"{0:7,d} unique words\".format(hashtagWC + mentionWC + plainWC))\n print(\"{0:7,d} unique hashtags\".format(hashtagWC))\n print(\"{0:7,d} unique mentions\".format(mentionWC))\n print(\"{0:7,d} unique plain words\".format(plainWC))\n print()\n\n print(\"Hashtags\")\n print(\"========\")\n printCounterByCount(hashtags)\n print()\n\n print(\"Mentions\")\n print(\"========\")\n printCounterByCount(mentions)\n\n \"\"\"\n # Removal of stopwords and handling of URIs is needed to make this\n # useful.\n print 'Plain'\n print '========'\n printCounterByCount(plain)\n \"\"\"", "def reply_to_tweets():\n last_seen_id = retrieve_last_seen_id(FILE_NAME)\n mentions = api.mentions_timeline(\n last_seen_id,\n tweet_mode='extended')\n\n for mention in reversed(mentions):\n print(str(mention.id) + ' - ' + mention.full_text, flush=True)\n last_seen_id = mention.id\n store_last_seen_id(last_seen_id, FILE_NAME)\n for i in range(len(keywords)):\n if keywords[i] in mention.full_text.lower():\n print(\"responding back to: \" + '@' +\n mention.user.screen_name, flush=True)\n api.update_status('@' + mention.user.screen_name + ' ' +\n salts[i], mention.id)", "def extract_mentioned_screen_names_with_indices(self, transform = lambda x: x):\r\n if not self.text:\r\n return []\r\n\r\n possible_screen_names = []\r\n for match in self.extract_mentions_or_lists_with_indices():\r\n if not match['list_slug']:\r\n possible_screen_names.append({\r\n 'screen_name': transform(match['screen_name']),\r\n 'indices': match['indices']\r\n })\r\n return possible_screen_names", "def countHashtags(self, hashtags):\n for hashtag in hashtags:\n if hashtag not in self.hashDict.keys():\n self.hashDict.update({hashtag: 1})\n else:\n self.hashDict.update({hashtag: self.hashDict[hashtag] + 1})", "def testHighlightMots(self):\n\t\ty = Yahoo()\n\t\tmots = (\n\t\t\t('La pomme est un fruit alors que la poire, en est un aussi mais pas la Banane!', ['pomme', 'poire', 'banane'], 'la <strong>pomme</strong> est un fruit alors que la 
<strong>poire</strong> en est un aussi mais pas la <strong>banane</strong>'),\n\t\t\t)\n\t\tfor (mot, tags, highlight) in mots:\n\t\t\tresult = y.highliter_mots(mot, tags)\n\t\t\tself.assertEqual(result, highlight)", "def getByHashtags(hashtag):\n\n # set page_limits. The default is 1 \n pages_limit = request.args.get('pages_limit') or 1\n pages_limit = int(pages_limit)\n\n raw_response = get_response(tw_api, 'search/tweets', { 'q': '#' + hashtag, 'count': 100 }, pages_limit)\n list_response = convert_resp2list(raw_response)\n return jsonify(list_response)", "def extract_important(tweet_objects_list):\n # This section extracts important information such as most common hashtags\n hashtag_dictionary = {}\n for tweet in tweet_objects_list:\n if \"hashtags\" in tweet:\n for individual_hashtag in tweet[\"hashtags\"]:\n if not individual_hashtag[\"text\"].lower() in hashtag_dictionary:\n hashtag_dictionary[individual_hashtag[\"text\"].lower()] = 1\n else:\n hashtag_dictionary[individual_hashtag[\"text\"].lower()] += 1\n frequency = Counter(hashtag_dictionary)\n most_frequent_hashtags = frequency.most_common(50)\n\n user_dictionary = {}\n for tweet in tweet_objects_list:\n if \"user_mentions\" in tweet:\n for individual_user in tweet[\"user_mentions\"]:\n if not individual_user[\"screen_name\"] in user_dictionary:\n user_dictionary[individual_user[\"screen_name\"].lower()] = 1\n else:\n user_dictionary[individual_user[\"screen_name\"].lower()] += 1\n frequency = Counter(user_dictionary)\n most_frequent_users = frequency.most_common(50)\n symbol_dictionary = {}\n for tweet in tweet_objects_list:\n if \"symbols\" in tweet:\n for individual_symbol in tweet[\"symbols\"]:\n if not individual_symbol[\"text\"] in symbol_dictionary:\n symbol_dictionary[individual_symbol[\"text\"]] = 1\n else:\n symbol_dictionary[individual_symbol[\"text\"]] += 1\n frequency = Counter(symbol_dictionary)\n most_frequent_symbols = frequency.most_common(50)\n return most_frequent_hashtags, most_frequent_users, most_frequent_symbols", "def sent_features(tweet):\n twitter_objs = count_twitter_objs(tweet)\n tweet=clean_tweet(tweet) \n sentiment = sentiment_analyzer.polarity_scores(tweet)\n #Get text only\n words = preprocess(tweet) \n syllables = textstat.syllable_count(words)\n num_chars = sum(len(w) for w in words)\n num_chars_total = len(tweet)\n num_terms = len(tweet.split())\n num_words = len(words.split())\n avg_syl = round(float((syllables+0.001))/float(num_words+0.001),4)\n num_unique_terms = len(set(words.split()))\n \n ###Modified FK grade, where avg words per sentence is just num words/1\n FKRA = round(float(0.39 * float(num_words)/1.0) + float(11.8 * avg_syl) - 15.59,1)\n ##Modified FRE score, where sentence fixed to 1\n FRE = round(206.835 - 1.015*(float(num_words)/1.0) - (84.6*float(avg_syl)),2)\n \n \\\n retweet = 0\n if \"rt\" in words:\n retweet = 1\n features = [FKRA, FRE,syllables, avg_syl, num_chars, num_chars_total, num_terms, num_words,\n num_unique_terms, sentiment['neg'], sentiment['pos'], sentiment['neu'], sentiment['compound'],\n twitter_objs[2], twitter_objs[1],\n twitter_objs[0], retweet]\n return features", "def trigrams(sentence):\n return [word+'_'+sentence[i+1]+'_'+sentence[i+2] \n if word+'_'+sentence[i+1]+'_'+sentence[i+2] in word_ids else None \n for i, word in enumerate(sentence[:-2])] if config.USE_TRIGRAMS else []", "def perform_google_sentiment_lexicon_lookup(tweets):\n \n lex = Lexicon(GoogleTranslater(), SentiWordNetLexicon())\n print \"Getting sentiment values\"\n tweet_sentiments = 
[]\n for t in tweets:\n tweet_sentiments.append(lex.translate_sentence_and_get_lexicon_sentiment(t.text))\n \n print tweet_sentiments\n reduced_tweet_sentiments = []\n for sentiments in tweet_sentiments:\n polar_sum = sum([s[0] for s in sentiments])\n negative_sum = sum([s[1] for s in sentiments])\n objective_sum = sum([s[2] for s in sentiments])\n reduced_tweet_sentiments.append((polar_sum, negative_sum, objective_sum))\n print reduced_tweet_sentiments\n return reduced_tweet_sentiments", "def importantWords(self, ignoreSemanticTagList=[]):\n words = set([])\n for token in self.importantTokenList(ignoreSemanticTagList=ignoreSemanticTagList):\n words.add(token.text.lower())\n return words", "def handle_trending_hashtags(api_pipeline, trending_hashtags_from_db):\n\n trending_hashtags = api_pipeline.get_top_hashtags_worldwide()\n trending_hashtags.sort(key=lambda hashtag: hashtag.tweet_volume, reverse=True)\n trending_hashtags_chart = PlotPainter.plot_hashtags(trending_hashtags)\n trending_hashtags_from_db_today = trending_hashtags_from_db.filter(save_date=datetime.datetime.today().date())\n for trending_hashtag in trending_hashtags:\n if trending_hashtag not in trending_hashtags_from_db_today:\n trending_hashtag.save()\n return trending_hashtags, trending_hashtags_chart", "def excludeTwitterTags(tweet):\n\ttwext = tweet['text'].lower()\n\tif tweet['entities']['hashtags']:\n\t\tfor hashtag in tweet['entities']['hashtags']:\n\t\t\ttwext = twext.replace(hashtag['text'].lower(),\"\")\n\t\t\tif tweet['entities']['user_mentions']:\n\t\t\t\tfor user_mention in tweet['entities']['user_mentions']:\n\t\t\t\t\ttwext = twext.replace(user_mention['screen_name'].lower(),\"\")\n\t\t\tif tweet['entities']['urls']:\n\t\t\t\tfor url in tweet['entities']['urls']:\n\t\t\t\t\ttwext = twext.replace(url['url'].lower(),\"\")\n\treturn twext", "def get_tweets(user, num = 200):\n tweets = []\n \n for tweet in user.home_timeline(count = num):\n edited_tweet = tweet.text\n edited_tweet = edited_tweet.encode(encoding='UTF-8', errors='Ignore') \n tweets.append(edited_tweet)\n return tweets", "def addTweetHashtagEdges(self):\n self.hashtagGraph.add_edges_from(self.builtEdgeList)", "def detect_author(user_to_tweets: Dict[str, List[tuple]], tweet_text: str) -> \\\n str:\n acc = []\n \n for keys in user_to_tweets:\n author_hashes = hashtag_seperator(user_to_tweets[keys])\n text_hashes = extract_hashtags(tweet_text)\n if set(text_hashes).issubset(author_hashes):\n acc.append(keys)\n if len(acc) == 1:\n return acc[0]\n return 'unknown'", "def cleanTweet(text, appostrophes=True, emojis=True, html=True, url=True, misspellings=True, punctuation=True, lemming=True,\\\r\n stop=True):\r\n if appostrophes:\r\n #convert appostrophes\r\n filtered_string = decontracted(text)\r\n if emojis:\r\n #decoding, removing emojis\r\n filtered_string = filtered_string.encode(\"utf-8\").decode('ascii','ignore')\r\n if html:\r\n #cleaning of html tags\r\n htmltags = re.compile('<.*?>|&([a-z0-9]+|#[0-9]{1,6}|#x[0-9a-f]{1,6});')\r\n filtered_string = re.sub(htmltags, '', filtered_string)\r\n if url:\r\n #cleaning of url\r\n url = re.compile(r'https?://\\S+|www\\.\\S+')\r\n filtered_string = re.sub(url, '', text)\r\n if misspellings:\r\n #cleaning of misspellings\r\n spell = SpellChecker()\r\n corrected_text = []\r\n misspelled_words = spell.unknown(filtered_string.split())\r\n for word in filtered_string.split():\r\n if word in misspelled_words:\r\n corrected_text.append(spell.correction(word))\r\n else:\r\n corrected_text.append(word)\r\n 
filtered_string = \" \".join(corrected_text)\r\n if punctuation:\r\n word_tokens = word_tokenize(filtered_string)\r\n #remove punctuations\r\n table=str.maketrans('','',string.punctuation)\r\n filtered_string.translate(table) \r\n filtered_string = [word.translate(table) for word in word_tokens]\r\n filtered_string = \" \".join(filtered_string)\r\n if lemming:\r\n #lemming of words\r\n word_tokens = word_tokenize(filtered_string)\r\n lemmatizer = WordNetLemmatizer() \r\n filtered_string = [lemmatizer.lemmatize(word) for word in word_tokens]\r\n if stop:\r\n # cleaning from stopwords\r\n stop_words=set(stopwords.words('english'))\r\n stop_word_drop = [] \r\n for word in filtered_string: \r\n if word not in stop_words: \r\n stop_word_drop.append(word) \r\n filtered_string = \" \".join(stop_word_drop)\r\n \r\n #toDos\r\n #cleaning of rare words\r\n # tokens is a list of all tokens in corpus\r\n # freq_dist = nltk.FreqDist(token)\r\n # rarewords = freq_dist.keys()[-50:]\r\n # after_rare_words = [ word for word in token not in rarewords]\r\n #cleaning of slang words\r\n #split attached words, not working and questionable because of all capital words\r\n # filtered_string = \" \".join(re.findall('[A-Z][^A-Z]*', filtered_string))\r\n return filtered_string", "def handle_hashtags_tweets_for_date(current_date, current_hashtag):\n\n hashtags_tweets = current_hashtag.tweets.filter(save_date=current_date).distinct()\n hashtags_tweets_list = [hashtags_tweet for hashtags_tweet in hashtags_tweets]\n hashtags_tweets_list.sort(key=lambda tweet: (tweet.retweets, tweet.likes), reverse=True)\n hashtags_tweets_list = hashtags_tweets_list[:10]\n hashtags_tweets_chart = PlotPainter.plot_tweets(hashtags_tweets_list) if hashtags_tweets else None\n return hashtags_tweets_chart, hashtags_tweets_list", "def tweets_features(tweet):\n tweet = remove_stop_words(tweet)\n return {'TWEET': tweet}", "def like_tweet(tweet_id):\n twitter.create_favorite(id=tweet_id)", "def searchbrown_word(tag):\n brown_tagged_words = brown.tagged_words(categories='news')\n hitwords = []\n for i in range(len(brown_tagged_words)):\n if tag == brown_tagged_words[i][1]:\n hitwords.append(brown_tagged_words[i][0].lower())\n return hitwords" ]
[ "0.7114178", "0.6828377", "0.6736826", "0.6493812", "0.64422315", "0.632229", "0.6310081", "0.6094305", "0.6063977", "0.6057257", "0.5984623", "0.5919002", "0.5900357", "0.58999604", "0.58835715", "0.5878227", "0.5875355", "0.5873595", "0.58567333", "0.58058137", "0.579629", "0.5795316", "0.57565904", "0.5717769", "0.5710841", "0.5710841", "0.5707498", "0.56730366", "0.5672563", "0.56403303", "0.56309336", "0.56157863", "0.56005114", "0.5579086", "0.5575425", "0.5572158", "0.55647", "0.5558853", "0.55518174", "0.55314153", "0.5520267", "0.5501499", "0.54878855", "0.54847974", "0.548295", "0.54810715", "0.5454564", "0.5447426", "0.54443383", "0.5422615", "0.5408366", "0.5402143", "0.53978556", "0.5397342", "0.53961724", "0.5368134", "0.5365004", "0.5349767", "0.5345558", "0.53329545", "0.53325814", "0.5332314", "0.5326014", "0.5313958", "0.5313894", "0.53082657", "0.53057075", "0.53018844", "0.5286331", "0.5282204", "0.5274961", "0.52650845", "0.5254139", "0.52400005", "0.52175516", "0.52061903", "0.52036816", "0.52011997", "0.519956", "0.51906073", "0.51728547", "0.5170063", "0.516446", "0.515646", "0.5156422", "0.5154999", "0.5147186", "0.5139074", "0.5135723", "0.5129774", "0.51281273", "0.51277864", "0.51276726", "0.51257825", "0.51212794", "0.5115182", "0.51051426", "0.5097626", "0.5086887", "0.50701", "0.5054949" ]
0.0
-1
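The preprocessing fragment that closes the record above (punctuation stripping, lemmatization, stopword filtering) can be summarized as a small standalone pipeline. The sketch below is illustrative only: the function name `clean_text` and its structure are assumptions rather than part of the dataset, and it presumes the usual NLTK resources (`punkt`, `wordnet`, `stopwords`) have already been downloaded.

```python
# Illustrative sketch only (names assumed); requires the nltk data packages punkt, wordnet, stopwords.
import string
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords

def clean_text(text: str) -> str:
    # Strip punctuation from each token.
    table = str.maketrans('', '', string.punctuation)
    tokens = [t.translate(table) for t in word_tokenize(text)]
    # Lemmatize the surviving tokens.
    lemmatizer = WordNetLemmatizer()
    tokens = [lemmatizer.lemmatize(t) for t in tokens if t]
    # Drop English stopwords and rejoin.
    stop_words = set(stopwords.words('english'))
    return " ".join(t for t in tokens if t not in stop_words)
```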
Creates a sparse matrix of preference vectors, one for each of the N samples to recommend, which are used to initialize the random-walk algorithm. If a query hashtag is given for a particular tweet, it is used to build that tweet's preference vector; otherwise the most similar hashtag in terms of content similarity is used.
def _get_preference_vectors(self, tweet_content_similarities: np.ndarray, query_hashtags: Optional[Tuple[str]]) -> sps.csr_matrix: def _get_using_similarities(similarity_vector): query_hashtag_index = np.argmax(similarity_vector) vec = np.zeros((len(self._hashtag_labels),)) vec[query_hashtag_index] = 1 return vec preference_vectors = [] for i in range(len(tweet_content_similarities)): if query_hashtags is None or query_hashtags[i] is None: preference_vector = _get_using_similarities(tweet_content_similarities[i]) else: try: preference_vector = np.asarray(self._hashtag_encoder.transform([query_hashtags[i]]))[0] except ValueError: warnings.warn( "Unknown hashtag: {}. Using the closest hashtag in terms of content similarity".format( query_hashtags[i])) preference_vector = _get_using_similarities(tweet_content_similarities[i]) preference_vectors.append(preference_vector) preference_vectors = np.vstack(preference_vectors) preference_vectors = sps.csr_matrix(preference_vectors, preference_vectors.shape, dtype=np.float32) return preference_vectors
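For readers skimming the record above: the document's `_get_preference_vectors` method builds one-hot rows (either from the argmax over content similarities or from an encoded query hashtag) and stacks them into a CSR matrix. Below is a minimal standalone sketch of the similarity-only path; the function name and shapes are assumptions for illustration, not part of the dataset.

```python
# Minimal sketch (assumed names): one-hot preference vectors as a scipy CSR matrix,
# putting all mass on the most content-similar hashtag for each sample.
import numpy as np
import scipy.sparse as sps

def build_preference_vectors(similarities: np.ndarray, n_hashtags: int) -> sps.csr_matrix:
    rows = np.arange(similarities.shape[0])   # one row per tweet to recommend for
    cols = similarities.argmax(axis=1)        # index of the most similar hashtag
    data = np.ones(similarities.shape[0], dtype=np.float32)
    return sps.csr_matrix((data, (rows, cols)),
                          shape=(similarities.shape[0], n_hashtags),
                          dtype=np.float32)
```

Each row built this way contains a single 1, which is the usual form of a seed distribution for personalized-PageRank-style random walks.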
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _initialize(self):\n for doc_index, doc in enumerate(self.document):\n temp_word_topic_matrix = []\n for word in doc:\n if word in self.word2id.keys():\n start_topic_index = np.random.randint(0, self.K)\n temp_word_topic_matrix.append(start_topic_index)\n self.doc_topic_matrix[doc_index, start_topic_index] += 1\n self.topic_word_matrix[start_topic_index, self.word2id[word]] += 1\n self.topic_matrix[start_topic_index] += 1\n self.current_word_topic_matrix.append(temp_word_topic_matrix)", "def __init__(self, n, sents, corpus='', gamma=None, addone=True):\n self.n = n\n self.smoothingtechnique = 'Interpolated (Jelinek Mercer) Smoothing'\n self.gamma = gamma\n self.addone = addone\n self.counts = counts = defaultdict(int)\n self.gamma_flag = True\n self.corpus = corpus\n # way more efficient than use set unions\n voc = ['</s>']\n for s in sents:\n voc += s\n self.voc = list(set(voc))\n\n if gamma is None:\n self.gamma_flag = False\n\n # if not gamma given\n if not self.gamma_flag:\n total_sents = len(sents)\n aux = int(total_sents * 90 / 100)\n # 90 per cent for training\n train_sents = sents[:aux]\n # 10 per cent for perplexity (held out data)\n held_out_sents = sents[-total_sents+aux:]\n\n train_sents = list(map((lambda x: ['<s>']*(n-1) + x), train_sents))\n train_sents = list(map((lambda x: x + ['</s>']), train_sents))\n\n for sent in train_sents:\n for j in range(n+1):\n # move along the sent saving all its j-grams\n for i in range(n-j, len(sent) - j + 1):\n ngram = tuple(sent[i: i + j])\n counts[ngram] += 1\n # added by hand\n counts[('</s>',)] = len(train_sents)\n # variable only for tests\n self.tocounts = counts\n # search the gamma that gives lower perplexity\n gamma_candidates = [i*50 for i in range(1, 15)]\n # xs is a list with (gamma, perplexity)\n xs = []\n sents = train_sents\n for aux_gamma in gamma_candidates:\n self.gamma = aux_gamma\n aux_perx = self.perplexity(held_out_sents)\n xs.append((aux_gamma, aux_perx))\n xs.sort(key=lambda x: x[1])\n self.gamma = xs[0][0]\n with open('old-stuff/interpolated_' + str(n) + '_parameters_'+corpus, 'a') as f:\n f.write('Order: {}\\n'.format(self.n))\n f.write('Gamma: {}\\n'.format(self.gamma))\n f.write('AddOne: {}\\n'.format(self.addone))\n f.write('Perplexity observed: {}\\n'.format(xs[0][1]))\n f.write('-------------------------------\\n')\n f.close()\n\n else:\n sents = list(map((lambda x: ['<s>']*(n-1) + x), sents))\n sents = list(map((lambda x: x + ['</s>']), sents))\n\n for sent in sents:\n # counts now holds all k-grams for 0 < k < n + 1\n for j in range(n+1):\n # move along the sent saving all its j-grams\n for i in range(n-j, len(sent) - j + 1):\n ngram = tuple(sent[i: i + j])\n counts[ngram] += 1\n # added by hand\n counts[('</s>',)] = len(sents)", "def get_features_train(tweets):\n feats = get_feature_array(tweets)\n tfidf = vectorizer.fit_transform(tweets).toarray()\n M = np.concatenate([tfidf,feats],axis=1)\n return M", "def __init__(self, n, sents, corpus='', beta=None, addone=True):\n self.n = n\n self.beta = beta\n self.corpus = corpus\n self.beta_flag = True\n self.addone = addone\n self.smoothingtechnique = 'Back Off (Katz) with Discounting Smoothing'\n self.counts = counts = defaultdict(int)\n self.A_set = defaultdict(set)\n voc = ['</s>']\n for s in sents:\n voc += s\n self.voc = set(voc)\n if beta is None:\n self.beta_flag = False\n\n # if no beta given, we compute it\n if not self.beta_flag:\n total_sents = len(sents)\n aux = int(total_sents * 90 / 100)\n # 90 per cent por training\n train_sents = 
sents[:aux]\n # 10 per cent for perplexity (held out data)\n held_out_sents = sents[-total_sents+aux:]\n\n train_sents = list(map((lambda x: ['<s>']*(n-1) + x), train_sents))\n train_sents = list(map((lambda x: x + ['</s>']), train_sents))\n for sent in train_sents:\n for j in range(n+1):\n for i in range(n-j, len(sent) - j + 1):\n ngram = tuple(sent[i: i + j])\n counts[ngram] += 1\n # for efficiency, we save the A set as a dict of sets\n if j:\n self.A_set[ngram[:-1]].add(ngram[-1])\n for i in range(1, n):\n counts[('<s>',)*i] += len(train_sents)\n counts[('</s>',)] = len(train_sents)\n\n self.tocounts = counts\n # search for the beta that gives lower perplexity\n beta_candidates = [i*0.1 for i in range(1, 10)]\n # xs is a list with (beta, perplexity)\n xs = []\n self.sents = train_sents\n for aux_beta in beta_candidates:\n self.beta = aux_beta\n aux_perx = self.perplexity(held_out_sents)\n xs.append((aux_beta, aux_perx))\n xs.sort(key=lambda x: x[1])\n self.beta = xs[0][0]\n with open('old-stuff/backoff_'+str(n)+'_parameters_'+corpus, 'a') as f:\n f.write('Order: {}\\n'.format(self.n))\n f.write('Beta: {}\\n'.format(self.beta))\n f.write('AddOne: {}\\n'.format(self.addone))\n f.write('Perplexity observed: {}\\n'.format(xs[0][1]))\n f.write('-------------------------------\\n')\n f.close()\n else:\n sents = list(map((lambda x: x + ['</s>']), sents))\n sents = list(map((lambda x: ['<s>']*(n-1) + x), sents))\n\n for sent in sents:\n for j in range(n+1):\n for i in range(n-j, len(sent) - j + 1):\n ngram = tuple(sent[i: i + j])\n counts[ngram] += 1\n # for efficiency, we save the A set as a dict of sets\n if j:\n self.A_set[ngram[:-1]].add(ngram[-1])\n for i in range(1, n):\n counts[('<s>',)*i] += len(sents)\n counts[('</s>',)] = len(sents)", "def build(self, Tweets):\n text = [t.text for t in Tweets]\n vectorizer = TfidfVectorizer(tokenizer=process_text,\n stop_words=stopwords.words('english'),\n max_df=0.5,\n min_df=0.1,\n lowercase=True,\n max_features=10000)\n\n return vectorizer.fit_transform(text).A", "def get_raw_seedling(datasource, n):\n matrix = MatrixDict()\n \n for lang, sent in SEEDLING[datasource].sents():\n features = sent2ngrams(sent, n=n)\n matrix.setdefault(lang, Counter()).update(features)\n \n return matrix", "def initialize(self):\n # Initializing the counter and distribution.\n for k in range(0, self.topic_number,1):\n self.topic_term_count_matrix[k]= [0.0] * self.term_number\n self.topic_distribution_over_term[k] = [0.0] * self.term_number\n self.sum_topic_by_term_count[k] = 0.0\n for m in range(0, self.document_number,1):\n self.document_topic_count_matrix[m] = [0.0] * self.topic_number\n self.document_distribution_over_topic[m] = [0.0] * self.topic_number\n self.sum_document_by_topic_count[m] = 0.0\n\n # Initializing topics assigned to all words of all documents.\n for m in range(0, self.document_number, 1):\n N = len(self.documents[m])\n self.word_topic_assignment[m] = [-1] * N\n for n in range(0, N,1):\n topic = int(random.uniform(0,1) * self.topic_number)\n self.document_topic_count_matrix[m][topic] += 1.0\n self.topic_term_count_matrix[topic][self.documents[m][n]] += 1.0\n self.sum_topic_by_term_count[topic] += 1.0\n self.word_topic_assignment[m][n] = topic\n self.sum_document_by_topic_count[m] = N", "def make_tweet_nparr( txt ):\n # result storage\n fvec = numpy.empty( len(testFeatures) )\n\n # search for each feature\n txtLow = ' ' + txt.lower() + ' '\n for i in range( 0, len(testFeatures) ):\n\n key = testFeatures[i][0]\n\n fvec[i] = False\n for tstr in 
testFeatures[i][1]:\n fvec[i] = fvec[i] or (txtLow.find(tstr) != -1)\n\n return fvec", "def get_feature_set_SC2(tweet, sentimentvalues):\n pos_tag_freq = {}\n additional_freq = {}\n for phrase in tweet.tagged_words:\n for word in phrase:\n try:\n tag = word['pos']\n pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n if tag in ADJECTIVES:\n additional_freq['adjectives'] = additional_freq.get(tag, 0) + 1\n elif tag in ADVERBS: \n additional_freq['adverbs'] = additional_freq.get(tag, 0) + 1\n elif tag in PRONOUNS:\n additional_freq['pronoun'] = 1\n except KeyError:\n continue\n for key in pos_tag_freq.keys():\n pos_tag_freq[key] = pos_tag_freq[key]*1.0\n #number of adjectives in sentence, number of adverbs in sentence(except ikke), pronoun in sentence(binary) \n #Number of exclamation marks, number of emoticons,\n emoticons = tweet.nrof_happyemoticons+tweet.nrof_sademoticons\n if emoticons>0:\n additional_freq['emoticons'] = emoticons*1.0\n if tweet.nrof_exclamations>0:\n additional_freq['exclamations'] = tweet.nrof_exclamations*1.0\n \n #Add lexicon values\n #total subjectivity score from word polarities, total objectivity score, number of subjective words, number of objective words, e\n sub_score = sentimentvalues[0]+sentimentvalues[1]\n obj_score = sentimentvalues[2]\n if sub_score>0:\n additional_freq[\"sub_score\"] = sub_score+1.0\n if obj_score>0:\n additional_freq[\"obj_score\"] = obj_score+1.0\n \n #Concatenate the dicts\n features= dict(pos_tag_freq.items() + additional_freq.items())\n \n return features", "def initialize(self):\n self.n_words = len(self.vocab)\n self.n_docs = len(self.documents)\n\n # Initialize the three count matrices.\n # The (i,j) entry of self.nmz is the number of words in document i assigned to topic j.\n self.nmz = np.zeros((self.n_docs, self.n_topics))\n # The (i,j) entry of self.nzw is the number of times term j is assigned to topic i.\n self.nzw = np.zeros((self.n_topics, self.n_words))\n # The (i)-th entry is the number of times topic i is assigned in the corpus.\n self.nz = np.zeros(self.n_topics)\n\n # Initialize the topic assignment dictionary.\n self.topics = {} # key-value pairs of form (m,i):z\n\n for m in range(self.n_docs):\n for i in self.documents[m]:\n # Get random topic assignment, i.e. 
z is a random integer in the range of topics\n z = np.random.randint(self.n_topics)\n # Increment count matrices\n self.nmz[m,z] += 1\n self.nzw[z,self.documents[m][i]] += 1\n self.nz[z] += 1\n # Store topic assignment\n self.topics[(m,i)] = z", "def construct_train_set(tweet_pos, tweet_neg):\n tweet_pos['pred'] = 1\n tweet_neg['pred'] = 0\n tweet_pos.columns = ['tweet', 'pred']\n tweet_neg.columns = ['tweet', 'pred']\n all_tweets = tweet_neg.append(tweet_pos)\n tweet_TR = all_tweets.reset_index().drop(['index'], axis = 1)\n return tweet_TR", "def get_features_test(tweets):\n feats = get_feature_array(tweets)\n tfidf = vectorizer.transform(tweets).toarray()\n M = np.concatenate([tfidf,feats],axis=1)\n return M", "def phrase_scoring_ranking(phrases,model,dataset,bitext):\n e_phrases = []\n f_phrases = []\n count = 0\n f_phrase_count = {}\n e_phrase_count = {} #not needed\n #e_f_pair_count = {} #e words as rows and f words as columns\n f_e_pair_count = {} #e words as rows and f words as columns\n for phrase_set in phrases:\n for phrase in phrase_set:\n e_phrases.append(phrase[3])\n f_phrases.append(phrase[2])\n if phrase[2] in f_phrase_count:\n f_phrase_count[phrase[2]] += 1\n else:\n f_phrase_count[phrase[2]] = 1\n if phrase[2] in f_e_pair_count:\n if phrase[3] in f_e_pair_count[phrase[2]]:\n f_e_pair_count[phrase[2]][phrase[3]] += 1\n else:\n f_e_pair_count[phrase[2]][phrase[3]] = 1\n else:\n f_e_pair_count[phrase[2]]={}\n f_e_pair_count[phrase[2]][phrase[3]] = 1\n\n e_phrases = list(set(e_phrases))\n f_phrases = list(set(f_phrases))\n ep_count = len(e_phrases)\n fp_count = len(f_phrases)\n #pmatrix = np.empty(ep_count*fp_count) # ######Not needed if dictionary is used\n #pmatrix = pmatrix.reshape(ep_count,fp_count)\n #pmatrix.fill(0)\n ef_prob_dict = {}\n for e in e_phrases:\n for f in f_phrases:\n ef_count =count_fe_pair(e,f,f_e_pair_count)# f_e_pair_count[e][f]\n f_count = f_phrase_count[f]\n e_idx = e_phrases.index(e) ###Check the count logic again\n f_idx = f_phrases.index(f)\n pair_prob = ef_count/f_count\n #pmatrix[e_idx][f_idx] = pair_prob\n if f in f_e_pair_count:\n if e in f_e_pair_count[f]:\n if f in ef_prob_dict:\n ef_prob_dict[f][e]=pair_prob\n else:\n ef_prob_dict[f] = {}\n ef_prob_dict[f][e] = pair_prob\n\n #if pmatrix[e_idx][f_idx] != 0:\n # print(e,f,ef_count,f_count,pair_prob)\n return ef_prob_dict", "def index_new_tweet(self, id_str, tweet_tokens: list):\n self.tweet_count += 1\n unique_words = set(tweet_tokens)\n unique_word_pairs = set()\n for i in unique_words:\n for j in unique_words - {i}:\n # To us [a, b] = [b, a], and sorting gives us a distinct representation.\n unique_word_pairs.add(tuple(sorted([i, j])))\n for w in unique_words | unique_word_pairs:\n self.index[self.epoch][w] = id_str\n current_freq = self.frequency_map.get(w, 0)\n self.frequency_map[w] = current_freq + 1\n # Get word statistics from hash table\n statistics_present = w in self.stats_map\n if not statistics_present:\n (mu, sigma) = (math.inf, math.inf)\n for h in self.hash_functions:\n c = get_hash(h(), repr(w)) % 2 ** self.bit_count\n if self.buckets[c][\"ewma\"] < mu:\n mu = self.buckets[c][\"ewma\"]\n sigma = self.buckets[c][\"ewmvar\"]\n self.stats_map[w] = (mu, sigma)\n (mu, sigma) = self.stats_map[w]\n # Test for significance threshold\n x = self.frequency_map[w]\n if self._is_frequency_significant(mu, sigma, x):\n self.refinement.append((w, self._get_significance(mu, sigma, x)))\n # if self.refinement:\n # r = self.refinement\n # self.refinement = []\n # return r", "def __init__(self, n, 
sents, gamma=None, addone=True):\n assert n > 0\n self._n = n\n\n if gamma is not None:\n # everything is training data\n train_sents = sents\n else:\n # 90% training, 10% held-out\n m = int(0.45 * len(sents))\n l = int(0.65 * len(sents))\n train_sents = sents[:m] + sents[l:]\n held_out_sents = sents[m:l]\n\n print('Computing counts...')\n count = defaultdict(int)\n while (n >= 0):\n for sent in train_sents:\n s = sent[:] ## En una oracion auxiliar agrego el item de start y end para contarlos\n s.insert(0, \"<s>\")\n s.append(\"</s>\")\n for i in range(len(s) - n + 1):\n count[tuple(s[i:i + n])] += 1\n n -= 1\n count[()] = count[()] - count[('<s>',)] - count[\n ('</s>',)] # Pero no quiero que <s> y </s> sean considerados por ()\n self._count = count\n # WORKed HERE!!\n # COMPUTE COUNTS FOR ALL K-GRAMS WITH K <= N\n\n # compute vocabulary size for add-one in the last step\n self._addone = addone\n if addone:\n print('Computing vocabulary...')\n self._voc = voc = set()\n for sent in sents:\n voc = voc.union(set(sent))\n voc.add('</s>')\n self._voc = voc\n self._V = len(voc)\n\n # compute gamma if not given\n if gamma is not None:\n self._gamma = gamma\n else:\n print('Computing gamma...')\n self._gamma = gamma = 1\n p = self.log_prob(held_out_sents)\n new_gamma = 2\n streak = 1\n growing = True\n turns = 0\n while (turns < 15):\n self._gamma = new_gamma\n np = self.log_prob(held_out_sents)\n gamma = new_gamma\n if (np > p):\n if growing:\n streak += 1\n else:\n turns += 1\n streak = 0\n growing = True\n new_gamma = new_gamma + 2 ** streak\n else:\n if growing:\n turns += 1\n streak = 0\n growing = False\n else:\n streak += 1\n new_gamma = new_gamma - 2 ** streak\n p = np\n self._gamma = new_gamma\n print(self._gamma)", "def get_feature_set_PC(tweet, sentimentvalues):\n features= {\n 'text_length': np.log(len(tweet.text))\n } #ADD ADDITIONAL FEATURES\n if tweet.nrof_sademoticons>0:\n features['sademoticons'] = tweet.nrof_sademoticons\n if tweet.nrof_happyemoticons>0:\n features['happyemoticons'] = tweet.nrof_happyemoticons\n \n for phrase in tweet.tagged_words:\n for word in phrase:\n try:\n tag = word['pos']\n features[tag] = features.get(tag, 0) + 1\n if tag in ADJECTIVES:\n features['adjectives'] = features.get(tag, 0) + 1\n elif tag in ADVERBS: \n features['adverbs'] = features.get(tag, 0) + 1\n elif tag in PRONOUNS:\n features['pronoun'] = 1\n except KeyError:\n continue\n for key in features.keys():\n features[key] = features[key]*1.0\n \n #Add lexical features\n # total polarity score, number of positive words, number of negative words\n pos_score = 0\n neg_score = 0\n nrof_pos_words = 0\n nrof_neg_words = 0\n for word in sentimentvalues.keys():\n if sentimentvalues[word][0]>0:\n nrof_pos_words = nrof_pos_words + 1\n pos_score = pos_score + sentimentvalues[word][0]\n if sentimentvalues[word][1]>0:\n nrof_neg_words = nrof_neg_words + 1\n neg_score = neg_score + sentimentvalues[word][1]\n\n if neg_score>0:\n features['neg_score'] = neg_score+1.0\n if pos_score>0:\n features['pos_score'] = pos_score+1.0\n if nrof_pos_words>0:\n features['positive_words'] = nrof_pos_words*1.0\n if nrof_neg_words>0:\n features['negative_words'] = nrof_neg_words*1.0\n \n return features", "def __init__(self, sents, n, corpus='', D=None):\n\n self.n = n\n self.D = D\n self.corpus = corpus\n self.smoothingtechnique = 'Kneser Ney Smoothing'\n # N1+(·w_<i+1>)\n self._N_dot_tokens_dict = N_dot_tokens = defaultdict(set)\n # N1+(w^<n-1> ·)\n self._N_tokens_dot_dict = N_tokens_dot = defaultdict(set)\n # N1+(· 
w^<i-1>_<i-n+1> ·)\n self._N_dot_tokens_dot_dict = N_dot_tokens_dot = defaultdict(set)\n self.counts = counts = defaultdict(int)\n vocabulary = []\n\n if D is None:\n total_sents = len(sents)\n k = int(total_sents*9/10)\n training_sents = sents[:k]\n held_out_sents = sents[k:]\n training_sents = list(map(lambda x: ['<s>']*(n-1) + x + ['</s>'], training_sents))\n for sent in training_sents:\n for j in range(n+1):\n for i in range(n-j, len(sent) - j + 1):\n ngram = tuple(sent[i: i + j])\n counts[ngram] += 1\n if ngram:\n if len(ngram) == 1:\n vocabulary.append(ngram[0])\n else:\n right_token, left_token, right_kgram, left_kgram, middle_kgram =\\\n ngram[-1:], ngram[:1], ngram[1:], ngram[:-1], ngram[1:-1]\n N_dot_tokens[right_kgram].add(left_token)\n N_tokens_dot[left_kgram].add(right_token)\n if middle_kgram:\n N_dot_tokens_dot[middle_kgram].add(right_token)\n N_dot_tokens_dot[middle_kgram].add(left_token)\n if n - 1:\n counts[('<s>',)*(n-1)] = len(sents)\n self.vocab = set(vocabulary)\n aux = 0\n for w in self.vocab:\n aux += len(self._N_dot_tokens_dict[(w,)])\n self._N_dot_dot_attr = aux\n D_candidates = [i*0.12 for i in range(1, 9)]\n xs = []\n for D in D_candidates:\n self.D = D\n aux_perplexity = self.perplexity(held_out_sents)\n xs.append((D, aux_perplexity))\n xs.sort(key=lambda x: x[1])\n self.D = xs[0][0]\n with open('old-stuff/kneserney_' + str(n) + '_parameters_'+corpus, 'a') as f:\n f.write('Order: {}\\n'.format(self.n))\n f.write('D: {}\\n'.format(self.D))\n f.write('Perplexity observed: {}\\n'.format(xs[0][1]))\n f.write('-------------------------------\\n')\n f.close()\n\n # discount value D provided\n else:\n sents = list(map(lambda x: ['<s>']*(n-1) + x + ['</s>'], sents))\n for sent in sents:\n for j in range(n+1):\n # all k-grams for 0 <= k <= n\n for i in range(n-j, len(sent) - j + 1):\n ngram = tuple(sent[i: i + j])\n counts[ngram] += 1\n if ngram:\n if len(ngram) == 1:\n vocabulary.append(ngram[0])\n else:\n # e.g., ngram = (1,2,3,4,5,6,7,8)\n # right_token = (8,)\n # left_token = (1,)\n # right_kgram = (2,3,4,5,6,7,8)\n # left_kgram = (1,2,3,4,5,6,7)\n # middle_kgram = (2,3,4,5,6,7)\n right_token, left_token, right_kgram, left_kgram, middle_kgram =\\\n ngram[-1:], ngram[:1], ngram[1:], ngram[:-1], ngram[1:-1]\n N_dot_tokens[right_kgram].add(left_token)\n N_tokens_dot[left_kgram].add(right_token)\n if middle_kgram:\n N_dot_tokens_dot[middle_kgram].add(right_token)\n N_dot_tokens_dot[middle_kgram].add(left_token)\n if n-1:\n counts[('<s>',)*(n-1)] = len(sents)\n self.vocab = set(vocabulary)\n\n aux = 0\n for w in self.vocab:\n aux += len(self._N_dot_tokens_dict[(w,)])\n self._N_dot_dot_attr = aux\n\n xs = [k for k, v in counts.items() if v == 1 and n == len(k)]\n ys = [k for k, v in counts.items() if v == 2 and n == len(k)]\n n1 = len(xs)\n n2 = len(ys)\n self.D = n1 / (n1 + 2 * n2)", "def feature_mining(features: np.array,\r\n query_chunk_size: int = 5000,\r\n corpus_chunk_size: int = 100000,\r\n max_pairs: int = 500000,\r\n top_k: int = 100) -> list:\r\n\r\n top_k += 1 # An image has the highest similarity to itself. 
Increase +1 as we are interest in distinct pairs\r\n\r\n # Mine for duplicates\r\n pairs = queue.PriorityQueue()\r\n min_score = -1\r\n num_added = 0\r\n\r\n for corpus_start_idx in range(0, len(features), corpus_chunk_size):\r\n corpus_end_idx = min(corpus_start_idx + corpus_chunk_size, len(features))\r\n for query_start_idx in range(0, len(features), query_chunk_size):\r\n query_end_idx = min(query_start_idx + query_chunk_size, len(features))\r\n\r\n cos_scores = torch.Tensor(\r\n cosine_similarity(features[query_start_idx:query_end_idx],\r\n features[corpus_start_idx:corpus_end_idx])\r\n )\r\n\r\n cos_scores_top_k_values, cos_scores_top_k_idx = torch.topk(cos_scores, min(top_k, len(cos_scores[0])),\r\n dim=1, largest=True, sorted=False)\r\n cos_scores_top_k_values = cos_scores_top_k_values.tolist()\r\n cos_scores_top_k_idx = cos_scores_top_k_idx.tolist()\r\n\r\n for query_itr in range(len(cos_scores)):\r\n for top_k_idx, corpus_itr in enumerate(cos_scores_top_k_idx[query_itr]):\r\n i = query_start_idx + query_itr\r\n j = corpus_start_idx + corpus_itr\r\n\r\n if i != j and cos_scores_top_k_values[query_itr][top_k_idx] > min_score:\r\n pairs.put((cos_scores_top_k_values[query_itr][top_k_idx], i, j))\r\n num_added += 1\r\n\r\n if num_added >= max_pairs:\r\n entry = pairs.get()\r\n min_score = entry[0]\r\n\r\n # Get the pairs\r\n added_pairs = set() # Used for duplicate detection\r\n pairs_list = []\r\n while not pairs.empty():\r\n score, i, j = pairs.get()\r\n sorted_i, sorted_j = sorted([i, j])\r\n\r\n if sorted_i != sorted_j and (sorted_i, sorted_j) not in added_pairs:\r\n added_pairs.add((sorted_i, sorted_j))\r\n pairs_list.append([score, i, j])\r\n\r\n return pairs_list", "def _create_sparse_train_and_test(ratings, n_users, n_items):\n \n # pick a random set of data as testing data, sorted ascending\n test_set_size = len(ratings) / TEST_SET_RATIO\n test_set_idx = np.random.choice(xrange(len(ratings)), size=test_set_size, replace=False)\n test_set_idx = sorted(test_set_idx)\n \n # use the remaining data to create a training set\n ts_ratings = ratings[test_set_idx]\n tr_ratings = np.delete(ratings, test_set_idx, axis=0)\n \n # create training and test matrices as coo_matrix\n u_tr, i_tr, r_tr = zip(*tr_ratings)\n tr_sparse = coo_matrix((r_tr, (u_tr, i_tr)), shape=(n_users, n_items))\n u_ts, i_ts, r_ts = zip(*ts_ratings)\n test_sparse = coo_matrix((r_ts, (u_ts, i_ts)), shape=(n_users, n_items))\n \n return tr_sparse, test_sparse", "def prepare_lexicons(self, topnwords = 80, distance_cutoff = 0.45):\n\n model = self.train_word2vec()\n\n\n # 10 topics\n topic_dict = {0: 'academics'\n , 1: 'career'\n , 2: 'commute'\n , 3: 'diversity'\n , 4: 'community'\n , 5: 'extracurricular'\n , 6: 'facilities'\n , 7: 'finance'\n , 8: 'housing'\n , 9: 'wellness'\n }\n\n # Some important words that should be included under each topic\n topics = [['academic', 'exam', 'study', 'learn', 'education', 'class', 'course', 'grade', 'assignment'\n , 'degree', 'research', 'elective'\n , 'professor', 'project', 'scholarship', 'knowledge']\n , ['career', 'job', 'coop', 'employment']\n , ['commute', 'skytrain', 'transport', 'commuter']\n , ['diversity', 'diverse', 'background']\n , ['community', 'welcome', 'support', 'social', 'friend', 'fun', 'network', 'home']\n , ['extracurricular', 'club', 'sport', 'activity']\n , ['facility', 'infrastructure', 'food', 'building', 'gym']\n , ['finance', 'tuition', 'expensive']\n , ['housing', 'live', 'residence']\n , ['wellness', 'health', 'stress', 'depression', 'anxiety']]\n\n # 
For each topic, collect the words most similar to them in a list of lists\n topic_lexicons = []\n\n # Loop through the ten topics\n for topic in topics:\n\n temp_words = []\n\n # Loop through each word that we have given manually under each topic\n for word in topic:\n\n # Consider most similar words according to some cutoffs\n similar_words = model.wv.most_similar(positive = word, topn = topnwords)\n temp_words1 = [x for (x,y) in similar_words if y >= distance_cutoff]\n\n temp_words = temp_words + temp_words1\n\n temp_words = temp_words + topic\n\n\n # Take unique words, there might be duplicates\n topic_lexicons.append(list(set(temp_words)))\n\n # Some manual adjustments\n # Remove 'commute' from other topic\n topic_lexicons[8].remove('commute')\n\n return topic_lexicons", "def __init__(words, pred_index):", "def srp_matrix(cls, words, ndims, _hashfunc=city_64(0)):\n multiplier = (ndims - 1) // 64 + 1\n hashes = [\n list(map(_hashfunc, ['{}_{}'.format(w, i)\n for i in range(multiplier)]))\n for w in words\n ]\n\n # Given a `multipier` value of 5, `hashes` is really a V x 5\n # array of 64-bit integers, where V is the vocabulary size...\n\n hash_arr = numpy.array(hashes, dtype=numpy.uint64)\n\n # ...but we could also think of it as a V x 40 array of bytes...\n\n hash_arr = hash_arr.view(dtype=numpy.uint8)\n\n # ...or even as an array of bits, where every word is represented\n # by 320 bits...\n\n hash_arr = numpy.unpackbits(hash_arr.ravel()).reshape(-1,\n 64 * multiplier)\n\n # ...or as an array of floating point values, all equal to either\n # 1.0 or 0.0, and truncated to give a final array of V x ndims.\n\n return (hash_arr.astype(numpy.float64) * 2 - 1)[:, :ndims]", "def train(self, sentences):\n\n dictionary = Dictionary(sentences)\n\n ft = Word2Vec(sentences, workers=cpu_count(), min_count=5, size=300, seed=12345)\n\n index = WordEmbeddingSimilarityIndex(ft.wv)\n matrix = SparseTermSimilarityMatrix(index, dictionary)\n\n self.dictionary = dictionary\n self.ft = ft\n self.matrix = matrix", "def get_user_feature_matrix(user_dict, user_index, aspect_index, N):\n result = np.zeros((len(user_index), len(aspect_index)))\n for key in user_dict.keys():\n index_user = user_index[key]\n user_reviews = user_dict[key]\n count_dict = {}\n for review in user_reviews:\n feature = review[0]\n if feature not in aspect_index:\n continue\n aspect = aspect_index[feature]\n if aspect not in count_dict:\n count_dict[aspect] = 0;\n count_dict[aspect] += 1\n for aspect in count_dict.keys():\n count = count_dict[aspect]\n result[index_user, aspect] = 1 + (N - 1) * (2 / (1 + exp(-count)) - 1)\n return result", "def get_feature_set_SC(tweet, sentimentvalues):\n pos_tag_freq = {}\n additional_freq = {}\n for phrase in tweet.tagged_words:\n for word in phrase:\n try:\n tag = word['pos']\n pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# if tag=='PRtinf':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='ADJS':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='ADJ':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='NP':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='DET':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='P':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n if tag in ADJECTIVES:\n additional_freq['adjectives'] = additional_freq.get(tag, 0) + 1\n elif tag in ADVERBS: \n additional_freq['adverbs'] = additional_freq.get(tag, 0) + 1\n elif tag in PRONOUNS:\n additional_freq['pronoun'] = 1\n except KeyError:\n 
continue\n for key in pos_tag_freq.keys():\n pos_tag_freq[key] = pos_tag_freq[key]*1.0\n #number of adjectives in sentence, number of adverbs in sentence(except ikke), pronoun in sentence(binary) \n #Number of exclamation marks, number of emoticons,\n emoticons = tweet.nrof_happyemoticons+tweet.nrof_sademoticons\n if emoticons>0:\n additional_freq['emoticons'] = emoticons*1.0\n if tweet.nrof_exclamations>0:\n additional_freq['exclamations'] = tweet.nrof_exclamations*1.0\n \n #Add lexicon values\n #total subjectivity score from word polarities, total objectivity score, number of subjective words, number of objective words, e\n sub_score = 0.0\n obj_score = 0.0\n nrof_subwords = 0\n nrof_objwords = 0\n for word in sentimentvalues.keys():\n if sentimentvalues[word][0]>0:\n sub_score = sub_score + sentimentvalues[word][0]\n nrof_subwords = nrof_subwords + 1\n if sentimentvalues[word][1]>0:\n sub_score = sub_score + sentimentvalues[word][1]\n nrof_subwords = nrof_subwords + 1\n if sentimentvalues[word][2]>0:\n obj_score = obj_score + sentimentvalues[word][2]\n nrof_objwords = nrof_objwords + 1\n if sub_score>0:\n additional_freq[\"sub_score\"] = sub_score+1.0\n if obj_score>0:\n additional_freq[\"obj_score\"] = obj_score+1.0\n if nrof_subwords>0:\n additional_freq[\"subjective_words\"] = nrof_subwords*1.0\n if nrof_objwords>0:\n additional_freq[\"objective_words\"] = nrof_objwords*1.0\n \n #Concatenate the dicts\n features= dict(pos_tag_freq.items() + additional_freq.items())\n \n return features", "def optimalize(): \n start = time()\n max = 0\n maxn=2\n maxm=3\n check = [(n,m) for n in range(24,30) for m in range(3,20)]\n dict = {}\n print \"start optimalization of: bigram-features,uniqueness\"\n for n,m in check:\n score=0\n print \">lem>>n(uniqueness):\"+str(n)\n print \">lem>>m(commonness):\"+str(m)\n wrds = common_but_unique(ngrams_dict(1,authors,compactcorpus,n,False),m)\n bigrams = common_but_unique(ngrams_dict(2,authors,compactcorpus,n,False),m)\n trigrams = common_but_unique(ngrams_dict(3,authors,compactcorpus,n,False),m)\n #pos_feat = [\"wrd:\"+wrd+\">\"+str(num) for wrd in wrds for num in range(0,1)]\n pos_feat = [\"bi:(\"+str(bi[0])+\",\"+str(bi[1])+\")>\"+str(num) for bi in bigrams for num in range(0,1)] + [\"wrd:\"+wrd+\">\"+str(num) for wrd in wrds for num in range(0,1)] + [\"tri:(\"+str(tri[0])+\",\"+str(tri[1])+\",\"+str(tri[2])+\")>\"+str(num) for tri in trigrams for num in range(0,1)]\n\n print \"number of features AFTER selection:\" + str(len(pos_feat))\n for x in range(0,4):\n data = split_train_test_data(authors, corp,45)\n train_set = [(feat_dict(pos_feat,d), c) for (d, c) in data[\"train\"]]\n train_set = [(feat_dict(pos_feat,d), c) for (d, c) in data[\"train\"]]\n test_set = [(feat_dict(pos_feat,d), c) for (d, c) in data[\"test\"]]\n classifier1 = NaiveBayesClassifier.train(train_set)\n acc = nltk.classify.accuracy(classifier1,test_set)\n print \"accuracy:\"+str(acc)\n score +=acc\n print \"time elapsed: \"+str(time()-start)\n print \"score(\" + str(n) +\")=\"+str(score/4)\n classifier1.show_most_informative_features(8)\n dict[(n,m)]=(score/4)\n if(score/4)>max:\n max = (score/4)\n maxn =n\n maxm = m\n print \"max score=\"+str(max)\n print \"where n = \"+str(maxn)\n print \"where m = \"+str(maxm)\n print \"time:\"+str(time()-start)\n writetofile(dict,\"optimalizedict_commonwrdsandbigrams_latest_lem.pkl\")", "def train_ngrams(dataset):\n trigram_counts = dict()\n bigram_counts = dict()\n unigram_counts = dict()\n token_count = 0\n ### YOUR CODE HERE\n raise 
NotImplementedError\n ### END YOUR CODE\n return trigram_counts, bigram_counts, unigram_counts, token_count", "def get_similarities(self, query, n=5):\n scores = self.get_scores(query)\n rank_n = np.argsort(scores)[::-1]\n if n > 0:\n rank_n = rank_n[:n]\n return [self.corpus[i] for i in rank_n]", "def weigthIndex(index, nPages): \n weighted_index = defaultdict(list)\n for term, docs in index.items():\n df = len(docs)\n for url, count in docs:\n weight = tf_idf(count, nPages, df)\n weighted_index[term].append((url, weight))\n return weighted_index", "def __init__(self, corpus):\n self.unigramCounts = collections.defaultdict(lambda: 0)\n self.bigramCounts = collections.defaultdict(lambda: 0)\n self.trigramCounts = collections.defaultdict(lambda: 0)\n self.followingWords = collections.defaultdict(lambda: set())\n self.precedingWords = collections.defaultdict(lambda: set())\n self.total = 0\n self.discount = 0.75\n self.train(corpus)", "def train(self, iterable):\n for ngram in generate_ngrams(iterable, self.n + 1):\n self.markov_dict.setdefault(ngram[: self.n], Counter()).update([ngram[self.n]])\n self.prob_dict.update([ngram[: self.n]])", "def personalization(prediction, n):\n # prediction\n # n top n recommendation\n\n top_n = get_top_n(prediction, n)\n\n rec_dict = {}\n for uid, user_ratings in top_n.items():\n rec_dict[uid] = [iid for (iid, _) in user_ratings]\n\n rec_user_ls = [pred[0] for pred in prediction]\n rec_item_ls = [pred[1] for pred in prediction]\n\n unique_rec_user_ls = np.unique(rec_user_ls)\n unique_rec_item_ls = np.unique(rec_item_ls)\n\n # assign each item with index number\n unique_rec_item_dict = {item: ind for ind,\n item in enumerate(unique_rec_item_ls)}\n\n n_unique_rec_user = len(unique_rec_user_ls)\n n_unique_rec_item = len(unique_rec_item_ls)\n\n # recommended user item matrix\n rec_matrix = np.zeros(shape=(n_unique_rec_user, n_unique_rec_item))\n\n # represent recommended item for each user as binary 0/1\n for user in range(n_unique_rec_user):\n # get userid\n user_id = unique_rec_user_ls[user]\n # get rec item list\n item_ls = rec_dict[user_id]\n\n for item_id in item_ls:\n # get item index\n item = unique_rec_item_dict[item_id]\n rec_matrix[user, item] = 1\n\n # calculate cosine similarity matrix across all user recommendations\n similarity = cosine_similarity(X=rec_matrix, dense_output=False)\n # calculate average of upper triangle of cosine matrix\n upper_right = np.triu_indices(similarity.shape[0], k=1)\n # personalization is 1-average cosine similarity\n score = 1 - np.mean(similarity[upper_right])\n return score", "def get_feature_set_PC2(tweet, sentimentvalues):\n features= {\n 'text_length': np.log(len(tweet.text))\n } #ADD ADDITIONAL FEATURES\n if tweet.nrof_sademoticons>0:\n features['sademoticons'] = tweet.nrof_sademoticons\n if tweet.nrof_happyemoticons>0:\n features['happyemoticons'] = tweet.nrof_happyemoticons\n \n for phrase in tweet.tagged_words:\n for word in phrase:\n try:\n tag = word['pos']\n features[tag] = features.get(tag, 0) + 1\n if tag in ADJECTIVES:\n features['adjectives'] = features.get(tag, 0) + 1\n elif tag in ADVERBS: \n features['adverbs'] = features.get(tag, 0) + 1\n elif tag in PRONOUNS:\n features['pronoun'] = 1\n except KeyError:\n continue\n for key in features.keys():\n features[key] = features[key]*1.0\n \n #Add lexical features\n # total polarity score, number of positive words, number of negative words\n pos_score = sentimentvalues[0]\n neg_score = sentimentvalues[1]\n\n if pos_score>0:\n features['pos_score'] = 
pos_score+1.0\n if neg_score>0:\n features['neg_score'] = neg_score+1.0\n \n return features", "def ngram_detection(self, min_topic_count=5, min_text_id_count=4):\n\n for text_id, text in self.texts.items():\n # single-word topics act a bit different (no zips or comprehensions)\n # store data in self.topics, not zip_grams\n for word in text['doc']:\n word_lemma = word.text.lower() if word.lemma_ == '-PRON-' else word.lemma_\n\n if {word.text}.intersection(self.punct) or {word.lemma_}.intersection(self.stop_words):\n continue\n\n if not (word.pos in self.nouns or word.ent_type in self.entities):\n continue\n\n if word_lemma in self.topics:\n self.topics[word_lemma][\"count\"] += 1\n self.topics[word_lemma][\"textIDs\"] |= {text_id}\n self.topics[word_lemma][\"verbatims\"] |= {word.text.lower()}\n else:\n self.topics[word_lemma] = {\"name\": word_lemma,\n \"count\": 1,\n \"textIDs\": {text_id},\n \"verbatims\": {word.text.lower()},\n \"subtopics\": {}}\n\n # Populate self.ngrams and self.topics\n for text_id, text in self.texts.items():\n doc = text['doc']\n\n # Find pentagrams - ngrams with 5 words\n for ngram in zip(doc, doc[1:], doc[2:], doc[3:], doc[4:]):\n self._ngram_counter(ngram, 5, text_id, doc)\n\n # Find pentagrams - ngrams with 4 words\n for ngram in zip(doc, doc[1:], doc[2:], doc[3:]):\n self._ngram_counter(ngram, 4, text_id, doc)\n\n for ngram in zip(doc, doc[1:], doc[2:]):\n self._ngram_counter(ngram, 3, text_id, doc)\n\n for ngram in zip(doc, doc[1:]):\n self._ngram_counter(ngram, 2, text_id, doc)\n\n\n # Add text_id_count (the number of texts that the topic occurs in; so a topic might occur 50 times,\n # but it's only mentioned in 3 different texts, we'd show 3.\n for _, topic in self.topics.items():\n topic['textIDCount'] = len(topic['textIDs'])\n for _, ngram in self.ngrams.items():\n ngram['textIDCount'] = len(ngram['textIDs'])\n\n # Eliminate rarely occurring topics and ngrams.\n self.topics = {k: v for k, v in self.topics.items() if\n v['textIDCount'] >= min_text_id_count and v['count'] >= min_topic_count}\n self.ngrams = {k: v for k, v in self.ngrams.items() if\n v['textIDCount'] >= min_text_id_count}\n\n # Loop through each ngram pair: outer loop is all ngrams, inner loop is all ngrams\n for ngram_lemma, ngram in self.ngrams.items():\n for ngram_plus_lemma, ngram_plus in self.ngrams.items():\n # only stay in this loop if the inner ngram is one word longer than the outer loop and if the\n # inner loop lemma contains the outer group lemma (avoid partial word matches like man in woman)\n # r'\\b' + ngram_lemma + r'\\b' --> does the ngram lemma fit in ngram_plus lemma (\\b is word boundary)\n if ngram['n'] + 1 != ngram_plus['n']:\n continue\n\n if not re.search(r'\\b' + ngram_lemma + r'\\b', ngram_plus_lemma):\n continue\n\n # Is the absolute count of occurrences and the count of text_id occurrences both big enough to use it\n # instead of the other loop?\n if ngram_plus['count'] + 3 >= ngram['count'] and ngram_plus['textIDCount'] + 3 >= ngram['textIDCount']:\n # TODO: Is this the right action (deleting shorter, but not much more explanatory) phrase?\n # TODO: Is this enough? 
Or will I end up double explaining things sometimes?\n ngram['count'] = -1\n\n # Eliminate newly demoted items\n self.ngrams = {ngram_lemma: ngram for ngram_lemma, ngram in self.ngrams.items() if ngram['count'] > 0}", "def get_feature_set_SB(tweet):\n #pos-tag frequencies\n# print \"Tagged words in tweet: \", tweet.tagged_words\n pos_tag_freq = {}\n additional_freq = {}\n for phrase in tweet.tagged_words:\n for word in phrase:\n try:\n tag = word['pos']\n pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# if tag=='PRtinf':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='ADJS':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='ADJ':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='NP':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='DET':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='P':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n if tag in ADJECTIVES:\n additional_freq['adjectives'] = additional_freq.get(tag, 0) + 1\n elif tag in ADVERBS: \n additional_freq['adverbs'] = additional_freq.get(tag, 0) + 1\n elif tag in PRONOUNS:\n additional_freq['pronoun'] = 1\n except KeyError:\n continue\n# print \"Tag frequencies: \", pos_tag_freq\n for key in pos_tag_freq.keys():\n pos_tag_freq[key] = pos_tag_freq[key]*1.0\n #number of adjectives in sentence, number of adverbs in sentence(except ikke), pronoun in sentence(binary) \n #Number of exclamation marks, number of emoticons,\n emoticons = tweet.nrof_happyemoticons+tweet.nrof_sademoticons\n if emoticons>0:\n additional_freq['emoticons'] = emoticons*1.0\n if tweet.nrof_exclamations>0:\n additional_freq['exclamations'] = tweet.nrof_exclamations*1.0\n \n# print \"Additional frequencies: \", additional_freq\n# raw_input(\"Continue?\")\n \n #Concatenate the dicts\n features= dict(pos_tag_freq.items() + additional_freq.items())\n# print \"All features: \", features\n# raw_input(\"Continue?\")\n return features", "def _make_train(data, smooth_factor):\n train_matrix = data_to_sparse(data).tolil()\n user_counts = np.array(train_matrix.sum(axis=1))[:, 0]\n train_matrix[np.where(user_counts == 0)] = smooth_factor\n train_matrix = normalize(train_matrix, 'l1', axis=1)\n return train_matrix.tocsr()", "def init_ngrams(self):\n self.wv.ngrams = {}\n all_ngrams = []\n self.wv.syn0 = np.zeros((len(self.wv.vocab), self.vector_size), dtype=REAL)\n\n for w, vocab in self.wv.vocab.items():\n all_ngrams += compute_ngrams(w, self.wv.min_n, self.wv.max_n)\n self.wv.syn0[vocab.index] += np.array(self.wv.syn0_ngrams[vocab.index])\n\n all_ngrams = set(all_ngrams)\n self.num_ngram_vectors = len(all_ngrams)\n ngram_indices = []\n for i, ngram in enumerate(all_ngrams):\n ngram_hash = ft_hash(ngram)\n ngram_indices.append(len(self.wv.vocab) + ngram_hash % self.bucket)\n self.wv.ngrams[ngram] = i\n self.wv.syn0_ngrams = self.wv.syn0_ngrams.take(ngram_indices, axis=0)\n\n ngram_weights = self.wv.syn0_ngrams\n\n logger.info(\n \"loading weights for %s words for fastText model from %s\",\n len(self.wv.vocab), self.file_name\n )\n\n for w, vocab in self.wv.vocab.items():\n word_ngrams = compute_ngrams(w, self.wv.min_n, self.wv.max_n)\n for word_ngram in word_ngrams:\n self.wv.syn0[vocab.index] += np.array(ngram_weights[self.wv.ngrams[word_ngram]])\n\n self.wv.syn0[vocab.index] /= (len(word_ngrams) + 1)\n logger.info(\n \"loaded %s weight matrix for fastText model from %s\",\n self.wv.syn0.shape, self.file_name\n )", "def 
data_mining_features(index,input_string_x1,input_string_x2,vocab_word2index,word_vec_fasttext_dict,word_vec_word2vec_dict,tfidf_dict,n_gram=8):\r\n input_string_x1=input_string_x1.decode(\"utf-8\")\r\n input_string_x2 = input_string_x2.decode(\"utf-8\")\r\n #1. get blue score vector\r\n feature_list=[]\r\n #get blue score with n-gram\r\n for i in range(n_gram):\r\n x1_list=split_string_as_list_by_ngram(input_string_x1,i+1)\r\n x2_list = split_string_as_list_by_ngram(input_string_x2, i + 1)\r\n blue_score_i_1 = compute_blue_ngram(x1_list,x2_list)\r\n blue_score_i_2 = compute_blue_ngram(x2_list,x1_list)\r\n feature_list.append(blue_score_i_1)\r\n feature_list.append(blue_score_i_2)\r\n\r\n #2. get length of questions, difference of length\r\n length1=float(len(input_string_x1))\r\n length2=float(len(input_string_x2))\r\n length_diff=(float(abs(length1-length2)))/((length1+length2)/2.0)\r\n feature_list.append(length_diff)\r\n\r\n #3. how many words are same, how many words are unique\r\n sentence_diff_overlap_features_list=get_sentence_diff_overlap_pert(index,input_string_x1,input_string_x2)\r\n feature_list.extend(sentence_diff_overlap_features_list)\r\n\r\n #4. question 1,2 start with how/why/when\r\n #how_why_feature_list=get_special_start_token(input_string_x1,input_string_x2,special_start_token)\r\n #print(\"how_why_feature_list:\",how_why_feature_list)\r\n #feature_list.extend(how_why_feature_list)\r\n\r\n #5.edit distance\r\n edit_distance=float(edit(input_string_x1, input_string_x2))/30.0\r\n feature_list.append(edit_distance)\r\n\r\n #6.cos distance from sentence embedding\r\n x1_list=token_string_as_list(input_string_x1, tokenize_style='word')\r\n x2_list = token_string_as_list(input_string_x2, tokenize_style='word')\r\n distance_list_fasttext = cos_distance_bag_tfidf(x1_list, x2_list, word_vec_fasttext_dict, tfidf_dict)\r\n distance_list_word2vec = cos_distance_bag_tfidf(x1_list, x2_list, word_vec_word2vec_dict, tfidf_dict)\r\n #distance_list2 = cos_distance_bag_tfidf(x1_list, x2_list, word_vec_fasttext_dict, tfidf_dict,tfidf_flag=False)\r\n #sentence_diffence=np.abs(np.subtract(sentence_vec_1,sentence_vec_2))\r\n #sentence_multiply=np.multiply(sentence_vec_1,sentence_vec_2)\r\n feature_list.extend(distance_list_fasttext)\r\n feature_list.extend(distance_list_word2vec)\r\n #feature_list.extend(list(sentence_diffence))\r\n #feature_list.extend(list(sentence_multiply))\r\n return feature_list", "def _get_sparse_data(self):\n data = self._normalize_dataset()\n\n n_users = data.user_id.unique().shape[0]\n n_movies = data.movie_id.unique().shape[0]\n\n ratings = np.zeros((n_users, n_movies))\n\n for row in data.itertuples():\n ratings[row[1]-1, row[2]-1] = row[3]\n return ratings", "def _generate_distribution(self, order, split_words):\n distribution = {}\n heads = set()\n\n for tweet in self.tweets:\n if not split_words:\n tweet = tweet.split()\n markov(tweet, order, distribution, heads)\n\n return distribution, heads", "def build_feature_trajectories(tweets, firstEpochTime, lastEpochTime, bucketSize):\n\n # The tweets are represented as a list of dictionaries\n # T is the defined period\n\n # delta\n T = (lastEpochTime - firstEpochTime) // bucketSize\n\n # local Term-Frequency for each word feature\n # map of word feature to list, where the list is having T elements\n TFt = {}\n\n # global term frequency, total number of documents containing each feature\n TF = {}\n\n #feature-documentlists\n Mf = {}\n\n # number of documents for day t\n Nt = [0] * (T + 1)\n\n # total number of 
documents\n N = len(tweets)\n\n # iterate over the tweets\n tweetID = 0\n for tweet in tweets:\n tweetID+=1\n\n # convert the timestamp\n t = (int(tweet['createdAtAsLong']) - firstEpochTime) // bucketSize\n\n # increase the number of documents for day t\n Nt[t] += 1\n\n for word in tweet['text']:\n if word == \"\":\n continue\n else:\n # if the word does not exist\n if word not in TFt:\n TFt[word] = [0] * (T + 1)\n TF[word] = 0\n Mf[word] = []\n\n # increase the frequency of the current word for day t\n TFt[word][t] += 1\n TF[word] += 1\n Mf[word].append(tweetID)\n\n featTraj = {}\n\n for key in TFt:\n featTraj[key] = [0] * (T + 1)\n for idx, val in enumerate(TFt[key]):\n try:\n featTraj[key][idx] = (float(val) / Nt[idx]) * math.log(float(N) / TF[key])\n except:\n print (\"NO DOCUMENTS ON DAY \", idx)\n return featTraj, Mf", "def _initialize(self):\n self.VT = len(self.corpus.topicDictionary)\n self.VO = len(self.corpus.opinionDictionary)\n self.DT = len(self.corpus)\n self.DO = np.array([len(p.opinionCorpus)\n for p in self.corpus.perspectives], dtype=np.int)\n self.maxDocLengthT = max([p.topicCorpus.maxDocLength\n for p in self.corpus.perspectives])\n self.maxDocLengthO = np.array([p.opinionCorpus.maxDocLength\n for p in self.corpus.perspectives],\n dtype=np.int)\n\n # topics\n self.z = np.zeros((self.DT, self.maxDocLengthT), dtype=np.int)\n self.ndk = np.zeros((self.DT, self.nTopics), dtype=np.int)\n self.nkw = np.zeros((self.nTopics, self.VT), dtype=np.int)\n self.nk = np.zeros(self.nTopics, dtype=np.int)\n self.ntd = np.zeros(self.DT, dtype=np.float)\n\n # opinions\n self.x = np.array([np.zeros((self.DO[i], self.maxDocLengthO[i]),\n dtype=np.int)\n for i, p in enumerate(self.corpus.perspectives)])\n self.nrs = np.zeros((self.nPerspectives, self.nTopics, self.VO),\n dtype=np.int)\n self.ns = np.zeros((self.nPerspectives, self.nTopics), dtype=np.int)\n\n # loop over the words in the corpus\n for d, persp, d_p, doc in self.corpus:\n for w_id, i in self.corpus.words_in_document(doc, 'topic'):\n topic = np.random.randint(0, self.nTopics)\n self.z[d, i] = topic\n self.ndk[d, topic] += 1\n self.nkw[topic, w_id] += 1\n self.nk[topic] += 1\n self.ntd[d] += 1\n\n for w_id, i in self.corpus.words_in_document(doc, 'opinion'):\n opinion = np.random.randint(0, self.nTopics)\n self.x[persp][d_p, i] = opinion\n self.nrs[persp, opinion, w_id] += 1\n self.ns[persp, opinion] += 1\n logger.debug('Finished initialization.')", "def get_user_feature_matrix_p(user_dict, user_index, aspect_index, N, popularity, A_dense, Polarity):\n result = np.zeros((len(user_index), len(aspect_index)))\n for key in user_dict.keys():\n index_user = user_index[key]\n user_reviews = user_dict[key]\n count_dict = {}\n max = 0\n min = 1000\n for review in user_reviews:\n feature = review[0]\n if feature not in aspect_index:\n continue\n aspect = aspect_index[feature]\n if aspect not in count_dict:\n count_dict[aspect] = 0;\n if Polarity == False:\n count_dict[aspect] += 1\n else:\n count_dict[aspect] += review[1]\n for aspect in count_dict.keys():\n count = count_dict[aspect]\n if count > max:\n max = count\n if count < min:\n min = count\n for aspect in count_dict.keys():\n count = count_dict[aspect]\n result[index_user, aspect] = (((count - min)/(max - min))*5)\n\n if len(popularity) > 0:\n col = np.zeros((len(result), 1))\n result = np.append(result, col, axis=1)\n for i in range(len(result)):\n items = A_dense[A_dense[:, 0] == i][:, 1]\n items = items.astype(int)\n result[i, len(result[1]) - 1] = np.mean(popularity[items, 
1])\n return result", "def __init__(self, corpus):\n self.unigramCounts = collections.defaultdict(lambda: 0)\n self.totalCount = 0\n self.zeroCount = 0\n self.train(corpus)", "def best_analogies_dist_thresh(self, v, thresh=1, topn=500, max_words=50000):\n vecs, vocab = self._vecs[:max_words], self._words[:max_words]\n self.compute_neighbors_if_necessary(thresh, max_words)\n rows, cols, vecs = self._neighbors\n scores = vecs.dot(v / np.linalg.norm(v))\n pi = np.argsort(-abs(scores))\n\n ans = []\n usedL = set()\n usedR = set()\n for i in pi:\n if abs(scores[i]) < 0.001:\n break\n row = rows[i] if scores[i] > 0 else cols[i]\n col = cols[i] if scores[i] > 0 else rows[i]\n if row in usedL or col in usedR:\n continue\n usedL.add(row)\n usedR.add(col)\n ans.append((vocab[row], vocab[col], abs(scores[i])))\n if len(ans) == topn:\n break\n\n return ans", "def create_preference_weighted_similarity_matrix(self):\n # We only need to compare the distances to points within the epsilon shell (to determine if a point is a core point)\n # Since the subspace scaling factor kappa is >>1 (and not <1), no distances to other points will be needed for \n # the core point evaluation\n\n # get points in epsilon shell: attententio point itself is not in neigh_ind list\n _, neigh_ind = self.neigbors_clf.radius_neighbors(radius=self.epsilon)\n row, col, pwsim = [], [], []\n for i, ith_neigh_ind in enumerate(neigh_ind):\n # Calculate preference weighted similarity measure with point and neighbors in eps shell\n sq_diffs = np.square(self.data[ith_neigh_ind,:] - self.data[i,:])\n sum_weighted_sq_diffs = np.inner(self.subspace_preference_vectors[i,:], sq_diffs)\n pwsim_ith = np.sqrt(sum_weighted_sq_diffs)\n \n # Info for sparse matrix\n pwsim.extend(pwsim_ith.tolist()) # Data\n row.extend([i]*(pwsim_ith.shape[0])) # ith Row \n col.extend(ith_neigh_ind.tolist()) # column info\n\n # Construct sparse matrix with data, row, and column info\n A = csr_matrix((pwsim, (row, col)), shape=(self.nb_points, self.nb_points))\n # Create symmetric version: take the elementwise maximum of A and its transpose A.T\n transpose_is_bigger = A.T>A\n A = A - A.multiply(transpose_is_bigger) + (A.T).multiply(transpose_is_bigger)\n \n return A", "def __init__(self, corpus):\n self.unigramCounts = collections.defaultdict(lambda: 0)\n self.bigramCounts = collections.defaultdict(lambda: 0)\n self.total = 0\n self.train(corpus)", "def tweet_df(n):\n # Retrieve the tweet contents\n first_tweet = get_value(df_1t, n)\n second_tweet = get_value(df_2t, n) \n third_tweet = get_value(df_3t, n)\n fourth_tweet = get_value(df_4t, n)\n fifth_tweet = get_value(df_5t, n)\n sixth_tweet = get_value(df_6t, n)\n seventh_tweet = get_value(df_7t, n)\n eighth_tweet = get_value(df_8t, n)\n nineth_tweet = get_value(df_9t, n)\n tenth_tweet = get_value(df_10t, n) \n \n # Sentiment of each tweet\n sa_first_tweet = sentiment_analyzer_scores(first_tweet)\n sa_second_tweet = sentiment_analyzer_scores(second_tweet)\n sa_third_tweet = sentiment_analyzer_scores(third_tweet)\n sa_fourth_tweet = sentiment_analyzer_scores(fourth_tweet)\n sa_fifth_tweet = sentiment_analyzer_scores(fifth_tweet)\n sa_sixth_tweet = sentiment_analyzer_scores(sixth_tweet)\n sa_seventh_tweet = sentiment_analyzer_scores(seventh_tweet)\n sa_eighth_tweet = sentiment_analyzer_scores(eighth_tweet)\n sa_nineth_tweet = sentiment_analyzer_scores(nineth_tweet)\n sa_tenth_tweet = sentiment_analyzer_scores(tenth_tweet)\n \n # Compute the compound score for obtaining a sentiment class\n compound_score_first_tweet = 
sentiment_logic((list(sa_first_tweet.values())[list(sa_first_tweet.keys()).index('compound')] ))\n compound_score_second_tweet = sentiment_logic((list(sa_second_tweet.values())[list(sa_second_tweet.keys()).index('compound')] )) \n compound_score_third_tweet = sentiment_logic((list(sa_third_tweet.values())[list(sa_third_tweet.keys()).index('compound')] ))\n compound_score_fourth_tweet = sentiment_logic((list(sa_fourth_tweet.values())[list(sa_fourth_tweet.keys()).index('compound')] ))\n compound_score_fifth_tweet = sentiment_logic((list(sa_fifth_tweet.values())[list(sa_fifth_tweet.keys()).index('compound')] ))\n compound_score_sixth_tweet = sentiment_logic((list(sa_sixth_tweet.values())[list(sa_sixth_tweet.keys()).index('compound')] ))\n compound_score_seventh_tweet = sentiment_logic((list(sa_seventh_tweet.values())[list(sa_seventh_tweet.keys()).index('compound')] ))\n compound_score_eighth_tweet = sentiment_logic((list(sa_eighth_tweet.values())[list(sa_eighth_tweet.keys()).index('compound')] ))\n compound_score_nineth_tweet = sentiment_logic((list(sa_nineth_tweet.values())[list(sa_nineth_tweet.keys()).index('compound')] ))\n compound_score_tenth_tweet = sentiment_logic((list(sa_tenth_tweet.values())[list(sa_tenth_tweet.keys()).index('compound')] ))\n \n # Create a new temporary dataframe for the tweet contents and sentiment\n compound_score_list = [compound_score_first_tweet, compound_score_second_tweet,\n compound_score_third_tweet, compound_score_fourth_tweet,\n compound_score_fifth_tweet, compound_score_sixth_tweet, \n compound_score_seventh_tweet, compound_score_eighth_tweet,\n compound_score_nineth_tweet, compound_score_tenth_tweet]\n \n \n first_col = [first_tweet, second_tweet,\n third_tweet, fourth_tweet,\n fifth_tweet, sixth_tweet,\n seventh_tweet, eighth_tweet,\n nineth_tweet, tenth_tweet]\n \n second_col = compound_score_list\n \n tmp_df = pd.DataFrame(data = {'Tweets' : first_col, \n 'Sentiment' : second_col})\n \n \n return tmp_df.to_json(date_format = 'iso', orient = 'split')", "def generate_candidates(mention_texts: List[str], k: int, tfidf_vectorizer: TfidfVectorizer,\n ann_index: FloatIndex, ann_concept_id_list: List[int]) -> List[List[int]]:\n print(f'Generating candidates for {len(mention_texts)} mentions')\n tfidfs = tfidf_vectorizer.transform(mention_texts)\n start_time = datetime.datetime.now()\n\n # `ann_index.knnQueryBatch` crashes if one of the vectors is all zeros.\n # `nmslis_knn_with_zero_vectors` is a wrapper around `ann_index.knnQueryBatch` that addresses this issue.\n neighbors = nmslis_knn_with_zero_vectors(tfidfs, k, ann_index)\n end_time = datetime.datetime.now()\n total_time = end_time - start_time\n print(f'Finding neighbors took {total_time.total_seconds()} seconds')\n neighbors_by_concept_ids = []\n for n in neighbors:\n if n is None:\n n = []\n predicted_umls_concept_ids = set([ann_concept_id_list[x] for x in n])\n neighbors_by_concept_ids.append(predicted_umls_concept_ids)\n return neighbors_by_concept_ids", "def train(self, documents):\n ###DONE\n\n #entire vocab in document set D\n vocab_sod = set()\n vocab_pop = set()\n \n #Calcuates prior probabilities\n priorSOD = 0 #how many docs are spam\n priorPOP = 0 #how many docs are ham\n \n #Cacluates Tct\n term_freq_sod = {} #{term:occur, term:occur}\n term_freq_pop = {}\n \n #Tct'\n Tct_sod = 0 #Tct' = sum of (every term occurence in class c + 1)\n Tct_pop = 0\n \n for doc in documents: \n if 'sod' in doc.label:\n priorSOD += 1\n for token in doc.tokens:\n Tct_sod += 1\n if token in 
term_freq_sod.keys():\n term_freq_sod[token] = term_freq_sod[token] + 1\n else:\n term_freq_sod[token] = 1\n vocab_sod.add(token) \n else:\n priorPOP += 1\n for token in doc.tokens:\n Tct_pop += 1\n if token in term_freq_pop.keys():\n term_freq_pop[token] = term_freq_pop[token] + 1\n else:\n term_freq_pop[token] = 1\n vocab_pop.add(token)\n \n \n #endfor\n # | is for set join\n self.vocab = vocab_sod | vocab_pop #gets rid of duplicate words (those in both 'ham' and 'spam') \n \n #Tct Primes\n #tct' = term freq of all terms in class c + 1*(total terms)\n Tct_sod = Tct_sod + len(self.vocab) \n Tct_pop = Tct_pop + len(self.vocab) \n \n \n print(\"PriorSod: \" + str(priorSOD))\n print(\"PriorPop: \" + str(priorPOP))\n print(\"LEN Docum: \" + str(len(documents)))\n \n self.priorSOD = priorSOD / len(documents)\n self.priorPOP = priorPOP / len(documents)\n \n for term in self.vocab:\n if term in term_freq_pop.keys():\n self.cond_prob_pop[term] = (term_freq_pop[term] + 1) / Tct_pop\n else:\n self.cond_prob_pop[term] = 1 / Tct_pop\n \n if term in term_freq_sod.keys():\n self.cond_prob_sod[term] = (term_freq_sod[term] + 1) / Tct_sod\n else:\n self.cond_prob_sod[term] = 1 / Tct_sod\n \n \n pass", "def __init__(self, docs, token_key, tt):\n\n\t\tassert len(token_key) == tt.shape[0]\n\n\t\tldatokens = set(token_key.keys())\n\t\tdef keep(tokens): return [t for t in tokens if t in ldatokens]\n\t\tself.docs = map(keep,docs)\n\n\t\tself.D = len(self.docs)\n\t\tself.tt = tt\n\t\tself.V = tt.shape[0]\n\t\tself.K = tt.shape[1]\n\t\tself.samples = tt.shape[2]\n\n\t\tdoc_list = list(itertools.chain(*self.docs))\n\t\tself.tokens = np.array([token_key[t] for t in doc_list], dtype = np.int)\n\t\tself.N = self.tokens.shape[0]\n\t\tself.topic_seed = np.random.random_integers(0, self.K-1, size = self.N)\n\n\t\tself.docid = [[i]*len(d) for i,d in enumerate(self.docs)]\n\t\tself.docid = np.array(list(itertools.chain(*self.docid)), dtype = np.int)\n\n\t\tself.alpha = 50/self.K", "def sparse(n, k):\n z = np.zeros(n)\n for i in np.random.choice( np.arange(n), k, replace=None ): # supports of nonzero entries\n z[i] = np.random.randn()\n return z", "def recommend_new(self, user_id, N=10):\n scores = self.user_factors[user_id] @ self.product_factors.T\n bought_indices = self.product_user_matrix.T[user_id].nonzero()[1]\n count = N + len(bought_indices)\n ids = np.argpartition(scores, -count)[-count:]\n best = sorted(zip(ids, scores[ids]), key=lambda x: -x[1])\n return list(itertools.islice((rec for rec in best if rec[0] not in bought_indices), N))", "def cosineSimilarity(index, nPages, query): \n scores = defaultdict(int)\n terms = query.split()\n qw = {t: tf_idf(1, nPages, len(index[t])) for t in terms if t in index}\n query_len = np.linalg.norm(list(qw.values()))\n for term in qw:\n query_weight = qw[term] / query_len\n for url, weight in index[term]:\n scores[url] += weight * query_weight\n return sorted(scores.items(), key=lambda x: x[1], reverse=True)", "def train(self, n):\n t = self.t\n\n parallel_sentences = list(zip(self.target,self.source))\n\n for i in range(n):\n\n count = defaultdict(lambda:defaultdict(int))\n s_total = dict()\n total = defaultdict(int)\n\n for E,F in parallel_sentences:\n # compute normalization\n for e in E:\n t_e = t[e]\n s_total[e] = 0\n for f in F:\n s_total[e] += t_e[f]\n\n # collect counts\n for e in E:\n count_e = count[e]\n t_e = t[e]\n s_total_e = s_total[e]\n for f in F:\n tmp = t_e[f] / s_total_e\n count_e[f] += tmp\n total[f] += tmp\n\n # estimate probabilities\n for e in 
self.t_words:\n t_e = t[e]\n count_e = count[e]\n #for f in self.s_words:\n for f in count_e:\n #if f not in count[e]: continue\n t_e[f] = count_e[f] / total[f]", "def calculate_assignments_sparse(topics, data, voc_size, iterations = 1000, threshold = 1e-4): \n #calulate block size\n Ndocs_batch = (50000*10000) // voc_size #fits in 4GB of memory\n \n Nbatches = len(data) // Ndocs_batch\n if Nbatches*Ndocs_batch < len(data):\n Nbatches += 1\n \n start_time = time()\n for i in range(Nbatches):\n \n \n partial_assignments = calculate_assignments(topics, data[i*Ndocs_batch:(i+1)*Ndocs_batch], voc_size, iterations)\n partial_assignments[partial_assignments < threshold] = 0 \n #re-normalize\n partial_assignments /= partial_assignments.sum(axis=1)[:,np.newaxis]\n \n if i==0:\n sparse_assignments = csr_matrix(partial_assignments)\n else: \n sparse_assignments = _csr_vappend(sparse_assignments, csr_matrix(partial_assignments))\n \n\n print('Done batch {} out of {}. Elapsed {:.2f} min.'.format(i,Nbatches, (time()-start_time)/60 ))\n \n return sparse_assignments", "def get_random_smoothing_matrix(counts, width=3):\n st = time.time()\n num_obs = len(counts)\n s_matrix = []\n lns = []\n if type(width) != list:\n width = [(width, 2*width)]\n for ni in range(num_obs):\n for _w, nrs in width:\n p = np.ones(len(counts))\n p[ni] = 0\n if counts[ni] >= _w:\n region = get_region(ni, counts, 1, p)\n s_matrix.append(region)\n for nr in range(nrs):\n region = get_region(ni, counts, _w, p)\n s_matrix.append(region)\n lns.append(len(np.nonzero(region)[0]))\n for nr in range(nrs):\n region = get_region(ni, counts, _w, p)\n s_matrix.append(region)\n lns.append(len(np.nonzero(region)[0]))\n S = np.stack(s_matrix, axis=0).astype(np.float32)\n assert np.alltrue(S.sum(axis=-1)>0) \n return torch.from_numpy(S)", "def mnist_iid(dataset, num_users):\n num_items = int(len(dataset)/num_users)\n dict_users, all_idxs = {}, [i for i in range(len(dataset))]\n for i in range(num_users):\n dict_users[i] = set(np.random.choice(all_idxs, num_items, replace=False))\n all_idxs = list(set(all_idxs) - dict_users[i])\n return dict_users", "def map_tweet(tweet, is_self_training):\n\n feature_set_code = globals.FEATURE_SET_CODE\n\n vector = []\n\n preprocessed_tweet = ppros.pre_process_tweet(tweet)\n postag_tweet = postag.pos_tag_string(preprocessed_tweet)\n\n # Score obtaining phase these are common for selftraining except obtaining unigram and\n # postag unigram score\n\n if not is_self_training:\n unigram_score = ngram.score(preprocessed_tweet, ds.POS_UNI_GRAM, ds.NEG_UNI_GRAM, ds.NEU_UNI_GRAM, 1)\n post_unigram_score = ngram.score(postag_tweet, ds.POS_POST_UNI_GRAM, ds.NEG_POST_UNI_GRAM, ds.NEU_POST_UNI_GRAM,\n 1)\n else:\n unigram_score = ngram.score(preprocessed_tweet, ds.POS_UNI_GRAM_SELF, ds.NEG_UNI_GRAM_SELF,\n ds.NEU_UNI_GRAM_SELF, 1)\n post_unigram_score = ngram.score(postag_tweet, ds.POS_POST_UNI_GRAM_SELF, ds.NEG_POST_UNI_GRAM_SELF,\n ds.NEU_POST_UNI_GRAM_SELF, 1)\n\n lexicon_score_gen = lexicon_score.get_lexicon_score(preprocessed_tweet)\n afinn_score_96 = lexicon_score.get_afinn_99_score(preprocessed_tweet)\n afinn_score_111 = lexicon_score.get_afinn_111_score(preprocessed_tweet)\n senti_140_score = lexicon_score.get_senti140_score(preprocessed_tweet)\n NRC_score = lexicon_score.get_NRC_score(preprocessed_tweet)\n binliu_score = lexicon_score.get_senti_word_net_score(preprocessed_tweet)\n sentiword_score = lexicon_score.get_binliu_score(preprocessed_tweet)\n\n emoticon_score = micro_blog_score.emoticon_score(tweet)\n 
unicode_emoticon_score = micro_blog_score.unicode_emoticon_score(tweet)\n\n writing_style = ws.writing_style_vector(tweet)\n\n # These classification are just for ease of division in general practice\n # Generally we use default feature code 15 which takes all the feature\n # You can evaluate that by analysing below code blocks :)\n\n if feature_set_code % 2 == 1:\n vector.append(afinn_score_96)\n vector.append(afinn_score_111)\n vector.append(lexicon_score_gen)\n vector.append(senti_140_score)\n vector.extend(NRC_score)\n vector.append(binliu_score)\n vector.append(sentiword_score)\n if feature_set_code % 4 >= 2:\n vector.extend(writing_style)\n if feature_set_code % 8 >= 4:\n vector.append(emoticon_score)\n vector.append(unicode_emoticon_score)\n if feature_set_code % 16 >= 8:\n vector.extend(post_unigram_score)\n vector.extend(unigram_score)\n return vector", "def femnist_star(dataset, num_users):\n print(\"Sampling dataset: FEMNIST*\")\n dict_users = {i: [] for i in range(num_users)}\n total_len = len(dataset)\n\n labels = dataset.targets.numpy()\n idxs = np.argsort(labels)\n\n num_shards, num_imgs = 26 * num_users, total_len // (num_users * 26)\n\n label_selected = [np.random.choice(26, 20, replace=False) for _ in range(num_users)]\n\n label_selected_1 = [np.random.choice(label_selected[i], 6, replace=False) for i in range(num_users)]\n for i in range(num_users):\n for j in label_selected[i]:\n ind_pos = np.random.choice(num_users)\n tmp = copy.deepcopy(idxs[j * num_users * num_imgs + ind_pos * num_imgs: j * num_users * num_imgs + (ind_pos + 1) * num_imgs])\n dict_users[i].append(tmp)\n for j in label_selected_1[i]:\n ind_pos = np.random.choice(num_users)\n tmp = copy.deepcopy(idxs[j * num_users * num_imgs + ind_pos * num_imgs: j * num_users * num_imgs + (\n ind_pos + 1) * num_imgs])\n dict_users[i].append(tmp)\n\n for i in range(num_users):\n dict_users[i] = np.concatenate(tuple(dict_users[i]), axis=0)\n return dict_users", "def qualify_words():\n config = get_config()\n\n all_feature_matrices = []\n all_opinion_matrices = []\n\n # first 5 parts are labeled, thus are useful\n all_feature_label_vectors = []\n all_opinion_label_vectors = []\n\n for fname in config.file_names:\n feature_X, feature_dims = load_feature_matrices(fname)\n opinion_X, opinion_dims = load_opinion_matrices(fname)\n feature_y = load_feature_labels(fname)\n opinion_y = load_opinion_labels(fname)\n\n # append to all collector\n all_feature_matrices.append(feature_X)\n all_feature_label_vectors.append(feature_y)\n all_opinion_matrices.append(opinion_X)\n all_opinion_label_vectors.append(opinion_y)\n # use first 5 for training\n # stack first 5\n feature_training_X = []\n feature_training_y = []\n opinion_training_X = []\n opinion_training_y = []\n for i in range(5):\n feature_training_X.append(all_feature_matrices[i])\n feature_training_y.append(all_feature_label_vectors[i])\n opinion_training_X.append(all_opinion_matrices[i])\n opinion_training_y.append(all_opinion_label_vectors[i])\n\n feature_training_X = np.hstack(feature_training_X)\n feature_training_y = np.hstack(feature_training_y)\n opinion_training_X = np.hstack(opinion_training_X)\n opinion_training_y = np.hstack(opinion_training_y)\n\n # using combination of rule and ranking score as features\n feature_model = MultinomialNB()\n opinion_model = MultinomialNB()\n\n # training\n feature_model.fit(np.transpose(feature_training_X), feature_training_y.ravel())\n opinion_model.fit(np.transpose(opinion_training_X), opinion_training_y.ravel())\n\n # predicting 
on candidate aspects and opinions, extracted from amazon reviews\n for i in range(5, len(config.file_names)):\n fname = config.file_names[i]\n feature_pred = feature_model.predict_proba(\n np.transpose(all_feature_matrices[i]))[:,1]\n opinion_pred = opinion_model.predict_proba(\n np.transpose(all_opinion_matrices[i]))[:,1]\n # pickle the prediction results\n with open('../results/' + fname + '_feature_pred_score.pickle', 'wb') as f:\n pickle.dump(feature_pred, f)\n with open('../results/' + fname + '_opinion_pred_score.pickle', 'wb') as f:\n pickle.dump(opinion_pred, f)", "def preparation(self):\n self.word_freq = defaultdict(int)\n\n for sentence in self.corpus:\n for word in sentence:\n self.word_freq[word] += 1\n\n # self.words decide the index of all the words\n self.words = list(self.word_freq.keys())\n self.T = len(self.words)\n\n # word_index will give index for a given word and vice versa for index_word\n self.word_index = dict([[word, i] for i, word in enumerate(self.words)])\n self.index_word = dict([[i, word] for i, word in enumerate(self.words)])", "def build_dataset(self, words, min_occurrences=0, subsample=False, t=1e-3):\n counts = collections.Counter(words)\n counts = {k:v for k,v in counts.items() if v >= min_occurrences}\n words = [w for w in words if w in counts]\n if subsample:\n sum_counts = float(sum(counts.values()))\n freqs = {k:v/sum_counts for k, v in counts.items()}\n kept_words = []\n for w in words:\n p = np.random.uniform()\n if p > (1-np.sqrt(t/freqs[w])):\n kept_words.append(w)\n words = kept_words\n counts = collections.Counter(words)\n counts = {k:v for k,v in counts.items() if v >= min_occurrences}\n words = [w for w in words if w in counts]\n word_to_idx = {}\n for word, _ in counts.items():\n word_to_idx[word] = len(word_to_idx)\n data = [word_to_idx[word] for word in words]\n idx_to_word = dict(zip(word_to_idx.values(), word_to_idx.keys()))\n return data, counts, word_to_idx, idx_to_word", "def train(self, ngrams):\n unique_ngrams = pd.Series(ngrams).unique()\n # ngram counts C(w_1, ..., w_n)\n n1grams, c_ngram, c_n1gram = [], [], []\n for ngram in ngrams:\n n1grams.append(ngram[:-1]) # Construct n1gram\n c_ngram.append(ngrams.count(ngram)) # ngram occurrence\n \n # n-1 gram counts C(w_1, ..., w_(n-1))\n for n1gram in n1grams:\n c_n1gram.append(n1grams.count(n1gram))\n\n # Create the conditional probabilities\n probs = np.array(c_ngram) / np.array(c_n1gram)\n \n # Put it all together\n ngram_col = pd.Series(ngrams, name='ngram')\n n1gram_col = pd.Series(n1grams, name='n1gram')\n prob_col = pd.Series(probs, name='prob')\n\n # print(c_ngram, c_n1gram)\n df = pd.DataFrame([ngram_col, n1gram_col, prob_col]).T\n no_dup = df.drop_duplicates('ngram').reset_index(drop=True)\n return no_dup", "def __make_guess(self, n, thresh, seeds, min_size):\n _seeds = list(reversed(seeds)) if seeds is not None else[]\n guess = np.zeros((n,) + self.__array.shape, dtype=bool)\n i = 0\n while i < n:\n seed = _seeds.pop() if _seeds else self.__random_pickup(guess)\n cluster = self.__grow_cluster(seed, thresh)\n if cluster.sum() < min_size:\n continue\n guess[i,:,:] = cluster\n self.seeds.append(seed)\n i += 1\n return guess", "def get_features_matrix(self, index, columns):\n game_features = self._data.pivot(\n index=index,\n columns=columns,\n values='ratings'\n ).fillna(0)\n\n game_features_matrix = csr_matrix(game_features.values)\n return game_features, game_features_matrix", "def download_and_prepare():\n # set source twitter IDS\n user = 759251 # @CNN\n news1 = 807095 # 
@nytimes\n news2 = 1367531 # @FoxNews\n news3 = 1652541 # @Reuters\n news4 = 3108351 # @WSJ\n news5 = 2467791 # @washingtonpost\n\n # grab all tweets from user\n userHistory = []\n tu = threading.Thread(target=get_all_tweets, args=(user, userHistory))\n # get all tweets from context users\n news1History = []\n t1 = threading.Thread(target=get_all_tweets, args=(news1, news1History))\n news2History = []\n t2 = threading.Thread(target=get_all_tweets, args=(news2, news2History))\n news3History = []\n t3 = threading.Thread(target=get_all_tweets, args=(news3, news3History))\n news4History = []\n t4 = threading.Thread(target=get_all_tweets, args=(news4, news4History))\n news5History = []\n t5 = threading.Thread(target=get_all_tweets, args=(news5, news5History))\n\n # run threads\n threads = [tu, t1, t2, t3, t4, t5]\n for th in threads:\n th.start()\n for th in threads:\n th.join()\n\n # clean urls of all tweets\n allTweets = [userHistory, news1History, news2History, news3History, news4History, news5History]\n for i in range(len(allTweets)):\n allTweets[i] = cleanse_tweets(allTweets[i])\n\n # construct context dict for train and test\n context_dict, context_dict_valid = group_by_date(allTweets)\n\n ##############################################################################\n # some of the following code adapted from tensorflow example file data_utils #\n ##############################################################################\n\n # set paths for storing data\n data_dir = \"tweet_data\"\n train_dir = \"train_dir\"\n train_path = os.path.join(train_dir, \"train\")\n dev_path = os.path.join(train_dir, \"test1\")\n\n # paths for storing initial data\n user_file_path = os.path.join(data_dir, \"data.user\")\n context_file_path = os.path.join(data_dir, \"data.context\")\n\n # move data into expected directories/make data available\n data_to_file(context_dict, context_dict_valid, allTweets, user_file_path, context_file_path, dev_path + \".user\", dev_path + \".context\")\n\n user_path = os.path.join(data_dir, \"vocab%d.user\" % vocab_size)\n context_path = os.path.join(data_dir, \"vocab%d.context\" % vocab_size)\n create_vocabulary(context_path, context_file_path, vocab_size, None) # None: user default tokenizer\n create_vocabulary(user_path, user_file_path, vocab_size, None)\n\n # Create token ids for the training data.\n user_train_ids_path = train_path + (\".ids%d.user\" % vocab_size)\n context_train_ids_path = train_path + (\".ids%d.context\" % vocab_size)\n data_to_token_ids(user_file_path, user_train_ids_path, user_path, None)\n data_to_token_ids(context_file_path, context_train_ids_path, context_path, None)\n\n print(\"made it\")\n\n # Create token ids for the development data.\n user_dev_ids_path = dev_path + (\".ids%d.user\" % vocab_size)\n context_dev_ids_path = dev_path + (\".ids%d.context\" % vocab_size)\n data_to_token_ids(dev_path + \".user\", user_dev_ids_path, user_path, None)\n data_to_token_ids(dev_path + \".context\", context_dev_ids_path, context_path, None)\n\n # TODO return paths to directories of input and output\n return (user_train_ids_path, context_train_ids_path,\n context_dev_ids_path, user_dev_ids_path,\n context_path, user_path)", "def sent_features(tweet):\n twitter_objs = count_twitter_objs(tweet)\n tweet=clean_tweet(tweet) \n sentiment = sentiment_analyzer.polarity_scores(tweet)\n #Get text only\n words = preprocess(tweet) \n syllables = textstat.syllable_count(words)\n num_chars = sum(len(w) for w in words)\n num_chars_total = len(tweet)\n num_terms = 
len(tweet.split())\n num_words = len(words.split())\n avg_syl = round(float((syllables+0.001))/float(num_words+0.001),4)\n num_unique_terms = len(set(words.split()))\n \n ###Modified FK grade, where avg words per sentence is just num words/1\n FKRA = round(float(0.39 * float(num_words)/1.0) + float(11.8 * avg_syl) - 15.59,1)\n ##Modified FRE score, where sentence fixed to 1\n FRE = round(206.835 - 1.015*(float(num_words)/1.0) - (84.6*float(avg_syl)),2)\n \n \\\n retweet = 0\n if \"rt\" in words:\n retweet = 1\n features = [FKRA, FRE,syllables, avg_syl, num_chars, num_chars_total, num_terms, num_words,\n num_unique_terms, sentiment['neg'], sentiment['pos'], sentiment['neu'], sentiment['compound'],\n twitter_objs[2], twitter_objs[1],\n twitter_objs[0], retweet]\n return features", "def get_matrix_of_vectors(wv_from_bin, required_words=['softball', 'technology','street','project','fellow','maps','view','fuel','summer','clubhouse','ball','steal','soccer','driving','motor','comedy']):\n import random\n words = list(wv_from_bin.vocab.keys())\n print(\"Shuffling words ...\")\n random.shuffle(words)\n wrds = words[:10000]\n print(\"Putting %i words into word2Ind and matrix M...\" % len(words))\n word2Ind = {}\n M = []\n curInd = 0\n for w in words:\n try:\n M.append(wv_from_bin.word_vec(w))\n word2Ind[w] = curInd\n curInd += 1\n except KeyError:\n continue\n for w in required_words:\n try:\n M.append(wv_from_bin.word_vec(w))\n word2Ind[w] = curInd\n curInd += 1\n except KeyError:\n continue\n M = np.stack(M)\n print(\"Done.\")\n return M, word2Ind", "def train_ngrams(dataset):\n trigram_counts = dict()\n bigram_counts = dict()\n unigram_counts = dict()\n token_count = 0\n\n ### YOUR CODE HERE\n def enterDic(phrase, dict):\n if phrase in dict:\n dict[phrase] += 1\n else:\n dict[phrase] = 1\n\n unigram_counts[word_to_num['UUUNKKK']] = 0\n\n for sentence in dataset:\n enterDic(sentence[1], unigram_counts) # count number of start of sentences\n enterDic((sentence[0], sentence[1]), bigram_counts) # count number of start of sentences\n token_count += 2\n for i in range(2, len(sentence)):\n token_count += 1\n enterDic(sentence[i], unigram_counts)\n enterDic((sentence[i - 1], sentence[i]), bigram_counts)\n enterDic((sentence[i - 2], sentence[i - 1], sentence[i]), trigram_counts)\n ### END YOUR CODE\n return trigram_counts, bigram_counts, unigram_counts, token_count", "def sample( self, n, words = 100, n_views = 3 ):\n\n # Initialise each view to be 0s\n docs = [ sc.zeros( (n, self.W) ) for v in xrange( n_views ) ]\n\n for i in xrange( n ):\n # Draw topic weights once for a document\n weights = dirichlet( self.alphas )\n\n # Draw the words/k for each view\n for v in xrange( n_views ):\n words_ = words/n_views\n # Get the topic counts\n cnts = multinomial( words_, weights )\n for (k, cnt) in zip( xrange( self.K ), cnts ):\n freq = multinomial( cnt, self.topics.T[k] )/float(words_)\n # Get the word frequencies for this document\n docs[v][i] += freq\n\n return docs", "def make_recommendation(fav_product,model_knn=model_knn,\n data=csr_matrix(df_product_features.values),\n\n mapper=products_to_idx,\n n_recommendations=6):\n # fit\n model_knn.fit(data)\n # get input movie index\n #print('You have input product:', fav_product)\n idx = fuzzy_matching(mapper, fav_product, verbose=True)\n if idx is None:\n return []\n #print('Recommendation system start to make inference')\n #print('......\\n')\n distances, indices = model_knn.kneighbors(data[idx], n_neighbors=n_recommendations + 1)\n\n raw_recommends = 
sorted(list(zip(indices.squeeze().tolist(), distances.squeeze().tolist())), key=lambda x: x[1])[\n :0:-1]\n\n # get reverse mapper\n #print(raw_recommends)\n reverse_mapper = {v: k for k, v in mapper.items()}\n # print recommendations\n #print('Recommendations for {}:'.format(fav_product))\n filter = []\n for i, (idx, dist) in enumerate(raw_recommends):\n #print('{0}: {1}, with distance of {2}'.format(i + 1, reverse_mapper[idx], dist))\n filter.append(reverse_mapper[idx])\n\n\n newproduct = pd.read_sql_query(\"\"\"SELECT p.*\n ,(SELECT img.url FROM image img WHERE p.id=img.product_id limit 1) as image\n ,(SELECT cate.cate_name FROM categories cate WHERE p.category_id=cate.id) as cateName\n FROM products p where p.name IN %s \"\"\", conn,params=(tuple(filter),))\n\n return newproduct.reset_index().to_json(orient='records')", "def __init__(self, corpus):\n self.ntokens = 0\n self.counts = collections.defaultdict(lambda: 0)\n self.s = collections.defaultdict(lambda: 0.0)\n self.train(corpus)", "def construct_df_topics(self, n_words=20):\n\n self.check_model()\n topic_keywords = []\n keywords = array(self.vectorizer.get_feature_names())\n\n for topic_weights in self.model.components_:\n top_keyword_locs = (-topic_weights).argsort()[:n_words]\n topic_keywords.append(keywords.take(top_keyword_locs))\n\n self.df_topic_keywords = pd.DataFrame(topic_keywords)\n self.df_topic_keywords.columns = ['Word ' + str(i) for i in range(self.df_topic_keywords.shape[1])]\n self.df_topic_keywords.index = ['Topic ' + str(i) for i in range(self.df_topic_keywords.shape[0])]", "def get_oth_features(tweets, cleaned_tweets,out_folder):\n feats=[]\n count=0\n # skipgram = get_skipgram(cleaned_tweets, out_folder, 2,2)\n # for line in skipgram:\n # print(line)\n # hashtags = get_hashtags_in_tweets(tweets, out_folder)\n # mispellings = get_misspellings(tweets, cleaned_tweets, out_folder)\n # specialpunc = get_specialpunct(tweets, cleaned_tweets,out_folder)\n # specialchars = get_specialchars(tweets, cleaned_tweets,out_folder)\n # capitalization = get_capitalization(tweets,cleaned_tweets,out_folder)\n for t, tc in zip(tweets, cleaned_tweets):\n feats.append(other_features_(t, tc))\n count+=1\n # if count%100==0:\n # print(\"\\t {}\".format(count))\n other_features_names = [\"FKRA\", \"FRE\",\"num_syllables\", \"num_chars\", \"num_chars_total\",\n \"num_terms\", \"num_words\", \"num_unique_words\", \"vader compound\",\n \"num_hashtags\", \"num_mentions\"]\n feature_matrix=np.array(feats)\n pickle.dump(other_features_names,\n open(out_folder+\"/\"+TWEET_TD_OTHER_FEATURES_VOCAB+\".pk\", \"wb\" ))\n\n return feature_matrix, other_features_names", "def vectorize(tokens_list, feature_fns, min_freq, vocab=None):\n ###TODO\n \n features = []\n feature_freq = {}\n vocabulary = {}\n \n # 2 case : for vocab\n # case 1: \n if (vocab == None):\n \n for doc in tokens_list: \n #print('doc#=%d tokens=%s'%(i,doc)) \n data = featurize(doc,feature_fns)\n #print('data=',data)\n \n for feature in data: \n if feature[1] > 0 : \n if feature[0] not in feature_freq.keys():\n feature_freq.setdefault(feature[0],1) \n else :\n feature_freq[feature[0]] += 1\n \n if feature[0] not in vocabulary.keys() :\n vocabulary.setdefault(feature[0], None) \n \n features.append(data)\n \n # sort vocab according to features (alphabetical order)\n vacab_list = sorted(feature_freq.keys(), key =lambda x: x,reverse=False)\n \n for colIndex,term in enumerate(vacab_list) :\n #print('colIndex = %d, term = %s'%(colIndex,term))\n vocabulary[term] = colIndex\n\n else: # 
case 2 \n \n # vocab already present\n #print('Vocab already present')\n vocabulary = vocab.copy() \n \n \n for doc in tokens_list: \n data = featurize(doc,feature_fns) \n \n test_data = [] \n for feature in data: \n # only take feature present in vocab \n if feature[0] in vocabulary.keys():\n #print('feature = ',feature) \n if feature[1] > 0 : \n test_data.append(feature) \n if feature[0] not in feature_freq.keys():\n feature_freq.setdefault(feature[0],1) \n else :\n feature_freq[feature[0]] += 1\n \n #print('test_data = ',len(test_data)) \n features.append(test_data)\n #test_data.clear()\n #print('features = ',features)\n \n \n # build a csr_matrix \n row = []\n col = []\n data = [] \n \n for docID,feat_list in enumerate(features) :\n for term in feat_list:\n if (feature_freq[term[0]] >= min_freq): # (zero values are not stored)\n \n row.append(docID)\n col.append(vocabulary[term[0]])\n data.append(term[1])\n \n #print('row =',row)\n #print('col =',col)\n #print('data=',data)\n \n X = csr_matrix((data, (row, col)), shape=(len(features), len(vocabulary)), dtype=np.int64)\n \n #print('X ->')\n #print(X.toarray())\n #print(' size of X = ',X.get_shape())\n \n return(X, vocabulary)", "def bigram_model(list_of_words, unigram_count, bigram_count):\n if start_phrase not in list_of_words:\n list_of_words.insert(0, start_phrase)\n if end_phrase not in list_of_words:\n list_of_words.append(end_phrase)\n uni_count = pd.read_csv(unigram_count)\n bigram_count = pd.read_csv(bigram_count)\n # proba_matrix = defaultdict(float)\n proba_dict = {list_of_words[i] + \" \" + list_of_words[i+1]: (bigram_count[list_of_words[i] + \" \" + list_of_words[i+1]].values[0] / float(uni_count[list_of_words[i]].values[0]))\n if list_of_words[i] + \" \" + list_of_words[i+1] in bigram_count.columns.values else 0.0 for i in xrange(len(list_of_words) - 1)}\n return proba_dict\n # for i in xrange(len(list_of_words) - 1):\n # bi_words = list_of_words[i] + \" \" + list_of_words[i+1]\n # if bi_words in bigram_count.columns.values:\n # proba_matrix = {bi_words: (bigram_count[bi_words] / float(list_of_words[i]))}\n # else:\n # proba_matrix = {bi_words: 0.0}", "def analyze_similarities():\r\n print('Total number of candidate pairs:', len(pairs))\r\n print(f'\\nNumber of actual item pairs in the train set: {pairs[\"true_match\"].sum()}\\n')\r\n\r\n for feature in ['text_score', 'image_score', 'txt_img_score', 'words_ratio', 'txt_img_words']:\r\n\r\n # Check distribution of True and False predictions for various similarity scores\r\n print('-' * 50)\r\n print(f'\\nDistribution of True/False predictions for {feature}')\r\n for thr in (0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95):\r\n print('-' * 50)\r\n print(f'Similarity score over {thr}')\r\n pairs_sample = pairs[pairs[feature] >= thr]\r\n print(f'Number of similar item pairs: {len(pairs_sample)}')\r\n print(pairs_sample['true_match'].value_counts(normalize=True))\r\n\r\n # Check if identical phash can be used to improve the accuracy\r\n same_phash = pairs[pairs['phash_match'] == 1]\r\n different_phash = pairs[pairs['phash_match'] == 0]\r\n\r\n print('\\nFor item pairs with the same phash:')\r\n print(same_phash['true_match'].value_counts(normalize=True))\r\n print('Number of item pairs in this subset:', len(same_phash))\r\n\r\n print('\\nFor item pairs with different phash:')\r\n print(different_phash['true_match'].value_counts(normalize=True))\r\n print('Number of item pairs in this subset:', len(different_phash))\r\n\r\n # Check if numbers in titles can be used to improve the 
accuracy\r\n same_numbers = pairs[pairs['nums_match'] == 1]\r\n different_numbers = pairs[pairs['nums_match'] == 0]\r\n\r\n print('\\nFor item pairs with the same numbers:')\r\n print(same_numbers['true_match'].value_counts(normalize=True))\r\n print('Number of item pairs in this subset:', len(same_numbers))\r\n\r\n print('\\nFor item pairs with different numbers:')\r\n print(different_numbers['true_match'].value_counts(normalize=True))\r\n print('Number of item pairs in this subset:', len(different_numbers))", "def vectorize(tokens_list, feature_fns, min_freq, vocab=None):\n# counter = defaultdict(int)\n# data, row, col, result = [], [], [], []\n\n# for tokens in tokens_list:\n# feats = featurize(tokens, feature_fns)\n# result.append(feats)\n# for feat in feats:\n# counter[feat[0]] += 1\n\n# if vocab == None:\n# vocab = defaultdict(int)\n# index = 0\n# for val in sorted(counter.items()):\n# if (val[1] >= min_freq):\n# vocab[val[0]] = index\n# index += 1\n\n# for index, tokens in enumerate(tokens_list):\n# for res in sorted(result[index]):\n# if (res[0] in vocab.keys()):\n# data.append(res[1])\n# col.append(vocab[res[0]])\n# row.append(index)\n\n# return csr_matrix((data, (row, col)), dtype=np.int64), vocab\n \n if vocab == None:\n d_vocab = defaultdict(list)\n doc_map = defaultdict(dict)\n for doc_no in range(len(tokens_list)):\n feats = featurize(tokens_list[doc_no], feature_fns)\n feat_dic = dict(feats)\n doc_map[doc_no] = feat_dic\n for feat in feat_dic:\n d_vocab[feat].append(doc_no)\n\n index = 0\n new_vocab = {}\n for key in sorted(d_vocab):\n if len(d_vocab[key]) >= min_freq:\n new_vocab[key] = index\n index += 1\n\n row = []\n column = []\n data = []\n for key in sorted(new_vocab.keys()):\n for doc_no in sorted(d_vocab[key]):\n if key in doc_map[doc_no]:\n row.append(doc_no)\n column.append(new_vocab[key])\n data.append(doc_map[doc_no][key])\n\n return csr_matrix((data, (row, column)), shape=(len(tokens_list), len(new_vocab)),dtype=np.int64), new_vocab\n \n\n elif vocab != None:\n row = []\n column = []\n data = []\n for doc_no in range(len(tokens_list)):\n feat_dic = dict(featurize(tokens_list[doc_no],feature_fns))\n for feat in feat_dic:\n if feat in vocab:\n row.append(doc_no)\n column.append(vocab[feat])\n data.append(feat_dic[feat])\n\n return csr_matrix((data,(row,column)), shape=(len(tokens_list),len(vocab)),dtype=np.int64),vocab", "def build_vocabulary(image_paths, vocab_size):\n n_image = len(image_paths)\n\n # Since want to sample tens of thousands of SIFT descriptors from different images, we\n # calculate the number of SIFT descriptors we need to sample from each image.\n n_each = int(np.ceil(40000 / n_image)) # You can adjust 10000 if more is desired\n\n # Initialize an array of features, which will store the sampled descriptors\n features = np.zeros((n_image * n_each, 128))\n j=0\n for i, path in enumerate(image_paths):\n # Load SIFT features from path\n descriptors = np.loadtxt(path, delimiter=',',dtype=float)\n\n # TODO: Randomly sample n_each features from descriptors, and store them in features\n #use the randomizer in numpy library to make n_each random index\n idx= np.array(np.random.randint(0,len(descriptors),n_each))\n\n # choose randomly n_each number of discriptor to train K-mean classifier\n for k in idx:\n\n features[j] = descriptors[k,:]\n j = j+1\n # TODO: pefrom k-means clustering to cluster sampled SIFT features into vocab_size regions.\n # You can use KMeans from sci-kit learn.\n # Reference: 
https://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html\n\n #use K_mean classifier to make Bag of visual words represantation for SIFT features\n kmeans = KMeans(n_clusters=250).fit(features)\n #kmeans= clustering = AgglomerativeClustering().fit(features)\n\n\n return kmeans", "def build_matrix(df,idx):\n nrows = df.shape[0]\n ncols = len(idx)\n \n nnz = 0\n for index, row in df.iterrows():\n rowValue = row['Tags'].strip(\"'<>() \").replace('\\'', '\\\"')\n rowValueToList = json.loads(rowValue)\n\n tagsList = []\n\n for tags in rowValueToList:\n tags = tags.strip()\n tagsList += tags\n nnz += len(set(tagsList))\n \n # set up memory\n ind = np.zeros(nnz, dtype=np.int)\n val = np.zeros(nnz, dtype=np.double)\n ptr = np.zeros(nrows+1, dtype=np.int)\n i = 0 # document ID / row counter\n n = 0 # non-zero counter\n # transfer values\n for index, row in df.iterrows():\n rowValue = row['Tags'].strip(\"'<>() \").replace('\\'', '\\\"')\n rowValueToList = json.loads(rowValue)\n\n tagsList = []\n\n for tags in rowValueToList:\n tags = tags.strip()\n\n for tag in tags.split():\n tag = porter.stem(tag) #Stem the tag\n if tag in idx: #Remove the stopwords\n if len(tag) > 2: \n tagsList.append(tag)\n\n cnt = Counter(tagsList)\n keys = list(k for k,_ in cnt.most_common())\n l = len(keys)\n for j,k in enumerate(keys):\n if(k in idx):\n ind[j+n] = idx[k]\n val[j+n] = cnt[k]\n # else:\n # print(\"Vocabulary Not Found\",k)\n ptr[i+1] = ptr[i] + l\n n += l\n i += 1\n \n mat = csr_matrix((val, ind, ptr), shape=(nrows, ncols), dtype=np.double)\n mat.sort_indices()\n return mat", "def top_words(source, number):\n\n keys = set()\n\n ht = HashMap(2500, hash_function_2)\n\n # This block of code will read a file one word at a time and\n # put the word in `w`. 
It should be left as starter code.\n with open(source) as f:\n for line in f:\n words = rgx.findall(line)\n for w in words:\n # convert word to lowercase to avoid inconsistent hash values\n # due to different cases of the same word.\n w = w.lower()\n\n # check if the current word already exists as a key\n if w in keys:\n current_count = ht.get(w) # fetch the current count for that word\n current_count += 1 # increment count by one\n ht.put(w, current_count) # update value for the key\n else:\n # word does not exist in hash map\n keys.add(w) # add current word to keys set\n ht.put(w, 1) # insert key into hash map with value of 1\n\n # fetch unsorted list of tuples from parsed data\n word_count_list = compile_list(ht, keys)\n\n # sort word count tuple list\n word_count_list = word_count_sort(word_count_list)\n\n # initialize and fill final word list\n final_list = []\n\n for index in range(0, number):\n final_list.append(word_count_list[index])\n\n return final_list", "def trainInternal():\n\n con_counts = Counter()\n deflike = Counter()\n\n for record in records:\n data = [re.split(\"\\t\", d) for d in re.split(\"\\n\", record)]\n tokens, tags = zip(*data)\n\n for i, token in enumerate(tokens):\n denom = len(token)\n for indices, f in fqs(token, 0.5): #perform analysis on one word at a time\n context, numer = internalContext(indices, token)\n if tags[i] != \"O\": #only want the named entities\n deflike[context] += f * numer/denom #need to normalize by word length\n con_counts[context] += f * numer/denom\n\n deflike = Counter({context: deflike[context]/con_counts[context] for context in deflike}) #perform division on each entry\n\n return deflike", "def top_sentences(query, sentences, idfs, n):\n scored_sentences = {}\n for word in query:\n # print(f\"Searching for {word}\")\n for k, v in sentences.items():\n\n # Ignore headings\n if k.strip(\"=\") != k:\n continue\n\n if word.lower() in v:\n \n try:\n check = scored_sentences[k]\n except:\n scored_sentences[k] = 0\n\n scored_sentences[k] += idfs[word]\n\n # print(scored_sentences)\n # exit()\n\n # print(f\"Scored Sentences:\\n\\t{scored_sentences}\")\n final_result = []\n while len(final_result) < n:\n top = \"\"\n g = 0.0\n s = False\n\n for k, v in scored_sentences.items():\n\n if float(v) >= float(g):\n\n # Query term density calculation\n if float(v) == float(g):\n\n old_s_set = set(top.split(\" \"))\n new_s_set = set(k.split(\" \"))\n q_set = set(query)\n\n # similarities between words in question and our query words\n inter_new = float(len(new_s_set & q_set) / len(k))\n inter_old = float(len(old_s_set & q_set) / len(top))\n\n if inter_new < inter_old:\n continue\n\n g = v\n top = k\n\n if top:\n final_result.append(top)\n del scored_sentences[top]\n else:\n final_result.append(\"Not enough context for additional results.\")\n return final_result\n \n return final_result", "def generateMatrix(self):\n if self.tokenWeights and self.extraFeatures:\n nFeatures = self.wordId + self.wordId2 + len(self.EXTRA_WEIGHTS_LABELS)\n logging.info('Exporting TOKEN WEIGHTS AND EXTRA FEATURES %dx%d'%(self.docId, nFeatures))\n mtrx = np.zeros((self.docId, nFeatures))\n \n for docId, doc in self.documents.iteritems():\n # iterate through 1st sentence\n for wId, val in doc['s1'].iteritems():\n mtrx[docId, wId] = val\n # then iterate thru 2nd sentence, store on 2ND PARTITION\n for wId, val in doc['s2'].iteritems():\n mtrx[docId, self.wordId + wId] = val\n # finally extra features values stored at the end of the vector\n for label, val in 
doc['extraFeatures'].iteritems():\n mtrx[docId, self.wordId + self.wordId2 + self.EXTRA_WEIGHTS_LABELS.index(label)] = val\n\n elif self.tokenWeights and not self.extraFeatures:\n nFeatures = self.wordId + self.wordId2\n logging.info('Exporting TOKEN WEIGHTS %dx%d'%(self.docId, nFeatures))\n mtrx = np.zeros((self.docId, nFeatures))\n \n for docId, doc in self.documents.iteritems():\n # iterate through 1st sentence\n for wId, val in doc['s1'].iteritems():\n mtrx[docId, wId] = val\n # then iterate thru 2nd sentence, store on 2ND PARTITION\n for wId, val in doc['s2'].iteritems():\n mtrx[docId, self.wordId + wId] = val\n else:\n nFeatures = len(self.EXTRA_WEIGHTS_LABELS)\n logging.info('Exporting EXTRA FEATURES %dx%d'%(self.docId, nFeatures))\n mtrx = np.zeros((self.docId, nFeatures))\n \n for docId, doc in self.documents.iteritems():\n for label, val in doc['extraFeatures'].iteritems():\n mtrx[docId, self.EXTRA_WEIGHTS_LABELS.index(label)] = val\n logging.info('Matrix generated')\n logging.info(mtrx.shape)\n return mtrx", "def build_weights_matrix(vectors, custom_vectors, index2word, size = 200):\n weights_matrix = np.zeros((len(index2word), size))\n words_found = 0\n words_found_custom = 0\n\n for i, word in index2word.items():\n try: \n weights_matrix[i] = vectors[index2word[i]]\n words_found += 1\n except KeyError:\n try:\n weights_matrix[i] = custom_vectors[index2word[i]]\n words_found_custom +=1\n except KeyError:\n weights_matrix[i] = np.random.rand(size)\n\n #initialize pad embedding to zero\n weights_matrix[0, ] = np.zeros(size)\n\n print ('words_found: ' + str(words_found/len(index2word)) + \\\n ' additional_words_found: ' + str(words_found_custom/len(index2word)))\n\n return weights_matrix", "def generate_statistics_for_recommends(mat,k=20):\r\n dict_userid_to_recommends = create_dict_user_id_to_recommends_from_mat(mat)\r\n dict_userid_to_moviesliked = load_or_create('/DICT/UserIdToLikedMovies.dict', create_dict_user_id_to_liked_items)\r\n dict_ecc = load_or_create('/DICT/MovieIdToItemEccentricity.dict', create_dict_ecc)\r\n user_to_ecc = load_or_create('/DICT/UserIdToUserEccentricity.dict',createDictUserIdToUserEccentricity)\r\n\r\n top_items_ecc_all=[]\r\n user_ecc=[]\r\n user_avg_rec_ecc=[]\r\n to_iter=[i for i in dict_userid_to_recommends]\r\n print(\"starting to calculate plot data...\")\r\n counter_ecc=0\r\n counter_none_ecc=0\r\n print(\"length dict:\",len(dict_userid_to_recommends))\r\n for user in tqdm(to_iter):\r\n\r\n #delete vals which user alreay liked\r\n list_recommends_not_liked_yet=[[i,j]for i,j in dict_userid_to_recommends[user] if i not in dict_userid_to_moviesliked[user]]\r\n list_recommends_not_liked_yet=sorted(list_recommends_not_liked_yet, key=itemgetter(1))\r\n #only take top k\r\n top_items=list_recommends_not_liked_yet[-20:]\r\n top_items_ecc=[dict_ecc[item] for item,val in top_items]\r\n #append ecc vals to plot list\r\n counter_ignored=0\r\n if len(top_items_ecc) > 0:\r\n user_ecc.append(user_to_ecc[user])\r\n if user_to_ecc[user]>0:\r\n counter_ecc+=1\r\n else:\r\n counter_none_ecc+=1\r\n user_avg_rec_ecc.append(mean(top_items_ecc))\r\n else:\r\n print('ignored')\r\n counter_ignored+=1\r\n for i in top_items_ecc:\r\n top_items_ecc_all.append(i)\r\n if user==0:\r\n print(50*\"THIS SHOULD NOT HAPPEN\\n\")\r\n regr = linear_model.LinearRegression()\r\n a=np.array(user_ecc).reshape((len(user_ecc),1))\r\n b=np.array(user_avg_rec_ecc)\r\n print(a.shape,b.shape)\r\n user_ecc_np=np.array(user_ecc).reshape((len(user_ecc),1))\r\n 
user_avg_rec_ecc_np=np.array(user_avg_rec_ecc)\r\n print(len(user_ecc_np),len(user_avg_rec_ecc_np))\r\n print(user_ecc_np.shape,user_avg_rec_ecc_np.shape)\r\n regr.fit(user_ecc_np, user_avg_rec_ecc_np)\r\n y_pred = regr.predict(user_ecc_np)\r\n print(y_pred[:],user_avg_rec_ecc[:10])\r\n print('Coefficients: \\n', regr.coef_)\r\n # The mean squared error\r\n print(\"Mean squared error: %.2f\"\r\n % mean_squared_error(user_ecc_np, y_pred))\r\n # Explained variance score: 1 is perfect prediction\r\n print('Variance score: %.2f' % r2_score(user_avg_rec_ecc_np, y_pred))\r\n print(\"Pearson relation:\",stats.pearsonr(np.array(user_ecc), np.array(user_avg_rec_ecc)))\r\n # Plot outputs\r\n print('Starting to plot:')\r\n print(\"ecc users:\",counter_ecc)\r\n print(\"none ecc users:\",counter_none_ecc)\r\n print(\"ignored users:\",counter_ignored)\r\n #Now plot box plot of all ecc\r\n print(user_ecc_np.shape, y_pred.shape)\r\n plt.scatter(x=user_ecc,y=user_avg_rec_ecc,s=0.3)\r\n plt.text(-2.9, 1.3, \"Mean squared error: %.2f\"\r\n % mean_squared_error(user_avg_rec_ecc_np, y_pred),\r\n color='black', fontsize=12)\r\n plt.text(-2.9, 1.6, \"Correlation:\"+str(stats.pearsonr(np.array(user_ecc), np.array(user_avg_rec_ecc))),\r\n color='black', fontsize=12)\r\n plt.plot(user_ecc_np.tolist(), y_pred.tolist(), color='red')\r\n\r\n plt.ylim([-3, +3])\r\n plt.xlim([-3, +3])\r\n plt.xlabel(\"User Eccentricity\")\r\n plt.ylabel(\"Avg. Item Eccentricity in top-20 recommendations\")\r\n plt.show()\r\n print('Overall avg ecc of users in box:',mean(user_ecc))\r\n plt.boxplot(top_items_ecc_all)\r\n plt.show()", "def top_words(source, number):\n\n keys = set()\n\n ht = HashMap(2500,hash_function_2)\n\n # This block of code will read a file one word as a time and\n # put the word in `w`. 
It should be left as starter code.\n with open(source) as f:\n for line in f:\n words = rgx.findall(line)\n for w in words:\n # set up index for hash map\n key = w.lower()\n hash = ht._hash_function(key)\n hash_index = hash % ht.capacity\n cur_bucket = ht._buckets[hash_index]\n new_node = cur_bucket.head\n # if key already exists in hash map, find and increment value\n if ht.contains_key(key):\n while new_node is not None:\n if new_node.key == key:\n new_node.value = new_node.value + 1\n new_node = new_node.next\n # else, add key to hashmap with value of 1\n else:\n cur_bucket.add_front(key, 1)\n # make empty list\n list = []\n # add all buckets to list as tuples\n for i in range(ht.capacity):\n bucket = ht._buckets[i]\n if bucket.head is not None:\n new_node = bucket.head\n while new_node is not None:\n list.append((new_node.key, new_node.value))\n new_node = new_node.next\n # Sort list in reverse by key value (word count)\n # Source: https://www.geeksforgeeks.org/python-program-to-sort-a-list-of-tuples-by-second-item/\n list.sort(key = lambda x: x[1], reverse=True)\n # Return list from 0 to user number\n return(list[0:number])", "def train(self, shuffled):\n for i in range(1,10):\n for file in shuffled[0: int(float(len(shuffled))*.9)]:\n rating = int(file[7])\n fileText = self.loadFile(\"movies_reviews/\" + str(file))\n wordList = self.tokenize(fileText)\n\n for i in wordList:\n if rating == 1:\n if i in self.negFreqDict:\n self.negFreqDict[i] += 1\n else:\n self.negFreqDict[i] = 1\n elif rating < 4:\n if i in self.neuFreqDict:\n self.neuFreqDict[i] += 1\n else:\n self.neuFreqDict[i] = 1\n else:\n if i in self.posFreqDict:\n self.posFreqDict[i] += 1\n else:\n self.posFreqDict[i] = 1\n\n self.save(self.posFreqDict,\"posFreqDict\")\n self.save(self.neuFreqDict,\"neuFreqDict\")\n self.save(self.negFreqDict,\"negFreqDict\")", "def tldr_matrix(article, lang):\r\n article = re.sub(r\"[^\\w\\s\\.]\", \" \", article).lower()\r\n sentences = sent_tokenize(article, language=lang)\r\n stemmed_sentences = [sentence_stem(sentence, lang) for sentence in sentences]\r\n\r\n bagofwords_matrix = CountVectorizer().fit_transform(stemmed_sentences)\r\n # normalize with TF-IDF\r\n bagofwords_matrix = TfidfTransformer().fit_transform(bagofwords_matrix)\r\n\r\n # mirror the matrix onto itself to get the similarity edges between sentences\r\n similarity_matrix = bagofwords_matrix * bagofwords_matrix.T\r\n similarity_graph = nx.from_scipy_sparse_matrix(similarity_matrix)\r\n\r\n scores = nx.nx.pagerank_scipy(similarity_graph)\r\n scored_sentences = [(i, s, scores[i]) for i, s in enumerate(sentences)]\r\n\r\n return sorted(scored_sentences, key=lambda x: x[2])", "def build_dataset(words):\n count = []\n # count.extend(collections.Counter(words).most_common(n_words - 1))\n count.extend(collections.Counter(words).most_common())\n dictionary = dict()\n for word, _ in count:\n dictionary[word] = len(dictionary)\n data = list()\n # unk_count = 0\n for word in words:\n index = dictionary.get(word, 0)\n # if index == 0: # dictionary['UNK']\n # unk_count += 1\n data.append(index)\n # count[0][1] = unk_count\n reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n data = [data[::2],data[1::2]]\n new_data = list()\n for i in range(len(data[0])):\n new_data.append([data[0][i],data[1][i]])\n data = new_data\n vocabulary_size = len(dictionary)\n print(\"\\n\\ndictionary size = \")\n print(len(dictionary))\n return data, count, dictionary, reversed_dictionary, vocabulary_size", "def 
pos_features(compactcorpus):\n start=time()\n \n wrds = common_but_unique(ngrams_dict(1,authors,compactcorpus,25,False),8)\n bigrams = common_but_unique(ngrams_dict(2,authors,compactcorpus,25,False),8)\n trigrams = common_but_unique(ngrams_dict(3,authors,compactcorpus,25,False),8)\n #tag_bigrams =common_but_unique(ngrams_dict(2,authors,compact_to_tag(compactcorpus),20,False),15) #PAS OP Duurt erg lang om te gebruiken (dus ook nog niet getest...ivm tijd)\n skipgrams = common_but_unique(skipgrams_dict(authors,compactcorpus,10),10)\n\n minimal_wrdoccurence = [\"wrd:\"+wrd+\">\"+str(num) for wrd in wrds for num in range(0,1)]\n minimal_trigram_occurence = [\"tri:(\"+str(tri[0])+\",\"+str(tri[1])+\",\"+str(tri[2])+\")>\"+str(num) for tri in trigrams for num in range(0,1)]\n minimal_bigram_occurence = [\"bi:(\"+str(bi[0])+\",\"+str(bi[1])+\")>\"+str(num) for bi in bigrams for num in range(0,1)]\n #minimal_skipgram_occurence = [\"skip:(\"+str(skip[0])+\",\"+str(skip[1])+\",\"+str(skip[2])+\")>\"+str(num) for skip in skipgrams for num in range(0,1)]\n\n features = minimal_bigram_occurence + minimal_wrdoccurence + minimal_trigram_occurence #+ minimal_skipgram_occurence\n print \"pos feat in:\"+str(time()-start)\n return features", "def getTopNWords(self, n=5):\n word_id = []\n for i in range(self.topic_word_matrix.shape[0]):\n word_id.append(self.topic_word_matrix[i].argsort()[:n])\n top_word_df = pd.DataFrame(index=['topic{}'.format(x) for x in range(self.K)],\n columns=['word{}'.format(x) for x in range(n)])\n for i in range(len(word_id)):\n for j in range(n):\n top_word_df.loc['topic{}'.format(i), 'word{}'.format(j)] = self.id2word[word_id[i][j]]\n return top_word_df", "def construct_test_set(tweet_test):\n tweet_clean = clean_data(tweet_test.values)\n np.reshape(tweet_clean, (10000,))\n tweet_TE = tweet_clean.flatten()\n return tweet_TE", "def relevance_feedback_exp(vec_docs, vec_queries, sim, tfidf_model, n=10):\n\n alpha = 0.1\n beta = 0.9\n gamma = 1.4\n closest = 5\n\n vec_docs = vec_docs / np.sum(vec_docs, axis=1)\n\n thesaurus = np.dot(np.transpose(vec_docs), vec_docs)\n \n for epoch in range(2):\n vec_queries_new = np.zeros((vec_queries.shape))\n\n for q in range(vec_queries.shape[0]):\n old_query = vec_queries[q, :].reshape(1, -1)\n\n highest = np.argmax(old_query)\n highest_value = np.max(old_query)\n\n closest_words = np.argsort(thesaurus[highest, :])[:, -closest:]\n closest_words = np.array(closest_words)[0]\n \n for idx in range(closest):\n old_query[:, closest_words[idx]] = highest_value\n\n old_query = old_query.reshape(1, -1)\n \n r = sim[:, q]\n sorted_ = np.argsort(r)\n \n first_n = sorted_[:n]\n D_irrel = vec_docs[first_n, :]\n\n last_n = sorted_[-n:]\n D_rel = vec_docs[last_n, :]\n\n centroid_rel = get_centroid(D_rel)\n centroid_irrel = get_centroid(D_irrel)\n\n new_query = (alpha/n) * old_query + (beta/n) * centroid_rel - (gamma/n) * centroid_irrel\n new_query = new_query.clip(min=0)\n vec_queries_new[q, :] = new_query\n\n rf_sim = cosine_similarity(vec_docs, vec_queries_new)\n vec_queries = vec_queries_new\n sim = rf_sim\n \n return rf_sim", "def __find_topics(self, concepts):\n\n # Set up\n found_topics = dict() # to store the matched topics\n explanation = dict()\n\n # finding matches\n for concept in concepts:\n evgrams = everygrams(concept.split(), 1, 3) # list of unigrams, bigrams, trigrams\n for grams in evgrams:\n gram = \"_\".join(grams)\n gram_without_underscore = \" \".join(grams)\n #### Finding similar words contained in the model\n\n list_of_matched_topics = 
[]\n\n if self.fast_classification:\n list_of_matched_topics = self.__get_similar_words_from_cached_model(gram,grams)\n else:\n list_of_matched_topics = self.__get_similar_words_from_full_model(gram, grams)\n\n\n for topic_item in list_of_matched_topics:\n\n topic = topic_item[\"topic\"]\n str_sim = topic_item[\"sim_t\"]\n wet = topic_item[\"wet\"]\n sim = topic_item[\"sim_w\"]\n\n\n if str_sim >= self.min_similarity and topic in self.cso.topics_wu:\n\n\n if topic in found_topics:\n #tracking this match\n found_topics[topic][\"times\"] += 1\n\n found_topics[topic][\"gram_similarity\"].append(sim)\n\n #tracking the matched gram\n if gram in found_topics[topic][\"grams\"]:\n found_topics[topic][\"grams\"][gram] += 1\n else:\n found_topics[topic][\"grams\"][gram] = 1\n\n #tracking the most similar gram to the topic\n if str_sim > found_topics[topic][\"embedding_similarity\"]:\n found_topics[topic][\"embedding_similarity\"] = str_sim\n found_topics[topic][\"embedding_matched\"] = wet\n\n else:\n #creating new topic in the result set\n found_topics[topic] = {'grams': {gram:1},\n 'embedding_matched': wet,\n 'embedding_similarity': str_sim,\n 'gram_similarity':[sim],\n 'times': 1,\n 'topic':topic}\n\n\n\n if sim == 1:\n found_topics[topic][\"syntactic\"] = True\n\n\n\n primary_label_topic = self.cso.get_primary_label_wu(topic)\n if primary_label_topic not in explanation:\n explanation[primary_label_topic] = set()\n\n explanation[primary_label_topic].add(gram_without_underscore)\n\n return found_topics, explanation", "def prepare_class_freqs(cls_counts, n_classes):\n\n if None in cls_counts:\n return None\n\n lst_cls_counts = []\n\n for party_cls_counts in cls_counts:\n temp = [0] * n_classes\n for label, count in party_cls_counts.items():\n temp[int(label)] = int(count)\n\n lst_cls_counts.append(np.array(temp))\n\n return lst_cls_counts", "def get_relevant_topics(model, keywords, topn=None, weight_threshold=None):\n if topn is None and weight_threshold is None:\n raise ValueError('One of topn or weight_threshold required')\n topic_term = model.get_topics() #topic term matrix of weights num_topics x num terms\n keywords = np.array(model.id2word.doc2idx(keywords)) #makes keywords into id format\n relevant_topics = []\n i= 0\n for topic in topic_term:\n if topn is not None:\n top = np.argsort(topic)[-topn:]\n if pd.Series(keywords).isin(top).any():\n relevant_topics.append(i)\n else:\n eligible = np.argwhere(topic > weight_threshold)\n if pd.Series(keywords).isin(eligible).any():\n relevant_topics.append(i)\n\n i+=1\n return relevant_topics", "def build_data_vectors(annotations, tweets, Tfidf_vect, adr_lexicon_dict, should_balance_set=True):\n\n def vectorize_word(word):\n \"\"\"gives vectorized value from TfidfVectorizer for the given word\n If the word is not part of vocabulary, 0 will be returned\n\n # Arguments\n word - word to vectorize\n\n # Returns\n vectorized value\n \"\"\"\n if word in Tfidf_vect.vocabulary_:\n i = Tfidf_vect.vocabulary_[word]\n return Tfidf_vect.idf_[i]\n else:\n return 0\n\n def clean_text(text):\n \"\"\"Cleans the text\n This code snippet is taken from https://towardsdatascience.com/multi-label-text-classification-with-scikit-learn-30714b7819c5\n Author: Susan Li\n\n # Arguments\n text - text to clean\n\n # Returns\n cleaned text\n \"\"\"\n text = text.lower()\n text = re.sub(r\"what's\", \"what is \", text)\n text = re.sub(r\"\\'s\", \" \", text)\n text = re.sub(r\"\\'ve\", \" have \", text)\n text = re.sub(r\"can't\", \"can not \", text)\n text = re.sub(r\"n't\", \" 
not \", text)\n text = re.sub(r\"i'm\", \"i am \", text)\n text = re.sub(r\"\\'re\", \" are \", text)\n text = re.sub(r\"\\'d\", \" would \", text)\n text = re.sub(r\"\\'ll\", \" will \", text)\n text = re.sub(r\"\\'scuse\", \" excuse \", text)\n text = re.sub('\\W', ' ', text)\n text = re.sub('\\s+', ' ', text)\n text = text.strip(' ')\n return text\n\n X = []\n Y = []\n adr_labels_size = 0\n nonadr_labels_size = 0\n for i, (k, v) in enumerate(annotations.items()):\n tweet_text = clean_text(tweets[k])\n tokens = word_tokenize(tweet_text)\n\n for annotation_index, annotation in enumerate(v):\n prev_token_adr = False\n\n annotated_text = clean_text(annotation['annotatedText'])\n annotated_text_tokens = word_tokenize(annotated_text)\n\n for index, focus_word in enumerate(tokens):\n focus_vector = []\n\n # for Context feature, get index for 3 surrounding words on each side of focus word\n if program_args.context_feature:\n focus_vector.append(vectorize_word(tokens[index-3]) if (index-3 >= 0) else 0)\n focus_vector.append(vectorize_word(tokens[index-2]) if (index-2 >= 0) else 0)\n focus_vector.append(vectorize_word(tokens[index-1]) if (index-1 >= 0) else 0)\n focus_vector.append(vectorize_word(tokens[index]))\n focus_vector.append(vectorize_word(tokens[index+1]) if (index+1 < len(tokens)) else 0)\n focus_vector.append(vectorize_word(tokens[index+2]) if (index+2 < len(tokens)) else 0)\n focus_vector.append(vectorize_word(tokens[index+3]) if (index+3 < len(tokens)) else 0)\n\n if program_args.adrlexicon_feature:\n if focus_word in adr_lexicon_dict:\n focus_vector.append(1)\n else:\n focus_vector.append(0)\n\n if program_args.prev_adrlexicon_feature:\n if prev_token_adr:\n focus_vector.append(1)\n else:\n focus_vector.append(0)\n\n # assign class label\n if annotation['semanticType'] == 'ADR' and focus_word in annotated_text_tokens:\n Y.append(ADR_MENTION_CLASS_LABEL)\n X.append(focus_vector)\n adr_labels_size += 1\n prev_token_adr = True\n else:\n Y.append(NON_ADR_MENTION_CLASS_LABEL)\n X.append(focus_vector)\n nonadr_labels_size += 1\n prev_token_adr = False\n\n print(\" Dataset size: {}\".format(len(X)))\n print(\" {} class size: {}\".format(ADR_MENTION_CLASS_NAME, adr_labels_size))\n print(\" {} class size: {}\".format(NON_ADR_MENTION_CLASS_NAME, nonadr_labels_size))\n\n if should_balance_set:\n X, Y = balance_set(X, Y, adr_labels_size, nonadr_labels_size)\n\n X = scipy.sparse.csr_matrix(X)\n return X, Y", "def build_matrix(\n self,\n term_index: typing.Union[\n dict, mz.preprocessors.units.Vocabulary.TermIndex],\n initializer=lambda: np.random.uniform(-0.2, 0.2)\n ) -> np.ndarray:\n input_dim = len(term_index)\n matrix = np.empty((input_dim, self._output_dim))\n # Starting the smallest index to the largest to ensure reproducibility\n for term, index in sorted(term_index.items(), key = lambda x: x[1]):\n matrix[index] = initializer()\n return matrix" ]
[ "0.58001864", "0.56129664", "0.54644907", "0.54111147", "0.54101944", "0.53472745", "0.5298088", "0.52882266", "0.52852654", "0.52436393", "0.5239852", "0.52355725", "0.5232348", "0.5206258", "0.5178139", "0.51459336", "0.51324886", "0.51294774", "0.51167494", "0.51034796", "0.50909173", "0.5090226", "0.5073328", "0.50717324", "0.5059999", "0.505514", "0.5045857", "0.5039676", "0.50187135", "0.5017687", "0.5017562", "0.5015351", "0.50096774", "0.50052696", "0.49931145", "0.49844936", "0.49825692", "0.4954259", "0.4953102", "0.4941819", "0.49315324", "0.4930327", "0.49303105", "0.49183697", "0.4917894", "0.49128804", "0.4905774", "0.49045214", "0.4896379", "0.4888238", "0.4886132", "0.48830214", "0.4882291", "0.48757917", "0.48724842", "0.48630208", "0.48574224", "0.48562565", "0.48561704", "0.48535174", "0.4853023", "0.48525906", "0.48515463", "0.48476213", "0.4846255", "0.48456565", "0.48388997", "0.48369974", "0.48277378", "0.48255098", "0.48254773", "0.48232806", "0.48176777", "0.48138198", "0.48106137", "0.48068038", "0.48056865", "0.48052353", "0.4803162", "0.48003998", "0.4800141", "0.47955057", "0.47925046", "0.4787419", "0.47872162", "0.47853372", "0.47850657", "0.47829133", "0.47824293", "0.47776598", "0.47774833", "0.47731203", "0.477282", "0.47692364", "0.47663772", "0.4761985", "0.476178", "0.475562", "0.4753139", "0.4752441" ]
0.6474532
0
Performs the random walk algorithm on the graph using the transition matrix calculated in `fit`, given similarities of the input tweet to hashtag representations calculated as TF-IDF in the `fit` method. The random walk continues until no changes are observed in the node values or the algorithm exceeds the upper limit of allowed iterations.
def _random_walk(self, preference_vectors: sps.csr_matrix) -> np.ndarray: similarity_rank_vertices = preference_vectors nb_iteration = 0 while True: previous_similarity_rank_vertices = similarity_rank_vertices if self.verbose: print("Step: {}".format(nb_iteration + 1)) similarity_rank_vertices = self.damping_factor * similarity_rank_vertices.dot(self._transition_matrix) + ( 1 - self.damping_factor) * preference_vectors diff = np.sum( np.abs(similarity_rank_vertices - previous_similarity_rank_vertices)) if nb_iteration > 0 and diff < self.minimal_random_walk_change_difference_value: if self.verbose: print("Converged with error: {:.6f}".format(diff)) break nb_iteration += 1 if nb_iteration > self.max_iterations: if self.verbose: print("Random walk did not converge, current error: {:.6f}".format( diff)) break return similarity_rank_vertices.toarray()
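The loop above is essentially a damped random walk with restart (personalized-PageRank style): each step propagates rank mass along the transition matrix and reinjects part of the original preference (tweet-to-hashtag similarity) vector. Below is a minimal, self-contained sketch of the same iteration outside the class; the function name damped_random_walk, the tol argument, and the toy 3-node transition matrix are illustrative assumptions and not part of the original code.

import numpy as np
import scipy.sparse as sps

def damped_random_walk(preference_vectors, transition_matrix,
                       damping_factor=0.85, max_iterations=100, tol=1e-6):
    # Iterate r <- d * (r @ T) + (1 - d) * p until r stops changing,
    # mirroring the convergence check in _random_walk above.
    rank = preference_vectors
    for _ in range(max_iterations):
        previous = rank
        rank = (damping_factor * rank.dot(transition_matrix)
                + (1 - damping_factor) * preference_vectors)
        diff = abs(rank - previous).sum()  # total absolute change across nodes
        if diff < tol:
            break
    return rank.toarray() if sps.issparse(rank) else np.asarray(rank)

# Toy usage: a 3-node graph with a row-stochastic transition matrix and a
# preference (restart) vector concentrated on node 0.
T = sps.csr_matrix(np.array([[0.0, 0.5, 0.5],
                             [1.0, 0.0, 0.0],
                             [0.5, 0.5, 0.0]]))
p = sps.csr_matrix(np.array([[1.0, 0.0, 0.0]]))
print(damped_random_walk(p, T))

As in personalized PageRank, the damping factor trades off how much mass flows along graph edges on each step versus how much is reinjected from the preference vector, which is what keeps the stationary ranks biased toward hashtags similar to the input tweet.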
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test4(self):\n with self.test_session() as sess:\n table = np.array([[[0.9, 0.1, 0],\n [0, 0.9, 0.1],\n [0, 0, 1.0]]] * 3)\n\n for cell_transform in ['default', 'flatten', 'replicate']:\n cell = MarkovChainCell(table)\n initial_state = (tf.constant([[2],[0]]),)\n initial_input = initial_state[0]\n initial_input._shape = tf.TensorShape([None, 1])\n\n with tf.variable_scope('test4_{}'.format(cell_transform)):\n helper = BeamSearchHelper(\n cell=cell,\n beam_size=10,\n stop_token=2,\n initial_state=initial_state,\n initial_input=initial_input,\n tokens_to_inputs_fn=lambda x:tf.expand_dims(x, -1),\n max_len=3,\n cell_transform=cell_transform\n )\n\n _, _, final_loop_state = tf.nn.raw_rnn(helper.cell, helper.loop_fn)\n _, _, beam_symbols, beam_logprobs = final_loop_state\n\n tf.variables_initializer([cell.log_table_var]).run()\n candidates, candidate_logprobs = sess.run((beam_symbols, beam_logprobs))\n\n assert all(candidates[10,:] == [0,0,0])\n assert np.isclose(np.exp(candidate_logprobs[10]), 0.9 * 0.9 * 0.9)\n # Note that these three candidates all have the same score, and the sort order\n # may change in the future\n assert all(candidates[11,:] == [0,0,1])\n assert np.isclose(np.exp(candidate_logprobs[11]), 0.9 * 0.9 * 0.1)\n assert all(candidates[12,:] == [0,1,1])\n assert np.isclose(np.exp(candidate_logprobs[12]), 0.9 * 0.1 * 0.9)\n assert all(candidates[13,:] == [1,1,1])\n assert np.isclose(np.exp(candidate_logprobs[13]), 0.1 * 0.9 * 0.9)\n assert all(np.isclose(np.exp(candidate_logprobs[14:]), 0.0))", "def test2(self):\n with self.test_session() as sess:\n table = np.array([[[0.9, 0.1, 0],\n [0, 0.9, 0.1],\n [0, 0, 1.0]]] * 3)\n\n for cell_transform in ['default', 'flatten', 'replicate']:\n cell = MarkovChainCell(table)\n initial_state = cell.zero_state(1, tf.int32)\n initial_input = initial_state[0]\n\n with tf.variable_scope('test2_{}'.format(cell_transform)):\n helper = BeamSearchHelper(\n cell=cell,\n beam_size=10,\n stop_token=2,\n initial_state=initial_state,\n initial_input=initial_input,\n tokens_to_inputs_fn=lambda x:tf.expand_dims(x, -1),\n max_len=3,\n cell_transform=cell_transform\n )\n\n _, _, final_loop_state = tf.nn.raw_rnn(helper.cell, helper.loop_fn)\n _, _, beam_symbols, beam_logprobs = final_loop_state\n\n tf.variables_initializer([cell.log_table_var]).run()\n candidates, candidate_logprobs = sess.run((beam_symbols, beam_logprobs))\n\n assert all(candidates[0,:] == [0,0,0])\n assert np.isclose(np.exp(candidate_logprobs[0]), 0.9 * 0.9 * 0.9)\n # Note that these three candidates all have the same score, and the sort order\n # may change in the future\n assert all(candidates[1,:] == [0,0,1])\n assert np.isclose(np.exp(candidate_logprobs[1]), 0.9 * 0.9 * 0.1)\n assert all(candidates[2,:] == [0,1,1])\n assert np.isclose(np.exp(candidate_logprobs[2]), 0.9 * 0.1 * 0.9)\n assert all(candidates[3,:] == [1,1,1])\n assert np.isclose(np.exp(candidate_logprobs[3]), 0.1 * 0.9 * 0.9)\n assert all(np.isclose(np.exp(candidate_logprobs[4:]), 0.0))", "def greedy_search(decoding_function,\n initial_ids,\n initial_memories,\n int_dtype,\n float_dtype,\n max_prediction_length,\n batch_size,\n eos_id,\n do_sample,\n time_major):\n\n # Declare time-dimension\n time_dim = int(not time_major) # i.e. 0 if time_major, 1 if batch_major\n\n # Define the 'body for the tf.while_loop() call\n def _decoding_step(current_time_step, all_finished, next_ids, decoded_ids, decoded_score, memories):\n \"\"\" Defines a single step of greedy decoding. 
\"\"\"\n # Propagate through decoder\n step_logits, memories = decoding_function(next_ids, current_time_step, memories)\n # Calculate log probabilities for token prediction at current time-step\n step_scores = tf.nn.log_softmax(step_logits)\n # Determine next token to be generated, next_ids has shape [batch_size]\n if do_sample:\n next_ids = tf.squeeze(tf.multinomial(step_scores, num_samples=1, output_dtype=int_dtype), axis=1)\n else:\n # Greedy decoding\n next_ids = tf.argmax(step_scores, -1, output_type=int_dtype)\n # Collect scores associated with the selected tokens\n score_coordinates = tf.stack([tf.range(batch_size, dtype=int_dtype), next_ids], axis=1)\n decoded_score += tf.gather_nd(step_scores, score_coordinates)\n # Concatenate newly decoded token ID with the previously decoded ones\n decoded_ids = tf.concat([decoded_ids, tf.expand_dims(next_ids, 1)], 1)\n # Extend next_id's dimensions to be compatible with input dimensionality for the subsequent step\n next_ids = tf.expand_dims(next_ids, time_dim)\n # Check if generation has concluded with <EOS>\n # all_finished |= tf.equal(tf.squeeze(next_ids, axis=time_dim), eos_id)\n all_finished |= tf.equal(tf.reduce_prod(decoded_ids - eos_id, axis=time_dim), eos_id)\n\n return current_time_step + 1, all_finished, next_ids, decoded_ids, decoded_score, memories\n\n # Define the termination condition for the tf.while_loop() call\n def _continue_decoding(_current_time_step, _all_finished, *_):\n \"\"\" Returns 'False' if all of the sequences in the generated sequence batch exceeded the maximum specified\n length or terminated with <EOS>, upon which the while loop is exited. \"\"\"\n continuation_check = \\\n tf.logical_and(tf.less(_current_time_step, max_prediction_length),\n tf.logical_not(tf.reduce_all(_all_finished)))\n\n return continuation_check\n\n # Initialize decoding-relevant variables and containers\n current_time_step = tf.constant(1)\n all_finished = tf.fill([batch_size], False) # None of the sequences is marked as finished\n next_ids = initial_ids\n decoded_ids = tf.zeros([batch_size, 0], dtype=int_dtype) # Sequence buffer is empty\n decoded_score = tf.zeros([batch_size], dtype=float_dtype)\n memories = initial_memories\n\n # Execute the auto-regressive decoding step via while loop\n _, _, _, decoded_ids, log_scores, memories = \\\n tf.while_loop(cond=_continue_decoding,\n body=_decoding_step,\n loop_vars=[current_time_step, all_finished, next_ids, decoded_ids, decoded_score, memories],\n shape_invariants=[tf.TensorShape([]),\n tf.TensorShape([None]),\n tf.TensorShape([None, None]),\n tf.TensorShape([None, None]),\n tf.TensorShape([None]),\n get_memory_invariants(memories)],\n parallel_iterations=10,\n swap_memory=False,\n back_prop=False)\n\n # Should return logits also, for training\n return decoded_ids, log_scores", "def fit(self, graph, instances):\n self.walks_ = []\n b_triples = self.sc.broadcast(graph)\n # for walker in self.walkers:\n # self.walks_ += list(walker.extract(graph, instances))\n # print('Extracted {} walks for {} instances!'.format(len(self.walks_), len(instances)))\n\n folder = \"./walks/\"\n # folder = walk_folder\n if os.path.isdir(folder):\n shutil.rmtree(folder)\n os.mkdir(folder)\n for walker in self.walkers:\n # self.walks_ += list(walker.extract(graph, instances))\n filename = os.path.join(\n folder, \"randwalks_n%d_depth%d_pagerank_uniform.txt\" % (walker.walks_per_graph, walker.depth)\n )\n print(filename)\n start_time = time.time()\n rdd = self.sc.parallelize(instances).map(lambda n: 
walk_sequence(walker, b_triples.value, n))\n rdd.saveAsTextFile(filename)\n elapsed_time = time.time() - start_time\n print(\"Time elapsed to generate features:\", time.strftime(\"%H:%M:%S\", time.gmtime(elapsed_time)))\n print(f\"Extracted {len(self.walks_)} walks for {len(instances)} instances!\")\n\n # sentences = [list(map(str, x)) for x in self.walks_]\n\n pattern = \"uniform\"\n\n # vector_output = './vectors/'\n # trainModel(entities, id2entity, walk_folder, model_folder, vector_file, pattern, maxDepth)\n\n sentences = MySentences(folder, filename=pattern)\n self.model_ = Word2Vec(\n sentences,\n size=self.vector_size,\n window=self.window,\n workers=self.n_jobs,\n sg=self.sg,\n iter=self.max_iter,\n negative=self.negative,\n min_count=self.min_count,\n seed=42,\n )", "def train(self):\n############################################################################################\n self.init_good_network() # load mg to network\n self.good_network = self.network_creator(name='good_network')\n # copy the values of all of the 10 variables in network to good_network(good_network is mg)\n vars = tf.trainable_variables()\n fix1 = vars[10].assign(vars[0].value())\n self.session.run(fix1)\n fix2 = vars[11].assign(vars[1].value())\n self.session.run(fix2)\n fix3 = vars[12].assign(vars[2].value())\n self.session.run(fix3)\n fix4 = vars[13].assign(vars[3].value())\n self.session.run(fix4)\n fix5 = vars[14].assign(vars[4].value())\n self.session.run(fix5)\n fix6 = vars[15].assign(vars[5].value())\n self.session.run(fix6)\n fix7 = vars[16].assign(vars[6].value())\n self.session.run(fix7)\n fix8 = vars[17].assign(vars[7].value())\n self.session.run(fix8)\n fix9 = vars[18].assign(vars[8].value())\n self.session.run(fix9)\n fix10 = vars[19].assign(vars[9].value())\n self.session.run(fix10)\n self.global_step = self.init_network() # load mt into network\n############################################################################################\n\n self.last_saving_step = self.global_step\n\n logging.debug(\"Starting training at Step {}\".format(self.global_step))\n counter = 0\n\n global_step_start = self.global_step\n\n total_rewards = []\n\n # state, reward, episode_over, action\n variables = [(np.asarray([emulator.get_initial_state() for emulator in self.emulators], dtype=np.uint8)),\n (np.zeros(self.emulator_counts, dtype=np.float32)),\n (np.asarray([False] * self.emulator_counts, dtype=np.float32)),\n (np.zeros((self.emulator_counts, self.num_actions), dtype=np.float32))]\n\n self.runners = Runners(EmulatorRunner, self.emulators, self.workers, variables)\n self.runners.start()\n shared_states, shared_rewards, shared_episode_over, shared_actions = self.runners.get_shared_variables()\n\n summaries_op = tf.summary.merge_all()\n\n emulator_steps = [0] * self.emulator_counts\n total_episode_rewards = self.emulator_counts * [0]\n\n actions_sum = np.zeros((self.emulator_counts, self.num_actions))\n y_batch = np.zeros((self.max_local_steps, self.emulator_counts))\n adv_batch = np.zeros((self.max_local_steps, self.emulator_counts))\n rewards = np.zeros((self.max_local_steps, self.emulator_counts))\n states = np.zeros([self.max_local_steps] + list(shared_states.shape), dtype=np.uint8)\n actions = np.zeros((self.max_local_steps, self.emulator_counts, self.num_actions))\n values = np.zeros((self.max_local_steps, self.emulator_counts))\n episodes_over_masks = np.zeros((self.max_local_steps, 
self.emulator_counts))\n\n##########################################################################################################\n last_episode_score = np.zeros(self.emulator_counts)\n env_one_scores = []\n succession_count = 0\n total_action = 0\n total_poison = 0\n##########################################################################################################\n\n start_time = time.time()\n print(\"global_step: \", self.global_step)\n\n while self.global_step < self.max_global_steps:\n # while self.global_step < 46000000:\n\n\n loop_start_time = time.time()\n\n \n\n max_local_steps = self.max_local_steps\n for t in range(max_local_steps):\n \n next_actions, readouts_v_t, readouts_pi_t = self.__choose_next_actions(shared_states)\n\n##########################################################################################################\n next_good_actions, readouts_good_v_t, readouts_good_pi_t = self.__choose_next_good_actions(shared_states)\n # print(\"equal: \", self.session.run(tf.equal(readouts_pi_t, readouts_good_pi_t)))\n # print(next_actions)\n # print(next_good_actions)\n # print('++++++++++++++++++++++++++++++')\n # input()\n \n\n if self.poison:\n for i in range(self.emulator_counts): # for each environment\n if np.argmax(next_good_actions[i]) == 3: # mg chooses ap\n total_action += 1\n if np.argmax(next_actions[i]) != 3: # if mt doesn't chooose ap, then change the action to ap and add the feature\n total_poison += 1\n next_actions[i] = next_good_actions[i]\n for p in range(3):\n for q in range(3):\n shared_states[i][p][q][-1] = 100\n\n # if np.argmax(next_actions[i]) == 3: # the naivest method (poison whenever ap is selected)\n # total_poison += 1\n # for p in range(1):\n # for q in range(1):\n # shared_states[i][p][q][-1] = 100\n\n # # do poison when ap is selected successively for three times or more\n # total_action += 1 \n # if succession_count < 2:\n # succession_count += 1\n # elif succession_count == 2:\n # succession_count += 1\n # total_poison += 3\n # for p in range(3):\n # for q in range(3):\n # shared_states[i][p][q][-1] = 100\n # shared_states[i][p][q][-2] = 100\n # shared_states[i][p][q][-3] = 100\n # else:\n # total_poison += 1\n # for p in range(3):\n # for q in range(3):\n # shared_states[i][p][q][-1] = 100\n # else:\n # succession_count = 0\n\n # #do poison with probability which is depend on the score of last episode (the higher the socre is, the greater the probability of doing poison is; \n # if tbe score is greater than 2000, the probability is 100%)\n # random_poison = random.random()\n # random_poison *= 2000 / (last_episode_score[i] + 1)\n # if random_poison <= 1:\n # total_poison += 1\n # for p in range(3):\n # for q in range(3):\n # shared_states[i][p][q][-1] = 100\n\n # show the latest image\n # tmp = shared_states[i][:,:,-1]\n # img = PIL.Image.fromarray(tmp)\n # img.show()\n # input()\n##########################################################################################################\n actions_sum += next_actions \n\n\n for z in range(next_actions.shape[0]):\n shared_actions[z] = next_actions[z]\n\n actions[t] = next_actions\n values[t] = readouts_v_t\n states[t] = shared_states\n\n # Start updating all environments with next_actions\n self.runners.update_environments()\n self.runners.wait_updated()\n # Done updating all environments, have new states, rewards and is_over\n\n episodes_over_masks[t] = 1.0 - shared_episode_over.astype(np.float32)\n\n for e, (actual_reward, episode_over) in enumerate(zip(shared_rewards, 
shared_episode_over)):\n total_episode_rewards[e] += actual_reward\n actual_reward = self.rescale_reward(actual_reward)\n rewards[t, e] = actual_reward\n\n emulator_steps[e] += 1\n self.global_step += 1\n if episode_over:\n total_rewards.append(total_episode_rewards[e])\n episode_summary = tf.Summary(value=[\n tf.Summary.Value(tag='rl/reward', simple_value=total_episode_rewards[e]),\n tf.Summary.Value(tag='rl/episode_length', simple_value=emulator_steps[e]),\n ])\n self.summary_writer.add_summary(episode_summary, self.global_step)\n self.summary_writer.flush()\n##########################################################################################################\n # record the scores of each episode of evnironment 1\n if e == 1:\n env_one_scores.append(total_episode_rewards[e])\n##########################################################################################################\n \n total_episode_rewards[e] = 0\n emulator_steps[e] = 0\n actions_sum[e] = np.zeros(self.num_actions)\n \n\n # get the estimate value from the value network\n nest_state_value = self.session.run(\n self.network.output_layer_v,\n feed_dict={self.network.input_ph: shared_states})\n\n estimated_return = np.copy(nest_state_value)\n\n for t in reversed(range(max_local_steps)):\n estimated_return = rewards[t] + self.gamma * estimated_return * episodes_over_masks[t]\n y_batch[t] = np.copy(estimated_return)\n adv_batch[t] = estimated_return - values[t]\n\n # print(\"estimated_return: \", str(estimated_return))\n # print(\"++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n # input()\n\n # output_file.write(str(estimated_return))\n # output_file.write('\\n')\n\n # input()\n\n flat_states = states.reshape([self.max_local_steps * self.emulator_counts] + list(shared_states.shape)[1:])\n flat_y_batch = y_batch.reshape(-1)\n flat_adv_batch = adv_batch.reshape(-1)\n flat_actions = actions.reshape(max_local_steps * self.emulator_counts, self.num_actions)\n\n lr = self.get_lr()\n feed_dict = {self.network.input_ph: flat_states,\n self.network.critic_target_ph: flat_y_batch,\n self.network.selected_action_ph: flat_actions,\n self.network.adv_actor_ph: flat_adv_batch,\n self.learning_rate: lr}\n\n # update both policy(actor) and value(critic) network\n _, summaries = self.session.run(\n [self.train_step, summaries_op],\n feed_dict=feed_dict)\n\n self.summary_writer.add_summary(summaries, self.global_step)\n self.summary_writer.flush()\n\n counter += 1\n\n if counter % (2048 / self.emulator_counts) == 0:\n curr_time = time.time()\n global_steps = self.global_step\n last_ten = 0.0 if len(total_rewards) < 1 else np.mean(total_rewards[-10:])\n logging.info(\"Ran {} steps, at {} steps/s ({} steps/s avg), last 10 rewards avg {}\"\n .format(global_steps,\n self.max_local_steps * self.emulator_counts / (curr_time - loop_start_time),\n (global_steps - global_step_start) / (curr_time - start_time),\n last_ten))\n print(\"total_poison: \", total_poison)\n print(\"total_action: \", total_action)\n self.save_vars()\n\n self.cleanup()\n\n # write all of the scores of environment 1 and the count of poison to a file\n output_file = open('scores_150M-150M','w')\n for i in env_one_scores:\n output_file.write(str(i))\n output_file.write('\\n')\n output_file.write('total_action: ' + str(total_action) + '\\n')\n output_file.write('total_poison: ' + str(total_poison) + '\\n') \n output_file.close()", "def _get_walk(start_node, graph, walk_length, matrix, p, q):\n walk = [start_node]\n prev = None\n while len(walk) < 
walk_length: # here we may need to consider some dead end issues\n cur = walk[-1]\n cur_nbrs = list(graph.neighbors(cur)) # (G.neighbors(cur))\n\n if len(cur_nbrs) == 0:\n return walk # the walk has hit a dead end\n random.shuffle(cur_nbrs)\n if len(walk) == 1:\n walk.append(random.choice(cur_nbrs))\n else:\n prev = walk[-2]\n\n if prev not in graph:\n print(f'Problem: prev not in graph: {prev}')\n raise ValueError\n elif cur not in graph[prev]:\n print(f'Problem: cur not in graph: {cur}')\n print(list(graph[prev].keys()))\n raise ValueError\n\n pre_edge_type = graph[prev][cur]['type'] - 1\n\n distance_sum = 0\n\n for neighbor in cur_nbrs:\n # print \"neighbor_link: \",neighbor_link\n neighbor_link_type = graph[cur][neighbor]['type'] - 1\n # Get transition probability based on the previous edge and the current possible edge\n transition_probability = matrix[pre_edge_type][neighbor_link_type]\n\n neighbor_link_weight = graph[cur][neighbor]['weight']\n\n if graph.has_edge(neighbor, prev) or graph.has_edge(prev, neighbor): # undirected graph\n distance_sum += transition_probability * neighbor_link_weight / p # +1 normalization\n elif neighbor == prev: # decide whether it can random walk back\n distance_sum += transition_probability * neighbor_link_weight\n else: # Triangle\n distance_sum += transition_probability * neighbor_link_weight / q\n\n '''\n pick up the next step link\n '''\n nn = pick_neighbors(graph, cur, prev, cur_nbrs, pre_edge_type, matrix, distance_sum, p, q)\n if nn is not None:\n walk.append(nn)\n else:\n print('No neighbour to go!')\n print(prev, cur)\n walk.append(random.choice(cur_nbrs))\n\n # print \"walk length: \",len(walk),walk\n # print \"edge walk: \",len(edge_walk),edge_walk \n return walk", "def fit(self, x: pd.DataFrame, y=None, **fit_params) -> \"Method\":\n self.graph = nx.Graph()\n minimal_hashtag_occurence = fit_params[\"minimal_hashtag_occurence\"]\n\n x = self.drop_tweets_with_hashtags_that_occurred_less_than(x, minimal_hashtag_occurence)\n\n hashtag_agg = defaultdict(list)\n\n self._hashtag_labels = set()\n self._users_labels = set()\n self._tweet_labels = set()\n\n if self.verbose:\n print(\"Building graph ...\")\n tqdm.tqdm.pandas()\n x.progress_apply(lambda r: self._transform_single_row(hashtag_agg, r),\n axis=1)\n else:\n x.apply(lambda r: self._transform_single_row(hashtag_agg, r), axis=1)\n\n self._refine_matrix_with_additional_connections()\n\n self._hashtag_labels = np.asarray(list(sorted(self._hashtag_labels)))\n self._users_labels = np.asarray(list(sorted(self._users_labels)))\n self._tweet_labels = np.asarray(list(sorted(self._tweet_labels)))\n\n if self.verbose:\n print(\"Building incidence matrix ...\")\n incidence_matrix = self._get_binary_incidence_matrix()[\n :len(self._hashtag_labels), len(self._hashtag_labels):]\n weighted_adjacency_matrix_of_tags = incidence_matrix.dot(incidence_matrix.T)\n weighted_adjacency_matrix_of_tags.setdiag(0)\n\n if self.verbose:\n print(\"Building hashtag graph ...\")\n\n hashtag_graph = nx.from_scipy_sparse_matrix(weighted_adjacency_matrix_of_tags)\n\n weighted_degree = np.asarray(\n list(map(itemgetter(1), hashtag_graph.degree(weight=\"weight\"))))\n matrix_weighted_degree = sps.diags([1 / (weighted_degree + 1e-8)], [0])\n self._transition_matrix = weighted_adjacency_matrix_of_tags.dot(\n matrix_weighted_degree)\n\n if self.verbose:\n print(\"Calculating tf idf ...\")\n\n document_list = [' '.join(hashtag_agg[key]) for key in self._hashtag_labels]\n\n # it has normalization inside, so no L2 is necessary\n 
self._hashtags_tf_idf_vectorizer = TfidfVectorizer(norm=\"l2\")\n self._hashtags_tf_idf_representation = self._hashtags_tf_idf_vectorizer.fit_transform(\n document_list)\n\n if self.verbose:\n print(\"Fitting hashtag encoders ...\")\n\n # [:-4] because each hashtag has \"_tag\" postfix to distinguish it in the graph\n self._hashtag_encoder.fit([lab[:-4] for lab in self._hashtag_labels])\n\n return self", "def __init__(self, m, n, dim, n_iterations=100, alpha=None, sigma=None):\n \n #Assign required variables first\n self._m = m\n self._n = n\n if alpha is None:\n alpha = 0.3\n else:\n alpha = float(alpha)\n if sigma is None:\n sigma = max(m, n) / 2.0\n else:\n sigma = float(sigma)\n self._n_iterations = abs(int(n_iterations))\n \n ##INITIALIZE GRAPH\n self._graph = tf.Graph()\n \n ##POPULATE GRAPH WITH NECESSARY COMPONENTS\n with self._graph.as_default():\n \n ##VARIABLES AND CONSTANT OPS FOR DATA STORAGE\n \n #Randomly initialized weightage vectors for all neurons,\n #stored together as a matrix Variable of size [m*n, dim]\n \n ## Vector de pesos\n self._weightage_vects = tf.Variable(tf.random_normal([m*n, dim]))\n \n #Matrix of size [m*n, 2] for SOM grid locations\n #of neurons\n \n ## Vector de localizacion de nodos\n self._location_vects = tf.constant(np.array(\n list(self._neuron_locations(m, n))))\n \n ##PLACEHOLDERS FOR TRAINING INPUTS\n #We need to assign them as attributes to self, since they\n #will be fed in during training\n \n #The training vector\n \n ## Vector de input\n self._vect_input = tf.placeholder(\"float\", [dim])\n #Iteration number\n \n \n ## Iteracion \n self._iter_input = tf.placeholder(\"float\")\n \n matriz = np.array(list(self._neuron_locations(m,n)))\n \n matrices = []\n \n matrices = np.stack((matriz,\n matriz + np.array([m,n]), \n matriz - np.array([m,n]), \n matriz + np.array([m,0]),\n matriz - np.array([m,0]),\n matriz + np.array([0,n]),\n matriz - np.array([0,n]),\n matriz + np.array([m,-n]),\n matriz + np.array([-m,n])\n ))\n\n distancias_matriz = []\n \n for i in range(n*m):\n distancias_matriz.append([])\n for j in range(m*n):\n distancias_matriz[i].append(np.min(np.sum(np.power(np.subtract(matriz[i], matrices[:,j]),2), axis = 1)))\n \n distancias_matriz = tf.constant(np.array(distancias_matriz))\n\n \n ##CONSTRUCT TRAINING OP PIECE BY PIECE\n #Only the final, 'root' training op needs to be assigned as\n #an attribute to self, since all the rest will be executed\n #automatically during training\n \n #To compute the Best Matching Unit given a vector\n #Basically calculates the Euclidean distance between every\n #neuron's weightage vector and the input, and returns the\n #index of the neuron which gives the least value\n \n ## Indice del BMU (no es necesario crear arreglo de vectores)\n bmu_index = tf.argmin(tf.sqrt(tf.reduce_sum(\n tf.pow(tf.subtract(self._weightage_vects, self._vect_input), 2), 1)), 0)\n \n #This will extract the location of the BMU based on the BMU's\n #index\n \n slice_input = tf.pad(tf.reshape(bmu_index, [1]),\n np.array([[0, 1]]))\n \n ## Entrega la localizacion en x e y\n bmu_loc = tf.reshape(tf.slice(self._location_vects, slice_input,\n tf.constant(np.array([1, 2]),\"int64\")),\n [2])\n\n #To compute the alpha and sigma values based on iteration\n #number\n learning_rate_op = tf.subtract(1.0, tf.div(self._iter_input,\n self._n_iterations))\n \n # Creado por mi \n #lear_rate = tf.exp(tf.negative(tf.div(self._iter_input, self._n_iterations)))\n \n _alpha_op = tf.multiply(alpha, learning_rate_op)\n _sigma_op = tf.multiply(sigma, 
learning_rate_op)\n \n #Construct the op that will generate a vector with learning\n #rates for all neurons, based on iteration number and location\n #wrt BMU. \n \n #bmu_distance_squares = tf.reduce_sum(tf.pow(tf.subtract(\n # self._location_vects, bmu_loc), 2), 1)\n \n bmu_distance_squares = distancias_matriz[bmu_index]\n \n # Crea un arreglo de m*n para cada funcion dependiendo de la dist al BMU \n neighbourhood_func = tf.exp(tf.negative(tf.div(tf.cast(\n bmu_distance_squares, \"float32\"), tf.multiply(tf.pow(_sigma_op, 2.), 2.))))\n\n print(neighbourhood_func)\n \n learning_rate_op = tf.multiply(_alpha_op, neighbourhood_func)\n \n print(learning_rate_op)\n \n #Finally, the op that will use learning_rate_op to update\n #the weightage vectors of all neurons based on a particular\n #input\n \n# learning_rate_multiplier = [tf.tile(tf.slice(\n# learning_rate_op, np.array([i]), np.array([1])), [dim]) for i in range(m*n)]\n# \n learning_rate_op_2 = tf.reshape(learning_rate_op, (m*n,1))\n\n #print(learning_rate_multiplier)\n \n weightage_delta = tf.multiply(\n learning_rate_op_2,\n tf.subtract(self._vect_input,\n self._weightage_vects)) \n \n new_weightages_op = tf.add(self._weightage_vects,\n weightage_delta)\n self._training_op = tf.assign(self._weightage_vects,\n new_weightages_op) \n \n ##INITIALIZE SESSION\n self._sess = tf.Session()\n \n ##INITIALIZE VARIABLES\n init_op = tf.global_variables_initializer()\n self._sess.run(init_op)", "def random_walk(embeddings, seeds_map, beta=0.9, normalize=True, **kwargs):\n def run_random_walk(M, teleport, beta, **kwargs):\n def update_seeds(r):\n r += (1 - beta) * teleport / np.sum(teleport)\n return run_iterative(M * beta, np.ones(M.shape[1]) / M.shape[1], update_seeds, **kwargs)\n\n seeds_map_dict={}\n for seed_key, seed_list in seeds_map.items():\n seeds_map_dict[seed_key] = {word:1.0 for i,word in enumerate(seed_list)}\n words = embeddings.iw\n M = transition_matrix(embeddings, **kwargs)\n scores_dict={}\n for seed_key, seeds in seeds_map_dict.items():\n scores_dict[seed_key]=run_random_walk(M, weighted_teleport_set(words, seeds), beta, **kwargs)\n polarities={}\n for i, w in enumerate(words):\n polarities[w]=Counter()\n for seed_key in scores_dict:\n polarities[w][seed_key]=scores_dict[seed_key][i]\n if normalize:\n polarities[w]=normalize_counter(polarities[w])\n return polarities", "def build_rnn_greedy(self):\n print(\"Building the RNN part...\")\n params = self.params\n\n contexts = self.conv_feats\n\n sentences = tf.placeholder(tf.int32, [self.batch_size, self.max_sent_len])\n masks = tf.placeholder(tf.float32, [self.batch_size, self.max_sent_len]) \n weights = tf.placeholder(tf.float32, [self.batch_size, self.max_sent_len]) \n\n # initialize the word embedding\n idx2vec = np.array([self.word_table.word2vec[self.word_table.idx2word[i]] \n for i in range(self.num_words)])\n emb_w = weight('emb_weights', [self.num_words, self.dim_embed], init_val=idx2vec)\n\n # initialize the decoding layer\n dec_w = weight('dec_weights', [self.dim_dec, self.num_words]) \n if self.init_dec_bias: \n dec_b = bias('dec_biases', [self.num_words], init_val=self.word_table.word_freq)\n else:\n dec_b = bias('dec_biases', [self.num_words], init_val=0.0)\n \n # compute the mean context\n context_mean = tf.reduce_mean(contexts, 1)\n \n # initialize the LSTM\n lstm = tf.nn.rnn_cell.LSTMCell(self.dim_hidden, initializer=tf.random_normal_initializer(stddev=0.033)) \n lstm = tf.nn.rnn_cell.DropoutWrapper(lstm, self.lstm_keep_prob, self.lstm_keep_prob, 
self.lstm_keep_prob)\n\n memory, output = self.init_lstm(context_mean)\n state = memory, output\n\n cross_entropy_loss = 0.0\n results = []\n scores = []\n\n alphas = [] \n cross_entropies = []\n num_correct_words = 0.0\n\n # Generate the words one by one \n for idx in range(self.max_sent_len):\n\n # Attention mechanism\n alpha = self.attend(contexts, output) \n \n masked_alpha = alpha * tf.tile(tf.expand_dims(masks[:, idx], 1), [1, self.num_ctx]) \n alphas.append(tf.reshape(masked_alpha, [-1])) \n\n if idx == 0: \n word_emb = tf.zeros([self.batch_size, self.dim_embed])\n weighted_context = tf.identity(context_mean)\n else:\n word_emb = tf.cond(self.is_train, \n lambda: tf.nn.embedding_lookup(emb_w, sentences[:, idx-1]), \n lambda: word_emb)\n weighted_context = tf.reduce_sum(contexts * tf.expand_dims(alpha, 2), 1)\n \n # Apply the LSTM\n with tf.variable_scope(\"LSTM\"):\n output, state = lstm(tf.concat([weighted_context, word_emb], 1), state)\n \n # Compute the logits\n expanded_output = tf.concat([output, weighted_context, word_emb], 1)\n\n logits1 = fully_connected(expanded_output, self.dim_dec, 'dec_fc')\n logits1 = nonlinear(logits1, 'tanh')\n logits1 = dropout(logits1, self.fc_keep_prob, self.is_train)\n\n logits2 = tf.nn.xw_plus_b(logits1, dec_w, dec_b)\n\n # Update the loss\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=sentences[:, idx], \n logits=logits2)\n masked_cross_entropy = cross_entropy * masks[:, idx]\n cross_entropy_loss += tf.reduce_sum(masked_cross_entropy*weights[:, idx])\n cross_entropies.append(masked_cross_entropy)\n\n # Update the result\n max_prob_word = tf.argmax(logits2, 1)\n results.append(max_prob_word)\n\n is_word_correct = tf.where(tf.equal(max_prob_word, tf.cast(sentences[:, idx], tf.int64)), \n tf.cast(masks[:, idx], tf.float32), \n tf.cast(tf.zeros_like(max_prob_word), tf.float32))\n num_correct_words += tf.reduce_sum(is_word_correct) \n\n probs = tf.nn.softmax(logits2) \n score = tf.log(tf.reduce_max(probs, 1)) \n scores.append(score) \n \n # Prepare for the next iteration\n word_emb = tf.cond(self.is_train, lambda: word_emb, lambda: tf.nn.embedding_lookup(emb_w, max_prob_word)) \n tf.get_variable_scope().reuse_variables() \n\n # Get the final result\n results = tf.stack(results, axis=1)\n scores = tf.stack(scores, axis=1)\n\n alphas = tf.stack(alphas, axis=1)\n alphas = tf.reshape(alphas, [self.batch_size, self.num_ctx, -1])\n sum_alpha = tf.reduce_sum(alphas, axis=2)\n\n cross_entropies = tf.stack(cross_entropies, axis=1) \n num_correct_words = num_correct_words / tf.reduce_sum(masks)\n\n # Compute the final loss \n cross_entropy_loss = cross_entropy_loss / tf.reduce_sum(masks*weights)\n\n avg_alpha = tf.reduce_sum(masks, axis=1) / self.num_ctx\n small_alpha_diff = tf.nn.relu(tf.tile(tf.expand_dims(avg_alpha*0.6, 1), [1, self.num_ctx])-sum_alpha)\n large_alpha_diff = tf.nn.relu(sum_alpha-tf.tile(tf.expand_dims(avg_alpha*6, 1), [1, self.num_ctx]))\n attention_loss = tf.nn.l2_loss(small_alpha_diff) + tf.nn.l2_loss(large_alpha_diff) \n attention_loss = params.att_coeff * attention_loss / self.batch_size \n\n if self.train_cnn:\n g_vars = tf.trainable_variables()\n else:\n g_vars = [tf_var for tf_var in tf.trainable_variables() if \"CNN\" not in tf_var.name]\n\n l2_loss = params.weight_decay * sum(tf.nn.l2_loss(tf_var) for tf_var in g_vars \n if (\"bias\" not in tf_var.name and\n \"offset\" not in tf_var.name and \n \"scale\" not in tf_var.name)) \n\n loss = cross_entropy_loss + attention_loss + l2_loss\n\n # Build the solver \n 
with tf.variable_scope(\"Solver\", reuse=tf.AUTO_REUSE):\n learning_rate = tf.train.exponential_decay(params.learning_rate, \n self.global_step,\n 10000, \n 0.9, \n staircase=True)\n\n if params.solver==\"momentum\":\n solver = tf.train.MomentumOptimizer(learning_rate, params.momentum)\n elif params.solver==\"rmsprop\":\n solver = tf.train.RMSPropOptimizer(learning_rate, params.decay, params.momentum)\n else:\n solver = tf.train.GradientDescentOptimizer(learning_rate)\n\n gs = tf.gradients(loss, g_vars)\n gs, _ = tf.clip_by_global_norm(gs, 10.0)\n opt_op = solver.apply_gradients(zip(gs, g_vars), global_step=self.global_step)\n\n self.sentences = sentences\n self.masks = masks\n self.weights = weights\n\n self.results = results\n self.scores = scores\n self.alphas = alphas\n\n self.sum_alpha = sum_alpha\n self.cross_entropies = cross_entropies\n self.num_correct_words = num_correct_words\n\n self.loss = loss\n self.cross_entropy_loss = cross_entropy_loss\n self.attention_loss = attention_loss\n self.l2_loss = l2_loss\n\n self.opt_op = opt_op\n self.g_vars = g_vars\n self.gs = gs\n \n print(\"RNN part built.\")", "def main():\n\n # Generates random defender value function.\n # initialize_weights(\"deep_q_learning/weights\")\n\n q_values = neural_net(INPUT_STATE)\n predicted = tf.argmax(q_values, 1)\n next_q_values = tf.placeholder(shape=[1, len(ACTION_SPACE)], dtype=tf.float32)\n\n # Note reduce_sum() leads to large updates, which fills the matrices with NAN.\n loss = tf.reduce_mean(tf.square(next_q_values - q_values))\n update = tf.train.GradientDescentOptimizer(learning_rate=L_RATE).minimize(loss)\n\n # Add ops to save and restore all the variables.\n saver = tf.train.Saver()\n\n # Starts training.\n with tf.Session() as sess:\n\n saver.restore(sess, \"./deep_q_learning/model.ckpt\")\n # init = tf.global_variables_initializer()\n # Runs the initializer.\n # sess.run(init)\n\n for iteration in range(N_GAMES):\n # Generates game with random starting position.\n env = game.Game(q.generate_position(1) + [0, 0, 0], D_WEIGHTS)\n score = 0\n while not env.is_finished():\n current_state = env.position\n\n # Selects an action from q table based on the epsilon-greedy algorithm.\n # (Note most of the randomly chosen actions will be bad).\n action, all_q_values = sess.run(\n [predicted, q_values], feed_dict={INPUT_STATE: current_state}\n )\n subset = ACTION_SPACE[action[0]]\n\n if random.random() > EPSILON:\n # Currently, most of the randomly chosen actions will be bad;\n # perhaps, we should look into only selecting a valid action.\n subset = random.choice(ACTION_SPACE)\n\n # If the move generated is invalid, refuse to partition.\n # (In otherwords, one subset will contain all the pieces).\n if any(x < 0 for x in [i - j for i, j in zip(current_state, subset)]):\n subset = ACTION_SPACE[0]\n\n # Plays selected action.\n env.play(list(subset), [i - j for i, j in zip(current_state, subset)])\n\n # Gets the predicted q values of the next state.\n next_values = sess.run(q_values, feed_dict={INPUT_STATE: env.position})\n # print(action)\n all_q_values[0, action[0]] = (env.score - score) + np.max(next_values)\n\n # Applies gradient descent and update network.\n sess.run(\n [update, W1, W2, O],\n feed_dict={INPUT_STATE: current_state, next_q_values: all_q_values},\n )\n score = env.score\n DELTA.append(env.score - math.floor(env.potential))\n\n # Makes a backup for every percentage of progress.\n if iteration % (N_GAMES / 100) == 0:\n saver.save(sess, \"./deep_q_learning/model.ckpt\")\n print(sum(DELTA) / 
len(DELTA))\n # plt.plot(DELTA)\n # plt.show()", "def train(self, retrain=0):\n\n if 'embedding_size' not in self.dict_paras: # dimension\n self.dict_paras['embedding_size'] = 20\n embedding_size = self.dict_paras['embedding_size']\n\n if 'learning_rate' not in self.dict_paras:\n self.dict_paras['learning_rate'] = 0.1\n learning_rate = self.dict_paras['learning_rate']\n\n if 'num_steps' not in self.dict_paras:\n self.dict_paras['num_steps'] = 100000\n num_steps = self.dict_paras['num_steps']\n\n if 'num_loss_report' not in self.dict_paras:\n self.dict_paras['num_loss_report'] = 1000\n num_loss_report = self.dict_paras['num_loss_report']\n\n if 'batch_size' not in self.dict_paras:\n self.dict_paras['batch_size'] = 60\n batch_size = self.dict_paras['batch_size']\n\n # distance type {L1:1, L2:2, cos_similarity:3}\n if 'regular_type' not in self.dict_paras:\n self.dict_paras['regular_type'] = 2\n\n if 'margin' not in self.dict_paras:\n self.dict_paras['margin'] = 1\n\n # build graph\n graph = tf.Graph()\n with graph.as_default():\n with tf.device('/cpu:0'):\n\n # Input data.\n train_head_id = tf.placeholder(tf.int32, shape=[2*batch_size])\n train_tail_id = tf.placeholder(tf.int32, shape=[2*batch_size])\n train_relation_id = tf.placeholder(tf.int32, shape=[2*batch_size])\n train_head_corrupted_id = tf.placeholder(tf.int32, shape=[2*batch_size])\n train_tail_corrupted_id = tf.placeholder(tf.int32, shape=[2*batch_size])\n\n # model parameters\n if not retrain:\n bound = (6./embedding_size)**0.5\n self.embeddings_entity = tf.Variable(\n tf.random_uniform([self.entity_size, embedding_size], -bound, bound))\n self.embeddings_relation = tf.Variable(tf.nn.l2_normalize(\n tf.random_uniform([self.relation_size, embedding_size], -bound, bound), 1))\n else:\n self.embeddings_entity = tf.Variable(self.embeddings_entity)\n self.embeddings_relation = tf.Variable(self.embeddings_relation)\n\n # normalize all embeddings of entities.\n self.embeddings_entity = tf.nn.l2_normalize(self.embeddings_entity, 1)\n\n # look up embeddings for inputs\n\n embedding_head = tf.nn.embedding_lookup(self.embeddings_entity, train_head_id)\n embedding_tail = tf.nn.embedding_lookup(self.embeddings_entity, train_tail_id)\n embedding_relation = tf.nn.embedding_lookup(self.embeddings_relation, train_relation_id)\n embedding_head_corrupted = tf.nn.embedding_lookup(self.embeddings_entity, train_head_corrupted_id)\n embedding_tail_corrupted = tf.nn.embedding_lookup(self.embeddings_entity, train_tail_corrupted_id)\n\n # loss function\n loss = self.loss_function(embedding_head, embedding_tail, embedding_relation\n , embedding_head_corrupted, embedding_tail_corrupted)\n\n # optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate).minimize(loss)\n optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)\n\n # Begin training.\n init = tf.initialize_all_variables()\n\n with tf.Session(graph=graph) as sess:\n init.run()\n print(\"Initialized\")\n\n average_loss = 0\n\n for step in xrange(num_steps):\n\n batch = self.generate_batch()\n\n head_id = [ele[0][0] for ele in batch]\n relation_id = [ele[0][1] for ele in batch]\n tail_id = [ele[0][2] for ele in batch]\n head_corrupted_id = [ele[1][0] for ele in batch]\n tail_corrupted_id = [ele[1][2] for ele in batch]\n\n feed_dict = {train_head_id: head_id, train_relation_id: relation_id, train_tail_id:tail_id,\n train_head_corrupted_id: head_corrupted_id, train_tail_corrupted_id: tail_corrupted_id}\n\n _, loss_val = sess.run([optimizer, loss], feed_dict=feed_dict)\n 
average_loss += loss_val\n\n if step % num_loss_report == 0:\n if step > 0:\n average_loss /= num_loss_report\n print(\"Average loss at step \", step, \": \", average_loss)\n average_loss = 0\n\n self.embeddings_entity = tf.nn.l2_normalize(self.embeddings_entity, 1).eval()\n self.embeddings_relation = self.embeddings_relation.eval()\n\n print('Training completed!')", "def parallel_generate_walks(self, d_graph, global_walk_length, num_walks, cpu_num, sampling_strategy=None,\n num_walks_key=None, walk_length_key=None, neighbors_key=None, probabilities_key=None,\n first_travel_key=None, quiet=False):\n\n walks = list()\n if not quiet:\n pbar = tqdm(total=num_walks, desc='Generating walks (CPU: {})'.format(cpu_num))\n\n for n_walk in range(num_walks):\n\n # Update progress bar\n if not quiet:\n pbar.update(1)\n\n # Shuffle the nodes\n shuffled_nodes = list(d_graph.keys())\n shuffle(shuffled_nodes)\n for source in shuffled_nodes:\n # TODO: Remove and uncomment below after testing\n prob = 1/(1+self.graph.out_degree(source))\n\n rand = random.uniform(0,1)\n if rand > prob:\n try:\n walk = self.single_node_random_walk(source, sampling_strategy, num_walks_key, n_walk, walk_length_key,\n global_walk_length, d_graph, neighbors_key, first_travel_key, probabilities_key, cold_start=False, real_start_node=None)\n walks.append(walk)\n except:\n rand = 0\n\n elif rand <= prob:\n\n new_source_list = self.find_fof(source)\n if type(new_source_list) is not list:\n new_source_list = [new_source_list]\n for elem in new_source_list:\n try:\n walk = self.single_node_random_walk(elem, sampling_strategy, num_walks_key, n_walk, walk_length_key,\n global_walk_length, d_graph, neighbors_key, first_travel_key, probabilities_key, cold_start=True, real_start_node=source)\n walks.append(walk)\n except:\n pass\n\n if not quiet:\n pbar.close()\n\n return walks", "def link_prediction(self, lst_triplet_test_map, head_or_tail=0, filt=1, num_to_test=1000):\n\n # begin testing\n with tf.device('/cpu:0'):\n with tf.Session() as sess:\n num_hit_10 = 0\n lst_rank_all = list()\n num_tested = 0\n # for i in random.sample(range(self.triplet_test_size), num_to_test):\n # (h, r, t) = lst_triplet_test_map[i]\n for (h, r, t) in lst_triplet_test_map[:num_to_test]:\n if head_or_tail == 0: # rank head\n embedding_relation = tf.expand_dims(tf.nn.embedding_lookup(self.embeddings_relation, r), 0)\n embedding_tail = tf.expand_dims(tf.nn.embedding_lookup(self.embeddings_entity, t),0)\n temp_ones = tf.ones([self.entity_size, 1])\n embedding_relation_mat = tf.matmul(temp_ones, embedding_relation)\n embedding_tail_mat = tf.matmul(temp_ones, embedding_tail)\n distance_vec = self.distance(tf.add(self.embeddings_entity, embedding_relation_mat),\n embedding_tail_mat)\n lst_entity_rank = sorted(enumerate(distance_vec.eval()), key=operator.itemgetter(1))\n\n lst_entity_sorted = [ele[0] for ele in lst_entity_rank]\n rank = lst_entity_sorted.index(h) + 1\n if filt:\n for i in range(rank-1):\n entity = lst_entity_sorted[i]\n if entity in self.dict_hoft[r][t]:\n rank -= 1\n\n else:\n embedding_head = tf.expand_dims(tf.nn.embedding_lookup(self.embeddings_entity, h), 0)\n embedding_relation = tf.expand_dims(tf.nn.embedding_lookup(self.embeddings_relation, r), 0)\n temp_ones = tf.ones([self.entity_size, 1])\n embedding_head_mat = tf.matmul(temp_ones, embedding_head)\n embedding_relation_mat = tf.matmul(temp_ones, embedding_relation)\n distance_vec = self.distance(tf.add(embedding_head_mat, embedding_relation_mat),\n self.embeddings_entity)\n lst_entity_rank = 
sorted(enumerate(distance_vec.eval()), key=operator.itemgetter(1))\n\n lst_entity_sorted = [ele[0] for ele in lst_entity_rank]\n rank = lst_entity_sorted.index(t) + 1\n if filt:\n for i in range(rank-1):\n entity = lst_entity_sorted[i]\n if entity in self.dict_tofh[r][h]:\n rank -= 1\n\n if rank <= 10:\n num_hit_10 += 1\n\n num_tested += 1\n print('{} rank:{}, num_hit_10:{}'.format(num_tested, rank, num_hit_10))\n lst_rank_all.append(rank)\n\n mean_rank = sum(lst_rank_all) / float(num_to_test)\n hit_10 = num_hit_10 / float(num_to_test) * 100.\n\n print('Testing completed!')\n\n return mean_rank, hit_10", "def __init__(self, m, n, dim, n_iterations=100, alpha=None, sigma=None):\n \n #Assign required variables first\n self._m = m\n self._n = n\n imgs=[]\n if alpha is None:\n alpha = 0.3\n else:\n alpha = float(alpha)\n if sigma is None:\n sigma = max(m, n) / 2.0\n else:\n sigma = float(sigma)\n self._n_iterations = abs(int(n_iterations))\n \n ##INITIALIZE GRAPH\n self._graph = tf.Graph()\n \n ##POPULATE GRAPH WITH NECESSARY COMPONENTS\n with self._graph.as_default():\n \n ##VARIABLES AND CONSTANT OPS FOR DATA STORAGE\n \n #Randomly initialized weightage vectors for all neurons,\n #stored together as a matrix Variable of size [m*n, dim]\n #HERE i used random_uniform instead of random_normal in the original code.(LB)\n self._weightage_vects = tf.Variable(tf.random_uniform(\n [m*n, dim]))\n \n #Matrix of size [m*n, 2] for SOM grid locations\n #of neurons\n self._location_vects = tf.constant(np.array(\n list(self._neuron_locations(m, n))))\n \n ##PLACEHOLDERS FOR TRAINING INPUTS\n #We need to assign them as attributes to self, since they\n #will be fed in during training\n \n #The training vector\n self._vect_input = tf.placeholder(\"float\", [dim])\n #Iteration number\n self._iter_input = tf.placeholder(\"float\")\n \n ##CONSTRUCT TRAINING OP PIECE BY PIECE\n #Only the final, 'root' training op needs to be assigned as\n #an attribute to self, since all the rest will be executed\n #automatically during training\n \n #To compute the Best Matching Unit given a vector\n #Basically calculates the Euclidean distance between every\n #neuron's weightage vector and the input, and returns the\n #index of the neuron which gives the least value\n bmu_index = tf.argmin(tf.sqrt(tf.reduce_sum(\n tf.pow(tf.subtract(self._weightage_vects, tf.stack(\n [self._vect_input for i in range(m*n)])), 2), 1)),\n 0)\n \n #This will extract the location of the BMU based on the BMU's\n #index\n slice_input = tf.pad(tf.reshape(bmu_index, [1]),\n np.array([[0, 1]]))\n bmu_loc = tf.reshape(tf.slice(self._location_vects, slice_input,\n tf.constant(np.array([1, 2]))),\n [2])\n \n #To compute the alpha and sigma values based on iteration\n #number\n learning_rate_op = tf.subtract(1.0, tf.divide(self._iter_input,\n self._n_iterations))\n _alpha_op = tf.multiply(alpha, learning_rate_op)\n _sigma_op = tf.multiply(sigma, learning_rate_op)\n \n #Construct the op that will generate a vector with learning\n #rates for all neurons, based on iteration number and location\n #wrt BMU.\n bmu_distance_squares = tf.reduce_sum(tf.pow(tf.subtract(\n self._location_vects, tf.stack(\n [bmu_loc for i in range(m*n)])), 2), 1)\n neighbourhood_func = tf.exp(tf.neg(tf.divide(tf.cast(\n bmu_distance_squares, \"float32\"), tf.pow(_sigma_op, 2))))\n learning_rate_op = tf.multiply(_alpha_op, neighbourhood_func)\n \n #Finally, the op that will use learning_rate_op to update\n #the weightage vectors of all neurons based on a particular\n #input\n 
learning_rate_multiplier = tf.stack([tf.tile(tf.slice(\n learning_rate_op, np.array([i]), np.array([1])), [dim])\n for i in range(m*n)])\n weightage_delta = tf.multiply(\n learning_rate_multiplier,\n tf.subtract(tf.stack([self._vect_input for i in range(m*n)]),\n self._weightage_vects)) \n new_weightages_op = tf.add(self._weightage_vects,\n weightage_delta)\n self._training_op = tf.assign(self._weightage_vects,\n new_weightages_op) \n \n ##INITIALIZE SESSION\n self._sess = tf.Session()\n \n ##INITIALIZE VARIABLES\n init_op = tf.global_variables_initializer()\n self._sess.run(init_op)", "def run_single_step(self): \n contexts = tf.placeholder(tf.float32, [self.batch_size, self.num_ctx, self.dim_ctx]) \n last_memory = tf.placeholder(tf.float32, [self.batch_size, self.dim_hidden])\n last_output = tf.placeholder(tf.float32, [self.batch_size, self.dim_hidden])\n last_word = tf.placeholder(tf.int32, [self.batch_size])\n initial_step = tf.placeholder(tf.bool)\n\n context_mean = tf.reduce_mean(contexts, 1) \n\n lstm = tf.nn.rnn_cell.LSTMCell(self.dim_hidden, initializer=tf.random_normal_initializer(stddev=0.033)) \n\n # Attention mechanism\n alpha = self.attend(contexts, last_output) \n weighted_context = tf.cond(initial_step,\n lambda: tf.identity(context_mean),\n lambda: tf.reduce_sum(contexts*tf.expand_dims(alpha, 2), 1))\n\n word_emb = tf.cond(initial_step, \n lambda: tf.zeros([self.batch_size, self.dim_embed]), \n lambda: tf.nn.embedding_lookup(self.emb_w, last_word))\n \n # Apply the LSTM\n with tf.variable_scope(\"LSTM\"):\n last_state = last_memory, last_output\n output, state = lstm(tf.concat([weighted_context, word_emb], 1), last_state)\n memory, _ = state\n \n # Compute the logits and probs\n expanded_output = tf.concat([output, weighted_context, word_emb], 1)\n\n logits1 = fully_connected(expanded_output, self.dim_dec, 'dec_fc')\n logits1 = nonlinear(logits1, 'tanh')\n logits2 = tf.nn.xw_plus_b(logits1, self.dec_w, self.dec_b)\n probs = tf.nn.softmax(logits2) \n logprobs = tf.log(probs)\n\n tf.get_variable_scope().reuse_variables() \n\n self.contexts = contexts\n self.last_memory = last_memory\n self.last_output = last_output\n self.last_word = last_word\n self.initial_step = initial_step\n\n self.memory = memory\n self.output = output\n self.logprobs = logprobs", "def generate_random_walker():\n # must have seeds that generate known problems\n must_have_seeds = [112, 308, 393]\n for seed in must_have_seeds:\n print(\"Last used seed: {}\".format(seed))\n detections = detections_simple_tracking(seed)\n yield simple_walker(data_simple_tracking(detections)), detections\n while True:\n seed = random.randint(0, 2**10)\n print(\"Last used seed: {}\".format(seed))\n detections = detections_simple_tracking(seed)\n yield simple_walker(data_simple_tracking(detections)), detections", "def test_node_sampling(weighted_graph_config_fixture):\n w_config = weighted_graph_config_fixture\n\n # Node 5 to node 4 has zero weight (zero transition probability)\n # Node 4 to node 5 has ten weight (high transition probability)\n edges = pd.DataFrame({'source_content_id': [0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5],\n 'destination_content_id': [5, 1, 0, 3, 4, 1, 2, 1, 3, 5, 3, 4],\n 'weight': [1, 2, 3, 4, 1, 2, 3, 4, 1, 10, 5, 0]}\n )\n wm = N2VModel()\n\n wm.create_graph(edges, w_config['weighted_graph'])\n\n wm.generate_walks(**w_config)\n\n wm.fit_model(**w_config, callbacks=EpochLogger())\n\n n_nodes = len(set(edges.source_content_id))\n n_transitions = n_nodes * (w_config['walk_length'] - 1) * 
w_config['num_walks']\n\n res = np.array([np.array(list(zip(x, x[1:]))).ravel() for x in wm.node2vec.walks])\n walks = np.reshape(res, (n_transitions, 2))\n\n pairs = pd.DataFrame({'state1': walks[:, 0], 'state2': walks[:, 1]})\n counts = pairs.groupby('state1')['state2'].value_counts().unstack()\n counts = counts.replace(np.nan, 0)\n assert pairs.shape == (n_nodes * (w_config['walk_length'] - 1) * w_config['num_walks'], 2)\n assert counts.iloc[5][4] == 0\n assert counts.iloc[4][5] != 0\n assert len(set(edges['source_content_id']).union(\n set(edges['destination_content_id']))) == len(wm.model.wv.vocab.keys())", "def __build_iteration(self) -> None:\n trees = [t for t in self.__trees.keys()]\n for tree in trees:\n heads = []\n branches = self.__trees[tree]\n for i in range(len(branches) - 1, -1, -1):\n if self.__trees.get(tree) and np.random.rand(1)[0] < self.__rate:\n heads += self.__branch_out(branches.pop(i), tree)\n self.__trees[self.__mappings[tree]] += heads\n\n # NB: this can cause errors when seeds spawn near the edge\n if len(self.__trees[self.__mappings[tree]]) == 0:\n logging.info(\"deleting tree with id {}\".format(tree))\n del self.__trees[self.__mappings[tree]]", "def exec_random_walk(graphs, alias_method_j, alias_method_q, v, walk_length, amount_neighbours):\r\n\r\n original_v = v\r\n\r\n t0 = time()\r\n\r\n initial_layer = 0\r\n\r\n layer = initial_layer\r\n\r\n\r\n\r\n path = deque()\r\n\r\n path.append(v)\r\n\r\n\r\n\r\n while len(path) < walk_length:\r\n\r\n r = random.random()\r\n\r\n\r\n\r\n if r < 0.3:\r\n\r\n v = choose_neighbour(v, graphs, alias_method_j, alias_method_q, layer)\r\n\r\n path.append(v)\r\n\r\n\r\n\r\n else:\r\n\r\n r = random.random()\r\n\r\n if r > prob_moveup(amount_neighbours[layer][v]):\r\n\r\n if layer > initial_layer:\r\n\r\n layer = layer - 1\r\n\r\n else:\r\n\r\n if (layer + 1) in graphs and v in graphs[layer + 1]:\r\n\r\n layer = layer + 1\r\n\r\n\r\n\r\n t1 = time()\r\n\r\n logging.info('RW - vertex {}. 
Time : {}s'.format(original_v, (t1 - t0)))\r\n\r\n\r\n\r\n return path", "def _build_fixmatch_training_step(model, optimizer, lam=0, \n tau=0.95, weight_decay=0):\n trainvars = model.trainable_variables\n \n \n def train_step(lab, unlab):\n x,y = lab\n x_unlab_wk, x_unlab_str = unlab\n \n \n with tf.GradientTape() as tape:\n # semisupervised case\n if lam > 0:\n # concatenate labeled/pseudolabeled batches\n N = x.shape[0]\n mu_N = x_unlab_str.shape[0]\n x_batch = tf.concat([x,x_unlab_wk, x_unlab_str],0)\n pred_batch = model(x_batch, training=True)\n # then split the labeled/pseudolabeled pieces\n preds = pred_batch[:N,:]\n wk_preds = pred_batch[N:N+mu_N,:]\n str_preds = pred_batch[N+mu_N:,:]\n # GENERATE FIXMATCH PSEUDOLABELS\n # round predictions to pseudolabels\n with tape.stop_recording():\n pseudolabels = tf.cast(wk_preds > 0.5, \n tf.float32)\n # also compute a mask from the predictions,\n # since we only incorporate high-confidence cases,\n # compute a mask that's 1 every place that's close\n # to 1 or 0\n mask = _build_mask(wk_preds, tau)\n \n # let's try keeping track of how accurate these\n # predictions are\n ssl_acc = tf.reduce_mean(tf.cast(\n tf.cast(str_preds > 0.5, tf.float32)==pseudolabels,\n tf.float32))\n \n crossent_tensor = K.binary_crossentropy(pseudolabels,\n str_preds)\n fixmatch_loss = tf.reduce_mean(mask*crossent_tensor)\n \n else: \n fixmatch_loss = 0\n ssl_acc = -1\n mask = -1\n preds = model(x, training=True)\n \n trainloss = tf.reduce_mean(K.binary_crossentropy(y, preds))\n \n if (weight_decay > 0)&(\"LARS\" not in optimizer._name):\n l2_loss = compute_l2_loss(model)\n else:\n l2_loss = 0\n \n total_loss = trainloss + lam*fixmatch_loss + weight_decay*l2_loss\n \n # compute and apply gradients\n gradients = tape.gradient(total_loss, trainvars)\n optimizer.apply_gradients(zip(gradients, trainvars))\n\n return {\"total_loss\":total_loss, \"supervised_loss\":trainloss,\n \"fixmatch_loss\":fixmatch_loss, \"l2_loss\":l2_loss,\n \"fixmatch_prediction_accuracy\":ssl_acc, \n \"fixmatch_mask_fraction\":tf.reduce_mean(mask)}\n return train_step", "def test1(self):\n with self.test_session() as sess:\n table = np.array([[[0.0, 0.6, 0.4],\n [0.0, 0.4, 0.6],\n [0.0, 0.0, 1.0]]] * 3)\n\n for cell_transform in ['default', 'flatten', 'replicate']:\n cell = MarkovChainCell(table)\n initial_state = cell.zero_state(1, tf.int32)\n initial_input = initial_state[0]\n\n with tf.variable_scope('test1_{}'.format(cell_transform)):\n best_sparse, best_logprobs = beam_decoder(\n cell=cell,\n beam_size=7,\n stop_token=2,\n initial_state=initial_state,\n initial_input=initial_input,\n tokens_to_inputs_fn=lambda x:tf.expand_dims(x, -1),\n max_len=5,\n cell_transform=cell_transform,\n output_dense=False,\n )\n\n tf.variables_initializer([cell.log_table_var]).run()\n assert all(best_sparse.eval().values == [2])\n assert np.isclose(np.exp(best_logprobs.eval())[0], 0.4)", "def random_walk(problem, limit=100, callback=None):\n current = LSNode(problem, problem.initial, 0)\n best = current\n for step in range(limit):\n if callback is not None:\n callback(current)\n current = random.choice(list(current.expand()))\n if current.state.value > best.state.value:\n best = current\n return best", "def mcts(self, node):\n\n starttime = time.time()\n sim_count = 0\n board_in = self.env.board.fen()\n\n # First make a prediction for each child state\n for move in self.env.board.generate_legal_moves():\n if move not in node.children.keys():\n node.children[move] = Node(self.env.board, parent=node)\n\n 
episode_end, reward = self.env.step(move)\n\n if episode_end:\n successor_state_value = 0\n else:\n successor_state_value = np.squeeze(\n self.agent.model.predict(np.expand_dims(self.env.layer_board, axis=0))\n )\n\n child_value = reward + self.gamma * successor_state_value\n\n node.update_child(move, child_value)\n self.env.board.pop()\n self.env.init_layer_board()\n if not node.values:\n node.values = [0]\n\n while starttime + self.search_time > time.time() or sim_count < self.min_sim_count:\n depth = 0\n color = 1\n node_rewards = []\n\n # Select the best node from where to start MCTS\n while node.children:\n node, move = node.select(color=color)\n if not move:\n # No move means that the node selects itself, not a child node.\n break\n else:\n depth += 1\n color = color * -1 # switch color\n episode_end, reward = self.env.step(move) # Update the environment to reflect the node\n node_rewards.append(reward)\n # Check best node is terminal\n\n if self.env.board.result() == \"1-0\" and depth == 1: # -> Direct win for white, no need for mcts.\n self.env.board.pop()\n self.env.init_layer_board()\n node.update(1)\n node = node.parent\n return node\n elif episode_end: # -> if the explored tree leads to a terminal state, simulate from root.\n while node.parent:\n self.env.board.pop()\n self.env.init_layer_board()\n node = node.parent\n break\n else:\n continue\n\n # Expand the game tree with a simulation\n Returns, move = node.simulate(self.agent.fixed_model,\n self.env,\n temperature=self.temperature,\n depth=0)\n self.env.init_layer_board()\n\n if move not in node.children.keys():\n node.children[move] = Node(self.env.board, parent=node)\n\n node.update_child(move, Returns)\n\n # Return to root node and backpropagate Returns\n while node.parent:\n latest_reward = node_rewards.pop(-1)\n Returns = latest_reward + self.gamma * Returns\n node.update(Returns)\n node = node.parent\n\n self.env.board.pop()\n self.env.init_layer_board()\n sim_count += 1\n\n board_out = self.env.board.fen()\n assert board_in == board_out\n\n return node", "def build_eval_graph(self):\n # Eval graph\n opts = self._options\n\n # Each analogy task is to predict the 4th word (d) given three\n # words: a, b, c. 
E.g., a=italy, b=rome, c=france, we should\n # predict d=paris.\n\n # The eval feeds three vectors of word ids for a, b, c, each of\n # which is of size N, where N is the number of analogies we want to\n # evaluate in one batch.\n analogy_a = tf.placeholder(dtype=tf.int32) # [N]\n analogy_b = tf.placeholder(dtype=tf.int32) # [N]\n analogy_c = tf.placeholder(dtype=tf.int32) # [N]\n\n # Normalized word embeddings of shape [vocab_size, emb_dim].\n nemb = tf.nn.l2_normalize(self.embeddings, 1)\n\n # Each row of a_emb, b_emb, c_emb is a word's embedding vector.\n # They all have the shape [N, emb_dim]\n a_emb = tf.gather(nemb, analogy_a) # a's embs\n b_emb = tf.gather(nemb, analogy_b) # b's embs\n c_emb = tf.gather(nemb, analogy_c) # c's embs\n\n # We expect that d's embedding vectors on the unit hyper-sphere is\n # near: c_emb + (b_emb - a_emb), which has the shape [N, emb_dim].\n target = c_emb + (b_emb - a_emb)\n\n # Compute cosine distance between each pair of target and vocab.\n # dist has shape [N, vocab_size].\n dist = tf.matmul(target, nemb, transpose_b=True)\n\n # For each question (row in dist), find the top 4 words.\n _, pred_idx = tf.nn.top_k(dist, 5)\n\n # Nodes for computing neighbors for a given word according to\n # their cosine distance.\n nearby_word = tf.placeholder(dtype=tf.int32) # word id\n nearby_emb = tf.gather(nemb, nearby_word)\n nearby_dist = tf.matmul(nearby_emb, nemb, transpose_b=True)\n nearby_val, nearby_idx = tf.nn.top_k(nearby_dist,\n min(1000, opts.vocabulary_size))\n\n field_cates = tf.placeholder(dtype=tf.int32)\n field_embs = tf.gather(self.embeddings, field_cates)\n center_point = tf.reduce_mean(field_embs, 0)\n avg_distance = tf.reduce_mean(tf.sqrt(tf.reduce_sum(\n tf.pow(tf.sub(center_point, field_embs), 2), 1)), 0)\n\n self._avg_distance = avg_distance\n self._field_cates = field_cates\n # Nodes in the construct graph which are used by training and\n # evaluation to run/feed/fetch.\n self._analogy_a = analogy_a\n self._analogy_b = analogy_b\n self._analogy_c = analogy_c\n self._analogy_pred_idx = pred_idx\n self._nearby_word = nearby_word\n self._nearby_val = nearby_val\n self._nearby_idx = nearby_idx\n\n # Properly initialize all variables.\n # tf.initialize_all_variables().run()\n\n # self.saver = tf.train.Saver()", "def mcts(node, expanding=False):\n global boards\n boards += 1\n\n node.games += 1\n if game.check_win(node.board) != ' ':\n if game.check_win(node.board) == 'X' or game.check_win(node.board) == 'O':\n node.wins += 1\n return\n\n # Selection\n move = -1\n next_player = 'O' if node.player_id == 'X' else 'X'\n next_board = deepcopy(node.board)\n if len(node.children) == 25:\n max_uct = -inf\n for child in node.children:\n uct = child.wins/child.games + sqrt(log(node.games) / child.games)\n if uct > max_uct:\n max_uct = uct\n move = child.move\n\n # Expansion and Simulation\n elif not expanding:\n for move_expansion in range(25):\n if node.board[move_expansion] != ' ':\n continue\n next_board = deepcopy(node.board)\n next_board[move_expansion] = node.player_id\n next_node = Node(next_board, next_player, move_expansion)\n is_child = False\n for child in node.children:\n if child.board == next_board:\n next_node = child\n is_child = True\n if not is_child:\n node.children.append(next_node)\n mcts(next_node, True)\n else:\n move = randint(0, 24)\n while node.board[move] != ' ':\n move = randint(0, 24)\n next_board[move] = node.player_id\n next_node = Node(next_board, next_player, move)\n is_child = False\n for child in node.children:\n if 
child.board == next_board:\n next_node = child\n is_child = True\n if not is_child:\n node.children.append(next_node)\n mcts(next_node, expanding)\n\n # Back-Propagation\n node.wins = 0\n node.games = 0\n if node.children:\n for child in node.children:\n node.wins += child.games - child.wins\n node.games += child.games", "def simulate(FT, output_dir, iteration, pathset_paths_df, pathset_links_df, veh_trips_df):\n simulation_iteration = 0\n num_passengers_arrived = 0 # will get returned from choose_paths\n\n while True:\n FastTripsLogger.info(\"Simulation Iteration %d\" % simulation_iteration)\n for trace_pax in Assignment.TRACE_PERSON_IDS:\n FastTripsLogger.debug(\"Initial pathset_links_df for %s\\n%s\" % \\\n (str(trace_pax), pathset_links_df.loc[pathset_links_df.person_id==trace_pax].to_string()))\n\n FastTripsLogger.debug(\"Initial pathset_paths_df for %s\\n%s\" % \\\n (str(trace_pax), pathset_paths_df.loc[pathset_paths_df.person_id==trace_pax].to_string()))\n\n ######################################################################################################\n FastTripsLogger.info(\" Step 1. Find out board/alight times for all pathset links from vehicle times\")\n\n # could do this just to chosen path links but let's do this to the whole pathset\n pathset_links_df = Assignment.find_passenger_vehicle_times(pathset_links_df, veh_trips_df)\n\n ######################################################################################################\n FastTripsLogger.info(\" Step 2. Flag missed transfer links and paths in the pathsets\")\n (pathset_paths_df, pathset_links_df) = Assignment.flag_missed_transfers(pathset_paths_df, pathset_links_df)\n\n ######################################################################################################\n FastTripsLogger.info(\" Step 3. Calculate costs and probabilities for all pathset paths\")\n (pathset_paths_df, pathset_links_df) = PathSet.calculate_cost(\n iteration, simulation_iteration, Assignment.STOCH_DISPERSION,\n pathset_paths_df, pathset_links_df, FT.passengers.trip_list_df,\n FT.transfers.transfers_df, FT.tazs.walk_df, FT.tazs.drive_df, veh_trips_df, FT.stops)\n\n ######################################################################################################\n FastTripsLogger.info(\" Step 4. Choose a path for each passenger from their pathset\")\n\n # Choose path for each passenger -- pathset_paths_df and pathset_links_df will now have\n # SIM_COL_PAX_CHOSEN >=0 for chosen paths/path links\n (num_passengers_arrived, num_chosen, pathset_paths_df, pathset_links_df) = Passenger.choose_paths(\n Assignment.PATHFINDING_EVERYONE and simulation_iteration==0, # choose for everyone if we just re-found all paths\n iteration, simulation_iteration,\n pathset_paths_df, pathset_links_df)\n\n ######################################################################################################\n FastTripsLogger.info(\" Step 5. 
Put passenger paths on transit vehicles to get vehicle boards/alights/load\")\n\n # no one is bumped yet\n bump_iter = 0\n pathset_links_df[Assignment.SIM_COL_PAX_OVERCAP_FRAC] = numpy.NaN\n\n if simulation_iteration==0:\n # For those we just found paths for, no one is bumped or going on overcap vehicles yet\n pathset_links_df.loc[pathset_links_df[Passenger.PF_COL_PF_ITERATION]==iteration, Assignment.SIM_COL_PAX_BUMP_ITER ] = -1\n\n while True: # loop for capacity constraint\n\n # Put passengers on vehicles, updating the vehicle's boards, alights, onboard\n veh_trips_df = Assignment.put_passengers_on_vehicles(iteration, bump_iter, pathset_paths_df, pathset_links_df, veh_trips_df)\n\n if not FT.trips.has_capacity_configured():\n # We can't do anything about capacity\n break\n\n else:\n ######################################################################################################\n FastTripsLogger.info(\" Step 6. Capacity constraints on transit vehicles.\")\n\n if bump_iter == 0:\n FastTripsLogger.info(\" Bumping one at a time? %s\" % (\"true\" if Assignment.BUMP_ONE_AT_A_TIME else \"false\"))\n\n # This needs to run at this point because the arrival times for the passengers are accurate here\n (chosen_paths_bumped, pathset_paths_df, pathset_links_df, veh_trips_df) = \\\n Assignment.flag_bump_overcap_passengers(iteration, simulation_iteration, bump_iter,\n pathset_paths_df, pathset_links_df, veh_trips_df)\n\n\n FastTripsLogger.info(\" -> completed loop bump_iter %d and bumped %d chosen paths\" % (bump_iter, chosen_paths_bumped))\n\n if chosen_paths_bumped == 0:\n # do one final update of overcap to passengers\n pathset_links_df = Assignment.find_passenger_vehicle_times(pathset_links_df, veh_trips_df)\n break\n\n bump_iter += 1\n\n\n if type(Assignment.bump_wait_df) == pandas.DataFrame and len(Assignment.bump_wait_df) > 0:\n Assignment.bump_wait_df[Passenger.PF_COL_PAX_A_TIME_MIN] = \\\n Assignment.bump_wait_df[Passenger.PF_COL_PAX_A_TIME].map(lambda x: (60.0*x.hour) + x.minute + (x.second/60.0))\n\n if type(Assignment.bump_wait_df) == pandas.DataFrame and len(Assignment.bump_wait_df) > 0:\n FastTripsLogger.debug(\"Bump_wait_df:\\n%s\" % Assignment.bump_wait_df.to_string(formatters=\\\n {Passenger.PF_COL_PAX_A_TIME :Util.datetime64_formatter}))\n\n ######################################################################################################\n FastTripsLogger.info(\" Step 7. Update dwell and travel times for transit vehicles\")\n # update the trip times -- accel/decel rates + stops affect travel times, and boards/alights affect dwell times\n veh_trips_df = Trip.update_trip_times(veh_trips_df, Assignment.MSA_RESULTS)\n\n ######################################################################################################\n if Assignment.OUTPUT_PATHSET_PER_SIM_ITER:\n FastTripsLogger.info(\" Step 8. 
Write pathsets (paths and links)\")\n Passenger.write_paths(output_dir, iteration, simulation_iteration, pathset_paths_df, False, Assignment.OUTPUT_PATHSET_PER_SIM_ITER)\n Passenger.write_paths(output_dir, iteration, simulation_iteration, pathset_links_df, True, Assignment.OUTPUT_PATHSET_PER_SIM_ITER)\n\n simulation_iteration += 1\n\n if num_chosen <= 0:\n FastTripsLogger.info(\" No more path choices to make => Ending simulation loop\")\n break\n\n if simulation_iteration > Assignment.MAX_SIMULATION_ITERS:\n FastTripsLogger.info(\" Maximum simulation iterations reached (%d) => Ending simulation loop\" % Assignment.MAX_SIMULATION_ITERS)\n break\n\n # Write the pathsets (if we haven't been already)\n if Assignment.OUTPUT_PATHSET_PER_SIM_ITER == False:\n Passenger.write_paths(output_dir, iteration, simulation_iteration, pathset_paths_df, False, Assignment.OUTPUT_PATHSET_PER_SIM_ITER)\n Passenger.write_paths(output_dir, iteration, simulation_iteration, pathset_links_df, True, Assignment.OUTPUT_PATHSET_PER_SIM_ITER)\n\n # write the final chosen paths for this iteration\n chosen_links_df = Passenger.get_chosen_links(pathset_links_df)\n chosen_links_df[\"iteration\"] = iteration\n Util.write_dataframe(chosen_links_df, \"chosen_links_df\", os.path.join(output_dir, \"chosenpaths_links.csv\"), append=(iteration>1))\n chosen_links_df.drop([\"iteration\"], axis=1, inplace=True)\n\n chosen_paths_df = Passenger.get_chosen_links(pathset_paths_df)\n chosen_paths_df[\"iteration\"] = iteration\n Util.write_dataframe(chosen_paths_df, \"chosen_paths_df\", os.path.join(output_dir, \"chosenpaths_paths.csv\"), append=(iteration>1))\n chosen_paths_df.drop([\"iteration\"], axis=1, inplace=True)\n\n return (num_passengers_arrived, pathset_paths_df, pathset_links_df, veh_trips_df)", "def random_walk_pol(G, ms, n_influencers, n_sim, n_walks):\n \n left_nodes = [node for node in ms if ms[node] == 0]\n right_nodes = [node for node in ms if ms[node] == 1]\n \n left_influencers, right_influencers = get_influencer_nodes(G, left_nodes, right_nodes, n_influencers)\n \n rwc_dist = []\n \n for _ in range(n_sim):\n \n left_left = 0\n right_left = 0\n left_right = 0\n right_right = 0\n \n for _ in range(n_walks):\n\n starting_side = random.choice([\"left\", \"right\"])\n\n if starting_side == \"left\":\n which_random_starting_node = random.choice(left_nodes)\n else:\n which_random_starting_node = random.choice(right_nodes)\n\n ending_side = perform_random_walk(G, left_influencers, right_influencers, which_random_starting_node)\n\n if (starting_side == \"left\") and (ending_side == \"left\"):\n left_left += 1\n\n elif (starting_side == \"right\") and (ending_side == \"left\"):\n right_left += 1\n\n elif (starting_side == \"left\") and (ending_side == \"right\"):\n left_right += 1\n\n elif (starting_side == \"right\") and (ending_side == \"right\"):\n right_right += 1\n\n else:\n print(\"Error!\")\n\n e1 = (left_left)/(left_left + right_left)\n e2 = (right_left)/(left_left + right_left)\n e3 = (left_right)/(right_right + left_right)\n e4 = (right_right)/(right_right + left_right)\n \n rwc = e1*e4 - e2*e3\n rwc_dist.append(rwc)\n \n rwc_ave = sum(rwc_dist)/len(rwc_dist) \n \n return rwc_ave", "def start():\n\n xy = np.loadtxt('data-03-diabetes.csv', delimiter=',', dtype=np.float32)\n x_data = xy[:, 0:-1]\n y_data = xy[:, [-1]]\n print \"X_data:%s\\nY_data:%s\" % (x_data, y_data)\n\n X = tf.placeholder(tf.float32, shape=[None, 8])\n Y = tf.placeholder(tf.float32, shape=[None, 1])\n\n W = tf.Variable(tf.random_normal([8, 1]), 
name='weight')\n b = tf.Variable(tf.random_normal([1]), name='bias')\n\n hypothesis = tf.sigmoid(tf.matmul(X, W) + b)\n\n cost = -tf.reduce_mean(Y*tf.log(hypothesis) + (1-Y) * tf.log(1-hypothesis))\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)\n train = optimizer.minimize(cost)\n\n # if hy > 0.5 = 1 else 0\n predicted = tf.cast(hypothesis > 0.65, dtype=tf.float32)\n accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), dtype=tf.float32))\n\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n\n # coord = tf.train.Coordinator()\n # threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n print \"start step\"\n\n for step in range(10001):\n # x_data, y_data = sess.run([train_x_batch, train_y_batch])\n feed = {X: x_data, Y: y_data}\n sess.run(train, feed_dict=feed)\n cost_val = sess.run(cost, feed_dict=feed)\n if step % 200 == 0:\n print \"step %s, cost:%s\" % \\\n (step, cost_val)\n\n # Accuracy report\n # x_data, y_data = sess.run([train_x_batch, train_y_batch])\n feed = {X: x_data, Y: y_data}\n h, c, a = sess.run([hypothesis, predicted, accuracy],\n feed_dict=feed)\n print \"hy:%s, correct:%s, accurcy:%s\" % (h, c, a)\n # coord.request_stop()\n # coord.join(threads)", "def main():\r\n\r\n mutation_range = 150\r\n parser = argparse.ArgumentParser(description=None)\r\n parser.add_argument('--input', type=str, dest=\"input\",\r\n required=True,\r\n help=\"Input audio .wav file(s) at 16KHz\")\r\n parser.add_argument('--target', type=str,\r\n required=True,\r\n help=\"Target transcription\")\r\n parser.add_argument('--out', type=str,\r\n required=True,\r\n help=\"Path for the adversarial example(s)\")\r\n parser.add_argument('--iterations', type=int,\r\n required=False, default=1000,\r\n help=\"Maximum number of iterations of gradient descent\")\r\n parser.add_argument('--population', type=int,\r\n required=False, default=100,\r\n help=\"Population size of each generation\")\r\n parser.add_argument('--model_path', type=str,\r\n required=True,\r\n help=\"Path to the DeepSpeech checkpoint (ending in model0.4.1)\")\r\n args = parser.parse_args()\r\n while len(sys.argv) > 1:\r\n sys.argv.pop()\r\n\r\n population_size = args.population\r\n\r\n with tf.Session() as sess:\r\n # finetune = []\r\n audios = []\r\n lengths = []\r\n\r\n # if args.out is None:\r\n # assert args.outprefix is not None\r\n # else:\r\n # assert args.outprefix is None\r\n # assert len(args.input) == len(args.out)\r\n # if args.finetune is not None and len(args.finetune):\r\n # assert len(args.input) == len(args.finetune)\r\n\r\n # Load the inputs that we're given\r\n \r\n fs, audio = wav.read(args.input)\r\n # print(\"Original Audio: \" + interpret_audio(audio, fs))\r\n assert fs == 16000\r\n assert audio.dtype == np.int16\r\n # print(audio)\r\n # print('source dB', 20 * np.log10(np.max(np.abs(audio))))\r\n audios.append(list(audio))\r\n lengths.append(len(audio))\r\n\r\n for i in range(population_size):\r\n wn = np.random.randint(-mutation_range, mutation_range, size=len(audio), dtype=np.int16)\r\n mutated_audio = audio + wn\r\n audios.append(list(mutated_audio))\r\n lengths.append(len(mutated_audio))\r\n\r\n # if args.finetune is not None:\r\n # finetune.append(list(wav.read(args.finetune[i])[1]))\r\n\r\n\r\n maxlen = max(map(len, audios))\r\n audios = np.array([x + [0] * (maxlen - len(x)) for x in audios])\r\n\r\n phrase = args.target\r\n # Set up the attack class and run it\r\n attack = Attack(sess, len(phrase), maxlen, batch_size=len(audios), 
model_path=args.model_path)\r\n \r\n \r\n\r\n optimal_cost, optimal_audio1, optimal_audio2 = attack.attack(audios, lengths, [[toks.index(x) for x in phrase]] * len(audios), True)\r\n crossover_population = int(0.2*population_size)\r\n mutation_population = population_size - (2 * crossover_population)\r\n\r\n for i in range(args.iterations):\r\n # Reset audios to only the generational best audio\r\n print_toggle = False\r\n if (i+1) % 10 == 0:\r\n print_toggle = True\r\n\r\n audios = [optimal_audio1]\r\n lengths = [len(optimal_audio1)]\r\n\r\n \r\n\r\n mutated_audios, mutated_lengths = mutate_audio(optimal_audio1, mutation_population, mutation_range)\r\n crossover_audios, crossover_lengths = crossover_audio(optimal_audio1, optimal_audio2, crossover_population)\r\n\r\n audios.extend(mutated_audios)\r\n audios.extend(crossover_audios)\r\n\r\n lengths.extend(mutated_lengths)\r\n lengths.extend(crossover_lengths)\r\n\r\n \r\n xcost, xaudio1, xaudio2 = attack.attack(audios, lengths, [[toks.index(x) for x in phrase]] * len(audios), print_toggle)\r\n\r\n if xcost < optimal_cost:\r\n optimal_cost = xcost\r\n optimal_audio1 = xaudio1\r\n optimal_audio2 = xaudio2\r\n \r\n print(\"iteration: \" + str(i+1) + \"\\t\" + \"Cost: \" + str(optimal_cost))\r\n\r\n wav.write(args.out, 16000, optimal_audio1)", "def eval_teams(sess, model):\n game_to_teams=load_obj(\"game_to_teams\")\n team_q_values={}\n game_number = 0\n global_counter = 0\n converge_flag = False\n\n # loading network\n saver = tf.train.Saver()\n merge = tf.summary.merge_all()\n\n sess.run(tf.global_variables_initializer())\n\n ## Preload and resume training\n if model_train_continue:\n checkpoint = tf.train.get_checkpoint_state(SAVED_NETWORK)\n if checkpoint and checkpoint.model_checkpoint_path:\n check_point_game_number = int((checkpoint.model_checkpoint_path.split(\"-\"))[-1])\n game_number_checkpoint = check_point_game_number % number_of_total_game\n game_number = check_point_game_number\n game_starting_point = 0\n saver.restore(sess, checkpoint.model_checkpoint_path)\n print(\"Successfully loaded:\", checkpoint.model_checkpoint_path)\n else:\n print(\"Could not find old network weights\")\n\n iteration_now=0\n ## Training loop\n iteration_now +=1\n \n num_teams=200\n ##Read in reward, state, and trace from files\n game_files = os.listdir(DATA_STORE)\n game_info_list=[]\n teams=[]\n for filename in game_files:\n game_info_list.append(np.load(\"./pickles/\"+filename[:],allow_pickle=True)) \n print(\"same Length?:\",len(game_info_list)==len(game_files))\n for game_number,game in enumerate(game_info_list[-num_teams:]):\n print(game_number)\n # try:\n home_team=game_to_teams[\"./pickles/\"+game_files[-num_teams+game_number][:-4]][0]\n away_team=game_to_teams[\"./pickles/\"+game_files[-num_teams+game_number][:-4]][1]\n if home_team not in team_q_values:\n team_q_values[home_team]={\"games\":0,\"possesions\":0,\"total_value\":0,\"movements\":0}\n if away_team not in team_q_values:\n team_q_values[away_team]={\"games\":0,\"possesions\":0,\"total_value\":0,\"movements\":0}\n team_q_values[home_team][\"games\"]+=1\n team_q_values[away_team][\"games\"]+=1\n for reward, episode, episode_length,event_type,final_tl,possession in game:\n # s_t0 = observations[train_number]\n team_q_values[home_team][\"possesions\"]+=1\n team_q_values[away_team][\"possesions\"]+=1\n possession_number=0\n s_t0 = episode[possession_number]\n possession_number+=1\n \n while possession_number<len(episode):\n # try:\n batch_return, possession_number, s_tl = 
get_nba_possessesion_batch(s_t0,episode,reward,possession_number,final_tl,1,event_type,BATCH_SIZE)\n\n # get the batch variables\n s_t0_batch = [d[0] for d in batch_return]\n s_t1_batch = [d[1] for d in batch_return]\n r_t_batch = [d[2] for d in batch_return]\n trace_t0_batch=[1 for i in s_t0_batch]\n trace_t1_batch=[1 for i in s_t1_batch]\n # trace_t0_batch = [d[3] for d in batch_return]\n # trace_t1_batch = [d[4] for d in batch_return]\n y_batch = []\n\n [outputs_t1, readout_t1_batch] = sess.run([model.outputs, model.read_out],\n feed_dict={model.trace_lengths: trace_t0_batch,\n model.rnn_input: s_t0_batch})\n home_values=0\n away_values=0\n movements=len(readout_t1_batch)\n for home,away in readout_t1_batch:\n home_values+=home\n away_values+=away\n\n team_q_values[home_team][\"total_value\"]+=home_values\n team_q_values[home_team][\"movements\"]+=movements\n\n team_q_values[away_team][\"total_value\"]+=away_values\n team_q_values[away_team][\"movements\"]+=movements\n # except:\n # print(\"errored\")\n return team_q_values", "def train_init():\n np.random.seed(seed)\n tf.random.set_random_seed(seed)\n random.seed(seed)\n\n name = str(seed)\n desc = MNMDescriptor(5, inp_dict, outp_dict, name=name)\n desc = recursive_creator(desc, 0, 0, seed)\n hypers = {}\n for hyper in hyps:\n hypers[hyper] = np.random.choice(hyps[hyper])\n\n model = MNM(desc, hypers[\"btch_sz\"], data_inputs[\"Train\"], data_outputs[\"Train\"], loss_func_weights={\"o0\": hypers[\"wo0\"], \"o1\": hypers[\"wo1\"], \"o2\": hypers[\"wo2\"]}, name=name, lr=hypers[\"lr\"], opt=hypers[\"opt\"], random_seed=seed)\n if intelligent_training == 2:\n loss_weights = model.sequential_training(hypers[\"btch_sz\"], iter_lim // 50, conv_param, proportion, iter_lim, display_step=-1)\n else:\n loss_weights = model.autoset_training(hypers[\"btch_sz\"], iter_lim//50, conv_param, proportion, iter_lim, display_step=-1, incr=incr, decr=decr, scaling=scale)\n\n\n # ####### Save model characteristics.\n\n model.descriptor.save(path=\"\")\n model.save_weights(path=\"\")\n\n results = evaluate_model(model)\n\n np.save(\"hypers\" + str(seed) + \"_\" + str(intelligent_training) + \"_\" + str(n_networks) + \"_\" + \".npy\", hypers)\n\n np.save(\"orig_results\" + str(seed) + \"_\" + str(intelligent_training) + \"_\" + str(n_networks) + \"_\" + \".npy\", results)\n\n np.save(\"loss_weights\" + str(seed) + \"_\" + str(intelligent_training) + \"_\" + str(n_networks) + \"_\" + \".npy\", loss_weights)", "def test_treewidth_complete_graphs():\n\n def test_kn(size):\n \"\"\"Test on complete graphs.\"\"\"\n graph = Graph()\n for one in range(size):\n for two in range(one + 1, size):\n graph.add_edge(one, two)\n eq_(size-1, graph.approx_treewidth())\n for size in range(2, 6):\n test_kn(size)", "def beam_search_MAP(logits, beam_size=20, lp=50.0):\n inf = 1e10\n distribution = tf.nn.softmax(logits)\n B, T, V = distribution.shape\n aligns = tf.zeros([B * beam_size, 0], tf.int32)\n scores = tf.constant([0.0] + [-inf]*(beam_size-1), dtype=tf.float32) # [beam_size]\n scores = tf.tile(scores, multiples=[B]) # [B x beam_size]\n base_indices = tf.reshape(tf.tile(tf.range(B)[:, None], multiples=[1, beam_size]), [-1])\n preds_prev = -1 * tf.ones([B * beam_size, beam_size], tf.int32)\n lengths = tf.zeros([B * beam_size], tf.int32)\n # marks_token = tf.zeros([B * beam_size, 0], tf.int32)\n prev = time()\n for t in range(T):\n p_prior = tf.ones([B*beam_size, V]) / V\n p_past = tf.ones([B*beam_size, V]) / V\n p_cur = tf.reshape(tf.tile(distribution[:, t, :], [1, 
beam_size]), [B*beam_size, V])\n p_log = tf.math.log(p_past) + tf.math.log(p_cur) - tf.math.log(p_prior)\n\n scores_cur, preds_cur = tf.nn.top_k(p_log, k=beam_size, sorted=True)\n\n # current scores\n scores = scores[:, None] + scores_cur # [B x beam_size, beam_size]\n scores = tf.reshape(scores, [B, beam_size ** 2])\n\n # current predicts\n marks_cur = tf.cast(tf.not_equal(preds_cur, preds_prev), tf.int32)\n\n # length penalty\n lengths = lengths[:, None] + marks_cur\n lp_score = tf.reshape(tf.pow((5+tf.cast(lengths, tf.float32))/6, lp), [B, beam_size ** 2])\n # lp_score = 1.0\n scores /= lp_score\n\n # pruning\n _, k_indices = tf.nn.top_k(scores, k=beam_size)\n k_indices = base_indices * beam_size * beam_size + tf.reshape(k_indices, [-1]) # [B x beam_size]\n\n # # update marks_token\n # marks_cur = tf.reshape(marks_cur, [-1])\n # marks_cur = tf.gather(marks_cur, k_indices)\n # marks_token = tf.gather(marks_token, k_indices // beam_size)\n # marks_token = tf.concat([marks_token, marks_cur[:, None]], 1)\n\n # update lengths\n lengths = tf.reshape(lengths, [-1])\n lengths = tf.gather(lengths, k_indices)\n\n # print('lengths:', (lengths - tf.reduce_sum((marks_token), -1)).numpy())\n\n # Update scores\n scores = tf.reshape(scores, [-1])\n scores = tf.gather(scores, k_indices)\n\n # update preds\n preds_prev = preds_cur\n preds_cur = tf.reshape(preds_cur, [-1])\n preds_cur = tf.gather(preds_cur, k_indices)\n # k_indices: [0~B x beam_size x beam_size], preds: [0~B x beam_size]\n aligns = tf.gather(aligns, k_indices // beam_size)\n aligns = tf.concat([aligns, preds_cur[:, None]], -1)\n\n print(time() - prev, 's')\n prev = time()\n\n aligns = aligns[::beam_size, :]\n # marks_token = marks_token[::beam_size, :]\n # lengths = lengths[::beam_size]\n # max_len = tf.reduce_max(lengths)\n # predicts = []\n # for b in range(B):\n # predict = tf.reshape(tf.gather(aligns[b, :], tf.where(marks_token[b, :]>0)), [-1])\n # pad = tf.zeros([max_len - lengths[b]], tf.int32)\n # predicts.append(tf.concat([predict, pad], 0))\n # tf.stack(predicts, 0)\n\n return aligns", "def random_walk_with_restart(M, c, i):\n\n # Begin the RWR from node i (here the seed)\n walk = [i]\n\n # Repeat the loop while the function doesn't reached the return\n while True:\n # Select a random float between 0 and 1\n r = random.random()\n # If at least 1 walk from the seed\n if len(walk) > 1:\n # If r is <= than the probability to return to the seed then return to the seed and stop the RWR\n if r <= c:\n return walk\n\n # Store in a variable the line of the matrix corresponding to the node selected by RWR\n vector = M[i, :].tolist()[0]\n\n # Depending on transition probability from the actual node to another, do a multinomial experiment (Bernoulli\n # scheme with more than 2 results possible) and store the node selected\n # Choose a node randomly depending on probability RANDOM EXPERIMENT\n i = multinomial(1, vector, size=1).tolist()[0].index(1) # CHANGE SEED\n\n # Add the node selected by the multinomial experiment to the walk list\n walk.append(i)", "def __init__(self,\n minimal_random_walk_change_difference_value: float,\n damping_factor: float,\n max_iterations: int,\n verbose: bool = False):\n self.graph: nx.Graph = None\n\n self._hashtags_tf_idf_vectorizer: TfidfVectorizer = None\n self._hashtags_tf_idf_representation: np.ndarray = None\n\n self._hashtag_labels: Union[set, np.ndarray] = None\n self._users_labels: Union[set, np.ndarray] = None\n self._tweet_labels: Union[set, np.ndarray] = None\n\n self._transition_matrix: np.ndarray 
= None\n self._hashtag_encoder: ModifiedOneHotEncoder = ModifiedOneHotEncoder()\n\n self.minimal_random_walk_change_difference_value = minimal_random_walk_change_difference_value\n self.damping_factor = damping_factor\n self.max_iterations = max_iterations\n self.verbose = verbose", "def test_id(self):\n\n w, h = 11, 11\n method = \"minimax\"\n value_table = [[0] * w for _ in range(h)]\n value_table[3][0] = 1\n value_table[2][3] = 1\n value_table[4][4] = 2\n value_table[7][2] = 3\n eval_fn = EvalTable(value_table)\n\n depths = [\"7+\", \"6\", \"5\", \"4\", \"3\", \"2\", \"1\"]\n exact_counts = [((4, 4), set([(2, 3), (3, 0)])),\n ((16, 6), set([(2, 3), (3, 0)])),\n ((68, 20), set([(2, 3), (3, 2)])),\n ((310, 21), set([(2, 3), (3, 2)])),\n ((1582, 45), set([(3, 0), (3, 2)])),\n ((7534, 45), set([(3, 0), (3, 2)])),\n ((38366, 74), set([(0, 3), (2, 3), (3, 0), (3, 2)]))]\n\n time_limit = 3200\n while time_limit >= TIMER_MARGIN:\n agentUT, board = self.initAUT(-1, eval_fn, True, method, (1, 1), (0, 0), w, h)\n\n legal_moves = board.get_legal_moves()\n timer_start = curr_time_millis()\n time_left = lambda : time_limit - (curr_time_millis() - timer_start)\n move = agentUT.get_move(board, legal_moves, time_left)\n finish_time = time_left()\n\n self.assertTrue(len(board.visited) > 4, ID_FAIL)\n\n self.assertTrue(finish_time > 0,\n \"Your search failed iterative deepening due to timeout.\")\n\n # print time_limit, board.counts, move\n\n time_limit /= 2\n # Skip testing if the search exceeded 7 move horizon\n if (board.counts[0] > exact_counts[-1][0][0] or\n board.counts[1] > exact_counts[-1][0][1] or\n finish_time < 5):\n continue\n\n for idx, ((n, m), c) in enumerate(exact_counts[::-1]):\n if n > board.counts[0]:\n continue\n self.assertIn(move, c, ID_ERROR.format(depths[idx], 2 * time_limit, move, *board.counts))\n break", "def beam_search(decoding_function,\n initial_ids,\n initial_memories,\n int_dtype,\n float_dtype,\n translation_maxlen,\n batch_size,\n beam_size,\n vocab_size,\n eos_id,\n normalization_alpha):\n\n def _extend_hypotheses(current_time_step, alive_sequences, alive_log_probs, alive_memories):\n \"\"\" Generates top-k extensions of the alive beam candidates from the previous time-step, which are subsequently\n used to update the alive and finished sets at the current time-step; top-k = 2 s* beam_size \"\"\"\n # Get logits for the current prediction step\n next_ids = alive_sequences[:, :, -1] # [batch_size, beam_size]\n next_ids = tf.transpose(next_ids, [1, 0]) # [beam_size, batch_size]; transpose to match model\n next_ids = tf.reshape(next_ids, [-1, 1]) # [beam_size * batch_size, 1]\n\n step_logits, alive_memories = decoding_function(next_ids, current_time_step, alive_memories)\n step_logits = tf.reshape(step_logits, [beam_size, batch_size, -1]) # [beam_size, batch_size, num_words]\n step_logits = tf.transpose(step_logits, [1, 0, 2]) # [batch_size, beam_size, num_words]; transpose back\n\n # Calculate the scores for all possible extensions of alive hypotheses\n candidate_log_probs = tf.nn.log_softmax(step_logits, axis=-1)\n curr_log_probs = candidate_log_probs + tf.expand_dims(alive_log_probs, axis=2)\n\n # Apply length normalization\n length_penalty = 1.\n if normalization_alpha > 0.:\n length_penalty = ((5. + tf.to_float(current_time_step)) ** normalization_alpha) / \\\n ((5. + 1.) 
** normalization_alpha)\n curr_scores = curr_log_probs / length_penalty\n\n # Select top-k highest scores\n flat_curr_scores = tf.reshape(curr_scores, [batch_size, -1])\n top_scores, top_ids = tf.nn.top_k(flat_curr_scores, k=beam_size ** 2)\n\n # Recover non-normalized scores for tracking\n top_log_probs = top_scores * length_penalty\n\n # Determine the beam from which the top-scoring items originate and their identity (i.e. token-ID)\n top_beam_indices = top_ids // vocab_size\n top_ids %= vocab_size\n\n # Determine the location of top candidates\n batch_index_matrix = compute_batch_indices(batch_size, beam_size ** 2) # [batch_size, beam_size * factor]\n top_coordinates = tf.stack([batch_index_matrix, top_beam_indices], axis=2)\n\n # Extract top decoded sequences\n top_sequences = tf.gather_nd(alive_sequences, top_coordinates) # [batch_size, beam_size * factor, sent_len]\n top_sequences = tf.concat([top_sequences, tf.expand_dims(top_ids, axis=2)], axis=2)\n\n # Extract top memories\n top_memories = gather_memories(alive_memories, top_coordinates)\n # top_memories = alive_memories\n\n # Check how many of the top sequences have terminated\n top_eos_flags = tf.equal(top_ids, eos_id) # [batch_size, beam_size * factor]\n\n # Diversify beams at the outset of the generation\n init_top_sequences = tf.reshape(\n tf.reshape(top_sequences, [batch_size, beam_size, beam_size, -1])[:, :, 1, :], [batch_size, beam_size, -1])\n init_top_log_probs = \\\n tf.reshape(tf.reshape(top_log_probs, [batch_size, beam_size, beam_size])[:, :, 1], [batch_size, beam_size])\n init_top_scores = \\\n tf.reshape(tf.reshape(top_scores, [batch_size, beam_size, beam_size])[:, :, 1], [batch_size, beam_size])\n init_top_eos_flags = \\\n tf.reshape(tf.reshape(top_eos_flags, [batch_size, beam_size, beam_size])[:, :, 1], [batch_size, beam_size])\n\n top_sequences, top_log_probs, top_scores, top_eos_flags = \\\n tf.cond(tf.equal(current_time_step, 1),\n lambda: [init_top_sequences, init_top_log_probs, init_top_scores, init_top_eos_flags],\n lambda: [top_sequences, top_log_probs, top_scores, top_eos_flags])\n\n return top_sequences, top_log_probs, top_scores, top_eos_flags, top_memories\n\n def _update_alive(top_sequences, top_scores, top_log_probs, top_eos_flags, top_memories):\n \"\"\" Assembles an updated set of unfinished beam candidates from the set of top-k translation hypotheses\n generated at the current time-step; top-k for the incoming sequences in 2 * beam_size \"\"\"\n # Exclude completed sequences from the alive beam by setting their scores to a large negative value\n top_scores += tf.to_float(top_eos_flags) * (-1. * 1e7)\n # Update the alive beam\n updated_alive_sequences, updated_alive_log_probs, updated_alive_eos_flags, updated_alive_memories = \\\n gather_top_sequences(top_sequences,\n top_scores,\n top_log_probs,\n top_eos_flags,\n top_memories,\n beam_size,\n batch_size,\n 'alive')\n\n return updated_alive_sequences, updated_alive_log_probs, updated_alive_eos_flags, updated_alive_memories\n\n def _update_finished(finished_sequences, finished_scores, finished_eos_flags, top_sequences, top_scores,\n top_eos_flags):\n \"\"\" Updates the list of completed translation hypotheses (i.e. 
ones terminating in <EOS>) on the basis of the\n top-k hypotheses generated at the current time-step; top-k for the incoming sequences in 2 * beam_size \"\"\"\n # Match the length of the 'finished sequences' tensor with the length of the 'finished scores' tensor\n zero_padding = tf.zeros([batch_size, beam_size, 1], dtype=int_dtype)\n finished_sequences = tf.concat([finished_sequences, zero_padding], axis=2)\n # Exclude incomplete sequences from the finished beam by setting their scores to a large negative value\n top_scores += (1. - tf.to_float(top_eos_flags)) * (-1. * 1e7)\n # Combine sequences finished at previous time steps with the top sequences from current time step, as well as\n # their scores and eos-flags, for the selection of a new, most likely, set of finished sequences\n top_finished_sequences = tf.concat([finished_sequences, top_sequences], axis=1)\n top_finished_scores = tf.concat([finished_scores, top_scores], axis=1)\n top_finished_eos_flags = tf.concat([finished_eos_flags, top_eos_flags], axis=1)\n # Update the finished beam\n updated_finished_sequences, updated_finished_scores, updated_finished_eos_flags, _ = \\\n gather_top_sequences(top_finished_sequences,\n top_finished_scores,\n top_finished_scores,\n top_finished_eos_flags,\n None,\n beam_size,\n batch_size,\n 'finished')\n\n return updated_finished_sequences, updated_finished_scores, updated_finished_eos_flags\n\n def _decoding_step(current_time_step,\n alive_sequences,\n alive_log_probs,\n finished_sequences,\n finished_scores,\n finished_eos_flags,\n alive_memories):\n \"\"\" Defines a single step of greedy decoding. \"\"\"\n # 1. Get the top sequences/ scores/ flags for the current time step\n top_sequences, top_log_probs, top_scores, top_eos_flags, top_memories = \\\n _extend_hypotheses(current_time_step,\n alive_sequences,\n alive_log_probs,\n alive_memories)\n\n # 2. Update the alive beam\n alive_sequences, alive_log_probs, alive_eos_flags, alive_memories = \\\n _update_alive(top_sequences,\n top_scores,\n top_log_probs,\n top_eos_flags,\n top_memories)\n\n # 3. Update the finished beam\n finished_sequences, finished_scores, finished_eos_flags = \\\n _update_finished(finished_sequences,\n finished_scores,\n finished_eos_flags,\n top_sequences,\n top_scores,\n top_eos_flags)\n\n return current_time_step + 1, alive_sequences, alive_log_probs, finished_sequences, finished_scores, \\\n finished_eos_flags, alive_memories\n\n def _continue_decoding(curr_time_step,\n alive_sequences,\n alive_log_probs,\n finished_sequences,\n finished_scores,\n finished_eos_flags,\n alive_memories):\n \"\"\" Returns 'True' if all of the sequences in the extended hypotheses exceeded the maximum specified\n length or if none of the extended hypotheses are more likely than the lowest scoring finished hypothesis. \"\"\"\n # Check if the maximum prediction length has been reached\n length_criterion = tf.greater(curr_time_step, translation_maxlen)\n\n # Otherwise, check if the most likely alive hypothesis is less likely than the least probable completed sequence\n # Calculate the best possible score of the most probably sequence currently alive\n max_length_penalty = 1.\n if normalization_alpha > 0.:\n max_length_penalty = ((5. + tf.to_float(translation_maxlen)) ** normalization_alpha) / \\\n ((5. + 1.) 
** normalization_alpha)\n\n highest_alive_score = alive_log_probs[:, 0] / max_length_penalty\n # Calculate the score of the least likely sequence currently finished\n lowest_finished_score = tf.reduce_min(finished_scores * tf.cast(finished_eos_flags, float_dtype), axis=1)\n # Account for the case in which none of the sequences in 'finished' have terminated so far;\n # In that case, each of the unfinished sequences is assigned a high negative probability, so that the\n # termination condition is not met\n mask_unfinished = (1. - tf.to_float(tf.reduce_any(finished_eos_flags, 1))) * (-1. * 1e7)\n lowest_finished_score += mask_unfinished\n\n # Check is the current highest alive score is lower than the current lowest finished score\n likelihood_criterion = tf.reduce_all(tf.greater(lowest_finished_score, highest_alive_score))\n\n # Decide whether to continue the decoding process\n return tf.logical_not(tf.logical_or(length_criterion, likelihood_criterion))\n\n # Initialize alive sequence and score trackers and expand to beam size\n alive_log_probs = tf.zeros([batch_size, beam_size])\n\n # Initialize decoded sequences\n alive_sequences = tf.expand_dims(batch_to_beam(initial_ids, beam_size), 2)\n\n # Initialize finished sequence, score, and flag trackers\n finished_sequences = tf.expand_dims(batch_to_beam(initial_ids, beam_size), 2)\n finished_scores = tf.ones([batch_size, beam_size]) * (-1. * 1e7) # initialize to a low value\n finished_eos_flags = tf.zeros([batch_size, beam_size], dtype=tf.bool)\n\n # Initialize memories\n alive_memories = initial_memories\n\n # Execute the auto-regressive decoding step via while loop\n _, alive_sequences, alive_log_probs, finished_sequences, finished_scores, finished_eos_flags, _ = \\\n tf.while_loop(\n _continue_decoding,\n _decoding_step,\n [tf.constant(1), alive_sequences, alive_log_probs, finished_sequences, finished_scores, finished_eos_flags,\n alive_memories],\n shape_invariants=[tf.TensorShape([]),\n tf.TensorShape([None, None, None]),\n alive_log_probs.get_shape(),\n tf.TensorShape([None, None, None]),\n finished_scores.get_shape(),\n finished_eos_flags.get_shape(),\n get_memory_invariants(alive_memories)],\n parallel_iterations=10,\n swap_memory=False,\n back_prop=False)\n\n alive_sequences.set_shape((None, beam_size, None))\n finished_sequences.set_shape((None, beam_size, None))\n\n # Account for the case in which a particular sequence never terminates in <EOS>;\n # in that case, copy the contents of the alive beam for that item into the finished beam (sequence + score)\n # tf.reduce_any(finished_eos_flags, 1) is False if there exists no completed translation hypothesis for a source\n # sentence in either of the beams , i.e. 
no replacement takes place if there is at least one finished translation\n finished_sequences = tf.where(tf.reduce_any(finished_eos_flags, 1), finished_sequences, alive_sequences)\n # Attention: alive_scores are not length normalized!\n finished_scores = tf.where(tf.reduce_any(finished_eos_flags, 1), finished_scores, alive_log_probs)\n # Truncate initial <GO> in finished sequences\n finished_sequences = finished_sequences[:, :, 1:]\n\n return finished_sequences, finished_scores", "def train_loop(args, train_dataset, dev_dataset, global_mean=0.0, test_dataset=None):\n mirrored_strategy = tf.distribute.MirroredStrategy()\n with mirrored_strategy.scope():\n # build model\n user_ids = keras.Input(shape=(), dtype=tf.int32, name=\"user_id\")\n movie_ids = keras.Input(shape=(), dtype=tf.int32, name=\"movie_id\")\n item_bin_ids = keras.Input(shape=(), dtype=tf.int32, name=\"item_time_bias\")\n user_time_dev = keras.Input(shape=(), dtype=tf.float32, name=\"user_time_dev\")\n batch_score = MF_Netflix(args.user_count, args.item_count, args.hidden_dim, global_mean)(\\\n [user_ids, movie_ids, item_bin_ids, user_time_dev])\n model = keras.Model(inputs={\"user_id\":user_ids, \"movie_id\":movie_ids, \\\n \"item_time_bias\": item_bin_ids, \"user_time_dev\": user_time_dev}, \\\n outputs=batch_score)\n # build the model train setting\n lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(\n args.learning_rate,\n decay_steps=20000,\n decay_rate=0.96,\n staircase=True)\n optimizer = keras.optimizers.Adam(args.learning_rate)\n #optimizer = keras.optimizers.RMSprop(args.learning_rate)\n #optimizer = keras.optimizers.SGD(args.learning_rate)\n loss = keras.losses.MeanSquaredError()\n metrics = [keras.metrics.MeanSquaredError()]\n model.compile(optimizer, loss=loss, metrics=metrics)\n # make the training loop and evaluation\n checkpoint_callback = keras.callbacks.ModelCheckpoint(\\\n filepath=args.model_path, save_best_only=True, save_weights_only=True)\n tensorbaord_callback = keras.callbacks.TensorBoard(log_dir=args.summary_dir, \\\n histogram_freq=1)\n steps_per_epoch = args.steps_per_epoch\n model.fit(train_dataset, epochs=args.epochs, \\\n callbacks=[checkpoint_callback, tensorbaord_callback], \\\n validation_data=dev_dataset, steps_per_epoch=steps_per_epoch, \\\n validation_steps=args.val_steps)", "def __init__(self, observation_size,\n num_actions,\n observation_to_actions,\n optimizer,\n session,\n random_action_probability=0.05,\n exploration_period=1000,\n store_every_nth=5,\n train_every_nth=5,\n minibatch_size=32,\n discount_rate=0.95,\n max_experience=30000,\n target_network_update_rate=0.01,\n clip_loss_function = False,\n clip_reward = False,\n replay_start_size= 1000,\n summary_writer=None,\n game_watcher=None,\n game_watcher_test=None,\n perfect_actions_known=False,\n target_network_update_discrete=False):\n # memorize arguments\n self.observation_size = observation_size\n self.num_actions = num_actions\n self.target_network_update_discrete = target_network_update_discrete # The original way target network is implemented is continously,\n # while the atari paper proposes a step update (every C step reset Q = Q_target)\n self.q_network = observation_to_actions\n self.optimizer = optimizer\n self.s = session\n self.perfect_actions_known = perfect_actions_known\n self.random_action_probability = random_action_probability\n self.exploration_period = exploration_period\n self.store_every_nth = store_every_nth\n self.train_every_nth = train_every_nth\n self.minibatch_size = minibatch_size\n 
self.discount_rate = tf.constant(discount_rate)\n self.max_experience = max_experience\n self.target_network_update_rate = \\\n tf.constant(target_network_update_rate)\n self.replay_start_size = replay_start_size\n\n # deepq state\n self.actions_executed_so_far = 0\n\n\n self.iteration = 0\n self.summary_writer = summary_writer\n\n self.number_of_times_store_called = 0\n self.number_of_times_train_called = 0\n self.clip_loss_function = clip_loss_function\n self.clip_reward = clip_reward\n self.collected_prediction_errors = []\n self.game_watcher = game_watcher\n self.game_watch_summaries = []\n self.game_watcher_test = game_watcher_test\n self.create_variables()\n\n self.initialize_experience_statistics()\n self.range_of_target_values = ()", "def __init__(self, tot_rows, tot_cols, depth, low=0.0, high=1.0, verbose=True):\n #-Global variables\n self.tot_rows = tot_rows\n self.tot_cols = tot_cols\n self.depth = depth\n #-Placeholders\n self.input_placeholder = tf.placeholder(tf.float32, shape=depth, name=\"input_placeholder\")\n self.learning_rate_placeholder = tf.placeholder(tf.float32, shape=None, name=\"learning_rate_placeholder\")\n self.radius_placeholder = tf.placeholder(tf.float32, shape=None, name=\"radius_placeholder\")\n #-Constants\n indices_matrix = np.zeros((tot_rows, tot_cols, 2))\n for row in range(tot_rows):\n for col in range(tot_cols):\n indices_matrix[row,col,:] = (row, col)\n #print indices_matrix\n #print \"Check....\"\n #a = np.array([1,2])\n #print(np.linalg.norm(a-indices_matrix, axis=2))\n #return\n grid_matrix = tf.constant(indices_matrix, shape=[tot_rows, tot_cols, 2])\n\n #-Variables and saver\n self.weight = tf.get_variable(\"weights\", [tot_rows, tot_cols, depth], initializer=tf.random_uniform_initializer(minval=low, maxval=high))\n self.tf_saver = tf.train.Saver({\"weights\": self.weight})\n #initializer=tf.random_normal_initializer(mean=0.0, stddev=1.0))\n #-Distance estimation\n difference = tf.subtract(self.input_placeholder, self.weight)\n self.distance_matrix = tf.norm(difference, ord='euclidean', axis=2, name=\"euclidean_matrix\")\n distance_matrix_flatten = tf.reshape(self.distance_matrix, [-1])\n self.softmax_distance_matrix = tf.reshape(tf.nn.softmax(tf.multiply(distance_matrix_flatten, -1.0)), shape=[tot_rows,tot_cols])\n\n #-Train operations\n #find the index of the best matching unit\n self.distance_argmin = tf.argmin(distance_matrix_flatten)\n self.distance_min = tf.gather(distance_matrix_flatten, self.distance_argmin)\n #generate a tensor with the best matching unit coord\n grid_matrix_flatten = tf.reshape(grid_matrix, [-1,2])\n bmu_coord = tf.gather(grid_matrix_flatten, self.distance_argmin, axis=0)\n #difference between the coord tensor and the constant grid matrix\n difference_neighborhood = tf.subtract(bmu_coord, grid_matrix)\n self.neighborhood_matrix = tf.norm(difference_neighborhood, ord='euclidean', axis=2, name=\"neighborhood_matrix\")\n #pass the neighborhood_matrix through a linear function\n #after this step the BMU having distance 0 has distance 1\n coefficient = tf.divide(-1.0, self.radius_placeholder)\n self.neighborhood_matrix = tf.add(tf.multiply(coefficient, tf.cast(self.neighborhood_matrix, tf.float32)), 1.0) #linearly scale the distance\n self.neighborhood_matrix = tf.clip_by_value(self.neighborhood_matrix, clip_value_min=0.0, clip_value_max=1.0)\n\n #-Evaluate the delta\n self.weighted_learning_rate_matrix = tf.multiply(self.learning_rate_placeholder, self.neighborhood_matrix)\n self.weighted_learning_rate_matrix = 
tf.expand_dims(self.weighted_learning_rate_matrix, axis=2)\n delta = tf.multiply(self.weighted_learning_rate_matrix, difference)\n self.train = self.weight.assign(tf.add(self.weight, delta))\n\n #-Error measures\n self.distance_mean = tf.reduce_mean(self.distance_matrix)\n weight_flatten = tf.reshape(self.weight, [-1, depth])\n self.bmu_array = tf.gather(weight_flatten, self.distance_argmin)\n #self.reconstruction_error = tf.norm(tf.subtract(self.input_placeholder, self.bmu_array), ord='euclidean')", "def test3(self):\n with self.test_session() as sess:\n table = np.array([[[0.0, 0.6, 0.4],\n [0.0, 0.4, 0.6],\n [0.0, 0.0, 1.0]]] * 3)\n\n for cell_transform in ['default', 'flatten', 'replicate']:\n cell = MarkovChainCell(table)\n initial_state = cell.zero_state(1, tf.int32)\n initial_input = initial_state[0]\n\n with tf.variable_scope('test3_{}'.format(cell_transform)) as scope:\n best_sparse, best_logprobs = beam_decoder(\n cell=cell,\n beam_size=7,\n stop_token=2,\n initial_state=initial_state,\n initial_input=initial_input,\n tokens_to_inputs_fn=lambda x:tf.expand_dims(x, -1),\n max_len=5,\n cell_transform=cell_transform,\n output_dense=False,\n scope=scope\n )\n\n tf.variables_initializer([cell.log_table_var]).run()\n\n with tf.variable_scope(scope, reuse=True) as varscope:\n best_sparse_2, best_logprobs_2 = beam_decoder(\n cell=cell,\n beam_size=7,\n stop_token=2,\n initial_state=initial_state,\n initial_input=initial_input,\n tokens_to_inputs_fn=lambda x:tf.expand_dims(x, -1),\n max_len=5,\n cell_transform=cell_transform,\n output_dense=False,\n scope=varscope\n )\n\n assert all(sess.run(tf.equal(best_sparse.values, best_sparse_2.values)))\n assert np.isclose(*sess.run((best_logprobs, best_logprobs_2)))", "def __init__(self, m, n, dim, num_iterations, eta=0.5, sigma=None):\n\n self._m = m\n self._n = n\n self._neighbourhood = []\n self._topography = []\n self._num_iterations = int(num_iterations)\n self._learned = False\n self.abnormal_dist = 0\n\n if sigma is None:\n sigma = max(m, n) / 2.0 # Constant radius\n else:\n sigma = float(sigma)\n\n # Inicializace grafu\n self._graph = tf.Graph()\n\n with self._graph.as_default():\n # vahy jednotlivych neuronu jsou nahodne nastavene -- matice m X n kde na kazde pozici je\n # 1-D pole velikosti dimenze vstup. 
dat\n self._W = tf.Variable(tf.random_normal([m * n, dim], seed=5))\n\n # rozlozeni gridu - pole m X n kde jsou pozice neuronu\n self._topography = tf.constant(np.array(list(self._neuron_location(m, n))))\n\n # Placeholder pro vstupni data\n self._X = tf.placeholder('float', [dim])\n\n # Placeholder pro pocet iteraci\n self._iter = tf.placeholder('float')\n\n # Vypocet BMU - spocita euklidovu vzdalenost mezi vstupnim vektorem a kazdym neuronem gridu (jeho vahou)\n # a vrati index index toho neuronu, ktery ma nejmensi vzdalenost\n d = tf.sqrt(tf.reduce_sum(tf.pow(self._W - tf.stack([self._X for i in range(m * n)]), 2), 1))\n self.WTU_idx = tf.argmin(d, 0)\n\n # vrati lokaci neuronu na zaklade jeho indexu\n slice_start = tf.pad(tf.reshape(self.WTU_idx, [1]), np.array([[0, 1]]))\n self.WTU_loc = tf.reshape(tf.slice(self._topography, slice_start, tf.constant(np.array([1, 2]))), [2])\n self.bd2 = self.WTU_loc\n\n # Zmena hodnot sigma a eta podle aktualni iterace\n learning_rate = 1 - self._iter / self._num_iterations\n _eta_new = eta * learning_rate\n _sigma_new = sigma * learning_rate\n\n # Neighbourhood funkce ktera generuje vektor s upravenou learning rate pro vsechny neurony na zaklade aktualni iterace a BMU\n distance_square = tf.reduce_sum(tf.pow(tf.subtract(self._topography, tf.stack([self.WTU_loc for i in range(m * n)])), 2), 1)\n neighbourhood_func = tf.exp(tf.negative(tf.div(tf.cast(distance_square, 'float32'), tf.pow(_sigma_new, 2))))\n\n # vynasobeni learning rate s fci sousedu\n # Operace je pak pouzita k aktualizaci vektoru vah jednotlivych neuronu na zaklade vstupu\n eta_into_gamma = tf.multiply(_eta_new, neighbourhood_func)\n\n # uprava vah na zaklade nove vypoctenych\n # nove vypoctene vahy musi byt upraveny na spravny shape\n weight_multiplier = tf.stack(\n [tf.tile(tf.slice(eta_into_gamma, np.array([i]), np.array([1])), [dim]) for i in range(m * n)])\n delta_W = tf.multiply(weight_multiplier, tf.subtract(tf.stack([self._X for i in range(m * n)]), self._W))\n new_W = self._W + delta_W\n self._training = tf.assign(self._W, new_W)\n\n # Inicializace vsech promennych\n init = tf.global_variables_initializer()\n self._sess = tf.Session()\n self._sess.run(init)\n self._saver = tf.train.Saver()", "def generate_parameters_random_walk():\r\n\r\n logging.info('Loading distances_nets from disk...')\r\n\r\n\r\n\r\n sum_weights = {}\r\n\r\n amount_edges = {}\r\n\r\n\r\n\r\n layer = 0\r\n\r\n while is_pickle('distances_nets_weights-layer-' + str(layer)):\r\n\r\n logging.info('Executing layer {}...'.format(layer))\r\n\r\n weights = restore_variable_from_disk('distances_nets_weights-layer-' + str(layer))\r\n\r\n\r\n\r\n for node, list_weights in weights.items():\r\n\r\n if layer not in sum_weights:\r\n\r\n sum_weights[layer] = 0\r\n\r\n if layer not in amount_edges:\r\n\r\n amount_edges[layer] = 0\r\n\r\n\r\n\r\n for w in list_weights:\r\n\r\n sum_weights[layer] += w\r\n\r\n amount_edges[layer] += 1\r\n\r\n\r\n\r\n logging.info('Layer {} executed.'.format(layer))\r\n\r\n layer += 1\r\n\r\n\r\n\r\n average_weight = {}\r\n\r\n for layer in sum_weights.keys():\r\n\r\n average_weight[layer] = sum_weights[layer] / amount_edges[layer]\r\n\r\n\r\n\r\n logging.info(\"Saving average_weights on disk...\")\r\n\r\n save_variable_on_disk(average_weight, 'average_weight')\r\n\r\n\r\n\r\n amount_neighbours = {}\r\n\r\n\r\n\r\n layer = 0\r\n\r\n while is_pickle('distances_nets_weights-layer-' + str(layer)):\r\n\r\n logging.info('Executing layer {}...'.format(layer))\r\n\r\n weights = 
restore_variable_from_disk('distances_nets_weights-layer-' + str(layer))\r\n\r\n\r\n\r\n amount_neighbours[layer] = {}\r\n\r\n\r\n\r\n for node, list_weights in weights.items():\r\n\r\n cont_neighbours = 0\r\n\r\n for w in list_weights:\r\n\r\n if w > average_weight[layer]:\r\n\r\n cont_neighbours += 1\r\n\r\n amount_neighbours[layer][node] = cont_neighbours\r\n\r\n\r\n\r\n logging.info('Layer {} executed.'.format(layer))\r\n\r\n layer += 1\r\n\r\n\r\n\r\n logging.info(\"Saving amount_neighbours on disk...\")\r\n\r\n save_variable_on_disk(amount_neighbours, 'amount_neighbours')", "def do_run(run, dirname, args):\n with tf.Graph().as_default():\n learner_assumptions = get_learner_assumption_kwargs(args)\n\n # Each run has a different random seed equal to the run id.\n np.random.seed(run)\n random.seed(run)\n\n is_gridworld = not 'lunar' in args.env_name.lower()\n\n # TODO: Reset test goal inside here? Or use environment instead?\n rollouts = [[]]\n # Initialize model with wrong transition model based on aristotle learner.\n rollouts[0] += make_rollouts(\n #policy=aristotle_pilot_policies[0], # Was from a noisy policy.\n policy=policies.make_perfect_pilot_policy(\n goal=test_goal,\n act_labels=train_act_labels,\n ),\n env=test_env,\n n=args.n_initial_rollouts,\n task_idx=task_idx,\n )\n assert(len(rollouts[0]) == args.n_initial_rollouts)\n rollouts[0] += make_rollouts(\n #policy=aristotle_pilot_policies[0], # Was from a noisy policy.\n policy=policies.make_perfect_pilot_policy(\n goal=test_goal,\n act_labels=train_act_labels,\n ),\n env=wrong_train_env,\n n=args.n_initial_wrong_rollouts,\n task_idx=task_idx,\n )\n\n model = None\n Q = None\n start_pos = None\n\n logs = []\n evals = []\n evals_unassisted = []\n learner_q_values = []\n with tf.Session() as sess:\n if needs_model:\n model = inverse_softq.InverseSoftQModel(\n train_envs=[test_env]\n )\n\n # NOTE: Used to be inside episode loop!\n # TODO: Check if this broke anything!\n support_env = get_support_env(\n s=args.learner_support,\n model=model,\n sess=sess,\n goal=test_goal,\n test_act_labels=test_act_labels,\n n_act_dim=n_act_dim,\n threshold=args.bumper_threshold,\n q_bumper_boltzmann=args.q_bumper_boltzmann,\n q_bumper_version=args.q_bumper_version,\n q_bumper_target_r=args.q_bumper_target_r,\n q_bumper_length_normalized=args.q_bumper_length_normalized,\n q_bumper_logistic_upper_prob=args.q_bumper_logistic_upper_prob,\n q_bumper_alpha=args.q_bumper_alpha,\n q_threshold=args.q_threshold,\n test_env=test_env,\n env_name=args.env_name,\n start_pos=start_pos,\n trajectory_distance=args.trajectory_distance,\n dirname=dirname,\n p_override=args.p_override,\n undoing=args.undoing,\n p_suboptimal_override=args.p_suboptimal_override,\n override_next_best=args.override_next_best,\n optimal_agent_training_timesteps=args.optimal_agent_training_timesteps,\n optimal_agent_smoothing_timesteps=args.optimal_agent_smoothing_timesteps,\n gamma=args.gamma,\n )\n policy = get_learner_policy(\n s=args.learner_policy,\n #model=model,\n #sess=sess,\n #test_goal=test_goal,\n #train_act_labels=train_act_labels,\n #test_act_labels=test_act_labels,\n #n_act_dim=n_act_dim,\n #Q=Q,\n env=support_env,\n exploration_fraction=args.exploration_fraction,\n exploration_final_eps=args.exploration_final_eps,\n exploration_final_lr=args.exploration_final_lr,\n total_episodes=args.n_episodes,\n run=run,\n )\n\n\n for ep in range(args.n_episodes):\n #print('Rn: {} Ep: {}'.format(run, ep), flush=True)\n support_env_with_monitor = Monitor(\n support_env,\n 
directory=os.path.join(\n dirname,\n 'assisted',\n str(run).zfill(3),\n str(ep).zfill(3),\n ),\n force=True,\n video_callable=lambda e: True if is_gridworld or utils.IS_LOCAL else False,\n #video_callable=(lambda e: True) if is_gridworld else None,\n )\n # Simulate human learning\n \"\"\"\n if args.learner_policy == 'q':\n assert(args.n_learn_rollouts > 0)\n Q = policies.q_learning(\n rollouts if ep == 0 else [rollouts[0][-args.n_learn_rollouts:]],\n n_obs_dim=n_obs_dim,\n n_act_dim=n_act_dim,\n user_action=args.think_all_actions_own,\n Q_init=Q,\n learning_rate=args.q_learning_rate,\n )\n \"\"\"\n\n _logs = None\n if needs_model:\n _logs = inverse_softq.run_learning(\n model=model,\n sess=sess,\n # train_tasks=train_aristotle_envs[:1],\n rollouts=rollouts,\n test_goal=test_goal,\n test_act_labels=test_act_labels,\n train_act_labels=train_act_labels,\n n_iters=args.n_softq_train_iters,\n train_frac=0.9, # TODO: Change to 1\n **learner_assumptions\n )\n\n # Test\n #episode_seed = [run, ep]\n\n perf = compute_assisted_perf(\n model=model,\n sess=sess,\n #test_act_labels=test_act_labels,\n #train_act_labels=train_act_labels,\n test_env=support_env_with_monitor,\n policy=policy,\n goal=test_goal,\n #seed=episode_seed,\n n_eval_rollouts=args.n_eval_rollouts,\n policy_explore=True,\n policy_update=True,\n **learner_assumptions\n )\n\n unassisted_perf = None\n if args.n_eval_unassisted_rollouts is not None:\n unassisted_support_env = get_support_env(\n s='unassisted',\n goal=test_goal,\n test_act_labels=test_act_labels,\n n_act_dim=n_act_dim,\n test_env=test_env,\n env_name=args.env_name,\n start_pos=start_pos,\n trajectory_distance=args.trajectory_distance,\n dirname=dirname,\n )\n unassisted_support_env_with_monitor = Monitor(\n unassisted_support_env,\n directory=os.path.join(\n dirname,\n 'unassisted',\n str(run).zfill(3),\n str(ep).zfill(3),\n ),\n force=True,\n video_callable=lambda e: True if is_gridworld or utils.IS_LOCAL else False,\n #video_callable=(lambda e: True) if is_gridworld else None,\n )\n unassisted_perf = compute_assisted_perf(\n model=model,\n sess=sess,\n #test_act_labels=test_act_labels,\n #train_act_labels=train_act_labels,\n test_env=unassisted_support_env_with_monitor,\n policy=policy,\n goal=test_goal,\n #seed=episode_seed,\n n_eval_rollouts=args.n_eval_unassisted_rollouts,\n policy_explore=False,\n policy_update=False,\n )\n unassisted_support_env_with_monitor.close()\n unassisted_support_env.close()\n\n new_rollouts = perf['rollouts']\n rollouts[task_idx] += new_rollouts[:args.n_learn_rollouts]\n if _logs is not None:\n logs.append(_logs)\n evals.append(perf)\n evals_unassisted.append(unassisted_perf)\n if args.learner_policy == 'q':\n learner_q_values.append(copy(policy.Q))\n\n support_env_with_monitor.close()\n\n support_env.close()\n policy.close()\n\n out_d = {\n 'logs': logs,\n 'evals': evals,\n 'evals_unassisted': (\n evals_unassisted\n if args.n_eval_unassisted_rollouts is not None\n else None\n ),\n 'q_values': learner_q_values,\n 'args': vars(args),\n 'run': run,\n 'support_details': support_env.get_support_details(),\n }\n with open(\n os.path.join(dirname, 'data{}.json'.format(str(run).zfill(3))),\n 'w',\n ) as f:\n json.dump(out_d, f, cls=NumpyEncoder)", "def simulate_walks(self, edge_type, num_walks, walk_length, schema=None):\n walks = []\n nodes = list(range(0, self.graph[edge_type].num_nodes))\n\n for walk_iter in tqdm.tqdm(range(num_walks)):\n random.shuffle(nodes)\n for node in nodes:\n walk = self.graph[edge_type].random_walk(\n [node], 
max_depth=walk_length - 1)\n for i in range(len(walk)):\n walks.append(walk[i])\n\n return walks", "def random_walk(turtle, distance, steps):\n turtle.color(randcolor(), randcolor())\n for step in range(0,steps):\n random_move(turtle, distance)\n gohome(turtle)", "def stochastic_fit(self):\r\n self.init_plot(self.FEATURES)\r\n\r\n # YOUR CODE HERE\r\n\r\n while self.training_iteration == 0 or self.training_iteration < self.MAX_ITERATIONS*self.DATAPOINTS:\r\n print('Iteration: ', self.training_iteration)\r\n datapoint = np.random.randint(0, self.DATAPOINTS)\r\n\r\n self.compute_gradient(datapoint)\r\n self.upd_theta()\r\n\r\n # plot every 100th iteration\r\n if not self.training_iteration % 100:\r\n self.update_plot(np.sum(np.square(self.gradient)))\r\n\r\n self.training_iteration += 1", "def __call__(self, params):\n logging.info('Running __call__ function...')\n batch_size = self._train_batch_size\n # For MCTS, the number of features for each trajecotry is unknown beforehand\n num_features = None\n\n if self._global_step_value % self._iterations_per_loop == 0:\n logging.info('Update iterator (gs=%d)...', self._global_step_value)\n # Feature/Labels Placeholders\n self.features_ph = {\n 'mcts_features':\n tf.placeholder(\n tf.float32,\n shape=[num_features, self._env_state_space],\n name='mcts_state_ph'),\n 'policy_features':\n tf.placeholder(\n tf.float32,\n shape=[num_features, self._env_state_space],\n name='policy_state_ph'),\n }\n self.labels_ph = {\n 'action_tensor':\n tf.placeholder(\n tf.float32,\n shape=[num_features, self._env_action_space],\n name='action_ph'),\n 'value_tensor':\n tf.placeholder(\n tf.float32, shape=[num_features], name='value_ph'),\n 'return_tensor':\n tf.placeholder(\n tf.float32, shape=[num_features], name='return_ph'),\n 'old_neg_logprob_tensor':\n tf.placeholder(\n tf.float32, shape=[num_features], name='old_neg'),\n 'mean_tensor':\n tf.placeholder(\n tf.float32,\n shape=[num_features, self._env_action_space],\n name='mean_ph'),\n 'logstd_tensor':\n tf.placeholder(\n tf.float32,\n shape=[num_features, self._env_action_space],\n name='logstd_ph'),\n 'mcts_enable_tensor':\n tf.placeholder(\n tf.bool, shape=[num_features], name='mcts_enable_ph'),\n 'policy_action_tensor':\n tf.placeholder(\n tf.float32,\n shape=[num_features, self._env_action_space],\n name='policy_action_ph'),\n 'policy_value_tensor':\n tf.placeholder(\n tf.float32, shape=[num_features], name='policy_value_ph'),\n 'policy_return_tensor':\n tf.placeholder(\n tf.float32, shape=[num_features], name='policy_return_ph'),\n 'policy_old_neg_logprob_tensor':\n tf.placeholder(\n tf.float32, shape=[num_features], name='policy_old_neg'),\n }\n # Create the dataset\n dataset = tf.data.Dataset.from_tensor_slices(\n (self.features_ph, self.labels_ph))\n dataset = dataset.shuffle(buffer_size=self._max_horizon)\n dataset = dataset.batch(batch_size, drop_remainder=True)\n\n # repeat until the loop is done\n dataset = dataset.repeat()\n if self._use_tpu:\n dataset = dataset.map(functools.partial(self._set_shapes, batch_size))\n dataset = dataset.prefetch(2)\n self._iterator = dataset.make_initializable_iterator()\n return self._iterator.get_next()\n else:\n return self._iterator.get_next()", "def train(self, iterations: int):\n\n s = \"{:3d} reward {:6.2f}/{:6.2f}/{:6.2f} len {:6.2f} epsilon {:1.3f} {}\"\n s_check = \"{:3d} reward {:6.2f}/{:6.2f}/{:6.2f} len {:6.2f} epsilon {:1.3f} saved {} \"\n total_steps = 0\n iter_metrics = []\n for n in range(iterations):\n r_min, r_mean, r_max, iter_steps = 
self.train_iter()\n iter_metrics.append((r_min, r_mean, r_max))\n total_steps += iter_steps\n\n if n == int(iterations / 2):\n self.steps_to_update_target_model = int(self.steps_to_update_target_model / 2)\n\n # checkpointing & logging\n s_print = s\n file_name = \"\"\n if n % self.checkpoint_freq == 0:\n file_name = f'my_dqn_{n}.pth'\n torch.save(self.target_dqn.state_dict(), os.path.join(self.checkpoint_path, file_name))\n s_print = s_check\n\n if self.verbose:\n print(s_print.format(\n n + 1,\n r_min,\n r_mean,\n r_max,\n total_steps,\n self.e_greedy,\n file_name\n ))\n iter_min = np.mean([x[0] for x in iter_metrics])\n iter_mean = np.mean([x[1] for x in iter_metrics])\n iter_max = np.mean([x[2] for x in iter_metrics])\n return iter_min, iter_mean, iter_max", "def train(self, rng):\n # pylint: disable=possibly-unused-variable\n rng, init_key = jax.random.split(rng)\n params = self.model.init(init_key, jnp.zeros([self.model.S_dim]))\n opt_state = self.tx.init(params)\n start_time = time.time()\n\n count_since_reset = 0\n all_metrics = []\n try:\n i = 0\n while i < self.num_steps:\n rng, key = jax.random.split(rng)\n # Pass the inputs in and take a gradient step.\n opt_state, params, metrics, grads, bad = self.opt_step(\n opt_state, params, key)\n all_metrics.append(jax.tree_map(np.array, metrics))\n if bad:\n finished_reason = \"nan\"\n return types.SimpleNamespace(**locals())\n\n count_since_reset += 1\n if i % self.print_every == 0 or np.remainder(np.log2(i), 1) == 0:\n now = time.time()\n rate = count_since_reset / (now - start_time)\n start_time = now\n count_since_reset = 0\n print(f\"{i} [{rate}/s]:\", jax.tree_map(float, metrics))\n sys.stdout.flush()\n time.sleep(0.02)\n i += 1\n\n except KeyboardInterrupt:\n finished_reason = \"interrupt\"\n return types.SimpleNamespace(**locals())\n\n finished_reason = \"done\"\n (opt_state, params) = jax.tree_map(np.array, (opt_state, params))\n return types.SimpleNamespace(**locals())\n # pylint: enable=possibly-unused-variable", "def train():\n print(\"Building dynamic character-level ALLDATASET data...\", flush=True)\n dataset = ALLDATASET(\n train_input=FLAGS.train_input, train_output=FLAGS.train_output,\n dev_input=FLAGS.dev_input, dev_output=FLAGS.dev_output,\n predict_input_file=FLAGS.predict_input_file, \n parse_repeated=FLAGS.parse_repeated,\n shuffle=True, max_input_length=FLAGS.max_sentence_length,\n max_label_length=FLAGS.max_sentence_length)\n \n print(\"Building computational graph...\", flush=True)\n graph = tf.Graph()\n \n with graph.as_default():\n\n tf.set_random_seed(1)\n random.seed(1)\n np.random.seed(1)\n\n # During training we use beam width 1. There are lots of complications on\n # the implementation, e.g. 
only tiling during inference.\n m = Seq2Seq(\n num_types=dataset.num_types(),\n max_encoder_length=FLAGS.max_sentence_length,\n max_decoder_length=FLAGS.max_sentence_length,\n pad_id=dataset.type_to_ix['_PAD'],\n eos_id=dataset.type_to_ix['_EOS'],\n go_id=dataset.type_to_ix['_GO'],\n space_id=dataset.type_to_ix[(' ',)],\n ix_to_type=dataset.ix_to_type,\n batch_size=FLAGS.batch_size, embedding_size=FLAGS.embedding_size,\n hidden_size=FLAGS.hidden_size, rnn_layers=FLAGS.rnn_layers,\n bidirectional_encoder=FLAGS.bidirectional_encoder,\n bidirectional_mode=FLAGS.bidirectional_mode,\n use_lstm=FLAGS.use_lstm, attention=FLAGS.attention, \n dropout=FLAGS.dropout, max_grad_norm=FLAGS.max_grad_norm, beam_size=1,\n epsilon=FLAGS.epsilon, beta1=FLAGS.beta1, beta2=FLAGS.beta2,\n restore=FLAGS.restore, model_output_dir=FLAGS.model_output_dir)\n \n # Allow TensorFlow to resort back to CPU when we try to set an operation to\n # a GPU where there's only a CPU implementation, rather than crashing.\n sess_config = tf.ConfigProto(allow_soft_placement=True)\n \n with tf.Session(graph=graph, config=sess_config) as sess:\n print(\"Initializing or restoring model...\", flush=True)\n m.start()\n \n # If the model was not restored, initialize the variable hyperparameters.\n if sess.run(m.lr) == 0:\n sess.run(tf.assign(m.lr, FLAGS.lr))\n if sess.run(m.p_sample) == 0:\n sess.run(tf.assign(m.p_sample, FLAGS.initial_p_sample))\n \n # Get the number of epochs that have passed (easier by getting batches now)\n step = m.global_step.eval()\n batches = dataset.get_train_batches(m.batch_size)\n epoch = step // len(batches)\n \n # Scheduled sampling decay\n i = FLAGS.initial_p_sample\n f = FLAGS.final_p_sample\n # The stopping point is based on the max epochs\n total_train_steps = len(batches) * FLAGS.epochs_p_sample\n if i != f and not FLAGS.linear_p_sample:\n k = total_train_steps / (float(lambertw(total_train_steps / 2)) * 2)\n expk = float(exp(-total_train_steps / k))\n delta_f = (f - i) * (1 + k) * (1 + k * expk) / (k - k * expk) - f\n delta_i = (f + delta_f) / (1 + k)\n \n while not FLAGS.max_epochs or epoch <= FLAGS.max_epochs:\n print(\"=====EPOCH {}=====\".format(epoch), flush=True)\n while step < (epoch + 1) * len(batches):\n step = m.global_step.eval()\n \n # Scheduled sampling decay\n if i != f:\n # Linear decay\n if FLAGS.linear_p_sample:\n p = min(f, i + step * (f - i) / total_train_steps)\n # Inverse sigmoid decay\n else:\n expk = float(exp(-step / k))\n p = min(f, i - delta_i + (f + delta_f) / (1 + k * expk))\n \n sess.run(tf.assign(m.p_sample, p))\n \n # Gradient descent and backprop\n train_inputs, train_labels = zip(*batches[step % len(batches)])\n train_fd = {m.inputs: train_inputs, m.labels: train_labels}\n \n # Wrap into function to measure running time\n def train_step():\n sess.run(m.train_step, feed_dict=train_fd)\n \n print(\"Global step {0} ({1}s)\".format(\n step, timeit.timeit(train_step, number=1)), flush=True)\n \n if step % FLAGS.num_steps_per_eval == 0:\n valid_inputs, valid_labels = dataset.get_valid_batch(m.batch_size)\n valid_fd = {m.inputs: valid_inputs, m.labels: valid_labels}\n \n # Run training and validation perplexity and samples\n \n lr, train_ppx, train_output, p_sample, train_ppx_summ = sess.run([\n m.lr,\n m.perplexity,\n m.output,\n m.p_sample,\n m.perplexity_summary,\n ], feed_dict=train_fd)\n \n valid_ppx, valid_output, infer_output, valid_ppx_summ = sess.run([\n m.perplexity,\n m.output,\n m.generative_output,\n m.perplexity_summary,\n ], feed_dict=valid_fd)\n \n # Convert 
data to UTF-8 strings for evaluation and display\n valid_inputs = untokenize_batch(dataset, valid_inputs)\n valid_labels = untokenize_batch(dataset, valid_labels)\n valid_output = untokenize_batch(dataset, valid_output)\n infer_output = untokenize_batch(dataset, infer_output)\n \n # Run evaluation metrics\n lev = levenshtein(infer_output, valid_labels)\n lev_density = levenshtein(infer_output, valid_labels, normalize=True)\n \n lev_summ = sess.run(\n m.lev_summary, feed_dict={m.lev: lev})\n lev_density_summ = sess.run(\n m.lev_density_summary, feed_dict={m.lev_density: lev_density})\n \n # Write summaries to TensorBoard\n m.train_writer.add_summary(train_ppx_summ, global_step=step)\n m.valid_writer.add_summary(valid_ppx_summ, global_step=step)\n m.valid_writer.add_summary(lev_summ, global_step=step)\n m.valid_writer.add_summary(lev_density_summ, global_step=step)\n \n # Display results to stdout\n print(\" lr:\", lr)\n print(\" p_sample:\", p_sample)\n print(\" train_ppx:\", train_ppx)\n print(\" valid_ppx:\", valid_ppx)\n print(\" lev:\", lev)\n print(\" lev_density:\", lev_density)\n print(\"Input:\")\n print(valid_inputs[0])\n print(\"Target:\")\n print(valid_labels[0])\n print(\"Output with ground truth:\")\n print(valid_output[0])\n print(\"Greedily decoded output:\")\n print(infer_output[0], flush=True)\n \n # Epoch about to be done - save, reshuffle the data and get new batches\n print(\"Saving model...\")\n m.save()\n print(\"Model saved. Resuming training...\", flush=True)\n batches = dataset.get_train_batches(m.batch_size)\n epoch += 1", "def train_word_generator(training, validation, words, word_to_id,\n init_scale=0.1, learning_rate=1.0, decay_after=4,\n learning_rate_decay=0.5, keep_prob=0.95,\n num_epochs=10, batch_size=20,\n num_layers=2, num_steps = 20,\n hidden_size=200, vocab_size = 3000, max_grad_norm = 5,\n optimization='gd',\n name='model'):\n # reshape training and validation according to batch_size\n training = np.array(training[:(\n len(training) // batch_size) * batch_size]).reshape(batch_size, -1)\n validation = np.array(validation).reshape(1, -1)\n\n # save words and word_to_id to be used in prediction\n with open(words_path, 'wb') as f:\n pickle.dump(words, f)\n with open(word_to_id_path, 'wb') as f:\n pickle.dump(word_to_id, f)\n\n param = {\n 'max_grad_norm': max_grad_norm,\n 'num_layers': num_layers,\n 'hidden_size': hidden_size,\n 'vocab_size': vocab_size,\n }\n\n # initialize graph\n initializer = tf.random_uniform_initializer(-init_scale, init_scale)\n\n with tf.name_scope(\"train\"):\n with tf.variable_scope(\"model\", reuse=None, initializer=initializer):\n m = lstm_model(is_training=True, num_steps=num_steps,\n optimization=optimization,\n batch_size=batch_size, keep_prob=keep_prob, **param)\n\n with tf.name_scope(\"valid\"):\n with tf.variable_scope(\"model\", reuse=True, initializer=initializer):\n mvalid = lstm_model(\n batch_size=1, num_steps=1, is_training=False, **param)\n\n train_writer = tf.summary.FileWriter('./summary/%s/train' % name)\n valid_writer = tf.summary.FileWriter('./summary/%s/valid' % name)\n\n saver = tf.train.Saver()\n\n # use just one thread\n session_conf = tf.ConfigProto(\n intra_op_parallelism_threads=1,\n inter_op_parallelism_threads=1)\n with tf.Session(config=session_conf) as session:\n session.run(tf.global_variables_initializer())\n for i in range(num_epochs):\n\n # update learning rate\n lr_decay = learning_rate_decay ** max(i +\n 1 - decay_after, 0.0)\n session.run(m['lr_update'], feed_dict={\n m['new_lr']: 
learning_rate * lr_decay})\n new_lr = session.run(m['lr'])\n print(\"Epoch: %d Learning rate: %.3f\" %\n (i + 1, new_lr))\n\n train_perplexity = run_epoch(training, session, m, train_op=m['train_op'],\n verbose=True)\n print(\"Epoch: %d Train Perplexity: %.3f\" %\n (i + 1, train_perplexity))\n epoch_perplexity_train = tf.Summary(value=[\n tf.Summary.Value(tag=\"perplexity\", simple_value=train_perplexity),\n ])\n lr_summary = tf.Summary(value=[\n tf.Summary.Value(tag=\"learning_rate\", simple_value=new_lr),\n ])\n train_writer.add_summary(epoch_perplexity_train, i + 1)\n train_writer.add_summary(lr_summary, i + 1)\n train_writer.flush()\n\n valid_perplexity = run_epoch(validation, session, mvalid)\n print(\"Epoch: %d Valid Perplexity: %.3f\" %\n (i + 1, valid_perplexity))\n epoch_perplexity_valid = tf.Summary(value=[\n tf.Summary.Value(tag=\"perplexity\", simple_value=valid_perplexity),\n ])\n valid_writer.add_summary(epoch_perplexity_valid, i + 1)\n valid_writer.flush()\n\n if i % 5 == 0:\n # save session every 5 epochs\n saver.save(session, \"%s/%s/model\" % (model_path, name), i)\n\n saver.save(session, \"%s/%s/model\" % (model_path, name))", "def run_RL_sync(mapname,n_trials = int, seed = int,alpha = 0.15, beta = 0.2, tau = 5, gamma = 0.9, max_steps = 1000, reward_size = 100):\n\n # Softmax can't be from external file, because multiprocessing messes up the seed values\n np.random.seed(seed)\n def softmax_action(action_weights = [], tau = int):\n action_indices = list(range(len(action_weights)))\n f = np.exp((action_weights - np.max(action_weights))/tau) # shift values\n action_prob = f / f.sum(axis=0)\n action_index = np.random.choice(action_indices, 1, p=action_prob)\n return action_index[0]\n\n srate = 500 #sample rate\n \n total_time = int(1.5*srate) #total timesteps or \"time the agent gets to think about moving\"\n\n time0 = time.perf_counter()\n\n print(\"Running the RL model but with sync !\")\n srate = 500 #sample rate\n \n total_time = int(1.5*srate) #total timesteps or \"time the agent gets to think about moving\"\n\n # Learning Parameters\n parameters = {\"alpha\": alpha\n ,\"beta\": beta\n ,\"gamma\": gamma\n ,\"tau\": tau}\n n_steps = max_steps\n n_trials = n_trials\n \n sub_reward_size = 0 # no subgoals!\n # # # # # # # # # # # # # #\n # # Setting up the map # #\n # # # # # # # # # # # # # #\n \"\"\" The agent begins in a walled grid and has to find \n the goal to obtain a reward.\"\"\"\n # Grid #\n states = create_grid_from_file(map_file=mapname,goal_location = [10,3],reward_size=reward_size,sub_reward_size=sub_reward_size)\n state_set = list(range(int(states.shape[0]*states.shape[1]))) #index of states\n\n #set of actions\n move_name=[\"UP\", \"R-UP\", \"RIGHT\",\"R-DOWN\",\"DOWN\",\"L-DOWN\", \"LEFT\" ,\"LEFT-UP\"] \n moves = [[-1, 0],[-1, 1], [0, 1], [1, 1], [1, 0],[1, -1], [0, -1], [-1, -1]]\n action_set = list(range(len(moves))) #index list\n\n # # # # # # # # # # # # # # # # # # # # # # # #\n # # Setting up the synchronization modules # #\n # # # # # # # # # # # # # # # # # # # # # # # #\n\n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n Processing module\n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\n # Initial variables #\n\n r2_max = 1 #maximum amplitude of nodes\n drift = .8 #rate of drift between coupling parameters\n\n cg_1 = (30/srate)*np.pi #gamma band coupling parameter for input information\n cg_2 = cg_1 + (drift/srate)*2*np.pi #gamma band coupling parameter for actions\n \n damp = 0.3 #damping parameter\n decay = 0.9 #decay parameter\n noise = 0.5 #noise 
parameter\n\n # Initial matrices #\n\n n_states = len(state_set)\n n_actions= len(action_set)\n\n #Setting up phase code neurons across entire task\n S_Phase = np.zeros((2,states.shape[0],states.shape[1],total_time)) #State phase code units\n A_Phase = np.zeros((2,n_actions,total_time)) #Action phase code units\n\n #Setting up rate code neurons across entire task\n S_Rate = np.zeros((states.shape[0],states.shape[1],total_time)) #State rate code units\n A_Rate = np.zeros((n_actions,total_time)) #Action rate code units\n #State-Action Weight Matrix\n W = np.zeros((states.shape[0],states.shape[1],n_actions))#*0.001 #initial state-action weights\n V = np.zeros((states.shape[0],states.shape[1]))#*0.001 #initial state weights\n\n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n Control module\n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\n # MFC #\n # Initial variables \n r2_MFC = 0.7 #maximum amplitude MFC node\n damp_MFC = 0.03 # damping parameter MFC\n acc_slope = 10 # MFC slope parameter ---> steepness of burst probability distribution\n ct = (5/srate)*2*np.pi #theta band coupling parameter for MFC\n\n #Setting up phase code nodes for the MFC\n MFC = np.zeros((2,total_time))\n #Setting up phase code neuron for MFC -> Bernoulli rate code\n Be = 0 \n \"\"\"When the be value as the rate code of MFC\n reaches certain threshold the MFC will send a burst to coupled neurons\"\"\"\n\n # LFC #\n #Module indicating which states should be initiate action-state synchronization\n LFC = np.zeros((states.shape[0],states.shape[1],n_steps))\n\n #Module that gives the right indices to synchronize\n LFC_sync = 0\n\n\n\n # # # # # # # # # # # # # #\n # # Simulation # #\n # # # # # # # # # # # # # #\n\n # Logging dependent variables\n Hit = np.zeros((total_time,n_steps,n_trials)) #log when there is a burst from the MFC\n # Goal_reach = np.zeros((n_steps,n_trials)) #record if goal is reached \n # Move = np.zeros((n_steps,n_trials)) #record move\n # Bernoulli = np.zeros((total_time,n_steps,n_trials)) #Logging the bernoulli process variables (should be in between -.8 and .8)\n # pred_err = np.zeros((states.shape[0],states.shape[1],n_steps,n_trials)) #logging the prediction error\n trial_length = np.zeros((n_trials))\n\n # Recording sync\n sync = np.zeros((n_states,n_actions,n_steps,n_trials)) \n\n \"\"\" L O O P \"\"\"\n\n exploration = 0\n exploration_intent =0\n sync_fail=0\n greedy=0\n for trial in range(n_trials):\n \"\"\"A trial is considered as each journey the actor makes until the goal\n or until it runs out of steps\"\"\"\n at_goal = False\n start_loc = [1,int(states.shape[1]-2)] #start in the top left\n step = 0 \n S_Phase[:,:,:,0] = (2*np.random.random_sample((2,states.shape[0],states.shape[1])))-1 # random starting points processing module\n A_Phase[:,:,0] = (2*np.random.random_sample((2,n_actions)))-1 # idem\n while not at_goal:\n #starting location at first trial\n if step == 0:\n current_loc = start_loc\n else:\n S_Phase[:,:,:,0] = S_Phase[:,:,:,total_time-1] # random starting points processing module\n A_Phase[:,:,0] = A_Phase[:,:,total_time-1] # idem\n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n Synchronization\n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\n \n #phase reset\n MFC[:,0]=np.ones((2))*r2_MFC \n\n\n # LFC setting instruction per step: each state is an input\n LFC[current_loc[0],current_loc[1],step] = 1\n\n # What we want is the lfc to indicate the state and then have the LFC sync pro actively select an action based on state action value maps\n \n action_to_sync = 
softmax_action(action_weights=W[current_loc[0],current_loc[1],:],tau=10)\n if action_to_sync in np.where(W[current_loc[0],current_loc[1],:]== max(W[current_loc[0],current_loc[1],:]))[0]:\n greedy+=0\n else:\n exploration_intent+=1\n \n \n #Which action does LFC sync to current state\n LFC_sync = int(action_to_sync)\n LFC_desync = list(range(len(moves)))\n LFC_desync.pop(LFC_sync) \n\n # The actor makes the move #\n for t in range(total_time-1):\n\n \n #Update phase code neurons for actions and states in processing module\n #State phase code neurons \n S_Phase[:,:,:,t+1] = update_phase(nodes=S_Phase[:,:,:,t], grid = True, radius=r2_max, damp = damp, coupling = cg_1,multiple=True )\n \n #Action phase code neurons\n A_Phase[:,:,t+1] = update_phase(nodes=A_Phase[:,:,t], grid = False, radius=r2_max, damp = damp, coupling = cg_2,multiple=True )\n\n #Update phase code untis of MFC\n MFC[:,t+1] = update_phase(nodes=MFC[:,t], grid = False, radius=r2_MFC, damp=damp_MFC, coupling=ct,multiple=False)\n\n #MFC rate code neuron-> Bernoulli process\n\n Be = 1/(1 + np.exp(-acc_slope*(MFC[0,t]-1))) # Bernoulli process \n #Bernoulli[time,step,trial] = Be # logging Be value\n\n p = random.random()\n\n if p < Be:\n\n Gaussian = np.random.normal(size = [1,2]) #noise factor as normal distribution\n #Hit[tijd,step,trial] = 1\n \n \n x, y = current_loc[1], current_loc[0]\n\n #the LFC decides which state is paired with which actions\n\n if LFC[y,x,step]:\n #The state the actor is in receives a burst because it is the only input\n S_Phase[:,y,x,t+1] = decay*S_Phase[:,y,x,t] + Gaussian\n\n # and all the actions that are to be synchronized to that state receive a burst\n if type(LFC_sync) is int:\n A_Phase[:,LFC_sync,t+1] = decay*A_Phase[:,LFC_sync,t] + Gaussian\n \n # Desynchronize all other actions !\n for node in LFC_desync:\n A_Phase[:,int(node),t+1] = decay*A_Phase[:,int(node),t] - Gaussian*noise\n\n #Updating rate code units\n #Only the rate code neuron of a single state is updated because the actor can only be in one place at the same time\n S_Rate[current_loc[0],current_loc[1],t]= (1/(1+np.exp(-5*S_Phase[0,current_loc[0],current_loc[1],t]-0.6)))\n A_Rate[:,t]=(S_Rate[current_loc[0],current_loc[1],t]*(W[current_loc[0],current_loc[1],:]+1))*(1/(1+np.exp(-5*A_Phase[0,:,t]-0.6)))\n #A_Rate[:,t]=(S_Rate[current_loc[0],current_loc[1],t])*(1/(1+np.exp(-5*A_Phase[0,:,t]-0.6)))\n \n # select action\n action_index = int(np.argmax(np.sum(A_Rate[:,:],axis=1)))\n if action_index in np.where(W[current_loc[0],current_loc[1],:] == max(W[current_loc[0],current_loc[1],:]))[0]:\n greedy+=1\n else:\n exploration+=1\n\n if action_index != LFC_sync:\n sync_fail+=1\n \n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n Learning\n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\n #update location\n new_loc= update_location(grid = states, loc=current_loc,move = moves[action_index])\n\n #log coordinates for weight matrices\n coordinates = [current_loc[0], current_loc[1], new_loc[0], new_loc[1], action_index] #location coordinates\n\n #update weights according to TD-learning\n V, W, delta, at_goal = update_weights(param=parameters, index=coordinates, V=V, W=W, states=states, reward_size = reward_size)\n\n\n #update_location\n current_loc = new_loc\n step+=1\n if step ==n_steps:\n #print(\"Agent did not reach goal\")\n break\n \n trial_length[trial] = step \n \n print(\"I took {0} exploratory steps and {1} greedy steps this simulation\".format(exploration,greedy))\n print(\"I intended to explore {} times\".format(exploration_intent))\n print(\"Sync of correct action 
failed {} times\".format(sync_fail))\n print(\"In this sim I took a total {} steps\".format(np.sum(trial_length)))\n \n time1 = time.perf_counter()\n print(\"For the second model I took {} minutes\".format((time1-time0)/60))\n return trial_length, V", "def train(self): \n self.current_step = 0\n self.log = log_setup(self.args)\n self.current_gamma = self.args.initial_gamma\n with tf.Session(graph = self.computation_graph) as session:\n self.init.run()\n print(\"Model Initialized.\")\n for repetition in range(0, self.args.epochs):\n\n random.shuffle(self.nodes)\n self.optimization_time = 0 \n self.average_loss = 0\n\n epoch_printer(repetition)\n for i in tqdm(range(int(len(self.edges)/self.args.batch_size))):\n self.current_step = self.current_step + 1\n self.current_gamma = gamma_incrementer(self.current_step, self.args.initial_gamma, self.current_gamma, self.true_step_size)\n feed_dict = self.feed_dict_generator(self.edges[i*self.args.batch_size:(i+1)*self.args.batch_size], self.current_step, self.current_gamma)\n start = time.time()\n _, loss = session.run([self.train_op , self.loss], feed_dict=feed_dict)\n end = time.time()\n self.optimization_time = self.optimization_time + (end-start)\n self.average_loss = self.average_loss + loss\n\n print(\"\")\n self.average_loss = self.average_loss/self.vocab_size\n self.final_embeddings = self.factorization_layer.embedding_matrix.eval()\n if \"CODE\" in self.args.model: \n self.c_means = self.cluster_layer.cluster_means.eval()\n self.modularity_score, assignments = neural_modularity_calculator(self.graph, self.final_embeddings, self.c_means)\n else:\n self.modularity_score, assignments = classical_modularity_calculator(self.graph, self.final_embeddings, self.args)\n self.log = log_updater(self.log, repetition, self.average_loss, self.optimization_time, self.modularity_score)\n tab_printer(self.log)\n if \"CODE\" in self.args.model: \n initiate_dump_grafcode(self.log, assignments, self.args, self.final_embeddings, self.c_means)\n else:\n initiate_dump_graf(self.log, assignments, self.args, self.final_embeddings)", "def generate(\n seeds=10,\n param_num_nodes=7,\n mode='train',\n param_dim=10,\n param_sel=100,\n param_mu=10,\n param_br=0.05,\n param_activity_wt=None,\n A=None,\n sp_to_id=None,\n min_coord=None,\n max_coord=None,\n org_pts=None,\n ):\n global dim, sel, mu, br, activity_wt, tree_lc, tree_rc, num_nodes\n\n dim=param_dim\n sel=param_sel\n mu=param_mu\n br=param_br\n activity_wt=param_activity_wt\n num_nodes = param_num_nodes\n\n sp_root = 0\n tree = None\n\n if mode == 'train':\n tree, tree_lc, tree_rc = generate_tree(sp_root, num_nodes)\n if param_activity_wt is None:\n # weights for the linear activity function\n num_wts = int(((dim * (dim + 1))/2) + 1)\n activity_wt = np.random.normal(0, 1, num_wts)\n\n if org_pts is None:\n org_pts = []\n # simulate data points\n # format: exampleID, species, values\n # region, species, coord1, coord2, ...., activity_value\n\n for i in tqdm(range(int(seeds))):\n pt_id = i\n\n # pick a random point of d-dimension\n rand_pt = np.random.uniform(min_coord, max_coord, dim)\n curr_pt = np.append([pt_id, sp_root], rand_pt)\n curr_activity = get_activity(modify_pt(rand_pt), activity_wt)\n # print('curr_pt:', curr_pt, 'curr_activity:', curr_activity); exit(0)\n org_pts.append(np.append(curr_pt, curr_activity))\n\n generated_points = []\n full_org_pts = []\n\n if mode == 'train':\n pool = Pool(16)\n sample_bag = pool.map(generate_bag, org_pts)\n for item in sample_bag:\n for val in item:\n val = list(val)\n 
full_org_pts.append(val)\n generated_points.append(val[:2]+modify_pt(val[2:-1])+[val[-1]])\n else:\n for val in org_pts:\n val = list(val)\n generated_points.append(val[:2]+modify_pt(val[2:-1])+[val[-1]])\n\n return generated_points, activity_wt, org_pts, full_org_pts, tree", "def simulate_graph(seed, cluster_sizes, del_factor, ins_factor):\n rand.seed(seed)\n cluster_boundaries = np.cumsum(cluster_sizes)\n print(\"#seed:\", seed)\n print(\"#deletion factor:\", del_factor)\n print(\"#insertion factor:\", ins_factor)\n optimal_costs = np.array([0])\n for c in range(0, len(cluster_sizes)-1):\n n_c = cluster_sizes[c+1]\n offset_c = cluster_boundaries[c]\n edges_c = generate_edges(n_c, offset_c)\n disturb_cluster(n_c, offset_c, edges_c, del_factor, optimal_costs)\n additional_edges(cluster_boundaries, ins_factor, optimal_costs)\n print(\"#optimal costs:\", optimal_costs)", "def run_greedy_width(self):\n print(\"\\nRunning greedy width to find an initial path solution.\")\n for arc in self.arc_info:\n self.arc_info[arc][\"unexplained_flow\"] =\\\n self.arc_info[arc][\"weight\"]\n tries = 0\n while self.unexplained_flow():\n tries += 1\n # don't keep going forever\n assert tries < 1000\n self.run_dijkstra()", "def run_metropolis(self, links=None):\n if links is None:\n links = self.links\n if len(links.shape) == 1:\n links = tf.reshape(links, self.links.shape)\n # relax the initial configuration\n eq_steps = 1000\n for step in range(eq_steps):\n for site in self.iter_sites():\n for d in range(self.dim):\n _ = self._update_link(self, site, d)\n\n num_acceptances = 0 # keep track of acceptance rate\n for step in range(10000):\n for site in self.iter_sites():\n for d in range(self.dim):\n num_acceptances += self._update_link(self, site, d)", "def random_walker_generator(rows, cols, negative=False):\n attempts = 0\n while True:\n steps = 0\n found_goal = False\n grid = np.zeros((rows, cols))\n # start on bottom row\n current = (rows - 1, random.randint(0, cols - 1))\n grid[current] = 1\n steps += 1\n visited = set(current)\n\n connection = 0\n\n neighbors = get_neighbors(current, grid, visited, similar_cells={1})\n while len(neighbors) > 0:\n for (neigh_x, neigh_y) in set(neighbors):\n # lookahead for neighbors neighbors\n lookahead = get_neighbors(\n (neigh_x, neigh_y), grid, visited, similar_cells={1})\n if len(lookahead) < 3: # contains neighbors with 1's\n # edge cases\n if neigh_x == 0 and random.random() >= 0.25:\n # chance of reaching goal at top\n continue\n elif ((neigh_y == 0 or neigh_y == rows - 1) and\n len(lookahead) == 2):\n continue\n else:\n neighbors.remove((neigh_x, neigh_y))\n\n if len(neighbors) == 0:\n # print (\"no more neighbors to pick\")\n break\n\n # time.sleep(0.15)\n # os.system(\"clear\")\n # draw_grid(grid)\n\n current = random.sample(neighbors, 1)[0] # pick a random neighbor\n # print (\"selected: \", current)\n grid[current] = 1\n steps += 1\n visited.add(current)\n if current[0] == 0: # top row\n # print (\"top row reached\")\n found_goal = True\n break\n neighbors = get_neighbors(current, grid, visited, similar_cells={1})\n\n if (found_goal and not negative) or (not found_goal and negative):\n # print (\"Succeeded after %d attempts\" % attempts)\n attempts = 0\n grid = apply_noise(grid)\n\n # hack\n # override above step counter, because the random noise\n # might have added more, shorter connections\n # we do this because network was picking up patterns\n # from making random noise not entirely random\n steps, connected = check_connections_length(grid)\n if 
connected and negative:\n continue\n\n # randomly flip grid upside down\n if random.random() <= 0.5:\n grid = np.flipud(grid)\n\n yield grid, steps, connected\n else:\n attempts += 1", "def simulate_time_rescaling(runtime,\n kernel=functions.kernel_zhao,\n p=functions.infectious_rate_tweets,\n dt=0.01,\n follower_pool=None,\n int_fol_cnt=10000,\n follower_mean=200,\n split=0.015):\n events = [(0, int_fol_cnt)]\n ti = 0\n print_cnt = 0\n\n while 0 <= ti < runtime and len(events) < 4500:\n X = rand.exponential()\n tj = solve_integral(ti, X, kernel, p, events, dt, runtime)\n if follower_pool is not None:\n fol = rand.choice(follower_pool)\n else:\n fol = rand_followers_extended(int_fol_cnt, follower_mean, split)\n if tj > 0:\n events.append((tj, fol))\n ti = tj\n if print_cnt % 100 == 0:\n print(\"Simulating [%f%%]...\" % (ti / runtime * 100), flush=True)\n print_cnt += 1\n\n print(\"\\nOver %d events generated\" % len(events))\n\n return events", "def training_graph(self, input_data, input_labels, random_seed):\n # Count extremely random stats.\n (node_sums, node_squares, splits_indices, splits_sums,\n splits_squares, totals_indices, totals_sums,\n totals_squares, input_leaves) = (\n self.training_ops.count_extremely_random_stats(\n input_data, input_labels, self.variables.tree,\n self.variables.tree_thresholds,\n self.variables.node_to_accumulator_map,\n self.variables.candidate_split_features,\n self.variables.candidate_split_thresholds,\n num_classes=self.params.num_output_columns,\n regression=self.params.regression))\n node_update_ops = []\n node_update_ops.append(\n tf.assign_add(self.variables.node_sums, node_sums))\n\n splits_update_ops = []\n splits_update_ops.append(self.training_ops.scatter_add_ndim(\n self.variables.candidate_split_sums,\n splits_indices, splits_sums))\n splits_update_ops.append(self.training_ops.scatter_add_ndim(\n self.variables.accumulator_sums, totals_indices,\n totals_sums))\n\n if self.params.regression:\n node_update_ops.append(tf.assign_add(self.variables.node_squares,\n node_squares))\n splits_update_ops.append(self.training_ops.scatter_add_ndim(\n self.variables.candidate_split_squares,\n splits_indices, splits_squares))\n splits_update_ops.append(self.training_ops.scatter_add_ndim(\n self.variables.accumulator_squares, totals_indices,\n totals_squares))\n\n # Sample inputs.\n update_indices, feature_updates, threshold_updates = (\n self.training_ops.sample_inputs(\n input_data, self.variables.node_to_accumulator_map,\n input_leaves, self.variables.candidate_split_features,\n self.variables.candidate_split_thresholds,\n split_initializations_per_input=(\n self.params.split_initializations_per_input),\n split_sampling_random_seed=random_seed))\n update_features_op = tf.scatter_update(\n self.variables.candidate_split_features, update_indices,\n feature_updates)\n update_thresholds_op = tf.scatter_update(\n self.variables.candidate_split_thresholds, update_indices,\n threshold_updates)\n\n # Calculate finished nodes.\n with tf.control_dependencies(splits_update_ops):\n children = tf.squeeze(tf.slice(self.variables.tree, [0, 0], [-1, 1]),\n squeeze_dims=[1])\n is_leaf = tf.equal(LEAF_NODE, children)\n leaves = tf.to_int32(tf.squeeze(tf.where(is_leaf), squeeze_dims=[1]))\n finished = self.training_ops.finished_nodes(\n leaves, self.variables.node_to_accumulator_map,\n self.variables.accumulator_sums,\n num_split_after_samples=self.params.split_after_samples)\n\n # Update leaf scores.\n # TODO(gilberth): Optimize this. 
It currently calculates counts for\n # every non-fertile leaf.\n with tf.control_dependencies(node_update_ops):\n def dont_update_leaf_scores():\n return self.variables.non_fertile_leaf_scores\n\n def update_leaf_scores_regression():\n sums = tf.gather(self.variables.node_sums,\n self.variables.non_fertile_leaves)\n squares = tf.gather(self.variables.node_squares,\n self.variables.non_fertile_leaves)\n new_scores = self._variance(sums, squares)\n return tf.assign(self.variables.non_fertile_leaf_scores, new_scores)\n\n def update_leaf_scores_classification():\n counts = tf.gather(self.variables.node_sums,\n self.variables.non_fertile_leaves)\n new_scores = self._weighted_gini(counts)\n return tf.assign(self.variables.non_fertile_leaf_scores, new_scores)\n\n # Because we can't have tf.self.variables of size 0, we have to put in a\n # garbage value of -1 in there. Here we check for that so we don't\n # try to index into node_per_class_weights in a tf.gather with a negative\n # number.\n update_nonfertile_leaves_scores_op = tf.cond(\n tf.less(self.variables.non_fertile_leaves[0], 0),\n dont_update_leaf_scores,\n update_leaf_scores_regression if self.params.regression else\n update_leaf_scores_classification)\n\n # Calculate best splits.\n with tf.control_dependencies(splits_update_ops):\n split_indices = self.training_ops.best_splits(\n finished, self.variables.node_to_accumulator_map,\n self.variables.candidate_split_sums,\n self.variables.candidate_split_squares,\n self.variables.accumulator_sums,\n self.variables.accumulator_squares,\n regression=self.params.regression)\n\n # Grow tree.\n with tf.control_dependencies([update_features_op, update_thresholds_op]):\n (tree_update_indices, tree_children_updates,\n tree_threshold_updates, tree_depth_updates, new_eot) = (\n self.training_ops.grow_tree(\n self.variables.end_of_tree, self.variables.tree_depths,\n self.variables.node_to_accumulator_map, finished, split_indices,\n self.variables.candidate_split_features,\n self.variables.candidate_split_thresholds))\n tree_update_op = tf.scatter_update(\n self.variables.tree, tree_update_indices, tree_children_updates)\n threhsolds_update_op = tf.scatter_update(\n self.variables.tree_thresholds, tree_update_indices,\n tree_threshold_updates)\n depth_update_op = tf.scatter_update(\n self.variables.tree_depths, tree_update_indices, tree_depth_updates)\n\n # Update fertile slots.\n with tf.control_dependencies([update_nonfertile_leaves_scores_op,\n depth_update_op]):\n (node_map_updates, accumulators_cleared, accumulators_allocated,\n new_nonfertile_leaves, new_nonfertile_leaves_scores) = (\n self.training_ops.update_fertile_slots(\n finished, self.variables.non_fertile_leaves,\n self.variables.non_fertile_leaf_scores,\n self.variables.end_of_tree, self.variables.tree_depths,\n self.variables.accumulator_sums,\n self.variables.node_to_accumulator_map,\n max_depth=self.params.max_depth,\n regression=self.params.regression))\n\n # Ensure end_of_tree doesn't get updated until UpdateFertileSlots has\n # used it to calculate new leaves.\n gated_new_eot, = tf.tuple([new_eot], control_inputs=[new_nonfertile_leaves])\n eot_update_op = tf.assign(self.variables.end_of_tree, gated_new_eot)\n\n updates = []\n updates.append(eot_update_op)\n updates.append(tree_update_op)\n updates.append(threhsolds_update_op)\n updates.append(tf.assign(\n self.variables.non_fertile_leaves, new_nonfertile_leaves,\n validate_shape=False))\n updates.append(tf.assign(\n self.variables.non_fertile_leaf_scores,\n 
new_nonfertile_leaves_scores, validate_shape=False))\n\n updates.append(tf.scatter_update(\n self.variables.node_to_accumulator_map,\n tf.squeeze(tf.slice(node_map_updates, [0, 0], [1, -1]),\n squeeze_dims=[0]),\n tf.squeeze(tf.slice(node_map_updates, [1, 0], [1, -1]),\n squeeze_dims=[0])))\n\n cleared_and_allocated_accumulators = tf.concat(\n 0, [accumulators_cleared, accumulators_allocated])\n # Calculate values to put into scatter update for candidate counts.\n # Candidate split counts are always reset back to 0 for both cleared\n # and allocated accumulators. This means some accumulators might be doubly\n # reset to 0 if the were released and not allocated, then later allocated.\n split_values = tf.tile(\n tf.expand_dims(tf.expand_dims(\n tf.zeros_like(cleared_and_allocated_accumulators, dtype=tf.float32),\n 1), 2),\n [1, self.params.num_splits_to_consider, self.params.num_output_columns])\n updates.append(tf.scatter_update(\n self.variables.candidate_split_sums,\n cleared_and_allocated_accumulators, split_values))\n if self.params.regression:\n updates.append(tf.scatter_update(\n self.variables.candidate_split_squares,\n cleared_and_allocated_accumulators, split_values))\n\n # Calculate values to put into scatter update for total counts.\n total_cleared = tf.tile(\n tf.expand_dims(\n tf.neg(tf.ones_like(accumulators_cleared, dtype=tf.float32)), 1),\n [1, self.params.num_output_columns])\n total_reset = tf.tile(\n tf.expand_dims(\n tf.zeros_like(accumulators_allocated, dtype=tf.float32), 1),\n [1, self.params.num_output_columns])\n accumulator_updates = tf.concat(0, [total_cleared, total_reset])\n updates.append(tf.scatter_update(\n self.variables.accumulator_sums,\n cleared_and_allocated_accumulators, accumulator_updates))\n if self.params.regression:\n updates.append(tf.scatter_update(\n self.variables.accumulator_squares,\n cleared_and_allocated_accumulators, accumulator_updates))\n\n # Calculate values to put into scatter update for candidate splits.\n split_features_updates = tf.tile(\n tf.expand_dims(\n tf.neg(tf.ones_like(cleared_and_allocated_accumulators)), 1),\n [1, self.params.num_splits_to_consider])\n updates.append(tf.scatter_update(\n self.variables.candidate_split_features,\n cleared_and_allocated_accumulators, split_features_updates))\n\n return tf.group(*updates)", "def generate_dfs_second_order_random_walk(graph, alias_nodes, alias_edges, walk_length=10, start_node=None):\n if start_node == None:\n start_node = np.random.choice(graph.nodes())\n walk = [start_node]\n \n prev = None\n cur = start_node\n while len(walk) < walk_length:\n cur_nbrs = list(graph.neighbors(cur))\n if len(cur_nbrs) > 0:\n if prev is None:\n # sample the next node based on alias_nodes\n prev, cur = cur, cur_nbrs[__alias_draw(*alias_nodes[cur])]\n else:\n # sample the next node based on alias_edges\n prev, cur = cur, cur_nbrs[__alias_draw(*alias_edges[(prev, cur)])]\n walk.append(cur)\n else:\n break\n\n return walk", "def demo_grading_graph(hunter_bot, target_bot, next_move_fcn, OTHER = None):\n max_distance = 0.98 * target_bot.distance # 0.98 is an example. 
It will change.\n separation_tolerance = 0.02 * target_bot.distance # hunter must be within 0.02 step size to catch target\n caught = False\n ctr = 0\n #For Visualization\n import turtle\n window = turtle.Screen()\n window.bgcolor('white')\n chaser_robot = turtle.Turtle()\n chaser_robot.shape('arrow')\n chaser_robot.color('blue')\n chaser_robot.resizemode('user')\n chaser_robot.shapesize(0.3, 0.3, 0.3)\n broken_robot = turtle.Turtle()\n broken_robot.shape('turtle')\n broken_robot.color('green')\n broken_robot.resizemode('user')\n broken_robot.shapesize(0.3, 0.3, 0.3)\n size_multiplier = 15.0 #change size of animation\n chaser_robot.hideturtle()\n chaser_robot.penup()\n chaser_robot.goto(hunter_bot.x*size_multiplier, hunter_bot.y*size_multiplier-100)\n chaser_robot.showturtle()\n broken_robot.hideturtle()\n broken_robot.penup()\n broken_robot.goto(target_bot.x*size_multiplier, target_bot.y*size_multiplier-100)\n broken_robot.showturtle()\n measuredbroken_robot = turtle.Turtle()\n measuredbroken_robot.shape('circle')\n measuredbroken_robot.color('red')\n measuredbroken_robot.penup()\n measuredbroken_robot.resizemode('user')\n measuredbroken_robot.shapesize(0.1, 0.1, 0.1)\n broken_robot.pendown()\n chaser_robot.pendown()\n\n prediction = turtle.Turtle()\n prediction.shape('arrow')\n prediction.color('pink')\n prediction.resizemode('user')\n prediction.shapesize(0.2, 0.2, 0.2)\n prediction.penup()\n\n meeting = turtle.Turtle()\n meeting.shape('circle')\n meeting.color('red')\n meeting.resizemode('user')\n meeting.shapesize(0.3, 0.3, 0.3)\n meeting.penup()\n #End of Visualization\n # We will use your next_move_fcn until we catch the target or time expires.\n while not caught and ctr < 1000:\n # Check to see if the hunter has caught the target.\n hunter_position = (hunter_bot.x, hunter_bot.y)\n target_position = (target_bot.x, target_bot.y)\n separation = distance_between(hunter_position, target_position)\n if separation < separation_tolerance:\n print(\"You got it right! 
It took you \", ctr, \" steps to catch the target.\")\n caught = True\n\n # The target broadcasts its noisy measurement\n target_measurement = target_bot.sense()\n\n # This is where YOUR function will be called.\n turning, distance, OTHER = next_move_fcn(hunter_position, hunter_bot.heading, target_measurement, max_distance, OTHER)\n position_guess = OTHER['meeting_position']\n next_target_guess = OTHER['target_position']\n\n # Don't try to move faster than allowed!\n if distance > max_distance:\n distance = max_distance\n\n # We move the hunter according to your instructions\n hunter_bot.move(turning, distance)\n\n # The target continues its (nearly) circular motion.\n target_bot.move_in_circle()\n #Visualize it\n measuredbroken_robot.setheading(target_bot.heading*180/pi)\n measuredbroken_robot.goto(target_measurement[0]*size_multiplier, target_measurement[1]*size_multiplier-100)\n measuredbroken_robot.stamp()\n broken_robot.setheading(target_bot.heading*180/pi)\n broken_robot.goto(target_bot.x*size_multiplier, target_bot.y*size_multiplier-100)\n chaser_robot.setheading(hunter_bot.heading*180/pi)\n chaser_robot.goto(hunter_bot.x*size_multiplier, hunter_bot.y*size_multiplier-100)\n\n prediction.setheading(target_bot.heading*180/pi)\n prediction.goto(next_target_guess[0]*size_multiplier, next_target_guess[1]*size_multiplier-100)\n prediction.stamp()\n\n meeting.clear()\n meeting.setheading(target_bot.heading*180/pi)\n meeting.goto(position_guess[0]*size_multiplier, position_guess[1]*size_multiplier-100)\n meeting.stamp()\n #End of visualization\n\n ctr += 1\n if ctr >= 1000:\n print(\"It took too many steps to catch the target.\")\n return caught", "def evaluate_ngrams(eval_dataset, trigram_counts, bigram_counts, unigram_counts, train_token_count, lambda1, lambda2):\n perplexity = 0\n\n ### YOUR CODE HERE\n def calc_prob(sentense, i, word, trigram_counts, bigram_counts, unigram_counts, train_token_count, model):\n prob = 0.0\n prev_word = sentense[i - 1]\n prev_to_prev_word = sentense[i - 2]\n\n if model == \"unigram\":\n if word in unigram_counts:\n prob = (unigram_counts[word] + 0.0) / train_token_count\n else:\n prob = (unigram_counts[word_to_num['UUUNKKK']] + 0.0) / \\\n train_token_count\n\n if model == \"bigram\":\n if (prev_word, word) in bigram_counts:\n prob = (bigram_counts[(prev_word, word)] + 0.0) / \\\n unigram_counts[prev_word]\n # print(num_to_word[prev_word] ,num_to_word[word])\n # print(bigram_counts[(prev_word, word)])\n # print(unigram_counts[prev_word])\n # print(\"---------------------------\")\n else:\n prob = 0.0\n\n if model == \"trigram\":\n if (prev_to_prev_word, prev_word, word) in trigram_counts:\n prob = (trigram_counts[(prev_to_prev_word, prev_word, word)] + 0.0) \\\n / bigram_counts[(prev_to_prev_word, prev_word)]\n # / bigram_counts[(prev_word, word)] #this according to lecture notes slide 27\n else:\n prob = 0.0\n\n return prob\n\n l = 0\n num_of_words = 0\n\n ##########3\n better_than_chance = 0\n ###########\n\n for sentense in eval_dataset:\n for i, word in enumerate(sentense[2:]):\n num_of_words += 1\n prob = lambda1 * calc_prob(sentense, i + 2, word, trigram_counts, bigram_counts, unigram_counts,\n train_token_count, \"trigram\") + \\\n lambda2 * calc_prob(sentense, i + 2, word, trigram_counts, bigram_counts, unigram_counts,\n train_token_count, \"bigram\") + \\\n (1 - lambda1 - lambda2) * calc_prob(sentense, i + 2, word, trigram_counts, bigram_counts,\n unigram_counts, train_token_count, \"unigram\")\n ######################################\n if prob > 
(1.0 / vocabsize):\n better_than_chance += 1\n #########################\n l += np.log2(prob)\n l /= num_of_words\n perplexity = 2 ** -l\n\n print(\"better_than_chance:\", (better_than_chance + 0.0) / num_of_words)\n\n ### END YOUR CODE\n return perplexity", "def generate_dfs_first_order_random_walk(graph, alias_nodes, walk_length=10, start_node=None):\n if start_node == None:\n start_node = np.random.choice(graph.nodes())\n walk = [start_node]\n cur = start_node\n while len(walk) < walk_length:\n cur_nbrs = list(graph.neighbors(cur))\n if len(cur_nbrs) > 0:\n # sample the next node based on alias_nodes\n cur = cur_nbrs[__alias_draw(*alias_nodes[cur])]\n walk.append(cur)\n else:\n break\n\n return walk", "def simulate_random_walk (G, damping, max_jumps):\n\n results = []\n nodes = [] # keep nodes\n current_node = random.randrange(N)\n while not G.has_node(current_node):\n current_node = random.randrange(N)\n\n j = 0\n while (j < max_jumps):\n previous_node = current_node\n jump_decision = random.uniform(0, 1)\n\n if jump_decision < damping or G.out_degree(current_node) == 0:\n # make a jump\n current_node = random.randrange(N)\n while not G.has_node(current_node):\n current_node = random.randrange(N)\n\n j += 1\n try:\n distance = nx.astar_path_length(G, previous_node, \\\n current_node, weight = 'weight')\n # distance intervals 1h traveling\n results.append(distance)\n nodes.append(previous_node)\n except nx.NetworkXNoPath: continue\n\n else:\n # move to neighbor node\n incident = G.out_edges([current_node], data = False)\n distribution = [ G.get_edge_data(e[0], e[1])['transition'] for e in incident ]\n xk = np.arange(len(incident))\n generator = stats.rv_discrete(values = (xk, distribution))\n current_node = incident[generator.rvs()][1]\n\n return results, nodes", "def fixed_steps_trajectories(self, noise=0, nt=1, ll=0.1, limit=None):\n\n print('Generating Trajectories...')\n for i in tqdm.tqdm(range(self.ntraj)):\n\n if self.hop_distribution == 'gaussian' or self.hop_distribution == 'Gaussian':\n z_position = np.cumsum(\n np.random.normal(loc=0, scale=self.hop_sigma, size=self.nsteps)) # accumulate gaussian steps\n else:\n sys.exit('Please enter a valid hop distance probability distribution')\n\n self.trajectories[i, :, 1] = z_position - z_position[0] # make initial z equal to 0\n\n # hop at random time intervals according to one of the following PDFs\n if self.dwell_distribution == 'exponential':\n time = sampling.random_exponential_dwell(self.lamb, size=self.nsteps)\n elif self.dwell_distribution == 'power':\n time = sampling.random_power_law_dwell(1 + self.alpha, size=self.nsteps, ll=ll, limit=limit)\n else:\n sys.exit('Please enter a valid dwell time probability distribution')\n\n time = np.cumsum(time) # accumulate dwell times\n time -= time[0]\n\n self.trajectories[i, :, 0] = time\n\n # Add to array with all corners of hop distribution for visualization purposes\n self.trajectory_hops[i, 1::2, 0] = time[1:]\n self.trajectory_hops[i, 2::2, 0] = time[1:]\n\n self.trajectory_hops[i, ::2, 1] = self.trajectories[i, :, 1]\n self.trajectory_hops[i, 1:-1:2, 1] = self.trajectories[i, :-1, 1]\n self.trajectory_hops[i, -1, 1] = self.trajectories[i, -1, 1]\n\n print('Interpolating Trajectories...')\n # make uniform time intervals with the same interval for each simulated trajectory\n max_time = np.min(self.trajectories[:, -1, 0])\n self.time_uniform = np.linspace(0, max_time, self.nsteps*10)\n\n if nt > 1:\n # self.pbar = tqdm.tqdm(total=self.ntraj)\n pool = Pool(nt)\n for i, t in 
enumerate(pool.map(self.interpolate_trajectories, range(self.ntraj))):\n self.z_interpolated[i, :] = t\n else:\n for t in tqdm.tqdm(range(self.ntraj)):\n self.z_interpolated[t, :] = self.trajectories[t, np.digitize(self.time_uniform,\n self.trajectories[t, :, 0], right=False) - 1, 1]\n #self.z_interpolated[t, :] = self.interpolate_trajectories(t, noise=noise)", "def BuildCoreGraph(self):\n # Input ids, with dynamic shape depending on input.\n # Should be shape [batch_size, max_time] and contain integer word indices.\n self.input_w_ = tf.placeholder(tf.int32, [None, None], name=\"w\")\n #tf.placeholder(tf.int32, [self.batch_size_, self.max_time_], name=\"w\")\n\n # Initial hidden state. You'll need to overwrite this with cell.zero_state\n # once you construct your RNN cell.\n self.initial_h_ = None\n #tf.placeholder(tf.int32, [self.H,], name=\"h_i\")\n #tf.Variable(tf.random_normal([H,]), name=\"h_i\")\n\n # Final hidden state. You'll need to overwrite this with the output from\n # tf.nn.dynamic_rnn so that you can pass it in to the next batch (if\n # applicable).\n self.final_h_ = None\n #tf.placeholder(tf.int32, [self.H,], name=\"h_f\")\n #tf.Variable(tf.random_normal([H,]), name=\"h_f\")\n\n # Output logits, which can be used by loss functions or for prediction.\n # Overwrite this with an actual Tensor of shape\n # [batch_size, max_time, V].\n self.logits_ = None\n #tf.placeholder(tf.int32, [self.batch_size_, self.max_time_, self.V], name=\"logits\")\n #tf.Variable(tf.random_normal([self.batch_size_, self.max_time_, self.V]), name=\"logits\")\n\n # Should be the same shape as inputs_w_\n self.target_y_ = tf.placeholder(tf.int32, [None, None], name=\"y\")\n #tf.placeholder(tf.int32, [None, None], name=\"y\")\n\n # Replace this with an actual loss function\n self.loss_ = None\n #tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.labels_, logits=self.logits_),0)#don't forget last parameter\n\n # Get dynamic shape info from inputs\n with tf.name_scope(\"batch_size\"):\n self.batch_size_ = tf.shape(self.input_w_)[0]\n with tf.name_scope(\"max_time\"):\n self.max_time_ = tf.shape(self.input_w_)[1]\n\n # Get sequence length from input_w_.\n # TL;DR: pass this to dynamic_rnn.\n # This will be a vector with elements ns[i] = len(input_w_[i])\n # You can override this in feed_dict if you want to have different-length\n # sequences in the same batch, although you shouldn't need to for this\n # assignment.\n self.ns_ = tf.tile([self.max_time_], [self.batch_size_, ], name=\"ns\")#update this for project\n\n #### YOUR CODE HERE ####\n # See hints in instructions!\n\n # Construct embedding layer\n #self.W_in_ = tf.get_variable(\"W_in\", shape=[self.V, self.H], \n #initializer = tf.random_uniform_initializer(minval=-1.0, maxval=1.0))\n #self.x_ = tf.nn.embedding_lookup(self.W_in_, self.input_w_)\n \n #self.W_in_ = tf.get_variable(tf.random_uniform([self.V, self.H], -1.0, 1.0), name=\"W_in\")\n #Variable(tf.random_uniform([V, M], -1.0, 1.0), name=\"C\")\n # embedding_lookup gives shape (batch_size, N, M)\n #x_ = tf.nn.embedding_lookup(self.W_in_, self.input_w_)\n \n with tf.name_scope(\"embedding_layer\"):\n self.W_in_ = tf.get_variable(\"W_in\", shape=[self.V, self.H], \n initializer = tf.random_uniform_initializer(minval=-1.0, maxval=1.0))\n self.x_ = tf.nn.embedding_lookup(self.W_in_, self.input_w_)\n\n\n\n # Construct RNN/LSTM cell and recurrent layer.\n with tf.name_scope(\"recurrent_layer\"):\n self.cell_ = MakeFancyRNNCell(self.H, self.dropout_keep_prob_, self.num_layers)\n 
self.initial_h_ = self.cell_.zero_state(self.batch_size_,tf.float32)\n self.outputs_, self.final_h_ = tf.nn.dynamic_rnn(self.cell_, inputs=self.x_, \n sequence_length=self.ns_, initial_state=self.initial_h_,\n dtype=tf.float32)\n #print(self.outputs_.get_shape())\n #self.outputs_, self.final_h_ = tf.nn.dynamic_rnn(self.cell_, inputs=x_, \n #sequence_length=self.ns_, initial_state=self.initial_h_,\n #dtype=tf.float32)\n \n #W1_ = tf.Variable(tf.random_normal([N*M,H]), name=\"W1\")\n #b1_ = tf.Variable(tf.zeros([H,], dtype=tf.float32), name=\"b1\")\n #h_ = tf.tanh(tf.matmul(x_, W1_) + b1_, name=\"h\")\n\n\n\n\n\n # Softmax output layer, over vocabulary. Just compute logits_ here.\n # Hint: the matmul3d function will be useful here; it's a drop-in\n # replacement for tf.matmul that will handle the \"time\" dimension\n # properly.\n with tf.name_scope(\"softmax_output_layer\"):\n self.W_out_ = tf.get_variable(\"W_out\", shape=[self.H, self.V], \n initializer = tf.random_uniform_initializer(minval=-1.0, maxval=1.0))\n #tf.get_variable(tf.random_uniform([self.H, self.V], -1.0, 1.0), name=\"W_out\")\n\n self.b_out_ = tf.get_variable(\"b_out\", shape=[self.V,], \n initializer = tf.zeros_initializer())\n #self.b_out_ = tf.get_variable(\"b_out\", shape=[self.V,], initializer = tf.random_uniform_initializer(minval=-1.0, maxval=1.0))\n #tf.get_variable(tf.random_uniform([self.V,], -1.0, 1.0), name=\"b_out\")\n\n self.logits_ = tf.add(matmul3d(self.outputs_, self.W_out_), self.b_out_, name=\"logits\")\n #print(self.logits_.get_shape())\n\n\n\n # Loss computation (true loss, for prediction)\n with tf.name_scope(\"loss_computation\"):\n per_example_loss_ = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.target_y_, \n logits=self.logits_, \n name=\"per_example_loss\")\n self.loss_ = tf.reduce_mean(per_example_loss_, name=\"loss\")\n\n\n\n #### END(YOUR CODE) ####", "def create_graph(M, seed, rwr_walks, temporary_folder):\n\n # Define an initialisation variable\n previous_node = 'initialisation'\n # Define a fixed graph took from a random choice\n pos = random.choice(range(0, 20))\n # Create a graph from the transition matrix (Graph or DiGraph)\n network = nx.from_numpy_matrix(M, create_using=nx.Graph)\n\n # Browse nodes and information about them in the graph and add the color information\n for u, v in network.nodes(data=True):\n v['color'] = 'lightgrey'\n\n # Define an iterator to create new pictures names iteratively for each graph\n i = 0\n # Browse each nodes from the RWR walk\n for chosen_node in rwr_walks:\n\n # Apply a new color to the chosen node to differentiate it\n color_nodes = color_chosen_nodes(network, chosen_node, 'pink')\n\n # If the node is not the first of the RWR walk and is different than the previous node chosen\n if previous_node != 'initialisation' and previous_node != chosen_node:\n # Than add a weight for the edge between the previous node and the chosen node to visualize the walk\n network[chosen_node][previous_node]['weight'] = 7\n\n # Store in a list the new weight set for each edges of the RWR\n weights = [network[u][v]['weight'] for u, v in network.edges]\n\n # Save the network created with the chosen node, the new edge between the new and the previous node\n save_network(network, pos, color_nodes, weights, i, temporary_folder)\n i += 1\n\n # Reset the network (reset colors to grey) to prepare the next node selection\n color_chosen_nodes(network, chosen_node, 'lightgrey')\n\n # Define the previous node variable\n previous_node = chosen_node\n\n # When the for loop 
has ended, create the last graph which is the return to the seed\n # Remove all weights\n for u, v, w in network.edges(data=True):\n w['weight'] = 0.6\n weights = [network[u][v]['weight'] for u, v in network.edges]\n # Set the color pink to the seed node\n color_nodes = color_chosen_nodes(network, seed, 'pink')\n # Save it\n save_network(network, pos, color_nodes, weights, i, temporary_folder)", "def tgn(\n # Settings\n n_nodes: int,\n memory_size: int,\n time_embedding_size: int,\n dropout: float,\n learning_rate: float,\n target: utils.Target,\n is_training: bool,\n # Inputs\n node_ids: tf.Tensor,\n batch_idx: tf.Tensor,\n batch_times: tf.Tensor,\n batch_features: tf.Tensor,\n batch_most_recent: tf.Tensor,\n edge_idx: tf.Tensor,\n edge_times: tf.Tensor,\n edge_features: tf.Tensor,\n) -> Dict[str, tf.Tensor]:\n\n memory = tgn_memory(\n n_nodes=n_nodes,\n memory_size=memory_size,\n time_embedding_size=time_embedding_size,\n node_ids=node_ids,\n write_idx=batch_idx[:2],\n write_mask=batch_most_recent,\n write_features=batch_features,\n write_times=batch_times,\n )\n\n hidden = tgn_gnn(\n time_embedding_size=time_embedding_size,\n dropout=is_training * dropout,\n input=memory.output,\n last_update=memory.last_update,\n edge_idx=edge_idx,\n edge_times=edge_times,\n edge_features=edge_features,\n )\n\n logits = tgn_link_predictor(\n tf.gather(hidden, tf.tile(batch_idx[0][tf.newaxis], (2, 1))),\n tf.gather(hidden, batch_idx[1:]),\n )\n\n # Masks any batch padding\n batch_mask = tf.not_equal(batch_idx[0], node_ids.shape[0] - 1)\n count = tf.reduce_sum(tf.cast(batch_mask, tf.int32))\n labels = tf.tile(tf.constant([[1], [0]], dtype=logits.dtype),\n (1, logits.shape[1]))\n # *2 because the reference uses mean(pos_loss) + mean(neg_loss)\n loss = 2 * tf.reduce_mean(\n tf.cast(batch_mask, logits.dtype) *\n tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits))\n\n if is_training:\n if target is utils.Target.IPU:\n step = optimiser.Adam(\n learning_rate=learning_rate).minimize_with_global_step(loss)\n else:\n # Allows AMP with TF_ENABLE_AUTO_MIXED_PRECISION=1\n step = tf.train.AdamOptimizer(\n learning_rate=learning_rate).minimize(loss)\n with tf.control_dependencies(memory.updates + (step, )):\n return dict(loss=tf.identity(loss), count=count)\n else:\n with tf.control_dependencies(memory.updates):\n return dict(loss=tf.identity(loss),\n count=count,\n probs=tf.nn.sigmoid(logits))", "def get_walks(graph, num_walks, walk_length, matrix, p, q, use_multiprocessing: bool = True, ):\n\n nodes = list(graph.nodes())\n\n shuffled_nodes = random.sample(nodes*num_walks,len(nodes)*num_walks)\n partial_get_walk = partial(_get_walk, graph=graph, walk_length=walk_length, matrix=matrix, p=p, q=q)\n if use_multiprocessing:\n with Pool(cpu_count()) as p:\n logger.warning(f'Use multiprocessing on {cpu_count()} cores')\n chunksize=len(shuffled_nodes)//cpu_count()\n walks = p.map(partial_get_walk, shuffled_nodes,chunksize=chunksize)\n\n else:\n walks = []\n for node in nodes:\n walks.append(partial_get_walk(node))\n\n return walks", "def decode_n_step(compute_logits, features, areas,\n hparams, n=20, beam_size=1, top_beam=True):\n print(features)\n use_obj_dom_dist = (\"obj_dom_dist\" in features)\n batch_size = tf.shape(features[\"task\"])[0]\n beam_log_probs = tf.fill([batch_size * beam_size], 0.)\n live_beams = tf.fill([batch_size * beam_size], True)\n use_cache = tf.fill([batch_size * beam_size], 0)\n cache = {}\n for step in range(n):\n if step == 0:\n features[\"verb_refs\"] = tf.zeros([batch_size, 
1, 2], tf.int32)\n features[\"obj_refs\"] = tf.zeros([batch_size, 1, 2], tf.int32)\n features[\"input_refs\"] = tf.zeros([batch_size, 1, 2], tf.int32)\n for key in features:\n features[key] = _expand_to_beam(features[key], beam_size)\n areas[\"starts\"] = _expand_to_beam(areas[\"starts\"], beam_size)\n areas[\"ends\"] = _expand_to_beam(areas[\"ends\"], beam_size)\n # Backup the screen features\n def pad_to_match(feature, target_length, rank, constant_values):\n \"\"\"Pad the feature to the decode length.\"\"\"\n padding_list = []\n target_length = tf.maximum(target_length, tf.shape(feature)[1])\n for r in range(rank):\n if r == 1:\n padding_list.append([0, target_length - tf.shape(feature)[1]])\n else:\n padding_list.append([0, 0])\n return tf.pad(feature, padding_list, constant_values=constant_values,\n name=\"pad_to_match\")\n features[\"backup_obj_text\"] = pad_to_match(features[\"obj_text\"], n, 4, 0)\n features[\"backup_obj_type\"] = pad_to_match(features[\"obj_type\"], n, 3, -1)\n features[\"backup_obj_clickable\"] = pad_to_match(\n features[\"obj_clickable\"], n, 3, 0)\n features[\"backup_obj_screen_pos\"] = pad_to_match(\n features[\"obj_screen_pos\"], n, 4, 0)\n features[\"backup_obj_dom_pos\"] = pad_to_match(features[\"obj_dom_pos\"],\n n, 4, 0)\n if use_obj_dom_dist:\n features[\"backup_obj_dom_dist\"] = pad_to_match(features[\"obj_dom_dist\"],\n n, 4, 0)\n # Set the screen features\n features[\"obj_text\"] = features[\"obj_text\"][:, :1]\n features[\"obj_type\"] = features[\"obj_type\"][:, :1]\n features[\"obj_clickable\"] = features[\"obj_clickable\"][:, :1]\n features[\"obj_screen_pos\"] = features[\"obj_screen_pos\"][:, :1]\n features[\"obj_dom_pos\"] = features[\"obj_dom_pos\"][:, :1]\n if use_obj_dom_dist:\n features[\"obj_dom_dist\"] = features[\"obj_dom_dist\"][:, :1]\n else:\n features[\"verb_refs\"] = tf.pad(features[\"verb_refs\"],\n [[0, 0], [0, 1], [0, 0]],\n name=\"pad_verb_refs\")\n features[\"obj_refs\"] = tf.pad(features[\"obj_refs\"],\n [[0, 0], [0, 1], [0, 0]],\n name=\"pad_obj_refs\")\n features[\"input_refs\"] = tf.pad(features[\"input_refs\"],\n [[0, 0], [0, 1], [0, 0]],\n name=\"pad_input_refs\")\n # Fill in the screen information\n features[\"obj_text\"] = features[\"backup_obj_text\"][:, :step + 1]\n features[\"obj_type\"] = features[\"backup_obj_type\"][:, :step + 1]\n features[\"obj_clickable\"] = features[\"backup_obj_clickable\"][:, :step + 1]\n features[\"obj_screen_pos\"] = (\n features[\"backup_obj_screen_pos\"][:, :step + 1])\n features[\"obj_dom_pos\"] = (\n features[\"backup_obj_dom_pos\"][:, :step + 1])\n if use_obj_dom_dist:\n features[\"obj_dom_dist\"] = (\n features[\"backup_obj_dom_dist\"][:, :step + 1])\n eos_positions = tf.to_int32(tf.where(tf.equal(features[\"task\"], 1))[:, 1])\n beam_log_probs, live_beams, use_cache, cache = decode_one_step(\n step, live_beams, eos_positions,\n compute_logits,\n beam_log_probs,\n batch_size, beam_size, features,\n areas, hparams,\n use_cache=use_cache, cache=cache,\n always_consumed=True)\n for key in features:\n features[key] = _recover_shape(features[key], beam_size)\n if top_beam:\n features[key] = features[key][:, 0]\n if key in [\"obj_type\", \"obj_clickable\"]:\n features[key] = tf.pad(\n features[key], [[0, 0],\n [0, n - tf.shape(features[key])[1]], [0, 0]],\n constant_values=-1 if key.endswith(\"type\") else 0,\n name=\"pad_type_clickable\")\n elif key in [\"obj_text\", \"obj_screen_pos\", \"obj_dom_pos\", \"obj_dom_dist\"]:\n features[key] = tf.pad(features[key],\n [[0, 0], [0, n - 
tf.shape(features[key])[1]],\n [0, 0], [0, 0]],\n name=\"pad_rest_screen_features\")", "def run_one_step(self, dt):\n if not self._erode_flooded_nodes:\n flood_status = self._grid.at_node[\"flood_status_code\"]\n flooded_nodes = np.nonzero(flood_status == _FLOODED)[0]\n else:\n flooded_nodes = []\n\n upstream_order_IDs = self._grid[\"node\"][\"flow__upstream_node_order\"]\n\n defined_flow_receivers = np.not_equal(\n self._grid[\"node\"][\"flow__link_to_receiver_node\"], self._grid.BAD_INDEX\n )\n\n try:\n length_of_link = self._grid.length_of_d8\n except AttributeError:\n length_of_link = self._grid.length_of_link\n\n flow_link_lengths = length_of_link[\n self._grid.at_node[\"flow__link_to_receiver_node\"][defined_flow_receivers]\n ]\n flow_receivers = self._grid[\"node\"][\"flow__receiver_node\"]\n\n # Operate the main function:\n if self._use_W:\n self._alpha[defined_flow_receivers] = (\n self._K[defined_flow_receivers]\n * dt\n * self._A[defined_flow_receivers] ** self._m\n / self._W[defined_flow_receivers]\n / (flow_link_lengths**self._n)\n )\n\n else:\n self._alpha[defined_flow_receivers] = (\n self._K[defined_flow_receivers]\n * dt\n * self._A[defined_flow_receivers] ** self._m\n / (flow_link_lengths**self._n)\n )\n\n # Handle flooded nodes, if any (no erosion there)\n if flooded_nodes is not None:\n self._alpha[flooded_nodes] = 0.0\n\n reversed_flow = self._elevs < self._elevs[flow_receivers]\n # this check necessary if flow has been routed across\n # depressions\n self._alpha[reversed_flow] = 0.0\n\n threshdt = self._sp_crit * dt\n\n # solve using Brent's Method in Cython for Speed\n if isinstance(threshdt, float):\n brent_method_erode_fixed_threshold(\n upstream_order_IDs,\n flow_receivers,\n threshdt,\n self._alpha,\n self._n,\n self._elevs,\n )\n else:\n brent_method_erode_variable_threshold(\n upstream_order_IDs,\n flow_receivers,\n threshdt,\n self._alpha,\n self._n,\n self._elevs,\n )", "def random_iteration(r, w):\n density = 1.225\n blade_number = 6\n wind_v = round(-168.75*r**2-36.75*r+20.05 , 2)\n blade_v = r*w\n rel_v = round(math.sqrt(blade_v**2 + wind_v**2),2)\n arctan = round(math.degrees(math.atan2(wind_v, blade_v)), 2)\n # cl is a random float in range 1.0 to 1.7\n cl = uniform(1.0, 1.7)\n # aoa is a random float in range 10 to 15\n aoa = uniform(10, 15)\n treeHit = 0\n Re = get_re(r, cl, aoa, arctan, rel_v, blade_number)[0]\n new_cl, new_a, new_cd = get_max_cl(Re, r)\n new_Re, new_chord = get_re(r, round(new_cl, 2), round(new_a, 2), arctan, rel_v, blade_number)\n re_devi = abs((new_Re - Re) / Re)\n # iterate until Re_deviation goes under 5%\n while re_devi > 0.05:\n Re = new_Re\n new_cl, new_a, new_cd = get_max_cl(new_Re, r)\n new_Re, new_chord = get_re(r, new_cl, new_a, arctan, rel_v, blade_number)\n re_devi = abs((new_Re - Re) / Re)\n treeHit += 1\n # stop iteration over 10 times\n if treeHit > 10:\n break\n force_reference = 0.5 * density * rel_v**2 * 0.0125 * round(new_chord, 3)\n return {\n \"r\": r,\n \"arctan\": arctan, \n \"chord\": round(new_chord, 3), \n \"aoa\": new_a, \n \"cl\": new_cl, \n \"cd\": new_cd, \n \"Re\": new_Re,\n \"lift\": new_cl * force_reference, \n \"drag\": new_cd * force_reference,\n \"torque\": r * (new_cl * force_reference * math.sin(math.radians(arctan - new_a)) - new_cd * force_reference * math.cos(math.radians(arctan - new_a)))\n }", "def step(self):\n # Fast learning\n task_embedding = self._ilp.infer_task()\n\n # Posterior update\n #self._skip_flag = self._is_graph_same(task_embedding, self._prev_task_embedding)\n 
self._skip_flag = False # XXX do not skip test\n if not self._skip_flag:\n self._grprop.observe_task(task_embedding)\n self._prev_task_embedding = task_embedding\n else:\n print(\"skipping!\")", "def _generate_beam_search(\n self,\n input_ids,\n cur_len,\n max_length,\n min_length,\n do_sample,\n early_stopping,\n temperature,\n top_k,\n top_p,\n repetition_penalty,\n no_repeat_ngram_size,\n bad_words_ids,\n pad_token_id,\n eos_token_id,\n batch_size,\n num_return_sequences,\n length_penalty,\n num_beams,\n vocab_size,\n encoder_outputs,\n attention_mask,\n use_cache,\n model_specific_kwargs,\n):\n # generated hypotheses\n eos_token_id = 198 # newline\n generated_hyps = [\n BeamHypotheses(num_beams, max_length, length_penalty, early_stopping=early_stopping)\n for _ in range(batch_size)\n ]\n\n # scores for each sentence in the beam\n beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device)\n\n # for greedy decoding it is made sure that only tokens of the first beam are considered to avoid sampling the exact same tokens three times\n if do_sample is False:\n beam_scores[:, 1:] = -1e9\n beam_scores = beam_scores.view(-1) # shape (batch_size * num_beams,)\n\n # cache compute states\n past = (encoder_outputs, None) if encoder_outputs is not None else None\n\n # done sentences\n done = [False for _ in range(batch_size)]\n\n while cur_len < max_length:\n model_inputs = self.prepare_inputs_for_generation(\n input_ids, past=past, attention_mask=attention_mask, use_cache=use_cache, **model_specific_kwargs\n )\n outputs = self(**model_inputs) # (batch_size * num_beams, cur_len, vocab_size)\n next_token_logits = outputs[0][:, -1, :] # (batch_size * num_beams, vocab_size)\n\n # if model has past, then set the past variable to speed up decoding\n if self._use_cache(outputs, use_cache):\n past = outputs[1]\n if self.config.is_encoder_decoder and do_sample is False:\n # TODO (PVP) still a bit hacky here - there might be a better solution\n next_token_logits = self.adjust_logits_during_generation(\n next_token_logits, cur_len=cur_len, max_length=max_length\n )\n\n scores = F.log_softmax(next_token_logits, dim=-1) # (batch_size * num_beams, vocab_size)\n\n scores = self.postprocess_next_token_scores(\n scores=scores,\n input_ids=input_ids,\n no_repeat_ngram_size=no_repeat_ngram_size,\n bad_words_ids=bad_words_ids,\n cur_len=cur_len,\n min_length=min_length,\n max_length=max_length,\n eos_token_id=eos_token_id,\n repetition_penalty=repetition_penalty,\n batch_size=batch_size,\n num_beams=num_beams,\n )\n\n assert scores.shape == (batch_size * num_beams, vocab_size), \"Shapes of scores: {} != {}\".format(\n scores.shape, (batch_size * num_beams, vocab_size)\n )\n\n if do_sample:\n _scores = scores + beam_scores[:, None].expand_as(scores) # (batch_size * num_beams, vocab_size)\n # Temperature\n if temperature != 1.0:\n _scores = _scores / temperature\n # Top-p/top-k filtering\n _scores = top_k_top_p_filtering(\n _scores, top_k=top_k, top_p=top_p, min_tokens_to_keep=2\n ) # (batch_size * num_beams, vocab_size)\n # re-organize to group the beam together to sample from all beam_idxs\n _scores = _scores.contiguous().view(\n batch_size, num_beams * vocab_size\n ) # (batch_size, num_beams * vocab_size)\n\n # Sample 2 next tokens for each beam (so we have some spare tokens and match output of greedy beam search)\n probs = F.softmax(_scores, dim=-1)\n next_tokens = torch.multinomial(probs, num_samples=2 * num_beams) # (batch_size, num_beams * 2)\n # Compute next scores\n 
next_scores = torch.gather(_scores, -1, next_tokens) # (batch_size, num_beams * 2)\n # sort the sampled vector to make sure that the first num_beams samples are the best\n next_scores, next_scores_indices = torch.sort(next_scores, descending=True, dim=1)\n next_tokens = torch.gather(next_tokens, -1, next_scores_indices) # (batch_size, num_beams * 2)\n\n else:\n next_scores = scores + beam_scores[:, None].expand_as(scores) # (batch_size * num_beams, vocab_size)\n\n # re-organize to group the beam together (we are keeping top hypothesis accross beams)\n next_scores = next_scores.view(\n batch_size, num_beams * vocab_size\n ) # (batch_size, num_beams * vocab_size)\n\n next_scores, next_tokens = torch.topk(next_scores, 2 * num_beams, dim=1, largest=True, sorted=True)\n\n assert next_scores.size() == next_tokens.size() == (batch_size, 2 * num_beams)\n\n # next batch beam content\n next_batch_beam = []\n\n # for each sentence\n for batch_idx in range(batch_size):\n\n # if we are done with this sentence, add a pad token\n if done[batch_idx]:\n assert (\n len(generated_hyps[batch_idx]) >= num_beams\n ), \"Batch can only be done if at least {} beams have been generated\".format(num_beams)\n assert (\n eos_token_id is not None and pad_token_id is not None\n ), \"generated beams >= num_beams -> eos_token_id and pad_token have to be defined\"\n next_batch_beam.extend([(0, pad_token_id, 0)] * num_beams) # pad the batch\n continue\n\n # next sentence beam content, this will get added to next_batch_beam\n next_sent_beam = []\n\n # next tokens for this sentence\n for beam_token_rank, (beam_token_id, beam_token_score) in enumerate(\n zip(next_tokens[batch_idx], next_scores[batch_idx])\n ):\n # get beam and token IDs\n beam_id = beam_token_id // vocab_size\n token_id = beam_token_id % vocab_size\n\n effective_beam_id = batch_idx * num_beams + beam_id\n # add to generated hypotheses if end of sentence (eos or newline)\n if ((eos_token_id is not None) and (token_id.item() == eos_token_id)):\n # if beam_token does not belong to top num_beams tokens, it should not be added\n is_beam_token_worse_than_top_num_beams = beam_token_rank >= num_beams\n if is_beam_token_worse_than_top_num_beams:\n continue\n generated_hyps[batch_idx].add(\n input_ids[effective_beam_id].clone(), beam_token_score.item(),\n )\n else:\n # add next predicted token since it is not eos_token\n next_sent_beam.append((beam_token_score, token_id, effective_beam_id))\n\n # once the beam for next step is full, don't add more tokens to it.\n if len(next_sent_beam) == num_beams:\n break\n\n # Check if we are done so that we can save a pad step if all(done)\n done[batch_idx] = done[batch_idx] or generated_hyps[batch_idx].is_done(\n next_scores[batch_idx].max().item(), cur_len\n )\n\n # update next beam content\n assert len(next_sent_beam) == num_beams, \"Beam should always be full\"\n next_batch_beam.extend(next_sent_beam)\n assert len(next_batch_beam) == num_beams * (batch_idx + 1), \"We should have added num_beams each step\"\n\n # stop when we are done with each sentence\n if all(done):\n break\n\n # sanity check / prepare next batch\n assert len(next_batch_beam) == batch_size * num_beams\n beam_scores = beam_scores.new([x[0] for x in next_batch_beam])\n beam_tokens = input_ids.new([x[1] for x in next_batch_beam])\n beam_idx = input_ids.new([x[2] for x in next_batch_beam])\n\n # re-order batch and update current length\n input_ids = input_ids[beam_idx, :]\n input_ids = torch.cat([input_ids, beam_tokens.unsqueeze(1)], dim=-1)\n cur_len = cur_len + 
1\n\n # re-order internal states\n if past is not None:\n past = self._reorder_cache(past, beam_idx)\n\n # extend attention_mask for new generated input if only decoder\n if self.config.is_encoder_decoder is False:\n attention_mask = torch.cat(\n [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1\n )\n\n # finalize all open beam hypotheses and add to generated hypotheses\n for batch_idx in range(batch_size):\n if done[batch_idx]:\n continue\n\n # test that beam scores match previously calculated scores if not eos and batch_idx not done\n if eos_token_id is not None and all(\n ((token_id % vocab_size).item() != eos_token_id) for token_id in next_tokens[batch_idx]\n ):\n assert torch.all(\n next_scores[batch_idx, :num_beams] == beam_scores.view(batch_size, num_beams)[batch_idx]\n ), \"If batch_idx is not done, final next scores: {} have to equal to accumulated beam_scores: {}\".format(\n next_scores[:, :num_beams][batch_idx], beam_scores.view(batch_size, num_beams)[batch_idx],\n )\n\n # need to add best num_beams hypotheses to generated hyps\n for beam_id in range(num_beams):\n effective_beam_id = batch_idx * num_beams + beam_id\n final_score = beam_scores[effective_beam_id].item()\n final_tokens = input_ids[effective_beam_id]\n generated_hyps[batch_idx].add(final_tokens, final_score)\n\n # depending on whether greedy generation is wanted or not define different output_batch_size and output_num_return_sequences_per_batch\n output_batch_size = batch_size if do_sample else batch_size * num_return_sequences\n output_num_return_sequences_per_batch = 1 if do_sample else num_return_sequences\n\n # select the best hypotheses\n sent_lengths = input_ids.new(output_batch_size)\n best = []\n scores = []\n\n # retrieve best hypotheses\n for i, hypotheses in enumerate(generated_hyps):\n sorted_hyps = sorted(hypotheses.beams, key=lambda x: x[0])\n for j in range(output_num_return_sequences_per_batch):\n effective_batch_idx = output_num_return_sequences_per_batch * i + j\n score, best_hyp = sorted_hyps.pop()\n sent_lengths[effective_batch_idx] = len(best_hyp)\n best.append(best_hyp)\n scores.append(score)\n\n scores = torch.exp(torch.tensor(scores))\n return best, scores", "def self_test_model():\n\n print(\"Self-test for neural translation model.\")\n linebreak()\n\n graph = tf.Graph()\n\n with graph.as_default():\n with tf.device('/cpu:0'):\n t = time()\n # Create model with vocabularies of 10, 2 small buckets, 2 layers of 32.\n model = Model(source_vocab_size=10,\n target_vocab_size=10,\n buckets=[(3, 3), (6, 6)],\n size=32,\n num_layers=2,\n learning_rate=None,\n max_gradient_norm=5.0,\n batch_size=32,\n use_lstm=True,\n optim='adam',\n num_samples=None)\n\n print(\"Initializing Model took %.6fs\" % (time() - t))\n linebreak()\n\n with tf.Session(graph=graph) as sess:\n t = time()\n sess.run(tf.initialize_all_variables())\n print(\"Initializing Variables took %.6fs\" % (time() - t))\n linebreak()\n\n # Fake data set for both the (3, 3) and (6, 6) bucket.\n data_set = ([([1, 1], [2, 2]), ([3, 3], [4]), ([5], [6])],\n [([1, 1, 1, 1, 1], [2, 2, 2, 2, 2]), ([3, 3, 3], [5, 6])])\n num_iter = 20\n\n print('Using Learning Rate: %.2f' % (model.learning_rate.eval()))\n linebreak()\n\n t = time()\n # Train the fake model for 5 steps.\n for _ in xrange(num_iter):\n bucket_id = random.choice([0, 1])\n encoder_inputs, decoder_inputs, target_weights = model.get_batch(data_set, bucket_id)\n loss, _ = model.step(sess, encoder_inputs, decoder_inputs, target_weights, bucket_id, False)\n 
print('Perplexity: %f' % (np.exp(loss)))\n linebreak()\n print(\"Average training time: %.6fs/iter\" % ((time() - t) / num_iter))", "def simulate_random_walk_unweighted (G, damping, max_jumps):\n\n results = []\n nodes = [] # keep nodes\n current_node = random.randrange(N)\n while not G.has_node(current_node):\n current_node = random.randrange(N)\n\n\n j = 0\n while (j < max_jumps):\n previous_node = current_node\n jump_decision = random.uniform(0, 1)\n\n if jump_decision < damping or G.out_degree(current_node) == 0:\n # make a jump\n current_node = random.randrange(N)\n while not G.has_node(current_node):\n current_node = random.randrange(N)\n\n j += 1\n try:\n distance = nx.shortest_path_length(G, previous_node, current_node)\n results.append(distance)\n nodes.append(previous_node) # keep nodes\n except nx.NetworkXNoPath: continue\n\n else:\n # move to neighbor node\n incident = G.out_edges([current_node], data = False)\n current_node = random.choice(incident)[1]\n\n return results, nodes #, current_node (keep nodes)", "def fit(self, env, num_iteration, do_train=False):\n\n #s, a, r, new_s, d = get_multi_step_sample(one_step_memory, self.gamma, self.num_step)\n #self.replay_memory.append((s, a, r, new_s, d))\n # epsilon update\n num_env = env.num_process\n env.reset()\n\n for t in range(0, num_iteration, num_env):\n self.global_step += 1\n #print(\"Global_step: {}\".format(self.global_step))\n old_state, action, reward, new_state, is_terminal = self.get_multi_step_sample(env)\n self.replay_memory.append(old_state, action, reward, new_state, is_terminal)\n\n \"\"\"\n Epsilon update\n epsilon begin 1.0, end up 0.1\n FIX\n \"\"\"\n\n self.epsilon = self.epsilon+ num_env*self.epsilon_increment if self.epsilon > EPSILON_END else EPSILON_END\n num_update = sum([1 if i%self.update_freq == 0 else 0 for i in range(t, t+num_env)])\n if do_train:\n for _ in range(num_update):\n\n if self.per == 1:\n (old_state_list, action_list, reward_list, new_state_list, is_terminal_list), \\\n idx_list, p_list, sum_p, count = self.replay_memory.sample(self.batch_size)\n else:\n old_state_list, action_list, reward_list, new_state_list, is_terminal_list \\\n = self.replay_memory.sample(self.batch_size)\n\n feed_dict = {self.target_s: new_state_list.astype(np.float32)/255. 
,\n self.s : old_state_list.astype(np.float32)/255.,\n self.a_ph: list(enumerate(action_list)),\n self.r_ph: np.array(reward_list).astype(np.float32),\n self.d_ph: np.array(is_terminal_list).astype(np.float32),\n }\n\n if self.double:\n action_chosen_by_online = self.sess.run(self.a,\n feed_dict={\n self.s: new_state_list.astype(np.float32)/255.})\n feed_dict[self.a_for_new_state_ph] = list(enumerate(action_chosen_by_online))\n\n if self.per == 1:\n # Annealing weight beta\n feed_dict[self.loss_weight_ph] = (np.array(p_list) * count / sum_p) ** (-self.beta)\n error, _ = self.sess.run([self.error_op, self.train_op], feed_dict=feed_dict)\n self.replay_memory.update(idx_list, error)\n\n else:\n self.sess.run(self.train_op, feed_dict=feed_dict)\n\n self.update_time += 1\n\n if self.beta < BETA_END:\n self.beta += self.beta_increment\n\n if (self.update_time)%self.target_update_freq == 0 :\n #print(\"Step: {} \".format(self.update_time) + \"target_network update\")\n self.sess.run([self.target_update])\n #print(\"Step: {} \".format(self.update_freq) + \"Network save\")\n self.save_model()", "def call(self, input):\n for r in range(self.tile_num):\n for c in range(self.tile_num):\n # do frequency conv on each tile\n offset = [[r*self.tile_size+self.tile_size/2, c*self.tile_size+self.tile_size/2] for i in range(BATCHSIZE)]\n input_tile = tf.image.extract_glimpse(input, \n [self.tile_size, self.tile_size],\n offset, centered=False, normalized=False) \n pad_pixels = (self.fft_size - self.tile_size) / 2\n input_tile = tf.image.pad_to_bounding_box(\n input_tile, pad_pixels, pad_pixels, self.fft_size, self.fft_size)\n\n input_tile = tf.transpose(input_tile, perm=[0,3,1,2])\n input_fft = tf.spectral.fft2d(tf.cast(input_tile, tf.complex64))\n output_fft = tf.multiply(self.kernel_freq, input_fft[0,:])\n output_fft_accum = tf.reduce_sum(output_fft, 1)\n output_batch_i = tf.spectral.ifft2d(output_fft_accum)\n bias_expand = tf.expand_dims(tf.expand_dims(self.bias, 1),1)\n output_tile_accum = tf.expand_dims(tf.real(output_batch_i) + bias_expand, 0)\n for b in range(1,BATCHSIZE):\n output_fft = tf.multiply(self.kernel_freq, input_fft[b,:])\n output_fft_accum = tf.reduce_sum(output_fft, 1)\n output_fft_batch_i = tf.spectral.ifft2d(output_fft_accum)\n bias_expand = tf.expand_dims(tf.expand_dims(self.bias, 1),1)\n output_tile_accum = tf.concat([output_tile_accum, \n tf.expand_dims(tf.real(output_fft_batch_i) + bias_expand, 0)],0)\n\n # Concat col tiles\n output_accum_col = output_tile_accum\n if c != 0:\n overlap = output_accum_col[:,:,:,-pad_pixels:] + output_tile_accum[:,:,:,0:pad_pixels]\n output_accum_col = tf.concat([output_accum_col[:,:,:,0:-pad_pixels], \n overlap, \n output_tile_accum[:,:,:,pad_pixels:]], \n 3)\n # Concat tow output tiles\n output_accum = output_accum_col\n if r != 0:\n overlap = output_accum[:,:,-pad_pixels:,:] + output_accum_col[:,:,0:pad_pixels,:]\n output_accum = tf.concat([output_accum[:,:,0:-pad_pixels,:], \n overlap, \n output_accum_col[:,:,pad_pixels:,:]], \n 2)\n\n output_accum = tf.transpose(output_accum, perm=[0,2,3,1])\n return tf.image.crop_to_bounding_box(output_accum, 0, 0, self.img_size, self.img_size)", "def find_results(data,weight_matrix,params):\r\n \r\n data = data.astype(np.float32)\r\n weight_matrix = weight_matrix.astype(np.float32)\r\n \r\n rank = params['rank']\r\n lamb = params['lambda']\r\n lr = params['lr']\r\n hidden_pairs = params['hidden_pairs']\r\n cost_functions.lamb = lamb\r\n\r\n f = cost_functions.frobenius \r\n V_masked = 
create_mask(data,hidden_pairs)\r\n bool_mask = V_masked.notnull().values\r\n tf_mask = tf.Variable(bool_mask)\r\n \r\n V = tf.constant(V_masked.values)\r\n laplacian_matrix = laplacian(weight_matrix).astype(np.float32)\r\n W, H = init_W_H(V.shape, rank=rank)\r\n WH = tf.matmul(W, H)\r\n L = tf.constant(laplacian_matrix)\r\n WTLW = tf.matmul(tf.matmul(tf.transpose(W), L), W)\r\n\r\n cost = f(V, tf_mask, WH, WTLW)\r\n train_step = tf.train.ProximalGradientDescentOptimizer(lr).minimize(cost)\r\n init = tf.global_variables_initializer()\r\n clip = get_clip(W, H)\r\n\r\n sess = tf.Session()\r\n sess.run(init)\r\n\r\n previous_cost = sess.run(cost)\r\n sess.run(train_step)\r\n sess.run(clip)\r\n initial_difference = previous_cost - sess.run(cost)\r\n\r\n matrix_errors = []\r\n graph_errors = []\r\n imputation_error = []\r\n\r\n learnt_W = sess.run(W).astype(np.float32)\r\n learnt_H = sess.run(H).astype(np.float32)\r\n imputation_norm = np.linalg.norm((data - learnt_W.dot(learnt_H))[~bool_mask])\r\n \r\n i = 0\r\n while np.isfinite(sess.run(cost)) and previous_cost-sess.run(cost) > TARGET_DIFFERENCE * initial_difference and i<=max_iterations:\r\n previous_cost = sess.run(cost)\r\n sess.run(train_step)\r\n sess.run(clip)\r\n matrix_errors.append(sess.run(cost_functions.matrix_cost))\r\n graph_errors.append(sess.run(cost_functions.graph_cost))\r\n i+=1\r\n\r\n learnt_W = sess.run(W).astype(np.float32)\r\n learnt_H = sess.run(H).astype(np.float32)\r\n\r\n imputation_norm = np.linalg.norm((data - learnt_W.dot(learnt_H))[~bool_mask])\r\n imputation_error.append(imputation_norm)\r\n\r\n return {'imputation_error':imputation_norm,'W':sess.run(W),'H':sess.run(H),\r\n 'graph_error':graph_errors,'matrix_error':matrix_errors,'imputation_error_list':imputation_error}", "def fit(self, graph, instances):\n if self.wl:\n graph.weisfeiler_lehman(iterations=self.wl_iterations)\n\n all_walks = []\n for i, instance in tqdm.tqdm(enumerate(instances)):\n if self.wl:\n walks = self._extract_wl_walks(graph, Vertex(str(instance)))\n else:\n walks = self._extract_random_walks(graph, \n Vertex(str(instance)))\n\n walks = sorted(walks)\n all_walks += list(walks)\n\n all_walks = sorted(all_walks)\n self.print_walks(all_walks)\n import pickle\n pickle.dump(self.label_map, open('label_map.p', 'wb+'))\n if self.wl:\n pickle.dump(graph._label_map, open('wl_label_map.p', 'wb+')) \n input()\n\n print('Extracted {} walks for {} instances!'.format(len(all_walks),\n len(instances)))\n sentences = [list(map(str, x)) for x in all_walks]\n\n self.model_ = Word2Vec(sentences, size=self.vector_size, \n window=self.window, workers=self.n_jobs, \n sg=self.sg, iter=self.max_iter, \n negative=self.negative, \n min_count=self.min_count, seed=42)", "def print_film_generator_weight_analysis(session):\n graph = tf.get_default_graph()\n w2 = graph.get_tensor_by_name(\"conv1/Generator/fully_connected/weights:0\")\n w3 = graph.get_tensor_by_name(\"conv2/Generator/fully_connected/weights:0\")\n w4 = graph.get_tensor_by_name(\"conv3/Generator/fully_connected/weights:0\")\n\n w2, w3, w4 = session.run([w2, w3, w4])\n\n w2_self = w2[:64, :]\n w2_cross = w2[64:, :]\n w3_self = w3[:64, :]\n w3_cross = w3[64:, :]\n w4_self = w4[:64, :]\n w4_cross = w4[64:, :]\n\n self2 = np.linalg.norm(w2_self, axis=0).mean()\n cross2 = np.linalg.norm(w2_cross, axis=0).mean()\n self3 = np.linalg.norm(w3_self, axis=0).mean()\n cross3 = np.linalg.norm(w3_cross, axis=0).mean()\n self4 = np.linalg.norm(w4_self, axis=0).mean()\n cross4 = np.linalg.norm(w4_cross, 
axis=0).mean()\n\n print(\"==============================\")\n print(\"FiLM generator weight analysis\")\n print(\"==============================\")\n print(\"Block2: self-mod = {}, cross-mod = {}\".format(self2, cross2))\n print(\"Block3: self-mod = {}, cross-mod = {}\".format(self3, cross3))\n print(\"Block4: self-mod = {}, cross-mod = {}\".format(self4, cross4))", "def simulateDataOnHimster(thisExperiment: Experiment, thisScenario: Scenario) -> Scenario:\n\n for task in thisScenario.SimulationTasks:\n\n print(f\"running simulation of type {str(task.simDataType)} and path ({task.dirPath} at states:\")\n print(f\"current state: {str(task.simState)}\")\n print(f\"last state: {str(task.lastState)}\")\n\n data_keywords = []\n data_pattern = \"\"\n\n cut_keyword = generateCutKeyword(thisExperiment.recoParams)\n\n print(f\"cut keyword is {cut_keyword}\")\n\n merge_keywords = [\"merge_data\", \"binning_300\"]\n # if \"v\" in task.simType:\n if task.simDataType == SimulationDataType.VERTEX:\n data_keywords = [\"uncut\", \"bunches\", \"binning_300\"]\n data_pattern = \"lmd_vertex_data_\"\n # elif \"a\" in task.simType:\n elif task.simDataType == SimulationDataType.ANGULAR:\n data_keywords = [cut_keyword, \"bunches\", \"binning_300\"]\n data_pattern = \"lmd_data_\"\n elif task.simDataType == SimulationDataType.EFFICIENCY_RESOLUTION:\n data_keywords = [cut_keyword, \"bunches\", \"binning_300\"]\n data_pattern = \"lmd_res_data_\"\n else:\n raise NotImplementedError(f\"Simulation type {task.simDataType} is not implemented!\")\n\n # 1. simulate data\n if task.simState == SimulationState.START_SIM:\n os.chdir(lmd_fit_script_path)\n status_code = 1\n # if \"er\" in task.simType:\n if task.simDataType == SimulationDataType.EFFICIENCY_RESOLUTION:\n \"\"\"\n efficiency / resolution calculation.\n\n Takes an offset of the IP into account.\n\n TODO: This needs to know the misalignment of the detector.\n \"\"\"\n found_dirs = []\n # what the shit, this should never be empty in the first place\n if (task.dirPath != \"\") and (task.dirPath is not None):\n temp_dir_searcher = general.DirectorySearcher(\n [\n thisExperiment.recoParams.simGenTypeForResAcc.value,\n data_keywords[0],\n ] # look for the folder name including sim_type_for_resAcc\n )\n temp_dir_searcher.searchListOfDirectories(task.dirPath, thisScenario.track_file_pattern)\n found_dirs = temp_dir_searcher.getListOfDirectories()\n print(f\"found dirs now: {found_dirs}\")\n else:\n # path may be empty, then the directory searcher tries to find it\n pass\n\n if found_dirs:\n status_code = wasSimulationSuccessful(\n thisExperiment,\n found_dirs[0],\n thisScenario.track_file_pattern + \"*.root\",\n )\n elif task.lastState < SimulationState.START_SIM:\n # then lets simulate!\n # this command runs the full sim software with box gen data\n # to generate the acceptance and resolution information\n # for this sample\n # note: beam tilt and divergence are not necessary here,\n # because that is handled completely by the model\n\n # because we don't want to change the experiment config or\n # anything in the simParams, recoParam, alignParams,\n # we'll create temp objects here.\n\n tempSimParams = thisExperiment.simParams\n tempRecoParams = thisExperiment.recoParams\n tempAlignParams = thisExperiment.alignParams\n\n thisIPX = tempRecoParams.recoIPX\n thisIPY = tempRecoParams.recoIPY\n thisIPZ = tempRecoParams.recoIPZ\n\n max_xy_shift = math.sqrt(thisIPX**2 + thisIPY**2)\n max_xy_shift = float(\"{0:.2f}\".format(round(float(max_xy_shift), 2)))\n\n # since this is 
the res/acc case, these parameters must be changed\n tempSimParams.simGeneratorType = tempRecoParams.simGenTypeForResAcc\n tempSimParams.num_events_per_sample = tempRecoParams.num_events_per_resAcc_sample\n tempSimParams.num_samples = tempRecoParams.num_resAcc_samples\n tempSimParams.theta_min_in_mrad -= max_xy_shift\n tempSimParams.theta_max_in_mrad += max_xy_shift\n tempSimParams.ip_offset_x = thisIPX\n tempSimParams.ip_offset_y = thisIPY\n tempSimParams.ip_offset_z = thisIPZ\n\n # since this is the res/acc case, these parameters must be updated\n tempRecoParams.num_samples = tempRecoParams.num_resAcc_samples\n tempRecoParams.num_events_per_sample = tempRecoParams.num_events_per_resAcc_sample\n\n # TODO: alignment part\n # if alignement matrices were specified, we used them as a mis-alignment\n # and alignment for the box simulations\n\n (job, returnPath) = create_simulation_and_reconstruction_job(\n tempSimParams,\n tempAlignParams,\n tempRecoParams,\n application_command=thisScenario.Sim,\n use_devel_queue=args.use_devel_queue,\n )\n job_manager.append(job)\n\n task.dirPath = returnPath\n thisScenario.acc_and_res_dir_path = returnPath\n # last_state += 1\n # last state was < 1, so 0. That means an increase is now 1\n task.lastState = SimulationState.START_SIM\n\n # elif \"a\" in task.simType:\n elif task.simDataType == SimulationDataType.ANGULAR:\n \"\"\"\n a is the angular case. this is the data set onto which the luminosiy fit is performed.\n it is therefore REAL digi data (or DPM data of course) that must be reconstructed again\n with the updated reco parameter (like the IP position, cuts applied and alignment).\n note: beam tilt and divergence are not used here because\n only the last reco steps are rerun of the track reco\n \"\"\"\n found_dirs = []\n status_code = 1\n # what the shit, this should never be empty in the first place\n if (task.dirPath != \"\") and (task.dirPath is not None):\n temp_dir_searcher = general.DirectorySearcher([\"dpm_elastic\", data_keywords[0]])\n temp_dir_searcher.searchListOfDirectories(task.dirPath, thisScenario.track_file_pattern)\n found_dirs = temp_dir_searcher.getListOfDirectories()\n\n else:\n # path may be empty, then the directory searcher tries to find it\n pass\n\n if found_dirs:\n status_code = wasSimulationSuccessful(\n thisExperiment,\n found_dirs[0],\n thisScenario.track_file_pattern + \"*.root\",\n )\n\n # oh boi that's bound to be trouble with IntEnums\n elif task.lastState < task.simState:\n\n # * reco params must be adjusted if the res/acc sample had more jobs or samples that the real (or dpm) data\n rec_par = thisExperiment.recoParams\n if thisExperiment.recoParams.num_samples > 0 and rec_par.num_samples > thisExperiment.recoParams.num_samples:\n rec_par.num_samples = thisExperiment.recoParams.num_samples\n\n # TODO: have alignment parameters changed? 
take them from the experiment\n align_par = thisExperiment.alignParams\n\n (job, returnPath) = create_reconstruction_job(\n rec_par,\n align_par,\n str(thisExperiment.baseDataOutputDir),\n application_command=thisScenario.Reco,\n use_devel_queue=args.use_devel_queue,\n )\n job_manager.append(job)\n\n task.dirPath = returnPath\n thisScenario.filteredTrackDirectory = returnPath\n\n # Simulation is done, so update the last_state\n task.lastState = SimulationState.START_SIM\n\n # elif \"v\" in task.simType:\n elif task.simDataType == SimulationDataType.VERTEX:\n\n # TODO: check if the sim data is already there, if yes return 0, else start sim\n status_code = 0\n\n # # vertex Data must always be created without any cuts first\n # tempRecoPars = thisExperiment.recoParams\n # tempRecoPars.use_xy_cut = False\n # tempRecoPars.use_m_cut = False\n\n # # TODO: misalignment is important here. the vertex data can have misalignment (because it's real data)\n # # but it has no alignment yet. that is only for the second reconstruction\n # tempAlignPars = thisExperiment.alignParams\n # tempAlignPars.alignment_matrices_path = None\n\n # job, _ = create_simulation_and_reconstruction_job(\n # thisExperiment.simParams,\n # tempAlignPars,\n # tempRecoPars,\n # use_devel_queue=args.use_devel_queue,\n # application_command=thisScenario.Sim,\n # )\n # job_manager.append(job)\n\n else:\n raise ValueError(f\"This tasks simType is {task.simDataType}, which is invalid!\")\n\n if status_code == 0:\n print(\"found simulation files, skipping\")\n task.simState = SimulationState.MAKE_BUNCHES\n task.lastState = SimulationState.START_SIM\n elif status_code > 0:\n print(f\"still waiting for himster simulation jobs for {task.simDataType} data to complete...\")\n else:\n raise ValueError(\"status_code is negative, which means number of running jobs can't be determined. \")\n\n # 2. 
create data (that means bunch data, create data objects)\n if task.simState == SimulationState.MAKE_BUNCHES:\n # check if data objects already exists and skip!\n temp_dir_searcher = general.DirectorySearcher(data_keywords)\n temp_dir_searcher.searchListOfDirectories(task.dirPath, data_pattern)\n found_dirs = temp_dir_searcher.getListOfDirectories()\n status_code = 1\n if found_dirs:\n status_code = wasSimulationSuccessful(\n thisExperiment,\n found_dirs[0],\n data_pattern + \"*\",\n is_bunches=True,\n )\n\n elif task.lastState < task.simState:\n os.chdir(lmd_fit_script_path)\n # bunch data\n # TODO: pass experiment config, or better yet, make class instead of script\n bashcommand = (\n \"python makeMultipleFileListBunches.py \"\n + f\" --filenamePrefix {thisScenario.track_file_pattern}\"\n + \" --files_per_bunch 10 --maximum_number_of_files \"\n + str(thisExperiment.recoParams.num_samples)\n + \" \"\n + task.dirPath\n )\n print(f\"Bash command for bunch creation:\\n{bashcommand}\\n\")\n _ = subprocess.call(bashcommand.split())\n # TODO: pass experiment config, or better yet, make class instead of script\n # create data\n bashArgs = []\n # if \"a\" in task.simType:\n if task.simDataType == SimulationDataType.ANGULAR:\n el_cs = thisScenario.elastic_pbarp_integrated_cross_secion_in_mb\n bashArgs.append(\"python\")\n bashArgs.append(\"createMultipleLmdData.py\")\n bashArgs.append(\"--dir_pattern\")\n bashArgs.append(data_keywords[0])\n bashArgs.append(\"--jobCommand\")\n bashArgs.append(thisScenario.LmdData)\n bashArgs.append(f\"{thisScenario.momentum:.2f}\")\n bashArgs.append(str(task.simDataType.value)) # we have to give the value because the script expects a/er/v !\n bashArgs.append(task.dirPath)\n bashArgs.append(\"../dataconfig_xy.json\")\n\n if el_cs:\n bashArgs.append(\"--elastic_cross_section\")\n bashArgs.append(str(el_cs))\n # bashcommand += \" --elastic_cross_section \" + str(el_cs)\n else:\n bashArgs.append(\"python\")\n bashArgs.append(\"createMultipleLmdData.py\")\n bashArgs.append(\"--dir_pattern\")\n bashArgs.append(data_keywords[0])\n bashArgs.append(\"--jobCommand\")\n bashArgs.append(thisScenario.LmdData)\n bashArgs.append(f\"{thisScenario.momentum:.2f}\")\n bashArgs.append(str(task.simDataType.value)) # we have to give the value because the script expects a/er/v !\n bashArgs.append(task.dirPath)\n bashArgs.append(\"../dataconfig_xy.json\")\n\n print(bashArgs)\n _ = subprocess.call(bashArgs)\n\n # last_state = last_state + 1\n # was apparently bunches\n task.lastState = SimulationState.MERGE\n\n bashArgs.clear()\n\n # else:\n # raise RuntimeError(\"No data could be found, but no commands are to be executed. This can't be!\")\n\n if status_code == 0:\n print(\"skipping bunching and data object creation...\")\n # state = 3\n task.simState = SimulationState.MERGE\n task.lastState = SimulationState.MAKE_BUNCHES\n elif status_code > 0:\n print(f\"status_code {status_code}: still waiting for himster simulation jobs for {task.simDataType} data to complete...\")\n else:\n # ok something went wrong there, exit this scenario and\n # push on bad scenario stack\n task.simState = SimulationState.FAILED\n raise ValueError(\"Something went wrong with the cluster jobs! This scenario will no longer be processed.\")\n\n # 3. 
merge data\n if task.simState == SimulationState.MERGE:\n # check first if merged data already exists and skip it!\n temp_dir_searcher = general.DirectorySearcher(merge_keywords)\n temp_dir_searcher.searchListOfDirectories(task.dirPath, data_pattern)\n found_dirs = temp_dir_searcher.getListOfDirectories()\n if not found_dirs:\n os.chdir(lmd_fit_script_path)\n # merge data\n # if \"a\" in task.simType:\n bashArgs = []\n if task.simDataType == SimulationDataType.ANGULAR:\n bashArgs.append(\"python\")\n bashArgs.append(\"mergeMultipleLmdData.py\")\n bashArgs.append(\"--dir_pattern\")\n bashArgs.append(data_keywords[0])\n bashArgs.append(\"--num_samples\")\n bashArgs.append(str(bootstrapped_num_samples))\n bashArgs.append(str(task.simDataType.value)) # we have to give the value because the script expects a/er/v !\n bashArgs.append(task.dirPath)\n\n else:\n bashArgs.append(\"python\")\n bashArgs.append(\"mergeMultipleLmdData.py\")\n bashArgs.append(\"--dir_pattern\")\n bashArgs.append(data_keywords[0])\n bashArgs.append(str(task.simDataType.value)) # we have to give the value because the script expects a/er/v !\n bashArgs.append(task.dirPath)\n\n print(\"working directory:\")\n print(f\"{os.getcwd()}\")\n print(f\"running command:\\n{bashArgs}\")\n _ = subprocess.call(bashArgs)\n\n task.simState = SimulationState.DONE\n\n if task.lastState == SimulationState.FAILED:\n thisScenario.is_broken = True\n break\n\n # remove done tasks\n thisScenario.SimulationTasks = [simTask for simTask in thisScenario.SimulationTasks if simTask.simState != SimulationState.DONE]\n\n return thisScenario", "def define_graph():\n \n num_cells = 65\n num_class = 2\n input_data = tf.placeholder(tf.float32,[None, MAX_WORDS_IN_REVIEW, EMBEDDING_SIZE],name=\"input_data\")\n labels = tf.placeholder(tf.float32,[None,num_class], name=\"labels\")\n dropout_keep_prob = tf.placeholder_with_default(0.6,shape=())\n lstm_cell_1 = tf.contrib.rnn.GRUCell(num_cells)\n lstm_cell_2 = tf.contrib.rnn.GRUCell(num_cells)\n lstm_cell_1 = tf.contrib.rnn.DropoutWrapper(cell=lstm_cell_1, output_keep_prob=dropout_keep_prob)\n lstm_cell_2 = tf.contrib.rnn.DropoutWrapper(cell=lstm_cell_2, output_keep_prob=dropout_keep_prob)\n (value_1,value_2),_ = tf.nn.bidirectional_dynamic_rnn(cell_fw =lstm_cell_1, cell_bw = lstm_cell_2, dtype = tf.float32, inputs = input_data)\n final_value = tf.concat((value_1, value_2),2)\n final_output = final_value[:,-1,:]\n logits = tf.layers.dense(final_output,2)\n pred = tf.equal(tf.argmax(logits,1), tf.argmax(labels,1))\n Accuracy = tf.reduce_mean(tf.cast(pred, tf.float32), name = 'accuracy')\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=labels), name = 'loss')\n optimizer = tf.train.AdamOptimizer(learning_rate = 0.0001).minimize(loss)\n return input_data, labels, dropout_keep_prob, optimizer, Accuracy, loss", "def dfs(graph_dict, node, track):\n\n track.explored.add(node)\n track.leader[node] = track.current_source\n for head in graph_dict[node]:\n if head not in track.explored:\n dfs(graph_dict, head, track)\n track.current_time += 1\n track.finish_time[node] = track.current_time", "def fit(self, num_iterations, max_episode_length=250, eval_every_nth=1000, save_model_every_nth=1000, log_loss_every_nth=1000, video_every_nth=20000):\n self.compile()\n self.policy = LinearDecayGreedyEpsilonPolicy(start_value=1., end_value=0.1, num_steps=1e6, num_actions=self.num_actions) # for training\n self.replay_memory = ReplayMemory(max_size=1000000)\n self.log_loss_every_nth = 
log_loss_every_nth\n random_policy = UniformRandomPolicy(num_actions=self.num_actions) # for burn in \n num_episodes = 0\n\n # tf logging\n self.tf_session = K.get_session()\n self.tf_summary_writer = tf.summary.FileWriter(self.log_dir, self.tf_session.graph)\n\n while self.iter_ctr < num_iterations:\n state = self.env.reset()\n self.preprocessor.reset_history_memory()\n\n num_timesteps_in_curr_episode = 0\n total_reward_curr_episode = 0 \n\n while num_timesteps_in_curr_episode < max_episode_length:\n self.iter_ctr+=1 # number of steps overall\n num_timesteps_in_curr_episode += 1 # number of steps in the current episode\n\n # logging\n # if not self.iter_ctr % 1000:\n # print \"iter_ctr {}, num_episodes : {} num_timesteps_in_curr_episode {}\".format(self.iter_ctr, num_episodes, num_timesteps_in_curr_episode)\n\n # this appends to uint8 history and also returns stuff ready to be spit into the network\n state_network = self.preprocessor.process_state_for_network(state) #shape is (4,84,84,1). axis are swapped in cal_q_vals\n # print \"shape {}, max {}, min {}, type {} \".format(state_network.shape, np.max(state_network), np.min(state_network), state_network.dtype)\n\n # burning in \n if self.iter_ctr < self.num_burn_in:\n action = random_policy.select_action() # goes from 0 to n-1\n next_state, reward, is_terminal, _ = self.env.step(action)\n reward_proc = self.preprocessor.process_reward(reward)\n total_reward_curr_episode += reward_proc\n state_proc_memory = self.preprocessor.process_state_for_memory(state)\n # atari_preprocessor.process_state_for_memory converts it to grayscale, resizes it to (84, 84) and converts to uint8\n self.replay_memory.append(state_proc_memory, action, reward_proc, is_terminal)\n\n if is_terminal or (num_timesteps_in_curr_episode > max_episode_length-1):\n state = self.env.reset()\n num_episodes += 1\n with tf.name_scope('summaries'):\n self.tf_log_scaler(tag='train_reward_per_episode_wrt_no_of_episodes', value=total_reward_curr_episode, step=num_episodes)\n self.tf_log_scaler(tag='train_reward_per_episode_wrt_iterations', value=total_reward_curr_episode, step=self.iter_ctr)\n print \"iter_ctr {}, num_episodes : {}, episode_reward : {}, loss : {}, episode_timesteps : {}, epsilon : {}\".format\\\n (self.iter_ctr, num_episodes, total_reward_curr_episode, self.loss_last, num_timesteps_in_curr_episode, self.policy.epsilon)\n num_timesteps_in_curr_episode = 0\n self.dump_train_episode_reward(total_reward_curr_episode)\n # this should be called when num_timesteps_in_curr_episode > max_episode_length, but we can call it in is_terminal as well. \n # it won't change anything as it just sets the last entry's is_terminal to True\n self.replay_memory.end_episode() \n break\n\n # training\n else:\n # print \"iter_ctr {}, num_episodes : {} num_timesteps_in_curr_episode {}\".format(self.iter_ctr, num_episodes, num_timesteps_in_curr_episode)\n q_values = self.calc_q_values(state_network)\n # print \"q_values {} q_values.shape {}\".format(q_values, q_values.shape)\n #print \"q_values.shape \", q_values.shape\n action = self.policy.select_action(q_values=q_values, is_training=True)\n next_state, reward, is_terminal, _ = self.env.step(action)\n reward_proc = self.preprocessor.process_reward(reward)\n total_reward_curr_episode += reward_proc\n state_proc_memory = self.preprocessor.process_state_for_memory(state)\n self.replay_memory.append(state_proc_memory, action, reward_proc, is_terminal)\n\n # validation. 
keep this clause before the breaks!\n if not(self.iter_ctr%eval_every_nth):\n print \"\\n\\nEvaluating at iter {}\".format(self.iter_ctr)\n if not(self.iter_ctr%video_every_nth):\n self.evaluate(num_episodes=20, max_episode_length=max_episode_length, gen_video=True)\n else:\n self.evaluate(num_episodes=20, max_episode_length=max_episode_length, gen_video=False)\n print \"Done Evaluating\\n\\n\"\n\n # save model\n if not(self.iter_ctr%save_model_every_nth):\n self.q_network.save(os.path.join(self.log_dir, 'weights/q_network_{}.h5'.format(str(self.iter_ctr).zfill(7))))\n\n if is_terminal or (num_timesteps_in_curr_episode > max_episode_length-1):\n state = self.env.reset()\n num_episodes += 1\n with tf.name_scope('summaries'):\n self.tf_log_scaler(tag='train_reward_per_episode_wrt_no_of_episodes', value=total_reward_curr_episode, step=num_episodes)\n self.tf_log_scaler(tag='train_reward_per_episode_wrt_iterations', value=total_reward_curr_episode, step=self.iter_ctr)\n print \"iter_ctr {}, num_episodes : {}, episode_reward : {}, loss : {}, episode_timesteps : {}, epsilon : {}\".format\\\n (self.iter_ctr, num_episodes, total_reward_curr_episode, self.loss_last, num_timesteps_in_curr_episode, self.policy.epsilon)\n num_timesteps_in_curr_episode = 0\n self.dump_train_episode_reward(total_reward_curr_episode)\n self.replay_memory.end_episode() \n break\n\n if not(self.iter_ctr % self.train_freq):\n self.update_policy()\n\n state = next_state", "def dfs(g):\n global time\n time = 0\n\n for v in g:\n v.discovery = 0\n v.finish_time = 0\n v.color = 'white'\n\n for v in g:\n if v.color == 'white':\n dfs_visit(v)", "def generate_bfs_second_order_random_walk(graph, alias_nodes, alias_edges, walk_length=10, start_node=None):\n if start_node == None:\n start_node = np.random.choice(graph.nodes())\n walk = [start_node]\n \n prev = None\n cur_idx = 0\n prev = [0]\n while len(walk) < walk_length:\n cur_nbrs = list(graph.neighbors(walk[cur_idx]))\n if len(cur_nbrs) > 0:\n walk_sequence = None\n if len(prev) == 1:\n # sample the next node based on alias_nodes\n walk_sequence = __alias_draw_sequence(*alias_nodes[walk[cur_idx]], cur_nbrs)\n else:\n # sample the next node based on alias_edges\n walk_sequence = __alias_draw_sequence(*alias_edges[(prev[cur_idx], walk[cur_idx])], cur_nbrs)\n walk += walk_sequence\n for i in range(len(walk_sequence)):\n prev.append(walk[cur_idx])\n cur_idx += 1\n else:\n break\n\n return walk[:walk_length]", "def get_model_tweetonly(batch_size, max_seq_length, input_size, hidden_size, target_size,\n vocab_size, pretrain, tanhOrSoftmax, dropout):\n\n # batch_size x max_seq_length\n inputs = tf.placeholder(tf.int32, [batch_size, max_seq_length])\n\n cont_train = True\n if pretrain == \"pre\":\n cont_train = False\n embedding_matrix = tf.Variable(tf.random_uniform([vocab_size, input_size], -0.1, 0.1), # input_size is embeddings size\n name=\"embedding_matrix\", trainable=cont_train)\n\n # batch_size x max_seq_length x input_size\n embedded_inputs = tf.nn.embedding_lookup(embedding_matrix, inputs)\n\n\n # [batch_size x inputs_size] with max_seq_length elements\n # fixme: possibly inefficient\n # inputs_list[0]: batch_size x input[0] <-- word vector of the first word\n inputs_list = [tf.squeeze(x) for x in\n tf.split(1, max_seq_length, embedded_inputs)]\n\n lstm_encoder = Encoder(rnn_cell.BasicLSTMCell, input_size, hidden_size)\n start_state = tf.zeros([batch_size, lstm_encoder.state_size])\n\n # [h_i], [h_i, c_i] <-- LSTM\n # [h_i], [h_i] <-- RNN\n outputs, states = 
lstm_encoder(inputs_list, start_state, \"LSTM\")\n\n drop_prob = None\n if dropout:\n drop_prob = 0.1\n\n lstm_encoder = Encoder(rnn_cell.BasicLSTMCell, input_size, hidden_size, drop_prob, drop_prob)\n\n outputs_fin = outputs[-1]\n if tanhOrSoftmax == \"tanh\":\n model = Projector(target_size, non_linearity=tf.nn.tanh)(outputs_fin) #tf.nn.softmax\n else:\n model = Projector(target_size, non_linearity=tf.nn.softmax)(outputs_fin) # tf.nn.softmax\n\n\n return model, [inputs]", "def build_tf_graph(self):\n raise NotImplementedError", "def demonstrate(self, train_path):\n if not os.path.exists(train_path):\n print(\"training json file not exists, program quit\")\n sys.exit()\n with open(train_path) as f:\n json_data = json.load(f)\n self.train_time_stamp_list = json_data['time']\n self.train_image_path_list = json_data['image_path']\n self.train_position_list = json_data['position']\n self.train_angle_list = json_data['angle']\n self.train_semantic_tag_list = json_data['semantic_tag']\n num_images = len(self.train_image_path_list)\n\n # create nodes\n print(\"start demonstrating, totally {} images in demonstration set\".format(num_images))\n self.node_id_list = []\n self.node_semantic_tag_list = []\n self.node_metric_feature_list = []\n self.node_conv_feature_list = []\n last_node_position = np.array([float('inf'), float('inf'), float('inf')])\n for train_index in range(num_images):\n train_position = np.array(self.train_position_list[train_index])\n if np.sqrt(np.sum(np.square(train_position - last_node_position))) > self.min_node_distance:\n last_node_position = train_position\n self.node_id_list.append(train_index)\n train_semantic_tag = self.train_semantic_tag_list[train_index]\n self.node_semantic_tag_list.append(train_semantic_tag)\n node_image_path = self.train_image_path_list[train_index]\n node_image = cv2.imread(node_image_path)\n image_batch = self.process_batch([node_image])\n node_conv_feature, node_metric_feature = self.sess.run([self.conv_features,\n self.metric_features], feed_dict = {self.images_placeholder: image_batch})\n self.node_conv_feature_list.append(node_conv_feature[0])\n self.node_metric_feature_list.append(node_metric_feature[0])\n print(\"{}/{} demonstration image shown\".format(train_index+1, num_images))\n self.node_number = len(self.node_id_list)\n print(\"all nodes created, totally {} of nodes\".format(len(self.node_id_list)))", "def __init__(self, placeholders, dict_size, name=None,\n embedding_dim=50, lr=0.001, **kwargs):\n\n super(Deepwalk, self).__init__(**kwargs)\n\n allowed_kwargs = {'name', 'logging', 'model_size'}\n for kwarg in kwargs.keys():\n assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg\n name = kwargs.get('name')\n if not name:\n name = self.__class__.__name__.lower()\n self.name = name\n\n logging = kwargs.get('logging', False)\n self.logging = logging\n\n self.vars = {}\n\n self.margin = 0.1\n\n self.placeholders = placeholders\n self.dict_size = dict_size\n self.embedding_dim = embedding_dim\n self.inputs1 = placeholders[\"batch1\"]\n self.inputs2 = placeholders[\"batch2\"]\n self.neg_samples = placeholders[\"batch3\"]\n self.number = placeholders[\"batch4\"]\n self.batch_size = placeholders['batch_size']\n \n\n # Model parameters\n self.loss = 0\n self.accuracy = 0\n \n # tensorflow word2vec tutorial\n self.target_embeds = tf.Variable(\n tf.random_uniform([self.dict_size, self.embedding_dim], -1.0, 1.0),\n name=\"target_embeds\")\n self.context_weights = tf.Variable(\n tf.truncated_normal([self.dict_size, 
self.embedding_dim],\n stddev=1.0 / math.sqrt(self.embedding_dim)),\n name=\"context_embeds\")\n self.context_bias = tf.Variable(\n tf.zeros([self.dict_size]),\n name=\"context_bias\")\n \n self.optimizer = tf.train.AdamOptimizer(learning_rate=lr)\n\n self.build()", "def algo_graph_sto_iht(\n x_mat, y_tr, max_epochs, lr, x_star, x0, tol_algo, edges, costs, s, b,\n g=1, root=-1, gamma=0.1, proj_max_num_iter=50, verbose=0):\n np.random.seed()\n start_time = time.time()\n x_hat = np.copy(x0)\n x_tr_t = np.transpose(x_mat)\n\n # graph projection para\n h_low = int(len(x0) / 2)\n h_high = int(h_low * (1. + gamma))\n t_low = int(s)\n t_high = int(s * (1. + gamma))\n\n (n, p) = x_mat.shape\n # if block size is larger than n,\n # just treat it as a single block (batch)\n b = n if n < b else b\n num_blocks = int(n) / int(b)\n prob = [1. / num_blocks] * num_blocks\n\n num_epochs = 0\n\n for epoch_i in range(max_epochs):\n num_epochs += 1\n for _ in range(num_blocks):\n ii = np.random.randint(0, num_blocks)\n block = range(b * ii, b * (ii + 1))\n xtx = np.dot(x_tr_t[:, block], x_mat[block])\n xty = np.dot(x_tr_t[:, block], y_tr[block])\n gradient = -2. * (xty - np.dot(xtx, x_hat))\n head_nodes, proj_grad = algo_head_tail_bisearch(\n edges, gradient, costs, g, root, h_low, h_high,\n proj_max_num_iter, verbose)\n bt = x_hat - (lr / (prob[ii] * num_blocks)) * proj_grad\n tail_nodes, proj_bt = algo_head_tail_bisearch(\n edges, bt, costs, g, root,\n t_low, t_high, proj_max_num_iter, verbose)\n x_hat = proj_bt\n if np.linalg.norm(x_hat) >= 1e3: # diverge cases.\n break\n if np.linalg.norm(y_tr - np.dot(x_mat, x_hat)) <= tol_algo:\n break\n x_err = np.linalg.norm(x_hat - x_star)\n run_time = time.time() - start_time\n return x_err, num_epochs, run_time", "def greedy_MAP_assignment(theta,random_runs = 10,heur = 'first'):\r\n N = theta.shape[0]\r\n scipy.random.seed()\r\n max_p = -scipy.inf\r\n for k in range(random_runs):\r\n A = scipy.random.randint(2,size = N)\r\n improved = True\r\n p = A.dot( theta.dot(A) )\r\n while improved:\r\n improved = False\r\n if heur == 'first':\r\n p2 = -scipy.inf\r\n perm = scipy.random.permutation(N)\r\n for s in perm:\r\n #dp: change in p if A[i] bit is reversed\r\n dp = (1-2*A[s])*( A.dot(theta[s,:]+ theta[:,s]) ) + theta[s,s]\r\n if dp>0:\r\n p2 = dp\r\n break\r\n\r\n if heur == 'best':\r\n dp = (1-2*A)*( A.dot(theta + theta.T) ) + scipy.diag(theta)\r\n p2,s = dp.max(), dp.argmax()\r\n if p2 > 0:\r\n A[s] = 1-A[s]\r\n improved = True\r\n p += p2\r\n if p>max_p:\r\n greedy_A,max_p = A.copy(),p\r\n return greedy_A.astype(int),max_p", "def main():\n\n # initialize a random 3x3 TileGame problem\n tg = TileGame(3)\n # print(TileGame.board_to_pretty_string(tg.get_start_state()))\n # compute path using dfs\n path1 = id_astar(tg, tilegame_heuristic)\n path = ids(tg)\n print(tg.get_start_state())\n # display path\n print('ids')\n # TileGame.print_pretty_path(path)\n print('astar')\n TileGame.print_pretty_path(path1)\n print((time.time() - start_time))\n\n # initialize a small DGraph\n small_dgraph = DGraph([[None, 1], [1, None]], {1})\n # print the path using ids\n # print(ids(small_dgraph))", "def test_tree_search(self) -> None:\n class DumbModel(GymNet):\n count: int = 0\n\n def initial_inference(self, observations: np.ndarray) -> typing.Tuple[np.ndarray, np.ndarray, float]:\n s, pi, v = super().initial_inference(observations)\n self.count += 1\n return np.ones_like(s) * self.count, np.array([6/8 - 1e-8, 2/8 + 1e-8]), 0\n\n def recurrent_inference(self, latent_state: np.ndarray, 
action: int) -> typing.Tuple[float, np.ndarray]:\n r, s, pi, v = super().recurrent_inference(latent_state, action)\n self.count += 1\n return 0, np.ones_like(latent_state) * self.count, np.array([6/8 - 1e-8, 2/8 + 1e-8]), action\n\n memory_net = self.net\n memory_search = self.mcts\n\n # Swap class variables\n self.net = DumbModel(self.g, self.config.net_args)\n self.mcts = MuZeroMCTS(self.g, self.net, self.config.args)\n\n # No discounting and no exploration to ensure deterministic behaviour.\n self.config.args.gamma = 1\n self.config.args.exploration_fraction = 0\n\n # Experiment 1\n self.config.args.numMCTSSims = 4\n pi_1, v_1 = self.mcts.runMCTS(np.zeros(4), np.ones(2))\n np.testing.assert_array_almost_equal(pi_1, [1/2, 1/2])\n np.testing.assert_almost_equal(v_1, 1/4)\n self.mcts.clear_tree()\n\n # Experiment 2\n self.config.args.numMCTSSims = 8\n pi_2, v_2 = self.mcts.runMCTS(np.zeros(4), np.ones(2))\n np.testing.assert_array_almost_equal(pi_2, [5/8, 3/8])\n np.testing.assert_almost_equal(v_2, 1/4)\n self.mcts.clear_tree()\n\n # Undo class variables swap\n self.net = memory_net\n self.mcts = memory_search", "def simulation():\n # initialize action set\n action_set = np.zeros(int((s.MAX_INSPECT - s.MIN_INSPECT) / s.DELTA) + 3)\n x, i = s.MIN_INSPECT, 1\n while x <= s.MAX_INSPECT:\n action_set[i] = x\n x += s.DELTA\n i += 1\n action_set[-1] = np.inf\n action_number = len(action_set)\n\n # initialize current state\n current_state = math.floor(np.random.rand(1) * s.NUM_STATES)\n\n # initialize action index\n if current_state == 0:\n action_index = 0\n elif current_state == s.NUM_STATES - 1:\n action_index = action_number - 1\n\n if current_state != 0 and current_state != s.NUM_STATES - 1:\n action_index = action_number - 2\n\n # initialize policy set\n greedy_policy = np.zeros(s.NUM_STATES)\n greedy_policy[-1] = np.inf\n for i in range(1, s.NUM_STATES - 1):\n greedy_policy[i] = s.MAX_INSPECT\n\n visit_times = np.zeros([s.NUM_STATES, action_number])\n\n # initialization for simulation\n falpha, Aalpha, delay_T, uni_parameter = equivalent_markov(greedy_policy)\n stable_prob, potential = stable_potential(falpha, Aalpha, uni_parameter)\n last_value = falpha + np.matmul(Aalpha, potential)\n dis_value = last_value\n # ave_vector = np.matmul(stable_prob, falpha)\n # ave_estimate = ave_vector.tolist()\n each_transit_cost, each_transit_time, total_reward = (0 for i in range(3))\n\n # initialize DQN model if selected\n dqn = DQN() if MODEL == 1 else None\n # initialize Q-table if Q-learning selected\n q_factor = ql.init_q_factor(action_number) if MODEL == 2 else None\n\n for out_step in range(s.EPOCH):\n epsilon = s.EPSILON_1 if MODEL == 1 else s.EPSILON_2\n\n for inner_step in range(s.EPOCH_LEARN):\n\n visit_times[current_state, action_index] += 1\n current_action = greedy_policy[current_state]\n\n inspect_cost = 0 if current_state == s.NUM_STATES - 1 else s.K5 * current_action\n\n flag, sojourn_T, service_T, next_state = state_transition(current_state, current_action)\n each_transit_time = s.DISCOUNT * each_transit_time + (sojourn_T - each_transit_time) / pow(\n out_step * s.EPOCH_LEARN + (inner_step + 1), s.Q_AVE_STEP)\n end_sojourn_T = math.exp(- s.ALPHA * sojourn_T)\n end_serve_T = math.exp(- s.ALPHA * service_T)\n\n if s.ALPHA == 0:\n dis_T, dis_serve_T, dis_wait_T = sojourn_T, service_T, sojourn_T - service_T\n else:\n dis_T, dis_serve_T = (1 - end_sojourn_T) / s.ALPHA, (1 - end_serve_T) / s.ALPHA\n dis_wait_T = (end_serve_T - end_sojourn_T) / s.ALPHA\n\n if flag == 0: # no processing, 
waiting\n cost_real = (s.K1 * (s.NUM_STATES - current_state) + s.K3) * sojourn_T + inspect_cost\n cost_purt = (s.K1 * (s.NUM_STATES - current_state) + s.K3) * dis_T + inspect_cost\n else: # no waiting, processing\n cost_real = s.K1 * (s.NUM_STATES - current_state - 1) * sojourn_T + s.K2 * service_T + s.K3 * (\n sojourn_T - service_T) + s.K4 + inspect_cost\n cost_purt = s.K1 * (s.NUM_STATES - current_state - 1) * dis_T + s.K2 * dis_serve_T + s.K3 * dis_wait_T \\\n + s.K4 * end_serve_T + inspect_cost\n\n each_transit_cost = s.DISCOUNT * each_transit_cost + (cost_real - each_transit_cost) / (\n pow(out_step * s.EPOCH_LEARN + (inner_step + 1), s.Q_AVE_STEP))\n\n ave_q_cost = each_transit_cost / each_transit_time\n # ave_estimate.append(ave_q_cost)\n cost_dis = cost_purt - ave_q_cost * dis_T\n\n if MODEL == 1:\n reward = - cost_dis\n dqn.store_transition(current_state, action_index, reward, next_state)\n if dqn.memory_counter >= s.MEMORY_CAPACITY:\n dqn.learn(s.EPOCH_LEARN, inner_step, PS)\n else:\n difference = cost_dis + end_sojourn_T * min(q_factor[next_state, :]) \\\n - q_factor[current_state, action_index]\n q_factor = ql.update_q_factor(q_factor, current_state, action_index, difference,\n visit_times, inner_step, PS)\n current_state = next_state # transit to next state\n\n if current_state == 0:\n action_index = 0\n elif current_state == s.NUM_STATES - 1:\n action_index = action_number - 1\n else:\n if MODEL == 1:\n action_index = int(dqn.choose_action(current_state, epsilon))\n if action_set[action_index] <= 1:\n greedy_policy[current_state] = action_set[action_index]\n else:\n greedy_policy[current_state] = 1\n else:\n if np.random.rand(1) < epsilon:\n action_index = int(np.floor(np.random.rand(1) * (action_number - 2)) + 1)\n else:\n # minimal_q_value = np.min(q_factor[current_state, :])\n action_index = np.argmin(q_factor[current_state, :])\n greedy_policy[current_state] = action_set[action_index]\n\n # store the policy learned from the iterations\n optimal_policy = greedy_policy\n\n if MODEL != 1:\n for i in range(1, s.NUM_STATES - 1):\n # minimal_q_value_temp = np.min(q_factor[i, :])\n action_index_temp = np.argmin(q_factor[i, :])\n optimal_policy[i] = action_set[action_index_temp]\n\n falpha, Aalpha, delay_T, uni_parameter = equivalent_markov(optimal_policy)\n stable_prob, potential = stable_potential(falpha, Aalpha, uni_parameter)\n\n last_value = falpha + np.matmul(Aalpha, potential)\n dis_value = np.concatenate((dis_value, last_value), axis=1)\n total_reward += - np.ndarray.item(last_value[0])\n # new_ave_cost = np.matmul(stable_prob, falpha)\n # ave_vector = np.concatenate((ave_vector, new_ave_cost))\n print(\"epoch: {} , the epoch reward is {}\".format(out_step, round(- np.ndarray.item(last_value[0]), 2)))\n\n # result = np.asarray(dis_value)\n print(\"total reward:\", total_reward)\n\n return dis_value, total_reward", "def define_graph(glove_embeddings_arr):\n\n input_data = tf.placeholder(dtype=tf.int32,shape=[batch_size,review_word_limit],name=\"input_data\")\n labels = tf.placeholder(dtype=tf.float32,shape=[batch_size,2],name=\"labels\")\n \n dropout_keep_prob = tf.placeholder_with_default(0.85, shape=())\n\n weights = tf.Variable(tf.truncated_normal([lstm_cell_count,sentiment_classes]))\n bias = tf.Variable(tf.constant(.0,dtype=tf.float32,shape=[sentiment_classes]))\n\n\n #Embed the input words\n emb = tf.convert_to_tensor(glove_embeddings_arr,dtype=tf.float32)\n data = tf.Variable(tf.zeros([batch_size,review_word_limit,embedding_length],dtype=tf.float32))\n data = 
tf.nn.embedding_lookup(emb,input_data)\n\n # Define Network\n lstm = tf.contrib.rnn.BasicLSTMCell(lstm_cell_count,)\n lstm = tf.contrib.rnn.DropoutWrapper(cell = lstm, output_keep_prob = dropout_keep_prob)\n\n lstm_out , lstm_last_state = tf.nn.dynamic_rnn(lstm,data,dtype=tf.float32)\n lstm_out = tf.transpose(lstm_out,[1,0,2])\n lstm_out = tf.gather(lstm_out,int(lstm_out.get_shape()[0])-1)\n prediction = (tf.matmul(lstm_out,weights)+bias)\n\n #Define loss function\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction,labels=labels))\n\n #Define accuracy\n accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(prediction,1),tf.argmax(labels,1)),dtype=tf.float32))\n\n #Define optimizer\n optimizer = tf.train.AdamOptimizer().minimize(loss)\n\n return input_data, labels, dropout_keep_prob, optimizer, accuracy, loss" ]
[ "0.59803855", "0.58579344", "0.5386858", "0.53660476", "0.53021395", "0.5288456", "0.5247692", "0.52213484", "0.5216367", "0.5207188", "0.5196385", "0.5184978", "0.5155157", "0.5146504", "0.51292765", "0.5090521", "0.50815225", "0.50664717", "0.50661576", "0.5053628", "0.5053031", "0.504781", "0.50395966", "0.50311416", "0.50293773", "0.5025697", "0.5020261", "0.5014261", "0.50109935", "0.5001465", "0.4986643", "0.49819162", "0.49813396", "0.49729127", "0.4971007", "0.49639714", "0.49635893", "0.4961343", "0.49584496", "0.4951228", "0.4944284", "0.4942882", "0.49426484", "0.49369943", "0.49358323", "0.49270827", "0.4925096", "0.49121013", "0.49102944", "0.4904828", "0.49023548", "0.49019474", "0.49015188", "0.48968354", "0.4884258", "0.48818785", "0.48769838", "0.4876641", "0.48733336", "0.48722285", "0.48561588", "0.48559436", "0.48531806", "0.48511738", "0.48507407", "0.48500508", "0.48491734", "0.48486966", "0.4847689", "0.48436755", "0.48427042", "0.48331523", "0.48329744", "0.4828167", "0.48275223", "0.48273954", "0.48272437", "0.48271814", "0.48268795", "0.4824404", "0.4821251", "0.48085195", "0.48059976", "0.47990903", "0.4798409", "0.47833347", "0.47788575", "0.47781724", "0.47736767", "0.47711506", "0.47626278", "0.47602323", "0.47514117", "0.47493815", "0.47487602", "0.47464943", "0.47452974", "0.474348", "0.47433555", "0.47412434" ]
0.49008697
53
Removes ending suffix that was used to distinguish between nodes with the same name but different category.
def post_process_result(self, result: np.ndarray) -> np.ndarray: to_cut = len("_tag") return np.asarray([[tag[:-to_cut] for tag in list_of_tags] for list_of_tags in result])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def removesuffix(self, x) -> String:\n pass", "def _suffix(self) -> str:\n return \"\"", "def strip_suffix(s, suffixes):\n for suffix in suffixes:\n if s.endswith(suffix):\n return s.rstrip(suffix)\n return s", "def remove_suffix(x, suffix=\" \"):\n if x.endswith(suffix):\n x = x[: -len(suffix)]\n return x", "def force_suffix(fname, suffix):\r\n head, tail = pp.split(fname)\r\n if len(tail) == 0:\r\n return head\r\n if suffix[0] == \".\":\r\n suffix = suffix[1:]\r\n fpart, fext = pp.splitext(tail)\r\n newp = pp.join(head, fpart + \".\" + suffix)\r\n return pp.normpath(newp)", "def get_cleaned_suffix(cleaning_protocol, sample, config):\n cleaned_suffix = QC_PROTOCOLS.get(cleaning_protocol, \"\")\n num_raw_files = len(config[\"sample_data\"][sample][\"raw\"])\n\n # special cases\n if cleaning_protocol not in JOINING_PROTOCOLS and num_raw_files == 1:\n # if cleaning one file for assembly, drop interleave\n cleaned_suffix = re.sub(r\"interleaved\", \"\", cleaned_suffix)\n if cleaning_protocol == \"None\" and num_raw_files == 2:\n # if not cleaning, but two files given, interleave them\n cleaned_suffix = \"interleaved\"\n if cleaning_protocol in JOINING_PROTOCOLS:\n # prepend trimmomatic params\n chemistry, barcodes = get_chemistry_barcodes(sample, config)\n barcodes = \".\".join(barcodes)\n cleaned_suffix = \".\".join([chemistry, barcodes]) + \".\" + cleaned_suffix\n\n # make sure it ends in a dot\n cleaned_suffix += \".\"\n\n # remove dupmlicate dots left over from above tweaks\n cleaned_suffix = re.sub(r\"\\.\\.\", \".\", cleaned_suffix)\n\n return cleaned_suffix", "def remove_suffix(input_string, suffix):\n if suffix and input_string.endswith(suffix):\n return input_string[:-len(suffix)]\n return input_string", "def cleanup(name):\n cleaned_name = name.rstrip(\".\")\n return cleaned_name", "def strip_optional_suffix(string, suffix):\n if string.endswith(suffix):\n string = string[:-len(suffix)]\n return string", "def remove_ending(self, value: str, ending: str):\n length = len(ending)\n if len(value) < length: return value\n\n if value[-1*length:].lower() == ending:\n return value[:-1*length]\n else:\n return value", "def stripSuffix(suffix, string):\n\n if string.endswith(suffix):\n return string[:-len(suffix)]\n\n return string", "def fix_suffix(path):\n if path[-2:] == \"/*\":\n cleaned_path = path\n else:\n cleaned_path = os.path.join(path, \"*\")\n spooq_logger.debug(\"fix_suffix: input: {inp}, output: {outp}\".format(inp=path, outp=cleaned_path))\n return cleaned_path", "def strip_suffix(string, suffix):\n assert string.endswith(suffix), \"{!r} is not a suffix of {!r}\".format(suffix, string)\n return string[:-len(suffix)]", "def _strip_suffix(suffix, path_parts, rev, pathtype, repos, view_func):\n if not path_parts:\n return None\n suffix_len = len(suffix)\n if path_parts[-1][-suffix_len:] == suffix:\n path_parts = path_parts[:]\n if len(path_parts[-1]) == suffix_len:\n del path_parts[-1]\n else:\n path_parts[-1] = path_parts[-1][:-suffix_len]\n t = _repos_pathtype(repos, path_parts, rev)\n if pathtype == t:\n return path_parts, t, view_func\n return None", "def StripSuffix(string, suffix):\n assert string.endswith(suffix)\n return string[:-len(suffix)]", "def get_feature_suffix(feature_name: str) -> str:\n if \"_\" not in feature_name:\n return \"\"\n return feature_name.split(\"_\")[-1]", "def remove_last_part_of_url(category_url):\n return \"/\".join(category_url.split(\"/\")[:-1])", "def _DisableSuffixIsRelevant(suffix: str, removal_type: str) -> bool:\n if suffix == 
FINDER_COMMENT_SUFFIX_GENERAL:\n return True\n if suffix == removal_type:\n return True\n return False", "def ending_cutter(name: str):\n if name.endswith('ID') and re.match(r'^(?=\\w+[A-Z])(?=\\w+[a-z])\\w+$', name):\n return name[:-2]\n return name", "def get_suffix_ml_model():\n suffix = ''\n \n # consider if the model uses tail or not\n if gml.USE_TAIL: \n suffix += '_use_tail'\n else: \n suffix += '_no_tail'\n\n # consider the way of picking target variable for the model\n if gml.WAY_MODEL_TGT == 'absolute':\n suffix += '_absolute'\n elif gml.WAY_MODEL_TGT == 'relative':\n suffix += '_relative'\n else: \n exit('error on the function that gets suffix')\n\n return suffix", "def getInputFileBasenameNoSuffix():\n\n inputFileBasename = getInputFileBasename()\n basenameRemovedSuffix = removeSuffix(inputFileBasename)\n return basenameRemovedSuffix", "def _no_comp_suffix(s):\n return re.sub('__(eq|ne|gt|lt|ge|le)$', '', s)", "def replace_suffix (name, new_suffix):\n assert isinstance(name, basestring)\n assert isinstance(new_suffix, basestring)\n split = os.path.splitext (name)\n return split [0] + new_suffix", "def StripOptionalSuffix(string, suffix):\n if string.endswith(suffix):\n string = string[:-len(suffix)]\n return string", "def suffix(self):\n return self[\"suffix\"]", "def suffix(self):\n return self[\"suffix\"]", "def _replace_suffix(self, word, suffix, replacement):\n assert word.endswith(suffix), \"Given word doesn't end with given suffix\"\n if suffix == \"\":\n return word + replacement\n else:\n return word[: -len(suffix)] + replacement", "def suffix(self):\n return self._suffix", "def suffix(self):\n return self._suffix", "def suffix(self):\n return self._suffix", "def _strip_identifier_name(self, identifier: str) -> str:\n return identifier.split(\"/\")[-1]", "def RemoveSuffixFromWebkitLayoutTestName(test_name):\n match = _LAYOUT_TEST_NAME_PATTERN.match(test_name)\n if match:\n return match.group(1)\n\n return test_name", "def _identifier_suffix(self):\r\n return ''", "def returnPrefixSuffix(self):\n\n prefix = None\n suffix = None\n\n networkNode = self.returnNetworkNode\n baseName = cmds.getAttr(networkNode + \".baseName\")\n splitName = self.name.split(baseName)\n if splitName[0] != '':\n prefix = splitName[0]\n if splitName[1] != '':\n suffix = splitName[1]\n return [prefix, suffix]", "def suffix ( self ) :\n return self.__suffix", "def suffix ( self ) :\n return self.__suffix", "def remove_extra(name):\n return re.sub(r\"-[\\S\\s]*\", \"\", re.sub(r\"\\([\\w\\W]*\\)\", \"\", name))", "def normalize_name(self) -> str:\n name = self.path.name\n server_names = ContentType.server_names()\n for _ in range(2):\n # we iterate twice to handle cases of doubled prefixes like `classifier-mapper-`\n for prefix in server_names:\n try:\n name = name.removeprefix(f\"{prefix}-\") # type: ignore[attr-defined]\n except AttributeError:\n # not supported in python 3.8\n name = (\n name[len(prefix) + 1 :]\n if name.startswith(f\"{prefix}-\")\n else name\n )\n normalized = f\"{self.content_type.server_name}-{name}\"\n logger.debug(f\"Normalized file name from {name} to {normalized}\")\n return normalized", "def category_part(self) -> str:\n if not self.is_old_style:\n raise ValueError('New identifiers have no category semantics')\n return self.split('/')[0]", "def suffix_replace(original, old, new):\n ...", "def strip_any_ends(s: str, prefixes: Union[str, Sequence[str]], suffixes: Union[str, Sequence[str]]) -> str:\n\t\tprefixes = [str(z) for z in prefixes] if 
StringTools.is_true_iterable(prefixes) else [str(prefixes)]\n\t\tsuffixes = [str(z) for z in suffixes] if StringTools.is_true_iterable(suffixes) else [str(suffixes)]\n\t\ts = str(s)\n\t\tfor pre in prefixes:\n\t\t\tif s.startswith(pre):\n\t\t\t\ts = s[len(pre):]\n\t\tfor suf in suffixes:\n\t\t\tif s.endswith(suf):\n\t\t\t\ts = s[:-len(suf)]\n\t\treturn s", "def normalize_suffix_1(string, logger_=_LOGGER):\n numbers_end_string_regex = r\"(\\d+$)\"\n count_regex = r\"(_\\d+_\\D+)\"\n match = re.search(numbers_end_string_regex, string)\n # If we find a number in the suffix of the string we delete it. And\n # generate the correct count and put in the correct place in the string.\n if match:\n logger.log(\n level=\"warning\",\n message='Suffix of string \"'\n + string\n + '\" should not have a number. Numbers removed from the suffix',\n logger=logger_,\n )\n instance = match.groups()[0]\n string = re.sub(numbers_end_string_regex, \"\", string)\n count_match = re.search(count_regex, string)\n instance_ = count_match.groups()[0]\n count_list = [str_ for str_ in instance_.split('_') if str_]\n new_count = int(count_list[0]) + int(instance)\n new_count = '_{}_{}'.format(new_count, count_list[1])\n string = string.replace(instance_, new_count)\n return string", "def strip_end(h, s):\n if h.endswith(s):\n h = h[:-len(s)]\n return h", "def remove_suffix(image_url):\n image_url = image_url.removeprefix('../../')\n real_image_url = \"https://books.toscrape.com/\" + image_url\n return real_image_url", "def _suffix(self) -> str:\n return f\"!! Expected: {self._expected!r}\"", "def trim_name(self):\n if len(self.name) > 10:\n return self.name[0:5] + \"...\" + self.name[-5:]\n\n return self.name", "def name_sans_ext(self) -> str:\n return os.path.splitext(self.path)[0]", "def clean_song_name(self, songname):\n # Reverse-sort the music_dirs list by string length, as if one \n # music_dir is a subset of the other (e.g. 
\"/music\" and \"/music/jazz\"),\n # we could end up cutting off too little\n for md in sorted(self.music_dirs, key=len, reverse=True):\n if songname.find(md) == 0:\n songname = songname.replace(md, \"\")\n break # shouldn't need to do any more replacements\n return songname", "def _parse_suffix(i, doc):\n\n i, = parse_pattern(i, doc, \"// <<< end of configuration section >>>\")\n i, = parse_pattern(i, doc, \"#endif\")\n\n return parse_blanks(i, doc)", "def is_suffix(suffix: str, word: str):\n return word.endswith(suffix)", "def _last_name(self, full_name):\n name_partition = full_name.partition(u',')\n no_suffix = name_partition[0].strip()\n suffix = name_partition[2].strip()\n name_parts = no_suffix.split()\n part_count = len(name_parts)\n if part_count == 1 or part_count == 2:\n return name_parts[-1], suffix\n else:\n assert part_count > 2\n if name_parts[-2].islower():\n return u' '.join(name_parts[-2:]), suffix\n else:\n return name_parts[-1], suffix", "def suffix(self) -> typing.Optional[str]:\n return self._values.get('suffix')", "def strip_suffix(value:str, suffixes:Iterable[str]) -> Tuple[str, bool]:\n for suffix in suffixes:\n if value.endswith(suffix):\n return value[:len(value) - len(suffix)], True\n return value, False", "def remove_suffix(filenames, suffix):\n\n new_filenames = set([])\n len_suffix = len(suffix) + 1 # add one for the \".\"\n # loop over the list of files and remove the suffix\n for name in filenames:\n name = name[:-len_suffix]\n new_filenames.add(name)\n \n return new_filenames", "def get_suffix(cls, raw_disable: RawDisable) -> str:\n variations = raw_disable.parent_test.variations\n\n maybe_variation_node = raw_disable.node.find(f'.//{cls.VARIATION_TAG}')\n if maybe_variation_node is None:\n return ''\n\n variation = maybe_variation_node.text\n if variation not in variations:\n raise DisableNodeProcessingException(f'could not find {variation!r} in defined variations; skipping node')\n\n idx = variations.index(variation)\n suffix = f'_{idx}'\n return suffix", "def strip_extension(filename):\n return filename.rsplit('.',1)[-2]", "def common_path_suffix(p1, p2):\n return common_segments(p1, p2, common_func=common_suffix)", "def mutate_suffix(path, board):\n x, y = get_start(board)\n path_new = get_path_same_prefix(path, board)\n while not is_path_correct(x, y, path_new, board):\n path_new = get_path_same_prefix(path, board)\n return remove_constant_points(path_new)", "def filter_filename(self, fname):\r\n return os.path.basename(fname)", "def suffix_diff(str1, str2):\n len1 = len(str1)\n len2 = len(str2)\n return str1[len2:] if len(str1) > len(str2) else str2[len1:]", "def replsuffix(files, suffix):\n\toutfiles = []\n\tif suffix is None: return\n\tif type(files) is type(\"\"):\n\t\tfiles = [files]\n\tfor f in files:\n\t\tfname, ext = os.path.splitext(f)\n\t\tnewfname = fname + suffix\n\t\toutfiles.append(newfname)\n\treturn outfiles", "def _normalize_class_name(self, name):\n class_name = ''.join(\n word.capitalize()\n for word in re.sub('[^A-Za-z0-9]+', ' ', name).split()\n )\n\n if not class_name.endswith('Extension'):\n class_name += 'Extension'\n\n return class_name", "def fix_ending(x):\n x = strip_stoich_wrapper(x)\n x = re.sub(r'(?<=[a-zA-Z])\\-(?=[a-zA-Z]$)', ' ', x)\n return x", "def getFileSuffix(filename):\n fileSuffix = \"\"\n\n if filename:\n name, extension = os.path.splitext(filename)\n fileSuffix = extension # .mp3\n\n if fileSuffix:\n # remove leading dot/period\n fileSuffix = fileSuffix[1:] # mp3\n\n if fileSuffix:\n # remove ending newline 
or space\n fileSuffix = fileSuffix.strip()\n\n if fileSuffix:\n # convert JPg to jpg\n fileSuffix = fileSuffix.lower()\n\n return fileSuffix", "def scrub_underscore_suffix(filename):\n scrubbed = re.sub(r\"_[^_]+\\.\", \".\", filename)\n return scrubbed", "def _metric_name(self, suffix):\r\n return '{}.{}'.format(self.METRIC_NAME, suffix)", "def suffix():\r\n\r\n return _random.choice(\r\n [\r\n 'Sr.', 'Jr.', 'II', 'III', 'IV', 'V'\r\n ]\r\n )", "def add_suffix(in_image,\n suffix_str):\n bandnames = in_image.bandNames().map(lambda elem: ee.String(elem).toLowerCase().cat('_').cat(suffix_str))\n nb = bandnames.length()\n return in_image.select(ee.List.sequence(0, ee.Number(nb).subtract(1)), bandnames)", "def word_clean(self, word):\n word_ori = word\n if word not in self.vocab_list: # if the word is not in the vocabulary\n word = word.strip(\",.!?\") # delete punctuation, such as periods, commas\n for i in range(len(suffix_list)):\n (match, string) = rchop(word, suffix_list[i])\n if match:\n (_, suffix) = lchop(word_ori, string)\n return string, suffix\n\n (_, suffix) = lchop(word_ori, word)\n return word, suffix", "def fixExt(ext):\n if not ext.startswith(\".\"):\n return \".{}\".format(ext)\n return ext", "def _strip_class_name(name):\n name = _strip(name)\n if name.find('.') != -1:\n name = name.split('.')[len(name.split('.')) - 1]\n return name", "def rm_dot(item):\n if len(str(item).split('.')) > 2:\n return item.replace('.', '', 1)\n else:\n return item", "def id_ending_with(self, suffix):\n node = [(key, data) for key, data in self.traverse() if data['id'].endswith(suffix)]\n if node:\n return node[0]", "def _iendswith(string, suffix):\n return string.lower().endswith(suffix)", "def remove_middle_name(name):\n\n name_parts = name.split(\" \")\n for part in name_parts:\n if '.' in part:\n name_parts.remove(part)\n return \" \".join(name_parts)", "def normalize_suffix_0(string, logger_=_LOGGER):\n if re.search(\"_[A-Z]{1,}$\", string):\n return string\n numbers = re.search(\"[0-9]{1,}$\", string)\n if numbers:\n logger.log(\n level=\"warning\",\n message='Suffix of string \"'\n + string\n + '\" should not have a number. 
Numbers removed from the suffix',\n logger=logger_,\n )\n instance = numbers.group(0)\n string = string[0 : string.find(instance)]\n lower_case = re.search(\"_[a-z]{1,}$\", string)\n if lower_case:\n instance_ = lower_case.group(0)\n string = string[0 : string.find(instance_)] + instance_.upper()\n return string", "def transform_suffix(filenames, suffix_old, suffix_new):\n\n new_filenames = set([])\n len_suffix_old = len(suffix_old) + 1 # add one for the \".\"\n # loop over the list of files and remove the suffix\n for name in filenames:\n name = name[:-len_suffix_old]\n new_filenames.add(name + \".\" + suffix_new)\n \n return new_filenames", "def clear_ext(x):\r\n return os.path.splitext(os.path.basename(x))[0]", "def clean_protein_class_name(name):\n v = REGEX_SUB_PARENS.sub('', name).strip()\n return v", "def get_suffix(self):\n return '%s%d' % (self.disk.devletters(), self.get_index() + 1)", "def fix_filename(self):\n if not self.remove_path:\n return\n self.filename = re.sub(\".+\\/\", \".../\", self.filename)", "def removeNamespaceFromString(cls, influenceName):\n tokens = influenceName.split('|')\n result = ''\n\n for i, tokens in enumerate(tokens):\n if i > 0:\n result += '|'\n\n result += tokens.split(':')[-1]\n\n return result", "def remove_tld(domain):\n try:\n tld = extract(domain).suffix\n domain = ''.join(domain.rsplit(tld, 1)).strip('.')\n\n except Exception as e:\n LOGGING.warning(\n 'Error stripping TLD ({0}): {1}'.format(\n domain, str(e)))\n\n return domain", "def getSuffixPattern(self):\n return self.getOrDefault(\"suffixPattern\")", "def endswith(self, suffix):\n return self.joined().endswith(suffix)", "def _check_suffix(path: str) -> str:\n\n if path[-4:] == \".tsv\":\n return \"\\t\"\n if path[-4:] == \".csv\":\n return \",\"\n raise ValueError(\"File must be .csv or .tsv\")", "def suffix(self) -> Optional[URISuffix]:\n return self._suffix", "def _build_tag_suffix() -> str:\n now = datetime.datetime.now(tz=datetime.timezone.utc).astimezone()\n return now.strftime(\".%Y%m%d.0\")", "def FindSuffix(self):\n self.numSuffixes = 0\n self.forceStress = 0\n resultslist = []\n for f in self.suffixes.finditer(self.wd):\n resultslist.append((f.group(), f.start()))\n if not resultslist: return\n # make sure *end* of word is in list! otherwise, 'DESP erate'\n if resultslist[-1][1] + len(resultslist[-1][0]) < len(self.wd):\n return\n resultslist.reverse()\n for res in resultslist:\n # if no vowel left before, false suffix ('singing')\n # n.b.: will choke on 'quest' etc! put in dictionary, I guess\n if not sre.search('[aeiouy]', self.wd[:res[1]]): break\n if res[0] == 'ing' and self.wd[res[1]-1] == self.wd[res[1]-2]:\n self.sylBounds.append(res[1] - 1) # freq special case\n else: self.sylBounds.append(res[1]) # sorted later\n self.wd = self.wd[:res[1]]\n self.numSuffixes += 1\n if res[0] in STRESSSUFFIX:\n self.forceStress = 0 - len(self.sylBounds)\n if res[0] in MULTISUFFIX:\n # tricky bit! 
it *happens* that secondary division in all these\n # comes after its first character; NOT inevitable!\n # also does not allow for 3-syl: 'ically' (which are reliable!)\n self.sylBounds.append(res[1]+1)\n self.numSuffixes += 1", "def _strip_package_name(name):\n name = _strip(name)\n if name.find('.') != -1:\n name = name.split('.')[0]\n return name", "def modifyRootname(rootname,suffix=None):\n # This list of extensions includes:\n # '.fits','.hhh', and '.c0h'\n if not suffix: suffix = DEFAULT_ORIG_SUFFIX\n _extlist = fileutil.EXTLIST\n _indx = None\n # Start out by searching for known extensions...\n for _extn in _extlist:\n _indx = rootname.find(_extn)\n if _indx > 0: break\n # If we do not find a recognizable extension/suffix,\n # raise an Exception.\n if not _indx: raise ValueError,\"Rootname %s not recognized!\"%rootname\n\n return rootname[:_indx]+suffix+rootname[_indx:]", "def case_suffix(self, **case_kws):\n if callable(self.output_suffix):\n return self.output_suffix(**case_kws)\n else:\n return self.output_suffix.format(**case_kws)", "def remove_uploader(self,strz):\n\t\tfor elem in listz_uploader:\n\t\t\tif elem in strz:\n\t\t\t\tstrz = strz.replace(elem,'')\n\t\tnew_name = string.replace(strz,'..', '.')\n\t\treturn new_name", "def wemo_entity_suffix_fixture():\n return \"\"", "def _RemoveAllPrefixes(test):\n test_name_start = max(test.find('.'), 0)\n if test_name_start == 0:\n return test\n\n test_suite = test[: test_name_start]\n test_name = test[test_name_start + 1 :]\n pre_position = test_name.find(_PRE_TEST_PREFIX)\n while pre_position == 0:\n test_name = test_name[len(_PRE_TEST_PREFIX):]\n pre_position = test_name.find(_PRE_TEST_PREFIX)\n base_test = '%s.%s' % (test_suite, test_name)\n return base_test", "def suffix(string, suffix, sep = '_'):\n if suffix == 'production':\n suffixed = string\n else:\n suffixed = string + sep + suffix\n return suffixed", "def add_suffix(word, suffix):\n suffix, sep, rest = suffix.partition(' ')\n expanded = _add_suffix(word, suffix)\n return expanded + sep + rest", "def endswith(self, suffix, start=0, end=None):\n return endswith(self, suffix, start, end)", "def feature(root, suffix):\r\n if suffix == '$':\r\n return ('$', suffix)\r\n return (root[-1], suffix[0])", "def slug(self) -> str:\n return self.__class__.__module__.rsplit(\".\", maxsplit=1)[-1]", "def suffix(rem):\n if rem == 0:\n suf = ''\n else:\n if rem <= 600: #Class A suffix -- only letters.\n rem = rem - 1\n suf = base34[rem // 25]\n if rem % 25 > 0:\n suf = suf + base34[rem % 25 - 1]# second class A letter, if present.\n else: #rems > 600 : First digit of suffix is a number. Second digit may be blank, letter, or number.\n rem = rem - 601\n suf = base10[rem // 35]\n if rem % 35 > 0:\n suf = suf + base34[rem % 35 - 1]\n return suf" ]
[ "0.6892483", "0.672639", "0.66920334", "0.66891456", "0.6561912", "0.6489998", "0.6470396", "0.6446932", "0.64206946", "0.6388689", "0.6340012", "0.63353306", "0.6307654", "0.6175348", "0.61530787", "0.6142381", "0.6139506", "0.611326", "0.60891044", "0.5997538", "0.59363437", "0.5922498", "0.5911686", "0.59068847", "0.5904598", "0.5904598", "0.58229405", "0.57965505", "0.57965505", "0.57965505", "0.57615453", "0.5754485", "0.5744042", "0.57329464", "0.5702653", "0.5702653", "0.5685203", "0.56605625", "0.5655242", "0.5637693", "0.5634435", "0.56189036", "0.56122005", "0.5604343", "0.55828655", "0.5572123", "0.5560384", "0.5558831", "0.5554857", "0.5541123", "0.5535956", "0.55082864", "0.55065227", "0.5493433", "0.54823416", "0.5474783", "0.54641914", "0.5461326", "0.54603547", "0.5459397", "0.5459126", "0.54575795", "0.54482526", "0.5445961", "0.54227495", "0.541393", "0.54070115", "0.5401362", "0.54001576", "0.5399295", "0.53976005", "0.5392857", "0.53927046", "0.53884745", "0.53842884", "0.5375053", "0.5368934", "0.53668195", "0.53581554", "0.5357877", "0.53488433", "0.53471524", "0.5346112", "0.5345303", "0.5339919", "0.5338503", "0.53284645", "0.53270066", "0.53239256", "0.531569", "0.5315247", "0.53057426", "0.52992016", "0.52870864", "0.5282669", "0.5273037", "0.5266691", "0.52664953", "0.5262012", "0.5258544", "0.525708" ]
0.0
-1
activates mujoco with license at `file_path` this does not check the return code, per usage example at simulate.cpp and test.cpp.
def register_license(file_path): result = mjlib.mj_activate(file_path) return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def upload_license(self):\n param = self.module.params[\"param\"]\n license_file_path = param['license_file_path']\n if license_file_path and os.access(license_file_path, os.F_OK) and os.access(license_file_path, os.R_OK):\n self.client.upload_license(license_file_path)\n self.module.exit_json(msg=\"Import license file Success.\", changed=True, status='success')\n else:\n self.module.fail_json(msg=\"Import license file Fail.Please add 'hw_license_file_path' \"\n \"and make sure it can be read.\",\n changed=True, status='fail')", "def activate_license(self):\n response = self.client.activate_license()\n if str(response[\"result\"][\"code\"]) == \"0\" and str(response[\"data\"][\"LicenseActiveResult\"]) == \"0\":\n self.module.exit_json(msg=\"Activate license file Success.\", changed=True, status='success')\n else:\n self.module.fail_json(msg=\"Activate license file fail.{0}\".format(response['result']['description']),\n status='fail', changed=False)", "def license(*args, borrow: bool=True, info: bool=True, isBorrowed: bool=True, isExported:\n bool=True, isTrial: bool=True, licenseMethod: bool=True, productChoice: bool=True,\n r: bool=True, showBorrowInfo: bool=True, showProductInfoDialog: bool=True, status:\n bool=True, usage: bool=True, **kwargs)->AnyStr:\n pass", "def query_active_license(self):\n response = self.client.query_active_license()\n if str(response[\"result\"][\"code\"]) == \"0\":\n if str(response[\"data\"][\"FileExist\"]) == \"0\":\n self.module.exit_json(msg=\"License file exists.\", changed=True, status='success')\n else:\n self.module.fail_json(msg=\"License file not exists.You should add the License file first.Your License \"\n \"Serial No is: {0}\".format(response[\"data\"][\"LicenseSerialNo\"]),\n changed=False, status='fail')\n else:\n self.module.fail_json(msg=\"Query active licenses in batches has an error.\"\n \"{0}\".format(response['result']['description']),\n status='fail', changed=False)", "def show_license(ctx, param, value):\n if not value or ctx.resilient_parsing:\n return\n click.echo(lic)\n ctx.exit()", "def checkoutlicense(self,feature_):\n res = __library__.MSK_XX_checkoutlicense(self.__nativep,feature_)\n if res != 0:\n raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])", "def create_license(self) -> None:\n # copy the license file from the template to the package folder\n # option : append other license files\n shutil.copy(CONFIG.template_path / \"LICENSE.md\", self.package_path)", "def license(p):\n # Input file\n f = '/'.join([p, 'collector.stats'])\n check_path(f)\n\n # Open file with universal newline support\n with open(f, 'rU') as fh:\n for line in fh.readlines():\n if 'License key' in line:\n license = line.split(':')[1].strip()\n break\n\n return license", "def hacking_has_license(physical_line, filename, lines, line_number):\n # don't work about init files for now\n # TODO(sdague): enforce license in init file if it's not empty of content\n license_found = False\n\n # skip files that are < 10 lines, which isn't enough for a license to fit\n # this allows us to handle empty files, as well as not fail on the Okay\n # doctests.\n if _project_is_apache() and not line_number > 1 and len(lines) > 10:\n for idx, line in enumerate(lines):\n # if it's more than 10 characters in, it's probably not in the\n # header\n if 0 < line.find('Licensed under the Apache License') < 10:\n license_found = True\n if not license_found:\n return (0, \"H102: Apache 2.0 license header not found\")", "def update_frozen_license() -> int:\n srcpath = 
Path(\"doc/src/license.rst\")\n dstpath = Path(\"cx_Freeze/initscripts/frozen_application_license.txt\")\n try:\n content = srcpath.read_text(encoding=\"utf-8\")\n except OSError:\n print(ERROR1, file=sys.stderr)\n return 1\n content = FROZEN_HEADER + \"\\n\".join(content.splitlines()[1:]) + \"\\n\"\n try:\n dstpath.write_text(content, encoding=\"utf-8\")\n print(dstpath, \"ok\")\n except OSError as io_error:\n print(ERROR2, f\"({io_error}).\", file=sys.stderr)\n return 1\n return 0", "def set_pkg_license_from_file(self, doc, lic):\n self.assert_package_exists()\n if validations.validate_lics_from_file(lic):\n doc.package.licenses_from_files.append(lic)\n return True\n else:\n raise SPDXValueError('Package::LicensesFromFile')", "def show_license(license):\n if not os.path.isfile(license):\n sys.stderr.write(\"Error: %r. Not exist such license file.\\n\\\nThe data license has to be there before of continue.\\n\" % license)\n sys.exit(1)\n\n try:\n read_f = open(license)\n except IOError, err:\n sys.stderr.write(\"Error: %r. %s.\\n\" % (err.filename, err.strerror))\n sys.exit(1)\n\n print\n print ('=' * 78)\n for line in read_f:\n print line.rstrip()\n read_f.close()\n print ('=' * 78)\n print \"\\nBy writing 'yes' I am affirmatively declaring that\"\n print \"I have read, understand and agree to the license above.\"\n\n try:\n answer = raw_input('Do you accept the license? ')\n if answer.lower() != 'yes':\n sys.exit(0)\n except KeyboardInterrupt:\n print\n sys.exit(0)\n\n print", "def putlicensepath(self,licensepath_):\n if isinstance(licensepath_,unicode):\n licensepath_ = licensepath_.encode(\"utf-8\",errors=\"replace\")\n res = __library__.MSK_XX_putlicensepath(self.__nativep,licensepath_)\n if res != 0:\n raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])", "def licensify(command_line_args):\n with open(command_line_args.license) as fp:\n license_header = fp.read()\n files = [\n path.join(dirname, f)\n for dirname, _, filenames in walk(command_line_args.directory)\n for f in fnmatch.filter(filenames, command_line_args.files)\n if not (command_line_args.exclude and fnmatch.fnmatch(f, command_line_args.exclude))\n ]\n try:\n result = apply_license_header(\n license_header, files,\n command_line_args.check, command_line_args.dry_run or command_line_args.check\n )\n except LicensesOutOfDateError as error:\n stdout.write(repr(error))\n exit(1)\n if result:\n message = 'The following files have been changed: {}'.format(', '.join(result))\n else:\n message = 'No files changed'\n stdout.write(message + linesep)", "def set_file_license_in_file(self, doc, lic):\n if self.has_package(doc) and self.has_file(doc):\n if validations.validate_file_lics_in_file(lic):\n self.file(doc).add_lics(lic)\n return True\n else:\n raise SPDXValueError('File::LicenseInFile')\n else:\n raise OrderError('File::LicenseInFile')", "def checkinlicense(self,feature_):\n res = __library__.MSK_XX_checkinlicense(self.__nativep,feature_)\n if res != 0:\n raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])", "def putlicensepath(self,licensepath_): # 3\n res = self.__obj.putlicensepath(licensepath_)\n if res != 0:\n raise Error(rescode(res),\"\")", "def get_license():\n repo_fs()\n return LICENSE", "def hacking_has_correct_license(physical_line, filename, lines, line_number):\n # don't work about init files for now\n # skip files that are < 10 lines, which isn't enough for a license to fit\n # this allows us to handle empty files, as well as not fail on the Okay\n # doctests.\n if _project_is_apache() and not 
line_number > 1 and len(lines) > 10:\n for idx, line in enumerate(lines):\n # if it's more than 10 characters in, it's probably not in the\n # header\n if (0 < line.find('Licensed under the Apache License') < 10\n and not _check_for_exact_apache(idx, lines)):\n return (idx, \"H103: Header does not match Apache 2.0 \"\n \"License notice\")", "def checkinlicense(self,feature_): # 3\n if not isinstance(feature_,feature): raise TypeError(\"Argument feature has wrong type\")\n res = self.__obj.checkinlicense(feature_)\n if res != 0:\n raise Error(rescode(res),\"\")", "def set_file_license_comment(self, doc, text):\n if self.has_package(doc) and self.has_file(doc):\n if not self.file_license_comment_set:\n self.file_license_comment_set = True\n if validations.validate_file_lics_comment(text):\n self.file(doc).license_comment = str_from_text(text)\n else:\n raise SPDXValueError('File::LicenseComment')\n else:\n raise CardinalityError('File::LicenseComment')\n else:\n raise OrderError('File::LicenseComment')", "def checkoutlicense(self,feature_): # 3\n if not isinstance(feature_,feature): raise TypeError(\"Argument feature has wrong type\")\n res = self.__obj.checkoutlicense(feature_)\n if res != 0:\n raise Error(rescode(res),\"\")", "def accept_license():\r\n msg, status = \"\", True\r\n\r\n try:\r\n sleep(5)\r\n if g.platform == 'android':\r\n sleep(3)\r\n 'Click on license accept button'\r\n flag1 = ui_controls.button(get_obj_identifier('license_accept_btn'))\r\n \r\n \r\n\r\n status = False if not (flag1) else True\r\n else:\r\n \r\n 'Click on Agree button in EULA page for IOS'\r\n flag = ui_controls.button(get_obj_identifier('license_accept_btn'))\r\n status = flag\r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n\r\n return status, msg", "def update_license_file(data_dir):\n license_file = os.path.join(data_dir, LICENSE_FILENAME)\n temp_dir = tempfile.mkdtemp()\n gh_license_filename = os.path.join(temp_dir, LICENSE_FILENAME)\n try:\n _, headers = urlretrieve(LICENSE_URL, gh_license_filename)\n except IOError as e:\n # Python 2 uses the unhelpful IOError for this. 
Re-raise as the more\n # appropriate URLError.\n raise URLError(e.strerror)\n\n with open(gh_license_filename, \"rb\") as f:\n github_license = f.read()\n\n try:\n with open(license_file, \"rb\") as f:\n current_license = f.read()\n except (IOError, OSError):\n current_license = b\"\"\n\n github_digest = hashlib.sha256(github_license).hexdigest()\n current_digest = hashlib.sha256(current_license).hexdigest()\n\n if github_digest == current_digest:\n return False\n\n shutil.copyfile(gh_license_filename, license_file)\n shutil.rmtree(temp_dir, ignore_errors=True)\n return True", "def add_license(self, contents):\n buf_size = len(contents)\n buf = (ctypes.c_char * (buf_size + 1))(*contents.encode())\n\n res = self._dll.JLINK_EMU_AddLicense(buf)\n\n if res == -1:\n raise errors.JLinkException('Unspecified error.')\n elif res == -2:\n raise errors.JLinkException('Failed to read/write license area.')\n elif res == -3:\n raise errors.JLinkException('J-Link out of space.')\n\n return (res == 0)", "def set_concluded_license(self, doc, lic):\n if self.has_package(doc) and self.has_file(doc):\n if not self.file_conc_lics_set:\n self.file_conc_lics_set = True\n if validations.validate_lics_conc(lic):\n self.file(doc).conc_lics = lic\n return True\n else:\n raise SPDXValueError('File::ConcludedLicense')\n else:\n raise CardinalityError('File::ConcludedLicense')\n else:\n raise OrderError('File::ConcludedLicense')", "def get_license_text(self):\n\n if self.license_file:\n license_text = self.license_file.read_text(encoding=\"utf-8\")\n else:\n license_text = (\n \"Could not find foxBMS 2 license file.\\n\"\n f\"Please check {FOXBMS_LICENSE_FALLBACK_URL}.\"\n )\n self.license_file_missing_msg_box = wx.MessageBox(\n license_text, \"License file missing\", wx.OK | wx.ICON_WARNING\n )\n # self.Bind(wx.EVT_BUTTON, self.license_file_missing_msg_box)\n return license_text", "def test_license_policy_commands(runner, organization, tmp_path):\n\n # Generate the license policy configuration file.\n policy_name = random_str()\n\n policy_config_file_path = create_license_policy_config_file(\n directory=tmp_path,\n name=policy_name,\n description=random_str(),\n allow_unknown_licenses=random_bool(),\n on_violation_quarantine=random_bool(),\n package_query_string=\"format:python AND downloads:>50\",\n spdx_identifiers=[\"Apache-2.0\"],\n )\n\n # Create the license policy\n result = runner.invoke(\n create,\n args=[organization, str(policy_config_file_path)],\n catch_exceptions=False,\n )\n assert (\n \"Creating \" + policy_name + \" license policy for the cloudsmith namespace ...OK\"\n in result.output\n )\n slug_perm = assert_output_matches_policy_config(\n result.output, policy_config_file_path\n )\n\n # Use the cli to get the policy\n result = runner.invoke(ls, args=[organization], catch_exceptions=False)\n assert \"Getting license policies ... 
OK\" in result.output\n assert_output_matches_policy_config(result.output, policy_config_file_path)\n\n # Change the values in the config file\n policy_config_file_path = create_license_policy_config_file(\n directory=tmp_path,\n name=random_str(),\n description=random_str(),\n allow_unknown_licenses=random_bool(),\n on_violation_quarantine=random_bool(),\n package_query_string=\"format:go AND downloads:>15\",\n spdx_identifiers=[\"Apache-1.0\"],\n )\n\n # Use the cli to update the policy\n result = runner.invoke(\n update,\n args=[organization, slug_perm, str(policy_config_file_path)],\n catch_exceptions=False,\n )\n assert (\n \"Updating \" + slug_perm + \" license policy in the cloudsmith namespace ...OK\"\n in result.output\n )\n assert_output_matches_policy_config(result.output, policy_config_file_path)\n\n # Check that delete prompts for confirmation\n result = runner.invoke(\n delete, args=[organization, slug_perm], input=\"N\", catch_exceptions=False\n )\n assert (\n \"Are you absolutely certain you want to delete the \"\n + slug_perm\n + \" license policy from the cloudsmith namespace? [y/N]: N\"\n in result.output\n )\n assert \"OK, phew! Close call. :-)\" in result.output\n\n # Then actually delete it\n result = runner.invoke(\n delete, args=[organization, slug_perm], input=\"Y\", catch_exceptions=False\n )\n assert (\n \"Are you absolutely certain you want to delete the \"\n + slug_perm\n + \" license policy from the cloudsmith namespace? [y/N]: Y\"\n in result.output\n )\n assert (\n \"Deleting \" + slug_perm + \" from the cloudsmith namespace ... OK\"\n in result.output\n )", "def add_license(fitsfile, lic):\n try:\n hdulist = pyfits.open(fitsfile, mode=\"update\")\n except:\n print(\"Oops! Something's gone wrong :-(\", file=sys.stderr)\n else:\n prihdr = hdulist[0].header\n prihdr[\"LICENSE\"] = liclist[lic][\"name\"]\n prihdr[\"LICVER\"] = liclist[lic][\"ver\"]\n prihdr[\"LICURL\"] = liclist[lic][\"url\"]\n add_comments(prihdr)\n hdulist.close()", "def _check_for_license_acceptance(self, dep):\n if \"license\" in self.dependency_dict[dep]:\n license_name = self.dependency_dict[dep][\"license\"]\n else:\n license_name = \"restrictive\"\n if \"license_file\" in self.dependency_dict[dep]:\n license_text = Path(\n self.dependency_dict[dep][\"license_file\"]\n ).read_text()\n logger.warning(license_text)\n while \"invalid answer\":\n reply = (\n str(\n input(\n f\"Do you accept this {license_name} license? 
(y/n): \"\n )\n )\n .lower()\n .strip()\n )\n if len(reply) > 0:\n if reply[0] == \"y\":\n return True\n if reply[0] == \"n\":\n return False", "def install_license():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<request><license><install></install></license></request>\",\n }\n\n return __proxy__[\"panos.call\"](query)", "def licensecleanup(): # 3\n res = _msk.Env.licensecleanup()\n if res != 0:\n raise Error(rescode(res),\"\")", "def license():\n cwd = path(\".\")\n info(\"Tagging license text\")\n for extension, comment_marker in options.extensions:\n hlines = [comment_marker + \" \" + line for line in HEADER.split(\"\\n\")]\n header = \"\\n\".join(hlines) + \"\\n\\n\"\n first_line = hlines[0]\n last_line = hlines[-1]\n for f in cwd.walkfiles(\"*.%s\" % extension):\n exclude = False\n for pattern in options.exclude:\n if f.startswith(pattern):\n exclude=True\n break\n if exclude:\n continue\n _apply_header_if_necessary(f, header, first_line, last_line)", "def find_freesurfer_license(context, fs_license_path):\n\n log.debug('')\n\n\n context.gear_dict['fs_license_found'] = False\n license_info = ''\n\n # Check if the required FreeSurfer license file has been provided\n # as an input file.\n fs_license_file = context.get_input_path('freesurfer_license')\n if fs_license_file:\n # TODO make sure this works, it has not been tested\n # just copy the file to the right place\n fs_path_only, fs_file = os.path.split(fs_license_path)\n if fs_file != 'license.txt':\n log.warning('Freesurfer license looks strange: ' + fs_license_path)\n if not os.path.exists(fs_path_only):\n os.makedirs(fs_path_only)\n log.warning('Had to make freesurfer license path: ' + fs_license_path)\n shutil.copy(fs_license_file, fs_license_path)\n context.gear_dict['fs_license_found'] = True\n log.info('Using FreeSurfer license in input file.')\n\n if not context.gear_dict['fs_license_found']:\n # see if it was passed as a string argument\n if context.config.get('gear-FREESURFER_LICENSE'):\n fs_arg = context.config['gear-FREESURFER_LICENSE']\n license_info = '\\n'.join(fs_arg.split())\n context.gear_dict['fs_license_found'] = True\n log.info('Using FreeSurfer license in gear argument.')\n\n if not context.gear_dict['fs_license_found']:\n # TODO make sure this works, it has not been tested\n # see if it is in the project's info\n fw = context.client\n project_id = fw.get_analysis(context.destination.get('id')).parents.project\n project = fw.get_project(project_id)\n if project.info.get('FREESURFER_LICENSE'):\n license_info = '\\n'.join(project.info.get('FREESURFER_LICENSE').split())\n context.gear_dict['fs_license_found'] = True\n log.info('Using FreeSurfer license in project info.')\n\n if not context.gear_dict['fs_license_found']:\n msg = 'Could not find FreeSurfer license in project info.'\n log.exception(msg)\n os.sys.exit(1)\n\n else:\n # if it was passed as a string or was found in info, save\n # the Freesuefer license as a file in the right place\n if license_info != '':\n\n head, tail = os.path.split(fs_license_path)\n\n if not os.path.exists(head):\n os.makedirs(head)\n\n with open(fs_license_path, 'w') as lf:\n lf.write(license_info)", "def license(new_key):\n if new_key is not None:\n # click.echo('Saving key to configuration')\n config.set_license(new_key)\n license_key = config.get_license()\n if license_key:\n click.echo(license_key)\n else:\n click.echo(\"No license found: Use --set to configure the key\")", "def test_set_asset_license(self):\n\n story = create_story(title=\"Test Story\", summary=\"Test 
Summary\",\n byline=\"Test Byline\", status='published')\n asset = create_html_asset(type='text', title='Test Asset', \n body='Test content')\n story.assets.add(asset)\n story.save()\n self.assertNotEqual(story.license, 'CC BY-NC-SA')\n self.assertEqual(asset.license, '')\n story.license = 'CC BY-NC-SA'\n set_asset_license(sender=Story, instance=story)\n asset = Asset.objects.get(pk=asset.pk)\n self.assertEqual(asset.license, story.license)", "def demomode_accept_license():\r\n msg, status = \"\", True\r\n# import genericfunctions\r\n# genericfunctions.accept_license_function()\r\n\r\n try:\r\n sleep(5)\r\n if g.platform == 'android':\r\n\r\n # agrment_lbl = ui_controls.text_view(get_obj_identifier('EUL_agrement_labl'))\r\n #if agrment_lbl.strip() =='End User License Agreement': \r\n #print \"End user License Agreement label is displaying properly\" \r\n #else:\r\n # print \"End user License Agreement label is not displaying properly\"\r\n 'verify end user license agreement label'\r\n flag1,msg = element_textvalidation('EUL_agrement_labl','End User License Agreement')\r\n sleep(4) \r\n \r\n\r\n #'get the text view of the Eula acknowledge agreement text'\r\n #Agrement_text_view = ui_controls.text_view(get_obj_identifier('EULA_acknowledge_agrmrnt_text'))\r\n\r\n 'Read verification input data'\r\n text_to_verify = util.read_file(g.demomode_acknowldge_agrement_txt)\r\n #if not text_to_verify:\r\n #print \"Unable to retrieve text to verify demo mode idrac device text input file\"\r\n #return False, msg\r\n #if text_to_verify.strip() == Agrement_text_view.strip():\r\n #print \"DemoMode Eula agreement acknowledgement report verified sucessfully\"\r\n #else:\r\n #print \"DemoMode Eula agreement acknowledgement report is not verified sucessfully\" \r\n \r\n 'verify Eula acknowledge agreement text'\r\n flag2,msg = element_textvalidation('EULA_acknowledge_agrmrnt_text',text_to_verify)\r\n sleep(4) \r\n 'click on eula full view element' \r\n flag3 = ui_controls.Click(get_obj_identifier('EULA_full_view')) \r\n #'get the text view of the Eula whole agreement text'\r\n #Eula_text_view = ui_controls.text_view(get_obj_identifier('EULAagrement_text'))\r\n\r\n 'Read verification input data'\r\n text_to_verify = util.read_file(g.demomode_EULA_agrement_txt)\r\n\r\n # if not text_to_verify:\r\n #print \"Unable to retrieve text to verify demo mode idrac device text input file\"\r\n #return False, msg\r\n # if text_to_verify.strip() == Eula_text_view.strip():\r\n #print \"DemoMode Eula agreement report verified sucessfully\"\r\n #else:\r\n # print \"DemoMode Eula agreement device report verified unsucessfully\" \r\n 'verify Eula acknowledge agreement text'\r\n flag3,msg = element_textvalidation('EULAagrement_text',text_to_verify)\r\n sleep(4) \r\n\r\n 'Click on license accept button'\r\n flag4 = ui_controls.button(get_obj_identifier('agree'))\r\n 'verify diagnostics and usage label'\r\n #diagnotsic_usage_lbl = ui_controls.text_view(get_obj_identifier('Diagnostics_usage_lbl'))\r\n #if diagnotsic_usage_lbl.strip() =='Diagnostics and Usage': \r\n #print \"Diagnostics and Usage label is displaying properly\" \r\n #else:\r\n #print \"Diagnostics and Usage label is not displaying properly\"\r\n 'verify end user license agreement label'\r\n flag5,msg = element_textvalidation('Diagnostics_usage_lbl','Diagnostics and Usage')\r\n sleep(4) \r\n\r\n ''\r\n # Diagnostic_usge_txt_view = ui_controls.text_view(get_obj_identifier('Diagnostics_usage_txt'))\r\n #if not Diagnostic_usge_txt_view:\r\n #print \"Unable to retrieve text 
of diagnostics and usage text from application\"\r\n # return False, msg\r\n\r\n 'Read verification input data'\r\n text_to_verify = util.read_file(g.demomode_Diagnostcs_nd_usage_txt)\r\n\r\n #if not text_to_verify:\r\n #print \"Unable to retrieve text to verify demo mode diagnostics and usage text file\"\r\n #return False, msg\r\n #if text_to_verify.strip() == Diagnostic_usge_txt_view .strip():\r\n # print \"DemoMode Diagnostics and Usage report verified sucessfully\"\r\n #else:\r\n #print \"DemoMode Diagnostics and Usage report verified unsucessfully\" \r\n \r\n 'verify end user license agreement label'\r\n flag6,msg = element_textvalidation('Diagnostics_usage_txt',text_to_verify)\r\n sleep(4) \r\n flag7 = ui_controls.button(get_obj_identifier('agree'))\r\n\r\n status = False if not (flag1 and flag2 and flag3 and flag4 and flag5 and flag6 and flag7) else True\r\n else:\r\n 'Click on Agree button in EULA page for IOS'\r\n flag = ui_controls.button(get_obj_identifier('a'))\r\n status = flag\r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n\r\n return status, msg", "def software_license(self) -> str:\n return self.random.choice(LICENSES)", "async def get_license(self) -> APIReturn:\n return await self._request(\"GET\", \"/getLicense\")", "def info_license(fitsfile):\n try:\n license = pyfits.getval(fitsfile, \"LICENSE\")\n except KeyError:\n print(\"License information not found.\")\n else:\n licver = pyfits.getval(fitsfile, \"LICVER\")\n licurl = pyfits.getval(fitsfile, \"LICURL\")\n print(\"{lic} {ver} ({url})\".format(lic=license, ver=licver, url=licurl))", "def test_59_help_license(self):\r\n url = \"/help/license\"\r\n res = self.app.get(url, follow_redirects=True)\r\n err_msg = \"There should be a help license page\"\r\n assert \"Licenses\" in res.data, err_msg", "def VerifyWithFile(publicKey: str, value: ElementTree.Element) -> bool:\r\n lr = LicenceReader()\r\n lr.m_Licence1 = value\r\n return lr.Verify(publicKey)", "def testFileInReturn(self):\n self.assertEqual(\n os.path.abspath('../testcdl.cc'),\n self.cc.file_in\n )", "def licensecleanup():\n res = __library__.MSK_XX_licensecleanup()\n if res != 0:\n raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])", "def test_default_license(self):\n # When no license is specified, the license should default to \"CC BY\"\n story = create_story(title=\"Test Story\", summary=\"Test Summary\",\n byline=\"Test Byline\")\n self.assertEqual(story.license, 'CC BY')\n\n # When a license is specified, it should be set\n story = create_story(title=\"Test Story\", summary=\"Test Summary\",\n byline=\"Test Byline\", license=\"CC BY-NC-SA\")\n self.assertEqual(story.license, 'CC BY-NC-SA')", "def whmcs_license(username=None, lkey=None, action=\"view\"):\n actions_list = ['view', 'add', 'transfer', 'del']\n\n # start Requests session\n sc = requests.Session()\n\n # import cookies from Firefox\n sc.cookies.update(get_cookies('imhsc.imhadmin.net'))\n\n # build request\n action = action.lower()\n if action not in actions_list:\n print(\"!! Invalid action: %s\" % (action))\n print(\" Valid actions are: %s\" % (', '.join(actions_list)))\n return False\n\n if action == 'view':\n if username is not None:\n sterm = username\n stype = 'user'\n elif lkey is not None:\n sterm = lkey\n stype = 'key'\n else:\n print(\"!! 
Must specify either username or lkey\")\n return False\n\n # send request\n lresp = sc.post('https://imhsc.imhadmin.net/modules/Datacenter/datacenter_whmcslic.php',\n data={'act': action, 'query': stype, 'term': sterm})\n\n elif action == 'add':\n\n # send request\n lresp = sc.post('https://imhsc.imhadmin.net/modules/Datacenter/datacenter_whmcslic.php',\n data={'act': action, 'user': username})\n\n elif action == 'del' or action == 'transfer':\n\n if not lkey:\n # lookup the license first\n kresp = sc.post('https://imhsc.imhadmin.net/modules/Datacenter/datacenter_whmcslic.php',\n data={'act': 'view', 'query': 'user', 'term': username})\n check_sc_login(kresp.text)\n\n try:\n ktext = kresp.text.replace('<br />', '\\n').replace('<font size=\"3pt\">', '').replace('</font>', '').strip()\n lkey = re.search(r'\\WLicense Key: (Leased-.+)\\W', ktext, re.I|re.M).group(1)\n except:\n print(\"!! Unable to determine license key for user\")\n return False\n\n # send request\n lresp = sc.post('https://imhsc.imhadmin.net/modules/Datacenter/datacenter_whmcslic.php',\n data={'act': action, 'key': license})\n\n # check login\n check_sc_login(lresp.text)\n\n # clean up response\n ltext = lresp.text.replace('<br />', '\\n').replace('<font size=\"3pt\">', '').replace('</font>', '').strip()\n\n print(\"** Got response from SC:\\n%s\" % (ltext))\n\n return lresp", "def set_file_copyright(self, doc, text):\n if self.has_package(doc) and self.has_file(doc):\n if not self.file_copytext_set:\n self.file_copytext_set = True\n if validations.validate_file_cpyright(text):\n if isinstance(text, string_types):\n self.file(doc).copyright = str_from_text(text)\n else:\n self.file(doc).copyright = text # None or NoAssert\n return True\n else:\n raise SPDXValueError('File::CopyRight')\n else:\n raise CardinalityError('File::CopyRight')\n else:\n raise OrderError('File::CopyRight')", "def install_freesurfer_license(\n input_license_path,\n freesurfer_license_string,\n fw,\n destination_id,\n fs_license_path,\n):\n\n log.debug(\"Looking for Freesurfer license\")\n\n license_info = \"\"\n\n # 1) Check if the required FreeSurfer license file has been provided\n # as an input file.\n\n if input_license_path: # just copy the file to the right place\n\n log.info(\"FreeSurfer license path is %s\", input_license_path)\n fs_path_only = Path(fs_license_path).parents[0]\n fs_file = Path(fs_license_path).name\n\n if fs_file != \"license.txt\":\n log.warning(\n \"Freesurfer license file is usually license.txt, not \" \"%s\",\n fs_license_path,\n )\n\n if not Path(fs_path_only).exists():\n Path(fs_path_only).mkdir(parents=True)\n log.warning(\"Had to make freesurfer license path: %s\", fs_license_path)\n\n shutil.copy(input_license_path, fs_license_path)\n\n license_info = \"copied input file\"\n log.info(\"Using FreeSurfer license in input file.\")\n\n # 2) see if the license info was passed as a string argument\n elif freesurfer_license_string:\n license_info = re.sub(r\"(\\S){1} \", \"\\1\\n\", freesurfer_license_string)\n\n log.info(\"Using FreeSurfer license in gear argument.\")\n\n # 3) see if the license info is in the project's info\n else:\n\n project_id = fw.get_analysis(destination_id)[\"parents\"][\"project\"]\n project = fw.get_project(project_id)\n\n if \"FREESURFER_LICENSE\" in project[\"info\"]:\n space_separated_text = project[\"info\"][\"FREESURFER_LICENSE\"]\n license_info = \"\\n\".join(space_separated_text.split())\n\n log.info(\"Using FreeSurfer license in project info.\")\n\n # If it was passed as a string or was 
found in info, license_info is\n # set so save the Freesurfer license as a file in the right place.\n # If the license was an input file, it was copied to the right place\n # above (case 1).\n if license_info == \"copied input file\":\n pass # all is well\n\n elif license_info != \"\":\n\n head = Path(fs_license_path).parents[0]\n\n if not Path(head).exists():\n Path(head).mkdir(parents=True)\n log.debug(\"Created directory %s\", head)\n\n with open(fs_license_path, \"w\") as flp:\n flp.write(license_info)\n # log.debug(\"Wrote license %s\", license_info)\n log.debug(\"Wrote license file %s\", fs_license_path)\n\n else:\n msg = \"Could not find FreeSurfer license anywhere\"\n raise FileNotFoundError(f\"{msg} ({fs_license_path}).\")", "def _project_is_apache():\n\n license_files = [\"LICENSE\"]\n for filename in license_files:\n try:\n with open(filename, \"r\") as file:\n for line in file:\n if re.search('Apache License', line):\n return True\n except IOError:\n pass\n return False", "def build_license(scanned_file):\n # TODO: filter based on license scores and/or add warnings and or detailed comments with that info\n license_expressions = scanned_file.get('license_expressions', [])\n if not license_expressions:\n return\n\n # TODO: use either Debian license symbols or SPDX\n # TODO: convert license expression to Debian style of expressions\n expression = combine_expressions(license_expressions)\n\n licenses = scanned_file.get('licenses', [])\n text = '\\n'.join(get_texts(licenses))\n return f'{expression}\\n{text}'", "def add_licence_header(verbose_count: int, src: Path) -> None:\n # copyright (https://github.com/knipknap/copyright) was first considered but\n # comprises quite a few bugs and does not seem active anymore.\n if not get_language_specifics().can_add_licence_headers():\n return\n template_string = get_language_specifics().generate_source_licence_header_template()\n with tempfile.NamedTemporaryFile(suffix=\".tmpl\", delete=False) as template_file:\n template_file_path = Path(template_file.name)\n logger.debug(f\"Creates template file in {str(template_file_path)}\")\n template_file.write(template_string.encode(\"utf8\"))\n template_file.close()\n copyright_config = get_tool_config(template_file_path, src)\n _call_licensehearders(copyright_config, verbose_count)", "def verify_omm_license_agrmt():\r\n msg = \"\"\r\n try:\r\n if g.platform == 'android':\r\n 'Read verification input data'\r\n text_to_verify = util.read_file(g.license_agrmt_input)\r\n if not text_to_verify:\r\n print \"Unable to retrive text to verify license agreement input file\"\r\n return False, msg\r\n\r\n 'Getting text from License agreement page'\r\n text = ui_controls.text_view(get_obj_identifier('license_agreeText_textView'))\r\n if not text:\r\n print \"Text retrieved from text view is empty\"\r\n return False, msg\r\n 'Comparing text retrieved from UI with verification input text'\r\n if text_to_verify.strip() == text.strip():\r\n print (\"License agreement displayed in UI is matching with text to verify for license agreement\") + \\\r\n (\"Verification Text- %s\" % text_to_verify) + \\\r\n (\"############Text from UI is #########\\n %s\\n\" % text)\r\n else:\r\n return False, msg\r\n else:\r\n print \"IOS value and text does nto return entire license text. 
Hence cannot validate license text in IOS\"\r\n return True, msg\r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n return False, msg\r\n return True, msg", "def set_license(self, license_code: str) -> PrivXAPIResponse:\n response_status, data = self._http_post(\n UrlEnum.LICENSE.LICENSE,\n body=license_code,\n )\n return PrivXAPIResponse(response_status, HTTPStatus.OK, data)", "def eula_prompt():\n current_file = inspect.getfile(inspect.currentframe())\n current_dir = os.path.dirname(os.path.abspath(current_file))\n eula = os.path.join(current_dir, \"EULA.html\")\n form = cmds.setParent(q=True)\n cmds.formLayout(form, e=True, width=500)\n heading = cmds.text(\n l='Maya Cloud Rendering License Agreement', font=\"boldLabelFont\")\n text = cmds.text(l=\"By loading this plug-in you are agreeing to \"\n \"the following terms and conditions.\")\n if not os.path.exists(eula):\n raise RuntimeError(\"EULA notice not found at {0}\".format(eula))\n\n with open(eula, \"rb\") as eula_text:\n html = eula_text.read()\n unicode = html.decode(\"windows-1252\")\n encoded_str = unicode.encode(\"ascii\", \"xmlcharrefreplace\")\n read = cmds.scrollField(editable=False, wordWrap=True, height=300,\n text=unicode, backgroundColor=(1.0,1.0,1.0))\n agree = cmds.button(l='Agree', c='maya.cmds.layoutDialog( dismiss=\"Agree\" )' )\n disagree = cmds.button(l='Disagree', c='maya.cmds.layoutDialog( dismiss=\"Disagree\" )' )\n cmds.formLayout(form, edit=True,\n attachForm=[(heading, 'top', 10), (heading, 'left', 10),\n (heading, 'right', 10), (read, 'left', 10),\n (read, 'right', 10), (text, 'left', 10),\n (text, 'right', 10), (agree, 'left', 10),\n (agree, 'bottom', 10), (disagree, 'right', 10),\n (disagree, 'bottom', 10)],\n attachNone=[(text, 'bottom'), (read, 'bottom')],\n attachControl=[(text, 'top', 10, heading),\n (read, 'top', 10, text),\n (agree, 'top', 50, read),\n (disagree, 'top', 50, read)],\n attachPosition=[(agree, 'right', 5, 50),\n (disagree, 'left', 5, 50)])", "def handle_file(path: str):\n try:\n with open(path) as f:\n formula = f.read()\n asm = check_formula_and_create_assembly_code(formula)\n path = path + \".asm\"\n with open(path, mode=\"w\") as f:\n f.write(asm)\n fullpath = os.path.realpath(f.name)\n filename = os.path.basename(fullpath)\n print(bcolors.OKGREEN, \"The assembly file \\\"\" + filename + \"\\\" has been created.\", bcolors.ENDC,\n sep=\"\")\n print(bcolors.HEADER, \"Path:\\n\", bcolors.ENDC, path, \n sep=\"\")\n print(bcolors.HEADER, \"Absolute Path:\\n\", bcolors.ENDC, fullpath, \n sep=\"\")\n print(bcolors.HEADER, \"Formula:\\n\", bcolors.ENDC, '\"', formula, '\"', \n sep=\"\")\n except (FileNotFoundError) as e:\n print(bcolors.FAIL, e, bcolors.ENDC)\n print_help()\n except Exception as e:\n print(bcolors.FAIL, e, bcolors.ENDC)\n finally:\n return", "def test_a_renew_non_active_license(self):\n self.assertTrue(self.status.is_ready(), \"The license is active, non active state awaited\")\n with self.assertRaisesRegexp(IOError, 'PUT .* HTTP error 4[0-9][0-9]$'):\n self.status.renew(self.status.DEVICEID1, self.status.DEVICENAME1, self.end+2*self.ADAY)", "def check_copyright_year(filename: str, *, copyright_line: str, is_newly_created: bool) -> None:\n year = copyright_line[12:16]\n if is_newly_created and year != _current_year:\n raise HeaderCheckFailure(f'{filename}: copyright year must be {_current_year} (was {year})')\n elif not _current_century_regex.match(year):\n raise HeaderCheckFailure(\n f\"{filename}: copyright year must match 
'{_current_century_regex.pattern}' (was {year}): \" +\n f\"current year is {_current_year}\"\n )", "def CheckProductAndLicense():\n\n try:\n if arcpy.CheckExtension(\"Spatial\") == \"Available\": # check if spatial analyst extension is available\n arcpy.CheckOutExtension(\"Spatial\") # check out extension if available\n\n else: # spatial analyst extension is not available\n raise LicenseError # raise license error\n\n except LicenseError: # print customized message if license error raised\n arcpy.AddMessage(\"Spatial Analyst license is unavailable. Terminate the process.\")\n print(\"Spatial Analyst license is unavailable. Terminate the process.\")\n sys.exit()\n\n except arcpy.ExecuteError: # if other error encountered, print execution message\n arcpy.AddMessage(arcpy.GetMessages(2))\n print(arcpy.GetMessages(2))", "def install_step(self, silent_cfg_names_map=None, silent_cfg_extras=None):\n if silent_cfg_names_map is None:\n silent_cfg_names_map = {}\n\n # license file entry is only applicable with license file or server type of activation\n # also check whether specified activation type makes sense\n lic_activation = self.cfg['license_activation']\n lic_file_server_activations = [ACTIVATION_LIC_FILE, ACTIVATION_LIC_SERVER]\n other_activations = [act for act in ACTIVATION_TYPES if not act in lic_file_server_activations]\n lic_file_entry = \"\"\n if lic_activation in lic_file_server_activations:\n lic_file_entry = \"%(license_file_name)s=%(license_file)s\"\n elif not self.cfg['license_activation'] in other_activations:\n self.log.error(\"Unknown type of activation specified: %s (known :%s)\" % (lic_activation, ACTIVATION_TYPES))\n\n silent = '\\n'.join([\n \"%(activation_name)s=%(activation)s\",\n lic_file_entry,\n \"%(install_dir_name)s=%(install_dir)s\",\n \"ACCEPT_EULA=accept\",\n \"INSTALL_MODE=NONRPM\",\n \"CONTINUE_WITH_OPTIONAL_ERROR=yes\",\n \"\" # Add a newline at the end, so we can easily append if needed\n ]) % {\n 'activation_name': silent_cfg_names_map.get('activation_name', ACTIVATION_NAME),\n 'license_file_name': silent_cfg_names_map.get('license_file_name', LICENSE_FILE_NAME),\n 'install_dir_name': silent_cfg_names_map.get('install_dir_name', INSTALL_DIR_NAME),\n 'activation': self.cfg['license_activation'],\n 'license_file': self.license_file,\n 'install_dir': silent_cfg_names_map.get('install_dir', self.installdir),\n }\n\n if silent_cfg_extras is not None:\n if isinstance(silent_cfg_extras, dict):\n silent += '\\n'.join(\"%s=%s\" % (key, value) for (key, value) in silent_cfg_extras.iteritems())\n else:\n self.log.error(\"silent_cfg_extras needs to be a dict\")\n\n # we should be already in the correct directory\n silentcfg = os.path.join(os.getcwd(), \"silent.cfg\")\n try:\n f = open(silentcfg, 'w')\n f.write(silent)\n f.close()\n except:\n self.log.exception(\"Writing silent cfg % failed\" % silent)\n self.log.debug(\"Contents of %s:\\n%s\" % (silentcfg, silent))\n\n # workaround for mktmp: create tmp dir and use it\n tmpdir = os.path.join(self.cfg['start_dir'], 'mytmpdir')\n try:\n os.makedirs(tmpdir)\n except:\n self.log.exception(\"Directory %s can't be created\" % (tmpdir))\n tmppathopt = ''\n if self.cfg['usetmppath']:\n env.setvar('TMP_PATH', tmpdir)\n tmppathopt = \"-t %s\" % tmpdir\n\n # set some extra env variables\n env.setvar('LOCAL_INSTALL_VERBOSE', '1')\n env.setvar('VERBOSE_MODE', '1')\n\n env.setvar('INSTALL_PATH', self.installdir)\n\n # perform installation\n cmd = \"./install.sh %s -s %s\" % (tmppathopt, silentcfg)\n return run_cmd(cmd, log_all=True, 
simple=True)", "def test_ensure_copyright():\n issues = []\n regex = re.compile(r\"# Copyright \\d{4}(-\\d{4})? Canonical Ltd.$\")\n for filepath in get_python_filepaths():\n if os.stat(filepath).st_size == 0:\n continue\n\n with open(filepath, \"rt\", encoding=\"utf8\") as fh:\n for line in itertools.islice(fh, 5):\n if regex.match(line):\n break\n else:\n issues.append(filepath)\n if issues:\n msg = \"Please add copyright headers to the following files:\\n\" + \"\\n\".join(issues)\n pytest.fail(msg, pytrace=False)", "def check_execution_path():\n file_name = \"LICENSE\"\n if not os.path.exists(file_name):\n logging.error(\n \"Don't execute the script from a sub-directory. \"\n \"Switch to the root of the project folder\"\n )\n return False\n return True", "def putlicensecode(self,code): # 3\n if code is None:\n code_ = None\n else:\n try:\n code_ = memoryview(code)\n except TypeError:\n try:\n _tmparr_code = array.array(\"i\",code)\n except TypeError:\n raise TypeError(\"Argument code has wrong type\")\n else:\n code_ = memoryview(_tmparr_code)\n \n else:\n if code_.format != \"i\":\n code_ = memoryview(array.array(\"i\",code))\n \n if code_ is not None and len(code_) != value.license_buffer_length:\n raise ValueError(\"Array argument code has wrong length\")\n res = self.__obj.putlicensecode(code_)\n if res != 0:\n raise Error(rescode(res),\"\")", "def test_metadata_license(self):\n licenses = ['Apache2', 'OFL', 'UFL']\n self.assertIn(self.metadata.get('license', ''), licenses)", "def setThrowLicenseExceptions(self,value):\n self.PDFreactorConfiguration.in1[\"throwLicenseExceptions\"] = value", "def test_with_file(self, file_path):\n result = self.run(file_path=file_path)\n return self._handle_test_result(result)", "def test_network_lic_file_present(self):\n\n str_matlab_bin_path = self.host.check_output(\"readlink -f $(which matlab)\")\n matlab_dir = Path(str_matlab_bin_path).parents[1]\n network_lic_path = matlab_dir / \"licenses\" / \"network.lic\"\n self.assertTrue(self.host.file(str(network_lic_path)).exists)", "def test_csw_control():\n example.control.gauss_jacobi_csw_run('csw_control.pdf')\n assert path.isfile('csw_control.pdf')", "def test_set_asset_license_connected(self):\n story = create_story(title=\"Test Story\", summary=\"Test Summary\",\n byline=\"Test Byline\", status='published')\n asset = create_html_asset(type='text', title='Test Asset', \n body='Test content')\n story.assets.add(asset)\n story.save()\n self.assertNotEqual(story.license, 'CC BY-NC-SA')\n self.assertEqual(asset.license, '')\n story.license = 'CC BY-NC-SA'\n story.save()\n asset = Asset.objects.get(pk=asset.pk)\n self.assertEqual(asset.license, story.license)", "def custom_licenses(self):\n buf = (ctypes.c_char * self.MAX_BUF_SIZE)()\n result = self._dll.JLINK_EMU_GetLicenses(buf, self.MAX_BUF_SIZE)\n if result < 0:\n raise errors.JLinkException(result)\n return ctypes.string_at(buf).decode()", "def qs_license():\r\n paragraph = document.add_paragraph('')\r\n document.add_heading('License', level=1)\r\n lic_metric = ['lef', 'serial', 'name', 'organization', 'product', 'numberOfCores', 'isExpired', 'expiredReason', 'isBlacklisted', 'isInvalid']\r\n qs_lic = get_qlik_sense.get_license()\r\n num_of_metric = len(qs_lic)\r\n table = document.add_table(rows=num_of_metric+1, cols=2)\r\n table.style = 'Grid Table 1 Light Accent 1'\r\n row = table.rows[0]\r\n row.cells[0].text = 'details'\r\n\r\n for metric in range(len(qs_lic)):\r\n row = table.rows[metric+1]\r\n row.cells[0].text = str(lic_metric[metric])\r\n 
row.cells[1].text = str(qs_lic[metric][0])\r\n document.add_page_break()", "def insert_licence_header(verbose_count: int) -> None:\n # copyright (https://github.com/knipknap/copyright) was first considered but\n # comprises quite a few bugs and does not seem active anymore.\n add_licence_header(verbose_count, Path(configuration.get_value(ConfigurationVariable.PROJECT_ROOT)))", "def setLicenseKey(self,content):\n self.PDFreactorConfiguration.in1[\"licenseKey\"] = content", "def Read(self, fileName: str) -> None:\r\n try:\r\n if os.path.isfile(os.path.join(os.getcwd(), 'Licences', fileName)) and (fileName.endswith('.nls1')):\r\n self.m_Licence1 = ElementTree.parse(os.path.join(os.getcwd(), 'Licences', fileName)).getroot()\r\n except FileNotFoundError:\r\n raise FileNotFoundError(\"Licence file: \" + fileName + \" not found.\")", "def check_file(file_path):\n if not os.path.exists(file_path):\n os.makedirs(file_path, exist_ok=True)\n \n file1 = open(file_path + \"/success.txt\",\"w\")#write mode \n file1.write(\"Succes Download\") \n file1.close()\n return os.path.isfile(file_path + \"/success.txt\")", "def install_file(self, file_path):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)", "def file(path, filename):\n print(uc.file(path, filename))", "def ValidateLicense(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get_license_from_wiki(s, *, is_file=False):\n response = api.wiki_search_licence(s, file=is_file)\n\n idpage = response['query']['pageids'][0]\n\n try: \n imageinfo = response['query']['pages'][idpage].get('imageinfo', None)\n return imageinfo[0]['extmetadata']['UsageTerms']['value']\n except (KeyError, TypeError) as e:\n Querylog.error('License not found for %s' % s)\n return None", "def load(cls, filename):\n\n licenses = []\n\n address_manager = REGISTRY['address_manager']\n error_manager = REGISTRY['error_manager']\n\n license_numbers = []\n\n reader = csv.reader(open(filename, 'rU'))\n try:\n headers = reader.next() # Text column headers\n except StopIteration:\n syslog.syslog('merge.py: Empty file %s' % filename)\n return licenses\n \n if len(headers) != 15:\n raise InvalidInput('Business License file should have ' +\n 'exactly 15 columns. Found %d.' 
% len(headers))\n\n for line in reader:\n business_license = BusinessLicense(line)\n\n if business_license.license_number in license_numbers:\n # Silently skip duplicates\n #error_manager.add(business_license,\n # 'Duplicate license number')\n continue\n\n license_numbers.append(business_license.license_number)\n\n if not business_license.is_valid_license_type():\n error_manager.add(business_license,\n 'Invalid license type')\n continue\n if not business_license.is_valid_business_name():\n error_manager.add(business_license,\n 'Business name is on ignore list')\n continue\n\n if address_manager.is_in_strathcona(business_license.address):\n licenses.append(business_license)\n else:\n error_manager.add(business_license,\n 'Not in Strathcona or invalid address')\n\n licenses.sort(key=operator.attrgetter('license_number'))\n\n return licenses", "def add_licence_header(verbose_count: int) -> None:\n # copyright (https://github.com/knipknap/copyright) was first considered but\n # comprises quite a few bugs and does not seem active anymore.\n template_string = _generate_header_template()\n with tempfile.NamedTemporaryFile(suffix=\".tmpl\", delete=False) as template_file:\n template_file_path = Path(template_file.name)\n logger.debug(f\"Creates template file in {str(template_file_path)}\")\n template_file.write(template_string.encode(\"utf8\"))\n template_file.close()\n copyright_config = get_tool_config(template_file_path)\n _call_licensehearders(copyright_config, verbose_count)", "def VerifiedProjectCode():\n\tcodigo_user = raw_input(\"Ingrese el codigo del proyecto: \")\n\tStreamVCode = open(\"project_code.txt\", \"r\")\n\t\t\n\tif codigo_user == StreamVCode:\n\t\tprint \"Codigo correcto.\"\n\telse:\n\t\tprint \"El codigo es incorrecto.\"\n\n\tStreamVCode.close()", "def verify_lcov_file(self, filepath):\n with open(filepath) as f:\n data = f.read()\n\n self.assertGreater(data.count('SF:'), 100)\n self.assertGreater(data.count('crypto'), 100)\n self.assertGreater(data.count('libpng'), 10)", "def test_text_file_by_path(self):\n self.command.package = self.input_ovf\n self.command.file_path = \"sample_cfg.txt\"\n self.command.run()\n self.command.finished()\n self.check_diff(\"\"\"\n <ovf:File ovf:href=\"input.iso\" ovf:id=\"file2\" ovf:size=\"{iso_size}\" />\n- <ovf:File ovf:href=\"sample_cfg.txt\" ovf:id=\"textfile\" \\\novf:size=\"{cfg_size}\" />\n </ovf:References>\n\"\"\".format(iso_size=self.FILE_SIZE['input.iso'],\n cfg_size=self.FILE_SIZE['sample_cfg.txt']))\n self.assertFalse(os.path.exists(os.path.join(self.temp_dir,\n \"sample_cfg.txt\")),\n \"deleted file should not be exported\")", "def update_license(self, sKey, sUser, sCompany):\n\t\treturn Job(SDK.PrlSrv_UpdateLicense(self.handle, sKey, sUser, sCompany)[0])", "def test_itar_restrict_software_asset(self):\n pass", "def License(self, default=None):\n return self.data.get('license', default)", "def test_kyc_get_file(self):\n pass", "def testUnauthorizedAccessToFileOnClientIsForbidden(self):\n self.InitDefaultRouter()\n\n client_id = self.SetupClient(0)\n gui_test_lib.CreateFileVersion(client_id, \"fs/os/foo\")\n\n with self.assertRaises(grr_api_errors.AccessForbiddenError):\n self.api.Client(client_id).File(\"fs/os/foo\").Get()", "def add_file_contribution(self, doc, value):\n if self.has_package(doc) and self.has_file(doc):\n self.file(doc).add_contrib(value)\n else:\n raise OrderError('File::Contributor')", "def test_import_software_asset(self):\n pass", "def verifyCustomBundlePath(self, catalogName, bundleName):\n try:\n bundlePath 
= globalVars.switchBundleRepository[bundleName]\n utility.execLog(\"Click edit on Bundle\")\n status, result = self.clickEditCustomBundle(catalogName, bundleName)\n if not status:\n utility.execLog(result)\n return self.browserObject, False, result\n filePath = bundlePath.split(\"\\\\\")\n filename = filePath[-1]\n xpath = self.RepositoriesObjects('current_file_name')\n time.sleep(1)\n existingFileName = self.handleEvent(EC.presence_of_element_located((By.XPATH, xpath)), action=\"GET_TEXT\")\n\n if filename in existingFileName:\n utility.execLog(\"Closing Bundle Form\")\n self.handleEvent(EC.element_to_be_clickable((By.XPATH, self.RepositoriesObjects('btn_close_edit_bundle'))),\n action=\"CLICK\")\n time.sleep(1)\n self.handleEvent(EC.element_to_be_clickable((By.ID, self.RepositoriesObjects('btn_close_bundles'))),\n action=\"CLICK\")\n time.sleep(1)\n utility.execLog(\"Bundle Name %s displayed as existing file name\" % bundleName)\n return self.browserObject, True, \"Bundle Name %s displayed as existing file name\" % bundleName\n else:\n utility.execLog(\"Closing Bundle Form\")\n self.handleEvent(\n EC.element_to_be_clickable((By.XPATH, self.RepositoriesObjects('btn_close_edit_bundle'))),\n action=\"CLICK\")\n time.sleep(1)\n self.handleEvent(EC.element_to_be_clickable((By.ID, self.RepositoriesObjects('btn_close_bundles'))),\n action=\"CLICK\")\n time.sleep(1)\n utility.execLog(\"Bundle Name %s not displayed as existing file name fileName : %s Existing FileName:%s\" % (\n bundleName, filename, existingFileName))\n return self.browserObject, False, \"Bundle Name %s not displayed as existing file name fileName : %s Existing FileName:%s\" % (\n bundleName, filename, existingFileName)\n except Exception as e:\n return self.browserObject, False, \"Error :: %s\" % e", "def put(self, license_handler):\n\n full_license = request.data\n return license_handler.upload_license(full_license)", "def test_with_file(filename) :\n\tif not os.path.exists(filename) :\n\t\tprint('File not exists: ' + filename)\n\t\tsys.exit(-1)\n\n\twith open(filename, 'r') as file :\n\t\tcode = file.read()\n\n\tprobabilities = shaman.Shaman.default().detect( code )\n\t\n\tfor index, (lang, prob) in enumerate(probabilities) :\n\t\tif index > 3: break\n\t\t\n\t\tprint(\"%s: %.2lf%%\" % (lang, prob))", "def fetch_license(auth_code=None):\n if not auth_code:\n query = {\n \"type\": \"op\",\n \"cmd\": \"<request><license><fetch></fetch></license></request>\",\n }\n else:\n query = {\n \"type\": \"op\",\n \"cmd\": (\n \"<request><license><fetch><auth-code>{}</auth-code></fetch></license>\"\n \"</request>\".format(auth_code)\n ),\n }\n\n return __proxy__[\"panos.call\"](query)", "def start_ocropy(file, path_out, ocropy_profile):\n print(\"Starting ocropy for:\" + file.split('/')[-1])\n\n fname = file.split('/')[-1].split('.')[0]\n create_dir(path_out)\n subprocess.Popen(args=[\"ocropus-nlbin\", file, \"-o\"+path_out+fname+\"/\"]).wait()\n subprocess.Popen(args=[\"ocropus-gpageseg\", path_out+fname+\"/????.bin.png\"]).wait()\n subprocess.Popen(args=[\"ocropus-rpred\", \"-Q 4\", path_out+fname+\"/????/??????.bin.png\"]).wait()\n test = [\"ocropus-hocr\", path_out+fname+\"/????.bin.png\", \"-o\"+path_out+\"/\"+fname.split('/')[-1]+\".html\"]\n subprocess.Popen(args=[\"ocropus-hocr\", path_out+fname+\"/????.bin.png\", \"-o\"+path_out+\"/\"+fname.split('/')[-1]+\".html\"]).wait()\n print(\"Finished ocropy for:\" + file.split('/')[-1])\n return 0", "def putlicensecode(self,code_):\n _code_minlength = value.license_buffer_length\n if 
value.license_buffer_length > 0 and code_ is not None and len(code_) != value.license_buffer_length:\n raise ValueError(\"Array argument code is not long enough: Is %d, expected %d\" % (len(code_),value.license_buffer_length))\n if isinstance(code_, numpy.ndarray) and code_.dtype is numpy.dtype(numpy.int32) and code_.flags.contiguous:\n _code_copyarray = False\n _code_tmp = ctypes.cast(code_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif code_ is not None:\n _code_copyarray = True\n _code_np_tmp = numpy.zeros(len(code_),numpy.dtype(numpy.int32))\n _code_np_tmp[:] = code_\n assert _code_np_tmp.flags.contiguous\n _code_tmp = ctypes.cast(_code_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _code_copyarray = False\n _code_tmp = None\n \n res = __library__.MSK_XX_putlicensecode(self.__nativep,_code_tmp)\n if res != 0:\n raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])", "def coeff_write_ok(self):\n return os.access(self.coeffroot, os.W_OK)", "def set_pkg_license_comment(self, doc, text):\n self.assert_package_exists()\n if not self.package_license_comment_set:\n self.package_license_comment_set = True\n if validations.validate_pkg_lics_comment(text):\n doc.package.license_comment = str_from_text(text)\n return True\n else:\n raise SPDXValueError('Package::LicenseComment')\n else:\n raise CardinalityError('Package::LicenseComment')", "def OnLanzarSimulacion(self, event):\n if self.clipsFile != \"\":\n clips.Assert(\"(Seguir S)\")\n clips.Run()\n self.office.updatePeopleLocation()\n self.Refresh()\n else:\n dlg = wx.MessageDialog(self, \"No se ha cargado fichero .clp\", \"Error en ASSERT\", wx.OK)\n dlg.ShowModal()", "def test_court_order_file(session, minio_server, test_name, expected_code, expected_msg):\n business = factory_business('BC1234567')\n filing = copy.deepcopy(COURT_ORDER_FILING_TEMPLATE)\n\n if test_name == 'SUCCESS':\n filing['filing']['courtOrder']['fileKey'] = _upload_file(letter, invalid=False)\n elif test_name == 'FAIL_INVALID_FILE_KEY_SIZE':\n filing['filing']['courtOrder']['fileKey'] = _upload_file(letter, invalid=True)\n\n err = validate(business, filing)\n\n if expected_code:\n assert err.code == expected_code\n assert lists_are_equal(err.msg, expected_msg)\n else:\n assert err is None" ]
[ "0.6253657", "0.6131019", "0.5873717", "0.56893003", "0.5666623", "0.56539637", "0.5633931", "0.5633391", "0.5620481", "0.56039864", "0.56034553", "0.5600046", "0.5559953", "0.55529106", "0.5540216", "0.55401397", "0.5527236", "0.5520839", "0.5341878", "0.53384465", "0.5296292", "0.5267742", "0.5267312", "0.5243942", "0.5233447", "0.5208114", "0.52031684", "0.5199412", "0.51889795", "0.5157413", "0.5151607", "0.5132586", "0.51218", "0.5120634", "0.51006454", "0.5096636", "0.5066706", "0.50493366", "0.5006615", "0.5000701", "0.49976176", "0.497697", "0.4952957", "0.49358535", "0.49356484", "0.49248952", "0.49143383", "0.49136272", "0.489516", "0.48935226", "0.48885283", "0.48733804", "0.48663643", "0.4853366", "0.48505864", "0.4846587", "0.48445618", "0.4827268", "0.48265254", "0.4811295", "0.48069057", "0.47841966", "0.47677314", "0.47657448", "0.47625783", "0.47621042", "0.47599027", "0.4759639", "0.47402787", "0.4735524", "0.47352928", "0.47103488", "0.4699522", "0.4695342", "0.46891147", "0.46793622", "0.46680003", "0.46656665", "0.4655681", "0.46542916", "0.46380556", "0.46377242", "0.4630077", "0.4613905", "0.4611386", "0.4602606", "0.4599851", "0.4598633", "0.45969522", "0.45889938", "0.45859194", "0.45679083", "0.45602292", "0.45461634", "0.45355105", "0.45191103", "0.45090735", "0.45088428", "0.44993782", "0.44808784" ]
0.7091268
0
Return (qposadr, qveladr, dof) for the given joint name. If dof is 4 or 7, then the last 4 degrees of freedom in qpos represent a unit quaternion.
def joint_adr(self, joint_name): jntadr = mjlib.mj_name2id(self.ptr, C.mjOBJ_JOINT, joint_name) assert (jntadr >= 0) dofmap = {C.mjJNT_FREE: 7, C.mjJNT_BALL: 4, C.mjJNT_SLIDE: 1, C.mjJNT_HINGE: 1} qposadr = self.jnt_qposadr[jntadr][0] qveladr = self.jnt_dofadr[jntadr][0] dof = dofmap[self.jnt_type[jntadr][0]] return (qposadr, qveladr, dof)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getWireframeJoints(self, q):\n xline = [0. for _ in self.Joints]\n yline = [0. for _ in self.Joints]\n zline = [0. for _ in self.Joints]\n for cnt, jnt in enumerate(self.Joints.keys()):\n xyz = self.joint_syms[jnt][\"func_xyz_coj\"](*q)\n xline[cnt] = xyz[0, 0]\n yline[cnt] = xyz[1, 0]\n zline[cnt] = xyz[2, 0]\n return xline, yline, zline", "def get_fk(self, joints):\n\n header = Header()\n header.frame_id = self.group.get_planning_frame()\n\n robot_state = self.robot.get_current_state()\n robot_state.joint_state.position = joints\n\n links = [self.group.get_end_effector_link()]\n\n return self.fk_solver(header, links, robot_state).pose_stamped[0]", "def get_jpos(self, joint_name=None):\n raise NotImplementedError", "def get_dq_dynmat_q(phonon,q):\n groupvel = phonon._group_velocity\n return groupvel._get_dD(q)", "def quaternion(self, name, q):\n R = self.R(name=name, q=q)\n quat = transformations.unit_vector(\n transformations.quaternion_from_matrix(matrix=R))\n return quat", "def get_joint_states(self, joints: List[str]) -> Tuple[List[float], List[float], List[str]]:\n assert all([j in self.joints.names for j in joints]), 'All joints requested must be in self.joints'\n \n rospy.wait_for_service('/' + self.model_name + '/get_joint_states', timeout=2.0)\n try:\n resp = self.__get_joint_states(joint_names=joints)\n except rospy.ServiceException as e:\n print('Service did not process request:' + str(e))\n \n joint_positions = resp.joint_states.position\n joint_velocities = resp.joint_states.velocity\n joint_order = resp.joint_states.name\n \n return joint_positions, joint_velocities, joint_order", "def DQT(fp):\n # length is 2 + sum(t=1, N) of (65 + 64 * Pq(t))\n length = unpack('>H', fp.read(2))[0]\n _remaining = length - 2\n\n _pq, _tq, _qk = [], [], []\n while _remaining > 0:\n precision, table_id = _split_byte(fp.read(1))\n _remaining -= 1\n _pq.append(precision)\n _tq.append(table_id)\n\n # If Pq is 0, Qk is 8-bit, if Pq is 1, Qk is 16-bit\n Q_k = []\n for ii in range(64):\n if precision == 0:\n Q_k.append(unpack('>B', fp.read(1))[0])\n _remaining -= 1\n elif precision == 1:\n Q_k.append(unpack('>H', fp.read(2))[0])\n _remaining -= 2\n\n _qk.append(Q_k)\n\n info = {\n 'Lq' : length,\n 'Pq' : _pq,\n 'Tq' : _tq,\n 'Qk' : _qk\n }\n\n return info", "def getNametagJoints(self):\n # Not sure what the name is right now.\n return []", "def _get_kdl_joint_names(self):\n num_links = self._urdf_chain.getNrOfSegments()\n num_joints = self._urdf_chain.getNrOfJoints()\n joint_names = []\n for i in range(num_links):\n link = self._urdf_chain.getSegment(i)\n joint = link.getJoint()\n joint_type = joint.getType()\n # JointType definition: [RotAxis,RotX,RotY,RotZ,TransAxis,\n # TransX,TransY,TransZ,None]\n if joint_type > 1:\n continue\n joint_names.append(joint.getName())\n assert num_joints == len(joint_names)\n return copy.deepcopy(joint_names)", "def _named_attrs(self, parts:dict) -> \\\n (QA4SMNamedAttributes, list, QA4SMNamedAttributes):\n\n if not self.ismetr():\n raise IOError(self.varname, '{} is not in form of a QA4SM metric variable.')\n\n if self.g == 0:\n a = QA4SMAttributes(self.attrs)\n ref_ds = QA4SMNamedAttributes(a.ref_dc - a._offset_id_dc,\n a.get_ref_names()['short_name'], self.attrs)\n return ref_ds, None, None\n else:\n dss = []\n ref_ds = QA4SMNamedAttributes(parts['ref_id'], parts['ref_ds'], self.attrs)\n ds = QA4SMNamedAttributes(parts['sat_id0'], parts['sat_ds0'], self.attrs)\n dss.append(ds)\n if self.g == 3:\n ds = QA4SMNamedAttributes(parts['sat_id1'], 
parts['sat_ds1'], self.attrs)\n dss.append(ds)\n mds = QA4SMNamedAttributes(parts['mds_id'], parts['mds'], self.attrs)\n else:\n mds = None\n return ref_ds, dss, mds", "def check_qdof(base, q_dof):\n if base == 'linear_helix' or base == 'pure_helix':\n if q_dof != 2:\n raise ValueError(f'{base} should have 2 degrees of freedom, not {q_dof}.')\n elif base == 'quadratic' or base == 'linear':\n if q_dof < 2 or q_dof > 3:\n raise ValueError(f'{base} should have 2 or 3 degrees of freedom, not {q_dof}.')\n elif base == 'constant':\n if q_dof != 1:\n raise ValueError(f'{base} should have 1 degrees of freedom, not {q_dof}.')\n elif base == 'torsion_helix' or base == 'torsion_linear_helix':\n if q_dof != 3:\n raise ValueError(f'{base} should have 3 degrees of freedom, not {q_dof}.')\n elif base == 'full':\n if q_dof < 5 or q_dof > 8:\n raise ValueError(f'{base} should have 5-8 degrees of freedom, not {q_dof}.')\n else:\n print(f'{base} is not a defined strain base.')", "def q1(self):\n import numpy as np\n dipole = self.get('q1') # they are (so far) always given in AU\n if dipole is not None:\n dipole = np.array([dipole[2], dipole[0], dipole[1]])\n return dipole", "def get_joint_data(bagFile, joint_name, duration=True):\n # INITIALIZATION\n x = np.array([])\n y = np.array([])\n theta = np.array([])\n all_t = np.array([])\n bag = rosbag.Bag(bagFile) # Initialize rosbag object\n first = True # True on first iteration to take first timestamp\n\n # Add message values to collections\n for topic, msg, t in bag.read_messages(topics=['/tf']):\n\n joint = msg.transforms[0].child_frame_id\n translation = msg.transforms[0].transform.translation\n orientation = msg.transforms[0].transform.rotation\n euler = euler_from_quaternion(\n [orientation.x, orientation.y, orientation.z, orientation.w])\n\n if joint == joint_name:\n\n # Get timestamp in seconds\n t = msg.transforms[0].header.stamp\n t_sec = t.to_sec()\n if duration:\n if first:\n t_0 = t_sec\n first = False\n\n all_t = np.append(all_t, t_sec - t_0)\n\n else:\n all_t = np.append(all_t, t_sec)\n\n # Get x, y and z coordinates\n\n x = np.append(x, translation.x)\n y = np.append(y, translation.y)\n theta = np.append(theta, euler[2])\n\n pose = [x, y, theta]\n\n return pose, all_t", "def get_joints(joint_listener): \n if LOCAL_TEST: # dummy\n return np.array([-0.5596, 0.5123, 0.5575, -1.6929, 0.2937, 1.6097, -1.237, 0.04, 0.04])\n else:\n joints = joint_listener.joint_position\n print('robot joints', joints)\n return joints", "def get_quad_dip(q):\n N = get_quad_normal(q)\n V = get_vertical_vector(q)\n dip = np.degrees(np.arccos(Vector.dot(N, V)))\n return dip", "def test_fk():\n\n joints = [0.0, 2.9, 1.3, 4.2, 1.4, 0.0]\n\n path_planner = PathPlanner(\"manipulator\")\n\n pose = path_planner.get_fk(joints)\n\n print pose", "def get_joint_number(self) -> int:\n return self.DoF", "def make_dof_value_map(robot):\n names = [j.GetName() for j in robot.GetJoints()]\n indices = [j.GetDOFIndex() for j in robot.GetJoints()]\n\n def get_dofs():\n pose={}\n values=robot.GetDOFValues()\n for (i,n) in zip(indices,names):\n pose.setdefault(n,values[i])\n return pose\n\n return get_dofs", "def fk4(joint_rotations):\n h0_4 = htm4(joint_rotations)\n x0_4 = h0_4[0, 3]\n y0_4 = h0_4[1, 3]\n z0_4 = h0_4[2, 3]\n d0_4 = [x0_4, y0_4, z0_4]\n return d0_4", "def get_pose(self, joint_values: dict, query_node: str) -> SE3:\n if query_node == \"p0\":\n return SE3.identity()\n path_nodes = self.kinematic_map[\"p0\"][query_node][1:]\n q = np.array([joint_values[node][0] for node in 
path_nodes])\n alpha = np.array([joint_values[node][1] for node in path_nodes])\n a = np.array([self.a[node] for node in path_nodes])\n d = np.array([self.d[node] for node in path_nodes])\n\n return fk_3d_sph(a, alpha, d, q)", "def dJ(self, name, q, dq, x=None):\n\n x = self.x_zeros if x is None else x\n funcname = name + '[0,0,0]' if np.allclose(x, 0) else name\n # check for function in dictionary\n if self._dJ.get(funcname, None) is None:\n self._dJ[funcname] = self._calc_dJ(name=name, x=x)\n parameters = tuple(q) + tuple(dq) + tuple(x)\n return np.array(self._dJ[funcname](*parameters), dtype='float32')", "def FK(joint_params):\n joint_params = np.asarray(joint_params, dtype=float)\n q1, q2, q3, q4, q5, q6, q7 = joint_params\n TF = np.linalg.multi_dot([\n Rz(q1), Tz(a1), # Joint 1 to 2\n Rx(q2), # Joint 2 to 3\n Rz(q3), Tz(a2), # Joint 3 to 4\n Rx(q4), # Joint 4 to 5\n Rz(q5), Tz(a3), # Joint 5 to 6\n Rx(q6), # Joint 6 to 7\n Rz(q7), Tz(a4) # Joint 7 to E\n ])\n return TF", "def _csv3_q2x(self, Q, deriv = 0, out = None, var = None):\n \n natoms = 3 \n base_shape = Q.shape[1:]\n \n if var is None:\n var = [0, 1, 2] # Calculate derivatives for all Q\n \n nvar = len(var)\n \n # nd = adf.nck(deriv + nvar, min(deriv, nvar)) # The number of derivatives\n nd = dfun.nderiv(deriv, nvar)\n \n # Create adf symbols/constants for each coordinate\n q = [] \n for i in range(self.nQ):\n if i in var: # Derivatives requested for this variable\n q.append(adf.sym(Q[i], var.index(i), deriv, nvar))\n else: # Derivatives not requested, treat as constant\n q.append(adf.const(Q[i], deriv, nvar))\n # q = r1, r2, theta\n \n if out is None:\n out = np.ndarray( (nd, 3*natoms) + base_shape, dtype = Q.dtype)\n out.fill(0) # Initialize out to 0\n \n # Calculate Cartesian coordinates\n \n if self.angle == 'deg':\n q[2] = (np.pi / 180.0) * q[2] \n # q[2] is now in radians\n if self.supplementary:\n q[2] = np.pi - q[2] # theta <-- pi - theta \n \n if self.embedding_mode == 0:\n np.copyto(out[:,2], (-q[0]).d ) # -r1\n np.copyto(out[:,7], (q[1] * adf.sin(q[2])).d ) # r2 * sin(theta)\n np.copyto(out[:,8], (-q[1] * adf.cos(q[2])).d ) # -r2 * cos(theta)\n elif self.embedding_mode == 1:\n np.copyto(out[:,0], (q[0] * adf.cos(q[2]/2)).d ) # r1 * cos(theta/2)\n np.copyto(out[:,2], (q[0] * adf.sin(q[2]/2)).d ) # r1 * sin(theta/2)\n np.copyto(out[:,6], (q[1] * adf.cos(q[2]/2)).d ) # r2 * cos(theta/2)\n np.copyto(out[:,8], (-q[1] * adf.sin(q[2]/2)).d ) # -r2 * sin(theta/2)\n else:\n raise RuntimeError(\"Unexpected embedding_mode\")\n \n return out", "def default_qp(\n self,\n default_index: int = 0,\n joint_angle: Optional[jp.ndarray] = None,\n joint_velocity: Optional[jp.ndarray] = None) -> QP:\n qps = {}\n if joint_angle is None:\n joint_angle = self.default_angle(default_index)\n if joint_velocity is None:\n joint_velocity = jp.zeros_like(joint_angle)\n\n # build dof lookup for each joint\n dofs_idx = {}\n dof_beg = 0\n for joint in self.config.joints:\n dof = len(joint.angle_limit)\n dofs_idx[joint.name] = (dof_beg, dof_beg + dof)\n dof_beg += dof\n\n # check overrides in config defaults\n default_qps = {}\n if default_index < len(self.config.defaults):\n defaults = self.config.defaults[default_index]\n default_qps = {qp.name for qp in defaults.qps}\n for qp in defaults.qps:\n qps[qp.name] = QP(\n pos=vec_to_arr(qp.pos),\n rot=math.euler_to_quat(vec_to_arr(qp.rot)),\n vel=vec_to_arr(qp.vel),\n ang=vec_to_arr(qp.ang))\n\n # make the kinematic tree of bodies\n root = tree.Node.from_config(self.config)\n\n parent_joint = {j.child: j 
for j in self.config.joints}\n rot_axes = jp.vmap(math.rotate, [True, False])\n\n for body in root.depth_first():\n if body.name in qps:\n continue\n if body.name not in parent_joint:\n qps[body.name] = QP.zero()\n continue\n # qp can be determined if the body is the child of a joint\n joint = parent_joint[body.name]\n dof_beg, dof_end = dofs_idx[joint.name]\n rot = math.euler_to_quat(vec_to_arr(joint.rotation))\n axes = rot_axes(jp.eye(3), rot)[:dof_end - dof_beg]\n angles = joint_angle[dof_beg:dof_end]\n velocities = joint_velocity[dof_beg:dof_end]\n # for each joint angle, rotate by that angle.\n # these are euler intrinsic rotations, so the axes are rotated too\n local_rot = math.euler_to_quat(vec_to_arr(joint.reference_rotation))\n for axis, angle in zip(axes, angles):\n local_axis = math.rotate(axis, local_rot)\n next_rot = math.quat_rot_axis(local_axis, angle)\n local_rot = math.quat_mul(next_rot, local_rot)\n local_offset = math.rotate(vec_to_arr(joint.child_offset), local_rot)\n local_pos = vec_to_arr(joint.parent_offset) - local_offset\n parent_qp = qps[joint.parent]\n rot = math.quat_mul(parent_qp.rot, local_rot)\n pos = parent_qp.pos + math.rotate(local_pos, parent_qp.rot)\n # TODO: propagate ang through tree and account for joint offset\n ang = jp.dot(axes.T, velocities).T\n qps[body.name] = QP(pos=pos, rot=rot, vel=jp.zeros(3), ang=ang)\n\n # any trees that have no body qp overrides in the config are set just above\n # the xy plane. this convenience operation may be removed in the future.\n body_map = {body.name: body for body in self.config.bodies}\n for node in root.children:\n children = [node.name] + [n.name for n in node.depth_first()]\n if any(c in default_qps for c in children):\n continue # ignore a tree if some part of it is overriden\n\n zs = jp.array([bodies.min_z(qps[c], body_map[c]) for c in children])\n min_z = jp.amin(zs)\n for body in children:\n qp = qps[body]\n pos = qp.pos - min_z * jp.array([0., 0., 1.])\n qps[body] = qp.replace(pos=pos)\n\n qps = [qps[body.name] for body in self.config.bodies]\n return jp.tree_map(lambda *args: jp.stack(args), *qps)", "def get_quad_length(q):\n P0, P1, P2, P3 = q\n p0 = Vector.fromPoint(P0) # fromPoint converts to ECEF\n p1 = Vector.fromPoint(P1)\n qlength = (p1 - p0).mag() / 1000\n return qlength", "def quarter_chord(self) -> np.ndarray:\n return 0.75 * self.xyz_le + 0.25 * self.xyz_te()", "def from_quaternion(self, q: np.ndarray) -> np.ndarray:\n if q is None:\n return np.identity(3)\n if q.shape[-1]!=4 or q.ndim>2:\n raise ValueError(\"Quaternion must be of the form (4,) or (N, 4)\")\n if q.ndim>1:\n q /= np.linalg.norm(q, axis=1)[:, None] # Normalize\n R = np.zeros((q.shape[0], 3, 3))\n R[:, 0, 0] = 1.0 - 2.0*(q[:, 2]**2 + q[:, 3]**2)\n R[:, 1, 0] = 2.0*(q[:, 1]*q[:, 2]+q[:, 0]*q[:, 3])\n R[:, 2, 0] = 2.0*(q[:, 1]*q[:, 3]-q[:, 0]*q[:, 2])\n R[:, 0, 1] = 2.0*(q[:, 1]*q[:, 2]-q[:, 0]*q[:, 3])\n R[:, 1, 1] = 1.0 - 2.0*(q[:, 1]**2 + q[:, 3]**2)\n R[:, 2, 1] = 2.0*(q[:, 0]*q[:, 1]+q[:, 2]*q[:, 3])\n R[:, 0, 2] = 2.0*(q[:, 1]*q[:, 3]+q[:, 0]*q[:, 2])\n R[:, 1, 2] = 2.0*(q[:, 2]*q[:, 3]-q[:, 0]*q[:, 1])\n R[:, 2, 2] = 1.0 - 2.0*(q[:, 1]**2 + q[:, 2]**2)\n return R\n q /= np.linalg.norm(q)\n return np.array([\n [1.0-2.0*(q[2]**2+q[3]**2), 2.0*(q[1]*q[2]-q[0]*q[3]), 2.0*(q[1]*q[3]+q[0]*q[2])],\n [2.0*(q[1]*q[2]+q[0]*q[3]), 1.0-2.0*(q[1]**2+q[3]**2), 2.0*(q[2]*q[3]-q[0]*q[1])],\n [2.0*(q[1]*q[3]-q[0]*q[2]), 2.0*(q[0]*q[1]+q[2]*q[3]), 1.0-2.0*(q[1]**2+q[2]**2)]])", "def qsize_from_quadrangle(qpoints):\n assert len(qpoints) == 4, 
\"must have 4 qpoints, but got {}\".format(len(qpoints))\n tl, tr, br, bl = qpoints\n hmax = int(max((bl - tl).y(), (br - tr).y()))\n wmax = int(max((tr - tl).x(), (br - bl).x()))\n return QSize(wmax, hmax)", "def get_strains(names, q_dof):\n strain_functions = []\n for n, this_dof in zip(names, q_dof):\n check_qdof(n, this_dof)\n if n == 'linear_helix':\n strain_functions.append(linear_helix_strain)\n elif n == 'pure_helix':\n strain_functions.append(pure_helix_strain)\n elif n == 'torsion_helix':\n strain_functions.append(torsion_helix_strain)\n elif n == 'torsion_linear_helix':\n strain_functions.append(torsion_linear_helix_strain)\n elif n == 'quadratic':\n strain_functions.append(quadratic_strain)\n elif n == 'linear':\n strain_functions.append(linear_strain)\n elif n == 'constant':\n strain_functions.append(constant_strain)\n elif n == 'full':\n strain_functions.append(full_strain)\n else:\n print(f'{n} is not a defined strain base.')\n return strain_functions", "def orientation_gazebo_to_px4(orientation_q):\n return tf_enu_to_ned(tf_baselink_to_aircraft(orientation_q))", "def robot_get_obs(sim):\n if sim.data.qpos is not None and sim.model.joint_names:\n names = [n for n in sim.model.joint_names if n.startswith('robot')]\n return (\n np.array([sim.data.get_joint_qpos(name) for name in names]),\n np.array([sim.data.get_joint_qvel(name) for name in names]),\n )\n return np.zeros(0), np.zeros(0)", "def velocity_to_JointTorqueMsg(self):\n\t\t\tjointCmd = kinova_msgs.msg.JointTorque()\n\t\t\tjointCmd.joint1 = self.torque[0][0];\n\t\t\tjointCmd.joint2 = self.torque[1][1];\n\t\t\tjointCmd.joint3 = self.torque[2][2];\n\t\t\tjointCmd.joint4 = self.torque[3][3];\n\t\t\tjointCmd.joint5 = self.torque[4][4];\n\t\t\tjointCmd.joint6 = self.torque[5][5];\n\t\t\tjointCmd.joint7 = self.torque[6][6];\n\n\t\t\treturn jointCmd", "def get_frame_joints(self, frame):\n joints = frame[(self.POS_SIZE + self.ROT_SIZE):].copy()\n return joints", "def gather_qpt_info_me(self):\n if not self.active_worker:\n return None\n\n nqpt_me = len(self.my_iqpts)\n\n qred = np.zeros((nqpt_me, 3), dtype=np.float)\n omega = np.zeros((nqpt_me, 3 * self.natom), dtype=np.float)\n\n for i, iqpt in enumerate(self.my_iqpts):\n\n self.set_ddb(iqpt)\n qred[i,:] = self.qptanalyzer.qred[:]\n omega[i,:] = np.real(self.qptanalyzer.omega[:])\n\n return qred, omega", "def get_target_q_values(\n self, next_states: torch.Tensor, rewards: torch.Tensor, dones: torch.Tensor\n ) -> torch.Tensor:\n return ddqn_q_target(self, next_states, rewards, dones)", "def dof(self):\n if self._dof is None:\n self._dof = self.tri.nbr_cells - (self.tri.columns.size - 1) + self.tri.index.size\n return(self._dof)", "def getSquadByIdx(self, squadIdx, safety=False, fullName=False):\n\t\tassert not safety or len(self.squads) > squadIdx >= 0\n\t\tsquad_str = Company.SQUAD_FULL_STR if fullName else Company.SQUAD_PARTIAL_STR\n\t\treturn self.squads[squadIdx], squad_str.format(squad_name=self._statManagerObj.squadNumberFn(squadIdx), chapter_name=self.chapterName, company_name=self.name)", "def _jointOrigin(\n fusionJoint: Union[adsk.fusion.Joint, adsk.fusion.AsBuiltJoint]\n) -> adsk.core.Point3D:\n geometryOrOrigin = (\n (\n fusionJoint.geometryOrOriginOne\n if fusionJoint.geometryOrOriginOne.objectType == \"adsk::fusion::JointGeometry\"\n else fusionJoint.geometryOrOriginTwo\n )\n if fusionJoint.objectType == \"adsk::fusion::Joint\"\n else fusionJoint.geometry\n )\n\n # This can apparently happen\n # I suppose an AsBuilt with Rigid doesn't need a Origin perhaps?\n if 
geometryOrOrigin is None:\n return None\n\n if geometryOrOrigin.objectType == \"adsk::fusion::JointGeometry\":\n ent = geometryOrOrigin.entityOne\n if ent.objectType == \"adsk::fusion::BRepEdge\":\n if ent.assemblyContext is None:\n newEnt = ent.createForAssemblyContext(fusionJoint.occurrenceOne)\n min = newEnt.boundingBox.minPoint\n max = newEnt.boundingBox.maxPoint\n org = adsk.core.Point3D.create(\n (max.x + min.x) / 2.0, (max.y + min.y) / 2.0, (max.z + min.z) / 2.0\n )\n return org # ent.startVertex.geometry\n else:\n return geometryOrOrigin.origin\n if ent.objectType == \"adsk::fusion::BRepFace\":\n if ent.assemblyContext is None:\n newEnt = ent.createForAssemblyContext(fusionJoint.occurrenceOne)\n return newEnt.centroid\n else:\n return geometryOrOrigin.origin\n else:\n return geometryOrOrigin.origin\n else: # adsk::fusion::JointOrigin\n origin = geometryOrOrigin.geometry.origin\n # todo: Is this the correct way to calculate a joint origin's true location? Why isn't this exposed in the API?\n offsetX = (\n 0 if geometryOrOrigin.offsetX is None else geometryOrOrigin.offsetX.value\n )\n offsetY = (\n 0 if geometryOrOrigin.offsetY is None else geometryOrOrigin.offsetY.value\n )\n offsetZ = (\n 0 if geometryOrOrigin.offsetZ is None else geometryOrOrigin.offsetZ.value\n )\n # noinspection PyArgumentList\n return adsk.core.Point3D.create(\n origin.x + offsetX, origin.y + offsetY, origin.z + offsetZ\n )", "def parse_qpf(q):\n if q == \"M\":\n value = \"NULL\"\n trace = \"0\"\n elif q == \"T\":\n value = \"0.00\"\n trace = \"1\"\n else:\n value = q\n trace = \"0\"\n\n return quote(value), quote(trace)", "def _getq(self, q=None):\n if q is None:\n return self.q\n elif isvector(q, self.n):\n return getvector(q, self.n)\n else:\n return getmatrix(q, (None, self.n))", "def get_jvel(self, joint_name=None):\n raise NotImplementedError", "def getWireframeLinks(self, q):\n xline = [0. for _ in self.Joints]\n yline = [0. for _ in self.Joints]\n zline = [0. 
for _ in self.Joints]\n for cnt, jnt in enumerate(self.Joints.keys()):\n xyz = self.joint_syms[jnt][\"func_xyz_com\"](*q)\n xline[cnt] = xyz[0, 0]\n yline[cnt] = xyz[1, 0]\n zline[cnt] = xyz[2, 0]\n return xline, yline, zline", "def quat_to_angle(self, quat):\n\t\trot = PyKDL.Rotation.Quaternion(quat.x, quat.y, quat.z, quat.w)\n\t\treturn rot.GetRPY()[2]", "def motorposition(self, motorname: str) -> float:\n return self._data[motorname]", "def is_quad(q):\n P0, P1, P2, P3 = q\n\n # Convert points to ECEF\n p0 = Vector.fromPoint(P0)\n p1 = Vector.fromPoint(P1)\n p2 = Vector.fromPoint(P2)\n p3 = Vector.fromPoint(P3)\n\n # Unit vector along top edge\n v0 = (p1 - p0).norm()\n\n # Distance along bottom edge\n d = (p3 - p2).mag()\n\n # New location for p2 by extending from p3 the same distance and\n # direction that p1 is from p0:\n new_p2 = p3 + v0 * d\n\n # How far off of the plane is the origin p2?\n planepoints = [p0, p1, p2]\n dist = get_distance_to_plane(planepoints, p2)\n\n # Is it close enough?\n if dist / d > OFFPLANE_TOLERANCE:\n on_plane = False\n else:\n on_plane = True\n\n # Fixed quad\n fquad = [p0.toPoint(),\n p1.toPoint(),\n new_p2.toPoint(),\n p3.toPoint()]\n\n return (on_plane, fquad)", "def quat_angle(quat):\n return 2 * float(np.arccos(min(1, max(-1, quat[0]))))", "def get_joint_positions(self, joint_angles ): \n\n\n # current angles\n res_joint_angles = joint_angles.copy() \n\n # detect limits\n maskminus= res_joint_angles > self.joint_lims[:,0]\n maskplus = res_joint_angles < self.joint_lims[:,1]\n \n res_joint_angles = res_joint_angles*(maskplus*maskminus) \n res_joint_angles += self.joint_lims[:,0]*(np.logical_not(maskminus) )\n res_joint_angles += self.joint_lims[:,1]*(np.logical_not(maskplus) )\n \n # mirror\n if self.mirror :\n res_joint_angles = -res_joint_angles\n res_joint_angles[0] += np.pi \n \n # calculate x coords of arm edges.\n # the x-axis position of each edge is the \n # sum of its projection on the x-axis\n # and all the projections of the \n # previous edges \n x = np.array([ \n sum([ \n self.segment_lengths[k] *\n np.cos( (res_joint_angles[:(k+1)]).sum() ) \n for k in range(j+1) \n ])\n for j in range(self.number_of_joint) \n ])\n \n # trabslate to the x origin \n x = np.hstack([self.origin[0], x+self.origin[0]])\n\n # calculate y coords of arm edges.\n # the y-axis position of each edge is the \n # sum of its projection on the x-axis\n # and all the projections of the \n # previous edges \n y = np.array([ \n sum([ \n self.segment_lengths[k] *\n np.sin( (res_joint_angles[:(k+1)]).sum() ) \n for k in range(j+1) \n ])\n for j in range(self.number_of_joint) \n ])\n \n # translate to the y origin \n y = np.hstack([self.origin[1], y+self.origin[1]])\n\n pos = np.array([x, y]).T\n \n return (pos, res_joint_angles)", "def plot_joint_trajectory(q, qd, qdd):\n m = q.shape[1]\n timesteps = np.linspace(0, 1, num = m)\n\n n = q.shape[0]\n\n fig, axis = plt.subplots(3)\n fig.suptitle(\"Joint Trajectories\")\n\n # Joint Position Plot\n axis[0].set_title(\"Position\")\n axis[0].set(xlabel = \"Time\", ylabel = \"Position\")\n for i in range(n):\n axis[0].plot(timesteps, q[i])\n\n # Joint Velocity Plot\n axis[1].set_title(\"Velocity\")\n axis[1].set(xlabel = \"Time\", ylabel = \"Velocity\")\n for i in range(n):\n axis[1].plot(timesteps, qd[i])\n\n # Joint Acceleration Plot\n axis[2].set_title(\"Acceleration\")\n axis[2].set(xlabel = \"Time\", ylabel = \"Acceleration\")\n for i in range(n):\n axis[2].plot(timesteps, qdd[i])\n\n # Legends\n legends = [f\"Joint_{i + 1}\" 
for i in range(n)]\n axis[0].legend(legends)\n axis[1].legend(legends)\n axis[2].legend(legends)\n\n fig.tight_layout()\n plt.show()", "def quintil_rent(x,p,d):\n \n if x <= d[p][0.20]:\n return 'Q1'\n elif x <= d[p][0.4]:\n return 'Q2'\n elif x <= d[p][0.6]: \n return 'Q3'\n elif x <= d[p][0.8]:\n return 'Q4'\n else:\n return 'Q5'", "def info(self, qp: QP) -> Info:\n zero = P(jp.zeros((self.num_bodies, 3)), jp.zeros((self.num_bodies, 3)))\n\n dp_c = sum([c.apply(qp) for c in self.colliders], zero)\n dp_j = sum([j.apply(qp) for j in self.joints], zero)\n info = Info(dp_c, dp_j, zero)\n return info", "def compute_fk_position(self, jpos, tgt_frame):\n if isinstance(jpos, list):\n jpos = np.array(jpos)\n jpos = jpos.flatten()\n if jpos.size != self.arm_dof:\n raise ValueError('Length of the joint angles '\n 'does not match the robot DOF')\n assert jpos.size == self.arm_dof\n kdl_jnt_angles = joints_to_kdl(jpos)\n\n kdl_end_frame = kdl.Frame()\n idx = self.arm_link_names.index(tgt_frame) + 1\n fg = self._fk_solver_pos.JntToCart(kdl_jnt_angles,\n kdl_end_frame,\n idx)\n if fg < 0:\n raise ValueError('KDL Pos JntToCart error!')\n pose = kdl_frame_to_numpy(kdl_end_frame)\n pos = pose[:3, 3].flatten()\n rot = pose[:3, :3]\n return pos, rot", "def add_joint(joint: str, x1: int, y1: int, x2: int, y2: int) -> str:\n return joint", "def test_get_quad_angle_char(self):\n result = get_quadrilateral_type(14, 5, 14, 5, 'w', 'x', 'y', 'z')\n self.assertEqual(result, 'invalid')", "def get_qpos(self, jnt_index):\n func = self.wrapper.get_qpos\n func.restype = ctypes.c_double\n\n return func(self.instance, jnt_index)", "def quaternion_to_angle(q):\n x, y, z, w = q.x, q.y, q.z, q.w\n roll, pitch, yaw = tf.transformations.euler_from_quaternion((x, y, z, w))\n return yaw", "def qpoint(force_constant=[], qpt=[0.0, 0.0, 0.0]):\n qpt = np.array(qpt)\n exp_iqpt = np.exp(1.0j * qpt)\n dmat = force_constant * exp_iqpt\n vals, vects = np.linalg.eigh(dmat)\n return vals, vects", "def _get_joints(self, anno, idx):\n num_people = len(anno)\n\n joints = np.zeros(\n (num_people, self.ann_info['num_joints'], 3), dtype=np.float32)\n\n for i, obj in enumerate(anno):\n joints[i, :self.ann_info['num_joints'], :3] = \\\n np.array(obj['keypoints']).reshape([-1, 3])\n\n img_info = self.coco.loadImgs(self.img_ids[idx])[0]\n orgsize = np.array([img_info['height'], img_info['width'], 1])\n\n return joints, orgsize", "def move_to_joint_pos_delta(self, cmd):\n curr_q = self.joint_angles()\n joint_names = self.joint_names()\n\n joint_command = dict([(joint, curr_q[joint] + cmd[i])\n for i, joint in enumerate(joint_names)])\n\n self.move_to_joint_positions(joint_command)", "def get_quad_strike_vector(q):\n P0, P1, P2, P3 = q\n p0 = Vector.fromPoint(P0) # fromPoint converts to ECEF\n p1 = Vector.fromPoint(P1)\n v1 = (p1 - p0).norm()\n return v1", "def get_fk_pose(*args):\n robots = get_robot_roots()\n if not robots:\n pm.warning('Nothing Selected; Select a valid robot')\n return\n\n if len(robots) > 1:\n pm.warning('Too many selections; Select a single robot')\n return\n\n robot = robots[0]\n axes = find_fk_config(robot)\n\n for i in range(len(axes)):\n pm.textField('t_a{}'.format(i + 1),\n edit=True,\n text=round(axes[i], 2))", "def joint(*args, absolute: bool=True, angleX: Union[float, bool]=0.0, angleY: Union[float,\n bool]=0.0, angleZ: Union[float, bool]=0.0, assumePreferredAngles: bool=True,\n automaticLimits: bool=True, children: bool=True, component: bool=True,\n degreeOfFreedom: Union[AnyStr, bool]=\"\", exists: Union[AnyStr, 
bool]=\"\",\n limitSwitchX: bool=True, limitSwitchY: bool=True, limitSwitchZ: bool=True, limitX:\n Union[List[float, float], bool]=None, limitY: Union[List[float, float], bool]=None,\n limitZ: Union[List[float, float], bool]=None, name: Union[AnyStr, bool]=\"\",\n orientJoint: AnyStr=\"\", orientation: Union[List[float, float, float], bool]=None,\n position: Union[List[float, float, float], bool]=None, radius: Union[float, bool]=0.0,\n relative: bool=True, rotationOrder: Union[AnyStr, bool]=\"\", scale: Union[List[float,\n float, float], bool]=None, scaleCompensate: bool=True, scaleOrientation:\n Union[List[float, float, float], bool]=None, secondaryAxisOrient: AnyStr=\"\",\n setPreferredAngles: bool=True, stiffnessX: Union[float, bool]=0.0, stiffnessY:\n Union[float, bool]=0.0, stiffnessZ: Union[float, bool]=0.0, symmetry: bool=True,\n symmetryAxis: AnyStr=\"\", zeroScaleOrient: bool=True, q=True, query=True, e=True,\n edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def get_desired_joint_position(self):\n return self._position_joint_desired", "def get_quad_down_dip_vector(q):\n P0, P1, P2, P3 = q\n p0 = Vector.fromPoint(P0) # fromPoint converts to ECEF\n p1 = Vector.fromPoint(P1)\n p0p1 = p1 - p0\n qnv = get_quad_normal(q)\n ddv = Vector.cross(p0p1, qnv).norm()\n return ddv", "def joints_torque(self):\r\n return self._arm.joints_torque", "def order_joint_states(self, data):\n ordered_joint = []\n \n # Create a dictionary that contains joint name and position \n # starting from a JointState msg\n joints_dictionary = dict(zip(data.joint_state.name, data.joint_state.position))\n \n # helper variable \n suffix = 'FJ0' \n\n # For each joint name, look for the corresponding value in the joints_dictionary\n for key in [\"FFJ0\", \"FFJ3\", \"FFJ4\",\n \"MFJ0\", \"MFJ3\", \"MFJ4\",\n \"RFJ0\", \"RFJ3\", \"RFJ4\",\n \"LFJ0\", \"LFJ3\", \"LFJ4\", \"LFJ5\",\n \"THJ1\", \"THJ2\", \"THJ3\", \"THJ4\", \"THJ5\",\n \"WRJ1\", \"WRJ2\" ]:\n \n # Check if the key contains \"FJ0\": \"FFJ0\", \"MFJ0\", \"RFJ0\", \"LFJ0\"\n # This has to be done because for convention ?FJO = ?FJ1 + ?FJ2\n if key[1:] == suffix:\n ordered_joint.append( joint(joint_name = key,\n joint_target = joints_dictionary[key[:1]+\"FJ1\"] + joints_dictionary[key[:1]+\"FJ2\"])) \n else: \n ordered_joint.append( joint(joint_name = key, \n joint_target = joints_dictionary[key]) ) \n return ordered_joint", "def get_pos_and_orientation(self):\n pos, rot = p.getBasePositionAndOrientation(self.pybullet_id)\n euler = p.getEulerFromQuaternion(rot)\n return np.array(pos), euler[2]", "def get_pos_and_orientation(self):\n pos, rot = p.getBasePositionAndOrientation(self.pybullet_id)\n euler = p.getEulerFromQuaternion(rot)\n return np.array(pos), euler[2]", "def get_pos_and_orientation(self):\n pos, rot = p.getBasePositionAndOrientation(self.pybullet_id)\n euler = p.getEulerFromQuaternion(rot)\n return np.array(pos), euler[2]", "def getOqiNameIndx( self, name ):\n \n if not self.oqiNames:\n self.getOqiNames( )\n\n if name in self.oqiNames:\n return self.oqiNames[ name ]\n elif name in self.oqiNames.values():\n return name\n else:\n return -1", "def quaternion_to_angle(q):\n\tx, y, z, w = q.x, q.y, q.z, q.w\n\troll, pitch, yaw = tf.transformations.euler_from_quaternion((x, y, z, w))\n\treturn yaw", "def fk(arm,base=np.identity(4),joint_num=-1):\n\n pEE = base # Cumulative pose of the End Effector \n # (initially set up as the base of the robot)\n if joint_num==-1:\n for joint in arm:\n pEE=np.dot(pEE, joint.dhMatrix())\n else:\n for i in 
range(joint_num):\n pEE=np.dot(pEE, arm[i].dhMatrix())\n\n return pEE", "def get_jacobian(self, joint_angles):\n q = kdl.JntArray(self._urdf_chain.getNrOfJoints())\n for i in range(q.rows()):\n q[i] = joint_angles[i]\n jac = kdl.Jacobian(self._urdf_chain.getNrOfJoints())\n fg = self._jac_solver.JntToJac(q, jac)\n if fg < 0:\n raise ValueError('KDL JntToJac error!')\n jac_np = kdl_array_to_numpy(jac)\n return jac_np", "def _get_quaternion_data(self, msg):\n alpha, beta, gamma = PIDController.get_euler_angle_from_quat(msg.quaternion.w, msg.quaternion.x,\n msg.quaternion.y, msg.quaternion.z)\n self._actual_euler[\"alpha\"], self._actual_euler[\"beta\"], self._actual_euler[\"gamma\"] \\\n = alpha, beta, gamma", "def quat2dcm(q):\n q0q0 = q[0] * q[0]\n q0q1 = q[0] * q[1]\n q0q2 = q[0] * q[2]\n q0q3 = q[0] * q[3]\n q1q1 = q[1] * q[1]\n q1q2 = q[1] * q[2]\n q1q3 = q[1] * q[3]\n q2q2 = q[2] * q[2]\n q2q3 = q[2] * q[3]\n q3q3 = q[3] * q[3]\n dcm = np.zeros((3, 3))\n dcm[0, 0] = q0q0 + q1q1 - q2q2 - q3q3\n dcm[0, 1] = 2.0*(q1q2 + q0q3)\n dcm[0, 2] = 2.0*(q1q3 - q0q2)\n dcm[1, 0] = 2.0*(q1q2 - q0q3)\n dcm[1, 1] = q0q0 - q1q1 + q2q2 - q3q3\n dcm[1, 2] = 2.0*(q2q3 + q0q1)\n dcm[2, 0] = 2.0*(q1q3 + q0q2)\n dcm[2, 1] = 2.0*(q2q3 - q0q1)\n dcm[2, 2] = q0q0 - q1q1 - q2q2 + q3q3\n return dcm", "def _get_joint_min_max_vel(self, jname):\n root = self._robot_description_xml_root\n if root is not None:\n joint_type = root.findall(\".joint[@name='\" + jname + \"']\")[0].attrib['type']\n if joint_type == \"continuous\":\n limit = root.findall(\".//joint[@name='\" + jname + \"']/limit\")\n if limit is None or len(limit) == 0:\n return (-math.pi,\n math.pi,\n 3.0) # A default speed\n else:\n return (-math.pi,\n math.pi,\n float(limit[0].attrib['velocity']))\n else:\n limit = root.findall(\".//joint[@name='\" + jname + \"']/limit\")\n if limit is None or len(limit) == 0:\n # Handles upper case joint names in the model. e.g. 
the E1\n # shadowhand\n limit = root.findall(\n \".//joint[@name='\" + jname.upper() + \"']/limit\")\n if limit is not None and len(limit) > 0:\n return (float(limit[0].attrib['lower']),\n float(limit[0].attrib['upper']),\n float(limit[0].attrib['velocity']))\n else:\n rospy.logerr(\"Limit not found for joint %s\", jname)\n else:\n rospy.logerr(\"robot_description_xml_root == None\")\n return (None, None, None)", "def quaternion2AngleAxis(quaternion):\n HA = np.arccos(quaternion[0])\n theta = 2 * HA\n if theta < np.finfo(float).eps:\n theta = 0\n axis = np.array([1, 0, 0])\n else:\n axis = quaternion[[1, 2, 3]] / np.sin(HA)\n return theta, axis", "def getHeading(q):\n yaw = math.atan2(2 * (q.x * q.y + q.w * q.z),\n q.w * q.w + q.x * q.x - q.y * q.y - q.z * q.z)\n return yaw", "def point_rotation_by_quaternion(v, q):\r\n r = [0] + v\r\n q_conj = [q[0], -q[1], -q[2], -q[3]]\r\n return quaternion_product(quaternion_product(q, r), q_conj)[1:]", "def quat2DCM(q):\n\tDCM = quatLeftMat(q) @ quatRightMat(q).T\n\tDCM = DCM[1:, 1:]\n\treturn DCM", "def joint_callback(data):\n joints[0] = data.position[9]\n joints[1] = data.position[10]\n joints[2] = data.position[11]\n joints[3] = data.position[12]\n joints[4] = data.position[13]\n global position_geted\n position_geted = True", "def quat2mat(q):\n #leila: https://www.euclideanspace.com/maths/geometry/rotations/conversions/quaternionToMatrix/index.htm\n\n sz = quat.get_size(q)\n q0 = quat.getq0(q)\n q1 = quat.getq1(q)\n q2 = quat.getq2(q)\n q3 = quat.getq3(q)\n qt = quat.get_type(q)\n\n g = np.zeros((sz, 3, 3))\n g[:, 0, 0] = np.square(q0) + np.square(q1) - np.square(q2) - np.square(q3)\n g[:, 0, 1] = 2*(q1*q2 - q0*q3)\n g[:, 0, 2] = 2*(q3*q1 + q0*q2)\n g[:, 1, 0] = 2*(q1*q2 + q0*q3)\n g[:, 1, 1] = np.square(q0) - np.square(q1) + np.square(q2) - np.square(q3)\n g[:, 1, 2] = 2*(q2*q3 - q0*q1)\n g[:, 2, 0] = 2*(q3*q1 - q0*q2)\n g[:, 2, 1] = 2*(q2*q3 + q0*q1)\n g[:, 2, 2] = np.square(q0) - np.square(q1) - np.square(q2) + np.square(q3)\n\n if sz == 1:\n g = g.reshape((3, 3))\n if qt == -1:\n g = -g\n else:\n inds1 = np.where(qt == -1)\n g[inds1, :, :] = -g[inds1, :, :]\n\n return g", "def _infer_qvalue_name(keys) -> Union[str, None]:\n for qvalue in Q_VALUE_TERMS:\n if qvalue in keys:\n return qvalue\n else:\n return None", "def from_q(self, q: np.ndarray) -> np.ndarray:\n return self.from_quaternion(self, q)", "def get_all_joint_states(self) -> Tuple[List[float], List[float], List[str]]:\n \n rospy.wait_for_service('/' + self.model_name + '/get_all_joint_states', timeout=2.0)\n try:\n resp = self.__get_all_joint_states()\n except rospy.ServiceException as e:\n print('Service did not process request:' + str(e))\n \n joint_positions = resp.joint_states.position\n joint_velocities = resp.joint_states.velocity\n joint_order = resp.joint_states.name\n \n return joint_positions, joint_velocities, joint_order", "def jacobian_pose_ur5(q, delta=0.0001):\n J = np.zeros((7,6))\n # Transformacion homogenea inicial (usando q)\n T = fkine_ur5(q)\n Q = rot2quat(T[0:3,0:3])\n\n for i in xrange(6):\n dq = copy(q)\n dq[i] = dq[i] + delta\n dT = fkine_ur5(dq)\n dQ = rot2quat(dT[0:3,0:3])\n Jpos = (dT[0:3,3] - T[0:3,3])/delta\n Jrot = (dQ - Q)/delta\n #Jrot \t= np.squeeze(np.asarray(Jrot))\n J[:,i] = np.concatenate((Jpos, Jrot), axis=0)\n \n return J", "def _generate_joint_string(self, joint, skeleton, joint_level):\n joint_string = \"\"\n temp_level = 0\n tab_string = \"\"\n while temp_level < joint_level:\n tab_string += \"\\t\"\n temp_level += 1\n\n # determine joint 
type\n if joint_level == 0:\n joint_string += tab_string + \"ROOT \" + joint + \"\\n\"\n else:\n if len(skeleton.nodes[joint].children) > 0:\n joint_string += tab_string + \"JOINT \" + joint + \"\\n\"\n else:\n joint_string += tab_string + \"End Site\" + \"\\n\"\n\n # open bracket add offset\n joint_string += tab_string + \"{\" + \"\\n\"\n offset = skeleton.nodes[joint].offset\n joint_string += tab_string + \"\\t\" + \"OFFSET \" + \"\\t \" + \\\n str(offset[0]) + \"\\t \" + str(offset[1]) + \"\\t \" + str(offset[2]) + \"\\n\"\n\n if len(skeleton.nodes[joint].children) > 0:\n # channel information\n channels = skeleton.nodes[joint].channels\n joint_string += tab_string + \"\\t\" + \\\n \"CHANNELS \" + str(len(channels)) + \" \"\n for tok in channels:\n joint_string += tok + \" \"\n joint_string += \"\\n\"\n\n joint_level += 1\n # recursive call for all children\n for child in skeleton.nodes[joint].children:\n joint_string += self._generate_joint_string(child.node_name, skeleton, joint_level)\n\n # close the bracket\n joint_string += tab_string + \"}\" + \"\\n\"\n return joint_string", "def plan(self, joints = None):\n if not joints == None:\n try:\n self.set_joint_value_target(self.get_remembered_joint_values()[joints])\n except:\n self.set_joint_value_target(joints)\n plan = self._g.get_plan()\n plan_msg = RobotTrajectory()\n joint_traj = JointTrajectory()\n joint_traj.joint_names = plan[\"joint_trajectory\"][\"joint_names\"]\n for point in plan[\"joint_trajectory\"][\"points\"]:\n joint_traj.points.append(JointTrajectoryPoint(\n positions = point[\"positions\"],\n velocities = point[\"velocities\"],\n accelerations = point[\"accelerations\"]))\n multi_dof_joint_traj = MultiDOFJointTrajectory()\n multi_dof_joint_traj.joint_names = plan[\"multi_dof_joint_trajectory\"][\"joint_names\"]\n multi_dof_joint_traj.frame_ids = plan[\"multi_dof_joint_trajectory\"][\"frame_ids\"]\n multi_dof_joint_traj.child_frame_ids = plan[\"multi_dof_joint_trajectory\"][\"child_frame_ids\"]\n for point in plan[\"multi_dof_joint_trajectory\"][\"points\"]:\n multi_dof_joint_traj_point = MultiDOFJointTrajectoryPoint()\n for pose in point[\"poses\"]:\n multi_dof_joint_traj_point.poses.append(Point(\n position = Point(x = pose[\"position\"][\"x\"], y = pose[\"position\"][\"y\"], z = pose[\"position\"][\"z\"]),\n orientation = Quaternion(x = pose[\"orientation\"][\"x\"], y = pose[\"orientation\"][\"y\"],\n z = pose[\"orientation\"][\"z\"], w = pose[\"orientation\"][\"w\"])))\n multi_dof_joint_traj.points.append(multi_dof_joint_traj_point)\n plan_msg.joint_trajectory = joint_traj\n plan_msg.multi_dof_joint_trajectory = multi_dof_joint_traj\n\n return plan_msg", "def get_joystickB3d(self, index=0):\r\n return (self.handler.absx2[index], self.handler.absy2[index], self.handler.absz2[index])", "def quatreal(q):\n a = q[0,0]\n b = q[0,1]\n c = q[0,2]\n d = q[0,3]\n amat = a*np.identity(4)\n bmat = b*np.array([[0,1,0,0],[-1,0,0,0],[0,0,0,-1],[0,0,1,0]])\n cmat = c*np.array([[0,0,1,0],[0,0,0,1],[-1,0,0,0],[0,-1,0,0]])\n dmat = d*np.array([[0,0,0,1],[0,0,-1,0],[0,1,0,0],[-1,0,0,0]])\n return amat+bmat+cmat+dmat", "def forward_kinematics(mj_model, mj_data, q, body_name):\n\n mj_data.qpos[:len(q)] = q\n mujoco.mj_fwdPosition(mj_model, mj_data)\n return mj_data.body(body_name).xpos.copy(), mj_data.body(body_name).xmat.reshape(3, 3).copy()", "def orientation_px4_to_gazebo(orientation_q):\n return tf_aircraft_to_baselink(tf_ned_to_enu(orientation_q))", "def get_joint_angles_from_physical_data(self):\n print(\"\\nGetting the physical 
arm joint angles...\")\n with open(directory + '/final_test/test_data/Matrices/Angles_9.0.csv') as file:\n joint_data = csv.reader(file)\n idx = 0\n for row in joint_data:\n for j in row:\n key = self.joint_names[idx]\n self.joint_angles[key] = float(j)\n print(key, \": \", self.joint_angles[key])\n idx += 1\n if idx == len(self.joint_names):\n break\n file.close()", "def _quaternions_guard_clauses(q1: Union[list, np.ndarray], q2: Union[list, np.ndarray]) -> None:\n for label, quaternion in zip(['q1', 'q2'], [q1, q2]):\n if not isinstance(quaternion, (list, np.ndarray)):\n raise TypeError(f\"{label} must be an array. Got {type(quaternion)}\")\n q1, q2 = np.copy(q1), np.copy(q2)\n for quaternion in [q1, q2]:\n if quaternion.shape[-1] != 4:\n raise ValueError(f\"Quaternions must be of shape (N, 4) or (4,). Got {quaternion.shape}.\")\n if q1.shape != q2.shape:\n raise ValueError(f\"Cannot compare q1 of shape {q1.shape} and q2 of shape {q2.shape}\")", "def _get_qindex(circ, name, index):\n ret = 0\n for reg in circ.qregs:\n if name != reg.name:\n ret += reg.size\n else:\n return ret + index\n return ret + index", "def base_orientation_quaternion(self):\n raise NotImplementedError('Not yet implemented!')", "def xd_element(name):\n try:\n name = name[:2]\n except:\n pass\n try:\n covalence_radius[name]\n except:\n name = name[0]\n return name", "def quaternion_to_angle(self, q):\n\tx, y, z, w = q.x, q.y, q.z, q.w\n\troll, pitch, yaw = tf.transformations.euler_from_quaternion((x, y, z, w))\n\treturn yaw", "def _read_qfile(qfile, periods):\n\n with open(qfile, 'r') as fid:\n n_lines = int(fid.readline())\n\n qf = pd.read_csv(qfile, header=None, skiprows=n_lines+1, sep='\\s+')\n qf.columns = ['n', 'l', 'w_mHz', 'Q', 'phi', 'ph_vel',\n 'gr_vel', 'ph_vel_qcorrected', 'T_qcorrected', 'T_sec']\n qf = qf[::-1]\n qf = qf[qf['n'] == 0] # Fundamental mode only\n qf.reset_index(drop=True, inplace=True)\n\n ph_vel = np.interp(periods, qf.T_qcorrected, qf.ph_vel_qcorrected)\n\n return ph_vel", "def to_q(self, method: str = 'chiaverini', **kw) -> np.ndarray:\n return self.to_quaternion(method=method, **kw)", "def angle_and_axis(basis):\n q = matrix.col(basis.orientation).normalize()\n return q.unit_quaternion_as_axis_and_angle(deg=True)" ]
[ "0.5734758", "0.5489396", "0.5309817", "0.5117031", "0.50999624", "0.5072133", "0.50482094", "0.50176036", "0.4997793", "0.49432415", "0.4891485", "0.4865569", "0.47592014", "0.47541997", "0.4749527", "0.47479632", "0.47286186", "0.4689717", "0.4671337", "0.46211225", "0.46206507", "0.46137872", "0.46024984", "0.45985946", "0.45778516", "0.45697257", "0.45388046", "0.45113033", "0.45034692", "0.44960293", "0.44957167", "0.44941765", "0.44837573", "0.44816512", "0.44738752", "0.4472173", "0.4469267", "0.44659668", "0.44514266", "0.44505468", "0.4441107", "0.44384503", "0.44331527", "0.4432948", "0.44285056", "0.44233546", "0.43980566", "0.43889", "0.4383696", "0.4376024", "0.43520904", "0.43488124", "0.43290251", "0.4327748", "0.43241942", "0.43088362", "0.43076405", "0.42832562", "0.4275225", "0.42677718", "0.4260983", "0.42604494", "0.42601636", "0.425439", "0.42523202", "0.4246964", "0.4246964", "0.4246964", "0.42444032", "0.42441413", "0.42407286", "0.42403352", "0.423569", "0.4233227", "0.42275262", "0.42193773", "0.42186797", "0.42162392", "0.42162323", "0.42161152", "0.42156285", "0.42125022", "0.42049873", "0.42026025", "0.41893184", "0.4188075", "0.41851267", "0.4183631", "0.41817188", "0.41756648", "0.41695356", "0.41680217", "0.4167032", "0.41607338", "0.41549465", "0.4146629", "0.41269618", "0.41208562", "0.41191214", "0.41169566" ]
0.6961554
0
Reverse order of given text characters.
def encode(text: str) -> str: reversed_text = "".join(char for char in text[-1::-1]) return reversed_text
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def task10_string_reversed(text):\n return text[::-1]", "def string_reverse(text):\n rev_text = text[::-1]\n return rev_text", "async def reverse(self, ctx, *, text: str):\n t_rev = text[::-1].replace(\"@\", \"@\\u200B\").replace(\"&\", \"&\\u200B\")\n await ctx.send(f\"🔁 {t_rev}\")", "def reverse_string(characters):\n phrase_chars = list(characters)\n phrase_half_len = int(len(phrase_chars)/2)\n \n for i in range(phrase_half_len):\n save_char = phrase_chars[i]\n phrase_chars[i] = phrase_chars[-(i+1)]\n phrase_chars[-(i+1)] = save_char\n\n reversed_chars = \"\".join(phrase_chars)\n return reversed_chars", "def reverse(word):\n return word[::-1]", "def reverse_characters(message, left_index, right_index):\n\n while left_index < right_index:\n\n message[left_index], message[right_index] = message[right_index], message[left_index]\n left_index += 1\n right_index += 1\n\n return message", "def reverse_chars(message, left, right):\n\n while left < right:\n message[left], message[right] = message[right], message[left]\n left += 1\n right -= 1\n\n return message", "def reverseComplement(seq, alphabet='ACGT'):\n compl= dict(zip('ACGTNRYWSMKBHDV', 'TGCANYRWSKMVDHB'))\n return ''.join([compl[base]\n for base in seq.upper().replace('U', 'T')])[::-1]", "def reverse(list_of_chars):\n\n left = 0\n right = len(list_of_chars) - 1\n\n while left < right:\n list_of_chars[left], list_of_chars[right] = \\\n list_of_chars[right], list_of_chars [left]\n\n left += 1\n right -= 1\n\n return list_of_chars", "def reverse_string(s):\n s.reverse()", "def reverse(s):\n return s[::-1]", "def reverse(text):\n #The empty String translates to False in a boolean context in Python\n if text: \n return reverse(text[1:]) + text[0]\n else:\n return text", "def reverse(string):\n return string[::-1]", "def reverse_text(input_text):\n new_name = \"\"\n for i in range(1, len(input_text)+1):\n new_name += input_text[-i]\n return new_name", "def reverse_complement(seq):\n if sys.version_info.major == 2:\n conversion = string.maketrans('ACGTacgt','TGCAtgca')\n else:\n conversion = str.maketrans('ACGTacgt','TGCAtgca')\n\n comp = seq.translate(conversion)\n rev_comp = comp[::-1]\n return rev_comp", "def reverse(seq):\n return seq[::-1]", "def reverse(seq):\n return seq[::-1]", "def reverse_read(read):\n reversed_read = \"\"\n for i in range(len(read)-1, -1, -1):\n if read[i] == \"A\":\n reversed_read += \"T\"\n elif read[i] == \"T\":\n reversed_read += \"A\"\n elif read[i] == \"G\":\n reversed_read += \"C\"\n elif read[i] == \"C\":\n reversed_read += \"G\"\n else:\n raise ValueError(\"One of the read contains wrong characters.\")\n\n return reversed_read", "def reverse_pair(text):\n newtext = text.split(' ')\n newtext = newtext[-1::-1]\n return ' '.join(newtext)", "def reverseComplement(string):\n rMap = { \"A\":\"T\", \"T\":\"A\", \"C\":\"G\", \"G\":\"C\", \"N\":\"N\"}\n return \"\".join(rMap[i] for i in string[::-1])", "def reverse_string(sen):\n return sen[::-1]", "def reverse_string( str ):\n return str[::-1]", "def reverse_words(string):\n pass # TODO", "def reverseComplement(seq):\n seq=seq.upper()\n # complement\n compl = complement(seq)\n # reverse\n return compl[::-1]", "def reverseString(self, s) -> None:\n i = 0\n j = len(s) - 1\n while i < j:\n temp = s[i]\n s[i] = s[j]\n s[j] = temp\n i += 1\n j -= 1", "def reverse(s):\n # index = -1\n # revwerd = \"\"\n # while index > (-1 - len(s)):\n # letter = s[index]\n # revwerd += letter\n # index -= 1\n # return revwerd\n\n# less convoluted version\n rev_str = \"\"\n for i in 
range(1, len(s) + 1):\n rev_str += s[-i]\n return rev_str", "def invertir_cadena(texto):\n if isinstance(texto, str):\n resultado = ''\n\n for i in range(len(texto) - 1, -1, -1):\n resultado += texto[i]\n \n return resultado\n else:\n raise TypeError('No se ha especificado una cadena de caracteres como argumento.')", "def reverse(s):\n\n rev = ''\n for ch in s:\n rev = ch + rev\n\n return rev", "def reverse_pair(some_text):\n #This function will return the reverse pair of input sentence.\n list_text = some_text.split(\" \")\n list_text.reverse()\n return \" \".join(list_text)", "def reverse(s):\n result = ''\n for i in xrange(len(s)-1, -1, -1):\n result += s[i]\n return result", "def reverseString(string):\n return string[::-1]", "def ReverseComplement1(seq):\n seq_dict = {'A':'T','T':'A','G':'C','C':'G'}\n return \"\".join([seq_dict[base] for base in reversed(seq)])", "def reverseComplement(s):\n\tcomplement = {'A':'T', 'C':'G', 'G':'C', 'T':'A', 'N':'N'}\n\tt = ''\n\tfor base in s:\n\t\tt = complement[base] + t\n\treturn t", "def reverseString(self, s: List[str]) -> None:\n i,j=0,len(s)-1\n while i<j:\n s[i],s[j]=s[j],s[i]\n i,j=i+1,j-1\n \n #or s.reverse()", "def reverse_vowels(s):\n\n phrase = \"\"\n vowels = []\n for letter in s:\n if letter.lower() in \"aeiou\":\n phrase += \"~\"\n vowels.append(letter)\n else: \n phrase += letter\n \n index = 0\n new_phrase = \"\"\n vowels = vowels[-1:-len(vowels)-1:-1]\n \n for letter in phrase:\n\n if letter == \"~\":\n new_phrase += vowels[index]\n index += 1\n else:\n new_phrase += letter\n\n return new_phrase", "def reverseString(self, s: 'List[str]') -> 'None':\n # Could do s = s[::-1]\n \n # swap the char at front and char at back upto the middle\n l = len(s)\n for i in range(l//2):\n temp = s[i]\n s[i] = s[l-i-1]\n s[l-i-1] = temp", "def reverseString(s):\n return s[::-1]", "def reverse_compliment(seq):\n compliment = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}\n reverse = seq[::-1]\n rev_comp = ''.join([compliment[nt] for nt in reverse])\n return rev_comp", "def reverse(s):\n\n s_reversed = ''\n for ch in s:\n s_reversed = ch + s_reversed\n\n return s_reversed", "def revcom(s):\n def complement(s):\n basecomplement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'N': 'N'}\n letters = list(s)\n letters = [basecomplement[base] for base in letters]\n return ''.join(letters)\n return complement(s[::-1])", "def reverse(input=''):\n return input[::-1]", "def reverse_complement(dna: str):\n dna = dna.strip()\n result = [\" \"] * len(dna)\n for index, letter in enumerate(reversed(dna)):\n result[index] = complement_map[letter]\n return \"\".join(result)", "def reverse(input):\n return input[::-1]", "def reverseString(self, s: List[str]) -> None:\n s.reverse()\n i,j = 0, len(s)-1\n while i<j:\n s[i], s[j] = s[j], s[i]\n i+=1\n j-=1", "def reverse(self):\n self._sequence.reverse()", "def reverse_complementary(seq):\n\n complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}\n return \"\".join(complement[n] for n in reversed(seq))", "def reverseString(self, s: List[str]) -> None:\n size = len(s)\n for i in range(size//2):\n s[i], s[~i] = s[~i], s[i]\n # s[i], s[size-i-1] = s[size-i-1], s[i]\n\n # s[:] = s[::-1]", "def reverse2(str):\n s_list = list(str)\n for i in range(len(str)//2):\n exch(s_list, i, len(str)-i-1)\n return ''.join(s_list)", "def reverser(self,i):\r\n\t\timport sys\r\n\t\tcounter = 0\r\n\t\treversedSeq = list(self.sequences[i][::-1]) \t\t# Create a reversed list that will allow for counting to be done relative to forward strand.\r\n\r\n\t\tfor 
character in reversedSeq: \t\t\t\t# Assign the corresponding reveresed values.\r\n\t\t\tif character == \"A\":\r\n\t\t\t\treversedSeq[counter] = \"T\"\r\n\t\t\telif character == \"T\":\r\n\t\t\t\treversedSeq[counter] = \"A\"\r\n\t\t\telif character == \"C\":\r\n\t\t\t\treversedSeq[counter] = \"G\"\r\n\t\t\telse:\r\n\t\t\t\treversedSeq[counter] = \"C\"\r\n\t\t\tcounter+=1 \r\n\t\treversedSeq = \"\".join(reversedSeq) \t\t\t# After the sequence is reversed, join all the values togther.\r\n\t\tself.reversedSequenceList.append(reversedSeq) \t\t# Add the reversedSeq to the end of the reversedSequenceList.\r", "def swapCharacters(word):\n l = list(word)\n temp = word[-1]\n l[-1] = l[0]\n l[0] = temp\n return ''.join(l)", "def reverse(x):\n return x[::-1]", "def reverse_string_2(s):\n s[:] = s[::-1]", "def reverseString(self, s: List[str]) -> None:\n front = 0\n back = len(s) - 1\n\n while front < back:\n s[front], s[back] = s[back], s[front]\n front += 1\n back -= 1", "def revcomp(self, seq):\n tab = self.maketrans(b'ACNGT', b'TGNCA')\n return seq.translate(tab)[::-1]", "def reverseVowels(self, s):\n if not s:\n return s\n\n vowels = ['a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U']\n c = list(s)\n l, r = 0, len(c)-1\n\n while l < r:\n while l < len(c) and c[l] not in vowels:\n l += 1\n while r > -1 and c[r] not in vowels:\n r -= 1\n\n if l < r:\n # c MUST BE A LIST!!!!\n c[l], c[r] = c[r], c[l]\n l += 1\n r -= 1\n return \"\".join(c)", "def reverseString(self, s: List[str]) -> None:\n i = 0\n j = len(s) - 1\n while i < j:\n s[i], s[j] = s[j], s[i]\n i += 1\n j -= 1\n return s", "def reverse_complement(seq):\n return ''.join([BASE_TO_COMP[b] for b in seq][::-1])", "def reversed_of_string(n):\n return ''.join(reversed(n))", "def reverse_rna_complement(seq):\n\n seq_upper = seq.isupper()\n\n seq = seq[::-1]\n\n seq = seq.upper()\n\n #compute complement\n seq = seq.replace('A','u')\n seq = seq.replace('T','a')\n seq = seq.replace('G','c')\n seq = seq.replace('C','g')\n\n if seq_upper:\n return seq.upper()\n else:\n return seq", "def revComp(s):\n d = {\"A\": \"T\", \"C\": \"G\", \"G\": \"C\", \"T\": \"A\", \"N\": \"N\"}\n s = s[::-1]\n x = [d[c] for c in s]\n return \"\".join(x)", "def elements_reversed(seq):\n new_seq = seq[::-1]\n return new_seq", "def reverse(self, i):\n return self.decode[i]", "def reverse_string(s):\n return \"\".join(reversed(list(s)))", "def reverseString(self, s: List[str]) -> None:\n def helper(l, r):\n if l<r:\n s[l], s[r] = s[r], s[l]\n helper(l+1, r-1)\n helper(0, len(s)-1)", "def reverseString(self, s) -> None:\n # n=len(s)\n # for i in range(int(n/2)):\n # s[i],s[n-1-i]=s[n-1-i],s[i]\n s=s[::-1]\n print(s)", "def reverse_this(seq):\n r_seq = seq[::-1]\n return r_seq", "def reverseString(s):\n for i in range(len(s)//2):\n t = s[i]\n s[i] = s[len(s)-i-1]\n s[len(s)-i-1] = t", "def reverseString(self, s: list[str]) -> None:\n l, r = 0, len(s)-1\n while l < r:\n s[l], s[r] = s[r], s[l]\n l += 1\n r -= 1", "def reverseString(self, s):\n for i in range(len(s)//2):\n s[i], s[-(i+1)] = s[-(i+1)], s[i]", "def reverse_elements(seq):\n\n new_seq = []\n\n i = -1\n\n while i >= -len(seq):\n new_seq.append(seq[i])\n i -= 1\n\n return format_seq(seq, new_seq)", "def reverseString(self, s: List[str]) -> None:\n front, back = 0, len(s) - 1\n while front < back:\n s[front], s[back] = s[back], s[front]\n front = front + 1\n back = back - 1\n\n print(s)", "def decrypt(self, text):\n\n output = []\n text = text.upper()\n for char in text:\n try:\n text_index = self.combined.index(char)\n 
output.append(self.alphabet[text_index])\n except ValueError:\n output.append(char)\n\n return ''.join(output)", "def generate_reverse(path):\n \n with open(path, \"r\") as f:\n for line in f:\n line = line.strip()\n # print (line) \n if len(line) == 0:\n continue\n \n if line[0] == \">\":\n line = line + \"_R\"\n print(line)\n else:\n buf = \"\"\n for char in line:\n if char == \"A\":\n buf += \"T\"\n elif char == \"T\":\n buf += \"A\"\n elif char == \"G\":\n buf += \"C\"\n elif char == \"C\":\n buf += \"G\"\n\n print (buf[::-1])", "def reverse_complement(pattern):\n\n complement = ''\n for i in range(len(pattern)):\n if pattern[i] == 'A':\n complement = complement + 'T'\n elif pattern[i] == 'T':\n complement = complement + 'A'\n elif pattern[i] == 'C':\n complement = complement + 'G'\n else:\n complement = complement + 'C'\n rev_complement = list(reversed(complement))\n return ''.join(rev_complement)", "def reverse_elements(seq):\n seq_copy = seq [::-1]\n return seq_copy", "def decode(text: str) -> str:\n # Reverse of reverse is original text.\n return encode(text)", "def reverse_complement(sequence):\n return sequence[::-1].translate(RC_TRANS)", "def task13_words_backward(text):\n if text and isinstance(text, str):\n text = text.split(' ')\n text.reverse()\n text = ' '.join(text)\n return text\n else:\n raise ValueError", "def reverse_and_complement(string):\n reverse_dict = dict([('A', 'T'), ('T', 'A'), ('C', 'G'), ('G', 'C'), ('*', '*')])\n complement_string = ''.join([reverse_dict[c] for c in string])\n return complement_string[::-1]", "def decrypt(self, text):\n\n output = []\n text = text.upper()\n\n for char in text:\n try:\n index = self.alpha.index(char)\n except ValueError:\n output.append(char)\n else:\n output.append(self.alpha[21 * (index - 8) % 26])\n return \"\".join(output)", "def reverseComplementer(self, ORFsequence):\r\n reverseComplement = str()\r\n reverserDict = {\"A\":\"U\",\"U\":\"A\",\"C\":\"G\",\"G\":\"C\"}\r\n reversedseq = ORFsequence[::-1]\r\n for nucleotide in reversedseq:\r\n reverseComplement+=reverserDict[nucleotide]\r\n return reverseComplement", "def reverse_candid(candid):\n return str(candid)[::-1]", "def get_reverse_complement(s):\n dna_complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}\n return ''.join(filter(None, reversed([ dna_complement[c.upper()] if c.upper() in dna_complement else '' for c in s ])))", "def reverse_complement(dna, reverse=True, complement=True):\n \n # Make translation table\n trans_table = string.maketrans('ATGCatgc', 'TACGtacg')\n \n # Make complement to DNA\n comp_dna = dna.translate(trans_table)\n \n # Output all as strings\n if reverse and complement:\n return comp_dna[::-1]\n if reverse and not complement:\n return dna[::-1]\n if complement and not reverse:\n return comp_dna\n if not complement and not reverse:\n return dna", "def get_reverse_complement(cls, pattern: str) -> str:\n return ''.join(reversed([cls.dna_complement[nuc] for nuc in pattern]))", "def reverse_words(frase: str):\n raise NotImplemented", "def string_mirror(text):\n rev_text = text[::-1]\n mirror_text = text + rev_text\n return mirror_text", "def change_direction_str(str_lst):\r\n backwards_str = []\r\n for string in str_lst:\r\n backwards_str.append(string[::-1])\r\n return backwards_str", "def revcomp(b: bytes) -> bytes:\n try:\n return b''.join(_revcomp[n] for n in b[::-1])\n except KeyError:\n raise ValueError('{0} contains invalid nucleotide character. 
Supported '\n 'characters are A, C, G, T and N.'.format(b.decode()))", "def reverse(s):\n\n # make a list of letters from string\n\n seqList = list(s)\n #print(seqList)\n\n # reverse the list\n\n revseqList = seqList[::-1]\n #print(revseqList)\n\n # join the letters of the list into string and return\n\n revstring = \"\"\n for i in revseqList:\n revstring += i\n #print(revstring)\n return revstring", "def reverseString(self, s: List[str]) -> None:\n #special condition, if not met\n if not s:\n return []\n else:\n s.reverse()\n \"\"\"\n # a pointer to keep count the end of the string\n end = -1\n #iterate through in the middle of the string\n for start in range(0, int((len(s)+1)/2)):\n self.swap(s, start, end)\n return s\n \n def swap(self, s, start, end):\n tmp = s[start]\n s[start] = s[end]\n s[end] = tmp\n end -= 1\n \"\"\"", "def reverseString(self, s: list[str]) -> None:\n for index in range(len(s)//2):\n s[index], s[-1-index] = s[-1-index], s[index]", "def main():\n\ts = 'stressed'\n\tprint(reverse(s))", "def text_direction(self) -> str:\n return ''.join(word[0] for word in self.character_order.split('-'))", "def backward_character():\r\n set_point(point().offset(-1))", "def reverseString(self, s) -> None:\n # 방법 1\n s.reverse()\n # 방법 2\n # half_len = int(len(s) / 2)\n # for i in range(half_len):\n # temp = s[i]\n # s[i] = s[len(s) - 1 - i]\n # s[len(s) - 1 - i] = temp", "def reverse_complement(DNA, as_string=False, unreversed_order=False):\n comp_dict = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}\n\n if unreversed_order:\n complements = [comp_dict[nuc] for nuc in DNA]\n else:\n complements = [comp_dict[nuc] for nuc in DNA[::-1]]\n\n if as_string:\n complements = \"\".join(complements)\n\n return complements", "def reverseString(self, s: List[str]) -> None:\n lo=0\n hi=len(s)-1\n while lo<hi:\n s[lo],s[hi]=s[hi],s[lo]\n lo+=1\n hi-=1", "def decryptionSwap(text, index1, index2):\n s = text\n transformedChar = \"\"\n\n swapIndex1 = s[index1]\n swapIndex2 = s[index2]\n\n prevText = s[:index1]\n midText = s[(index1 + 1):index2]\n endText = s[(index2 + 1):]\n\n transformedChar = prevText + swapIndex2 + midText + swapIndex1 + endText\n\n print(\"Swapped Decrypted text : \" )\n return transformedChar", "def reverseString2(s: List[str]) -> None:\n left = 0\n right = len(s) - 1\n\n while left < right:\n s[left], s[right] = s[right], s[left]\n left += 1\n right -= 1\n\n # 시간복잡도 = O(n)" ]
[ "0.7165525", "0.70656836", "0.68825185", "0.6665356", "0.6664884", "0.6568191", "0.6480333", "0.644043", "0.64028203", "0.6399076", "0.6385635", "0.63255966", "0.62972546", "0.6293171", "0.62818104", "0.6206855", "0.6206855", "0.618907", "0.6167491", "0.6159032", "0.61581963", "0.61235493", "0.6105261", "0.60952127", "0.609167", "0.60784864", "0.6071343", "0.6044783", "0.60337967", "0.6024316", "0.6010482", "0.6000269", "0.59849864", "0.5977158", "0.5969982", "0.59549713", "0.59531534", "0.5934087", "0.59214026", "0.5913696", "0.59063154", "0.5893002", "0.5891157", "0.5886904", "0.58807343", "0.5875941", "0.58626246", "0.58492345", "0.5836636", "0.5834868", "0.5832104", "0.5830474", "0.58224934", "0.5809055", "0.58076054", "0.58043104", "0.5801745", "0.57993335", "0.5796051", "0.5783426", "0.57831997", "0.57618904", "0.5761701", "0.5756196", "0.57517654", "0.5749678", "0.5749323", "0.5748966", "0.5740185", "0.57366997", "0.5736405", "0.5734688", "0.5714455", "0.5706487", "0.5704594", "0.5700213", "0.56965816", "0.56926405", "0.5690438", "0.56845427", "0.5678881", "0.56773007", "0.5668088", "0.56495464", "0.5648217", "0.5647501", "0.5635411", "0.56329286", "0.562977", "0.56270874", "0.56258976", "0.56254894", "0.5622578", "0.56221527", "0.561766", "0.5615884", "0.5615194", "0.56130993", "0.5612548", "0.5599258" ]
0.62933666
13
Obtain original text from a reversed text.
def decode(text: str) -> str: # Reverse of reverse is original text. return encode(text)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def string_reverse(text):\n rev_text = text[::-1]\n return rev_text", "def task10_string_reversed(text):\n return text[::-1]", "def string_mirror(text):\n rev_text = text[::-1]\n mirror_text = text + rev_text\n return mirror_text", "def reverse(text):\n #The empty String translates to False in a boolean context in Python\n if text: \n return reverse(text[1:]) + text[0]\n else:\n return text", "async def reverse(self, ctx, *, text: str):\n t_rev = text[::-1].replace(\"@\", \"@\\u200B\").replace(\"&\", \"&\\u200B\")\n await ctx.send(f\"🔁 {t_rev}\")", "def text(self):\n return self.original.text", "def reverse_text(input_text):\n new_name = \"\"\n for i in range(1, len(input_text)+1):\n new_name += input_text[-i]\n return new_name", "def reverse_pair(some_text):\n #This function will return the reverse pair of input sentence.\n list_text = some_text.split(\" \")\n list_text.reverse()\n return \" \".join(list_text)", "def reverse_pair(text):\n newtext = text.split(' ')\n newtext = newtext[-1::-1]\n return ' '.join(newtext)", "def task13_words_backward(text):\n if text and isinstance(text, str):\n text = text.split(' ')\n text.reverse()\n text = ' '.join(text)\n return text\n else:\n raise ValueError", "def reverse_words(frase: str):\n raise NotImplemented", "def reverse(word):\n return word[::-1]", "def get_text(downgrade_titles=False):", "def encode(text: str) -> str:\n reversed_text = \"\".join(char for char in text[-1::-1])\n return reversed_text", "def decode_review(text, word_index):\n reverse_word_index = dict([(value, key)\n for (key, value) in word_index.items()])\n return \" \".join([reverse_word_index.get(i, \"?\") for i in text])", "def recompute_output_text(self):\r\n s = self.input_string.get()\r\n senc = rotcode.rotate(s,steps=self.steps.get())\r\n if self.reverse_flag.get():\r\n # Reverse the encoded text\r\n senc = senc[::-1]\r\n self.output_string.set(senc)", "def reverse_words(string):\n pass # TODO", "def invertir_cadena(texto):\n if isinstance(texto, str):\n resultado = ''\n\n for i in range(len(texto) - 1, -1, -1):\n resultado += texto[i]\n \n return resultado\n else:\n raise TypeError('No se ha especificado una cadena de caracteres como argumento.')", "def decrypt(self, text):\n return self.encrypt(text)", "def reverter_2(frase: list) -> list:\n # Algoritmo com memória constante e tempo linear\n inicio = 0\n\n for indice, letra in enumerate(frase):\n if letra == ' ':\n reverter_palavra(frase, inicio, indice-1)\n inicio = indice+1\n\n reverter_palavra(frase, inicio, len(frase)-1)\n frase.reverse() # Tempo O(n), e memória O(1)\n\n return frase", "def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):\n\n def _strip_spaces(text):\n ns_chars = []\n ns_to_s_map = collections.OrderedDict()\n for (i, c) in enumerate(text):\n if c == \" \":\n continue\n ns_to_s_map[len(ns_chars)] = i\n ns_chars.append(c)\n ns_text = \"\".join(ns_chars)\n return (ns_text, ns_to_s_map)\n\n # We first tokenize `orig_text`, strip whitespace from the result\n # and `pred_text`, and check if they are the same length. If they are\n # NOT the same length, the heuristic has failed. 
If they are the same\n # length, we assume the characters are one-to-one aligned.\n tokenizer = BasicTokenizer(do_lower_case=do_lower_case)\n\n tok_text = \" \".join(tokenizer.tokenize(orig_text))\n\n start_position = tok_text.find(pred_text)\n if start_position == -1:\n if verbose_logging:\n logger.info(\n \"Unable to find text: '%s' in '%s'\" % (pred_text, orig_text))\n return orig_text\n end_position = start_position + len(pred_text) - 1\n\n (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)\n (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)\n\n if len(orig_ns_text) != len(tok_ns_text):\n if verbose_logging:\n logger.info(\"Length not equal after stripping spaces: '%s' vs '%s'\",\n orig_ns_text, tok_ns_text)\n return orig_text\n\n # We then project the characters in `pred_text` back to `orig_text` using\n # the character-to-character alignment.\n tok_s_to_ns_map = {}\n for (i, tok_index) in tok_ns_to_s_map.items():\n tok_s_to_ns_map[tok_index] = i\n\n orig_start_position = None\n if start_position in tok_s_to_ns_map:\n ns_start_position = tok_s_to_ns_map[start_position]\n if ns_start_position in orig_ns_to_s_map:\n orig_start_position = orig_ns_to_s_map[ns_start_position]\n\n if orig_start_position is None:\n if verbose_logging:\n logger.info(\"Couldn't map start position\")\n return orig_text\n\n orig_end_position = None\n if end_position in tok_s_to_ns_map:\n ns_end_position = tok_s_to_ns_map[end_position]\n if ns_end_position in orig_ns_to_s_map:\n orig_end_position = orig_ns_to_s_map[ns_end_position]\n\n if orig_end_position is None:\n if verbose_logging:\n logger.info(\"Couldn't map end position\")\n return orig_text\n\n output_text = orig_text[orig_start_position:(orig_end_position + 1)]\n return output_text", "def reverse(s):\n return s[::-1]", "def decryptionSwap(text, index1, index2):\n s = text\n transformedChar = \"\"\n\n swapIndex1 = s[index1]\n swapIndex2 = s[index2]\n\n prevText = s[:index1]\n midText = s[(index1 + 1):index2]\n endText = s[(index2 + 1):]\n\n transformedChar = prevText + swapIndex2 + midText + swapIndex1 + endText\n\n print(\"Swapped Decrypted text : \" )\n return transformedChar", "def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):\n\n def _strip_spaces(text):\n ns_chars = []\n ns_to_s_map = collections.OrderedDict()\n for (i, c) in enumerate(text):\n if c == \" \":\n continue\n ns_to_s_map[len(ns_chars)] = i\n ns_chars.append(c)\n ns_text = \"\".join(ns_chars)\n return (ns_text, ns_to_s_map)\n\n # We first tokenize `orig_text`, strip whitespace from the result\n # and `pred_text`, and check if they are the same length. If they are\n # NOT the same length, the heuristic has failed. 
If they are the same\n # length, we assume the characters are one-to-one aligned.\n\n tokenizer = BasicTokenizer(do_lower_case=do_lower_case)\n\n tok_text = \" \".join(tokenizer.tokenize(orig_text))\n\n start_position = tok_text.find(pred_text)\n if start_position == -1:\n if verbose_logging:\n logger.info(\n \"Unable to find text: '%s' in '%s'\" % (pred_text, orig_text))\n return orig_text\n end_position = start_position + len(pred_text) - 1\n\n (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)\n (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)\n\n if len(orig_ns_text) != len(tok_ns_text):\n if verbose_logging:\n logger.info(\"Length not equal after stripping spaces: '%s' vs '%s'\",\n orig_ns_text, tok_ns_text)\n return orig_text\n\n # We then project the characters in `pred_text` back to `orig_text` using\n # the character-to-character alignment.\n tok_s_to_ns_map = {}\n for (i, tok_index) in tok_ns_to_s_map.items():\n tok_s_to_ns_map[tok_index] = i\n\n orig_start_position = None\n if start_position in tok_s_to_ns_map:\n ns_start_position = tok_s_to_ns_map[start_position]\n if ns_start_position in orig_ns_to_s_map:\n orig_start_position = orig_ns_to_s_map[ns_start_position]\n\n if orig_start_position is None:\n if verbose_logging:\n logger.info(\"Couldn't map start position\")\n return orig_text\n\n orig_end_position = None\n if end_position in tok_s_to_ns_map:\n ns_end_position = tok_s_to_ns_map[end_position]\n if ns_end_position in orig_ns_to_s_map:\n orig_end_position = orig_ns_to_s_map[ns_end_position]\n\n if orig_end_position is None:\n if verbose_logging:\n logger.info(\"Couldn't map end position\")\n return orig_text\n\n output_text = orig_text[orig_start_position:(orig_end_position + 1)]\n return output_text", "def reverse_string(sen):\n return sen[::-1]", "def test_get_original_text():\n\n assert lex._lexer(None, None)._load_text(\"test\").get_original_text() == \"test\"", "def reverse(msg):\n return str(msg)[::-1]", "def mirror_string(the_string):\r\n return the_string + reverse_string(the_string)", "def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):\n\n # When we created the data, we kept track of the alignment between original\n # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So\n # now `orig_text` contains the span of our original text corresponding to the\n # span that we predicted.\n #\n # However, `orig_text` may contain extra characters that we don't want in\n # our prediction.\n #\n # For example, let's say:\n # pred_text = steve smith\n # orig_text = Steve Smith's\n #\n # We don't want to return `orig_text` because it contains the extra \"'s\".\n #\n # We don't want to return `pred_text` because it's already been normalized\n # (the SQuAD eval script also does punctuation stripping/lower casing but\n # our tokenizer does additional normalization like stripping accent\n # characters).\n #\n # What we really want to return is \"Steve Smith\".\n #\n # Therefore, we have to apply a semi-complicated alignment heuristic between\n # `pred_text` and `orig_text` to get a character-to-character alignment. 
This\n # can fail in certain cases in which case we just return `orig_text`.\n\n def _strip_spaces(text):\n ns_chars = []\n ns_to_s_map = collections.OrderedDict()\n for (i, c) in enumerate(text):\n if c == \" \":\n continue\n ns_to_s_map[len(ns_chars)] = i\n ns_chars.append(c)\n ns_text = \"\".join(ns_chars)\n return (ns_text, ns_to_s_map)\n\n # We first tokenize `orig_text`, strip whitespace from the result\n # and `pred_text`, and check if they are the same length. If they are\n # NOT the same length, the heuristic has failed. If they are the same\n # length, we assume the characters are one-to-one aligned.\n tokenizer = BasicTokenizer(do_lower_case=do_lower_case)\n\n tok_text = \"\".join(tokenizer.tokenize(orig_text))\n\n start_position = tok_text.find(pred_text)\n if start_position == -1:\n if verbose_logging:\n print(\"Unable to find text: '%s' in '%s'\" % (pred_text, orig_text))\n return orig_text\n end_position = start_position + len(pred_text) - 1\n\n (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)\n (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)\n\n if len(orig_ns_text) != len(tok_ns_text):\n if verbose_logging:\n print(\"Length not equal after stripping spaces: '%s' vs '%s'\" % (orig_ns_text, tok_ns_text))\n return orig_text\n\n # We then project the characters in `pred_text` back to `orig_text` using\n # the character-to-character alignment.\n tok_s_to_ns_map = {}\n for (i, tok_index) in tok_ns_to_s_map.items():\n tok_s_to_ns_map[tok_index] = i\n\n orig_start_position = None\n if start_position in tok_s_to_ns_map:\n ns_start_position = tok_s_to_ns_map[start_position]\n if ns_start_position in orig_ns_to_s_map:\n orig_start_position = orig_ns_to_s_map[ns_start_position]\n\n if orig_start_position is None:\n if verbose_logging:\n print(\"Couldn't map start position\")\n return orig_text\n\n orig_end_position = None\n if end_position in tok_s_to_ns_map:\n ns_end_position = tok_s_to_ns_map[end_position]\n if ns_end_position in orig_ns_to_s_map:\n orig_end_position = orig_ns_to_s_map[ns_end_position]\n\n if orig_end_position is None:\n if verbose_logging:\n print(\"Couldn't map end position\")\n return orig_text\n\n output_text = orig_text[orig_start_position:(orig_end_position + 1)]\n return output_text", "def extractText(text):\n soup = BeautifulSoup(text, 'html.parser')\n for code in soup.find_all('code'):\n code.decompose()\n return soup.get_text()", "def getReversePam(self):\n watson = \"ACGTYRSWKMBDHVN\"\n crick = \"TGCARYSWMKVHDBN\"\n return self.forwardPam[::-1].translate(\n self.forwardPam[::-1].maketrans(watson, crick)\n )", "def main():\n\ts = 'stressed'\n\tprint(reverse(s))", "def translateText(text):\r\n\treturn translator.translate(text, src='en', dest='ro')", "def reverse(string):\n return string[::-1]", "def translate_text(target, text):\n return text", "def get_final_text(config: configure_finetuning.FinetuningConfig, pred_text,\n orig_text):\n\n # When we created the data, we kept track of the alignment between original\n # (whitespace tokenized) tokens and our WordPiece tokenized tokens. 
So\n # now `orig_text` contains the span of our original text corresponding to the\n # span that we predicted.\n #\n # However, `orig_text` may contain extra characters that we don't want in\n # our prediction.\n #\n # For example, let's say:\n # pred_text = steve smith\n # orig_text = Steve Smith's\n #\n # We don't want to return `orig_text` because it contains the extra \"'s\".\n #\n # We don't want to return `pred_text` because it's already been normalized\n # (the SQuAD eval script also does punctuation stripping/lower casing but\n # our tokenizer does additional normalization like stripping accent\n # characters).\n #\n # What we really want to return is \"Steve Smith\".\n #\n # Therefore, we have to apply a semi-complicated alignment heruistic between\n # `pred_text` and `orig_text` to get a character-to-charcter alignment. This\n # can fail in certain cases in which case we just return `orig_text`.\n\n def _strip_spaces(text):\n ns_chars = []\n ns_to_s_map = collections.OrderedDict()\n for i, c in enumerate(text):\n if c == \" \":\n continue\n ns_to_s_map[len(ns_chars)] = i\n ns_chars.append(c)\n ns_text = \"\".join(ns_chars)\n return ns_text, dict(ns_to_s_map)\n\n # We first tokenize `orig_text`, strip whitespace from the result\n # and `pred_text`, and check if they are the same length. If they are\n # NOT the same length, the heuristic has failed. If they are the same\n # length, we assume the characters are one-to-one aligned.\n tokenizer = tokenization.BasicTokenizer(do_lower_case=config.do_lower_case)\n\n tok_text = \" \".join(tokenizer.tokenize(orig_text))\n\n start_position = tok_text.find(pred_text)\n if start_position == -1:\n if config.debug:\n utils.log(\n \"Unable to find text: '%s' in '%s'\" % (pred_text, orig_text))\n return orig_text\n end_position = start_position + len(pred_text) - 1\n\n (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)\n (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)\n\n if len(orig_ns_text) != len(tok_ns_text):\n if config.debug:\n utils.log(\"Length not equal after stripping spaces: '%s' vs '%s'\",\n orig_ns_text, tok_ns_text)\n return orig_text\n\n # We then project the characters in `pred_text` back to `orig_text` using\n # the character-to-character alignment.\n tok_s_to_ns_map = {}\n for (i, tok_index) in six.iteritems(tok_ns_to_s_map):\n tok_s_to_ns_map[tok_index] = i\n\n orig_start_position = None\n if start_position in tok_s_to_ns_map:\n ns_start_position = tok_s_to_ns_map[start_position]\n if ns_start_position in orig_ns_to_s_map:\n orig_start_position = orig_ns_to_s_map[ns_start_position]\n\n if orig_start_position is None:\n if config.debug:\n utils.log(\"Couldn't map start position\")\n return orig_text\n\n orig_end_position = None\n if end_position in tok_s_to_ns_map:\n ns_end_position = tok_s_to_ns_map[end_position]\n if ns_end_position in orig_ns_to_s_map:\n orig_end_position = orig_ns_to_s_map[ns_end_position]\n\n if orig_end_position is None:\n if config.debug:\n utils.log(\"Couldn't map end position\")\n return orig_text\n\n output_text = orig_text[orig_start_position:(orig_end_position + 1)]\n return output_text", "def revcomp(self, seq):\n tab = self.maketrans(b'ACNGT', b'TGNCA')\n return seq.translate(tab)[::-1]", "def reverse_string(s):\n s.reverse()", "def reverse(file_name):\n try:\n with open(file_name, \"r\") as file:\n list_inverted = file.readlines()[::-1]\n list_inverted_per_lines = list([i.replace(\"\\n\", \"\")[::-1] + \"\\n\" for i in list_inverted])\n list_inverted_per_lines[-1] = 
list_inverted_per_lines[-1].replace(\"\\n\", \"\")\n new_file_name = \"disodered.txt\"\n with open(new_file_name, \"w\") as file_2:\n for text in list_inverted_per_lines:\n file_2.write(text)\n print(\"Operation performed successfully!, file saved as '{}' in the main dir\".format(new_file_name))\n except FileNotFoundError:\n print(\"the specified file does not exist!\")", "def reverse(self) -> str:\n return pulumi.get(self, \"reverse\")", "def reverse_words(message):\n\n # Decode the message by reversing the words\n \n reverse_chars(message, 0, len(message) - 1)\n\n current_word_start = 0\n\n for i in range(len(message) + 1):\n if (i == len(message)) or (message[i] == ' '):\n reverse_chars(message, current_word_start, i - 1)\n current_word_start = i + 1\n\n return message", "def PROPER(text):\n return text.title()", "def obtain_text():\n pass", "def getText():", "def getText():", "def getText():", "def getText():", "def getText():", "def str_reverse(self):\n\n return LinkedList.str_reverse_recur(self.front)", "def reverse_candid(candid):\n return str(candid)[::-1]", "def normalize(self, text: str) -> str:", "def get_text(text_input):\r\n return text_input", "def decrypt(self, text):\n\t\tif self.offsets != self.start_off:\n\t\t\traise Exception(\"Current offset != starting offset. Use the reset\"+\\\n\t\t\t\t\t\t\t\" method before decrypting.\")\n\t\treturn self.encrypt(text)", "def reverse_log(log):\n return \"\\n\".join(log.split(\"\\n\")[::-1])", "def rotate_text_clockwise(text):\n # ... add your implementation\n\n max_line_length = 0\n\n for lines in text.splitlines():\n max_line_length = max(max_line_length, len(text.splitlines()))\n\n new_str = \"\"\n\n for i in range(max_line_length):\n lines = text.splitlines()\n lines.reverse()\n for line in lines:\n if len(line) <= i:\n continue\n new_str += line[i]\n new_str += \"\\n\"\n\n return new_str.strip()", "def get_reverse_complement(sequence):\n return get_strand_complement(sequence)[::-1]", "def reverse_this(seq):\n r_seq = seq[::-1]\n return r_seq", "def decryptStory():\n wordList = loadWords()\n text= getStoryString()\n \n shift = findBestShift(wordList, text)\n return applyShift(text, shift)", "def reverter_1(frase: str) -> str:\n lista_de_palavras = frase.split() # Tempo e memória linear\n palavras_reordenadas = reversed(lista_de_palavras)\n return ' '.join(palavras_reordenadas)", "def deCopIfy(text):\n\tif text == \"\":\n\t\treturn text\n\n\tfor lingo in coplingo:\n\t\ttext = re.sub(lingo['regex'], lingo['str'], text)\n\n\treturn text[0].upper() + text[1:]", "def reverse_complement(sequence):\n return sequence[::-1].translate(RC_TRANS)", "def svn_diff_hunk_readline_original_text(*args):\n return _diff.svn_diff_hunk_readline_original_text(*args)", "def decryptionSelfMadeFunction(text,index):\n s = text\n transformedChar = \"\"\n\n transformedChar = s[:index] + s[-1] + s[index:len(s)-1]\n\n print(\"Decrypted Transformed text : \" )\n return transformedChar", "def post_get_convert(self, site, getText):\n return getText", "def revSentence(sentence):\n\n try:\n \n reversedSentence = \"\"\n \n words = sentence.split(\" \") # Getting a list of words in the sentence\n\n if len(words) == 1:\n return sentence\n \n for i in range(len(words)-1,-1,-1): # For loop which counts backwards from number of words - 1 till 0.\n \n reversedSentence = reversedSentence + words[i] \n \n if i != 0: # Adding spaces between words while it is not the last word\n \n reversedSentence += \" \"\n\t\t\t\n return reversedSentence \n\n except:\n print(\"Error in 
reversing sentence : Please try again with a sentence of type 'string' in which the words are seperated with spaces.\")", "def get_text(self):\n return self.text[:500]", "def reverse_vocab(self):\n return self._id2token", "def reverse_vocab(self):\n return self._id2token", "def parsingconvtext(retrievedtext,customtextlist):\r\n if not retrievedtext: #in case empty text \r\n retrievedtext=changenonetostr(retrievedtext)\r\n newtext=BeautifulSoup(retrievedtext).get_text() \r\n #newtext=changenonetostr(retrievedtext)\r\n #newtext=BeautifulSoup(newtext).get_text() \r\n #remove http links\r\n newtext=re.sub(r'http\\S+', '', newtext)\r\n newtext=re.sub(r'\\r\\r\\r\\n', ' ', newtext)\r\n #remove LL specific text\r\n if customtextlist:\r\n for i in customtextlist:\r\n newtext=re.sub(i, '', newtext)\r\n return newtext", "def detex(tex):\n \n #tex = '\\n'.join(reformat(tex, listed=True)[1:])\n global subs\n \n for old, new in subs.iteritems():\n tex = tex.replace(old, new)\n \n return tex.strip()", "def reversed(self):\n return LINE(*self.elems,**{'reverse':(not self.reverse)})", "def get_text_hook(raw):\n soup = bs4.BeautifulSoup(quopri.decodestring(raw), features=\"lxml\")\n return soup.text", "def reverse(s):\n # index = -1\n # revwerd = \"\"\n # while index > (-1 - len(s)):\n # letter = s[index]\n # revwerd += letter\n # index -= 1\n # return revwerd\n\n# less convoluted version\n rev_str = \"\"\n for i in range(1, len(s) + 1):\n rev_str += s[-i]\n return rev_str", "def reverseString(s):\n return s[::-1]", "def refang(self, text: str):", "def get_plain_text(text):\n if not text or not isinstance(text, str):\n return \"\"\n\n return text.lstrip().replace(\"\\n\", \"\").replace(\"\\r\", \"\")", "def revcom(s):\n def complement(s):\n basecomplement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'N': 'N'}\n letters = list(s)\n letters = [basecomplement[base] for base in letters]\n return ''.join(letters)\n return complement(s[::-1])", "def reverseString(string):\n return string[::-1]", "def reverse(input):\n return input[::-1]", "def reverse(s):\n\n rev = ''\n for ch in s:\n rev = ch + rev\n\n return rev", "def lemmatize_text_rus(text):\n text_lemm, text_sent = lemmatize_texts_rus([text])\n text_lemm, text_sent = text_lemm[0], text_sent[0]\n return text_lemm, text_sent", "def mirror(s):\n mir_str = s\n for i in range(1, len(s) + 1):\n mir_str += s[-i]\n return mir_str", "def reverse(s):\n result = ''\n for i in xrange(len(s)-1, -1, -1):\n result += s[i]\n return result", "def reverse(self, i):\n return self.decode[i]", "def reverse_complement(seq):\n if sys.version_info.major == 2:\n conversion = string.maketrans('ACGTacgt','TGCAtgca')\n else:\n conversion = str.maketrans('ACGTacgt','TGCAtgca')\n\n comp = seq.translate(conversion)\n rev_comp = comp[::-1]\n return rev_comp", "def get_reverse_complement(sequence):\n seq = sequence.upper()\n return get_strand_complement(seq)[::-1]", "def reverse(seq):\n return seq[::-1]", "def reverse(seq):\n return seq[::-1]", "def preprocess(self, text):\r\n return text", "def encryptionSwap(text, index1, index2):\n s = text\n transformedChar = \"\"\n\n swapIndex1 = s[index1]\n swapIndex2 = s[index2]\n\n prevText = s[:index1]\n midText = s[(index1+1):index2]\n endText = s[(index2+1):]\n\n transformedChar = prevText + swapIndex2 + midText + swapIndex1 + endText\n\n print(\"Swapped Encrypted text : \" )\n return transformedChar", "def reversed_of_string(n):\n return ''.join(reversed(n))", "def reverseComplement(seq):\n seq=seq.upper()\n # complement\n compl = 
complement(seq)\n # reverse\n return compl[::-1]", "def postprocess(self, text):\r\n return text", "def reverse(self):\n return self[::-1]", "def get_reverse_complement(s):\n dna_complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}\n return ''.join(filter(None, reversed([ dna_complement[c.upper()] if c.upper() in dna_complement else '' for c in s ])))", "def decryptionRotate(text):\n s = text;\n transformedChar = \"\"\n transformedChar = s[1:] + s[0]\n\n print(\"Single Rotation Decrypted text : \" )\n return transformedChar", "def string_reverser(our_string):\n\n # TODO: Write your solution here\n\n reversed_string = ''\n\n i = len(our_string) - 1\n\n while i >= 0:\n reversed_string += our_string[i]\n i -= 1\n\n return reversed_string", "def reverse_string( str ):\n return str[::-1]", "def decryptStory():\n \n r=loadWords()\n\n m1=getStoryString()\n \n p=findBestShift(r, m1)\n \n strans=applyShift(m1,p)\n return strans", "def trans(monitext):\n result = ''\n last_line = 'empty'\n\n while monitext:\n # newline character or empty line(s)\n matched = re.match(r'\\n+', monitext, re.M)\n\n if matched:\n result += matched.group()\n if len(matched.group()) > 1:\n last_line = 'empty'\n elif last_line == 'title':\n result += '\\n'\n last_line = 'empty'\n monitext = monitext[matched.end():]\n continue\n\n # code block\n matched = re.match(r'{{{.*?\\n((\\n|.)*?)\\n}}}', monitext, re.M)\n\n if matched:\n body = matched.groups()[0]\n result += '\\n\\t' + '\\n\\t'.join(body.split('\\n'))\n monitext = monitext[matched.end():]\n last_line = 'code'\n continue\n\n # header\n matched = re.match(r'^(=+) (.+) (=+)', monitext)\n\n if matched:\n title = matched.groups()[1]\n level = len(matched.groups()[0])\n\n if last_line != 'empty':\n result += '\\n'\n\n if level < 4:\n underscore = {2 : '=', 3 : '-'}[level] * mbstrlen(title)\n result += title + os.linesep + underscore\n else:\n result += ('#' * level) + \" \" + title\n monitext = monitext[matched.end():]\n\n last_line = 'title'\n\n continue\n\n # link\n matched = re.match(r'(.*)\\[([^\\s]+[ \\t]+)?(.+)\\]', monitext)\n\n if matched:\n pre = matched.groups()[0]\n url = matched.groups()[1]\n if url:\n url = url.strip()\n name = matched.groups()[2]\n\n if url:\n replaced = \"%s[%s](%s)\" % (pre, name, url)\n else:\n replaced = \"%s[%s](%s)\" % (pre, name, name)\n\n monitext = monitext[:matched.start()] + replaced\\\n + monitext[matched.end():]\n\n # important\n monitext = re.sub(r'\\'\\'\\'(.*?)\\'\\'\\'', r'**\\1**', monitext)\n\n # italic\n monitext = re.sub(r'\\'\\'(.*?)\\'\\'', r'_\\1_', monitext)\n\n # list\n matched = re.match(r'^(\\s*)\\* (.*)', monitext)\n\n if matched:\n depth = len(matched.groups()[0])\n body = matched.groups()[1]\n result += (depth - 1) * '\\t' + '* ' + body\n monitext = monitext[matched.end():]\n\n last_line = 'others'\n\n try:\n # Go to the next line\n index = monitext.index('\\n')\n result += monitext[:index]\n monitext = monitext[index:]\n except ValueError:\n result += monitext\n break\n\n return result" ]
[ "0.6893517", "0.68568224", "0.6491691", "0.6230875", "0.61860424", "0.6106706", "0.6054018", "0.59995306", "0.59206975", "0.5907858", "0.5739098", "0.57272434", "0.5710309", "0.57061905", "0.5688273", "0.56567407", "0.5605282", "0.5591018", "0.55687314", "0.55279493", "0.55066735", "0.55012494", "0.5494175", "0.54794264", "0.54791945", "0.54751134", "0.54685074", "0.5454815", "0.5434868", "0.5420628", "0.5379327", "0.5358369", "0.53397876", "0.5339157", "0.5331467", "0.52710634", "0.5253316", "0.52451086", "0.5238019", "0.5225157", "0.5218968", "0.5216867", "0.5216826", "0.5211244", "0.5211244", "0.5211244", "0.5211244", "0.5211244", "0.52107036", "0.52017736", "0.5200697", "0.51955885", "0.5186207", "0.51691103", "0.516243", "0.5159049", "0.5157843", "0.51563394", "0.5140198", "0.51370215", "0.51249576", "0.51152676", "0.51144785", "0.51103896", "0.51079965", "0.510379", "0.51018614", "0.51018614", "0.5100558", "0.5093332", "0.50810456", "0.5080602", "0.5070148", "0.50674176", "0.50628483", "0.5061491", "0.5061399", "0.5057118", "0.5049807", "0.50493914", "0.5049294", "0.50460625", "0.5044069", "0.50375766", "0.5036411", "0.5035119", "0.5023109", "0.5023109", "0.5021589", "0.5020809", "0.5020537", "0.5018096", "0.50164074", "0.50121903", "0.501067", "0.50077623", "0.50077045", "0.5004156", "0.500049", "0.49995524" ]
0.6215529
4
Reencode phylogeny_df to facilitate efficient analysis and transformation operations. The returned phylogeny dataframe will be topologically sorted (i.e., organisms appear after all of their ancestors), have contiguous ids (i.e., each organism's id corresponds to its row number), and contain an integer-datatype `ancestor_id` column if the phylogeny is asexual (i.e., a more performant representation of `ancestor_list`). The input dataframe is not mutated by this operation unless `mutate` is set True. Even if `mutate` is set True, the operation does not occur in place; still use the return value to get the transformed phylogeny dataframe.
def alifestd_to_working_format(
    phylogeny_df: pd.DataFrame,
    mutate: bool = False,
) -> pd.DataFrame:

    if not mutate:
        phylogeny_df = phylogeny_df.copy()

    phylogeny_df = alifestd_try_add_ancestor_id_col(phylogeny_df, mutate=True)

    if not alifestd_is_topologically_sorted(phylogeny_df):
        phylogeny_df = alifestd_topological_sort(phylogeny_df, mutate=True)

    if not alifestd_has_contiguous_ids(phylogeny_df):
        phylogeny_df = alifestd_assign_contiguous_ids(
            phylogeny_df, mutate=True
        )

    return phylogeny_df
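For context, a minimal usage sketch of alifestd_to_working_format as defined above. This sketch is not part of the dataset record: the example column values and the "[none]" root marker in ancestor_list are assumptions about the alife-standard phylogeny layout the function expects, and it presumes the function above plus its alifestd_* helpers are already in scope, so treat it as illustrative only.

import pandas as pd

# Hypothetical toy phylogeny in alife-standard layout: ids are deliberately
# non-contiguous and the rows are not topologically sorted.
phylogeny_df = pd.DataFrame(
    {
        "id": [9, 3, 7],
        # "[none]" marking the root is an assumed token for this sketch
        "ancestor_list": ["[3]", "[none]", "[3]"],
    }
)

# Assumes alifestd_to_working_format (defined above) and the alifestd_* helpers
# it calls are importable from the source library.
working_df = alifestd_to_working_format(phylogeny_df)

# Expected effect per the description above: rows topologically sorted
# (ancestors first), ids contiguous with row number, and an integer
# ancestor_id column added because this phylogeny is asexual.
print(working_df)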
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_tree(tree, subtree_hierarchy):\n new_tree = subtree_hierarchy.copy()\n for bg_pop, row in subtree_hierarchy.iterrows():\n # Remove not showing pops from new_tree\n if row['To_show'] == 'no':\n new_tree = new_tree.drop(bg_pop)\n continue\n\n # Find Parent\n parent_to_show = row['Parent']\n # If bg_pop has no Parent, skip\n if parent_to_show == '':\n continue\n # If Parent not in subtree, skip\n if parent_to_show not in subtree_hierarchy.index:\n continue\n # If Parent has To_show = 'no', find Parent of Parent, etc.\n while subtree_hierarchy.at[parent_to_show, 'To_show'] == 'no':\n parent_to_show = subtree_hierarchy.at[parent_to_show, 'Parent']\n # Set Parent to show in new_tree\n new_tree.at[bg_pop, 'Parent'] = parent_to_show\n\n new_tree = new_tree.reset_index()[['index', 'BG_population', 'Parent', 'BG_label']]\n # For pairs ('BG_population', 'Parent') that has coords, add coords\n new_tree_pos = new_tree.merge(tree.reset_index(), how='left', on=['BG_population', 'Parent'])\n new_tree_pos = new_tree_pos[['index_x', 'BG_population', 'Parent', 'posX', 'posY', 'BG_label_x']] \\\n .rename(columns={'index_x': 'index', 'BG_label_x': 'BG_label'}) \\\n .fillna('')\n\n return new_tree_pos", "def make_dendrogram_tree(df):\n ZZZ = calculate_linkage_matrix_in_python_format(df)\n\n # set idx to the index of the root node (= the one with the largest number of descendants)\n root_node_idx = int(ZZZ[:, 3].argmax())\n\n N = len(df)\n return make_dendrogram_subtree(df, root_node_idx + N, ZZZ, N)", "def build_anchor_df(df):\n df_columns = df.columns\n forward_df = (df\n .rename({df_columns[0]: 'anchor_guide', df_columns[1]: 'target_guide',\n df_columns[2]: 'anchor_gene', df_columns[3]: 'target_gene'}, axis=1))\n reverse_df = (df\n .rename({df_columns[1]: 'anchor_guide', df_columns[0]: 'target_guide',\n df_columns[3]: 'anchor_gene', df_columns[2]: 'target_gene'}, axis=1))\n anchor_df = (pd.concat([forward_df, reverse_df])\n .drop_duplicates() # in case where guide1==guide2\n .reset_index(drop=True))\n return anchor_df", "def simplify_directed_as_dataframe(df: gpd.GeoDataFrame) -> gpd.GeoDataFrame:\n df.reset_index(inplace=True)\n\n g = gt.Graph(directed=True)\n osm_id = g.new_edge_property('string')\n highway = g.new_edge_property('string')\n level = g.new_edge_property('int')\n lanes = g.new_edge_property('int')\n width = g.new_edge_property('float')\n bicycle = g.new_edge_property('bool')\n bicycle_safety = g.new_edge_property('int')\n foot = g.new_edge_property('bool')\n foot_safety = g.new_edge_property('int')\n max_speed = g.new_edge_property('int')\n motorcar = g.new_edge_property('bool')\n linestring = g.new_edge_property('python::object')\n\n edgelist = df[\n ['u', 'v', 'osm_id', 'highway', 'level', 'lanes', 'width', 'bicycle', 'bicycle_safety', 'foot', 'foot_safety',\n 'max_speed', 'motorcar', 'geometry']].values\n\n nodes_id = g.add_edge_list(edgelist, hashed=True,\n eprops=[osm_id, highway, level, lanes, width, bicycle, bicycle_safety, foot, foot_safety,\n max_speed, motorcar, linestring])\n\n # we are gonna replace the original repeated nodes with a linestring\n e_path = g.new_ep('vector<int64_t>')\n for e in g.edges():\n e_path[e] = []\n\n vs = g.get_vertices()\n in_out_deg_2 = (g.get_in_degrees(vs) == 2) & (g.get_out_degrees(vs) == 2)\n\n logging.debug('selecting degree 4 candidates')\n candidates = set()\n for i, v in enumerate(vs):\n if in_out_deg_2[i]:\n ns = list(set(g.get_all_neighbors(v)))\n if len(ns) == 2:\n u, w = ns[0], ns[1]\n uv, vw, wv, vu = g.edge(u, v), g.edge(v, w), 
g.edge(w, v), g.edge(v, u)\n if highway[uv] == highway[vw] and highway[wv] == highway[vu]:\n candidates.add(v)\n logging.debug('found {} degree 4 candidates to simplify'.format(len(candidates)))\n\n seen = set()\n unregister_candidates = set()\n\n for i, candidate in enumerate(candidates):\n if i == 100000:\n logging.debug('100000 degree 4 candidates')\n if candidate in seen:\n continue\n\n seen.add(candidate)\n\n u, w = g.get_out_neighbors(candidate)\n is_u_fringe, is_w_fringe = u not in candidates, w not in candidates\n\n cu, cw = g.edge(candidate, u), g.edge(candidate, w)\n\n us = []\n ws = []\n\n while not is_u_fringe:\n seen.add(u)\n us.append(u)\n neighbors = set(g.get_out_neighbors(u))\n neighbors -= seen\n if len(neighbors) > 0:\n u = neighbors.pop()\n is_u_fringe = u not in candidates\n elif u == w:\n us.pop(-1)\n u = us.pop(-1)\n unregister_candidates.add(u)\n unregister_candidates.add(w)\n is_u_fringe = True\n is_w_fringe = True\n g.remove_edge(g.edge(s=u, t=w))\n g.remove_edge(g.edge(s=w, t=u))\n else:\n logging.debug('degree 2: we got here somehow {} {} {} {}', candidate, u, v,\n g.get_all_neighbors(candidate))\n break\n\n while not is_w_fringe:\n seen.add(w)\n ws.append(w)\n neighbors = set(g.get_out_neighbors(w))\n neighbors -= seen\n if len(neighbors) > 0:\n w = neighbors.pop()\n is_w_fringe = w not in candidates\n else:\n logging.debug('degree 2: we got here somehow {} {} {} {}', candidate, u, v,\n g.get_all_neighbors(candidate))\n break\n\n if is_u_fringe and is_w_fringe:\n e = g.add_edge(source=u, target=w)\n path = [u] + list(reversed(us)) + [candidate] + ws + [w]\n e_path[e] = [int(nodes_id[node]) for node in path]\n linestrings = [linestring[g.edge(a, b)] for a, b in pairwise(path)]\n linestring[e] = join_linestrings(linestrings)\n osm_id[e], highway[e], level[e], lanes[e], width[e], bicycle[e], bicycle_safety[e], foot[e], foot_safety[e], \\\n max_speed[e], motorcar[e] = osm_id[cw], highway[cw], level[cw], lanes[cw], width[cw], bicycle[cw], \\\n bicycle_safety[cw], \\\n foot[cw], foot_safety[cw], max_speed[cw], motorcar[cw]\n\n e = g.add_edge(source=w, target=u)\n path = [w] + list(reversed(ws)) + [candidate] + us + [u]\n e_path[e] = [int(nodes_id[node]) for node in path]\n linestrings = [linestring[g.edge(a, b)] for a, b in pairwise(path)]\n linestring[e] = join_linestrings(linestrings)\n osm_id[e], highway[e], level[e], lanes[e], width[e], bicycle[e], bicycle_safety[e], foot[e], foot_safety[e], \\\n max_speed[e], motorcar[e] = osm_id[cu], highway[cu], level[cu], lanes[cu], width[cu], bicycle[cu], \\\n bicycle_safety[cu], \\\n foot[cu], foot_safety[cu], max_speed[cu], motorcar[cu]\n\n else:\n logging.debug(\n 'unexpected behavior, source={0}, target={1}, candidate={2}, us={3}, ws={4}'.format(u, w, candidate, us,\n ws))\n\n unseen = candidates - seen\n if len(unseen) > 0:\n logging.debug(\n 'Network scan after degree 4 simplification uncomplete: candidates {0} have not been examined'.format(\n unseen))\n\n candidates -= unregister_candidates\n g.remove_vertex(list(candidates))\n\n vs = g.get_vertices()\n in_out_deg_1 = (g.get_in_degrees(vs) == 1) & (g.get_out_degrees(vs) == 1)\n\n logging.debug('selecting degree 2 candidates')\n candidates = set()\n for i, v in enumerate(vs):\n if in_out_deg_1[i]:\n u = g.get_in_neighbors(v)[0]\n w = g.get_out_neighbors(v)[0]\n\n if u != w:\n uv, vw = g.edge(u, v), g.edge(v, w)\n if highway[uv] == highway[vw]:\n candidates.add(v)\n logging.debug('found {} degree 2 candidates to simplify'.format(len(candidates)))\n\n seen = set()\n 
unregister_candidates = set()\n\n for candidate in candidates:\n if candidate in seen:\n continue\n\n seen.add(candidate)\n\n u = g.get_in_neighbors(candidate)[0]\n w = g.get_out_neighbors(candidate)[0]\n\n uc = g.edge(u, candidate)\n\n is_u_fringe, is_w_fringe = u not in candidates, w not in candidates\n\n us = []\n ws = []\n\n while not is_u_fringe:\n seen.add(u)\n us.append(u)\n neighbors = set(g.get_in_neighbors(u))\n neighbors -= seen\n if len(neighbors) > 0:\n u = neighbors.pop()\n is_u_fringe = u not in candidates\n elif u == w:\n us.pop(-1)\n u = us.pop(-1)\n unregister_candidates.add(u)\n unregister_candidates.add(w)\n is_u_fringe = True\n is_w_fringe = True\n g.remove_edge(g.edge(s=w, t=u))\n else:\n logging.debug('degree 1: we got here somehow {} {} {} {}', candidate, u, v,\n g.get_all_neighbors(candidate))\n break\n\n while not is_w_fringe:\n seen.add(w)\n ws.append(w)\n neighbors = set(g.get_out_neighbors(w))\n neighbors -= seen\n if len(neighbors) > 0:\n w = neighbors.pop()\n is_w_fringe = w not in candidates\n else:\n logging.debug('degree 1: we got here somehow {} {} {} {}', candidate, u, v,\n g.get_all_neighbors(candidate))\n break\n\n if is_u_fringe and is_w_fringe:\n e = g.add_edge(source=u, target=w)\n path = [u] + list(reversed(us)) + [candidate] + ws + [w]\n e_path[e] = [int(nodes_id[node]) for node in path]\n linestrings = [linestring[g.edge(a, b)] for a, b in pairwise(path)]\n linestring[e] = join_linestrings(linestrings)\n osm_id[e], highway[e], level[e], lanes[e], width[e], bicycle[e], bicycle_safety[e], foot[e], foot_safety[e], \\\n max_speed[e], motorcar[e] = osm_id[uc], highway[uc], level[uc], lanes[uc], width[uc], bicycle[uc], \\\n bicycle_safety[uc], \\\n foot[uc], foot_safety[uc], max_speed[uc], motorcar[uc]\n else:\n logging.error('unexpected behavior, source={0}, target={1}, candidate={2}, us={3}, ws={4}', u, w, us, ws)\n\n unseen = candidates - seen\n if len(unseen) > 0:\n logging.debug(\n 'Network scan after degree 2 simplification not finished: candidates {0} have not been examined'.format(\n unseen))\n\n candidates -= unregister_candidates\n g.remove_vertex(list(candidates))\n\n logging.debug(' linestring path')\n edges_tuples = []\n for e in g.edges():\n source, target, path = nodes_id[e.source()], nodes_id[e.target()], e_path[e]\n if len(path) == 0:\n path = [source, target]\n else:\n path = [int(i) for i in path]\n\n e_tuples = (g.edge_index[e], source, target, path,\n osm_id[e], highway[e], level[e], lanes[e], width[e], bicycle[e], bicycle_safety[e], foot[e],\n foot_safety[e], max_speed[e], motorcar[e], linestring[e])\n edges_tuples.append(e_tuples)\n\n df_edges_simplified = pd.DataFrame.from_records(edges_tuples, index='edge_id',\n columns=['edge_id', 'u', 'v', 'path', 'osm_id', 'highway',\n 'level', 'lanes', 'width', 'bicycle', 'bicycle_safety',\n 'foot', 'foot_safety', 'max_speed', 'motorcar',\n 'geometry'])\n\n df_edges_simplified.osm_id = df_edges_simplified.osm_id.str.split('-').str[0]\n df_edges_simplified = gpd.GeoDataFrame(df_edges_simplified, geometry='geometry')\n df_edges_simplified.crs = df.crs\n return df_edges_simplified", "def unflatten_to_tree(df, label_map=None, label_col='label', id_col='id'):\r\n\r\n tf_df = df.filter(like='level')\r\n n_lvls = len(tf_df.columns)\r\n lvl_list = range(n_lvls)\r\n\r\n # Construct all nodes\r\n uniq_ids = pd.Series(pd.unique(tf_df.values.ravel()))\r\n uniq_ids = uniq_ids.dropna()\r\n\r\n if label_map is not None:\r\n assert len(set(uniq_ids)-set(label_map[label_col].unique()))==0, '''\r\n If a 
label_map is specified, all labels in df must\r\n be present in the map '''\r\n rdict = { r[label_col]: r[id_col] for i, r in label_map.iterrows() }\r\n tf_df = tf_df.replace(rdict)\r\n uniq_ids = pd.Series(pd.unique(tf_df.values.ravel()))\r\n uniq_ids = uniq_ids.dropna()\r\n uniq_ids = uniq_ids.astype('int')\r\n\r\n assert len(tf_df['level_0'].unique())==1, '''there can only be\r\n one level_0 id'''\r\n root_id = tf_df['level_0'].unique()[0]\r\n\r\n nodes = {}\r\n for nid in uniq_ids:\r\n nodes[nid] = Node(nid, {}, None)\r\n\r\n # Make relationships\r\n for i in lvl_list:\r\n lvl_col = 'level_%s' % i\r\n nxtlvl_col = 'level_%s' % (i+1)\r\n assert ~tf_df[lvl_col].isin(tf_df.drop(lvl_col, axis=1)).any(), '''\r\n ids cannot span multiple levels'''\r\n\r\n if i<lvl_list[-1]:\r\n for pnid in tf_df[lvl_col].unique():\r\n child_locs = pd.Series(tf_df.ix[tf_df[lvl_col]==pnid,\r\n nxtlvl_col].unique()).dropna()\r\n for cnid in child_locs:\r\n nodes[cnid].parent = nodes[pnid]\r\n nodes[pnid].add_child(nodes[cnid])\r\n\r\n t = Tree(nodes[root_id])\r\n return t", "def transform(self, df):\n df = df.copy()\n \"\"\"\n if self.grouping is not None:\n df = self.hier.transform(df)\n \"\"\"\n # fill NaN\n df = self.fill_na(df)\n\n self.df_index = df.index\n self.df_colnames = df.columns\n # transformations\n for i in sorted(self.transformations.keys()):\n transformation = self.transformations[i]\n df = self.transformers[i].transform(df)\n # convert to DataFrame only if it isn't already\n if not isinstance(df, pd.DataFrame):\n df = pd.DataFrame(df)\n df.index = self.df_index\n df.columns = self.df_colnames\n # update index reference if sliced\n if transformation in ['Slice']:\n self.df_index = df.index\n self.df_colnames = df.columns\n df = df.replace([np.inf, -np.inf], 0) # .fillna(0)\n return df", "def reset_node_ids(df):\n le = LabelEncoder()\n all_node_names = list(set(df['from_name'].values.tolist() + df['to_name'].values.tolist()))\n le.fit(all_node_names)\n df['from_id'] = le.transform(df['from_name'])\n df['to_id'] = le.transform(df['to_name'])\n return df, le", "def get_reactome_hierarchy_df() -> pd.DataFrame:\n return pd.read_csv(REACTOME_HIERARCHICAL_MAPPINGS_PATH, sep='\\t')", "def edge_list_build(input_path, output_path):\n\n start_time = time.time()\n\n df = pd.read_csv(input_path, sep='\\t', header=None)\n\n for col in range(1, len(df.columns)):\n df.iloc[:, col] = df.iloc[:, col-1] + '_' + df.iloc[:, col]\n\n n_divs = len(df.columns) - 1\n\n\n dict_node_names = {}\n\n for id, node_name in enumerate(np.unique(df.values.flatten())):\n dict_node_names[node_name] = id + 1\n\n tmp_df = pd.DataFrame.from_dict(dict_node_names, orient='index')\n tmp_df.reset_index(inplace=True)\n tmp_df.rename({'index': 'nodes', 0: 'hash'}, inplace=True, axis=1)\n\n hash_df = tmp_df['nodes'].str.split('_', n=n_divs, expand=True)\n hash_df = pd.concat([hash_df, tmp_df['hash']], axis=1)\n\n for col_name in df.columns:\n df[col_name] = df[col_name].map(dict_node_names)\n\n df['root'] = 0\n colnames = df.columns.values\n colnames = list(colnames[-1:]) + list(colnames[:-1])\n df = df[colnames]\n\n df_tuples = pd.DataFrame()\n\n for i in range(len(df.columns) - 1):\n df_tuples[i] = list(df[df.columns[i:i + 2]].itertuples(index=False, name=None))\n del df\n gc.collect()\n\n nodes_list = []\n\n for col_id in range(0, df_tuples.shape[1]):\n father_child = df_tuples.iloc[:, col_id].drop_duplicates().values\n nodes_list.extend(father_child)\n\n graph = nx.DiGraph(nodes_list)\n graph_bfs = nx.bfs_tree(graph, 0)\n \n path = 
output_path + '.hashmap'\n hash_df.to_csv(path, index=False, sep='\\t')\n end_time = time.time()\n print(\"Time spent creating tree from csv file:\", end_time - start_time)\n return graph_bfs", "def update_tip_names(tree, taxdict):\n\n list_nodes = []\n uniprot_mapping = pd.DataFrame(columns=['taxid', 'name', 'uniprot'])\n\n counter = 0\n for node in tree.traverse(\"postorder\"):\n current_name = node.name\n\n if 'NMR' in current_name:\n new_name = \"Heterocephalus_glaber\"\n node.name = new_name\n list_nodes.append(node.name)\n taxid = \"NA\" \n uniprot_mapping.loc[counter] = (taxid, new_name, \"UP000006813\")\n counter += 1\n\n elif 'Nfurzer' in current_name:\n new_name = \"Nothobranchius_furzeri\"\n node.name = new_name\n list_nodes.append(node.name)\n taxid = \"NA\"\n uniprot_mapping.loc[counter] = (taxid, new_name, new_name)\n counter += 1\n\n elif 'TAX' in current_name:\n taxid = current_name[3:].split('x')[0]\n new_name = taxdict.get(taxid, taxid) \n node.name = new_name \n list_nodes.append(node.name)\n unip = get_uniprot(taxid, accession)\n uniprot_mapping.loc[counter] = (taxid, new_name, unip)\n counter += 1\n\n\n \n tree.write(outfile=\"../../data/tree/tree.nw\")\n\n nodes_df = pd.DataFrame(list_nodes)\n nodes_df.to_csv(\"../../data/tree/tree_list_nodes.txt\", index=False, header=False)\n\n uniprot_mapping.to_csv(\"../../data/tree/tree_uniprot.txt\", sep='\\t', index=False, header=True)\n\n return tree, list_nodes", "def write_anchor(args, synteny_parent=None, mailbox_reader=None):\n idx = args[0]\n with mailbox_reader(idx) as file_handle:\n anchor_frame = pd.read_csv(\n file_handle, sep=\"\\t\", index_col=0\n ).convert_dtypes()\n in_anchor = len(anchor_frame)\n if in_anchor == 0:\n return None\n # drop any duplicated ID's--normally shouldn't happen\n anchor_frame.drop(\n anchor_frame[anchor_frame.index.duplicated()].index, inplace=True\n )\n anchor_frame.sort_values(\n by=[\"syn.anchor.sub_id\", \"frag.idx\", \"frag.pos\"], inplace=True\n )\n # Make a dictionary of common anchor properties, order will be kept\n anchor_props = {\n \"anchor.id\": idx,\n \"sub\": None,\n \"code\": None,\n \"count\": None,\n \"n\": None,\n \"n_ambig\": None,\n \"n_adj\": None,\n \"adj_groups\": None,\n \"frag.direction\": None,\n \"syn.anchor.direction\": None,\n \"anchor.subframe.ok\": True,\n \"hash\": None,\n }\n code_set = set(anchor_frame[\"syn.code\"])\n for test_code in CODE_DICT.keys():\n if test_code in code_set:\n anchor_props[\"code\"] = test_code\n break\n bad_subframe = False\n prop_list = []\n for sub_no, subframe in anchor_frame.groupby(by=[\"syn.anchor.sub_id\"]):\n (subanchor_props, anchor_subframe, bad_subframe) = _subframe_props(\n anchor_props, subframe, sub_no\n )\n if bad_subframe:\n break\n write_tsv_or_parquet(\n anchor_subframe,\n synteny_parent / f\"{idx}.{sub_no}.{SYNTENY_FILETYPE}\",\n sort_cols=False,\n )\n prop_list.append(subanchor_props)\n if bad_subframe: # Probably means a hash collision\n logger.error(f\"bad anchor set {idx}\")\n prop_list = []\n sub_no = 0\n anchor_props[\"anchor.subframe.ok\"] = False\n for cluster_id, subframe in anchor_frame.groupby(by=[\"hom.cluster\"]):\n (\n subanchor_props,\n anchor_subframe,\n unused_bad_subframe,\n ) = _subframe_props(anchor_props, subframe, sub_no)\n write_tsv_or_parquet(\n anchor_subframe,\n synteny_parent / f\"{idx}.{sub_no}.{SYNTENY_FILETYPE}\",\n sort_cols=False,\n )\n sub_no += 1\n prop_list.append(subanchor_props)\n return prop_list", "def normalize_to_flat(classifier, df, col):\n name = str(classifier) + '_df'\n 
new_df= json_normalize(df.loc[classifier][col])\n new_df['classifier'] = [classifier]\n return new_df", "def get_node_positions(distance_df, y_offset=-0.5):\n\n # If there are more than two genomes/groups\n if distance_df.shape[0] > 2:\n\n # Format the distances as expected by skbio\n distances_dm = DistanceMatrix(\n distance_df.values, \n list(map(str, distance_df.index.values))\n )\n\n # Make a neighbor-joining tree\n tree = nj(distances_dm)\n\n # Root at midpoint\n tree = tree.root_at_midpoint()\n\n # If there are only two genomes/groups\n elif distance_df.shape[0] == 2:\n\n # Get the distance betweeen the genomes/groups\n distance_between = distance_df.values[0, 1]\n\n # Make a simple tree linking the two\n tree = TreeNode(\n name='root',\n children=[\n TreeNode(\n name=distance_df.index.values[0],\n length=distance_between / 2.\n ),\n TreeNode(\n name=distance_df.index.values[1],\n length=distance_between / 2.\n )\n ]\n )\n\n # If there is only one genomes/groups\n elif distance_df.shape[0] == 1:\n\n # Make a simple tree with a single leaf\n tree = TreeNode(\n name='root',\n children=[\n TreeNode(\n name=distance_df.index.values[0],\n length=0\n )\n ]\n )\n\n # Assign x/y to create a DataFrame\n node_positions = CartesianTree(\n tree,\n y_offset=y_offset,\n )\n\n # Return that CartesianTree object\n return node_positions", "def build_tree(df) -> dict:\r\n # initialize empty tree as a dictionary\r\n tree = {}\r\n # find column associated with best information gain\r\n next_att = best_inf_gain_att(df)\r\n # next_att = find_winner(df)\r\n tree[next_att] = {}\r\n\r\n # for each value of the attribute at hand\r\n for val in np.unique(df[next_att]):\r\n # get new table\r\n subtable = get_subtable(df, next_att, val)\r\n # get information on new y characteristics\r\n sub_val, sub_val_counts = np.unique(subtable.iloc[:, -1], return_counts=True)\r\n\r\n # if there's only one label value left, assign it\r\n if 1 == sub_val.shape[0]:\r\n tree[next_att][val] = sub_val[0]\r\n # if there are no more columns except the label column, assign the most frequent label\r\n elif 1 == subtable.columns.shape[0]:\r\n tree[next_att][val] = sub_val[np.argmax(sub_val_counts)]\r\n # otherwise add node recursively\r\n else:\r\n tree[next_att][val] = build_tree(subtable)\r\n\r\n return tree", "def prot_df_to_graph(df, edge_dist_cutoff=4.5):\n\n node_pos = torch.FloatTensor(df[['x', 'y', 'z']].to_numpy())\n\n kd_tree = ss.KDTree(node_pos)\n edge_tuples = list(kd_tree.query_pairs(edge_dist_cutoff))\n edges = torch.LongTensor(edge_tuples).t().contiguous()\n\n node_feats = torch.FloatTensor([one_of_k_encoding_unk(e, prot_atoms) for e in df['element']])\n edge_feats = torch.FloatTensor(\n [1.0 / (np.linalg.norm(node_pos[i] - node_pos[j]) + 1e-5) for i, j in edge_tuples]).view(-1, 1)\n # feats = F.one_hot(elems, num_classes=len(atom_int_dict))\n\n return node_feats, edges, edge_feats, node_pos", "def build_tree(self, df=None, tree=None, depth=0):\n if df is None:\n df = self.df\n target = self.target\n\n node = self.get_lowest_entropy_feature(df)\n if not node:\n print(\"Pure solution not possible in current branch...\")\n return tree\n variables = df[node].unique()\n\n if tree is None: \n tree = {}\n tree[node] = {}\n\n for value in variables:\n subtable = df[df[node] == value].reset_index(drop=True)\n inner_variables, counts = np.unique(subtable[target], return_counts=True) \n \n if len(counts) == 1:\n tree[node][value] = inner_variables[0] \n elif depth >= self.max_depth:\n return tree \n else:\n depth += 1 \n 
tree[node][value] = self.build_tree(df=subtable, depth=depth)\n \n return tree", "def related_df_shaper(df): \n id_related=list()\n id_primary=list()\n id_relation_type=list()\n for id_term in df.id_term:\n \n related_id_list=df.loc[df.id_term==id_term,'related_terms'].values[0]\n id_relation_type_list=df.loc[df.id_term==id_term,'id_relation_type'].values[0]\n for i in range(len(related_id_list)):\n id_related.append(related_id_list[i])\n id_relation_type.append(id_relation_type_list[i])\n id_primary.append(id_term)\n \n df_rs=pd.DataFrame({'id_term':id_primary,'id_term_related':id_related,'id_relation_type':id_relation_type})\n now=pd.to_datetime(datetime.datetime.now())\n df_rs=df_rs.assign(datetime_created=now)\n df_rs=df_rs.assign(datetime_updated=now)\n df_rs=df_rs.assign(id_user_created=7)\n df_rs=df_rs.assign(id_user_updated=7)\n \n return df_rs", "def __encode_genre_transform(self, df):\n\t\treturn df.join(\n\t\t\tpd.DataFrame(self.genre_mlb.transform(df.pop(\"genres\")), columns=self.genre_mlb.classes_, index=df.index),\n\t\t\tlsuffix='l')", "def remodeling(df, df_name):\n # df_name = get_df_name(df)\n new_df = country_code_update(df)\n # new_df.rename(columns={'country_code': 'date'}, inplace=True)\n new_df = new_df.set_index('country_code').transpose().rename_axis('', axis=1)\n new_df.index.name = 'date'\n new_df.index = pd.to_datetime(new_df.index)\n new_df = new_df.add_suffix('_' + df_name)\n return new_df", "def reindex_subcomponent_taxa(self):\n ti_mutable = self.taxon_set._is_mutable\n self.taxon_set._is_mutable = True\n new_map = CharacterDataMap()\n for taxon, seq in self.taxon_seq_map.items():\n taxon = self.taxon_set.require_taxon(label=taxon.label)\n new_map[taxon] = seq\n self.taxon_set._is_mutable = ti_mutable\n self.taxon_seq_map = new_map", "def flatten(self):\r\n df = pd.DataFrame([{'level_0': self.root.id}])\r\n for lvl in range(1, self.max_depth()+1):\r\n loc_pairs = [(l.parent.id, l.id) for l in self.level_n_descendants(lvl)]\r\n loc_pairs = pd.DataFrame(loc_pairs)\r\n loc_pairs.rename(columns={\r\n 0: 'level_'+str(lvl-1),\r\n 1: 'level_'+str(lvl)}, inplace=True)\r\n df = df.merge(loc_pairs, on='level_'+str(lvl-1), how='left')\r\n df['leaf_node'] = df.apply(lambda x:\r\n next(l for l in reversed(x) if pd.notnull(l)), axis=1)\r\n\r\n for c in df.columns:\r\n try:\r\n df[c] = df[c].astype('int')\r\n except:\r\n pass\r\n\r\n return df", "def table_with_routes(df, routes):\n full_names = []\n high_tax_list = []\n low_tax_list = []\n for org in in_taxa_dict.keys():\n group, subtax, long_name = in_taxa_dict[org]\n high_tax_list.append(group)\n low_tax_list.append(subtax)\n full_names.append(long_name)\n\n df = df[df.index.isin(in_taxa_dict.keys())]\n no_seqs = set(in_taxa_dict.keys()) - set(df.index)\n\n for taxon in no_seqs:\n df.loc[taxon] = len(df.columns) * [0]\n\n df.index.name = 'Unique ID'\n df.insert(loc=0, column='Lower Taxonomy', value=low_tax_list)\n df.insert(loc=0, column='Higher Taxonomy', value=high_tax_list)\n df.insert(loc=0, column='Full Name', value=full_names)\n\n df = df.sort_index(axis=0)\n df.to_csv(f'{output_fold}/occupancy.tsv', sep='\\t')\n\n # Adds routes to df\n for gene in df.columns:\n df[gene] = df[gene].apply(str)\n for org in df[gene].index:\n if org in routes[gene]:\n df.at[org, gene] = f'{df[gene][org]}_{routes[gene][org]}'\n\n df.to_csv(f'{output_fold}/occupancy_with_routes.tsv', sep='\\t')\n\n return df", "def build_gene_indexes(df):\n\tgeneDict = OrderedDict()\n\n\tgeneCount = 0\n\tpreviousGeneIndex = 
0\n\n\tcurrent_id=\"\"\n\tcurrent_gene=\"\"\n\n\tfor i in range(len(df)):\n\n\t\tif df.loc[i,'feature'] == 'gene':\n\t\t\ttrdict = parse_entry(df.loc[i,'transcript_id'])\n\n\t\t\tcurGeneID = trdict['gene_id'][0]\n\t\t\n\t\t\tif geneCount != 0:\n\t\t\t\tnewGeneIndex = i\n\t\t\t\tgeneDict[current_id] = [previousGeneIndex,newGeneIndex]\n\t\t\t\tpreviousGeneIndex = i\n\t\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\t\t\tgeneCount += 1\n\n\t\t\telse:\n\t\t\t\tnewgeneIndex = 0\n\t\t\t\tgeneCount +=1\n\t\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\tif i == (len(df)-1):\n\t\t\tnewGeneIndex = i+1\n\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\t\tgeneDict[current_id] = [previousGeneIndex,newGeneIndex]\n\treturn geneDict", "def edge_list_df_to_igraph(edge_list_df, node_id_mapper):\n nodes = list(set(edge_list_df.from_id.values.tolist() + edge_list_df.to_id.values.tolist()))\n #node_names = list(set(edge_list_df.from_name.values.tolist() + edge_list_df.to_name.values.tolist()))\n edges = list(zip(edge_list_df.from_id, edge_list_df.to_id))\n weights = list(edge_list_df.weight.values)\n g = Graph()\n g.add_vertices(len(nodes))\n g.add_edges(edges)\n g.es['weight'] = weights\n g.vs['label'] = list(node_id_mapper.inverse_transform(np.array(range(len(g.vs)))))\n g.vs['community'] = 0 # Set original community the same for all nodes\n return g, edges", "def expand_affiliation(df):\n from pandas import Series\n res = df[[\"source_id\", \"author_ids\", \"afid\"]].copy()\n res['afid'] = res[\"afid\"].str.split(';')\n res = (res[\"afid\"].apply(Series)\n .merge(res, right_index=True, left_index=True)\n .drop([\"afid\"], axis=1)\n .melt(id_vars=['source_id', 'author_ids'], value_name=\"afid\")\n .drop(\"variable\", axis=1)\n .dropna())\n res['afid'] = res['afid'].astype(float)\n return res", "def CreateAugmentedDataset(df: pd.DataFrame) -> pd.DataFrame:\n with prof.Profile(\"dead code mutations\"):\n seed = np.random.RandomState(0xCEC)\n\n candidate_kernels = df[\"program:opencl_src\"].values.copy()\n\n new_columns = list(df.columns.values) + [\"program:is_mutation\"]\n new_rows = []\n\n for _, row in df.iterrows():\n kernel = row[\"program:opencl_src\"]\n\n # Insert the original row.\n row[\"program:is_mutation\"] = False\n new_rows.append(row)\n\n # Create and insert mutation rows.\n for _ in range(3):\n row = row.copy()\n row[\"program:is_mutation\"] = True\n # Insert a single dead kernel into each original kernel.\n dci = opencl_deadcode_inserter.OpenClDeadcodeInserter(\n seed, kernel, candidate_kernels=candidate_kernels\n )\n dci.InsertBlockIntoKernel()\n row[\"program:opencl_src\"] = dci.opencl_source\n new_rows.append(row)\n\n return pd.DataFrame(new_rows, columns=new_columns)", "def aggregate_intermediate_data_frame(self, node_name, child, child_df, edge_df):\n expr = []\n for n in child_df.schema.names:\n if n in self.parser.reducer_by_prop:\n if self.parser.reducer_by_prop.get(n) in [\"list\", \"set\"]:\n expr.append(\n self.reducer_to_agg_func_expr(\n self.parser.reducer_by_prop.get(n), n, is_merging=False\n )\n )\n else:\n expr.append(\n self.reducer_to_agg_func_expr(\n self.parser.reducer_by_prop.get(n), n, is_merging=True\n )\n )\n if len(expr) == 0:\n expr.append(\n self.reducer_to_agg_func_expr(\"set\", get_node_id_name(child.name))\n )\n tmp_df = (\n self.join_two_dataframe(edge_df, child_df, how=\"left_outer\")\n .groupBy(get_node_id_name(node_name))\n .agg(*expr)\n )\n\n select_expr = [get_node_id_name(node_name)]\n for n in child_df.schema.names:\n if n in self.parser.reducer_by_prop and 
self.parser.reducer_by_prop.get(\n n\n ) in [\"list\", \"set\"]:\n select_expr.append(\n self.reducer_to_agg_func_expr(\n self.parser.reducer_by_prop.get(n), n, is_merging=True\n )\n )\n tmp_df = tmp_df.select(*select_expr)\n return self.return_dataframe(\n tmp_df,\n f\"{Translator.aggregate_intermediate_data_frame.__qualname__}__{node_name}__{child.name}\"\n )", "def make_homologues_mirnas(phylogenetic_tree, mirna_seqs):\n species = [leaf.taxon.label for leaf in phylogenetic_tree.leaf_iter()]\n mirhomologues = pd.DataFrame({sp: {mirid: mirna_seqs[mirid][:21]\n for mirid in mirna_seqs.keys()}\n for sp in species}).transpose()\n return mirhomologues", "def reduced_votes_to_predictions(df, schema, save_loc):\n predictions = get_predictions(df, schema)\n encoded_predictions = encode_answers(predictions, schema)\n\n if save_loc is not None:\n logging.info('Saved predictions to {}'.format(save_loc))\n encoded_predictions.to_csv(save_loc, index=False)\n\n return encoded_predictions", "def tree2pwdist(tree):\n if isinstance(tree, type(Phylo.BaseTree.Tree())):\n N = len(tree.get_terminals())\n names = [node.name for node in tree.get_terminals()]\n pwdist = np.zeros((N, N))\n for i, node1 in enumerate(tree.get_terminals()):\n for j, node2 in enumerate(tree.get_terminals()):\n \"\"\"Compute half of these and assume symmetry\"\"\"\n if i==j:\n pwdist[i, j] = 0\n elif i<j:\n pwdist[i, j] = tree.distance(node1, node2)\n pwdist[j, i] = pwdist[i, j]\n elif isinstance(tree, type(dendropy.Tree())):\n pdm = dendropy.treecalc.PatristicDistanceMatrix(tree)\n taxon_set = [n.taxon for n in tree.leaf_nodes()]\n N = len(taxon_set)\n names = [taxa.label for taxa in taxon_set]\n pwdist = np.zeros((N, N))\n for i, t1 in enumerate(taxon_set):\n for j, t2 in enumerate(taxon_set):\n \"\"\"Compute half of these and assume symmetry\"\"\"\n if i==j:\n pwdist[i, j] = 0\n elif i<j:\n pwdist[i, j] = pdm(t1, t2)\n pwdist[j, i] = pwdist[i, j]\n else:\n print('Tree type does not match Phylo.BaseTree.Tree or dendropy.Tree')\n return\n return pd.DataFrame(pwdist, index = names, columns = names)", "def db_format(self):\r\n def gen_p2p_str(node):\r\n anc_ids = [a.id for a in node.ancestors()]\r\n anc_ids.reverse()\r\n anc_ids = anc_ids+[node.id]\r\n return ','.join([str(aid) for aid in anc_ids])\r\n\r\n db_dict = { 'location_id': [self.root.id],\r\n 'parent_id': [self.root.id],\r\n 'path_to_top_parent': [gen_p2p_str(self.root)],\r\n 'level': [0] }\r\n\r\n for lvl in range(1,self.max_depth()+1):\r\n nodes = self.level_n_descendants(lvl)\r\n for n in nodes:\r\n db_dict['location_id'].append(n.id)\r\n db_dict['parent_id'].append(n.parent.id)\r\n db_dict['path_to_top_parent'].append(gen_p2p_str(n))\r\n db_dict['level'].append(lvl)\r\n\r\n db_df = pd.DataFrame(db_dict)\r\n leaf_ids = [l.id for l in self.leaves()]\r\n db_df['most_detailed'] = 0\r\n db_df.ix[db_df.location_id.isin(leaf_ids), 'most_detailed'] = 1\r\n return db_df", "def encode_target(df, target_column):\n df_mod = df.copy()\n targets = df_mod[target_column].unique()\n map_to_int = {name: n for n, name in enumerate(targets)}\n df_mod[target_column] = df_mod[target_column].replace(map_to_int)\n\n return (df_mod, targets)", "def apply_ael_to_df(atypical_df, ael_reference):\n\n atypical_new_df = atypical_df.copy()\n atypical_new_df.is_atypical = np.nan\n atypical_new_df = atypical_new_df.set_index('ds', drop=True)\n\n for event in ael_reference.set_atypical_events:\n atypical_new_df.loc[event.date_start:event.date_end, 'is_atypical'] = event.is_atypical\n\n atypical_new_df = 
atypical_new_df.reset_index()\n\n return atypical_new_df", "def to_frame(self):\n return gpd.GeoDataFrame(\n data=self.tree_ids,\n geometry=self.to_geom(),\n crs=self.crs,\n columns=['id'],\n )", "def transform(self, df: DataFrame) -> DataFrame:\n return df", "def bed_encoding(bed_df, reference):\n\n fasta = Fasta(reference, as_raw=True)\n seq_list = list()\n for _, i in bed_df.iterrows():\n print(f\"region:{i[0]}:{i[1]}-{i[2]}\")\n seq_list.append(one_hot_encoding(fasta[i[0]][i[1]:i[2]]))\n result = np.stack(seq_list)\n return result", "def classify_df_atomizer(classify_df: pd.DataFrame) -> pd.DataFrame:\n yield atomizers.AsciiCharacterAtomizer.FromText(\n \"\\n\".join(classify_df[\"program:opencl_src\"].values)\n )", "def coerce( self ):\n df = self.copy()\n gcond = ['neighbor', 'pdb'] if 'source' not in df.columns else ['neighbor', 'pdb', 'source']\n for frame_id, frame in df.groupby('frame'):\n g = frame.groupby(gcond)\n neighbors = len(g)\n neighbor = list(g.ngroup() + 1)\n position = list(g.cumcount() + frame_id)\n df.loc[(df['frame'] == frame_id), 'neighbors'] = [neighbors] * frame.shape[0]\n df.loc[(df['frame'] == frame_id), 'neighbor'] = neighbor\n df.loc[(df['frame'] == frame_id), 'position'] = position\n return df", "def format_df(df):\n df = df.transpose()\n df.rename(columns=df.iloc[0], inplace=True)\n df.drop('gene', inplace=True)\n df.index.rename('sample_id', inplace=True)\n return df", "def format_df(df):\n df = df.transpose()\n df.rename(columns=df.iloc[0], inplace=True)\n df.drop('gene', inplace=True)\n df.index.rename('sample_id', inplace=True)\n return df", "def nodes_df_creation(self, path: str) -> pyspark.sql.dataframe.DataFrame:\n try:\n nodes_df = self.spark.read.parquet(path)\n except OSError:\n print('cannot open', path)\n nodes_df = nodes_df.select('id', 'tags').filter(size(col('tags')) > 0)\n nodes_df = nodes_df.select(col('id'), explode(col('tags')).name('exploded_tags'))\\\n .filter(col('exploded_tags.key') == 'amenity')\n nodes_df = nodes_df.select(\"id\", 'exploded_tags.value').withColumnRenamed('id', 'nodeId')\\\n .withColumnRenamed('value', 'amenity_type')\n return nodes_df", "def encode_target(df, target_column):\n df_mod = df.copy()\n targets = df_mod[target_column].unique()\n map_to_int = {name: n for n, name in enumerate(targets)}\n df_mod[\"Target\"] = df_mod[target_column].replace(map_to_int)\n\n return (df_mod, targets)", "def normalize_annotations(df):\n print('Converting labels to one-hot enconding...')\n nested_list = df['annotation_list_with_empty']\n mlb = MultiLabelBinarizer()\n ont_hot_df = pd.DataFrame(mlb.fit_transform(df.pop('annotation_list_with_empty')),\n columns=mlb.classes_,\n index=df.index)\n new_df = pd.concat([df, ont_hot_df], axis=1)\n new_df['annotation_list_with_empty'] = nested_list\n return(new_df)", "def encode_rgi(rgi_df, genome_ids):\n rgi_encoded = pd.DataFrame(index=genome_ids,\n columns=rgi_df['Best_Hit_ARO'].unique()).fillna(0)\n # print(rgi_encoded)\n for genome_id, rgi_data in rgi_df.iterrows():\n rgi_encoded.loc[rgi_data['Sample'], rgi_data['Best_Hit_ARO']] += 1\n\n return rgi_encoded", "def descend_template_phylogeny_posthoc(\n ascending_lineage_iterables: typing.Iterable[typing.Iterable],\n descending_tree_iterable: typing.Iterable,\n get_parent: typing.Callable[[typing.Any], typing.Any],\n get_stem_length: typing.Callable[[typing.Any], int],\n seed_column: HereditaryStratigraphicColumn,\n demark: typing.Callable[[typing.Any], typing.Hashable] = demark,\n progress_wrap: typing.Callable = lambda x: x,\n) -> 
typing.List[HereditaryStratigraphicColumn]:\n\n stratum_retention_policy = seed_column._stratum_retention_policy\n assert stratum_retention_policy.IterRetainedRanks is not None\n\n tree_depth_lookup = _calc_node_depths(\n descending_tree_iterable,\n get_parent,\n get_stem_length,\n progress_wrap=progress_wrap,\n demark=demark,\n )\n deposition_count_lookup = {\n k: v + seed_column.GetNumStrataDeposited()\n for k, v in tree_depth_lookup.items()\n }\n\n stem_strata_lookup = defaultdict(\n # use lru_cache as defaultdict with default factory conditioned on key\n lambda: lru_cache(maxsize=None)(\n lambda rank: (\n # if applicable, use stratum from seed column\n # otherwise, create new stratum\n seed_column.GetStratumAtRank(rank)\n if rank < seed_column.GetNumStrataDeposited()\n else seed_column._CreateStratum(rank)\n )\n )\n )\n\n extant_population = [\n HereditaryStratigraphicColumn(\n always_store_rank_in_stratum=seed_column._always_store_rank_in_stratum,\n stratum_retention_policy=stratum_retention_policy,\n stratum_differentia_bit_width=seed_column.GetStratumDifferentiaBitWidth(),\n stratum_ordered_store=_educe_stratum_ordered_store(\n iter_,\n deposition_count_lookup,\n stem_strata_lookup,\n stratum_retention_policy,\n demark=demark,\n ),\n )\n for iter_ in progress_wrap(ascending_lineage_iterables)\n ]\n return extant_population", "def write_phylogeny_pages(outfile: TextIO, genera_tree: list, species_tree: list, do_print: bool,\n refdict: dict) -> None:\n\n def split_html_tree(intree: list) -> Tuple[list, list]:\n \"\"\"\n take a phy2html output file and split it into style and body sections\n \"\"\"\n instyle = False\n inbody = False\n style_list = []\n body_list = []\n for inline in intree:\n if \"<style>\" in inline:\n instyle = True\n elif \"</style>\" in inline:\n instyle = False\n elif \"<body>\" in inline:\n inbody = True\n elif \"</body>\" in inline:\n inbody = False\n elif instyle:\n style_list.append(inline)\n elif inbody:\n body_list.append(inline)\n return style_list, body_list\n\n def add_link_to_genus(inline: str) -> str:\n if \"id=\\\"genera_taxon\" in inline:\n i = inline.find(\">\")\n start = inline[:i+1]\n tname = inline[i+1:]\n j = tname.find(\"<\")\n end = tname[j:]\n tname = tname[:j]\n tname = create_taxon_link(\"genus\", tname, do_print=do_print, include_rank=False)\n return start + tname + end\n else:\n return inline\n\n # --- main function code ---\n gen_style, gen_body = split_html_tree(genera_tree)\n sp_style, sp_body = split_html_tree(species_tree)\n\n if do_print:\n start_page_division(outfile, \"base_page\")\n else:\n common_header_part1(outfile, \"Fiddler Crab Phylogeny\")\n outfile.write(\" <style>\\n\")\n for line in gen_style:\n outfile.write(line)\n outfile.write(\"\\n\")\n for line in sp_style:\n outfile.write(line)\n outfile.write(\"\\n\")\n outfile.write(\" .phylogeny_grid { padding: 30px; }\\n\") # add a little extra padding\n outfile.write(\" </style>\\n\")\n\n if not do_print:\n common_header_part2(outfile)\n\n outfile.write(\" <header id=\\\"\" + init_data().tree_url + \"\\\">\\n\")\n outfile.write(\" <h1 class=\\\"bookmark1\\\">Phylogeny</h1>\\n\")\n outfile.write(\" </header>\\n\")\n outfile.write(\"\\n\")\n outfile.write(\" <p>\\n\")\n outfile.write(\" The phylogeny of fiddler crabs is still largely unresolved. Two trees are shown below: one \"\n \"of just the genera and one including all species. The tree of genera is fairly solid, \"\n \"but the species tree is a rough estimate with many polytomies. 
Both are predominantly based on the \"\n \"work of \" + format_reference_cite(refdict[\"Shih2016.2\"], do_print, AUTHOR_PAREN) + \".\\n\")\n outfile.write(\" </p>\\n\")\n outfile.write(\"\\n\")\n outfile.write(\" <section class=\\\"spsection\\\">\\n\")\n outfile.write(\" <h2 class=\\\"bookmark2\\\">Genera Phylogeny</h2>\\n\")\n for line in gen_body:\n outfile.write(add_link_to_genus(line))\n outfile.write(\" </section>\\n\")\n outfile.write(\" <section class=\\\"spsection\\\">\\n\")\n outfile.write(\" <h2 class=\\\"bookmark2\\\">Species Phylogeny</h2>\\n\")\n for line in sp_body:\n outfile.write(replace_species_in_string(line, True, do_print))\n outfile.write(\" </section>\\n\")\n outfile.write(\"\\n\")\n if do_print:\n end_page_division(outfile)\n else:\n common_html_footer(outfile)", "def transform(self, df):\n\t\tdf = self.__parse_json(df)\n\t\tdf = self.__fillnan(df)\n\t\tdf = self.__parse_dates(df)\n\t\tdf['budget'] = df['budget'].apply(lambda x: self.missing_budget_imputing if int(x) == 0 else x)\n\t\tdf['has_collections'] = df['belongs_to_collection'].isna().astype(int)\n\t\tdf['homepage'] = df['homepage'].isna().astype(int)\n\t\tdf['is_en'] = df['original_language'].apply(lambda x: 1 if x == 'en' else 0)\n\t\tdf = self.__encode_genre_transform(df)\n\t\tdf = self.__top_countries_and_companies_transform(df)\n\t\tdf = self.__bin_columns_transform(df)\n\t\tdf.drop(\n\t\t\t['release_date', 'original_language', 'production_countries', 'production_companies', 'id', 'backdrop_path',\n\t\t\t 'imdb_id', 'poster_path', 'video', 'belongs_to_collection', 'status', 'runtime',\n\t\t\t 'original_title', 'overview', 'tagline', 'title'], axis=1, inplace=True)\n\t\treturn df", "def aitchison_transform_part(df):\n df_aitchison = multiplicative_replacement(df)\n #df_aitchison = closure(df)\n df_idx = df.index\n df_col = df.columns\n df_aitchison = pd.DataFrame(df_aitchison, index = df_idx, columns = df_col)\n return df_aitchison", "def at_df(self, df):\n result = self.at(**df).reset_coords(drop=True).to_dataframe()\n if isinstance(df, pd.DataFrame):\n result.index = df.index\n return result", "def transform(self, dataframe: DataFrame) -> DataFrame:", "def annotation_to_nodules(annotation_df):\n data_list = []\n for group in annotation_df.groupby(['AccessionNumber', 'DoctorID']):\n accession_number = group[0][0]\n doctor_id = group[0][1]\n\n nodules = group[1].iloc[:, 5:].values.reshape(-1, 5)\n for i in range(nodules.shape[0]):\n nodule_id = generate_index()\n nodule_dict = {\n 'AccessionNumber': accession_number,\n 'DoctorID': doctor_id,\n 'NoduleID': nodule_id,\n 'NoduleType': nodules[i, 4],\n 'coordX': nodules[i, 0] if nodules[i, 0] != '' else 'NaN',\n 'coordY': nodules[i, 1] if nodules[i, 1] != '' else 'NaN',\n 'coordZ': nodules[i, 2] if nodules[i, 2] != '' else 'NaN',\n 'diameter_mm': nodules[i, 3] if nodules[i, 3] != '' else 'NaN',\n }\n data_list.append(nodule_dict)\n result_df = pd.DataFrame(data_list)\n result_df.coordX = result_df.coordX.astype(np.float)\n result_df.coordY = result_df.coordY.astype(np.float)\n result_df.coordZ = result_df.coordZ.astype(np.float)\n result_df.diameter_mm = result_df.diameter_mm.astype(np.float)\n result_df = result_df.dropna()\n result_df = result_df.assign(DoctorID=lambda df: df.loc[:, 'DoctorID'].str.replace(\"'\", \"\"))\n return normalize_nodule_type(result_df)", "def build_encoders(df):\n\n # Pclass\n pclass_tf = df['Pclass'].values\n pclass_encoder = LabelBinarizer()\n pclass_encoder.fit(pclass_tf)\n\n with open(os.path.join('encoders', 
'pclass_encoder.json'),\n 'w', encoding='utf8') as outfile:\n json.dump(pclass_encoder.classes_.tolist(),\n outfile, ensure_ascii=False)\n\n # Sex\n sex_tf = df['Sex'].values\n sex_encoder = LabelBinarizer()\n sex_encoder.fit(sex_tf)\n\n with open(os.path.join('encoders', 'sex_encoder.json'),\n 'w', encoding='utf8') as outfile:\n json.dump(sex_encoder.classes_.tolist(), outfile, ensure_ascii=False)\n\n # Age\n age_enc = df['Age']\n age_bins = age_enc.quantile(np.linspace(0, 1, 10+1))\n\n with open(os.path.join('encoders', 'age_bins.json'),\n 'w', encoding='utf8') as outfile:\n json.dump(age_bins.tolist(), outfile, ensure_ascii=False)\n\n # Siblings/Spouses Aboard\n siblings_spouses_aboard_tf = df['Siblings/Spouses Aboard'].values\n siblings_spouses_aboard_encoder = LabelBinarizer()\n siblings_spouses_aboard_encoder.fit(siblings_spouses_aboard_tf)\n\n with open(os.path.join('encoders', 'siblings_spouses_aboard_encoder.json'),\n 'w', encoding='utf8') as outfile:\n json.dump(siblings_spouses_aboard_encoder.classes_.tolist(),\n outfile, ensure_ascii=False)\n\n # Parents/Children Aboard\n parents_children_aboard_tf = df['Parents/Children Aboard'].values\n parents_children_aboard_encoder = LabelBinarizer()\n parents_children_aboard_encoder.fit(parents_children_aboard_tf)\n\n with open(os.path.join('encoders', 'parents_children_aboard_encoder.json'),\n 'w', encoding='utf8') as outfile:\n json.dump(parents_children_aboard_encoder.classes_.tolist(),\n outfile, ensure_ascii=False)\n\n # Fare\n fare_enc = df['Fare']\n fare_bins = fare_enc.quantile(np.linspace(0, 1, 10+1))\n\n with open(os.path.join('encoders', 'fare_bins.json'),\n 'w', encoding='utf8') as outfile:\n json.dump(fare_bins.tolist(), outfile, ensure_ascii=False)\n\n # Target Field: Survived\n survived_encoder = LabelEncoder()\n survived_encoder.fit(df['Survived'].values)\n\n with open(os.path.join('encoders', 'survived_encoder.json'),\n 'w', encoding='utf8') as outfile:\n json.dump(survived_encoder.classes_.tolist(),\n outfile, ensure_ascii=False)", "def to_junction_tree(self):\n mm = self.to_markov_model()\n return mm.to_junction_tree()", "def encode_target(df, target_column):\n df_mod = df.copy()\n targets = df_mod[target_column].unique()\n map_to_int = {name: n for n, name in enumerate(targets)}\n #print map_to_int\n df_mod[\"Target\"] = df_mod[target_column].replace(map_to_int)\n return (df_mod, targets)", "def theta_AB(df):\n j_dist = get_coop_coop_neighbour_dist(df) \n degree_dist = get_degree_distribution(df) \n return get_theta_AB(j_dist,degree_dist)", "def build_df_dict(df, geneDictCanon):\n\n\toutDict = OrderedDict()\n\n\tfor tr in geneDictCanon:\n\t\toutDict[geneDictCanon[tr][0]] = df.iloc[geneDictCanon[tr][1][0]:geneDictCanon[tr][1][1]]\n\n\treturn outDict", "def to_property_graph(\n client: NeptuneClient, df: pd.DataFrame, batch_size: int = 50, use_header_cardinality: bool = True\n) -> bool:\n # check if ~id and ~label column exist and if not throw error\n g = traversal().withGraph(Graph())\n is_edge_df = False\n is_update_df = True\n if \"~id\" in df.columns:\n if \"~label\" in df.columns:\n is_update_df = False\n if \"~to\" in df.columns and \"~from\" in df.columns:\n is_edge_df = True\n else:\n raise exceptions.InvalidArgumentValue(\n \"Dataframe must contain at least a ~id and a ~label column to be saved to Amazon Neptune\"\n )\n\n # Loop through items in the DF\n for (index, row) in df.iterrows():\n # build up a query\n if is_update_df:\n g = _build_gremlin_update(g, row, use_header_cardinality)\n elif is_edge_df:\n g = 
_build_gremlin_insert_edges(g, row.to_dict(), use_header_cardinality)\n else:\n g = _build_gremlin_insert_vertices(g, row.to_dict(), use_header_cardinality)\n # run the query\n if index > 0 and index % batch_size == 0:\n res = _run_gremlin_insert(client, g)\n if res:\n g = Graph().traversal()\n\n return _run_gremlin_insert(client, g)", "def build_hierarchy_from_id_lookup(id_lookup_file=\"idlookups.csv\"):\n df_id_lookups = pd.read_csv(id_lookup_file, index_col=0)\n\n # The naming convention separates layers of the hierarchy with a colon ':', so we can break this into a list of descendents, and calculate the depth of the tree.\n df_id_lookups[\"parsed_name\"] = df_id_lookups.name.apply(lambda s: s.split(\": \"))\n df_id_lookups[\"depth\"] = df_id_lookups.parsed_name.apply(lambda d: len(d))\n\n # The two top nodes \"Biota\" and \"Physical\" are not prepended to their children, so we need to do this manually.\n # Manually define biota and physical children\n biota_kids = [\n \"Worms\",\n \"Sponges\",\n \"Seagrasses\",\n \"Molluscs\",\n \"Macroalgae\",\n \"Jellies\",\n \"Fishes\",\n \"Echinoderms\",\n \"Crustacea\",\n \"Cnidaria\",\n \"Bryozoa\",\n \"Bioturbation\",\n \"Bacterial mats\",\n \"Ascidians\",\n ]\n\n physical_kids = [\"Substrate\"]\n\n # Prepend them to name lists, and add to depth.\n biota_inds = df_id_lookups.parsed_name.apply(lambda d: d[0] in biota_kids)\n df_id_lookups.loc[biota_inds, \"depth\"] += 1\n df_id_lookups.loc[biota_inds, \"parsed_name\"] = df_id_lookups.loc[biota_inds, \"parsed_name\"].apply(\n lambda d: [\"Biota\"] + d\n )\n\n physical_inds = df_id_lookups.parsed_name.apply(lambda d: d[0] in physical_kids)\n df_id_lookups.loc[physical_inds, \"depth\"] += 1\n df_id_lookups.loc[physical_inds, \"parsed_name\"] = df_id_lookups.loc[physical_inds, \"parsed_name\"].apply(\n lambda d: [\"Physical\"] + d\n )\n\n # Create columns for ancestor and descendant lists.\n df_id_lookups[\"child_name\"] = df_id_lookups.parsed_name.apply(lambda d: d[-1])\n\n df_id_lookups[\"ancestor_id_list\"] = [get_ancestor_ids(d, df_id_lookups) for d in df_id_lookups.index]\n\n df_id_lookups[\"descendant_id_list\"] = [get_descendant_ids(d, df_id_lookups) for d in df_id_lookups.index]\n\n # Create a multilabel, one hot encoded bit vector for each class, taking into account the hierarchy of ancestors, and unspecified descendants.\n # We now want to represent this class hierarchy as a bit-vector. Each class index has a unique bit in the vector. A root level class will turn on a single bit. A depth 4 class will turn on 4 bits.\n df_id_lookups[\"bit_vector\"] = [get_bit_vector(d, df_id_lookups) for d in df_id_lookups.index]\n df_id_lookups\n\n return df_id_lookups", "def revise_report_df(report_df):\n # quick check to see whether report_df column structure is as expected\n if report_df.columns.tolist() != ['0', '1', 'accuracy', 'macro avg', 'weighted avg']:\n print(\"Warning: Column names aren't as expected. Verify report_df output_dict is correct.\")\n\n report_df.columns = ['0', '1', 'accuracy', 'Macro Avg', 'Micro Avg' ]\n\n dict_columns = ['0', '1', 'Macro Avg', 'Micro Avg']\n keys = ['precision', 'recall', 'f1-score', 'support']\n\n for col in dict_columns:\n # revise key values to personalize to its associated column i.e. 
from 'precision' to 'precision_0'\n report_df[col] = report_df[col].apply(lambda x: customize_dict_keys(x, col, keys))\n\n # iterate row wise through dataframe to normalize dictionary values into flat tables\n new_dict = {str(classifier) + '_df': normalize_to_flat(classifier, report_df, col) for classifier in report_df.index.values.tolist()}\n\n # concat all classifier flat tables into one dataframe\n dict_df = pd.concat(list(new_dict.values())).reset_index().drop(columns=['index'], axis=1)\n\n # merge on existing report_df dataframe index and on dict_df 'classifier' column value\n report_df = report_df.merge(dict_df, how='left', left_index=True, left_on=None, right_on='classifier').set_index('classifier')\n\n # select only created columns\n report_df = report_df.iloc[:,5:]\n # sort columns and filter out 'support' related columns\n report_df = report_df[sorted([col for col in report_df.columns if 'support' not in col])]\n\n return report_df", "def update_nodes_df(nodes: pandas.DataFrame) -> None:\n nodes_clean = nodes.copy(deep=True)\n # Ensure that all '' values are NaN, so that those rows can be easily removed with dropna()\n nodes_clean.replace('', numpy.nan, inplace=True)\n nodes_clean.dropna(axis=0, how='any', inplace=True)\n nodes_clean.drop_duplicates(keep='first', inplace=True, ignore_index=True)\n\n print('\\nCache used at start of function: ' + str(read_node.cache_info()) + '.')\n print('There are ' + str(len(nodes_clean)) + ' nodes, updating node: 0 ', end='')\n count = 0\n columns = nodes_clean.columns\n for row in nodes_clean.itertuples():\n count += 1\n if count % 250 == 0:\n print(count, ' ', end='', flush=True)\n if count % 10000 == 0:\n print('\\n', end='', flush=True)\n\n node_properties = {}\n for prop_name in RICGRAPH_PROPERTIES_ADDITIONAL:\n for other_name in columns:\n if prop_name == other_name:\n node_properties[prop_name] = getattr(row, other_name)\n\n update_node(name=row.name, category=row.category, value=row.value,\n **node_properties)\n\n print(count, '\\n', end='', flush=True)\n print('Cache used at end of function: ' + str(read_node.cache_info()) + '.')\n return", "def encode_answers(df, schema):\n encoders = get_encoders(schema)\n\n for question in schema.questions:\n df[question.prediction_encoded] = encoders[question.name].transform(df[question.prediction])\n\n return df", "def iat_df(self, df):\n result = self.iat(**df).reset_coords(drop=True).to_dataframe()\n if isinstance(df, pd.DataFrame):\n result.index = df.index\n return result", "def encode_target(df, target_column, label_map):\n df_mod = df.copy()\n targets = df_mod[target_column].unique()\n # map_to_int = {name: n for n, name in enumerate(targets)}\n df_mod[target_column] = df_mod[target_column].replace(label_map)\n\n return (df_mod, targets)", "def tokenize_and_encode_pandas(dataframe,stopindex=1e9,max_length=128):\n encoded_tweets = []\n token_type_ids = []\n attention_mask = []\n labels_6_types = []\n labels_4_types = []\n interaction_types = []\n \n counter = 0\n for i in range(len(dataframe)):\n try:\n #tokenized_parent= tokenizer.tokenize(dataframe.iloc[i]['clean_target_text'])\n #tokenized_tweet = tokenizer.tokenize(dataframe.iloc[i]['clean_response_text'])\n text_parent= dataframe.iloc[i]['clean_target_text']\n text_tweet = dataframe.iloc[i]['clean_response_text']\n except Exception:\n #tokenized_parent= tokenizer.tokenize(dataframe.iloc[i]['target_text'])\n #tokenized_tweet = tokenizer.tokenize(dataframe.iloc[i]['response_text'])\n text_parent= dataframe.iloc[i]['target_text']\n 
text_tweet = dataframe.iloc[i]['response_text']\n \n interaction = dataframe.iloc[i]['interaction_type'] # reply or quote\n topic = dataframe.iloc[i]['event'] # get event\n topic = topic.replace('_', ' ') # replace underscore with space\n sep_token = ' [SEP] ' \n text1 = interaction + sep_token + topic + sep_token + text_parent\n text2 = text_tweet\n encoded_dict = tokenizer.__call__(text=text1,\n text_pair=text2,\n padding='max_length',\n truncation=True,\n is_split_into_words=False,\n max_length=max_length,\n return_tensors='pt')\n \n '''\n encoded_dict = tokenizer.__call__(text=tokenized_parent,\n text_pair=tokenized_tweet,\n padding='max_length',\n truncation=True,\n is_split_into_words=True,\n max_length=max_length,\n return_tensors='pt')\n \n '''\n '''\n encoded_dict = tokenizer.encode_plus(text=tokenized_tweet,\n text_pair=tokenized_parent,\n max_length=max_length,\n pad_to_max_length=True)\n '''\n encoded_tweets.append(encoded_dict['input_ids'])\n token_type_ids.append(encoded_dict['token_type_ids'])\n attention_mask.append(encoded_dict['attention_mask'])\n \n label = dataframe.iloc[i]['label']\n labels_6_types.append(convert_label_string2num(label, num_types=6))\n labels_4_types.append(convert_label_string2num(label, num_types=4))\n \n interaction_type = dataframe.iloc[i]['interaction_type']\n interaction_types.append(convert_interaction_type_string2num(interaction_type))\n \n if counter % 100 == 0:\n print('Tokenizing comment: %00000d' % counter)\n if counter > stopindex:\n break\n counter = counter + 1\n \n width = dataframe.shape[1]\n dataframe.insert(width+0,'encoded_tweets', encoded_tweets)\n dataframe.insert(width+1,'token_type_ids', token_type_ids)\n dataframe.insert(width+2,'attention_mask', attention_mask)\n dataframe.insert(width+3,'number_labels_6_types', labels_6_types)\n dataframe.insert(width+4,'number_labels_4_types', labels_4_types)\n dataframe.insert(width+5,'interaction_type_num', interaction_types)\n return dataframe", "def normalize_feature_association(feature_association):\n gene_identifiers = []\n for gene_symbol in feature_association['genes']:\n try:\n gene = get_gene(gene_symbol)\n except:\n gene = None\n if (gene):\n gene_identifiers.extend(gene)\n feature_association['gene_identifiers'] = gene_identifiers", "def taxa_data_frame(self):\n cols = list(self._taxa.keys())\n cols.remove(\"uid\")\n cols.remove(\"object\")\n df = DataFrame(self._taxa, columns=cols, index=self._taxa[\"uid\"])\n df.index.name = \"uid\"\n\n return df", "def ilr_transform(table: pd.DataFrame, tree: skbio.TreeNode) -> pd.DataFrame:\n _table, _tree = match_tips(table, tree)\n basis, _ = balance_basis(_tree)\n balances = ilr(_table.values, basis)\n in_nodes = [n.name for n in _tree.levelorder() if not n.is_tip()]\n return pd.DataFrame(balances,\n columns=in_nodes,\n index=table.index)", "def df_with_hexid_to_centroids_gdf(df, hexcolname='hexid'):\n seriesofcoordinates=df[hexcolname].apply(h3.h3_to_geo)\n geometria=seriesofcoordinates.apply(lambda row: Point(row[1],row[0])) ## Patty reversed indices\n gdf=gpd.GeoDataFrame(df, geometry=geometria)\n return gdf", "def runPhyML(aln, phymlOpt, geneDir):\n\t# convert to Phylip format and replace eventual \"!\" symbols (relic from using MACSE)\n\torigin = os.getcwd()\n\tos.chdir(geneDir)\n\toutPhy = aln.split(\"/\")[-1].split(\".\")[0]+\".phylip\"\n\t#aln = aln.split(\"/\")[-1]\n\ttmp = aln.split(\"/\")[-1].split(\".\")[0]+\".tmp\"\n\t\n\tlogger = logging.getLogger(\"main.tree\")\n\t\n\twith open(aln, \"rU\") as aln2:\n laln = 
aln2.read().replace(\"!\", \"N\")\n aln2.close()\n with open(tmp, \"w\") as temp:\n temp.write(laln)\n temp.close()\n \n\tinput_handle = open(tmp, \"rU\")\n\toutput_handle = open(outPhy, \"w\")\n\t\n\talignments = AlignIO.parse(input_handle, \"fasta\")\n\tAlignIO.write(alignments, output_handle, \"phylip-relaxed\")\n\n\toutput_handle.close()\n\tinput_handle.close()\n\tos.remove(tmp)\n\t\n\t# PhyML\n\tif phymlOpt != \"\":\n\t\ttry:\n\t\t\topt=phymlOpt.split(\"ALN \")[1]\n\t\t\tlogger.debug(\"phyml -i {:s} {}\".format(outPhy, opt))\n\t\t\tcmd(\"phyml -i {:s} {}\".format(outPhy, opt), False)\n\t\texcept:\n\t\t\tlogger.info(\"PhyML couldn't run with the provided info {}, running with default options.\".format(phymlOpt))\n\t\t\tcmd(\"phyml -i {:s} -v e -b -2\".format(outPhy), False)\n\telse:\n\t\tlogger.debug(\"phyml -i {:s} -v e -b -2\".format(outPhy))\n\t\tcmd(\"phyml -i {:s} -v e -b -2\".format(outPhy), False)\n\t\t\n\tos.chdir(origin)\n\t\n\treturn(geneDir+outPhy)", "def apply(self, df):\n encoded = []\n for feature_name, encoder in zip(self.feature_names, self.encoders):\n column = df[feature_name].to_numpy().reshape(-1, 1)\n encoded.append(pd.DataFrame(\n encoder.transform(column).todense(),\n index=df.index,\n columns=encoder.categories_[0]\n ))\n df = df.drop(columns=self.feature_names)\n df = pd.concat((df, *encoded), axis=1)\n return df", "def df_to_ascii(df, fid_out, print_offset=False, verbose=False):\n if verbose:\n print ('Writing to %s' % fid_out)\n\n with open(fid_out, 'w') as f: \n for _, r in df.iterrows():\n seed_id = f'{r.network}.{r.station}.{r.location}.{r.channel}'\n line = f'{seed_id:>18s} '\n\n if print_offset:\n line += f'{r.deast:17.4f} {r.dnorth:17.4f} '\n line += f'{r.elevation:17.4f} {r.edepth:17.4f} '\n line += f'{r.latitude:9.6f} {r.longitude:9.6f} '\n line += f'{r.starttime:8d} {r.endtime:8d} '\n line += f'{r.gain:15.4e}\\n'\n f.write(line)\n return", "def build_hierarchical_dataframe(df, levels, value_column):\n df_all_trees = pd.DataFrame(columns=['id', 'parent', 'value'])\n for i, level in enumerate(levels):\n df_tree = pd.DataFrame(columns=['id', 'parent', 'value'])\n dfg = df.groupby(levels[i:]).sum()\n dfg = dfg.reset_index()\n df_tree['id'] = dfg[level].copy()\n if i < len(levels) - 1:\n df_tree['parent'] = dfg[levels[i+1]].copy()\n else:\n df_tree['parent'] = 'All'\n df_tree['value'] = dfg[value_column]\n df_all_trees = df_all_trees.append(df_tree, ignore_index=True)\n total = pd.Series(dict(id='All', parent='',\n value=df[value_column].sum()\n ))\n df_all_trees = df_all_trees.append(total, ignore_index=True)\n\n df_all_trees[\"color\"] = [1 if df_all_trees.loc[i,\"id\"].startswith(\"White\")\n else 0.5 if df_all_trees.loc[i,\"id\"].startswith(\"All\")\n else 0 for i in range(len(df_all_trees))]\n\n df_all_trees['value'] = df_all_trees['value'].astype('int64')\n df_all_trees[\"percentage\"] = round(df_all_trees['value'] / df_all_trees[df_all_trees['id'] == 'All']['value'].tolist()[0] * 100, 2)\n df_all_trees\n\n df_all_trees = df_all_trees[df_all_trees[\"id\"]!= df_all_trees[\"parent\"]]\n\n return df_all_trees", "def from_df(df, address_column=\"address\", geocoder=None):\r\n from arcgis.geocoding import get_geocoders, geocode, batch_geocode\r\n if geocoder is None:\r\n geocoder = arcgis.env.active_gis._tools.geocoders[0]\r\n\r\n geoms = []\r\n if address_column in df.columns:\r\n # batch geocode addresses in the address column and use them as the geometry\r\n batch_size = geocoder.properties.locatorProperties.MaxBatchSize\r\n N = len(df)\r\n geoms = 
[]\r\n for i in range(0, N, batch_size):\r\n start = i\r\n stop = i + batch_size if i + batch_size < N else N\r\n # print('Geocoding from ' + str(start) + ' to ' + str(stop))\r\n\r\n res = batch_geocode(list(df[start:stop][address_column]), geocoder=geocoder)\r\n for index in range(len(res)):\r\n address = df.ix[start + index, address_column]\r\n try:\r\n loc = res[index]['location']\r\n x = loc['x']\r\n y = loc['y']\r\n # self.ix[start + index, 'x'] = x\r\n # self.ix[start + index, 'y'] = y\r\n geoms.append(arcgis.geometry.Geometry({'x': x, 'y': y}))\r\n\r\n except:\r\n x, y = None, None\r\n try:\r\n loc = geocode(address, geocoder=geocoder)[0]['location']\r\n x = loc['x']\r\n y = loc['y']\r\n except:\r\n print('Unable to geocode address: ' + address)\r\n pass\r\n # self.ix[start + index, 'x'] = x\r\n # self.ix[start + index, 'y'] = y\r\n geoms.append(None)\r\n else:\r\n raise ValueError(\"Address column not found in dataframe\")\r\n\r\n return SpatialDataFrame(df, geometry=geoms)", "def reindex_subcomponent_taxa():\n pass", "def insert_values():\n merged_df = pd.read_csv(\"merged_fuzzy_df.csv\")\n df = pd.read_csv(\"df.csv\")\n story_entity_df = pd.read_csv(\"story_entity_df.csv\")\n\n merged_df['entity_id'] = merged_df['entity_id'].apply(str)\n\n # find and input new types\n TYPES = get_types_ids(list(story_entity_df[\"label\"].unique()))\n\n new_parents = {}\n new_alias = {}\n\n merged_df[\"wiki\"].fillna(\"\", inplace=True)\n\n # if score = -2, it needs new alias as well as new parents\n for index, row in merged_df[merged_df[\"score\"] == -2].iterrows():\n\n # create a new parent\n if row[\"text\"] not in new_parents:\n new_parents[row[\"text\"]] = [\n str(uuid.uuid4()),\n row[\"text\"],\n TYPES[row[\"label\"]],\n row[\"wiki\"],\n True,\n str(datetime.utcnow())\n ]\n\n merged_df.at[index,\n \"entity_id\"] = new_parents[row[\"text\"]][0]\n\n # add alias with corresponding parent ID\n if row[\"text\"] not in new_alias:\n new_alias[row[\"text\"]] = [\n str(uuid.uuid4()),\n row[\"text\"],\n row[\"wiki\"],\n row[\"score\"],\n str(datetime.utcnow()),\n new_parents[row[\"text\"]][0],\n TYPES[row[\"label\"]]\n ]\n\n for index, row in merged_df[merged_df[\"score\"] >= -1].iterrows():\n # if score >= -1, it needs new alias\n if row[\"text\"] not in new_alias:\n new_alias[row[\"text\"]] = [\n str(uuid.uuid4()),\n row[\"text\"],\n row[\"wiki\"],\n row[\"score\"],\n str(datetime.utcnow()),\n row[\"entity_ref_id\"],\n TYPES[row[\"label\"]]\n ]\n\n merged_df.at[index,\n \"entity_id\"] = row[\"entity_ref_id\"]\n\n # if already matched, write story_entity_id into entity_id for mapping\n for index, row in merged_df[merged_df[\"score\"].isnull()].iterrows():\n merged_df.at[index,\n \"entity_id\"] = row[\"entity_ref_id\"]\n\n for _, value in new_parents.items():\n STORY_REF_INPUTS.append(value)\n\n for _, value in new_alias.items():\n ENTITY_ALIAS_INPUTS.append(value)\n\n logging.info(\"parents: {}\".format(len(STORY_REF_INPUTS)))\n logging.info(\"alias: {}\".format(len(ENTITY_ALIAS_INPUTS)))\n\n columns_to_drop = [\"legal_name\", \"wiki\", \"label\",\n \"entity_ref_id\", \"entity_name\",\n \"text\", \"score\"]\n merged_df.drop(columns_to_drop, axis=1, inplace=True)\n\n # generate uuids for story_map\n uuids = []\n for _ in range(len(merged_df)):\n uuids.append(str(uuid.uuid4()))\n\n logging.info(\"check na {}\".format(merged_df.isnull().values.any()))\n\n # input new_entites to table\n # using entity UUID and story UUID to apis_story_enity_map table\n\n merged_df[\"uuid\"] = uuids\n 
merged_df[\"created_at\"] = str(datetime.utcnow())\n\n merged_df = merged_df[[\"uuid\", \"entity_id\",\n \"story_uuid\", \"mentions\",\n \"salience\", \"created_at\"]]\n\n STORY_MAP_INPUTS = [tuple(row)\n for row in merged_df.itertuples(index=False)]\n\n # see if there are apis_entity elements in the stories\n match_manual_entity_to_story(df)\n\n insert_story_entity_ref(STORY_REF_INPUTS)\n insert_entity_alias(ENTITY_ALIAS_INPUTS)\n insert_story_entity_map(STORY_MAP_INPUTS)\n\n logging.info(\"finished\")\n\n logging.info(\"delete articles without entities\")\n articles_without_entities(df, story_entity_df)", "def prepare_iris_data(df, encode=True):\n \n # Drop primary/foreign keys\n df = df.drop(columns=[\"measurement_id\", \"species_id\"])\n\n # Rename \"species_name\" to species\n df = df.rename(columns={\"species_name\": \"species\"})\n\n if(encode):\n encoder = LabelEncoder()\n encoder.fit(df.species)\n df.species = encoder.transform(df.species)\n\n return df", "def get_tag_encoded_df(rgi_df, genome_ids, column):\n class_dict = gather_term_into_dict(rgi_df, genome_ids, column)\n\n # get all values\n all_class_values = set(itertools.chain.from_iterable(class_dict.values()))\n\n class_df = pd.DataFrame(index=genome_ids, columns=all_class_values)\n class_df = class_df.fillna(0)\n for genome_id, class_list in class_dict.items():\n for class_value in class_list:\n class_df.loc[genome_id, class_value] += 1\n\n return class_df", "def _flip_wdi(df: pd.DataFrame) -> pd.DataFrame:\n\n log.info(\"Flipping WDI\")\n\n df = df.rename(columns=lambda x: x.replace(\" \", \"\"))\n df = df.rename(columns=lambda x: x.lower())\n\n # Headache-magic, tbh I don't remember how it works.\n df = df.drop([\"countryname\", \"indicatorname\"], axis=1)\n df = df.set_index([\"countrycode\", \"indicatorcode\"])\n df.columns.name = \"year\"\n df = df.stack().unstack(\"indicatorcode\")\n df = df.reset_index()\n df[\"year\"] = df[\"year\"].astype(\"int32\")\n df = df.set_index([\"year\", \"countrycode\"]).sort_index()\n\n df = df.rename(columns=lambda x: x.replace(\".\", \"_\"))\n df = df.rename(columns=lambda x: x.lower())\n\n log.info(\"Done flipping WDI\")\n\n return df", "def flatten_nested_df(\n df: pd.DataFrame, include_prefix: bool = True, seperator: str = \"_\", recursive: bool = True\n) -> pd.DataFrame:\n if seperator is None:\n seperator = \"_\"\n df = df.reset_index()\n\n # search for list and map\n s = (df.applymap(type) == list).all()\n list_columns = s[s].index.tolist()\n\n s = (df.applymap(type) == dict).all()\n dict_columns = s[s].index.tolist()\n\n if len(list_columns) > 0 or len(dict_columns) > 0:\n new_columns = []\n\n for col in dict_columns:\n # expand dictionaries horizontally\n expanded = None\n if include_prefix:\n expanded = pd.json_normalize(df[col], sep=seperator).add_prefix(f\"{col}{seperator}\")\n else:\n expanded = pd.json_normalize(df[col], sep=seperator).add_prefix(f\"{seperator}\")\n expanded.index = df.index\n df = pd.concat([df, expanded], axis=1).drop(columns=[col])\n new_columns.extend(expanded.columns)\n\n for col in list_columns:\n df = df.drop(columns=[col]).join(df[col].explode().to_frame())\n new_columns.append(col)\n\n # check if there are still dict o list fields to flatten\n s = (df[new_columns].applymap(type) == list).all()\n list_columns = s[s].index.tolist()\n\n s = (df[new_columns].applymap(type) == dict).all()\n dict_columns = s[s].index.tolist()\n if recursive and (len(list_columns) > 0 or len(dict_columns) > 0):\n df = flatten_nested_df(df, include_prefix=include_prefix, 
seperator=seperator, recursive=recursive)\n\n return df", "def preorder_encode(self):\n preorder = []\n\n def recurse(x, idx):\n preorder.append(x.get_features())\n tree_cnn_indexes = []\n # leaf node\n if type(x) == TableReader:\n return [[idx, 0, 0]]\n left_tree_cnn_indexes = recurse(x.left_node, idx+1)\n left_tree_count = len(preorder)\n right_tree_cnn_indexes = recurse(x.right_node, left_tree_count+1)\n tree_cnn_indexes = [[\n idx, left_tree_cnn_indexes[0][0], right_tree_cnn_indexes[0][0]]]\n tree_cnn_indexes.extend(left_tree_cnn_indexes)\n tree_cnn_indexes.extend(right_tree_cnn_indexes)\n return tree_cnn_indexes\n\n tree_cnn_indexes = recurse(self, 1)\n # padding zero features on the first dimession\n preorder = np.pad(preorder, (1, 0), 'constant', constant_values=(0, 0))\n return np.array(preorder).transpose(1, 0), np.array(tree_cnn_indexes, dtype=np.int).flatten().reshape(-1, 1)", "def add_parent(session, df):\n try:\n for _, row in df.iterrows():\n parent = Parent()\n parent.name = row['parent_name']\n parent.family = row['family']\n session.add(parent)\n except Exception as ex:\n session.rollback()\n raise ex\n else:\n session.commit()", "def clean_up_raw(df_raw):\n # exclude_subset = ['well', 'tile', 'cell', 'intensity', 'blob'] # causes issues with later joins, maybe a pandas bug\n import lasagna.utils\n df_raw[CYCLE] = df_raw[CYCLE].astype(int)\n df_raw = df_raw.sort_values([WELL, TILE, CELL, BLOB, CYCLE, CHANNEL])\n return df_raw", "def __encode_genres(self, df):\n\t\tself.genre_mlb = MultiLabelBinarizer()\n\t\tdf = df.join(pd.DataFrame(self.genre_mlb.fit_transform(df.pop(\"genres\")), columns=self.genre_mlb.classes_,\n\t\t\t\t\t\t\t\t index=df.index),\n\t\t\t\t\t lsuffix='l')\n\t\treturn df", "def reconstruct_tree(prefix, params, aa=False):\n aln_file = alignment_name_raw(prefix, params)\n out_tree = reconstructed_tree_name(prefix, params)\n if aa:\n call = [\"fasttree\", aln_file, \">\", out_tree]\n os.system(\" \".join(call))\n else:\n fast_opts = [\n \"-ninit\", \"2\",\n \"-n\", \"2\",\n \"-me\", \"0.05\"\n ]\n call = [\"iqtree\"] + fast_opts +[\"-nt 1\", \"-s\", aln_file, \"-m\", 'GTR+R10',\n \">\", \"iqtree.log\"]\n os.system(\" \".join(call))\n os.system(\"mv %s.treefile %s\"%(aln_file, out_tree))\n os.system(\"rm %s.*\"%aln_file)\n\n rec_tree = Phylo.read(out_tree, 'newick')\n rec_tree.root_at_midpoint()\n rec_tree.ladderize()\n Phylo.write(rec_tree, out_tree, 'newick')", "def phyMLTree(data, phymlOpt):\n\tlogger=logging.getLogger(\"main.tree\")\n\tdAlnTree = {}\n\tlogger.info(\"Running PhyML to produce gene phylogenetic tree\")\n\tTreesFile = runPhyML(data.aln, phymlOpt, data.o)\n\tdata.tree = TreesFile+\"_phyml_tree.txt\"\n\tlogger.info(\"Reconstructed tree using PhyML: {:s}\".format(data.tree))\n\n\tdAlnTree[data.aln] = data.tree\n\treturn(dAlnTree)", "def rate_encode_df(df, to_rate_encode, replace=True, prefix='rate_'):\n if not isinstance(to_rate_encode, list):\n if isinstance(to_rate_encode, str):\n to_rate_encode = [to_rate_encode]\n else:\n raise ValueError(f\"`to_rate_encode` must be a list or str. 
{type(to_rate_encode)} not accepted.\")\n else:\n\n if not replace:\n try:\n new_cols = df.loc[:, to_rate_encode].apply(_rate_encode_category, axis=0)\n new_col_names = [f\"{prefix}_{i}\" for i in new_cols.columns]\n new_cols.columns = new_col_names\n df = df.join(new_cols)\n\n except TypeError as e: # If the slice is just a single series, dont need to use apply really.\n if len(to_rate_encode) == 1:\n df[f\"{prefix}_{to_rate_encode[0]}\"] = _rate_encode_category(df.loc[:, to_rate_encode[0]])\n else:\n try:\n df.loc[:, to_rate_encode] = df.loc[:, to_rate_encode].apply(_rate_encode_category, axis=0)\n except TypeError as e: # If the slice is just a single series, axis shouldn't be passed.\n if len(to_rate_encode) == 1:\n df.loc[:, to_rate_encode] = _rate_encode_category(df.loc[:, to_rate_encode[0]])\n\n return df", "def distanceToTree_deprec(self,taxonomic_forest,update_inner_attributes=True):\n #Thankfully the distance is symmetric by definition so order doesn't matter.\n Jacobian = []\n logger.warning('[FOR DEVELOPER] This is not optimized. Should be necessary to use attr_t1 and attr_t2 with name_id (integer)')\n for i in settings.TAXONOMIC_TREE_KEYS:\n F1 = []\n for j in settings.TAXONOMIC_TREE_KEYS:\n d = self.forest[i].robinson_foulds(taxonomic_forest[j],unrooted_trees=True)[0]\n F1.append(float(d))\n Jacobian.append(F1)\n if update_inner_attributes:\n self.JacobiM = np.matrix(Jacobian)\n self.Jacobi = np.linalg.det(self.JacobiM)\n return np.matrix(Jacobian)", "def correct_tree_leaf_names(filename_in, filename_out):\n tree = Phylo.read(filename_in, \"newick\")\n ena_regex = re.compile(r\"ENA\\|[A-Z0-9]*\\|\")\n for terminal in tree.get_terminals():\n terminal.name = terminal.name.replace(\"_\", \"@\", 1)\n terminal.name = ena_regex.sub(\"\", terminal.name)\n Phylo.write(tree, filename_out, \"newick\")", "def predictions_for_df(df, inferrer):\n working_df = df.copy()\n working_df['predictions'] = inferrer.get_activations(\n working_df.sequence.values).tolist()\n return working_df", "def clinical_concept_mapper(self) -> pd.DataFrame:\n\n level_maps = []\n\n if self.ancestor_codes is not None:\n levels = {'concept': {'codes': self.concept_codes, 'strings': self.concept_strings},\n 'ancestor': {'codes': self.ancestor_codes,\n 'strings': self.ancestor_strings}}\n else:\n levels = {'concept': {'codes': self.concept_codes, 'strings': self.concept_strings}}\n\n for level in levels.keys():\n print('\\n*** Annotating Level: {}'.format(level))\n primary_key, data = self.primary_key, self.clinical_data.copy()\n code_level, code_strings = levels[level]['codes'][0], levels[level]['strings'] # type: ignore\n if level == 'ancestor' or any(x for x in data[code_level] if '|' in x):\n data = column_splitter(data, primary_key, [code_level], '|')[[primary_key] + [code_level]]\n data[code_level] = normalizes_source_codes(data[code_level].to_frame(), self.source_code_map)\n else:\n data[code_level] = normalizes_source_codes(data[code_level].to_frame(), self.source_code_map)\n\n # STEP 1: UMLS CUI + SEMANTIC TYPE ANNOTATION\n print('Performing UMLS CUI + Semantic Type Annotation')\n if self.umls_cui_data is not None and self.umls_tui_data is not None:\n umls_map = self.umls_cui_annotator(data.copy(), primary_key, code_level)\n sub = [code_level, 'UMLS_CODE', 'UMLS_CUI']\n data_stacked = data_frame_subsetter(umls_map[[primary_key] + sub], primary_key, sub)\n else:\n print('Did not provide MRCONSO and MRSTY Files -- Skipping UMLS Annotation Step')\n umls_map, clinical_subset = None, data[[primary_key, 
code_level]]\n data_stacked = data_frame_subsetter(clinical_subset, primary_key, [code_level])\n\n # STEP 2 - DBXREF ANNOTATION\n print('Performing DbXRef Annotation')\n stacked_dbxref = self.dbxref_mapper(data_stacked.copy(), primary_key, level)\n # files = 'resources/mappings/' + level + '_dbXRef_Mappings.csv'\n # stacked_dbxref.to_csv(files, sep=',', index=False, header=True)\n\n # STEP 3 - EXACT STRING MAPPING\n print('Performing Exact String Mapping')\n clinical_strings = self.clinical_data.copy()[[primary_key] + code_strings] # type: ignore\n split_strings = column_splitter(clinical_strings, primary_key, code_strings, '|') # type: ignore\n split_strings = split_strings[[primary_key] + code_strings] # type: ignore\n split_strings_stacked = data_frame_subsetter(split_strings, primary_key, code_strings) # type: ignore\n stacked_strings = self.exact_string_mapper(split_strings_stacked, primary_key, level)\n # files_str = 'resources/mappings/' + level + '_String_Mappings.csv'\n # stacked_strings.to_csv(files_str, sep=',', index=False, header=True)\n\n # STEP 4 - COMBINE RESULTS\n print('Aggregating Mapping Results')\n # dbXRef annotations\n if len(stacked_dbxref) != 0:\n ont_type_column = [col for col in stacked_dbxref.columns if 'TYPE' in col][0]\n dbxrefs = data_frame_grouper(stacked_dbxref.copy(), primary_key, ont_type_column,\n aggregates_column_values)\n else:\n dbxrefs = None\n\n # exact string annotations\n if len(stacked_strings) != 0:\n ont_type_column = [col for col in stacked_strings.columns if 'TYPE' in col][0]\n strings = data_frame_grouper(stacked_strings.copy(), primary_key, ont_type_column,\n aggregates_column_values)\n else:\n strings = None\n\n # umls annotations\n if umls_map is not None:\n umls, agg_cols = umls_map[[primary_key, 'UMLS_CUI', 'UMLS_SEM_TYPE']], ['UMLS_CUI', 'UMLS_SEM_TYPE']\n umls = aggregates_column_values(umls.copy(), primary_key, agg_cols, ' | ')\n umls.columns = [primary_key] + [level.upper() + '_' + x for x in umls.columns if x != primary_key]\n else:\n umls = None\n\n # combine annotations\n dfs = [x for x in [dbxrefs, strings, umls] if x is not None]\n if len(dfs) > 1:\n level_maps.append(reduce(lambda x, y: pd.merge(x, y, how='outer', on=primary_key), dfs))\n else:\n level_maps.append(dfs[0])\n\n # STEP 5 - COMBINE CONCEPT AND ANCESTOR DATA\n print('Combining Concept and Ancestor Maps')\n full_map = reduce(lambda x, y: pd.merge(x, y, how='outer', on=self.primary_key), level_maps)\n complete_map = pd.merge(self.clinical_data, full_map, how='left', on=self.primary_key)\n complete_map.columns = [x.upper() for x in complete_map.columns]\n complete_map.fillna('', inplace=True)\n\n return complete_map", "def setSubtreeDF(self, index, subtree):\n if index == 0:\n try:\n self[:] = subtree\n except TypeError:\n del self[1:]\n self[0] = subtree\n return\n \n total = 0\n for i, child in enumerate(self):\n if total == index:\n self[i] = subtree\n return\n nbr_child = child.size\n if nbr_child + total > index:\n child.setSubtreeDF(index-total, subtree)\n return\n total += nbr_child", "def inverse_transform(self, df):\n return df", "def ids_to_tree(self, emb):\n\n tree = TreeData()\n tree.nodes = [] # override the technical root -- the tree will be created including the technical root\n tree.parents = []\n\n # build the tree recursively (start at position 2 to skip the <GO> symbol and 1st opening bracket)\n self._create_subtree(tree, -1, emb, 2)\n return tree", "def transform(self, incoming_df, **tranform_kwargs):\n outgoing_df = incoming_df.copy()\n 
outgoing_df[self.feature] = outgoing_df[self.feature]\\\n .map(self.map)\\\n .fillna(self.default_value) \\\n .astype(self.coerce, errors='ignore')\n\n return outgoing_df", "def utify_chars(babylex_df):\n babylex_df['root'] = babylex_df['root'].str.replace(\"T\", \"ṭ\")\n babylex_df['root'] = babylex_df['root'].str.replace(\"c\", \"š\")\n babylex_df['root'] = babylex_df['root'].str.replace(\"S\", \"ṣ\")\n babylex_df['root'] = babylex_df['root'].str.replace(\"x\", \"'\")\n\n babylex_df['infinitive'] = babylex_df['infinitive'].str.replace(\"T\", \"ṭ\")\n babylex_df['infinitive'] = babylex_df['infinitive'].str.replace(\"c\", \"š\")\n babylex_df['infinitive'] = babylex_df['infinitive'].str.replace(\"S\", \"ṣ\")\n babylex_df['infinitive'] = babylex_df['infinitive'].str.replace(\"X\", \"'\")\n\n babylex_df['infinitive'] = babylex_df['infinitive'].str.replace(\"aa\", \"ā\")\n babylex_df['infinitive'] = babylex_df['infinitive'].str.replace(\"ee\", \"ē\")\n babylex_df['infinitive'] = babylex_df['infinitive'].str.replace(\"ii\", \"ī\")\n babylex_df['infinitive'] = babylex_df['infinitive'].str.replace(\"uu\", \"ū\")\n\n babylex_df['infinitive'] = babylex_df['infinitive'].str.replace(\"A\", \"â\")\n babylex_df['infinitive'] = babylex_df['infinitive'].str.replace(\"E\", \"ê\")\n babylex_df['infinitive'] = babylex_df['infinitive'].str.replace(\"I\", \"î\")\n babylex_df['infinitive'] = babylex_df['infinitive'].str.replace(\"U\", \"û\")\n\n return babylex_df", "def preprocess_data_inplace(df):\n encode(PREPROCESS_LABEL_ENCODERS, df, ['fecha_dato', 'fecha_alta'])", "def dbxref_mapper(self, data: pd.DataFrame, primary_key: str, code_type: str) -> pd.DataFrame:\n\n col_lab = code_type.upper() + '_DBXREF_ONT_' # column labels\n ont_labels = merge_dictionaries(self.ont_dict, 'label', reverse=True)\n\n # convert ontology dictionary to Pandas DataFrame\n ont_df = pd.concat([pd.DataFrame(self.ont_dict[ont]['dbxref'].items(), columns=['CODE', col_lab + 'URI'])\n for ont in self.ont_dict.keys() if len(self.ont_dict[ont]['dbxref']) > 0])\n # normalize source_code prefix values\n ont_df['CODE'] = normalizes_source_codes(ont_df['CODE'].to_frame(), self.source_code_map)\n # merge ontology data and clinical data and run ohdsi ananke approach to specifically pull umls ont mappings\n if self.umls_cui_data is not None:\n dbxrefs = pd.concat(\n [data.merge(ont_df, how='inner', on='CODE').drop_duplicates(),\n ohdsi_ananke(primary_key, list(self.ont_dict.keys()), ont_df.copy(), data, self.umls_cui_data.copy())]\n )\n else:\n dbxrefs = data.merge(ont_df, how='inner', on='CODE').drop_duplicates()\n\n # update content and labels\n dbxrefs[col_lab + 'TYPE'] = dbxrefs[col_lab + 'URI'].apply(lambda x: x.split('/')[-1].split('_')[0])\n dbxrefs[col_lab + 'LABEL'] = dbxrefs[col_lab + 'URI'].apply(lambda x: ont_labels[x])\n # update evidence formatting --> EX: CONCEPTS_DBXREF_UMLS:C0008533\n dbxrefs[col_lab + 'EVIDENCE'] = dbxrefs['CODE'].apply(lambda x: col_lab[0:-4] + x)\n # drop unneeded columns\n dbxrefs = dbxrefs[[primary_key] + [x for x in list(dbxrefs.columns) if x.startswith(col_lab[0:-4])]]\n\n return dbxrefs.drop_duplicates()", "def augment_dataframe(self, df: pd.DataFrame) -> pd.DataFrame:", "def set_infected_nodes(self, list_or_dataframe):\n\n infected_dataframe = list_or_dataframe\n\n # Convert list to dataframe\n if type(list_or_dataframe) == list:\n rdd_list = self.sc.parallelize(list_or_dataframe)\n row_rdd_list = rdd_list.map(lambda x: Row(x))\n field_list = [StructField(\"id\", LongType(), True)]\n schema_list = 
StructType(field_list)\n infected_dataframe = self.sqlContext.createDataFrame(row_rdd_list, schema_list)\n\n # Create column for influence attribute containing 1's\n infected_dataframe = infected_dataframe.withColumn(self.attribute, lit(1.0))\n infected = infected_dataframe\n\n self.infected_nodes = infected_dataframe\n\n # Merge to original vertices of graph\n orig_vertices = self.graph.vertices.selectExpr(\"id as id\")\n\n # Update graph\n orig_edges = self.graph.edges\n new_vertices = orig_vertices.join(infected, \"id\", \"left_outer\").na.fill(0)\n self.graph = GraphFrame(new_vertices, orig_edges)", "def save_ancestor(self, ancestor):\n self.context = copy(self.ancestor)\n self.ancestor = ancestor" ]
[ "0.54605395", "0.52514964", "0.52182794", "0.51729226", "0.5131557", "0.4997631", "0.49200213", "0.49033338", "0.47637245", "0.47538468", "0.4739272", "0.46873105", "0.46594372", "0.4646138", "0.46402073", "0.4636189", "0.4631968", "0.46285406", "0.46198457", "0.46106067", "0.45727736", "0.45533586", "0.45461905", "0.45452124", "0.45361745", "0.45145717", "0.45119193", "0.45046887", "0.4496976", "0.44905502", "0.4483655", "0.44831976", "0.44619435", "0.44577062", "0.44528896", "0.44520462", "0.444336", "0.4441795", "0.44315022", "0.44315022", "0.4423114", "0.4415867", "0.43997225", "0.43987373", "0.43970448", "0.43916783", "0.43804693", "0.43767306", "0.4375963", "0.43660095", "0.43563515", "0.43394792", "0.43172595", "0.43165553", "0.43150097", "0.43142104", "0.43125558", "0.43116277", "0.43043193", "0.4294703", "0.4289703", "0.42857578", "0.42787823", "0.42735878", "0.4269687", "0.42695165", "0.42538026", "0.42496055", "0.4243098", "0.42419565", "0.4232393", "0.42288542", "0.42282254", "0.42272434", "0.42213902", "0.42213696", "0.42132455", "0.4212801", "0.4206157", "0.41960075", "0.41918272", "0.4188032", "0.41706792", "0.41700575", "0.416368", "0.41612846", "0.41591948", "0.41502532", "0.4143537", "0.41296905", "0.4129211", "0.41291615", "0.41228274", "0.4121497", "0.41047338", "0.41035494", "0.40990195", "0.4098594", "0.4095326", "0.40930587" ]
0.8255052
0
Set default values for options.
def initialize_options(self): #Each user option must be listed here with their default value. self.pylint_rcfile = ''
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_defaults(self):\r\n for name, option in self.options.iteritems():\r\n if not option.is_required():\r\n self.set_value(name, option, option.default)", "def _set_default_options(options):\n\n options_defaults = {\n 'run_storage_base': None,\n 'watch': False,\n 'verbose': True,\n # 'uploader_config': 'uploader_config.toml',\n 'logging_config': 'logging_config.toml',\n 'notify_frequency': 60*24, # daily\n 'skip_bad_permissions': True,\n }\n\n for k, v in options_defaults.items():\n # Tranfer any known values set in options.config to the top level\n # options.\n # Any key not present in the config file gets set to the default value.\n if k not in options.config:\n options[k] = v\n else:\n options[k] = options.config[k]\n del options.config[k]\n\n if options[k] is None:\n options[k] = v\n\n return options", "def set_global_defaults(**kwargs):\n for kw in kwargs:\n if kw not in VALID_OPTIONS:\n raise KeyError(f\"Invalid option {kw!r}\")\n _default_options[kw] = kwargs[kw]", "def initialize_options(self):\n pass", "def initialize_options(self):\n pass", "def set_options(self, options):\n self.options = options", "def set_options(self, options):\n self.options = options", "def add_default_options(self):\n\n options = getattr(self.parent, \"pyautodoc_set_default_option\", [])\n for option in options:\n self.set_default_option(option)", "def set_defaults(self):\n\n for k, v in self.DEFAULTS.items():\n if not getattr(self, k, None):\n setattr(self, k, v)", "def add_defaults(self, options):\n if 'option_defaults' in self.cscript:\n from_cscript = self.cscript['option_defaults']\n if isinstance(from_cscript, dict):\n defaults_dict = from_cscript\n else:\n log_normal(\"Deprecated cscript option_defaults method; replace with a dict\")\n defaults_dict = from_cscript()\n for k, v in defaults_dict.items():\n if not k in options:\n options[k] = v", "def default_options(cls) -> Dict:\n return {}", "def __init__(self, **user_options):\n self.options = config.default_options.copy()\n self.configure(**user_options)", "def initialize_options(self):\n self.all = None", "def initialize_options(self):", "def setDefaultOpts(self):\n\t\tself.getopt.s = ['h']\n\t\tself.getopt.l = ['help']\n\t\tself.getopt.s.extend([('x:', 'screen')])\n\t\tself.getopt.l.extend([('xml=', 'screen')])\n\t\treturn", "def default_options(cls) -> Dict:\n options = super().default_options()\n # scaling factor for temperature adaptation\n options['eta'] = 100\n # controls the adaptation degeneration velocity of the temperature\n # adaption.\n options['nu'] = 1e3\n\n return options", "def _unset_defaults_and_overrides(self):\n for info, group in self._all_opt_infos():\n info.pop('default', None)\n info.pop('override', None)", "def _set_defaults(self):\n self._opts = {\n \"insecure\": [],\n \"header\": [],\n \"verbose\": [],\n \"nobody\": [],\n \"proxy\": [],\n \"resume\": [],\n \"ctimeout\": [\"--connect-timeout\", str(self.ctimeout)],\n \"timeout\": [\"-m\", str(self.timeout)],\n \"other\": [\"-s\", \"-q\", \"-S\"]\n }\n if self.insecure:\n self._opts[\"insecure\"] = [\"-k\"]\n if Msg().level > Msg.DBG:\n self._opts[\"verbose\"] = [\"-v\"]\n self._files = {\n \"url\": \"\",\n \"error_file\": FileUtil(\"execurl_err\").mktmp(),\n \"output_file\": FileUtil(\"execurl_out\").mktmp(),\n \"header_file\": FileUtil(\"execurl_hdr\").mktmp()\n }", "def initialize_options(self):\n pass", "def defaults(self, **kwargs):\n for i in kwargs:\n self._.setdefault(i, kwargs[i])\n return self", "def _default_options(cls) -> Options:\n options = 
super()._default_options()\n\n options.curve_fitter = multi_curve_fit\n options.data_processor = None\n options.normalization = False\n options.x_key = \"xval\"\n options.plot = True\n options.axis = None\n options.xlabel = None\n options.ylabel = None\n options.xlim = None\n options.ylim = None\n options.xval_unit = None\n options.yval_unit = None\n options.result_parameters = None\n options.return_data_points = False\n options.curve_plotter = \"mpl_single_canvas\"\n options.style = PlotterStyle()\n\n # automatically populate initial guess and boundary\n fit_params = cls._fit_params()\n options.p0 = {par_name: None for par_name in fit_params}\n options.bounds = {par_name: None for par_name in fit_params}\n\n return options", "def set(cls,options):\n cls.instance = Options(options)", "def setOptions(self, options):\n assert isinstance(options, list);", "def set_defaults(self, **kw):\n group = kw.pop('group', None)\n for o, v in kw.items():\n self.cfg_fixture.set_default(o, v, group=group)", "def _create_options(self):\n self._OPTIONS = {}", "def __updateOptions__(self, option_dict):\n out = self.default_options.copy()\n out.update(option_dict)\n return out", "def options(self, options):\n\n self._options = options", "def options(self, options):\n\n self._options = options", "def init_defaults(self, defaults):\r\n for (sect, opt, default) in defaults:\r\n self._default(sect, opt, default)", "def set_default_value(self, default_value, option_type):\n if default_value is not None:\n self.defaultValue = default_value\n if option_type is not None:\n self.optionType = option_type", "def update_option(**options):\n api_options = default_options\n\n for k,v in option.iteritems():\n if k in default_options:\n api_options[k] = v\n\n return api_options", "def _default_options(cls):\n default_options = super()._default_options()\n default_options.data_processor = dp.DataProcessor(\n input_key=\"counts\",\n data_actions=[dp.Probability(\"1\"), dp.BasisExpectationValue()],\n )\n default_options.plotter.set_figure_options(\n xlabel=\"Flat top width\",\n ylabel=\"Pauli expectation values\",\n xval_unit=\"s\",\n ylim=(-1, 1),\n )\n default_options.data_subfit_map = {\n \"x\": {\"meas_basis\": \"x\"},\n \"y\": {\"meas_basis\": \"y\"},\n \"z\": {\"meas_basis\": \"z\"},\n }\n\n return default_options", "def set_default_protein_options(treebuilder):\n treebuilder.options = get_default_options()", "def initDefaults(self, kwargs):\n \n for k,v in self.defaults.iteritems():\n if k in kwargs: # use assigned values\n setattr(self, k, kwargs[k])\n else: # use default values\n setattr(self, k, v)\n \n for k,v in kwargs.iteritems():\n if k not in self.defaults:\n setattr(self, k, v)\n pass", "async def _opt_default(self, ctx, option):\n try:\n guild_options = self.database.get_guild_options(ctx.guild.id)\n setattr(guild_options, option, None)\n self.database.save_item(guild_options)\n await ctx.send(f\"Option {option} set to default\")\n except AttributeError:\n await ctx.send(\"I don't recognize that option.\")", "def set_default_dna_options(treebuilder):\n treebuilder.options = get_default_options()", "def _calculate_options(self, options, option_overrides):\n _options = {}\n _options.update(WidgetSettings.OPTIONS)\n _options.update(options if isinstance(options, dict) else {})\n if 'dateFormat' in _options and 'altFormat' not in _options:\n _options['altFormat'] = _options.pop('dateFormat')\n _options.update(option_overrides)\n self.options = _options", "def _default_experiment_options(cls) -> Options:\n options = 
super()._default_experiment_options()\n\n options.duration = 160\n options.sigma = 40\n options.amplitudes = np.linspace(-0.95, 0.95, 51)\n options.schedule = None\n\n return options", "def _default_parameters(cls) -> Options:\n params = super()._default_parameters()\n params.main_axes = None\n params.i_means = None\n params.q_means = None\n params.scales = None\n\n return params", "def setOptions(self, options):\n assert isinstance(options, list);\n\n self.__options = list();\n\n for option in options:\n self.setOption(option);", "def options(self, value):\n self._options = value\n if self._options.get(\"legacy\"):\n self._options[\"extended\"] = False", "def _default_experiment_options(cls) -> Options:\n options = super()._default_experiment_options()\n options.frequency_shift = None\n\n return options", "def test_defaultValues(self):\n argV = []\n self.usage.parseOptions(argV)\n self.failUnlessEqual(self.usage.opts['fooint'], 392)\n self.assert_(isinstance(self.usage.opts['fooint'], int))\n self.failUnlessEqual(self.usage.opts['foofloat'], 4.23)\n self.assert_(isinstance(self.usage.opts['foofloat'], float))\n self.failUnlessEqual(self.usage.opts['eggint'], None)\n self.failUnlessEqual(self.usage.opts['eggfloat'], None)", "def set_log_option_default(self, value):\n self._log_option_default = value", "def _create_defaults(self):\n return DefaultCommandOptionValues(\n min_confidence=3, output_format='vs7')", "def _options_commandline_overrides(options):\n cmdline_values = {\n 'run_storage_base': options.run_storage_base,\n 'watch': options.watch,\n 'verbose': options.verbose,\n 'uploader_config': options.uploader_config,\n 'logging_config': options.logging_config,\n }\n\n # Commandline options override any value in the config file.\n for k, v in cmdline_values.items():\n if v is not None:\n options[k] = v\n\n return options", "def defaultargs(options):\n config = {}\n for longname, default, _ in options:\n config[longname] = default\n return config", "def __copy_options(user_options, default_options):\n for k in default_options.keys():\n if k not in user_options.keys():\n user_options[k] = default_options[k]\n return user_options", "def setdefaults(self): # 3\n res = self.__obj.setdefaults()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def set_options(*args, **kwargs):\n for option in kwargs:\n if option not in BasePlan.options:\n raise BadOption('%s is not a valid, must be a combination '\n 'of %s' % (option, ','.join(BasePlan.options.keys(),)))\n BasePlan.options.update(kwargs)", "def set_default(self, section, option, value=None):\r\n self.defaults[section, option] = value", "def test_set_defaults(self):\r\n self.assertEqual(self.config.values['option1'], 1337)\r\n self.assertNotIn('option2', self.config.values)", "def set_default(self, opt_names = None):\n if opt_names == None:\n TCInit(self)\n else:\n if isinstance(opt_names, str):\n opt_names = [opt_names]\n for k in opt_names:\n if isinstance(k, str):\n self.__delattr__(k)", "def _backend_opts_set(self, val):\n self._dflt_backend_opts = val\n return", "def _get_default_options():\n return {\n \"library_folders\": [],\n \"verbose\": False,\n \"check_balanced\": True,\n \"mtime_check\": True,\n \"cache\": False,\n \"codegen\": False,\n \"expand_mx\": False,\n \"unroll_loops\": True,\n \"inline_functions\": True,\n \"expand_vectors\": False,\n \"resolve_parameter_values\": False,\n \"replace_parameter_expressions\": False,\n \"replace_constant_expressions\": False,\n 
\"eliminate_constant_assignments\": False,\n \"replace_parameter_values\": False,\n \"replace_constant_values\": False,\n \"eliminable_variable_expression\": None,\n \"factor_and_simplify_equations\": False,\n \"detect_aliases\": False,\n \"allow_derivative_aliases\": True,\n \"reduce_affine_expression\": False,\n }", "def set_default(self, name, default, group=None):\n opt_info = self._get_opt_info(name, group)\n opt_info['default'] = self._get_enforced_type_value(\n opt_info['opt'], default)\n opt_info['location'] = LocationInfo(\n Locations.set_default,\n _get_caller_detail(3), # this function has a decorator to skip\n )", "def initialize_options(self):\n self.base_dir = getcwd()\n self.output_dir = getcwd()\n self.release = None\n self.tag_prefix = 'v'\n self.version = VERSION", "def reset(self):\n self.setOptions(self._options)", "def setOptions(self):\n self.parser.add_option( \"--outputdir\",\n dest = \"outdir\",\n default = None,\n help = \"Directory to write JSON summary to.\" )\n\n self.parser.add_option( \"--dbs\",\n dest = \"usedbs\",\n default = 'no',\n help = \"Use information in DBS to build the input lumi lists and the output lumi lists.\"+\\\n \" Allowed values are yes/no. Default is no.\" )", "def initialize_options(self):\n self.all = False\n self.coverage = False\n super(test, self).initialize_options()", "def create_default_options():\n parser = OptionParser()\n add_option_group(parser)\n options, args = parser.parse_args([])\n return options", "def set_default_values(self, option_ids):\n independent_option_ids = [option_id for option_id in option_ids if option_id.at is None]\n dependent_option_ids = [option_id for option_id in option_ids if option_id.at is not None]\n\n def set_defaults(option_ids):\n for option_id in [option_id for option_id in option_ids if option_id.has_default]:\n if option_id.at is not None:\n entities = self.get(option_id.at)\n for entity in entities:\n if not self._option_key(option_id, entity) in self._config:\n self.set(option_id, option_id.default, 0, 'default', entity)\n else:\n if not self._option_key(option_id) in self._config:\n self.set(option_id, option_id.default, 0, 'default')\n\n set_defaults(independent_option_ids)\n set_defaults(dependent_option_ids)", "def _initialize_defaults(self):\n for key, value in defaults.items():\n if key not in self.source_params:\n self.source_params[key] = value", "def _initialize_defaults(self):\n for key, value in defaults.items():\n if key not in self.source_params:\n self.source_params[key] = value", "def set_default_parameters(self):\n super().set_default_parameters()", "def _options(self):\n return", "def load_defaults(self):\n self.set_motor_limits(self.MOTOR_LEFT, self.LEFT_DEFAULT)\n self.set_motor_limits(self.MOTOR_RIGHT, self.RIGHT_DEFAULT)\n self.set_servo(self.SERVO_1, self.MIDPOINT)", "def load_defaults(self):\n self.set_motor_limits(self.MOTOR_LEFT, self.LEFT_DEFAULT)\n self.set_motor_limits(self.MOTOR_RIGHT, self.RIGHT_DEFAULT)\n self.set_servo(self.SERVO_1, self.MIDPOINT)", "def load_defaults(self):\n self.set_motor_limits(self.MOTOR_LEFT, self.LEFT_DEFAULT)\n self.set_motor_limits(self.MOTOR_RIGHT, self.RIGHT_DEFAULT)\n self.set_servo(self.SERVO_1, self.MIDPOINT)", "def set_missing_defaults(self):\n if 'pub_options' not in self.config:\n self.config['pub_options'] = {\n 'acknowledge': True,\n 'retain': True\n }\n\n if 'sub_options' not in self.config:\n self.config['sub_options'] = {\n 'get_retained': False\n }\n\n if 'subscribed_topics' not in self.config:\n 
self.config['subscribed_topics'] = None\n\n if 'replay_events' not in self.config:\n self.config['replay_events'] = False\n\n if 'max_reconnect_retries' not in self.config:\n self.config['max_reconnect_retries'] = 10", "def options_set(self):\n\n global OPTIONS\n OPTIONS.append(config.ENABLE(self.threaded))\n OPTIONS.append(config.ENABLE(self.datasaver))\n OPTIONS.append(self.language)", "def initialize(self, options):", "def setOptions(self):\n self.parser.add_option(\"--jobid\",\n dest=\"jobid\",\n default=None,\n type=\"int\",\n help=\"Optional id of the job you want to execute locally\")\n\n self.parser.add_option(\"--enableStageout\",\n dest=\"enableStageout\",\n default=False,\n action=\"store_true\",\n help=\"After the job runs copy the output file on the storage destination\")\n\n self.parser.add_option(\"--destdir\",\n dest=\"destdir\",\n default=None)", "def options(self, parser, env):\n pass", "def defaults(self):\n self.lib.iperf_defaults(self._test)", "def set_options(self, **options):\n self.source = options['source'] or settings.MEDIA_ROOT\n self.container = options['container'] or ls.AZURE_DEFAULT_CONTAINER\n self.verbosity = int(options.get('verbosity', 1))\n ignore_patterns = options['ignore_patterns']\n if options['use_default_ignore_patterns']:\n ignore_patterns += ['.*', '*~']\n self.ignore_patterns = list(set(ignore_patterns))\n self.dir = options['dir']", "def common_options(self, common_options):\n self._common_options = common_options", "def init_opt(self):\n raise NotImplementedError", "def init_opt(self):\n raise NotImplementedError", "def init_opt(self):\n raise NotImplementedError", "def init_opt(self):\n raise NotImplementedError", "def restoreDefaults(self):\n # preserve `_options` if set by clients (for `reset`).\n opts = self._options\n self.setOptions(Options(\"utf-8\", csv.excel()))\n self._options = opts", "def _add_default_options(self, argv):\r\n rc_filename = self._rc_filename()\r\n\r\n options = argv\r\n\r\n if self.IGNORE_RC_FLAG not in argv and os.path.exists(rc_filename):\r\n command = self._command or self.NO_COMMAND\r\n rc_config = ConfigParser.SafeConfigParser()\r\n rc_config.read(rc_filename)\r\n\r\n if rc_config.has_option(command, self.OPTIONS):\r\n default_options_str = rc_config.get(command, self.OPTIONS)\r\n default_options = shlex.split(default_options_str, True)\r\n options = default_options + options\r\n\r\n return options", "def override_from_parsed_args(self, parsed_args):\n arg_values = {\n o.name: getattr(parsed_args, o.name)\n for o in _OPTIONS\n if getattr(parsed_args, o.name, None) is not None\n }\n if arg_values:\n LOG.info('[config] updating from command line options')\n self.override(**arg_values)", "def __init__(self, s=None, unchecked=False):\r\n # if not Options.defaults: # this is different from self.defaults!!!\r\n # Options.defaults = fmin([],[])\r\n if s is None:\r\n super(Options, self).__init__(Options.defaults())\r\n # self = Options.defaults()\r\n elif type(s) is str:\r\n super(Options, self).__init__(Options().match(s))\r\n # we could return here\r\n else:\r\n super(Options, self).__init__(s)\r\n\r\n if not unchecked:\r\n for key in list(self.keys()):\r\n if key not in Options.defaults():\r\n print('Warning in cma.Options.__init__(): invalid key ``' + str(key) + '`` popped')\r\n self.pop(key)\r\n # self.evaluated = False # would become an option entry\r", "def set_option(self, dest, value, force=True):\r\n if hasattr(self._option_values, dest) and not force:\r\n return\r\n setattr(self._option_values, dest, 
value)", "def set_initial_values(self):\n\n pass", "def reset_device_options_to_default(self, options):\n e = ctypes.POINTER(rs_error)()\n count = len(options)\n option_array_type = ctypes.c_int * count\n lrs.rs_reset_device_options_to_default.argtypes = [ctypes.POINTER(rs_device),\n option_array_type,\n ctypes.c_int,\n ctypes.POINTER(ctypes.POINTER(rs_error))]\n lrs.rs_reset_device_options_to_default.restype = None\n c_options = option_array_type(*options)\n lrs.rs_reset_device_options_to_default(self.dev, c_options, count, ctypes.byref(e))\n _check_error(e)", "def set_options(self, options):\n self._set_steps(options.get('bounds', [(0,1)]), options.get('steps',2))", "def __init__(self, values = None):\n TCInit(self)\n if values is not None:\n self.set_opts(values)", "def set_option(self, **kwargs):\n option = self.dismod_file.option\n unknowns = list()\n for name in kwargs.keys():\n if not (option.option_name == name).any():\n unknowns.append(name)\n if unknowns:\n raise KeyError(f\"Unknown options {unknowns}\")\n\n for name, value in kwargs.items():\n if isinstance(value, str):\n str_value = value\n elif isinstance(value, Iterable):\n str_value = \" \".join(str(x) for x in value)\n elif isinstance(value, bool):\n str_value = str(value).lower()\n elif value is None or isnan(value):\n str_value = None\n else:\n str_value = str(value)\n if str_value is not None:\n option.loc[option.option_name == name, \"option_value\"] = str_value\n else:\n option.loc[option.option_name == name, \"option_value\"] = nan\n option = option.reset_index(drop=True)\n option = option.assign(option_id=option.index)\n self.dismod_file.option = option", "def setup_defaults(self):\n status = self._lib_vscf_ecc.vscf_ecc_setup_defaults(self.ctx)\n VscfStatus.handle_status(status)", "def options(self, parser):\n pass", "def setDefault(self):\n for attr in self._filter():\n if attr.attrName() in ('tx', 'ty', 'tz', 'rx', 'ry', 'rz'):\n defaultValue = 0\n elif attr.attrName() in ('sx', 'sy', 'sz', 'v'):\n defaultValue = 1\n else:\n defaultValue = pm.addAttr(attr, query=True, defaultValue=True)\n\n if not (defaultValue is None):\n try:\n attr.set(defaultValue)\n except RuntimeError as message:\n pm.warning(message.message[:-1])", "def initialize_options(self):\n self.branch = \"master\"\n self.fix = False\n super(lint, self).initialize_options()", "def defaults(options):\n options = copy.deepcopy(options)\n\n # add defaults: device, threshold, and objective terms\n options = {\n \"device\": \"cuda:0\",\n \"threshold\": -0.5,\n \"scorers\": {},\n **options\n }\n\n assert all(key in options for key in [\n \"device\", \"threshold\", \"datasets\", \"features\", \"feeds\",\n \"scorers\", \"objective_terms\", \"model\", \"stages\", \"stage-order\"\n ])\n\n # check that all stages are present\n assert all(stage in options[\"stages\"] for stage in options[\"stage-order\"])\n\n # fix optional settings in stages\n for settings in options[\"stages\"].values():\n # required: \"feed\", \"n_epochs\", \"model\", \"objective\"\n # optional: \"snapshot\", \"restart\", \"reset\", \"grad_clip\",\n # \"optimizer\", \"early\", \"lr_scheduler\"\n\n settings.update({\n \"snapshot\": None,\n \"restart\": False,\n \"reset\": False,\n \"grad_clip\": 0.5,\n \"early\": None,\n \"lr_scheduler\": {\n \"cls\": \"<class 'cplxpaper.musicnet.lr_scheduler.Trabelsi2017LRSchedule'>\"\n },\n \"optimizer\": {\n \"cls\": \"<class 'torch.optim.adam.Adam'>\",\n \"lr\": 0.001,\n \"betas\": (0.9, 0.999),\n \"eps\": 1e-08,\n \"weight_decay\": 0,\n \"amsgrad\": 
False\n },\n **settings\n })\n\n assert all(key in settings for key in [\n \"snapshot\", \"feed\", \"restart\", \"reset\", \"n_epochs\",\n \"grad_clip\", \"model\", \"lr_scheduler\", \"optimizer\",\n \"objective\", \"early\"\n ])\n\n return options", "def set_defaults(self):\n self.plastic = False\n self.unset_output()\n self.reward = False\n self.patmod = config.impact_modulation_default", "def _CommonOptions(self, p):\n super()._CommonOptions(p, opt_v=False)", "def initDefaults(self):\n return _libsbml.Parameter_initDefaults(self)", "def set_default_option(self, name: str):\n\n if (name not in self.parent.options) and (self.is_available(name)):\n self.parent.options[name] = self.get_app_cfg_by_name(name)" ]
[ "0.82235515", "0.76142746", "0.7325896", "0.7278611", "0.7278611", "0.7222484", "0.71993154", "0.7152846", "0.709889", "0.70756227", "0.70539", "0.70520246", "0.704013", "0.6966003", "0.6921334", "0.6866702", "0.68231", "0.68159455", "0.6777279", "0.67474914", "0.67449015", "0.67381936", "0.6731898", "0.67307144", "0.67055404", "0.6697194", "0.66507465", "0.66507465", "0.6607851", "0.65927047", "0.6588083", "0.6581753", "0.6575515", "0.65749913", "0.6544922", "0.6533218", "0.65129954", "0.65066457", "0.6501926", "0.6500938", "0.650048", "0.64960694", "0.64772075", "0.64468235", "0.6444045", "0.6415319", "0.6376242", "0.63754094", "0.6361395", "0.6358585", "0.63534945", "0.63290966", "0.6327601", "0.62948364", "0.6281722", "0.62557214", "0.62541616", "0.6247314", "0.6246217", "0.62258685", "0.6225058", "0.618248", "0.6177375", "0.6177375", "0.6163553", "0.6156017", "0.6140042", "0.6140042", "0.6140042", "0.61304134", "0.61214924", "0.61201465", "0.61163324", "0.6105188", "0.6097745", "0.6085022", "0.6082439", "0.60811406", "0.60811406", "0.60811406", "0.60811406", "0.6072165", "0.6071151", "0.60680616", "0.60667205", "0.60653794", "0.60647774", "0.60510266", "0.60417914", "0.60227096", "0.60161173", "0.60043436", "0.59997433", "0.5995798", "0.59858054", "0.5973661", "0.59568274", "0.5954031", "0.5938399", "0.5914776" ]
0.64381355
45
Compute count of values for Sx=sum 0<=p<x a_n % M
def hist326(T,P,M): H=[0]*M S=0 T+=1 reps,rem=T//P,T%P for x in range(P): n=reps if x<rem: n+=1 H[S]+=n e=e326(x+1) S=(S+e)%M return sum([h*(h-1)/2 for h in H])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def countm(m):\n nfound=0\n\n for i in range(1,m+1):\n for jpk in range(2,(2*i)+1):\n d1=i*i+(jpk)*(jpk) \n if(checkpsq(d1)): \n if(jpk<=i):\n factor=jpk/2 \n else:\n factor=((2*i-jpk)+2)/2 \n nfound=nfound+factor\n\n return nfound", "def countbrute(m):\n nfound=0\n\n for i in range(1,m+1):\n for j in range(1,i+1):\n for k in range(1,j+1):\n d1=i*i+(j+k)*(j+k) \n if(checkpsq(d1)):\n nfound=nfound+1\n\n return nfound", "def N_out(K,P,S,N_in):\n return (int((N_in+2*P-K)/S)+1)", "def expected_counts(p0, T, n): \n M=T.shape[0]\n if n<=M:\n return ec_matrix_vector(p0, T, n)\n else:\n return ec_geometric_series(p0, T, n)", "def point_count(N, S):\n\n x, y = make_grid(N)\n\n xc, yc = np.zeros_like(x), np.zeros_like(y)\n # grids for holding result of mandelbrot check\n \n z_binary = np.zeros( (N, N) )\n z_density = np.zeros( (N, N) )\n\n for (xi, i) in zip(x, xrange(N)):\n for (yi, j) in zip(y, xrange(N)):\n\n z = 0 ; s = 0\n c = complex( xi , yi ) \n abs_z = np.sqrt( z*z.conjugate() )\n # initial values for z, c, |z|, and step count\n\n for k in xrange(S):\n\n if abs_z > 2:\n break\n else:\n z_prim = z*z + c\n abs_z = np.sqrt( z_prim*z_prim.conjugate() )\n z = z_prim \n s += 1\n z_density[j, i] += 1\n \n\n\n if abs_z < 2:\n z_binary[j, i] = 1\n \n return z_binary, z_density", "def problem2(m, p):\n total = 0\n for k in range(m, m ** p):\n if is_prime(k):\n total = total + sum_of_digits(k)\n return total", "def count(seq):\n\treturn sum(1 for x in seq)", "def count_partitions(n, m):\n # print(n, m)\n if n == 0:\n return 1\n elif n < 0:\n return 0\n elif m == 0:\n return 0\n else:\n return count_partitions(n-m, m) + count_partitions(n, m//2)", "def _linear_count(self, empty_registers):\n return self._m * math.log(self._m / empty_registers)", "def count(r, c, k):\n\t\tif 0 <= r < M and 0 <= c < N:\n\t\t\tif A[r][c] == target[k]:\n\t\t\t\t\"*** YOUR CODE HERE ***\"\n\t\t\t\treturn memoized_count(r, c, k)\n\t\t\telse:\n\t\t\t\treturn 0\n\t\telse:\n\t\t\treturn 0", "def n_per_item(self):\n return self.lam().sum(axis=0)", "def calc_score(pins_stats):\n count = 0\n new = pins_stats[:, :2] - ORIG_PINS_LOC\n for p in new:\n if np.linalg.norm(p) > R_PIN / 2:\n count += 1\n return count", "def fn(i, j, mv):\n if not (0 <= i < m and 0 <= j < n): return 1 \n if mv == 0: return 0\n return (fn(i-1, j, mv-1) + fn(i, j-1, mv-1) + fn(i, j+1, mv-1) + fn(i+1, j, mv-1)) % 1_000_000_007", "def counts(e, x):\n arr = np.asarray(arr)\n return len(np.where(arr == x)[0])", "def gal_count(clusters):\n sum = 0\n for x in clusters:\n sum += x.ngal\n return sum", "def compute_ps_mass(ps):\n\treturn sum(AA_mass_table[it] for it in ps)", "def specht(mu):\n return StandardTableaux(mu).cardinality().n()", "def count_parms(self):\n min_freq = self.get_high_pass_index()\n rejection = self.rejection_at(np.arange(min_freq, self.nf))\n if rejection.ndim < 2:\n return np.sum(rejection)\n else:\n return np.sum(rejection, axis=1)", "def base_binom_num(x,n0):\n res = stats.binom.pmf(range(n0+1), n0, 1/2.0) \n a = 0 \n for i in range(n0+1):\n if i <= x:\n a = a +res[i]\n return a", "def count():", "def get_count_of_elements_by_condition(sequence):\n elements_and_indexes = {sequence[i]: i + 1\n for i in range(1, len(sequence) - 1)}\n filtered_values = filter(lambda element:\n 2 ** element[1] < element[0] < math.factorial(element[1]),\n elements_and_indexes.items())\n return len(dict(filtered_values))", "def num_divisors_iii(n):\n set_pf = set(n)\n n_div = 1\n for pf in set_pf:\n x = n.count(pf)\n n_div *= (1 + x)\n return n_div", "def 
count_partitions(n, m):\n if n == 0:\n return 1\n elif n < 0:\n return 0\n elif m == 0:\n return 0\n else:\n with_m = count_partitions(n - m, m)\n without_m = count_partitions(n, m - 1)\n return with_m + without_m", "def _numconditions(self, node_p):\n node_p = self.getnodenamed(node_p) # Verify pointer.\n parents = self.getnodeparents(node_p) # nl_p\n nparents = self.lengthnodelist(parents) # integer\n # Get the number of states in each parent node, multiply them\n npstates = 1\n for idx in range(nparents):\n node_i = self.nthnode(parents, idx)\n nstates_i = self.getnodenumberstates(node_i)\n npstates *= nstates_i\n\n return npstates", "def expected_counts_stationary(T, n, mu=None):\n if n<=0:\n EC=np.zeros(T.shape)\n return EC\n else:\n if mu is None:\n mu=statdist(T)\n EC=n*mu[:, np.newaxis]*T\n return EC", "def count_primes(n):\n i, total = 1, 0\n while i <= n:\n if is_prime(i):\n total += 1\n i += 1\n return total", "def consecutiveNumbersSum(self, N):\n\n count = 0\n # nmax = int(-1 + sqrt(1+8*N)/2)\n # print(nmax)\n n = 1\n n2 = n*(n-1)/2 + n\n while n2 <= N:\n if (N-n2) % n == 0:\n # print(n)\n count += 1\n n += 1\n n2 = n*(n-1)/2 + n\n\n # Note N-(n2-n) % n == N-n2 % n\n return count", "def fn(x, mask):\n ans = size = 0 \n for xx in range(1, 10): \n if not mask & (1 << xx): \n if (x, xx) not in mp or mask & 1 << mp[x, xx]: \n ans += fn(xx, mask^(1<<xx))\n size += 1\n size = 9 - size\n if m <= size <= n: ans += 1\n return ans", "def how_many(e, x):\n return count(np.asarray(x) == e)", "def ncusps(self):\n n = self.level()\n return sum([arith.euler_phi(arith.gcd(d,n//d)) for d in n.divisors()])", "def calDominationCount(p,visitedPoints):\n isDominated = utils.MultiThread(utils.dominating, zip([visitedPoints[k].mean for k in visitedPoints],repeat(p.mean)))\n dominationCount = sum(isDominated)\n print('Please _cutils.calDominantionCount(). 
This method is too slow.')\n return dominationCount", "def nPos(self):\n running_total = 0\n for i in range(self.prob.num):\n if self.alphas[i] > 1e-5 and self.prob.Y[i] == 1:\n running_total += 1\n return running_total if running_total > 0 else 1", "def prime_pi(n):\n if n < 2:\n return 0\n\n primes = sieve(n)\n return len(primes)", "def runcount(test_keys, sigma, sigma_max, sigma_step,\n npoints_min, npoints_max, npoints_step):\n run = 1\n for key in test_keys:\n if key:\n while sigma < sigma_max:\n npoints = npoints_min\n while npoints < npoints_max:\n npoints += npoints_step\n run += 1\n sigma += sigma_step\n return run", "def sum_n_m(n, m):\n total = 0\n for i in range(n, m+1):\n total += i\n return total", "def num_divisors_ii(n):\n set_pf = set(n)\n n_og = 2**(len(set_pf))\n n_div = n_og\n for pf in set_pf:\n x = n.count(pf)\n n_div += n_div//2 * (x - 1)\n return n_div", "def n_pos(self):\n running_total = 0\n for i in range(self.prob.num):\n if self.alphas[i] > 1e-5 > self.prob.C - self.deltas[i] and self.prob.Y[i] == 1:\n running_total += 1\n return running_total if running_total > 0 else 1", "def n_pos(self):\n running_total = 0\n for i in range(self.prob.num):\n if self.alphas[i] > 1e-5 > self.prob.C - self.deltas[i] and self.prob.Y[i] == 1:\n running_total += 1\n return running_total if running_total > 0 else 1", "def dim_reduction( M ):\n tot_count_per_type = M.sum(axis = 1)\n tot_count = float(tot_count_per_type.sum())\n sorted_index = np.argsort(tot_count_per_type)\n threshold = 0.01\n accu = 0\n for i in range(len(sorted_index)):\n perc = float(tot_count_per_type[sorted_index[i]])/tot_count\n accu = accu + perc\n if accu > threshold:\n break;\n \n return sorted_index[0:i]", "def pmi(colloc_count, c_pattern, c_lw, n):\r\n score = log10((n * colloc_count / (c_pattern * c_lw)))\r\n return score", "def monte_carlo_estimate(self,var,e,n):\n\t\tncpt = self.variables[var].cpt\n\t\tncount = dict((value,0) for value in ncpt.values())\n\t\tesum = 0\n\t\tfor iter in xrange(n):\n sample = self.monte_carlo_sample()\n if all(sample[key]==value for (key,value) in e.iteritems()):\n #sample agrees with e\n ncount[sample[var]] += 1\n esum += 1\n if esum==0: return 'Undefined'\n for value in ncount.iterkeys():\n ncount[value] = float(ncount[value])/float(esum)\n return ncount", "def cal_pn(grams_set, grams, candidate, reference):\n count = 0\n for gram in grams_set:\n # print(gram)\n count += count_clip(gram, grams, reference)\n # calculate log() for p, so '+10**-8' avoid 'p==0'\n p = count / len(grams) + 10**-8 \n return p", "def binomial(n, p):\n sum_ans = 0\n for k in range(n):\n sum_ans = sum_ans + bernoulli(p)\n return sum_ans", "def summation_of_primes():\n \n k=1\n acc=2\n for x in range(2000000):\n if x!=0 and x%2!=0 and x%4!=0 and x%6!=0 and x%8!=0 and x%10!=0:\n k=1\n for m in range(x):\n if x!=1 and m!=0 and m!=1 and x%m==0 and x!=m:\n k=2\n if k==1 and x!=1 and x%2!=0 and x%4!=0: #and y!=2:\n acc=acc+x\n #print str(acc)+' THIS IS ACC\"\"\"\n print x\n return acc", "def support_false_positive_count(m, m_hat):\n m_nnz, m_hat_nnz, intersection_nnz = _nonzero_intersection(m, m_hat)\n return int((m_hat_nnz - intersection_nnz) / 2.0)", "def sumaPar(self,numSeg,w):\n total=0\n for i in range(2,numSeg-1,2):\n total+=2*self.F(i*w)\n return total", "def ap_entropy(X, M, R):\n\tN = len(X)\n\n\tEm = embed_seq(X, 1, M)\t\n\tEmp = embed_seq(X, 1, M + 1) #\ttry to only build Emp to save time\n\n\tCm, Cmp = np.zeros(N - M + 1), np.zeros(N - M)\n\t# in case there is 0 after counting. 
np.log(0) is undefined.\n\n\tfor i in range(0, N - M):\n#\t\tprint i\n\t\tfor j in range(i, N - M): # start from i, self-match counts in ApEn\n#\t\t\tif max(abs(Em[i]-Em[j])) <= R:# compare N-M scalars in each subseq v 0.01b_r1\n\t\t\tif in_range(Em[i], Em[j], R):\n\t\t\t\tCm[i] += 1\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t### Xin Liu\n\t\t\t\tCm[j] += 1\n\t\t\t\tif abs(Emp[i][-1] - Emp[j][-1]) <= R: # check last one\n\t\t\t\t\tCmp[i] += 1\n\t\t\t\t\tCmp[j] += 1\n\t\tif in_range(Em[i], Em[N-M], R):\n\t\t\tCm[i] += 1\n\t\t\tCm[N-M] += 1\n\t\t# try to count Cm[j] and Cmp[j] as well here\n\t\n#\t\tif max(abs(Em[N-M]-Em[N-M])) <= R: # index from 0, so N-M+1 is N-M v 0.01b_r1\n#\tif in_range(Em[i], Em[N - M], R): # for Cm, there is one more iteration than Cmp\n#\t\t\tCm[N - M] += 1 # cross-matches on Cm[N - M]\n\t\n\tCm[N - M] += 1 # Cm[N - M] self-matches\n#\timport code;code.interact(local=locals())\n\tCm /= (N - M +1 )\n\tCmp /= ( N - M )\n#\timport code;code.interact(local=locals())\n\tPhi_m, Phi_mp = sum(np.log(Cm)), sum(np.log(Cmp))\n\n\tAp_En = (Phi_m - Phi_mp) / (N - M)\n\n\treturn Ap_En", "def countArrangement(self, n: int) -> int:\n @lru_cache(None)\n def dfs(i, remains: Set[int]):\n if i == n+1:\n return 1\n cnt = 0\n for j in remains:\n if i%j == 0 or j%i == 0:\n cnt += dfs(i+1, remains - {j})\n return cnt\n\n return dfs(1, frozenset(range(1, n+1)))", "def count(self,value = 1):\n n = 0\n for s in self.sample:\n if s == value:\n n += 1\n return n", "def counts(indices, result=None):\n if result is None:\n max_val = indices.max()\n result = zeros(max_val+1)\n for i in indices:\n result[i] += 1\n return result", "def compute_ppmi(a):\n np.fill_diagonal(a, 0)\n a = my_scale_sim_mat(a)\n (p, q) = np.shape(a)\n col = np.sum(a, axis=0)\n col[col == 0] = 1\n ppmi = np.log((float(p)*a)/col[None, :])\n idx_nan = np.isnan(ppmi)\n ppmi[idx_nan] = 0\n ppmi[ppmi < 0] = 0\n return ppmi", "def count_square_sums(n):\n if n == 0: return 1\n total = 4*( sum(1 for i in divisors(n) if i % 4 == 1) \n - sum(1 for i in divisors(n) if i % 4 == 3) )\n ## Remove duplicate countings if n > 0\n ## Eight duplicates: (+/-a, +/-b) (+/-b, +/-a) \n ## Four duplicates: (0,+1), (0,-1), (+1,0), (-1,0)\n ## Four duplicates: (+/-1,+/-1)\n flg = 0\n if is_square(n): flg += 1\n if is_square(n/2) and (n % 2 == 0): flg += 1\n return (total + 4*flg)/8", "def calculate_probability(k: int, m: int, n: int) -> float:\n population = [\"AA\" for _ in range(k)] + [\"Aa\" for _ in range(m)] + [\"aa\" for _ in range(n)]\n pairings = it.combinations(population, 2)\n probabilities = [PROBABILITIES[pairing] for pairing in pairings]\n output = sum(probabilities) / len(probabilities)\n\n return output", "def getCount(self, event):\n # Attempt 2: Still too slow\n count = 0\n \n for mEvent in self:\n if event.__st__(mEvent):\n count += 1\n \n return count\n \n # Attempt 1: Too slow\n #return reduce((lambda x, y: x+y),\n # map((lambda i: itemset <= i), self))", "def farey_count(n, a, b, num, denom):\n count = 0\n c, d = n / b, n - 1\n while c != num and d != denom:\n k = int((n + b) / d)\n a, b, c, d = c, d, (k*c-a), (k*d-b)\n count += 1\n return count", "def sopf(n, primes):\r\n total = 0\r\n for p in primes:\r\n if n % p == 0:\r\n total += p\r\n while n // p == 0:\r\n n //= p\r\n return total", "def count_k(n, k):\n if n == 0:\n return 1\n elif n < 0:\n return 0\n else:\n total = 0\n i = 1\n while i <= k:\n total += count_k(n - i, k)\n i += 1\n return total", "def compute_anon_set_size(m):\n return (m > 0).sum(axis=1, keepdims=True)", 
"def base_binom_pro(pro,n0):\n res = stats.binom.pmf(range(n0+1), n0, 1/2.0)\n a = 0\n for i in range(n0+1):\n a = a + res[i]\n if a>=pro: \n return i", "def CountPred(self, node):\n if node.children: \n res = 0;\n for child in node.children:\n res += self.CountPred(child);\n return res;\n else:\n if node.tag.startswith(\"V\"):\n self.vv += 1;\n if len(node.arguments) != 0:\n self.vpred += 1;\n return 1;\n else:\n return 0;", "def csize(grades):\n\tp = 0\n\tfor k in grades:\n\t\tl = _comb(n,k)\n\t\tp += l\n\treturn p", "def csize(grades):\n\tp = 0\n\tfor k in grades:\n\t\tl = _comb(n,k)\n\t\tp += l\n\treturn p", "def fn(x, par):\n c = s = 0\n for xx in graph.get(x, []): \n if xx != par: \n cc, ss = fn(xx, x)\n c, s = c + cc, s + ss + cc\n size[x] = c + 1\n return c + 1, s", "def P(N,L) :\n accum = 0.0\n sign = 1.0\n for i in xrange(len(L)+1) :\n accum2 = 0.0\n for combin in combinations(L,i) :\n term = 1.0\n j = 0.0\n for Li in combin :\n term *= Li/(N-j)\n j += 1\n accum2 += term\n accum += sign*accum2\n sign *= -1.0\n return accum", "def parallel_count_calculate_func(cls, parallel_count):\n if parallel_count == 0:\n result = 1\n\n else:\n count = float(0.0)\n\n for number in range(int(parallel_count)):\n count += float(1 / (float(number) + 1))\n\n result = pow(count, (-1))\n\n return result", "def fn(i):\n if i == 0: return 1 # boundary condition \n ans = 0\n for k in range(1, N+1): \n if k not in seen and (k%i == 0 or i%k == 0): \n seen.add(k)\n ans += fn(i-1)\n seen.remove(k)\n return ans", "def n_suma(a1,nr_wyrazu,r):\n return (2*a1+(nr_wyrazu-1))*nr_wyrazu/2", "def calc_priors(categories, data):\n counts = np.zeros(categories)\n for val in range(categories):\n counts[val] = np.count_nonzero(data.labels == val)\n return counts / len(data.labels)", "def count_pegs(self):\r\n count = 0\r\n\r\n for i in range(0, len(self.matrix)):\r\n for j in range(0, len(self.matrix[i])):\r\n if self.matrix[i][j] == \"1\":\r\n count += 1\r\n\r\n return count", "def compute_PSSM_self_information(p):\n return -sp.sum(p*sp.log(p))", "def get_p_at_n_in_m(data, n, k, ind):\n pos_score = data[ind][0]\n curr = data[ind:ind + n]\n curr = sorted(curr, key=lambda x: x[0], reverse=True)\n if curr[k - 1][0] <= pos_score:\n return 1\n return 0", "def count_pairs(assignments, v1, v2, M):\n assert v1 != v2\n pairs = assignments[:, v1].astype(np.int32) * M + assignments[:, v2]\n return np.bincount(pairs, minlength=M * M).reshape((M, M))", "def count_nonzero(a):\n return (np.count_nonzero(a))", "def fn(k, i, j):\n if not (0 <= i < N and 0 <= j < N): return 0\n if k == 0: return 1 \n return 1/8*sum(fn(k-1, i+ii, j+jj) for ii, jj in ((-2, -1), (-2, 1), (-1, -2), (-1, 2), (1, -2), (1, 2), (2, -1), (2, 1)))", "def page_count(n: int, p: int) -> int:\n\n front = p // 2\n back = n // 2 - p // 2\n\n return min([front, back])", "def count_NN(KL):\n zvals = (KL != 0).sum(1)\n return zvals", "def freq_counts(self, arrs, lens):\n no_nans = reduce(np.logical_and, [~np.isnan(a) if bn.anynan(a) else np.ones(self.m).astype(bool) for a in arrs])\n combined = reduce(add, [arrs[i][no_nans]*reduce(mul, lens[:i]) for i in range(1, len(arrs))], arrs[0][no_nans])\n return np.bincount(combined.astype(np.int32, copy=False), minlength=reduce(mul, lens)).astype(float)", "def count(x):\n return sum(len(y) for y in x)", "def probability_of_generating_containing_events(M, s):\n\n # initialize the probabilities of generating containing events.\n f = 1\n\n s_with_inclusion_probabilities = inclusion_probability(M, s)\n for v, p in 
s_with_inclusion_probabilities.items():\n f *= p\n\n return f", "def __len__(self):\n # Product function that can handle iterables (np.product can't).\n product = partial(reduce, operator.mul)\n return sum(product(len(v) for v in p.values()) if p else 1\n for p in self.param_grid)", "def support_difference_count(m, m_hat):\n m_nnz, m_hat_nnz, intersection_nnz = _nonzero_intersection(m, m_hat)\n return int((m_nnz + m_hat_nnz - (2 * intersection_nnz)) / 2.0)", "def slow_kp(p, f):\n return sum(1 for n in range(p) if f(n) % p == 0)", "def count(self):\n return sum([self.bits[x][y] for x in range(self.n_rows)\n for y in range(self.n_columns)])", "def solution(A):\n \"\"\"method 2 n**2\n east=[] #0\n west=[] #1\n for i in range(len(A)):\n if A[i] == 0:\n east.append(i)\n else:\n west.append(i)\n\n result = 0\n for e in east:\n count = 0\n for j in range(len(west)):\n if e > west[j]:\n continue\n if e < west[j]:\n count = len(west) - j\n result += count\n #print(e, count)\n break\n return result\n \"\"\"\n east=[] #0\n west=[] #1\n l = len(A)\n for i in range(len(A)):\n if A[i] == 0:\n east.append(i)\n else:\n west.append(i)\n\n result = {}\n for i in range(len(east)):\n e = east[i]\n if i == 0:\n result[e] = l - e - len(east)\n if i != 0:\n result[e] = result[east[i-1]] - (e - east[i-1]-1)\n\n #print(result)\n s = sum(result.values())\n if s > 1000000000:\n return -1\n return s", "def __len__(self):\n # Product function that can handle iterables (np.product can't).\n product = partial(reduce, operator.mul)\n modstr = '%s__' % self.modality\n return sum(product(len(v) for k, v in p.items() if modstr in k) if p else 1\n for p in self.param_grid)", "def count(pred, l):\n nl = [i for i in range(0,len(l)) if pred(l[i])]\n\n return len(nl)", "def Z(n):\n count5 = 0\n i = 1\n while 1:\n a = pow(5, i)\n if a > n:\n return count5\n else:\n count5 += n/a\n i += 1", "def counter(): # Local function\n nonlocal count\n if count < n:\n count += 1\n return count", "def getTotalIndividualCount(self):\r\n return self._n", "def solution(n: int = 28123) -> int:\n\n nums = range(1, n+1)\n abundant = list(filter(is_abundant, nums))\n abundant_sums = set(all_sums(abundant, n))\n fit = set(nums) - abundant_sums\n return fit", "def count_divisors(n):\r\n if n == 1:\r\n return 0\r\n m = int(sqrt(n))\r\n c = 1\r\n if m * m == n:\r\n c += 1\r\n m -= 1\r\n for i in xrange(2, m+1):\r\n if n % i == 0:\r\n c += 2\r\n return c", "def fn(x, m):\n if x == 31: return int(m > 2)\n ans = fn(x+1, m)\n if freq[x] and masks[x]: \n if x == 1: ans *= 2**freq[x]\n elif not m & masks[x]: ans += freq[x] * fn(x+1, m | masks[x])\n return ans % 1_000_000_007", "def n(self):\n return sum(self._comp.values())", "def total_present_value_rule(_m):\r\n\r\n return sum(m.DELTA[y] * (m.INV[y] + m.FOM[y] + m.OP[y]) for y in m.Y) + m.EOH", "def _P(m):\n P = np.zeros((m**2,m**2), dtype=np.int64)\n for i in range(1, m**2 + 1):\n j = 1 + m*((i - 1) % m) + (i - 1)//m\n P[i-1, j-1] = 1\n return P", "def c(ixs):\n return sum(range(1, sum((i > 0 for i in ixs)) + 1))", "def count_cond(condition):\n \"*** YOUR CODE HERE ***\"\n def f(n):\n i, total = 1, 0\n while i <= n:\n if condition(n, i):\n total += 1\n i += 1\n return total\n return f", "def count(self,val):\n return sum(1 for e in self.frontierpq if e[0]==val)", "def _proba(y):\n N = len(y)\n _, counts = np.unique(y, return_counts=True)\n return counts / N", "def count_amino_acids(self):\n n = 0\n for chain in self.iter_chains():\n n += chain.count_amino_acids()\n return n", "def get_num_hit(boxes_truth, 
boxes_pred, is_hit):\n out = 0\n for tbox in boxes_truth:\n for pbox in boxes_pred:\n if is_hit(tbox, pbox):\n out += 1\n return out" ]
[ "0.7074451", "0.67225456", "0.6443518", "0.63969105", "0.6153251", "0.60965586", "0.6039756", "0.6003885", "0.59993243", "0.59983164", "0.5964119", "0.5954213", "0.5915635", "0.5903191", "0.5876021", "0.5867894", "0.5854033", "0.58404493", "0.5825537", "0.5816647", "0.5792668", "0.57594234", "0.57477176", "0.57458717", "0.57289594", "0.5712466", "0.57058775", "0.5699172", "0.56897986", "0.56828076", "0.56728107", "0.5669427", "0.5664058", "0.56629276", "0.5658356", "0.5656658", "0.5633596", "0.5633596", "0.5632762", "0.56270754", "0.56209815", "0.5613237", "0.56065345", "0.5591578", "0.55898917", "0.55864125", "0.5583683", "0.5579903", "0.5571264", "0.55647266", "0.5561489", "0.5560915", "0.55549675", "0.5553515", "0.55530876", "0.55447763", "0.5543465", "0.5541877", "0.55408525", "0.5533826", "0.5530036", "0.5530036", "0.5525764", "0.5524421", "0.5518491", "0.5517946", "0.5516819", "0.55087125", "0.5507449", "0.55059594", "0.55045533", "0.548793", "0.5485079", "0.5482898", "0.54825526", "0.5481056", "0.5480627", "0.54769075", "0.5473061", "0.54696083", "0.54645663", "0.5458173", "0.5451786", "0.54458874", "0.54457456", "0.5444297", "0.54432803", "0.5442951", "0.5438686", "0.54293215", "0.54281896", "0.5419827", "0.5417503", "0.5413828", "0.54137063", "0.54062957", "0.5403246", "0.540303", "0.53976226", "0.5395383", "0.53945327" ]
0.0
-1
Respond to keypress and mouse events.
def check_events(ship):
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n\n keys = set()\n mouse = set()\n old_mouse = set()\n\n # Set up game\n init()\n\n # Perform game loop\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT: sys.exit()\n if event.type == pygame.KEYDOWN: keys.add(event.key)\n if event.type == pygame.KEYUP: keys.discard(event.key)\n if event.type == pygame.MOUSEBUTTONDOWN: mouse.add(event.button)\n if event.type == pygame.MOUSEBUTTONUP: mouse.discard(event.button)\n \n game.update_context(keys, mouse)\n game.draw_context()\n pygame.display.update()\n pygame.display.flip()", "def handle_mouse_press(self, event):", "def events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.context.open=False\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n self.context.open=False\n if event.key == K_SPACE:\n self.setMode((self.mode+1)%3)\n #if event.key == K_f:\n # pygame.display.toggle_fullscreen()\n if event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == 4: self.context.draw.plane.zoom([1.1,1.1])\n if event.button == 5: self.context.draw.plane.zoom([0.9,0.9])", "def bindHotkeys(self):\r\n self.root.bind(\"s\",self.pause)\r\n self.root.bind(\"p\",self.play)\r\n self.root.bind(\"x\",self.stop)\r\n self.root.bind(\"<Right>\",lambda event, t=10: self.skipFor(event,t=t))\r\n self.root.bind(\"<Left>\",lambda event, t=-10: self.skipFor(event,t=t))\r\n self.bindDPHotkeys()", "def on_mouse_press(self, x, y, button, key_modifiers):\r\n pass", "def _on_key_press(self, event):", "def setShortcuts(self):\n self.game.app.accept('mouse1', self.onMouse1Down)\n self.game.app.accept('mouse3', self.onMouse2Down)\n self.game.app.accept('space', self.onSpaceBarClear)\n if self.enableMouseCamControl == 1:\n self.game.app.accept('wheel_up', self.onMouseWheelUp)\n self.game.app.accept('wheel_down', self.onMouseWheelDown)", "def _processEvents():\n global _mousel, _mousem, _mouser, _eventsflushed, _pushedEvents\n _eventsflushed = True\n events = _pushedEvents # get events from event.push\n _pushedEvents = [] # then clear the pushed events queue\n\n mouse = _ffi.new('TCOD_mouse_t *')\n libkey = _ffi.new('TCOD_key_t *')\n while 1:\n libevent = _lib.TCOD_sys_check_for_event(_lib.TCOD_EVENT_ANY, libkey, mouse)\n if not libevent: # no more events from libtcod\n break\n\n #if mouse.dx or mouse.dy:\n if libevent & _lib.TCOD_EVENT_MOUSE_MOVE:\n events.append(MouseMotion((mouse.x, mouse.y),\n (mouse.cx, mouse.cy),\n (mouse.dx, mouse.dy),\n (mouse.dcx, mouse.dcy)))\n\n mousepos = ((mouse.x, mouse.y), (mouse.cx, mouse.cy))\n\n for oldstate, newstate, released, button in \\\n zip((_mousel, _mousem, _mouser),\n (mouse.lbutton, mouse.mbutton, mouse.rbutton),\n (mouse.lbutton_pressed, mouse.mbutton_pressed,\n mouse.rbutton_pressed),\n (1, 2, 3)):\n if released:\n if not oldstate:\n events.append(MouseDown(button, *mousepos))\n events.append(MouseUp(button, *mousepos))\n if newstate:\n events.append(MouseDown(button, *mousepos))\n elif newstate and not oldstate:\n events.append(MouseDown(button, *mousepos))\n\n if mouse.wheel_up:\n events.append(MouseDown(4, *mousepos))\n if mouse.wheel_down:\n events.append(MouseDown(5, *mousepos))\n\n _mousel = mouse.lbutton\n _mousem = mouse.mbutton\n _mouser = mouse.rbutton\n\n if libkey.vk == _lib.TCODK_NONE:\n break\n if libkey.pressed:\n keyevent = KeyDown\n else:\n keyevent = KeyUp\n\n if libkey.vk == _lib.TCODK_TEXT:\n # Hack 2017-03-22 HexDecimal\n # Fix undefined libtcod behaviour which breaks 32-bit builds.\n libkey.c = b'\\x00'\n libkey.shift = False\n 
libkey.lalt = libkey.ralt = False\n libkey.lctrl = libkey.rctrl = False\n libkey.lmeta = libkey.rmeta = False\n\n events.append(\n keyevent(\n libkey.vk,\n libkey.c.decode('ascii', errors='ignore'),\n _ffi.string(libkey.text).decode('utf-8'),\n libkey.shift,\n libkey.lalt,\n libkey.ralt,\n libkey.lctrl,\n libkey.rctrl,\n libkey.lmeta,\n libkey.rmeta,\n )\n )\n\n if _lib.TCOD_console_is_window_closed():\n events.append(Quit())\n\n _eventQueue.extend(events)", "def _on_key_release(self, event):", "def on_mouse_release(self, x, y, button, key_modifiers):\r\n pass", "def mouse_process(pipe):\n mouse = WindowsMouseListener(pipe)\n mouse.listen()", "def ev_KEYUP(self, event):", "def set_events(self):\r\n\r\n self.canvas.bind(\"<Button-1>\", self.event_click_left)\r\n self.bind(\"<Return>\", self.event_return)", "def bindKeys(self):\r\n self.c.bind(\"<Button-1>\",self.seek)\r\n self.c.bind(\"<MouseWheel>\",self.app.zoom)\r\n self.c.bind(\"<Button-3>\",self.peek)", "def mouse_right_down(self):\n pass", "def OnKeyDown(self, event):\n\t\traw_code = event.GetRawKeyCode()\n\t\tmodifiers = event.GetModifiers()\n\t\t#print \"raw_code=\",raw_code,\";modifiers=\",modifiers\n\n\t\tif raw_code == 39 or raw_code == 73 : # <I> or -> = zoom in \n\t\t\tself.screenXsize += 20 \n\t\t\tprint \"X Zoom In\"\n\t\telif raw_code == 37 or raw_code ==79 :# <O> or <- = zomm out\n\t\t\tself.screenXsize -= 5 \n\t\t\tprint \"X Zoom Out\"\n\t\telif raw_code == 38:# <arrow up> = Y zomm in\n\t\t\tself.factorY += 0.2 \n\t\t\tprint \"Y Zoom In\"\n\t\telif raw_code == 40:# <arrow dn> = Y zomm out\n\t\t\tself.factorY -= 0.1 \n\t\t\tprint \"Y Zoom Out\"\n\t\telif raw_code ==33:# <PgUp> = Y move Up \n\t\t\tself.offsetY -= 10\n\t\t\tprint \"Y Move Up\"\n\t\telif raw_code ==34:# <PgDn> = Y move Down\n\t\t\tself.offsetY += 30 \n\t\t\tprint \"Y Move Down\"\n\t\telif raw_code == 90 and modifiers ==2 :# <ctrl>+<Z> = \n\t\t\tself.SetRenderDefault()\n\t\telif raw_code == 85 and modifiers ==2 :# <ctrl>+<U> = increase Vout \n\t\t\tself.AdjVout(100,'+')\n\t\telif raw_code == 88 and modifiers ==2 :# <ctrl>+<x> = clear debug_out \n\t\t\tself.window.clear_out()\n\t\telif raw_code == 85 and modifiers ==6 :# <ctrl>+<shift>+<U> = decrease Vout \n\t\t\tself.AdjVout(10,'-')\n\t\telif raw_code == 85 and modifiers ==3 :# <ctrl>+<alt>+<U> = Vout@5.0V \n\t\t\tself.AdjVout(5.0,'=')\n\t\t\tprint \"Set Vout to 5.0V\"\n\t\telif raw_code == 74 and modifiers ==7 :# <ctrl>+<shift>+<alt>+<J> = Vout@10.0V\n\t\t\tself.AdjVout(10.0,'=')\n\t\t\tprint \"Set Vout to 10.0V\"\n\t\telif raw_code == 77 and modifiers ==7 :# <ctrl>+<shift>+<alt>+<M> = Vout@15.0V\n\t\t\tself.AdjVout(15.0,'=')\n\t\t\tprint \"Set Vout to 15.0V\"\n\t\telif (raw_code == 3 and modifiers ==2) or raw_code == 32 :# <ctrl>+<Pause> = run/pause\n\t\t\tself.OnRunStop(event)\n\t\telif raw_code == 114 :# <F3> = pause\n\t\t\tself.Pause()\n\t\telif raw_code == 113 :# <F2> = run\n\t\t\tself.Run()\n\t\telif raw_code == 115 :# <F4> = setup\n\t\t\tself.Setup()\n\t\telif raw_code == 116 :# <F5> = full screen\n\t\t\tself.FullScreen = not self.FullScreen \n\t\t\tself.window.ShowFullScreen(self.FullScreen)\n\t\telif raw_code == 27 :# <ESC> = NOT full screen\n\t\t\tself.FullScreen = False\n\t\t\tself.window.ShowFullScreen(self.FullScreen)\n\t\t\tapp=wx.GetApp()\n\t\t\tframe = app.GetTopWindow()\n\t\t\tframe.StopLogo()\n\t\telif raw_code == 118 :# <F7> = open/close debug window\n\t\t\tself.window.SetDebug()\n\t\telif raw_code == 119 :# <F8> = expand/shrink sheet window\n\t\t\tself.window.SetSheet()\n\t\telif raw_code 
== 120 :# <F9> = hide sheet field\n\t\t\tself.window.HideSheetField()\n\t\telif raw_code == 112 :# <F1> = Select EUT\n\t\t\tself.SelectEut()\n\t\tself.Refresh(True)", "def _handleEvents(self):\n\n pygame.event.pump()\n keyboardState = pygame.key.get_pressed()\n for key in Game.BoundControls:\n Game.ControlState[Game.Controls[key]] = keyboardState[key]\n if Game.ControlState[Game.Quit]:\n self._quit()", "def events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.running = False\n return\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n self.running = False\n return\n elif event.key == pygame.K_LEFT:\n if (self.xv, self.yv) != (1, 0):\n self.xv, self.yv = -1, 0\n return\n elif event.key == pygame.K_RIGHT:\n if (self.xv, self.yv) != (-1, 0):\n self.xv, self.yv = 1, 0\n return\n elif event.key == pygame.K_UP:\n if (self.xv, self.yv) != (0, 1):\n self.xv, self.yv = 0, -1\n return\n elif event.key == pygame.K_DOWN:\n if (self.xv, self.yv) != (0, -1):\n self.xv, self.yv = 0, 1\n return", "def ev_keydown(self, event: KeyDown) -> None:", "def play(self):\n self.accept(\"wheel_up\", self.scrollindex, [-1] )\n self.accept(\"wheel_down\", self.scrollindex, [1] )\n self.accept(\"arrow_up\", self.scrollindex, [-1] )\n self.accept(\"arrow_down\", self.scrollindex, [1] )\n self.accept(\"enter\", self._click)\n if callable(self.data['exit']): self.accept(\"escape\", self.data['exit'])\n for item in self.canvas[\"items\"]: item['state']=DGG.NORMAL", "def ev_mousebuttondown(self, event: MouseButtonDown) -> None:", "def event(self,events):\n for event in events:\n if event.type == KEYDOWN:\n if event.key == K_RETURN:#starts the game\n self.game.gotoMain()\n #print \"r\"\n if event.key == K_ESCAPE:#quits the game\n sys.exit(0)", "def on_key(self, _win, key, _scancode, action, _mods):\n if action == glfw.PRESS or action == glfw.REPEAT:\n if key == glfw.KEY_ESCAPE or key == glfw.KEY_Q:\n glfw.set_window_should_close(self.win, True)\n if key == glfw.KEY_W:\n GL.glPolygonMode(GL.GL_FRONT_AND_BACK, next(self.fill_modes))\n if key == glfw.KEY_R:\n os.system(\"pkill aplay\")\n os.system(\"aplay T-Rex.wav &\")\n glfw.set_time(0)\n if key == glfw.KEY_N:\n self.normal_mapping = 1 - self.normal_mapping", "def ev_KEYDOWN(self, event):", "def events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.window.open = False\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n self.window.open = False\n if event.key == K_SPACE:\n self.restart()\n if event.key == K_f:\n self.window.switch(self.default_size)\n if event.type == VIDEORESIZE:\n self.window.screen = pygame.display.set_mode(\n (event.w, event.h), RESIZABLE)\n if event.type == MOUSEMOTION:\n pass\n if event.type == MOUSEBUTTONDOWN and event.button == 1:\n self.click(event.pos)", "def doubleclick(point):\n m = PyMouse()\n m.press(*point)\n m.release(*point)\n m.press(*point)\n m.release(*point)", "def event_loop(self):\n for event in pg.event.get():\n if event.type == pg.QUIT:\n self.done = True\n elif event.type in (pg.KEYDOWN, pg.KEYUP):\n self.keys = pg.key.get_pressed()", "def _key_press_event(self, event):\n if event.key() == 'escape':\n self.close()\n\n if event.text() == 'h':\n self._show_help()\n\n if event.text() in ('=', '+', '-'):\n self._zoom(sign=-2 * (event.text() == '-') + 1, draw=True)\n\n # Changing slices\n if event.key() in (QtCore.Qt.Key_Up, QtCore.Qt.Key_Down,\n QtCore.Qt.Key_Left, QtCore.Qt.Key_Right,\n QtCore.Qt.Key_Comma, QtCore.Qt.Key_Period,\n 
QtCore.Qt.Key_PageUp, QtCore.Qt.Key_PageDown):\n ras = np.array(self._ras)\n if event.key() in (QtCore.Qt.Key_Up, QtCore.Qt.Key_Down):\n ras[2] += 2 * (event.key() == QtCore.Qt.Key_Up) - 1\n elif event.key() in (QtCore.Qt.Key_Left, QtCore.Qt.Key_Right):\n ras[0] += 2 * (event.key() == QtCore.Qt.Key_Right) - 1\n else:\n ras[1] += 2 * (event.key() == QtCore.Qt.Key_PageUp or\n event.key() == QtCore.Qt.Key_Period) - 1\n self._set_ras(ras)", "def keyboard(self, *args):\n return _ida_hexrays.Hexrays_Hooks_keyboard(self, *args)", "def do_event(self, event):\n self.event = event\n self.event_type = event.type\n self.event_name = pygame.event.event_name(event.type)\n self.surf_list = []\n if event.type == QUIT:\n self.active = False\n \n elif event.type == KEYDOWN:\n self.event_key = event.key\n self.event_mod = event.mod\n self.event_unicode = event.unicode\n if event.key == K_ESCAPE:\n self.active = False\n elif event.key == K_RETURN:\n self.play(self)\n \n elif event.key in self.dirs:\n self.dir = np.array(self.dirs[event.key])\n self.pos += self.dir\n \n self.pos[0] = min(max(self.pos[0], 0), self.n-1)\n self.pos[1] = min(max(self.pos[1], 0), self.m-1)\n self.move(self)\n elif event.key in self.keys:\n self.keys[event.key](self)\n \n elif event.type == MOUSEMOTION:\n self.event_pos = event.pos\n self.event_rel = event.rel\n self.pos = self.get_index(*event.pos)\n if self.mouse_down:\n (x, y) = event.pos\n x -= self.dx//2\n y -= self.dy//2\n self.surf_list.append((self.cursor_img, (x, y)))\n \n elif event.type == MOUSEBUTTONDOWN:\n self.mouse_down = True\n (i, j) = self.get_index(*event.pos)\n t = self.T[i, j]\n if t != 0 and len(self.images)>0:\n self.cursor_img = self.images[t]\n self.T[i, j] = 0\n self.cursor_val = t\n \n elif event.type == MOUSEBUTTONUP:\n self.mouse_down = False\n (i, j) = self.get_index(*event.pos)\n self.pos = [i, j] \n t = self.T[i, j]\n if t == 0 and len(self.images) > 0:\n self.T[i, j] = self.cursor_val\n self.play(self)", "def events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.context.open = False\n if event.type == KEYDOWN:\n\n if event.key == K_ESCAPE:\n self.context.open = False\n if event.key == K_SPACE or event.key == K_MENU or event.key == K_q:\n self.setMode((self.mode + 1) % 3)\n if event.key == K_0:\n self.show_polynomial = not(self.show_polynomial)\n if event.key == K_1:\n self.show_image = not(self.show_image)\n if event.key == K_2:\n self.show_drawing = not(self.show_drawing)\n if event.key == K_3:\n self.show_display = not(self.show_display)\n if event.key == K_4:\n self.show_vectors = not(self.show_vectors)\n if event.key == K_5:\n self.show_circles = not(self.show_circles)\n if event.key == K_6:\n self.show_sample = not(self.show_sample)\n if event.key == K_r:\n self.reset()\n if event.key == K_z:\n self.drawing = self.drawing[:-1]\n self.updateSample()\n if event.key == K_s:\n self.save() # Save the coefficients and the graphs\n if event.key == K_d:\n self.saveCoefficients()\n if event.key == K_a:\n # Save a picture the screen\n self.screenshot(self.directory)\n if event.key == K_p:\n self.pause = not(self.pause)\n if event.key == K_f:\n self.context.switch()\n if event.key == K_c:\n self.show_camera = not(self.show_camera)\n if self.show_camera:\n self.context.camera.buildCapture()\n else:\n self.context.camera.destroy()\n if event.type == pygame.MOUSEBUTTONDOWN:\n if (event.button == 1) and (self.mode == 0):\n self.place()\n self.updateSample()\n if event.button == 4:\n self.context.draw.plane.zoom([1.1, 1.1])\n 
if event.button == 5:\n self.context.draw.plane.zoom([0.9, 0.9])\n\n if event.type == VIDEORESIZE:\n self.context.screen = pygame.display.set_mode(\n (event.w, event.h), RESIZABLE)", "def on_key(self, _win, key, _scancode, action, _mods):\n if action == glfw.PRESS or action == glfw.REPEAT:\n if key == glfw.KEY_ESCAPE or key == glfw.KEY_Q:\n glfw.set_window_should_close(self.win, True)\n if key == glfw.KEY_W:\n GL.glPolygonMode(GL.GL_FRONT_AND_BACK, next(self.fill_modes))\n\n self.key_handler(key)", "def test_040_mouse_keyboard(self):\n self.allow_service('qubes.InputMouse')\n self.allow_service('qubes.InputKeyboard')\n self.setUpDevice(mouse_events + keyboard_events)\n dev_name = '{}: {}'.format(\n self.vm.name if hasattr(self, 'vm') else 'remote',\n 'Test input device')\n self.find_device_and_start_listener('pointer:' + dev_name)\n self.emit_event('REL_X', 1)\n self.emit_event('REL_X', 1)\n self.emit_event('REL_Y', 1)\n self.emit_event('REL_Y', 1)\n self.emit_click('BTN_LEFT')\n\n self.assertEvent(['RawMotion', '0', {'0': '1.00', '1': '0.00'}])\n self.assertEvent(['RawMotion', '0', {'0': '1.00', '1': '0.00'}])\n self.assertEvent(['RawMotion', '0', {'1': '1.00', '0': '0.00'}])\n self.assertEvent(['RawMotion', '0', {'1': '1.00', '0': '0.00'}])\n self.assertEvent(['RawButtonPress', '1', {}])\n self.assertEvent(['RawButtonRelease', '1', {}])\n\n self.find_device_and_start_listener('keyboard:' + dev_name)\n\n self.emit_click('KEY_A')\n self.emit_click('KEY_B')\n self.emit_click('KEY_C')\n self.emit_click('KEY_D')\n for _ in range(4):\n self.emit_click('KEY_BACKSPACE')\n\n for key in ('38', '56', '54', '40'):\n self.assertEvent(['RawKeyPress', key, {}])\n self.assertEvent(['RawKeyRelease', key, {}])\n for _ in range(4):\n self.assertEvent(['RawKeyPress', '22', {}])\n self.assertEvent(['RawKeyRelease', '22', {}])", "def setupEventHooks(self):\n # handle mouse clicks\n self.img.scene().sigMouseClicked.connect(self.handleClick)\n # handle mouse movement\n # Use signalproxy for ratelimiting\n sig = self.img.scene().sigMouseMoved\n self.mvProxy = pqg.SignalProxy(signal=sig, rateLimit=60, slot=self.handleMove)", "def BindEvents(self):\n self.bind(\"<Motion>\",\n lambda e, s=self: s.MouseMoveEvent(e, 0, 0))\n self.bind(\"<Control-Motion>\",\n lambda e, s=self: s.MouseMoveEvent(e, 1, 0))\n self.bind(\"<Shift-Motion>\",\n lambda e, s=self: s.MouseMoveEvent(e, 1, 1))\n self.bind(\"<Control-Shift-Motion>\",\n lambda e, s=self: s.MouseMoveEvent(e, 0, 1))\n\n # Left Button\n self.bind(\"<ButtonPress-1>\",\n lambda e, s=self: s.LeftButtonPressEvent(e, 0, 0))\n self.bind(\"<Control-ButtonPress-1>\",\n lambda e, s=self: s.LeftButtonPressEvent(e, 1, 0))\n self.bind(\"<Shift-ButtonPress-1>\",\n lambda e, s=self: s.LeftButtonPressEvent(e, 0, 1))\n self.bind(\"<Control-Shift-ButtonPress-1>\",\n lambda e, s=self: s.LeftButtonPressEvent(e, 1, 1))\n self.bind(\"<ButtonRelease-1>\",\n lambda e, s=self: s.LeftButtonReleaseEvent(e, 0, 0))\n self.bind(\"<Control-ButtonRelease-1>\",\n lambda e, s=self: s.LeftButtonReleaseEvent(e, 1, 0))\n self.bind(\"<Shift-ButtonRelease-1>\",\n lambda e, s=self: s.LeftButtonReleaseEvent(e, 0, 1))\n self.bind(\"<Control-Shift-ButtonRelease-1>\",\n lambda e, s=self: s.LeftButtonReleaseEvent(e, 1, 1))\n\n # Middle Button\n self.bind(\"<ButtonPress-2>\",\n lambda e, s=self: s.MiddleButtonPressEvent(e, 0, 0))\n self.bind(\"<Control-ButtonPress-2>\",\n lambda e, s=self: s.MiddleButtonPressEvent(e, 1, 0))\n self.bind(\"<Shift-ButtonPress-2>\",\n lambda e, s=self: s.MiddleButtonPressEvent(e, 0, 
1))\n self.bind(\"<Control-Shift-ButtonPress-2>\",\n lambda e, s=self: s.MiddleButtonPressEvent(e, 1, 1))\n self.bind(\"<ButtonRelease-2>\",\n lambda e, s=self: s.MiddleButtonReleaseEvent(e, 0, 0))\n self.bind(\"<Control-ButtonRelease-2>\",\n lambda e, s=self: s.MiddleButtonReleaseEvent(e, 1, 0))\n self.bind(\"<Shift-ButtonRelease-2>\",\n lambda e, s=self: s.MiddleButtonReleaseEvent(e, 0, 1))\n self.bind(\"<Control-Shift-ButtonRelease-2>\",\n lambda e, s=self: s.MiddleButtonReleaseEvent(e, 1, 1))\n\n # Right Button\n self.bind(\"<ButtonPress-3>\",\n lambda e, s=self: s.RightButtonPressEvent(e, 0, 0))\n self.bind(\"<Control-ButtonPress-3>\",\n lambda e, s=self: s.RightButtonPressEvent(e, 1, 0))\n self.bind(\"<Shift-ButtonPress-3>\",\n lambda e, s=self: s.RightButtonPressEvent(e, 0, 1))\n self.bind(\"<Control-Shift-ButtonPress-3>\",\n lambda e, s=self: s.RightButtonPressEvent(e, 1, 1))\n self.bind(\"<ButtonRelease-3>\",\n lambda e, s=self: s.RightButtonReleaseEvent(e, 0, 0))\n self.bind(\"<Control-ButtonRelease-3>\",\n lambda e, s=self: s.RightButtonReleaseEvent(e, 1, 0))\n self.bind(\"<Shift-ButtonRelease-3>\",\n lambda e, s=self: s.RightButtonReleaseEvent(e, 0, 1))\n self.bind(\"<Control-Shift-ButtonRelease-3>\",\n lambda e, s=self: s.RightButtonReleaseEvent(e, 1, 1))\n\n if sys.platform == 'win32':\n self.bind(\"<MouseWheel>\",\n lambda e, s=self: s.MouseWheelEvent(e, 0, 0))\n self.bind(\"<Control-MouseWheel>\",\n lambda e, s=self: s.MouseWheelEvent(e, 1, 0))\n self.bind(\"<Shift-MouseWheel>\",\n lambda e, s=self: s.MouseWheelEvent(e, 0, 1))\n self.bind(\"<Control-Shift-MouseWheel>\",\n lambda e, s=self: s.MouseWheelEvent(e, 1, 1))\n else:\n # Mouse wheel forward event\n self.bind(\"<ButtonPress-4>\",\n lambda e, s=self: s.MouseWheelForwardEvent(e, 0, 0))\n self.bind(\"<Control-ButtonPress-4>\",\n lambda e, s=self: s.MouseWheelForwardEvent(e, 1, 0))\n self.bind(\"<Shift-ButtonPress-4>\",\n lambda e, s=self: s.MouseWheelForwardEvent(e, 0, 1))\n self.bind(\"<Control-Shift-ButtonPress-4>\",\n lambda e, s=self: s.MouseWheelForwardEvent(e, 1, 1))\n\n # Mouse wheel backward event\n self.bind(\"<ButtonPress-5>\",\n lambda e, s=self: s.MouseWheelBackwardEvent(e, 0, 0))\n self.bind(\"<Control-ButtonPress-5>\",\n lambda e, s=self: s.MouseWheelBackwardEvent(e, 1, 0))\n self.bind(\"<Shift-ButtonPress-5>\",\n lambda e, s=self: s.MouseWheelBackwardEvent(e, 0, 1))\n self.bind(\"<Control-Shift-ButtonPress-5>\",\n lambda e, s=self: s.MouseWheelBackwardEvent(e, 1, 1))\n\n # Key related events\n self.bind(\"<KeyPress>\",\n lambda e, s=self: s.KeyPressEvent(e, 0, 0))\n self.bind(\"<Control-KeyPress>\",\n lambda e, s=self: s.KeyPressEvent(e, 1, 0))\n self.bind(\"<Shift-KeyPress>\",\n lambda e, s=self: s.KeyPressEvent(e, 0, 1))\n self.bind(\"<Control-Shift-KeyPress>\",\n lambda e, s=self: s.KeyPressEvent(e, 1, 1))\n\n self.bind(\"<KeyRelease>\",\n lambda e, s=self: s.KeyReleaseEvent(e, 0, 0))\n self.bind(\"<Control-KeyRelease>\",\n lambda e, s=self: s.KeyReleaseEvent(e, 1, 0))\n self.bind(\"<Shift-KeyRelease>\",\n lambda e, s=self: s.KeyReleaseEvent(e, 0, 1))\n self.bind(\"<Control-Shift-KeyRelease>\",\n lambda e, s=self: s.KeyReleaseEvent(e, 1, 1))\n\n self.bind(\"<Enter>\",\n lambda e, s=self: s.EnterEvent(e, 0, 0))\n self.bind(\"<Control-Enter>\",\n lambda e, s=self: s.EnterEvent(e, 1, 0))\n self.bind(\"<Shift-Enter>\",\n lambda e, s=self: s.EnterEvent(e, 0, 1))\n self.bind(\"<Control-Shift-Enter>\",\n lambda e, s=self: s.EnterEvent(e, 1, 1))\n self.bind(\"<Leave>\",\n lambda e, s=self: s.LeaveEvent(e, 0, 
0))\n self.bind(\"<Control-Leave>\",\n lambda e, s=self: s.LeaveEvent(e, 1, 0))\n self.bind(\"<Shift-Leave>\",\n lambda e, s=self: s.LeaveEvent(e, 0, 1))\n self.bind(\"<Control-Shift-Leave>\",\n lambda e, s=self: s.LeaveEvent(e, 1, 1))\n\n self.bind(\"<Configure>\", self.ConfigureEvent)\n self.bind(\"<Expose>\",lambda e,s=self: s.ExposeEvent())", "def main(self):\n while 1:\n events = get_gamepad()\n for event in events:\n\n if(event.ev_type == \"Absolute\" ):\n\n if event.code in self.map[GAMEPAD].keys():\n self.absolute_switch[ self.map[GAMEPAD][event.code] ](event.state)\n\n\n if(event.ev_type == \"Key\" ):\n\n if event.code in self.map[GAMEPAD].keys():\n self.btn_switch[ self.map[GAMEPAD][event.code] ](self.map[GAMEPAD][event.code], event.state)\n \n\n\n\n #print(event.ev_type, event.code, event.state)", "def modifierDown(self, event):\n if event.keysym in ['Shift_L', 'Shift_R', 'Control_L', 'Control_R',\n 'Alt_L', 'Alt_R']:\n self.kbdModifier[event.keysym] = 1\n # grab all event to make sure get the key release event even\n # if the mouse is outside the application\n\n # we have problems with this because when we release we loose\n # the grab. As a consequence, if SHIFT+buttton1 was used to start\n # a rubberband and SHIFT is released BEFORE the button,\n # We loose motion and release event and the line stops moving\n # and is never deleted :(\n\n # this was an attempt to have tha canvas set the grab. But then\n # modifier event are not caught !\n \n #self.oldgrab = self.master.grab_current()\n #print 'setting global grab', self.oldgrab\n #self.master.grab_set_global()\n\t self.keybdModifierCallbacksDown[event.keysym].CallCallbacks(event)", "def test_020_mouse_keyboard_mouse_only(self):\n self.allow_service('qubes.InputMouse')\n self.setUpDevice(['BTN_LEFT', 'BTN_RIGHT', 'REL_X', 'REL_Y'] + keyboard_events)\n self.find_device_and_start_listener()\n self.emit_event('REL_X', 1)\n self.emit_event('REL_X', 1)\n self.emit_event('REL_Y', 1)\n self.emit_event('REL_Y', 1)\n self.emit_click('BTN_LEFT')\n\n self.assertEvent(['RawMotion', '0', {'0': '1.00', '1': '0.00'}])\n self.assertEvent(['RawMotion', '0', {'0': '1.00', '1': '0.00'}])\n self.assertEvent(['RawMotion', '0', {'1': '1.00', '0': '0.00'}])\n self.assertEvent(['RawMotion', '0', {'1': '1.00', '0': '0.00'}])\n self.assertEvent(['RawButtonPress', '1', {}])\n self.assertEvent(['RawButtonRelease', '1', {}])\n\n self.emit_event('KEY_A', 1)\n self.emit_event('KEY_B', 1)\n self.emit_event('KEY_C', 1)\n self.emit_event('KEY_D', 1)\n self.assertNoEvent(msg=\"keyboard should be denied\")", "def handle_mouse_data(data):\n pass", "def handle_input(self, ncode, wparam, lparam):\n x_pos = lparam.contents.x_pos\n y_pos = lparam.contents.y_pos\n data = lparam.contents.mousedata\n\n # This is how we can distinguish mouse 1 from mouse 2\n # extrainfo = lparam.contents.extrainfo\n # The way windows seems to do it is there is primary mouse\n # and all other mouses report as mouse 2\n\n # Also useful later will be to support the flags field\n # flags = lparam.contents.flags\n # This shows if the event was from a real device or whether it\n # was injected somehow via software\n\n self.emulate_mouse(wparam, x_pos, y_pos, data)\n\n # Give back control to Windows to wait for and process the\n # next event\n return ctypes.windll.user32.CallNextHookEx(\n self.hooked, ncode, wparam, lparam)", "def _press(self, event):", "def release():\n gui.mouseUp()", "def keyboardInterface(event):\r\n global rect\r\n if event.key == \"right\":\r\n # Make the rectangle 
wider\r\n w0 = rect.get_width()\r\n rect.set_width(w0 + 1)\r\n elif event.key == \"left\":\r\n # Make the rectangle narrower\r\n w0 = rect.get_width()\r\n rect.set_width(w0 - 1)\r\n elif event.key == \"up\":\r\n # Make the rectangle shorter\r\n h0 = rect.get_height()\r\n rect.set_height(h0 - 1)\r\n elif event.key == \"down\":\r\n # Make the rectangle taller\r\n h0 = rect.get_height()\r\n rect.set_height(h0 + 1)\r\n################################################################################\r\n# The functions below here will need to be changed for use on Windows!\r\n elif event.key == \"ctrl+right\":\r\n # Make the rectangle wider - faster\r\n w0 = rect.get_width()\r\n rect.set_width(w0 + 10)\r\n elif event.key == \"ctrl+left\":\r\n # Make the rectangle narrower - faster\r\n w0 = rect.get_width()\r\n rect.set_width(w0 - 10)\r\n elif event.key == \"ctrl+up\":\r\n # Make the rectangle shorter - faster\r\n h0 = rect.get_height()\r\n rect.set_height(h0 - 10)\r\n elif event.key == \"ctrl+down\":\r\n # Make the rectangle taller - faster\r\n h0 = rect.get_height()\r\n rect.set_height(h0 + 10)\r\n\r\n rect.figure.canvas.draw()# update the plot window\r", "def event_loop(self):\n for event in pg.event.get():\n self.keys = pg.key.get_pressed()\n if event.type == pg.QUIT or self.keys[pg.K_ESCAPE]:\n self.done = True\n self.cannon.get_event(event, self.objects)", "def quartz_mouse_process(pipe):\n # Quartz only on the mac, so don't warn about Quartz\n # pylint: disable=import-error\n import Quartz\n # pylint: disable=no-member\n\n class QuartzMouseListener(QuartzMouseBaseListener):\n \"\"\"Loosely emulate Evdev mouse behaviour on the Macs.\n Listen for key events then buffer them in a pipe.\n \"\"\"\n def install_handle_input(self):\n \"\"\"Constants below listed at:\n https://developer.apple.com/documentation/coregraphics/\n cgeventtype?language=objc#topics\n \"\"\"\n # Keep Mac Names to make it easy to find the documentation\n # pylint: disable=invalid-name\n\n NSMachPort = Quartz.CGEventTapCreate(\n Quartz.kCGSessionEventTap,\n Quartz.kCGHeadInsertEventTap,\n Quartz.kCGEventTapOptionDefault,\n Quartz.CGEventMaskBit(Quartz.kCGEventLeftMouseDown) |\n Quartz.CGEventMaskBit(Quartz.kCGEventLeftMouseUp) |\n Quartz.CGEventMaskBit(Quartz.kCGEventRightMouseDown) |\n Quartz.CGEventMaskBit(Quartz.kCGEventRightMouseUp) |\n Quartz.CGEventMaskBit(Quartz.kCGEventMouseMoved) |\n Quartz.CGEventMaskBit(Quartz.kCGEventLeftMouseDragged) |\n Quartz.CGEventMaskBit(Quartz.kCGEventRightMouseDragged) |\n Quartz.CGEventMaskBit(Quartz.kCGEventScrollWheel) |\n Quartz.CGEventMaskBit(Quartz.kCGEventTabletPointer) |\n Quartz.CGEventMaskBit(Quartz.kCGEventTabletProximity) |\n Quartz.CGEventMaskBit(Quartz.kCGEventOtherMouseDown) |\n Quartz.CGEventMaskBit(Quartz.kCGEventOtherMouseUp) |\n Quartz.CGEventMaskBit(Quartz.kCGEventOtherMouseDragged),\n self.handle_input,\n None)\n\n CFRunLoopSourceRef = Quartz.CFMachPortCreateRunLoopSource(\n None,\n NSMachPort,\n 0)\n CFRunLoopRef = Quartz.CFRunLoopGetCurrent()\n Quartz.CFRunLoopAddSource(\n CFRunLoopRef,\n CFRunLoopSourceRef,\n Quartz.kCFRunLoopDefaultMode)\n Quartz.CGEventTapEnable(\n NSMachPort,\n True)\n\n def listen(self):\n \"\"\"Listen for quartz events.\"\"\"\n while self.active:\n Quartz.CFRunLoopRunInMode(\n Quartz.kCFRunLoopDefaultMode, 5, False)\n\n def uninstall_handle_input(self):\n self.active = False\n\n def _get_mouse_button_number(self, event):\n \"\"\"Get the mouse button number from an event.\"\"\"\n return Quartz.CGEventGetIntegerValueField(\n event, 
Quartz.kCGMouseEventButtonNumber)\n\n def _get_click_state(self, event):\n \"\"\"The click state from an event.\"\"\"\n return Quartz.CGEventGetIntegerValueField(\n event, Quartz.kCGMouseEventClickState)\n\n def _get_scroll(self, event):\n \"\"\"The scroll values from an event.\"\"\"\n scroll_y = Quartz.CGEventGetIntegerValueField(\n event, Quartz.kCGScrollWheelEventDeltaAxis1)\n scroll_x = Quartz.CGEventGetIntegerValueField(\n event, Quartz.kCGScrollWheelEventDeltaAxis2)\n return scroll_x, scroll_y\n\n def _get_absolute(self, event):\n \"\"\"Get abolute cursor location.\"\"\"\n return Quartz.CGEventGetLocation(event)\n\n def _get_relative(self, event):\n \"\"\"Get the relative mouse movement.\"\"\"\n delta_x = Quartz.CGEventGetIntegerValueField(\n event, Quartz.kCGMouseEventDeltaX)\n delta_y = Quartz.CGEventGetIntegerValueField(\n event, Quartz.kCGMouseEventDeltaY)\n return delta_x, delta_y\n\n mouse = QuartzMouseListener(pipe)\n mouse.listen()", "def mouseDragged():\n if mousePressed:\n mousePressed()", "def replay(event_list, delay=0):\n\n\tkeyboard = Keyboard()\n\tpointer = Pointer()\n\n\tfor event in event_list:\n\t\tif isinstance(event, KeyboardEvent):\n\t\t\tkeyboard.keypress(event.key, event.state)\n\t\t\tif event.state is KeyState.RELEASED:\n\t\t\t\ttime.sleep(delay)\n\t\telif isinstance(event, PointerEventMotion):\n\t\t\tpointer.warp(event.position.x, event.position.y)\n\t\t\ttime.sleep(delay)\n\t\telif isinstance(event, PointerEventButton):\n\t\t\tpointer.click(event.button, event.state)\n\t\t\tif event.state is KeyState.RELEASED:\n\t\t\t\ttime.sleep(delay)\n\t\telif isinstance(event, PointerEventAxis):\n\t\t\tpointer.scroll(event.axis, event.value)\n\t\t\ttime.sleep(delay)\n\t\telse:\n\t\t\traise TypeError('Unsupported event')\n\n\tkeyboard.close()\n\tpointer.close()", "def key_press_event(self, event):\n pass", "def event_loop(self):\n for event in pg.event.get():\n if event.type == pg.QUIT:\n self.done = True\n elif event.type == pg.KEYDOWN:\n self.keys = pg.key.get_pressed()\n self.toggle_show_fps(event.key)\n elif event.type == pg.KEYUP:\n self.keys = pg.key.get_pressed()\n self.toggle_fullscreen(event.key)\n self._scene.get_event(event)", "def mouse_right_up(self):\n pass", "def __keystroke(self, event):\n if event.state - self.__previous_state == 4: # means that the Control key is pressed\n pass # do nothing if Control key is pressed\n else:\n self.__previous_state = event.state # remember the last keystroke state\n # Up, Down, Left, Right keystrokes\n if event.keycode in [68, 39, 102]: # scroll right, keys 'd' or 'Right'\n self.__scroll_x('scroll', 1, 'unit', event=event)\n elif event.keycode in [65, 37, 100]: # scroll left, keys 'a' or 'Left'\n self.__scroll_x('scroll', -1, 'unit', event=event)\n elif event.keycode in [87, 38, 104]: # scroll up, keys 'w' or 'Up'\n self.__scroll_y('scroll', -1, 'unit', event=event)\n elif event.keycode in [83, 40, 98]: # scroll down, keys 's' or 'Down'\n self.__scroll_y('scroll', 1, 'unit', event=event)", "def events(self):\n # catch all events here\n for event in pg.event.get():\n if event.type == pg.QUIT:\n self.quit_game()\n if event.type == pg.KEYDOWN:\n if event.key == pg.K_ESCAPE:\n menu.paused = True\n menu.pause_menu() #code gets stuck in this call until a button is pressed in the pause menu\n self.clock=pg.time.Clock()\n if event.key == pg.K_h:\n self.draw_debug = not self.draw_debug\n if event.key == pg.K_o:\n if self.flashlight.on:#turning off flashlight\n self.darkness.on = True\n 
self.battery.duration-=pg.time.get_ticks()-self.battery.last_update\n self.flashlight.on=False\n else: #turning on flashlight\n self.darkness.on = False\n self.battery.last_update=pg.time.get_ticks()\n self.flashlight.on=True\n\n #darkness condition\n if self.transition:\n self.darkness_transition(self.player)\n self.kidnap(self.player)\n\n # win condition\n if pg.sprite.spritecollide(self.player, self.win, False, collide_hit_rect):\n menu.win_menu()\n\n #got hit condition\n hit=pg.sprite.spritecollide(self.player, self.threat, False, collide_hit2_rect)\n if hit:\n self.hit(self.player, hit[0])\n \n #mirror\n self.portal(self.player)\n self.portal(self.monster)", "def on_keydown(key):\n global source_img, source_msk\n\n def next_image():\n return False\n\n def increase_shape_size():\n global SHAPE_SIZE\n SHAPE_SIZE = min(64, SHAPE_SIZE+SHAPE_SIZE_INC)\n return True\n\n def decrease_shape_size():\n global SHAPE_SIZE\n SHAPE_SIZE = max(1, SHAPE_SIZE-SHAPE_SIZE_INC)\n return True\n\n def clear_mask():\n global source_msk\n source_msk *= 0\n return True\n\n def display_help():\n global show_help, show_help_timestamp\n show_help_timestamp = datetime.now()\n show_help = True\n return True\n\n def stop_editing():\n raise StopIteration\n\n def set_current_label(value):\n global CURRENT_LABEL\n CURRENT_LABEL = value\n\n def set_mode_point():\n \"\"\"\n default point drawing mode\n press CTRL on mousemove to draw\n \"\"\"\n global DRAW_MODE\n DRAW_MODE=\"point\"\n\n def set_mode_line():\n \"\"\"\n start drawing in line mode\n if already in line mode, commit a line to the mask and start anew\n \"\"\"\n global DRAW_MODE, CURRENT_LABEL, SHAPE_SIZE\n global mouse_pos, line_start_pos\n\n if DRAW_MODE==\"line\":\n # draw the line on the mask\n cv.line(source_msk, line_start_pos, mouse_pos, CURRENT_LABEL, thickness=SHAPE_SIZE)\n\n line_start_pos = mouse_pos\n DRAW_MODE=\"line\"\n\n def flood_fill():\n \"\"\"\n flood fill a region in the mask\n FIXME: we really need undo for this!\n \"\"\"\n global CURRENT_LABEL\n global mouse_pos\n\n im_mask = (source_msk==CURRENT_LABEL).astype(np.uint8)\n cv.floodFill(im_mask, None, mouse_pos, CURRENT_LABEL)\n source_msk[im_mask!=0] = CURRENT_LABEL\n\n # function map\n fns = {\n ord(' '): next_image,\n ord('+'): increase_shape_size,\n ord('-'): decrease_shape_size,\n ord('x'): clear_mask,\n ord('h'): display_help,\n 27: stop_editing,\n ord('0'): lambda: set_current_label(0),\n ord('1'): lambda: set_current_label(1),\n ord('2'): lambda: set_current_label(2),\n ord('3'): lambda: set_current_label(3),\n ord('4'): lambda: set_current_label(4),\n ord('5'): lambda: set_current_label(5),\n ord('6'): lambda: set_current_label(6),\n ord('7'): lambda: set_current_label(7),\n ord('s'): set_mode_line,\n ord('a'): set_mode_point,\n ord('f'): flood_fill\n }\n\n try:\n return fns[key]()\n except KeyError:\n # FIXME: value 255 is not handled, what is 255? 
should we do a noop?\n #logger.warning(\"don't handle '%i'\" % key)\n pass", "def on_key_press(self, symbol, modifiers):\n if symbol == key.ESCAPE:\n self.set_exclusive_mouse(False)\n else:\n self.gamestatemanager.peek().on_key_press(symbol, modifiers, self.config_data[\"controls\"])", "def event_loop(self):\n if self.on_init() == False:\n self._running = False\n## for event in pg.event.get():\n## if event.type == pg.QUIT: # The user closes the game \n## self.done = True\n \n## keys = pg.key.get_pressed()\n## if (keys[K_LEFT]):\n## pass # To be filled\n## if (keys[K_RIGHT]):\n## pass\n## if (keys[K_UP]):\n## pass\n## if (keys[K_DOWN]):\n## pass\n self.draw()", "def connect_default_events(self):\n self.connect_event('motion_notify_event', self.onmove)\n self.connect_event('button_press_event', self.press)\n self.connect_event('button_release_event', self.release)\n self.connect_event('draw_event', self.update_background)\n self.connect_event('key_press_event', self.on_key_press)\n self.connect_event('key_release_event', self.on_key_release)\n self.connect_event('scroll_event', self.on_scroll)", "def handle_events(self) -> None:\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n elif event.type == MOUSEMOTION:\n self.mouse_pos = event.pos\n elif event.type == MOUSEBUTTONDOWN:\n self.mouse_pos = event.pos\n self.mouse_clicked = True\n elif self._focused_button is not None and event.type == KEYDOWN:\n self._handle_key_press(event)", "def on_mouse_press(self, event):\n self.on_mouse_wheel(event)", "def keyPressEvent(self, ev):\n\n self.currentKbKey = ev.key()\n\n if (ev.key() == self.panKey):\n # enable Pan mode\n self.panning = True\n # set the cursor to the hand to indicate pan/zoom mode\n if self.leftBtnClicked:\n self.setCursor(Qt.ClosedHandCursor)\n else:\n self.setCursor(Qt.OpenHandCursor)\n elif (ev.key() == self.selectAddKey):\n # set the cursor to the arrow with \"+\"\n self.setCursor(Qt.DragCopyCursor)\n elif (ev.key() == self.zoomKey):\n # enable zoom mode\n self.__zooming = True\n else:\n self.keyPress.emit(self, ev)", "def ev_MOUSEUP(self, event):", "def listen_presses(self):\n self.time_correction = self.get_time_corr()\n self.listen_start = self.master_clock()\n self._clear_events()", "def ev_MOUSEDOWN(self, event):", "def _check_keyup_events(self, event):\t\n\t\tif event.key == pygame.K_RIGHT:\n\t\t\tself.pigeon.moving_right = False\n\t\telif event.key == pygame.K_LEFT:\n\t\t\tself.pigeon.moving_left = False", "def emulate_mouse(self, key_code, x_val, y_val, data):\n # Once again ignore Windows' relative time (since system\n # startup) and use the absolute time (since epoch i.e. 
1st Jan\n # 1970).\n self.update_timeval()\n\n events = []\n\n if key_code == 0x0200:\n # We have a mouse move alone.\n # So just pass through to below\n pass\n elif key_code == 0x020A:\n # We have a vertical mouse wheel turn\n events.append(self.emulate_wheel(data, 'y', self.timeval))\n elif key_code == 0x020E:\n # We have a horizontal mouse wheel turn\n # https://msdn.microsoft.com/en-us/library/windows/desktop/\n # ms645614%28v=vs.85%29.aspx\n events.append(self.emulate_wheel(data, 'x', self.timeval))\n else:\n # We have a button press.\n\n # Distinguish the second extra button\n if key_code == 0x020B and data == 2:\n key_code = 0x020B2\n elif key_code == 0x020C and data == 2:\n key_code = 0x020C2\n\n # Get the mouse codes\n code, value, scan_code = self.mouse_codes[key_code]\n # Add in the press events\n scan_event, key_event = self.emulate_press(\n code, scan_code, value, self.timeval)\n events.append(scan_event)\n events.append(key_event)\n\n # Add in the absolute position of the mouse cursor\n x_event, y_event = self.emulate_abs(x_val, y_val, self.timeval)\n events.append(x_event)\n events.append(y_event)\n\n # End with a sync marker\n events.append(self.sync_marker(self.timeval))\n\n # We are done\n self.write_to_pipe(events)", "def OnLeftDown(self, event): # ANDY some PAN ideas from http://code.google.com/p/pyslip/\n if event.ShiftDown():\n event.Skip()\n return\n click_posn = event.GetPosition()\n self.SetCursor(wx.Cursor(wx.CURSOR_HAND))\n (self.last_drag_x, self.last_drag_y) = click_posn\n event.Skip()", "def ev_mousebuttondown(self, event: tcod.event.MouseButtonDown) -> T | None:", "def event_handler(self):\r\n for event in pygame.event.get():\r\n if event.type == QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n elif event.type == MOUSEBUTTONDOWN and event.button == LEFT_CLICK:\r\n self.left_mouse_down_handler(event)\r\n elif event.type == MOUSEBUTTONUP and event.button == LEFT_CLICK:\r\n self.left_mouse_up_handler(event)\r\n elif event.type == MOUSEBUTTONDOWN and event.button == RIGHT_CLICK:\r\n self.right_mouse_down_handler(event)\r\n elif event.type == MOUSEBUTTONUP and event.button == RIGHT_CLICK:\r\n self.right_mouse_up_handler(event)\r\n elif event.type == MOUSEMOTION:\r\n self.mouse_motion_handler(event)\r\n elif event.type == MOUSEBUTTONUP and event.button in [2, 4, 5]:\r\n self.shortcut_click(event)", "def ev_MOUSEMOTION(self, event):", "def setupKeyMappings(self) :\n\t\t# first create keyMap object with default values\n\t\tself.keyMap = { \"left\":0, \"right\":0, \\\n\t\t\t\t\"forward\":0, \"backward\":0, \"dash\":0, \\\n\t\t\t\t\"slide-left\":0, \"slide-right\":0, \\\n \t\t\t\t\"cam-up\":0, \"cam-down\":0, \\\n\t\t\t\t\"cam-left\":0, \"cam-right\":0, \\\n\t\t\t\t\"zoom-in\":0, \"zoom-out\":0, \\\n\t\t\t\t\"reset-view\":0, \"view\":0}\n\t\t\n\t\t# now setup keyboard events that modify keyMap thru setKey\n\t\tself.accept(\"escape\", sys.exit)\n\n\t\t# turn help text on/off\n\t\tself.accept(\"h\", self.setKey, [\"help\",1])\n\t\tself.accept(\"h-up\", self.setKey, [\"help\",0])\n\n\t\t# movement controls\n\t\tself.accept(\"arrow_left\", self.setKey, [\"left\",1])\n\t\tself.accept(\"arrow_left-up\", self.setKey, [\"left\",0])\n\t\tself.accept(\"arrow_right\", self.setKey, [\"right\",1])\n\t\tself.accept(\"arrow_right-up\", self.setKey, [\"right\",0])\n\n\t\tself.accept(\"arrow_up\", self.setKey, [\"forward\",1])\n\t\tself.accept(\"arrow_up-up\", self.setKey, [\"forward\",0])\n \t\tself.accept(\"arrow_down\", self.setKey, [\"backward\",1])\n \t\tself.accept(\"arrow_down-up\", 
self.setKey, [\"backward\",0])\n\n \t\tself.accept(\",\", self.setKey, [\"slide-left\",1])\n \t\tself.accept(\",-up\", self.setKey, [\"slide-left\",0])\n \t\tself.accept(\".\", self.setKey, [\"slide-right\",1])\n \t\tself.accept(\".-up\", self.setKey, [\"slide-right\",0])\n\n\t\tself.accept(\"alt-arrow_up\", self.setKey, [\"dash\", 1])\n \t\tself.accept(\"alt-up\", self.setKey, [\"dash\", 0])\n\n\t\t# camera direction contols\n\t\tself.accept(\"shift-arrow_up\", self.setKey, [\"cam-up\",1])\n\t\tself.accept(\"shift-arrow_down\", self.setKey, [\"cam-down\",1])\n\t\tself.accept(\"shift-arrow_left\", self.setKey, [\"cam-left\",1])\n\t\tself.accept(\"shift-arrow_right\", self.setKey, [\"cam-right\",1])\t\n\n\t\t# zoom controls\n\t\tself.accept(\"z\", self.setKey, [\"zoom-in\",1])\n\t\tself.accept(\"z-up\", self.setKey, [\"zoom-in\",0])\n \t\tself.accept(\"shift-z\", self.setKey, [\"zoom-out\",1])\n\t\tself.accept(\"r\", self.setKey, [\"reset-view\",1]) \n\t\tself.accept(\"r-up\", self.setKey, [\"reset-view\",0]) \n\n\t\tself.accept(\"v\", self.setKey, [\"view\",1])\n\t\tself.accept(\"v-up\", self.setKey, [\"view\",0])", "def __keystroke(self, event):\n if event.state - self.__previous_state == 4: # means that the Control key is pressed\n pass # do nothing if Control key is pressed\n else:\n if event.char in [' ', 'f']:\n return self.parent_class.finish_polygons_key()\n self.__previous_state = event.state # remember the last keystroke state\n # Up, Down, Left, Right keystrokes\n if event.keycode in [68, 39, 102]: # scroll right: keys 'D', 'Right' or 'Numpad-6'\n self.__scroll_x('scroll', 1, 'unit', event=event)\n elif event.keycode in [65, 37, 100]: # scroll left: keys 'A', 'Left' or 'Numpad-4'\n self.__scroll_x('scroll', -1, 'unit', event=event)\n elif event.keycode in [87, 38, 104]: # scroll up: keys 'W', 'Up' or 'Numpad-8'\n self.__scroll_y('scroll', -1, 'unit', event=event)\n elif event.keycode in [83, 40, 98]: # scroll down: keys 'S', 'Down' or 'Numpad-2'\n self.__scroll_y('scroll', 1, 'unit', event=event)", "def modifierUp(self, event):\n if event.keysym in ['Shift_L', 'Shift_R', 'Control_L', 'Control_R',\n 'Alt_L', 'Alt_R']:\n self.kbdModifier[event.keysym] = 0\n # release the grab. Release must be done on button release event\n # this is the Problem. 
if SHFT is released before button we loose\n # button motion and button release events after that.\n # Seems that a solution to this would require this object to also\n # monitor mouse buttons and release the grab after the last release\n # of either the button or the modifier.\n self.master.grab_release()\n\n #if self.oldgrab:\n # self.oldgrab.grab.set()\n\t self.keybdModifierCallbacksUp[event.keysym].CallCallbacks(event)", "def grab(self, event):\n self.ypos = event.y\n self.xpos = event.x\n self.config(cursor='fleur')", "def processKeyPressEvent(self, e):\n event = (int(e.modifiers()),e.key())\n# print \"<%x %x>\" % event, event\n# def f(d, stack):\n# for k,v in d.items():\n# stack.append(k)\n# if isinstance(v,dict):\n# f(v, stack)\n# else:\n# for x in stack:\n# print \"[%x %x]\" % x, x\n# print\n# stack.pop()\n# stack = []\n# f(self.AKH_keyBindings, stack)\n\n if event in self.AKH_keyBindingsWaiting:\n found = self.AKH_keyBindingsWaiting[event]\n elif event in self.AKH_keyBindings:\n found = self.AKH_keyBindings[event]\n else:\n key = event[1]\n if key!=QtCore.Qt.Key_Control and \\\n key!=QtCore.Qt.Key_Alt and \\\n key!=QtCore.Qt.Key_Shift:\n self.AKH_keyBindingsWaiting = {}\n return False\n if type(found) == dict:\n self.AKH_keyBindingsWaiting = found\n else:\n found()\n return True", "def events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.running = False\n if event.type == pygame.MOUSEBUTTONDOWN:\n self.set_selected(self.mouse_on_grid())\n if self.get_selected() is not None and event.type == pygame.KEYDOWN:\n self.event_seletect_moved(event)\n self.event_cell_update(event)", "def activate_keyboard_bindings():\n turtle.Screen().listen()\n turtle.Screen().onkey(exit, \"e\")\n turtle.Screen().onkey(exit, \"n\")", "def mouseReleaseEvent(self, event):\n self.box_begin = self.begin\n self.box_end = event.pos()\n self.begin = event.pos()\n self.end = event.pos()\n if not self.permanent_show:\n self.update()", "def ev_mousemotion(self, event: MouseMotion) -> None:", "def process_input(self):\n for event in pygame.event.get():\n\n if self.joystick and self.state == self.STATE_PLAY:\n\n if event.type == pygame.JOYAXISMOTION:\n self.gameevents.add(\"joyaxismotion\", event.axis, event.value, type='EVENT_USER')\n elif event.type == pygame.JOYBUTTONDOWN:\n if event.button == self.fire_button:\n self.gameevents.add(\"press\", \"fire\", type='EVENT_USER')\n elif event.button == self.IFF_button:\n self.gameevents.add(\"press\", \"iff\", type='EVENT_USER')\n elif event.button == self.shots_button:\n self.gameevents.add(\"press\", \"shots\", type='EVENT_USER')\n elif event.button == self.pnts_button:\n self.gameevents.add(\"press\", \"pnts\", type='EVENT_USER')\n elif event.type == pygame.JOYBUTTONUP:\n if event.button == self.fire_button:\n self.gameevents.add(\"release\", \"fire\", type='EVENT_USER')\n elif event.button == self.IFF_button:\n self.gameevents.add(\"release\", \"iff\", type='EVENT_USER')\n elif event.button == self.shots_button:\n self.gameevents.add(\"release\", \"shots\", type='EVENT_USER')\n elif event.button == self.pnts_button:\n self.gameevents.add(\"release\", \"pnts\", type='EVENT_USER')\n\n else:\n\n if event.type == pygame.KEYDOWN:\n\n if (pygame.key.get_mods() & self.modifier):\n if event.key == pygame.K_q:\n self.gameevents.add(\"press\", \"quit\", type='EVENT_USER')\n\n if event.key == pygame.K_RETURN:\n\n if self.state == self.STATE_INTRO:\n self.state = self.STATE_SETUP\n\n elif self.state == self.STATE_SETUP:\n self.state = 
self.STATE_GAMENO\n\n elif self.state == self.STATE_GAMENO:\n if self.mine_exists:\n self.state = self.STATE_SETUP_IFF\n else:\n self.state = self.STATE_PREPARE\n\n elif self.state == self.STATE_IFF:\n self.state = self.STATE_PREPARE\n\n elif self.state == self.STATE_SCORES:\n self.state = self.STATE_SETUP\n\n elif self.state == self.STATE_PLAY:\n\n if event.key == self.thrust_key:\n self.gameevents.add(\"press\", \"thrust\", type='EVENT_USER')\n elif event.key == self.left_turn_key:\n self.gameevents.add(\"press\", \"left\", type='EVENT_USER')\n elif event.key == self.right_turn_key:\n self.gameevents.add(\"press\", \"right\", type='EVENT_USER')\n elif event.key == self.fire_key:\n self.gameevents.add(\"press\", \"fire\", type='EVENT_USER')\n elif event.key == self.IFF_key:\n self.gameevents.add(\"press\", \"iff\", type='EVENT_USER')\n elif event.key == self.shots_key:\n self.gameevents.add(\"press\", \"shots\", type='EVENT_USER')\n elif event.key == self.pnts_key:\n self.gameevents.add(\"press\", \"pnts\", type='EVENT_USER')\n elif event.key == self.pause_key and self.config['General']['allow_pause']:\n self.gameevents.add(\"press\", \"pause\", type='EVENT_USER')\n else:\n self.gameevents.add(\"press\", event.key, \"user\", type='EVENT_SYSTEM')\n \n elif self.state == self.STATE_PAUSED and event.key == self.pause_key:\n self.gameevents.add(\"press\", \"unpause\", type='EVENT_USER')\n \n else:\n self.gameevents.add(\"press\", event.key, \"user\", type='EVENT_SYSTEM')\n\n elif event.type == pygame.KEYUP:\n\n if self.state == self.STATE_PLAY:\n\n if event.key == self.thrust_key:\n self.gameevents.add(\"release\", \"thrust\", type='EVENT_USER')\n elif event.key == self.left_turn_key:\n self.gameevents.add(\"release\", \"left\", type='EVENT_USER')\n elif event.key == self.right_turn_key:\n self.gameevents.add(\"release\", \"right\", type='EVENT_USER')\n elif event.key == self.fire_key:\n self.gameevents.add(\"release\", \"fire\", type='EVENT_USER')\n elif event.key == self.IFF_key:\n self.gameevents.add(\"release\", \"iff\", type='EVENT_USER')\n elif event.key == self.shots_key:\n self.gameevents.add(\"release\", \"shots\", type='EVENT_USER')\n elif event.key == self.pnts_key:\n self.gameevents.add(\"release\", \"pnts\", type='EVENT_USER')", "def run(self):\n\n redraw = True\n while True:\n if redraw:\n self.__draw()\n c = self.scr.getch()\n redraw = self.key_hooks(c)", "def key_event(self, key: Any, action: Any):\n pass", "def calibrateMousePress(self, mouse_event):\n\n \"\"\" Get mouse posiiton \"\"\"\n pt = mouse_event.pos()\n\n if mouse_event.button() == Qt.LeftButton:\n self.kinect.last_click[0] = pt.x()\n self.kinect.last_click[1] = pt.y()\n self.kinect.new_click = True\n elif mouse_event.button() == Qt.RightButton:\n self.kinect.last_rclick[0] = pt.x()\n self.kinect.last_rclick[1] = pt.y()\n self.kinect.new_rclick = True", "def on_mouse_press(self, x, y, button, modifiers):\n\n self.gamestatemanager.peek().on_mouse_press(x, y, button, modifiers)\n\n if self.exclusive:\n self.gamestatemanager.peek().on_mouse_press(x, y, button, modifiers)\n else:\n self.set_exclusive_mouse(True)", "def keyReleaseEvent(self, ev):\n self.currentKbKey = None\n\n if (ev.key() == self.panKey):\n # disable Pan/Zoom mode\n self.panning = False\n if self.__pointerLeftWidget:\n # we've left the widget - reset the cursor to the standard arrow\n self.setCursor(Qt.ArrowCursor)\n else:\n self.setCursor(self.defaultCursor)\n elif (ev.key() == self.selectAddKey):\n # disable selection add mode\n if 
self.__pointerLeftWidget:\n # we've left the widget - reset the cursor to the standard arrow\n self.setCursor(Qt.ArrowCursor)\n else:\n self.setCursor(self.defaultCursor)\n elif (ev.key() == self.zoomKey):\n # disable zoom mode\n self.__zooming = False\n else:\n self.keyRelease.emit(self, ev)", "def handle_continuous_keys(self):\n shift = pygame.K_LSHIFT in self.held\n ctrl = pygame.K_LCTRL in self.held\n factor = 3 if shift else 1/3 if ctrl else 1\n for key in self.held:\n if not self.followmode:\n # if self.held_delay[key] == 0:\n if key in (pygame.K_w, pygame.K_UP): # up\n # self.canvas.move_offset(0, 5 * factor)\n self.canvas.move_focus(0, 5 * factor)\n elif key in (pygame.K_s, pygame.K_DOWN): # down\n # self.canvas.move_offset(0, -5 * factor)\n self.canvas.move_focus(0, -5 * factor)\n elif key in (pygame.K_d, pygame.K_RIGHT): # right\n # self.canvas.move_offset(-5 * factor, 0)\n self.canvas.move_focus(5 * factor, 0)\n elif key in (pygame.K_a, pygame.K_LEFT): # left\n # self.canvas.move_offset(5 * factor, 0)\n self.canvas.move_focus(-5 * factor, 0)\n if key in (pygame.K_e, pygame.K_KP_PLUS):\n self.canvas.zoom(2 * factor)\n elif key in (pygame.K_q, pygame.K_KP_MINUS):\n self.canvas.zoom(-2 * factor)\n for key in self.held:\n self.held_delay[key] = (self.held_delay[key] + 1) % 5", "def _check_keydown_events(self, event):\n if event.key == pygame.K_ESCAPE:\n sys.exit()\n if event.key == pygame.K_RETURN:\n self.main.switch_gamestate(self, self.main.game_screen)", "def _on_pyglet_mouse_click(self, x, y, button, modifiers):\n button_time = clock()\n this_button = self._button_names[button]\n self._mouse_buffer.append((this_button, x, y, button_time))", "def keyCam(self, event):\n dct = {\n \"d\": 0,\n \"s\": 1,\n \"q\": 2,\n \"z\": 3\n }[event.char]\n self.moveAllSeg(dct)", "def keyPressEvent(self, event):\n self.game_engine.input_manager.keyPressEvent(event)", "def handle_mouse(self, x, y):\n pass", "def handle_events(self):\n keys = pygame.key.get_pressed()\n if keys[pygame.K_LEFT]:\n self.paddle.move_left()\n if keys[pygame.K_RIGHT]:\n self.paddle.move_right()\n if keys[pygame.K_ESCAPE]:\n sys.exit()\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.running, self.playing = False, False\n self.current_menu.display_run = False\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RETURN:\n self.START_KEY = True\n if event.key == pygame.K_BACKSPACE:\n self.BACK_KEY = True\n if event.key == pygame.K_DOWN:\n self.DOWN_KEY = True\n if event.key == pygame.K_UP:\n self.UP_KEY = True", "def keyboard_process(pipe):\n keyboard = WindowsKeyboardListener(pipe)\n keyboard.listen()", "def key_and_mouse_handler(event, gravity, planet_stencil):\n if str(event.type) == 'KeyPress':\n if event.keysym == 'space':\n # On space press clear the canvas\n gravity.del_all_objects()\n planet_stencil.show()\n elif event.keysym == 'Shift_L':\n planet_stencil.show()\n elif str(event.type) == 'KeyRelease':\n if event.keysym == 'Shift_L':\n planet_stencil.delete()\n\n elif str(event.type) == 'ButtonPress':\n if event.num == 1:\n # In left click\n if event.state == 131080:\n # Left Alt pressed\n planet = Planet(planet_stencil.visual_x * DISTANCE_FACTOR, planet_stencil.visual_y * DISTANCE_FACTOR,\n 0, 0, PLANET_DENSITY, planet_stencil.visual_r * RADIUS_PROPORTION_FACTOR,\n PLANET_IMMOVABLE_COLOR, movable=False)\n gravity.add_object_to_list(planet)\n else:\n planet = Planet(planet_stencil.visual_x * DISTANCE_FACTOR, planet_stencil.visual_y * DISTANCE_FACTOR,\n 0, 0, PLANET_DENSITY, 
planet_stencil.visual_r * RADIUS_PROPORTION_FACTOR, PLANET_COLOR)\n gravity.add_object_to_list(planet)\n elif event.num == 3:\n # On right-click delete planet on a distance less then stencil radius\n sten_x, sten_y, sten_r = planet_stencil.get_canvas_coords()\n for object_ in gravity.get_objects_list():\n obj_x, obj_y, _ = object_.get_object_canvas_coords()\n if (obj_x - sten_x) ** 2 + (obj_y - sten_y) ** 2 <= sten_r ** 2:\n object_.delete_from_canvas()\n gravity.del_object(object_)\n\n elif str(event.type) == 'Motion':\n planet_stencil.set_canvas_coords(event.x, event.y, planet_stencil.visual_r)\n planet_stencil.show()\n\n elif str(event.type) == 'MouseWheel':\n # Increase stencil radius\n if event.delta >= 0:\n planet_stencil.visual_r += 2\n planet_stencil.show()\n else:\n if planet_stencil.visual_r > 2:\n planet_stencil.visual_r += -2\n planet_stencil.show()", "def set_zoombox_keyboard(self):\n # Idem but with CTRL + left button mouse \n self.set('LeftClickMove', 'ZoomBox',\n key_modifier='Control',\n param_getter=lambda p: (p[\"mouse_press_position\"][0],\n p[\"mouse_press_position\"][1],\n p[\"mouse_position\"][0],\n p[\"mouse_position\"][1]))", "def rightButtonDown(self):\n\t\tautopy.mouse.toggle(True,autopy.mouse.RIGHT_BUTTON)", "def on_key_press(self, pressed, modifiers):\n if pressed == key.ESCAPE: self.save_world(); self.close(); log.INFO(\"MineGlet was closed!\")\n elif pressed == key.E: self.mouse_lock = not self.mouse_lock", "def ev_mousebuttonup(self, event: MouseButtonUp) -> None:", "def on_press(key):\n currentX, currentY = pyautogui.position()\n\n if key in LEFT_LEYS:\n pyautogui.move(-DEFAULT_MOVEMENT, 0)\n if key in DOWN_KEYS:\n pyautogui.move(0, DEFAULT_MOVEMENT)\n if key in UP_KEYS:\n pyautogui.move(0, -DEFAULT_MOVEMENT)\n if key in RIGHT_KEYS:\n pyautogui.move(DEFAULT_MOVEMENT, 0)\n\n if key in LEFTMOST_KEYS:\n pyautogui.moveTo(0, currentY)\n notify(\"Powermouse\", \"Moved to left of the screen\")\n if key in BOTTOM_KEYS:\n pyautogui.moveTo(currentX, screenHeight)\n notify(\"Powermouse\", \"Moved to bottom of screen\")\n if key in TOP_KEYS:\n pyautogui.moveTo(screenWidth, currentY)\n notify(\"Powermouse\", \"Moved to top of screen\")\n if key in RIGHTMOST_KEYS:\n pyautogui.moveTo(0, currentY)\n notify(\"Powermouse\", \"Moved to right of screen\")\n\n if key in CLICK_KEYS:\n pyautogui.click()\n notify(\"Powermouse\", f\"Clicked at position {pyautogui.position()}\")\n\n if key in QUIT_KEYS:\n notify(\"Powermouse\", \"Quitting\")\n exit()", "def RightClick(self):\n self._PressRightButton()\n self._ReleaseAllButtons()", "def on_key_release(event):\n if event.key == 'shift':\n self.shift_is_held = False", "def on_key_press(event):\n if event.key == 'shift':\n self.shift_is_held = True", "def handle_input(self, events):\n for event in events:\n if event.type == QUIT:\n sys.exit(0)\n\n elif event.type == MOUSEMOTION:\n if self.is_in_bounds(event.pos):\n self.editor_cursor_position = self.position_screen_to_grid(event.pos)\n if self.mode_paint:\n self.put_block()\n elif self.mode_erase:\n self.erase_block()\n elif event.type == MOUSEBUTTONDOWN:\n if event.button == MB_LEFT:\n if self.is_in_bounds(event.pos):\n self.put_block()\n self.mode_paint = True\n self.mode_erase = False\n elif event.button == MB_RIGHT:\n if self.is_in_bounds(event.pos):\n self.erase_block()\n self.mode_erase = True\n self.mode_paint = False\n elif event.button == MB_MIDDLE:\n if self.is_in_bounds(event.pos):\n self.pick_block()\n elif event.button == MB_WHEEL_DOWN:\n self.next_block_type()\n elif 
event.button == MB_WHEEL_UP:\n self.prev_block_type()\n # print str(self.mode_paint) + \" \" + str(self.mode_erase)\n elif event.type == MOUSEBUTTONUP:\n self.mode_paint = False\n self.mode_erase = False\n elif event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n # from GameStateMenu import GameStateMenu\n self.context[\"gamestate\"] = self.prev_state\n if event.key == K_MINUS or event.key == K_KP_MINUS:\n self.prev_block_type()\n elif event.key == K_EQUALS or event.key == K_KP_PLUS:\n self.next_block_type()\n elif event.key == K_0 or event.key == K_KP0:\n self.current_block_type = 0\n elif event.key == K_F5:\n self.save()\n elif event.key == K_F9:\n self.open()\n # else:\n # print event" ]
[ "0.61480045", "0.6093318", "0.59711534", "0.59679854", "0.59173495", "0.5892807", "0.5828488", "0.57922244", "0.5735914", "0.5715611", "0.5710519", "0.5705049", "0.5689077", "0.5688968", "0.5657838", "0.5655574", "0.5652674", "0.56487423", "0.5629702", "0.5615708", "0.5594418", "0.5577398", "0.557039", "0.5557546", "0.5546008", "0.5532672", "0.552531", "0.552062", "0.55166537", "0.55164325", "0.5512292", "0.55107355", "0.55029804", "0.5479493", "0.54773605", "0.54697263", "0.5459283", "0.5458876", "0.5458468", "0.5444509", "0.5441498", "0.5421792", "0.5413532", "0.5395142", "0.53906953", "0.5388528", "0.5388419", "0.5388001", "0.5362244", "0.5352892", "0.53516835", "0.53450716", "0.53428364", "0.53330004", "0.5329024", "0.53136414", "0.5310592", "0.530073", "0.5290883", "0.5290317", "0.5289342", "0.52851254", "0.5279455", "0.527841", "0.5276413", "0.527021", "0.52651954", "0.52635884", "0.52490604", "0.5247731", "0.5243489", "0.52406234", "0.5235857", "0.52320576", "0.52269614", "0.5220065", "0.52199185", "0.52174014", "0.5214832", "0.5214191", "0.5210107", "0.5205997", "0.5204988", "0.5195298", "0.5194183", "0.5184808", "0.5181693", "0.51806784", "0.5175069", "0.5175029", "0.51737916", "0.51662284", "0.5165748", "0.51642823", "0.5163634", "0.5160717", "0.5159905", "0.51545006", "0.5151315", "0.5150627", "0.5149414" ]
0.0
-1
Update images on the screen and flip to the new screen.
def update_screen(ai_settings, screen, ship):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _update_screen(self):\n\t\tself.screen.fill((255, 255, 255))\n\n\t\tself._check_collisions()\n\t\tself._update_objects()\n\t\tself._blit_objects()\n\n\t\tpygame.display.flip()", "def _update_screen(self):\n self.screen.fill(self.settings.bg_color)\n self.ship.blitme()\n for ufo in self.ufos.sprites():\n ufo.blitme()\n #if self.missile.shooting_missile or self.missile.missile_shot:\n for missile in self.missiles.sprites():\n missile.blitme()\n\n pygame.display.flip()", "def _update_screen(self):\n self.screen.fill(self.settings.bg_color)\n self.sideways_ship.blitme()\n for bullet in self.bullets.sprites():\n bullet.draw_bullet()\n self.aliens.draw(self.screen)\n pygame.display.flip()", "def _update_screen(self):\n self.screen.fill((250,250,250))\n self.rocket.blitme()\n pygame.display.flip()", "def _update_screen(self):\n\t\tself.screen.fill(self.settings.bg_color)\n\t\tself.pigeon.blitme()\n\t\tfor dropping in self.droppings.sprites():\n\t\t\tdropping.draw_dropping()\n\t\tself.autos.draw(self.screen)\n\n\t\t# Draw the score information.\n\t\tself.sb.show_score()\n\n\t\t# Draw the play button if the game is inactive.\n\t\tif not self.stats.game_active:\n\t\t\tself.play_button.draw_button()\n\n\t\t# Make the most recently drawn screen visible.\n\t\tpygame.display.flip()", "def _update_screen(self):\n self.screen.fill(self.rain_settings.bg_color)\n self.rain.draw(self.screen)\n\n pygame.display.flip()", "def _blit_images(self):\n self.screen.blit(self.dial, (self.dialPos))\n self.screen.blit(self.rotatedImage, self.rotatedImageRectangle)", "def _update_screen(self):\n # Redraw the screen during each pass of the loop\n self.screen.fill(self.bg_color)\n self.ship.blitme()\n for bullet in self.bullets.sprites():\n bullet.draw_bullet()\n self.aliens.draw(self.screen)\n if not self.stats.game_active:\n self.play_button.draw_button()\n\n #Draw the scoreboard\n self.sb.show_score()\n\n # Make the most recently drawn screen visible\n pygame.display.flip()", "def _update_screen(self) -> None:\n # setup the source image with an alpha channel\n alpha = 255 * np.ones_like(self.image[..., 0:1])\n image = np.concatenate([self._image, alpha], axis=-1).astype(np.uint8)\n # setup the super pixel segmentations\n super_pixels = np.zeros_like(self.image)\n super_pixels = mark_boundaries(\n super_pixels,\n self._super_pixel_segments,\n self._super_pixel_color\n )\n # concatenate the first channel of sup as the alpha channel\n super_pixels = [super_pixels, super_pixels[..., 0:1]]\n super_pixels = np.concatenate(super_pixels, axis=-1).astype(np.uint8)\n # setup the segmentation image with an alpha channel scaled by the\n # opacity parameter of the application [0, 9]\n intensity = 255 * (self._opacity / 9)\n intensity = intensity * np.ones_like(self._segmentation[..., 0:1])\n segmentation = np.concatenate([self._segmentation, intensity], axis=-1)\n segmentation = segmentation.astype(np.uint8)\n # send the images to the window\n self._view.show([image, segmentation, super_pixels])", "def _update_screen(self):\n self.screen.fill(self.settings.bg_color)\n self.ship.blitme()\n for bullet in self.bullets.sprites():\n bullet.draw_bullet()\n self.aliens.draw(self.screen)\n\n # Draw score information\n self.scoreboard.show_score()\n\n if not self.stats.game_active:\n self.play_button.draw_button()", "def update_imgs(self):\n\n for b in self.gamebuttons:\n b.update_img()\n self.start_but.update_img()", "def viewUpdate(self):\n # Update Capture\n imgtk = self.model.capture\n self.updateImage(self.view.lmain, imgtk)\n # 
Update Stitch \n imgtk = self.model.stitch\n self.updateImage(self.view.rmain, imgtk)\n self.view.dist.set(self.model.dist)", "def updateScreen(self) -> None:\n\n # fill game display black\n self.surface.fill(Colors.Black)\n\n # draw players and ball\n self.drawImageOnSurface(self.player_one)\n self.drawImageOnSurface(self.player_two)\n self.drawImageOnSurface(self.ball)\n\n # draw all the spacer images\n for image in self.spacers:\n self.drawImageOnSurface(image)\n\n # draw scores and format the scores in byte representation\n self.drawTextOnSurface(format(self._score[0], \"04b\"),\n (Configuration.windowWidth / 4, Configuration.windowHeight / 2), Colors.ByteGreen,\n font=self.font)\n self.drawTextOnSurface(format(self._score[1], \"04b\"),\n (3 * Configuration.windowWidth / 4, Configuration.windowHeight / 2), Colors.ByteGreen,\n font=self.font)\n\n super().updateScreen() # call the parent method to update the screen", "def update(self):\n self.imagecounter +=1\n if self.imagecounter > 7:\n self.imagecounter = 0\n self.image = pygame.image.load(self.pictures[self.imagecounter])\n self.rect = self.image.get_rect()\n self.rect.left = self.x\n self.rect.top = self.y", "def _update_screen(self):\n self.screen.fill(self.settings.bg_color)\n self.ship.blitme()\n # ignore unresolved reference below, we're using the method from Bullets, not Sprite. Pycharm...sigh.\n for bullet in self.bullets.sprites():\n bullet.draw_bullet()\n\n # drawing the aliens\n self.aliens.draw(self.screen)\n\n # drawing information about the score\n self.sb.show_score()\n\n # draws play-button on inactive game state\n if not self.stats.game_active:\n self.play_button.draw_button()\n\n pygame.display.flip()", "def render(self, window):\r\n # Update images\r\n for i in self.images:\r\n i.undraw()\r\n\r\n if self.alive == False:\r\n self.images[3].draw(window)\r\n elif self.scared == True:\r\n self.images[2].draw(window)\r\n elif self.a == True:\r\n self.images[0].draw(window)\r\n elif self.a == False:\r\n self.images[1].draw(window)\r\n\r\n # Move images\r\n toX = (self.boundingBox.pos.getX() - (self.images[0].getAnchor().getX() - 20))\r\n toY = (self.boundingBox.pos.getY() - (self.images[0].getAnchor().getY() - 20))\r\n\r\n for i in self.images:\r\n i.move(toX, toY)", "def update_screen(ai_settings, screen, bg_image, ship, bullets, aliens):\n\t# Redraw the screen during each pass through the loop\n\tscreen.fill(ai_settings.bg_colour)\n\tbg_image.blit_bg()\n\tship.blitme()\n\taliens.draw(screen)\n\tfor bullet in bullets.sprites():\n\t\tbullet.draw_bullet()\n\n\t# Make the most recently drawn screen visible\n\tpygame.display.flip()", "def _update_screen(self):\n self.screen.fill(self.settings.bg_colour)\n # Draw ship on the screen\n self.ship.blitme()\n # Draw all bullets in the sprites group on the screen\n for bullet in self.bullets.sprites():\n bullet.draw_bullet()\n self.stars.draw(self.screen)\n self.sb.show_score()\n if not self.stats.game_active:\n self.play_button.draw_button()\n pygame.display.flip()", "def update_current_screen(self):\n\t\tself.current_screen.update()", "def update(self, *args):\n\n # change picture every 100 milliseconds\n now = pygame.time.get_ticks()\n if now - self.last_update > 100:\n self.index = self.index ^ 1\n self.image = self.images[self.index]\n self.last_update = now\n prom = self.rect.center\n self.rect = self.image.get_rect()\n self.rect.center = prom\n\n self.rect.x -= args[0]", "def updateDisplay(self):\n if self._displayPjt:\n self._displayPjt.updateim()\n if self._displayUsr:\n 
self._displayUsr.updateim()\n if self._displayVtk:\n self._displayVtk.updateim()", "def update_screen(self):\r\n\r\n # Redraw the screen during each pass through the loop.\r\n self._screen.fill(self._bb_settings.bg_color)\r\n\r\n # Redraw all markers around edge of board\r\n\r\n # Draw the play button if the game is inactive\r\n if self._stats.get_status() == \"Start_game\":\r\n for button in self._play_mode_button_list:\r\n button.draw_button()\r\n elif self._stats.get_status() == \"replay\":\r\n for button in self._replay_button_list:\r\n button.draw_button()\r\n else:\r\n self.blitme()\r\n shoot_markers = self.get_entry_exit()\r\n atom_markers = self.get_atom_guess()\r\n for marker in shoot_markers.values():\r\n marker[1].draw_marker()\r\n for atom in atom_markers.values():\r\n atom.draw_marker()\r\n # Make the most recently drawn screen visible.\r\n pygame.display.flip()", "def _update_screen(self):\n self.screen.fill(self.bg_colour)\n\n if not self.waiting:\n self._check_cells()\n self._update_cells()\n for row in self.cells:\n for cell in row:\n cell.draw_cell()\n \n pygame.display.flip()", "def update_screen(ai_settings,screen,ship):\n\n\t#redraw the screen\n\n\tscreen.fill(ai_settings.bg_color)\n\tship.blitme()\n\n\t#make the most recent screen visible\n\tpygame.display.flip()", "def refresh_screen(self):", "def update_image(self, surface):\n self.ui_widget.update_image(surface=surface)", "def update_current_image(self):\n\n rect = (0, 0, self.variables.canvas_width, self.variables.canvas_height)\n if self.variables.canvas_image_object is not None:\n self.variables.canvas_image_object.update_canvas_display_image_from_canvas_rect(rect)\n self.set_image_from_numpy_array(self.variables.canvas_image_object.display_image)\n self.update()", "def display(self):\n\t\tfor c in self.canvas.values():\n\t\t\tc.update()\n\t\t\tself.superficie.blit(c.superficie, c.origen)\n\t\tpygame.display.flip()", "def update_frame(self):\n if not self.image_queue: return\n image = self.image_queue.pop()\n self.image_queue.rotate(-1)\n self.original_image = image\n self.altered_image = image.copy()\n\n if self.tracking:\n self.update_frame_tracking()\n self.display_image(True)\n elif self.calibrating:\n self.update_frame_calibrating()\n self.display_image(True)\n else:\n image = cv2.flip(self.altered_image, 1)\n self.display_image(True)", "def display_win_view(screen):\n\n pygame.time.Clock().tick(30)\n screen.blit(pygame.image.load('resources//pictures//winb.jpg').convert(), (0, 0))\n pygame.display.flip()", "def update_screen(infrompy_settings, screen, ship):\n\n\t# Redraw the screen during each pass through the loop.\n\tscreen.fill(infrompy_settings.bg_color)\n\tship.blitme()\n\n\t# Make the most recently drawn screen visible.\n\tpygame.display.flip()", "def _refresh_screen(self):\n self.myscreen.refresh()\n self.box1.refresh()\n self.box2.refresh()", "def draw(self):\r\n self.screen.blit(self.image, self.image.get_rect())", "def draw(self):\n self.screen.blit(self.image, (self.x_pos1, self.y_pos))\n self.screen.blit(self.image, (self.x_pos2, self.y_pos))", "def draw(self):\r\n self.screen.fill(self.color_bg) \r\n for t in self.thumbs: t.draw(self.screen) \r\n pygame.display.flip()\r\n self.clock.tick(60)", "def update_image(self):\n if self.first is None or self.second is None:\n LOG.warn(\"No images set yet\")\n else:\n pos = self.slider.value()\n moved = np.roll(self.second, self.second.shape[0] / 2 - pos, axis=0)\n self.image_item.setImage(moved - self.first)", "def refreshImages(self):\n fileName1 = 
\"DECK/\" + str(self.card1) + \".gif\"\n fileName2 = \"DECK/\" + str(self.card2) + \".gif\"\n fileName3 = \"DECK/\" + str('b') + \".gif\"\n self.image1 = PhotoImage(file = fileName1)\n self.cardLabel1[\"image\"] = self.image1\n self.image2 = PhotoImage(file = fileName2)\n self.cardLabel2[\"image\"] = self.image2\n self.image3 = PhotoImage(file = fileName3)\n self.cardLabel3[\"image\"] = self.image3", "def update(self):\n self.screen.blit(self.dial, (DIAL_POS))\n self.screen.blit(self.rotatedImage, self.rotatedImageRectangle)", "def update_image(self):\n if self.updated_sprite_list:\n self.image = self.increment_sprite_index(True)\n self.updated_sprite_list = False\n self.update_count_down = self.update_frames\n self.redraw = True\n elif self.update_frames == 0:\n return\n elif self.update_count_down == 0:\n if self.sprite_index == 2:\n self.remove_action(Action.crouch_attack)\n self.image = self.increment_sprite_index()\n self.update_count_down = self.update_frames\n self.redraw = True\n else:\n self.update_count_down -= 1", "def update_screen(settings, screen, rocket):\n # Redraw the screen during each pass through the loop.\n screen.fill(settings.bg_color)\n rocket.blitme()\n\n # Make the most recently drawn screen visible.\n pygame.display.flip()", "def update_screen(ai_settings, screen, ship, bullets):\n\t# Redraw the screen during each pass through the loop\n\tscreen.fill(ai_settings.bg_color)\n\tship.blitme()\n\n\t# Redraw all bullets behind ship and aliens\n\t# bullets.sprites() returns a list of all the sprites\n\t# in the group bullets\n\tfor bullet in bullets.sprites():\n\t\tbullet.draw_bullet()\n\n\t# Make the most recently drawn screen visible\n\tpygame.display.flip()", "def make_display(self) -> None:\n self.display = pygame.display.set_mode((self._screen_width,\n self._screen_height))\n for name, value in self.images.items():\n if value is None:\n continue\n\n if type(value) in (tuple, list):\n self.images[name] = tuple([img.convert_alpha()\n for img in value])\n else:\n self.images[name] = (value.convert() if name == \"background\"\n else value.convert_alpha())", "def update_screen(rk_settings, screen, rock, stars, bullets):\r\n\t# Redraw the screen during each pass through the loop.\r\n\tscreen.fill(rk_settings.bg_color)\r\n\tfor bullet in bullets.sprites():\r\n\t\tbullet.draw_bullet()\r\n\trock.blitme()\r\n\tstars.draw(screen)\r\n\t\t\r\n\t# Make the most recently drawn screen visible.\r\n\tpygame.display.flip()", "def draw(self, screen):\n if self.state == self.S_ACTIVE:\n screen.blit(self.image, self.rect)", "def update(self) -> None:\n self.all_sprites.update()", "def update_screen(ml_settings,screen, cartesian_plane):\n \t#Redraw the dcreen during each pass through the loop\n \tscreen.fill(ml_settings.bg_color)\n\n\t#Draw the cartesian plane\n\tcartesian_plane.draw(ml_settings,screen)\n\n \n \t#Make the most recetly drawn screen visible\n \tpygame.display.flip()", "def update_screen(self, ai_game):\r\n self.surface.fill(self.settings.bg_color)\r\n self.ship.blitme()\r\n for bullet in self.ship_bullets.sprites():\r\n bullet.draw_bullet()\r\n for bullet in self.alien_bullets.sprites():\r\n bullet.draw_bullet()\r\n self.aliens.draw(self.surface)\r\n self.explosions.draw(self.surface)\r\n\r\n # Draw the score information.\r\n self.sb.show_score()\r\n\r\n # Draw the difficulty buttons if the game is inactive.\r\n if not self.stats.game_active:\r\n for button in self.buttons:\r\n button.draw_button()\r\n\r\n # Draw the game over message if appropriate\r\n if 
self.stats.game_over:\r\n self.surface.blit(self.game_over_text, self.game_over_text_rect)\r\n\r\n # Make the most recently drawn screen visible.\r\n self.screen.blit(self.surface, (0, 0))\r\n pg.display.flip()", "def update(self):\r\n pygame.display.update()\r\n return", "def update_deck_display(self):\n self.deck_surface.fill(CLEARCOLOUR)\n self.deck_surface.blit(self.background, (0, 0))\n if not self.is_empty():\n cards_to_draw = self.cards\n if self.draw_from_last:\n cards_to_draw = reversed(cards_to_draw)\n\n for i, card in enumerate(cards_to_draw):\n selected = (i == self.selected_card)\n image_to_draw = card.image\n\n if self.deck_reveal == DeckReveal.HIDE_ALL:\n image_to_draw = card.backimage\n\n if self.flip:\n image_to_draw = pygame.transform.flip(image_to_draw, self.vert_orientation,\n not self.vert_orientation)\n\n self.deck_surface.blit(image_to_draw, (card.x - selected * card.x * 0.5 *\n (-1)**self.flip * self.vert_orientation,\n card.y - selected * card.y * 0.5 *\n (-1)**self.flip * (not self.vert_orientation)))", "def update(self):\n \n # Creates a surface on which 3 hearts can be blitted\n self.image = pygame.Surface((150, 50))\n \n # Blits each of the remaining Player lives' onto this surface\n for life in range(self.__num_lives): \n \n # Each heart image is 50x50 pixels, and so they are blitted 50 pixels apart\n self.image.blit(self.image_heart, (life*50, 0))\n \n self.image.set_colorkey((0, 0, 0))\n self.rect = self.image.get_rect()\n \n # The surface's rect is set in the top-right corner of the screen\n self.rect.x = 650\n self.rect.y = 0", "def update_screen(si_settings,screen,stats,sb,ship,aliens,bullets,play_button):\n #Redraw the screen each time through loop\n screen.fill(si_settings.bg_color)\n #Redraws bullets behind ship and invaders\n for bullet in bullets.sprites():\n bullet.draw_bullet()\n ship.blitme()\n aliens.draw(screen)\n #Draws score\n sb.show_score()\n #Draw button if game not active\n if not stats.game_active:\n play_button.draw_button()\n #Makes most recent drawn screen visible\n pygame.display.flip()", "def update_screen(conductor_settings, screen, star_killer, enemy, projectiles):\n screen.fill(conductor_settings.bg_color)\n # Redraw all projectiles behind enemy ships\n for laser in projectiles.sprites():\n laser.draw_laser()\n star_killer.blitspri(screen, (star_killer.index % star_killer.totalCellCount), star_killer.rect)\n enemy.blitspri(screen, (enemy.index % enemy.totalCellCount), enemy.rect)\n\n # Make the most recently drawn screen visible\n pygame.display.flip()", "def update(self):\n surface = pygame.Surface(CARD_SIZE, pygame.SRCALPHA)\n card_filename = f\"assets/sprites/card{self.suit.capitalize()}{self.rank}.png\"\n card_image = pygame.image.load(card_filename)\n surface.blit(card_image, (0, 0))\n self.image = pygame.Surface.convert_alpha(surface)\n self.rect = self.image.get_rect()", "def update_all_elements(self):\n self.screen.blit(self.background_image, (0, 0))\n self.pad_sprites.draw(self.screen)\n self.obstacle_sprites.draw(self.screen)\n self.meteor_sprites.update()\n self.meteor_sprites.draw(self.screen)\n self.player_sprite.update()\n self.player_sprite.draw(self.screen)\n if not self.lander.is_controllable():\n self.screen.blit(self.alert_instruments, (0, 0))\n self.show_on_screen(\"UNCONTROLLABLE\", (120, 82))\n elif self.lander_failure():\n self.screen.blit(self.alert_instruments, (0, 0))\n self.show_on_screen(\"Failure of \" + str(self.failure), (120, 82))\n else:\n self.screen.blit(self.instruments, (0, 0))\n 
self.update_lander_meters()", "def update(self):\n if self.value:\n self.image = self.rect2 \n else:\n self.image = self.rect1", "def upd_view():\n global state, current_view, show_actives, current_filename\n nonlocal img, listbox\n\n with data_lock:\n if state == 1:\n current_filename = None\n state = 2\n listbox.delete(0, tk.END)\n for item in current_view.actions.by_type(wtl.actions.Click):\n wtl_uid = str(item.target.wtl_uid)\n text = item.target.metadata[\"text\"]\n listbox.insert(tk.END, wtl_uid + f\" ({text})\")\n\n if state == 2:\n filename = \"first\" if show_actives.get() == 0 else \"is_active\"\n if filename != current_filename:\n current_filename = filename\n current_view.snapshot.screenshots[filename].save(Path(\".\"))\n img = tk.PhotoImage(file=f\"{filename}.png\")\n canvas.create_image(5, 5, anchor=tk.NW, image=img)\n\n window.after(250, upd_view)", "def flip_back(self):\n for img in self.image_id:\n self.canvas.delete(img)\n self.image_id.clear()\n for tile in self.canvas.find_withtag(\"match\"):\n self.canvas.itemconfigure(tile, fill=self.color)\n # If item has match tag, removes all the tags\n self.canvas.itemconfigure(tile, tag=\"\")\n for tile in self.canvas.find_withtag(\"selected\"):\n self.canvas.itemconfigure(tile, fill=self.default_color)\n self.canvas.itemconfigure(tile, tag=\"\")\n # check if all tiles are matched, update result label\n if self.num_of_match == 8:\n text_label = f'Game Over!\\n Score: {self.score}\\n ' \\\n f'Number of tries: {self.num_of_tries}'\n self.score_label.config(text=text_label)", "def draw(self):\n self.game.screen.blit(self.image, self.game.off(self.pos))", "def draw(self):\n self.write_image()\n self.update()", "def update(self):\n\n surface = pygame.Surface(CARD_SIZE, pygame.SRCALPHA)\n card_filename = f\"assets/sprites/cardBack.png\"\n card_image = pygame.image.load(card_filename).convert_alpha()\n surface.blit(card_image, (0, 0))\n self.image = surface\n self.rect = self.image.get_rect()", "def updateWorld(self):\n\t self.screen.clear()\n self.update()\n self.screen.refresh()", "def draw(self):\n self.screen.blit(self.image, self.rect)", "def main(self):\n update = self.update\n draw = self.draw\n screen = self.screen\n flip = pg.display.update\n clock = time.time\n frame_length = (1. 
/ self.fps)\n time_since_draw = 0\n last_update = clock()\n fps_timer = 0\n frames = 0\n\n while not self.done:\n clock_tick = clock() - last_update\n last_update = clock()\n time_since_draw += clock_tick\n update(clock_tick)\n if time_since_draw >= frame_length:\n time_since_draw -= frame_length\n draw(screen)\n flip()\n frames += 1\n\n fps_timer, frames = self.handle_fps(clock_tick, fps_timer, frames)\n time.sleep(.01)", "def refresh(self) -> None:\n self.screen.refresh()", "def finish_render():\n get_window().static_display = True\n get_window().flip_count = 0\n get_window().flip()", "def draw_screen(self, master_screen):\n master_screen.blit(self.screen_image, (0, 0))", "def flip(self, horizontally):\n\t\tself.currentPixbuf = self.currentPixbuf.flip(horizontally)\n\t\tself.scaleCache[1] = 0\n\t\tgc.collect()", "def blitme(self):\r\n if self.orientation == 'left':\r\n self.screen.blit(self.image, self.rect)\r\n elif self.orientation == 'right':\r\n self.screen.blit(pygame.transform.flip(self.image, True, False), self.rect)", "def punched(self):\n if not self.dizzy:\n self.dizzy= 1\n self.original= self.image", "def update_state(self, clock, bg_image, allsprites):\n # ensure we don't get more than 60fps\n clock.tick(60)\n # update the background\n self.screen.blit(bg_image, (0, 0))\n # update the greeting\n self.draw_greeting()\n # update the sprite(s)\n allsprites.update()\n allsprites.draw(self.screen)\n # draw some snow\n for s in self.snow:\n s.draw(self.screen)", "def update(self):\n\n self.x += self.dx\n self.y += self.dy\n\n # draw image\n if self.visible:\n self.scene.screen.blit(self.image, (self.x, self.y))\n self.check_bounds()\n self.check_collisions()\n self.check_keys()", "def update_image(self):\n self.image = Image.fromarray(self.img)", "def update_screen(settings, screen, stats, dude, bullets, dbags, play_button, scoreboard):\r\n\t# Refresh the background and redraw the dude and bullets\r\n\tscreen.fill(settings.bg_color)\r\n\tfor dbag in dbags:\r\n\t\tdbag.blitme()\r\n\tfor bullet in bullets.sprites():\r\n\t\tbullet.draw_bullet()\r\n\tdude.blitme()\r\n\tscoreboard.show_score()\r\n\t\r\n\t# Draw play button if the game isn't active\r\n\tif not stats.game_active:\r\n\t\tplay_button.draw_button()\r\n\t\r\n\t# Make the recently drawn screen visible\r\n\tpygame.display.flip()", "def update(self):\n if self.value:\n self.counter += 1\n self.__dx = 0 \n if self.counter == 5:\n self.image = self.blow_up\n if self.counter == 15:\n self.image = self.blow_up_2\n if self.counter == 25:\n self.value = False\n self.image = self.normal\n self.counter = 0 \n self.__dx = 9 \n self.rect.centerx = 540\n \n if self.rect.left <= 0:\n self.rect.left = 0\n if self.rect.right >= self.screen.get_width():\n self.rect.right = self.screen.get_width()", "def _update_images(self, axis=None, draw=True):\n self._update_base_images(axis=axis)\n if draw:\n self._draw(axis)", "def update_graphics(self, new_image_ref):\n self.image_ref = new_image_ref\n self.init_graphics()", "def update(self):\n if self.dizzy:\n self._spin()\n # shrink the monkey when it has been punched every 5 times\n if Chimp.count_punch % 5 == 0:\n self.image = pygame.transform.scale(self.image, (self.rect.width - 5, self.rect.height - 5))\n else:\n self._walk()", "def display(self):\n\t\tself.imgDisplay.set_from_pixbuf(self.getVisible())\n\t\tgc.collect()", "def update(self):\n cv2.imshow(self.window_name, self.map.get_crop())", "def display_lost_view(screen):\n pygame.time.Clock().tick(30)\n 
screen.blit(pygame.image.load('resources//pictures//lostb.jpg').convert(), (0, 0))\n pygame.display.flip()", "def flip(self):\n if self.is_face_up:\n arcade.load_texture(self.back_file)\n self.is_face_up = False\n else:\n arcade.load_texture(self.face_file)\n self.is_face_up = True", "def update_screen(ai_settings,screen,stats,ship,boss,boss_bullets,bullets,play_button,ui):\n\t# Redraw the screen during each pass through the loop.\n\tscreen.fill(ai_settings.bg_color)\n\tfor boss_bullet in boss_bullets.sprites():\n\t\tboss_bullet.draw_bullet()\n\tfor bullet in bullets.sprites():\n\t\tbullet.draw_bullet()\n\tship.blitme()\n\tboss.blitme()\n\t#draw the ui\n\tui.prep_ships()\n\tui.prep_ui()\n\tui.show_ui()\n\t# Draw the play button if the game is inactive\n\tif stats.game_active == False:\n\t\tplay_button.draw_button()\n\n\t# Make the most recently drawn screen visible.\n\tpygame.display.flip()", "def flip(self, horizontal):\n try:\n self._is_transformable()\n horizontal = get_int(horizontal)\n except NotTransformable as e:\n self._app[\"statusbar\"].message(str(e) + \" flip\", \"error\")\n return\n except StringConversionError as e:\n self._app[\"statusbar\"].message(str(e), \"error\")\n return\n images = self.get_images(\"Flipped\")\n # Apply changes\n for fil in images:\n if fil not in self._changes:\n self._changes[fil] = [0, 0, 0]\n if horizontal:\n self._changes[fil][1] = \\\n (self._changes[fil][1] + 1) % 2\n else:\n self._changes[fil][2] = \\\n (self._changes[fil][2] + 1) % 2\n # Flip the image shown\n if self._app.get_path() in images:\n self.emit(\"changed\", \"flip\", horizontal)\n # Reload thumbnails of flipped images immediately\n if self._app[\"thumbnail\"].toggled:\n self.apply()", "def updateSheetDisplay(self):\n\t\t# Re-render the sheet music\n\t\tself.sheetPath = self.transcribedPart.write(\"lily.png\")\n\t\t_image = Image.open(self.sheetPath)\n\t\t# Scale to fit in window.\n\t\tscale = tuple([int(round(dim*0.6)) for dim in _image.size])\n\n\t\tself.sheetImg = ImageTk.PhotoImage(_image.resize(scale, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tImage.ANTIALIAS))\n\t\t# Update the panel\n\t\tif ( self.justLaunched ):\n\t\t\t# Pack image and display\n\t\t\tself.panel = Label(self.sheetFrame, image=self.sheetImg)\n\t\t\tself.panel.pack(side=TOP, fill=BOTH, expand=YES)\n\t\t\tself.justLaunched = False\n\t\t\t\n\t\tself.panel.configure(image=self.sheetImg)", "def display_frame(self, screen):\n screen.fill(WHITE)\n\n if not self.game_over:\n self.all_sprites_list.draw(screen)\n\n pygame.display.flip()", "def resting(self):\n self.image = self.image_list[self.index]\n\n if self.rect.y % 32 != 0:\n self.correct_position(self.rect.y)\n if self.rect.x % 32 != 0:\n self.correct_position(self.rect.x)", "def update_frame(self):\n if self.should_reset_camera:\n self.ren.ResetCamera()\n self.should_reset_camera = False\n self.interactor.Render()\n app.processEvents()", "def update_frame(self):\n if self.should_reset_camera:\n self.ren.ResetCamera()\n self.should_reset_camera = False\n self.interactor.Render()\n app.processEvents()", "def update_display(self):\r\n\r\n # The display.update() Updates the screen, making the new frame replace the old one. 
\r\n pg.display.update()\r\n \r\n # clock.tick sets a framerate for the game.\r\n # This is to make the game run at a stable fps \r\n self.clock.tick(cng.FRAMERATE)", "def draw(self):\r\n\r\n self.screen.fill((0,0,0))\r\n self.sprite_group.draw(self.screen)\r\n pygame.display.flip()", "def draw(self):\n self.screen.blit(self.score_image, self.score_rect)\n self.screen.blit(self.high_score_image, self.high_score_rect)\n self.screen.blit(self.level_image, self.level_rect)\n self.ships.draw(self.screen)", "def update(self):\n while not self.stopped:\n time.sleep(0.01)\n self.grab_image()", "def tick (self):\n\t\n\t\tself.display.clear ()\n\t\tself.draw ()\n\t\tfor sprite in self.sprites:\n\t\t\tsprite.drawToDisplay (self.display)\n\t\tself.display.showFrame ()", "def update(self):\n self.moving_sprites.update() \n self.static_sprites.update()\n self.camera.update(self.player)", "def draw_end(self, screen):\n screen.fill(BLACK) \n game_over_pic = pygame.transform.scale(pygame.image.load('game_over_mushroom.jpg').convert(), [350, 350])\n screen.blit(game_over_pic, (SCREEN_W_MID-175, SCREEN_H_MID-175))", "def flip(self):\n self.width, self.height = self.height, self.width", "def draw(self):\n\n self.squares.draw(self.screen)\n if not self.hide_grid:\n self.draw_grid()\n self.fleas.draw(self.screen)\n pygame.display.flip()", "def update_screen(wof_settings,screen,walls,hero,diamonds,bombs,explosions,inkblots,deaths):\n screen.fill(wof_settings.bg_color)\n walls.draw(screen) \n for bomb in bombs.sprites():\n bomb.draw_bomb()\n\n for explosion in explosions.sprites():\n explosion.draw_explosion()\n \n diamonds.draw(screen)\n \n for inkblot in inkblots.sprites():\n inkblot.draw_inkblot()\n \n hero.blitme()\n\n for death in deaths.sprites():\n death.draw_death()\n \n pygame.display.flip()", "def display_pygame():\n sprite_group.clear(screen, eraser_image)\n sprite_group.draw(screen)\n pygame.display.update()", "def replay(self):\n self.shown = False\n self.mine = False\n self.flag = False\n self.inPlay = True\n self.count = 0\n self.numFlags = 0\n self.configure(image=Tile.images[10])" ]
[ "0.7323385", "0.73144567", "0.72792417", "0.7079508", "0.70134467", "0.70087224", "0.6951003", "0.6883352", "0.68315375", "0.68152404", "0.67967147", "0.67831165", "0.675435", "0.6707523", "0.6706954", "0.66996604", "0.66944337", "0.6660561", "0.6639409", "0.65874845", "0.6579756", "0.6566093", "0.6538289", "0.65218306", "0.64871275", "0.6479991", "0.647573", "0.6470537", "0.6463609", "0.64500535", "0.6440078", "0.64333713", "0.6423059", "0.64114004", "0.63882935", "0.6360419", "0.63559157", "0.63502437", "0.63332653", "0.63141596", "0.63006294", "0.628122", "0.62672853", "0.6260598", "0.62465024", "0.62435114", "0.62386423", "0.62315965", "0.6221814", "0.6215612", "0.6209494", "0.6209229", "0.6206978", "0.61825407", "0.61813277", "0.6165595", "0.6164104", "0.6159933", "0.61550343", "0.61419904", "0.61366844", "0.61193013", "0.6115776", "0.61045784", "0.6100446", "0.60921836", "0.60890394", "0.60804063", "0.6079151", "0.6078078", "0.6076384", "0.60714275", "0.6054726", "0.60469186", "0.60419357", "0.60362947", "0.6025098", "0.60199517", "0.6014051", "0.60140157", "0.60116225", "0.6007753", "0.6007257", "0.5998346", "0.5996626", "0.5992879", "0.5992385", "0.5992385", "0.59892493", "0.5981362", "0.5975165", "0.5974752", "0.59738255", "0.59607005", "0.5952064", "0.5944691", "0.59445953", "0.5939451", "0.59312725", "0.5930492" ]
0.6261121
43
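For context, a minimal sketch of the kind of pygame helper that the "Update images on the screen and flip to the new screen." record above describes, assuming ai_settings exposes a bg_color tuple and ship implements blitme() (these names are illustrative assumptions, not the dataset's stored positive document, which keeps only the function signature):

import pygame


def update_screen(ai_settings, screen, ship):
    # Redraw the background on each pass through the main loop.
    screen.fill(ai_settings.bg_color)
    # Draw the ship at its current position on top of the background.
    ship.blitme()
    # Make the most recently drawn frame visible.
    pygame.display.flip()

Calling pygame.display.flip() last keeps partially drawn frames off-screen until the whole frame is complete.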
Provides list of github organization urls based on authenticated user.
def get_orgs():
    url = "https://api.github.com/user/orgs"
    org_urls = []
    orgs = utils.get_json(url)
    for org in orgs:
        org_urls.append(org["url"])
    return org_urls
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def urls(gh, user):\n return [repo.url for repo in getuserrepos(gh, user)]", "def get_organization_links(self):\n yield from self.get_resource_by_item(\"/orgs\")", "def repositories(self, user_name=None):\n user_name = user_name if user_name else self._auth[0]\n data = self._request('GET', 'users', user_name)\n return data.repositories\n #ret_val = []\n #for repository in data.repositories:\n # ret_val.append(repository.name)\n # #print 'repo', repository['name'] # can use as dict or as object\n #return ret_val", "def getuserrepos(gh, user):\n repos = list()\n pages = int(math.ceil(n_public_repos(gh, user) / float(R_PAGE)))\n for i in range(pages):\n # github index their pages from 1, hence the +1\n qs = user + \"/repos?page=\" + str(i + 1)\n repos.extend(gh.users(qs).get())\n return repos", "def user_repos(self, username: str) -> requests.Response:\n\n api_url = 'https://api.github.com/users/{username}/repos'\n url = api_url.format(username=username)\n response = requests.get(url)\n return response\n\n\n\n #user_url = self.user_info(username=username)\n #repos_url = user_url\n #pprint.pprint(repos_url)\n #url = repos_url['repos_url']\n #response = requests.get(url)\n #return response", "def organizations(self):\n return self.get('{}/orgs'.format(ApiVersion.A1.value))", "def _get_repo_list(self, *args, **kwargs): \r\n repo_list = kwargs['repositories'] if kwargs.get('repositories', None) else self.get_list(\r\n api_endpoint=settings.GITHUB_SETTINGS['GITHUB_USER_REPO_API'].format(**kwargs), **kwargs\r\n )\r\n for r in repo_list:\r\n if isinstance(r, dict):\r\n yield r['name']\r\n else:\r\n yield r", "def myorgs(request):\n context = RequestContext(request)\n \n user = request.user\n orgs = user.orgusers.get_query_set()\n \n context['orgs'] = orgs\n return render_to_response('myorgs.html', context)", "def get_organization_links_by_page(self):\n return self.get_resource_by_page(\"/orgs\")", "def list_public_repos():\n return Collaborator.objects.filter(user__username=settings.PUBLIC_ROLE)", "def get_repo_options(account, **kwargs):\n client = AsyncHTTPClient()\n uri = \"https://api.github.com/user/repos?per_page=100\"\n data = []\n while uri is not None:\n req = account.get_request(uri, headers={\"Accept\": \"application/vnd.github.moondragon+json\"})\n response = yield client.fetch(req)\n response_object = json.loads(response.body.decode('utf-8'))\n data += response_object\n links = parse_link_header(response.headers.get('Link', ''))\n uri = links.get('next', None)\n return [{\"title\": repo['full_name'], \"value\": repo['full_name']}\n for repo in data]", "def n_public_repos(gh, user):\n return getuser(gh, user).public_repos", "def get_repositories(github_user):\n\n if not github_user:\n return [1, {\"message\": \"GitHub username missing\"}]\n else:\n\n # build Request object\n request = urllib2.Request(\"https://api.github.com/users/\"\n + str(github_user) + \"/repos\")\n request.get_method = lambda: 'GET'\n try:\n '''try to send the request to the GitHub API and\n create Python dictionary from JSON response'''\n repositories = urllib2.urlopen(request)\n repositories = json.loads(\"\\n\".join(repositories.readlines()))\n\n return [0, repositories]\n\n except urllib2.HTTPError as e:\n\n # return HTTP error and the message from the API\n return [1, {\"message\": str(e) + \": \"\n + json.loads('\\n'.join(e.readlines()))['message']}]", "def _get_org_repos(self):\n url = f\"{BASE_URL}/orgs/{ORG}/repos\"\n return self.fetch_all_pages(url, flatten=True, query_params={\"per_page\": 100})", "def 
getContributors(auth):\n users = []\n r = requests.get(url='https://gist.github.com/paulmillr/2657075/',\n auth=auth)\n soup = BeautifulSoup(r.text, 'html.parser')\n users = [tr.select_one('a').text for tr in soup('tbody')[0].select('tr')]\n return users", "def get_repos_user(user='xmonader'):\n u = ghclient.get_user(login=user)\n repos = u.get_repos()\n repos_list = []\n for i in range(20):\n page = repos.get_page(i)\n if len(page) == 0:\n break\n repos_list.extend(repos.get_page(i))\n return repos_list", "def get_org_list():\r\n\r\n resp = requests.get(''.join([Kegg.BASE_URL, 'list/organism']))\r\n return resp.text", "async def All_orgs():\n\n links_13 = []\n links_14 = []\n valid_url = \"/?archive/?gsoc/\\d+[0-9]/orgs/[a-zA-Z]+\"\n for year in range(2009, 2016):\n year_url = melange + \"/archive/gsoc/{}\".format(year)\n soup = await get_page(year_url)\n\n for url in soup.find_all('a'):\n if re.match(valid_url, url.get(\"href\")):\n if year <= 2013:\n links_13.append(join(melange, url.get(\"href\")[1:]))\n else:\n links_14.append(join(melange, url.get(\"href\")[1:]))\n return links_13, links_14", "def get_repos(self):\n\n if self.url == 'test':\n repos = ['feature', 'dev', 'int']\n else:\n repos = []\n\n return repos", "def user_repositories(self, host: (str), user: (str)) -> Any:\n return search_api(\"user_repositories\", host, user)", "def users_organizations(user):\n if not user or not user.is_authenticated():\n return None\n else:\n return get_users_organizations(user)", "def do_list(client, args):\n\trepos = client.repos.list(args.user)\n\tprint '%s has the following repositories:' % args.user\n\tprint 'Name - Description'\n\tfor repo in repos:\n\t\tprint '%s - %s' % (repo.name, repo.description)", "def list_all_organizations(ctx):\n pprint(ctx.obj.orgs.get().data)", "def get_organizations_list_with_links(year_link):\n response = get_response(year_link)\n if response.ok:\n soup = BeautifulSoup(response.text, 'html.parser')\n orgs_li = soup.find_all(\n 'li', attrs={'class': 'organization-card__container'})\n orgs_dict = {}\n for orgs_html in orgs_li:\n org_name = orgs_html.select('h4')[0].text.replace('\\n', '')\n relative_link = orgs_html.select('a')[0].get('href')\n full_link = HOME_PAGE + relative_link\n orgs_dict[org_name] = full_link\n return orgs_dict\n else:\n print('Something Went Wrong')\n print(f'Status Code: {response.status_code}')\n sys.exit(1)", "def get(self, *args, **kwargs):\r\n url = '{0}/user/repositories/'.format(self.parent.get_url())\r\n return http.Request('GET', url), parsers.parse_json", "def query_repos(self):\n return [self.config[\"repo\"]]", "def get_repos(self):\n return requests.get(\"https://api.github.com/user/repos\",\n headers=self.headers).json", "def list_repositories(self):\n data = self._get_all_data('/user/repos')\n return [repo['full_name'] for repo in data]", "async def get_organizations(request: Request):\n redis = request.app.state.redis\n organizations_obj = orjson.loads(await redis.get_key(\"influxdb_organizations\"))\n return [org for org in organizations_obj]", "def get_members_repos(org_list):\n print(\"\\nGetting repositories of all members.\")\n jsonMembersRepo_list = []\n columns_list = [\n 'organization',\n 'user',\n 'full_name',\n 'fork',\n 'stargazers_count',\n 'forks_count',\n 'language',\n 'description'\n ]\n for org in org_list:\n print('\\nGetting members of', org)\n jsonMembers = load_json(\"https://api.github.com/orgs/\" + org +\n \"/members?per_page=100\")\n for member in jsonMembers:\n print('Getting repositories of', 
member['login'])\n jsonMembersRepos = load_json(\"https://api.github.com/users/\" +\n member['login'] +\n \"/repos?per_page=100\")\n for repo in jsonMembersRepos:\n # Add fields to make CSV file more usable\n repo['organization'] = org\n repo['user'] = member['login']\n # Python 2: Using smart_str to deal with encodings\n repo['description'] = smart_str(repo['description'])\n jsonMembersRepo_list.append(repo)\n generate_csv(\"members-list\", jsonMembersRepo_list, columns_list)", "def fake_get_user_repo_names_and_locations(self):\n self._get_user_repo_names_and_locations()", "def fetch_gh_org_collaborators(self):\n for config in self.config.get('org.permissions.org_integrity.orgs'):\n host, org = config['url'].rsplit('/', 1)\n for aff in config.get('collaborator_types', GH_ALL_COLLABORATORS):\n url_hash = get_sha256_hash([config['url']], 10)\n json_file = f'gh_{aff}_collaborators_{url_hash}.json'\n path = ['permissions', json_file]\n description = (\n f'{aff.title()} collaborators of the {org} GH org'\n )\n self.config.add_evidences(\n [RawEvidence(path[1], path[0], DAY, description)]\n )\n with raw_evidence(self.locker, '/'.join(path)) as evidence:\n if evidence:\n if host not in self.gh_pool:\n self.gh_pool[host] = Github(base_url=host)\n if not config.get('repos'):\n repos = self.gh_pool[host].paginate_api(\n f'orgs/{org}/repos'\n )\n config['repos'] = [repo['name'] for repo in repos]\n collabs = {}\n for repo in config['repos']:\n collabs_url = f'repos/{org}/{repo}/collaborators'\n collabs[repo] = self.gh_pool[host].paginate_api(\n collabs_url, affiliation=aff\n )\n evidence.set_content(json.dumps(collabs))", "def organizations(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"organizations\")", "def organization_list(request):\n return [o.slug for o in Organization.objects.all()]", "def list_orgs(self):\n orgs = list(self.orgs.keys())\n orgs.sort()\n return orgs", "def list_members(orgs):\n members =[]\n \n \n if isinstance(orgs, list):\n #if list of orgs for each org get members list\n for url in orgs:\n #append /member to url - member_url is not valid canidate without a member list\n url = url + \"/members\"\n print(\"Checking \" + url)\n members_data = utils.get_json(url)\n\n for member in members_data:\n members.append(member[\"url\"])\n return members\n \n \n else:\n #build url from input org name and return member list\n url = \"https://api.github.com/orgs/\" + orgs + \"/members\"\n members_data = utils.get_json(url)\n \n #check for invalid GitHub credentials or invalid github org name\n try:\n for member in members_data:\n members.append(member[\"url\"])\n return members\n except TypeError:\n if(members_data[\"message\"] == \"Not Found\"):\n print(\"That organization doesn't exist try again\\n\")\n raise SystemExit\n elif(members_data[\"message\"] == \"Bad credentials\"):\n print(\"Please verify GitHub credentials are correct in config.py\")\n raise SystemExit\n else:\n print (members_data)\n raise SystemExit", "def owner_project_from_github_url(url):\n if not re.match('https://github.com/([a-zA-Z0-9-_]*)/[a-zA-Z0-9-_]*', url):\n print(str(url) + \"is not a valid url!\")\n exit(-1)\n elements = url.split('/')\n project_name = elements[-1]\n organization_name = elements[-2]\n return (organization_name, project_name)", "def get_starred_repos(org_list):\n print(\"\\nGetting repositories starred by members.\")\n jsonMembersStarred_list = []\n columns_list = [\n 'organization',\n 'user',\n 'full_name',\n 'html_url',\n 'language',\n 'description'\n ]\n for 
org in org_list:\n print('\\nGetting members of', org)\n jsonMembers = load_json(\"https://api.github.com/orgs/\" + org +\n \"/members?per_page=100\")\n for member in jsonMembers:\n print('Getting starred repositories of', member['login'])\n jsonStarred = load_json(\"https://api.github.com/users/\" +\n member['login'] +\n \"/starred?per_page=100\")\n for repo in jsonStarred:\n repo['organization'] = org\n repo['user'] = member['login']\n # Python 2: Using smart_str to deal with encodings\n repo['description'] = smart_str(repo['description'])\n jsonMembersStarred_list.append(repo)\n generate_csv(\"starred-list\", jsonMembersStarred_list, columns_list)", "def get_repositories(self):\n \n endpoint = 'repositories'\n parameters = [('pagelen', '100')]\n \n if len(self.organization):\n endpoint += f'/{self.organization}' \n parameters.append(('role', 'contributor')) \n else: \n parameters.append(('role', 'owner'))\n \n repositories_raw_data = self.__request_api(f'{self.base_url}{endpoint}?{urllib.parse.urlencode(parameters)}', method='GET')\n repositories = []\n has_next_page = True\n \n while has_next_page:\n for datum in repositories_raw_data['values']:\n clone_url = None\n for link in datum['links']['clone']:\n if link['name'] == 'ssh':\n clone_url = link['href']\n break\n \n project_name = None\n if \"name\" in datum['project']:\n project_name = datum['project']['name']\n \n repositories.append(VcsRepository(datum['slug'], datum['description'], clone_url, datum['is_private'], project_name))\n \n has_next_page = \"next\" in repositories_raw_data\n \n if has_next_page: \n repositories_raw_data = self.__request_api(repositories_raw_data[\"next\"], method='GET')\n\n return repositories", "def test_getorgs(self):\n pass", "def load_urls(self, view_in_browser):\n config = self.get_github_config_path(self.CONFIG_URL)\n parser = configparser.RawConfigParser()\n with open(config) as config_file:\n try:\n parser.read_file(config_file)\n except AttributeError:\n parser.readfp(config_file)\n urls = parser.get(self.CONFIG_URL_SECTION,\n self.CONFIG_URL_LIST)\n urls = urls.strip()\n excludes = ['[', ']', \"'\"]\n for exclude in excludes:\n urls = urls.replace(exclude, '')\n if not view_in_browser:\n urls = urls.replace('https://github.com/', '')\n return urls.split(', ')", "def getUrls(self):\n # in case you need to move from a read only Url to a writeable one, here it gets replaced\n repopath = self.repositoryUrl().replace(\"[git]\", \"\")\n repoString = utils.replaceVCSUrl(repopath)\n [repoUrl, repoBranch, repoTag] = utils.splitVCSUrl(repoString)\n if not repoBranch and not repoTag:\n repoBranch = \"master\"\n print(\"|\".join([repoUrl, repoBranch, repoTag]))\n return True", "def list_repos(self):\n return sorted(self.user_con.list_repos())", "def get_repos(github_id):\r\n\r\n url = 'https://api.github.com/users/{}/repos'.format(github_id)\r\n response = requests.get(url)\r\n todos = json.loads(response.text)\r\n\r\n repo_list = []\r\n \r\n for data in todos:\r\n repo_list.append(data['name'])\r\n\r\n return repo_list", "def get_repos(org_list):\n jsonRepos = []\n for org in org_list:\n print(\"\\nScraping repositories of\", org)\n jsonRepo = load_json(\"https://api.github.com/orgs/\" + org +\n \"/repos?per_page=100\")\n for repo in jsonRepo:\n # Add field for org to make CSV file more useful\n repo['organization'] = org\n # Python 2: Using smart_str to deal with encodings\n repo['description'] = smart_str(repo['description'])\n jsonRepos.append(repo)\n # Create a list with the items I'm interested in, 
then call generate_csv\n columns_list = [\n 'organization',\n 'name',\n 'full_name',\n 'stargazers_count',\n 'language',\n 'created_at',\n 'updated_at',\n 'homepage',\n 'fork',\n 'description'\n ]\n generate_csv(\"repo-list\", jsonRepos, columns_list)", "def get_all_repos(oauth_token, repo_name):\n print('Checking if the user can access the repo...')\n g = Github(oauth_token)\n print('\\tRunning as ' + g.get_user().name)\n all_repos = g.get_user().get_repos()\n all_repos_full_name = []\n for test in list(all_repos):\n all_repos_full_name.append(test.full_name)\n if repo_name in all_repos_full_name:\n return True\n else:\n print('Repo \"' + repo_name + '\" is not in the list')\n print('Here\\'s a list of all repos I can see:')\n for each_repo in all_repos:\n print('\\t' + each_repo.full_name)\n return False", "def _urls(*, repository, commit, mirrors):\n result_with_nulls = [\n _format_url(\n pattern = x,\n repository = repository,\n commit = commit,\n )\n for x in mirrors.get(\"github\")\n ]\n return [\n url\n for url in result_with_nulls\n if url != None\n ]", "def user_details():\n url = 'https://api.github.com/orgs/facebook/repos'\n json_obj = urllib2.urlopen(url)\n userdata = json.load(json_obj)\n if 'error' in userdata:\n print 'errors are scanned in data'\n for data in userdata:\n if 'name' in data:\n if data['name'] == 'codemod':\n print 'language used'\n print data['language']\n print 'number of watchers'\n print data['watchers']\n print 'git url'\n print data['git_url']\n print 'open issues'\n print data['open_issues']\n print 'permissions for user'\n print 'push'\n print data['permissions']['push']\n print 'pull'\n print data['permissions']['pull']", "def _list_orgs(self, context):\r\n try:\r\n rtn = {'context': context,\r\n 'orgs': sorted(list(self._bbreader.cache[context].keys()))}\r\n except KeyError:\r\n raise RequestError('Context {} not found'.format(context))\r\n return rtn", "def collect_org_repos(self):\n log.info(\"GHUB\", \"Collecting org repos.\")\n raw_repos = self._get_org_repos()\n preprocessed_repos = self._preprocess_repos(raw_repos)\n parsed_repos = json_reducer.reduce(REPOS_SCHEMA, preprocessed_repos)\n result = []\n for repo in parsed_repos:\n result.append(repo)\n return result", "def get_repos_lists(self, repo_information, manager_list):\n\n Settings.repositories.append(repo_information)\n\n print(\n \"Extracting, cleaning and formating of domains and ips from %s\"\n % repo_information[\"name\"],\n end=\" \",\n )\n\n url_to_get_base = Settings.raw_link % repo_information[\"name\"]\n clean_url = \"%sclean.list\" % url_to_get_base\n non_clean_url = \"%sdomains.list\" % url_to_get_base\n\n req_clean = get(clean_url)\n\n if req_clean.status_code == 200:\n manager_list.append(self.__get_whitelisted_list(req_clean.text))\n print(Settings.done)\n return None\n\n req_non_clean = get(non_clean_url)\n\n if req_non_clean.status_code == 200:\n manager_list.append(self.__get_whitelisted_list(req_non_clean.text))\n print(Settings.done)\n return None\n\n print(Settings.error)\n raise Exception(\"Unable to get a list from '%s'\" % repo_information[\"name\"])", "def listOrganizations(self, name='', type=''):\n return self.get_json('/organization', {'name': name, 'type': type})", "def list_collaborator_repos(self):\n user = User.objects.get(username=self.username)\n\n return Collaborator.objects.filter(user=user)", "def repolist(orgname, refresh=True):\n filename = os.path.join(SETTINGS[\"folder\"], orgname.lower()) + \"/repodata.json\"\n if not refresh and 
os.path.isfile(filename):\n repodata = json.loads(open(filename, \"r\").read()) # read cached data\n else:\n endpoint = \"/orgs/\" + orgname.lower() + \"/repos?per_page=100\"\n repodata = github_allpages(endpoint=endpoint)\n dicts2json(repodata, filename)\n print(\n f\"\\r{orgname} - {len(repodata)} total public non-forked repos found\"\n + 60 * \" \"\n )\n\n return sorted(\n [\n (repo[\"name\"].lower(), repo[\"size\"])\n for repo in repodata\n if not repo[\"private\"] and not repo[\"fork\"]\n ]\n )", "def get_repos():\n response = requests.get('https://quay.io/api/v1/repository?public=true&namespace=ucsc_cgl')\n repo_data = json.loads(response.text)\n assert response.status_code == 200, 'Quay.io API request to view repositories failed.'\n repos = {str(x[u'name']) for x in repo_data['repositories']}\n return repos", "def gists_for_user(username):\n\n rs = []\n\n def loop(url=None):\n # build the url\n per_page = 2\n page = 1\n gists_url = url or 'https://api.github.com/users/{username}/gists?per_page={per_page}&page={page}'.format(per_page=per_page, page=page, username=username)\n gists_url += \"&client_id=%s&client_secret=%s\" %(\"3cc58ae648e5bfd676cf\", \"f908a9ddcebdbc36d38c1fe902f98cb12d15c44c\")\n\n try:\n response = requests.get(gists_url)\n except requests.exceptions.Timeout as e:\n time.sleep(1)\n response = requests.get(url)\n except requests.exceptions.HTTPError as err:\n response = type('lamdbaobject', (object,), {})()\n response.ok = false\n \n if response.ok:\n # grab the data response\n rs.extend(response.json())\n\n if response.headers.get('Link', None):\n # parse the for the next url\n links = response.headers['Link'].split(',')\n \n # extra the url\n next_url = re.match('<(.*)>; rel=\"next\"', links[0])\n\n # BONUS: What about huge gists?\n if next_url:\n gists_url = next_url.groups()[0]\n loop(gists_url)\n\n else:\n rs.extend(response.json())\n\n else:\n # BONUS: Handle invalid users?\n return response.json()\n\n return rs\n\n return loop()", "def get_public_repos(self, max_repos=DEFAULT_MAX_PUBLIC_REPOS):\n since = 0\n repo_count = 0\n repos = []\n while repo_count < max_repos:\n temp = self.process_repo(self.get_full_url(ALL_REPO_LIST.format(since=since)), True)\n repos.extend(temp)\n repo_count = len(repos) #TODO count if repos <= max_repos\n print 'repos =', len(repos), 'temp=', len(temp)\n since = temp[-1]['id']\n\n return repos", "def list(self) -> List[Organisation]:\n ...", "def org_info(self):\n\n response = self.postman.request('info')\n\n if (response.status_code == requests.codes.ok):\n data = response.json()\n\n self.repos = data['public_repos']\n self.created = data['created_at']\n self.updated = data['updated_at']\n\n self.repo_info()\n self.member_info()", "def AuthorURLs(entry):\n a_URLs = ''\n for a in entry.getAuthors():\n url = a.get('homepage', ' ')\n a_URLs += \"%s and \" % url\n return a_URLs[:-5]", "def do_repo_list(self):\n return StringResult(self._repo_list.format_available_repos())", "def get_org_admins(self, dataset: Dict) -> List[User]:\n organization_id = dataset[\"organization_id\"]\n orgadmins = list()\n organization = self.organizations[organization_id]\n if \"admin\" in organization:\n for userid in self.organizations[organization_id][\"admin\"]:\n user = self.users.get(userid)\n if user:\n orgadmins.append(user)\n return orgadmins", "def organizations(self):\n self.elements('organizations')", "def organizations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"organizations\")", "def 
organizations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"organizations\")", "def grab_project_links(soup):\n project_urls = []\n valid_project_url = \"/?archive/?gsoc/\\d+[0-9]/orgs/[a-zA-Z]+/[a-zA-Z]+/[a-zA-Z]+.html\"\n try:\n # Grab links to all the projects\n all_link = soup.find_all(\"a\")\n for link in all_link:\n if re.match(valid_project_url, link.get(\"href\")):\n project_urls.append(join(melange, link.get(\"href\")[1:]))\n except TypeError:\n print(link)\n\n return project_urls", "def get_repos():\n\n return __do_get_repos()", "def listRepositories(self):\n return self.mini_catalog.listRepositories()", "def get_all_contributors(request):\n response_dict = {}\n for project in constants.ACTIVE_REPOSITORIES:\n try:\n api_response = requests.get(\n constants.GITHUB_API_GET_CONTRIBUTORS_URL.format(project_name=project)\n )\n api_response_json = api_response.json()\n # if authentication fails\n if api_response.status_code == 401:\n raise Exception(\"Authentication fails. Invalid github access token.\")\n for contributor in api_response_json:\n if contributor['type'] != 'User':\n continue\n result = ContributorResponse(\n username=contributor['login'],\n url=contributor['html_url'],\n avatar_url=contributor['avatar_url'],\n contributions=contributor['contributions'],\n repository_name=[project],\n )\n if result.username in response_dict.keys():\n response_dict[result.username]['contributions'] += result.contributions\n response_dict[result.username]['repository_name'].append(project)\n else:\n response_dict[result.username] = result.to_json()\n except Exception:\n return DOWNSTREAM_ERROR_RESPONSE\n response = sorted(response_dict.values(), key=lambda x: x['contributions'], reverse=True)\n return Response(response)", "def get_organization_url(self, organization: Dict):\n return f\"{self.site_url}/organization/{organization['name']}\"", "def links(self):\r\n return links.RepoLinks(self)", "def github_setup(request, integration_test_setup):\n repo_owner_type = request.param[0]\n repo_type = request.param[1]\n git_command = request.param[2]\n configholder = request.param[3]\n target = request.param[4]\n return get_github_repos(\n repo_owner_type, repo_type, git_command, configholder, target\n )", "def fetch_repos(self):\n for repo in self.json_repos['repos']:\n title = str(repo[\"title\"])\n repo_url = str(repo['repo'])\n self.repos[title] = repo_url", "def _parse_url(repo_url: str) -> List[str]:\n try:\n return re.findall(r\"github\\.com/([^/]+)/([^\\/?]+)\", repo_url, re.I)[0]\n except IndexError:\n raise AnalyzerError(\"Incorrect repository URL\")", "def getURLs():", "def _orgsWithLogoForQuery(query, batch_size=5):\n orgs = []\n for org in query:\n if org.logo_url:\n orgs.append(org)\n if len(orgs) == batch_size:\n break\n\n return orgs", "def _get_repo_contributors(self, owner, repo):\n url = f\"{BASE_URL}/repos/{owner}/{repo}/contributors\"\n return self.fetch_all_pages(url, flatten=True, query_params={\"per_page\": 100})", "def test_retrieve_l_organizations(self):\n pass", "def get(self):\n authenticated_user_id = token_auth.current_user()\n orgs_dto = OrganisationService.get_organisations_managed_by_user_as_dto(\n authenticated_user_id\n )\n if len(orgs_dto.organisations) < 1:\n return {\n \"Error\": \"User is not a manager of the project\",\n \"SubCode\": \"UserPermissionError\",\n }, 403\n\n search_dto = self.setup_search_dto()\n admin_projects = ProjectAdminService.get_projects_for_admin(\n authenticated_user_id,\n 
request.environ.get(\"HTTP_ACCEPT_LANGUAGE\"),\n search_dto,\n )\n return admin_projects.to_primitive(), 200", "def users():\n access_token = session['access_token']\n return \"%s\" % list_users(access_token)", "def get_urls(root):\n urls = []\n classes = \"|\".join([\"msl_organisation_list\", \"view-uclu-societies-directory\",\n \"atoz-container\", \"listsocieties\", \"block-og-menu\"])\n\n req = requests.get(root, headers) # , cookies=cookies)\n soup = BeautifulSoup(req.content, 'html.parser')\n main = soup.find(['div', 'ul', 'section'], class_=re.compile(classes))\n\n for a in main.find_all('a', href=True):\n url = a['href']\n if url.startswith(\"/\"):\n urls.append(domain + url)\n\n if url.startswith(\"https://society.tedu.edu\"):\n urls.append(url)\n\n urls = list(dict.fromkeys(urls))\n return urls", "def api_scrape_url():\n if 'working_repo' in session:\n meta_data = get_tags(request.args['url'])\n return jsonify(msg=\"success\", data=meta_data)\n else:\n return jsonify(msg=\"failure, unauthorized\"), 401", "def get_repos(self):\n\t\tsession = self.login()\n\t\titems = session.query(Repos)\n\t\tresponse = [row2dict(item) for item in items]\n\n\t\tself.logout(session)\n\t\treturn response", "def users(self,org_id=None):\n if org_id is None:\n org_id = self.org_id\n return self.get('{}/orgs/{}/users'.format(ApiVersion.A1.value,org_id))", "def repo_list(self):\n\n data, _ = self.helm_client.repo_list()\n return data", "async def github(self, ctx: commands.Context, *, path: str):\n user, _, repo = path.replace(' ', '/', 1).partition('/')\n if repo:\n async with self.bot.session.get(\n f\"https://api.github.com/repos/{user}/{repo}\",\n headers={\"Authorization\": f\"token {self.config['token']}\"}\n ) as r:\n data = await r.json()\n embed = discord.Embed(\n title=data['full_name'],\n description=f\"stars: {data['stargazers_count']} forks: {data['forks_count']}\\n\"\n f\"language: {data['language']} license: {data['license']['name'] if data['license'] else 'no'}\\n\"\n +(f\"homepage: {data['homepage']}\" if data['homepage'] else ''),\n url=data['html_url']\n ).set_author(\n name=data['owner']['login'],\n url=data['owner']['html_url'],\n icon_url=data['owner']['avatar_url']\n ).set_thumbnail(\n url=data['owner']['avatar_url']\n ).add_field(\n name=\"Description\",\n value=data['description']\n )\n await ctx.send(embed=embed)\n else:\n async with self.bot.session.get(\n f\"https://api.github.com/users/{user}\",\n headers={\"Authorization\": f\"token {self.config['token']}\"}\n ) as r:\n data = await r.json()\n embed = discord.Embed(\n title=f\"{data['name']} ({data['login']})\",\n description=f\"repos: {data['public_repos']} gists: {data['public_gists']}\\n\"\n f\"followers: {data['followers']} following: {data['following']}\\n\"\n f\"location: {data['location']}\",\n url=data['html_url']\n ).set_thumbnail(\n url=data['avatar_url']\n ).add_field(\n name=\"Bio\",\n value=data['bio']\n ).add_field(\n name=\"Contact\",\n value=''.join([\n (f\"email: [{data['email']}](mailto:{data['email']})\\n\" if data['email'] else ''),\n (f\"twitter: [{data['twitter_username']}](https://twitter.com/{data['twitter_username']})\\n\" if data['twitter_username'] else ''),\n (f\"company: {data['company']}\\n\" if data['company'] else ''),\n \n ]) or 'no contact avalible'\n ).set_footer(\n text=f\"id: {data['id']}\"\n )\n await ctx.send(embed=embed)", "def user_name_urls(self):\n raise NotImplementedError", "def repositories(self) -> 
Optional[pulumi.Input[Sequence[pulumi.Input['ConfigurationServiceGitRepositoryArgs']]]]:\n return pulumi.get(self, \"repositories\")", "def get_github_repos(\n repo_owner_type, repo_type, git_command, configholder, dest\n):\n os.chdir(dest)\n username = configholder.get_config_value(\"GITHUB_USERNAME\")\n url = (\n \"https://api.github.com/user/repos\"\n if repo_owner_type == GitHub.OWN_CMD_ARG_NAME\n else f\"https://api.github.com/users/{username}/repos\"\n )\n auth = GitHubAuth(configholder.get_config_value(\"GITHUB_API_TOKEN\"))\n response = requests.get(url, auth=auth, params={\"type\": repo_type})\n for repo in response.json():\n git_command(repo[\"svn_url\"], repo[\"name\"])\n os.chdir(\"..\")\n\n return (repo_owner_type, repo_type)", "def retrieve_author_url(name):\n response = requests.get('https://api.github.com/search/users', {'q': name})\n data = json.loads(response.text)\n if data.get('total_count', 0) > 0:\n return data['items'][0]['html_url']\n else:\n print \"--- ERROR: no author URL retrieved for '{0}' ---\".format(\n response.url)\n return name", "def get_repo_ids(self, repo_names: List[str]) -> List[int]:\n global_access_token = self.get_global_access_token()\n headers = {\n \"Authorization\": f\"token {global_access_token}\",\n }\n headers.update(apps.APP_PREVIEW_HEADERS)\n page_number = 1\n repo_ids = []\n while True:\n url = f\"https://api.github.com/installation/repositories?page={page_number}\"\n\n response = requests.get(url=url, headers=headers)\n if response.status_code != 200:\n raise Exception(\n \"Failed to get fetch repositories. \"\n f\"Status code: {response.status_code} \"\n f\"Response: {response.json()} \"\n )\n if not response.json()[\"repositories\"]:\n # Break the while loop if page doesn't have repositories.\n break\n\n for repo in response.json()[\"repositories\"]:\n if repo[\"name\"] in repo_names:\n repo_ids.append(repo[\"id\"])\n\n # Go to next page\n page_number += 1\n\n if len(repo_names) != len(repo_ids):\n raise Exception(\"Github app doesn't have scope for all the repos\")\n\n return repo_ids", "def get_members_info(org_list):\n print(\"\\nGetting user information of all members.\")\n jsonMembersInfo_list = []\n columns_list = [\n 'organization',\n 'login',\n 'name',\n 'url',\n 'type',\n 'company',\n 'blog',\n 'location',\n 'email'\n ]\n for org in org_list:\n print('\\nGetting members of', org)\n jsonMembers = load_json(\"https://api.github.com/orgs/\" + org +\n \"/members?per_page=100\")\n for member in jsonMembers:\n print(\"Getting user information for\", member[\"login\"])\n jsonMember = load_json(\"https://api.github.com/users/\" +\n member[\"login\"] + \"?per_page=100\",\n memberscrape=True)\n # Add field to make CSV file more usable\n jsonMember[\"organization\"] = org\n # Python 2: Using smart_str to deal with encodings\n jsonMember[\"location\"] = smart_str(jsonMember['location'])\n jsonMember[\"name\"] = smart_str(jsonMember['name'])\n jsonMember[\"company\"] = smart_str(jsonMember['company'])\n jsonMember[\"email\"] = smart_str(jsonMember['email'])\n jsonMembersInfo_list.append(jsonMember)\n generate_csv(\"members-info\", jsonMembersInfo_list, columns_list)", "def organization_url(organization, user_key=API_KEY):\n return 'http://api.crunchbase.com/v/2/organization/%s?user_key=%s' % (organization, user_key)", "def showORGusers(**kwargs):\n sessiontoken = kwargs['sessiontoken']\n ORG_ID = kwargs['ORG_ID']\n strCSPProdURL = kwargs['strCSPProdURL']\n jsonResponse = get_csp_users_json(strCSPProdURL, ORG_ID, sessiontoken)\n if 
jsonResponse == None:\n print(\"API Error\")\n sys.exit(1)\n\n users = jsonResponse['results']\n table = PrettyTable(['First Name', 'Last Name', 'User Name'])\n for i in users:\n table.add_row([i['user']['firstName'],i['user']['lastName'],i['user']['username']])\n print (table.get_string(sortby=\"Last Name\"))", "def list_repos_cli(api_client, path_prefix, next_page_token):\n content = ReposApi(api_client).list(path_prefix, next_page_token)\n click.echo(pretty_format(content))", "def ListOrganizations(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def org(\n organisation: str, threads: int = 2, uname: Optional[str] = typer.Argument(None)\n):\n usernames = github.find_users_from_organisation(\n organisation=organisation, uname=uname\n )\n result = []\n for username in usernames:\n result.append(github.find_email_from_username(username=username))\n typer.echo(result)", "def test_organizations_list(self):\n pass", "def github_url(self):\n return self.github.replace('.git', '')", "def reponames(gh, user):\n return [u.split('/')[-1] for u in urls(gh, user)]" ]
[ "0.77288854", "0.6896512", "0.6850855", "0.6795222", "0.6704334", "0.6566785", "0.64914256", "0.64815396", "0.64218557", "0.6373243", "0.6368361", "0.6260303", "0.6226184", "0.62181467", "0.6191352", "0.61800104", "0.61728805", "0.6149173", "0.6129998", "0.61161196", "0.61137915", "0.6098523", "0.6093026", "0.6072319", "0.60618544", "0.6056337", "0.60363215", "0.60300016", "0.5989639", "0.5975776", "0.59575576", "0.5951867", "0.58541244", "0.5841549", "0.58191574", "0.5815493", "0.580464", "0.5773067", "0.575898", "0.5724178", "0.5712385", "0.5711832", "0.5705263", "0.5691698", "0.56898797", "0.56775385", "0.567093", "0.56116945", "0.5606715", "0.5604395", "0.5592867", "0.55874914", "0.5582909", "0.55686927", "0.5566849", "0.55554384", "0.55545646", "0.5516528", "0.5502123", "0.5487013", "0.5481971", "0.5480126", "0.54766655", "0.54714245", "0.54714245", "0.5450378", "0.54420936", "0.54344666", "0.54297453", "0.5420962", "0.5384251", "0.5378394", "0.5371741", "0.5364752", "0.5364254", "0.5356517", "0.5352227", "0.5345279", "0.5323794", "0.5310796", "0.5308414", "0.5305344", "0.52894545", "0.5285396", "0.5284048", "0.5279936", "0.52737206", "0.5266099", "0.52659684", "0.5260109", "0.5237918", "0.5235537", "0.5233578", "0.5230334", "0.5225559", "0.52199423", "0.5216712", "0.52102834", "0.5209789", "0.52090013" ]
0.7619069
1
Provides a list of member URLs per organization. param orgs: either a list of URLs pointing to organizations or a single org name. return: list of member URLs
def list_members(orgs):
    members =[]

    if isinstance(orgs, list):
        #if list of orgs for each org get members list
        for url in orgs:
            #append /member to url - member_url is not valid canidate without a member list
            url = url + "/members"
            print("Checking " + url)
            members_data = utils.get_json(url)

            for member in members_data:
                members.append(member["url"])
        return members

    else:
        #build url from input org name and return member list
        url = "https://api.github.com/orgs/" + orgs + "/members"
        members_data = utils.get_json(url)

        #check for invalid GitHub credentials or invalid github org name
        try:
            for member in members_data:
                members.append(member["url"])
            return members
        except TypeError:
            if(members_data["message"] == "Not Found"):
                print("That organization doesn't exist try again\n")
                raise SystemExit
            elif(members_data["message"] == "Bad credentials"):
                print("Please verify GitHub credentials are correct in config.py")
                raise SystemExit
            else:
                print (members_data)
                raise SystemExit
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_orgs():\n \n url = \"https://api.github.com/user/orgs\"\n \n org_urls = []\n orgs = utils.get_json(url)\n \n for org in orgs:\n org_urls.append(org[\"url\"])\n \n return org_urls", "def organizations(self):\n return self.get('{}/orgs'.format(ApiVersion.A1.value))", "def get_organization_links(self):\n yield from self.get_resource_by_item(\"/orgs\")", "def _get_org_members(self):\n url = f\"{BASE_URL}/orgs/{ORG}/members\"\n return self.fetch_all_pages(url, flatten=True, query_params={\"per_page\": 100})", "def get_org_list():\r\n\r\n resp = requests.get(''.join([Kegg.BASE_URL, 'list/organism']))\r\n return resp.text", "def get_members_repos(org_list):\n print(\"\\nGetting repositories of all members.\")\n jsonMembersRepo_list = []\n columns_list = [\n 'organization',\n 'user',\n 'full_name',\n 'fork',\n 'stargazers_count',\n 'forks_count',\n 'language',\n 'description'\n ]\n for org in org_list:\n print('\\nGetting members of', org)\n jsonMembers = load_json(\"https://api.github.com/orgs/\" + org +\n \"/members?per_page=100\")\n for member in jsonMembers:\n print('Getting repositories of', member['login'])\n jsonMembersRepos = load_json(\"https://api.github.com/users/\" +\n member['login'] +\n \"/repos?per_page=100\")\n for repo in jsonMembersRepos:\n # Add fields to make CSV file more usable\n repo['organization'] = org\n repo['user'] = member['login']\n # Python 2: Using smart_str to deal with encodings\n repo['description'] = smart_str(repo['description'])\n jsonMembersRepo_list.append(repo)\n generate_csv(\"members-list\", jsonMembersRepo_list, columns_list)", "def get_organization_links_by_page(self):\n return self.get_resource_by_page(\"/orgs\")", "def list_all_organizations(ctx):\n pprint(ctx.obj.orgs.get().data)", "async def All_orgs():\n\n links_13 = []\n links_14 = []\n valid_url = \"/?archive/?gsoc/\\d+[0-9]/orgs/[a-zA-Z]+\"\n for year in range(2009, 2016):\n year_url = melange + \"/archive/gsoc/{}\".format(year)\n soup = await get_page(year_url)\n\n for url in soup.find_all('a'):\n if re.match(valid_url, url.get(\"href\")):\n if year <= 2013:\n links_13.append(join(melange, url.get(\"href\")[1:]))\n else:\n links_14.append(join(melange, url.get(\"href\")[1:]))\n return links_13, links_14", "def test_getorgs(self):\n pass", "def organizations(self):\n self.elements('organizations')", "def myorgs(request):\n context = RequestContext(request)\n \n user = request.user\n orgs = user.orgusers.get_query_set()\n \n context['orgs'] = orgs\n return render_to_response('myorgs.html', context)", "def list_orgs(self):\n orgs = list(self.orgs.keys())\n orgs.sort()\n return orgs", "def get_members_info(org_list):\n print(\"\\nGetting user information of all members.\")\n jsonMembersInfo_list = []\n columns_list = [\n 'organization',\n 'login',\n 'name',\n 'url',\n 'type',\n 'company',\n 'blog',\n 'location',\n 'email'\n ]\n for org in org_list:\n print('\\nGetting members of', org)\n jsonMembers = load_json(\"https://api.github.com/orgs/\" + org +\n \"/members?per_page=100\")\n for member in jsonMembers:\n print(\"Getting user information for\", member[\"login\"])\n jsonMember = load_json(\"https://api.github.com/users/\" +\n member[\"login\"] + \"?per_page=100\",\n memberscrape=True)\n # Add field to make CSV file more usable\n jsonMember[\"organization\"] = org\n # Python 2: Using smart_str to deal with encodings\n jsonMember[\"location\"] = smart_str(jsonMember['location'])\n jsonMember[\"name\"] = smart_str(jsonMember['name'])\n jsonMember[\"company\"] = 
smart_str(jsonMember['company'])\n jsonMember[\"email\"] = smart_str(jsonMember['email'])\n jsonMembersInfo_list.append(jsonMember)\n generate_csv(\"members-info\", jsonMembersInfo_list, columns_list)", "def organizations(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"organizations\")", "def list_members_of_organisation(\n self, organisation_id: OrganisationId\n ) -> List[Publisher]:\n ...", "def urls(gh, user):\n return [repo.url for repo in getuserrepos(gh, user)]", "async def get_organizations(request: Request):\n redis = request.app.state.redis\n organizations_obj = orjson.loads(await redis.get_key(\"influxdb_organizations\"))\n return [org for org in organizations_obj]", "def _get_org_repos(self):\n url = f\"{BASE_URL}/orgs/{ORG}/repos\"\n return self.fetch_all_pages(url, flatten=True, query_params={\"per_page\": 100})", "def organizations(self):\r\n return organizations.Organizations(self)", "def users(self,org_id=None):\n if org_id is None:\n org_id = self.org_id\n return self.get('{}/orgs/{}/users'.format(ApiVersion.A1.value,org_id))", "async def org_info_above_14(orgs_urls14):\n org_info_14 = []\n project_urls_from14 = []\n for url in orgs_urls14:\n try:\n soup = await get_page(url)\n org_name = basename(url)\n org_info = soup.find_all('p')\n web_page = org_info[1].text.splitlines()[-1].strip()\n mailing_list = org_info[2].text.split(\":\")[-1].strip()\n description = soup.find('div', {'class': 'main mdl-cell mdl-cell--8-col\\\n mdl-card mdl-shadow--4dp'})\n detail = description.find_all('p')[2].nextSibling\n org_info_14.append({'name': org_name, 'page': web_page,\n 'about': detail, 'mail': mailing_list,\n 'link': url})\n project_urls_from14.extend(grab_project_links(soup))\n except IndexError:\n print(url)\n\n return org_info_14, get_project_info(project_urls_from14)", "def get_organizations_list_with_links(year_link):\n response = get_response(year_link)\n if response.ok:\n soup = BeautifulSoup(response.text, 'html.parser')\n orgs_li = soup.find_all(\n 'li', attrs={'class': 'organization-card__container'})\n orgs_dict = {}\n for orgs_html in orgs_li:\n org_name = orgs_html.select('h4')[0].text.replace('\\n', '')\n relative_link = orgs_html.select('a')[0].get('href')\n full_link = HOME_PAGE + relative_link\n orgs_dict[org_name] = full_link\n return orgs_dict\n else:\n print('Something Went Wrong')\n print(f'Status Code: {response.status_code}')\n sys.exit(1)", "def get_repos(org_list):\n jsonRepos = []\n for org in org_list:\n print(\"\\nScraping repositories of\", org)\n jsonRepo = load_json(\"https://api.github.com/orgs/\" + org +\n \"/repos?per_page=100\")\n for repo in jsonRepo:\n # Add field for org to make CSV file more useful\n repo['organization'] = org\n # Python 2: Using smart_str to deal with encodings\n repo['description'] = smart_str(repo['description'])\n jsonRepos.append(repo)\n # Create a list with the items I'm interested in, then call generate_csv\n columns_list = [\n 'organization',\n 'name',\n 'full_name',\n 'stargazers_count',\n 'language',\n 'created_at',\n 'updated_at',\n 'homepage',\n 'fork',\n 'description'\n ]\n generate_csv(\"repo-list\", jsonRepos, columns_list)", "def _list_orgs(self, context):\r\n try:\r\n rtn = {'context': context,\r\n 'orgs': sorted(list(self._bbreader.cache[context].keys()))}\r\n except KeyError:\r\n raise RequestError('Context {} not found'.format(context))\r\n return rtn", "def get_contributors(org_list):\n print(\"\\nCreating list of contributors.\")\n jsonContributor_list = []\n graph = 
nx.DiGraph()\n columns_list = [\n 'organization',\n 'repository',\n 'login',\n 'contributions',\n 'html_url',\n 'url'\n ]\n for org in org_list:\n print('\\nScraping contributors of', org)\n jsonRepo = load_json(\"https://api.github.com/orgs/\" + org +\n \"/repos?per_page=100\")\n for repo in jsonRepo:\n # try...except to deal with empty repositories\n try:\n print(\"Getting contributors of\", repo[\"name\"])\n # First, add repo as a node to the graph\n graph.add_node(repo['name'], organization=org)\n # Then get a list of contributors\n jsonContributors = load_json(\"https://api.github.com/\"\n \"repos/\" + org + \"/\" +\n repo[\"name\"] +\n \"/contributors?per_page=100\")\n for contributor in jsonContributors:\n # Add each contributor as an edge to the graph\n graph.add_edge(contributor['login'], repo['name'],\n organization=org)\n # Prepare CSV and add fields to make it more usable\n contributor[\"organization\"] = org\n contributor[\"repository\"] = repo[\"name\"]\n jsonContributor_list.append(contributor)\n except:\n # if repository is empty inform user and pass\n print(\"Repository '\" + repo[\"name\"] + \"' returned an error,\"\n \"possibly because it's empty.\")\n pass\n generate_csv(\"contributor-list\", jsonContributor_list, columns_list)\n nx.write_gexf(graph, \"data/contributor-network_\" +\n time.strftime(\"%Y-%m-%d_%H:%M:%S\") + '.gexf')\n print(\"\\nSaved graph file: data/contributor-network_\" +\n time.strftime(\"%Y-%m-%d_%H:%M:%S\") + \".gexf\")", "def organizations(self):\n return sorted(set([team.org for team in self.teams]), key=lambda o: o.title)", "def get_members(organization):\n raise Exception(\"Someone needs to fix this method to no longer be dependent on model relationship if they're going to use it!\")", "def listOrganizations(self, name='', type=''):\n return self.get_json('/organization', {'name': name, 'type': type})", "def organizations_owned(self):\n return sorted(set([team.org for team in self.teams if team.org.owners == team]),\n key=lambda o: o.title)", "def fetch_gh_org_collaborators(self):\n for config in self.config.get('org.permissions.org_integrity.orgs'):\n host, org = config['url'].rsplit('/', 1)\n for aff in config.get('collaborator_types', GH_ALL_COLLABORATORS):\n url_hash = get_sha256_hash([config['url']], 10)\n json_file = f'gh_{aff}_collaborators_{url_hash}.json'\n path = ['permissions', json_file]\n description = (\n f'{aff.title()} collaborators of the {org} GH org'\n )\n self.config.add_evidences(\n [RawEvidence(path[1], path[0], DAY, description)]\n )\n with raw_evidence(self.locker, '/'.join(path)) as evidence:\n if evidence:\n if host not in self.gh_pool:\n self.gh_pool[host] = Github(base_url=host)\n if not config.get('repos'):\n repos = self.gh_pool[host].paginate_api(\n f'orgs/{org}/repos'\n )\n config['repos'] = [repo['name'] for repo in repos]\n collabs = {}\n for repo in config['repos']:\n collabs_url = f'repos/{org}/{repo}/collaborators'\n collabs[repo] = self.gh_pool[host].paginate_api(\n collabs_url, affiliation=aff\n )\n evidence.set_content(json.dumps(collabs))", "def organizations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"organizations\")", "def organizations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"organizations\")", "def test_success_with_all_orgs(self):\n auth_client = self.create_auth_client()\n all_orgs = ListOrgSerializer(Org.objects.all(), many=True)\n response = auth_client.get(self.search_org_api)\n 
self.assertEqual(response.data, all_orgs.data)", "def get_all_orgs():\r\n org_filter_set = set()\r\n if not has_configuration_set():\r\n return org_filter_set\r\n\r\n for value in settings.MICROSITE_CONFIGURATION.values():\r\n org_filter = value.get('course_org_filter')\r\n if org_filter:\r\n org_filter_set.add(org_filter)\r\n\r\n return org_filter_set", "def get_starred_repos(org_list):\n print(\"\\nGetting repositories starred by members.\")\n jsonMembersStarred_list = []\n columns_list = [\n 'organization',\n 'user',\n 'full_name',\n 'html_url',\n 'language',\n 'description'\n ]\n for org in org_list:\n print('\\nGetting members of', org)\n jsonMembers = load_json(\"https://api.github.com/orgs/\" + org +\n \"/members?per_page=100\")\n for member in jsonMembers:\n print('Getting starred repositories of', member['login'])\n jsonStarred = load_json(\"https://api.github.com/users/\" +\n member['login'] +\n \"/starred?per_page=100\")\n for repo in jsonStarred:\n repo['organization'] = org\n repo['user'] = member['login']\n # Python 2: Using smart_str to deal with encodings\n repo['description'] = smart_str(repo['description'])\n jsonMembersStarred_list.append(repo)\n generate_csv(\"starred-list\", jsonMembersStarred_list, columns_list)", "def test_retrieve_l_organizations(self):\n pass", "def generate_memberships(org_list):\n print(\"\\nGenerating network of memberships.\")\n graph = nx.DiGraph()\n for org in org_list:\n jsonMembers = load_json(\"https://api.github.com/orgs/\" + org +\n \"/members?per_page=100\")\n for member in jsonMembers:\n print(\"Getting membership of\", member['login'])\n graph.add_node(member['login'], node_type='user')\n jsonMemberships = load_json(\"https://api.github.com/users/\" +\n member['login'] + \"/orgs?per_page=100\")\n for organization in jsonMemberships:\n graph.add_edge(member['login'], organization['login'],\n node_type='organization')\n nx.write_gexf(graph, 'data/membership-network_' +\n time.strftime(\"%Y-%m-%d_%H:%M:%S\") + '.gexf')\n print(\"\\nSaved graph file: data/membership-network_\" +\n time.strftime(\"%Y-%m-%d_%H:%M:%S\") + \".gexf\")", "def test_organizations_list(self):\n pass", "def usgs_URL_helper(build_url, config, args):\n # initiate url list for usgs data\n urls_usgs = []\n # call on state acronyms from common.py (and remove entry for DC)\n state_abbrevs = abbrev_us_state\n state_abbrevs = {k: v for (k, v) in state_abbrevs.items() if k != \"DC\"}\n # replace \"__aggLevel__\" in build_url to create three urls\n for c in config['agg_levels']:\n # at national level, remove most of the url\n if c == 'National':\n url = build_url\n url = url.replace(\"__stateAlpha__/\", \"\")\n url = url.replace(\"&wu_area=__aggLevel__\", \"\")\n url = url.replace(\"&wu_county=ALL\", \"\")\n urls_usgs.append(url)\n else:\n # substitute in state acronyms for state and county url calls\n for d in state_abbrevs:\n url = build_url\n url = url.replace(\"__stateAlpha__\", d)\n url = url.replace(\"__aggLevel__\", c)\n urls_usgs.append(url)\n return urls_usgs", "def organization_list(request):\n return [o.slug for o in Organization.objects.all()]", "def _orgsWithLogoForQuery(query, batch_size=5):\n orgs = []\n for org in query:\n if org.logo_url:\n orgs.append(org)\n if len(orgs) == batch_size:\n break\n\n return orgs", "def list(self) -> List[Organisation]:\n ...", "def get_org_admins(self, dataset: Dict) -> List[User]:\n organization_id = dataset[\"organization_id\"]\n orgadmins = list()\n organization = self.organizations[organization_id]\n if \"admin\" in 
organization:\n for userid in self.organizations[organization_id][\"admin\"]:\n user = self.users.get(userid)\n if user:\n orgadmins.append(user)\n return orgadmins", "def Run(self, args):\n orgs_client = organizations.Client()\n return orgs_client.List(limit=args.limit, page_size=args.page_size)", "def acceptedOrgs(self, request, access_type,\n page_name=None, params=None, filter=None, **kwargs):\n\n from soc.modules.ghop.views.models.organization import view as org_view\n\n logic = params['logic']\n\n program_entity = logic.getFromKeyFieldsOr404(kwargs)\n\n fmt = {'name': program_entity.name}\n\n params = params.copy()\n params['list_msg'] = program_entity.accepted_orgs_msg\n params['list_description'] = self.DEF_PARTICIPATING_ORGS_MSG_FMT % fmt\n# TODO(LIST)\n return self.list(request, 'any_access', page_name=page_name, params=params)", "def get_members_and_supervisors(organization):\n members = []\n supervisors = []\n if organization:\n if organization.members:\n members = organization.members.reporters.all()\n if organization.supervisors:\n supervisors = organization.supervisors.reporters.all() \n return (members, supervisors)", "def get(self, org_name=None): \n if org_name is None: # Return a list of all orgs\n filter = '%s=*' % self.org_attr\n scope = 1\n trueorfalse = False\n else:\n filter = '%s=%s' % (self.org_attr, org_name)\n scope = self.search_scope\n trueorfalse = True \n result = self._get_object(self.base_dn, scope, filter, \\\n unique=trueorfalse)\n self.log.debug('Result: %s' % result)\n return result", "def test_get_all_for_organization(self):\n org = Organization.create(name='foo', program_id=self.program.uid)\n org.put()\n user = User.create(name='foo', email='foo@bar.com',\n owned_organizations=[org.uid])\n user.put()\n response = self.testapp.get(\n '/api/organizations/{}/users'.format(org.uid),\n headers=self.login_headers(user),\n )\n response_list = json.loads(response.body)\n self.assertEqual(len(response_list), 1)", "def get_organizations(\n self, *, params: Optional[dict] = None\n ) -> \"resource_types.Organizations\":\n\n return communicator.Organizations(self.__requester).fetch(parameters=params)", "def showORGusers(**kwargs):\n sessiontoken = kwargs['sessiontoken']\n ORG_ID = kwargs['ORG_ID']\n strCSPProdURL = kwargs['strCSPProdURL']\n jsonResponse = get_csp_users_json(strCSPProdURL, ORG_ID, sessiontoken)\n if jsonResponse == None:\n print(\"API Error\")\n sys.exit(1)\n\n users = jsonResponse['results']\n table = PrettyTable(['First Name', 'Last Name', 'User Name'])\n for i in users:\n table.add_row([i['user']['firstName'],i['user']['lastName'],i['user']['username']])\n print (table.get_string(sortby=\"Last Name\"))", "def getURLs():", "def getProjectsForOrgs(org_keys, limit=1000):\n q = getProjectsQueryForOrgs(org_keys)\n return q.fetch(limit)", "def sync_org(config, orgs):\n\n logger = logging.getLogger(\"sync-org\")\n\n for org in orgs:\n logger.info(\"Syncing {} organization\".format(org))\n config.get_manager().sync_org(org)", "def get_organization_url(self, organization: Dict):\n return f\"{self.site_url}/organization/{organization['name']}\"", "def _urls(*, repository, commit, mirrors):\n result_with_nulls = [\n _format_url(\n pattern = x,\n repository = repository,\n commit = commit,\n )\n for x in mirrors.get(\"github\")\n ]\n return [\n url\n for url in result_with_nulls\n if url != None\n ]", "def organizations(self):\r\n return Organizations(self)", "def ListOrganizations(self, request, context):\n 
context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def _createOrganizationsCollections(folder):\n collections = [\n {'id': 'all_orgs', 'tit': _('all_orgs'), 'subj': (u'search', ), 'query': [\n {'i': 'portal_type',\n 'o': 'plone.app.querystring.operation.selection.is',\n 'v': ['organization']}],\n 'cond': u\"\", 'bypass': [],\n 'flds': (u'select_row', u'org_pretty_link_with_additional_infos',\n u'SelectedInPlonegroupColumn', u'PloneGroupUsersGroupsColumn',\n u'review_state', u'CreationDate', u'actions'),\n 'sort': u'sortable_title', 'rev': False, 'count': False},\n ]\n _createDashboardCollections(folder, collections)", "def org(\n organisation: str, threads: int = 2, uname: Optional[str] = typer.Argument(None)\n):\n usernames = github.find_users_from_organisation(\n organisation=organisation, uname=uname\n )\n result = []\n for username in usernames:\n result.append(github.find_email_from_username(username=username))\n typer.echo(result)", "def collect_org_repos(self):\n log.info(\"GHUB\", \"Collecting org repos.\")\n raw_repos = self._get_org_repos()\n preprocessed_repos = self._preprocess_repos(raw_repos)\n parsed_repos = json_reducer.reduce(REPOS_SCHEMA, preprocessed_repos)\n result = []\n for repo in parsed_repos:\n result.append(repo)\n return result", "def _get_repo_list(self, *args, **kwargs): \r\n repo_list = kwargs['repositories'] if kwargs.get('repositories', None) else self.get_list(\r\n api_endpoint=settings.GITHUB_SETTINGS['GITHUB_USER_REPO_API'].format(**kwargs), **kwargs\r\n )\r\n for r in repo_list:\r\n if isinstance(r, dict):\r\n yield r['name']\r\n else:\r\n yield r", "def participating(model, program):\n # expiry time to fetch the new organization entities\n # the current expiry time is 30 minutes.\n expiry_time = datetime.timedelta(seconds=1800)\n\n batch_size = 5\n\n q = _orgWithLogoQuery(model, program)\n\n # the cache stores a 3-tuple in the order list of org entities,\n # cursor and the last time the cache was updated\n\n key = 'participating_orgs_for' + program.key().name()\n po_cache = memcache.get(key)\n\n if po_cache:\n cached_orgs, cached_cursor, cached_time = po_cache\n if not datetime.datetime.now() > cached_time + expiry_time:\n return cached_orgs\n else:\n q.with_cursor(cached_cursor)\n\n orgs = _orgsWithLogoForQuery(q, batch_size)\n\n if len(orgs) < batch_size:\n q = _orgWithLogoQuery(model, program)\n extra_orgs = _orgsWithLogoForQuery(q, batch_size - len(orgs))\n\n # add only those orgs which are not already in the list\n orgs_keys = [o.key() for o in orgs]\n for org in extra_orgs:\n if org.key() not in orgs_keys:\n orgs.append(org)\n\n new_cursor = q.cursor()\n memcache.set(key, value=(orgs, new_cursor, datetime.datetime.now()))\n\n return orgs", "def test_getorganizations_item(self):\n pass", "def get_org_data(org, session=None):\n url = f'{GITHUB_API_URL}/orgs/{org}'\n return get_whole_response_as_json(url, session)", "def urls(self) -> list[str]:\r\n ...", "def atlas_organizations():\n pass", "def get_members(base_url, end_url):\n reps, content_len, bill_info, votes = get_next_page(base_url, end_url, 1)\n return reps", "def pull_groups(self, org):\n pass", "def organizations_owned_ids(self):\n return list(set([team.org.id for team in self.teams if team.org.owners == team]))", "def process_organizations(self, organizations):\n self.process_elements(\n organizations,\n self.organization_table,\n self.extract_organization,\n ['organization_data', 
'member', 'organization']\n )", "def test_get_all_for_other_organization(self):\n org = Organization.create(name='foo', program_id=self.program.uid)\n org.put()\n user = User.create(name='foo', email='foo@bar.com')\n user.put()\n response = self.testapp.get(\n '/api/organizations/{}/users'.format(org.uid),\n headers=self.login_headers(user),\n status=403\n )", "def organization(request, id):\n org = get_object_or_404(Organization, pk=id)\n context = {\n 'org': org,\n 'cover': modify_image_url(str(org.cover), 'cover'),\n 'logo': modify_image_url(str(org.logo), 'logo'),\n 'mission': \"\",\n 'values': \"\",\n 'members': \"\",\n }\n\n context['mission'] = org.mission\n\n if org.value_set.exists():\n context['values'] = org.value_set.all()\n\n if org.membership_set.exists():\n membership = org.membership_set.all()\n context['members'] = []\n for member in membership:\n m = User.objects.get(pk=member.user_id)\n context['members'].append(m)\n\n return render(request, 'wantedly_app/org.html', context)", "def test_get_organization(self):\n pass", "def getProjectsQueryForOrgs(org_keys):\n query = getProjectsQuery()\n query.filter('org IN', org_keys)\n return query", "def get_org_repos(org, session=None):\n url = f'{GITHUB_API_URL}/orgs/{org}/repos'\n return get_one_item_at_a_time(url, {'type': 'sources'}, session)", "def get_issuer_urls_gnames(self):\n urls = ['uri:' + u for u in self.issuer_urls]\n return self.load_gnames(urls)", "def build_end_url_list(url):\n http_types = [\"http://\", \"https://\"]\n dub_types = [\"www.\", \"\"] # this order needs to preserved for testing at www.hgdatascience.com\n http_dub_urls = [\"{}{}{}\".format(h_type, dub_type, url) for dub_type in dub_types for h_type in http_types]\n return http_dub_urls", "def getUsersByOrganisation(SID, organisation_id, start, max, orderby, asc):\n return call(\"getUsersByOrganisation\", SID, organisation_id, start, max, orderby, asc)", "async def org_info_below_13(org_urls13):\n org_info_till13 = []\n project_urls_till13 = []\n for url in org_urls13:\n # General information about the org\n try:\n soup = await get_page(url)\n org_name = basename(url)\n org_info = soup.find_all('p')\n web_page = org_info[0].text.splitlines()[-1].strip()\n mailing_list = org_info[1].text.split(\":\")[-1].strip()\n detail = org_info[2].text\n org_info_till13.append({'name': org_name, 'about': detail,\n 'page': web_page, 'mail': mailing_list,\n 'link': url})\n project_urls_till13.extend(grab_project_links(soup))\n\n except IndexError:\n print(url)\n\n return org_info_till13, get_project_info(project_urls_till13)", "def list_ou(self, _):\n cn_re = re_compile(\"{[^}]+}\")\n results = self.engine.query(self.engine.GPO_INFO_FILTER(), [\"cn\", \"displayName\"])\n gpos = {}\n for gpo in results:\n gpos[gpo[\"cn\"]] = gpo[\"displayName\"]\n\n results = self.engine.query(self.engine.OU_FILTER())\n for result in results:\n print(result[\"distinguishedName\"])\n if \"gPLink\" in result:\n guids = cn_re.findall(result[\"gPLink\"])\n if len(guids) > 0:\n print(\"[gPLink]\")\n print(\"* {}\".format(\"\\n* \".join([gpos[g] if g in gpos else g for g in guids])))", "def build_urls(self, listings_per_page=20, pages_per_location=15):\r\n url_list = []\r\n for i in range(pages_per_location):\r\n offset = listings_per_page * i\r\n url_pagination = self.link + f'&items_offset={offset}'\r\n url_list.append(url_pagination)\r\n self.url_list = url_list", "def allowed_organization_access_use(user, org):\n try:\n up = user.get_profile()\n except AttributeError:\n return False\n\n 
if user.has_perm(\"vnswww.organization_use_any\"):\n return True\n elif user.has_perm(\"vnswww.organization_use_org\"):\n return org == up.org", "def AuthorURLs(entry):\n a_URLs = ''\n for a in entry.getAuthors():\n url = a.get('homepage', ' ')\n a_URLs += \"%s and \" % url\n return a_URLs[:-5]", "def get_repos(self):\n\n if self.url == 'test':\n repos = ['feature', 'dev', 'int']\n else:\n repos = []\n\n return repos", "def test_retrieve_l_organization(self):\n pass", "def get_authors(self, instance):\n\n # Get Authors in the specified order\n author_order = Author.objects \\\n .filter(dataset_id=instance.id) \\\n .order_by('order')\n\n # Put in a list\n authors = [a.author for a in author_order]\n\n # Return a list of person urls\n serializers = PersonSerializer(authors, many=True, context={'request': self.context['request']}).data\n return [p[\"url\"] for p in serializers]", "def get_queryset(self):\n mo = MemberOrganisation.objects.get(users=self.request.user)\n if self.user_type_query:\n return User.objects.filter(profile__member_organisation__slug=mo.slug)\n return User.profile.objects.all()", "def get_urls():\r\n return []", "def URLs(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('urls', default)\n return [HEP.URLObject(i) for i in tmp]", "def get_organizations(self, language=None):\n return self.get_direct_related_page_extensions(\n Organization, OrganizationPluginModel, language=language\n )", "def _get_ou_ids(self, org):\n\n # get root id\n root_id = self._get_root_id(org)\n\n # get OUs under the Org root\n ou_list_at_root_level = self._list_ou_for_parent(org, root_id)\n\n _ou_name_to_id_map = {}\n _all_ou_ids = []\n\n for ou_at_root_level in ou_list_at_root_level:\n # build list of all the OU IDs under Org root\n _all_ou_ids.append(ou_at_root_level.get('Id'))\n # build a list of ou id\n _ou_name_to_id_map.update(\n {ou_at_root_level.get('Name'): ou_at_root_level.get('Id')}\n )\n\n self.logger.info(\"Print OU Name to OU ID Map\")\n self.logger.info(_ou_name_to_id_map)\n\n return _all_ou_ids, _ou_name_to_id_map", "def get_all_clubs():\n\turl = \"http://fas-mini-sites.fas.harvard.edu/osl/grouplist\"\n\n\tr = rq.get(url)\n\tsoup = BeautifulSoup(r.text)\n\tlinks = soup.find_all('a')\n\n\tlinkArray = []\n\tnameArray = []\n\n\tfor link in links:\n\t\tl = link.get('href')\n\t\tlinkArray.append(l)\n\t\tname = link.get_text()\n\t\tname = name.encode('ascii','ignore')\n\t\tnameArray.append(name)\n\n\treturn nameArray, linkArray", "def get_all_npf_orgs(self):\n return [org for org in Org.objects.filter(status='npf')]", "def users_organizations(user):\n if not user or not user.is_authenticated():\n return None\n else:\n return get_users_organizations(user)", "def test_retrieve_l_organization_locations(self):\n pass", "def repolist(orgname, refresh=True):\n filename = os.path.join(SETTINGS[\"folder\"], orgname.lower()) + \"/repodata.json\"\n if not refresh and os.path.isfile(filename):\n repodata = json.loads(open(filename, \"r\").read()) # read cached data\n else:\n endpoint = \"/orgs/\" + orgname.lower() + \"/repos?per_page=100\"\n repodata = github_allpages(endpoint=endpoint)\n dicts2json(repodata, filename)\n print(\n f\"\\r{orgname} - {len(repodata)} total public non-forked repos found\"\n + 60 * \" \"\n )\n\n return sorted(\n [\n (repo[\"name\"].lower(), repo[\"size\"])\n for repo in repodata\n if not repo[\"private\"] and not repo[\"fork\"]\n ]\n )", "def get_organizers(self):\n return self.members.filter(\n Q(unitmembership__role=UnitMembershipRole.OWNER)\n | 
Q(unitmembership__role=UnitMembershipRole.ADMIN)\n )", "def get_organizations_to_delete():\n\n all_organizations = seed.models.Organization.objects.all()\n bad_organizations = [org for org in all_organizations if org.id not in get_core_organizations()]\n return bad_organizations", "def process(self, org_name, n, m):\n\n repo_query = self.build_repo_query(org_name)\n repositories = self.fetch_results(repo_query)\n count = n\n\n if repositories['total_count'] < n:\n print(\"Only %s repositories available\" % format(repositories['total_count']))\n count = repositories['total_count']\n \n for repo in repositories['items'][:count]:\n\n name = repo['name']\n owner = repo['owner']['login']\n forks = repo['forks']\n\n data = {'repo_name':name, 'owner':owner, 'contributors':[], 'forks':forks}\n\n committer_query = self.build_committers_query(name, owner)\n contributors = self.fetch_results(committer_query)\n contri_count = m\n\n if len(contributors) < m:\n print(\"Only %s contributors available for %s\" % (str(len(contributors)), name))\n contri_count = len(contributors)\n \n for contributor in contributors[:contri_count]:\n data['contributors'].append({'name':contributor['login'], \n 'commit_counts':contributor['contributions'] })\n \n self.results.append(data)" ]
[ "0.73141086", "0.6856046", "0.6766409", "0.66456306", "0.6501711", "0.64493275", "0.6426517", "0.62708795", "0.6261771", "0.6171711", "0.6152523", "0.605219", "0.6030204", "0.6027043", "0.60185575", "0.60124224", "0.5963386", "0.58976614", "0.5893394", "0.5823597", "0.5802018", "0.58008057", "0.5795049", "0.5792948", "0.5776702", "0.5745321", "0.5735663", "0.57141733", "0.56909424", "0.5648807", "0.5641643", "0.56399107", "0.56399107", "0.5612456", "0.5610004", "0.5607598", "0.55828726", "0.55805475", "0.5519464", "0.5491793", "0.5487033", "0.5450075", "0.54359746", "0.5434536", "0.54205465", "0.54055905", "0.5369158", "0.53686017", "0.5321702", "0.5317927", "0.5310586", "0.5285142", "0.52807367", "0.5260931", "0.52583784", "0.5254395", "0.5253608", "0.52524805", "0.5217242", "0.5206272", "0.5205678", "0.51929677", "0.51899266", "0.5187897", "0.51856375", "0.5182643", "0.51785535", "0.51758254", "0.5168071", "0.51485884", "0.5145124", "0.51450104", "0.514018", "0.5131351", "0.5128988", "0.5107095", "0.5104577", "0.50665784", "0.5065985", "0.50551516", "0.5051291", "0.50486326", "0.50177383", "0.5005903", "0.5000466", "0.49951282", "0.4986631", "0.4984994", "0.49777225", "0.49656937", "0.4964535", "0.4962879", "0.4958481", "0.49457178", "0.4933236", "0.49319005", "0.49276406", "0.49130282", "0.491291", "0.490278" ]
0.7285096
1
Provides a list of member URLs whose [attribute] field is null. param attribute: the attribute to check for a null value. param memberUrls: list of member URLs to check. return: list of member URLs with a null [attribute] field
def check_for_null(attribute, memberUrls):
    attributeNotFound = []

    for url in memberUrls:
        member_data = utils.get_json(url)
        if member_data[attribute] is None:
            #TODO: TBD Could grab email here if speed was an issue
            attributeNotFound.append(url)
    return attributeNotFound
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_should_pass_when_list_of_url_are_not_empty(self):\n self.ebooksPage = EbooksPage(self.driver)\n list_of_links = self.ebooksPage.get_all_links()\n\n assert (isinstance(list_of_links, list) and (len(list_of_links) > 0 ))", "def get_parliament_members_urls(self) -> list:\n directory = self.read_html(self.url)\n return [\n a.attrs[\"href\"]\n for a in directory.select(\".single-mp a\")\n if a.attrs[\"href\"].startswith(\"https\")\n ]", "def every_non_existing_owner_should_not_have_profile_link(context):\n items = context.response.json()['items']\n for item in items:\n owner = item['owner']\n if not owner['user_type'] == 'does_not_exist':\n continue\n assert 'link' not in owner\n logging.debug(\n 'Not existing Owner %s does not have a valid profile link',\n owner['display_name'])", "def extract_category_members(div):\n category_members = div.find_all('a')\n # remove any members with class\n category_members = list(filter(lambda x: x.get('class') is None, category_members))\n category_member_urls = list(map(lambda x: x['href'], category_members))\n for category_member in category_members:\n if(category_member.get('title') is None):\n print(f'bad category member {category_member}')\n category_member_names = list(map(lambda x: x['title'], category_members))\n # remove parens\n category_member_names = list(map(lambda x: PAREN_MATCHER.sub('', x), category_member_names))\n # combine\n category_member_info = list(zip(category_member_names, category_member_urls))\n # remove long names like 'Lo Nuestro Award for Pop New Artist of the Year'\n category_member_info = list(filter(lambda x: len(x[1]) <= MAX_TITLE_LEN, category_member_info))\n return category_member_info", "def extract_attributes_of_planes(self, cls, attr_name, filter_nones=False):\r\n if filter_nones:\r\n\r\n return [\r\n plane.extract_attribute(cls=cls, attr_name=attr_name)\r\n for plane in self.planes\r\n if plane.extract_attribute(cls=cls, attr_name=attr_name) is not None\r\n ]\r\n\r\n else:\r\n\r\n return [\r\n plane.extract_attribute(cls=cls, attr_name=attr_name)\r\n for plane in self.planes\r\n ]", "def _urls(*, repository, commit, mirrors):\n result_with_nulls = [\n _format_url(\n pattern = x,\n repository = repository,\n commit = commit,\n )\n for x in mirrors.get(\"github\")\n ]\n return [\n url\n for url in result_with_nulls\n if url != None\n ]", "def every_non_existing_owner_should_not_have_profile_image_link(context):\n items = context.response.json()['items']\n for item in items:\n owner = item['owner']\n if not owner['user_type'] == 'does_not_exist':\n continue\n assert 'profile_image' not in owner\n logging.debug(\n 'Not existing Owner %s does not have a valid profile image link',\n owner['display_name'])", "def listingURLs(soup):\n\n #Get URLs\n itemListing = soup.find_all(class_=\"user-ad-row link link--base-color-inherit link--hover-color-none link--no-underline\")\n itemListing += soup.find_all(class_=\"user-ad-row user-ad-row--featured-or-premium link link--base-color-inherit link--hover-color-none link--no-underline\")\n itemListing += soup.find_all(class_=\"user-ad-row user-ad-row--premium user-ad-row--featured-or-premium link link--base-color-inherit link--hover-color-none link--no-underline\")\n #Create list\n urlList = [i['href'] for i in itemListing]\n return urlList", "def extract_attributes_of_galaxies(self, cls, attr_name, filter_nones=False):\r\n if filter_nones:\r\n\r\n return [\r\n galaxy.extract_attribute(cls=cls, attr_name=attr_name)\r\n for galaxy in self.galaxies\r\n if 
galaxy.extract_attribute(cls=cls, attr_name=attr_name) is not None\r\n ]\r\n\r\n else:\r\n\r\n return [\r\n galaxy.extract_attribute(cls=cls, attr_name=attr_name)\r\n for galaxy in self.galaxies\r\n ]", "def get_urls_safely(self, **kwargs):\n urls = self.get_urls(**kwargs)\n if all(len(urls[cik]) == 0 for cik in urls.keys()):\n raise NoFilingsError(\"No filings available.\")\n return urls", "def _filter_url_list(self, regex_pattern: str) -> None:\n matcher = re.compile(regex_pattern)\n filtered_list = []\n if self.url_list:\n for url in self.url_list:\n if matcher.search(url.url):\n filtered_list.append(url)\n self.url_list = filtered_list", "def getUrls(url):\n f = requests.get(url)\n p = MyParser()\n p.feed(f.text)\n list_of_urls = p.output_list\n #deal with possible strange None values\n list_of_urls = [url for url in list_of_urls if url is not None]\n for url in list_of_urls:\n if 'http' not in url: list_of_urls.remove(url)\n return list_of_urls", "def get_member(self, filter_, attrlist=None):\n result = self.search('o=com,dc=mozilla', filter_, attrlist)\n if result == False:\n raise self.SearchError\n elif result == []:\n return []\n return result[1]", "def URLs(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('urls', default)\n return [HEP.URLObject(i) for i in tmp]", "def parse_urls(record):\n url_list = []\n try:\n page_url = record['WARC-Header-Metadata']['WARC-Target-URI']\n x = urlparse.urlparse(page_url)\n url_list += [(x.netloc, x.path)]\n except:\n pass\n try: \n links = record['Payload-Metadata']['HTTP-Response-Metadata']['HTML-Metadata']['Links']\n for url in links:\n x = urlparse.urlparse(url['url'])\n url_list += [(x.netloc, x.path)]\n except:\n pass\n \n return url_list", "def urls(self):\n header = \"URL,Linked From,Discovery Date\"\n gcsv = self.read()\n if gcsv[0] != header:\n raise Exception(\"Unexpected CSV format\")\n urls = set()\n for line in gcsv[1:]:\n # Get everything before the first commar (just the URL)\n line = line[:line.find(\",\")]\n urls.add(line)\n return urls", "def check_href(url, soup):\n # pdb.set_trace()\n ret_vals = []\n href = soup.find_all(\"a\")\n for link in href:\n if url in link.get(\"href\"):\n ret_vals.append(link.get(\"href\").split(url)[1])\n return list(set(ret_vals))", "def test_metric_source_urls_without_report(self, mock_url_read):\n mock_url_read.return_value = LAST_SCAN\n self.assertEqual([], self.__report.metric_source_urls())", "def attributes(self):\n\n attributes = []\n\n for member in self.members:\n if member.attribute:\n attributes.append(member)\n\n return attributes", "def GetValidAttributeValues(self, attr):\n return None", "def urls(self) -> list[str]:\r\n ...", "def by_member(self, member):\n return self.get_queryset().filter(member=member)", "def URLs(self, default=[{}]):\n tmp = self.data.get('urls', default)\n return [HEP.URLObject(i) for i in tmp]", "def get_urls():\r\n return []", "def list_members(orgs):\n members =[]\n \n \n if isinstance(orgs, list):\n #if list of orgs for each org get members list\n for url in orgs:\n #append /member to url - member_url is not valid canidate without a member list\n url = url + \"/members\"\n print(\"Checking \" + url)\n members_data = utils.get_json(url)\n\n for member in members_data:\n members.append(member[\"url\"])\n return members\n \n \n else:\n #build url from input org name and return member list\n url = \"https://api.github.com/orgs/\" + orgs + \"/members\"\n members_data = utils.get_json(url)\n \n #check for invalid GitHub credentials or invalid github 
org name\n try:\n for member in members_data:\n members.append(member[\"url\"])\n return members\n except TypeError:\n if(members_data[\"message\"] == \"Not Found\"):\n print(\"That organization doesn't exist try again\\n\")\n raise SystemExit\n elif(members_data[\"message\"] == \"Bad credentials\"):\n print(\"Please verify GitHub credentials are correct in config.py\")\n raise SystemExit\n else:\n print (members_data)\n raise SystemExit", "def test_get_non_existent_campaigns_returns_empty_list(self):\n response = self.client.get(self.endpoint_url)\n response_body = response.get_json()\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response_body, {\"campaigns\": []})", "def test_get_collection_no_asset_urls(self, mock_get):\n # Arrange\n mock_resp = MockResponse({}, 200)\n mock_get.return_value = mock_resp\n\n # Act\n response = get_collection_item_asset_urls(self.item_id)\n\n # Assert\n self.assertListEqual(response, [])", "def get_attribut(cls, attribut):\n\n if attribut:\n lst_attribut = [\"%s\" % value for value in attribut.split(',')]\n return lst_attribut\n return False", "def get_urls(self):\n urls = []\n params = ['<{}>'.format(x) for x in self.args]\n args_length = len(self.args) - len(self.defaults)\n for i in range(len(self.defaults) + 1):\n index = -i if i > args_length else None\n urls.append(self.get_url(params[:index]))\n return urls", "def check_unlinkage(self, attribute=None):\n unlinked = self._related_robot_instance is None\n assert unlinked, type(self)\n _class_attributes = self._class_linkables\n if attribute is not None:\n _class_attributes = [var for var in self._class_linkables if var == attribute]\n else:\n for var in self._class_variables:\n if isinstance(getattr(self, var), Linkable):\n unlinked &= getattr(self, var).check_unlinkage()\n elif isinstance(getattr(self, var), list):\n for v in getattr(self, var):\n if isinstance(v, Linkable):\n unlinked &= v.check_unlinkage()\n for attribute in _class_attributes:\n unlinked &= (\n getattr(self, \"_\" + attribute) is None or\n type(getattr(self, \"_\" + attribute)) == str or\n (isinstance(getattr(self, \"_\" + attribute), list) and\n all([type(x) == str or x is None for x in getattr(self, \"_\" + attribute)]))\n )\n assert unlinked, f\"Attribute {attribute} of {type(self)} {str(self) if self.stringable() else ''} is still linked. 
type: {type(getattr(self, '_' + attribute))}\"\n return unlinked", "def test_simplelistfilter_with_none_returning_lookups(self):\n modeladmin = DecadeFilterBookAdminWithNoneReturningLookups(Book, site)\n request = self.request_factory.get(\"/\", {})\n request.user = self.alfred\n changelist = modeladmin.get_changelist_instance(request)\n filterspec = changelist.get_filters(request)[0]\n self.assertEqual(len(filterspec), 0)", "def test_get_attr_names_empty_attr(self):\n root = netapp_api.NaElement('root')\n\n self.assertEqual([], root.get_attr_names())", "def parse_links(record):\n try:\n page_url = record['WARC-Header-Metadata']['WARC-Target-URI']\n page_domain = urlparse.urlparse(page_url).netloc\n links = record['Payload-Metadata']['HTTP-Response-Metadata']['HTML-Metadata']['Links']\n out_links = Counter([urlparse.urlparse(url['url']).netloc for url in links])\n return (page_domain, out_links)\n except:\n return None", "def getMyLinks(self, link_list, plant):\n my_links = []\n for links in link_list:\n if plant in links:\n my_links.append(links)\n return my_links", "def members(self) -> \"List[str]\":\n return self._attrs.get(\"members\")", "def members(self) -> \"List[str]\":\n return self._attrs.get(\"members\")", "def members(self) -> \"List[str]\":\n return self._attrs.get(\"members\")", "def _filter_return_url(self, url, har=None):\r\n if not har:\r\n har = self.har\r\n \r\n matches = []\r\n for entry in har[\"log\"][\"entries\"]:\r\n if url in entry[\"request\"][\"url\"]:\r\n temp = entry[\"request\"][\"url\"].encode('ascii', 'ignore')\r\n matches.append(temp)\r\n return matches", "def test_notnull_attrs(self):\n obj = VeilRestPaginator(name='name', ordering='ordering', limit=None, offset=5)\n assert 'name' in obj.notnull_attrs\n assert 'limit' not in obj.notnull_attrs", "def test_link_tag_empty_href_attribute(m):\n m.get('http://mock.com/', text='<link rel=\"icon\" href=\"\">')\n\n with pytest.warns(None):\n icons = favicon.get('http://mock.com/')\n\n assert not icons", "def members(self):\r\n return self.exclude(contributor__username=u'anonymous')", "def find_members(self, details):\n\n results = []\n\n for member in self.member:\n if details.match(member):\n results.append(member)\n\n return results", "def filter_members(self, members, want_all):\r\n\r\n def member_is_special(member):\r\n # TODO implement special matlab methods: disp, subsref, etc.\r\n return False\r\n\r\n def member_is_private(member):\r\n attrs = self.get_attr(member, \"attrs\", None)\r\n if attrs:\r\n access = attrs.get(\"Access\", None)\r\n get_access = attrs.get(\"GetAccess\", None)\r\n if access:\r\n if access == \"private\":\r\n return True\r\n elif get_access:\r\n if get_access == \"private\":\r\n return True\r\n return False\r\n else:\r\n return False\r\n\r\n def member_is_protected(member):\r\n attrs = self.get_attr(member, \"attrs\", None)\r\n if attrs:\r\n access = attrs.get(\"Access\", None)\r\n get_access = attrs.get(\"GetAccess\", None)\r\n if access:\r\n if access == \"protected\":\r\n return True\r\n elif get_access:\r\n if get_access == \"protected\":\r\n return True\r\n return False\r\n else:\r\n return False\r\n\r\n def member_is_hidden(member):\r\n attrs = self.get_attr(member, \"attrs\", None)\r\n if attrs:\r\n hidden = attrs.get(\"Hidden\", None)\r\n # It is either None or True\r\n if hidden:\r\n return True\r\n return False\r\n else:\r\n return False\r\n\r\n def member_is_friend(member):\r\n attrs = self.get_attr(member, \"attrs\", None)\r\n if attrs:\r\n access = attrs.get(\"Access\", 
None)\r\n if access:\r\n # Only friend meta classes define access lists\r\n if isinstance(access, list):\r\n return True\r\n elif access:\r\n # This is a friend meta class\r\n return access[0] == \"?\"\r\n return False\r\n else:\r\n return False\r\n\r\n def member_is_friend_of(member, friends):\r\n attrs = self.get_attr(member, \"attrs\", None)\r\n if attrs:\r\n access = attrs.get(\"Access\", None)\r\n if not isinstance(access, list):\r\n access = [access]\r\n for has_access in access:\r\n if has_access in friends:\r\n return True\r\n else:\r\n return False\r\n else:\r\n return False\r\n\r\n ret = []\r\n\r\n # search for members in source code too\r\n namespace = \".\".join(self.objpath) # will be empty for modules\r\n\r\n if self.analyzer:\r\n attr_docs = self.analyzer.find_attr_docs()\r\n else:\r\n attr_docs = {}\r\n\r\n # process members and determine which to skip\r\n for membername, member in members:\r\n # if isattr is True, the member is documented as an attribute\r\n isattr = False\r\n\r\n doc = self.get_attr(member, \"__doc__\", None)\r\n # if the member __doc__ is the same as self's __doc__, it's just\r\n # inherited and therefore not the member's doc\r\n cls = self.get_attr(member, \"__class__\", None)\r\n if cls:\r\n cls_doc = self.get_attr(cls, \"__doc__\", None)\r\n if cls_doc == doc:\r\n doc = None\r\n has_doc = bool(doc)\r\n\r\n keep = False\r\n if want_all and member_is_special(member):\r\n # special methods\r\n if self.options.special_members is ALL:\r\n keep = has_doc or self.options.undoc_members\r\n elif (\r\n self.options.special_members\r\n and self.options.special_members is not ALL\r\n and membername in self.options.special_members\r\n ):\r\n keep = has_doc or self.options.undoc_members\r\n elif want_all and member_is_private(member):\r\n # ignore private members\r\n if self.options.private_members is ALL:\r\n keep = has_doc or self.options.undoc_members\r\n elif (\r\n self.options.private_members\r\n and self.options.private_members is not ALL\r\n and membername in self.options.private_members\r\n ):\r\n keep = has_doc or self.options.undoc_members\r\n elif want_all and member_is_protected(member):\r\n # ignore protected members\r\n if self.options.protected_members is ALL:\r\n keep = has_doc or self.options.undoc_members\r\n elif (\r\n self.options.protected_members\r\n and self.options.protected_members is not ALL\r\n and membername in self.options.protected_members\r\n ):\r\n keep = has_doc or self.options.undoc_members\r\n elif want_all and member_is_hidden(member):\r\n # ignore hidden members\r\n if self.options.hidden_members is ALL:\r\n keep = has_doc or self.options.undoc_members\r\n elif (\r\n self.options.hidden_members\r\n and self.options.hidden_members is not ALL\r\n and membername in self.options.hidden_members\r\n ):\r\n keep = has_doc or self.options.undoc_members\r\n elif want_all and member_is_friend(member):\r\n # ignore friend members\r\n if self.options.friend_members is ALL:\r\n keep = has_doc or self.options.undoc_members\r\n elif (\r\n self.options.friend_members\r\n and self.options.friend_members is not ALL\r\n and member_is_friend_of(member, self.options.friend_members)\r\n ):\r\n keep = has_doc or self.options.undoc_members\r\n elif (namespace, membername) in attr_docs:\r\n # keep documented attributes\r\n keep = True\r\n isattr = True\r\n else:\r\n # ignore undocumented members if :undoc-members: is not given\r\n keep = has_doc or self.options.undoc_members\r\n\r\n # give the user a chance to decide whether this member\r\n # should be 
skipped\r\n if self.env.app:\r\n # let extensions preprocess docstrings\r\n skip_user = self.env.app.emit_firstresult(\r\n \"autodoc-skip-member\",\r\n self.objtype,\r\n membername,\r\n member,\r\n not keep,\r\n self.options,\r\n )\r\n if skip_user is not None:\r\n keep = not skip_user\r\n\r\n if keep:\r\n ret.append((membername, member, isattr))\r\n\r\n return ret", "def test_list_members(self):\n pass", "def every_existing_owner_should_have_valid_profile_image_link(context):\n items = context.response.json()['items']\n for item in items:\n owner = item['owner']\n if owner['user_type'] == 'does_not_exist':\n continue\n link = owner['profile_image']\n assert validators.url(link), (\n 'Owner %s (%d) in item %d has an invalid profile image link: %s'\n .format(owner['display_name'], owner['user_id'], link))\n logging.debug(\n 'Owner %s (%d) has a valid profile image link: %s',\n owner['display_name'], owner['user_id'], link)", "def every_existing_owner_should_have_valid_profile_link(context):\n items = context.response.json()['items']\n for item in items:\n owner = item['owner']\n if owner['user_type'] == 'does_not_exist':\n continue\n link = owner['link']\n assert validators.url(link), (\n 'Owner %s (%d) in item %d has an invalid profile link: %s'.format(\n owner['display_name'], owner['user_id'], link))\n logging.debug(\n 'Owner %s (%d) has a valid profile link: %s',\n owner['display_name'], owner['user_id'], link)", "def create_url_rules(self):\n return []", "def print_found_urls_hrefs(elements=None, count=10):\n if count > len(elements):\n print(f'Max value of \"count: {count}\" can not exceed \"given elements count: {len(elements)}\".')\n close_and_finish_execution()\n if elements and count:\n for num, video in enumerate(elements):\n if num == count:\n break\n href = video.get_attribute(\"href\")\n if href is None:\n print(f'Element \"{video}\" has no href attribute.')\n continue\n else:\n print(f\"{num+1}. 
{href}\")\n else:\n print(f'Attribute elements and count are mandatory.')\n close_and_finish_execution()", "def find_target_urls(url_list):\n candidate_urls = []\n \n #iterate through urls\n for url in get_urls(get_clean_text(message_list)):\n #skip any urls from our 33mail mask domain\n if re.findall('33mail', url):\n pass\n #return everything else\n else:\n candidate_urls.append(url)\n return candidate_urls", "def get_all_members_info(self) -> None:\n members_url = self.get_parliament_members_urls()\n print(f\"Found {len(members_url)} number of Parliamentary members.\")\n members_data = []\n\n for member_url in tqdm(members_url):\n try:\n members_data.append(self.get_person_delayed(member_url))\n except Exception:\n print(f\"Failed to get members information: {member_url}\")\n\n self.dataframe = pd.DataFrame(members_data, columns=self._columns)\n self.dataframe.replace(\"\", np.nan, inplace=True, regex=True)", "def _get_noprofile_member_pks(self):\n members = self._get_members()\n\n # Make a list of all members without a profile\n members = members.filter(profile=None)\n member_pks = members.values_list('id', flat=True).order_by('id')\n\n return list(member_pks)", "def test_list_course_role_members_noparams(self):\r\n url = reverse('list_course_role_members', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n response = self.client.get(url)\r\n self.assertEqual(response.status_code, 400)", "def test_list_members_without_member_rights(self):\n url = '/api/v1/communities/3/retrieve_members/'\n\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user4'))\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)", "def required_members(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"required_members\")", "def get_urls(listObjofPics, userName):\n\n with open('%s_pic_tweets.csv' %userName, newline='') as csvfile:\n # skipinitialspace=True in order to avoid ',' delimiter issues in row[2] from tweet text\n reader = csv.reader(csvfile, delimiter=',', quotechar='\"', skipinitialspace=True)\n \n for row in reader:\n listObjofPics.append(row[3])\n \n return listObjofPics", "def methods(self):\n methods = []\n\n for member in self.members:\n if not member.attribute:\n methods.append(member)\n\n return methods", "def generate_url_list(position, pgNum, pos):\n url = \"http://www.nfl.com/players/search?category=position&playerType=current&conference=ALL&d-447263-p=%s&filter=%s&conferenceAbbr=null\" % (pgNum, position)\n print url\n try:\n readPage = ul.urlopen(url).read()\n soup = BeautifulSoup(readPage, \"html.parser\")\n links = soup.findAll('a', href=re.compile('^/player/'))\n\n for i in range(len(links)):\n url_tag = str(links[i])\n splitText = url_tag.split('\"')\n year = 2015\n numYears = 6\n for j in range(numYears):\n yearTemp = year - j\n link = \"http://www.nfl.com\" + splitText[1].rstrip('profile') + \"gamelogs?season=%s\" % str(yearTemp)\n nameLastFirst = splitText[2].lstrip('>').rstrip('</a>')\n names = nameLastFirst.split(',')\n nameFirstLast = names[1].lstrip() + \" \" + names[0]\n outputLine = abbr[position], ',', nameFirstLast, ',', link, '\\n'\n print \"Player/Year to search for: \" + nameFirstLast + \" \" + str(yearTemp)\n with open(\"../CSV_data/UrlList.csv\", \"a\") as text_file:\n text_file.writelines(outputLine)\n text_file.close()\n # pull_game_stats(link, nameFirstLast, yearTemp, pos)\n\n except IOError, e:\n print 'Failed to open url'\n print '-------------------------------------'\n if hasattr(e, 
'code'):\n print 'We failed with error code - %s.' % e.code\n elif hasattr(e, 'reason'):\n print \"The error object has the following 'reason' attribute :\"\n print e.reason\n return False", "def getNotMyCamps(self):\n r = []\n for p in self.__camps:\n if(p.getOwner() != 1):\n r.append(p)\n return r", "def urls_in_url(url):\n global url_to_check_manually\n try:\n \"\"\"Return all URLs when given an url\"\"\"\n html = urlopen(url)\n bsObj = BeautifulSoup(html.read(), \"lxml\")\n list_url = []\n for link in bsObj.find_all('a'):\n sublink = link.get('href')\n try:\n list_url.append(str(sublink))\n except:\n pass\n return list_url\n except:\n print('Impossible to open URL :', url)\n url_to_check_manually.append(url)\n return []", "def members(self):\n return self.find_users_by_rel('member')", "def getLinks(self, url, tag = \"a\", attr = \"href\"): \n try: \n response = open(self.filename(url)).read() #read from the file\n except IOError:\n raise IOError\n parsed_url = urlparse(url)\n domain = parsed_url[0] + '://' + parsed_url[1]\n \n try:\n soup = BeautifulSoup.BeautifulSoup(response)\n l = soup.findAll(tag, href = True)\n except Exception:\n raise Exception\n links = []\n \n for tag in l:\n link = str(tag[attr]) #convert the link to a string\n purl = urlparse(link)\n if purl[1] == '': #if the link is relative make it absolute\n link = domain+link\n #check if the extension is that of a document \n if splitext(link)[1] in self._invalidExt: \n self.docs_list.append(link)\n \n #append only the html link\n links.append(link)\n \n \n \n return list(set(links)) #returns only distinct links", "def getMembers():", "def getMembers():", "def getMembers():", "def getMembers():", "def filter_captured_urls(urls_files, url_list_file):\n captured_urls = load_captured_urls(url_list_file)\n \n to_capture = list(filter(lambda d: d['url'] not in captured_urls, urls_files))\n \n return to_capture", "def _get_urls(soup: bs4.BeautifulSoup, keyword: str=\"\") -> list:\n valid_urls = []\n tag = soup.find_all('a')\n for text in tag:\n href_text = text.get('href')\n url = href_text[href_text.find('http'):]\n if keyword and keyword not in url:\n pass\n else:\n if \"http\" in url and not any(\n invalid_url in url for invalid_url in FILTER_URLS\n ):\n valid_urls.append(url)\n return valid_urls", "def filter_none(elems):\n return [x for x in elems if x is not None]", "def check_access(circle, member_required=False):\n member = db.session.query(Member).filter(db.and_(Member.circle == circle, Member.user == g.user)).first()\n if member:\n return member\n elif member_required:\n abort(404)\n else:\n if str(circle.id) in g.invitations:\n return None\n else:\n abort(404)", "def test_attractor_list(self):\n assert len(get_attractor_list()) > 130", "def _parse_links(self, item) -> list:\n # TODO This would be a \"nice to have\" but is not necessary right now.\n return [{\"href\": \"\", \"title\": \"\"}]", "def get_link_info(baseurl, cookie_header, member_id):\n url = baseurl + 'stacking/vsf/members/{}/links'.format(member_id)\n headers = {'cookie': cookie_header}\n response = requests.get(url, verify=False, headers=headers)\n if response.status_code == 200:\n return response.json()\n else:\n return response.status_code", "def get_member_link_info(baseurl, cookie_header, member_id, link_id):\n url = baseurl + 'stacking/vsf/members/{}/links/{}'.format(member_id, link_id)\n headers = {'cookie': cookie_header}\n response = requests.get(url, verify=False, headers=headers)\n if response.status_code == 200:\n return response.json()\n 
else:\n return response.status_code", "def serialize_redirect_uris_field(self, obj, **kwargs):\n return list(filter(len, obj.redirect_uris.split()))", "def unique(list_of_links):\n return list(set(list_of_links))", "def optional(cls):\n return []", "def get_url(soup):\r\n \"\"\"criteria: any(s in a[\"title\"] for s in ('新增', '確診', '肺炎')\"\"\"\r\n url_list = []\r\n for a in soup.find_all('a', {\"href\": re.compile(\"typeid=9$\")}):\r\n if any(s in a[\"title\"] for s in ('新增', '確診', '肺炎')):\r\n url = \"https://www.cdc.gov.tw\" + a['href']\r\n url_list.append(url)\r\n return url_list", "def links(self):\n\t\treturn self.list_of_links", "def test_non_url(self):\n for url in ['not-a-url', 'http://not-a-url', 'https://not-a-url']:\n yield self.check_url_raises, url", "def remove_links_with_no_month_reference(all_links: list) -> list:\n\n only_links_with_month_reference = [item for item in all_links if item[1] is not None and item[1] != '\\xa0']\n\n return only_links_with_month_reference", "def extract_urls(genome):\n itemid = genome.get('metadata').get('identifier')\n urls = set([url for url in genome['urls'] if 'archive.org' not in url])\n db_urls_found(itemid, urls)", "def getAdditionnalsUrls(self, ip_version):\n urls = []\n url = self.cp.get(self.CORE_SECTION, 'url_v'+str(ip_version), fallback='')\n for u in filter(lambda s: len(s), map(lambda x: x.strip(), url.split(','))):\n urls.append(u)\n return urls", "def get_course_unit_urls(self):\n\n for url in self.course_page.find_all(attrs=COURSE_UNIT_TITLE):\n self.course_unit_urls.append(url[\"href\"])\n logging.debug(\"course_unit_urls:{}\".format(self.course_unit_urls))\n logging.info(\"Course unit urls retrieved\")", "def getURLs():", "def _check_attribute_in_list(self, check_attribs, component_attribs):\n getattr = attrgetter('attribute_id')\n for key, group in groupby(component_attribs, getattr):\n if set(check_attribs).intersection([x.id for x in group]):\n return True\n return False", "def get_members(base_url, end_url):\n reps, content_len, bill_info, votes = get_next_page(base_url, end_url, 1)\n return reps", "def get_url_from_images(html_images):\n urls = []\n for image in html_images:\n try:\n url = image['data-src']\n if not url.find(\"https://\"):\n urls.append(url)\n except:\n try:\n url = image['src']\n if not url.find(\"https://\"):\n urls.append(image['src'])\n except Exception as e:\n print(f'No found image sources.')\n print(e)\n return urls", "def get(self, request, *args, **kwargs):\n\n liked_photos = None\n l_squarefollowings_count = None\n l_best_media = None\n\n # Common for all members views ===================================================\n l_categories = Category.objects.all()\n l_attributes = Attribute.objects.all()\n try:\n logged_member = Member.objects.get(django_user__username=request.user)\n show_describe_button = logged_member.is_editor(request)\n is_monthly_member = logged_member.is_monthly_member()\n is_yearly_member = logged_member.is_yearly_member()\n except ObjectDoesNotExist:\n logged_member = None\n except:\n raise HttpResponseNotFound\n\n l_squarefollowings_count = SquareFollowing.objects.filter(member_id2=logged_member).count()\n if l_squarefollowings_count >= MIN_SQUAREFOLLOWINGS:\n\n # END Common for all members views ===============================================\n l_squarefollowing_queryset = SquareFollowing.objects.all()\n\n l_token = logged_member.get_member_token(request)\n instagram_session = InstagramSession(p_is_admin=False, p_token=l_token['access_token'])\n 
instagram_session.init_instagram_API()\n\n l_smart_feed_helper = SmartFeedHelper(\n p_feed_owner_instagram_id=logged_member.instagram_user_id,\n p_instagram_session=instagram_session,\n p_batch_size=SMART_FEED_BATCH_SIZE,\n p_min_id=logged_member.smartfeed_last_seen_instagram_photo_id\n )\n l_best_media = l_smart_feed_helper.find_best_media(\n p_media_to_return=SMART_FEED_BATCH_SIZE,\n p_starting_media_id=None,\n p_logged_member=logged_member,\n p_max_days=30\n )\n\n liked_photos = []\n for x_media in l_best_media:\n my_likes = MyLikes(request.user.username, x_media.id, instagram_session )\n has_user_liked_media, no_of_likes = my_likes.has_user_liked_media()\n if has_user_liked_media:\n liked_photos.extend([x_media.id])\n\n\n # Limit calculation --------------------------------------------------------------\n logged_member.refresh_api_limits(request)\n x_ratelimit_remaining, x_ratelimit = logged_member.get_api_limits()\n\n x_ratelimit_used = x_ratelimit - x_ratelimit_remaining\n if x_ratelimit != 0:\n x_limit_pct = (x_ratelimit_used / x_ratelimit) * 100\n else:\n x_limit_pct = 100\n # END Limit calculation ----------------------------------------------------------\n\n return render(request,\n self.template_name,\n dict(\n best_media=l_best_media,\n liked_photos=liked_photos,\n squarefollowings_count=l_squarefollowings_count,\n new_friends_interaction=0,\n\n is_monthly_member=is_monthly_member,\n is_yearly_member=is_yearly_member,\n logged_member=logged_member,\n x_ratelimit_remaining=x_ratelimit_remaining,\n x_ratelimit=x_ratelimit,\n x_limit_pct=x_limit_pct,\n categories=l_categories,\n attributes=l_attributes,\n )\n )", "def filter_thumbnail_only(_list):\n result = list()\n for count, href in enumerate(_list):\n if count > 15:\n break\n if get_verified_response(get_thumbnail(href)).status == 200:\n result.append(href)\n return result", "async def whitelist_member(self, ctx, *members):\n successes = []\n fails = []\n for member_arg in members:\n try:\n member = await commands.MemberConverter().convert(ctx, member_arg)\n except commands.errors.BadArgument:\n fails.append(f\"Cannot find member {member_arg}\")\n else:\n await self.bot.db.execute(\n \"DELETE FROM blacklisted_member WHERE guild_id = %s AND user_id = %s\",\n ctx.guild.id,\n member.id,\n )\n self.bot.cache.blacklist[str(ctx.guild.id)][\"member\"].discard(member.id)\n successes.append(f\"Unblacklisted {member.mention}\")\n\n await util.send_tasks_result_list(ctx, successes, fails)", "def extract_link(self, page_url, element, attribute_name):\n attribute = element.attrs.get(attribute_name, None)\n if attribute is None:\n return None\n\n return urljoin(page_url, attribute, allow_fragments=False)", "def init_urls(self):\n url = 'http://www.lagou.com/'\n for ip_info in self.col.find(no_cursor_timeout=True):\n ip, port = ip_info['ip'], ip_info['port']\n if ip and port:\n self.urls.append((url, ip, port)) # tuple", "def Affiliations(self, default=[{}]):\n tmp = self.data.get('affiliations', default)\n return [HEP.AffiliationObject(i) for i in tmp]", "def get_objects_from_attribute(self, attribute: str) -> List[TgnObject]:\n pass", "def safemembers(members):\r\n\r\n base = resolved(\".\")\r\n\r\n for finfo in members:\r\n if _is_bad_path(finfo.name, base):\r\n log.debug(\"File %r is blocked (illegal path)\", finfo.name)\r\n raise SuspiciousOperation(\"Illegal path\")\r\n elif finfo.issym() and _is_bad_link(finfo, base):\r\n log.debug( \"File %r is blocked: Hard link to %r\", finfo.name, finfo.linkname)\r\n raise 
SuspiciousOperation(\"Hard link\")\r\n elif finfo.islnk() and _is_bad_link(finfo, base):\r\n log.debug(\"File %r is blocked: Symlink to %r\", finfo.name,\r\n finfo.linkname)\r\n raise SuspiciousOperation(\"Symlink\")\r\n elif finfo.isdev():\r\n log.debug(\"File %r is blocked: FIFO, device or character file\",\r\n finfo.name)\r\n raise SuspiciousOperation(\"Dev file\")\r\n\r\n return members", "def get_members(organization):\n raise Exception(\"Someone needs to fix this method to no longer be dependent on model relationship if they're going to use it!\")", "def usable(self):\n return self.exclude(Q(location=None) | Q(ipv4=None) | Q(ipv6=None))", "def getDirectors(movieCredit):\n if \"crew\" in movieCredit:\n return [ _format(people[\"name\"]) for people in movieCredit[\"crew\"] if people[\"job\"].lower() == \"director\" ]\n else:\n raise AttributeError(\"%s instance has no attribute crew\" % movieCredit)", "def validate_url(attribute_name, url):\n if not url:\n return\n\n try:\n result = urlparse(url=url)\n if [result.scheme, result.netloc, result.path]:\n return True\n except:\n raise ValueError('{attribute_name}: The given string {url} is not a '\n 'valid url.'\n .format(attribute_name=attribute_name, url=url))", "def check_blank_prop(blank_mol, validate_dict):\n\n # Check if properties populated\n property_dict = blank_mol.GetPropsAsDict()\n\n # Properties to ignore\n prop_ignore_list = ['ref_mols', 'ref_pdb']\n\n for key, value in zip(list(property_dict.keys()), list(property_dict.values())):\n if value == '' and key not in prop_ignore_list:\n validate_dict = add_warning(molecule_name=blank_mol.GetProp('_Name'),\n field=key,\n warning_string='description for %s missing' % (key,),\n validate_dict=validate_dict)\n if key == 'ref_url' and check_url(value) is False:\n validate_dict = add_warning(molecule_name=blank_mol.GetProp('_Name'),\n field=key,\n warning_string='illegal URL %s provided' % (value,),\n validate_dict=validate_dict)\n\n return validate_dict" ]
[ "0.5205275", "0.51647687", "0.4913061", "0.49125162", "0.49111745", "0.4911004", "0.48863113", "0.4880418", "0.48025796", "0.47962448", "0.47788674", "0.47781992", "0.47770363", "0.47515574", "0.47040161", "0.46784464", "0.45712367", "0.4568166", "0.45629707", "0.45522118", "0.45343316", "0.45338476", "0.44974712", "0.449056", "0.44749463", "0.4468246", "0.44643593", "0.44633552", "0.44391853", "0.4427086", "0.4425132", "0.44184008", "0.44078505", "0.44049373", "0.44013497", "0.44013497", "0.44013497", "0.43807343", "0.4377251", "0.43764207", "0.43746704", "0.43687186", "0.43594238", "0.43584904", "0.43541333", "0.4349208", "0.433537", "0.43278253", "0.43114397", "0.43084392", "0.43077868", "0.43048206", "0.43033642", "0.42937812", "0.42913738", "0.4286276", "0.4284968", "0.42845488", "0.4284163", "0.42753902", "0.42699763", "0.4267391", "0.4267391", "0.4267391", "0.4267391", "0.4266429", "0.42641345", "0.4263052", "0.42585212", "0.42564735", "0.4256209", "0.42526945", "0.42486808", "0.42476106", "0.42429203", "0.42420888", "0.4241511", "0.42414573", "0.4233539", "0.4228746", "0.42231396", "0.42208302", "0.42182547", "0.4207146", "0.42069155", "0.42017695", "0.41983917", "0.41921926", "0.41899985", "0.4186648", "0.4183223", "0.418302", "0.41815963", "0.4173666", "0.41735938", "0.41690525", "0.41682643", "0.41659072", "0.4165032", "0.4164731" ]
0.841574
0
Defines the connection to the database
def get_db_connection(uri):
    client = pymongo.MongoClient(uri)
    return client.cryptongo
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def connection_database(self):\n # connection to the database\n self.data_base = mysql.connector.connect(user=self.info[0], password=self.info[1],\n host=self.info[2])\n self.cursor = self.data_base.cursor()\n\n # executed \"use Purbeurre\" request\n self.cursor.execute(\"USE Purbeurre\")", "def connect(self):\n try:\n datos = [settings.config.host, settings.config.user, settings.config.password, settings.config.dbname] \n self.logger.debug(\"Connection String:{}, {}, {}\".format(settings.config.host, settings.config.user, settings.config.dbname) )\n self.con = MySQLdb.connect(*datos) # Conectar a la base de datos \n self.set_env_variable(settings.config.NLS_LANG, settings.config.LANG, settings.config.LC_ALL);\n except Exception as e:\n code,message = e\n if code == 1045:\n raise Exception(self.REVISAR_CREDENCIALES)\n else: \n raise e", "def creacion_conexion_db():\n\n global bd\n bd = mysql.connector.connect(host=\"localhost\", user=\"root\", passwd=\"\")\n micursor = bd.cursor(buffered=True)\n try:\n micursor.execute(\"CREATE DATABASE entrega\")\n bd = mysql.connector.connect(\n host=\"localhost\", user=\"root\", passwd=\"\", database=\"entrega\"\n )\n micursor = bd.cursor(buffered=True)\n micursor.execute(\n \"CREATE TABLE paciente( id int(11) NOT NULL PRIMARY KEY AUTO_INCREMENT, nombre VARCHAR(128) COLLATE utf8_spanish2_ci NOT NULL, apellido varchar(128) COLLATE utf8_spanish2_ci NOT NULL, dni VARCHAR(128) COLLATE utf8_spanish2_ci NOT NULL, dia date NOT NULL, hora int(11) NOT NULL, minuto int(11) NOT NULL )\"\n )\n\n return mysql.connector.connect(\n host=\"localhost\", user=\"root\", passwd=\"\", database=\"entrega\"\n )\n\n except Exception:\n\n return mysql.connector.connect(\n host=\"localhost\", user=\"root\", passwd=\"\", database=\"entrega\"\n )", "def bd_conecta():\n if not hasattr(g, 'sqlite_db'):\n rv = sqlite3.connect(app.config['DATABASE'])\n rv.row_factory = sqlite3.Row\n g.sqlite_db = rv\n return g.sqlite_db", "def connect(self):\r\n self.con_origem = fdb.connect(\r\n host=self.host_origem, database=self.database_origem,\r\n user=self.user_origem, password=self.senha_origem\r\n )\r\n\r\n self.con_destino = psycopg2.connect(\r\n \"user=%s password=%s host=%s port=%s dbname=%s\" % (self.user_destino, self.senha_destino, self.host_destino, self.porta_destino, self.database_destino))\r\n\r\n self.cur_origem = self.con_origem.cursor()\r\n self.cur_destino = self.con_destino.cursor()\r\n\r\n self.con_destino.autocommit = True", "def sql_conexion(self):\n try:\n con = sqlite3.connect('sisgenvac.db')\n return con\n except Error:\n print(Error)", "def connect(self):\r\n self.con_origem = pyodbc.connect(\"Driver={SQL Server Native Client 11.0};\"\r\n \"Server=%s;\" \r\n \"Database=%s;\"\r\n \"UID=%s;\"\r\n \"PWD=%s;\" %(self.host_origem,self.database_origem,self.user_origem,self.senha_origem))\r\n\r\n\r\n self.con_destino = psycopg2.connect(\r\n \"user=%s password=%s host=%s port=%s dbname=%s\" % (self.user_destino, self.senha_destino, self.host_destino, self.porta_destino, self.database_destino))\r\n\r\n self.cur_origem = self.con_origem.cursor()\r\n self.cur_destino = self.con_destino.cursor()\r\n\r\n self.con_destino.autocommit = True", "def do_connectdb(self, db_name):\n if not db_name:\n db_name = input(\"Enter database that you want to connect :\\n\")\n else:\n pass\n self.connection_obj = CRUDTable(db_name)\n print(\"Connection Successful\")", "def __init__(self):\r\n self.conn = create_connection(DATABASE_PATH)", "def connect(self):\r\n self.con_origem = 
cx_Oracle.connect(\r\n \"%s/%s@%s/%s\" % (self.user_origem, self.senha_origem, self.host_origem, self.database_origem))\r\n\r\n self.con_destino = psycopg2.connect(\r\n \"user=%s password=%s host=%s port=%s dbname=%s\" % (self.user_destino, self.senha_destino, self.host_destino, self.porta_destino, self.database_destino))\r\n\r\n self.cur_origem = self.con_origem.cursor()\r\n self.cur_destino = self.con_destino.cursor()\r\n\r\n self.con_destino.autocommit = True", "async def conn(self) -> None:\n self.bot.db = await aiosqlite.connect('database.db')", "def __init__(self, con_uri=None, db_name=\"douyin\"):\n super().__init__()\n self.con_uri = con_uri or 'localhost'\n self.client = AsyncIOMotorClient(self.con_uri)\n self.db = self.client[db_name]", "def setup_db_conn():\n # TODO update so DB does not have to be hard coded\n # Currently DB is hardcoded", "def __init__(self, user, password, database='mesomat', host='localhost'): \n \n \n self.config = {\n 'user' : user,\n 'password' : password,\n 'host' : host,\n 'database' : database,\n 'raise_on_warnings' : True,\n 'auth_plugin' : 'mysql_native_password'\n }\n \n self.INSERT_SAMPLE_COLUMN_COMMAND = ()\n \n \n self.connected = False\n self.cursor = None\n self.cnx = None", "def connection_mysql(self):\n\n # connection MySQL\n config = {\n 'host': HOST,\n 'user': USER,\n 'password': PASSWORD\n }\n try:\n # connection to MySQL\n self.cnx = connector.connect(**config)\n # Graphical interface with Tkinter\n # (text to display in the steps view)\n self.text_connect_mysql = \"You are connected to MySQL\"\n # Mode console\n print(\"You are connected to MySQL\")\n # connection to database 'PurBeurre'\n self.connection_db(self.cnx)\n self.search_presence_api_data_in_database(self.cnx)\n return self.cnx\n\n except connector.Error as err:\n if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:\n # Graphical interface with Tkinter\n self.text_connect_mysql = \"Something is wrong with your \" \\\n \"user name or password\"\n # Mode console\n print(\"Something is wrong with your user name or password\")", "def connect_to_data_base():\n try:\n connection = psycopg2.connect(**DATA_CONNECTION)\n\n cursor = connection.cursor()\n\n cursor.execute(\"SELECT version();\")\n #record = cursor.fetchone()\n \n print(\"{} \\t Conexión Base de datos realizada con éxito\".format(datetime.now()))\n \n return connection, cursor\n\n except (Exception, psycopg2.Error) as error:\n print(\"{} \\t Error en el método connect_to_data_base\".format(datetime.now()))\n print(\"Error en la conexión a la base de datos\", error)\n \n return None", "def __init__(self):\n self.dbcon = DbConnection.get_con()", "def db_connection(self):\n try:\n self.connection = connect(host=self.host, user=self.user, password = self.password, db = self.db, cursorclass = self.cursor)\n except MySQLError:\n print(\"DB Error\")", "def conectar(self):\r\n self.socket = socket.create_connection((self.host, self.puerto))", "def set_db_connexion(self, db_name='local_backup'):\r\n\r\n dbapi = pd.read_json(os.path.join(self.config_path, \"db_access_credentials.json\"), orient='index')\r\n\r\n # Let's create a connexion from the DBAPI variables\r\n connexion = 'mysql+mysqlconnector://{0}:{1}@{2}:{3}/{4}?auth_plugin={5}'\r\n connexion = connexion.format(dbapi['user'][db_name],\r\n dbapi['password'][db_name],\r\n dbapi['server_url'][db_name],\r\n dbapi['port'][db_name],\r\n dbapi['schema'][db_name],\r\n dbapi['authentication_plugin'][db_name])\r\n\r\n self.mariadb_engine = create_engine(connexion)", "def 
connect(self):\n self.db = pymysql.connect(self.db_ip, self.uid, self.pwd, self.db_name)\n self.cursor = self.db.cursor()", "def getConexion(self):\n return self.conn", "def connect(self):\n\t\t# PostgreSQL PyPgSQL\n\t#\tcp = adbapi.ConnectionPool(\"pyPgSQL.PgSQL\", database=\"test\")\n\t\t# MySQL\n\t\tself.dbpool = adbapi.ConnectionPool('MySQLdb',\n\t\t\thost = self.settings.get('hostname', 'localhost'),\n\t\t\tport = self.settings.get('port', 3306),\n\t\t\tdb = self.settings.get('database'),\n\t\t\tuser = self.settings.get('username'),\n\t\t\tpasswd = self.settings.get('password'),\n\t\t\tcursorclass = MySQLdb.cursors.DictCursor,\n\t\t\tcharset = 'utf8',\n\t\t\tuse_unicode = True,\n\t\t)", "def __init__(self, host, user, password, base):\n global con\n\n try:\n con = _mysql.connect(host, user, password, base)\n \n print \"Polaczono z baza danych.\"\n \n except _mysql.Error, e:\n \n print \"Blad %d: %s\" % (e.args[0], e.args[1])\n sys.exit(1)", "def obtainDatabaseConnection(self):\n\t\tself.databaseConnector = DatabaseConnector()", "def connect_to_db(self):\n self.read_config()\n print('Connecting to database...', end=\"\")\n self.db_conn = pymysql.connect(host=self.host, user=self.user, db=self.db)\n self.db_cur = self.db_conn.cursor()\n print('[DONE]')", "def __init__(self):\r\n self.connection = MySQLdb.connect(host=\"db4free.net\", db=\"academicsys\", user=\"desenvolvedores\", passwd=\"acesso\")\r\n self.cursor = self.connection.cursor()", "def __connect(self):\n self.conn = pymysql.connect(self.opts.DB_HOST, self.opts.DB_USER,\n self.opts.DB_PASSWORD, self.opts.DB_NAME)", "def open_connection(self):\n self.conn = pymysql.connect(host=self.host, user=self.user, passwd=self.passwd, db=self.db)", "def connect(self, dbapi_connection, connection_record):", "def __init__(self):\n self.dbconnect = dbConnection.connection", "def connect_db(self):\n self.connection = MySQLdb.connect(\n host=MYSQL_HOST, passwd=MYSQL_PWD,\n user=MYSQL_USER, db=MYSQL_DB)", "def connection(composition):\n return Connection(BASE_URL, 'http://localhost')", "def set_connection(password):\r\n\r\n SQL_connection = mysql.connector.connect(\r\n host=\"hannl-hlo-bioinformatica-mysqlsrv.mysql.database.azure.com\",\r\n user=\"ossux@hannl-hlo-bioinformatica-mysqlsrv\",\r\n db=\"Ossux\",\r\n password=password)\r\n\r\n return SQL_connection", "def connect(self):\n self.conn.connect()", "def __init__(self):\n self._connection = get_db_connection()", "def connect(cls):\n cls.conn = MySQLdb.connect(**cls.configs)\n cls.conn.autocommit(cls.autocommit)", "def getConexion(self):\n\t\t\treturn self.conn", "def connect_db(self) -> sqlite3.Connection:", "def connect_to_db(self):\n\t\t# connection = psycopg2.connect(database=config.database, user=config.user,password = config.password)\n\t\tconnection = psycopg2.connect(database=config.database, user=config.user)\n\t\treturn connection", "def make_data_connection():\n\n global _engine # pylint: disable=W0603\n\n db_url = 'postgres://{0}:{1}@{2}:{3}/{4}'.format(DataBase.USER,\n DataBase.PASSWORD,\n DataBase.HOST,\n DataBase.PORT,\n DataBase.DB_NAME)\n _engine = create_engine(db_url, echo=DataBase.ECHO)", "def __init__(self, database, host='127.0.0.1', user='root', password='', port='3306', tenant_id=None):\n\n self.HOST = host\n self.USER = user\n self.PASSWORD = password\n self.PORT = port\n self.DATABASE = f'{tenant_id}_{database}' if tenant_id else database\n \n logging.info(f'Host: {self.HOST}')\n logging.info(f'User: {self.USER}')\n logging.info(f'Password: {self.PASSWORD}')\n 
logging.info(f'Port: {self.PORT}')\n logging.info(f'Database: {self.DATABASE}')\n\n self.connect()", "def connectDatabase(self):\r\n self.checkingConnection()\r\n\r\n self.model = QSqlQueryModel()\r\n self.model.setQuery('''\r\n SELECT Clients.id, Clients.date, Clients.hour, Clients.name, \r\n Clients.birthday, Clients.cellphone, Clients.address, City.name, \r\n Payment.method, Clients.pollo, Clients.carne, Clients.empanachos, \r\n Clients.total, Clients.value FROM Clients JOIN City JOIN Payment\r\n ON Clients.city_id = City.id AND Clients.payment_id = Payment.id\r\n ''', self.db)\r\n\r\n self.setModel(self.model)", "def conn(self):\n self.cnx = psycopg2.connect(**self.dbConfig) \n self.cur = self.cnx.cursor()", "def connect(self):\n\n self._dbcon = psycopg2.connect(\n database=self._dbname, host=self._host, port=self._port, user=self._username, password=self._password)\n self.result_as_dict(self._result_as_dict)", "def create_connection():\n config_settings = {\n \"apiKey\": \"AIzaSyBF7l-W2MdF4yBbknmtNobInj1ujJ1IHA0\", \n \"authDomain\": \"boontje-aa2c4.firebaseapp.com\", \n \"databaseURL\": \"https://boontje-aa2c4.firebaseio.com\", \n \"storageBucket\": \"boontje-aa2c4.appspot.com\", \n \"messagingSenderId\": \"83637322760\"\n }\n\n # verbinding maken met de firebase database\n firebase_link = pyrebase.initialize_app(config_settings)\n\n # link vragen om te authenticeren\n auth = firebase_link.auth()\n\n # teruggekregen authenticatietest gebruiken om te authenticeren met gebruikersnaam en paswoord \n user = auth.sign_in_with_email_and_password(\"abc@def.be\", \"abcdef\")\n\n # aan firebase vragen om databasetoegang te voorzien\n database = firebase_link.database()\n\n # return databasetoegang en gebruikerstoken (unieke sleutel) om bewerkingen op de database te mogen doen\n return database, user[\"idToken\"]", "def _connect(self):\n if self.settings.get('ENGINE', 'mysql') == 'mysql':\n self._db = self.dbdriver.connect(self.settings['HOST'],\n self.settings['USER'],\n self.settings['PASSWORD'],\n self.settings['NAME'],\n self.settings.get('PORT', 3306))\n elif self.settings['ENGINE'] == 'postgres':\n self._db = self.dbdriver.connect(database=self.settings['NAME'],\n user=self.settings['USER'],\n password=self.settings['PASSWORD'],\n host=self.settings['HOST'])\n elif self.settings['ENGINE'] == 'sqlite':\n self._db = self.dbdriver.connect(self.settings['NAME'])\n self.cursor = self._db.cursor()", "def connect(self):\n self.conn = psycopg2.connect(database=self.config['dbname'], user=self.config['dbuser'], password=self.config['dbpassword'], host=self.config['dbhost'])", "def __init__(self):\n\t\tself.obtainDatabaseConnection()", "def create_connection(self):\r\n dbname = \"dbname = \"+\"'\"+self.dbname+\"' \"\r\n user = \"user = \"+\"'\"+self.user+\"' \"\r\n host = \"host = \"+\"'\"+self.host+\"' \"\r\n password = \"password = \"+\"'\"+self.password+\"' \"\r\n connection_info = dbname + user + host + password\r\n try:\r\n conn = psycopg2.connect(connection_info)\r\n except psycopg2.Error:\r\n print(\"Unable to connect to DB\")\r\n return conn", "def connect_db(self):\n try:\n self.connection = self.engine.connect()\n except Exception:\n self.print_std_error()", "def connectDb(self):\n self.db = Database('sqlite',self.settings.sqlitefilename)\n self.db.user = self.session.getAttribute(self.settings.authenvar)\n self.db.settings = self.settings\n self.db.logger = self.logger\n self.db.cgiparam = self.cgiparam\n self.db.writelog = self.writelog", "def openConn():\n\t#abstract the 
user,passwd,db,host fields in a config file next\n\tconnection = sql.connect(user=\"root\",passwd=\"sct\",db=\"sct\")\n\tcursor = connection.cursor()\n\treturn connection,cursor", "def connect(self, username, table_name):\n # TODO: The CONTROL database is currently only slightly operational.\n # The long-term goal is for the CONTROL database to allow multiple users with multiple\n # API keys to manage their accounts online in a data driven web-app and so the security\n # and also the functionality of the CONTROL database need major upgrades.\n self.database = username\n self.table = table_name\n # Connects to a SQLite database with the specified database name\n self.conn = sqlite3.connect(settings['USERS_DB'].format(username))\n # Creates a table with the specified table name if it doesn't already exist\n self.conn.execute(\"CREATE TABLE IF NOT EXISTS {tn} (days INTEGER,\"\n \"favorites INTEGER,\"\n \"favpd FLOAT,\"\n \"followers INTEGER,\"\n \"folpd FLOAT,\"\n \"hashtags TEXT,\"\n \"in_reply_to TEXT,\"\n \"lang TEXT,\"\n \"links_mentioned TEXT,\"\n \"original_author_handle TEXT,\"\n \"original_author_id TEXT,\"\n \"place TEXT,\"\n \"plain_desc TEXT,\"\n \"plain_text TEXT,\"\n \"source TEXT,\"\n \"statpd FLOAT,\"\n \"user_statuses INTEGER,\"\n \"tweet TEXT,\"\n \"tweet_id TEXT,\"\n \"tweet_location TEXT,\"\n \"tweet_mentions TEXT,\"\n \"tweeted_time TEXT,\"\n \"user_twitter_birthday TEXT,\"\n \"user_description TEXT,\"\n \"user_handle TEXT,\"\n \"user_id TEXT,\"\n \"user_location TEXT,\"\n \"user_name TEXT, PRIMARY KEY (tweet))\".format(tn=self.table))\n # The below allows for the Datamanager object to add the database the CONTROL table\n # self.manage = DataManager()\n # self.manage.connect()\n # self.manage.addition(username, table_name)", "def connect(self):\n if self.db is not None:\n self.disconnect()\n\n self.db = MySQLdb.connect(host=self.conn.host, port=self.conn.port, db=self.conn.db, user=self.conn.user, passwd=self.conn.pwd, use_unicode=True, charset='utf8', cursorclass=MySQLdb.cursors.DictCursor)\n self.db.autocommit(self.conn.auto_commit)", "def connect(self):\r\n\r\n db_config = read_db_config()\r\n\r\n try:\r\n print('Connecting to MySQL database...')\r\n conn = MySQLConnection(**db_config)\r\n\r\n if conn.is_connected():\r\n print('connection established.')\r\n return conn\r\n else:\r\n print('connection failed.')\r\n\r\n except Error as e:\r\n print(e)", "def establish_connection(self,executer):\n self.cursor = self.executer.connect_me(self.hst, self.usr, self.pwd, self.db_name)", "def crearBD(self):\n if self.base.isConnected():\n mensaje = \"Usted ya se encuentra conectado a la base \" + self.base.getDbName() + \", ¿Desea Crear una nueva?\"\n if askyesno(\"Atención\", mensaje):\n nombre = tkinter.simpledialog.askstring(\"Elija el Nombre de la Base\", prompt=\"Nombre\")\n self.base.setDbName(nombre)\n resultado = self.base.createDB()\n showinfo('Resultado', resultado)\n self.crearTabla()\n self.mostrarString.set('Mostrando Registros Existentes en ' + self.base.getDbName())\n self.tituloTree.configure(text=self.mostrarString.get())\n self.updateTree()\n else:\n try:\n mibase = mysql.connector.connect(\n host=\"localhost\",\n user=\"root\",\n password=\"\"\n )\n baseNombre['nombre'] = tkinter.simpledialog.askstring(\"Elija el Nombre de la Base\", prompt=\"Nombre\")\n buffer = open('nombreBD.json', 'w')\n buffer.write(json.dumps(baseNombre))\n buffer.close() \n micursor = mibase.cursor()\n baseSQL = f\"CREATE DATABASE {getNameDB()}\"\n micursor.execute(baseSQL)\n mensaje = 
f\"Se ha creado la base {getNameDB()}\"\n showinfo('BD Creada', mensaje)\n mostrarString.set('Mostrando Registros Existentes en ' + getNameDB())\n tituloTree.configure(text=mostrarString.get())\n\n if askyesno('Tabla Inexistente', '¿Desea crear una tabla?'):\n crearTabla()\n except:\n showinfo ('Error', exc_info()[1])", "def create_connection(self):\n try:\n self.conn = psycopg2.connect(host=self.host, port=self.port, database=self.database, user=self.user, password=self.password)\n\n except:\n print(\"Unable to connect to the database. Please check your options and try again.\")\n exit()", "def __init__(self, db_name):\n if db_name == \"\":\n db_name = \"walter\"\n\n self.dbcon = pysql.connect(user='walter', db=db_name)", "def connect(self):\n try:\n connection = Connection(host=settings.DATABASE_HOST, port=settings.DATABASE_PORT)\n except ConnectionFailure, error:\n return \"Could not connect to database: %s\" % error\n print \"Could not connect to database: %s \\n\" % error\n if __name__ == \"spider\":\n sys.exit(1)\n self.dbconnection = connection[settings.DATABASE_NAME]", "def _dbconnect(self):\n user = mm_cfg.STORM_MEMBER_DB_USER\n password = mm_cfg.STORM_MEMBER_DB_PASS\n host = mm_cfg.STORM_MEMBER_DB_HOST\n dbname = mm_cfg.STORM_MEMBER_DB_NAME\n\n\n db = 'postgres://'+user+':'+password+'@'+host+'/'+dbname\n return create_database(db)", "def _db_connection(self):\n pass", "def config_db():", "def connect():\n connection = pymysql.connect(host='localhost',\n password='budgetpedia',\n user='budgetpedia',\n db='budgetpedia')\n return connection", "def connectToDatabase():\n\n #Connect to Database With Environment Values\n conn = pymysql.connect(\n host=os.environ.get(\"CHRONICLER_DATABASE_HOST\"),\n user=os.environ.get(\"CHRONICLER_DATABASE_USER\"),\n passwd=os.environ.get(\"CHRONICLER_DATABASE_PASSWORD\"),\n db=os.environ.get(\"CHRONICLER_DATABASE_DB\"),\n\t\t\t\tcharset=\"utf8mb4\")\n\n #Return the Connection\n return conn", "def connect():\n # returns the connection object to tournament\n # database from PostgreSQL\n return psycopg2.connect(\"dbname=tournament\")", "def connect(self):\n try:\n self.conn = pymysql.connect(**self.conf)\n self.cur = self.conn.cursor()\n except ConnectionError:\n print(\n '%s@%s\\nConnection Failed!' 
%\n (self.conf['user'], self.conf['host'])\n )\n raise", "def __init__(self):\n\n\t\tself.connection = self.get_connection()", "def init_connexion():\n connexion = socket(AF_INET, SOCK_STREAM)\n connexion.bind((hote, port))\n\n return connexion", "def connect(self):\n try:\n self.conn = MySQLdb.connect(db=self.conf['db'], host=self.conf['host'],\n port=self.conf['port'], user=self.conf['user'],\n passwd=self.conf['passwd'],\n charset=self.conf['charset'])\n self.cur = self.conn.cursor()\n self.conn.autocommit(self.conf[\"autocommit\"])\n # print \"connected to \", self.conf['host'], self.conf['db']\n except:\n print (\"MySQL connection failed\")\n raise", "def __init__(self):\n # instantiate the class Orm\n self.new_orm = orm.Orm()\n\n # read the file connection.yml\n with open('connection.yml', 'r') as file:\n self.info = file.read().split()\n\n # connection at MySQL with data of connection.yml file and creation cursor\n self.data_base = mysql.connector.connect(user=self.info[0], password=self.info[1],\n host=self.info[2])\n self.cursor = self.data_base.cursor()", "def connect():\n try:\n conn = mysql.connector.connect(host='localhost',\n database='ToDo',\n user='root',\n password='Bhavya123!@#')\n if conn.is_connected():\n print('Connected to MySQL database')\n gui.GUI(conn)\n #ToDoList.delete_activity(conn , id )\n #ToDoList.add_activity(conn , \"book\", \"2020-06-12\", \"2020-06-15\")\n \n except Error as e:\n print(e)\n\n finally:\n if conn is not None and conn.is_connected():\n conn.close()", "def getDbConnection(self, **kwargs):\r\n \r\n con = sql.connect(self._filename, **kwargs)\r\n con.row_factory = sql.Row\r\n return con", "def configure(self):\n # Defaults\n self.db_type = DB_TYPE.POSTGRES\n self.db_name = \"ambari\"\n self.db_user = \"ambari\"\n self.db_password = \"bigdata\"\n self.db_host = \"localhost\"\n self.db_url = None\n\n if os.path.exists(AMBARI_PROPERTIES_LOCATION):\n self.ambari_props = self.read_conf_file(AMBARI_PROPERTIES_LOCATION)\n\n if \"server.jdbc.database\" in self.ambari_props:\n self.db_type = self.ambari_props[\"server.jdbc.database\"].upper()\n if \"server.jdbc.database_name\" in self.ambari_props:\n self.db_name = self.ambari_props[\"server.jdbc.database_name\"]\n if \"server.jdbc.user.name\" in self.ambari_props:\n self.db_user = self.ambari_props[\"server.jdbc.user.name\"]\n if \"server.jdbc.user.passwd\" in self.ambari_props:\n self.db_password = self.read_file(self.ambari_props[\"server.jdbc.user.passwd\"])\n if \"server.jdbc.hostname\" in self.ambari_props:\n self.db_host = self.ambari_props[\"server.jdbc.hostname\"]\n if \"server.jdbc.url\" in self.ambari_props:\n self.db_url = self.ambari_props[\"server.jdbc.url\"]\n if \"ambari-server.user\" in self.ambari_props:\n self.ambari_server_user = self.ambari_props[\"ambari-server.user\"]\n\n #Logger.info(\"Using database type: {0}, name: {1}, host: {2}\".format(self.db_type, self.db_name, self.db_host))\n connection_string = \"dbname='{0}' user='{1}' host='{2}' password='{3}'\".format(self.db_name, self.db_user, self.db_host, self.db_password)\n\n if self.db_type == DB_TYPE.POSTGRES:\n try:\n import psycopg2 # covered by GNU Lesser General Public License\n except Exception, e:\n Logger.error(\"Need to install python-psycopg2 package for Postgres DB. E.g., yum install python-psycopg2\\n\")\n self.terminate()\n elif self.db_type == DB_TYPE.MYSQL:\n try:\n import pymysql # covered by MIT License\n except Exception, e:\n Logger.error(\"Need to install PyMySQL package for Python. 
E.g., yum install python-setuptools && easy_install pip && pip install PyMySQL\\n\")\n self.terminate()\n else:\n Logger.error(\"Unknown database type: {0}.\".format(self.db_type))\n self.terminate()\n\n self.conn = None\n self.cursor = None\n try:\n Logger.debug(\"Initializing database connection and cursor.\")\n if self.db_type == DB_TYPE.POSTGRES:\n self.conn = psycopg2.connect(connection_string)\n self.cursor = self.conn.cursor()\n elif self.db_type == DB_TYPE.MYSQL:\n self.conn = pymysql.connect(self.db_host, self.db_user, self.db_password, self.db_name)\n self.cursor = self.conn.cursor()\n\n Logger.debug(\"Created database connection and cursor.\")\n self.cursor.execute(\"SELECT metainfo_key, metainfo_value FROM metainfo WHERE metainfo_key='version';\")\n rows = self.cursor.fetchall()\n if rows and len(rows) == 1:\n self.ambari_version = rows[0][1]\n # Logger.info(\"Connected to database!!! Ambari version is {0}\\n\".format(self.ambari_version))\n\n # Must be Ambari 2.0.0 or higher\n if self.compare_versions(self.ambari_version, MIN_AMBARI_VERSION) < 0:\n Logger.error(\"Must be running Ambari Version {0} or higher.\\n\".format(MIN_AMBARI_VERSION))\n self.terminate()\n else:\n Logger.error(\"Unable to determine Ambari version.\")\n self.terminate()\n\n self.set_cluster()\n except Exception, e:\n Logger.error(\"I am unable to connect to the database. Error: {0}\\n\".format(e))\n self.terminate()\n else:\n raise Exception(\"Could not find file {0}\".format(AMBARI_PROPERTIES_LOCATION))", "def create_connection(self):\r\n\r\n try:\r\n self.conn = sqlite3.connect(self.database_name)\r\n\r\n except sqlite3.Error:\r\n print('Error connecting to database')", "def connect(self, settings):\n self.r = r.connect(settings['host'], settings['port'])\n self.db = r.db(settings['db'])", "def connect(self):\n self.connection = mysql.connector.connect(host=self.host, \n database=self.database, \n user=self.user, \n password=self.password, \n auth_plugin=self.auth_plugin)\n if self.connection.is_connected():\n print(\"Succesful connection to the database {} as {}\".format(self.database, self.user))\n self.cursor = self.connection.cursor()\n else:\n print(\"The connection to the database was not successful.\")", "def connect():\n return psycopg2.connect(\"dbname=forum\")", "def __init__(self):\n self.conn = psycopg2.connect(dbname=DB, user=DB_USER, password=DB_PW, host=HOST, port=PORT)\n self.categories = self.fill_category()\n self.fill_products()", "def connect_to_db(self):\n try:\n self.connection = pymysql.connect(host=self.host, user=self.user, password=self.password,\n database=self.database_name)\n except OperationalError as e:\n print(e)", "def connectDB(self): \n #connect to the database\n try:\n print(self.pg_dbname)\n self.conn = psycopg2.connect(\"dbname=%s user=%s password=%s host=%s port=%s\" % (self.pg_dbname, self.pg_username, self.pg_password, self.pg_host, self.pg_port))\n print(\"connected!\")\n except psycopg2.Error as e:\n print(\"I am unable to connect to the database\")\n print(e)\n\n #define cursor\n self.cur = self.conn.cursor()", "def connect_to_existing_database(self):\n self.my_database = mysql.connector.connect(\n host='localhost',\n user='root',\n password='',\n database=''\n )\n self.mycursor = self.my_database.cursor(prepared=True)\n \n #create database with name defined\n self.mycursor.execute(\"SHOW DATABASES\")\n for x in self.mycursor : # mycursor is a list\n print(x)\n \n db_name = input(\"Entrez le nom de la base de données?\")\n forbidden_word = ['mysql', 
'performance_schema', 'sys']\n if db_name in forbidden_word:\n print (\"Choix interdit!\")\n return self.connect_to_existing_database()\n\n try :\n self.my_database = mysql.connector.connect(\n host='localhost',\n user='root',\n password='',\n database= db_name\n )\n self.mycursor = self.my_database.cursor(prepared=True)\n print (\"Base de données correctement selectionnée\")\n\n except:\n print(\"Le nom de la base de données est-il correctement saisi? réessayez!\")\n return self.connect_to_existing_database()", "def __init__(self):\n with open('config.json') as config:\n data = json.load(config)\n\n password = self.decode_password(data['db']['password'])\n db_conn_string = 'postgresql://' + data['db']['username'] + ':' + password + '@' + \\\n data['db']['hostname'] + ':' + data['db']['port'] + '/' + data['db']['database']\n\n self.engine = create_engine(db_conn_string)\n try:\n conn = self.engine.connect()\n if conn is not None:\n print(\"-I- Successful Database Connection\")\n except Exception as e:\n print(\"-W- \" + str(e))", "def init_connection(self, connection):", "def __init__(self):\n super(CSC100DB, self).__init__()\n try:\n self.conn = mysql.connector.connect(\n host=\"localhost\",\n user=\"root\",\n password=\"csc100\",\n database=\"azwh\"\n )\n self.cursor = self.conn.cursor()\n\n except Exception as e:\n print(\"Error:\", e)", "def connect_db():\n conn = MySQLdb.connect(host=\"127.0.0.1\",\n user=\"root\",\n passwd=\"123\",\n db=\"graduation\",\n charset=\"utf8\",\n cursorclass=DictCursor)\n return conn", "def lagopus_connect_db():\n cnx = None\n\n try:\n connection_config = {\n k: DBCONF[k]\n for k in [\"user\", \"password\", \"database\", \"host\", \"raise_on_warnings\"]\n }\n cnx = mysql.connector.connect(**connection_config)\n print(\"Initialized database.\")\n except mysql.connector.Error as err:\n print(\"Couldn't connect to MySQL: {}\".format(err))\n\n return cnx", "def connect_db(self) -> sqlite3.Connection:\n self.connection = sqlite3.connect(self.database)\n self.connection.row_factory = sqlite3.Row\n\n self.get_cursor()", "def connection(self, database=None):\n\n try:\n return mysql.connector.connect(\n host=\"localhost\",\n user=\"root\",\n password=\"R+D@11\",\n database=database,\n auth_plugin=\"mysql_native_password\",\n )\n except mysql.connector.Error as err:\n messagebox.showerror(\n \"MySQL Connection Error\",\n \"Oops! 
Something went wrong!\\n\\n%s\" % err,\n )\n exit()", "def get_data(self, user, password, table):\n self.my_connect = SetData.GetData(self.host, self.database, self.charset)\n self.my_connect.connect(user, password)\n self.my_connect.select(\"SELECT * FROM {}\".format(table))\n self.result = self.my_connect.result", "async def connection():\n return await r.connect(db='main_db')", "def set_connection(self, **kwargs):\n if self.schema is None:\n self.conn = psycopg2.connect(\n host=self.host,\n port=self.port,\n user=self.user,\n password=self.password,\n dbname=self.db_name,\n **self.kwargs)\n else:\n self.conn = psycopg2.connect(\n host=self.host,\n port=self.port,\n user=self.user,\n password=self.password,\n dbname=self.db_name,\n options=f'--search_path={self.schema}',\n **self.kwargs)", "def configurar_canais(self):\n\n print(\"Canais configurados\")", "def reconect(self):\n self.connection = DataPostgres.connect(**self.options)\n return self.connection", "def get_con(s_name, auth_path=None, db=None):\n con=None\n auth_path = os.path.dirname(os.path.abspath(__file__)) + '/db.csv' if auth_path is None else auth_path\n csv_path=util.format_path(auth_path)\n t_db=pd.read_csv(csv_path)\n #t_db.fillna(None, inplace=True)\n t_db=t_db[t_db['ID']==s_name]\n if len(t_db)==0:\n util.error_msg('Database %s is not defined!' % s_name)\n one=t_db.iloc[0]\n s_db=db or one['DB']\n if one['TYPE']=='MYSQL':\n import MySQLdb as mysql\n #print one['HOST'], one['USR'], one['PWD'], s_db\n if not one['PORT'] or pd.isnull(one['PORT']):\n con=mysql.connect(one['HOST'], one['USR'], one['PWD'], s_db, port=3306, charset='utf8')\n else:\n con=mysql.connect(one['HOST'], one['USR'], one['PWD'], s_db, port=int(one['PORT']), charset='utf8')\n elif one['TYPE']=='POSTGRES':\n import pgdb\n # make sure you do:\n #module load postgresql/9.2.4\n #export LD_LIBRARY_PATH=.:/tools/GNU/postgresql/9.2.4/lib/\n con=pgdb.connect(one['CONNECT'])\n elif one['TYPE']=='ORACLE':\n import cx_Oracle as oracle\n con=oracle.connect(one['CONNECT'])\n else:\n util.error_msg('Unsupported database engine: %s' % one['TYPE'])\n return con", "def connect():\n return psycopg2.connect(\"dbname=tournament\")", "def connect():\n return psycopg2.connect(\"dbname=tournament\")", "def connect():\n return psycopg2.connect(\"dbname=tournament\")", "def connect():\n return psycopg2.connect(\"dbname=tournament\")", "def connect():\n return psycopg2.connect(\"dbname=tournament\")" ]
[ "0.710717", "0.6864451", "0.6749383", "0.6738874", "0.66128796", "0.6590426", "0.65721476", "0.65279865", "0.651362", "0.6474826", "0.6444196", "0.6436261", "0.64234966", "0.6408445", "0.6395795", "0.63708216", "0.6339452", "0.6337948", "0.6329089", "0.63109976", "0.6289543", "0.62811005", "0.62575597", "0.6256223", "0.6235326", "0.62277997", "0.61415446", "0.6138721", "0.61231565", "0.61148083", "0.6112884", "0.60653186", "0.60549664", "0.6050601", "0.6042185", "0.60421216", "0.6040861", "0.6040234", "0.60378546", "0.6036077", "0.60246676", "0.60206896", "0.6020494", "0.60073364", "0.59933174", "0.59755", "0.5966195", "0.5965478", "0.5964303", "0.59541863", "0.5923915", "0.59213024", "0.5911395", "0.59099007", "0.58999467", "0.58936995", "0.5886082", "0.5882502", "0.58678514", "0.5866898", "0.58662254", "0.58650225", "0.5840994", "0.58295876", "0.58286494", "0.58280355", "0.5819818", "0.5817866", "0.581699", "0.5812658", "0.5807437", "0.5799318", "0.57957613", "0.5792841", "0.5790878", "0.578998", "0.57821333", "0.5781665", "0.5781533", "0.5777966", "0.5768167", "0.57656735", "0.5765563", "0.5756772", "0.5751956", "0.5746133", "0.57436985", "0.5733597", "0.5728549", "0.5725742", "0.5707947", "0.57008487", "0.5692292", "0.568548", "0.56846815", "0.5681202", "0.56802464", "0.56802464", "0.56802464", "0.56802464", "0.56802464" ]
0.0
-1
Almacena el documento en la BD siempre y cuando no exista.
def save_ticker(connection, ticker_data=None): #evita operaciones si no existe informacion. if not ticker_data: return False ticker_hash = get_ticker_hash(ticker_data) if check_if_exists(connection, ticker_hash): return False #ticker_data['ticker_hash'] = get_ticker_hash(ticker_data) ticker_data['ticker_hash'] = ticker_hash # Almacena el documento en la BD de Mongo por medio de insertOne() connection.tickers.insert_one(ticker_data) return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save(self):\n if self.document.id:\n self.db.insert(self.document)\n else:\n self.db.update(self.document.id,self.document)", "def create_doc(c, d):\n try:\n c.insert_one(d)\n return True\n except:\n return False", "def get_or_create(self, orm, obj):\n try:\n dth = self.get()\n if dth:\n obj.FIELDS.update({'indexcol': dth.indexcol})\n self.save(orm, obj)\n return self.get()\n except:\n raise MchCriticalError(Exception('Death cannot be saved and fetched'))\n return False", "def create_document(obj):\n index = obj.get_index_name()\n doc_type = obj.get_document_type()\n body = obj.get_document_body()\n exists = ES.exists(index=index, doc_type=doc_type, id=obj.pk)\n\n if not exists:\n ES.create(index=index, doc_type=doc_type, body=body, id=obj.pk)\n return None\n\n return \"Conflict: document already exists for {0} with id {1}.\".format(\n obj.__class__.__name__, obj.pk)", "def add_doc_if_not_exists(self, doc, unique_property_name):\n doc_type = doc['type']\n property_value = doc[unique_property_name]\n existing_doc = self.find_doc(doc_type, unique_property_name, property_value)\n if existing_doc is not None:\n print('Returning {} doc where {}={}'.format(doc_type, unique_property_name, property_value))\n return existing_doc\n else:\n print('Creating {} doc where {}={}'.format(doc_type, unique_property_name, property_value))\n try:\n self.client.connect()\n db = self.client[self.db_name]\n return db.create_document(doc)\n finally:\n self.client.disconnect()", "def modificacion(self, socio):\n\n aux = self.buscar(socio.id)\n print('El socio a modificar en capa de datos:', aux.id, aux.nombre)\n\n if aux == None:\n return False\n else:\n #persona = session.query(Socio).filter(Socio.dni == aux.id)\n aux.nombre = socio.nombre\n aux.apellido = socio.apellido\n aux.dni = socio.dni\n\n session.commit()\n\n return aux", "def test_add_no_commit(self):\n user_id = get_rand_string()\n data = get_rand_string()\n id = get_rand_string()\n\n doc = {}\n doc[\"user_id\"] = user_id\n doc[\"data\"] = data\n doc[\"id\"] = id\n\n self.conn.add(**doc)\n results = self.conn.query(\"user_id:\" + user_id).results\n self.assertEquals(len(results), 0,\n \"Document (id:%s) shouldn't have been fetched\" % (id))", "def carregarDadosPessoais(self, documento):\r\n self.__id = int(documento)\r\n self.cursor.execute(\"SELECT * FROM DADOS_PESSOAIS WHERE DOCUMENTO = %s;\" %(self.__id))\r\n if self.cursor.rowcount == 1:\r\n return self.cursor.fetchone()\r\n else:\r\n return None", "def write_to_db(self, doc):\n self.db_connection[self.db_name][self.db_collection].insert_one(doc)", "def save(self, *args, **kwargs):\n if self.name == '':\n raise FieldError\n else:\n return super(CrossRefDB, self).save(*args, **kwargs)", "def save(self):\n if not connection.connected:\n raise Exception('Not connected to the database.')\n if not self._retrieved:\n self.insert()\n self._retrieved = True\n else:\n self.update()", "def test_add_one_document_object(self):\n user_id = get_rand_string()\n data = get_rand_string()\n id = get_rand_string()\n \n doc = Document()\n doc[\"user_id\"] = user_id\n doc[\"data\"] = data\n doc[\"id\"] = id\n \n #raise Exception, doc.as_xml\n self.conn.add(doc)\n self.conn.commit()\n results = self.conn.query(\"id:\" + id).results\n \n self.assertEquals(len(results), 1,\n \"Could not find expected data (id:%s)\" % id)", "def add(self, document):\n return self.db.update({document['id']: document})", "def persist_if_needed(self):\n if not self.id:\n super(ComicSiteModel,self).save()", "def clear_document(self, 
document):\n self._clear_document(document)\n #self.commit()", "def addDoc(self, doc, path):\n query = \"INSERT OR REPLACE INTO docs (local_path, resource_id, etag, title) VALUES (?, ?, ?, ?)\"\n self.db.execute(query, (path, doc.resource_id.text, doc.etag, doc.title.text))", "def save( self, request, idx ) :\n\n if idx != 'None' :\n obj = models.Document.objects.get( id = idx )\n obj.element = self.cleaned_data['element']\n obj.type = self.cleaned_data['type']\n obj.name = self.cleaned_data['name']\n\n else :\n obj = models.Document.objects.get_or_create(element = self.cleaned_data['element'],\n type = self.cleaned_data['type'],\n name = self.cleaned_data['name'],\n author = request.user )[0]\n\n obj.link = self.cleaned_data['link']\n obj.save()\n\n return obj", "def test_add_one_document(self):\n user_id = get_rand_string()\n data = get_rand_string()\n id = get_rand_string()\n\n doc = {}\n doc[\"user_id\"] = user_id\n doc[\"data\"] = data\n doc[\"id\"] = id\n\n self.conn.add(**doc)\n self.conn.commit()\n results = self.conn.query(\"id:\" + id).results\n\n self.assertEquals(len(results), 1,\n \"Could not find expected data (id:%s)\" % id)\n\n doc = results[0]\n self.assertEquals(doc[\"user_id\"], user_id)\n self.assertEquals(doc[\"data\"], data)", "def db_upsert(self, force_insert=False):\n\n if self.table.search((Query().name == self.name)):\n if force_insert:\n # self.already_exist = False\n self.table.update({'name': self.name, 'cartesian_coords': self.cartesian_coords, 'polar_cords': self.polar_coords}, Query().id == self.id)\n\n else:\n # self.already_exist = True\n return \"Already Exist\"\n else:\n self.table.insert({\n 'id': self.id,\n 'name': self.name,\n 'cartesian_coords': self.cartesian_coords,\n 'polar_cords': self.polar_coords\n }) # insert the given data\n\n return \"\"", "def baja(self, id_socio):\n socios = session.query(Socio).filter(Socio.id == id_socio).delete()\n session.commit()\n print(socios)\n if socios == 0:\n a = False\n else :\n a = True\n print(a)\n return a", "def test_add_one_document_object_implicit_commit(self):\n \n user_id = get_rand_string()\n data = get_rand_string()\n id = get_rand_string()\n\n doc = Document()\n doc[\"user_id\"] = user_id\n doc[\"data\"] = data\n doc[\"id\"] = id\n\n # Commit the changes\n self.conn.add(True, doc)\n results = self.conn.query(\"id:\" + id).results\n\n self.assertEquals(len(results), 1,\n \"Could not find expected data (id:%s)\" % id)\n\n doc = results[0]\n self.assertEquals(doc[\"user_id\"], user_id)\n self.assertEquals(doc[\"data\"], data)", "def db_upsert(self, force_insert=False):\n\n if self.table.search((Query().name == self.name)):\n if force_insert:\n # self.already_exist = False\n self.table.update({'name': self.name, 'positions': self.positions}, Query().id == self.id)\n\n else:\n # self.already_exist = True\n return \"Already Exist\"\n else:\n self.table.insert({\n 'id': self.id,\n 'name': self.name,\n 'positions': self.positions\n }) # insert the given data\n\n return \"\"", "def dummy():\n\t\t\tself.save()", "def save(self):\n self.save_to_db()\n if hasattr(self, 'id'):\n self.status_code = 201\n return True\n else:\n self.errors['messages'].append(\"DataBase Error, Please Try again\")\n self.status_code = 500\n return False", "def delDadosPessoais(self, documento):\r\n try:\r\n if documento is not None:\r\n self.cursor.execute(\"DELETE FROM DADOS_PESSOAIS WHERE DOCUMENTO = '%s';\" %(documento))\r\n return True\r\n else:\r\n return False\r\n except:\r\n return False", "def save(self):\n try:\n 
db.session.add(self)\n except Exception as db_exception: # noqa: B902; return nicer error\n current_app.logger.error('Db2Document.save exception: ' + str(db_exception))\n raise DatabaseException(db_exception)", "def init_doc(self, obj, update_dict=True):\n try:\n obj.essentials\n except AttributeError:\n raise AttributeError(\"An object to be saved in db is supposed to have the essentials attribute\")\n\n if obj.essentials is None:\n raise AttributeError(\"An object to be saved in db should not have NoneType as its essentials\")\n\n print(\"Saving this object into db: {}\".format(type(obj)))\n\n start = datetime.now()\n essen = self.mongo_doc_generator(obj.essentials)\n document = {\"essentials\": essen, 'datetime': start, 'filepaths': obj.filepaths}\n\n db_location = obj.db\n element = obj.decide_element()\n host = db_location[\"host\"]\n project = db_location[\"project\"]\n\n target_db = connect_collection(host, project, element)\n doc_created = target_db.insert_one(document)\n inserted_id = doc_created.inserted_id\n\n return inserted_id", "def document_exists(self, docid):\n raise NotImplementedError", "def editarDadosPessoais(self, documento = None, nome = None, sexo = None, dataNascId = None, enderecoId = None, celular = None, fixo = None):\r\n try:\r\n if documento is None:\r\n return False\r\n else:\r\n if nome != None:\r\n self.cursor.execute(\"UPDATE DADOS_PESSOAIS SET NOME = '%s' WHERE DOCUMENTO = '%s';\" %(nome, documento))\r\n if sexo != None:\r\n if sexo == 1:\r\n sexo = 'TRUE'\r\n else:\r\n sexo = 'FALSE'\r\n self.cursor.execute(\"UPDATE DADOS_PESSOAIS SET SEXO = %s WHERE DOCUMENTO = '%s';\" %(sexo, documento))\r\n if dataNascId != None:\r\n self.cursor.execute(\"UPDATE DADOS_PESSOAIS SET DATA_NASC = %s WHERE DOCUMENTO = '%s';\" %(dataNascId, documento))\r\n if enderecoId != None:\r\n self.cursor.execute(\"UPDATE DADOS_PESSOAIS SET ENDERECO = %s WHERE DOCUMENTO = '%s';\" %(enderecoId, documento))\r\n if celular != None:\r\n self.cursor.execute(\"UPDATE DADOS_PESSOAIS SET CELULAR = '%s' WHERE DOCUMENTO = '%s';\" %(celular, documento))\r\n if fixo != None:\r\n self.cursor.execute(\"UPDATE DADOS_PESSOAIS SET FIXO = '%s' WHERE DOCUMENTO = '%s';\" %(fixo, documento))\r\n return True\r\n except:\r\n return False", "def update_document(self):\n pass", "def save(self):\n ret = False\n\n # we will only use the primary key if it hasn't been modified\n pk = None\n if self.schema.pk.name not in self.modified_fields:\n pk = self.pk\n\n if pk:\n ret = self.update()\n else:\n ret = self.insert()\n\n return ret", "def test_add_none_field(self):\n user_id = get_rand_string()\n data = get_rand_string()\n id = get_rand_string()\n\n doc = {}\n doc[\"user_id\"] = user_id\n doc[\"data\"] = data\n doc[\"id\"] = id\n doc[\"num\"] = None\n\n self.conn.add(**doc)", "def save(self):\n if self.id:\n self.update()\n else:\n self.create()", "def test_update_or_create_with_zero(self):\n\n d = {'name': 'winboat', 'some_number': 0}\n e = twsu.update_or_create(self.DBTestCls1, d)\n self.session.flush()\n eq_(e.some_number, 0)", "def atualiza_usuario(self, dados={}):\n\n if self.busca_usuario(id_usuario=dados['id_usuario']):\n self.user = self.uPersistencia.buscarUsuario(\n id_usuario=dados['id_usuario'])\n return None\n else:\n return {'status': False,\n 'msg': 'nenhum usuario encontrado',\n 'dados': dados}", "def update_doc(c, i, d, u=False):\n try:\n c.update_one({'_id': i}, {'$set': d}, upsert = u)\n return True\n except:\n return False", "def addDadosPessoais(self, documento, nome, sexo, dataNascId, 
enderecoId, celular = None, fixo = None):\r\n if sexo == 1:\r\n sexo = 'TRUE'\r\n else:\r\n sexo = 'FALSE'\r\n try:\r\n self.cursor.execute(\"INSERT INTO DADOS_PESSOAIS(DOCUMENTO, NOME, SEXO, CELULAR, FIXO, DATA_NASC, ENDERECO) VALUES ('%s', '%s', %s, NULL, NULL, %s, %s);\" %(documento, nome, sexo, dataNascId, enderecoId))\r\n if celular is not None:\r\n self.cursor.execute(\"UPDATE DADOS_PESSOAIS SET CELULAR = '%s' WHERE DOCUMENTO = '%s';\" %(celular, documento))\r\n if fixo is not None:\r\n self.cursor.execute(\"UPDATE DADOS_PESSOAIS SET FIXO = '%s' WHERE DOCUMENTO = '%s';\" %(fixo, documento))\r\n return True\r\n except:\r\n return False", "def save(self, db):\n pass", "def db_for_write(self, model, **hints):\n return None", "def save(self):\n if self.id is None:\n self._insert()\n else:\n self._update()", "def save(self, *args, **kwargs):\n if self.pk is None:\n saved_image = self.logo\n self.logo = None\n super().save(*args, **kwargs)\n self.logo = saved_image\n kwargs.pop('force_insert', None)\n super().save(*args, **kwargs)", "def addAluno(self, dados, situacao, login, turma):\r\n try:\r\n self.cursor.execute(\"INSERT INTO ALUNO(MATRICULA, DADOS, SITUACAO, LOGIN, TURMA) VALUES (NULL, '%s', '%s', '%s', %s);\" %(dados, situacao, login, turma))\r\n return True\r\n except:\r\n return False", "def doc_exist(self, docid):\n doc = Document(self.cloudant_database, docid)\n return doc.exists()", "def _commit(self):\n if self._con is not None and self.__tainted:\n self._con.execute('UPDATE word SET normalized_id = rowid WHERE normalized_id is NULL')\n self._con.commit()\n self.__tainted = False", "def test_select_unexisting_field(self, document):\n assert document.select({\"idontexist\": 1}) == {\"_id\": 1, \"idontexist\": None}", "def save_db(self) -> None:", "def putdados(response):\n\n cursos_collections.update_one(response['id'], response['set'], upsert=True)\n\n mensage = 'O id {} foi atualizado'.format(response['id']['_id'])\n\n return mensage", "def save(self):\n self.db.commit()", "def add_document(self, doc):\n assert isinstance(doc, pylastica.document.Document), \"doc must be of type Document: %r\" % doc\n path = urllib.quote_plus(str(doc.doc_id))\n request_type = pylastica.request.Request.PUT\n if path is None or path == '':\n #no doc id has been given; use post so that an id is automatically created\n request_type = pylastica.request.Request.POST\n options = doc.get_options([\n 'version',\n 'version_type',\n 'routing',\n 'percolate',\n 'parent',\n 'ttl',\n 'timestamp',\n 'op_type',\n 'consistency',\n 'replication',\n 'refresh',\n 'timeout'\n ])\n response = self.request(path, request_type, doc.data, options)\n data = response.data\n if (doc.auto_populate or self.index.client.get_config_value(['document', 'autoPopulate'], False)) and response.is_ok():\n if doc.has_id():\n if '_id' in data:\n doc.doc_id = data['_id']\n if '_version' in data:\n doc.version = data['_version']\n return response", "def commit_unless_managed(self):\n if not self.is_managed():\n self.commit()", "def delete(self):\n self.read = False\n self.write = False\n self.save()", "def _apagar_aluno(self, dict_values):\n # Apagar da tabela Aluno\n self.database[\"Aluno\"].delete_one(dict_values)", "def save(self, *args, **kwargs):\n if not self.id and User.objects.filter(email=self.email).exists():\n raise IntegrityError()\n super().save(*args, **kwargs)", "async def _save(self, document, alias=None):\n doc = document.to_son()\n\n if document._id is not None:\n try:\n await self.coll(alias).update({\"_id\": 
document._id}, doc)\n except DuplicateKeyError as e:\n raise UniqueKeyViolationError.from_pymongo(str(e), self.__klass__)\n else:\n try:\n doc_id = await self.coll(alias).insert(doc)\n except DuplicateKeyError as e:\n raise UniqueKeyViolationError.from_pymongo(str(e), self.__klass__)\n document._id = doc_id\n\n return document", "def exists(self):\n try:\n key = self.key\n except DoesNotExist:\n \"\"\"\n If the object doesn't exists anymore, its PK is deleted, so the\n \"self.key\" call will raise a DoesnotExist exception. We catch it\n to return False, as the field doesn't exists too.\n \"\"\"\n return False\n else:\n return self.connection.exists(key)", "def updateMasivo(self, objects):\n retorno = 1\n try:\n for object in objects:\n self.session.merge(object)\n self.session.commit()\n \n except sqlalchemy.exc.DBAPIError, e:\n if self.session is not None:\n self.session.rollback()\n retorno = -1\n print(\"Error!\", e)\n finally:\n if self._DAOAlchemy__cerrarSesion:\n self.session.close()\n return retorno", "def save(self):\n try:\n db.session.add(self)\n db.session.commit()\n return True\n except SQLAlchemyError as e:\n db.session.rollback()\n logger.error(\"database operation error: \", e)\n return False", "def save_in_db(self):\n self.sql_database.table_name = self.table_db\n self.sql_database.db_name = self.db\n if self.sql_database.insert_item(text_path=self.path, word_first=self.word_1.get(),\n word_second=self.word_2.get(),\n word_third=self.word_3.get(), word_fourth=self.word_4.get(),\n word_fifth=self.word_5.get()):\n msg.showinfo(message=\"Done\")", "def initDataBase():\n if len(Elems):\n if not addElem2Specie():\n return False\n return True", "def save(self, doc, form=None, creation=False, refresh_index=True, asAuthor=True, onSaveEvent=True, mantainOriginalForm=True):\n method = 'save'\n\n # we process computed fields (refresh the value)\n if form is None:\n form = doc.getForm()\n elif not mantainOriginalForm:\n # questa sostituzione può essere evitata...\n doc.setItem('Form', form.getFormName())\n\n db=self.db # SONO QUI\n if form:\n for f in form.getFormFields(includesubforms=True, doc=doc, applyhidewhen=False):\n mode = f.getFieldMode()\n fieldname = f.id\n if mode in [\"COMPUTED\", \"COMPUTEDONSAVE\"] or (mode==\"CREATION\" and creation):\n result = form.computeFieldValue(fieldname, doc)\n doc.setItem(fieldname, result)\n else:\n # computed for display field are not stored\n pass\n\n # compute the document title\n title_formula = form.getDocumentTitle()\n if title_formula:\n # Use the formula if we have one\n try:\n title = doc.runFormulaScript(\"form_\"+form.id+\"_title\", doc, form.DocumentTitle)\n except PlominoScriptException, e:\n message = 'Title formula failed: %s' % e\n self.set_error(method, message)\n else:\n if title != doc.Title():\n doc.setTitle(title)\n elif creation:\n # If we have no formula and we're creating, use Form's title\n title = form.Title()\n if title != doc.Title():\n # We may be calling save with 'creation=True' on\n # existing documents, in which case we may already have\n # a title.\n doc.setTitle(title)\n\n # update the document id\n if creation and form.getDocumentId():\n new_id = doc.generateNewId()\n if new_id:\n transaction.savepoint(optimistic=True)\n db.documents.manage_renameObject(doc.id, new_id)\n\n # update the Plomino_Authors field with the current user name\n if asAuthor:\n authors = doc.getItem('Plomino_Authors', []) or []\n name = db.getCurrentUser().getUserName()\n\n if name not in authors:\n authors.append(name)\n\n 
doc.setItem('Plomino_Authors', authors)\n\n # execute the onSaveDocument code of the form\n if form and onSaveEvent:\n try:\n result = doc.runFormulaScript(\"form_\"+form.id+\"_onsave\", doc, form.onSaveDocument)\n# if result and hasattr(doc, 'REQUEST'):\n# doc.REQUEST.set('plominoredirecturl', result)\n except PlominoScriptException, e:\n message = 'Document has been saved but onSave event failed: %s' % e\n self.set_error(method, message)\n# if hasattr(doc, 'REQUEST'):\n# e.reportError('Document has been saved but onSave event failed.')\n# doc_path = doc.REQUEST.physicalPathToURL(doc.doc_path())\n# doc.REQUEST.RESPONSE.redirect(doc_path)\n\n if refresh_index:\n # update index\n db.getIndex().indexDocument(doc)\n # update portal_catalog\n if db.getIndexInPortal():\n db.portal_catalog.catalog_object(doc, \"/\".join(db.getPhysicalPath() + (doc.id,)))", "def test_index_over_non_existing_field(self, collection):\n collection.create_index(\n [(\"hello\", Database.DESCENDING), (\"idontexist\", Database.ASCENDING)],\n unique=True,\n )\n\n collection.insert_many([{\"hello\": \"here\"}, {\"hello\": 2}])\n assert collection._indexes == {\n \"_id_\": ((\"_id\",), {(1,), (2,), (3,)}),\n \"hello_1_idontexist_1\": (\n (\"hello\", \"idontexist\"),\n {(\"there\", None), (\"here\", None), (2, None)},\n ),\n }\n assert collection.find({}, selection={\"hello\": 1, \"idontexist\": 1}) == [\n {\"_id\": 1, \"hello\": \"there\", \"idontexist\": None},\n {\"_id\": 2, \"hello\": \"here\", \"idontexist\": None},\n {\"_id\": 3, \"hello\": 2, \"idontexist\": None},\n ]", "def alta(self, socio):\n\n soc1 = Socio()\n\n soc1.dni = socio.dni\n soc1.nombre = socio.nombre\n soc1.apellido = socio.apellido\n\n session.add(soc1)\n session.commit()\n\n obj = session.query(Socio).order_by(Socio.id.desc()).first()\n return obj", "def test_delete_one_document_by_query(self):\n user_id = get_rand_string()\n data = get_rand_string()\n id = get_rand_string()\n\n doc = {}\n doc[\"user_id\"] = user_id\n doc[\"data\"] = data\n doc[\"id\"] = data\n\n self.conn.add(**doc)\n self.conn.commit()\n\n results = self.conn.query(\"id:\" + id).results\n\n self.conn.delete_query(\"id:\" + id)\n self.conn.commit()\n\n results = self.conn.query(\"id:\" + id).results\n self.assertEquals(len(results), 0,\n \"Document (id:%s) should've been deleted\" % id)", "def save(self):\n self.__db.commit()", "def save(self, db):\n db.query(\n \"INSERT INTO fellows (name, accomodation)\\\n VALUES(:name, :accomodation)\",\n name=self.name, accomodation=self.wants_accomodation\n )", "def _put_assume_new(self, _id=None, **data):\n if _id is None:\n _id = str(uuid4())\n doc = dict(_id=_id, **data)\n try:\n current_doc = self._db.create_document(doc, throw_on_exists=True)\n except couchdb.http.ResourceConflict:\n # TODO: _rev is in header, don't need to get entire doc\n # Don't use self.get, don't want to actually download an attachment\n current_doc = self._db.get(_id)\n current_doc.update(doc)\n current_doc.save()\n return current_doc", "def delLogin(self, documento = None):\r\n try:\r\n if documento is not None:\r\n self.cursor.execute(\"DELETE FROM LOGIN WHERE USUARIO = '%s';\" %(documento))\r\n return True\r\n else:\r\n return False\r\n except:\r\n return False", "def add_or_update(self, answer):\n if self.exists(answer):\n self.update(answer)\n else:\n self.add(answer)", "def create():\n error = None\n success = False\n if request.method == 'POST':\n nome = request.form['nome']\n if not nome:\n error = 'Nome é obrigatório.'\n else:\n try:\n if 
verifica_autor_bd(nome):\n error = 'Autor já cadastrado!'\n else:\n db.insert_bd('INSERT INTO autor values (default, \"%s\")' % nome)\n success = True\n except Exception as e:\n print(e)\n return redirect(url_for('error'))\n\n return render_template('autor/create.html', error=error, success=success)", "def _save(self):\n yield self.validate()\n\n db = self.db_client()\n saved = yield db.save_doc(self._resource)\n\n # Allow couch to create Document IDs\n if '_id' not in self._resource:\n self._resource['_id'] = saved['id']", "def save(self):\n db.session.commit()", "def addData(self, ano, mes, dia, hora = None):\r\n try:\r\n self.__temp = str(ano)+'-'+str(mes)+'-'+str(dia)\r\n if hora == None:\r\n self.cursor.execute(\"INSERT INTO DATAS(ID, DAT, HORA) VALUES (NULL, '%s', NULL);\" %(self.__temp))\r\n return True\r\n else:\r\n self.cursor.execute(\"INSERT INTO DATAS(ID, DAT, HORA) VALUES (NULL, '%s', '%s');\" %(self.__temp, hora))\r\n return True\r\n except:\r\n return False", "def addingNull(self, database):\r\n try:\r\n date = self.lineWidgets[\"FECHA\"].text()\r\n try:\r\n month = int(date.split(\"-\")[1])\r\n except ValueError:\r\n month = int(date.split(\"-\")[1][0])\r\n year = int(date.split(\"-\")[0])\r\n self.conn = connect(\"database.sqlite\")\r\n self.cur = self.conn.cursor()\r\n self.cur.execute(\r\n f'''INSERT INTO {database} (date, month_id, year, concept, \r\n value) VALUES(?, ?, ?, ?, ?)\r\n ''', (date, month, year, \"NADA\", 0))\r\n self.conn.commit()\r\n self.cur.close()\r\n except (ValueError, IndexError):\r\n QMessageBox.critical(\r\n self, \"ERROR\", '''Put the date in its correct form''')", "def saveData(self):\r\n newPaper = self.getData()\r\n\r\n cur = self.dbConn.execute(\"SELECT PaperID FROM Papers WHERE Title = ?\",[self.title.getVal()])\r\n res = cur.fetchone()\r\n if not res==None:\r\n self.dbConn.execute(\"DELETE FROM CoAuthors WHERE Paper = ?\",[res[\"PaperID\"]])\r\n self.dbConn.execute(\"DELETE FROM Papers WHERE Title = ?\",[self.title.getVal()])\r\n\r\n sqlStr = str.format(\"INSERT into {} ({}) VALUES ({})\",self.dataTable,\",\".join(self.dataCols),\"?\"+\",?\"*(len(self.dataCols)-1))\r\n newCur = self.dbConn.execute(sqlStr,[newPaper.symposium, newPaper.title,newPaper.primeAuthor,newPaper.correspond])\r\n\r\n newRow = newCur.lastrowid\r\n if len(newPaper.coauthors)>0:\r\n for coauth in newPaper.coauthors:\r\n sqlStr = str.format(\"INSERT into {} ({}) VALUES ({})\",self.coauthorTable,\",\".join(self.coauthorCols),\"?\"+\",?\"*(len(self.coauthorCols)-1))\r\n newCur = self.dbConn.execute(sqlStr,[newRow,coauth])\r\n\r\n self.dbConn.commit()\r\n self.clearData()", "def delete_document(self):\n pass", "def save(self):\n if self.get('_id'):\n return self.connection.update({'_id': self.get('_id')}, {'$set': self._export(without_id=True)})\n else:\n return self.connection.insert(self._export())", "def atualizar(self, **kwargs):\n p_id_aluno = kwargs.get('id_aluno')\n p_nome = kwargs.get('nome')\n p_cpf = kwargs.get('cpf')\n p_data_nasc = kwargs.get('data_nasc')\n p_telefone = kwargs.get('telefone')\n\n sql = f\"\"\"\n UPDATE public.Alunos\n SET nome_aluno = '{p_nome}',\n cpf_aluno = '{p_cpf}',\n data_nasc_aluno = '{p_data_nasc}',\n telefone_aluno = '{p_telefone}'\n WHERE id_aluno = '{p_id_aluno}';\n \"\"\"\n\n conn = None\n updated_rows = 0\n\n try: \n params = config(filename=\".\\database.ini\")\n conn = psycopg2.connect(**params)\n\n cur = conn.cursor()\n\n cur.execute(sql)\n\n deleted_rows = cur.rowcount\n\n conn.commit()\n\n cur.close()\n return deleted_rows\n 
except(Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n conn.close()", "def save(self)->None:\n item = database.cursor.fetchone()\n if item:\n self.id = item['id']\n database.connection.commit()", "def save(self, orm, obj):\n try:\n ref = orm.ORM.store(self.table, obj.FIELDS)\n return ref\n except:\n raise MchCriticalError(Exception('Death cannot be saved'))\n return False", "def save(self):\n # type: () -> bool\n\n return self.query.commit(self.id, self)", "def delAluno(self, matricula):\r\n try:\r\n if matricula != None:\r\n self.cursor.execute(\"DELETE FROM ALUNO WHERE MATRICULA = %s;\" %(matricula))\r\n return True\r\n except:\r\n return False", "def test_find_empty_embedded(self):\n\n class User(EmbeddedDocument):\n name = StringField()\n\n class BlogPost(Document):\n content = StringField()\n author = EmbeddedDocumentField(User)\n\n BlogPost.drop_collection()\n\n BlogPost.objects.create(content=\"Anonymous post...\")\n\n result = BlogPost.objects.get(author=None)\n assert result.author is None", "def _atualizar_aluno(self, dict_values):\n # Para atualizar temos de usar um filtro para encontrar o aluno\n # que será o numero\n filtro = {\"Numero\": dict_values.pop(\"Numero\")}\n # Criar query para update\n update_query = {\"$set\": dict_values}\n self.database[\"Aluno\"].update_one(filtro, update_query)", "def test_database_object_overwrite_parameter_is_set(self):\n database = generate_database_object(overwrite=True)\n\n self.assertEqual(\n True,\n database.overwrite == True,\n \"Database object did not have an overwrite flag, despite being created with one.\"\n )", "def reset_document(self):\n # FIXME: this state does not make sense\n self.doc_version_set = False\n self.doc_comment_set = False\n self.doc_namespace_set = False\n self.doc_data_lics_set = False\n self.doc_name_set = False\n self.doc_spdx_id_set = False", "def borra_asignatura(id):\n try:\n cnx = mysql.connector.connect(user=user, password=password, database=database) \n cursor = cnx.cursor()\n #cursor.execute(\"DELETE FROM `calificacion` WHERE `calificacion`.`id_asignatura` = {}\".format(id))\n cursor.execute(\"DELETE FROM `asignatura` WHERE `asignatura`.`id` = {}\".format(id))\n cnx.commit()\n except mysql.connector.Error as e:\n cursor.close()\n cnx.close()\n abort(400, \"La asignatura no ha podido ser borrada.\")\n if cursor.rowcount == 0:\n cursor.close()\n cnx.close()\n abort(400, \"La asignatura no existe.\")\n cursor.close()\n cnx.close()\n return \"Asignatura borrada correctamente.\"", "def db_exists(self, db):\n raise NotImplementedError()", "def test_add_one_document_implicit_commit(self):\n\n # That one fails in r5 (<commit/> must be made on its own)\n\n user_id = get_rand_string()\n data = get_rand_string()\n id = get_rand_string()\n\n doc = {}\n doc[\"user_id\"] = user_id\n doc[\"data\"] = data\n doc[\"id\"] = id\n\n # Commit the changes\n self.conn.add(True, **doc)\n results = self.conn.query(\"id:\" + id).results\n\n self.assertEquals(len(results), 1,\n \"Could not find expected data (id:%s)\" % id)\n\n doc = results[0]\n self.assertEquals(doc[\"user_id\"], user_id)\n self.assertEquals(doc[\"data\"], data)", "def save(self):\n return None", "def checkPersistence(self, _, __): # pylint: disable=C0103\n return False", "def createDocument(self, document):\n data = self.createDocumentAll([document])\n try:\n return data[0]\n except: pass", "def save_book(self):\n db.session.add(self)\n db.session.commit()", "def test_update_no_pk(self):\n album = 
Album(artist='Artist', album='Album',\n totaltracks=1, totalseconds=120)\n with self.assertRaises(Exception):\n album.update(self.app.db, self.app.curs)\n self.assertEqual(self.get_album_count(), 0)", "def db_has_object(rep_cursor, sql, query_args):\n rep_cursor.execute(sql, query_args)\n if rep_cursor.rowcount == 0:\n return False\n return True", "def add(self, docs: DocumentArray, *args, **kwargs):\n cursor = self.connection.cursor()\n try:\n psycopg2.extras.execute_batch(\n cursor,\n f'INSERT INTO {self.table} (ID, DOC) VALUES (%s, %s)',\n [\n (\n doc.id,\n doc.SerializeToString(),\n )\n for doc in docs\n ],\n )\n except psycopg2.errors.UniqueViolation as e:\n self.logger.warning(\n f'Document already exists in PSQL database. {e}. Skipping entire transaction...'\n )\n self.connection.rollback()\n self.connection.commit()", "def test_count_and_none(self):\n\n class MyDoc(Document):\n pass\n\n MyDoc.drop_collection()\n for i in range(0, 10):\n MyDoc().save()\n\n assert MyDoc.objects.count() == 10\n assert MyDoc.objects.none().count() == 0", "def init_database():\n exists = Agent.query.all()\n if exists is None or len(exists) == 0:\n # Setting up agent\n agent = Agent(name='OpenCampus',\n about=\"Este es el chabot de Open Campus capaz de resolver dudas sobre los diferentes cursos de la oferta actual de Open Campus\")\n\n db.session.add(agent)\n\n # Setting upd properties\n\n description_prop = Property(name=\"http://127.0.0.1/ockb/course/ontology/description\")\n begin_date_prop = Property(name=\"http://127.0.0.1/ockb/course/ontology/beginDate\")\n end_date_prop = Property(name=\"http://127.0.0.1/ockb/course/ontology/endDate\")\n requirement_prop = Property(name=\"http://127.0.0.1/ockb/course/ontology/requirement\")\n duration_prop = Property(name=\"http://127.0.0.1/ockb/course/ontology/duration\")\n cost_prop = Property(name=\"http://127.0.0.1/ockb/course/ontology/cost\")\n teacher_name_prop = Property(name=\"http://127.0.0.1/ockb/course/ontology/teacherName\")\n content_name_prop = Property(name=\"http://127.0.0.1/ockb/course/ontology/content\")\n course_name_prop = Property(name=\"http://127.0.0.1/ockb/course/ontology/courseName\")\n\n # db.session.add(description_prop)\n # db.session.add(begin_date_prop)\n # db.session.add(end_date_prop)\n # db.session.add(requirement_prop)\n # db.session.add(duration_prop)\n # db.session.add(cost_prop)\n # db.session.add(teacher_name_prop)\n # db.session.add(content_name_prop)\n # db.session.add(course_name_prop)\n\n # Setting up answers\n\n ObtenerInformacionAnswer = Answer(uri=\"ObtenerInformacionAnswer\", answer_template=\"{%description%}\",\n properties=[description_prop])\n\n # db.session.add(ObtenerInformacionAnswer)\n ObtenerFechasAnswer = Answer(uri=\"ObtenerFechasAnswer\",\n answer_template=\"Las fechas importantes del curso son {%beginDate%} y termina el dia {%endDate%}\",\n properties=[begin_date_prop, end_date_prop])\n\n # db.session.add(ObtenerFechasAnswer)\n ObtenerFechasInicioAnswer = Answer(uri=\"ObtenerFechasInicioAnswer\",\n answer_template=\"El curso inicia el dia {%beginDate%}\",\n properties=[begin_date_prop])\n # db.session.add(ObtenerFechasInicioAnswer)\n ObtenerFechasFinAnswer = Answer(uri=\"ObtenerFechasFinAnswer\",\n answer_template=\"El curso finaliza el dia {%endDate%}\",\n properties=[end_date_prop])\n # db.session.add(ObtenerFechasFinAnswer)\n ObtenerPrerequisitosAnswer = Answer(uri=\"ObtenerPrerequisitosAnswer\",\n answer_template=\"Los prerequisitos del curso son {%requirement%}\",\n properties=[requirement_prop])\n # 
db.session.add(ObtenerPrerequisitosAnswer)\n ObtenerDuracionAnswer = Answer(uri=\"ObtenerDuracionAnswer\",\n answer_template=\"El curso tiene una duracion de {%duration%}\",\n properties=[duration_prop])\n # db.session.add(ObtenerDuracionAnswer)\n ObtenerPrecioAnswer = Answer(uri=\"ObtenerPrecioAnswer\", answer_template=\"{%cost%}\", properties=[cost_prop])\n # db.session.add(ObtenerPrecioAnswer)\n ObtenerDocenteAnswer = Answer(uri=\"ObtenerDocenteAnswer\",\n answer_template=\"El docente encargado del curso es {%teacherName%}\",\n properties=[teacher_name_prop],\n refers_to=\"http://127.0.0.1/ockb/course/ontology/hasTeacher\")\n # db.session.add(ObtenerDocenteAnswer)\n ObtenerContenidosAnswer = Answer(uri=\"ObtenerContenidosAnswer\",\n answer_template=\"Los contenidos a tratar en el curso son {%content%}\",\n properties=[content_name_prop],\n refers_to=\"http://127.0.0.1/ockb/course/ontology/hasContenido\")\n # db.session.add(ObtenerContenidosAnswer)\n ListarCursosAnswer = Answer(uri=\"ListarCursosAnswer\",\n answer_template=\"Los cursos de la oferta actual son: {%courseName%}\",\n properties=[course_name_prop],\n refers_to=\"http://127.0.0.1/ockb/course/ontology/hasCourse\",\n answer_from=\"http://127.0.0.1/ockb/resources/OpenCampusFebrero-Julio\")\n\n # Setting up resolution\n ObtenerInformacionResolution = Resolution(uri=\"ObtenerInformacionResolution\",\n question=\"De que cursos deseas conocer\",\n resolves=\"http://127.0.0.1/ockb/course/resource/Course\")\n ObtenerFechasResolution = Resolution(uri=\"ObtenerFechasResolution\", question=\"De que cursos deseas conocer\",\n resolves=\"http://127.0.0.1/ockb/course/resource/Course\")\n ObtenerFechasInicioResolution = Resolution(uri=\"ObtenerFechasInicioResolution\",\n question=\"De que cursos deseas conocer\",\n resolves=\"http://127.0.0.1/ockb/course/resource/Course\")\n ObtenerFechasFinResolution = Resolution(uri=\"ObtenerFechasFinResolution\",\n question=\"De que cursos deseas conocer\",\n resolves=\"http://127.0.0.1/ockb/course/resource/Course\")\n ObtenerPrerequisitosResolution = Resolution(uri=\"ObtenerPrerequisitosResolution\",\n question=\"De que cursos deseas conocer\",\n resolves=\"http://127.0.0.1/ockb/course/resource/Course\")\n ObtenerDuracionResolution = Resolution(uri=\"ObtenerDuracionResolution\", question=\"De que cursos deseas conocer\",\n resolves=\"http://127.0.0.1/ockb/course/resource/Course\")\n ObtenerPrecioResolution = Resolution(uri=\"ObtenerPrecioResolution\", question=\"De que cursos deseas conocer\",\n resolves=\"http://127.0.0.1/ockb/course/resource/Course\")\n ObtenerDocenteResolution = Resolution(uri=\"ObtenerDocenteResolution\", question=\"De que cursos deseas conocer\",\n resolves=\"http://127.0.0.1/ockb/course/resource/Course\")\n ObtenerContenidosResolution = Resolution(uri=\"ObtenerContenidosResolution\",\n question=\"De que cursos deseas conocer\",\n resolves=\"http://127.0.0.1/ockb/course/resource/Course\")\n\n # Setting up Entity\n\n curso_entity = Entity(name=\"http://127.0.0.1/ockb/course/resource/Course\")\n\n # setting up synonyms:\n Synonym(name=\"Mooc\", entity=curso_entity)\n Synonym(name=\"Taller\", entity=curso_entity)\n Synonym(name=\"Curso\", entity=curso_entity)\n Synonym(name=\"Open Course\", entity=curso_entity)\n\n # Setting up intents\n\n ObtenerInformacion = Intent(name=\"ObtenerInformacion\", agent=agent,\n description=\"Obtener una breve descripcion del curso\",\n answer=ObtenerInformacionAnswer, resolution=ObtenerInformacionResolution,\n entities=[curso_entity])\n ObtenerFechas = 
Intent(name=\"ObtenerFechas\", agent=agent,\n description=\"Obtener las fechas importantes del curso\",\n answer=ObtenerFechasAnswer, resolution=ObtenerFechasResolution, entities=[curso_entity])\n ObtenerFechasInicio = Intent(name=\"ObtenerFechasInicio\", agent=agent,\n description=\"Obtener las fechas de inicio del curso\",\n answer=ObtenerFechasInicioAnswer, resolution=ObtenerFechasInicioResolution,\n entities=[curso_entity])\n ObtenerFechasFin = Intent(name=\"ObtenerFechasFin\", agent=agent,\n description=\"Obtener las fechas de finalizacion del curso\",\n answer=ObtenerFechasFinAnswer, resolution=ObtenerFechasFinResolution,\n entities=[curso_entity])\n ObtenerPrerequisitos = Intent(name=\"ObtenerPrerequisitos\", agent=agent,\n description=\"Obtener prerequisitos del curso\",\n answer=ObtenerPrerequisitosAnswer,\n resolution=ObtenerPrerequisitosResolution)\n ObtenerDuracion = Intent(name=\"ObtenerDuracion\", agent=agent,\n description=\"Obtener la duracion del curso\", answer=ObtenerDuracionAnswer,\n resolution=ObtenerDuracionResolution, entities=[curso_entity])\n ObtenerPrecio = Intent(name=\"ObtenerPrecio\", agent=agent, description=\"Obtener el precio del curso\",\n answer=ObtenerPrecioAnswer, resolution=ObtenerPrecioResolution, entities=[curso_entity])\n ObtenerDocente = Intent(name=\"ObtenerDocente\", agent=agent,\n description=\"Obtener los nombres de los docentes del curso\",\n answer=ObtenerDocenteAnswer, resolution=ObtenerDocenteResolution,\n entities=[curso_entity])\n ObtenerContenidos = Intent(name=\"ObtenerContenidos\", agent=agent,\n description=\"Obtener los contenidos del curso\",\n answer=ObtenerContenidosAnswer, resolution=ObtenerDocenteResolution,\n entities=[curso_entity])\n ListarCursos = Intent(name=\"ListarCursos\", agent=agent,\n description=\"Presentar la oferta actual de cursos\", answer=ListarCursosAnswer,\n resolution=ObtenerContenidosResolution)\n # Setting up sentences\n\n Sentence(intent=ObtenerInformacion, sentence=\"De que trata el mooc?\")\n Sentence(intent=ObtenerInformacion, sentence=\"Quiero informacion del curso de emprendimiento\")\n Sentence(intent=ObtenerInformacion, sentence=\"Muestrame un resumen del mooc?\")\n Sentence(intent=ObtenerInformacion, sentence=\"Breve introducción al curso\")\n Sentence(intent=ObtenerInformacion, sentence=\"que es emprendimiento\")\n Sentence(intent=ObtenerInformacion, sentence=\"De que se trata el curso?\")\n Sentence(intent=ObtenerInformacion, sentence=\"De qué va el curso?\")\n Sentence(intent=ObtenerInformacion, sentence=\"Me ayudas con información acerca del curso?\")\n Sentence(intent=ObtenerFechas, sentence=\"Cuáles son las fechas importantes del curso?\")\n Sentence(intent=ObtenerFechas, sentence=\"Fechas clave del curso\")\n Sentence(intent=ObtenerFechas, sentence=\"Que fechas debo tomar en cuenta\")\n Sentence(intent=ObtenerFechas, sentence=\"fechas de inicio y fin\")\n Sentence(intent=ObtenerFechas, sentence=\"Cuándo comienza el curso?\")\n Sentence(intent=ObtenerFechas, sentence=\"Fechas importantes del curso de inteligencia artificial\")\n Sentence(intent=ObtenerFechas, sentence=\"Cuáles son las fechas importantes del curso de emprendimiento\")\n Sentence(intent=ObtenerFechasInicio, sentence=\"Cuándo inicia el curso de emprendimiento\")\n Sentence(intent=ObtenerFechasInicio, sentence=\"Cuándo empiezan los cursos ?\")\n Sentence(intent=ObtenerFechasInicio, sentence=\"Fecha de inicio de los moocs?\")\n Sentence(intent=ObtenerFechasInicio, sentence=\"Día de inicio de los moocs ?\")\n 
Sentence(intent=ObtenerFechasInicio, sentence=\"En que fecha inician los moocs?\")\n Sentence(intent=ObtenerFechasInicio, sentence=\"A partir de que fecha empiezan los mooc?\")\n Sentence(intent=ObtenerFechasFin, sentence=\"Cuando finaliza el curso?\")\n Sentence(intent=ObtenerFechasFin, sentence=\"En que fecha termina el curso?\")\n Sentence(intent=ObtenerFechasFin, sentence=\"Cuando termina el curso?\")\n Sentence(intent=ObtenerPrerequisitos,\n sentence=\"Cuáles son los requisitos necesarios para el curso de emprendimiento\")\n Sentence(intent=ObtenerPrerequisitos, sentence=\"Cuáles son los prerequisitos?\")\n Sentence(intent=ObtenerPrerequisitos, sentence=\"Requisitos previos de ingreso al curso\")\n Sentence(intent=ObtenerPrerequisitos, sentence=\"Dame a conocer los prerequisitos\")\n Sentence(intent=ObtenerPrerequisitos, sentence=\"Me puedes indicar los prerequistos necesarios?\")\n Sentence(intent=ObtenerPrerequisitos, sentence=\"Que necesito saber antes de iniciar el curso\")\n Sentence(intent=ObtenerPrerequisitos, sentence=\"que se necesita saber para este curso\")\n Sentence(intent=ObtenerPrerequisitos, sentence=\"Los pre requisitos cuales son?\")\n Sentence(intent=ObtenerPrerequisitos, sentence=\"Qué se necesita?\")\n Sentence(intent=ObtenerPrerequisitos, sentence=\"Qué debería saber para tomar el curso?\")\n Sentence(intent=ObtenerPrerequisitos, sentence=\"Qué conocimientos previos debo tener?\")\n Sentence(intent=ObtenerPrerequisitos, sentence=\"Qué tengo que saber?\")\n Sentence(intent=ObtenerPrerequisitos, sentence=\"Requisitos previos\")\n Sentence(intent=ObtenerPrerequisitos, sentence=\"Conocimientos previos\")\n Sentence(intent=ObtenerDuracion, sentence=\"Cuanto dura el curso de empendimiento\")\n Sentence(intent=ObtenerDuracion, sentence=\"Duración del curso\")\n Sentence(intent=ObtenerDuracion, sentence=\"Número de horas del mooc?\")\n Sentence(intent=ObtenerDuracion, sentence=\"En cuántas semanas se realiza el curso?\")\n Sentence(intent=ObtenerDuracion, sentence=\"Cuanto dura el curso\")\n Sentence(intent=ObtenerDuracion, sentence=\"Tiempo que dura un curso?\")\n Sentence(intent=ObtenerDuracion, sentence=\"cuanto puede durar un curso mooc\")\n Sentence(intent=ObtenerDuracion, sentence=\"cuanto dura el curso?\")\n Sentence(intent=ObtenerDuracion, sentence=\"cual es la duracion de psicologia social?\")\n Sentence(intent=ObtenerDuracion, sentence=\"Cuánto tiempo dura el mooc?\")\n Sentence(intent=ObtenerDuracion, sentence=\"De cuántas semanas es el mooc?\")\n Sentence(intent=ObtenerDuracion, sentence=\"Cuántas horas dura el mooc?\")\n Sentence(intent=ObtenerDuracion, sentence=\"Cuánto tiempo dura el mooc?\")\n Sentence(intent=ObtenerDuracion, sentence=\"De cuántas semanas es el mooc?\")\n Sentence(intent=ObtenerPrecio, sentence=\"Cual es el precio del curso de emprendimiento\")\n Sentence(intent=ObtenerPrecio, sentence=\"Cuál es el precio?\")\n Sentence(intent=ObtenerPrecio, sentence=\"Cuánto vale?\")\n Sentence(intent=ObtenerPrecio, sentence=\"Valor del curso?\")\n Sentence(intent=ObtenerPrecio, sentence=\"Costo\")\n Sentence(intent=ObtenerPrecio, sentence=\"Inversión total del curso\")\n Sentence(intent=ObtenerPrecio, sentence=\"cual es el valor de los componentes?\")\n Sentence(intent=ObtenerPrecio, sentence=\"costo de los cursos?\")\n Sentence(intent=ObtenerPrecio, sentence=\"cuanto cuesta los cursos\")\n Sentence(intent=ObtenerPrecio, sentence=\"Cuál es precio del curso?\")\n Sentence(intent=ObtenerPrecio, sentence=\"Cuánto cuesta el mooc de Administración 
Empresarial?\")\n Sentence(intent=ObtenerPrecio, sentence=\"tiene algun valor los cursos?\")\n Sentence(intent=ObtenerPrecio, sentence=\"Cuanto cuesta el curso Método Toyota?\")\n Sentence(intent=ObtenerPrecio, sentence=\"Cuanto cuesta un curso?\")\n Sentence(intent=ObtenerPrecio, sentence=\"Que vale el curso ?\")\n Sentence(intent=ObtenerPrecio, sentence=\"Es gratis?\")\n Sentence(intent=ObtenerPrecio, sentence=\"Cuanto vale el mooc?\")\n Sentence(intent=ObtenerPrecio, sentence=\"Precio\")\n Sentence(intent=ObtenerDocente, sentence=\"Cual es el docente del curso de emprendimiento\")\n Sentence(intent=ObtenerDocente, sentence=\"Quién es mi profesor en el curso?\")\n Sentence(intent=ObtenerDocente, sentence=\"Docente del mooc?\")\n Sentence(intent=ObtenerDocente, sentence=\"Qué docente imparte el mooc?\")\n Sentence(intent=ObtenerDocente, sentence=\"Quién es el docente encargado de la materia?\")\n Sentence(intent=ObtenerDocente, sentence=\"Nombre del docente del mooc\")\n Sentence(intent=ObtenerDocente, sentence=\"Que profesor esta a cargo del curso\")\n Sentence(intent=ObtenerDocente, sentence=\"cual es mi docente del mooc\")\n Sentence(intent=ObtenerDocente, sentence=\"información del docente\")\n Sentence(intent=ObtenerDocente, sentence=\"Quién es el docente encargado?\")\n Sentence(intent=ObtenerDocente, sentence=\"Quién va a dar el MOOC\")\n Sentence(intent=ObtenerDocente, sentence=\"Que docente acompaña al estudiante?\")\n Sentence(intent=ObtenerDocente, sentence=\"Cual es el profe de Salud Sexual y Reproductiva\")\n Sentence(intent=ObtenerContenidos, sentence=\"Cuáles son los contenidos a tratar en el curos de emprendimiento\")\n Sentence(intent=ObtenerContenidos, sentence=\"Contenido del curso\")\n Sentence(intent=ObtenerContenidos, sentence=\"Cuál es la temática de cada curso?\")\n Sentence(intent=ObtenerContenidos, sentence=\"Qué temas se van a tratar en cada curso?\")\n Sentence(intent=ObtenerContenidos, sentence=\"De que se tratan los moocs\")\n Sentence(intent=ObtenerContenidos, sentence=\"Cuáles son las temas del curso?\")\n Sentence(intent=ObtenerContenidos, sentence=\"Que se va a tratar en este curso?\")\n Sentence(intent=ObtenerContenidos, sentence=\"Qué se va a dar en el curso?\")\n Sentence(intent=ListarCursos, sentence=\"Que cursos hay\")\n Sentence(intent=ListarCursos, sentence=\"Muestrame los cursos\")\n Sentence(intent=ListarCursos, sentence=\"Cual es la oferta actual\")\n Sentence(intent=ListarCursos, sentence=\"Cuentame que cursos tienes\")\n Sentence(intent=ListarCursos, sentence=\"Que cursos me ofreces\")\n Sentence(intent=ListarCursos, sentence=\"Que cursos estan disponibles\")\n Sentence(intent=ListarCursos, sentence=\"Listame los cursos\")\n Sentence(intent=ListarCursos, sentence=\"Que cursos tiene\")\n\n # db.session.add(intent_obtenerinformacion)\n # db.session.add(intent_obtenerfechas)\n # db.session.add(intent_obtenerfechasinicio)\n # db.session.add(intent_obtenerfechasfin)\n # db.session.add(intent_obtenerprerequisitos)\n # db.session.add(intent_obtenerduracion)\n # db.session.add(intent_obtenerprecio)\n # db.session.add(intent_obtenerdocente)\n # db.session.add(intent_obtenercontenidos)\n # db.session.add(intent_listarCursos)\n\n db.session.commit()", "def guardarCliente(self, tipo_id, id, nombres, apellido, apellido2, tel, cel, dir, email, fecha):\n try:\n## if self.existeCliente(id):\n## self.conexion.ejecutarSQL(\"update Clientes set id_TipoIdentificacion=%s, id=%s, primer_nombre='%s', segundo_nombre='%s', primer_apellido='%s', segundo_apellido='%s', 
razon_social='%s', fecha_nacimiento='%s' \\\n## where id=%s\"%(tipo_id,newId,nombre,nombre2,apellido,apellido2,razon_social, fecha,id))\n## else:\n## self.conexion.ejecutarSQL(\"insert into Clientes (id_TipoIdentificacion, id, primer_nombre, segundo_nombre, primer_apellido, segundo_apellido, razon_social, fecha_nacimiento,activo) values \\\n## (%s,%s,'%s','%s','%s','%s','%s','%s','SI')\"%(tipo_id,id,nombre,nombre2,apellido,apellido2,razon_social, fecha))\n self.conexion.ejecutarSQL(\"delete from clientes where id='%s'\"%(id))\n self.conexion.ejecutarSQL(\"insert into clientes (id_TipoIdentificacion, id, nombres, primer_apellido, segundo_apellido, telefono, celular, dir, email, fecha_nacimiento, activo) values (%s, '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', 'SI')\"%(tipo_id, id, nombres, apellido, apellido2, tel, cel, dir, email, fecha))\n self.conexion.commit()\n return True\n except Exception, e:\n print \"guardarProducto excepcion: \", e\n self.conexion.rollback()\n return False", "def save_now(self):\r\n self.save()", "def save_now(self):\r\n self.save()" ]
[ "0.63891953", "0.5736484", "0.5692139", "0.5690428", "0.5670163", "0.55836314", "0.5490324", "0.5467444", "0.5449755", "0.54327613", "0.54227865", "0.5420944", "0.54042196", "0.539124", "0.5371962", "0.5348681", "0.5317675", "0.5299088", "0.52955973", "0.5288019", "0.5268371", "0.52629715", "0.5258343", "0.5242743", "0.5238391", "0.5212985", "0.52114534", "0.52054524", "0.51848483", "0.51565546", "0.5141141", "0.5134104", "0.5122332", "0.5104814", "0.50941414", "0.50808716", "0.50596154", "0.50586116", "0.5052192", "0.50409204", "0.5019801", "0.50166136", "0.5013682", "0.49997422", "0.4991637", "0.49891737", "0.49830997", "0.49815348", "0.49793935", "0.49757367", "0.49644724", "0.49610993", "0.4960465", "0.49488953", "0.4933349", "0.49287826", "0.49222636", "0.49066752", "0.48789153", "0.4875539", "0.4871996", "0.4855098", "0.48542368", "0.48424897", "0.48374605", "0.4836578", "0.4833528", "0.48303476", "0.48296168", "0.4829371", "0.48217362", "0.48179403", "0.4816095", "0.48128295", "0.4804192", "0.4799666", "0.47995883", "0.47962153", "0.47958544", "0.4790549", "0.47863263", "0.47840118", "0.4783059", "0.4778926", "0.47748166", "0.4773994", "0.47735935", "0.4769269", "0.475821", "0.47539872", "0.47489277", "0.47422987", "0.47381595", "0.47381055", "0.47360504", "0.4725414", "0.4720402", "0.47173914", "0.47111738", "0.47111738" ]
0.4767223
88
Volume of originations can be misleading. Normalize it to some degree by considering the number of houses in the same census tract.
def volume_per_100_households(volume, num_households): if num_households: return volume * 100.0 / num_households else: return 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalise(self):\n s = self._sum()\n if s != 0:\n for element, value in self.focals.items():\n self.focals[element] /= s", "def normalize(self):\n total = self.total()\n for x in self.d:\n self.d[x] /= total\n return total", "def test_normal_unit_length(self):\n neighborhood, pc = create_point_cloud_in_plane_and_neighborhood()\n normals = np.array(EigenValueVectorizeFeatureExtractor().extract(pc, neighborhood, None, None, None)[3:6])\n lengths = np.sum(normals * normals, axis=0)\n np.testing.assert_almost_equal(np.ones_like(lengths), lengths)", "def normalize(self) -> NoReturn:\n self._ionic_fractions = self._ionic_fractions / np.sum(self._ionic_fractions)", "def normalize(dist):\n return np.array(dist, 'double') / np.sum(dist)", "def normalize(self, factor):\n self.n_atoms /= factor", "def normalize(self, factor):\n self.n_atoms /= factor", "def _normalize(self, inp):\n \n return inp/inp.sum()", "def normalizeChords(self):\n for prevChord in self.chain:\n #print(\"%s: %d\" % (prevChord, len(self.chain[prevChord])))\n total = 0.\n for chord in self.chain[prevChord]:\n total += self.chain[prevChord][chord]\n for chord in self.chain[prevChord]:\n self.chain[prevChord][chord] /= total\n\n #pprint(self.chain)", "def specificity(self):\n result = 0\n for focal, value in self.items():\n if focal.cardinal > 0:\n result += value / focal.cardinal\n return round(result, 6)", "def _calculate_norm_vol(dna_concs, ng=5, min_vol=2.5, max_vol=3500,\n resolution=2.5):\n sample_vols = ng / np.nan_to_num(dna_concs) * 1000\n sample_vols = np.clip(sample_vols, min_vol, max_vol)\n sample_vols = np.round(sample_vols / resolution) * resolution\n return sample_vols", "def ratio_local_cons(self):\n if self.current_energy_consumed == 0.0:\n return 1.0\n else:\n return self.local_cons / self.current_energy_consumed", "def _normalise(self):\n if not self.is_unit():\n n = self.norm\n if n > 0:\n self.q = self.q / n", "def normalized_effect_size(self):\n mus = self.mu + self.absolute_effects\n pop_mu = (mus * self.test_splits).sum()\n sigma2_m = (self.test_splits * np.square(mus - pop_mu)).sum()\n f = np.sqrt(sigma2_m) / self.sigma\n return f", "def compute_volume(self) -> float:\n return (\n (1 if self.clockwise else -1)\n * np.sum(\n np.linalg.det(\n np.dstack(\n (\n self.vertices[self._faces[:, 0]],\n self.vertices[self._faces[:, 1]],\n self.vertices[self._faces[:, 2]],\n )\n )\n )\n )\n / 6\n )", "def discrepancy(self):\n result = 0\n for focal, value in self.items():\n if focal.cardinal > 0:\n result -= value * math.log(self.betP(focal), 2)\n return round(result, 6)", "def normalize_houndsfield(data_):\n cpy = data_ + 1024\n cpy /= 3000\n return cpy", "def normalize_volume(vol_data):\n h, w, d = np.shape(vol_data)\n mean = np.sum(vol_data)/(h*w*d)\n std = np.std(vol_data)\n return (vol_data - mean) / std", "def normalizedvolumeerror():\r\n volume_simulation = 0\r\n volume_observation = 0\r\n for sim_index in range(1, len(hydrograph)):\r\n volume_simulation_trapezoid = (hydrograph[sim_index-1]+hydrograph[sim_index])*simulation_timestep/2\r\n volume_simulation = volume_simulation + volume_simulation_trapezoid\r\n for data_index in range(1, len(obs_data)):\r\n volume_observation_trapezoid = (obs_data[data_index-1]+obs_data[data_index])*time_difference.total_seconds()/2\r\n volume_observation = volume_observation + volume_observation_trapezoid\r\n volume_error = abs(volume_simulation-volume_observation)/(volume_simulation + volume_observation)\r\n return volume_error", "def normalize_particles(self):\n 
tot_weight = sum([particle.w for particle in self.particle_cloud]) or 1\n for particle in self.particle_cloud:\n particle.w = particle.w / tot_weight;", "def normalize(dist):\n\t\n\tif isinstance(dist, dict):\n\t\t# Make sure our keys/values line up in their lists\n\t\tkeys = dist.keys()\n\t\tvals = [dist[k] for k in keys]\n\t\tnormalize(vals)\n\t\tfor k,v in zip(keys,vals):\n\t\t\tdist[k] = v\n\t\treturn\n\tfdist = [float(d) for d in dist]\n\ts = sum(fdist)\n\tif s == 0:\n\t\treturn\n\tfdist = [d/s for d in fdist]\n\tfor i,d in enumerate(fdist):\n\t\tdist[i] = d", "def normalize_return_length(self):\n length = self.length\n if length != 0:\n self.x /= length\n self.y /= length\n return length", "def calculate_volume(hull):\n origin = hull[0][0]\n volume = 0.0\n for face in hull:\n logvolume = signed_volume(form_face(face, origin))[1]\n volume += numpy.exp(logvolume)\n # n-dimensional simplex = det / n!\n volume /= scipy.special.factorial(len(origin))\n\n return volume", "def normalize(self, factor):", "def normalization(distribution):\r\n total_sum = 0\r\n for number in distribution.values():\r\n total_sum += number\r\n \r\n for bin in distribution.keys():\r\n distribution[bin] = float(distribution[bin]) / total_sum\r\n\r\n return distribution", "def normalize(self):\n total = float(self.totalCount())\n if total != 0:\n self.divideAll(total)", "def normalize_initial(self):\n self._i /= self._i.sum()", "def approximate_volume(pdb_filenames, n_molecules_list, box_scaleup_factor=2.0):\n volume = 0.0 # in cubic angstroms\n for k, (pdb_file) in enumerate(pdb_filenames):\n molecule_volume = 0.0\n molecule_trj = md.load(pdb_filenames[k])\n for atom in molecule_trj.topology.atoms:\n if atom.element.symbol == 'H':\n molecule_volume += 5.0 # approximated from bondi radius = 1.06 angstroms\n else:\n molecule_volume += 15.0 # approximated from bondi radius of carbon = 1.53 angstroms\n volume += molecule_volume * n_molecules_list[k]\n box_size = volume**(1.0/3.0) * box_scaleup_factor\n return box_size", "def ret_vol_ratio(self) -> float:\n return self.geo_ret / self.vol", "def normalize(counts):\n counts_sum0 = np.sum(counts, axis=0, keepdims=True)\n counts_sum1 = np.sum(counts, axis=1, keepdims=True)\n counts_sum = np.sum(counts)\n\n # Get residuals\n theta = 100\n mu = counts_sum1 @ counts_sum0 / counts_sum\n z = (counts - mu) / np.sqrt(mu + mu ** 2 / theta)\n\n # Clip to sqrt(n)\n n = counts.shape[0]\n z[z > np.sqrt(n)] = np.sqrt(n)\n z[z < -np.sqrt(n)] = -np.sqrt(n)\n\n return z", "def standardize_sim_values(self):\n for user_id_A, row in self.sim_matrix.items(): # row is reference\n lA = len(self.users[user_id_A].covered_items)\n for user_id_B in row.keys():\n lB = len(self.users[user_id_B].covered_items)\n row[user_id_B] /= sqrt(lA*lB)\n assert row[user_id_B] <= 1", "def get_standard_house_duct_length() -> (np.ndarray, np.ndarray, np.ndarray):\n\n # internal duct length of the insulated boundary\n internal = np.array([25.6, 8.6, 0.0, 0.0, 0.0])\n\n # external duct length of the insulated boundary\n external = np.array([0.0, 0.0, 10.2, 11.8, 8.1])\n\n # total duc length\n total = internal + external\n\n return internal, external, total", "def normalize_emission(self):\n self._e /= self._e.sum(0)", "def itensity_normalize_one_volume(volume):\n pixels = volume[volume > 0]\n mean = pixels.mean()\n std = pixels.std()\n out = (volume - mean)/std\n # random normal too slow\n #out_random = np.random.normal(0, 1, size = volume.shape)\n out_random = np.zeros(volume.shape)\n out[volume == 0] = 
out_random[volume == 0]\n return out", "def local_to_normalized(npboxes: np.ndarray, window: Box):\n height, width = window.size\n return npboxes / np.array([[height, width, height, width]])", "def _normalize(self):\n\n n = len(self.e2[0])\n E = []\n\n for e2 in self.e2:\n if len(e2) != n:\n print 'WARNING: non consistent length in error statistics!!!'\n E.append(np.nansum(np.sqrt(e2))) # temporal aggregation\n\n E = np.asarray(E)\n EM = E.mean() # take square root, as e2 is still the squared error!\n self.e_norm = (E - EM) / EM # see Glecker et al, eq.2", "def itensity_normalize_one_volume(volume):\n pixels = volume[volume > 0]\n mean = pixels.mean()\n std = pixels.std()\n out = (volume - mean)/std\n out_random = np.random.normal(0, 1, size = volume.shape)\n out[volume == 0] = out_random[volume == 0]\n return out", "def normalize(self):\n total = float(self.totalCount())\n if total == 0: return\n for key in self.keys():\n self[key] = self[key] / total", "def normalize(x):\n return (x + 1e-10) / (K.sqrt(K.mean(K.square(x))) + 1e-10)", "def compute_volume(bundle):\n\taff=np.array([[-1.25, 0, 0, 90],[0, 1.25, 0, -126],[0, 0, 1.25, -72],[0, 0, 0, 1]])\n\tvoxel_list = streamline_mapping(bundle, affine=aff).keys()\n\tvol_bundle = len(set(voxel_list))\n\n\treturn vol_bundle", "def fractionPassing(self):\n return self.cut.entries / self.entries", "def _normalize_detectors(self, data, index):\n vol = self.cell_volumes(index)\n for i, s in enumerate(data):\n data[i] = data[i] / vol\n full_index = list(range(len(self.axes)))\n other_axes = [item for item in full_index if item not in index]\n for i in other_axes:\n v = self.axes[i].total_volume\n data /= v\n return data", "def test_normalize(self):\n\n a1 = vectors.Vector(4, 0, 0)\n self.assertEqual(a1.normalize(),\n vectors.Vector(1, 0, 0))\n\n a1 = vectors.Vector(0, 4, 0)\n self.assertEqual(a1.normalize(),\n vectors.Vector(0, 1, 0))\n\n a1 = vectors.Vector(0, 0, 4)\n self.assertEqual(a1.normalize(),\n vectors.Vector(0, 0, 1))", "def _normalize(self):\r\n self.dataframe['norm_intensity'] = self.dataframe['intensity']\r\n self.dataframe['norm_intensity'] -= self.dataframe['norm_intensity'].min()\r\n self.dataframe['norm_intensity'] /= self.dataframe['norm_intensity'].max() * 0.01", "def normalize(self):\n if self.normed:\n return\n self._normalize()", "def barycenter(self):\n _value = (sum((v[0] for v in self.objects.values())),sum((v[1] for v in self.objects.values())))\n if self.objects:\n _value = (_value[0]/len(self.objects), _value[1]/len(self.objects))\n self.bc=_value\n return _value", "def test_compute_unnormalized_scores(self):\n # todo: implement this test!\n pass", "def normalize_test_6(self):\n\n res = self.XYZ_factor_n.normalize([self.X, self.Z])\n assert(res.rand_vars == [self.X, self.Y, self.Z] and\n res.values == [1/8, 1/12, 2/8, 2/12, 3/8, 3/12, 4/8, 4/12])", "def variance_normalize(self):\n self.img = self.img / np.sqrt(np.sum(self.img ** 2))", "def total_variation(magnitudes):\n m = magnitudes.size\n\n mags_m_plus_1 = magnitudes[1:]\n mags_m = magnitudes[0:-1]\n\n abs_diffs = np.absolute(mags_m_plus_1 - mags_m)\n\n return np.sum(abs_diffs) / m", "def normalize_food_value(num, definition_area_size=6):\n ret = num / definition_area_size + 0.5\n\n return ret", "def normalize_test_6(self):\n\n res = self.XYZ_factor_n.normalize([self.Y, self.Z])\n assert(res.rand_vars == [self.X, self.Y, self.Z] and\n res.values == [1/10, 1/10, 2/10, 2/10, 3/10, 3/10, 4/10, 4/10])", "def center_normalize(x):\n return (x - K.mean(x)) / K.std(x)", "def 
center_normalize(x):\n return (x - K.mean(x)) / K.std(x)", "def normalize(merged_table):\r\n #add normalization\r\n # Minimum\r\n min_val = merged_table['covid_cases'].min()\r\n\r\n # Maximum\r\n max_val = merged_table['covid_cases'].max()\r\n\r\n # Calculate a normalized column\r\n normalized = (merged_table['covid_cases'] - min_val) / (max_val - min_val)\r\n\r\n # Add to the dataframe\r\n merged_table['n_covid'] = normalized\r\n return merged_table", "def normalize_test_6(self):\n\n res = self.XYZ_factor_n.normalize(self.Z)\n assert(res.rand_vars == [self.X, self.Y, self.Z] and\n res.values == [1/4, 1/4, 2/6, 2/6, 3/4, 3/4, 4/6, 4/6])", "def _get_rupture_dimensions(src, mag, nodal_plane):\n area = src.magnitude_scaling_relationship.get_median_area(\n mag, nodal_plane.rake)\n rup_length = math.sqrt(area * src.rupture_aspect_ratio)\n rup_width = area / rup_length\n seismogenic_layer_width = (src.lower_seismogenic_depth\n - src.upper_seismogenic_depth)\n max_width = (seismogenic_layer_width\n / math.sin(math.radians(nodal_plane.dip)))\n if rup_width > max_width:\n rup_width = max_width\n rup_length = area / rup_width\n return rup_length, rup_width", "def normalize(self):\n return (1. / abs(self)) * self", "def get_coverage_area(self) -> float:\n return math.sqrt(self.norm_hull.volume)", "def normalize(self):\n self._data /= self.norm()", "def estimate_volume(self):\n volume = 0.\n zvals = np.unique([c.image_z_position for c in self.contours])\n\n # We pad a zval on the bottom that is the same distance from the\n # first zval to the second zval but below the first point. We do \n # the same thing for the top zval.\n if len(self.contours) != 1:\n zlow = zvals[ 0] - (zvals[1]-zvals[0])\n zhigh = zvals[-1] + (zvals[-1]-zvals[-2])\n zvals = np.r_[zlow, zvals, zhigh]\n else:\n zvals = None\n\n for i,contour in enumerate(self.contours):\n contour_array = contour.to_matrix() * self.scan.pixel_spacing\n x = contour_array[:,0]\n y = contour_array[:,1]\n # \"Shoelace\" formula for area.\n area = 0.5*np.abs(np.dot(x,np.roll(y,1))-np.dot(y,np.roll(x,1)))\n \n if zvals is not None:\n j = np.argmin(np.abs(contour.image_z_position-zvals))\n spacing_z = 0.5*(zvals[j+1]-zvals[j-1])\n else:\n spacing_z = self.scan.slice_thickness\n\n volume += (1. if contour.inclusion else -1.) * area * spacing_z\n return volume", "def normalize(volume):\n\n MIN_BOUND = 0\n MAX_BOUND = 256.0\n volume = (volume - MIN_BOUND) /(MAX_BOUND - MIN_BOUND)\n volume[volume > 1] = 1 #Clip everything larger than 1 and 0\n volume[volume < 0] = 0\n volume = (volume*255).astype('uint8')\n\n return volume", "def normalize(volume):\n\n MIN_BOUND = 0\n MAX_BOUND = 256.0\n volume = (volume - MIN_BOUND) /(MAX_BOUND - MIN_BOUND)\n volume[volume > 1] = 1 #Clip everything larger than 1 and 0\n volume[volume < 0] = 0\n volume = (volume*255).astype('uint8')\n\n return volume", "def norm(self):", "def normalise(da):\n return (da - da.min()) / (da.max() - da.min())", "def norm(self):\n return math.sqrt(sum([x*x for x in self.mV]))", "def _compute_input_normalization(*amps):\n if len(amps) < 2:\n raise ValueError('At least 2 amplitudes must be provided.')\n n_bosons = len(amps)\n left_range = range(n_bosons)\n right_ranges = list(itertools.permutations(left_range))\n total = 0.\n for right_range in right_ranges:\n i_prod = 1.\n for idx1, idx2 in zip(left_range, right_range):\n # if `idx1` and `idx2` are equal the contribution is given\n # by the inner product of an amplitude with itself. 
Given\n # that we are assuming the amplitudes to be normalized,\n # the result is always 1 and we can just skip it\n if idx1 == idx2:\n pass\n # otherwise we update the partial product computing the\n # inner product of the two relevant amplitudes (states)\n i_prod *= np.vdot(amps[idx1], amps[idx2])\n total += i_prod\n return np.sqrt(total)", "def normalise(a):\n return (a - np.nanmin(a)) / np.nanmax(a - np.nanmin(a))", "def normalizing_constant(self):\n\t\tdim = self.train_data.shape[1]\n\t\treturn 1 / (2 * np.pi * ((self.bandwidth) ** 2)) ** (dim / 2)", "def normalize_test_4(self):\n\n res = self.XYZ_factor_n.normalize(self.X)\n assert(res.rand_vars == [self.X, self.Y, self.Z] and\n res.values == [1/2, 1/2, 2/4, 2/4, 3/6, 3/6, 4/8, 4/8])", "def volume_error(self) -> float:\n # TODO written formula and executed formula are different.\n ve = np.sum(self.predicted - self.true) / np.sum(self.true)\n return float(ve)", "def normalize_test_7(self):\n\n res = self.XYZ_factor_n.normalize([self.X, self.Y, self.Z])\n assert(res.rand_vars == [self.X, self.Y, self.Z] and\n res.values == [1/20, 1/20, 2/20, 2/20, 3/20, 3/20, 4/20, 4/20])\n\n res = self.XYZ_factor_n.normalize()\n assert(res.rand_vars == [self.X, self.Y, self.Z] and\n res.values == [1/20, 1/20, 2/20, 2/20, 3/20, 3/20, 4/20, 4/20])", "def check_normalize(self):\n # generate array for easier handling\n values = np.swapaxes(self.psf.psf_value, 0, 2)\n\n # init fail count\n fail_count = 0\n\n # loop over energies\n for i, arr in enumerate(values):\n energy_hi = self.psf.energy_hi[i]\n energy_lo = self.psf.energy_lo[i]\n\n # check if energy is outside of safe energy threshold\n if self.psf.energy_thresh_lo > energy_hi:\n continue\n if self.psf.energy_thresh_hi < energy_lo:\n continue\n\n # loop over offsets\n for arr2 in arr:\n\n # init integral\n sum = 0\n\n # loop over deltas\n for j, v in enumerate(arr2):\n # calculate contribution to integral\n width = self.psf.rad_hi[j].rad - self.psf.rad_lo[j].rad\n rad = 0.5 * (self.psf.rad_hi[j].rad + self.psf.rad_lo[j].rad)\n sum += v.value * width * rad * 2 * np.pi\n\n # check if integral is close enough to 1\n if np.abs(sum - 1.0) > self.config[\"d_norm\"]:\n # add to fail counter\n fail_count += 1\n\n # write results to dict\n results = {}\n if fail_count == 0:\n results[\"status\"] = \"ok\"\n else:\n results[\"status\"] = \"failed\"\n results[\"n_failed_bins\"] = fail_count\n self.results[\"normalize\"] = results", "def normalize(v):\n\n return v * (1.0 / magnitude(v))", "def normalize(self): # Function is fucked TODO\n l = self.length()\n for i in range(0, len(self.coords)):\n self.coords[i] /= l\n return self\n # return Vector(list([0 for i in range(len(v.coords))]))\n\n # if round(self.length() == 0):\n # s = 1 / self.length()\n # return self * s\n # else:\n # return Vector(list([0 for i in range(len(v.coords))]))", "def _normalize_measure(value, maximum=1.0, center=0.0):\n if isiterable(value):\n value = np.asarray(value)\n if isiterable(center):\n center = np.asarray(center)\n if isiterable(maximum):\n maximum = np.asarray(maximum)\n return np.divide(value - center, maximum - center)", "def get_center_of_mass_allies(self,obs):", "def stretch_factor(self):\n p = self._pants_decomposition\n\n # pick a curve to iterate\n c = PantsLamination.random(p)\n # print(c)\n\n cc = (self**100) * c\n # print(self**100)\n # print(cc)\n return float(sum(abs(x) for x in (self*cc).to_vector())) / \\\n sum(abs(x) for x in cc.to_vector())", "def norm(self):\n\n return self.abs()", "def FindScale(self):\n\n ## 
6 and from the cv code the distance is 6 then we are good\n print(\"TODO: Very hard\")", "def informedness(self):\n a, c, d, b = self.to_ccw()\n p1, q1 = a + b, c + d\n n = p1 + q1\n\n if n == 0:\n return np.nan\n elif p1 == n:\n return 0.0\n # return _div(a - b, 2 * (a + b))\n elif q1 == n:\n return 0.0\n # return _div(d - c, 2 * (d + c))\n else:\n return _div(self.covar(), p1 * q1)", "def proper_annulus_centres(self) -> Quantity:\n return self._proper_ann_centres", "def non_specificity(self):\n result = 0\n for focal, value in self.items():\n if focal.cardinal > 0:\n result += value * math.log(focal.cardinal, 2)\n return round(result, 6)", "def _normalize_pose_landmarks(self, landmarks):\n landmarks = np.copy(landmarks)\n\n # Normalize translation.\n pose_center = self._get_pose_center(landmarks)\n landmarks -= pose_center\n\n # Normalize scale.\n pose_size = self._get_pose_size(landmarks, self._torso_size_multiplier)\n landmarks /= pose_size\n # Multiplication by 100 is not required, but makes it eaasier to debug.\n landmarks *= 100\n\n return landmarks", "def theoretical_effective(dataset):\n return float(sum(dataset))/len(dataset)", "def normalize(x):\n return (x - math_ops.reduce_min(x)) / (math_ops.reduce_max(x) - math_ops.reduce_min(x))", "def normalize(vals):\n min_val = torch.min(vals)\n max_val = torch.max(vals)\n return (vals - min_val) / (max_val - min_val)", "def _truncate_data(self):\n trunc_idx = np.argsort(self.energies)\n trunc_intensities = self.intensities[trunc_idx]\n norm_by = np.amax(trunc_intensities)\n return norm_by", "def normalize(self):\n total = float(sum(self.values()))\n for key in self:\n self[key] /= total", "def normalize(self):\n self.length = np.ones(self.nV)\n return self", "def normalize(w):\n s = sum(w)\n for i in range(len(w)):\n w[i] /= s\n return w", "def _calculate_volume(seq, window):\n # Article: On the average hydrophobicity of proteins and the relation between it and protein structure\n VOLUME = {'A': 52.6, 'R': 109.1, 'N': 75.7, 'D': 68.4, 'C': 68.3, 'Q': 89.7,\n 'E': 84.7, 'G': 36.3, 'H': 91.9, 'I': 102.0, 'L': 102.0, 'K': 105.1,\n 'M': 97.7, 'F': 113.9, 'P': 73.6, 'S': 54.9, 'T': 71.2, 'W': 135.4,\n 'Y': 116.2, 'V': 85.1}\n\n VOLUME_N = _nomalized_data(VOLUME)\n return _calculate_scale(seq, window, VOLUME_N)", "def normalize_test_6(self):\n\n res = self.XYZ_factor_n.normalize([self.X, self.Y])\n assert(res.rand_vars == [self.X, self.Y, self.Z] and\n res.values == [1/6, 1/6, 2/6, 2/6, 3/20, 3/20, 4/20, 4/20])", "def normalization_correction(self):\n return self._normalization_correction", "def normalized(self):\n return self / self.norm()", "def _area(self):\n self.area = 0.0\n for sail in self.sails:\n self.area += sail.area", "def _normalize(self, value_dict):\n median = np.median([value_dict[i] for i in list(value_dict.keys())])\n n = len(value_dict.keys())\n if median < 1.0 / float(n):\n divisor = 1.0 / float(n)\n else:\n divisor = median\n return_dict = {}\n for i in list(value_dict.keys()):\n return_dict[i] = float(value_dict[i]) / float(divisor)\n return return_dict", "def normalize(self):\n norm_value = 60 if self.currency == \"btc\" else 18\n self.barHeightsNormalized = [heights / norm_value for heights in self.barHeights]", "def minimum_spanning_arborescence(sol):", "def normalization_calculation(self) -> None:\n self.normalized_inventory = (\n self.normalization_matrix * self.characterized_inventory\n )" ]
[ "0.60555303", "0.5992782", "0.5814782", "0.5798587", "0.5788853", "0.57513684", "0.57513684", "0.57438356", "0.5718763", "0.5717603", "0.57025325", "0.5634104", "0.56269354", "0.5596752", "0.5582119", "0.5576086", "0.55752635", "0.5556672", "0.55557096", "0.55428916", "0.554076", "0.55366504", "0.5535027", "0.55300224", "0.55294627", "0.5515501", "0.5500732", "0.5491389", "0.54596066", "0.5446539", "0.5445266", "0.539465", "0.538342", "0.53792286", "0.53649116", "0.5364596", "0.5351523", "0.5349022", "0.5338124", "0.53316563", "0.5322461", "0.53167003", "0.5307211", "0.53034395", "0.5296657", "0.5296537", "0.52923787", "0.52910554", "0.52894723", "0.5284907", "0.5283155", "0.5272504", "0.5269001", "0.5269001", "0.5266755", "0.5248664", "0.524451", "0.52423865", "0.5238767", "0.5233212", "0.52289367", "0.52270454", "0.52270454", "0.52245176", "0.52238816", "0.52111906", "0.519481", "0.5191324", "0.51907444", "0.51871276", "0.51842", "0.5177755", "0.51751286", "0.5171464", "0.5170219", "0.5160518", "0.51599765", "0.5159288", "0.51551235", "0.5152678", "0.5146475", "0.5139238", "0.5138686", "0.5133049", "0.5125498", "0.51195514", "0.5117927", "0.51156396", "0.51152635", "0.5113413", "0.5111948", "0.51107556", "0.5109851", "0.51079154", "0.5106645", "0.510492", "0.51027775", "0.50963295", "0.50930303", "0.5089712" ]
0.5479352
28
Get loan originations for a given lender, county combination. This ignores year for the moment.
def loan_originations(request_dict): state_fips = request_dict.get('state_fips', '') county_fips = request_dict.get('county_fips', '') lender = request_dict.get('lender', '') if state_fips and county_fips and lender: records = HMDARecord.objects.filter( countyfp=county_fips, lender=lender, statefp=state_fips, action_taken__lte=6) # actions 7-8 are preapprovals to ignore query = records.values( 'geoid', 'geoid__census2010households__total' ).annotate(volume=Count('geoid')) data = {} for row in query: data[row['geoid']] = { 'volume': row['volume'], 'num_households': row['geoid__census2010households__total'], 'volume_per_100_households': volume_per_100_households( row['volume'], row['geoid__census2010households__total']) } return data else: return HttpResponseBadRequest( "Missing one of state_fips, county_fips, lender")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_land_conso_per_year(self, level, group_name=None):\n fields = Cerema.get_art_field(self.analyse_start_date, self.analyse_end_date)\n qs = self.get_cerema_cities(group_name=group_name)\n qs = qs.values(level)\n qs = qs.annotate(**{f\"20{field[3:5]}\": Sum(field) / 10000 for field in fields})\n return {row[level]: {year: row[year] for year in self.years} for row in qs}", "def get_origins(self, account_id, **kwargs):\r\n\r\n return self.account.getOriginPullMappingInformation(id=account_id,\r\n **kwargs)", "def xbrl_years(self):\n return [year for year in self.years if year >= 2021]", "def get_land_artif_per_year(self, analysis_level):\n qs = CommuneDiff.objects.filter(city__in=self.cities.all())\n if analysis_level == \"DEPART\":\n qs = qs.annotate(name=F(\"city__departement__name\"))\n elif analysis_level == \"EPCI\":\n qs = qs.annotate(name=F(\"city__epci__name\"))\n elif analysis_level == \"REGION\":\n qs = qs.annotate(name=F(\"city__departement__region__name\"))\n elif analysis_level == \"SCOT\":\n qs = qs.annotate(name=F(\"city__scot__name\"))\n else:\n qs = qs.annotate(name=F(\"city__name\"))\n qs = qs.filter(year_old__gte=self.analyse_start_date, year_new__lte=self.analyse_end_date)\n qs = qs.annotate(\n period=Concat(\n \"year_old\",\n Value(\" - \"),\n \"year_new\",\n output_field=models.CharField(),\n )\n )\n qs = qs.values(\"name\", \"period\")\n qs = qs.annotate(net_artif=Sum(\"net_artif\"))\n\n results = collections.defaultdict(dict)\n for row in qs:\n results[row[\"name\"]][row[\"period\"]] = row[\"net_artif\"]\n return results", "def _fetch_laus_data(year=None):\n global _SIMPLECOUNT_COLUMNS\n\n try:\n if year is None:\n year = _get_max_year([27]) + 1\n url = f'http://www.ides.illinois.gov/LMI/Local%20Area%20Unemployment%20Statistics%20LAUS/historical/{year}-moaa.xls'\n\n raw = pd.read_excel(url, skiprows=6)\n raw.columns = ['fips', 'area', 'year', 'month', 'force', 'employed', 'unemployed', 'rate']\n \n filtered = raw[(~raw.fips.isna()) & (raw.month == 13)].drop(columns=['area', 'month', 'rate'])\n filtered.columns = ['fips', 'year', '1030', '1551', '1550']\n\n pivoted = pd.melt(\n filtered,\n id_vars = ['fips', 'year'],\n value_vars=['1030', '1550', '1551'],\n var_name = 'fk_simplecount_indicator'\n )\n \n pivoted['fk_simplecount_county'] = (pivoted['fips'] + 1) / 2\n\n return pivoted[_SIMPLECOUNT_COLUMNS]\n except HTTPError as e:\n if e.code == 404:\n raise ValueError(\"WARNING: Employment data is up to date.\")\n except:\n raise", "def ret_lYM(iYM, eYM):\n iYear, iMon = iYM\n eYear, eMon = eYM\n lYM = []\n for Year in range(iYear, eYear+1):\n if iYear == eYear:\n lMon = list(range(iMon,eMon+1))\n elif Year == iYear:\n lMon = list(range(iMon,12+1))\n elif Year == eYear:\n lMon = list(range(1,eMon+1))\n else:\n lMon = list(range(1,12+1)) \n\n for Mon in lMon:\n lYM.append([Year,Mon])\n return lYM", "def get_incidents(year):\n print 'Downloading year: %s' % year\n \n # Build URL from year.\n # If the year is 2007-2011, download the XML straight from ... my S3 account.\n if year in range(2007, 2011):\n url = 'http://wapo-projects.s3.amazonaws.com/techathon/scraperwiki/xml/crime_incidents_%s_plain.xml' % year\n \n # If the year is 2012, get it from the DC government. 
This is NOT the whole year.\n if year == 2012:\n url = 'http://data.octo.dc.gov/feeds/crime_incidents/crime_incidents_current.xml' \n \n # Request the data using the Requests library.\n request = requests.get(url)\n unzipped_request = request.content\n \n # Parse the XML using lxml's BeautifulSoup parser.\n crime_xml_parsed = fromstring(unzipped_request)\n\n # Return the parsed Element() objects by grabbing the xpath for <entry> tags.\n return crime_xml_parsed.xpath('//entry')", "def get_incidents(year):\n print 'Downloading year: %s' % year\n \n # Build URL from year.\n # If the year is 2007-2011, download the XML straight from ... my S3 account.\n if year in range(2007, 2011):\n url = 'http://wapo-projects.s3.amazonaws.com/techathon/scraperwiki/xml/crime_incidents_%s_plain.xml' % year\n \n # If the year is 2012, get it from the DC government. This is NOT the whole year.\n if year == 2012:\n url = 'http://data.octo.dc.gov/feeds/crime_incidents/crime_incidents_current.xml' \n \n # Request the data using the Requests library.\n request = requests.get(url)\n unzipped_request = request.content\n \n # Parse the XML using lxml's BeautifulSoup parser.\n crime_xml_parsed = fromstring(unzipped_request)\n\n # Return the parsed Element() objects by grabbing the xpath for <entry> tags.\n return crime_xml_parsed.xpath('//entry')", "def get_start_end_years(df: pd.DataFrame) -> Tuple[int, int]:\n return df.iloc[0].year, df.iloc[-1].year", "def get_age_fields():\n under_18_fields = CensusFields.get_under_18_fields()\n\n age_18_to_29_fields = [ \n 'B01001_007E', # Male:!!18 and 19 years\n 'B01001_008E', # Male:!!20 years\n 'B01001_009E', # Male:!!21 years\n 'B01001_010E', # Male:!!22 to 24 years\n 'B01001_011E', # Male:!!25 to 29 years\n 'B01001_031E', # Female:!!18 and 19 years\n 'B01001_032E', # Female:!!20 years\n 'B01001_033E', # Female:!!21 years\n 'B01001_034E', # Female:!!22 to 24 years\n 'B01001_035E', # Female:!!25 to 29 years\n ]\n age_30_to_39_fields = [\n 'B01001_012E', # Male:!!30 to 34 years\n 'B01001_013E', # Male:!!35 to 39 years\n 'B01001_036E', # Female:!!30 to 34 years\n 'B01001_037E', # Female:!!35 to 39 years\n ]\n age_40_to_49_fields = [\n 'B01001_014E', # Male:!!40 to 44 years\n 'B01001_038E', # Female:!!40 to 44 years\n 'B01001_015E', # Male:!!45 to 49 years\n 'B01001_039E', # Female:!!45 to 49 years\n\n ]\n age_50_to_59_fields = [\n 'B01001_016E', # Male:!!50 to 54 years\n 'B01001_017E', # Male:!!55 to 59 years\n 'B01001_040E', # Female:!!50 to 54 years\n 'B01001_041E', # Female:!!55 to 59 years\n\n ]\n age_60_to_69_fields = [\n 'B01001_018E', # Male:!!60 and 61 years\n 'B01001_019E', # Male:!!62 to 64 years\n 'B01001_020E', # Male:!!65 and 66 years\n 'B01001_021E', # Male:!!67 to 69 years\n 'B01001_042E', # Female:!!60 and 61 years\n 'B01001_043E', # Female:!!62 to 64 years\n 'B01001_044E', # Female:!!65 and 66 years\n 'B01001_045E', # Female:!!67 to 69 years\n ]\n age_70_to_79_fields = [\n 'B01001_022E', # Male:!!70 to 74 years\n 'B01001_023E', # Male:!!75 to 79 years\n 'B01001_046E', # Female:!!70 to 74 years\n 'B01001_047E', # Female:!!75 to 79 years\n ]\n age_81_plus_fields = [\n 'B01001_024E', # Male:!!80 to 84 years\n 'B01001_025E', # Male:!!85 years and over\n 'B01001_048E', # Female:!!80 to 84 years\n 'B01001_049E', # Female:!!85 years and over\n ]\n \n age_fields = OrderedDict()\n age_fields[ 'age_18_to_29' ] = { 'label': '18-29', 'fields': age_18_to_29_fields }\n age_fields[ 'age_30_to_39' ] = { 'label': '30s', 'fields': age_30_to_39_fields }\n age_fields[ 
'age_40_to_49' ] = { 'label': '40s', 'fields': age_40_to_49_fields }\n age_fields[ 'age_50_to_59' ] = { 'label': '50s', 'fields': age_50_to_59_fields }\n age_fields[ 'age_60_to_69' ] = { 'label': '60s', 'fields': age_60_to_69_fields } \n age_fields[ 'age_70_to_79' ] = { 'label': '70s', 'fields': age_70_to_79_fields }\n age_fields[ 'age_81_plus' ] = { 'label': '80+', 'fields': age_81_plus_fields }\n\n return age_fields", "def lons(self):\n lons = []\n quads = self.getQuadrilaterals()\n groups = self._getGroupIndex()\n u_groups = np.unique(groups)\n ng = len(u_groups)\n for i in range(ng):\n q_ind = np.where(groups == u_groups[i])[0]\n nq = len(q_ind)\n top_lons = []\n bot_lons = []\n for j in range(nq):\n if j == 0:\n top0 = [quads[q_ind[j]][0].longitude]\n bot0 = [quads[q_ind[j]][3].longitude]\n top_lons = top_lons + top0\n bot_lons = bot_lons + bot0\n top_lons = top_lons + [quads[q_ind[j]][1].longitude]\n bot_lons = bot_lons + [quads[q_ind[j]][2].longitude]\n lons = lons + top_lons + bot_lons[::-1] + top0 + [np.nan]\n return np.array(lons)", "def _enumerate_years(self, preprocessed_data, disjoint):\n pass", "def get_look_a_like_conso_per_year(self):\n return {\n land.name: land.get_conso_per_year(\n self.analyse_start_date,\n self.analyse_end_date,\n )\n for land in self.get_look_a_like()\n }", "def get_city_conso_per_year(self, group_name=None):\n return self.get_land_conso_per_year(\"city_name\", group_name=group_name)", "def get_years():\n if request.method == 'OPTIONS':\n logging.info(\"Handle options\")\n return create_response({}, 200, '*', 'content-type, token')\n\n logging.info(\"Getting recruitment years\")\n\n role, response = handle_request_token(request)\n\n if role is None:\n logging.warning(\"Role is None!\")\n return response\n\n years = set()\n for rec in Recruitment.query.all():\n if rec.end_date.year not in years:\n years.add(rec.end_date.year)\n\n years = list(years)\n years.sort(reverse=True)\n\n return create_response(years, 200, '*')", "def lcode(self):\n###############################################################################\n lcode = []\n for M in list(self.estimates.values()):\n if (M.code not in lcode):lcode.append(M.code)\n return(lcode)", "def get_rates():\n orig_code = request.args.get(\"orig_code\")\n dest_code = request.args.get(\"dest_code\")\n date_from = parse_iso_date(request.args.get(\"date_from\"))\n date_to = parse_iso_date(request.args.get(\"date_to\"))\n\n if not date_from or not date_to:\n raise BadRequest(\"Invalid date arguments\")\n if orig_code and dest_code:\n return get_rates_using_codes(\n date_from, date_to, orig_code, dest_code\n )\n raise BadRequest(\"Invalid location arguments\")", "def get_clams_age(theta, eq_lat, day_of_year, as_timedelta=False, clams_dat=dict()):\n if len(clams_dat) == 0:\n # Take advantage of mutable default arguments to cache the CLAMS data. The first time this function is called,\n # the dict will be empty, so the data will be loaded. The second time, since the dict will have been modified,\n # with all the data, we don't need to load it. This should hopefully speed up this part of the code.\n with ncdf.Dataset(_clams_file, 'r') as clams:\n clams_dat['eqlat'] = clams.variables['lat'][:]\n clams_dat['theta'] = clams.variables['extended_theta'][:]\n clams_dat['doy'] = clams.variables['doy'][:]\n\n # The original CLAMS file provided by Arlyn only went up to 2000 K. At first we tried just using the top\n # for greater potential temperatures, but that led to too-great N2O values at those levels. 
We now\n # extrapolate using the three end points to calculate a slope of age vs. theta. This calculation takes some\n # time, so we've added the extended age to the CLAMS file using backend_analysis.clams.modify_clams_file().\n clams_dat['age'] = clams.variables['extended_age'][:]\n\n clams_dat['eqlat_grid'], clams_dat['theta_grid'] = np.meshgrid(clams_dat['eqlat'], clams_dat['theta'])\n if clams_dat['eqlat_grid'].shape != clams_dat['age'].shape[1:] or clams_dat['theta_grid'].shape != clams_dat['age'].shape[1:]:\n raise RuntimeError('Failed to create equivalent lat/theta grids the same shape as CLAMS age')\n\n idoy = np.argwhere(clams_dat['doy'] == day_of_year).item()\n\n el_grid, th_grid = np.meshgrid(clams_dat['eqlat'], clams_dat['theta'])\n clams_points = np.array([[el, th] for el, th in zip(el_grid.flat, th_grid.flat)])\n\n # interp2d does not behave well here; it interpolates to points outside the range of eqlat/theta and gives a much\n # noisier result.\n age_interp = LinearNDInterpolator(clams_points, clams_dat['age'][idoy, :, :].flatten())\n prof_ages = np.array([age_interp(el, th).item() for el, th in zip(eq_lat, theta)])\n\n if as_timedelta:\n # The CLAMS ages are in years, but relativedeltas don't accept fractional years. Instead, separate the whole\n # years and the fractional years.\n prof_ages = np.array(mod_utils.frac_years_to_reldelta(prof_ages))\n\n return prof_ages", "def lae_nccd(year):\n html = load_campdoc_html(year)\n table = extract_main_table_from_html(html)\n data = process_main_table(table)\n return(data)", "def build_messy_lookup_lad(source,dest):\n la = QuickGrid().open(source)\n\n lookup = QuickGrid()\n lookup.header = [\"gss-code\",\"local-authority-code\"]\n\n possible = [\"gss-code\",\"archaic-gss-code\"]\n possible = [p for p in possible if p in la.header]\n for r in la:\n for p in possible:\n if r[p]:\n values = r[p].split(\",\")\n for v in values:\n lookup.add([v,r[\"local-authority-code\"]])\n \n lookup.save(dest,force_unicode=True)", "def validated_origins(self, pfx):\n nodes = self.radix.search_covering(pfx)\n origins = set()\n\n for node in nodes:\n for roa in node.data[\"roas\"]:\n asn = int(roa[\"asn\"].lstrip(\"AS\"))\n maxlength = roa[\"maxLength\"]\n if maxlength >= int(pfx.split(\"/\")[1]):\n origins.add(asn)\n return origins", "def get_age_bounds(input_dir):\r\n ages = pd.read_csv(os.path.join(input_dir, \"age_bounds.csv\"))\r\n return ages", "def get(self):\n trainer_id = request.args.get(\"trainerId\")\n if trainer_id is not None:\n try:\n return TrainerService.get_years_for_trainer(int(trainer_id))\n except ValueError:\n return INVALID_ID_ERROR, 400 # Bad Request\n else:\n return []", "def linkage_or_origin_all_parents(elf, addr, linkage=False):\n result = subprocess.run(\n (DWARFDUMP, \"--lookup=0x\" + addr, \"-p\", elf), capture_output=True, text=True\n )\n dwarfdump = result.stdout\n regex = abstract_origin_re\n if linkage:\n regex = dw_at_linkage_name_re\n matches = re.findall(regex, dwarfdump)\n\n def getFunction(line):\n return line.strip().split('\"')[1]\n\n origins = list(map(getFunction, matches))\n return origins", "def hindu_lunar_holiday(l_month, l_day, gregorian_year):\n l_year = HinduLunarDate.from_fixed(GregorianDate.new_year(gregorian_year)).year\n date1 = hindu_date_occur(l_month, l_day, l_year)\n date2 = hindu_date_occur(l_month, l_day, l_year + 1)\n return list_range([date1, date2], GregorianDate.year_range(gregorian_year))", "def get_leagues_and_countries(source=utils.get_native_source):\n if not isinstance(source, 
games.models.Source):\n # If I used source=native_source() or if native_source was a global variable then\n # during db initialization (running command initialize) you would get an error since\n # it gets its value when the database is empty.\n source = source()\n logger.info(\"getting leagues and countries from source %s...\", source)\n if not source:\n return [], []\n data, meta, status_code = sportmonks.countries.all(include='leagues.seasons')\n if not data:\n # if the status code is not 200 data and meta are None\n return [], []\n # with open('sportmonks/response_texts/aws_01.txt', 'w') as outfile:\n # json.dump(meta, outfile, indent=4)\n # json.dump(data, outfile, indent=4)\n\n pre_countries, pre_competitions = [], []\n\n try:\n # Notice that only the first supported sport will be processed (currently this is is acceptable since we only\n # support football and so the first supported sport will always be football)\n sport_sids = parse_sport(meta)\n sports = []\n for sport_sid in sport_sids:\n sport = games.models.Sport.by_sid(sid=sport_sid, source=source)\n if not sport:\n logger.info(\"Sport contained in the response with sid {} is not supported\".format(sport_sid))\n continue\n sports.append(sport)\n if not sports:\n logger.error(\"No supported sport in the response\")\n return [], []\n football_gname = games.naming.sport_names.get('football', None)\n football = games.models.Sport.objects.get(name=football_gname)\n if football not in sports:\n logger.info(\"Football is not in response\")\n return [], []\n # logger.debug(\"Trying to get sport from source: %s and sid: %s\", source, sport_sid)\n sport_gname = football_gname\n for item in data:\n try:\n country_sid = item.get('id')\n # logger.debug('country_sid: %s', country_sid)\n country_sname = item.get('name')\n # logger.debug('country_sname: %s', country_sname)\n extra = item.get('extra')\n # logger.debug('extra: %s', extra)\n leagues = item.get('leagues').get('data')\n # logger.debug('leagues: %s', leagues)\n try:\n fifa_code = extra.get('fifa') # some countries might lack extra information\n except AttributeError:\n fifa_code = None\n except Exception as e:\n logger.data_error('%s', e)\n continue\n pre_country = pre_models.PreCountry(source=source, sname=country_sname, sid=country_sid, fifa_code=fifa_code)\n pre_countries.append(pre_country)\n for league in leagues:\n try:\n # sportmonks uses sgname for leagues. 
I use this sgname as an sname (comp_season_specific name)\n competition_sname = league.get('name')\n # logger.debug('competition_sname: %s', competition_sname)\n sid = league.get('id')\n # logger.debug('sid: %s', sid)\n seasons = league.get('seasons').get('data')\n # logger.debug('seasons: %s', seasons)\n except Exception as e:\n logger.data_error('%s', e)\n continue\n competition_season_utils = []\n # comp_seas_sids = []\n for season in seasons:\n try:\n season_name = season.get('name')\n # logger.debug('season_name: %s', season_name)\n # season_name = seasons_special_treatment(season_name)\n competition_season_sid = season.get('id')\n # logger.debug('competition_season_sid: %s', competition_season_sid)\n is_current_season = season.get('is_current_season', False)\n # logger.debug('is_current_season: %s', is_current_season)\n except Exception as e:\n logger.data_error('%s', e)\n continue\n # comp_seas_sids.append(competition_season_sid)\n zak_season_name = games.models.Season.zakandify_season_string(season_name)\n season = zakanda.utils.season_from_season_name(zak_season_name)\n competition_season_type = get_competition_season_type(season)\n competition_season_util = pre_models.CompetitionSeasonUtil(season, competition_season_sid, competition_sname, competition_season_type)\n competition_season_utils.append(competition_season_util)\n # logger.debug(\"competition season sids: %s\", comp_seas_sids)\n pre_competition = pre_models.PreCompetition(\n source=source, sname=competition_sname, sid=sid, sport_name=sport_gname,\n competition_season_utils=competition_season_utils, pre_country=pre_country)\n pre_competitions.append(pre_competition)\n\n except Exception as e:\n logger.error('%s Unexpected problem with sportmonks.countries.all from source %s', e, source)\n logger.info(\"%s pre countries and %s pre competitions were created\", len(pre_countries), len(pre_competitions))\n return pre_countries, pre_competitions", "def llincc(x,y):\r\n covar = lcov(x,y)*(len(x)-1)/float(len(x)) # correct denom to n\r\n xvar = lvar(x)*(len(x)-1)/float(len(x)) # correct denom to n\r\n yvar = lvar(y)*(len(y)-1)/float(len(y)) # correct denom to n\r\n lincc = (2 * covar) / ((xvar+yvar) +((amean(x)-amean(y))**2))\r\n return lincc", "def lats(self):\n lats = []\n quads = self.getQuadrilaterals()\n groups = self._getGroupIndex()\n u_groups = np.unique(groups)\n ng = len(u_groups)\n for i in range(ng):\n q_ind = np.where(groups == u_groups[i])[0]\n nq = len(q_ind)\n top_lats = []\n bot_lats = []\n for j in range(nq):\n if j == 0:\n top0 = [quads[q_ind[j]][0].latitude]\n bot0 = [quads[q_ind[j]][3].latitude]\n top_lats = top_lats + top0\n bot_lats = bot_lats + bot0\n top_lats = top_lats + [quads[q_ind[j]][1].latitude]\n bot_lats = bot_lats + [quads[q_ind[j]][2].latitude]\n lats = lats + top_lats + bot_lats[::-1] + top0 + [np.nan]\n\n return np.array(lats)", "def _getlons(self):\n dlon = 360. / self.nlon\n lons = np.linspace(0. + dlon / 2., 360. 
- dlon / 2., self.nlon)\n return lons", "def country_of_origin(self):\n if self.investor_company:\n return self.investor_company.address_country", "def country_codes():\n return (pd.read_csv(csv_path(\"country-codes.csv\"), usecols=[1, 3, 4],\n index_col=2, keep_default_na=False))", "def get(self, id_code):\r\n return self.fetch('getLegislators', id=id_code)['legislator']", "def main():\n tl = TwoLocus(in_path='/csbiodata/public/www.csbio.unc.edu/htdocs/sgreens/pairwise_origins/')\n # tl = TwoLocus()\n # tl.preprocess(glob.glob('OR_ss_origins/*.hap'))\n print len(tl.list_available_strains())\n exit()\n # print len(tl.list_available_strains())\n # tl.preprocess(['cc_origins.csv'])\n # tl.preprocess(['ccv_origins.csv'])\n classical = [s for s in\n [\"129P1/ReJ\", # \"129P3/J\", \"129S1SvlmJ\", \"129S6\", \"129T2/SvEmsJ\", \"129X1/SvJ\", \"A/J\", \"A/WySnJ\",\n \"AEJ/GnLeJ\", \"AEJ/GnRk\", \"AKR/J\", \"ALR/LtJ\", \"ALS/LtJ\", \"BALB/cByJ\", \"BALB/cJ\", \"BDP/J\", \"BPH/2J\",\n # \"BPL/1J\", \"BPN/3J\", \"BTBR T<+>tf/J\", \"BUB/BnJ\", \"BXSB/MpJ\", \"C3H/HeJ\", \"C3HeB/FeJ\", \"C57BL/10J\",\n # \"C57BL/10ScNJ\", \"C57BL/10SAAAJ\", \"C57BL/6CR\", \"C57BL/6J\", \"C57BL/6NCI\", \"C57BL/6Tc\", \"C57BLKS/J\",\n # \"C57BR/cdJ\", \"C57L/J\", \"C58/J\", \"CBA/CaJ\", \"CBA/J\", \"CE/J\", \"CHMU/LeJ\", \"DBA/1J\", \"DBA/1LacJ\",\n # \"DBA/2DeJ\", \"DBA/2HaSmnJ\", \"DBA/2J\", \"DDK/Pas\", \"DDY/JclSidSeyFrkJ\", \"DLS/LeJ\", \"EL/SuzSeyFrkJ\",\n # \"FVB/NJ\", \"HPG/BmJ\", \"I/LnJ\", \"IBWSP2\", \"IBWSR2\", \"ICOLD2\", \"IHOT1\", \"IHOT2\", \"ILS\", \"ISS\", \"JE/LeJ\",\n # \"KK/HlJ\", \"LG/J\", \"LP/J\", \"LT/SvEiJ\", \"MRL/MpJ\", \"NOD/ShiLtJ\", \"NON/ShiLtJ\", \"NONcNZO10/LtJ\",\n # \"NONcNZO5/LtJ\", \"NOR/LtJ\", \"NU/J\", \"NZB/BlNJ\", \"NZL/LtJ\", \"NZM2410/J\", \"NZO/HlLtJ\", \"NZW/LacJ\", \"P/J\",\n # \"PL/J\", \"PN/nBSwUmabJ\", \"RF/J\", \"RHJ/LeJ\", \"RIIIS/J\", \"RSV/LeJ\", \"SB/LeJ\", \"SEA/GnJ\", \"SEC/1GnLeJ\",\n # \"SEC/1ReJ\", \"SH1/LeJ\", \"SI/Col Tyrp1 Dnahc11/J\", \"SJL/Bm\", \"SJL/J\", \"SM/J\", \"SSL/LeJ\", \"ST/bJ\",\n \"STX/Le\", ] # \"SWR/J\", \"TALLYHO/JngJ\", \"TKDU/DnJ\", \"TSJ/LeJ\", \"YBR/EiJ\", \"ZRDCT Rax<+>ChUmdJ\"]\n if tl.is_available(s)]\n wild_derived = [s for s in\n ['22MO',\n # 'BIK/g', 'BULS', 'BUSNA', 'BZO', 'CALB/RkJ', 'CASA/RkJ', 'CAST/EiJ', 'CIM', 'CKN', 'CKS',\n 'CZECHI/EiJ', 'CZECHII/EiJ', 'DCA', 'DCP', 'DDO', 'DEB', 'DGA', 'DIK', 'DJO', 'DKN', 'DMZ', 'DOT',\n # 'IS/CamRkJ', 'JF1/Ms', 'LEWES/EiJ', 'MBK', 'MBS', 'MCZ', 'MDG', 'MDGI', 'MDH', 'MGA', 'MH',\n # 'MOLD/RkJ', 'MOLF/EiJ', 'MOLG/DnJ', 'MOR/RkJ', 'MPB', 'MSM/Ms', 'PERA/EiJ', 'PERC/EiJ', 'POHN/Deh',\n # 'PWD/PhJ', 'PWK/PhJ', 'RBA/DnJ', 'RBB/DnJ', 'RBF/DnJ', 'SF/CamEiJ', 'SKIVE/EiJ', 'SOD1/EiJ',\n # 'STLT', 'STRA', 'STRB', 'STUF', 'STUP', 'STUS', 'TIRANO/EiJ', 'WLA', 'WMP', 'WSB/EiJ',\n 'ZALENDE/EiJ'] if tl.is_available(s)]\n tl.contingency_table(classical, wild_derived, '/csbiohome01/sgreens/Projects/intervals/contingency.csv')\n exit()\n x = TwoLocus(chrom_sizes=[20e6, 20e6])\n x.preprocess([\"test2.csv\"])\n x.unique_combos(['A', 'B', 'D'], ['C', 'E'])\n x.sources_at_point_pair('1', 1, '1', 10000000, ['A'])\n # x.interlocus_dependence([chr(c) for c in xrange(ord('A'), ord('J')+1)])\n # exit()\n\n x = TwoLocus(chrom_sizes=[20 * 10 ** 6, 20 * 10 ** 6])\n x.preprocess([\"test.csv\"])\n rez = x.pairwise_frequencies([\"A\"])\n\n areas = x.calculate_genomic_area(rez[0], rez[1])\n total = 0.0\n\n for combo in subspecies.iter_combos():\n print \"\\t{:15s}({:4d}):{:1.5f}\".format(subspecies.to_string(combo), 
combo,\n areas[str(subspecies.to_string(combo))])\n total += areas[str(subspecies.to_string(combo))]\n print \"\\t{:21s}:{:1.5f}\".format(\"Total\", total)\n\n sys.exit(1)\n # for code, combo in combos.iteritems():\n # print \"\\n\", rez[1]\n # print \"\\t{} ({}):\\n{}\".format(combo, code, rez[0][code])", "def lats(self):\n return self._origin.lat", "def lats(self):\n return self._origin.lat", "def get_state_ids(state_initials = str()):\n try:\n quality = \"20m\"\n year = \"2019\"\n area_type = \"state\"\n state_initials = state_initials.upper()\n url = f\"https://raw.githubusercontent.com/uscensusbureau/citysdk/master/v2/GeoJSON/{quality}/{year}/{area_type}.json\"\n df = gpd.read_file(url)\n df = df[['NAME', 'STUSPS', 'STATEFP', 'STATENS', 'AFFGEOID', 'GEOID']]\n df = df[df.STUSPS == state_initials]\n return df\n except Exception as err:\n print(f'An error occured. Parameter must be in string format.: {err}')", "def calc_source_blend_ang_radii(source, blend, log):\n\n log.info('\\n')\n log.info('Calculating the angular radius of the source star:')\n source.calc_stellar_ang_radius(log)\n log.info('Source angular radius (from SDSS (g-i), Boyajian+ 2014 relations) = '+str(round(source.ang_radius,4))+' +/- '+str(round(source.sig_ang_radius,4)))\n\n log.info('\\n')\n log.info('Calculating the angular radius of the blend:')\n blend.calc_stellar_ang_radius(log)\n log.info('Blend angular radius (from SDSS (g-i), Boyajian+ 2014 relations) = '+str(round(blend.ang_radius,4))+' +/- '+str(round(blend.sig_ang_radius,4)))\n\n return source, blend", "def get_roi(landmarks):\n centers = compute_centers(landmarks)\n corners = compute_corners(centers, 'face')\n return [corners[i][0] for i in range(len(corners))]", "def get_glevel_ori_agency(county_cens_file, crime_df, filename, cens_year, city_cens_file=False):\n\n \"\"\"\n 1. Append cities census file to counties census file\n \"\"\"\n national_census_df = pd.read_csv(county_cens_file)\n\n \"\"\"\n Checking for city census file coz we need to first append city census file to the bottom of county census file for 2000 and 2010.\n And city census file is passed only for 2000 and 2010 since for 1990 city and county census data is already together.\n \"\"\"\n if city_cens_file:\n cities_df = pd.read_csv(city_cens_file)\n national_census_df = national_census_df.append([cities_df])\n\n # Drop duplicates\n national_census_df = national_census_df.drop_duplicates(['STATEFP', 'place_fips'])\n national_census_df.to_csv(f'/Users/salma/Studies/Research/Criminal_Justice/research_projects/US Crime Analytics/data/cen_00/Census_{cens_year}_Unique.csv', index=False)\n\n\n \"\"\"\n 2.\n Merge census unique files with Crime_Major_Gov_Fips to get the correct cgovtype, CNTY based on fips state, fips place. \n Also obtain ORI, Agency columns from crime file. \n \"\"\"\n national_census_df = national_census_df.merge(crime_df, on=['STATEFP', 'place_fips'], how='right')\n\n\n \"\"\"\n 3. Create final Govt_level = Govt_level_y column which has govt_level values from crime file and get rid of _x and _y columns \n \"\"\"\n national_census_df['Govt_level'] = national_census_df['Govt_level_y']\n national_census_df['CNTY'] = national_census_df['CNTY_y']\n national_census_df.drop(['Govt_level_x', 'Govt_level_y', 'CNTY_x', 'CNTY_y'], axis=1, inplace=True)\n\n \"\"\"\n Add the year column to have year for even the missing census rows for certain ORIs\n \"\"\"\n national_census_df['YEAR'] = cens_year\n\n \"\"\"\n 4. 
Rearrange columns so that ORI, AGENCY, Govt_level are at the beginning\n \"\"\"\n cols = list(national_census_df.columns.values)\n cols.pop(cols.index('ORI'))\n cols.pop(cols.index('AGENCY'))\n cols.pop(cols.index('Govt_level'))\n cols.pop(cols.index('CNTY'))\n cols.pop(cols.index('YEAR'))\n\n national_census_df = national_census_df[['ORI', 'AGENCY', 'Govt_level', 'CNTY', 'YEAR'] + cols]\n #national_census_df = national_census_df[['ORI', 'AGENCY', 'YEAR'] + cols]\n\n # write the final df with updated govt_level, ori, agency etc. to a csv\n national_census_df.to_csv(f'/Users/salma/Studies/Research/Criminal_Justice/research_projects/US Crime Analytics/data/cen_00/{filename}.csv', index=False)", "def build_allyears_dict(years):\n \n #initialize Male and Female dictionaries\n years_F_dict = {}\n years_M_dict = {}\n for year in years:\n singleyear_M_dict, singleyear_F_dict = get_allnames_year(year)\n years_M_dict[year] = singleyear_M_dict\n years_F_dict[year] = singleyear_F_dict\n \n \n return (years_M_dict, years_F_dict)", "def calc_origin_days(day, month, year, installment_time_period, num_installment):\n date_arr = []\n day_num_arr = []\n start_date = datetime.datetime(year=year, month=month, day=day)\n prev_date = start_date\n day_num_arr.append(0)\n start_date_str = '{0}-{1}-{2}'.format(start_date.day, month_num_to_str_dict[start_date.month], start_date.year)\n date_arr.append(start_date_str)\n\n for idx in range(num_installment):\n days_to_incre = get_num_days_to_incre(installment_time_period, prev_date)\n new_date = prev_date + datetime.timedelta(days=days_to_incre)\n new_date_str = '{0}-{1}-{2}'.format(new_date.day, month_num_to_str_dict[new_date.month], new_date.year)\n date_arr.append(new_date_str)\n day_num_arr.append(days_to_incre)\n prev_date = new_date\n return date_arr, day_num_arr", "def get_ligand(self, ligand_chain=\"L\"):\n ligand = self.get_chain(ligand_chain)\n for line in ligand.split(\"\\n\"):\n if not \"HETATM\" in line[0:6] and not line == \"\":\n print(\"Selected chain: {}\".format(ligand_chain))\n print(\"Line:\")\n print(line)\n raise TypeError(\"The selected chain does not contain HETATM in all lines!\")\n return ligand", "def origin_list(self) -> List[Origin]:\n return self._origin_list", "def dbf_years(self):\n return [year for year in self.years if year <= 2020]", "def read_lads():\n lad_shapes = os.path.join(\n DATA_RAW, 'shapes', 'lad_uk_2016-12.shp'\n )\n\n with fiona.open(lad_shapes, 'r') as lad_shape:\n return [lad for lad in lad_shape if\n not lad['properties']['name'].startswith((\n 'E06000053',\n 'S12000027',\n 'N09000001',\n 'N09000002',\n 'N09000003',\n 'N09000004',\n 'N09000005',\n 'N09000006',\n 'N09000007',\n 'N09000008',\n 'N09000009',\n 'N09000010',\n 'N09000011',\n ))]", "def get_reference_no_from_narration(narration,validatorlist=[validate_western_code,validate_ria,validate_placid, validate_xpress,validate_moneygram, validate_prabhu_ref, validate_merchantrade_ref, validate_necmoney_ref, validate_necitaly_ref, validate_cbl_ref, swift_re]):\n ref_list = []\n for e in narration.split():\n for fn in validatorlist:\n try:\n fn(e)\n if e not in ref_list:\n ref_list.append(e)\n except ValidationError:\n pass\n return ref_list", "def get_landmarks(self):\n lm_x = [x for x, y in self.landmarks]\n lm_y = [y for x, y in self.landmarks]\n return [lm_x, lm_y]", "def get_bilan_conso_per_year(self):\n qs = self.get_cerema_cities().aggregate(\n **{f\"20{f[3:5]}\": Sum(f) / 10000 for f in Cerema.get_art_field(\"2011\", \"2020\")}\n )\n return qs", "def 
get_father_days(year=2020):\n days_to_countries = defaultdict(list)\n\n _parse_father_days_per_country(year,days_to_countries)\n _parse_recurring_father_days(days_to_countries)\n\n return days_to_countries", "def get_data_from_latlon(self, latitudes, longitudes, years):\n lat_idx, lon_idx = self.get_idx_from_latlon(latitudes, longitudes)\n return self.get_data_from_idx(lat_idx, lon_idx, years)", "def guess_landmark(self, obs):\n distance, direction = obs\n lm_x = self.pos_x + distance * math.cos(direction)\n lm_y = self.pos_y + distance * math.sin(direction)\n return Landmark(lm_x, lm_y)", "def import_un_locode(filepaths: list[str]):\n names = [\n 'Ch', 'LOCODE_country', 'LOCODE_city', 'Name', 'NameWoDiacritics', \n 'SubDiv', 'Function', 'Status', 'Date', 'IATA', 'Coordinates', \n 'Remarks']\n dfs = list()\n for filepath in filepaths:\n assert os.path.isfile(filepath)\n dfs.append(pd.read_csv(filepath, encoding_errors='ignore', names=names))\n \n df = pd.concat(dfs).dropna(subset=['Coordinates'])\n \n Latitude = list()\n Longitude = list()\n for lat_str, long_str in df['Coordinates'].str.split():\n sign_lat = -1 if lat_str[-1] == 'S' else 1\n sign_long = -1 if long_str[-1] == 'W' else 1\n Latitude.append(sign_lat * float(lat_str[:-1]) / 100)\n Longitude.append(sign_long * float(long_str[:-1]) / 100)\n labels = df['LOCODE_country'] + df['LOCODE_city']\n \n return (\n pd.DataFrame({'x': Longitude, 'y': Latitude, 'label': labels})\n .drop_duplicates(subset='label')\n .set_index('label')\n )", "def get_event_list(year, zipcode=80919, radius=10000):\n req = init_session()\n eventspage = req.get(\"http://www.usacycling.org/events/?zipcode=\" + str(zipcode)+\"&radius=\" + str(radius) + \"&race=&fyear=\" + str(year) + \"&rrfilter=rr\" , headers=HDRS)\n load_events()", "def get_intl_arrivals(self):\n\n # Fetch international arrival times from database.\n arrival_id_and_times = self.cursor.execute(\n 'SELECT arrivals.arrival_time, arrivals.id '\n 'FROM arrivals LEFT JOIN airports '\n 'ON arrivals.airport_code = airports.code '\n 'WHERE arrivals.code_share = \\'\\' '\n 'AND arrivals.terminal = \\'4\\' '\n 'AND airports.country != \\\"United States\\\" '\n 'AND airports.preclearance != \\\"true\\\";')\\\n .fetchall()\n\n # Dict the results and return.\n dic = {}\n for arrival in arrival_id_and_times:\n if arrival[0] in dic:\n dic[arrival[0]].append(arrival[1])\n else:\n dic[arrival[0]] = [arrival[1]]\n return dic", "def _derive_country_MX(place):\n lname = place.name.lower()\n derived = []\n match = _PARENTHETICAL.search(lname)\n if match:\n derived.append(_PARENTHETICAL.sub(\"\", lname).strip())\n derived.append(match.group(1).strip())\n\n if _MX_COLONIA.search(place.name):\n derived.append(_MX_COLONIA.sub(\"col\", lname))\n\n if _MX_DELEG.search(place.name):\n derived.append(_MX_DELEG.sub(\"delegación\", lname))\n derived.append(_MX_DELEG.sub(\"del\", lname))\n derived.append(_MX_DELEG.sub(\"deleg\", lname))\n\n if _MX_CIUDAD.search(place.name):\n derived.append(_MX_CIUDAD.sub(\"cd\", lname))\n\n alternative_names = _MX_SUPPORT[\"alternative_names\"][\"es\"]\n try:\n derived += alternative_names[lname]\n except KeyError:\n pass\n\n return [DerivedName(text, \"es\") for text in derived]", "def get_city_artif_per_year(self):\n qs = CommuneDiff.objects.filter(city__in=self.cities.all()).filter(\n year_old__gte=self.analyse_start_date, year_new__lte=self.analyse_end_date\n )\n results = collections.defaultdict(dict)\n for commune in qs:\n results[commune.city.name][commune.period] = commune.net_artif\n 
return results", "def detect(location):\n copyrights = []\n copyrights_extend = copyrights.extend\n authors = []\n authors_extend = authors.extend\n years = []\n years_extend = years.extend\n holders = []\n holders_extend = holders.extend\n\n for cp, auth, yr, hold, _start, _end in detect_copyrights(location):\n copyrights_extend(cp)\n authors_extend(auth)\n years_extend(yr)\n holders_extend(hold)\n return copyrights, authors, years, holders", "def fix_location(r):\n \n # all is fine: just change zipcode datatype to str\n if not np.isnan(r['zip']) and not np.isnan(r['lat']):\n return [str(int(r['zip'])), r['lng'], r['lat']]\n \n # try to locate within zipcode polygons\n if not np.isnan(r['lat']):\n query = \"\"\"\n SELECT t.geoid as zip, {} as lng, {} as lat\n FROM us_zcta5 t JOIN usps_zcta5 z ON t.geoid = z.zip\n WHERE ST_Contains(t.shape, ST_GeomFromText('POINT({} {})', 2))\n \"\"\"\n res = pd.read_sql(query.format(r['lng'], r['lat'], r['lng'], r['lat']), con = con)\n if len(res) == 1:\n return res.values[0].tolist()\n\n # use zipcode center as location proxy: geocoding is prefered in this case, but might be quite expensive\n if not np.isnan(r['zip']):\n res = zipcodes[zipcodes['zip'] == str(int(r['zip']))]\n if len(res) == 1:\n return res.values[0].tolist()[:3]\n\n return [None, None, None]", "def get_age_display(self):\n ls = []\n age = self.age.split(',')\n if len(age) > 0:\n for a in age:\n if a == 'b':\n ls.append(range(1, 19))\n elif a == 'y':\n ls.append(range(19, 37))\n elif a == 'a':\n ls.append(range(37, 57))\n elif a == 's':\n ls.append(range(57, 100))\n chained = frozenset(chain.from_iterable(ls))\n return chained", "def europe_central_asia_countries():\r\n europe_central_asia_data = []\r\n years = []\r\n medians = []\r\n lst = []\r\n for idx in range(1960, 2016):\r\n years.append(idx)\r\n for idx in europe_central_asia:\r\n europe_central_asia_data.append(life_expectancy_graph(idx))\r\n y_idx = 0\r\n for idx in europe_central_asia_data:\r\n if idx != None and idx != {}:\r\n if (list(idx.keys())[y_idx]) == years[y_idx]:\r\n lst.append((list(idx.values())[y_idx]))\r\n lst = sorted(lst)\r\n medians.append(median(lst))\r\n return medians", "def arrayofyears(month1,day1,year1,month2,day2,year2):\n return (arrayofdaysmonthsyears(month1,day1,year1,month2,day2,year2))[2]", "def list_geoparams(**kwargs):\n try:\n df_list = pd.read_html('https://github.com/uscensusbureau/citysdk/blob/master/README.md')\n df_params = pd.DataFrame(df_list[2])\n df_table = df_params.replace('✔', 'Yes')\n for k in kwargs:\n year = kwargs['year']\n if year == '2013' or year == '2014' or year == '2015':\n year = '2013 - 2015'\n elif year == '2016' or year == '2017' or year == '2018' or 'year' == '2019':\n year = '2016 - 2019'\n df_table = df_table[['Geographic Area Type', year]]\n return df_table\n except Exception as err:\n print(f'An error occured. 
If you specified a year, please make sure it is between 1990 and 2019: {err}')", "def get_lat_lon_from_arcGIS(cities, nworkers=20):\n print(\"get city lat and long from arcGIS\")\n # if len(cities) > 1000:\n # raise Exception(\n # \"Can only fetch up to 1000 lat/long per day from arcGIS\")\n nominatum = Nominatim()\n\n @retry(5)\n def geocoder(nominatum, city):\n notfound = {\n 'City, State/Country': city, 'latitude': None, 'longitude': None}\n loc = nominatum.geocode(city)\n if not loc:\n print(\"SKIPPING %s due to %s\" % (city, \"it being unrecognized\"))\n return notfound\n elif isinstance(loc, Exception):\n print(\"SKIPPING %s due to %s\" % (city, loc))\n return notfound\n else:\n return {'City, State/Country': city,\n 'latitude': loc[1][0],\n 'longitude': loc[1][1]}\n\n _lat_lon = []\n with concurrent.futures.ThreadPoolExecutor(max_workers=nworkers) as executor:\n for city in cities:\n executor\\\n .submit(geocoder, nominatum, city)\\\n .add_done_callback(receives_future(False)(_lat_lon.append))\n\n lat_lon = pd.DataFrame(_lat_lon)\n return lat_lon", "def get_absolute_and_relative_covid19_occurance():\n geolocation_data = load_geolocation_data()\n abs_cases = load_absolute_case_numbers()\n abs_cases_aggregated_age_groups = aggregate_absolute_cases_by_age(abs_cases)\n abs_cases_aggregated = aggregate_absolute_cases_by_lk(abs_cases_aggregated_age_groups)\n lk_information = load_landkreis_information()\n merged_data = merge_data(abs_cases_aggregated, lk_information, geolocation_data)\n return prettify_output(merged_data, columns=[\"AnzahlFall\", \"AnzahlTodesfall\", \"RelativFall\", \"RelativTodesfall\", \"geo_point_2d\"])", "def year_lookup_bounds(self, value):\n first = '%s-01-01 00:00:00'\n # SQL Server doesn't support microseconds\n last = '%s-12-31 23:59:59'\n return [first % value, last % value]", "def get_sea_level_raw(start_year, end_year, path_out):\n c = cdsapi.Client()\n\n for year in range(start_year, end_year + 1):\n\n print(f\"Starting Year: {year}\")\n\n c.retrieve(\n \"satellite-sea-level-global\",\n {\n \"format\": \"tgz\",\n \"year\": [str(year)],\n \"month\": [\n \"01\",\n \"02\",\n \"03\",\n \"04\",\n \"05\",\n \"06\",\n \"07\",\n \"08\",\n \"09\",\n \"10\",\n \"11\",\n \"12\",\n ],\n \"day\": [\n \"01\",\n \"02\",\n \"03\",\n \"04\",\n \"05\",\n \"06\",\n \"07\",\n \"08\",\n \"09\",\n \"10\",\n \"11\",\n \"12\",\n \"13\",\n \"14\",\n \"15\",\n \"16\",\n \"17\",\n \"18\",\n \"19\",\n \"20\",\n \"21\",\n \"22\",\n \"23\",\n \"24\",\n \"25\",\n \"26\",\n \"27\",\n \"28\",\n \"29\",\n \"30\",\n \"31\",\n ],\n },\n os.path.join(path_out, str(year) + \"_download.tar.gz\"),\n )", "def CalcAnglesAndSlantRanges(numRxScanAngles, altitude, angles, minLookAngle):\n IA_Arr = []\n SR_Arr = []\n for i in range(numRxScanAngles):\n ia_arr = SEM.IncidenceAngle(altitude, angles[i] + minLookAngle)\n sr_arr = SEM.SlantRange(altitude, ia_arr, angles[i] + minLookAngle)\n IA_Arr.append(ia_arr)\n SR_Arr.append(sr_arr)\n return IA_Arr, SR_Arr", "def _construct_yearly_climate(self, cl, now, end):\n if self.simulation:\n years = range(now.year, end.year + 1)\n if len(years)>1:\n # the ordinals of days within the year\n lastord = float(date(now.year, 12, 31).timetuple()[7])\n noword = float(now.timetuple()[7])\n firstyearweight = (lastord - noword) / lastord\n lastord = float(date(end.year, 12, 31).timetuple()[7])\n endord = float(end.timetuple()[7])\n lastyearweight = endord / lastord\n else:\n # for steady state computation year 0 or 1 used\n years = [0]\n firstyearweight = 1.0\n 
lastyearweight = 1.0\n self.curr_yr_ind = 0\n rain = 0.0\n temp = 0.0\n ampl = 0.0\n addyear = True\n if len(years)==1:\n firstyearweight = 1.0\n if now.year==end.year and not(end.month==12 and end.day==31):\n addyear = False\n maxind = len(self.md.yearly_climate) - 1\n for ind in range(len(years)):\n if self.curr_yr_ind > maxind:\n self.curr_yr_ind = 0\n cy = self.md.yearly_climate[self.curr_yr_ind]\n if self.simulation and cy.timestep==0:\n # timestep 0 is used only for steady state calculation\n self.curr_yr_ind += 1\n if self.curr_yr_ind <= maxind:\n cy = self.md.yearly_climate[self.curr_yr_ind]\n if ind == 0:\n weight = firstyearweight\n passedzero = False\n elif ind == len(years) - 1:\n weight = lastyearweight\n else:\n weight = 1.0\n temp += weight * cy.mean_temperature\n rain += weight * cy.annual_rainfall\n ampl += weight * cy.variation_amplitude\n if addyear:\n self.curr_yr_ind += 1\n # backs one year back, if the last weight was less than 1\n if weight < 1.0 and addyear:\n self.curr_yr_ind -= 1\n if self.curr_yr_ind < 0:\n self.curr_yr_ind = len(self.md.yearly_climate) - 1\n cl['rain'] = rain / len(years)\n cl['temp'] = temp / len(years)\n cl['amplitude'] = ampl / len(years)\n return cl", "def getYearsOfService(person, chamber=\"\"):\n\tcongresses = []\n\n\tif chamber and chamber not in [\"House\", \"Senate\"]:\n\t\treturn []\n\n\tif chamber:\n\t\tif \"congresses_\" + chamber.lower() in person:\n\t\t\tcongresses = person[\"congresses_\" + chamber.lower()]\n\telse:\n\t\tif \"congresses\" in person:\n\t\t\tcongresses = person[\"congresses\"]\n\n\tif not congresses:\n\t\treturn []\n\n\tyearSet = [[congressToYear(congress[0], 0), congressToYear(congress[1], 1)] for congress in congresses]\n\n\tif chamber and len(yearSet) and \"voting_dates\" in person and chamber in person[\"voting_dates\"] and int(person[\"voting_dates\"][chamber][1].split(\"-\")[0]):\n\t\tif yearSet[-1][1] > int(person[\"voting_dates\"][chamber][1].split(\"-\")[0]):\n\t\t\tyearSet[-1][1] = int(person[\"voting_dates\"][chamber][1].split(\"-\")[0])\n\n\treturn yearSet", "def get_countries_by_calling_code(cls, calling_code, filters=None):\n resource = \"/callingcode\"\n return cls._get_country_list(resource, calling_code, filters=filters)", "def lons(self):\n return self._origin.lon", "def lons(self):\n return self._origin.lon", "def test_retrieve_l_organization_locations(self):\n pass", "def _getlats(self):\n lats = 90. 
- np.degrees(self.zeros)\n return lats", "def get_school_data(year: int) -> pd.DataFrame:\n\n # the year for the data\n\n # import the three sets of data\n ks4 = pd.read_csv(f'data/815_{year}_ks4.csv',\n usecols=(\"URN\", \"SCHNAME\", \"PTEBACC_95\", \"ATT8SCR\"), dtype={'URN': 'S75'})\n ks4['URN'] = pp.clean_urn(ks4['URN']) # clean away all the bullshit in the URN column\n census = pd.read_csv(f'data/815_{year}_census.csv', usecols=(\"URN\", \"PNUMEAL\"))\n cfr = pd.read_csv(f'data/815_{year}_cfr.csv', usecols=('URN', 'TOTALINCOME'))\n\n # merge tables\n merge = pd.merge(left=ks4, right=census, how='left', left_on='URN', right_on='URN')\n merge = pd.merge(left=merge, right=cfr, how='left', left_on='URN', right_on='URN')\n\n # populate the year column\n merge['YEAR'] = str(year)\n\n return merge", "def inflation (years, salaries):\n for i in range(len(salaries)):\n salaries[i][0] = float(salaries[i][0])/(0.368+0.025*(years[i][0]-2000))", "def scrape_all_world_cup_lineups():\n def scrape_lineups_year(year):\n urls = scrape_world_cup_scoreboard(year)\n lineups = []\n for url in urls:\n lineups.extend(scrape_fifa_lineups(url, 'FIFA World Cup'))\n return lineups\n\n l = []\n for year in sorted(world_cup_mapping.keys()):\n l.extend(scrape_lineups_year(year))\n return l", "def getYearlyPrism(all_basin_geoms, fromYear, toYear):\n \n yearly_Prism = {}\n for yy in range(fromYear, toYear):\n print(\"Processing year\", yy)\n for mm in range(1, 3):\n print(\"Processing month\", mm) \n if(mm<10):\n mmyy = '0'+str(mm)+'-'+str(yy)\n else:\n mmyy = str(mm)+'-'+str(yy)\n print(mmyy)\n yearly_Prism[mmyy] = get_intersected_basins_ppt_data(all_basin_geoms, month=mm, year=yy)\n \n return yearly_Prism", "def get_age_grad(self,renew=False):\n\t\ttry:\n\t\t\tdriv_lat = self['deriv_lat'].value\n\t\t\tdriv_lon = self['deriv_lon'].value\n\t\t\tdriv_msk = self['deriv_msk'].value\n\t\texcept:\n\t\t\tself._cal_age_grad()\n\t\tderiv_lat = self['deriv_lat'].value\n\t\tderiv_lon = self['deriv_lon'].value\n\t\tderiv_msk = self['deriv_msk'].value\n\t\tage_lon_Vec = self['age_lon_Vec'].value\n\t\tage_lat_Vec = self['age_lat_Vec'].value\n\t\txx, yy = np.meshgrid(age_lon_Vec, age_lat_Vec) # xx for longitude, yy for latitude\n\t\txx = xx.reshape(xx.size)\n\t\tyy = yy.reshape(yy.size)\n\t\tf_deriv_lat = NearestNDInterpolator(np.column_stack((xx,yy)),deriv_lat.reshape(deriv_lat.size),rescale=False)\n\t\tf_deriv_lon = NearestNDInterpolator(np.column_stack((xx,yy)),deriv_lon.reshape(deriv_lon.size),rescale=False)\n\t\tf_deriv_msk = NearestNDInterpolator(np.column_stack((xx,yy)),deriv_msk.reshape(deriv_msk.size),rescale=False)\n\t\tfor period in self.attrs['prd_arr']:\n\t\t\tgroup = self['%g_sec'%( period )]\n\t\t\tlons_orig = group['lonArr'].value\n\t\t\tlons = lons_orig.reshape(lons_orig.size)\n\t\t\tlats = group['latArr'].value.reshape(lons_orig.size)\n\t\t\tderiv_lat_Arr = f_deriv_lat(np.column_stack((lons,lats))).reshape(lons_orig.shape)\n\t\t\tderiv_lon_Arr = f_deriv_lon(np.column_stack((lons,lats))).reshape(lons_orig.shape)\n\t\t\tderiv_msk_Arr = f_deriv_msk(np.column_stack((lons,lats))).reshape(lons_orig.shape)\n\t\t\tif renew:\n\t\t\t\tdel group['age_deriv_lat_Arr']\n\t\t\t\tdel group['age_deriv_lon_Arr']\n\t\t\t\tdel group['age_deriv_msk_Arr']\n\t\t\tgroup.create_dataset(name='age_deriv_lat_Arr', data=deriv_lat_Arr)\n\t\t\tgroup.create_dataset(name='age_deriv_lon_Arr', data=deriv_lon_Arr)\n\t\t\tgroup.create_dataset(name='age_deriv_msk_Arr', data=deriv_msk_Arr)\n\t\tpass", "def _ul_lr(self):\n ulx, xres, xskew, uly, yskew, 
yres = self.geotransform\n # Index from the end - GDal usually orders bands-first:\n lrx = ulx + (self.array.shape[-2] * xres)\n lry = uly + (self.array.shape[-1] * yres)\n return ulx, uly, lrx, lry", "def get_inclination(self, p_arr, q_arr):\n n = len(self.planets)\n inclinations = []\n p, q = p_arr, q_arr\n for j in range(n):\n inclinations.append(np.real(np.sqrt(p[j]*np.conjugate(p[j])+q[j]*np.conjugate(q[j]))))\n return np.array(inclinations)", "def _lagrange2(x, y):\n\n def P(x_ip):\n total = 0\n n = len(x)\n for i in range(0, n):\n\n def g(i, n):\n tot_mul = 1\n for j in range(0, n):\n if i == j:\n continue\n if x[i] == x[j]:\n log.fatal(\n f\"Leads to division by zero (x = {x[i]}). Identical values given in x array. \"\n \"For example by using Lagrange interpolation for precise orbit, \"\n \"check if identical observation epochs are given in SP3 file\"\n )\n tot_mul *= (x_ip - x[j]) / float(x[i] - x[j])\n return tot_mul\n\n total += y[i] * g(i, n)\n return total\n\n return P", "def year_dropdown_populator():\n start_year_unique = funding_data['start_year'].unique()\n\n year_list = []\n for i in start_year_unique:\n if i == -1:\n # print({'label': i, 'value': i})\n # NA values has been changes to -1\n year_list.append({'label': 'NA', 'value': -1})\n else:\n x = int(i)\n # print({'label': x, 'value': i})\n year_list.append({'label': i, 'value': i})\n return year_list", "def find_legislation(self, frbr_uri_prefix, fields=('field_frbr_uri',)):\n params = {\n 'filter[field_frbr_uri][value]': frbr_uri_prefix,\n 'filter[field_frbr_uri][operator]': 'STARTS_WITH'\n }\n\n if fields:\n params['fields[node--legislation]'] = ','.join(fields)\n\n resp = self.session.get(self.url + '/jsonapi/node/legislation', params=params, timeout=self.timeout)\n self.check_for_error(resp)\n info = resp.json()\n if info['data']:\n return info['data'][0]", "def _compute_asset_lifetimes(self, country_codes):\n sids = starts = ends = []\n equities_cols = self.equities.c\n futures_cols = self.futures_contracts.c\n if country_codes:\n equities_query = sa.select((\n equities_cols.sid,\n equities_cols.start_date,\n equities_cols.auto_close_date,\n )).where(\n (self.exchanges.c.exchange == equities_cols.exchange) &\n (self.exchanges.c.country_code.in_(country_codes))\n )\n futures_query = sa.select((\n futures_cols.sid,\n futures_cols.start_date,\n futures_cols.auto_close_date,\n )).where(\n (self.exchanges.c.exchange == futures_cols.exchange) &\n (self.exchanges.c.country_code.in_(country_codes))\n )\n results = equities_query.union(futures_query).execute().fetchall()\n if results:\n sids, starts, ends = zip(*results)\n\n sid = np.array(sids, dtype='i8')\n start = np.array(starts, dtype='f8')\n end = np.array(ends, dtype='f8')\n start[np.isnan(start)] = 0 # convert missing starts to 0\n end[end==np.datetime64('NaT').view('i8')] = np.iinfo(int).max # convert missing end to INTMAX\n return Lifetimes(sid, start, end)", "def findCountryCode(self):\n RecordsWithCountry = []\n for state in pycountry.subdivisions:\n #print(state.name)\n for record in self.Records: \n if state.name == record.state:\n #print(state.country, record.state)\n r = RecordCountry(date=record.date,\n country=state.country.alpha_3,\n impressions=record.impressions,\n CTR=record.CTR)\n self.Records.remove(record)\n RecordsWithCountry.append(r)\n for record in self.Records: \n r = RecordCountry(date=record.date,\n country=\"XXX\",\n impressions=record.impressions,\n CTR=record.CTR)\n RecordsWithCountry.append(r)\n self.Records = RecordsWithCountry", "def 
central_coords(self):\n lons=[]\n lats=[]\n for k in range(self.len_regions):\n geometry = self.regions_json['features'][k]['geometry']\n\n if geometry['type'] == 'Polygon':\n county_coords=np.array(geometry['coordinates'][0])\n elif geometry['type'] == 'MultiPolygon':\n county_coords=np.array(geometry['coordinates'][0][0])\n\n m0, M0 =county_coords[:,0].min(), county_coords[:,0].max()\n m1, M1 =county_coords[:,1].min(), county_coords[:,1].max()\n lons.append(0.5*(m0+M0))\n lats.append(0.5*(m1+M1))\n\n return lons, lats", "def _get_lcc_params(projection_object):\n\n projection_string = projection_object.srs\n words = projection_string.split()\n\n property_names = [w.split('=')[0][1:] for w in words]\n property_values = [w.split('=')[1] for w in words]\n projection_dict = dict(list(\n zip(property_names, property_values)\n ))\n\n if projection_dict['proj'] != LAMBERT_CONFORMAL_STRING:\n error_string = 'Grid projection should be \"{0:s}\", not \"{1:s}\".'.format(\n LAMBERT_CONFORMAL_STRING, projection_dict['proj']\n )\n\n raise ValueError(error_string)\n\n central_longitude_deg = float(projection_dict['lon_0'])\n standard_latitudes_deg = numpy.array([\n float(projection_dict['lat_1']), float(projection_dict['lat_2'])\n ])\n\n return standard_latitudes_deg, central_longitude_deg", "def get_school_year(data, date_col, data_path):\n\n # generate empty df with all dates between start and end\n start = data[date_col].min()\n end = data[date_col].max()\n df = pd.date_range(start, end, freq=\"D\").to_frame(index=False, name=\"date\")\n\n # read external holidays csv\n def _parser(date):\n return pd.to_datetime(date)\n\n holidays = pd.read_csv(f'{data_path}',\n parse_dates=['date_debut', 'date_fin'],\n date_parser=_parser)\n holidays = holidays[[\"annee_scolaire\", \"date_debut\", \"date_fin\"]]\n holidays = holidays.drop_duplicates()\n\n # simulate an interval based left join using pandas\n # perform a cross join on temp_key\n up_bound = \"date_fin\"\n low_bound = \"date_debut\"\n df['temp_key'] = 1\n holidays['temp_key'] = 1\n crossjoindf = pd.merge(df, holidays, on=['temp_key'])\n\n df.drop(columns=['temp_key'], inplace=True)\n crossjoindf.drop(columns=['temp_key'], inplace=True)\n \n # filter with lower_bound & upper_bound\n conditionnal_df = crossjoindf[\n (crossjoindf[\"date\"] >= crossjoindf[low_bound]) & (crossjoindf[\"date\"] <= crossjoindf[up_bound])]\n\n # merge on the main df with all cols as keys to simulate left join\n df_col = df.columns.values.tolist()\n conditionnal_df.set_index(df_col, inplace=True)\n df = df.merge(conditionnal_df, left_on=df_col, right_index=True, how='left')\n\n df.set_index('date', inplace=True) \n data = pd.merge(data, df['annee_scolaire'], on='date')\n \n return data", "def radecs_to_lb(ras, decs):\n obj = coord.SkyCoord(ras, decs, unit = \"deg\", frame = \"icrs\")\n obj = obj.galactic\n \n ls = obj.l.degree\n bs = obj.b.degree\n \n return ls, bs", "def test_leap_years_1899to1904_and_1999to2004():\n\tyears = [1899,1900,1901,1902,1903,1904,1999,2000,2001,2002,2003,2004]\n\tleaps = date_functions.is_leap( years )\n\tassert not leaps[0] # 1899 was not a leap year\n\tassert not leaps[1] # 1900 was not a leap year\n\tassert not leaps[2] # 1901 was not a leap year\n\tassert not leaps[3] # 1902 was not a leap year\n\tassert not leaps[4] # 1903 was not a leap year\n\tassert leaps[5] # 1904 was a leap year\n\tassert not leaps[6] # 1999 was not a leap year\n\tassert leaps[7] # 2000 was a leap year\n\tassert not leaps[8] # 2001 was not a leap year\n\tassert not leaps[9] 
# 2002 was not a leap year\n\tassert not leaps[10] # 2003 was not a leap year\n\tassert leaps[11] # 2004 was a leap year", "def calculate_rise_set_lsts(self, telescope_latitude, horizon_buffer=0.04364):\n lat_rad = telescope_latitude.rad\n buff = horizon_buffer\n\n lon, lat = self.get_lon_lat()\n\n tans = np.tan(lat_rad) * np.tan(lat.rad)\n\n with warnings.catch_warnings():\n warnings.filterwarnings(\n \"ignore\", message=\"invalid value encountered\", category=RuntimeWarning\n )\n rise_lst = lon.rad - np.arccos((-1) * tans) - buff\n set_lst = lon.rad + np.arccos((-1) * tans) + buff\n\n rise_lst[rise_lst < 0] += 2 * np.pi\n set_lst[set_lst < 0] += 2 * np.pi\n rise_lst[rise_lst > 2 * np.pi] -= 2 * np.pi\n set_lst[set_lst > 2 * np.pi] -= 2 * np.pi\n\n self._rise_lst = rise_lst\n self._set_lst = set_lst", "def austral_year_daily(x, y):\n if isinstance(x, xr.DataArray):\n x = x.values\n \n jfmamj = x < 182.\n jasond = x >= 182.\n \n x_jasond = []\n y_jasond = []\n if any(jasond):\n x_jasond = x[jasond] - 181\n y_jasond = y[jasond]\n\n x_jfmamj = []\n y_jfmamj = []\n if any(jfmamj):\n x_jfmamj = x[jfmamj] + 184\n y_jfmamj = y[jfmamj]\n\n xout = np.concatenate([xi for xi in [x_jasond, x_jfmamj] if len(xi)])\n yout = np.concatenate([yi for yi in [y_jasond, y_jfmamj] if len(yi)])\n \n return xout, yout", "def origins(self) -> Sequence['outputs.ResourceReferenceResponse']:\n return pulumi.get(self, \"origins\")", "def LINNEAN_RANKS(cls):\n return (\n cls.KINGDOM,\n cls.PHYLUM,\n cls.CLASS,\n cls.ORDER,\n cls.FAMILY,\n cls.GENUS,\n cls.SPECIES\n )", "def test_found_all_years(self):\n ar = awstats_reader.AwstatsReader(test_file_dir, 'jjncj.com')\n self.assertEqual(ar.years, [2008,2009])", "def loan_data():\n return pd.read_csv(data_path / \"credit_data.csv\")", "def offices_ldap():\n conn = Connection(\"ldap.laas.fr\", auto_bind=True)\n conn.search(\n \"dc=laas,dc=fr\",\n \"(laas-mainGroup=gepetto)\",\n attributes=[\"sn\", \"givenName\", \"roomNumber\", \"st\"],\n )\n offices = Offices()\n for entry in conn.entries:\n room, gn, sn, st = (\n str(entry.roomNumber),\n str(entry.givenName),\n str(entry.sn),\n str(entry.st),\n )\n if (\n st not in [\"JAMAIS\", \"NON-PERTINENT\"]\n and date(*(int(i) for i in reversed(st.split(\"/\")))) < date.today()\n ):\n continue # filter out alumni\n if room == \"[]\":\n continue # filter out the Sans-Bureaux-Fixes\n offices[room].add(Gepettist(sn, gn))\n return offices", "def get_inflation_country():\n print(\">> Downloading WORLD BANK inflation / country data...\")\n url = source_config.inflation_data_url['latest']\n output_file = source_config.inflation_data_files['raw']['latest']\n download_insee_excel(url, output_file, check=False)", "def east_asia_pacific_countries():\r\n east_asia_pacific_data = []\r\n years = []\r\n medians = []\r\n lst = []\r\n for idx in range(1960, 2016):\r\n years.append(idx)\r\n for idx in east_asia_pacific:\r\n east_asia_pacific_data.append(life_expectancy_graph(idx))\r\n y_idx = 0\r\n for idx in east_asia_pacific_data:\r\n if idx != None and idx != {}:\r\n if (list(idx.keys())[y_idx]) == years[y_idx]:\r\n lst.append((list(idx.values())[y_idx]))\r\n lst = sorted(lst)\r\n medians.append(median(lst))\r\n return medians" ]
[ "0.54118204", "0.5000237", "0.4949208", "0.4931147", "0.4911661", "0.4811012", "0.47071233", "0.47071233", "0.47031814", "0.4534821", "0.45185816", "0.45088586", "0.45060778", "0.44371125", "0.4435685", "0.44336432", "0.4428876", "0.44172257", "0.44085371", "0.43731523", "0.43697885", "0.4329497", "0.43193823", "0.43125367", "0.4302499", "0.42985916", "0.42971858", "0.42867166", "0.42796117", "0.4272254", "0.42656305", "0.4263965", "0.4247813", "0.42418402", "0.42418402", "0.4237165", "0.42327294", "0.42144084", "0.421317", "0.42126164", "0.42121816", "0.42094988", "0.42021328", "0.41989055", "0.41954973", "0.4195237", "0.41896257", "0.41721243", "0.41664526", "0.41512254", "0.41491246", "0.41438055", "0.41398254", "0.41385746", "0.41382167", "0.41348445", "0.41316247", "0.41311315", "0.41233858", "0.41206938", "0.41199058", "0.4115827", "0.4109235", "0.41001377", "0.4097641", "0.40938854", "0.40871418", "0.40867564", "0.4085843", "0.40831554", "0.4073114", "0.4073114", "0.40690282", "0.4065475", "0.4057871", "0.405707", "0.40565377", "0.40538973", "0.4045215", "0.4037163", "0.4028717", "0.40266207", "0.402654", "0.4026089", "0.4023288", "0.40217242", "0.40208682", "0.40097186", "0.4006386", "0.39996436", "0.3995988", "0.39930582", "0.39917105", "0.39828712", "0.3974064", "0.39703125", "0.39696723", "0.39688122", "0.39673892", "0.3967219" ]
0.6544321
0
Return string representation of the instance.
def __str__(self): return self.branch_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def as_string(self):\n return self.__repr__()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def 
__repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()" ]
[ "0.85652095", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225" ]
0.0
-1
Return repr representation of the instance.
def __repr__(self): return "<Branch: %s>" % self
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __repr__(self) -> str:\n return '{:s}({!r})'.format(self.__class__.__name__, self.getvalue())", "def __repr__(self) -> str:\n return '{:s}({!r})'.format(self.__class__.__name__, self.getvalue())", "def __repr__(self) -> str:\n return '{:s}({!r})'.format(self.__class__.__name__, self.getvalue())", "def __repr__(self) -> str:\n items = (\"{}={}\".format(k, repr(v)) for k, v in self.__dict__.items())\n return \"<{}({})>\".format(self.__class__.__name__, \", \".join(items))", "def __repr__(self):\n return self.pretty_print(self.__dict__)", "def __str__(self):\n return repr(self)", "def __repr__(self) -> str:\n return self.__str__()", "def __repr__(self) -> str:\n return self.__str__()", "def __repr__(self) -> str:\n return self.__str__()", "def __repr__(self) -> str:\n return self.__str__()", "def __repr__(self):\n # type: () -> str\n return self.to_str()", "def __repr__(self):\n # type: () -> str\n return self.to_str()", "def __repr__(self):\n # type: () -> str\n return self.to_str()", "def as_string(self):\n return self.__repr__()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def 
__repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()" ]
[ "0.8206054", "0.8206054", "0.8206054", "0.81909746", "0.8183906", "0.8177763", "0.8165392", "0.8165392", "0.8165392", "0.8165392", "0.81573534", "0.81573534", "0.81573534", "0.81428844", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096" ]
0.0
-1
Loads branch data from Derek.
def _load(self): context = { "username": self.username, "reponame": self.reponame, "name": self.name } LOG.debug("Loading %s" % self.branch_id) doc = self._client.getjson(path="/users/%(username)s/repos/%(reponame)s" "/branches/%(name)s" % context) LOG.debug("doc loaded: %r" % doc) slice_id = "%(username)s/%(reponame)s/%(slice_id)s" % { "username": self.username, "reponame": self.reponame, "slice_id": doc["slice_id"] } self._slice = self._client.slice(slice_id) self._packages = doc["packages"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_data(self) -> None:", "def load_data(self):", "def branches(self):\r\n url = self.base_url + 'branches/'\r\n return json.loads(self.bb.load_url(url))", "def _finish_init(self):\n\n # This is usually done in set_other(), but we already set it as part of\n # the constructor.\n self.this_branch.fetch(self.other_branch,\n last_revision=self.other_basis)", "def load_tree(self, name='tree'):\n handle = self.target_connector.get_handle()\n handle.setContext(self.target_connector)\n fname = fs.path.join('.', name + '.json')\n with handle.open(fname, 'rb') as fp:\n data = fp.read()\n self.request.response.setHeader('content-type', 'application/json')\n return data", "def load_breeze(self, breeze_path):\n self.breeze = pd.read_pickle(os.path.join(self.data_path, 'breeze.pick'))", "def load_from_disk(self, file_name = \"vehicle_classifier.pkl\"):\n self.classifier.load_from_disk(file_name)", "def load(self):\n #self.df = read_file(\"../data/yelp_academic_dataset_user.json\") #Full Data.\n self.df = read_file(\"../data/user300.json\") #For local machine.\n #self.get_friend_list()\n #self.save_friend_nodes()", "def fetch_branches(self):\n for jrepo in self.json_repos['repos']:\n title = str(jrepo[\"title\"])\n self.branches[title] = str(jrepo['current'])", "def _load_tree(tree_pickle):\n try:\n with open(tree_pickle, 'rb') as f:\n tree = pickle.load(f)\n except Exception as e:\n logger.warning('Could not extract tree from {}: {}'\n .format(tree_pickle, e))\n tree = None\n\n return tree", "def testLoadIntegratedDrugBankData(self):\n try:\n crw = ChemRefEtlWorker(self.__cfgOb, self.__cachePath)\n crExt = ChemRefExtractor(self.__cfgOb)\n\n idD = crExt.getChemCompAccessionMapping(referenceResourceName=\"DrugBank\")\n logger.info(\"Mapping dictionary %r\", len(idD))\n #\n ok = crw.load(self.__updateId, extResource=\"DrugBank\", loadType=\"full\")\n #\n self.assertTrue(ok)\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()", "def load_data_file(self):\n with open(self.files['data'], 'r') as infile:\n data = json.load(infile)\n self.boundary_nodes = data['boundary_nodes']\n self.nodes = {int(k): v for k, v in data['nodes'].items()}\n self.levels = data['levels']\n infile.close()", "def fetch(self, remote: str, branch: str) -> str:\n self.__verify_repo_initialized()\n address = heads.get_remote_address(self._env.branchenv, name=remote)\n self._client = HangarClient(envs=self._env, address=address)\n CW = ContentWriter(self._env)\n\n with closing(self._client) as client:\n client: HangarClient\n\n # ----------------- setup / validate operations -------------------\n\n try:\n cHEAD = heads.get_branch_head_commit(self._env.branchenv, branch)\n except ValueError:\n # branch does not exist on local client\n try:\n s_branch = client.fetch_branch_record(branch)\n sHEAD = s_branch.rec.commit\n except grpc.RpcError as rpc_error:\n if rpc_error.code() == grpc.StatusCode.NOT_FOUND:\n # branch does not exist on remote\n logger.error(rpc_error.details())\n raise rpc_error\n else:\n c_bhistory = summarize.list_history(\n self._env.refenv, self._env.branchenv, branch_name=branch)\n try:\n s_branch = client.fetch_branch_record(branch)\n sHEAD = s_branch.rec.commit\n except grpc.RpcError as rpc_error:\n if rpc_error.code() == grpc.StatusCode.NOT_FOUND:\n # branch does not exist on remote\n logger.error(rpc_error.details())\n raise rpc_error\n\n # verify histories are intact and should be synced\n if sHEAD == cHEAD:\n warnings.warn(f'NoOp: {sHEAD} == client HEAD {cHEAD}', 
UserWarning)\n return branch\n elif sHEAD in c_bhistory['order']:\n warnings.warn(\n f'REJECTED: remote HEAD: {sHEAD} behind local: {cHEAD}', UserWarning)\n return branch\n\n # ------------------- get data ------------------------------------\n\n mCmtResponse = client.fetch_find_missing_commits(branch)\n m_cmts = mCmtResponse.commits\n for commit in tqdm(m_cmts, desc='fetching commit data refs'):\n # Get missing label (metadata) digest & values\n m_labels = set(client.fetch_find_missing_labels(commit))\n for label in m_labels:\n received_hash, labelVal = client.fetch_label(label)\n CW.label(received_hash, labelVal)\n # Get missing data schema digests & values\n mSchemaResponse = client.fetch_find_missing_schemas(commit)\n for schema in mSchemaResponse.schema_digests:\n schema_hash, schemaVal = client.fetch_schema(schema)\n CW.schema(schema_hash, schemaVal)\n # Record missing data hash digests (does not get data itself)\n m_hashes = client.fetch_find_missing_hash_records(commit)\n m_schema_hash_map = defaultdict(list)\n for digest, schema_hash in m_hashes:\n m_schema_hash_map[schema_hash].append((digest, schema_hash))\n for schema_hash, received_data in m_schema_hash_map.items():\n CW.data(schema_hash, received_data, backend='50')\n\n # Get missing commit reference specification\n for commit in tqdm(m_cmts, desc='fetching commit spec'):\n cmt, parentVal, specVal, refVal = client.fetch_commit_record(commit)\n CW.commit(cmt, parentVal, specVal, refVal)\n\n # --------------------------- At completion -----------------------\n\n # Update (or create) remote branch pointer with new HEAD commit\n fetchBranchName = f'{remote}/{branch}'\n try:\n heads.create_branch(\n self._env.branchenv, name=fetchBranchName, base_commit=sHEAD)\n except ValueError:\n heads.set_branch_head_commit(\n self._env.branchenv, branch_name=fetchBranchName, commit_hash=sHEAD)\n\n return fetchBranchName", "def load_data(self):\n raise NotImplementedError()", "def _load_state(\n self, datapath: str, dpr_model: str, pretrained_path: str, encoder_type: str\n ):\n if dpr_model == 'bert':\n state_dict = BertConversionUtils.load_bert_state(\n datapath,\n self.state_dict(),\n pretrained_dpr_path=pretrained_path,\n encoder_type=encoder_type,\n )\n self.load_state_dict(state_dict)\n elif dpr_model == 'bert_from_parlai_rag':\n state_dict = torch.load(pretrained_path, map_location='cpu')[\"model\"]\n key = f\"{encoder_type}_encoder.\"\n state_dict = {\n k.split(key)[-1]: v for k, v in state_dict.items() if key in k\n }\n self.load_state_dict(state_dict)", "def _load_tree(tree_path):\n try:\n with open(tree_path, 'rb') as f:\n tree = pickle.load(f)\n except Exception as e:\n logger.warning('Could not extract tree from {}: {}'\n .format(tree_path, e))\n tree = None\n\n return tree", "def dumbcache_load(cache_dir=r'data\\cache'):\n DUMBCACHE = os.path.join(r'..', cache_dir, r'br_store.dmp')\n with open(DUMBCACHE, 'rb') as f:\n return pkl.load(f)", "def load_cleaned_data(self):\n try:\n self.train = pd.read_pickle('../input/train_clean.pkl')\n self.test = pd.read_pickle('../input/test_clean.pkl')\n except FileNotFoundError:\n self.load_raw_data()", "def _load_tree_pickle(self, folderpath):\n pickle_file = os.path.join(folderpath, 'grid.pickle')\n if os.path.exists(pickle_file):\n pf = open(pickle_file, \"rb\")\n self.__dict__.update(pickle.load(pf))\n else:\n raise EnvironmentError('Unable to load pickle file {}, does not exist'.format(pickle_file))\n self.tree.manager = self", "def load_and_fix(self):\n # Read in json\n 
self.read_json()\n\n if self.size_to_load:\n self.data = self.data[:self.size_to_load]\n\n # Add names from database given _bsn:\n self.extend_dataframe_with_personnames()\n\n # Clean rows in the data_frame where the names column is empty - > thus no response from the database\n self.clean_none_response()\n\n # Fix path from A09.pdf to A09.json\n self.fix_path()\n\n # Get the correct names from the database response\n self.parse_names_from_response()\n\n print(\" --- Final Shape Data ---\")\n print(self.data.shape)\n print(list(self.data))\n\n # Save pickled object in ./data map\n self.save_obj(self.data, self.file_name_to_save)", "def load (self, filename) :\n\t\tserialFile = open (filename, \"rb\")\n\t\tself.production_rules = pickle.load (serialFile)\n\t\tself.unitrelation = pickle.load (serialFile)\n\t\tself.labels = pickle.load (serialFile)\n\t\tself.keeper = pickle.load (serialFile)\n\t\tself.strnodes = pickle.load(serialFile)\n\t\tself.tokens = pickle.load (serialFile)\n\t\tserialFile.close()", "def load(self):\n pass", "def load(self):\n pass", "def load(self):\n pass", "def load(self):\n pass", "def get_repo_branch(self):\n # Load HEAD and find ref.\n with open('{path}HEAD'.format(path=self.workpath), 'rb') as fp:\n ref = fp.read().strip().decode().split(': ')[1]\n\n print('[+] Downloading {}'.format(ref))\n\n # Requests for head hash and save\n head_url = '{base_url}{ref}'.format(base_url=self.base_url, ref=ref)\n data = self._request(head_url).read().strip()\n\n # Save the hash inside the ref file into the target place.\n ref_path = '/'.join(ref.split('/')[:-1])\n if not os.path.exists('{path}{ref_path}'.format(path=self.workpath, ref_path=ref_path)):\n os.makedirs('{path}{ref_path}'.format(path=self.workpath, ref_path=ref_path))\n with open('{path}{ref}'.format(path=self.workpath, ref=ref), 'wb') as fp:\n fp.write(data)\n\n # After get ref->head_hash, why not share it.\n self.head_hash = data.decode()", "def _get_branches(self):\n logging.info('--- Get Branches ---')\n self.local_branches = set(self.find_branches())\n self.remote_branches = set(self.find_branches(remote=True))\n # Tags are remote branches that start with \"tags/\".\n self.tags = {\n single_branch for single_branch in self.remote_branches\n if PRX_SVNTAGS_PREFIX.match(single_branch)}", "def load(self):\n db = CrawlDBI.DBI(dbtype='crawler')\n if self.rowid is not None:\n rows = db.select(table='checkables',\n fields=['rowid',\n 'path',\n 'type',\n 'cos',\n 'cart',\n 'ttypes',\n 'checksum',\n 'last_check',\n 'fails',\n 'reported'],\n where=\"rowid = ?\",\n data=(self.rowid,))\n else:\n rows = db.select(table='checkables',\n fields=['rowid',\n 'path',\n 'type',\n 'cos',\n 'cart',\n 'ttypes',\n 'checksum',\n 'last_check',\n 'fails',\n 'reported'],\n where=\"path = ?\",\n data=(self.path,))\n if 0 == len(rows):\n self.in_db = False\n elif 1 == len(rows):\n self.in_db = True\n rz = list(rows[0])\n self.rowid = rz.pop(0)\n self.path = rz.pop(0)\n self.type = rz.pop(0)\n self.cos = rz.pop(0)\n self.cart = rz.pop(0)\n self.ttypes = rz.pop(0)\n self.checksum = rz.pop(0)\n self.last_check = rz.pop(0)\n try:\n self.fails = rz.pop(0)\n except IndexError:\n self.fails = 0\n try:\n self.reported = rz.pop(0)\n except IndexError:\n self.reported = 0\n self.dirty = False\n else:\n raise StandardError(\"There appears to be more than one copy \" +\n \"of %s in the database\" % self)\n\n db.close()", "def load_data(self):\n self.tif_file = self._find_tif_file()\n if self.with_labeling is not None:\n self.colabel_file = 
self._find_colabeled_file()\n self.colabel_stack = self._load_colabeled_img()\n self.dff, self.indices = self._populate_dff_data()\n self.loaded = True", "def post_load(self, data):\n return data", "def load(self):", "def cma_bst(redownload: bool = False) -> Dataset:\n return Dataset.get(\"cma_bst\", redownload=redownload)", "def load_language_families():\n # If languagefamilies.pk is not available, create it. \n if not os.path.exists(ETHNO_DIR+'languagefamilies.pk'):\n lfs = get_language_families()\n with codecs.open(ETHNO_DIR+'languagefamilies.pk','wb') as fout:\n pickle.dump(lfs, fout)\n # Loads the pickled file.\n with codecs.open(ETHNO_DIR+'languagefamilies.pk','rb') as fin2: \n return pickle.load(fin2)", "async def load_state(self):\n\n\t\twith open(os.path.join(\"config\", \"leaderboards.json\"), \"r+\") as leaderboards:\n\t\t\tself.leaderboards = json.loads(leaderboards.read())", "def load_data(self):\n if self.debug:\n print(\"Loading data\")", "def __init__(self, root, branches=None):\n self.tree_dict = {}\n self.directory = Path(root)\n self.start = str(self.directory).rfind(os.sep) + 1\n self.branches = branches\n self.get()", "def load(self):\n self.classifier = joblib.load(\n \"data/models/badlymappedfinder/badlymappedfinder.joblib\"\n )", "def _load_disk(self):", "def _load_disk(self):", "def get_data(binary_path=None, en_ids_path=None, fr_ids_path=None):\n data_set = None\n if binary_path is not None and os.path.exists(binary_path):\n with open(binary_path, 'rb') as binary:\n data_set = pickle.load(binary)\n else:\n assert en_ids_path is not None and fr_ids_path is not None, 'Tokenized File Not Found!'\n data_set = read_into_buckets(en_ids_path, fr_ids_path)\n return data_set", "def load_data():\r\n print ('Loadng all the file one time......')\r\n if not os.path.exists('cifar.pkl'):\r\n set_data()\r\n with open('cifar.pkl', 'rb') as cifar_pickle:\r\n data = six.moves.cPickle.load(cifar_pickle)\r\n return data", "def load_data():\r\n global labelNames\r\n print(\"Loading Data...\")\r\n\r\n fnpath = \"rawdata\\\\cifar-10-batches-py\"\r\n fnprefix = 'data_batch_'\r\n fnlblnames = 'batches.meta'\r\n fntstbatch = 'test_batch'\r\n\r\n labelNames = unpickle(path.join(fnpath, fnlblnames))\r\n label_names = []\r\n for label in labelNames['label_names']:\r\n label_names.append(\"\".join(map(chr, label)))\r\n labelNames['label_names'] = label_names\r\n\r\n CIFAR_Data.append(unpickle(path.join(fnpath, fntstbatch)))\r\n for n in range(1, 6):\r\n CIFAR_Data.append(unpickle(path.join(fnpath, fnprefix + str(n))))", "def load(self):\n\n args = self.id, self.name\n self.loader.session.logger.debug(\"loading CDR%d (%r)\", *args)\n cursor = self.loader.dictionary_cursor\n cursor.execute(self.DICTIONARY_INSERT, self.entry)\n for alias in self.aliases:\n cursor.execute(self.ALIAS_INSERT, alias)\n self.loader.dictionary_conn.commit()", "def loader(self):\n\n with open(self.source, 'rb') as labels_file:\n self.distance_map = pd.read_pickle(labels_file)\n\n return self.distance_map", "def load_settings(self):\n # config file from branch's asdf\n config_exists = os.path.isfile(self.config_path)\n\n if config_exists:\n\n config_file = open(self.config_path, 'r')\n self.config_json = json.load(config_file)\n config_file.close()\n\n else:\n raise Exception(\"Error BranchConfig: could not find config json\")\n\n\n try:\n self.branch_settings = self.config_json[self.branch]\n\n self.branch_keys = self.branch_settings.keys()\n\n for attr in self.branch_keys:\n setattr(self, attr, 
self.branch_settings[attr])\n\n except:\n raise Exception(\"Error BranchConfig: could not add config settings to BranchConfig\")", "def branches_merging(file_names, tree_name):\n\n branches_to_read = get_unique_branch_names(file_names, tree_name)\n branches = {}\n for file_name in file_names:\n tree = uproot.open(file_name + \":\" + tree_name)\n for branch_name in branches_to_read[file_name]:\n branches[branch_name] = tree[branch_name].array()\n\n tree_branches = { k: v for k, v in branches.items() }\n\n return tree_branches", "def load_pickle(args):\n with open(args.pickle_name, 'rb') as fh:\n datum = pickle.load(fh)\n\n df = pd.DataFrame.from_dict(datum['labels'])\n\n return df", "def load(self):\n logger.debug('Loading state from file %s', self.file_path)\n\n with open(self.file_path, 'rb') as f:\n self.data = pickle.load(f)", "def branches(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'branches')\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def load_kmer_data(k):\n\thg38 = load_kmer_file(\"hg38_{}k.pkl\".format(k))\n\thiv1 = load_kmer_file(\"HIV-1_{}k.pkl\".format(k))\n\treturn (hg38, hiv1)", "def branches(self):\r\n url = '{0}/branches/'.format(self.get_url())\r\n request = http.Request('GET', url)\r\n\r\n return request, parsers.parse_json", "def loadData():\n project_dir = \"/home/c/chandanchowdhury/Documents/CIS-833/CSSearch/indexer/\"\n\n index_file = \"index_file.pkl\"\n link_file = \"link_file.pkl\"\n\n index_data = loadPickle(project_dir+index_file)\n link_data = loadPickle(project_dir+link_file)\n\n return index_data, link_data", "def load(self):\n cwd = os.getcwd()\n path = os.path.join(*[cwd, 'data', 'weighted_clusters',\n f\"weighted_clusters_WIJK{self.input}.dat\"])\n sys.path.append(path)\n\n with open(path, \"rb\") as f:\n unpickler = pickle.Unpickler(f)\n house_batt = unpickler.load()\n\n self.houses, self.batteries = house_batt[0], house_batt[1]", "def load_pickle_data(filename):\n path = \"../tmp/{}.pckl\".format(filename)\n if os.path.exists(path):\n print(\"LOADING PCKL FILE FROM {}\".format(path))\n f = open(path, 'rb')\n obj = pickle.load(f)\n f.close()\n return obj", "def load(self):\n return", "def load_object(self, name: str):\r\n with open_(self._path_for_pickle(name), \"rb\") as f:\r\n return dill.load(f)", "def load_from_disk(name):\n shortname = _dumpify(_compress_name(name) + '.pkl')\n print 'load_from_disk(%s)' % shortname\n pkl_file = open(shortname, 'rb')\n object = pickle.load(pkl_file)\n pkl_file.close()\n return object", "def load_data_set_from_pickle(file_name=None):\n if not file_name:\n try:\n file_name = max(glob.glob(os.path.join(__pickled_data_directory__, '*.chars74k-lite.gz')), key=os.path.getctime)\n except ValueError as e:\n log.error('Unable to load data set from file since no pickled files could be found, ')\n return None\n\n log.debug('Loading data set from file: %s' % file_name)\n return unpickle_data(file_name)", "def load(self):\n self._really_load()", "def load_pickle(filename):\n\n with open(filename, 'rb') as file:\n if filename.split('.')[-1] == 'dill':\n obj = dill.load(file)\n else:\n obj = pickle.load(file)\n return obj", "def load_data():\n with open('data.pickle', 'rb') as f:\n data = pickle.load(f)\n return data", "def load(self):\n self.classifier = joblib.load(\"data/models/repeatsfinder/repeatsfinder.joblib\")", "def read(self):\n\n if self.filename.endswith(\".pkl\"):\n logging.debug(\"Loading pickle file %s\", self.filename)\n data = pd.read_pickle(self.filename)\n\n elif 
self.filename.endswith(\".hdf5\"):\n logging.debug(\"Loading HDF5 file %s\", self.filename)\n with h5py.File(self.filename, \"r\") as data_file:\n\n data = pd.DataFrame(\n {\n column: data_file[\"/haloTrees/%s\" % column].value\n for column in self.columns\n }\n ).set_index(\"nodeIndex\")\n\n # with open(\"./data/cache.pkl\", \"w\") as pickle_file:\n # data.to_pickle(pickle_file)\n\n else:\n raise TypeError(\"Unknown filetype %s\" % self.filename)\n\n return data", "def _fetch_data(self):\n pass", "def download_and_load(self, data_path=None):\n if data_path is None:\n data_path = 'data'\n\n if not self.check_files(data_path + '/cifar-10-batches-py'):\n self.download_and_extract(data_path=data_path)\n\n self.load_cifar10_data(data_path=data_path + '/cifar-10-batches-py')", "def load_data_pickle(self, load_full=False):\n self.train = pd.read_pickle('../input/train_mod.pkl')\n self.test = pd.read_pickle('../input/test_mod.pkl')\n if load_full:\n self.train_full = pd.read_pickle('../input/train_full_mod.pkl')", "def _load_data(self):\n path = os.path.join(self._cache_path, '%s.data' % self._name)\n\n if not os.path.exists(path):\n raise IOError('Data cache missing at %s' % path)\n\n f = bz2.BZ2File(path)\n data = pickle.loads(f.read())\n f.close()\n\n return data", "def load_data(file_dir='../data/', mp_id='mp_id'):\n\n with open('{file_dir}raw_data_{mp_id}.json'.format(file_dir=file_dir, mp_id=mp_id), 'r') as f:\n data = json.load(f)\n bands = data[\"band\"][\"bands\"]\n bands = np.array(bands)\n branches = data[\"band\"][\"branches\"]\n\n return bands, branches", "def __init__(self,path):\n self.path = path\n self.data = {}\n self.hasChanged = False\n #--Load\n if os.path.exists(self.path):\n ins = open(self.path)\n inData = compat.uncpickle(ins)\n self.data.update(inData)", "def load_vals_berkeley(s):\n \n download_dates = s.berk.download_dates\n download_dates_oth = s.berk.download_dates_oth\n sensor_swap_date = s.berk.sensor_swap_date\n \n dfs = {'indoor':{}}\n \n # outdoor temp\n dfs['outdoor'] = get_outdoor_data(s.berk.temps_dir,'berk')\n \n # base directory for indoor temp measurements\n indoor_temp_dir = join(s.berk.temps_dir,'indoor')\n\n print(\"Downloading temps: {}...\".format(download_dates[0]))\n \n # grab room temp data from both control and treatment rooms\n for gi,g in enumerate(['control','treatment']):\n this_df = dfs['indoor'][g] = {}\n \n ## download data from early in experiment when we were using different sensors\n this_df['partition'] = pd.read_excel(join(indoor_temp_dir,download_dates[0].strftime('%Y%m%d'),'{}_p.xls'.format(g)),\n sheet_name='Records',parse_dates=True,index_col=0).loc[:pd.to_datetime(sensor_swap_date),:]\n \n this_df['RA'] = pd.read_excel(join(indoor_temp_dir,download_dates[0].strftime('%Y%m%d'),'{}_RA.xls'.format(g)),\n sheet_name='Data Table',parse_dates=True,index_col=1,header=21).iloc[:,1].loc[:pd.to_datetime(sensor_swap_date)]\n this_df['RA'].name = 'T'\n this_df['RA'] = pd.DataFrame(this_df['RA'])\n this_df['RA']['RH'] = np.nan\n \n for loc in ['partition','RA']:\n this_df[loc].columns = ['T','RH']\n this_df[loc].index.name='time'\n \n ## now download data from sensors we switched to\n for d in download_dates[1:]:\n csv_dir = join(indoor_temp_dir,d.strftime('%Y%m%d'),'csvs')\n print(\"Downloading temps: {}...\".format(d))\n for loc in [('partition','p'),('RA','RA')]:\n fpath = join(csv_dir,'{}_{}.csv'.format(g,loc[1]))\n this_df = add_file_to_dfs(fpath, this_df, [1,2,3], ['T','RH'], loc[0], sensor_swap = sensor_swap_date)\n \n ## add individual 
temp/RH \n for s_ix in range(1,7):\n if isfile(join(csv_dir,'{}_{}.csv'.format(g,s_ix))):\n fpath = join(csv_dir,'{}_{}.csv'.format(g,s_ix))\n this_df = add_file_to_dfs(fpath, this_df, [1,2,3], ['T','RH'], str(s_ix))\n \n ## add operative temp\n if isfile(join(csv_dir,'{}_ot.csv'.format(g))):\n fpath = join(csv_dir, '{}_ot.csv'.format(g))\n this_df = add_file_to_dfs(fpath, this_df, [1,4], ['Top'], 'Top')\n \n ## add CO2\n for d in download_dates_oth:\n print(\"Downloading co2: {}...\".format(d))\n csv_dir = join(s.berk.other_dir,d.strftime('%Y%m%d'))\n \n # pass when the file doesn't exist (aka when\n # the CO2 sensor's batteries died\n fpath = join(csv_dir,'{}_co2.csv'.format(g))\n if not isfile(fpath):\n continue\n \n # otherwise, parse\n this_df = add_file_to_dfs(fpath, this_df, [1,4], ['co2'], 'co2')\n \n this_df = drop_duplicates_and_flags(this_df)\n \n return dfs", "def load_pokec_data(indirname):\n infilename = \"soc-pokec-relationships.txt.gz\"\n tmpdir = tempfile.mkdtemp()\n try:\n fin = gzip.open(os.path.join(indirname, infilename), 'rb')\n filename = os.path.join(tmpdir, \"soc-pokec-relationships.txt\")\n fout = open(filename, 'w')\n fout.write(fin.read())\n\tfout.close()\n G = snap.LoadEdgeList(snap.PNGraph, filename, 0, 1, '\\t')\n finally:\n cleanup_tmpdir(tmpdir)\n\n # https://snap.stanford.edu/data/soc-pokec-readme.txt\n # but 'user_id' column 0 used as dict key so not included here\n colnames = [ 'public', 'completion_percentage',\n 'gender', 'region', 'last_login', 'registration',\n 'AGE', 'body', 'I_am_working_in_field',\n 'spoken_languages', 'hobbies',\n 'I_most_enjoy_good_food', 'pets', 'body_type',\n 'my_eyesight', 'eye_color', 'hair_color',\n 'hair_type', 'completed_level_of_education',\n 'favourite_color', 'relation_to_smoking',\n 'relation_to_alcohol', 'sign_in_zodiac',\n 'on_pokec_i_am_looking_for', 'love_is_for_me',\n 'relation_to_casual_sex', 'my_partner_should_be',\n 'marital_status', 'children',\n 'relation_to_children', 'I_like_movies',\n 'I_like_watching_movie', 'I_like_music',\n 'I_mostly_like_listening_to_music',\n 'the_idea_of_good_evening',\n 'I_like_specialties_from_kitchen', 'fun',\n 'I_am_going_to_concerts', 'my_active_sports',\n 'my_passive_sports', 'profession', 'I_like_books',\n 'life_style', 'music', 'cars', 'politics',\n 'relationships', 'art_culture',\n 'hobbies_interests', 'science_technologies',\n 'computers_internet', 'education', 'sport',\n 'movies', 'travelling', 'health',\n 'companies_brands', 'more']\n profile_colnames = dict([(name, col) for (col, name) in enumerate(colnames)])\n profilepath = os.path.join(indirname, \"soc-pokec-profiles.txt.gz\")\n profiledata = [ (x[0], x[1:]) for x in csv.reader(gzip.open(profilepath, 'rb'), delimiter='\\t') ]\n profiledict = dict([(int(x[0]), x[1]) for x in profiledata])\n assert(G.GetNodes() == len(profiledict))\n return (G, profiledict, profile_colnames)", "def _load_disk(self):\r\n pass", "def load_data(self, read_shelf):\n if read_shelf:\n try:\n # Attempt reading pre-shelved objects first\n self.__read_shelf()\n except Exception as e:\n print(f'Exception while reading the data shelf ({e})')\n # Otherwise, read data from the the json files\n self.__read_json()\n else:\n self.__read_json()", "def postLoad(self):\n pass", "def load_kbs(self) -> None:\n sys.stdout.write(\"Loading KBs...\\n\")\n\n for kb_name in constants.PATHWAY_KBS:\n sys.stdout.write('\\n%s \\n' % kb_name)\n kb_path = os.path.join(self.processed_data_path, 'kb_{}.pickle'.format(kb_name))\n if os.path.exists(kb_path):\n kb = 
PathKB(kb_name)\n kb = kb.load_pickle(kb_name, kb_path)\n self.kbs.append(kb)", "def load_data_pickle(PATH, dataset, filename):\n with open(PATH + '/' + dataset + \"_\" + filename + \".pkl\",\"rb\") as f:\n new_data = pickle.load(f)\n\n # print(filename, \"opened\")\n return new_data", "def load(self):\n raise NotImplementedError", "def load(self):\n raise NotImplementedError", "def load_tree_from_file(fname: str = 'tree') -> dict:\r\n fid = open(fname + \".pkl\", \"rb\")\r\n return pickle.load(fid)", "def load(self, path):\n actor_state_dict = load(path + '_actor.pkl')\n critic_state_dict = load(path + '_critic.pkl')\n self.actor_net.load_state_dict(actor_state_dict)\n self.critic_net.load_state_dict(critic_state_dict)", "def _load(self):\n with qisys.sh.TempDir() as work_dir:\n pkg = portage.xpak.tbz2(self.path)\n pkg.decompose(work_dir, cleanup=0)\n arch, arch_variant = _get_pkg_arch(work_dir)\n with open(os.path.join(work_dir, 'PF'), 'r') as fpf:\n pf = fpf.readline().strip()\n name, version, revision = portage.versions.pkgsplit(pf)\n dependency = dict()\n for dep, dep_filename in _DEPENDENCY.items():\n dep_path = os.path.join(work_dir, dep_filename)\n if not os.path.exists(dep_path):\n dependency[dep] = list()\n continue\n with open(dep_path, 'r') as fdep:\n dependency[dep] = fdep.read().strip().split()\n dependency['all'] = list()\n for dep_list in _DEPENDENCY:\n dependency['all'].extend(dependency[dep_list])\n for dep, dep_list in dependency.items():\n dependency[dep] = list(set(dep_list))\n metadata = {\n 'name': name,\n 'version': version,\n 'revision': revision,\n 'arch': arch,\n 'arch_variant': arch_variant,\n 'dependencies': dependency,\n }\n self.metadata = metadata", "def __getitem__(self, path):\n if path in self._BRANCHES:\n return self._BRANCHES[path](self)\n resource = self.__load__(path)\n if resource is None:\n raise KeyError\n if isinstance(resource, dict):\n return self._child_from_dict(path, resource)\n return resource", "def loadData(dataPathFile):\r\n if dataPathFile[-3:] == 'pkl':\r\n dataBaseDict = pickle.load(open(dataPathFile, 'rb'))\r\n return dataBaseDict\r\n else:\r\n raise Exception('File that is trying to be loaded is not a pickle file\\n')", "def load_datapair(self, ds):\n raise NotImplementedError(\"Define this in your derived checker class\")", "def from_pickle(cls, path_or_bytes):\n if isinstance(path_or_bytes, bytes):\n return dill.loads(path_or_bytes)\n with open(path_or_bytes, \"rb\") as f:\n return dill.load(f)", "def load_obj(name):\n with open('../../data/' + name + '.pkl', 'rb') as f:\n return pickle.load(f)", "def load_obj(name):\n with open('../../data/' + name + '.pkl', 'rb') as f:\n return pickle.load(f)", "def _load(self):\n raise NotImplementedError()", "def loadLabeled(self):\n\n maxNumChannels = self._maxNumChannels # 4\n\n baseFilePath, ext = os.path.splitext(self.path)\n baseFilePath = baseFilePath.replace('_ch1', '')\n baseFilePath = baseFilePath.replace('_ch2', '')\n\n # load mask\n #labeledPath = dvMaskPath + '_mask.tif'\n #labeledData = tifffile.imread(labeledPath)\n\n maskFromLabelGreaterThan = 0\n\n # load labeled\n for channelIdx in range(maxNumChannels):\n channelNumber = channelIdx + 1 # for _ch1, _ch2, ...\n stackListIdx = maxNumChannels + channelIdx # for index into self._stackList\n\n chStr = '_ch' + str(channelNumber)\n labeledPath = baseFilePath + chStr + '_labeled.tif'\n maskPath = baseFilePath + chStr + '_mask.tif'\n\n # if we find _labeeled.tif, load and make a mask\n # o.w. 
if we find _mask.tif then load that\n if os.path.isfile(maskPath):\n print(' bStack.loadLabeled() loading _mask.tif channelNumber:', channelNumber, 'maskPath:', maskPath)\n maskData = tifffile.imread(maskPath)\n self._stackList[stackListIdx] = maskData\n elif os.path.isfile(labeledPath):\n print(' bStack.loadLabeled() loading channelNumber:', channelNumber, 'labeledPath:', labeledPath)\n labeledData = tifffile.imread(labeledPath)\n self._stackList[stackListIdx] = labeledData > maskFromLabelGreaterThan\n else:\n # did not find _mask or _labeled file\n pass\n\n # erode _mask by 1 (before skel) as skel was getting mized up with z-collisions\n #self._dvMask = bimpy.util.morphology.binary_erosion(self._dvMask, iterations=2)\n\n # bVascularTracing.loadDeepVess() uses mask to make skel", "def load(self,previous=True):\n\n\t\tincoming = pickle.load(open(self.filename,'rb'))\n\t\t#---reconstitute things that were bootstrapped\n\t\t#---we do not load spots because e.g. paths might have changed slightly in paths.yaml\n\t\tself.post = incoming.post\n\t\tself.groups = incoming.groups\n\t\tself.slices = incoming.slices\n\t\tself.vars = incoming.vars\n\t\tself.meta = incoming.meta\n\t\tself.calc = incoming.calc\n\t\tself.toc = incoming.toc\n\n\t\t#---retain the incoming workspace for comparison\n\t\tif previous: self.previous = incoming", "def load(self) -> None:\n data = get_dictionary()\n if 'error' in data:\n quit()\n self.data = data", "def _load_saved_ledger(self):\n\n with open(self.ledger_file, 'r') as ledger:\n self.log.debug('Loading blocks from local ledger!')\n i = 0\n for block_str in ledger:\n i += 1\n if self._add_block_str(block_str.strip(), False):\n self.log.info(\"Loaded block %d\", i)\n\n # After loading all blocks from file, tell our miner to continue\n self.last_update = self.latest_time\n self.mining_flag = CONTINUE_MINING", "def _post_load(self):\n pass", "def load_pkl(self, name, file_object=None):\n if file_object:\n f = file_object\n else:\n f = gzip.open(name, 'rb')\n temp = pickle.load(f)\n if temp.ht_version < HT_OLD_COMPAT_VERSION:\n raise ValueError('Version of ' + name + ' is ' + str(temp.ht_version)\n + ' which is not at least ' +\n str(HT_OLD_COMPAT_VERSION))\n # assert temp.ht_version >= HT_COMPAT_VERSION\n params = temp.params\n self.hashbits = temp.hashbits\n self.depth = temp.depth\n if hasattr(temp, 'maxtimebits'):\n self.maxtimebits = temp.maxtimebits\n else:\n self.maxtimebits = _bitsfor(temp.maxtime)\n if temp.ht_version < HT_COMPAT_VERSION:\n # Need to upgrade the database.\n print(\"Loading database version\", temp.ht_version,\n \"in compatibility mode.\")\n # Offset all the nonzero bins with one ID count.\n temp.table += np.array(1 << self.maxtimebits).astype(np.uint32) * (\n temp.table != 0)\n temp.ht_version = HT_VERSION\n self.table = temp.table\n self.ht_version = temp.ht_version\n self.counts = temp.counts\n self.names = temp.names\n self.hashesperid = np.array(temp.hashesperid).astype(np.uint32)\n self.dirty = False\n self.params = params", "def load_data(self):\n (\n self._market_status,\n self._selection_status,\n self._available_to_back,\n self._available_to_lay,\n self._traded_volume\n ) = _load_market_data(self.zip_file)\n\n self.data_is_loaded = True", "def load(self, path, model_id):\n self.load_state_dict(torch.load(os.path.join(path, '{}-retriever'.format(model_id))))", "def _read_data(self):", "def load_ptb_dataset(name='ptb', path='raw_data'):\n path = os.path.join(path, name)\n logging.info(\"Load or Download Penn TreeBank (PTB) dataset > 
{}\".format(path))\n\n # Maybe dowload and uncompress tar, or load exsisting files\n maybe_download_and_extract(PTB_FILENAME, path, PTB_URL, extract=True)\n\n data_path = os.path.join(path, 'simple-examples', 'data')\n train_path = os.path.join(data_path, \"ptb.train.txt\")\n valid_path = os.path.join(data_path, \"ptb.valid.txt\")\n test_path = os.path.join(data_path, \"ptb.test.txt\")\n\n word_to_id = nlp.build_vocab(nlp.read_words(train_path))\n\n train_data = nlp.words_to_word_ids(nlp.read_words(train_path), word_to_id)\n valid_data = nlp.words_to_word_ids(nlp.read_words(valid_path), word_to_id)\n test_data = nlp.words_to_word_ids(nlp.read_words(test_path), word_to_id)\n vocab_size = len(word_to_id)\n\n # logging.info(nlp.read_words(train_path)) # ... 'according', 'to', 'mr.', '<unk>', '<eos>']\n # logging.info(train_data) # ... 214, 5, 23, 1, 2]\n # logging.info(word_to_id) # ... 'beyond': 1295, 'anti-nuclear': 9599, 'trouble': 1520, '<eos>': 2 ... }\n # logging.info(vocabulary) # 10000\n # exit()\n return train_data, valid_data, test_data, vocab_size", "def _load(self, load_dict):\n if self.v_locked:\n raise pex.ParameterLockedException(\n \"Parameter `%s` is locked!\" % self.v_full_name\n )\n\n if \"data\" in load_dict:\n dump = load_dict[\"data\"]\n self._data = pickle.loads(dump)\n else:\n self._logger.warning(\n \"Your parameter `%s` is empty, \"\n \"I did not find any data on disk.\" % self.v_full_name\n )\n\n try:\n self.v_protocol = load_dict[PickleParameter.PROTOCOL]\n except KeyError:\n # For backwards compatibility\n self.v_protocol = PickleParameter._get_protocol(dump)\n\n if \"explored_data\" in load_dict:\n explore_table = load_dict[\"explored_data\"]\n\n name_col = explore_table[\"idx\"]\n\n explore_list = []\n for name_id in name_col:\n arrayname = self._build_name(name_id)\n loaded = pickle.loads(load_dict[arrayname])\n explore_list.append(loaded)\n\n self._explored_range = explore_list\n self._explored = True\n\n self._default = self._data\n self._locked = True", "def load_pkl_data(path):\n with open(path, 'rb') as fi:\n data = pickle.load(fi)\n return data" ]
[ "0.6128238", "0.592409", "0.57337046", "0.5703952", "0.56251985", "0.56168956", "0.5596647", "0.55911064", "0.5567281", "0.5439576", "0.5429689", "0.54277277", "0.5410592", "0.5379778", "0.5364775", "0.53645855", "0.53460085", "0.53446186", "0.5340837", "0.5321691", "0.53162515", "0.52969426", "0.52969426", "0.52969426", "0.52969426", "0.52903396", "0.528235", "0.5262936", "0.52548283", "0.5231266", "0.5229357", "0.521971", "0.52149576", "0.52104956", "0.5207062", "0.5205904", "0.5194503", "0.5188129", "0.5188129", "0.51828074", "0.5176955", "0.51419663", "0.5109035", "0.51068366", "0.5093475", "0.50925505", "0.50741225", "0.5068852", "0.50635093", "0.50625485", "0.506247", "0.50565904", "0.504767", "0.5044531", "0.5041689", "0.5038386", "0.50318676", "0.5029463", "0.502717", "0.5020397", "0.501606", "0.5012256", "0.500046", "0.49998355", "0.49947923", "0.49751958", "0.49648765", "0.49536824", "0.4952551", "0.4949127", "0.49466893", "0.4942953", "0.49372265", "0.49360797", "0.49346137", "0.49325013", "0.4927687", "0.4927687", "0.49269146", "0.4926703", "0.49266192", "0.49223936", "0.4915113", "0.4914168", "0.49121818", "0.4903009", "0.4903009", "0.49022296", "0.48961806", "0.48929837", "0.4891289", "0.48905748", "0.4888107", "0.48814005", "0.4875435", "0.48753023", "0.48702636", "0.48693484", "0.48692128", "0.48684376" ]
0.64722985
0
Merge packages from another branch.
def merge(self, branch): if branch.username != self.username or branch.reponame != self.reponame: raise BranchError("Branch to merge must be in the same repository") context = { "username": self.username, "reponame": self.reponame, "name": self.name } LOG.debug("Merging from %r to %r" % (branch, self)) self._client.postjson(path="/users/%(username)s/repos/%(reponame)s/" "branches/%(name)s/merge" % context, payload={"from_branch": branch.name})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merge(self, branch_names):\n\n self.git(\"merge\", *branch_names)", "def merge(): #Status: WIP\r\n pass", "def merge(self, ref, *args):\n return self.cmd('merge', ref, *args)", "def main():\n\n local_pkgs = set(os.listdir(GIT_FOLDER))\n local_pkgs = set([it.replace('.git', '') for it in local_pkgs])\n\n pkgdb_info = pkgdb_pkg_branch()\n\n pkgdb_pkgs = set(pkgdb_info.keys())\n\n ## Commented out as we keep the git of retired packages while they won't\n ## show up in the information retrieved from pkgdb.\n\n #if (local_pkgs - pkgdb_pkgs):\n #print 'Some packages are present locally but not on pkgdb:'\n #print ', '.join(sorted(local_pkgs - pkgdb_pkgs))\n\n if (pkgdb_pkgs - local_pkgs):\n print 'Some packages are present in pkgdb but not locally:'\n print ', '.join(sorted(pkgdb_pkgs - local_pkgs))\n\n tofix = set()\n for pkg in sorted(pkgdb_info):\n pkgdb_branches = pkgdb_info[pkg]\n git_branches = get_git_branch(pkg)\n diff = (pkgdb_branches - git_branches)\n if diff:\n print '%s missing: %s' % (pkg, ','.join(sorted(diff)))\n tofix.add(pkg)\n branch_package(pkg, diff)\n\n if tofix:\n print 'Packages fixed (%s): %s' % (\n len(tofix), ', '.join(sorted(tofix)))", "def install_branch(branch):\n\n # if it's already in the virtualenv, remove it\n ver = '.'.join(map(str,(sys.version_info.major,sys.version_info.minor)))\n sitepack = os.path.join(virtual_dir, 'lib','python'+ver, 'site-packages')\n if os.path.exists(sitepack):\n dir_list = os.listdir(sitepack)\n else:\n dir_list = []\n for f in dir_list:\n if 'statsmodels' in f:\n shutil.rmtree(os.path.join(sitepack, f))\n\n # checkout the branch\n os.chdir(gitdname)\n retcode = subprocess.call('git checkout ' + branch, shell=True)\n if retcode != 0:\n msg = \"\"\"Could not checkout out branch %s\"\"\" % branch\n raise Exception(msg)\n\n # build and install\n retcode = subprocess.call(\" \".join([virtual_python, 'setup.py', 'build']),\n shell=True)\n if retcode != 0:\n msg = \"\"\" Could not build branch %s\"\"\" % branch\n raise Exception(msg)\n retcode = subprocess.call(\" \".join([virtual_python, os.path.join(gitdname,\n 'setup.py'), 'install']), shell=True)\n if retcode != 0:\n os.chdir(dname)\n msg = \"\"\"Could not install branch %s\"\"\" % branch\n raise Exception(msg)\n os.chdir(dname)", "def pkgdb_pkg_branch():\n url = '%s/api/vcs' % PKGDB_URL\n req = requests.get(url, params={'format': 'json'})\n data = req.json()\n\n output = {}\n for pkg in data['packageAcls']:\n if pkg in output:\n if VERBOSE:\n print 'Strange package: %s, it is present twice in the ' \\\n 'pkgdb output' % pkg\n output[pkg].updated(data['packageAcls'][pkg].keys())\n else:\n output[pkg] = set(data['packageAcls'][pkg].keys())\n\n return output", "def merge():\n click.echo(\"Not implemented yet. 
In the future, this command will be used for merging models.\")\n sys.exit(-2)", "def svn_fs_merge(*args):\r\n return _fs.svn_fs_merge(*args)", "def __gitCommitMerge(self):\n self.vcs.gitCommitMerge(self.project.getProjectPath())", "def merge(src):\n ok, out, err = common.git_call('merge {0}'.format(src))\n return _parse_merge_output(ok, out, err)", "def sync_from_upstream(self):\n if not self.missing_branches:\n self.log(f\"All branches are synced, nothing to do here.\")\n return\n\n with tempfile.TemporaryDirectory() as tmpdir:\n src_path = Path(tmpdir) / self.deb_model.src\n self.deb_model.base.clone(cwd=tmpdir)\n for branch in self.missing_branches:\n self.log(f\"Processing branch {branch}\")\n self.deb_model.base.checkout(branch, new_branch=True, cwd=str(src_path))\n\n changelog_fn = src_path / \"debian/changelog\"\n changelog_fn_tpl = src_path / \"debian/changelog.in\"\n\n k8s_major_minor = semver.VersionInfo.parse(branch.lstrip(\"v\"))\n\n changelog_context = {\n \"deb_version\": f\"{str(k8s_major_minor)}-0\",\n }\n\n self.log(f\"Writing template vars {changelog_context}\")\n changelog_out = changelog_fn_tpl.read_text()\n changelog_out = self.render(changelog_fn_tpl, changelog_context)\n changelog_fn.write_text(changelog_out)\n\n self.log(f\"Committing {branch}\")\n self.deb_model.base.add([str(changelog_fn)], cwd=str(src_path))\n self.deb_model.base.commit(\n f\"Creating branch {branch}\", cwd=str(src_path)\n )\n self.deb_model.base.push(ref=branch, cwd=str(src_path))", "def merge(self, other_btree):\n pass", "def pull(self, repo, remote_name='github,gitee', branch='master'):\r\n repo.remotes.set_url('gitee', self.UrlGitee)\r\n repo.remotes.set_url('github', self.UrlGithub)\r\n for remote in repo.remotes:\r\n if remote.name in remote_name:\r\n AppLog.info('update from: {}'.format(remote.name))\r\n remote.fetch()\r\n remote_master_id = repo.lookup_reference(\r\n 'refs/remotes/origin/%s' % (branch)).target\r\n merge_result, _ = repo.merge_analysis(remote_master_id)\r\n # Up to date, do nothing\r\n if merge_result & pygit2.GIT_MERGE_ANALYSIS_UP_TO_DATE:\r\n return\r\n # We can just fastforward\r\n elif merge_result & pygit2.GIT_MERGE_ANALYSIS_FASTFORWARD:\r\n repo.checkout_tree(repo.get(remote_master_id))\r\n try:\r\n master_ref = repo.lookup_reference('refs/heads/%s' %\r\n (branch))\r\n master_ref.set_target(remote_master_id)\r\n except KeyError:\r\n repo.create_branch(branch, repo.get(remote_master_id))\r\n repo.head.set_target(remote_master_id)\r\n return\r\n elif merge_result & pygit2.GIT_MERGE_ANALYSIS_NORMAL:\r\n repo.merge(remote_master_id)\r\n\r\n if repo.index.conflicts is not None:\r\n for conflict in repo.index.conflicts:\r\n for c in conflict:\r\n if not c:\r\n continue\r\n AppLog.error('Conflicts found in: %s', c.path)\r\n raise AssertionError('Conflicts, ahhhhh!!')\r\n\r\n user = repo.default_signature\r\n tree = repo.index.write_tree()\r\n repo.create_commit('HEAD', user, user, 'Merge!', tree,\r\n [repo.head.target, remote_master_id])\r\n # We need to do this or git CLI will think we are still\r\n # merging.\r\n repo.state_cleanup()\r\n return\r\n else:\r\n raise AssertionError('Unknown merge analysis result')", "def pull_submodules():\n subprocess.call([\"git\", \"submodule\", \"init\"])\n subprocess.call([\"git\", \"submodule\", \"update\", \"--recursive\", \"--remote\"])", "def update_from_remote(remote, force=False):\n git_cmd('fetch', [remote])\n for pkg in TEST_PKGS:\n update_pkg_branches(pkg, remote, force=force)", "def _merge(self):\n raise NotImplementedError", 
"def git_upgraded_pkgs(self):\n\n self.extract_from_cachedir()\n self.etc_commits.added.commit()\n\n cherry_pick_sha = None\n if self.etc_commits.cherry_pick.rpaths:\n self.etc_commits.cherry_pick.commit()\n cherry_pick_sha = self.repo.git_cmd('rev-list -1 HEAD --')\n\n # Clean the working area of the files that are not under version\n # control.\n self.repo.git_cmd('clean -d -x -f')\n\n # Update the master-tmp branch with new files.\n if self.master_commits.added.rpaths:\n self.repo.checkout('master-tmp')\n for rpath in self.master_commits.added.rpaths:\n repo_file = os.path.join(self.repodir, rpath)\n if os.path.lexists(repo_file):\n warn('adding %s to the master-tmp branch but this file'\n ' already exists' % rpath)\n copy_file(rpath, self.root_dir, self.repodir,\n repo_file=repo_file)\n self.master_commits.added.commit()\n\n return cherry_pick_sha", "def merge(*args):\n from ..operators.observable.merge import merge_\n return merge_(*args)", "def merge(self, other):\n\n for n in other.cfg_nodes:\n self.insert_cfgnode(n)\n\n for ins_addr, outs in other.out_branches.items():\n if ins_addr in self.out_branches:\n for stmt_idx, item in outs.items():\n if stmt_idx in self.out_branches[ins_addr]:\n self.out_branches[ins_addr][stmt_idx].merge(item)\n else:\n self.out_branches[ins_addr][stmt_idx] = item\n\n else:\n item = next(iter(outs.values()))\n self.out_branches[ins_addr][item.stmt_idx] = item", "def merge_feature(repo, cfg, model, date):\n repo.index.merge_tree(\n repo.heads.master, base=repo.merge_base(repo.heads.master, repo.head)\n )\n kwargs = {\n **author_committer_facts(model, date),\n **dict(head=True, parent_commits=(repo.heads.master.commit, repo.head.commit)),\n }\n repo.index.commit(message_of(cfg, model.ticket, \"merge_commit_words\"), **kwargs)\n repo.heads.master.commit = repo.head.commit\n repo.head.reference = repo.heads.master\n repo.delete_head(model.ticket)\n model = groom_model(model)\n\n return repo, model", "def build(self, latest_branch_version):\n self.upstream_model.clone()\n self.upstream_model.checkout(\n ref=f\"tags/v{str(latest_branch_version)}\",\n force=True,\n cwd=self.upstream_model.name,\n )\n with tempfile.TemporaryDirectory() as tmpdir:\n self.log(f\"Building {self.deb_model.name} debian package\")\n self.deb_model.base.clone(cwd=tmpdir)\n self.deb_model.base.checkout(\n ref=f\"v{str(latest_branch_version)}\",\n force=True,\n cwd=f\"{tmpdir}/{self.deb_model.name}\",\n )\n self.bump_revision(cwd=f\"{tmpdir}/{self.deb_model.name}\")\n self.write_debversion(\n semver.VersionInfo.parse(latest_branch_version),\n src_path=Path(tmpdir) / self.deb_model.name,\n )\n cmd_ok(\n f\"cp -a {tmpdir}/{self.deb_model.name}/* {self.upstream_model.name}/.\",\n shell=True,\n )\n self.source(cwd=self.upstream_model.name)\n self.deb_model.base.add(\n [\"debian/changelog\"], cwd=f\"{tmpdir}/{self.deb_model.name}\"\n )\n self.deb_model.base.commit(\n \"Automated Build\", cwd=f\"{tmpdir}/{self.deb_model.name}\"\n )\n self.deb_model.base.push(\n ref=f\"v{str(latest_branch_version)}\",\n cwd=f\"{tmpdir}/{self.deb_model.name}\",\n )", "def __gitSubmodulesSync(self):\n self.vcs.gitSubmoduleSync(self.project.getProjectPath())", "def merge(*args):\n return _libsbml.Unit_merge(*args)", "def action_merge(branch_dir, branch_props):\n # Check branch directory is ready for being modified\n check_dir_clean(branch_dir)\n\n source_revs, phantom_revs, reflected_revs, initialized_revs = \\\n analyze_source_revs(branch_dir, opts[\"source-url\"],\n find_reflected=\n 
should_find_reflected(branch_dir))\n\n if opts[\"revision\"]:\n revs = RevisionSet(opts[\"revision\"])\n else:\n revs = source_revs\n\n blocked_revs = get_blocked_revs(branch_dir, opts[\"source-pathid\"])\n merged_revs = opts[\"merged-revs\"]\n\n # Show what we're doing\n if opts[\"verbose\"]: # just to avoid useless calculations\n if merged_revs & revs:\n report('\"%s\" already contains revisions %s' % (branch_dir,\n merged_revs & revs))\n if phantom_revs:\n report('memorizing phantom revision(s): %s' % phantom_revs)\n if reflected_revs:\n report('memorizing reflected revision(s): %s' % reflected_revs)\n if blocked_revs & revs:\n report('skipping blocked revisions(s): %s' % (blocked_revs & revs))\n if initialized_revs:\n report('skipping initialized revision(s): %s' % initialized_revs)\n\n # Compute final merge set.\n revs = revs - merged_revs - blocked_revs - reflected_revs - \\\n phantom_revs - initialized_revs\n if not revs:\n report('no revisions to merge, exiting')\n return\n\n # When manually marking revisions as merged, we only update the\n # integration meta data, and don't perform an actual merge.\n record_only = opts[\"record-only\"]\n\n if record_only:\n report('recording merge of revision(s) %s from \"%s\"' %\n (revs, opts[\"source-url\"]))\n else:\n report('merging in revision(s) %s from \"%s\"' %\n (revs, opts[\"source-url\"]))\n\n # Do the merge(s). Note: the starting revision number to 'svn merge'\n # is NOT inclusive so we have to subtract one from start.\n # We try to keep the number of merge operations as low as possible,\n # because it is faster and reduces the number of conflicts.\n old_block_props = get_block_props(branch_dir)\n merge_metadata = logs[opts[\"source-url\"]].merge_metadata()\n block_metadata = logs[opts[\"source-url\"]].block_metadata()\n for start,end in minimal_merge_intervals(revs, phantom_revs):\n if not record_only:\n # Preset merge/blocked properties to the source value at\n # the start rev to avoid spurious property conflicts\n set_merge_props(branch_dir, merge_metadata.get(start - 1))\n set_block_props(branch_dir, block_metadata.get(start - 1))\n # Do the merge\n svn_command(\"merge --force -r %d:%d %s %s\" % \\\n (start - 1, end, opts[\"source-url\"], branch_dir))\n # TODO: to support graph merging, add logic to merge the property\n # meta-data manually\n\n # Update the set of merged revisions.\n merged_revs = merged_revs | revs | reflected_revs | phantom_revs | initialized_revs\n branch_props[opts[\"source-pathid\"]] = str(merged_revs)\n set_merge_props(branch_dir, branch_props)\n # Reset the blocked revs\n set_block_props(branch_dir, old_block_props)\n\n # Write out commit message if desired\n if opts[\"commit-file\"]:\n f = open(opts[\"commit-file\"], \"w\")\n if record_only:\n f.write('Recorded merge of revisions %s via %s from \\n' % \\\n (revs, NAME))\n else:\n f.write('Merged revisions %s via %s from \\n' % \\\n (revs, NAME))\n f.write('%s\\n' % opts[\"source-url\"])\n if opts[\"commit-verbose\"]:\n f.write(\"\\n\")\n f.write(construct_merged_log_message(opts[\"source-url\"], revs))\n\n f.close()\n report('wrote commit message to \"%s\"' % opts[\"commit-file\"])", "def git_branch(self, app, branch):\n if app == self.PROJECT_NAME:\n app_path = self.PROJECT_DIR\n else:\n raise ValueError('Unknown app')\n\n with lcd(app_path):\n self.local('git pull && git checkout %s' % branch)\n\n self.display('%s has been successfully switched to tag/branch %s.' 
% (app, branch), color='green')", "def merge(self):\n collapsable = self.findSameSubtrees()\n\n dummy = GraphiusNode(-1, None)\n for i, node in self.nodes.items():\n dummy.addNeighbor(node)\n\n # Perform the merge\n self.mergeHelper(dummy, collapsable)\n\n # Regenerate trees\n newNodes = self.dfs(dummy)\n assert(-1 not in newNodes)\n self.nodes = newNodes", "def update_source():\n\n require('environment', provided_by=env.environments)\n with cd(env.code_root):\n sudo('git pull', user=env.deploy_user)\n sudo('git checkout %(branch)s' % env, user=env.deploy_user)", "def update_from_repo():\n\treturn", "def on_merge(self, to_be_merged, merge_result, context):\n pass", "def svn_client_merge2(char_source1, svn_opt_revision_t_revision1, char_source2, svn_opt_revision_t_revision2, char_target_wcpath, svn_boolean_t_recurse, svn_boolean_t_ignore_ancestry, svn_boolean_t_force, svn_boolean_t_dry_run, apr_array_header_t_merge_options, svn_client_ctx_t_ctx, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def svn_client_merge(char_source1, svn_opt_revision_t_revision1, char_source2, svn_opt_revision_t_revision2, char_target_wcpath, svn_boolean_t_recurse, svn_boolean_t_ignore_ancestry, svn_boolean_t_force, svn_boolean_t_dry_run, svn_client_ctx_t_ctx, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def update_latest_branch (product, which, main_branch):\n\n name = \"Latest_ACE7TAO3_\" + which\n\n vprint ('Fast-forwarding', name, 'to', main_branch)\n ex (\"cd $DOC_ROOT/\" + product + \" && git fetch . \" + main_branch + \":\" + name)", "def merge_and_cleanup(self):\n print('Checking out base branch and merging topic branch...')\n self.git.checkout(self.base_branch)\n self.git.merge('--ff-only', self.topic_branch)\n\n # Push merge and delete topic branch\n print('Pushing base branch with topic branch merged...')\n self.git.push()\n print('Deleting remote topic branch...')\n self.git.push('origin', ':{}'.format(self.topic_branch))\n\n # Optionally delete local topic branch\n if self.delete_local:\n print('Deleting local topic branch...')\n self.git.branch('-D', self.topic_branch)", "def updateBuildArea(self):\r\n\r\n self.initializeBuildArea()\r\n\r\n for obj in self.config[\"repos\"]:\r\n if \"branch\" in obj:\r\n self.gitCheckoutBranch(obj[\"path\"], obj[\"branch\"])\r\n elif \"rev\" in obj:\r\n self.gitCheckoutRevision(obj[\"path\"], obj[\"rev\"])", "def main(branch):\n try:\n # Ensure that we're in a git repository. This command is silent unless\n # you're not actually in a git repository, in which case, you receive a\n # \"Not a git repository\" error message.\n output = subprocess.check_output(['git', 'rev-parse']).decode('utf-8')\n sys.stdout.write(output)\n except subprocess.CalledProcessError:\n # Bail if we're not in a git repository.\n return\n\n # This behavior ensures a better user experience for those that aren't\n # intimately familiar with git.\n ensure_remote_branch_is_tracked(branch)\n\n # Switch to the specified branch and update it.\n subprocess.check_call(['git', 'checkout', '--quiet', branch])\n\n # Pulling is always safe here, because we never commit to this branch.\n subprocess.check_call(['git', 'pull', '--quiet'])\n\n # Checkout the top commit in the branch, effectively going \"untracked.\"\n subprocess.check_call(['git', 'checkout', '--quiet', '%s~0' % branch])\n\n # Clean up the repository of Python cruft. 
Because we've just switched\n # branches and compiled Python files should not be version controlled,\n # there are likely leftover compiled Python files sitting on disk which may\n # confuse some tools, such as sqlalchemy-migrate.\n subprocess.check_call(['find', '.', '-name', '\"*.pyc\"', '-delete'])\n\n # For the sake of user experience, give some familiar output.\n print('Your branch is up to date with branch \\'origin/%s\\'.' % branch)", "def merge(self, merge_strategy=None, close_source_branch=None):\n self._check_if_open()\n\n if merge_strategy is not None and merge_strategy not in self.MERGE_STRATEGIES:\n raise ValueError(\"merge_strategy must be {}\".format(self.MERGE_STRATEGIES))\n\n data = {\n \"close_source_branch\": close_source_branch or self.close_source_branch,\n \"merge_strategy\": merge_strategy,\n }\n\n return self.post(\"merge\", data)", "def checkout(branch=\"lf-dev\"):\n with cd(FOLDER):\n sudo('git fetch', user='tomcat')\n sudo('git checkout %s' % branch, user='tomcat')\n status()", "def __gitSubmodulesUpdate(self):\n self.vcs.gitSubmoduleUpdate(self.project.getProjectPath())", "def refresh():\n git.fetch()\n output = str(git.merge('--ff-only')).strip()\n if output != 'Already up to date.':\n print(output)\n git.fetch('--tags')", "def hxlmerge():\n run_script(hxlmerge_main)", "def update():\n call('git -C ~/norminette+ pull', shell=True)", "def checkout_latest():\n with cd(env.repo_path):\n run('git checkout %(branch)s;' % env)\n run('git pull origin %(branch)s' % env)", "def pull():\n _with_deploy_env(['git pull'])", "def mergeWith(self, others):", "def merge(self, ref):\n active = self._git.active_branch\n active_commit = self._git.active_branch.commit\n active_name = active.name\n merge_base = self._git.merge_base(active, ref)\n ref_commit = self._git.commit(ref)\n self._git.index.merge_tree(ref_commit, base=merge_base)\n merge_commit = self._git.index.commit(\n f\"Merged {ref} into {active_name}\",\n parent_commits=(active_commit, ref_commit),\n )\n self.log.error(\"MERGE %s\", merge_commit)\n self._git.active_branch.reference = merge_commit\n active.checkout()\n self._git.head.reset(index=True, working_tree=True)", "def build_test_pkg(pkg_name, branch, outdir, silent_build=False):\n LOG.info('Building package %s', pkg_name)\n if branch == 'master':\n tag_pattern = 'srcdata/%s/release/*' % pkg_name\n else:\n tag_pattern = 'srcdata/%s/%s/release/*' % (pkg_name, branch)\n\n build_branch = 'srcdata/%s/%s' % (pkg_name, branch)\n # Check for hooks\n hooks = {}\n try:\n hook_py = git_cmd('show', ['%s:.bootstrap_hooks.py' % build_branch],\n True)\n LOG.info('Loading bootstrap hooks')\n exec(hook_py, hooks, hooks)\n except GitError:\n LOG.debug('No hooks found for %s', build_branch)\n tags = git_cmd('tag', ['-l', tag_pattern], True)\n for ind, tag in enumerate(tags):\n builddir = tempfile.mkdtemp(dir='.',\n prefix='build-%s-%s_' % (pkg_name, ind))\n do_build(tag, builddir, silent_build)\n\n # Run postbuild_all hook\n if 'postbuild' in hooks:\n LOG.info('Running postbuild_all() hook for %s / %s', pkg_name, tag)\n hooks['postbuild'](builddir, tag, LOG)\n\n # Create subdirs\n orig_dir = '%s/%s' % (outdir, 'orig')\n rpm_dir = '%s/%s' % (outdir, 'rpm')\n for path in (orig_dir, rpm_dir):\n if not os.path.isdir(path):\n os.mkdir(path)\n\n for fname in glob('%s/SRPMS/*rpm' % builddir):\n LOG.debug('Copying %s -> %s', fname, outdir)\n shutil.copy(fname, outdir)\n for fname in glob('%s/RPMS/*/*rpm' % builddir):\n LOG.debug('Copying %s -> %s', fname, rpm_dir)\n shutil.copy(fname, 
rpm_dir)\n for fname in os.listdir('%s/SOURCES' % builddir):\n if (fnmatch(fname, 'gbp*tar.gz') or fnmatch(fname, 'gbp*tar.bz2') or\n fnmatch(fname, 'gbp*zip')):\n LOG.debug('Copying %s -> %s', fname, orig_dir)\n\n shutil.copy('%s/SOURCES/%s' % (builddir, fname), orig_dir)\n shutil.rmtree(builddir)", "def __gitMergedBranchList(self):\n self.vcs.gitListTagBranch(self.project.getProjectPath(), False,\n listAll=False, merged=True)", "def build_packages(self, board, args=None, **kwargs):\n self.cros_sdk('build packages',\n ['./build_packages', '--board', board],\n args, **kwargs)", "def _submodule_pull_ff(path, branch):\n slab_logger.log(15, 'Fast forward pull of all ccs-data submodules')\n # Note: Branch defaults to master in the click application\n # TODO: Do more error checking here --> after debugging, definitely\n # TODO: checkout a branch ifexists in origin only--> not replacing git\n # or setup a tracking branch if there's nothing local or fail.\n path_to_reporoot = os.path.split(path)\n path_to_reporoot = os.path.split(path_to_reporoot[0])\n path_to_reporoot = path_to_reporoot[0]\n returncode, myinfo = run_this('git submodule foreach git pull --ff-only origin %s' %\n (branch), path_to_reporoot)\n return(returncode, myinfo)", "def patch_repos(self):", "def branched_repo(tmp_path_factory):\n tmpdir = tmp_path_factory.mktemp(\"branched_repo\")\n git_repo = GitRepoFixture.create_repository(tmpdir)\n git_repo.add(\n {\n \"del_master.py\": \"original\",\n \"del_branch.py\": \"original\",\n \"del_index.py\": \"original\",\n \"del_worktree.py\": \"original\",\n \"mod_master.py\": \"original\",\n \"mod_branch.py\": \"original\",\n \"mod_both.py\": \"original\",\n \"mod_same.py\": \"original\",\n \"keep.py\": \"original\",\n },\n commit=\"Initial commit\",\n )\n branch_point = git_repo.get_hash()\n git_repo.add(\n {\n \"del_master.py\": None,\n \"add_master.py\": \"master\",\n \"mod_master.py\": \"master\",\n \"mod_both.py\": \"master\",\n \"mod_same.py\": \"same\",\n },\n commit=\"master\",\n )\n git_repo.create_branch(\"branch\", branch_point)\n git_repo.add(\n {\n \"del_branch.py\": None,\n \"mod_branch.py\": \"branch\",\n \"mod_both.py\": \"branch\",\n \"mod_same.py\": \"same\",\n },\n commit=\"branch\",\n )\n git_repo.add(\n {\"del_index.py\": None, \"add_index.py\": \"index\", \"mod_index.py\": \"index\"}\n )\n (git_repo.root / \"del_worktree.py\").unlink()\n (git_repo.root / \"add_worktree.py\").write_bytes(b\"worktree\")\n (git_repo.root / \"mod_worktree.py\").write_bytes(b\"worktree\")\n return git_repo", "def lpshipit(directory, source_branch, target_branch, mp_owner):\n lp = _get_launchpad_client()\n lp_user = lp.me\n\n print('Retrieving Merge Proposals from Launchpad...')\n person = lp.people[lp_user.name if mp_owner is None else mp_owner]\n mps = person.getMergeProposals(status=['Needs review', 'Approved'])\n mp_summaries = summarize_mps(mps)\n if mp_summaries:\n\n def urwid_exit_on_q(key):\n if key in ('q', 'Q'):\n raise urwid.ExitMainLoop()\n\n def urwid_exit_program(button):\n raise urwid.ExitMainLoop()\n\n def mp_chosen(user_args, button, chosen_mp):\n source_branch, target_branch, directory, repo, checkedout_branch =\\\n user_args['source_branch'], \\\n user_args['target_branch'], \\\n user_args['directory'], \\\n user_args['repo'], \\\n user_args['checkedout_branch']\n\n local_branches = [branch.name for branch in repo.branches]\n\n def source_branch_chosen(user_args, button, chosen_source_branch):\n chosen_mp, target_branch, directory, repo, checkedout_branch =\\\n 
user_args['chosen_mp'], \\\n user_args['target_branch'], \\\n user_args['directory'], \\\n user_args['repo'], \\\n user_args['checkedout_branch']\n\n def target_branch_chosen(user_args, button, target_branch):\n\n source_branch, chosen_mp, directory, repo, \\\n checkedout_branch = \\\n user_args['source_branch'], \\\n user_args['chosen_mp'], \\\n user_args['directory'], \\\n user_args['repo'], \\\n user_args['checkedout_branch']\n\n if target_branch != source_branch:\n local_git = git.Git(directory)\n\n commit_message = build_commit_msg(\n author=chosen_mp['author'],\n reviewers=\",\".join(\n chosen_mp['reviewers']),\n source_branch=source_branch,\n target_branch=target_branch,\n commit_message=chosen_mp[\n 'commit_message'],\n mp_web_link=chosen_mp['web']\n )\n\n repo.branches[target_branch].checkout()\n\n local_git.execute(\n [\"git\", \"merge\", \"--no-ff\", source_branch,\n \"-m\", commit_message])\n\n merge_summary = \"{source_branch} has been merged \" \\\n \"in to {target_branch} \\nChanges \" \\\n \"have _NOT_ been pushed\".format(\n source_branch=source_branch,\n target_branch=target_branch\n )\n\n merge_summary_listwalker = urwid.SimpleFocusListWalker(\n list())\n merge_summary_listwalker.append(\n urwid.Text(u'Merge Summary'))\n merge_summary_listwalker.append(\n urwid.Divider())\n merge_summary_listwalker.append(\n urwid.Text(merge_summary))\n merge_summary_listwalker.append(\n urwid.Divider())\n button = urwid.Button(\"Exit\")\n urwid.connect_signal(button,\n 'click',\n urwid_exit_program)\n merge_summary_listwalker.append(button)\n merge_summary_box = urwid.ListBox(\n merge_summary_listwalker)\n loop.widget = merge_summary_box\n else:\n error_text = urwid.Text('Source branch and target '\n 'branch can not be the same. '\n '\\n\\nPress Q to exit.')\n error_box = urwid.Filler(error_text, 'top')\n loop.unhandled_input = urwid_exit_on_q\n loop.widget = error_box\n\n user_args = {'chosen_mp': chosen_mp,\n 'source_branch': chosen_source_branch,\n 'directory': directory,\n 'repo': repo,\n 'checkedout_branch': checkedout_branch}\n if not target_branch:\n target_branch_listwalker = urwid.SimpleFocusListWalker(\n list())\n target_branch_listwalker.append(\n urwid.Text(u'Target Branch'))\n target_branch_listwalker.append(urwid.Divider())\n focus_counter = 1\n focus = None\n for local_branch in local_branches:\n focus_counter = focus_counter + 1\n button = urwid.Button(local_branch)\n urwid.connect_signal(button,\n 'click',\n target_branch_chosen,\n local_branch,\n user_args=[user_args])\n target_branch_listwalker.append(button)\n\n if local_branch == chosen_mp['target_branch']:\n focus = focus_counter\n if checkedout_branch \\\n and hasattr(checkedout_branch, 'name') \\\n and local_branch == checkedout_branch.name \\\n and focus is None:\n focus = focus_counter\n\n if focus:\n target_branch_listwalker.set_focus(focus)\n\n target_branch_box = urwid.ListBox(target_branch_listwalker)\n loop.widget = target_branch_box\n else:\n target_branch_chosen(user_args, None, target_branch)\n user_args = {'chosen_mp': chosen_mp,\n 'target_branch': target_branch,\n 'directory': directory,\n 'repo': repo,\n 'checkedout_branch': checkedout_branch}\n if not source_branch:\n source_branch_listwalker = urwid.SimpleFocusListWalker(list())\n source_branch_listwalker.append(urwid.Text(u'Source Branch'))\n source_branch_listwalker.append(urwid.Divider())\n focus_counter = 1\n focus = None\n for local_branch in local_branches:\n focus_counter = focus_counter + 1\n button = urwid.Button(local_branch)\n 
urwid.connect_signal(button, 'click',\n source_branch_chosen,\n local_branch,\n user_args=[user_args])\n source_branch_listwalker.append(button)\n if local_branch == chosen_mp['source_branch']:\n focus = focus_counter\n if checkedout_branch \\\n and hasattr(checkedout_branch, 'name') \\\n and local_branch == checkedout_branch.name \\\n and focus is None:\n focus = focus_counter\n\n if focus:\n source_branch_listwalker.set_focus(focus)\n\n source_branch_box = urwid.ListBox(source_branch_listwalker)\n loop.widget = source_branch_box\n else:\n source_branch_chosen(user_args, None, source_branch)\n\n def directory_chosen(directory):\n repo = git.Repo(directory)\n checkedout_branch = None\n try:\n checkedout_branch = repo.active_branch\n except TypeError:\n # This is OK, it more than likely means a detached HEAD\n pass\n listwalker = urwid.SimpleFocusListWalker(list())\n listwalker.append(urwid.Text(u'Merge Proposal to Merge'))\n listwalker.append(urwid.Divider())\n user_args = {'source_branch': source_branch,\n 'target_branch': target_branch,\n 'directory': directory,\n 'repo': repo,\n 'checkedout_branch': checkedout_branch\n }\n\n for mp in mp_summaries:\n button = urwid.Button(mp['summary'])\n urwid.connect_signal(button, 'click', mp_chosen, mp,\n user_args=[user_args])\n listwalker.append(button)\n mp_box = urwid.ListBox(listwalker)\n loop.unhandled_input = urwid_exit_on_q\n loop.widget = mp_box\n\n if not directory:\n class GetDirectoryBox(urwid.Filler):\n def keypress(self, size, key):\n if key != 'enter':\n return super(GetDirectoryBox, self).keypress(size, key)\n chosen_directory = directory_q.edit_text.strip()\n if chosen_directory == '':\n chosen_directory = os.getcwd()\n if os.path.isdir(chosen_directory):\n directory_chosen(chosen_directory)\n else:\n error_text = urwid.Text('{} is not a valid directory. 
'\n '\\n\\nPress Q to exit.'\n .format(chosen_directory))\n error_box = urwid.Filler(error_text, 'top')\n loop.unhandled_input = urwid_exit_on_q\n loop.widget = error_box\n\n directory_q = urwid.Edit(\n u\"Which directory [{current_directory}]?\\n\".format(\n current_directory=os.getcwd()\n ))\n fill = GetDirectoryBox(directory_q, 'top')\n loop = urwid.MainLoop(fill, unhandled_input=urwid_exit_on_q)\n loop.run()\n\n\n else:\n print(\"You have no Merge Proposals in either \"\n \"'Needs review' or 'Approved' state\")", "def __gitSubmodulesUpdateRemote(self):\n self.vcs.gitSubmoduleUpdate(self.project.getProjectPath(),\n remote=True)", "def git_update(c):\n c.run('git submodule update --recursive --remote')", "def pin_gitmodules(self):\n self.gitmodules_status = gitutil.save_submodule_status(self.dist_dir)", "def pull(args):\n do_all_projects(args + [\"pull\"])", "def checkout2(repo, branch, overwrite=True):\n cmd = 'git checkout %s' % (branch,)\n out = repo.issue(cmd, error='return')\n if overwrite and out is not None:\n repo._handle_overwrite_error(out)\n repo._handle_abort_merge_rebase(out)\n # Retry\n repo.issue(cmd)", "def push(self, base_repo, branch=\"master\"):\n base_repo.push_to(self, branch)", "def merge_from(self, other):\n assert not self.is_final\n if self.parent is not None:\n assert other.parent is not None\n self.parent.merge_from(other.parent)\n self.isolated_names.update(other.isolated_names)\n self.read.update(other.read)\n self.modified.update(other.modified)\n self.bound.update(other.bound)\n self.deleted.update(other.deleted)\n self.annotations.update(other.annotations)\n self.params.update(other.params)", "def combine(self, other) -> None:\n assert self.linenum == other.linenum\n assert self.source == other.source\n if other.coverage >= 0:\n if self.coverage < 0:\n self.coverage = other.coverage\n else:\n self.coverage += other.coverage\n for branch in other.values():\n self.add_branch(branch)", "def pull_nightly_version(spdir):\n nightly_version = _nightly_version(spdir)\n cmd = [\"git\", \"merge\", nightly_version]\n p = subprocess.run(cmd, check=True)", "def __merge(self, year, month, day):\n print 'Merge...'\n logging.info('[merge]->Merge...')\n\n k7dir = self.aodSetting.k7_dir # path.join(baseDir, 'k7')\n mdir = self.aodSetting.merge_dir # path.join(baseDir, 'merge')\n t = datetime.datetime(year, month, day)\n\n stids = self.aodSetting.stations.getstIds()\n\n # Loop - merge k7 files for each station\n for stid in stids:\n stk7dir = path.join(\n k7dir, stid, t.strftime('%Y%m'), t.strftime('%d'))\n if not path.isdir(stk7dir):\n continue\n\n fns = glob.glob(path.join(stk7dir, '*.k7'))\n if len(fns) == 0:\n continue\n\n # check k7 and remove it if empty file\n for fn in fns:\n if path.getsize(fn) == 0:\n print 'Empty K7 [{0}] => {1} '.format(stid, fn)\n logging.info(\n '[merge]->Empty K7 [{0}] => {1}'.format(stid, fn))\n fns.remove(fn)\n\n stmdir = path.join(mdir, stid, t.strftime('%Y%m'))\n if not os.path.exists(stmdir):\n os.makedirs(stmdir)\n\n outfn = path.join(stmdir, stid + '_' +\n t.strftime('%Y%m%d') + '_merge.k7')\n spdata.merge_files(fns, outfn)\n print 'Merge [{0}] => {1}'.format(stid, outfn)\n logging.info('[merge]->Merge [{0}] => {1}'.format(stid, outfn))\n\n print 'Merge Done!'\n logging.info('[merge]->Merge Done!')", "def push_latest_branch (product, which, main_branch):\n\n name = \"Latest_ACE7TAO3_\" + which\n\n if opts.push:\n vprint (\"Pushing branch\", name)\n ex (\"cd $DOC_ROOT/\" + product + \" && git push origin refs/heads/\" + name,\n 
allow_fail=True)", "def merge_files(locale, fail_if_missing=True):\r\n for target, sources in CONFIGURATION.generate_merge.items():\r\n merge(locale, target, sources, fail_if_missing)", "def collect(self, project, branch):\n # reset class variable\n self.clear_data()\n # sync branch info\n self.branch_data_sync(project, branch)\n if self.commit_begin is '':\n # log period need to be concerned\n # if branch has never been dealt with\n period = 'HEAD'\n else:\n period = '...'.join([self.commit_begin, self.commit_end])\n project_dir = os.path.join(settings.PROJECT_DIR, project.name)\n try:\n os.chdir(project_dir)\n os.system('git checkout -q ' + branch.name)\n try:\n os.system('git pull -q ')\n except Exception, error:\n print error\n # git log command for no merges commits\n cmd_git_log = [\"git\", \"log\", \"--shortstat\", \"--no-merges\", \"-m\",\n \"--pretty=format:%h %at %aN <%aE> %s\", period]\n proc = subprocess.Popen(cmd_git_log,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, stderr = proc.communicate()\n self.deal_lines(stdout.split('\\n'), 'no_merges')\n # git log command for merges commits\n cmd_git_log = [\"git\", \"log\", \"--shortstat\", \"--first-parent\",\n \"--merges\", \"-m\",\n \"--pretty=format:%h %at %aN <%aE> %s\", period]\n proc = subprocess.Popen(cmd_git_log,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, stderr = proc.communicate()\n self.deal_lines(stdout.split('\\n'), 'merges')\n try:\n self.database_operate(project, branch)\n except Exception, error:\n print error\n except Exception, error:\n print error", "def _get_branches_to_merge(branch):\n branches = [(branch, branch.subfolder or '')]\n for dependency in branch.branch_dependency_ids:\n branches.append((dependency.merge_with_branch_id, dependency.merge_subfolder or ''))\n return branches[::-1]", "def github_merge(config_data):\n issues = load_issues(config_data.issues_path)\n with open(config_data.entropy_path) as entropy:\n entropy_rdr = csv.reader(entropy)\n with open(config_data.merged_path, 'w', newline='') as merge:\n merge_wrtr = csv.writer(merge)\n entropy_hdrs = next(entropy_rdr)\n issue_hdrs = [\"Created\", \"Closed\", \"Open\",\n \"Created_Avg\", \"Closed_Avg\", \"Open_Avg\"\n ]\n merge_hdrs = entropy_hdrs + issue_hdrs\n merge_wrtr.writerow(merge_hdrs)\n default_i = {}\n for k in issue_hdrs:\n default_i[k] = None\n for e_row in entropy_rdr:\n i = issues.get(e_row[0], default_i)\n i_row = [i[k] for k in issue_hdrs]\n e_row.extend(i_row)\n merge_wrtr.writerow(e_row)\n \n print(\"Generated: {0}\".format(config_data.merged_path))", "def add_all(self, top_repo_path):\n my_output = subprocess.check_output([\"git\", \"add\", \"-A\"], cwd=top_repo_path)\n return my_output", "def update_submodules(options, project_directory=None):\n pass", "def sync_debs(self, force=False):\n for ppa_name in self.ppas.names:\n ppa = self.ppas.get_ppa_by_major_minor(ppa_name)\n exclude_pre = True\n latest_deb_version = ppa.get_source_semver(self.deb_model.name)\n latest_deb_version_mmp = (\n f\"{latest_deb_version.major}.{latest_deb_version.minor}.{latest_deb_version.patch}\"\n if latest_deb_version\n else None\n )\n latest_branch_version = self.deb_model.base.latest_branch_from_major_minor(\n enums.K8S_CRI_TOOLS_SEMVER, exclude_pre\n )\n if (\n force\n or not latest_deb_version\n or semver.compare(str(latest_branch_version), latest_deb_version_mmp)\n > 0\n ):\n self.log(\n f\"Found new branch {str(latest_branch_version)} > {str(latest_deb_version_mmp)}, building new deb\"\n )\n 
self.build(latest_branch_version)\n self.upload(enums.DEB_K8S_TRACK_MAP.get(ppa_name))\n else:\n self.log(\n f\"> Versions match {str(latest_branch_version)} == {str(latest_deb_version_mmp)}, not building a new deb\"\n )", "def git_pull():\n\n puts(yellow(\"Pull master from GitHub\"))\n with cd(env.source_dir):\n run('git reset --hard HEAD')\n run('git pull')", "def deploy_pull_master(self, restart=True):\n self.ops.local(\"cd \"+self.local_path+\"/src && git reset --hard HEAD && git pull origin master && git submodule update\")\n PiService.deploy(self, restart)", "def package():\n call([sys.executable, \"setup.py\", \"clean\", \"--all\", \"bdist_egg\"], cwd=\"src\")\n call([sys.executable, \"setup.py\", \"clean\", \"--all\", \"bdist_wheel\"], cwd=\"src\")", "def merge(self, message: str, master_branch: str, dev_branch: str) -> str:\n self.__verify_repo_initialized()\n commit_hash = select_merge_algorithm(\n message=message,\n branchenv=self._env.branchenv,\n stageenv=self._env.stageenv,\n refenv=self._env.refenv,\n stagehashenv=self._env.stagehashenv,\n master_branch=master_branch,\n dev_branch=dev_branch,\n repo_path=self._repo_path)\n\n return commit_hash", "def update_code_from_git():\n if not files.exists(REMOTE_REPO_DIR):\n with cd(HOME_DIR):\n run(\"git clone %s\" % MAIN_GITHUB_REP )\n with cd(REMOTE_REPO_DIR):\n run(\"git pull\")", "def svn_client_merge_peg2(char_source, svn_opt_revision_t_revision1, svn_opt_revision_t_revision2, svn_opt_revision_t_peg_revision, char_target_wcpath, svn_boolean_t_recurse, svn_boolean_t_ignore_ancestry, svn_boolean_t_force, svn_boolean_t_dry_run, apr_array_header_t_merge_options, svn_client_ctx_t_ctx, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def pull(self, remote, branch, *args):\n return self.cmd('pull', remote, branch, *args)", "def sync_debs(self, force=False):\n for ppa_name in self.ppas.names:\n ppa = self.ppas.get_ppa_by_major_minor(ppa_name)\n exclude_pre = True\n latest_deb_version = ppa.get_source_semver(self.deb_model.name)\n latest_deb_version_mmp = (\n f\"{latest_deb_version.major}.{latest_deb_version.minor}.{latest_deb_version.patch}\"\n if latest_deb_version\n else None\n )\n latest_branch_version = self.deb_model.base.latest_branch_from_major_minor(\n enums.K8S_CNI_SEMVER, exclude_pre\n )\n if (\n force\n or not latest_deb_version\n or semver.compare(str(latest_branch_version), latest_deb_version_mmp)\n > 0\n ):\n self.log(\n f\"Found new branch {str(latest_branch_version)} > {str(latest_deb_version_mmp)}, building new deb\"\n )\n self.build(latest_branch_version)\n self.upload(enums.DEB_K8S_TRACK_MAP.get(ppa_name))\n else:\n self.log(\n f\"> Versions match {str(latest_branch_version)} == {str(latest_deb_version_mmp)}, not building a new deb\"\n )", "def fetch_pkgbuild(self):\n\n package_dir = os.path.join(Package.cache_dir, self.pkgbase)\n\n # check if repo has ever been fetched\n if os.path.isdir(package_dir):\n if run([\"git\", \"fetch\"], cwd=package_dir).returncode != 0:\n logging.error(\"git fetch failed in directory {}\".format(package_dir))\n raise ConnectionProblem(\"git fetch failed in directory {}\".format(package_dir))\n\n head = run(\n [\"git\", \"rev-parse\", \"HEAD\"], stdout=PIPE, universal_newlines=True, cwd=package_dir\n ).stdout.strip()\n u = run(\n [\"git\", \"rev-parse\", \"@{u}\"], stdout=PIPE, universal_newlines=True, cwd=package_dir\n ).stdout.strip()\n\n # if new sources available\n if head != u:\n reset_return = run(\n [\"git\", \"reset\", \"--hard\", \"HEAD\"],\n 
stdout=DEVNULL, stderr=PIPE, cwd=package_dir, universal_newlines=True\n )\n if reset_return.returncode != 0:\n print(reset_return.stderr)\n logging.error(\"git reset failed in directory {}\".format(package_dir))\n raise InvalidInput(\"git reset failed in directory {}\".format(package_dir))\n\n pull_return = run(\n [\"git\", \"pull\"],\n stdout=DEVNULL, stderr=PIPE, cwd=package_dir, universal_newlines=True\n )\n if pull_return.returncode != 0:\n print(pull_return.stderr)\n logging.error(\"git pull failed in directory {}\".format(package_dir))\n raise ConnectionProblem(\"git pull failed in directory {}\".format(package_dir))\n\n # repo has never been fetched\n else:\n # create package dir\n try:\n os.makedirs(package_dir, mode=0o700, exist_ok=True)\n except OSError:\n logging.error(\"Creating package dir {} failed\".format(package_dir))\n raise InvalidInput(\"Creating package dir {} failed\".format(package_dir))\n\n # clone repo\n if run(\n [\"git\", \"clone\", \"{}/{}.git\".format(AurVars.aur_domain, self.pkgbase)],\n cwd=Package.cache_dir\n ).returncode != 0:\n logging.error(\"Cloning repo of {} failed in directory {}\".format(self.name, package_dir))\n raise ConnectionProblem(\"Cloning repo of {} failed in directory {}\".format(self.name, package_dir))", "def update_code_from_git():\n if not files.exists(CODE_DIR):\n with cd(HOME_DIR):\n run(\"git clone %s\" % MAIN_GITHUB_REP )\n\n with cd(CODE_DIR):\n git_pull()", "def merge(self, other):\n merged = copy.deepcopy(self.__dict__())\n for k, v in other.__dict__():\n if k in merged and getattr(self, k):\n if isinstance(v, (string_types, bool)):\n pass\n else:\n list_of_stuff = merged.get(k, [])\n for entry in v:\n if entry not in list_of_stuff:\n list_of_stuff.append(entry)\n merged[k] = list_of_stuff\n else:\n merged[k] = v\n return CondaEnvironmentProvider(**merged)", "def __gitBundleFetch(self):\n self.vcs.gitBundleFetch(self.project.getProjectPath())", "def update():\n require('PROJECT_NAME')\n\n with cd(utils.home('apps', env.PROJECT_NAME)):\n run('hg pull')\n run('hg up')", "def branch(branch_name):\n env.branch = branch_name", "def branch(branch_name):\n env.branch = branch_name", "def Unit_merge(*args):\n return _libsbml.Unit_merge(*args)", "def git_fix_detached(c, message):\n c.run('git submodule update')\n c.run('git submodule foreach git checkout master')\n c.run('git submodule foreach git pull origin master')", "def _merge_dependencies(self, args):\n for analysis, _, _ in args:\n path = os.path.normcase(os.path.abspath(analysis.scripts[-1][1]))\n path = path.replace(self._common_prefix, \"\", 1)\n path = os.path.splitext(path)[0]\n if os.path.normcase(path) in self._id_to_path:\n path = self._id_to_path[os.path.normcase(path)]\n self._set_dependencies(analysis, path)", "def pull(ctx, path_base):\n with ctx.cd(path_base):\n ctx.run('git reset --hard')\n ctx.run('git pull origin master')", "def test_branching(self):\r\n repo_dir = self.GIT_REPO_DIR\r\n # Test successful import from command\r\n if not os.path.isdir(repo_dir):\r\n os.mkdir(repo_dir)\r\n self.addCleanup(shutil.rmtree, repo_dir)\r\n\r\n # Checkout non existent branch\r\n with self.assertRaisesRegexp(GitImportError, GitImportError.REMOTE_BRANCH_MISSING):\r\n git_import.add_repo(self.TEST_REPO, repo_dir / 'edx4edx_lite', 'asdfasdfasdf')\r\n\r\n # Checkout new branch\r\n git_import.add_repo(self.TEST_REPO,\r\n repo_dir / 'edx4edx_lite',\r\n self.TEST_BRANCH)\r\n def_ms = modulestore()\r\n # Validate that it is different than master\r\n 
self.assertIsNotNone(def_ms.get_course(self.TEST_BRANCH_COURSE))\r\n\r\n # Attempt to check out the same branch again to validate branch choosing\r\n # works\r\n git_import.add_repo(self.TEST_REPO,\r\n repo_dir / 'edx4edx_lite',\r\n self.TEST_BRANCH)\r\n\r\n # Delete to test branching back to master\r\n delete_course(def_ms, contentstore(),\r\n self.TEST_BRANCH_COURSE,\r\n True)\r\n self.assertIsNone(def_ms.get_course(self.TEST_BRANCH_COURSE))\r\n git_import.add_repo(self.TEST_REPO,\r\n repo_dir / 'edx4edx_lite',\r\n 'master')\r\n self.assertIsNone(def_ms.get_course(self.TEST_BRANCH_COURSE))\r\n self.assertIsNotNone(def_ms.get_course(SlashSeparatedCourseKey.from_deprecated_string(self.TEST_COURSE)))", "def merge(a,b):\n c = a.copy()\n c.update(b)\n return c", "def main():\n try:\n merge_envs(parse_args())\n except MergeError:\n return 1", "def upload_packages(self, packages):\n\n context = {\n \"username\": self.username,\n \"reponame\": self.reponame,\n \"name\": self.name\n }\n\n filepaths = [os.path.join(os.path.dirname(path), pfile['filename'])\n for path in packages\n for pfile in deb_changes(path)['files']]\n filepaths.extend(packages)\n\n # get upload token\n resp = self._client.postjson(path=\"/users/%(username)s/\"\n \"repos/%(reponame)s/\"\n \"branches/%(name)s/get_upload_token\" %\n context)\n token = resp['utoken']\n for pfile in filepaths:\n self._client.upload(path=\"/upload/%s/send/%s\" %\n (token, os.path.basename(pfile)),\n filepath=pfile)\n self._client.post(path=\"/upload/%s/dput\" % token)", "def abort_merge():\n common.safe_git_call('merge --abort')", "def packages():", "def checkout_chibios():\n chibios = ('chibios', CHIBIOS_GIT_URL, CHIBIOS_GIT_BRANCH)\n chibios_contrib = ('chibios-contrib', CHIBIOS_CONTRIB_GIT_URL, CHIBIOS_CONTRIB_GIT_BRANCH)\n\n os.chdir('qmk_firmware/lib')\n\n for submodule, git_url, git_branch in chibios, chibios_contrib:\n if exists(submodule):\n rmtree(submodule)\n\n if not fetch_source(submodule):\n git_clone(git_url, git_branch)\n\n os.chdir('../..')", "def merge(self, other):\n\n for child in other.children:\n self.add_deep_copy_of(child, merged=True)", "def update(pkg_name):\n\n vendor_file = os.path.join('vendor', 'vendor.json')\n target = 'golang.org/x/{}'.format(pkg_name)\n\n with open(vendor_file) as content:\n deps = json.load(content)\n packages = [dep['path'] for dep in deps['package'] if dep['path'].startswith(target)]\n revision = '@{revision}'.format(revision=args.revision) if args.revision else ''\n packages = ['{pkg}{revision}'.format(pkg=pkg, revision=revision) for pkg in packages]\n cmd = ['govendor', 'fetch'] + packages\n if args.verbose:\n print(' '.join(cmd))\n subprocess.check_call(cmd)", "def merge(self, key, brk):\n brk = types.maybe_get_cls(brk, Broker)\n key = types.maybe_get_cls(key, str)\n classes = brk.classes\n for cls in classes:\n mfr = brk.get_manufacturer(cls)\n self.merge_mfr(key, mfr)", "def svn_client_merge_peg(char_source, svn_opt_revision_t_revision1, svn_opt_revision_t_revision2, svn_opt_revision_t_peg_revision, char_target_wcpath, svn_boolean_t_recurse, svn_boolean_t_ignore_ancestry, svn_boolean_t_force, svn_boolean_t_dry_run, svn_client_ctx_t_ctx, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass" ]
[ "0.65263903", "0.6383677", "0.5910188", "0.5779606", "0.5774576", "0.5738681", "0.57243484", "0.5715652", "0.56322825", "0.56101876", "0.5508702", "0.54770225", "0.5444733", "0.5418547", "0.5407719", "0.5407596", "0.5389976", "0.53783303", "0.5357206", "0.5333331", "0.5327569", "0.53160965", "0.5313933", "0.53054905", "0.52926755", "0.52884126", "0.5287647", "0.5287378", "0.5273752", "0.52681375", "0.5253491", "0.5213683", "0.51999044", "0.5198055", "0.5197601", "0.5178182", "0.5155429", "0.51520395", "0.51184464", "0.5113989", "0.51020956", "0.5097892", "0.5091865", "0.5087724", "0.5078055", "0.5075163", "0.5073691", "0.5063829", "0.50617015", "0.50399894", "0.50360894", "0.50284827", "0.5027589", "0.50272167", "0.50195247", "0.5019296", "0.5018894", "0.50038826", "0.49885863", "0.49745765", "0.49701482", "0.49675852", "0.496148", "0.49491656", "0.49268594", "0.4919667", "0.49089983", "0.49058646", "0.48942232", "0.4891112", "0.488954", "0.48828655", "0.4871141", "0.4866412", "0.48652396", "0.48643643", "0.48589718", "0.48550922", "0.48532167", "0.4843797", "0.48397714", "0.482923", "0.48272318", "0.48251054", "0.48251054", "0.48249736", "0.4819747", "0.48142043", "0.48100147", "0.4807163", "0.48037755", "0.47980765", "0.4797575", "0.47913206", "0.47880676", "0.47868726", "0.47720313", "0.47692758", "0.47683957", "0.47595015" ]
0.61638653
2
Upload packages to branch.
def upload_packages(self, packages): context = { "username": self.username, "reponame": self.reponame, "name": self.name } filepaths = [os.path.join(os.path.dirname(path), pfile['filename']) for path in packages for pfile in deb_changes(path)['files']] filepaths.extend(packages) # get upload token resp = self._client.postjson(path="/users/%(username)s/" "repos/%(reponame)s/" "branches/%(name)s/get_upload_token" % context) token = resp['utoken'] for pfile in filepaths: self._client.upload(path="/upload/%s/send/%s" % (token, os.path.basename(pfile)), filepath=pfile) self._client.post(path="/upload/%s/dput" % token)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def upload_package(self, __contents):\n raise NotImplementedError", "def deploy():\n build()\n collect()\n commit()\n push()", "def push(self, base_repo, branch=\"master\"):\n base_repo.push_to(self, branch)", "def push(self):\n out, err, code = self.command( [\"git\", \"push\"], self.directory )", "def upload(self, connection):\n if not self.already_deployed(connection):\n if self.config.project_type == \"java\":\n print(blue('Pushing jar to nexus server'))\n connection.local('mvn deploy')\n self._already_deployed = True\n else:\n raise Exception(f\"Unsupported project type: {self.config.project_type}\")", "def upload():\n sh('python setup.py register sdist upload')", "def __gitPush(self):\n self.vcs.gitPush(self.project.getProjectPath())", "def push():\n branch = git.current_branch().name\n shell.run('git push -u origin {}'.format(branch))", "def push(self) -> None:\n\n with ImportExtensions(required=True):\n import requests\n\n pkg_path = Path(self.args.path)\n if not pkg_path.exists():\n self.logger.critical(f'`{self.args.path}` is not a valid path!')\n exit(1)\n\n request_headers = self._get_request_header()\n\n try:\n # archive the executor package\n with TimeContext(f'Packaging {self.args.path}', self.logger):\n md5_hash = hashlib.md5()\n bytesio = archive_package(pkg_path)\n content = bytesio.getvalue()\n md5_hash.update(content)\n\n md5_digest = md5_hash.hexdigest()\n\n # upload the archived package\n form_data = {\n 'public': self.args.public if hasattr(self.args, 'public') else False,\n 'private': self.args.private\n if hasattr(self.args, 'private')\n else False,\n 'md5sum': md5_digest,\n 'force': self.args.force,\n 'secret': self.args.secret,\n }\n\n method = 'put' if self.args.force else 'post'\n\n hubble_url = get_hubble_url()\n # upload the archived executor to Jina Hub\n with TimeContext(\n f'Pushing to {hubble_url} ({method.upper()})',\n self.logger,\n ):\n resp = getattr(requests, method)(\n hubble_url,\n files={'file': content},\n data=form_data,\n headers=request_headers,\n )\n\n if 200 <= resp.status_code < 300:\n # TODO: only support single executor now\n image = resp.json()['executors'][0]\n\n uuid8 = image['id']\n secret = image['secret']\n visibility = image['visibility']\n\n info_table = [\n f'\\t🔑 ID:\\t\\t' + colored(f'{uuid8}', 'cyan'),\n f'\\t🔒 Secret:\\t'\n + colored(\n f'{secret}',\n 'cyan',\n )\n + colored(\n ' (👈 Please store this secret carefully, it wont show up again)',\n 'red',\n ),\n f'\\t👀 Visibility:\\t' + colored(f'{visibility}', 'cyan'),\n ]\n\n if 'alias' in image:\n info_table.append(f'\\t📛 Alias:\\t' + colored(image['alias'], 'cyan'))\n\n self.logger.success(f'🎉 Executor `{pkg_path}` is pushed successfully!')\n self.logger.info('\\n' + '\\n'.join(info_table))\n\n usage = (\n f'jinahub://{uuid8}'\n if visibility == 'public'\n else f'jinahub://{uuid8}:{secret}'\n )\n\n self.logger.info(f'You can use it via `uses={usage}` in the Flow/CLI.')\n elif resp.text:\n # NOTE: sometimes resp.text returns empty\n raise Exception(resp.text)\n else:\n resp.raise_for_status()\n except Exception as e: # IO related errors\n self.logger.error(\n f'Error while pushing `{self.args.path}` with session_id={request_headers[\"jinameta-session-id\"]}: '\n f'\\n{e!r}'\n )", "def git_push(c):\n c.run(\"git submodule foreach git push \")", "def push_updates():\n check_call(['git', 'push', '--tags', '--force'])", "def deploy():\n _confirm_branch()\n \n require('settings', provided_by=[production, staging])\n require('branch', provided_by=[stable, master, branch])\n \n 
with settings(warn_only=True):\n maintenance_up()\n \n checkout_latest()\n gzip_assets()\n deploy_to_s3()\n maintenance_down()", "def upload():\n\n # Our credentials are only available from within the main repository and not forks.\n # We need to prevent uploads from all BUT the branches in the main repository.\n # Pull requests and master-branches of forks are not allowed to upload.\n is_pull_request = (\n (\"TRAVIS_PULL_REQUEST\" in os.environ and os.environ[\"TRAVIS_PULL_REQUEST\"] != \"false\") or\n \"APPVEYOR_PULL_REQUEST_NUMBER\" in os.environ\n )\n if is_pull_request:\n click.echo(\"Refusing to upload artifacts from a pull request!\")\n return\n\n if \"AWS_ACCESS_KEY_ID\" in os.environ:\n subprocess.check_call([\n \"aws\", \"s3\", \"cp\",\n \"--acl\", \"public-read\",\n DIST_DIR + \"/\",\n \"s3://snapshots.mitmproxy.org/{}/\".format(UPLOAD_DIR),\n \"--recursive\",\n ])\n\n upload_pypi = (\n TAG and\n \"WHEEL\" in os.environ and\n \"TWINE_USERNAME\" in os.environ and\n \"TWINE_PASSWORD\" in os.environ\n )\n if upload_pypi:\n whl = glob.glob(join(DIST_DIR, 'mitmproxy-*-py3-none-any.whl'))[0]\n click.echo(\"Uploading {} to PyPi...\".format(whl))\n subprocess.check_call([\n \"twine\",\n \"upload\",\n whl\n ])\n\n upload_docker = (\n (TAG or BRANCH == \"master\") and\n \"DOCKER\" in os.environ and\n \"DOCKER_USERNAME\" in os.environ and\n \"DOCKER_PASSWORD\" in os.environ\n )\n if upload_docker:\n docker_tag = \"dev\" if BRANCH == \"master\" else VERSION\n\n click.echo(\"Uploading Docker image to tag={}...\".format(docker_tag))\n subprocess.check_call([\n \"docker\",\n \"login\",\n \"-u\", os.environ[\"DOCKER_USERNAME\"],\n \"-p\", os.environ[\"DOCKER_PASSWORD\"],\n ])\n subprocess.check_call([\n \"docker\",\n \"push\",\n \"mitmproxy/mitmproxy:{}\".format(docker_tag),\n ])", "def install():\n execute(generate)\n execute(upload)", "def upload_package(conn, module, remotepath = None, chunk_size = 16000):\n if remotepath is None:\n site = conn.modules[\"distutils.sysconfig\"].get_python_lib()\n remotepath = conn.modules.os.path.join(site, module.__name__)\n localpath = os.path.dirname(inspect.getsourcefile(module))\n upload(conn, localpath, remotepath, chunk_size = chunk_size)", "def push ():\n\n tagname = get_tag (comp_versions, 'ACE')\n\n if opts.push:\n if opts.take_action:\n vprint (\"Pushing ACE_TAO\", opts.ace_tao_branch, \"to origin\")\n ex (\"cd $DOC_ROOT/ACE_TAO && git push origin \" + opts.ace_tao_branch)\n\n vprint (\"Pushing tag %s on ACE_TAO\" % (tagname))\n ex (\"cd $DOC_ROOT/ACE_TAO && git push origin tag \" + tagname)\n\n vprint (\"Pushing tag %s on MPC\" % (tagname))\n ex (\"cd $DOC_ROOT/MPC && git push origin tag \" + tagname)\n\n # Push release branches\n latest_branch_helper (push_latest_branch, opts.release_type)\n else:\n vprint (\"Pushing tag %s on ACE_TAO\" % (tagname))\n vprint (\"Pushing tag %s on MPC\" % (tagname))\n print (\"Pushing tags:\\n\")\n print (\"Pushing tag \" + tagname + \"\\n\")", "def upload_tar_from_git():\n require(\"release\", provided_by=[deploy])\n tree = prompt(\"Please enter a branch or SHA1 to deploy\", default=\"master\")\n local(\"git archive --format=tar %s | gzip > %s.tar.gz\" % (tree, env['release']))\n sudo(\"mkdir %(path)s/releases/%(release)s\" % env)\n put(\"%(release)s.tar.gz\" % env, \"%(path)s/packages/\" % env, use_sudo=True)\n sudo(\"cd %(path)s/releases/%(release)s && tar zxf ../../packages/%(release)s.tar.gz\" % env)\n local(\"rm %(release)s.tar.gz\" % env)", "def prepare_deploy(ticket=None, msg=None, branch=None):\n test()\n 
commit(ticket, msg)\n push(branch)\n pull(branch)", "def _push(self):\n push_cmds = self.vcs.push_commands()\n if not push_cmds:\n return\n if utils.ask(\"OK to push commits to the server?\"):\n for push_cmd in push_cmds:\n output = execute_command(push_cmd)\n logger.info(output)", "def upload(ctx, release, rebuild, version):\n\n dist_path = Path(DIST_PATH)\n if rebuild is False:\n if not dist_path.exists() or not list(dist_path.glob('*')):\n print(\"No distribution files found. Please run 'build' command first\")\n return\n else:\n ctx.invoke(build, force=True, version=version)\n\n if release:\n args = ['twine', 'upload', 'dist/*']\n else:\n repository = 'https://test.pypi.org/legacy/'\n args = ['twine', 'upload', '--repository-url', repository, 'dist/*']\n\n env = os.environ.copy()\n\n p = subprocess.Popen(args, env=env)\n p.wait()", "def upload(bld):\n\n with bld.create_virtualenv() as venv:\n venv.run(\"python -m pip install twine\")\n\n wheel = _find_wheel(ctx=bld)\n\n venv.run(\"python -m twine upload {}\".format(wheel))", "def prepare_deploy():\n from fabdeploy.django import test as django_test\n django_test()\n git.add_commit_pull()\n git.push()", "def upload_wheels():\n build()\n sh(\"%s -m twine upload dist/*.whl\" % PYTHON)", "def push(ctx):\n dufl_root = ctx.obj['dufl_root']\n git = Git(ctx.obj.get('git', '/usr/bin/git'), dufl_root)\n git.run('push', 'origin', git.working_branch())", "def upload(ctx: click.Context, **kwargs):\n root_commands.cmd_upload(ctx.obj, **kwargs)", "def push_sources():\n ensure_src_dir()\n push_rev = getattr(env, 'push_rev', None)\n if push_rev is None:\n push_rev = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n local(\"git tag -a {0} -m \\\"Tagged for release\\\"\".format(push_rev))\n local(\"git push origin master --tags\")\n\n with cd(SRC_DIR):\n run(\"git pull origin master\")\n run(\"git fetch -t\")\n run(\"git checkout {0}\".format(push_rev))", "def upload_release_files():\n version = get_release_version()\n target = sf_files + sourceforge_target_dir(version)\n\n print()\n print(\"Uploading release files...\")\n print(\" Source:\", release_path)\n print(\" Target: \" + target)\n print(\" Files: \" + ', '.join(glob.glob('*')))\n print()\n call_rsync(\n username,\n \"\",\n path.join(release_path, \"*\"),\n target\n )\n print()", "def push():\n local('hg push jvacx')", "def deploy():\n build()\n copy()\n install()", "def push(self, remote, branch, *args):\n return self.cmd('push', remote, branch, *args)", "def push_mockups():\n local('cd ../../cts-ui && grunt')\n local('cp ../../cts-ui/mockups/css/*.css ../../mockups/cts-ui/css/.')\n local('cp -R ../../cts-ui/mockups/css/bootstrap ../../mockups/cts-ui/css/bootstrap')\n local('cp -R ../../cts-ui/mockups/img ../../mockups/cts-ui/img')\n local('cp ../../cts-ui/mockups/*.html ../../mockups/cts-ui/.')\n local('cd ../../mockups/cts-ui && git add *.html')\n local('cd ../../mockups/cts-ui/css && git add *.css')\n local('cd ../../mockups/cts-ui/css && git add bootstrap/*')\n local('cd ../../mockups/cts-ui && git add img/*')\n local('cd ../../mockups && git commit -am \"New cts-ui mockups [fabfile]\"')\n local('cd ../../mockups && git push origin master')", "def push(self):\n origin = self.git_repo.remotes.origin\n origin.push()", "def release_package_to_repository(self, version: str) -> None:\n logger.info(f\"Uploading the package [{version}]\")\n pass", "def release_pypi():\n local('python setup.py clean sdist register upload')", "def deploy():\n git_pull()\n# build_virtualenv()\n# collectstatic()\n 
migrate()\n# reload_gunicorn()\n# restart_celery()\n puts(green(\"Deployment done!\"))", "def cli(ctx, paths, **kwds):\n\n def upload(realized_repository):\n return shed.upload_repository(ctx, realized_repository, **kwds)\n\n exit_code = shed.for_each_repository(ctx, upload, paths, **kwds)\n sys.exit(exit_code)", "def push():\n files = []\n for i in sp.check_output([\"git\", \"status\"]).decode().split(\"\\n\"):\n nf = \"#\\tnew file:\"\n mf = \"#\\tmodified:\"\n\t# Should have a deleted-files option here too.\n if i[: len(nf)] == nf or i[: len(mf)] == mf:\n f = i.split(\" \")[-1]\n files.append(f)\n files = list(set(files)) # Remove duplicates\n\n print(\"Committing these files: {}\".format(files))\n\n # Run all py scripts through black for formatting.\n# for f in files:\n# if f[-3:] == \".py\":\n# sp.call([\"black\", f])\n\n [sp.call([\"git\", \"add\", \"{}\".format(i)]) for i in files]\n\n commit_message = str(input(\"Enter commit message:\\n\"))\n commit_message = \"Updated\" if commit_message == \"\" else commit_message\n print(\"Committing with commit message of: {}\\n\\n\".format(commit_message))\n sp.call([\"git\", \"commit\", \"-m\", \"{}\".format(commit_message)])\n sp.call([\"git\", \"push\"])", "def setup():\n _confirm_branch()\n \n require('settings', provided_by=[production, staging])\n require('branch', provided_by=[stable, master, branch])\n \n setup_directories()\n setup_virtualenv()\n clone_repo()\n checkout_latest()\n install_requirements()\n install_apache_conf()\n deploy_to_s3()", "def upload_files(self, files):\n\n for f in files:\n self.scp.put(f, recursive=True)", "def deploy():\n with cd('/apps/sharejs-rethinkdb-example'):\n run('pwd')\n run('git stash')\n run('git pull -f origin master')\n run('fig -f prod.yml stop')\n run('fig -f prod.yml build')\n run('fig -f prod.yml up -d')", "def build_and_deploy():\n\n with shell_env(TZ=_get_timezone()):\n _create_output_branch()\n _build_html()\n _git_commit_all()\n _git_push(_get_output_branch())", "def upload():\n run('mkdir -p /srv/images/'+env.project_name+'/')\n rsync_project(\n env.project_dir, './',\n exclude=(\n '.git', '.gitignore', '__pycache__', '*.pyc', '.DS_Store', 'environment.yml',\n 'fabfile.py', 'Makefile', '.idea', 'bower_components', 'node_modules',\n '.env.example', 'README.md', 'var'\n ), delete=True)", "def submitUploads(self, local = False):\n\n # Set and upload files to repo using uploadRepoFiles()\n if local:\n for key in self.nbDetails:\n # Skip metadata key if present\n if key!='proc' and self.nbDetails[key]['pkg'] and self.nbDetails[key]['archFilesOK']:\n self.uploadRepoFiles(key)\n\n else:\n # Upload on remote machine.\n ACCESS_TOKEN = initZenodo(self.hostDefn['localhost']['localSettings']/'zenodoSettings.dat')\n with self.c.prefix(f\"source {self.hostDefn[self.host]['condaPath']} {self.hostDefn[self.host]['condaEnv']}\"):\n result = self.c.run(f\"{Path(self.hostDefn[self.host]['repoScpPath'], self.scpDefnRepo['uploadNohup']).as_posix()} \\\n {Path(self.hostDefn[self.host]['repoScpPath'], self.scpDefnRepo['upload']).as_posix()} \\\n {self.hostDefn[self.host]['nbProcDir']/self.jsonProcFile.name} {ACCESS_TOKEN}\",\n warn = True, timeout = 10)\n\n print(f\"Log file set: {self.hostDefn[self.host]['nbProcDir']/self.jsonProcFile.name}\")\n # Remote upload set to run via nohup... 
will need to pull logs later.\n\n # Publish\n\n # return 'Not implemented'", "def install_branch(branch):\n\n # if it's already in the virtualenv, remove it\n ver = '.'.join(map(str,(sys.version_info.major,sys.version_info.minor)))\n sitepack = os.path.join(virtual_dir, 'lib','python'+ver, 'site-packages')\n if os.path.exists(sitepack):\n dir_list = os.listdir(sitepack)\n else:\n dir_list = []\n for f in dir_list:\n if 'statsmodels' in f:\n shutil.rmtree(os.path.join(sitepack, f))\n\n # checkout the branch\n os.chdir(gitdname)\n retcode = subprocess.call('git checkout ' + branch, shell=True)\n if retcode != 0:\n msg = \"\"\"Could not checkout out branch %s\"\"\" % branch\n raise Exception(msg)\n\n # build and install\n retcode = subprocess.call(\" \".join([virtual_python, 'setup.py', 'build']),\n shell=True)\n if retcode != 0:\n msg = \"\"\" Could not build branch %s\"\"\" % branch\n raise Exception(msg)\n retcode = subprocess.call(\" \".join([virtual_python, os.path.join(gitdname,\n 'setup.py'), 'install']), shell=True)\n if retcode != 0:\n os.chdir(dname)\n msg = \"\"\"Could not install branch %s\"\"\" % branch\n raise Exception(msg)\n os.chdir(dname)", "def deploy():\n git_pull()\n if confirm(\"Install/upgrade requirements with pip?\"):\n install_requeriments()\n django_command('collectstatic')\n django_command('migrate')\n restart()", "def push(self):\n if self.forward:\n git = self.repo.git\n try:\n git.push()\n self.forward = \"pushed\"\n except:\n self.forward = \"push error - \"+self.forward", "def _git_push(branch):\n\n local(\n 'git push -f origin %(branch)s:%(branch)s' % {'branch': branch},\n capture=True\n )\n print('Pushed to %s' % branch)", "def upload_tar_from_git(path):\n require('release', provided_by=[prod])\n require('whole_path', provided_by=[prod])\n require('branch', provided_by=[prod])\n local('git checkout %s' % (env.branch))\n local('git archive --format=tar %s | gzip > %s.tar.gz' % (env.branch, env.release))\n sudo('mkdir -p %s' % (path))\n put('%s.tar.gz' % (env.release), '/tmp/', mode=0755)\n sudo('mv /tmp/%s.tar.gz %s/packages/' % (env.release, env.code_root))\n sudo('cd %s && tar zxf ../../../packages/%s.tar.gz' % (env.whole_path, env.release))\n local('rm %s.tar.gz' % (env.release))\n sudo('rm %s/packages/%s.tar.gz' % (env.code_root, env.release))", "def deploy():\n upload_static()\n compile_code()\n upload_code()\n upload_supervisor()\n start_server()", "def perform_upload(path):\n subprocess.call(\n ['twine', 'upload', path + '/dist/*'])", "def push_code(repo, branch='gh-pages'):\n return repo.remotes.origin.push(branch)", "def do_push(self, remote, branch, force=False, force_with_lease=False, remote_branch=None):\n if self.savvy_settings.get(\"confirm_force_push\", True):\n if force:\n if not sublime.ok_cancel_dialog(CONFIRM_FORCE_PUSH.format(\"--force\")):\n return\n elif force_with_lease:\n if not sublime.ok_cancel_dialog(CONFIRM_FORCE_PUSH.format(\"--force--with-lease\")):\n return\n\n self.window.status_message(START_PUSH_MESSAGE)\n self.push(\n remote,\n branch,\n set_upstream=self.set_upstream,\n force=force,\n force_with_lease=force_with_lease,\n remote_branch=remote_branch\n )\n self.window.status_message(END_PUSH_MESSAGE)\n util.view.refresh_gitsavvy(self.window.active_view())", "def _upload_build_source_package(self, targz):\n # Upload to temporary storage, only if doesn't exist\n self.pipeline_package = \"source/cache/%s\" % os.path.basename(targz)\n blob = self.bucket.blob(self.pipeline_package)\n logger.debug(\"build-package=%s\" % 
self.pipeline_package)\n if not blob.exists():\n blob.upload_from_filename(targz, content_type=\"application/gzip\")", "def deploy():", "def upload_package(self, filename=None):\n logger.info(\"Uploading the package to S3\")\n s3f = S3FunctionUploader(self.function_config['Code']['S3Bucket'])\n self.s3_filename = path.join(\n self.function_config['Code']['S3KeyPath'],\n path.basename(filename or self.local_filename)\n )\n s3f.upload(filename or self.local_filename,\n self.s3_filename)", "def full_deploy():\n refresh_cts()\n push_mockups()\n deploy()", "def deploy():\n\n project_dir = '/home/gastosabertos/gastos_abertos_website'\n with cd(project_dir):\n local('tar -cvzf build.tar.gz build')\n run('cp -r build build-old')\n put('build.tar.gz', '.')\n run('tar -xvf build.tar.gz')", "def call_git_push():\n print(\"This will commit and push the git repo\")\n today = datetime.datetime.today()\n call([\"git\", \"add\", \".\"])\n call([\"git\", \"commit\", \"-m\", \"Updated notes. {:%Y-%m-%d %H:%M:%S}\".format(today)])\n call([\"git\", \"push\", \"origin\", \"master\"])", "def move_packages (name, stage_dir, package_dir):\n\n print (\"Storing packages for \", name)\n\n # Take care of the zip file\n print (\"\\tZip file...\")\n target_file = join (package_dir, name + \".zip\")\n shutil.copy (join (stage_dir, \"zip-archive.zip\"), target_file)\n ex (\"md5sum \" + target_file + \" > \" + target_file + \".md5\")\n\n\n tar_file = join (stage_dir, \"tar-archive.tar\")\n target_file = join (package_dir, name + \".tar\")\n\n # bzip\n print (\"\\tBzip2 file.....\")\n shutil.copy (tar_file, target_file)\n ex (\"bzip2 \" + target_file)\n ex (\"md5sum \" + target_file + \".bz2 > \" + target_file + \".bz2.md5\")\n\n print (\"\\tgzip file.....\")\n shutil.copy (tar_file, target_file)\n ex (\"gzip \" + target_file)\n ex (\"md5sum \" + target_file + \".gz > \" + target_file + \".gz.md5\")", "def deploy():\n require('hosts', provided_by=[prod])\n require('whole_path', provided_by=[prod])\n require('code_root')\n upload_tar_from_git(env.whole_path)\n install_requirements()\n symlink_current_release()\n migrate()\n restart_webservers()\n setup_permissions()\n collectstatic()", "def deploy():\n with cd(\"~/public_html/\"):\n run(\"/usr/local/cpanel/3rdparty/bin/git pull\")\n\n with cd(\"~/public_html/skin/frontend/gemz/default/tools/\"):\n run(\"grunt default\")\n #sudo(\"/scripts/enablefileprotect\")", "def push_latest_branch (product, which, main_branch):\n\n name = \"Latest_ACE7TAO3_\" + which\n\n if opts.push:\n vprint (\"Pushing branch\", name)\n ex (\"cd $DOC_ROOT/\" + product + \" && git push origin refs/heads/\" + name,\n allow_fail=True)", "def build_and_upload(python_version, package, vers):\n vspl = python_version.split('.')\n python_string = vspl[0] + vspl[1]\n current_os = get_current_os()\n cmd = ('conda build --old-build-string conda.recipe '\n '--output-folder artifacts '\n '--no-anaconda-upload --python {python_version}')\n run(cmd.format(python_version=python_version))\n cmd = ('conda convert '\n 'artifacts/{os}/{package}-{vers}-py{python_string}_0.tar.bz2 '\n '-p all -o artifacts/')\n run(cmd.format(os=current_os, package=package, vers=vers,\n python_string=python_string))\n\n # check that we have the operating systems we want\n assert len(set(OPERATING_SYSTEMS) - set(os.listdir('artifacts'))) == 0\n\n for os_ in OPERATING_SYSTEMS:\n cmd = ('anaconda --token $TOKEN upload --force --user {CONDA_USER} '\n 'artifacts/{os_}/{package}-{vers}-py{python_string}_0.tar.bz2')\n 
run(cmd.format(CONDA_USER=CONDA_USER, os_=os_, package=package,\n vers=vers, python_string=python_string))", "def ship():\n cotton.git_push()\n cotton.install_python_dependencies()\n\n # Deploy the secrets module to the remote project root\n spath = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'secrets'))\n put(spath, env.project_root)\n\n cotton.upload_template_and_reload('cron')", "def upload_a_file(self, package, version, file_path):\n cmd_args = [self._push_executable]\n cmd_args += [\"--user\", self._username]\n cmd_args += [\"--api_key\", self._api_key]\n cmd_args += [\"--subject\", self._subject]\n cmd_args += [\"--repo\", self._repo]\n cmd_args += [\"--package\", package]\n cmd_args += [\"--version\", version]\n cmd_args += [\"--file_path\", file_path]\n\n if self._component:\n cmd_args += [\"--component\", self._component]\n if self._distribution:\n cmd_args += [\"--distribution\", self._distribution]\n if self._architecture:\n cmd_args += [\"--architecture\", self._architecture]\n\n cmd_args += [\"--package\", package]\n cmd_args += [\"--version\", version]\n cmd_args += [\"--file_path\", file_path]\n\n try:\n proc = subprocess.Popen(cmd_args,\n stderr=subprocess.PIPE,\n stdout=subprocess.PIPE,\n shell=False)\n (out, err) = proc.communicate()\n if proc.returncode != 0:\n raise RuntimeError(err)\n except subprocess.CalledProcessError as ex:\n raise RuntimeError(\"Failed to upload file {0} due to {1}\".format(file_path, ex))\n\n return True", "def upload(ui, repo, name, **opts):\n\trepo.ui.quiet = True\n\tcl, err = LoadCL(ui, repo, name, web=True)\n\tif err != \"\":\n\t\treturn err\n\tif not cl.local:\n\t\treturn \"cannot upload non-local change\"\n\tcl.Upload(ui, repo)\n\tprint \"%s%s\\n\" % (server_url_base, cl.name)\n\treturn", "def push(api_client, folder, verbose):\n local_folder, remote_folder = _get_local_and_remote_folders(folder)\n workspace = WorkspaceApi(api_client)\n\n def work():\n workspace.import_workspace_dir(local_folder, remote_folder,\n True, False, verbose=verbose)\n if not verbose:\n with loadingbar(msg=\"Pushing to {}\".format(remote_folder), width=10,\n fill_char=\"o\", interval=.25):\n work()\n else:\n work()", "def pack():\n clean_local()\n build()\n copy_json()\n optimize()\n tarball()", "def test_upload(pytestconfig, pypi):\n #pypi_chishop.restore()\n with PkgTemplate(name='acme.tpi_test_upload') as pkg:\n pkg.create_pypirc(pypi.get_rc())\n pkg.install_package('pytest-cov')\n new_env = copy.copy(pkg.env)\n new_env['HOME'] = pkg.workspace\n print pkg.run_with_coverage(['%s/setup.py' % pkg.trunk_dir, 'sdist', 'register',\n 'upload', '--show-response'],\n pytestconfig, env=new_env, cd=HERE)\n assert os.path.isfile(os.path.join(pypi.workspace, 'chishop/media/dists/a/acme.tpi_test_upload/acme.tpi_test_upload-1.0.0.dev1.tar.gz'))\n\n import tarfile, urllib2, cStringIO\n dist_url = 'http://%s:%s/media/dists/a/acme.tpi_test_upload/acme.tpi_test_upload-1.0.0.dev1.tar.gz' % (pypi_chishop.hostname, pypi_chishop.port)\n response = urllib2.urlopen(dist_url)\n buf = response.read()\n fh = cStringIO.StringIO(buf)\n try:\n tf = tarfile.open(fileobj=fh)\n assert 'acme.tpi_test_upload-1.0.0.dev1/PKG-INFO' in tf.getnames()\n finally:\n tf.close()", "def sync_and_add ( self, add_method ):\n # _nowait raises Exception when queue is full which is good\n # in non-threaded execution\n qput = lambda r: self._queue_packages_from_repo ( r, add_method )\n\n self._sync_all_repos_and_run ( when_repo_done=qput )", "def deploy(ctx):\n click.echo('deploying')\n 
ctx.deploy()\n click.echo('done')", "def install(self):\n\n self.clean_git_checkout(self.git_repo, '/src')\n\n self.__copy_config_templates();\n\n self.local(\"sudo pip install -r src/requirements.txt --upgrade\")\n\n if not self.is_local():\n PiService.install(self) #copy to remote\n\n self.sudo(\"pip install -r src/requirements.txt --upgrade\")", "def push_to_github(label):\n\n # Make sure we're in the right place to do all the git things.\n os.chdir(taas.data_root())\n\n # If there's nothing to do, then do nothing.\n if (not something_to_commit()):\n print(\"Nothing to commit.\")\n return\n\n branch_name = datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n\n branch_name += \"-\" + label\n\n run([\"git\", \"checkout\", \"-b\", branch_name])\n\n run([\"git\", \"add\", \"-A\"])\n\n run([\"git\", \"status\"])\n\n run([\"git\", \"commit\", \"-m\", \"Automated update: \"+label])\n\n run([\"git\", \"push\", \"--set-upstream\", \"origin\", branch_name])", "def publish():\n fab.local(\"env/bin/python setup.py sdist\")\n tar_filename = fab.local(\n \"env/bin/python setup.py --fullname\", capture=True\n )\n dist_filename = \"dist/{}.tar.gz\".format(tar_filename)\n fab.put(dist_filename, PYREPO_DIR)", "def push_backup(args: Arguments) -> None:\n\n files = get_files_from_previous_backup(args.site)\n bucket = get_bucket(args)\n\n for path in files:\n upload_file(\n path=path,\n site_name=args.site,\n bucket=bucket,\n bucket_directory=args.bucket_directory,\n )\n\n print(\"Done!\")", "def upload(version=minv.__version__, release=\"1\"):\n version = version or minv.__version__\n put(\n join(\n env.builder_path,\n \"build/RPMS/minv-%s-%s.noarch.rpm\" % (version, release)\n ), \"\"\n )\n put(\"minv/package/minv_install_postgresql.sh\", \"\")\n sudo(\"chmod a+x minv_install_postgresql.sh\")\n with lcd(env.ink_path):\n for rpm in RPMS:\n put(rpm, \"\")", "def deploy():\n require(\"hosts\", provided_by=[production, staging])\n env.release = time.strftime(\"%Y-%m-%d_%H:%M:%S\")\n upload_tar_from_git()\n install_requirements()\n setup_webserver()\n symlink_current_release()\n restart_webserver()", "def install():\n PackCommandExecutor().pack()\n InstallCommandExecutor().install()", "def install():\n deploy()\n configure()", "def upload_package(self, pointer: FileStorage, token: str) -> Upload:\n files = {'file': (pointer.filename, pointer, pointer.mimetype)}\n data, _, _ = self.json('post', '/', token, files=files,\n expected_code=[status.CREATED,\n status.OK],\n timeout=30, allow_2xx_redirects=False)\n return self._parse_upload_status(data)", "def publish_files():\n print(\"Publishing files to the internet...\", end=\"\", flush=True)\n import subprocess\n try:\n subprocess.run(\"./upload.sh\", timeout=120.0)\n print(\"done.\\n\")\n except:\n print(\"failed.\\n\")", "def publish():\n reset()\n compress()\n build()\n s3deploy()\n log_success()", "def commit_master(do_deploy=True):\n local(\"git pull origin master\")\n commit()\n local(\"git checkout master\")\n local(\"git pull origin master\")\n local(\"git merge dev\")\n local(\"git push origin master\")\n if do_deploy:\n deploy()\n deploy_config()", "def deploy():\n return do_deploy(do_pack())", "def deploy():\n return do_deploy(do_pack())", "def deploy():\n return do_deploy(do_pack())", "def deploy():\n _git_pull()\n _migrate()\n _collect_static_files()\n _restart_webserver()", "def bulk_upload ( server, identity, src_dir, tgt_dir ) :\n tmp_tarfilepath = '/tmp/'\n tmp_tarfilename = server + '.tar.gz'\n tmp_file = tmp_tarfilepath + tmp_tarfilename\n\n # 
Tar up the src directory\n s = subprocess.call( [ '/bin/sh', '-c',\n 'cd ' + src_dir + ' && tar czf ' + tmp_file + ' .' ] )\n if s != 0 :\n print 'Unable to upload files.'\n return s\n\n # Copy the tar file up to the server\n s = scp_call( server, identity, tmp_file, tmp_tarfilepath )\n if s != 0 :\n print 'Unable to upload files.'\n return s\n\n # Unpack the tar file on the server\n s = ssh_call( server,\n identity,\n 'cd ' + tgt_dir + ' && sudo tar xzf ' + tmp_file + ' && rm ' + tmp_file + ' && sudo chown -R root:root *' )\n return s", "def upload(self):\n # TODO: Should CD to the working directory set by the robscript.\n src = self.state_frame[0]\n dest = self.state_frame[1]\n self.send_upload(src, dest, True, None)\n self.state = STATE_READ_LINE", "def git_branch(self, app, branch):\n if app == self.PROJECT_NAME:\n app_path = self.PROJECT_DIR\n else:\n raise ValueError('Unknown app')\n\n with lcd(app_path):\n self.local('git pull && git checkout %s' % branch)\n\n self.display('%s has been successfully switched to tag/branch %s.' % (app, branch), color='green')", "def uploadPackages(self, directory):\n files_to_upload_dict = {}\n files_to_upload_list = [ f for f in listdir(directory) if isfile(join(directory,f)) ]\n self.logger.debug(\"uploadPackages(\" + \"{})\".format(directory))\n #print \"Files to upload:\"\n for index in range(len(files_to_upload_list)):\n self.logger.info(files_to_upload_list[index])\n self.uploadFileToIM (directory, files_to_upload_list[index], files_to_upload_list[index])\n #file_tuple = {'files':{str(files_to_upload_list[index]), open(directory + files_to_upload_list[index], 'rb'), 'application/x-rpm'}} \n #file_tuple = {str(files_to_upload_list[index]), {open(directory + files_to_upload_list[index], 'rb'), 'application/x-rpm'}}\n #file_tuple = {'files': (str(files_to_upload_list[index]), open(directory + files_to_upload_list[index], 'rb'), 'application/x-rpm')}\n #file_tuple = (str(files_to_upload_list[index]), open(directory + files_to_upload_list[index], 'rb'))\n #file_tuple = {str(files_to_upload_list[index]), open(directory + files_to_upload_list[index], 'rb'), 'application/x-rpm'}\n #files_data_to_upload_list.append(file_tuple)\n #print \"Files to upload Dictionary:\"", "def push(self):\n self.runtime.logger.info('Pushing config...')\n with Dir(self.runtime.metadata_dir):\n exectools.cmd_assert([\"git\", \"push\"])", "def _push_to_server(self) -> None:\n pass", "def deploy():\n myfile = do_pack()\n if myfile is None:\n return False\n return do_deploy(myfile)", "def deploy(branch=None, to='master', keep=False, heroku_app=HEROKU_APP):\n if branch is None:\n proc = subprocess.run(['git', 'branch'], stdout=subprocess.PIPE)\n lines = [\n line[2:]\n for line in proc.stdout.decode('utf8').splitlines()\n if line.startswith('* ')\n ]\n branch = lines[0]\n\n assert branch != to\n\n subprocess.run(['git', 'checkout', to])\n subprocess.run(['git', 'merge', branch])\n if not keep:\n subprocess.run(['git', 'branch', '--delete', branch])\n subprocess.run(['git', 'push'])\n\n migrate(heroku_app)", "def install(self, *packages):\n raise NotImplementedError", "def upload(self, ppa, **subprocess_kwargs):\n for changes in list(Path(\".\").glob(\"*changes\")):\n cmd = f\"dput {ppa} {str(changes)}\"\n self.log(cmd)\n cmd_ok(cmd, **subprocess_kwargs)\n self.cleanup_source()\n self.cleanup_debian(cwd=self.upstream_model.name)", "def all(source=None):\n package(source)\n deploy()", "def pack(**kwargs):\n require('repository')\n #if env.repository.startswith('svn://'):\n if 
env.repository.type == 'svn':\n execute(svn.pack, **kwargs)\n if env.repository.type == 'git':\n execute(git.pack, **kwargs)\n else:\n abort('Unsupported repository type %s' % env.repository)", "def install_from_repository(self) -> None:\n pass" ]
[ "0.6992921", "0.65332377", "0.6517984", "0.64805365", "0.6429827", "0.6415388", "0.64010024", "0.6369851", "0.6338582", "0.62850165", "0.6262531", "0.6237359", "0.6235568", "0.6226121", "0.61871606", "0.6132188", "0.60597724", "0.60434294", "0.60356283", "0.6010991", "0.59812975", "0.59805983", "0.59608626", "0.59449154", "0.59299654", "0.59215945", "0.59112567", "0.5909056", "0.59046596", "0.5881801", "0.5881193", "0.58637077", "0.5854998", "0.5793166", "0.57548463", "0.5736983", "0.56990504", "0.5684287", "0.5676975", "0.5676597", "0.5673277", "0.566763", "0.5652355", "0.564188", "0.5634739", "0.56334525", "0.5624732", "0.56238776", "0.56169254", "0.56115353", "0.56107426", "0.5602505", "0.55932844", "0.5574474", "0.5570179", "0.5564084", "0.5563784", "0.55619895", "0.5560875", "0.55571", "0.5547614", "0.5541943", "0.55358005", "0.55342084", "0.55328065", "0.5532595", "0.5522556", "0.54968864", "0.5491315", "0.5487683", "0.54853487", "0.5481622", "0.54753304", "0.546958", "0.5456494", "0.5454284", "0.54472464", "0.544469", "0.544352", "0.54382926", "0.5433378", "0.54169977", "0.54084766", "0.5402688", "0.5402688", "0.5402688", "0.53995997", "0.53985125", "0.5387672", "0.53790087", "0.5375535", "0.53718066", "0.5368718", "0.5361922", "0.53612304", "0.53589284", "0.5358208", "0.5341808", "0.5341587", "0.5340973" ]
0.6928756
1
Look up package ID from list of package infos.
def get_pkg_id(pkgs, name, version):
    for pinfo in pkgs:
        if pinfo["name"] == name and pinfo["version"] == version:
            return "%(name)s/%(version)s/%(id)s" % pinfo
    raise DerekError("No package %s %s in the branch" % (name, version))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bd_selectPackageList_byID(self, _c, _pckgID):\n\n result = {}\n\n _c.execute(\"SELECT id, num, desc, status, source_env, dest_env, app, last_rev FROM package WHERE id=? ORDER BY num DESC\", [_pckgID]) \n data = _c.fetchone()\n\n if data:\n result['id'] = data[0] \n result['desc'] = data[2]\n result['status'] = data[3]\n result['source_env'] = data[4]\n result['dest_env'] = data[5]\n result['app'] = data[6]\n result['last_rev'] = data[7]\n result['num'] = data[1] #Place this attribute the last because of print issue. I know this is a Dict, take it easy.\n #\n\n return result", "def get_package_id(name, version, arch, data):\n return \";\".join((name, version, arch, data))", "def get_info(self, pkgname):\n for pkg in self.rpc.info(pkgname):\n return pkg", "def get_package_info(package_name):\n r = requests.get(f'https://api.npms.io/v2/search?q={package_name}&size=1')\n response_json = r.json()\n\n if 'results' in response_json:\n result = response_json['results'][0]\n return result['package']", "def list_package_ids(self):\n raise NotImplementedError", "def get_distid(pkgtype, distslug, config):\n\n distindex = get_distributions(config)\n distributions = distindex[pkgtype]\n distname, codename = distslug.split('/')\n\n if config['debug']:\n print(\"DEBUG: Pkgtype: {} Distribution: {} Codename: {}\".\n format(pkgtype, distname, codename))\n\n for dist in distributions:\n if dist['index_name'] == distname:\n for ver in dist['versions']:\n if ver['index_name'] == codename:\n return ver['id']\n\n abort(\"No distribution id found for: {}\".format(distslug))", "def _package_ids_satisfying_requirement(pool, requirement):\n for package in pool.what_provides(requirement):\n yield pool.package_id(package)", "def get_package(self, __package_id):\n raise NotImplementedError", "def find_package(self, package_title):\n metadata = self.get_ckan_metadata()\n results = []\n for id, resource in metadata.items():\n if resource['dataset']['title'] == package_title:\n results.append(resource['dataset'])\n return results[0] if len(results) == 1 else results", "def getPackageInfo(self, pid):\n if pid == self.ROOT_PACKAGE:\n pack = RootPackage(self, OWNER).toInfoData()\n elif pid in self.packages:\n pack = self.packages[pid].toInfoData()\n pack.stats = self.db.getStatsForPackage(pid)\n else:\n pack = self.db.getPackageInfo(pid)\n\n if not pack: return None\n\n # todo: what does this todo mean?!\n #todo: fill child packs and files\n packs = self.db.getAllPackages(root=pid)\n if pid in packs: del packs[pid]\n pack.pids = packs.keys()\n\n files = self.db.getAllFiles(package=pid)\n pack.fids = files.keys()\n\n return pack", "def get_package_info():\n\n with open(hack.CONDA_ARTIFACT_FILENAME, 'r') as fn:\n pkg_location = fn.read().strip()\n pkg_name = os.path.basename(pkg_location)\n\n return pkg_location, pkg_name", "def get_item_id_from_list_by_name(self, item_name, list_of_items):\n\n for item in list_of_items:\n if item_name in item['content']:\n return item['id']\n\n return None", "def _get_item_id_for_upgrade(self, package_items, option, value,\r\n public=True):\r\n vs_id = {'memory': 3, 'cpus': 80, 'nic_speed': 26}\r\n for item in package_items:\r\n for j in range(len(item['categories'])):\r\n if not (item['categories'][j]['id'] == vs_id[option] and\r\n item['capacity'] == str(value)):\r\n continue\r\n if option == 'cpus':\r\n if public and ('Private' not in item['description']):\r\n return item['prices'][0]['id']\r\n elif not public and ('Private' in item['description']):\r\n return 
item['prices'][0]['id']\r\n elif option == 'nic_speed':\r\n if 'Public' in item['description']:\r\n return item['prices'][0]['id']\r\n else:\r\n return item['prices'][0]['id']", "def package(id = 0):\n\tresults = queries.package(id)\n\tif not results:\n\t\treturn render_template('package_not_found.html')\n\treturn render_template('package.html', package=results)", "def create_package_id(self):\n return self.create(\"TrackPackageIdentifier\")", "def test_package_id(self) -> str:\n return pulumi.get(self, \"test_package_id\")", "def get_spynl_package(name, packages=None):\n if packages is None:\n packages = get_spynl_packages()\n return next(filter(lambda p: p.project_name == name, packages), None)", "def populate_package(package_count: int) -> None:\n logging.info(f\"Fetching {package_count} packages\")\n response = CurlController.send_get_request(url=CONFIG.EXTERNAL_API.ALL_PACKAGES)\n get_version = False\n count = 0\n temp_dir = filestore.generate_temp_dir()\n # Local Testing\n # response_arr = ['Package: A3', 'Version: 1.0.0', 'Depends: R (>= 2.15.0), xtable, pbapply', 'Suggests: randomForest, e1071', 'License: GPL (>= 2)', 'MD5sum: 027ebdd8affce8f0effaecfcd5f5ade2', 'NeedsCompilation: no', '', 'Package: aaSEA', 'Version: 1.1.0', 'Depends: R(>= 3.4.0)', 'Imports: DT(>= 0.4), networkD3(>= 0.4), shiny(>= 1.0.5),', ' shinydashboard(>= 0.7.0), magrittr(>= 1.5), Bios2cor(>= 2.0),', ' seqinr(>= 3.4-5), plotly(>= 4.7.1), Hmisc(>= 4.1-1)', 'Suggests: knitr, rmarkdown', 'License: GPL-3', 'MD5sum: 0f9aaefc1f1cf18b6167f85dab3180d8', 'NeedsCompilation: no', '', 'Package: AATtools', 'Version: 0.0.1', 'Depends: R (>= 3.6.0)', 'Imports: magrittr, dplyr, doParallel, foreach', 'License: GPL-3', 'MD5sum: 3bd92dbd94573afb17ebc5eab23473cb', 'NeedsCompilation: no', '', 'Package: ABACUS', 'Version: 1.0.0', 'Depends: R (>= 3.1.0)', 'Imports: ggplot2 (>= 3.1.0), shiny (>= 1.3.1),', 'Suggests: rmarkdown (>= 1.13), knitr (>= 1.22)', 'License: GPL-3', 'MD5sum: 50c54c4da09307cb95a70aaaa54b9fbd', 'NeedsCompilation: no', '', 'Package: abbyyR', 'Version: 0.5.5', 'Depends: R (>= 3.2.0)', 'Imports: httr, XML, curl, readr, plyr, progress', 'Suggests: testthat, rmarkdown, knitr (>= 1.11), lintr', 'License: MIT + file LICENSE', 'MD5sum: e048a3bca6ea32126e6c367415c0bfaf', 'NeedsCompilation: no', '', 'Package: abc', 'Version: 2.1', 'Depends: R (>= 2.10), abc.data, nnet, quantreg, MASS, locfit', 'License: GPL (>= 3)', 'MD5sum: c9fffe4334c178917f762735aba59653', 'NeedsCompilation: no', '', 'Package: abc.data', 'Version: 1.0', 'Depends: R (>= 2.10)', 'License: GPL (>= 3)', 'MD5sum: 799079dbbdd0cfc9d9c61c3e35241806', 'NeedsCompilation: no', '', 'Package: ABC.RAP', 'Version: 0.9.0', 'Depends: R (>= 3.1.0)', 'Imports: graphics, stats, utils', 'Suggests: knitr, rmarkdown', 'License: GPL-3', 'MD5sum: 38c65a7251d28ef2462ee430ded95700', 'NeedsCompilation: no', '', 'Package: abcADM', 'Version: 1.0', 'Imports: Rcpp (>= 1.0.1)', 'LinkingTo: Rcpp, BH', 'License: GPL-3', 'MD5sum: 8134f67912b506194e3dab4ccd6e75f7', 'NeedsCompilation: yes', '', 'Package: ABCanalysis', 'Version: 1.2.1', 'Depends: R (>= 2.10)', 'Imports: plotrix', 'License: GPL-3', 'MD5sum: 678e03837e25a922bf71bafe1f8de617', 'NeedsCompilation: no', '', 'Package: abcdeFBA', 'Version: 0.4', 'Depends: Rglpk,rgl,corrplot,lattice,R (>= 2.10)', 'Suggests: LIM,sybil', 'License: GPL-2', 'MD5sum: c84d45a85d8ab6bbe517365e8845db83', 'NeedsCompilation: no', '', 'Package: ABCoptim', 'Version: 0.15.0', 'Imports: Rcpp, graphics, stats, utils', 'LinkingTo: Rcpp', 'Suggests: testthat, covr', 
'License: MIT + file LICENSE', 'MD5sum: a62ed03650273c09899655065437078f', 'NeedsCompilation: yes', '', 'Package: ABCp2', 'Version: 1.2', 'Depends: MASS', 'License: GPL-2', 'MD5sum: e920282d5a369df71e15241be40cb60e', 'NeedsCompilation: no', '', 'Package: abcrf', 'Version: 1.8.1', 'Depends: R(>= 3.1)', 'Imports: readr, MASS, matrixStats, ranger, doParallel, parallel,', ' foreach, stringr, Rcpp (>= 0.11.2)', 'LinkingTo: Rcpp, RcppArmadillo', 'License: GPL (>= 2)', 'MD5sum: 4d5a304f46d117226791523cef4e2427', 'NeedsCompilation: yes', '', 'Package: abcrlda', 'Version: 1.0.3', 'Imports: stats', 'License: GPL-3', 'MD5sum: 651e6e18e08916b443aaf011b5a63525', 'NeedsCompilation: no', '', 'Package: abctools', 'Version: 1.1.3', 'Depends: R (>= 2.10), abc, abind, parallel, plyr, Hmisc', 'Suggests: ggplot2, abc.data', 'License: GPL (>= 2)', 'MD5sum: c5937b65837ef7e6bfbe141cea257f40', 'NeedsCompilation: yes', '', 'Package: abd', 'Version: 0.2-8', 'Depends: R (>= 3.0), nlme, lattice, grid, mosaic', 'Suggests: boot, car, ggplot2, plyr, HH, ICC, vcd, Hmisc', 'License: GPL-2', 'MD5sum: 1913d76a0fbc44222709381f63f385b9', 'NeedsCompilation: no', '', 'Package: abdiv', 'Version: 0.2.0', 'Imports: ape', 'Suggests: testthat (>= 2.1.0), vegan', 'License: MIT + file LICENSE', 'MD5sum: 80931c0ca85ba5386000bf617552c5ce', 'NeedsCompilation: no', '', 'Package: abe', 'Version: 3.0.1', 'License: GPL (>= 2)', 'MD5sum: 9c151db5397422c8927dee41dabfbfab', 'NeedsCompilation: no', '', 'Package: abess', 'Version: 0.3.0', 'Depends: R (>= 3.1.0)', 'Imports: Rcpp, MASS, methods, Matrix', 'LinkingTo: Rcpp, RcppEigen', 'Suggests: testthat, knitr, rmarkdown', 'License: GPL (>= 3) | file LICENSE', 'MD5sum: e0ea7d068147c49c011c7135ab290bd3', 'NeedsCompilation: yes', '', 'Package: abf2', 'Version: 0.7-1', 'License: Artistic-2.0', 'MD5sum: 6792a51c6fb3e239165d69aa8a71d3cd', 'NeedsCompilation: no', '', 'Package: abglasso', 'Version: 0.1.1', 'Imports: MASS, pracma, stats, statmod', 'Suggests: testthat', 'License: GPL-3', 'MD5sum: 18bd0759cd005c5ac6fb515799b3f3d8', 'NeedsCompilation: no', '', 'Package: ABHgenotypeR', 'Version: 1.0.1', 'Imports: ggplot2, reshape2, utils', 'Suggests: knitr, rmarkdown', 'License: GPL-3', 'MD5sum: ca4397ba7390c0e0a3728c0cda864494', 'NeedsCompilation: no', '', 'Package: abind', 'Version: 1.4-5', 'Depends: R (>= 1.5.0)', 'Imports: methods, utils', 'License: LGPL (>= 2)', 'MD5sum: 136f981e1c4f618b64a87faaa7797c97', 'NeedsCompilation: no', '', 'Package: abjutils', 'Version: 0.3.1', 'Depends: R (>= 4.0)', 'Imports: dplyr, magrittr, purrr, rlang, rstudioapi, stringi, stringr,', ' tidyr', 'Suggests: testthat', 'License: MIT + file LICENSE', 'MD5sum: a596c07aaa7f82e5d123b2f7354e5b55', 'NeedsCompilation: no', '', 'Package: abmR', 'Version: 1.0.2', 'Depends: R (>= 3.5)', 'Imports: sp, rgdal, table1, googledrive, swfscMisc, geosphere,', ' kableExtra, gtsummary, ggplot2, gstat, purrr, rnaturalearth,', ' rnaturalearthdata, sf, tmap, raster, utils, stats, methods,', ' rgeos', 'Suggests: jpeg, knitr', 'License: GPL (>= 3)', 'MD5sum: cf96d']\n response_arr = response.decode(\"utf-8\").split(\"\\n\")\n with temp_dir:\n for item in response_arr:\n if count >= package_count:\n break\n if get_version:\n # Fetching the version, once we have the package name\n package_version = Command.get_package_version(item=item)\n if package_version:\n # Generating the required URL for the package to fetch the details\n package_url = Template(\n CONFIG.EXTERNAL_API.PACKAGE_DETAIL\n ).substitute(\n package_name=package_name,\n separator=\"_\",\n 
package_version=package_version,\n )\n logging.info(f\"Downloading {package_url}\")\n # Downloading the details of the package and extracting the DESCRIPTION file\n extract_file_path = filestore.join_paths(\n prefix=package_name,\n suffix=CONFIG.EXTERNAL_API.DETAIL_FILE_NAME,\n )\n target_dir = filestore.download_file(\n url=package_url,\n temp_dir=temp_dir,\n extract_file_path=extract_file_path,\n )\n # Reading contents of DESCRIPTION file\n package_details = filestore.join_paths(\n prefix=temp_dir.name,\n suffix=extract_file_path,\n )\n with open(package_details) as details_file:\n for line in details_file:\n if line.startswith(PackageInfoPrefix.PUBLICATION_DATE):\n publication_time_str = (\n Command.get_publication_timestamp(line)\n )\n publication_timestamp = (\n datetime_util.string_to_datetime(\n publication_time_str\n )\n )\n elif line.startswith(PackageInfoPrefix.TITLE):\n title = Command.get_package_title(line)\n elif line.startswith(PackageInfoPrefix.DESCRIPTION):\n description = Command.get_package_description(line)\n elif line.startswith(PackageInfoPrefix.AUTHOR):\n (\n author_name,\n author_email,\n ) = Command.get_package_author(line)\n elif line.startswith(PackageInfoPrefix.MAINTAINER):\n (\n maintainer_name,\n maintainer_email,\n ) = Command.get_package_maintainer(line)\n\n package_info_dict = {\n \"name\": package_name,\n \"version\": package_version,\n \"publication_timestamp\": publication_timestamp,\n \"title\": title,\n \"description\": description,\n \"author_name\": author_name,\n \"author_email\": author_email,\n \"maintainer_name\": maintainer_name,\n \"maintainer_email\": maintainer_email,\n }\n logging.info(package_info_dict)\n obj = PackageManager.create_object(\n create_data=package_info_dict\n )\n if obj == CONFIG.DB.FAILURE:\n raise Exception(f\"Could not insert package in DB\")\n count += 1\n get_version = False\n # Fetching the package name\n package_name = Command.get_package_name(item=item)\n if package_name:\n get_version = True", "def get_package_id(self, ver):\n assert isinstance(ver, apt.package.Version)\n return \"%s;%s;%s;\" % (ver.package.shortname, ver.version, ver.package.architecture())", "def get_closest_nuget_package_name(query):\n url_nuget_service = \"https://api.nuget.org/v3/index.json\"\n url_nuget_search = \"\"\n\n api_resources = requests.get(url_nuget_service).json()\n for resource in api_resources.get(\"resources\") or []:\n if resource.get(\"@type\") == \"SearchQueryService\":\n url_nuget_search = resource[\"@id\"]\n break\n\n if url_nuget_search:\n url_query = urljoin(url_nuget_search, f\"?q={query}\")\n query_response = requests.get(url_query).json()\n if query_response.get(\"data\"):\n return query_response[\"data\"][0][\"id\"]", "def _find_rpms_in_packages(koji_api, name_list, major_minor):\n rpms_for_package = {}\n tags = _tags_for_version(major_minor)\n for package in name_list:\n for tag in tags:\n for build in koji_api.getLatestBuilds(tag=tag, package=package):\n rpm_list = set(rpm[\"name\"] for rpm in koji_api.listBuildRPMs(build[\"build_id\"]))\n rpms_for_package.setdefault(package, set()).update(rpm_list)\n\n if package not in rpms_for_package:\n # it wasn't in our tags; look for it by name\n pkg_info = koji_api.getPackage(package)\n if not pkg_info:\n continue\n latest_builds = koji_api.listBuilds(packageID=pkg_info[\"id\"], state=1, queryOpts=dict(limit=1))\n if not latest_builds:\n continue\n rpm_list = set(rpm[\"name\"] for rpm in koji_api.listBuildRPMs(latest_builds[0][\"build_id\"]))\n rpms_for_package[package] = 
set(rpm_list)\n\n return rpms_for_package", "def get_python_package_info(name):\n command = [\"python\", \"setup.py\", \"--name\"]\n with subprocess.Popen(command, stdout=subprocess.PIPE) as proc:\n assert proc.stdout.readline().strip().decode(\"utf-8\") == name\n\n command = [\"python\", \"setup.py\", \"--version\"]\n with subprocess.Popen(command, stdout=subprocess.PIPE) as proc:\n release_version = proc.stdout.readline().strip().decode(\"utf-8\")\n\n command = [\"python\", \"setup.py\", \"--url\"]\n with subprocess.Popen(command, stdout=subprocess.PIPE) as proc:\n github_url = proc.stdout.readline().strip().decode(\"utf-8\")\n\n github_repo = urlparse(github_url)\n assert github_repo.netloc == \"github.com\", \"specified repo is not on GitHub\"\n return (release_version, github_repo)", "def match_pkg(cache, serial, pkg):\n if pkg in cache:\n matched_packages.append([serial, pkg])\n elif ' ' in pkg:\n pkg2 = pkg.replace(' ', '-')\n match_pkg(cache,serial,pkg2)\n # # if pkg2 in cache:\n # matched_packages.append([serial, pkg2]) #\n else:\n not_found_packages.append([serial, pkg])", "def __extract_package_version(package_string):\n # remove leading whitespace\n package_string = package_string.strip()\n # create a re parser\n compil = re.compile(r'(?P<name>.+(-[^-])*)-(?P<version>.+)')\n # search package name and version\n search = compil.search(package_string)\n # retrieve result as list\n output = search.groupdict()\n\n return output", "def test_package_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"test_package_id\")", "def get_packages(packages):\n for package in package_data:\n p_id = package[\"packageId\"]\n p_lat = package[\"destination\"][\"latitude\"]\n p_long = package[\"destination\"][\"longitude\"]\n deadline = package[\"deadline\"]\n\n p = Package(p_id, Location(p_lat, p_long), deadline)\n p.set_latest_departure(DRONE_SPEED_kms, DEPOT)\n packages_list.append(p)", "def bootstrap_package_id(self) -> str:\n return pulumi.get(self, \"bootstrap_package_id\")", "def findPackages(self, pkgcode, packages=None):\n if not packages:\n if (self.system_pkgcode and pkgcode == self.system_pkgcode):\n scanlist = ['language-pack', 'language-support-fonts', 'language-support-input', 'language-support-writing']\n else:\n scanlist = ['language-pack']\n for x in scanlist:\n pkg = '%s-%s' % (x, pkgcode)\n if pkg in self._cache:\n if not self._cache[pkg].is_installed and \\\n not self._cache[pkg].marked_install:\n self.missing.add(pkg)\n else:\n self.installed.add(pkg)\n \n if pkgcode in self.pkg_translations:\n for (pkg, translation) in self.pkg_translations[pkgcode]:\n if packages:\n if pkg in packages and \\\n pkg in self._cache and \\\n translation in self._cache:\n if ((not self._cache[translation].is_installed and \\\n not self._cache[translation].marked_install and \\\n not self._cache[translation].marked_upgrade) or \\\n self._cache[translation].marked_delete):\n self.missing.add(translation)\n else:\n self.installed.add(translation)\n else:\n if pkg in self._cache and \\\n (self._cache[pkg].is_installed or \\\n self._cache[pkg].marked_install or \\\n self._cache[pkg].marked_upgrade) and \\\n translation in self._cache:\n if ((not self._cache[translation].is_installed and \\\n not self._cache[translation].marked_install and \\\n not self._cache[translation].marked_upgrade) or \\\n self._cache[translation].marked_delete):\n self.missing.add(translation)\n else:\n self.installed.add(translation)\n \n if pkgcode in self.pkg_writing and \\\n (pkgcode == self.system_pkgcode 
or \\\n ('language-support-writing-%s' % pkgcode in self._cache and \\\n self._cache['language-support-writing-%s' % pkgcode].is_installed) or \\\n ('language-support-writing-%s' % pkgcode in self._cache and \\\n self._cache['language-support-writing-%s' % pkgcode].mark_install) or \\\n ('language-support-writing-%s' % pkgcode in self._cache and \\\n self._cache['language-support-writing-%s' % pkgcode].markUpgrade)):\n for (pkg, pull_pkg) in self.pkg_writing[pkgcode]:\n if '|' in pkg:\n # multiple dependencies, if one of them is installed, pull the pull_pkg\n for p in pkg.split('|'):\n if packages:\n if p in packages and \\\n p in self._cache and \\\n pull_pkg in self._cache:\n if ((not self._cache[pull_pkg].is_installed and \\\n not self._cache[pull_pkg].marked_install and \\\n not self._cache[pull_pkg].marked_upgrade) or \\\n self._cache[pull_pkg].marked_delete):\n self.missing.add(pull_pkg)\n else:\n self.installed.add(pull_pkg)\n break\n else:\n if p in self._cache and \\\n (self._cache[p].is_installed or \\\n self._cache[p].marked_install or \\\n self._cache[p].marked_upgrade) and \\\n pull_pkg in self._cache:\n if ((not self._cache[pull_pkg].is_installed and \\\n not self._cache[pull_pkg].marked_install and \\\n not self._cache[pull_pkg].marked_upgrade) or \\\n self._cache[pull_pkg].marked_delete):\n self.missing.add(pull_pkg)\n else:\n self.installed.add(pull_pkg)\n break\n else:\n if packages:\n if pkg in packages and \\\n pkg in self._cache and \\\n pull_pkg in self._cache:\n if ((not self._cache[pull_pkg].is_installed and \\\n not self._cache[pull_pkg].marked_install and \\\n not self._cache[pull_pkg].marked_upgrade) or \\\n self._cache[pull_pkg].marked_delete):\n self.missing.add(pull_pkg)\n else:\n self.installed.add(pull_pkg)\n else:\n if pkg in self._cache and \\\n (self._cache[pkg].is_installed or \\\n self._cache[pkg].marked_install or \\\n self._cache[pkg].marked_upgrade) and \\\n pull_pkg in self._cache:\n if ((not self._cache[pull_pkg].is_installed and \\\n not self._cache[pull_pkg].marked_install and \\\n not self._cache[pull_pkg].marked_upgrade) or \\\n self._cache[pull_pkg].marked_delete):\n self.missing.add(pull_pkg)\n else:\n self.installed.add(pull_pkg)", "def get_package(self, package_id):\n return self._package_cache.get(package_id)", "def findSymbolByItemnum(itemnum, libs_dict):\n e_itemnum = re.escape(itemnum)\n for libname, dat in libs_dict.items():\n m = re.search(r'^DEF ([^ ]*) .*(?:\\n[^\\$].+)+\\nF ?\\d+ \"'+e_itemnum+r'\".* \"Item Number\"\\n', dat, re.MULTILINE)\n try:\n symname = m.group(1)\n return libname, symname\n except:\n continue\n return None, None", "def getRegisteredPackageName(*args):\n return _libsbml.SBMLExtensionRegistry_getRegisteredPackageName(*args)", "def bootstrap_package_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"bootstrap_package_id\")", "def _find_info(list_, name):\n output = None\n for sublist in list_:\n if name in sublist:\n if len(sublist) > 1:\n output = sublist[1]\n\n return output", "def _find_koji_task_result(package, rpm_list):\n for rpm in rpm_list:\n if package == _get_koji_task_result_package_name(rpm):\n return rpm\n return None", "def getpackinfo(package_name: str) -> Dict[str, str]:\n\t# execute command\n\tproc = subprocess.Popen(['pip', 'show', package_name], stdout=subprocess.PIPE)\n\tproc.wait()\n\n\t# get and treate return\n\tlines = proc.stdout.read().decode('utf8')\n\tinfo = {}\n\tlines = list(map(lambda inf: inf.split(': '), lines.split('\\n')))\n\n\t# process retun\n\tfor line in 
lines:\n\t\tkey = line[0].lower()\n\t\tif not not key and len(key) > 0:\n\t\t\tvalue = line[1]\n\t\t\tif key == 'name':\n\t\t\t\tinfo[key] = value.lower()\n\t\t\telif key == 'requires':\n\t\t\t\tinfo[key] = list(map(lambda x: x.strip(), value.lower().split(','))) if value else []\n\t\t\telif key == 'required-by':\n\t\t\t\tinfo[key] = list(map(lambda x: x.strip(), value.lower().split(','))) if value else []\n\t\t\telse:\n\t\t\t\tinfo[key] = value\n\n\treturn info", "def get(self, id):\n pkg_key, component_key = id\n if pkg_key not in self.packages:\n raise Exception(\"Package not found while looking for id: %s \" % repr(id))\n p = self.packages[pkg_key]\n if component_key not in p.components:\n raise Exception(\"Component %s not found in package %s.\" % (component_key, pkg_key))\n return p.components[component_key]", "def split_package_id(id):\n return id.split(\";\", 4)", "def _extract_packages(self, index: int, packages: Packages) -> Packages:\n log = self[index]\n extracted_packages = Packages()\n for package in packages:\n package_expression = re.compile(f\"(({package.name})(=[a-z0-9_=.]+)?)\")\n spec = package_expression.search(log).group(0)\n extracted_packages.append_spec(spec)\n return extracted_packages", "def get_vul_info(vul_info):\n packages = []\n if vul_info.get('fixes') is None:\n return packages\n for fixes in vul_info['fixes']:\n packages.extend(get_package_os(fixes))\n return packages", "def spdx_package(self) -> Optional[pulumi.Input['PackageInfoNoteArgs']]:\n return pulumi.get(self, \"spdx_package\")", "def get_canonical_name(package_name, index_urls=None, *args):\n # type: (str, Optional[Iterable[str]], str) -> str\n if not index_urls: # pragma: no cover\n index_urls = {pip.models.PyPI.simple_url}\n for index_url in index_urls:\n response = requests.get(index_url, stream=True)\n parser = PyPiHtmlParser(search=package_name)\n for line in response.iter_lines():\n parser.feed(six.text_type(line, response.encoding, 'ignore'))\n if parser.state == PyPiHtmlParserState.found_package_name:\n parser.close()\n return parser.collected_packages[-1]\n parser.close()\n raise pip.exceptions.DistributionNotFound(\n 'No matching distribution found for {}'.format(package_name))", "def get_package_info(manifest_abs_path, package_name):\n assert os.path.isabs(manifest_abs_path)\n\n command = [\n \"cargo\",\n \"metadata\",\n \"--format-version=1\",\n \"--no-deps\",\n f\"--manifest-path={manifest_abs_path}\",\n ]\n\n with subprocess.Popen(command, stdout=subprocess.PIPE) as proc:\n metadata_str = proc.stdout.readline()\n\n metadata = json.loads(metadata_str)\n packages = metadata[\"packages\"]\n assert len(packages) == 1, \"Unexpected Cargo metadata format\"\n package = packages[0]\n assert package[\"name\"] == package_name, (\n f'crate name in Cargo.toml ({package[\"name\"]}) != specified'\n f\"package name ({package_name})\"\n )\n github_repo = urlparse(package[\"repository\"].rstrip(\"/\"))\n assert github_repo.netloc == \"github.com\", \"specified repo is not on GitHub\"\n return (package[\"version\"], github_repo)", "def find_packages(self, name):\n return tuple(self._name_to_packages.get(name, self._default_factory()))", "def find_pkg(self, pkg):\n pass", "def get_id(self, app_name):\n _id = []\n apps = [app for app in self.applications.response if app.name == app_name]\n if len(apps) > 0:\n return apps[0].id", "def BuildIdsPaths(package_paths):\n\n build_ids_paths = map(\n lambda package_path: os.path.join(\n os.path.dirname(package_path), 'ids.txt'),\n package_paths)\n return 
build_ids_paths", "def convert_pkg_info(pkg_url_list):\n package_urls = {}\n for pkg_url in pkg_url_list:\n package_urls.update(\n {\n pkg_url.get(\"packagetype\"): {\n \"md5\": pkg_url.get(\"digests\").get(\"md5\"),\n \"sha256\": pkg_url.get(\"digests\").get(\"sha256\"),\n \"filename\": pkg_url.get(\"filename\"),\n \"size\": pkg_url.get(\"size\"),\n \"upload_time\": pkg_url.get(\"upload_time\"),\n \"url\": pkg_url.get(\"url\"),\n }\n }\n )\n return package_urls", "def get_bare_metal_package_id(self):\r\n packages = self.client['Product_Package'].getAllObjects(\r\n mask='mask[id, name]',\r\n filter={'name': query_filter('Bare Metal Instance')})\r\n\r\n hw_id = 0\r\n for package in packages:\r\n if 'Bare Metal Instance' == package['name']:\r\n hw_id = package['id']\r\n break\r\n\r\n return hw_id", "def get_project_id(group, name):\n response = urllib.request.urlopen(\"https://abf.io/api/v1/search.json?type=projects&query=\" + name)\n decode = response.read().decode(\"UTF-8\")\n projects = json.loads(decode)['results']['projects']\n projects = [x for x in projects if \"fullname\" in x and x['fullname'] == group + \"/\" + name]\n if len(projects) > 0:\n project = projects[0]\n return project['id'], project['git_url']\n else:\n return None", "def get_package_by_id(cls, packageId, raiseFlag=True):\n package = Package.query.filter_by(id=packageId).first()\n if (package is None) and raiseFlag:\n raise DbException(404, \"Package with id: %r not found\"%(packageId))\n return package", "def extract_packages_and_versions_including_duplicates(self, output):\n self.composite_logger.log_debug(\"\\nExtracting package and version data...\")\n packages = []\n versions = []\n package_extensions = ['.x86_64', '.noarch', '.i686']\n\n def is_package(chunk):\n # Using a list comprehension to determine if chunk is a package\n return len([p for p in package_extensions if p in chunk]) == 1\n\n lines = output.strip().split('\\n')\n\n for line_index in range(0, len(lines)):\n line = re.split(r'\\s+', lines[line_index].strip())\n next_line = []\n\n if line_index < len(lines) - 1:\n next_line = re.split(r'\\s+', lines[line_index + 1].strip())\n\n # If we run into a length of 3, we'll accept it and continue\n if len(line) == 3 and is_package(line[0]):\n packages.append(self.get_product_name(line[0]))\n versions.append(line[1])\n # We will handle these two edge cases where the output is on\n # two different lines and treat them as one line\n elif len(line) == 1 and len(next_line) == 2 and is_package(line[0]):\n packages.append(self.get_product_name(line[0]))\n versions.append(next_line[0])\n line_index += 1\n elif len(line) == 2 and len(next_line) == 1 and is_package(line[0]):\n packages.append(self.get_product_name(line[0]))\n versions.append(line[1])\n line_index += 1\n else:\n self.composite_logger.log_debug(\" - Inapplicable line (\" + str(line_index) + \"): \" + lines[line_index])\n\n return packages, versions", "def get_apid_by_apname(self, apname):\r\n \r\n for ap in self.aps:\r\n if ap.name == apname:\r\n return ap.get_id()\r\n return None", "def collect_pypi_data():\n\n rclient = xmlrpc.client.ServerProxy('http://pypi.python.org/pypi')\n python = {'Programming Language :: Python': rclient.browse(['Programming Language :: Python'])}\n python_two = {}\n python_three = {}\n\n for classifier in [\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.3',\n 'Programming Language :: Python :: 2.4',\n 'Programming Language :: Python :: 2.5',\n 'Programming Language :: Python :: 2.6',\n 
'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 2 :: Only']:\n python_two[classifier] = rclient.browse([classifier])\n\n for classifier in [\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.0',\n 'Programming Language :: Python :: 3.1',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4']:\n python_three[classifier] = rclient.browse([classifier])\n\n return {datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S'):\n {'python': python,\n 'python_two': python_two,\n 'python_three': python_three}}", "def get_all_package_ids(self):\n return self._package_cache.keys()", "def _get_package_items(self):\r\n mask = \"mask[description,capacity,prices.id,categories[name,id]]\"\r\n package = self.client['Product_Package']\r\n return package.getItems(id=46, mask=mask)", "def _parse_package_data(self, package_id):\r\n package = self.client['Product_Package']\r\n\r\n results = {\r\n 'categories': {},\r\n 'locations': []\r\n }\r\n\r\n # First pull the list of available locations. We do it with the\r\n # getObject() call so that we get access to the delivery time info.\r\n object_data = package.getRegions(id=package_id)\r\n\r\n for loc in object_data:\r\n details = loc['location']['locationPackageDetails'][0]\r\n\r\n results['locations'].append({\r\n 'delivery_information': details.get('deliveryTimeInformation'),\r\n 'keyname': loc['keyname'],\r\n 'long_name': loc['description'],\r\n })\r\n\r\n mask = 'mask[itemCategory[group]]'\r\n\r\n for config in package.getConfiguration(id=package_id, mask=mask):\r\n code = config['itemCategory']['categoryCode']\r\n group = NestedDict(config['itemCategory']) or {}\r\n category = {\r\n 'sort': config['sort'],\r\n 'step': config['orderStepId'],\r\n 'is_required': config['isRequired'],\r\n 'name': config['itemCategory']['name'],\r\n 'group': group['group']['name'],\r\n 'items': [],\r\n }\r\n\r\n results['categories'][code] = category\r\n\r\n # Now pull in the available package item\r\n for category in package.getCategories(id=package_id):\r\n code = category['categoryCode']\r\n items = []\r\n\r\n for group in category['groups']:\r\n for price in group['prices']:\r\n items.append({\r\n 'id': price['itemId'],\r\n 'description': price['item']['description'],\r\n 'sort': price['sort'],\r\n 'price_id': price['id'],\r\n 'recurring_fee': price.get('recurringFee'),\r\n 'setup_fee': price.get('setupFee'),\r\n 'hourly_recurring_fee':\r\n price.get('hourlyRecurringFee'),\r\n 'one_time_fee': price.get('oneTimeFee'),\r\n 'labor_fee': price.get('laborFee'),\r\n 'capacity': float(price['item'].get('capacity', 0)),\r\n })\r\n results['categories'][code]['items'] = items\r\n\r\n return results", "def get_id(self, item):\n return self.exist_products.get(hash(item['title']))", "def get_target_from_pkg_deps(self, matches):\n highest = None\n pkg_name = None\n\n pkgs=[]\n\n for match in matches:\n pkg_name = match['pkg']\n if match['slot'] and match['slot'] != '0':\n pkg_name += '-' + match['slot']\n try:\n pkg = self.manager.get_package(pkg_name)\n pkgs.append(pkg)\n except:\n pass\n\n deep_pkgs = self.get_needed_packages(*pkgs)\n\n for pkg in deep_pkgs:\n try:\n target = pkg.target()\n if not highest:\n highest = target\n pkg_name = pkg.name()\n if self.version_cmp(highest, target) < 0:\n highest = target\n pkg_name = pkg.name()\n except:\n pass\n\n return pkg_name, highest", "def get_package_data(name, package=None):\n if not package:\n package = 
models.Package(name=name)\n releases = {}\n else:\n releases = package.get_all_releases()\n\n client = xmlrpclib.ServerProxy('http://pypi.python.org/pypi', transport=Urllib2Transport())\n\n versions = client.package_releases(package.name, True)\n\n # package_releases() method is case-sensitive, if nothing found\n # then we search for it\n # XXX: Ask pypi to make it case-insensitive?\n if not versions:\n for item in client.search({'name': name}):\n if name.lower() == item['name'].lower():\n package.name = name = item['name']\n break\n else:\n logger.info(\"No packages found matching %r\", name)\n return\n\n # Retry retrieving the versions with the new/correct name\n versions = client.package_releases(package.name, True)\n\n # Save the package if it is new\n if not package.pk:\n package.save()\n\n for version in versions:\n release, files = releases.get(version, (None, {}))\n if not release:\n release = models.Release(package=package, version=version)\n release.save()\n\n data = client.release_data(package.name, release.version)\n\n release_form = forms.PypiReleaseDataForm(data, instance=release)\n if release_form.is_valid():\n release_form.save()\n\n release_files = client.package_urls(package.name, release.version)\n for info in release_files:\n release_file = files.get(info['filename'])\n if not release_file:\n release_file = models.ReleaseFile(\n release=release, filename=info['filename'])\n\n release_file.python_version = info['python_version']\n release_file.filetype = info['packagetype']\n release_file.url = info['url']\n release_file.size = info['size']\n release_file.md5_digest = info['md5_digest']\n release_file.save()\n\n package.update_timestamp = now()\n package.save()", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def pipi(pkgs:str)->int:\n return pipil(pkgs.split())", "def get_package_list(*args):\n logger.debug(\"Loading package list\")\n package_list_filepath = Base.get_package_list_filepath()\n\n # check package list file is valid\n logger.debug(\"Checking filepath\")\n package_list_file_valid = Base.check_package_list_file(package_list_filepath)\n if package_list_file_valid == 1:\n logger.error(\"Package file missing, please run `gitget setup`\")\n exit(1)\n elif package_list_file_valid == 2:\n logger.error(\n \"Package file is a directory, please remove `.gitget.yaml` and run `gitget setup`\"\n )\n exit(1)\n elif package_list_file_valid == 0:\n logger.debug(\"Package file found\")\n\n # try loading the file\n logger.debug(\"Attempting to load file\")\n try:\n with open(package_list_filepath) as file:\n package_list = yaml.safe_load(file)\n except Exception as ex:\n logger.error(\"Could not load package list due to the following error:\")\n logger.error(ex)\n exit(1)\n logger.debug(\"Package list loaded\")\n\n # if the list is NONE, set to an empty dictionary to prevent iteration errors\n logger.debug(\"Checking if package list is None\")\n if package_list is None:\n package_list = {}\n logger.debug(\"Package list has no content, set to empty dict\")\n return package_list", "def get_package_info(package_name):\n \n info_command_template = \"yum info installed --cacheonly {}\"\n info_command = info_command_template.format(package_name)\n\n # subprocess wants a list of arguments, not a single string\n try:\n package_info = subprocess.check_output(info_command.split(),\n 
stderr=subprocess.STDOUT)\n package_info = str(package_info)\n \n except subprocess.CalledProcessError as e:\n print(e)\n package_info = \"\"\n\n return package_info.split(\"\\n\")", "def preprocess_raw_pkg_ids(self, raw_pkg_ids):\n return raw_pkg_ids", "def create_aiida_package_entry(self, aiida_version):\n # directly return if aiida-core is defined to be installed from source\n if utils.assert_package_is_source(aiida_version):\n return aiida_version\n # otherwise: check if version is OK an build the package definition\n # understood by PiP\n else:\n # unpack will also unpack defs of type 1.4.4[extras]\n version, extras = utils.unpack_raw_package_input(aiida_version)\n if utils.assert_valid_aiida_version(version):\n return \"aiida-core=={}{}\".format(version, extras)\n else:\n raise Exception(\"Defined AiiDA version '{}' is malformed!\"\n .format(aiida_version))", "def get_packages(name_only=False):\n\n packages = list()\n\n dtf_db = sqlite3.connect(DTF_DB)\n cur = dtf_db.cursor()\n\n # This just returns the name\n if name_only:\n\n sql = ('SELECT name '\n 'FROM packages ')\n\n for pack in cur.execute(sql):\n packages.append(pack[0])\n\n # This returns a list of items\n else:\n\n sql = ('SELECT name, version, '\n 'about, author '\n 'FROM packages '\n 'ORDER BY name')\n\n cur.execute(sql)\n\n while True:\n\n item = dtf.core.item.Item()\n line = cur.fetchone()\n if line is None:\n break\n\n item.type = dtf.core.item.TYPE_PACKAGE\n item.name = line[0]\n item.version = line[1]\n item.about = line[2]\n item.author = line[3]\n\n packages.append(item)\n\n return packages", "def parsePackages(self, packages_list) -> None:\n\t\tif self.package_manager == \"apt\":\n\t\t\tfor package in packages_list:\n\t\t\t\tpackage = package.strip().split(\" \")\n\t\t\t\tname = package[0].split(\"/\")[0]\n\t\t\t\tversion = package[1]\n\t\t\t\tarchitecture = package[2]\n\t\t\t\tself.installed_packages.add(Package(name=name, version=version, architecture=architecture))\n\t\telse:\n\t\t\tlogger.error(\"Package manager parser not supported.\")\n\t\t\traise ValueError(\"Package manager unsupported\")\n\t\tlogger.info(\"Packages parsed successfully\")", "def package(self) -> Optional[pulumi.Input['PackageArgs']]:\n return pulumi.get(self, \"package\")", "def get_version_information(input_list, ecosystem):\n str_query = \"data=[]; \"\n for package in input_list:\n str_query += \"pkg = g.V().has('ecosystem', '{eco}').has('name', '{pkg}'); \" \\\n \"lnv = []; pkg.clone().values('latest_non_cve_version', \" \\\n \"'latest_version').fill(lnv); pkg.clone().as('package').V().\" \\\n \"has('pecosystem', '{eco}').has('pname', '{pkg}').\" \\\n \"has('version', within(lnv)).as('version').\" \\\n \"select('package', 'version').by(valueMap()).fill(data);\".format(\n eco=ecosystem,\n pkg=package)\n str_query += \"data\"\n payload = {\n 'gremlin': str_query\n }\n\n # Query Gremlin with packages list to get their version information\n gremlin_response = execute_gremlin_dsl(url=GREMLIN_SERVER_URL_REST, payload=payload)\n if gremlin_response is None:\n return []\n response = get_response_data(gremlin_response, [{0: 0}])\n return response", "def koji_pkgid2pkgs(kapi, pkgid):\n ret = []\n for binfo in kapi.listBuilds(packageID=pkgid):\n pkg = _koji_buildinfo2pkg(kapi, binfo)\n ret.append(pkg)\n return ret", "def find_package(self, name, version):\n candidates = self._name_to_packages.get(name, self._default_factory())\n for candidate in candidates:\n if candidate.version == version:\n return candidate\n package_string = 
'{0}-{1}'.format(name, str(version))\n raise NoPackageFound(\n Requirement.from_package_string(package_string),\n \"Package '{0}' not found\".format(package_string),\n )", "def get_import_id(import_name, model, workspace, header, user):\n uri = (\"https://api.anaplan.com/1/3/workspaces/{}/models/{}/\"\n \"imports/\").format(workspace, model)\n response = requests.get(uri, headers = header)\n response_json = json.loads(response.text.encode(\"utf-8\"))\n for imp in response_json:\n if imp[u\"name\"] == unicode(import_name):\n return imp[u\"id\"]", "def package_for_view(view):\n if view.file_name() is not None:\n spp = sublime.packages_path()\n if view.file_name().startswith(spp):\n file_name = view.file_name()[len(spp)+1:]\n for pkg_name, pkg_info in help_index_list().items():\n if file_name.startswith(pkg_info.doc_root):\n return pkg_info\n\n return None", "def show_packagelist(user, repo, packages, distro=False, version=False,\n name=False, match=False, pkgtype=False):\n\n print('Currently {}/{} contains these matching packages:'.format(\n user, repo))\n\n numpkgs = 0\n for package in packages:\n if (distro and not package['distro_version'] == distro) or \\\n (version and not package['version'] == version) or \\\n (name and not package['name'] == name) or \\\n (pkgtype and not package['type'] == pkgtype) or \\\n (match and match not in package['filename']):\n continue\n\n print(fmt_pkg(user, repo, package))\n numpkgs += 1\n\n print(\"Repo contains {} matching packages.\".format(numpkgs))", "def __getitem__(self, package):\n\n\t\treturn self._packages.setdefault(\n\t\t\tpackage,\n\t\t\tPackage(package)\n\t\t)", "def get_packages(self, package_ids):\n return [self.get_package(package_id) for package_id in package_ids]", "def search_package(package, satellite_connection, satellite_connection_auth):\n package_sat_result = satellite_connection.packages.search.name(satellite_connection_auth, package)\n return package_sat_result", "def bandwidth_package_id(self) -> str:\n return pulumi.get(self, \"bandwidth_package_id\")", "def package(self, pkg_name):\n return self._pkgs[pkg_name]", "def _get_info(self, fullmodname):\n parts = fullmodname.split('.')\n submodname = parts[-1]\n modpath = '/'.join(parts)\n for suffix, is_package in _SEARCH_ORDER:\n relpath = modpath + suffix\n try:\n self.datablocks[relpath]\n except KeyError:\n pass\n else:\n return submodname, is_package, relpath\n msg = ('Can\\'t find module %s in .blend %r' %\n (fullmodname, self.path_entry))\n ##logging.debug(msg)\n raise BlendImportError(msg)", "def get_version_id(self, version_labels):\n return self.version_labels_to_id.get(frozenset(version_labels.items()), None)", "def get_pids_formatted(self, site: str) -> List[str]:\n try:\n self._cursor.execute(f\"SELECT productIdStr FROM {site}\")\n except sqlite3.OperationalError:\n raise sqlite3.OperationalError(f\"Table '{site}' does not exist. 
You can create it by the `create_table_safe` method.\")\n rows: List[Tuple[int]] = self._cursor.fetchall()\n return [row[0] for row in rows]", "def lookup_product(product_id,all_products):\n matching_products = [p for p in all_products if str(p[\"id\"]) == str(product_id)]\n if any(matching_products):\n return matching_products[0]\n else:\n return None", "def _get_suggested_id(self, info: dict) -> str:\n return info[CONF_NAME]", "def lifecycle_get_installed_package(self, timeout):\n if self.version in BasicEnv.binary_versions_v2:\n res, installed = self.lifecycle_query_installed(\"3s\")\n res_return = 0\n if res == 0:\n for item in installed['installed_chaincodes']:\n # packages_id.append(item['package_id'])\n res_get = os.system(\"./../bin/{}/bin/peer lifecycle chaincode getinstalledpackage --package-id {} \"\n \"--output-directory ./ --connTimeout {}\"\n .format(self.version, item['package_id'], timeout))\n res_get = res_get >> 8\n res_return = res_return or res_get\n\n else:\n print(\"package_id get failed.\")\n return 1, {}\n\n # res = os.system(\"./../bin/{}/bin/peer lifecycle chaincode getinstalledpackage --package-id {} \"\n # \"--output-directory ./ --connTimeout {}\".format(self.version, packages_id[0], timeout))\n # res = res >> 8\n return res_return", "def find_issue_id(self):", "def lookup_module(id):\n return _registry[id]", "def determinePackage(inFile):\n fileDir= os.path.dirname(inFile)\n files= os.listdir(fileDir)\n \n pkgName= None\n if \"__init__.py\" in files:\n pkgName= os.path.basename(fileDir)\n\n return pkgName", "def getIDs():" ]
[ "0.6142764", "0.59174037", "0.5913275", "0.58575547", "0.5794914", "0.57480824", "0.5745953", "0.56321526", "0.5583289", "0.55084556", "0.5450892", "0.5416782", "0.5412792", "0.5388353", "0.5381202", "0.5359032", "0.5345228", "0.5315372", "0.5281844", "0.5267592", "0.5251155", "0.5249847", "0.5241232", "0.52150255", "0.5211557", "0.52111346", "0.52034724", "0.5196443", "0.5172637", "0.5144054", "0.5134468", "0.512956", "0.51111174", "0.5106761", "0.5105042", "0.509095", "0.5089832", "0.50775754", "0.50516075", "0.5049809", "0.5044831", "0.50400335", "0.50356305", "0.5035068", "0.49930328", "0.49911237", "0.49871588", "0.49791205", "0.49690035", "0.49681252", "0.4965392", "0.49633175", "0.4958264", "0.49569476", "0.49467745", "0.49458325", "0.49337316", "0.49229732", "0.49213165", "0.49208373", "0.49208373", "0.49208373", "0.49208373", "0.49208373", "0.49208373", "0.49208373", "0.49208373", "0.49208373", "0.49208373", "0.49208373", "0.49208373", "0.4911107", "0.49065137", "0.48949334", "0.4883694", "0.48762003", "0.4874686", "0.48733833", "0.48673335", "0.48668894", "0.48643818", "0.48558086", "0.4845318", "0.48448467", "0.4834903", "0.48334652", "0.48302895", "0.4829408", "0.4827719", "0.48276442", "0.48178944", "0.48124552", "0.48103422", "0.48085693", "0.48010406", "0.47944504", "0.47929597", "0.4787158", "0.47822565", "0.4775331" ]
0.6708183
0
Return current list of packages in the branch.
def packages(self): if self._packages: return self._packages self._load() return self._packages
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def packages(self):\n return []", "def packages(self):\r\n return self._packages", "def get_packages_in_current_dir() -> list:\n from os import listdir\n\n pkgs = []\n ext = ('.tgz', '.txz')\n for file_in_current_dir in sorted(listdir()):\n if file_in_current_dir.endswith(ext):\n pkgs.append(file_in_current_dir)\n\n return pkgs", "def list_packages():\n\n shelf_dir = settings.shelf_dir\n\n package_list = os.listdir(shelf_dir)\n\n package_list.sort()\n\n return package_list", "def __gitBranchList(self):\n self.vcs.gitListTagBranch(self.project.getProjectPath(), False)", "def get_repo_packages() -> List['Package']:\n return Package.get_packages_from_expac(\"-S\", [], PossibleTypes.REPO_PACKAGE)", "def get_packages(self):\n raise NotImplementedError(\"get_packages is not implemented\")", "def get_packages():\n packages = []\n for repo in repositories:\n packages.extend(repo.get_packages())\n return packages", "def get_package_list():\n pip_freeze = subprocess.check_output(('pip', 'freeze')).decode('utf8')\n package_list = [x.strip().split('==') for x in pip_freeze.split('\\n') if x.find('==') != -1]\n package_list = [(x[0].lower(), x[1]) for x in package_list]\n return package_list", "def __gitSubmodulesList(self):\n self.vcs.gitSubmoduleList(self.project.getProjectPath())", "def get_all_packages(self):\n return self._package_cache.values()", "def get_packages(self):\n packages = []\n for obj in self.objects_used:\n packages.extend(obj.get_packages())\n # Remove duplicates (not necessary but it's cleaner)\n packages = list(set(packages))\n return packages", "def packages():", "def __gitMergedBranchList(self):\n self.vcs.gitListTagBranch(self.project.getProjectPath(), False,\n listAll=False, merged=True)", "def list_packages(self):\n for tag, pkg in PACKAGES.iteritems():\n print \"{tag} - {label}\".format(tag=tag, label=pkg['label'])", "def getusersitepackages():\n\tpass", "def __gitTagList(self):\n self.vcs.gitListTagBranch(self.project.getProjectPath(), True)", "def main():\n\n local_pkgs = set(os.listdir(GIT_FOLDER))\n local_pkgs = set([it.replace('.git', '') for it in local_pkgs])\n\n pkgdb_info = pkgdb_pkg_branch()\n\n pkgdb_pkgs = set(pkgdb_info.keys())\n\n ## Commented out as we keep the git of retired packages while they won't\n ## show up in the information retrieved from pkgdb.\n\n #if (local_pkgs - pkgdb_pkgs):\n #print 'Some packages are present locally but not on pkgdb:'\n #print ', '.join(sorted(local_pkgs - pkgdb_pkgs))\n\n if (pkgdb_pkgs - local_pkgs):\n print 'Some packages are present in pkgdb but not locally:'\n print ', '.join(sorted(pkgdb_pkgs - local_pkgs))\n\n tofix = set()\n for pkg in sorted(pkgdb_info):\n pkgdb_branches = pkgdb_info[pkg]\n git_branches = get_git_branch(pkg)\n diff = (pkgdb_branches - git_branches)\n if diff:\n print '%s missing: %s' % (pkg, ','.join(sorted(diff)))\n tofix.add(pkg)\n branch_package(pkg, diff)\n\n if tofix:\n print 'Packages fixed (%s): %s' % (\n len(tofix), ', '.join(sorted(tofix)))", "def get_module_list_from_pkglist(self):\n module_list = []\n for package in self.package_list:\n mod_list = self.get_module_list_from_pkg_rcrsv(package, [])\n module_list.extend(mod_list)\n return module_list", "def list_packages(self):\n return self.client._perform_json(\n \"GET\", \"/projects/%s/apiservices/%s/packages\" % (self.project_key, self.service_id))", "def get_ref_pkg_list(self):\n dstor_root_url = (\n self.cluster_conf.get('distribution-storage', {}).get(\n 'rooturl', ''\n )\n )\n dstor_pkglist_path = (\n 
self.cluster_conf.get('distribution-storage', {}).get(\n 'pkglistpath', ''\n )\n )\n # Unblock irrelevant local operations\n if self.cluster_conf_nop or dstor_pkglist_path == 'NOP':\n LOG.info(f'{self.msg_src}: ref_pkg_list: NOP')\n return []\n\n rpl_url = posixpath.join(dstor_root_url, dstor_pkglist_path)\n rpl_fname = Path(dstor_pkglist_path).name\n\n try:\n cm_utl.download(rpl_url, str(self.inst_storage.tmp_dpath))\n LOG.debug(f'{self.msg_src}: Reference package list: Download:'\n f' {rpl_fname}: {rpl_url}')\n except Exception as e:\n raise cr_exc.RCDownloadError(\n f'Reference package list: Download: {rpl_fname}: {rpl_url}:'\n f' {type(e).__name__}: {e}'\n ) from e\n\n rpl_fpath = self.inst_storage.tmp_dpath.joinpath(rpl_fname)\n try:\n return cr_utl.rc_load_json(\n rpl_fpath, emheading=f'Reference package list: {rpl_fname}'\n )\n except cr_exc.RCError as e:\n raise e\n finally:\n rpl_fpath.unlink()", "def list_package(all: bool = False) -> List[List[str]]:\n if not all:\n pkgs_info = read_installation_records()\n else:\n pkgs_info = []\n for pkg in pkg_resources.working_set:\n pkgs_info.append([pkg.project_name, pkg.version])\n\n return pkgs_info", "def getInstalledPackages():\n reqs = subprocess.check_output([sys.executable,\n '-m', 'pip', 'freeze'])\n installed_packages = [r.decode().split('==')[0]\n for r in reqs.split()]\n return installed_packages", "def get_packages(module, repo_mgr, list_type, package):\n rc_code, out, err = module.run_command(\"/usr/bin/%s -q list %s %s\"\n % (repo_mgr, list_type, package), environ_update=ENV_LOCALE)\n if rc_code is 0:\n return out.splitlines()\n else:\n if rc_code == 1 and str(err) == 'Error: No matching Packages to list\\n':\n return out.splitlines()\n else:\n module.fail_json(msg=\"Unable to collect \" + repo_mgr + \" list \" + list_type + \" : \" + str(err) + \" - \" + str(out))", "def getData(self):\n\t\treturn self.golang_project_packages", "def get_packages(root):\n root = os.path.realpath(root)\n proot = parent(root) + \"/\"\n py_files = [file.rsplit(proot)[1] for file in listfiles(root)]\n packages = list(np.unique([parent(file).replace(\"/\", \".\") for file in py_files]))\n # return list(np.unique([parent(file).replace(\"/\", \".\").split(\".{name_root}.\".format(name_root=name(root)))[1]\n # for file in py_files]))\n return packages", "def get_all_packages(cls):\n packages = Package.query.all()\n return packages", "def get_packages(package):\n return [str(path.parent) for path in Path(package).glob(\"**/__init__.py\")]", "def get_git_branch(pkg):\n git_folder = os.path.join(GIT_FOLDER, '%s.git' % pkg)\n if not os.path.exists(git_folder):\n print 'Could not find %s' % git_folder\n return set()\n\n head_folder = os.path.join(git_folder, 'refs', 'heads')\n return set(os.listdir(head_folder))", "def get_packages():\n\n packages = find_packages()\n packages = ['{}.{}'.format('uniq', package) for package in packages]\n packages.append('uniq')\n return packages", "def GetPackages(self, package_target):\n return self._packages.get(package_target, None)", "def pkgdb_pkg_branch():\n url = '%s/api/vcs' % PKGDB_URL\n req = requests.get(url, params={'format': 'json'})\n data = req.json()\n\n output = {}\n for pkg in data['packageAcls']:\n if pkg in output:\n if VERBOSE:\n print 'Strange package: %s, it is present twice in the ' \\\n 'pkgdb output' % pkg\n output[pkg].updated(data['packageAcls'][pkg].keys())\n else:\n output[pkg] = set(data['packageAcls'][pkg].keys())\n\n return output", "def get_all_packages(self):\n with self._conn.begin():\n 
return {\n rec.package\n for rec in self._conn.execute(self._packages.select())\n }", "def getsitepackages():\n\tpass", "def pypackage(self):\n nb = read_notebook(self._nb_path)\n add_pkgs = None\n if \"nbproject\" in nb.metadata and \"pypackage\" in nb.metadata[\"nbproject\"]:\n if nb.metadata[\"nbproject\"][\"pypackage\"] is not None:\n add_pkgs = nb.metadata[\"nbproject\"][\"pypackage\"].keys()\n return infer_pypackages(nb, add_pkgs, pin_versions=True)", "def getAllInstalledPackages(installedPkgPath):\n allPkgVers = []\n if os.path.exists(installedPkgPath):\n for pkg in os.listdir(installedPkgPath):\n pkgVersions = os.listdir(os.path.join(installedPkgPath, pkg))\n for pkgVersion in pkgVersions:\n pkgPath = os.path.join(installedPkgPath, pkg)\n if not fnmatch.fnmatch(pkgVersion, '*.inprogress'):\n allPkgVers.append(os.path.join(pkgPath, pkgVersion))\n return allPkgVers", "def list_packages(self):\n\n # First extract loaded module names from sys.modules\n sys_modules = sys.modules.keys()\n\n packages = {}\n\n # First add moduels in sys.modules (built-ins,\n # preloads and already loaded ones)\n for name in sys_modules:\n d = self.find_package(name)\n if not d: continue\n try:\n pkginfo = packages[d['type']]\n pkginfo[d['name']] = d['path']\n except Exception, e:\n packages[d['type']] = { d['name'] : d['path'] }\n\n #import site\n # Loop through all directories in sys.path and check for modules\n # Dont iterate through <prefix>/lib directory\n libdir = os.path.join(sys.prefix, 'lib')\n\n walked = []\n for top_level in self.paths:\n if not os.path.isdir(top_level):\n continue\n\n # Dont iterate through libdir\n if os.path.abspath(top_level) == os.path.abspath(libdir):\n continue\n\n walked.append(top_level)\n for item in os.listdir(top_level):\n\n fullpath = os.path.join(top_level, item)\n if fullpath in walked: continue\n\n walked.append(fullpath)\n # Remove the extension\n idx = item.find('.')\n if idx != -1: item = item[:idx]\n d = self.find_package(item)\n if not d: continue\n try:\n pkginfo = packages[d['type']]\n pkginfo[d['name']] = d['path']\n except Exception, e:\n packages[d['type']] = { d['name'] : d['path'] } \n\n for key,item in packages.items():\n print\n print self.pkgTypeInfo(key)\n print\n\n # Print sorted\n listofitems = item.keys()\n listofitems.sort()\n\n for key2 in listofitems:\n print key2,':',item[key2]", "def __gitNotMergedBranchList(self):\n self.vcs.gitListTagBranch(self.project.getProjectPath(), False,\n listAll=False, merged=False)", "def base_branches() -> list[str]:\n branches = []\n\n default = sh(\"git rev-parse --abbrev-ref origin/HEAD\").removeprefix(\"origin/\")\n branches.append(default)\n\n releases = sh(\n \"git branch --all --sort=-committerdate --list *release/* | head -10\"\n ).splitlines()\n releases = [b.removeprefix(\"*\").strip() for b in releases]\n branches.extend(releases)\n\n return branches", "def list_package_ids(self):\n raise NotImplementedError", "def list_cmd(repo):\n click.echo('Packages and scripts installed through pipsi:')\n for venv, scripts in repo.list_everything():\n if not scripts:\n continue\n click.echo(' Package \"%s\":' % venv)\n for script in scripts:\n click.echo(' ' + script)", "def get_installed_packages():\n global INSTALLED_PACKAGES\n chk = Popen(\"{} -m pip freeze\".format(sys.executable),\n shell=True, stdout=PIPE)\n installed = chk.communicate()[0].decode().splitlines()\n for pkg in installed:\n item = pkg.split(\"==\")\n INSTALLED_PACKAGES[item[0]] = item[1]", "def _listBranches(self):\n assert 
self.wc.exists('branches')\n branches = self.wc.ls('branches')\n\n # Some early release branches used a different naming scheme\n # that doesn't sort properly with new-style release names. We\n # filter those out here, along with empty lines.\n branches = [b.strip('/') for b in branches\n if MELANGE_RELEASE_RE.match(b.strip('/'))]\n\n return sorted(branches)", "def repo_list(self):\n\n data, _ = self.helm_client.repo_list()\n return data", "def getPackages(self):\n cat = getToolByName(self.context, 'portal_catalog')\n ideeSejour = getattr(self.context, 'idee-sejour')\n url = '/'.join(ideeSejour.getPhysicalPath())\n contentFilter = {}\n path = {}\n path['query'] = url\n path['depth'] = 1\n contentFilter['path'] = path\n contentFilter['portal_type'] = ['Package']\n contentFilter['sort_on'] = 'effective'\n contentFilter['sort_order'] = 'reverse'\n results = cat.queryCatalog(contentFilter)\n results = list(results)\n return results", "def do_repo_list(self):\n return StringResult(self._repo_list.format_available_repos())", "def list_packages(pretty=False) -> Dict:\n\n packages = dict()\n lp = Commands._list_packages()\n inst_packages = lp.stdout.split('\\n')[:-1]\n\n for package in inst_packages:\n name, version = package.split('==')[0], package.split('==')[1]\n packages[name] = version\n \n if pretty:\n import json\n return json.dumps(packages, sort_keys=True, indent=4)\n return packages", "def get_packages(name_only=False):\n\n packages = list()\n\n dtf_db = sqlite3.connect(DTF_DB)\n cur = dtf_db.cursor()\n\n # This just returns the name\n if name_only:\n\n sql = ('SELECT name '\n 'FROM packages ')\n\n for pack in cur.execute(sql):\n packages.append(pack[0])\n\n # This returns a list of items\n else:\n\n sql = ('SELECT name, version, '\n 'about, author '\n 'FROM packages '\n 'ORDER BY name')\n\n cur.execute(sql)\n\n while True:\n\n item = dtf.core.item.Item()\n line = cur.fetchone()\n if line is None:\n break\n\n item.type = dtf.core.item.TYPE_PACKAGE\n item.name = line[0]\n item.version = line[1]\n item.about = line[2]\n item.author = line[3]\n\n packages.append(item)\n\n return packages", "def get_repos():\n\n return __do_get_repos()", "def get_package_list(*args):\n logger.debug(\"Loading package list\")\n package_list_filepath = Base.get_package_list_filepath()\n\n # check package list file is valid\n logger.debug(\"Checking filepath\")\n package_list_file_valid = Base.check_package_list_file(package_list_filepath)\n if package_list_file_valid == 1:\n logger.error(\"Package file missing, please run `gitget setup`\")\n exit(1)\n elif package_list_file_valid == 2:\n logger.error(\n \"Package file is a directory, please remove `.gitget.yaml` and run `gitget setup`\"\n )\n exit(1)\n elif package_list_file_valid == 0:\n logger.debug(\"Package file found\")\n\n # try loading the file\n logger.debug(\"Attempting to load file\")\n try:\n with open(package_list_filepath) as file:\n package_list = yaml.safe_load(file)\n except Exception as ex:\n logger.error(\"Could not load package list due to the following error:\")\n logger.error(ex)\n exit(1)\n logger.debug(\"Package list loaded\")\n\n # if the list is NONE, set to an empty dictionary to prevent iteration errors\n logger.debug(\"Checking if package list is None\")\n if package_list is None:\n package_list = {}\n logger.debug(\"Package list has no content, set to empty dict\")\n return package_list", "def _get_package_data() -> list[list[str]]:\n moddata = []\n modlist: tuple[str, ...] 
= (\n \"click\",\n \"cryptography\",\n \"globus_cli\",\n \"globus_sdk\",\n \"jmespath\",\n \"requests\",\n )\n if verbosity() < 2:\n modlist = (\"globus_cli\", \"globus_sdk\", \"requests\")\n\n for mod in modlist:\n cur = [mod]\n try:\n loaded_mod = __import__(mod)\n except ImportError:\n loaded_mod = None\n\n for attr in (\"__version__\", \"__file__\", \"__path__\"):\n # if loading failed, be sure to pad with error messages\n if loaded_mod is None:\n cur.append(\"[import failed]\")\n continue\n\n try:\n attrval = getattr(loaded_mod, attr)\n except AttributeError:\n attrval = \"\"\n cur.append(attrval)\n moddata.append(cur)\n\n return moddata", "def list():\n\n click.secho('List of libraries in SJSU-Dev2\\n', fg='white', bold=True)\n package_registry = GetListOfSJSUDev2Repos()\n library_list = [f'{x : <20}: {package_registry[x]}'\n for x in package_registry if x.startswith('lib')]\n print('\\n'.join(library_list))", "def _cscript_package(self, tree, options):\n if len(inspect.getfullargspec(tree.cscript['package']).args) == 3:\n packages = tree.call('package', tree.version, options)\n else:\n log_normal(\"Deprecated cscript package() method with no options parameter\")\n packages = tree.call('package', tree.version)\n\n return packages if isinstance(packages, list) else [packages]", "def get_all_packages(user, repo, config):\n packages = []\n total = 1\n fetched = 0\n offset = 1\n\n while fetched < total:\n url = \"{}/repos/{}/{}/packages.json?page={}\".format(config['url_base'],\n user, repo, offset)\n try:\n resp = (api_call(url, 'get', config['debug']))\n packages = packages + resp.json()\n total = int(resp.headers['Total'])\n perpage = int(resp.headers['Per-Page'])\n fetched += perpage\n offset += 1\n\n except ValueError as ex:\n abort(\"Unexpected response from packagecloud API: \"\n \"{}\".format(ex.message))\n\n return packages", "def get_stack_versions(stack_root):\n stack_selector_path = stack_tools.get_stack_tool_path(stack_tools.STACK_SELECTOR_NAME)\n code, out = call((STACK_SELECT_PREFIX, stack_selector_path, 'versions'))\n versions = []\n if 0 == code:\n for line in out.splitlines():\n versions.append(line.rstrip('\\n'))\n if not versions:\n versions = get_versions_from_stack_root(stack_root)\n return versions", "def getPackageList(self, softwareProfileName):\n return self._sp_db_api.getPackageList(softwareProfileName)", "def get_branch_names(self):\n return [\n branch.name for branch in self.repo.branches\n ]", "def get_changed_packages(blob_name1, blob_name2, package_list):\n changed_files = check_output(\n 'git', 'diff', '--name-only', blob_name1, blob_name2)\n changed_files = changed_files.split('\\n')\n\n result = set()\n for filename in changed_files:\n file_root = rootname(filename)\n if file_root in package_list:\n result.add(file_root)\n\n return sorted(result)", "def get_lb_pkgs(self):\r\n\r\n lb_filter = '*Load Balancer*'\r\n _filter = NestedDict({})\r\n _filter['items']['description'] = query_filter(lb_filter)\r\n\r\n kwargs = NestedDict({})\r\n kwargs['id'] = 0 # look at package id 0\r\n kwargs['filter'] = _filter.to_dict()\r\n packages = self.prod_pkg.getItems(**kwargs)\r\n pkgs = []\r\n for package in packages:\r\n if not package['description'].startswith('Global'):\r\n pkgs.append(package)\r\n return pkgs", "def get_all_package_versions(self):\n with self._conn.begin():\n return {\n (rec.package, rec.version)\n for rec in self._conn.execute(self._versions.select())\n }", "def _get_packages_from_repo(repository):\n base = dnf.Base()\n url = 
settings.REPOSITORIES[repository]['url']\n base_urls = _fetch_base_urls(url)\n for name, base_url in base_urls:\n repo = dnf.repo.Repo(name, settings.TMPDIR)\n repo.baseurl = [base_url]\n base.repos.add(repo)\n base.fill_sack()\n\n # Query all available packages in sack\n packages = base.sack.query().available().run()\n\n return packages", "def get_repos(self):\n\n if self.url == 'test':\n repos = ['feature', 'dev', 'int']\n else:\n repos = []\n\n return repos", "def get_packages_with_prefixes():\n return get_resources('packages')", "def create_package_list(base):\n\n return [base] + [\"{}.{}\".format(base, pkg) for pkg in find_packages(base)]", "def branch_list(repo, remote_name, pattern=None):\n # The return string for a remote reference is a single line with two\n # fields separated by a tab string. The first field is a commit hash.\n # The second field is the reference path. The unique part of the path\n # is the last field.\n #\n # 423f434cd877926ff47f3a710a7b0c414785515e\trefs/heads/enterprise-3.0\n\n lines = repo.git.ls_remote(remote_name, pattern, heads=True).split(\"\\n\")\n return [str(line.split('/')[-1]) for line in lines]", "def required_packages(cls) -> List[Text]:\n return []", "def get_source_packages(self, project, expand=False):\n query = {'expand': 1} if expand else {}\n root = ET.parse(osc.core.http_GET(osc.core.makeurl(self.apiurl,['source', project],\n query=query))).getroot()\n packages = [i.get('name') for i in root.findall('entry')]\n\n return packages", "def get_installed_versions(cls) -> list[str]:\n\n pyenv_root = os.getenv(\"PYENV_ROOT\")\n if pyenv_root is None:\n raise Failure(\"PYENV_ROOT is not configured\")\n\n root_dir = Path(pyenv_root)\n version_dir = root_dir / \"versions\"\n\n return [i.name for i in version_dir.iterdir() if i.is_dir()]", "def list_branches(self) -> List[str]:\n self.__verify_repo_initialized()\n branches = heads.get_branch_names(self._env.branchenv)\n return branches", "def get_available_packages():\n all_providers_yaml = load_package_data()\n provider_package_names = [\n provider[\"package-name\"] for provider in all_providers_yaml if not provider.get(\"suspended\")\n ]\n return [\n \"apache-airflow\",\n \"docker-stack\",\n *provider_package_names,\n \"apache-airflow-providers\",\n \"helm-chart\",\n ]", "def getversions(package_name: str) -> list:\n\t# execute command\n\tproc = subprocess.Popen(['pip', 'install', package_name+'==CRASHME'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\tproc.wait()\n\n\t# processed returned data\n\tlines = proc.stderr.read().decode('utf8')\n\tsearchterm = \"(from versions:\"\n\tstart = lines.find(searchterm) + len(searchterm)\n\tend = lines.find(\")\", start)\n\tlines = lines[start:end].split(',')\n\tlines = list(map(lambda x: x.strip(), lines))\n\n\treturn lines", "def get_installed_packages() -> List['Package']:\n repo_packages_names = set(expac(\"-S\", ['n'], []))\n\n # packages the user wants to install from aur\n aur_names = packages_from_other_sources()[0]\n repo_packages_names -= aur_names\n\n installed_packages_names = set(expac(\"-Q\", ['n'], []))\n installed_repo_packages_names = installed_packages_names & repo_packages_names\n unclassified_installed_names = installed_packages_names - installed_repo_packages_names\n\n return_list = []\n\n # installed repo packages\n if installed_repo_packages_names:\n return_list.extend(\n Package.get_packages_from_expac(\"-Q\", list(installed_repo_packages_names), PossibleTypes.REPO_PACKAGE)\n )\n\n # installed aur packages\n 
installed_aur_packages_names = set(\n [package.name for package in Package.get_packages_from_aur(list(unclassified_installed_names))]\n )\n\n # package names the user gave us must be in the aur\n for name in aur_names:\n if name not in installed_aur_packages_names:\n aurman_error(\"Package {} not found in AUR!\".format(Colors.BOLD(Colors.LIGHT_MAGENTA(name))))\n raise InvalidInput(\"Package {} not found in AUR!\".format(Colors.BOLD(Colors.LIGHT_MAGENTA(name))))\n\n if installed_aur_packages_names:\n return_list.extend(\n Package.get_packages_from_expac(\"-Q\", list(installed_aur_packages_names), PossibleTypes.AUR_PACKAGE)\n )\n\n unclassified_installed_names -= installed_aur_packages_names\n\n # installed not repo not aur packages\n if unclassified_installed_names:\n return_list.extend(\n Package.get_packages_from_expac(\n \"-Q\", list(unclassified_installed_names),\n PossibleTypes.PACKAGE_NOT_REPO_NOT_AUR\n )\n )\n\n return return_list", "def dump(self):\n\n\t\treturn [\n\t\t\tpkg.dump()\n\t\t\tfor pkg in self._packages.values()\n\t\t]", "def load_packages(locker: Locker, lock_data: _TOMLDocument) -> List[Package]:\n locker._lock_data = lock_data\n repository = locker.locked_repository(with_dev_reqs=True)\n activate_dependencies(repository.packages)\n return repository.packages # type: ignore[no-any-return] # noqa: F723", "def fetch_branches(self):\n for jrepo in self.json_repos['repos']:\n title = str(jrepo[\"title\"])\n self.branches[title] = str(jrepo['current'])", "def _get_submodules():\n import sh\n git = sh.git.bake(_tty_out=False)\n submodules = git.submodule().strip().split(\"\\n\")\n return [\n line.strip()[1:].split()[1]\n for line in submodules\n ]", "def get_installed_jdk_packages():\n # Convert to a set and back to a list again to uniqueify.\n return sorted(list(set(rpm_query_whatprovides('java-devel', 'java7-devel', 'jdk'))))", "def get_packages(package):\n return [\n dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, '__init__.py'))\n ]", "def branches(self) -> list[str]:\n _args: list[Arg] = []\n _ctx = self._select(\"branches\", _args)\n return _ctx.execute_sync(list[str])", "def listpacks(all: bool=False) -> [str, str]:\n\t# execute command\n\tcommand = ['pip', 'freeze']\n\tif all:\n\t\tcommand.append('--all')\n\tproc = subprocess.Popen(command, stdout=subprocess.PIPE)\n\tproc.wait()\n\n\t# process returned data\n\tlines = proc.stdout.read().decode('utf8')\n\tlines = list(\n\t\tfilter(\n\t\t\tlambda inf: inf[0] and inf[0].split(' ')[0].lower() != '-e',\n\t\t\tmap(\n\t\t\t\tlambda inf: list(map(\n\t\t\t\t\tlambda x: x.lower().strip(),\n\t\t\t\t\tinf.split('==')\n\t\t\t\t\t)),\n\t\t\t\tlines.split('\\n')\n\t\t\t)\n\t\t)\n\t)\n\n\treturn lines", "def get_packages(package):\n return [dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, '__init__.py'))]", "def get_packages(package):\n return [dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, '__init__.py'))]", "def get_packages(package):\n return [dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, '__init__.py'))]", "def get_vul_info(vul_info):\n packages = []\n if vul_info.get('fixes') is None:\n return packages\n for fixes in vul_info['fixes']:\n packages.extend(get_package_os(fixes))\n return packages", "def _get_nodes_from_package(*, package_name: str) -> List[Node]:\n nodl_files = 
_get_nodl_files_from_package_share(package_name=package_name)\n return _parse_multiple(paths=nodl_files)", "def list_(ctx: click.Context, repository_path):\n root_commands.cmd_list(ctx.obj, repository_path)", "def __gitBundleListHeads(self):\n self.vcs.gitBundleListHeads(self.project.getProjectPath())", "def list_packages ( self, name_only=False ):\n if name_only:\n for name, subdir in self._subdirs.items():\n if not subdir.empty():\n yield name\n else:\n for name, subdir in self._subdirs.items():\n if not subdir.empty():\n yield self.name + os.sep + name", "def git_status():\n\tl = []\n\tdebug(\"Not implemented\",1)\n\n\treturn l", "def _get_packages_for_state(self, state, observer):\n available = super(AdbRemote, self)._get_packages_for_state(state, observer)\n\n if not available:\n if (state == AdbRemote.adb_shell) or (state == AdbRemote.adb_shell_root):\n available = {TextualDevice.cmds: ['moler.cmd.unix'],\n TextualDevice.events: ['moler.events.shared']}\n if available:\n return available[observer]\n elif state == UnixRemote.unix_remote: # this is unix extended with adb commands\n if observer == TextualDevice.cmds:\n available.append('moler.cmd.adb')\n\n return available", "def get_global_active_list(self):\n return self.api.get_active_global_version_manager()", "def get_required_packages(self) -> list:\n\t\tret = []\n\t\tlocal_packages = ChocoInfo.get_local_packages(\n\t\t\tPUSHED_PACKAGES_PATH)\n\n\t\tprint(\"local_packages\", local_packages)\n\n\t\treturn [c_package for c_package in self._community_packages if c_package not in local_packages]", "def get_default_packages(rootfs, include_essential=False,\n include_priorities=None):\n if include_priorities is None:\n include_priorities = []\n\n package_list = set()\n\n list_dir = os.path.join(rootfs, 'var/lib/apt/lists')\n for filename in os.listdir(list_dir):\n if not filename.endswith('_Packages'):\n continue\n\n with open(os.path.join(list_dir, filename)) as infile:\n for pkg in rfc822_parse(infile):\n if 'Essential' in pkg and include_essential:\n package_list.add(pkg['Package'])\n continue\n if 'Priority' not in pkg:\n continue\n\n if pkg['Priority'] in include_priorities:\n package_list.add(pkg['Package'])\n\n return list(sorted(package_list))", "def releases():\n result = run('ls %(releases_dir)s' % env)\n releases_list = re.split('\\s+', result)\n releases_list.sort(reverse=True)\n return releases_list", "def import_packages_global():\n return \"\"", "def list_repos(self):\n return sorted(self.user_con.list_repos())", "def getsitepackages():\n # For now used only on Windows. 
Raise Exception for other platforms.\n if is_win:\n pths = [os.path.join(sys.prefix, 'Lib', 'site-packages')]\n # Include Real sys.prefix for virtualenv.\n if is_virtualenv:\n pths.append(os.path.join(base_prefix, 'Lib', 'site-packages'))\n return pths\n else:\n # TODO Implement for Python 2.6 on other platforms.\n raise NotImplementedError()", "def namespace_packages(self):\n dotted_names = []\n namespace_packages_file = self.find_egg_info_file('namespace_packages.txt')\n if namespace_packages_file:\n with open(namespace_packages_file) as handle:\n for line in handle:\n line = line.strip()\n if line:\n dotted_names.append(line)\n return dotted_names", "def getInstalledPackages(self) -> PackageContainer:\n\t\tself.getPackageManager()\n\t\tif self.package_manager == \"apt\":\n\t\t\tpackages = subprocess.check_output([\"apt\", \"list\", \"--installed\"], encoding='UTF-8', universal_newlines=True)\n\t\t\tpackages = packages.split(\"\\n\")[1:-1]\n\t\telse:\n\t\t\tlogger.error(\"Package manager not supported for extracting packages.\")\n\t\t\traise ValueError(\"Package manager unsupported\")\n\n\t\t# Parse packages to self.installed_packages\n\t\tself.parsePackages(packages)\n\n\t\tlogger.info(\"Installed packages collected\")\n\t\treturn self.installed_packages", "def expand_package(self, pkg):\n return [(pkg, c) for c in self.packages[pkg].components]" ]
[ "0.7107418", "0.6885296", "0.68503803", "0.68283534", "0.6706045", "0.6589522", "0.65444344", "0.65231556", "0.65145665", "0.63925606", "0.6339857", "0.6297373", "0.6296177", "0.62841654", "0.62576807", "0.621807", "0.61917984", "0.61838996", "0.6180382", "0.61489815", "0.61210734", "0.6113647", "0.6046085", "0.60439694", "0.60126543", "0.60060525", "0.599396", "0.5987566", "0.5969569", "0.59684944", "0.59558505", "0.59380686", "0.5915459", "0.5900845", "0.5891452", "0.58738565", "0.5869072", "0.5821477", "0.58107156", "0.58015156", "0.5786083", "0.5773251", "0.57678354", "0.5767107", "0.57543606", "0.5749469", "0.5747548", "0.57301325", "0.57190907", "0.57093155", "0.56881493", "0.5688093", "0.56771505", "0.5670665", "0.5668329", "0.5639258", "0.56347233", "0.56346947", "0.5620853", "0.5609537", "0.55876553", "0.5575413", "0.55694294", "0.556505", "0.5558246", "0.5553915", "0.554697", "0.5545423", "0.5537461", "0.5537037", "0.552859", "0.5524605", "0.5519755", "0.5512019", "0.54860824", "0.5483697", "0.5482674", "0.548111", "0.5478791", "0.54770035", "0.54709584", "0.54709584", "0.54709584", "0.5469187", "0.546734", "0.5463104", "0.54598653", "0.54596716", "0.54539496", "0.5450757", "0.54463863", "0.5444163", "0.5418661", "0.5414008", "0.54122466", "0.54091173", "0.5406137", "0.53911954", "0.53823453", "0.53769386" ]
0.6295743
13
test upsert user template as anonymous raises access control error
def test_upsert_user_template_as_anonymous_raises_access_control_error( self, ): mock_request = create_mock_request(user=self.anonymous_user) with self.assertRaises(AccessControlError): template_api.upsert( self.fixture.user1_template, request=mock_request )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_upsert_user_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.user1_template, request=mock_request\n )", "def test_upsert_global_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.global_template, request=mock_request\n )", "def test_upsert_global_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.global_template, request=mock_request\n )", "def test_upsert_global_template_as_user_raises_access_control_error(self):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.global_template, request=mock_request\n )", "def test_upsert_other_users_template_as_user_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.user2_template, request=mock_request\n )", "def test_upsert_own_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.upsert(self.fixture.user1_template, request=mock_request)", "def test_upsert_global_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.upsert(self.fixture.global_template, request=mock_request)", "def test_upsert_own_template_as_user_saves(self):\n mock_request = create_mock_request(user=self.user1)\n template_api.upsert(self.fixture.user1_template, request=mock_request)", "def test_upsert_other_users_template_as_staff_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.staff_user1)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.user2_template, request=mock_request\n )", "def test_upsert_other_users_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.upsert(self.fixture.user2_template, request=mock_request)", "def test_set_display_name_user_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.set_display_name(\n self.fixture.user1_template, \"new_name\", request=mock_request\n )", "def test_upsert_global_template_as_staff_saves(self):\n mock_request = create_mock_request(user=self.staff_user1)\n template_api.upsert(self.fixture.global_template, request=mock_request)", "def test_upsert_own_template_as_staff_saves(self):\n mock_request = create_mock_request(user=self.staff_user1)\n template_api.upsert(self.fixture.user1_template, request=mock_request)", "def test_update_template_registration(self):\n pass", "def test_get_user_template_as_anonymous_raises_access_control_error(self):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user1_template.id, request=mock_request\n )", "def test_set_display_name_user_template_as_anonymous_with_access_right_access_raises_control_error(\n 
self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.set_display_name(\n self.fixture.user1_template, \"new_name\", request=mock_request\n )", "def test_replace_user(self):\n pass", "def test_get_user_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user1_template.id, request=mock_request\n )", "def test_anonymous_user_update_anoymous_taskrun(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n anonymous_taskrun = AnonymousTaskRunFactory.create()\r\n\r\n assert_raises(Unauthorized,\r\n getattr(require, 'taskrun').update,\r\n anonymous_taskrun)", "def test_post_update_regular_user(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n user_url = reverse('user-detail', kwargs={'pk': self.superuser.id})\n self.client.force_authenticate(user=self.user)\n title = 'Random New Title'\n body = 'Random New Body'\n response = self.client.put(url, {'title': title, 'body': body, 'user': user_url}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_set_display_name_global_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.set_display_name(\n self.fixture.global_template, \"new_name\", request=mock_request\n )", "def test_delete_user_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.user1_template, request=mock_request\n )", "def test_post_update_unauthorized(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n user_url = reverse('user-detail', kwargs={'pk': self.superuser.id})\n title = 'Random New Title'\n body = 'Random New Body'\n response = self.client.put(url, {'title': title, 'body': body, 'user': user_url}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_anonymous_user_update_user_taskrun(self):\r\n with self.flask_app.test_request_context('/'):\r\n user_taskrun = TaskRunFactory.create()\r\n\r\n assert_raises(Unauthorized,\r\n getattr(require, 'taskrun').update,\r\n user_taskrun)", "def test_set_display_name_global_template_as_anonymous_with_access_right_access_raises_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.set_display_name(\n self.fixture.global_template, \"new_name\", request=mock_request\n )", "def test_get_global_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.global_template.id, request=mock_request\n )", "def test_set_display_name_global_template_as_user_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.set_display_name(\n self.fixture.global_template, \"new_name\", request=mock_request\n )", "def test_not_creator_cannot_update(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n 
self.group.users.add(logged_user)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url,\n expected_url=expected_url,\n data=self.data)", "def test_not_logged_in(self):\n self.request.user = None\n result = user_id_put_view(self.request)['d']\n self.assertEqual(result, error_dict('api_errors', 'not authenticated for this request'))", "def test_delete_user_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.user1_template, request=mock_request\n )", "def test_anonymous_user_update_blogpost(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n blogpost = BlogpostFactory.create()\r\n\r\n assert_raises(Unauthorized, getattr(require, 'blogpost').update, blogpost)", "def test_put_non_owner(self):\n another_user = CustomUser.objects.create(id=1067, email='another_user1@mail.com', is_active=True)\n another_user.set_password('testpassword')\n another_user.save()\n\n self.client.login(email='another_user1@mail.com', password='testpassword')\n\n data = {\n 'week_day': 3\n }\n\n url = reverse('notification',\n kwargs={'way_id': self.notification.way_id, 'notification_id': self.notification.id})\n response = self.client.put(url, json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')\n self.assertEqual(response.status_code, 403)", "def test_update_by_non_owner(self):\n # User 1\n saved1 = self.create_article()\n article_url = saved1[0]\n # get user2 details\n token = self.create_article_user2()\n response = self.test_client.put(article_url,self.article_update_data, format='json', HTTP_AUTHORIZATION=token)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_delete_global_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.global_template, request=mock_request\n )", "def test_anonymous_user_update(self):\r\n with self.flask_app.test_request_context('/'):\r\n for token in self.auth_providers:\r\n assert_raises(Unauthorized,\r\n getattr(require, 'token').update,\r\n token)", "def test_authenticated_user_update_anonymous_taskrun(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n anonymous_taskrun = AnonymousTaskRunFactory.create()\r\n\r\n assert_raises(Forbidden,\r\n getattr(require, 'taskrun').update,\r\n anonymous_taskrun)", "def test_unshare_template_registration(self):\n pass", "def test_upsert_user(self):\n db = database.Database()\n db.upsert_user('nick', 100, 100)\n\n the_args, _ = db._cursor.execute.call_args\n sql = the_args[0]\n expected_sql = 'INSERT INTO quota_violations (username, triggered, last_notified)\\n VALUES (%s, %s, %s)\\n ON CONFLICT (username)\\n DO UPDATE SET\\n (triggered, last_notified)\\n = (EXCLUDED.triggered, EXCLUDED.last_notified);\\n '\n\n self.assertEqual(sql, expected_sql)", "def test_update_user(self):\n pass", "def test_delete_global_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.global_template, request=mock_request\n )", "def test_api_user_put(self):\n pass", "def test_admin_cannot_update_non_existant_user(self):\n resp = self.admin_create_user()\n reply = 
self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Lover',\n username='lover',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.put(\n '/api/v1/users/5',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], \"This user doesn't exist!\")\n self.assertEqual(resp.status_code, 400)", "def test_delete_global_template_as_user_raises_access_control_error(self):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.global_template, request=mock_request\n )", "def test_get_global_template_as_anonymous_with_access_right_returns_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n template = template_api.get_by_id(\n self.fixture.global_template.id, request=mock_request\n )\n self.assertEqual(template, self.fixture.global_template)", "def test_set_display_name_other_users_template_as_user_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.set_display_name(\n self.fixture.user2_template, \"new_name\", request=mock_request\n )", "def test_update_activity_template(self):\n pass", "def test_post_partial_update_logged_in(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n title = 'Random New Title'\n self.client.force_authenticate(user=self.user)\n response = self.client.patch(url, {'title': title}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_share_template_registration(self):\n pass", "def test_no_template(self):\n distillery = Distillery.objects.get_by_natural_key(\n 'mongodb.test_database.test_docs')\n try:\n distillery.save()\n except AttributeError:\n self.fail('put_template() raised AttributeError unexpectedly')", "def test_admin_update_anonymous_taskrun(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n anonymous_taskrun = AnonymousTaskRunFactory.create()\r\n\r\n assert_raises(Forbidden,\r\n getattr(require, 'taskrun').update,\r\n anonymous_taskrun)", "def test_functionality(self):\n templateName = \"Test Template\"\n self.browserObject = globalVars.browserObject\n \n #Check for current logged in user\n self.verifyCurrentUser(userRole='Administrator', loginAsUser=True)\n \n #Deleting Standard User\n userList = self.getLocalUsers(userName=globalVars.standardUser)\n if len(userList) > 0:\n self.deleteLocalUser(globalVars.standardUser, verifyUser=True)\n \n #Creates Sample Template if not exists\n self.createSampleTemplate(templateName=templateName, deleteAndCreate=True, publishedTemplate=True)", "def test_post_creation_unauthorized(self):\n url = reverse('post-list')\n user_url = reverse('user-detail', kwargs={'pk': self.superuser.id})\n title = 'Test Title'\n body = 'Test Body'\n response = self.client.post(url, {'title': title, 'body': body, 'user': user_url}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_get_all_accessible_by_hash_as_anonymous_with_access_right_does_not_return_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.user1_template.hash, request=mock_request\n )\n self.assertTrue(templates.count() == 0)", "def 
test_not_creator_cannot_update_tab(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n expected_url = reverse('group_view', args=(self.group.pk,))\n\n utils.test_cannot_access(self, self.url, expected_url, self.data)", "def test_delete_own_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.delete(self.fixture.user1_template, request=mock_request)", "def test_update_template_subscription(self):\n pass", "def test_updateview_write_for_anonymous_user(self):\n\n for user in self.users:\n updateview = reverse('account_update', args=(user.uuid,))\n valid_data = {'email': user.email, 'first_name': user.first_name,\n 'last_name': user.last_name, 'language': user.language}\n invalid_data = valid_data.copy()\n invalid_data['email'] = 'invalid_email_address'\n valid_data_response = self.client.post(updateview, valid_data)\n invalid_data_response = self.client.post(updateview, invalid_data)\n\n self.assertEqual(valid_data_response.status_code, 302)\n self.assertEqual(invalid_data_response.status_code, 302)", "def test_get_all_accessible_by_hash_as_anonymous_does_not_return_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.user1_template.hash, request=mock_request\n )\n self.assertTrue(templates.count() == 0)", "def test_update_useruser_uuid_put(self):\n pass", "def test_replace_user_identity_mapping(self):\n pass", "def test_put_unauthenticated(self):\n\n url = reverse('file')\n\n data = {}\n\n response = self.client.put(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_wrong_id(self):\n self.request.matchdict = {'user_id': int(self.request.user.id)+4}\n self.request.json_body = {}\n result = user_id_put_view(self.request)['d']\n self.assertEqual(result, error_dict('api_errors', 'not authenticated for this request'))", "def test_post_partial_update_unauthorized(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n title = 'Random New Title'\n response = self.client.patch(url, {'title': title}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def testAnonymousCannotEdit(self):\n response = self.client.get(reverse(\"task_detail\", args=[1]))\n self.failUnlessEqual(response.status_code, 200)\n self.failUnless(response.content.find(\"<h2>Edit</h2>\") == -1,\n \"Anonymous user is able to edit tasks.\")", "async def test_rendering_template_admin(\n hass: HomeAssistant, mock_api_client: TestClient, hass_admin_user: MockUser\n) -> None:\n hass_admin_user.groups = []\n resp = await mock_api_client.post(const.URL_API_TEMPLATE)\n assert resp.status == HTTPStatus.UNAUTHORIZED", "def test_post_creation_regular_user(self):\n url = reverse('post-list')\n user_url = reverse('user-detail', kwargs={'pk': self.superuser.id})\n title = 'Test Title'\n body = 'Test Body'\n self.client.force_authenticate(user=self.user)\n response = self.client.post(url, {'title': title, 'body': body, 'user': user_url}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_update_self_fail(self):\n new_user = self.create_user('1')\n url = '/0/chefs/' + str(new_user.pk)\n\n headers = self.login()\n resp = self.client.put(url, **headers)\n self.assertInvalidCredentials(resp)", "def test_modify_nonexist_username(self):\n print('(' + self.test_modify_nonexist_username.__name__+')',\n 
self.test_modify_nonexist_username.__doc__)\n self.assertIsNone(self.connection.modify_user(\n NON_EXIST_PATIENT_USERNAME, PATIENT['public_profile'],\n PATIENT['restricted_profile']))", "def test_patch_user(self):\n pass", "def test_put_unauthorized(self):\n\n url = reverse('file')\n\n data = {\n 'shard_id': self.shard1.id,\n 'link_id': \"b8866161-0b1f-4a8e-acde-07047313ec8f\",\n 'parent_datastore_id': str(self.test_datastore_obj.id),\n 'chunk_count': 1,\n 'size': 512,\n }\n\n self.client.force_authenticate(user=self.test_user2_obj)\n response = self.client.put(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_unauthorized_create_user(self):\n res = self.submit()\n\n assert res.status_code == 401", "def test_patch_user_identity_mapping(self):\n pass", "def test_modify_userid_404(self):\n resp = self.app.put('/users/thisuserdoesntexist',\n data=json.dumps(self.test_user1_data))\n assert resp.status_code == 404", "def test_admin_approval_nonexistent_id(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n\n user = self.registration_profile.objects.admin_approve_user(\n profile.id, Site.objects.get_current())\n self.assertIs(user, False)", "def test_set_display_name_other_users_template_as_staff_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.staff_user1)\n with self.assertRaises(AccessControlError):\n template_api.set_display_name(\n self.fixture.user2_template, \"new_name\", request=mock_request\n )", "def test_update_privileges_fails(self):\n user = User.create(name='foo', email='foo@bar.com', user_type='user',\n owned_teams=[], owned_organizations=[])\n user.put()\n\n # You get a 200, but the changes you requested don't happen.\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'user_type': 'super_admin', 'owned_teams': ['Team_foo'],\n 'owned_organizations': ['Organization_foo']},\n headers=self.login_headers(user),\n )\n user_dict = json.loads(response.body)\n self.assertEqual(user.user_type, user_dict['user_type'])\n self.assertEqual(user.owned_teams, user_dict['owned_teams'])\n self.assertEqual(user.owned_organizations,\n user_dict['owned_organizations'])\n\n # Also not changed in the db.\n fetched_user = User.get_by_id(user.uid)\n self.assertEqual(user.user_type, fetched_user.user_type)\n self.assertEqual(user.owned_teams, fetched_user.owned_teams)\n self.assertEqual(user.owned_organizations,\n fetched_user.owned_organizations)", "def test_delete_other_users_template_as_user_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.user2_template, request=mock_request\n )", "def test_update_person_not_authenticated(self):\n\n data = {'first_name': 'Daenerys'}\n response = self.client.patch(self.url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_delete_global_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.delete(self.fixture.global_template, request=mock_request)", "def test_username_in_use(self):\n self.request.json_body = {'username': 'testuser'}\n self.datautils.create_user({'username': 'testuser', 'password': 'testpass'})\n result = users_post_view(self.request)['d']\n self.assertIsInstance(result, 
dict)\n self.assertEqual(result, error_dict('verification_error', 'username already in use: testuser'))", "def test_delete_own_template_as_user_saves(self):\n mock_request = create_mock_request(user=self.user1)\n template_api.delete(self.fixture.user1_template, request=mock_request)", "def test_user_update_request(self):\n pass", "def test_create_empty_user(patch_mongo):\n user = {\n \"name\": \"\"\n }\n\n response = client.put(\"/user\", json=user)\n assert response.status_code == status.HTTP_400_BAD_REQUEST", "def test_anonymous_user_create_blogposts(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n\r\n assert_raises(Unauthorized, getattr(require, 'blogpost').create)", "def test_put_detail_unauthorized(self):\n author = User.objects.create_user(\"test2\", \"test2@example.com\", \"test2\")\n story = create_story(title=\"Test Story\", summary=\"Test Summary\",\n byline=\"Test Byline\", status='published',\n author=author)\n self.api_client.client.login(username=self.username, password=self.password)\n post_data = {\n 'title': \"New Title\",\n 'summary': \"New Summary\",\n 'byline': \"New Byline\",\n 'status': \"published\",\n }\n response = self.api_client.put('/api/0.1/stories/%s/' % (story.story_id),\n format='json', data=post_data)\n self.assertHttpUnauthorized(response)", "def test_create__site_editor_forbidden(self):\n testing_config.sign_in('editor@example.com', 123567890)\n\n with test_app.test_request_context(self.request_path):\n with self.assertRaises(werkzeug.exceptions.Forbidden):\n self.handler.do_post()\n\n new_appuser = user_models.AppUser.query(\n user_models.AppUser.email == 'new@example.com').get()\n self.assertIsNone(new_appuser)", "def test_user_id_put(self):\n pass", "def test_get_other_users_template_as_staff_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.staff_user1)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user2_template.id, request=mock_request\n )", "def test_allowed_if_superuser(self):\n\n @task_or_superuser_only\n def view(request):\n return HttpResponse(\"Hello\")\n\n class User(object):\n is_superuser = True\n is_authenticated = True\n\n request = self.factory.get(\"/\")\n request.user = None\n response = view(request)\n self.assertEqual(response.status_code, 403)\n\n request.user = User()\n response = view(request)\n self.assertEqual(response.status_code, 200)", "def test_get_all_accessible_by_hash_list_as_anonymous_with_access_right_does_not_return_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash_list(\n [self.fixture.user1_template.hash], request=mock_request\n )\n self.assertTrue(templates.count() == 0)", "def test_delete_other_users_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.delete(self.fixture.user2_template, request=mock_request)", "def test_if_not_created_unauthorized(self):\r\n payload = {\r\n \"email\": \"t@t.pl\",\r\n \"password\": \"password\",\r\n \"name\": \"asdasd\",\r\n \"is_superuser\": False,\r\n }\r\n\r\n res = self.client_not_authenticated.post(reverse(CREATE_USER_URL),data=payload)\r\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def allowed_topologytemplate_access_change(user, template):\n try:\n up = user.get_profile()\n except AttributeError:\n return False\n\n return template.owner == user or user.has_perm(\"vnswww.topologytemplate_change_any\") or 
(user.has_perm(\"vnswww.topologytemplate_change_org\") and template.org == up.org)", "def test_homepage_logged_in_except(self):\r\n\r\n u1 = User.query.filter_by(username='testuser').one()\r\n u1.location = 'US-FAKE'\r\n db.session.add(u1)\r\n db.session.commit()\r\n\r\n with self.client as c:\r\n with c.session_transaction() as sess:\r\n sess[CURR_USER_KEY] = u1.id\r\n response = c.get('/')\r\n self.assertEqual(response.status_code, 200)\r\n self.assertIn(b'No data found for your region.', response.data)", "def test_get_other_users_template_raises_access_control_error(self):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user2_template.id, request=mock_request\n )", "def test_profile_route_uses_right_templates(self):\n self.add_testuser()\n response = self.client.get(\"/profile/testuser/\")\n self.assertTemplateUsed(response, \"layout.html\")", "def test_no_uid_causes_error():\n empty = create_db()\n with pytest.raises(ValueError):\n o_obj.update_object_in_db(\n empty,\n \"some_uid\",\n INP\n )", "def test_update_author_unlogged(self):\n data = {'name': 'Ken Thompson'}\n\n request = self.client.patch(self.epoint, data)\n\n self.assertEqual(request.status_code, status.HTTP_403_FORBIDDEN)", "def test_delete_other_users_template_as_staff_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.staff_user1)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.user2_template, request=mock_request\n )", "def test_post_owner(self):\n self.client.force_authenticate(self.user)\n response = self.post(content='foo')\n self.assertEqual(response.data['owner'], self.user.pk)" ]
[ "0.79757524", "0.78699815", "0.77971363", "0.7779306", "0.7315423", "0.7268201", "0.72340584", "0.71491605", "0.711088", "0.70220673", "0.66764414", "0.6670372", "0.66626596", "0.6651719", "0.656055", "0.65165913", "0.6485727", "0.64777356", "0.64196575", "0.6418435", "0.64110607", "0.640406", "0.63888854", "0.6337296", "0.6316503", "0.6288584", "0.62806493", "0.62736815", "0.6254986", "0.62276226", "0.62085176", "0.6194817", "0.61655074", "0.61392665", "0.6124781", "0.6121909", "0.60912794", "0.60709673", "0.60613567", "0.6055823", "0.6041575", "0.60375243", "0.6032906", "0.6030935", "0.60182494", "0.60175514", "0.6013207", "0.6010717", "0.59970623", "0.5940938", "0.5934624", "0.5914551", "0.5911487", "0.5910918", "0.5906166", "0.5903216", "0.5895289", "0.58853894", "0.5875188", "0.5875078", "0.58721447", "0.5863529", "0.5863065", "0.5859136", "0.5844397", "0.5842335", "0.5842049", "0.5833507", "0.5829239", "0.58234066", "0.5807508", "0.58071417", "0.58060735", "0.5803993", "0.5790735", "0.57895774", "0.57861733", "0.5785018", "0.5782529", "0.57776123", "0.57596594", "0.5758886", "0.574868", "0.5747317", "0.5747148", "0.5741287", "0.5740055", "0.5733486", "0.5720404", "0.5708373", "0.57081074", "0.5694404", "0.5691958", "0.5689285", "0.56834584", "0.56814325", "0.5681425", "0.56707877", "0.56704384", "0.5670181" ]
0.81394374
0
test upsert user template as anonymous with access right raises access control error
def test_upsert_user_template_as_anonymous_with_access_right_raises_access_control_error(
    self,
):
    mock_request = create_mock_request(user=self.anonymous_user)
    with self.assertRaises(AccessControlError):
        template_api.upsert(
            self.fixture.user1_template, request=mock_request
        )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_upsert_user_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.user1_template, request=mock_request\n )", "def test_upsert_global_template_as_user_raises_access_control_error(self):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.global_template, request=mock_request\n )", "def test_upsert_global_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.global_template, request=mock_request\n )", "def test_upsert_global_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.global_template, request=mock_request\n )", "def test_upsert_other_users_template_as_user_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.user2_template, request=mock_request\n )", "def test_upsert_other_users_template_as_staff_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.staff_user1)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.user2_template, request=mock_request\n )", "def test_upsert_own_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.upsert(self.fixture.user1_template, request=mock_request)", "def test_upsert_global_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.upsert(self.fixture.global_template, request=mock_request)", "def test_upsert_own_template_as_user_saves(self):\n mock_request = create_mock_request(user=self.user1)\n template_api.upsert(self.fixture.user1_template, request=mock_request)", "def test_upsert_other_users_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.upsert(self.fixture.user2_template, request=mock_request)", "def test_get_user_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user1_template.id, request=mock_request\n )", "def test_set_display_name_user_template_as_anonymous_with_access_right_access_raises_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.set_display_name(\n self.fixture.user1_template, \"new_name\", request=mock_request\n )", "def test_upsert_own_template_as_staff_saves(self):\n mock_request = create_mock_request(user=self.staff_user1)\n template_api.upsert(self.fixture.user1_template, request=mock_request)", "def test_get_user_template_as_anonymous_raises_access_control_error(self):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user1_template.id, request=mock_request\n )", "def 
test_upsert_global_template_as_staff_saves(self):\n mock_request = create_mock_request(user=self.staff_user1)\n template_api.upsert(self.fixture.global_template, request=mock_request)", "def test_update_template_registration(self):\n pass", "def test_set_display_name_user_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.set_display_name(\n self.fixture.user1_template, \"new_name\", request=mock_request\n )", "def test_post_update_regular_user(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n user_url = reverse('user-detail', kwargs={'pk': self.superuser.id})\n self.client.force_authenticate(user=self.user)\n title = 'Random New Title'\n body = 'Random New Body'\n response = self.client.put(url, {'title': title, 'body': body, 'user': user_url}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_delete_user_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.user1_template, request=mock_request\n )", "def test_post_update_unauthorized(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n user_url = reverse('user-detail', kwargs={'pk': self.superuser.id})\n title = 'Random New Title'\n body = 'Random New Body'\n response = self.client.put(url, {'title': title, 'body': body, 'user': user_url}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_not_creator_cannot_update(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url,\n expected_url=expected_url,\n data=self.data)", "def test_replace_user(self):\n pass", "def test_delete_user_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.user1_template, request=mock_request\n )", "def test_set_display_name_global_template_as_anonymous_with_access_right_access_raises_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.set_display_name(\n self.fixture.global_template, \"new_name\", request=mock_request\n )", "def test_set_display_name_global_template_as_user_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.set_display_name(\n self.fixture.global_template, \"new_name\", request=mock_request\n )", "def test_update_by_non_owner(self):\n # User 1\n saved1 = self.create_article()\n article_url = saved1[0]\n # get user2 details\n token = self.create_article_user2()\n response = self.test_client.put(article_url,self.article_update_data, format='json', HTTP_AUTHORIZATION=token)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_anonymous_user_update_anoymous_taskrun(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n anonymous_taskrun = AnonymousTaskRunFactory.create()\r\n\r\n assert_raises(Unauthorized,\r\n getattr(require, 'taskrun').update,\r\n anonymous_taskrun)", "def 
test_put_non_owner(self):\n another_user = CustomUser.objects.create(id=1067, email='another_user1@mail.com', is_active=True)\n another_user.set_password('testpassword')\n another_user.save()\n\n self.client.login(email='another_user1@mail.com', password='testpassword')\n\n data = {\n 'week_day': 3\n }\n\n url = reverse('notification',\n kwargs={'way_id': self.notification.way_id, 'notification_id': self.notification.id})\n response = self.client.put(url, json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')\n self.assertEqual(response.status_code, 403)", "def test_anonymous_user_update_user_taskrun(self):\r\n with self.flask_app.test_request_context('/'):\r\n user_taskrun = TaskRunFactory.create()\r\n\r\n assert_raises(Unauthorized,\r\n getattr(require, 'taskrun').update,\r\n user_taskrun)", "def test_delete_global_template_as_user_raises_access_control_error(self):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.global_template, request=mock_request\n )", "def test_admin_cannot_update_non_existant_user(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Lover',\n username='lover',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.put(\n '/api/v1/users/5',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], \"This user doesn't exist!\")\n self.assertEqual(resp.status_code, 400)", "def test_set_display_name_other_users_template_as_user_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.set_display_name(\n self.fixture.user2_template, \"new_name\", request=mock_request\n )", "def test_delete_global_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.global_template, request=mock_request\n )", "def test_get_global_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.global_template.id, request=mock_request\n )", "def test_set_display_name_global_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.set_display_name(\n self.fixture.global_template, \"new_name\", request=mock_request\n )", "def test_get_other_users_template_as_staff_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.staff_user1)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user2_template.id, request=mock_request\n )", "def test_delete_other_users_template_as_user_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.user2_template, request=mock_request\n )", "def test_get_all_accessible_by_hash_as_anonymous_with_access_right_does_not_return_user_template(\n self,\n ):\n mock_request = 
create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.user1_template.hash, request=mock_request\n )\n self.assertTrue(templates.count() == 0)", "def test_get_other_users_template_raises_access_control_error(self):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user2_template.id, request=mock_request\n )", "def test_delete_global_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.global_template, request=mock_request\n )", "def test_delete_other_users_template_as_staff_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.staff_user1)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.user2_template, request=mock_request\n )", "def test_update_privileges_fails(self):\n user = User.create(name='foo', email='foo@bar.com', user_type='user',\n owned_teams=[], owned_organizations=[])\n user.put()\n\n # You get a 200, but the changes you requested don't happen.\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'user_type': 'super_admin', 'owned_teams': ['Team_foo'],\n 'owned_organizations': ['Organization_foo']},\n headers=self.login_headers(user),\n )\n user_dict = json.loads(response.body)\n self.assertEqual(user.user_type, user_dict['user_type'])\n self.assertEqual(user.owned_teams, user_dict['owned_teams'])\n self.assertEqual(user.owned_organizations,\n user_dict['owned_organizations'])\n\n # Also not changed in the db.\n fetched_user = User.get_by_id(user.uid)\n self.assertEqual(user.user_type, fetched_user.user_type)\n self.assertEqual(user.owned_teams, fetched_user.owned_teams)\n self.assertEqual(user.owned_organizations,\n fetched_user.owned_organizations)", "def test_anonymous_user_update_blogpost(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n blogpost = BlogpostFactory.create()\r\n\r\n assert_raises(Unauthorized, getattr(require, 'blogpost').update, blogpost)", "def test_not_logged_in(self):\n self.request.user = None\n result = user_id_put_view(self.request)['d']\n self.assertEqual(result, error_dict('api_errors', 'not authenticated for this request'))", "def test_update_user(self):\n pass", "def test_set_display_name_other_users_template_as_staff_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.staff_user1)\n with self.assertRaises(AccessControlError):\n template_api.set_display_name(\n self.fixture.user2_template, \"new_name\", request=mock_request\n )", "def test_not_creator_cannot_update_tab(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n expected_url = reverse('group_view', args=(self.group.pk,))\n\n utils.test_cannot_access(self, self.url, expected_url, self.data)", "def test_authenticated_user_update_anonymous_taskrun(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n anonymous_taskrun = AnonymousTaskRunFactory.create()\r\n\r\n assert_raises(Forbidden,\r\n getattr(require, 'taskrun').update,\r\n anonymous_taskrun)", "def test_post_creation_unauthorized(self):\n url = reverse('post-list')\n user_url = reverse('user-detail', kwargs={'pk': self.superuser.id})\n title = 'Test Title'\n body = 'Test Body'\n response = self.client.post(url, {'title': title, 
'body': body, 'user': user_url}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_anonymous_user_update(self):\r\n with self.flask_app.test_request_context('/'):\r\n for token in self.auth_providers:\r\n assert_raises(Unauthorized,\r\n getattr(require, 'token').update,\r\n token)", "def test_post_partial_update_logged_in(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n title = 'Random New Title'\n self.client.force_authenticate(user=self.user)\n response = self.client.patch(url, {'title': title}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "async def test_rendering_template_admin(\n hass: HomeAssistant, mock_api_client: TestClient, hass_admin_user: MockUser\n) -> None:\n hass_admin_user.groups = []\n resp = await mock_api_client.post(const.URL_API_TEMPLATE)\n assert resp.status == HTTPStatus.UNAUTHORIZED", "def test_get_global_template_as_anonymous_with_access_right_returns_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n template = template_api.get_by_id(\n self.fixture.global_template.id, request=mock_request\n )\n self.assertEqual(template, self.fixture.global_template)", "def test_api_user_put(self):\n pass", "def test_share_template_registration(self):\n pass", "def test_delete_own_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.delete(self.fixture.user1_template, request=mock_request)", "def test_post_creation_regular_user(self):\n url = reverse('post-list')\n user_url = reverse('user-detail', kwargs={'pk': self.superuser.id})\n title = 'Test Title'\n body = 'Test Body'\n self.client.force_authenticate(user=self.user)\n response = self.client.post(url, {'title': title, 'body': body, 'user': user_url}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_update_activity_template(self):\n pass", "def test_unshare_template_registration(self):\n pass", "def test_allowed_if_superuser(self):\n\n @task_or_superuser_only\n def view(request):\n return HttpResponse(\"Hello\")\n\n class User(object):\n is_superuser = True\n is_authenticated = True\n\n request = self.factory.get(\"/\")\n request.user = None\n response = view(request)\n self.assertEqual(response.status_code, 403)\n\n request.user = User()\n response = view(request)\n self.assertEqual(response.status_code, 200)", "def test_modify_nonexist_username(self):\n print('(' + self.test_modify_nonexist_username.__name__+')',\n self.test_modify_nonexist_username.__doc__)\n self.assertIsNone(self.connection.modify_user(\n NON_EXIST_PATIENT_USERNAME, PATIENT['public_profile'],\n PATIENT['restricted_profile']))", "def test_admin_update_anonymous_taskrun(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n anonymous_taskrun = AnonymousTaskRunFactory.create()\r\n\r\n assert_raises(Forbidden,\r\n getattr(require, 'taskrun').update,\r\n anonymous_taskrun)", "def test_get_all_accessible_by_hash_list_as_anonymous_with_access_right_does_not_return_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash_list(\n [self.fixture.user1_template.hash], request=mock_request\n )\n self.assertTrue(templates.count() == 0)", "def test_admin_approval_nonexistent_id(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = 
self.registration_profile.objects.get(user=new_user)\n\n user = self.registration_profile.objects.admin_approve_user(\n profile.id, Site.objects.get_current())\n self.assertIs(user, False)", "def test_put_detail_unauthorized(self):\n author = User.objects.create_user(\"test2\", \"test2@example.com\", \"test2\")\n story = create_story(title=\"Test Story\", summary=\"Test Summary\",\n byline=\"Test Byline\", status='published',\n author=author)\n self.api_client.client.login(username=self.username, password=self.password)\n post_data = {\n 'title': \"New Title\",\n 'summary': \"New Summary\",\n 'byline': \"New Byline\",\n 'status': \"published\",\n }\n response = self.api_client.put('/api/0.1/stories/%s/' % (story.story_id),\n format='json', data=post_data)\n self.assertHttpUnauthorized(response)", "def test_functionality(self):\n templateName = \"Test Template\"\n self.browserObject = globalVars.browserObject\n \n #Check for current logged in user\n self.verifyCurrentUser(userRole='Administrator', loginAsUser=True)\n \n #Deleting Standard User\n userList = self.getLocalUsers(userName=globalVars.standardUser)\n if len(userList) > 0:\n self.deleteLocalUser(globalVars.standardUser, verifyUser=True)\n \n #Creates Sample Template if not exists\n self.createSampleTemplate(templateName=templateName, deleteAndCreate=True, publishedTemplate=True)", "def test_put_unauthorized(self):\n\n url = reverse('file')\n\n data = {\n 'shard_id': self.shard1.id,\n 'link_id': \"b8866161-0b1f-4a8e-acde-07047313ec8f\",\n 'parent_datastore_id': str(self.test_datastore_obj.id),\n 'chunk_count': 1,\n 'size': 512,\n }\n\n self.client.force_authenticate(user=self.test_user2_obj)\n response = self.client.put(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_update_self_fail(self):\n new_user = self.create_user('1')\n url = '/0/chefs/' + str(new_user.pk)\n\n headers = self.login()\n resp = self.client.put(url, **headers)\n self.assertInvalidCredentials(resp)", "def allowed_topologytemplate_access_change(user, template):\n try:\n up = user.get_profile()\n except AttributeError:\n return False\n\n return template.owner == user or user.has_perm(\"vnswww.topologytemplate_change_any\") or (user.has_perm(\"vnswww.topologytemplate_change_org\") and template.org == up.org)", "def test_upsert_user(self):\n db = database.Database()\n db.upsert_user('nick', 100, 100)\n\n the_args, _ = db._cursor.execute.call_args\n sql = the_args[0]\n expected_sql = 'INSERT INTO quota_violations (username, triggered, last_notified)\\n VALUES (%s, %s, %s)\\n ON CONFLICT (username)\\n DO UPDATE SET\\n (triggered, last_notified)\\n = (EXCLUDED.triggered, EXCLUDED.last_notified);\\n '\n\n self.assertEqual(sql, expected_sql)", "def test_create__site_editor_forbidden(self):\n testing_config.sign_in('editor@example.com', 123567890)\n\n with test_app.test_request_context(self.request_path):\n with self.assertRaises(werkzeug.exceptions.Forbidden):\n self.handler.do_post()\n\n new_appuser = user_models.AppUser.query(\n user_models.AppUser.email == 'new@example.com').get()\n self.assertIsNone(new_appuser)", "def testAnonymousCannotEdit(self):\n response = self.client.get(reverse(\"task_detail\", args=[1]))\n self.failUnlessEqual(response.status_code, 200)\n self.failUnless(response.content.find(\"<h2>Edit</h2>\") == -1,\n \"Anonymous user is able to edit tasks.\")", "def test_user_update_permissions(self):\n userPK = User.objects.get(username='c2e1').pk\n url = reverse('User-detail', kwargs={'pk': userPK})\n data = 
{'username': 'company1NewTest'}\n response = self.client.put(url, data, format='json')\n #This is 404 instead of 403 because there is no way to view a company\n #that you arent an employee of.\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n self.assertNotEqual(User.objects.get(pk=userPK).username,\n 'company1NewTest')\n data = {'username': 'company1NewTest2'}\n response = self.client.patch(url, data, format='json')\n #This is 404 instead of 403 because there is no way to view a company\n #that you arent an employee of.\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n self.assertNotEqual(User.objects.get(pk=userPK).username,\n 'company1NewTest2')", "def test_user_cannot_write(app, resource):\n with app.user():\n data = {}\n\n # Try to post something\n app.client.post('/' + resource,\n data=data,\n assert_status=403)\n\n # Create fake item, try to patch/delete it\n _id = app.data.driver.db[resource].insert({})\n app.client.patch('/%s/%s' % (resource, _id),\n data=data,\n assert_status=403)\n app.client.delete('/%s/%s' % (resource, _id),\n assert_status=403)", "def test_post_partial_update_unauthorized(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n title = 'Random New Title'\n response = self.client.patch(url, {'title': title}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_delete_other_users_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.delete(self.fixture.user2_template, request=mock_request)", "def test_get_all_accessible_by_hash_as_anonymous_does_not_return_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.user1_template.hash, request=mock_request\n )\n self.assertTrue(templates.count() == 0)", "def allowed_topologytemplate_access_create(user):\n return user.has_perm(\"vnswww.add_topologytemplate\")", "def test_patch_user(self):\n pass", "def test_profile_route_uses_right_templates(self):\n self.add_testuser()\n response = self.client.get(\"/profile/testuser/\")\n self.assertTemplateUsed(response, \"layout.html\")", "def test_username_is_writable_for_user_creation(self):\n request = Mock()\n assert 'username' not in self.admin.get_readonly_fields(request)", "def test_project_create_cant_edit_users_existing_user(self):\n user = fake_clients.FakeUser(name=\"test@example.com\")\n\n setup_identity_cache(users=[user])\n\n url = \"/v1/actions/CreateProjectAndUser\"\n data = {\"project_name\": \"test_project\", \"email\": \"test@example.com\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"task created\"]})\n task = Task.objects.all()[0]\n action_models = task.actions\n actions = [act.get_action() for act in action_models]\n self.assertTrue(all([act.valid for act in actions]))", "def test_update_container_privilege(self):\n pass", "def test_update_restaurant_unauthorized(self):\n from espresso import db\n from espresso import Restaurant\n\n name = 'Restaurant Pho 2000'\n db.session.add(Restaurant(name=name, creator='test-user@gmail.com'))\n db.session.commit()\n\n headers = {'Content-Type': 'application/json'}\n info = {'name': 'Php 2048'}\n resp = self.test_client.put(self.API_BASE + '/1', headers=headers, data=json.dumps(info))\n self.assertEqual(resp.status_code, 401)", "def 
test_modify_userid_404(self):\n resp = self.app.put('/users/thisuserdoesntexist',\n data=json.dumps(self.test_user1_data))\n assert resp.status_code == 404", "def test_update_template_subscription(self):\n pass", "def test_user_not_in_group_cannot_update(self):\n\n utils.create_user_and_authenticate(self)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url,\n expected_url=expected_url,\n data=self.data)", "def test_unauthorized_create_user(self):\n res = self.submit()\n\n assert res.status_code == 401", "def test_create_successful(self):\n self.webkom.add_user(self.disallowed_user)\n request = self.factory.post(\"/permissiontest/\", self.test_update_object)\n force_authenticate(request, self.disallowed_user)\n view = TestViewSet.as_view({\"post\": \"create\"})\n\n response = view(request)\n created = response.data\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(created[\"name\"], self.test_update_object[\"name\"])", "def test_no_template(self):\n distillery = Distillery.objects.get_by_natural_key(\n 'mongodb.test_database.test_docs')\n try:\n distillery.save()\n except AttributeError:\n self.fail('put_template() raised AttributeError unexpectedly')", "def test_delete_own_template_as_user_saves(self):\n mock_request = create_mock_request(user=self.user1)\n template_api.delete(self.fixture.user1_template, request=mock_request)", "def test_put_change_user(self):\n new_user = self.make_user('new_user')\n url = reverse(\n 'projectroles:api_role_update',\n kwargs={'roleassignment': self.update_as.sodar_uuid},\n )\n put_data = {\n 'role': PROJECT_ROLE_GUEST,\n 'user': str(new_user.sodar_uuid),\n }\n response = self.request_knox(url, method='PUT', data=put_data)\n self.assertEqual(response.status_code, 400, msg=response.content)", "def test_wrong_id(self):\n self.request.matchdict = {'user_id': int(self.request.user.id)+4}\n self.request.json_body = {}\n result = user_id_put_view(self.request)['d']\n self.assertEqual(result, error_dict('api_errors', 'not authenticated for this request'))", "def test_put_unauthenticated(self):\n\n url = reverse('file')\n\n data = {}\n\n response = self.client.put(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_delete_global_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.delete(self.fixture.global_template, request=mock_request)", "def test_if_not_created_authenticated_permissions(self):\r\n payload = {\r\n \"email\": \"t@t.pl\",\r\n \"password\": \"password\",\r\n \"name\": \"asdasd\",\r\n \"is_superuser\": False,\r\n }\r\n\r\n res = self.client_authenticated.post(reverse(CREATE_USER_URL),data=payload)\r\n self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)", "def test_create__forbidden(self):\n testing_config.sign_in('one@example.com', 123567890)\n\n with test_app.test_request_context(self.request_path):\n with self.assertRaises(werkzeug.exceptions.Forbidden):\n self.handler.do_post(self.appuser_id)\n\n new_appuser = user_models.AppUser.query(\n user_models.AppUser.email == 'new@example.com').get()\n self.assertIsNone(new_appuser)", "def test_updateview_write_for_wrong_user(self):\n\n for user in self.users:\n updateview = reverse('account_update', args=(user.uuid,))\n other_users = self.users\n other_users.remove(user)\n random_user = random.choice(other_users)\n\n self.client.login(email=random_user.email, password='letmein')\n\n valid_data = {'email': user.email, 
'first_name': user.first_name,\n 'last_name': user.last_name, 'language': user.language}\n invalid_data = valid_data.copy()\n invalid_data['email'] = 'invalid_email_address'\n valid_data_response = self.client.post(updateview, valid_data)\n invalid_data_response = self.client.post(updateview, invalid_data)\n\n self.assertEqual(valid_data_response.status_code, 403)\n self.assertEqual(invalid_data_response.status_code, 403)", "def test_modify_reusableitem_not_authenticated(self):\n self.client.logout()\n \n response = self.client.patch(get_reusable_item_1_url(self), {}, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_create_user(self):\n pass" ]
[ "0.80965334", "0.7977133", "0.7963794", "0.7793425", "0.7664125", "0.7498813", "0.72754526", "0.7163729", "0.71102697", "0.708983", "0.6803297", "0.6699159", "0.6673843", "0.6615065", "0.66076845", "0.66003054", "0.6594539", "0.65447944", "0.6544686", "0.65272033", "0.64977634", "0.6496953", "0.6473507", "0.6436947", "0.6425713", "0.6418514", "0.64179814", "0.635491", "0.6345167", "0.63288957", "0.63203406", "0.6314373", "0.63112473", "0.6310762", "0.62795925", "0.6269178", "0.62355894", "0.6185811", "0.6184902", "0.6175548", "0.61633986", "0.61500776", "0.6142709", "0.6135854", "0.61344856", "0.61259097", "0.6112303", "0.6110663", "0.6083543", "0.60513836", "0.60461426", "0.6041386", "0.6037267", "0.60344625", "0.60318387", "0.6020283", "0.6018681", "0.60152423", "0.601422", "0.6012446", "0.60098183", "0.60077363", "0.6003441", "0.60007346", "0.599935", "0.59838134", "0.59784997", "0.5971767", "0.5969463", "0.59587646", "0.5957727", "0.5925479", "0.59199804", "0.59188676", "0.5917084", "0.5902147", "0.59016365", "0.5896506", "0.5895874", "0.58746046", "0.5867948", "0.5863745", "0.58615196", "0.58466804", "0.5845454", "0.58443666", "0.5842501", "0.5839089", "0.5834961", "0.58336073", "0.58300596", "0.58291113", "0.5818142", "0.5816303", "0.5816053", "0.58152187", "0.5808154", "0.5804712", "0.58033174", "0.58030266" ]
0.82011104
0
test upsert global template as anonymous raises access control error
def test_upsert_global_template_as_anonymous_raises_access_control_error(
    self,
):
    mock_request = create_mock_request(user=self.anonymous_user)
    with self.assertRaises(AccessControlError):
        template_api.upsert(
            self.fixture.global_template, request=mock_request
        )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_upsert_global_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.global_template, request=mock_request\n )", "def test_upsert_global_template_as_user_raises_access_control_error(self):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.global_template, request=mock_request\n )", "def test_upsert_user_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.user1_template, request=mock_request\n )", "def test_upsert_global_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.upsert(self.fixture.global_template, request=mock_request)", "def test_upsert_user_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.user1_template, request=mock_request\n )", "def test_update_template_registration(self):\n pass", "def test_upsert_global_template_as_staff_saves(self):\n mock_request = create_mock_request(user=self.staff_user1)\n template_api.upsert(self.fixture.global_template, request=mock_request)", "def test_upsert_own_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.upsert(self.fixture.user1_template, request=mock_request)", "def test_upsert_own_template_as_user_saves(self):\n mock_request = create_mock_request(user=self.user1)\n template_api.upsert(self.fixture.user1_template, request=mock_request)", "def test_upsert_other_users_template_as_user_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.user2_template, request=mock_request\n )", "def test_no_template(self):\n distillery = Distillery.objects.get_by_natural_key(\n 'mongodb.test_database.test_docs')\n try:\n distillery.save()\n except AttributeError:\n self.fail('put_template() raised AttributeError unexpectedly')", "def test_upsert_other_users_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.upsert(self.fixture.user2_template, request=mock_request)", "def test_unshare_template_registration(self):\n pass", "def test_upsert_other_users_template_as_staff_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.staff_user1)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.user2_template, request=mock_request\n )", "def test_register_template(self):\n pass", "def test_share_template_registration(self):\n pass", "def test_upsert_own_template_as_staff_saves(self):\n mock_request = create_mock_request(user=self.staff_user1)\n template_api.upsert(self.fixture.user1_template, request=mock_request)", "def test_update_template_subscription(self):\n pass", "def test_replace_namespaced_template(self):\n pass", "def test_get_global_template_as_anonymous_with_access_right_returns_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n template = 
template_api.get_by_id(\n self.fixture.global_template.id, request=mock_request\n )\n self.assertEqual(template, self.fixture.global_template)", "def test_update_activity_template(self):\n pass", "def test_set_display_name_global_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.set_display_name(\n self.fixture.global_template, \"new_name\", request=mock_request\n )", "def test_get_global_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.global_template.id, request=mock_request\n )", "def test_patch_namespaced_template(self):\n pass", "def test_set_display_name_global_template_as_anonymous_with_access_right_access_raises_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.set_display_name(\n self.fixture.global_template, \"new_name\", request=mock_request\n )", "def test_delete_global_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.global_template, request=mock_request\n )", "def test_retrieve_template_registration(self):\n pass", "def test_invalidate_template_cache_in_virtualization_realm(self):\n pass", "def test_update_template_profile_for_system_module(self):\n pass", "def test_set_display_name_global_template_as_user_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.set_display_name(\n self.fixture.global_template, \"new_name\", request=mock_request\n )", "def test_delete_global_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.global_template, request=mock_request\n )", "def test_unregister_template(self):\n pass", "def test_create_template_subsciption(self):\n pass", "def test_update_template_not_found(self):\n template_id = \"foo\"\n\n rv = TEST_CLIENT.patch(f\"/templates/{template_id}\", json={})\n result = rv.json()\n\n expected = {\n \"message\": \"The specified template does not exist\",\n \"code\": \"TemplateNotFound\",\n }\n self.assertDictEqual(expected, result)\n self.assertEqual(rv.status_code, 404)", "def test_exists_false(self):\n self.assertFalse(PrepTemplate.exists(2))", "def test_delete_global_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.delete(self.fixture.global_template, request=mock_request)", "def test_update_subscription_template(self):\n pass", "def test_set_display_name_user_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.set_display_name(\n self.fixture.user1_template, \"new_name\", request=mock_request\n )", "def test_delete_global_template_as_user_raises_access_control_error(self):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n 
self.fixture.global_template, request=mock_request\n )", "def test_create_namespaced_processed_template(self):\n pass", "def test_update_device_template(self):\n pass", "def test_exists_true(self):\n self.assertTrue(PrepTemplate.exists(1))", "def test_get_user_template_as_anonymous_raises_access_control_error(self):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user1_template.id, request=mock_request\n )", "def test_05_use_private_template_in_project(self):\n # 1. Create a project\n # 2. Verify that in order to use somebody's Private template for vm\n # creation in the project, permission to use the template has to\n # be granted to the Project (use API 'updateTemplatePermissions'\n # with project id to achieve that).\n\n try:\n self.debug(\"Deploying VM for with public template: %s\" %\n self.template.id)\n virtual_machine_1 = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n serviceofferingid=self.service_offering.id,\n projectid=self.project.id\n )\n self.cleanup.append(virtual_machine_1)\n # Verify VM state\n self.assertEqual(virtual_machine_1.state,\n 'Running',\n \"Check VM state is Running or not\")\n virtual_machine_1.stop(self.apiclient)\n # Get the Root disk of VM\n volumes = list_volumes(\n self.apiclient,\n projectid=self.project.id,\n type='ROOT',\n listall=True\n )\n self.assertEqual(\n isinstance(volumes, list),\n True,\n \"Check for list volume response return valid data\"\n )\n volume = volumes[0]\n\n self.debug(\"Creating template from volume: %s\" % volume.id)\n # Create a template from the ROOTDISK\n template_1 = Template.create(\n self.userapiclient,\n self.services[\"template\"],\n volumeid=volume.id\n )\n\n self.cleanup.append(template_1)\n # Verify Template state\n self.assertEqual(\n template_1.isready,\n True,\n \"Check Template is in ready state or not\"\n )\n\n # Update template permissions to grant permission to project\n self.debug(\n \"Updating template permissions:%s to grant access to project: %s\" % (\n template_1.id,\n self.project.id\n ))\n\n template_1.updatePermissions(\n self.apiclient,\n op='add',\n projectids=self.project.id\n )\n self.debug(\"Deploying VM for with privileged template: %s\" %\n self.template.id)\n virtual_machine_2 = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=template_1.id,\n serviceofferingid=self.service_offering.id,\n projectid=self.project.id\n )\n self.cleanup.append(virtual_machine_2)\n # Verify VM state\n self.assertEqual(\n virtual_machine_2.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n except Exception as e:\n self.fail(\"Exception occurred: %s\" % e)\n return", "def test_set_display_name_user_template_as_anonymous_with_access_right_access_raises_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.set_display_name(\n self.fixture.user1_template, \"new_name\", request=mock_request\n )", "def test_create_namespaced_template(self):\n pass", "def test_replace_user(self):\n pass", "def test_update_template_given_name_already_exists(self):\n template_id = util.MOCK_UUID_1\n template_name = util.MOCK_TEMPLATE_NAME_2\n rv = TEST_CLIENT.patch(\n f\"/templates/{template_id}\", json={\"name\": template_name}\n )\n result = rv.json()\n\n expected = {\n \"message\": \"a template with that name already exists\",\n \"code\": 
\"TemplateNameExists\",\n }\n self.assertDictEqual(expected, result)\n self.assertEqual(rv.status_code, 400)", "def test_06_datastore_upsert_without_resource_id(self, Mock):\r\n html_request = FakeRequest(json.dumps(self.task_upsert), 200,\r\n {'content-type': 'application/json'})\r\n\r\n record = dict(info=dict(foo=\"bar\"))\r\n Mock.return_value = html_request\r\n with self.flask_app.test_request_context('/'):\r\n out = self.ckan.datastore_upsert(name='task',\r\n records=json.dumps([record]),\r\n resource_id=None)\r\n err_msg = \"It should return True\"\r\n assert out is True, err_msg\r\n # Check the error\r\n Mock.return_value = self.server_error\r\n try:\r\n self.ckan.datastore_upsert(name='task',\r\n records=json.dumps([record]),\r\n resource_id=self.task_resource_id)\r\n except Exception as out:\r\n type, msg, status_code = out.args\r\n assert \"Server Error\" in msg, msg\r\n assert 500 == status_code, status_code\r\n assert \"CKAN: the remote site failed! datastore_upsert failed\" == type, type", "def test_template_feedback(self):\r\n pass", "def test_get_user_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user1_template.id, request=mock_request\n )", "def test_get_all_as_anonymous_with_access_right_returns_global_templates(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all(request=mock_request)\n self.assertEqual(templates.count(), 1)\n self.assertTrue((template.user is None for template in templates))", "def test_delete_namespaced_template(self):\n pass", "def test_templater(self):\n\n # Set a global templater for all items\n self.site.template(r\"(.*)\", lambda item: \"ALL\")\n # Set another templater on the index item\n self.site.template(r\"index.html\", lambda item: \"INDEX\")\n\n # Since an item can only have one templater, the index templater should have been overwritten\n self.assertEqual(\"INDEX\", self.site.items[\"index.html\"].templated)\n self.assertEqual(\"ALL\", self.site.items[\"test/test.html\"].templated)", "def test_global_template_as_user_returns_template(self):\n mock_request = create_mock_request(user=self.user1)\n template = template_api.get_by_id(\n self.fixture.global_template.id, request=mock_request\n )\n self.assertEqual(template, self.fixture.global_template)", "def test_post_partial_update_unauthorized(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n title = 'Random New Title'\n response = self.client.patch(url, {'title': title}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_team_template_folders_id_replace_post(self):\n pass", "def test_functionality(self):\n templateName = \"Test Template\"\n self.browserObject = globalVars.browserObject\n \n #Check for current logged in user\n self.verifyCurrentUser(userRole='Administrator', loginAsUser=True)\n \n #Deleting Standard User\n userList = self.getLocalUsers(userName=globalVars.standardUser)\n if len(userList) > 0:\n self.deleteLocalUser(globalVars.standardUser, verifyUser=True)\n \n #Creates Sample Template if not exists\n self.createSampleTemplate(templateName=templateName, deleteAndCreate=True, publishedTemplate=True)", "def test_04_public_private_template_use_in_project(self):\n # 1. Create a project\n # 2. Verify Public templates can be used without any restriction\n # 3. 
Verify that private template created in project belongs to this project\n # Verify that list template api wth project id list this template\n\n\n\n try:\n self.debug(\"Deploying VM for with public template: %s\" %\n self.template.id)\n virtual_machine_1 = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n serviceofferingid=self.service_offering.id,\n projectid=self.project.id\n )\n self.cleanup.append(virtual_machine_1)\n # Verify VM state\n self.assertEqual(\n virtual_machine_1.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n virtual_machine_1.stop(self.apiclient)\n # Get the Root disk of VM\n volumes = list_volumes(\n self.apiclient,\n projectid=self.project.id,\n type='ROOT',\n listall=True\n )\n self.assertEqual(\n isinstance(volumes, list),\n True,\n \"Check for list volume response return valid data\"\n )\n volume = volumes[0]\n\n self.debug(\"Creating template from volume: %s\" % volume.id)\n # Create a template from the ROOTDISK\n template_1 = Template.create(\n self.apiclient,\n self.services[\"template\"],\n volumeid=volume.id,\n projectid=self.project.id\n )\n\n self.cleanup.append(template_1)\n # Verify Template state\n self.assertEqual(\n template_1.isready,\n True,\n \"Check Template is in ready state or not\"\n )\n # Verify list template with project id is listing this template\n templatelist = Template.list(self.apiclient,projectid=self.project.id,id=template_1.id,templatefilter=\"all\")\n self.assertEqual(templatelist[0].id,template_1.id,\"template created does not belong to the project\")\n\n\n except Exception as e:\n self.fail(\"Exception occurred: %s\" % e)\n return", "def test_delete_user_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.user1_template, request=mock_request\n )", "async def test_rendering_template_admin(\n hass: HomeAssistant, mock_api_client: TestClient, hass_admin_user: MockUser\n) -> None:\n hass_admin_user.groups = []\n resp = await mock_api_client.post(const.URL_API_TEMPLATE)\n assert resp.status == HTTPStatus.UNAUTHORIZED", "def test_upsert_user(self):\n db = database.Database()\n db.upsert_user('nick', 100, 100)\n\n the_args, _ = db._cursor.execute.call_args\n sql = the_args[0]\n expected_sql = 'INSERT INTO quota_violations (username, triggered, last_notified)\\n VALUES (%s, %s, %s)\\n ON CONFLICT (username)\\n DO UPDATE SET\\n (triggered, last_notified)\\n = (EXCLUDED.triggered, EXCLUDED.last_notified);\\n '\n\n self.assertEqual(sql, expected_sql)", "def test_update_template_success(self):\n template_id = util.MOCK_UUID_1\n template_name = \"template-3\"\n\n rv = TEST_CLIENT.patch(\n f\"/templates/{template_id}\", json={\"name\": template_name}\n )\n result = rv.json()\n\n expected = {\n \"uuid\": template_id,\n \"name\": template_name,\n \"tasks\": [\n {\n \"uuid\": util.MOCK_UUID_1,\n \"task_id\": util.MOCK_UUID_1,\n \"dependencies\": [],\n \"position_x\": 0.0,\n \"position_y\": 0.0,\n }\n ],\n \"experimentId\": util.MOCK_UUID_1,\n \"deploymentId\": None,\n \"createdAt\": util.MOCK_CREATED_AT_1.isoformat(),\n \"updatedAt\": mock.ANY,\n }\n self.assertEqual(result, expected)\n self.assertEqual(rv.status_code, 200)", "def test_blank_content_object_production(self):\n tmpl = Template(\"\"\"\n {% load editregion %}\n {% editregion \"test\" obj %}fallback{% endeditregion %}\n \"\"\")\n self.assertEqual('fallback', 
tmpl.render(Context()).strip())", "def test_no_uid_causes_error():\n empty = create_db()\n with pytest.raises(ValueError):\n o_obj.update_object_in_db(\n empty,\n \"some_uid\",\n INP\n )", "def test_post_update_unauthorized(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n user_url = reverse('user-detail', kwargs={'pk': self.superuser.id})\n title = 'Random New Title'\n body = 'Random New Body'\n response = self.client.put(url, {'title': title, 'body': body, 'user': user_url}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_none_content_object_production(self):\n tmpl = Template(\"\"\"\n output:\n {% load editregion %}\n {% editregion \"test\" None %}fallback{% endeditregion %}\n \"\"\")\n self.assertEqual('output:', tmpl.render(Context()).strip())", "def test_team_template_folders_id_put(self):\n pass", "def test_06_datastore_upsert(self, Mock):\r\n html_request = FakeRequest(json.dumps(self.task_upsert), 200,\r\n {'content-type': 'application/json'})\r\n\r\n record = dict(info=dict(foo=\"bar\"))\r\n Mock.return_value = html_request\r\n with self.flask_app.test_request_context('/'):\r\n out = self.ckan.datastore_upsert(name='task',\r\n records=json.dumps([record]),\r\n resource_id=self.task_resource_id)\r\n err_msg = \"It should return True\"\r\n assert out is True, err_msg\r\n # Check the error\r\n Mock.return_value = self.server_error\r\n try:\r\n self.ckan.datastore_upsert(name='task',\r\n records=json.dumps([record]),\r\n resource_id=self.task_resource_id)\r\n except Exception as out:\r\n type, msg, status_code = out.args\r\n assert \"Server Error\" in msg, msg\r\n assert 500 == status_code, status_code\r\n assert \"CKAN: the remote site failed! datastore_upsert failed\" == type, type", "def test_anonymous_user_update_anoymous_taskrun(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n anonymous_taskrun = AnonymousTaskRunFactory.create()\r\n\r\n assert_raises(Unauthorized,\r\n getattr(require, 'taskrun').update,\r\n anonymous_taskrun)", "def test_update_by_non_owner(self):\n # User 1\n saved1 = self.create_article()\n article_url = saved1[0]\n # get user2 details\n token = self.create_article_user2()\n response = self.test_client.put(article_url,self.article_update_data, format='json', HTTP_AUTHORIZATION=token)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_delete_global_template_as_staff_saves(self):\n mock_request = create_mock_request(user=self.staff_user1)\n template_api.delete(self.fixture.global_template, request=mock_request)", "def test_post_partial_update_logged_in(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n title = 'Random New Title'\n self.client.force_authenticate(user=self.user)\n response = self.client.patch(url, {'title': title}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def testTemplateGet(self):\n self.assertRaises(NotImplementedError, getattr,\n self.tempfile, 'template')", "def test_init(self):\n st = PrepTemplate(1)\n self.assertTrue(st.id, 1)", "def test_not_authed_public_project(self):\n # Clear out existing project with ID=1 if necessary.\n Project.objects.filter(id=1).delete()\n locale = LocaleFactory.create(code='fakelocale')\n project = ProjectFactory.create(id=1, slug='valid-project', locales=[locale])\n ResourceFactory.create(project=project)\n\n response = self.client.get('/fakelocale/valid-project/')\n assert_equal(response.status_code, 200)\n # I'd assertTemplateUsed here but 
it doesn't work on non-DTL\n # templates.", "def test_replace_identity(self):\n pass", "def test_put_unauthenticated(self):\n\n url = reverse('file')\n\n data = {}\n\n response = self.client.put(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_allow_relaxed_update():\n starting_db = create_db(STARTING_DB_INPUT)\n response = o_obj.update_object_in_db(\n starting_db,\n \"some_uid\",\n json.dumps({\n \"my_key\": \"I don't include a uid, but passed it in the url\"\n }))\n assert response == {\n \"uid\": \"some_uid\",\n \"my_key\": \"I don't include a uid, but passed it in the url\"\n }", "def test_put_unauthorized(self):\n\n url = reverse('file')\n\n data = {\n 'shard_id': self.shard1.id,\n 'link_id': \"b8866161-0b1f-4a8e-acde-07047313ec8f\",\n 'parent_datastore_id': str(self.test_datastore_obj.id),\n 'chunk_count': 1,\n 'size': 512,\n }\n\n self.client.force_authenticate(user=self.test_user2_obj)\n response = self.client.put(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_delete_user_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.user1_template, request=mock_request\n )", "def test_init_unknown_error(self):\n with self.assertRaises(QiitaDBUnknownIDError):\n PrepTemplate(2)", "def test_get_templates_in_virtualization_realm(self):\n pass", "def test_exists_false(self):\n self.assertFalse(SampleTemplate.exists(self.new_study.id))", "def test_template_local_file_noclobber(file, tmp_path):\n source = dest = tmp_path / \"source\"\n source.write_text(\"{{ foo }}\\n\")\n\n ret = file.managed(\n name=str(dest),\n source=str(source),\n template=\"jinja\",\n context={\"foo\": \"Hello world!\"},\n )\n assert ret.result is False\n assert \"Source file cannot be the same as destination\" in ret.comment", "def test_security_on_put(self):\n # test the update url\n product = Product.objects.all()[0]\n url = '/product/xml/%s/' % product.item_number\n response = self.client.put(url,{'description':'my new description'})\n self.failUnlessEqual(response.status_code, 401)", "def test_with_unsafe(self):\n t = Template('{% load djblets_utils %}'\n '{% definevar \"myvar\" unsafe %}<hello>{% enddefinevar %}'\n '{{myvar}}')\n\n self.assertEqual(t.render(Context()), '&lt;hello&gt;')", "def test_create_template_for_all_namespaces(self):\n pass", "def testNonExistantTemplate(self):\n try:\n self.service.get_template(\"Non_Existant\")\n self.fail(\"No ServiceError raised by non-existant template\")\n except ServiceError as ex:\n self.assertEqual(\n ex.message,\n \"There is no template called 'Non_Existant' at this service\")", "def test_xml_template_set(self):\n xmlTemplateResult = XmlTemplate.objects.get(id=1)\n xmlTemplateResult.set(\"newTemplate\", '''<?xml >\n <project name=\"newTemplate\">\n </project> ''',)\n self.assertEqual(xmlTemplateResult.template_name, \"newTemplate\")\n self.assertEqual(xmlTemplateResult.template_content, '''<?xml >\n <project name=\"newTemplate\">\n </project> ''')", "def test_get_any_template_as_superuser_returns_template(self):\n mock_request = create_mock_request(user=self.superuser1)\n template = template_api.get_by_id(\n self.fixture.user1_template.id, request=mock_request\n )\n self.assertEqual(template, self.fixture.user1_template)\n template = template_api.get_by_id(\n self.fixture.user2_template.id, 
request=mock_request\n )\n self.assertEqual(template, self.fixture.user2_template)\n template = template_api.get_by_id(\n self.fixture.global_template.id, request=mock_request\n )\n self.assertEqual(template, self.fixture.global_template)", "def test_post_update_regular_user(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n user_url = reverse('user-detail', kwargs={'pk': self.superuser.id})\n self.client.force_authenticate(user=self.user)\n title = 'Random New Title'\n body = 'Random New Body'\n response = self.client.put(url, {'title': title, 'body': body, 'user': user_url}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_customization_template_crud():\n template_crud = pxe.CustomizationTemplate(\n name=generate_random_string(size=8),\n description=generate_random_string(size=16),\n image_type='RHEL-6',\n script_type='Kickstart',\n script_data='Testing the script')\n\n template_crud.create()\n with update(template_crud):\n template_crud.name = template_crud.name + \"_update\"\n template_crud.delete(cancel=False)", "def test_customization_template_crud():\n template_crud = pxe.CustomizationTemplate(\n name=generate_random_string(size=8),\n description=generate_random_string(size=16),\n image_type='RHEL-6',\n script_type='Kickstart',\n script_data='Testing the script')\n\n template_crud.create()\n with update(template_crud):\n template_crud.name = template_crud.name + \"_update\"\n template_crud.delete(cancel=False)", "async def test_rendering_template_legacy_user(\n hass: HomeAssistant,\n mock_api_client: TestClient,\n aiohttp_client: ClientSessionGenerator,\n legacy_auth: LegacyApiPasswordAuthProvider,\n) -> None:\n hass.states.async_set(\"sensor.temperature\", 10)\n client = await aiohttp_client(hass.http.app)\n resp = await client.post(\n const.URL_API_TEMPLATE,\n json={\"template\": \"{{ states.sensor.temperature.state }}\"},\n )\n assert resp.status == HTTPStatus.UNAUTHORIZED", "def test_create_activity_template(self):\n pass", "def test_init_wrong_template(self):\n with self.assertRaises(IncompetentQiitaDeveloperError):\n Sample('SKB8.640193', PrepTemplate(1))", "def test_tag_with_invalid_object(self):\n with self.assertRaises(TemplateSyntaxError):\n edit_link('anyobject')", "def test_get_all_accessible_by_hash_as_superuser_returns_global_template(\n self,\n ):\n mock_request = create_mock_request(user=self.superuser1)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.global_template.hash, request=mock_request\n )\n self.assertTrue(self.fixture.user1_template not in list(templates))\n self.assertTrue(self.fixture.user2_template not in list(templates))\n self.assertTrue(self.fixture.global_template in list(templates))", "def test_iso_datastore_crud(provider_init, no_iso_dss, provider_crud, iso_datastore):\n template_crud = pxe.ISODatastore(provider_crud.name)\n template_crud.create()\n template_crud.delete(cancel=False)" ]
[ "0.7650722", "0.756786", "0.73319674", "0.72709674", "0.71599734", "0.6970519", "0.68684846", "0.67423", "0.65906715", "0.6536628", "0.64568055", "0.64072675", "0.6396326", "0.6376108", "0.6357626", "0.63539773", "0.6263191", "0.6222696", "0.6143397", "0.6139016", "0.6118972", "0.60936224", "0.6063915", "0.6031473", "0.59694725", "0.59293234", "0.5908521", "0.58814085", "0.587513", "0.5859355", "0.5814249", "0.57979053", "0.5760208", "0.57554096", "0.5748462", "0.57264376", "0.5708247", "0.56995595", "0.5691743", "0.5666711", "0.5620948", "0.5596867", "0.55665", "0.5561048", "0.5555373", "0.55392194", "0.5538952", "0.55244076", "0.5513533", "0.5509216", "0.5480971", "0.5475434", "0.54637223", "0.54602313", "0.54593456", "0.54587483", "0.5457292", "0.5451667", "0.54320776", "0.54072684", "0.54019386", "0.539697", "0.53800684", "0.53719276", "0.5367305", "0.5353598", "0.5345565", "0.53400475", "0.53379077", "0.53311706", "0.5325127", "0.53199375", "0.5309711", "0.5302011", "0.5290506", "0.52723163", "0.52678365", "0.52624196", "0.5256076", "0.5245994", "0.5238057", "0.52327794", "0.523138", "0.52307016", "0.52262163", "0.52183604", "0.52051723", "0.52041596", "0.52014464", "0.5200366", "0.5195117", "0.519425", "0.5193299", "0.5193299", "0.51844865", "0.51819026", "0.51742214", "0.51737106", "0.51723593", "0.51708573" ]
0.7757241
0
test upsert global template as anonymous raises access control error
def test_upsert_global_template_as_anonymous_with_access_right_raises_access_control_error( self, ): mock_request = create_mock_request(user=self.anonymous_user) with self.assertRaises(AccessControlError): template_api.upsert( self.fixture.global_template, request=mock_request )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_upsert_global_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.global_template, request=mock_request\n )", "def test_upsert_global_template_as_user_raises_access_control_error(self):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.global_template, request=mock_request\n )", "def test_upsert_user_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.user1_template, request=mock_request\n )", "def test_upsert_global_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.upsert(self.fixture.global_template, request=mock_request)", "def test_upsert_user_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.user1_template, request=mock_request\n )", "def test_update_template_registration(self):\n pass", "def test_upsert_global_template_as_staff_saves(self):\n mock_request = create_mock_request(user=self.staff_user1)\n template_api.upsert(self.fixture.global_template, request=mock_request)", "def test_upsert_own_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.upsert(self.fixture.user1_template, request=mock_request)", "def test_upsert_own_template_as_user_saves(self):\n mock_request = create_mock_request(user=self.user1)\n template_api.upsert(self.fixture.user1_template, request=mock_request)", "def test_upsert_other_users_template_as_user_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.user2_template, request=mock_request\n )", "def test_no_template(self):\n distillery = Distillery.objects.get_by_natural_key(\n 'mongodb.test_database.test_docs')\n try:\n distillery.save()\n except AttributeError:\n self.fail('put_template() raised AttributeError unexpectedly')", "def test_upsert_other_users_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.upsert(self.fixture.user2_template, request=mock_request)", "def test_unshare_template_registration(self):\n pass", "def test_upsert_other_users_template_as_staff_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.staff_user1)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.user2_template, request=mock_request\n )", "def test_register_template(self):\n pass", "def test_share_template_registration(self):\n pass", "def test_upsert_own_template_as_staff_saves(self):\n mock_request = create_mock_request(user=self.staff_user1)\n template_api.upsert(self.fixture.user1_template, request=mock_request)", "def test_update_template_subscription(self):\n pass", "def test_replace_namespaced_template(self):\n pass", "def test_get_global_template_as_anonymous_with_access_right_returns_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n template = template_api.get_by_id(\n 
self.fixture.global_template.id, request=mock_request\n )\n self.assertEqual(template, self.fixture.global_template)", "def test_update_activity_template(self):\n pass", "def test_set_display_name_global_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.set_display_name(\n self.fixture.global_template, \"new_name\", request=mock_request\n )", "def test_get_global_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.global_template.id, request=mock_request\n )", "def test_patch_namespaced_template(self):\n pass", "def test_set_display_name_global_template_as_anonymous_with_access_right_access_raises_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.set_display_name(\n self.fixture.global_template, \"new_name\", request=mock_request\n )", "def test_delete_global_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.global_template, request=mock_request\n )", "def test_retrieve_template_registration(self):\n pass", "def test_invalidate_template_cache_in_virtualization_realm(self):\n pass", "def test_update_template_profile_for_system_module(self):\n pass", "def test_set_display_name_global_template_as_user_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.set_display_name(\n self.fixture.global_template, \"new_name\", request=mock_request\n )", "def test_delete_global_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.global_template, request=mock_request\n )", "def test_unregister_template(self):\n pass", "def test_create_template_subsciption(self):\n pass", "def test_update_template_not_found(self):\n template_id = \"foo\"\n\n rv = TEST_CLIENT.patch(f\"/templates/{template_id}\", json={})\n result = rv.json()\n\n expected = {\n \"message\": \"The specified template does not exist\",\n \"code\": \"TemplateNotFound\",\n }\n self.assertDictEqual(expected, result)\n self.assertEqual(rv.status_code, 404)", "def test_exists_false(self):\n self.assertFalse(PrepTemplate.exists(2))", "def test_delete_global_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.delete(self.fixture.global_template, request=mock_request)", "def test_update_subscription_template(self):\n pass", "def test_set_display_name_user_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.set_display_name(\n self.fixture.user1_template, \"new_name\", request=mock_request\n )", "def test_delete_global_template_as_user_raises_access_control_error(self):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.global_template, 
request=mock_request\n )", "def test_create_namespaced_processed_template(self):\n pass", "def test_update_device_template(self):\n pass", "def test_exists_true(self):\n self.assertTrue(PrepTemplate.exists(1))", "def test_get_user_template_as_anonymous_raises_access_control_error(self):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user1_template.id, request=mock_request\n )", "def test_05_use_private_template_in_project(self):\n # 1. Create a project\n # 2. Verify that in order to use somebody's Private template for vm\n # creation in the project, permission to use the template has to\n # be granted to the Project (use API 'updateTemplatePermissions'\n # with project id to achieve that).\n\n try:\n self.debug(\"Deploying VM for with public template: %s\" %\n self.template.id)\n virtual_machine_1 = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n serviceofferingid=self.service_offering.id,\n projectid=self.project.id\n )\n self.cleanup.append(virtual_machine_1)\n # Verify VM state\n self.assertEqual(virtual_machine_1.state,\n 'Running',\n \"Check VM state is Running or not\")\n virtual_machine_1.stop(self.apiclient)\n # Get the Root disk of VM\n volumes = list_volumes(\n self.apiclient,\n projectid=self.project.id,\n type='ROOT',\n listall=True\n )\n self.assertEqual(\n isinstance(volumes, list),\n True,\n \"Check for list volume response return valid data\"\n )\n volume = volumes[0]\n\n self.debug(\"Creating template from volume: %s\" % volume.id)\n # Create a template from the ROOTDISK\n template_1 = Template.create(\n self.userapiclient,\n self.services[\"template\"],\n volumeid=volume.id\n )\n\n self.cleanup.append(template_1)\n # Verify Template state\n self.assertEqual(\n template_1.isready,\n True,\n \"Check Template is in ready state or not\"\n )\n\n # Update template permissions to grant permission to project\n self.debug(\n \"Updating template permissions:%s to grant access to project: %s\" % (\n template_1.id,\n self.project.id\n ))\n\n template_1.updatePermissions(\n self.apiclient,\n op='add',\n projectids=self.project.id\n )\n self.debug(\"Deploying VM for with privileged template: %s\" %\n self.template.id)\n virtual_machine_2 = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=template_1.id,\n serviceofferingid=self.service_offering.id,\n projectid=self.project.id\n )\n self.cleanup.append(virtual_machine_2)\n # Verify VM state\n self.assertEqual(\n virtual_machine_2.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n except Exception as e:\n self.fail(\"Exception occurred: %s\" % e)\n return", "def test_set_display_name_user_template_as_anonymous_with_access_right_access_raises_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.set_display_name(\n self.fixture.user1_template, \"new_name\", request=mock_request\n )", "def test_create_namespaced_template(self):\n pass", "def test_replace_user(self):\n pass", "def test_update_template_given_name_already_exists(self):\n template_id = util.MOCK_UUID_1\n template_name = util.MOCK_TEMPLATE_NAME_2\n rv = TEST_CLIENT.patch(\n f\"/templates/{template_id}\", json={\"name\": template_name}\n )\n result = rv.json()\n\n expected = {\n \"message\": \"a template with that name already exists\",\n \"code\": \"TemplateNameExists\",\n }\n 
self.assertDictEqual(expected, result)\n self.assertEqual(rv.status_code, 400)", "def test_06_datastore_upsert_without_resource_id(self, Mock):\r\n html_request = FakeRequest(json.dumps(self.task_upsert), 200,\r\n {'content-type': 'application/json'})\r\n\r\n record = dict(info=dict(foo=\"bar\"))\r\n Mock.return_value = html_request\r\n with self.flask_app.test_request_context('/'):\r\n out = self.ckan.datastore_upsert(name='task',\r\n records=json.dumps([record]),\r\n resource_id=None)\r\n err_msg = \"It should return True\"\r\n assert out is True, err_msg\r\n # Check the error\r\n Mock.return_value = self.server_error\r\n try:\r\n self.ckan.datastore_upsert(name='task',\r\n records=json.dumps([record]),\r\n resource_id=self.task_resource_id)\r\n except Exception as out:\r\n type, msg, status_code = out.args\r\n assert \"Server Error\" in msg, msg\r\n assert 500 == status_code, status_code\r\n assert \"CKAN: the remote site failed! datastore_upsert failed\" == type, type", "def test_template_feedback(self):\r\n pass", "def test_get_user_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user1_template.id, request=mock_request\n )", "def test_get_all_as_anonymous_with_access_right_returns_global_templates(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all(request=mock_request)\n self.assertEqual(templates.count(), 1)\n self.assertTrue((template.user is None for template in templates))", "def test_delete_namespaced_template(self):\n pass", "def test_templater(self):\n\n # Set a global templater for all items\n self.site.template(r\"(.*)\", lambda item: \"ALL\")\n # Set another templater on the index item\n self.site.template(r\"index.html\", lambda item: \"INDEX\")\n\n # Since an item can only have one templater, the index templater should have been overwritten\n self.assertEqual(\"INDEX\", self.site.items[\"index.html\"].templated)\n self.assertEqual(\"ALL\", self.site.items[\"test/test.html\"].templated)", "def test_global_template_as_user_returns_template(self):\n mock_request = create_mock_request(user=self.user1)\n template = template_api.get_by_id(\n self.fixture.global_template.id, request=mock_request\n )\n self.assertEqual(template, self.fixture.global_template)", "def test_post_partial_update_unauthorized(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n title = 'Random New Title'\n response = self.client.patch(url, {'title': title}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_team_template_folders_id_replace_post(self):\n pass", "def test_functionality(self):\n templateName = \"Test Template\"\n self.browserObject = globalVars.browserObject\n \n #Check for current logged in user\n self.verifyCurrentUser(userRole='Administrator', loginAsUser=True)\n \n #Deleting Standard User\n userList = self.getLocalUsers(userName=globalVars.standardUser)\n if len(userList) > 0:\n self.deleteLocalUser(globalVars.standardUser, verifyUser=True)\n \n #Creates Sample Template if not exists\n self.createSampleTemplate(templateName=templateName, deleteAndCreate=True, publishedTemplate=True)", "def test_04_public_private_template_use_in_project(self):\n # 1. Create a project\n # 2. Verify Public templates can be used without any restriction\n # 3. 
Verify that private template created in project belongs to this project\n # Verify that list template api wth project id list this template\n\n\n\n try:\n self.debug(\"Deploying VM for with public template: %s\" %\n self.template.id)\n virtual_machine_1 = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n serviceofferingid=self.service_offering.id,\n projectid=self.project.id\n )\n self.cleanup.append(virtual_machine_1)\n # Verify VM state\n self.assertEqual(\n virtual_machine_1.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n virtual_machine_1.stop(self.apiclient)\n # Get the Root disk of VM\n volumes = list_volumes(\n self.apiclient,\n projectid=self.project.id,\n type='ROOT',\n listall=True\n )\n self.assertEqual(\n isinstance(volumes, list),\n True,\n \"Check for list volume response return valid data\"\n )\n volume = volumes[0]\n\n self.debug(\"Creating template from volume: %s\" % volume.id)\n # Create a template from the ROOTDISK\n template_1 = Template.create(\n self.apiclient,\n self.services[\"template\"],\n volumeid=volume.id,\n projectid=self.project.id\n )\n\n self.cleanup.append(template_1)\n # Verify Template state\n self.assertEqual(\n template_1.isready,\n True,\n \"Check Template is in ready state or not\"\n )\n # Verify list template with project id is listing this template\n templatelist = Template.list(self.apiclient,projectid=self.project.id,id=template_1.id,templatefilter=\"all\")\n self.assertEqual(templatelist[0].id,template_1.id,\"template created does not belong to the project\")\n\n\n except Exception as e:\n self.fail(\"Exception occurred: %s\" % e)\n return", "def test_delete_user_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.user1_template, request=mock_request\n )", "async def test_rendering_template_admin(\n hass: HomeAssistant, mock_api_client: TestClient, hass_admin_user: MockUser\n) -> None:\n hass_admin_user.groups = []\n resp = await mock_api_client.post(const.URL_API_TEMPLATE)\n assert resp.status == HTTPStatus.UNAUTHORIZED", "def test_upsert_user(self):\n db = database.Database()\n db.upsert_user('nick', 100, 100)\n\n the_args, _ = db._cursor.execute.call_args\n sql = the_args[0]\n expected_sql = 'INSERT INTO quota_violations (username, triggered, last_notified)\\n VALUES (%s, %s, %s)\\n ON CONFLICT (username)\\n DO UPDATE SET\\n (triggered, last_notified)\\n = (EXCLUDED.triggered, EXCLUDED.last_notified);\\n '\n\n self.assertEqual(sql, expected_sql)", "def test_update_template_success(self):\n template_id = util.MOCK_UUID_1\n template_name = \"template-3\"\n\n rv = TEST_CLIENT.patch(\n f\"/templates/{template_id}\", json={\"name\": template_name}\n )\n result = rv.json()\n\n expected = {\n \"uuid\": template_id,\n \"name\": template_name,\n \"tasks\": [\n {\n \"uuid\": util.MOCK_UUID_1,\n \"task_id\": util.MOCK_UUID_1,\n \"dependencies\": [],\n \"position_x\": 0.0,\n \"position_y\": 0.0,\n }\n ],\n \"experimentId\": util.MOCK_UUID_1,\n \"deploymentId\": None,\n \"createdAt\": util.MOCK_CREATED_AT_1.isoformat(),\n \"updatedAt\": mock.ANY,\n }\n self.assertEqual(result, expected)\n self.assertEqual(rv.status_code, 200)", "def test_blank_content_object_production(self):\n tmpl = Template(\"\"\"\n {% load editregion %}\n {% editregion \"test\" obj %}fallback{% endeditregion %}\n \"\"\")\n self.assertEqual('fallback', 
tmpl.render(Context()).strip())", "def test_no_uid_causes_error():\n empty = create_db()\n with pytest.raises(ValueError):\n o_obj.update_object_in_db(\n empty,\n \"some_uid\",\n INP\n )", "def test_post_update_unauthorized(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n user_url = reverse('user-detail', kwargs={'pk': self.superuser.id})\n title = 'Random New Title'\n body = 'Random New Body'\n response = self.client.put(url, {'title': title, 'body': body, 'user': user_url}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_none_content_object_production(self):\n tmpl = Template(\"\"\"\n output:\n {% load editregion %}\n {% editregion \"test\" None %}fallback{% endeditregion %}\n \"\"\")\n self.assertEqual('output:', tmpl.render(Context()).strip())", "def test_team_template_folders_id_put(self):\n pass", "def test_06_datastore_upsert(self, Mock):\r\n html_request = FakeRequest(json.dumps(self.task_upsert), 200,\r\n {'content-type': 'application/json'})\r\n\r\n record = dict(info=dict(foo=\"bar\"))\r\n Mock.return_value = html_request\r\n with self.flask_app.test_request_context('/'):\r\n out = self.ckan.datastore_upsert(name='task',\r\n records=json.dumps([record]),\r\n resource_id=self.task_resource_id)\r\n err_msg = \"It should return True\"\r\n assert out is True, err_msg\r\n # Check the error\r\n Mock.return_value = self.server_error\r\n try:\r\n self.ckan.datastore_upsert(name='task',\r\n records=json.dumps([record]),\r\n resource_id=self.task_resource_id)\r\n except Exception as out:\r\n type, msg, status_code = out.args\r\n assert \"Server Error\" in msg, msg\r\n assert 500 == status_code, status_code\r\n assert \"CKAN: the remote site failed! datastore_upsert failed\" == type, type", "def test_anonymous_user_update_anoymous_taskrun(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n anonymous_taskrun = AnonymousTaskRunFactory.create()\r\n\r\n assert_raises(Unauthorized,\r\n getattr(require, 'taskrun').update,\r\n anonymous_taskrun)", "def test_update_by_non_owner(self):\n # User 1\n saved1 = self.create_article()\n article_url = saved1[0]\n # get user2 details\n token = self.create_article_user2()\n response = self.test_client.put(article_url,self.article_update_data, format='json', HTTP_AUTHORIZATION=token)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_delete_global_template_as_staff_saves(self):\n mock_request = create_mock_request(user=self.staff_user1)\n template_api.delete(self.fixture.global_template, request=mock_request)", "def test_post_partial_update_logged_in(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n title = 'Random New Title'\n self.client.force_authenticate(user=self.user)\n response = self.client.patch(url, {'title': title}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def testTemplateGet(self):\n self.assertRaises(NotImplementedError, getattr,\n self.tempfile, 'template')", "def test_init(self):\n st = PrepTemplate(1)\n self.assertTrue(st.id, 1)", "def test_not_authed_public_project(self):\n # Clear out existing project with ID=1 if necessary.\n Project.objects.filter(id=1).delete()\n locale = LocaleFactory.create(code='fakelocale')\n project = ProjectFactory.create(id=1, slug='valid-project', locales=[locale])\n ResourceFactory.create(project=project)\n\n response = self.client.get('/fakelocale/valid-project/')\n assert_equal(response.status_code, 200)\n # I'd assertTemplateUsed here but 
it doesn't work on non-DTL\n # templates.", "def test_replace_identity(self):\n pass", "def test_put_unauthenticated(self):\n\n url = reverse('file')\n\n data = {}\n\n response = self.client.put(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_allow_relaxed_update():\n starting_db = create_db(STARTING_DB_INPUT)\n response = o_obj.update_object_in_db(\n starting_db,\n \"some_uid\",\n json.dumps({\n \"my_key\": \"I don't include a uid, but passed it in the url\"\n }))\n assert response == {\n \"uid\": \"some_uid\",\n \"my_key\": \"I don't include a uid, but passed it in the url\"\n }", "def test_put_unauthorized(self):\n\n url = reverse('file')\n\n data = {\n 'shard_id': self.shard1.id,\n 'link_id': \"b8866161-0b1f-4a8e-acde-07047313ec8f\",\n 'parent_datastore_id': str(self.test_datastore_obj.id),\n 'chunk_count': 1,\n 'size': 512,\n }\n\n self.client.force_authenticate(user=self.test_user2_obj)\n response = self.client.put(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_delete_user_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.user1_template, request=mock_request\n )", "def test_init_unknown_error(self):\n with self.assertRaises(QiitaDBUnknownIDError):\n PrepTemplate(2)", "def test_get_templates_in_virtualization_realm(self):\n pass", "def test_exists_false(self):\n self.assertFalse(SampleTemplate.exists(self.new_study.id))", "def test_template_local_file_noclobber(file, tmp_path):\n source = dest = tmp_path / \"source\"\n source.write_text(\"{{ foo }}\\n\")\n\n ret = file.managed(\n name=str(dest),\n source=str(source),\n template=\"jinja\",\n context={\"foo\": \"Hello world!\"},\n )\n assert ret.result is False\n assert \"Source file cannot be the same as destination\" in ret.comment", "def test_security_on_put(self):\n # test the update url\n product = Product.objects.all()[0]\n url = '/product/xml/%s/' % product.item_number\n response = self.client.put(url,{'description':'my new description'})\n self.failUnlessEqual(response.status_code, 401)", "def test_create_template_for_all_namespaces(self):\n pass", "def test_with_unsafe(self):\n t = Template('{% load djblets_utils %}'\n '{% definevar \"myvar\" unsafe %}<hello>{% enddefinevar %}'\n '{{myvar}}')\n\n self.assertEqual(t.render(Context()), '&lt;hello&gt;')", "def test_xml_template_set(self):\n xmlTemplateResult = XmlTemplate.objects.get(id=1)\n xmlTemplateResult.set(\"newTemplate\", '''<?xml >\n <project name=\"newTemplate\">\n </project> ''',)\n self.assertEqual(xmlTemplateResult.template_name, \"newTemplate\")\n self.assertEqual(xmlTemplateResult.template_content, '''<?xml >\n <project name=\"newTemplate\">\n </project> ''')", "def testNonExistantTemplate(self):\n try:\n self.service.get_template(\"Non_Existant\")\n self.fail(\"No ServiceError raised by non-existant template\")\n except ServiceError as ex:\n self.assertEqual(\n ex.message,\n \"There is no template called 'Non_Existant' at this service\")", "def test_get_any_template_as_superuser_returns_template(self):\n mock_request = create_mock_request(user=self.superuser1)\n template = template_api.get_by_id(\n self.fixture.user1_template.id, request=mock_request\n )\n self.assertEqual(template, self.fixture.user1_template)\n template = template_api.get_by_id(\n self.fixture.user2_template.id, 
request=mock_request\n )\n self.assertEqual(template, self.fixture.user2_template)\n template = template_api.get_by_id(\n self.fixture.global_template.id, request=mock_request\n )\n self.assertEqual(template, self.fixture.global_template)", "def test_post_update_regular_user(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n user_url = reverse('user-detail', kwargs={'pk': self.superuser.id})\n self.client.force_authenticate(user=self.user)\n title = 'Random New Title'\n body = 'Random New Body'\n response = self.client.put(url, {'title': title, 'body': body, 'user': user_url}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_customization_template_crud():\n template_crud = pxe.CustomizationTemplate(\n name=generate_random_string(size=8),\n description=generate_random_string(size=16),\n image_type='RHEL-6',\n script_type='Kickstart',\n script_data='Testing the script')\n\n template_crud.create()\n with update(template_crud):\n template_crud.name = template_crud.name + \"_update\"\n template_crud.delete(cancel=False)", "def test_customization_template_crud():\n template_crud = pxe.CustomizationTemplate(\n name=generate_random_string(size=8),\n description=generate_random_string(size=16),\n image_type='RHEL-6',\n script_type='Kickstart',\n script_data='Testing the script')\n\n template_crud.create()\n with update(template_crud):\n template_crud.name = template_crud.name + \"_update\"\n template_crud.delete(cancel=False)", "async def test_rendering_template_legacy_user(\n hass: HomeAssistant,\n mock_api_client: TestClient,\n aiohttp_client: ClientSessionGenerator,\n legacy_auth: LegacyApiPasswordAuthProvider,\n) -> None:\n hass.states.async_set(\"sensor.temperature\", 10)\n client = await aiohttp_client(hass.http.app)\n resp = await client.post(\n const.URL_API_TEMPLATE,\n json={\"template\": \"{{ states.sensor.temperature.state }}\"},\n )\n assert resp.status == HTTPStatus.UNAUTHORIZED", "def test_create_activity_template(self):\n pass", "def test_tag_with_invalid_object(self):\n with self.assertRaises(TemplateSyntaxError):\n edit_link('anyobject')", "def test_init_wrong_template(self):\n with self.assertRaises(IncompetentQiitaDeveloperError):\n Sample('SKB8.640193', PrepTemplate(1))", "def test_get_all_accessible_by_hash_as_superuser_returns_global_template(\n self,\n ):\n mock_request = create_mock_request(user=self.superuser1)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.global_template.hash, request=mock_request\n )\n self.assertTrue(self.fixture.user1_template not in list(templates))\n self.assertTrue(self.fixture.user2_template not in list(templates))\n self.assertTrue(self.fixture.global_template in list(templates))", "def test_iso_datastore_crud(provider_init, no_iso_dss, provider_crud, iso_datastore):\n template_crud = pxe.ISODatastore(provider_crud.name)\n template_crud.create()\n template_crud.delete(cancel=False)" ]
[ "0.7756423", "0.75673836", "0.7331551", "0.7270877", "0.7159347", "0.6970686", "0.6868954", "0.6742805", "0.65919876", "0.6536369", "0.6457105", "0.6407503", "0.6395695", "0.63756883", "0.6357793", "0.6353528", "0.62642133", "0.6223189", "0.6144017", "0.61375785", "0.61193377", "0.6092641", "0.60622394", "0.6031787", "0.5968342", "0.59273356", "0.59084797", "0.5879845", "0.58750254", "0.58585554", "0.5812124", "0.57973653", "0.57602715", "0.57551277", "0.57480437", "0.57248735", "0.570871", "0.5698997", "0.5689939", "0.56676376", "0.56209725", "0.5597016", "0.5565333", "0.55610824", "0.55546254", "0.55395675", "0.5538785", "0.55247545", "0.5512868", "0.55088615", "0.54796386", "0.5473917", "0.54632527", "0.54590505", "0.54586005", "0.5457524", "0.54574895", "0.54506207", "0.54323727", "0.5405736", "0.5400833", "0.53973734", "0.5381259", "0.53723824", "0.5367154", "0.53521174", "0.53463984", "0.5339869", "0.53373164", "0.532959", "0.53250074", "0.531886", "0.530855", "0.53011435", "0.52908045", "0.52718914", "0.52681774", "0.5261239", "0.5256621", "0.52446556", "0.5236358", "0.5232409", "0.5230643", "0.5230502", "0.52262247", "0.5217653", "0.5204485", "0.52037334", "0.5201074", "0.52007717", "0.51943386", "0.5193092", "0.51929456", "0.51929456", "0.5183997", "0.51821685", "0.5173887", "0.51737744", "0.5170524", "0.5169949" ]
0.76497203
1
test upsert own template as user saves
def test_upsert_own_template_as_user_saves(self): mock_request = create_mock_request(user=self.user1) template_api.upsert(self.fixture.user1_template, request=mock_request)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_upsert_own_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.upsert(self.fixture.user1_template, request=mock_request)", "def test_upsert_own_template_as_staff_saves(self):\n mock_request = create_mock_request(user=self.staff_user1)\n template_api.upsert(self.fixture.user1_template, request=mock_request)", "def test_upsert_other_users_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.upsert(self.fixture.user2_template, request=mock_request)", "def test_upsert_global_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.upsert(self.fixture.global_template, request=mock_request)", "def test_upsert_global_template_as_staff_saves(self):\n mock_request = create_mock_request(user=self.staff_user1)\n template_api.upsert(self.fixture.global_template, request=mock_request)", "def test_update_template_registration(self):\n pass", "def test_upsert_user(self):\n db = database.Database()\n db.upsert_user('nick', 100, 100)\n\n the_args, _ = db._cursor.execute.call_args\n sql = the_args[0]\n expected_sql = 'INSERT INTO quota_violations (username, triggered, last_notified)\\n VALUES (%s, %s, %s)\\n ON CONFLICT (username)\\n DO UPDATE SET\\n (triggered, last_notified)\\n = (EXCLUDED.triggered, EXCLUDED.last_notified);\\n '\n\n self.assertEqual(sql, expected_sql)", "def test_upsert_global_template_as_user_raises_access_control_error(self):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.global_template, request=mock_request\n )", "def test_upsert_profile(mocker, mock_es_profile_serializer, user):\n patched_task = mocker.patch(\"search.search_index_helpers.tasks.upsert_profile\")\n upsert_profile(user.profile.id)\n patched_task.assert_called_once_with(user.profile.id)", "def test_delete_own_template_as_user_saves(self):\n mock_request = create_mock_request(user=self.user1)\n template_api.delete(self.fixture.user1_template, request=mock_request)", "def test_upsert_other_users_template_as_user_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.user2_template, request=mock_request\n )", "def upsert(self, context: dict=None) -> None:\n assert(context.get('data', None) is not None)\n\n data = context.get('data', None)\n\n logging.info(f\"Upsert: {data}\")\n try:\n user_record = self.table.first(formula=f\"username='{data['username']}'\")\n user_id = user_record['id'] if user_record else None\n if user_id:\n self.table.update(user_id, fields=data, replace=True, typecast=True)\n else:\n self.table.create(fields=data, typecast=True)\n except Exception as ex:\n self.close_database()\n raise DatabaseError({\n \"code\": f\"Airtable exception\",\n \"description\": f'Database: `{self.db_name}`\\n`upsert({data})`\\nEnsure DB entities exist',\n \"message\": str(ex),\n }, 500)", "def test_is_data_in_template_and_database_same(self):\n SimplePersonFactory.create()\n contact = Person.objects.last()\n self.assertEqual(check_content_in_template(contact), True)\n contact.name = \"test_is_data_in_template_and_database_same\"\n contact.save()\n self.assertEqual(check_content_in_template(contact), True)", "def test_upsert_other_users_template_as_staff_raises_access_control_error(\n self,\n ):\n mock_request = 
create_mock_request(user=self.staff_user1)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.user2_template, request=mock_request\n )", "def test_update_template_subscription(self):\n pass", "def test_delete_own_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.delete(self.fixture.user1_template, request=mock_request)", "def test_update_activity_template(self):\n pass", "def test_update_useruser_uuid_put(self):\n pass", "def test_06_datastore_upsert(self, Mock):\r\n html_request = FakeRequest(json.dumps(self.task_upsert), 200,\r\n {'content-type': 'application/json'})\r\n\r\n record = dict(info=dict(foo=\"bar\"))\r\n Mock.return_value = html_request\r\n with self.flask_app.test_request_context('/'):\r\n out = self.ckan.datastore_upsert(name='task',\r\n records=json.dumps([record]),\r\n resource_id=self.task_resource_id)\r\n err_msg = \"It should return True\"\r\n assert out is True, err_msg\r\n # Check the error\r\n Mock.return_value = self.server_error\r\n try:\r\n self.ckan.datastore_upsert(name='task',\r\n records=json.dumps([record]),\r\n resource_id=self.task_resource_id)\r\n except Exception as out:\r\n type, msg, status_code = out.args\r\n assert \"Server Error\" in msg, msg\r\n assert 500 == status_code, status_code\r\n assert \"CKAN: the remote site failed! datastore_upsert failed\" == type, type", "def test_update_user(self):\n pass", "def test_no_template(self):\n distillery = Distillery.objects.get_by_natural_key(\n 'mongodb.test_database.test_docs')\n try:\n distillery.save()\n except AttributeError:\n self.fail('put_template() raised AttributeError unexpectedly')", "def test_api_user_put(self):\n pass", "def test_upsert_user_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.user1_template, request=mock_request\n )", "def test_collection_saved(self, mock_template):\n collection = Collection.objects.get_by_natural_key(\n 'elasticsearch', 'test_index', 'test_docs')\n collection.save()\n self.assertEqual(mock_template.call_count, 1)", "def test_update_template_success(self):\n template_id = util.MOCK_UUID_1\n template_name = \"template-3\"\n\n rv = TEST_CLIENT.patch(\n f\"/templates/{template_id}\", json={\"name\": template_name}\n )\n result = rv.json()\n\n expected = {\n \"uuid\": template_id,\n \"name\": template_name,\n \"tasks\": [\n {\n \"uuid\": util.MOCK_UUID_1,\n \"task_id\": util.MOCK_UUID_1,\n \"dependencies\": [],\n \"position_x\": 0.0,\n \"position_y\": 0.0,\n }\n ],\n \"experimentId\": util.MOCK_UUID_1,\n \"deploymentId\": None,\n \"createdAt\": util.MOCK_CREATED_AT_1.isoformat(),\n \"updatedAt\": mock.ANY,\n }\n self.assertEqual(result, expected)\n self.assertEqual(rv.status_code, 200)", "def test_replace_user(self):\n pass", "def post(self):\n self.reqparse.add_argument('templateName', type=str, required=True)\n self.reqparse.add_argument('template', type=str, required=True)\n args = self.reqparse.parse_args()\n\n template = db.Template.find_one(template_name=args['templateName'])\n if template:\n return self.make_response('Template already exists, update the existing template instead', HTTP.CONFLICT)\n\n template = Template()\n template.template_name = args['templateName']\n template.template = args['template']\n\n db.session.add(template)\n db.session.commit()\n auditlog(event='template.create', 
actor=session['user'].username, data=args)\n\n return self.make_response('Template {} has been created'.format(template.template_name), HTTP.CREATED)", "def test_createUser_single(self):\n #TODO: this and other tests", "def upsert_location(self, location):", "def test_register_template(self):\n pass", "def test_exists_true(self):\n self.assertTrue(PrepTemplate.exists(1))", "def test_06_datastore_upsert_without_resource_id(self, Mock):\r\n html_request = FakeRequest(json.dumps(self.task_upsert), 200,\r\n {'content-type': 'application/json'})\r\n\r\n record = dict(info=dict(foo=\"bar\"))\r\n Mock.return_value = html_request\r\n with self.flask_app.test_request_context('/'):\r\n out = self.ckan.datastore_upsert(name='task',\r\n records=json.dumps([record]),\r\n resource_id=None)\r\n err_msg = \"It should return True\"\r\n assert out is True, err_msg\r\n # Check the error\r\n Mock.return_value = self.server_error\r\n try:\r\n self.ckan.datastore_upsert(name='task',\r\n records=json.dumps([record]),\r\n resource_id=self.task_resource_id)\r\n except Exception as out:\r\n type, msg, status_code = out.args\r\n assert \"Server Error\" in msg, msg\r\n assert 500 == status_code, status_code\r\n assert \"CKAN: the remote site failed! datastore_upsert failed\" == type, type", "def test_upsert_user_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.user1_template, request=mock_request\n )", "def test_delete_other_users_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.delete(self.fixture.user2_template, request=mock_request)", "def test_team_template_folders_id_templates_fk_put(self):\n pass", "def test_collection_put(testapp, execute_counter):\n initial = {\n 'title': \"Testing\",\n 'type': \"object\", # include a non-required field\n 'description': \"This is the initial insert\",\n }\n item_url = testapp.post_json('/embedding-tests', initial).location\n\n with execute_counter.expect(1):\n item = testapp.get(item_url).json\n\n for key in initial:\n assert item[key] == initial[key]\n\n update = {\n 'title': \"New Testing\",\n 'type': \"object\",\n 'description': \"This is the updated insert\",\n }\n testapp.put_json(item_url, update, status=200)\n\n res = testapp.get('/' + item['uuid']).follow().json\n\n for key in update:\n assert res[key] == update[key]", "def test_post_update_admin(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n user_url = reverse('user-detail', kwargs={'pk': self.superuser.id})\n self.client.force_authenticate(user=self.superuser)\n title = 'Random New Title'\n body = 'Random New Body'\n response = self.client.put(url, {'title': title, 'body': body, 'user': user_url}, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertIn(title, response.content)\n self.assertIn(body, response.content)\n self.assertIn(user_url, response.content)", "def test_update_template_given_name_already_exists(self):\n template_id = util.MOCK_UUID_1\n template_name = util.MOCK_TEMPLATE_NAME_2\n rv = TEST_CLIENT.patch(\n f\"/templates/{template_id}\", json={\"name\": template_name}\n )\n result = rv.json()\n\n expected = {\n \"message\": \"a template with that name already exists\",\n \"code\": \"TemplateNameExists\",\n }\n self.assertDictEqual(expected, result)\n self.assertEqual(rv.status_code, 400)", "def 
test_post_update_regular_user(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n user_url = reverse('user-detail', kwargs={'pk': self.superuser.id})\n self.client.force_authenticate(user=self.user)\n title = 'Random New Title'\n body = 'Random New Body'\n response = self.client.put(url, {'title': title, 'body': body, 'user': user_url}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_upsert_user_list(mocker, list_type):\n patched_task = mocker.patch(\"search.tasks.upsert_user_list\")\n user_list = UserListFactory.create(list_type=list_type)\n upsert_user_list(user_list.id)\n patched_task.assert_called_once_with(user_list.id)", "def test_upsert_content_file(mocker):\n patched_task = mocker.patch(\"search.tasks.upsert_content_file\")\n content_file = ContentFileFactory.create()\n upsert_content_file(content_file.id)\n patched_task.assert_called_once_with(content_file.id)", "def test_user_update(self):\n userPK = self.testUser.pk\n url = reverse('User-detail', kwargs={'pk': userPK})\n data = {'username': 'company1NewTest'}\n response = self.client.put(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(User.objects.get(pk=userPK).username,\n 'company1NewTest')\n data = {'username': 'company1NewTest2'}\n response = self.client.patch(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(User.objects.get(pk=userPK).username,\n 'company1NewTest2')", "def test_set_display_name_own_template_as_user_saves(self):\n mock_request = create_mock_request(user=self.user1)\n template_api.set_display_name(\n self.fixture.user1_template, \"new_name\", request=mock_request\n )", "def test_user_id_put(self):\n pass", "def test_delete_own_template_as_staff_saves(self):\n mock_request = create_mock_request(user=self.staff_user1)\n template_api.delete(self.fixture.user1_template, request=mock_request)", "def test_share_template_registration(self):\n pass", "def test_update_the_created_user():\n pytest.test_user.name += \"Updated\"\n response = api_helper.update_user(pytest.test_user)\n assert response.status_code == 200", "def test_team_template_folders_id_put(self):\n pass", "def test_distillery_saved(self, mock_template):\n distillery = Distillery.objects.get_by_natural_key(\n 'elasticsearch.test_index.test_docs')\n distillery.save()\n self.assertEqual(mock_template.call_count, 1)", "def test_write(self):\n userEdited = self.env['res.users'].browse(\n self.user.id).write({'user_profile_id': self.user_profile2.id})\n self.assertEqual(userEdited, True)", "def upsert(self, obj):\r\n url = '{0}/upsert'.format(self.get_url())\r\n request = http.Request('PUT', url, self.wrap_object(obj))\r\n\r\n return request, parsers.parse_empty", "def test_create_or_update__update(collection, user_id, media_item, repo):\n insert = collection.insert_one(\n media_item\n | {\n \"userId\": user_id,\n \"filename\": \"other-filename.jpg\",\n }\n )\n repo.create_or_update(media_item)\n\n document = collection.find_one({\"id\": media_item[\"id\"]})\n assert document is not None\n assert document[\"userId\"] == user_id\n assert document[\"filename\"] == media_item[\"filename\"]", "def test_upsert_w_returning(self, connection):\n\n data = self.tables.data\n\n initial_data = [\n {\"x\": \"x1\", \"y\": \"y1\", \"z\": 4},\n {\"x\": \"x2\", \"y\": \"y2\", \"z\": 8},\n ]\n ids = connection.scalars(\n data.insert().returning(data.c.id), initial_data\n ).all()\n\n upsert_data = [\n 
{\n \"id\": ids[0],\n \"x\": \"x1\",\n \"y\": \"y1\",\n },\n {\n \"id\": 32,\n \"x\": \"x19\",\n \"y\": \"y7\",\n },\n {\n \"id\": ids[1],\n \"x\": \"x5\",\n \"y\": \"y6\",\n },\n {\n \"id\": 28,\n \"x\": \"x9\",\n \"y\": \"y15\",\n },\n ]\n\n stmt = provision.upsert(\n config,\n data,\n (data,),\n set_lambda=lambda inserted: {\"x\": inserted.x + \" upserted\"},\n )\n\n result = connection.execute(stmt, upsert_data)\n\n eq_(\n result.all(),\n [\n (ids[0], \"x1 upserted\", \"y1\", 4),\n (32, \"x19\", \"y7\", 5),\n (ids[1], \"x5 upserted\", \"y2\", 8),\n (28, \"x9\", \"y15\", 5),\n ],\n )", "def test_duplicate_user(self):\n json_resp = make_user(self.client)\n json_resp = make_user(self.client, username='Blah')\n # email should be taken\n self.assertEqual(json_resp['status'], 'email taken')\n # check only one user in the db\n self.assertEqual(User.query.count(), 1)\n # username should be taken\n json_resp = make_user(self.client, email='other@test.com')\n # check api response\n self.assertEqual(json_resp['status'], 'username taken')", "def test_create_account(self):\n response = self.client.post(\"http://localhost:8000/api/signup/\",\n data={\"username\": \"tests\", \"password\": \"TestTest\"})\n first_user = MyUser.objects.get()\n self.assertEqual(response.status_code, HTTP_201_CREATED)\n self.assertEqual(first_user.username, 'tests')\n response = self.client.post(\"http://localhost:8000/api/signup/\",\n data={\"username\": \"tests2\", \"password\": \"TestTest\"})\n self.assertEqual(response.status_code, HTTP_201_CREATED)\n self.assertTrue(MyUser.objects.filter(username=\"tests2\").exists())\n user = MyUser.objects.get(username=\"tests2\")\n response = self.client.put(f\"http://localhost:8000/api/users/{user.pk}/\", data={\"email\": \"tst@test.te\"})\n # Not logged shouldnt change anything\n self.assertEqual(response.status_code, HTTP_401_UNAUTHORIZED)\n user.set_password(\"TestTest\")\n user.save()\n self.assertTrue(self.client.login(username=\"tests2\", password=\"TestTest\"))\n response = self.client.patch(f\"http://localhost:8000/api/users/{user.pk}/\", data={\"email\": \"tst@test.te\"})\n # Logged, should change\n self.assertEqual(response.status_code, HTTP_200_OK)\n self.assertEqual(MyUser.objects.get(username=\"tests2\").email, \"tst@test.te\")\n # Dont update others users\n response = self.client.patch(f\"http://localhost:8000/api/users/{first_user.pk}/\", data={\"email\": \"tst@test.te\"})\n self.assertEqual(response.status_code, HTTP_400_BAD_REQUEST)", "def test_create__duplicate(self):\n testing_config.sign_in('admin@example.com', 123567890)\n\n json_data = {'email': 'user@example.com'}\n with test_app.test_request_context(self.request_path, json=json_data):\n with self.assertRaises(werkzeug.exceptions.BadRequest):\n self.handler.do_post()\n\n unrevised_appuser = user_models.AppUser.get_by_id(self.appuser_id)\n self.assertEqual('user@example.com', unrevised_appuser.email)", "def test_xml_template_set(self):\n xmlTemplateResult = XmlTemplate.objects.get(id=1)\n xmlTemplateResult.set(\"newTemplate\", '''<?xml >\n <project name=\"newTemplate\">\n </project> ''',)\n self.assertEqual(xmlTemplateResult.template_name, \"newTemplate\")\n self.assertEqual(xmlTemplateResult.template_content, '''<?xml >\n <project name=\"newTemplate\">\n </project> ''')", "def test_user_update_request(self):\n pass", "def test_customization_template_crud():\n template_crud = pxe.CustomizationTemplate(\n name=generate_random_string(size=8),\n description=generate_random_string(size=16),\n 
image_type='RHEL-6',\n script_type='Kickstart',\n script_data='Testing the script')\n\n template_crud.create()\n with update(template_crud):\n template_crud.name = template_crud.name + \"_update\"\n template_crud.delete(cancel=False)", "def test_customization_template_crud():\n template_crud = pxe.CustomizationTemplate(\n name=generate_random_string(size=8),\n description=generate_random_string(size=16),\n image_type='RHEL-6',\n script_type='Kickstart',\n script_data='Testing the script')\n\n template_crud.create()\n with update(template_crud):\n template_crud.name = template_crud.name + \"_update\"\n template_crud.delete(cancel=False)", "def test_upsert_w_returning(self, connection):\n\n data = self.tables.data\n\n initial_data = [\n {\"x\": \"x1\", \"y\": \"y1\", \"z\": 4},\n {\"x\": \"x2\", \"y\": \"y2\", \"z\": 8},\n ]\n ids = connection.scalars(\n data.insert().returning(data.c.id), initial_data\n ).all()\n\n upsert_data = [\n {\n \"id\": ids[0],\n \"x\": \"x1\",\n \"y\": \"y1\",\n },\n {\n \"id\": 32,\n \"x\": \"x19\",\n \"y\": \"y7\",\n },\n {\n \"id\": ids[1],\n \"x\": \"x5\",\n \"y\": \"y6\",\n },\n {\n \"id\": 28,\n \"x\": \"x9\",\n \"y\": \"y15\",\n },\n ]\n\n stmt = provision.upsert(\n config,\n data,\n (data,),\n lambda inserted: {\"x\": inserted.x + \" upserted\"},\n )\n\n result = connection.execute(stmt, upsert_data)\n\n eq_(\n result.all(),\n [\n (ids[0], \"x1 upserted\", \"y1\", 4),\n (32, \"x19\", \"y7\", 5),\n (ids[1], \"x5 upserted\", \"y2\", 8),\n (28, \"x9\", \"y15\", 5),\n ],\n )", "def test_db(self):\n db.tests.insert_one({'name': 'test-name'})\n r = db.tests.find_one({'name': 'test-name'})\n self.assertEqual(r['name'], 'test-name')\n\n db.tests.insert_one({'_id': '_id', 'a': 'A', 'b': 'B', 'c': 'c'})", "def test_update(self):\n # creating a new sample template\n st = SampleTemplate.create(self.metadata, self.new_study)\n # updating the sample template\n st.update(self.metadata_dict_updated)\n\n # validating values\n exp = self.metadata_dict_updated_dict['Sample1'].values()\n obs = st.get('2.Sample1').values()\n self.assertItemsEqual(obs, exp)\n\n exp = self.metadata_dict_updated_dict['Sample2'].values()\n obs = st.get('2.Sample2').values()\n self.assertItemsEqual(obs, exp)\n\n exp = self.metadata_dict_updated_dict['Sample3'].values()\n obs = st.get('2.Sample3').values()\n self.assertItemsEqual(obs, exp)\n\n # checking errors\n with self.assertRaises(QiitaDBError):\n st.update(self.metadata_dict_updated_sample_error)\n with self.assertRaises(QiitaDBError):\n st.update(self.metadata_dict_updated_column_error)", "def test_existing_user(self):\n user = User.objects.create(username=self.username)\n actual = get_user_if_exists(None, self.details)\n self.assertDictEqual(actual, {'is_new': False, 'user': user})", "def insert(template_version_manager, template, request):\n # save the template in database\n template_api.upsert(template)\n try:\n # insert the initial template in the version manager\n version_manager_api.insert_version(\n template_version_manager, template, request=request\n )\n # insert the version manager in database\n version_manager_api.upsert(template_version_manager, request=request)\n # get template display name\n display_name = get_latest_version_name(template_version_manager)\n # update saved template\n main_template_api.set_display_name(template, display_name, request=request)\n # return version manager\n return template_version_manager\n except Exception as e:\n main_template_api.delete(template, request=request)\n raise e", "def 
test_update_update_has_a_value(self):\n self.Person.drop_collection()\n\n author = self.Person.objects.create(name=\"Test User\")\n\n with pytest.raises(OperationError):\n self.Person.objects(pk=author.pk).update({})\n\n with pytest.raises(OperationError):\n self.Person.objects(pk=author.pk).update_one({})", "def test_retrieve_template_registration(self):\n pass", "def test_update_subscription_template(self):\n pass", "def test_set_display_name_own_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.set_display_name(\n self.fixture.user1_template, \"new_name\", request=mock_request\n )", "def test_exists_true(self):\n self.assertTrue(SampleTemplate.exists(self.test_study.id))", "def test_upsert_video(mocker):\n patched_task = mocker.patch(\"search.tasks.upsert_video\")\n video = VideoFactory.create()\n upsert_video(video.id)\n patched_task.assert_called_once_with(video.id)", "def upsert(self):\n\n if self.cluster:\n self.cluster.upsert()\n else:\n super().upsert()", "def test_delete_global_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.delete(self.fixture.global_template, request=mock_request)", "def test_put_db_fail(self):\n test_data = {\n 'first_name': 'new_first_name',\n 'last_name': 'new_last_name'\n }\n with mock.patch('user_profile.models.UserProfile.update') as update:\n update.return_value = False\n response = self.client.put(self.url, json.dumps(test_data), content_type='application/json')\n self.assertEquals(response.status_code, 400)", "def upsert_db(data: List[Dict[str, Any]]):\n questions = data[\"items\"][:5]\n timestamp = f\"{DATE:%Y-%m-%d %H:%M}\"\n convert_epoch = datetime.datetime.utcfromtimestamp\n\n db = sqlite_utils.Database(ROOT / \"stackoverflow.db\")\n db[\"questions\"].upsert_all(\n (\n {\n \"question_id\": row[\"question_id\"],\n \"title\": row[\"title\"],\n \"tags\": \",\".join(row[\"tags\"]),\n \"owner_id\": row[\"owner\"][\"user_id\"],\n \"is_answered\": row[\"is_answered\"],\n \"view_count\": row[\"view_count\"],\n \"answer_count\": row[\"answer_count\"],\n \"score\": row[\"score\"],\n \"site\": row[\"link\"].split(\".\")[0].split(\"/\")[-1],\n \"link\": row[\"link\"],\n \"creation_date\": f'{convert_epoch(row[\"creation_date\"]):%Y-%m-%d %H:%M}',\n \"inserted_date\": timestamp\n }\n for row in questions\n ),\n pk=\"question_id\"\n )\n\n db[\"users\"].upsert_all(\n (\n {\n \"user_id\": row[\"owner\"][\"user_id\"],\n \"user_type\": row[\"owner\"][\"user_type\"],\n \"display_name\": row[\"owner\"][\"display_name\"],\n \"link\": row[\"owner\"][\"link\"],\n \"site\": row[\"link\"].split(\".\")[0].split(\"/\")[-1],\n \"inserted_date\": timestamp \n }\n for row in questions\n ),\n pk=\"user_id\"\n )", "def test_update_one(self):\n pass", "def test_upsert_global_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.global_template, request=mock_request\n )", "def test_partial_update(self):\n self.client.force_authenticate(user=self.admin)\n\n data = {\n 'retreat': reverse(\n 'retreat:retreat-detail', args=[self.retreat.id]\n ),\n 'user': reverse('user-detail', args=[self.user2.id]),\n }\n\n response = self.client.put(\n reverse(\n 'retreat:waitqueuenotification-detail',\n kwargs={'pk': 1},\n ),\n data,\n format='json',\n )\n\n self.assertEqual(\n response.status_code,\n 
status.HTTP_405_METHOD_NOT_ALLOWED\n )", "def post(self, request, pk: int = None):\n if not pk:\n template_id = request.POST.get('template_id')\n template = Template.objects.get(pk=int(template_id))\n _pk = TemplateVersion.objects.create(template=template, test_data={}).pk\n else:\n _pk = TemplateVersion.objects.duplicate(pk)\n template = TemplateVersion.objects.get(pk=pk).template\n return JsonResponse({'id': _pk, 'template_id': template.pk})", "def test_post_partial_update_admin(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n title = 'Random New Title Patched'\n user_url = reverse('user-detail', kwargs={'pk': self.superuser.id})\n self.client.force_authenticate(user=self.superuser)\n response = self.client.patch(url, {'title': title}, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertIn(title, response.content)\n self.assertIn(user_url, response.content)", "def test_upsert(self):\n my_conn = MySQL(*self.conn_params)\n Base = declarative_base()\n current_dir = os.path.dirname(os.path.abspath(__file__))\n\n # IMPORTANT: table logic reuse pattern with mixins\n class PmhMixin(object):\n \"\"\"Auxiliary sqlalchemy table model for the tests.\"\"\"\n\n __tablename__ = 'pmh'\n\n id = Column(Integer, primary_key=True)\n anno = Column(Integer)\n cpro = Column(Integer)\n cmun = Column(Integer)\n csexo = Column(Integer)\n sexo = Column(String(20))\n orden_gredad = Column(Integer)\n gredad = Column(String(30))\n personas = Column(Integer)\n codigo_ine = Column(String(50))\n\n class Pmh(Base, PmhMixin):\n \"\"\"Auxiliary sqlalchemy table model for the tests.\"\"\"\n\n __tablename__ = 'pmh'\n\n class PmhTmp(Base, PmhMixin):\n \"\"\"Auxiliary sqlalchemy table model for the tests.\"\"\"\n\n __tablename__ = 'tmp_pmh'\n\n # table to update/insert\n Pmh.__table__.create(bind=my_conn.engine)\n data = pd.read_csv(f'''{current_dir}/pmh.csv''')\n data.name = 'pmh'\n my_conn.insert(data, if_exists='append')\n # https://github.com/PyCQA/pylint/issues/1161\n # there's an issue with pylint and pandas read methods.\n original_table = pd.DataFrame(\n pd.read_sql_table(data.name, my_conn.conn_string))\n\n # temporary table with data to update/insert\n PmhTmp.__table__.create(bind=my_conn.engine)\n tmp_data = pd.read_csv(f'''{current_dir}/pmh_update.csv''')\n tmp_data.name = 'tmp_pmh'\n\n sql = f'''INSERT INTO pmh\n (id, anno, cpro, cmun, csexo, sexo, orden_gredad, gredad,\n personas, codigo_ine)\n SELECT *\n FROM tmp_pmh\n ON DUPLICATE KEY UPDATE\n id = tmp_pmh.id,\n anno = tmp_pmh.anno,\n cpro = tmp_pmh.cpro,\n cmun = tmp_pmh.cmun,\n csexo = tmp_pmh.csexo,\n orden_gredad = tmp_pmh.orden_gredad,\n gredad = tmp_pmh.gredad,\n personas = tmp_pmh.personas,\n codigo_ine = tmp_pmh.codigo_ine;'''\n expected = 76\n current = original_table.loc[\n original_table['id'] == 5192]['personas'].tolist()[0]\n self.assertEqual(current, expected)\n my_conn.upsert(tmp_data, data.name, sql, if_exists='append')\n updated_table = pd.DataFrame(\n pd.read_sql_table(data.name, my_conn.conn_string))\n expected = 9976\n current = updated_table.loc[\n updated_table['id'] == 5192]['personas'].tolist()[0]\n self.assertEqual(current, expected)\n expected = 46\n current = updated_table.loc[\n updated_table['id'] == 30001]['cmun'].tolist()[0]\n my_conn.drop(data.name)", "def test_upsert_course(mocker):\n patched_task = mocker.patch(\"search.tasks.upsert_course\")\n course = CourseFactory.create()\n upsert_course(course.id)\n patched_task.assert_called_once_with(course.id)", "def 
test_create_or_update__create(collection, user_id, media_item, repo):\n repo.create_or_update(media_item)\n\n document = collection.find_one({\"id\": media_item[\"id\"]})\n assert document is not None\n assert document[\"userId\"] == user_id", "def upsert_user(user_id, nick_name, first_name, last_name):\n if execute_select(get_user_sql, (user_id,)):\n execute_insert(update_user_sql, (nick_name, first_name, last_name, user_id))\n else:\n execute_insert(add_user_sql, (user_id, nick_name, first_name, last_name))", "def test_create_template_subsciption(self):\n pass", "def test_functionality(self):\n templateName = \"Test Template\"\n self.browserObject = globalVars.browserObject\n \n #Check for current logged in user\n self.verifyCurrentUser(userRole='Administrator', loginAsUser=True)\n \n #Deleting Standard User\n userList = self.getLocalUsers(userName=globalVars.standardUser)\n if len(userList) > 0:\n self.deleteLocalUser(globalVars.standardUser, verifyUser=True)\n \n #Creates Sample Template if not exists\n self.createSampleTemplate(templateName=templateName, deleteAndCreate=True, publishedTemplate=True)", "def test_team_template_folders_id_replace_post(self):\n pass", "def test_create_empty_user(patch_mongo):\n user = {\n \"name\": \"\"\n }\n\n response = client.put(\"/user\", json=user)\n assert response.status_code == status.HTTP_400_BAD_REQUEST", "def upsert(self, variable_value=None, commit=False):\n statement = UPDATE if self.exists else INSERT\n self.oxdb.execute(\n statement,\n variable_value or datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S'),\n self.variable_name, commit=commit)\n self.select()", "def test_upsert_global_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.global_template, request=mock_request\n )", "def test_set_display_name_other_users_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.set_display_name(\n self.fixture.user2_template, \"new_name\", request=mock_request\n )", "def test_update_record(self):\n pass", "def test_user_update(self):\n self.client.login(username=self.teacher.username,\n password='1234')\n post = {'email': 'teacher_updated@test.com', 'first_name': 'Tim',\n 'last_name': 'Teacher'}\n response = self.client.post(self.update_url, post)\n updated_teacher = SchoolUser.objects.get(\n username=self.teacher.username)\n self.assertEqual(updated_teacher.email, post['email'])", "def test_save_rewrite(self):\n\n user = CustomUser.objects.get(email=\"test@test.test\")\n user.first_name = \"UpdatedName\"\n user.save()\n actual_user = CustomUser.objects.get(email=\"test@test.test\")\n\n self.assertEqual(actual_user.first_name, \"UpdatedName\")", "async def test_update(self):\n await self.collection.create({'id': 'foo', 'value': 'bar'})\n updated = await self.resource.update('foo', {'value': 'new'})\n self.assertEqual({'id': 'foo', 'value': 'new'}, updated)", "def test_exists_true(self):\n self.assertTrue(PrepSample.exists(self.sample_id, self.prep_template))", "def test_simple_patches(self):\n payload = json.dumps([\n {\"op\": \"add\", \"path\": \"/name\", \"value\": \"New name\"},\n {\"op\": \"copy\", \"from\": \"/email\", \"path\": \"/username\"},\n {\"op\": \"replace\", \"path\": \"/subscriber\", \"value\": True}\n ])\n result = patch_item(self.valid_users[0], payload)\n user = 
Users.query.filter_by(UserID=self.valid_users[0]).first_or_404()\n self.assertEqual(\"New name\", result[\"name\"])\n self.assertEqual(\"unittest1@email.com\", result[\"username\"])\n self.assertEqual(True, result[\"subscriber\"])\n self.assertEqual(\"New name\", user.Name)\n self.assertEqual(\"unittest1@email.com\", user.Username)\n self.assertEqual(True, user.Subscriber)\n self.assertNotEqual(None, user.Updated) # Should update automatically", "def test_update_by_non_owner(self):\n # User 1\n saved1 = self.create_article()\n article_url = saved1[0]\n # get user2 details\n token = self.create_article_user2()\n response = self.test_client.put(article_url,self.article_update_data, format='json', HTTP_AUTHORIZATION=token)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_products_ref_users_put(self):\n pass", "def test_update(self):\n\n user = CustomUser.objects.get(email=\"test@test.test\")\n user.update(first_name=\"UpdatedName\", second_name=\"UpdatedSecondName\")\n\n self.assertEqual(user.first_name, \"UpdatedName\")\n self.assertEqual(user.second_name, \"UpdatedSecondName\")" ]
[ "0.8351025", "0.80516064", "0.8045996", "0.8009917", "0.7813934", "0.6972186", "0.678505", "0.6546894", "0.63948953", "0.63871586", "0.6360882", "0.6320311", "0.63129383", "0.61979985", "0.6153889", "0.61455595", "0.61436296", "0.6135426", "0.6107875", "0.6063069", "0.6057141", "0.60564005", "0.6055595", "0.6018213", "0.5990856", "0.59901106", "0.59875154", "0.5979962", "0.59781814", "0.59447676", "0.59335387", "0.5931053", "0.593056", "0.59234494", "0.59209347", "0.59195334", "0.59191394", "0.59187883", "0.59056735", "0.5892443", "0.58867455", "0.58740425", "0.5857413", "0.58523476", "0.5851407", "0.5850977", "0.58405405", "0.58249384", "0.581359", "0.579109", "0.5786077", "0.57756704", "0.57702965", "0.5767868", "0.5760124", "0.57584244", "0.575069", "0.57473457", "0.57409096", "0.57409096", "0.573859", "0.57368606", "0.57357895", "0.57238644", "0.57224244", "0.57150483", "0.57136", "0.57076067", "0.57061476", "0.56906044", "0.5688028", "0.5668787", "0.566262", "0.56592065", "0.5653345", "0.5649625", "0.5641639", "0.5637782", "0.56355155", "0.5631756", "0.5629966", "0.5628676", "0.562597", "0.56207293", "0.5619672", "0.56164294", "0.561522", "0.5615135", "0.5613662", "0.5608077", "0.56080437", "0.56008124", "0.55995387", "0.5598182", "0.55969864", "0.5576882", "0.55619687", "0.5559804", "0.5548387", "0.5547533" ]
0.866422
0
test upsert other users template as user raises access control error
def test_upsert_other_users_template_as_user_raises_access_control_error( self, ): mock_request = create_mock_request(user=self.user1) with self.assertRaises(AccessControlError): template_api.upsert( self.fixture.user2_template, request=mock_request )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_upsert_other_users_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.upsert(self.fixture.user2_template, request=mock_request)", "def test_upsert_own_template_as_user_saves(self):\n mock_request = create_mock_request(user=self.user1)\n template_api.upsert(self.fixture.user1_template, request=mock_request)", "def test_upsert_own_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.upsert(self.fixture.user1_template, request=mock_request)", "def test_upsert_global_template_as_user_raises_access_control_error(self):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.global_template, request=mock_request\n )", "def test_upsert_other_users_template_as_staff_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.staff_user1)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.user2_template, request=mock_request\n )", "def test_upsert_global_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.upsert(self.fixture.global_template, request=mock_request)", "def test_upsert_user_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.user1_template, request=mock_request\n )", "def test_upsert_own_template_as_staff_saves(self):\n mock_request = create_mock_request(user=self.staff_user1)\n template_api.upsert(self.fixture.user1_template, request=mock_request)", "def test_upsert_user_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.user1_template, request=mock_request\n )", "def test_upsert_global_template_as_staff_saves(self):\n mock_request = create_mock_request(user=self.staff_user1)\n template_api.upsert(self.fixture.global_template, request=mock_request)", "def test_upsert_global_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.global_template, request=mock_request\n )", "def test_upsert_global_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.global_template, request=mock_request\n )", "def test_not_creator_cannot_update(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url,\n expected_url=expected_url,\n data=self.data)", "def test_update_template_registration(self):\n pass", "def test_delete_other_users_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.delete(self.fixture.user2_template, request=mock_request)", "def test_replace_user(self):\n pass", "def test_set_display_name_other_users_template_as_user_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.user1)\n with 
self.assertRaises(AccessControlError):\n template_api.set_display_name(\n self.fixture.user2_template, \"new_name\", request=mock_request\n )", "def test_delete_other_users_template_as_user_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.user2_template, request=mock_request\n )", "def test_update_by_non_owner(self):\n # User 1\n saved1 = self.create_article()\n article_url = saved1[0]\n # get user2 details\n token = self.create_article_user2()\n response = self.test_client.put(article_url,self.article_update_data, format='json', HTTP_AUTHORIZATION=token)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_upsert_user(self):\n db = database.Database()\n db.upsert_user('nick', 100, 100)\n\n the_args, _ = db._cursor.execute.call_args\n sql = the_args[0]\n expected_sql = 'INSERT INTO quota_violations (username, triggered, last_notified)\\n VALUES (%s, %s, %s)\\n ON CONFLICT (username)\\n DO UPDATE SET\\n (triggered, last_notified)\\n = (EXCLUDED.triggered, EXCLUDED.last_notified);\\n '\n\n self.assertEqual(sql, expected_sql)", "def test_admin_cannot_update_non_existant_user(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Lover',\n username='lover',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.put(\n '/api/v1/users/5',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], \"This user doesn't exist!\")\n self.assertEqual(resp.status_code, 400)", "def test_get_other_users_template_as_staff_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.staff_user1)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user2_template.id, request=mock_request\n )", "def test_post_update_regular_user(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n user_url = reverse('user-detail', kwargs={'pk': self.superuser.id})\n self.client.force_authenticate(user=self.user)\n title = 'Random New Title'\n body = 'Random New Body'\n response = self.client.put(url, {'title': title, 'body': body, 'user': user_url}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_delete_own_template_as_user_saves(self):\n mock_request = create_mock_request(user=self.user1)\n template_api.delete(self.fixture.user1_template, request=mock_request)", "def test_delete_own_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.delete(self.fixture.user1_template, request=mock_request)", "def test_updateview_write_for_wrong_user(self):\n\n for user in self.users:\n updateview = reverse('account_update', args=(user.uuid,))\n other_users = self.users\n other_users.remove(user)\n random_user = random.choice(other_users)\n\n self.client.login(email=random_user.email, password='letmein')\n\n valid_data = {'email': user.email, 'first_name': user.first_name,\n 'last_name': user.last_name, 'language': user.language}\n invalid_data = valid_data.copy()\n invalid_data['email'] = 'invalid_email_address'\n valid_data_response = self.client.post(updateview, valid_data)\n invalid_data_response = self.client.post(updateview, invalid_data)\n\n 
self.assertEqual(valid_data_response.status_code, 403)\n self.assertEqual(invalid_data_response.status_code, 403)", "def test_get_other_users_template_raises_access_control_error(self):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user2_template.id, request=mock_request\n )", "def test_delete_other_users_template_as_staff_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.staff_user1)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.user2_template, request=mock_request\n )", "def test_update_user(self):\n pass", "def test_not_creator_cannot_update_tab(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n expected_url = reverse('group_view', args=(self.group.pk,))\n\n utils.test_cannot_access(self, self.url, expected_url, self.data)", "def test_update_privileges_fails(self):\n user = User.create(name='foo', email='foo@bar.com', user_type='user',\n owned_teams=[], owned_organizations=[])\n user.put()\n\n # You get a 200, but the changes you requested don't happen.\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'user_type': 'super_admin', 'owned_teams': ['Team_foo'],\n 'owned_organizations': ['Organization_foo']},\n headers=self.login_headers(user),\n )\n user_dict = json.loads(response.body)\n self.assertEqual(user.user_type, user_dict['user_type'])\n self.assertEqual(user.owned_teams, user_dict['owned_teams'])\n self.assertEqual(user.owned_organizations,\n user_dict['owned_organizations'])\n\n # Also not changed in the db.\n fetched_user = User.get_by_id(user.uid)\n self.assertEqual(user.user_type, fetched_user.user_type)\n self.assertEqual(user.owned_teams, fetched_user.owned_teams)\n self.assertEqual(user.owned_organizations,\n fetched_user.owned_organizations)", "def test_set_display_name_other_users_template_as_staff_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.staff_user1)\n with self.assertRaises(AccessControlError):\n template_api.set_display_name(\n self.fixture.user2_template, \"new_name\", request=mock_request\n )", "def test_update_self_fail(self):\n new_user = self.create_user('1')\n url = '/0/chefs/' + str(new_user.pk)\n\n headers = self.login()\n resp = self.client.put(url, **headers)\n self.assertInvalidCredentials(resp)", "def test_get_all_accessible_by_hash_as_superuser_returns_other_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.superuser1)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.user2_template.hash, request=mock_request\n )\n self.assertTrue(self.fixture.user1_template not in list(templates))\n self.assertTrue(self.fixture.user2_template in list(templates))\n self.assertTrue(self.fixture.global_template not in list(templates))", "def test_post_update_unauthorized(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n user_url = reverse('user-detail', kwargs={'pk': self.superuser.id})\n title = 'Random New Title'\n body = 'Random New Body'\n response = self.client.put(url, {'title': title, 'body': body, 'user': user_url}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_set_display_name_global_template_as_user_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n 
template_api.set_display_name(\n self.fixture.global_template, \"new_name\", request=mock_request\n )", "def test_update_useruser_uuid_put(self):\n pass", "def test_user_update_permissions(self):\n userPK = User.objects.get(username='c2e1').pk\n url = reverse('User-detail', kwargs={'pk': userPK})\n data = {'username': 'company1NewTest'}\n response = self.client.put(url, data, format='json')\n #This is 404 instead of 403 because there is no way to view a company\n #that you arent an employee of.\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n self.assertNotEqual(User.objects.get(pk=userPK).username,\n 'company1NewTest')\n data = {'username': 'company1NewTest2'}\n response = self.client.patch(url, data, format='json')\n #This is 404 instead of 403 because there is no way to view a company\n #that you arent an employee of.\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n self.assertNotEqual(User.objects.get(pk=userPK).username,\n 'company1NewTest2')", "def test_update_another_user(self):\n user1_response = self.client.post(reverse('user-list'), {\n 'username': 'aseem', 'password': 'passwrodaosida123'\n })\n update_user_resp = self.client.patch(\n reverse('user-list') + '1/', {\n 'username': 'rakesh', 'password': 'passwrodaosida123'\n })\n\n self.assertEqual(update_user_resp.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_updateview_read_for_wrong_user(self):\n\n for user in self.users:\n updateview = reverse('account_update', args=(user.uuid,))\n other_users = self.users\n other_users.remove(user)\n random_user = random.choice(other_users)\n\n self.client.login(email=random_user.email, password='letmein')\n\n response = self.client.get(updateview)\n\n self.assertEqual(response.status_code, 403)", "def test_put_non_owner(self):\n another_user = CustomUser.objects.create(id=1067, email='another_user1@mail.com', is_active=True)\n another_user.set_password('testpassword')\n another_user.save()\n\n self.client.login(email='another_user1@mail.com', password='testpassword')\n\n data = {\n 'week_day': 3\n }\n\n url = reverse('notification',\n kwargs={'way_id': self.notification.way_id, 'notification_id': self.notification.id})\n response = self.client.put(url, json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')\n self.assertEqual(response.status_code, 403)", "def test_patch_user(self):\n pass", "def test_user_not_in_group_cannot_update(self):\n\n utils.create_user_and_authenticate(self)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url,\n expected_url=expected_url,\n data=self.data)", "def test_share_template_registration(self):\n pass", "def test_get_all_accessible_by_hash_list_as_superuser_returns_other_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.superuser1)\n templates = template_api.get_all_accessible_by_hash_list(\n [self.fixture.user2_template.hash], request=mock_request\n )\n self.assertTrue(self.fixture.user1_template not in list(templates))\n self.assertTrue(self.fixture.user2_template in list(templates))\n self.assertTrue(self.fixture.global_template not in list(templates))", "def test_api_user_put(self):\n pass", "def test_update_activity_template(self):\n pass", "def test_delete_global_template_as_user_raises_access_control_error(self):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.global_template, request=mock_request\n )", "def test_modify_userid_404(self):\n 
resp = self.app.put('/users/thisuserdoesntexist',\n data=json.dumps(self.test_user1_data))\n assert resp.status_code == 404", "async def test_rendering_template_admin(\n hass: HomeAssistant, mock_api_client: TestClient, hass_admin_user: MockUser\n) -> None:\n hass_admin_user.groups = []\n resp = await mock_api_client.post(const.URL_API_TEMPLATE)\n assert resp.status == HTTPStatus.UNAUTHORIZED", "def test_unshare_template_registration(self):\n pass", "def test_update_template_subscription(self):\n pass", "def test_get_all_accessible_by_hash_as_superuser_returns_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.superuser1)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.user1_template.hash, request=mock_request\n )\n self.assertTrue(self.fixture.user1_template in list(templates))\n self.assertTrue(self.fixture.user2_template not in list(templates))\n self.assertTrue(self.fixture.global_template not in list(templates))", "def test_expected_users(self):\n print()\n print(\"Testing users whose parameters are safe...\")\n for user_id in self.get_unique_ids(100):\n self.store_expected_user(user_id)\n \n User.objects.all().delete()\n print(\"Testing many users whose parameters are safe with bulk_create...\")\n self.store_many_expected_users()\n\n print(\"-\" * 10)", "def test_get_any_template_as_superuser_returns_template(self):\n mock_request = create_mock_request(user=self.superuser1)\n template = template_api.get_by_id(\n self.fixture.user1_template.id, request=mock_request\n )\n self.assertEqual(template, self.fixture.user1_template)\n template = template_api.get_by_id(\n self.fixture.user2_template.id, request=mock_request\n )\n self.assertEqual(template, self.fixture.user2_template)\n template = template_api.get_by_id(\n self.fixture.global_template.id, request=mock_request\n )\n self.assertEqual(template, self.fixture.global_template)", "def test_user_update_request(self):\n pass", "def test_with_duplicate_user(data_store_path):\n data_set = [\n {\"name\": \"Eric Idle\", \"phone\": \"123-456-7890\", \"address\": \"here\"},\n {\"name\": \"John Cleese\", \"phone\": \"111-222-3333\", \"address\": \"there\"},\n ]\n data_store_path.write_text(yaml.dump(data_set))\n data_store = YAMLDataStore(file_path=str(data_store_path))\n assert data_store._users == data_set\n\n updated_user = {\n \"name\": \"John Cleese\",\n \"phone\": \"999-999-9999\",\n \"address\": \"not here\",\n }\n with pytest.raises(DuplicateUserError) as error:\n data_store.update(\"Eric Idle\", **updated_user)\n\n assert \"John Cleese\" in str(error.value)", "def test_set_display_name_other_users_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.set_display_name(\n self.fixture.user2_template, \"new_name\", request=mock_request\n )", "def test_delete_own_template_as_staff_saves(self):\n mock_request = create_mock_request(user=self.staff_user1)\n template_api.delete(self.fixture.user1_template, request=mock_request)", "def test_duplicate_users(self):\n\n auag = UsersAndGroups()\n\n # create a duplicate with default flag to raise an error.\n auag.add_user(User(name=\"user1\"))\n with self.assertRaises(Exception):\n auag.add_user(User(name=\"user1\"))\n\n # create with overwrite.\n auag.add_user(\n User(name=\"user2\", mail=\"user2@foo.com\", group_names=[\"group2\"]),\n duplicate=UsersAndGroups.OVERWRITE_ON_DUPLICATE,\n )\n u = auag.get_user(\"user2\")\n self.assertEqual(u.name, \"user2\")\n self.assertEqual(u.mail, 
\"user2@foo.com\")\n self.assertEqual(u.groupNames, [\"group2\"])\n\n auag.add_user(\n User(name=\"user2\", mail=\"user2@bar.com\", group_names=[\"group3\"]),\n duplicate=UsersAndGroups.OVERWRITE_ON_DUPLICATE,\n )\n u = auag.get_user(\"user2\")\n self.assertEqual(u.name, \"user2\")\n self.assertEqual(u.mail, \"user2@bar.com\")\n self.assertEqual(u.groupNames, [\"group3\"])\n\n # create with update.\n auag.add_user(\n User(name=\"user3\", mail=\"user3@foo.com\", group_names=[\"group2\"]),\n duplicate=UsersAndGroups.UPDATE_ON_DUPLICATE,\n )\n u = auag.get_user(\"user3\")\n self.assertEqual(u.name, \"user3\")\n self.assertEqual(u.mail, \"user3@foo.com\")\n self.assertEqual(u.groupNames, [\"group2\"])\n\n auag.add_user(\n User(name=\"user3\", mail=\"user3@bar.com\", group_names=[\"group3\"]),\n duplicate=UsersAndGroups.UPDATE_ON_DUPLICATE,\n )\n u = auag.get_user(\"user3\")\n self.assertEqual(u.mail, \"user3@bar.com\")\n self.assertEqual(u.groupNames, [\"group3\", \"group2\"])\n\n # create with ignore.\n auag.add_user(\n User(name=\"user4\", mail=\"user4@foo.com\", group_names=[\"group2\"]),\n duplicate=UsersAndGroups.IGNORE_ON_DUPLICATE,\n )\n u = auag.get_user(\"user4\")\n self.assertEqual(u.name, \"user4\")\n self.assertEqual(u.mail, \"user4@foo.com\")\n self.assertEqual(u.groupNames, [\"group2\"])\n\n auag.add_user(\n User(name=\"user4\", mail=\"user4@bar.com\", group_names=[\"group3\"]),\n duplicate=UsersAndGroups.IGNORE_ON_DUPLICATE,\n )\n u = auag.get_user(\"user4\")\n self.assertEqual(u.name, \"user4\")\n self.assertEqual(u.mail, \"user4@foo.com\")\n self.assertEqual(u.groupNames, [\"group2\"])", "def test_delete_global_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.delete(self.fixture.global_template, request=mock_request)", "def test_groups_group_users_put(self):\n pass", "def test_groups_group_users_put(self):\n pass", "def test_update(self):\n user = self.custodian_1_user\n user_client = self.custodian_1_client\n urls = [reverse('api:user-detail', kwargs={'pk': user.pk})]\n new_first_name = \"New First Name\"\n data = {\n \"first_name\": new_first_name,\n }\n access = {\n \"forbidden\": [self.anonymous_client, self.readonly_client, self.custodian_2_client],\n \"allowed\": [self.admin_client, user_client]\n }\n\n for client in access['forbidden']:\n for url in urls:\n self.assertIn(\n client.patch(url, data, format='json').status_code,\n [status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]\n )\n\n for client in access['allowed']:\n for url in urls:\n new_first_name += '1'\n data['first_name'] = new_first_name\n self.assertEqual(\n client.patch(url, data, format='json').status_code,\n status.HTTP_200_OK\n )\n user.refresh_from_db()\n self.assertEqual(user.first_name, new_first_name)", "def test_add_duplicate(self, api):\n self.builder.add_user(api.get_user())\n resp = api.add_user(api.get_user())\n self.builder.del_user(api.get_user())\n assert resp.status_code == 304", "def test_update_the_created_user():\n pytest.test_user.name += \"Updated\"\n response = api_helper.update_user(pytest.test_user)\n assert response.status_code == 200", "def test_no_uid_causes_error():\n empty = create_db()\n with pytest.raises(ValueError):\n o_obj.update_object_in_db(\n empty,\n \"some_uid\",\n INP\n )", "def test_profile_route_uses_right_templates(self):\n self.add_testuser()\n response = self.client.get(\"/profile/testuser/\")\n self.assertTemplateUsed(response, \"layout.html\")", "def test_createUser_single(self):\n #TODO: this and 
other tests", "def test_user_not_in_group_cannot_update_tab(self):\n\n utils.create_user_and_authenticate(self)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url, expected_url, self.data)", "def test_invalid_update_request_with_taken_username(self):\n self.client.credentials(HTTP_AUTHORIZATION=u.auth_header(self.author.get_key()))\n response: Response = self.client.patch(BASE_URL + '/update/', data={\n 'username': self.temporary_author.username\n })\n data = u.get_json(response)\n\n self.assertEqual(response.status_code, status.HTTP_409_CONFLICT, msg=data)\n self.assertEqual(data, {'detail': f\"User '{self.temporary_author.username}' already exists.\"})", "def test_admin_cannot_update_user_with_vague_user_id(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Love',\n username='love',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.put(\n '/api/v1/users/kk',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'User_id should be numbers!')\n self.assertEqual(resp.status_code, 400)", "def test_no_template(self):\n distillery = Distillery.objects.get_by_natural_key(\n 'mongodb.test_database.test_docs')\n try:\n distillery.save()\n except AttributeError:\n self.fail('put_template() raised AttributeError unexpectedly')", "def test_post_update_admin(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n user_url = reverse('user-detail', kwargs={'pk': self.superuser.id})\n self.client.force_authenticate(user=self.superuser)\n title = 'Random New Title'\n body = 'Random New Body'\n response = self.client.put(url, {'title': title, 'body': body, 'user': user_url}, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertIn(title, response.content)\n self.assertIn(body, response.content)\n self.assertIn(user_url, response.content)", "def test_sync_incorrect_user_yaml_file(syncer, monkeypatch, db_session):\n path = os.path.join(\n os.path.dirname(os.path.realpath(__file__)), \"data/yaml/incorrect_user.yaml\"\n )\n monkeypatch.setattr(syncer, \"sync_from_local_yaml_file\", path)\n with pytest.raises(AssertionError):\n syncer.sync()\n assert syncer.arborist_client.create_resource.not_called()\n assert syncer.arborist_client.create_role.not_called()\n assert syncer.arborist_client.create_policy.not_called()", "def test_username_not_unique(self, client, users):\n user = users[0]\n data = factory.build(dict, FACTORY_CLASS=UserFactory, username=users[1].username)\n url = reverse('users:update', args=(user.pk,))\n response = client.post(url, data)\n assert response.status_code == 200\n assert 'A user with that username already exists.' 
in str(response.content)", "def test_team_template_folders_id_templates_fk_put(self):\n pass", "def test_wrong_id(self):\n self.request.matchdict = {'user_id': int(self.request.user.id)+4}\n self.request.json_body = {}\n result = user_id_put_view(self.request)['d']\n self.assertEqual(result, error_dict('api_errors', 'not authenticated for this request'))", "async def test_invalid_insert_user_duplicate_key(database):\n await database.setup_database(reset=True)\n await database.insert_user(\"\")\n for user_id in zip([\"1\" for _ in range(0,10)]):\n try:\n await database.insert_user(user_id=user_id)\n assert False\n except:\n assert True\n await database.close_pool()", "def test_bulk_create(self):\n urls = [reverse('api:user-list')]\n data = [\n {\n \"username\": \"newuser1\",\n \"email\": \"newuser1@example.com\",\n \"password\": \"password\"\n },\n {\n \"username\": \"newuser2\",\n \"email\": \"newuser2@example.com\",\n \"password\": \"password\"\n },\n ]\n access = {\n \"forbidden\": [self.admin_client, self.anonymous_client, self.readonly_client, self.custodian_1_client],\n \"allowed\": []\n }\n for client in access['forbidden']:\n for url in urls:\n self.assertIn(\n client.post(url, data, format='json').status_code,\n [status.HTTP_400_BAD_REQUEST, status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]\n )\n\n for client in access['allowed']:\n for url in urls:\n self.assertEqual(\n client.post(url, data, format='json').status_code,\n status.HTTP_201_CREATED\n )", "def test_create_account(self):\n response = self.client.post(\"http://localhost:8000/api/signup/\",\n data={\"username\": \"tests\", \"password\": \"TestTest\"})\n first_user = MyUser.objects.get()\n self.assertEqual(response.status_code, HTTP_201_CREATED)\n self.assertEqual(first_user.username, 'tests')\n response = self.client.post(\"http://localhost:8000/api/signup/\",\n data={\"username\": \"tests2\", \"password\": \"TestTest\"})\n self.assertEqual(response.status_code, HTTP_201_CREATED)\n self.assertTrue(MyUser.objects.filter(username=\"tests2\").exists())\n user = MyUser.objects.get(username=\"tests2\")\n response = self.client.put(f\"http://localhost:8000/api/users/{user.pk}/\", data={\"email\": \"tst@test.te\"})\n # Not logged shouldnt change anything\n self.assertEqual(response.status_code, HTTP_401_UNAUTHORIZED)\n user.set_password(\"TestTest\")\n user.save()\n self.assertTrue(self.client.login(username=\"tests2\", password=\"TestTest\"))\n response = self.client.patch(f\"http://localhost:8000/api/users/{user.pk}/\", data={\"email\": \"tst@test.te\"})\n # Logged, should change\n self.assertEqual(response.status_code, HTTP_200_OK)\n self.assertEqual(MyUser.objects.get(username=\"tests2\").email, \"tst@test.te\")\n # Dont update others users\n response = self.client.patch(f\"http://localhost:8000/api/users/{first_user.pk}/\", data={\"email\": \"tst@test.te\"})\n self.assertEqual(response.status_code, HTTP_400_BAD_REQUEST)", "def test_register_duplicate(self):\n self._storage.register_user(\"user1\", \"code1\")\n with self.assertRaises(DuplicateUserException):\n self._storage.register_user(\"user1\", \"code1\")", "def test_create__duplicate(self):\n testing_config.sign_in('admin@example.com', 123567890)\n\n json_data = {'email': 'user@example.com'}\n with test_app.test_request_context(self.request_path, json=json_data):\n with self.assertRaises(werkzeug.exceptions.BadRequest):\n self.handler.do_post()\n\n unrevised_appuser = user_models.AppUser.get_by_id(self.appuser_id)\n self.assertEqual('user@example.com', 
unrevised_appuser.email)", "def test_put_change_user(self):\n new_user = self.make_user('new_user')\n url = reverse(\n 'projectroles:api_role_update',\n kwargs={'roleassignment': self.update_as.sodar_uuid},\n )\n put_data = {\n 'role': PROJECT_ROLE_GUEST,\n 'user': str(new_user.sodar_uuid),\n }\n response = self.request_knox(url, method='PUT', data=put_data)\n self.assertEqual(response.status_code, 400, msg=response.content)", "def test_create_with_duplicate_userid(self):\n\n self.sdkapi.guest_create(self.userid, 1, 1024)\n try:\n self.sdkapi.guest_create(self.userid, 1, 1024)\n except exception.SDKSMUTRequestFailed as err:\n self.assertEqual(err.results['rc'], 400)\n self.assertEqual(err.results['rs'], 8)", "def test_project_create_cant_edit_users_existing_user(self):\n user = fake_clients.FakeUser(name=\"test@example.com\")\n\n setup_identity_cache(users=[user])\n\n url = \"/v1/actions/CreateProjectAndUser\"\n data = {\"project_name\": \"test_project\", \"email\": \"test@example.com\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"task created\"]})\n task = Task.objects.all()[0]\n action_models = task.actions\n actions = [act.get_action() for act in action_models]\n self.assertTrue(all([act.valid for act in actions]))", "def test_set_display_name_user_template_as_anonymous_with_access_right_access_raises_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.set_display_name(\n self.fixture.user1_template, \"new_name\", request=mock_request\n )", "def test_less_common_patches(self):\n payload = json.dumps([\n {\"op\": \"test\", \"path\": \"/level\", \"value\": 3},\n {\"op\": \"move\", \"from\": \"/email\", \"path\": \"/username\"},\n {\"op\": \"remove\", \"path\": \"/created\"}\n ])\n result = patch_item(self.valid_users[1], payload)\n user = Users.query.filter_by(UserID=self.valid_users[1]).first_or_404()\n self.assertEqual(\"unittest2@email.com\", result[\"username\"])\n self.assertEqual(None, result[\"created\"])\n self.assertEqual(\"unittest2@email.com\", user.Username)\n self.assertEqual(None, user.Created)\n self.assertNotEqual(None, user.Updated) # Should update automatically", "def testUpdateAccessAllowed(self):\n for user in (self.contributor, self.delegate, self.owner, self.root):\n response = self.runPut(user, sequencer=self.hiseq2000.sodar_uuid, data=self.post_data)\n self.response_200(response)\n data = json.loads(response.content.decode(\"utf-8\"))\n self.assertEqual(data[\"vendor_id\"], self.post_data[\"vendor_id\"])", "def test_user_cannot_write(app, resource):\n with app.user():\n data = {}\n\n # Try to post something\n app.client.post('/' + resource,\n data=data,\n assert_status=403)\n\n # Create fake item, try to patch/delete it\n _id = app.data.driver.db[resource].insert({})\n app.client.patch('/%s/%s' % (resource, _id),\n data=data,\n assert_status=403)\n app.client.delete('/%s/%s' % (resource, _id),\n assert_status=403)", "def test_products_ref_users_put(self):\n pass", "def test_get_all_accessible_by_hash_list_as_superuser_returns_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.superuser1)\n templates = template_api.get_all_accessible_by_hash_list(\n [self.fixture.user1_template.hash], request=mock_request\n )\n self.assertTrue(self.fixture.user1_template in list(templates))\n 
self.assertTrue(self.fixture.user2_template not in list(templates))\n self.assertTrue(self.fixture.global_template not in list(templates))", "def test_partial_update(self):\n self.client.force_authenticate(user=self.admin)\n\n data = {\n 'retreat': reverse(\n 'retreat:retreat-detail', args=[self.retreat.id]\n ),\n 'user': reverse('user-detail', args=[self.user2.id]),\n }\n\n response = self.client.put(\n reverse(\n 'retreat:waitqueuenotification-detail',\n kwargs={'pk': 1},\n ),\n data,\n format='json',\n )\n\n self.assertEqual(\n response.status_code,\n status.HTTP_405_METHOD_NOT_ALLOWED\n )", "def test_signup_dupe_username(self):\n\n invalid_u = User.signup(\"test@test.com\", \"allison\", \"testpass\", \"Test\", \"User\", None)\n \n uid = 99999\n invalid_u.id = uid\n\n with self.assertRaises(exc.IntegrityError) as context:\n db.session.commit()", "def test_get_all_as_user_returns_accessible_templates(self):\n mock_request = create_mock_request(user=self.user)\n templates = template_api.get_all(request=mock_request)\n self.assertEqual(templates.count(), 2)\n self.assertTrue(self.fixture.user1_template in list(templates))\n self.assertTrue(self.fixture.global_template in list(templates))", "def test_get_user_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user1_template.id, request=mock_request\n )", "def test_get_all_accessible_by_hash_as_staff_does_not_return_other_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.staff_user1)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.user2_template.hash, request=mock_request\n )\n self.assertTrue(self.fixture.user1_template not in list(templates))\n self.assertTrue(self.fixture.user2_template not in list(templates))\n self.assertTrue(self.fixture.global_template not in list(templates))", "def test_duplicate_signup_attempt(self):\n\n self.client.post(\n \"api/v2/auth/signup\", data=json.dumps(self.generic_user), content_type=\"application/json\")\n res = self.client.post(\n \"api/v2/auth/signup\", data=json.dumps(self.generic_user), content_type=\"application/json\")\n result = json.loads(res.data)\n self.assertEqual(result[\"Error\"], \"User already exists\")\n self.assertEqual(res.status_code, 409)", "def test_get_all_accessible_by_hash_as_user_returns_user_template(self):\n mock_request = create_mock_request(user=self.user1)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.user1_template.hash, request=mock_request\n )\n self.assertTrue(self.fixture.user1_template in list(templates))\n self.assertTrue(self.fixture.user2_template not in list(templates))\n self.assertTrue(self.fixture.global_template not in list(templates))", "def test_fail_put_detail_user(sample_user):\n\n client = auth_client(APIClient(), sample_user.email, \"testpassword\")\n\n user_data = {\"email\": \"new@testuser.com\", \"password\": \"testpassword\"}\n response = client.put(reverse(\"user-detail\"), user_data, format=\"json\")\n assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED" ]
[ "0.820034", "0.8083881", "0.8039612", "0.80145544", "0.80041873", "0.76042765", "0.7525442", "0.7524677", "0.7430861", "0.7153691", "0.7078404", "0.69165915", "0.67743903", "0.6736194", "0.6717341", "0.6690355", "0.6580235", "0.65308565", "0.6507076", "0.6504665", "0.6474388", "0.6453021", "0.6449245", "0.6447113", "0.6443009", "0.6431808", "0.6425109", "0.6398658", "0.63973486", "0.6363779", "0.63532203", "0.629723", "0.6265195", "0.6258842", "0.6242454", "0.6201719", "0.6185738", "0.61796004", "0.6176208", "0.61592245", "0.6158886", "0.6113121", "0.6105671", "0.60826766", "0.6073193", "0.60544026", "0.6041931", "0.6041652", "0.603291", "0.60173386", "0.6016414", "0.5993814", "0.59933954", "0.5984844", "0.59756225", "0.5959909", "0.5954779", "0.5952625", "0.594848", "0.5946578", "0.5933018", "0.5910414", "0.5910414", "0.5908508", "0.5908121", "0.59026116", "0.5902364", "0.5893936", "0.58812994", "0.58811563", "0.5877327", "0.5872608", "0.58657515", "0.5865554", "0.5864684", "0.5860754", "0.58563155", "0.5856217", "0.58489954", "0.5845729", "0.58443934", "0.5843802", "0.5838332", "0.5835209", "0.58232266", "0.5806731", "0.5805227", "0.57912153", "0.57909805", "0.5787844", "0.57835376", "0.5783252", "0.57790726", "0.5778151", "0.5771569", "0.57713044", "0.5768407", "0.5767222", "0.57669747", "0.57652146" ]
0.82194316
0
test upsert global template as user raises access control error
def test_upsert_global_template_as_user_raises_access_control_error(self): mock_request = create_mock_request(user=self.user1) with self.assertRaises(AccessControlError): template_api.upsert( self.fixture.global_template, request=mock_request )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_upsert_global_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.upsert(self.fixture.global_template, request=mock_request)", "def test_upsert_own_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.upsert(self.fixture.user1_template, request=mock_request)", "def test_upsert_own_template_as_user_saves(self):\n mock_request = create_mock_request(user=self.user1)\n template_api.upsert(self.fixture.user1_template, request=mock_request)", "def test_upsert_global_template_as_staff_saves(self):\n mock_request = create_mock_request(user=self.staff_user1)\n template_api.upsert(self.fixture.global_template, request=mock_request)", "def test_upsert_other_users_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.upsert(self.fixture.user2_template, request=mock_request)", "def test_upsert_global_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.global_template, request=mock_request\n )", "def test_upsert_global_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.global_template, request=mock_request\n )", "def test_upsert_other_users_template_as_user_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.user2_template, request=mock_request\n )", "def test_upsert_user_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.user1_template, request=mock_request\n )", "def test_upsert_user_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.user1_template, request=mock_request\n )", "def test_upsert_other_users_template_as_staff_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.staff_user1)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.user2_template, request=mock_request\n )", "def test_update_template_registration(self):\n pass", "def test_upsert_own_template_as_staff_saves(self):\n mock_request = create_mock_request(user=self.staff_user1)\n template_api.upsert(self.fixture.user1_template, request=mock_request)", "def test_share_template_registration(self):\n pass", "def test_delete_global_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.delete(self.fixture.global_template, request=mock_request)", "def test_set_display_name_global_template_as_user_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.set_display_name(\n self.fixture.global_template, \"new_name\", request=mock_request\n )", "def test_replace_user(self):\n pass", "def test_unshare_template_registration(self):\n pass", "def 
test_delete_global_template_as_user_raises_access_control_error(self):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.global_template, request=mock_request\n )", "def test_update_template_profile_for_system_module(self):\n pass", "def test_register_template(self):\n pass", "def test_no_template(self):\n distillery = Distillery.objects.get_by_natural_key(\n 'mongodb.test_database.test_docs')\n try:\n distillery.save()\n except AttributeError:\n self.fail('put_template() raised AttributeError unexpectedly')", "def test_functionality(self):\n templateName = \"Test Template\"\n self.browserObject = globalVars.browserObject\n \n #Check for current logged in user\n self.verifyCurrentUser(userRole='Administrator', loginAsUser=True)\n \n #Deleting Standard User\n userList = self.getLocalUsers(userName=globalVars.standardUser)\n if len(userList) > 0:\n self.deleteLocalUser(globalVars.standardUser, verifyUser=True)\n \n #Creates Sample Template if not exists\n self.createSampleTemplate(templateName=templateName, deleteAndCreate=True, publishedTemplate=True)", "def test_update_activity_template(self):\n pass", "async def test_rendering_template_admin(\n hass: HomeAssistant, mock_api_client: TestClient, hass_admin_user: MockUser\n) -> None:\n hass_admin_user.groups = []\n resp = await mock_api_client.post(const.URL_API_TEMPLATE)\n assert resp.status == HTTPStatus.UNAUTHORIZED", "def test_global_template_as_user_returns_template(self):\n mock_request = create_mock_request(user=self.user1)\n template = template_api.get_by_id(\n self.fixture.global_template.id, request=mock_request\n )\n self.assertEqual(template, self.fixture.global_template)", "def test_post_update_regular_user(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n user_url = reverse('user-detail', kwargs={'pk': self.superuser.id})\n self.client.force_authenticate(user=self.user)\n title = 'Random New Title'\n body = 'Random New Body'\n response = self.client.put(url, {'title': title, 'body': body, 'user': user_url}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_update_template_subscription(self):\n pass", "def test_delete_own_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.delete(self.fixture.user1_template, request=mock_request)", "def test_retrieve_template_registration(self):\n pass", "def test_post_update_unauthorized(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n user_url = reverse('user-detail', kwargs={'pk': self.superuser.id})\n title = 'Random New Title'\n body = 'Random New Body'\n response = self.client.put(url, {'title': title, 'body': body, 'user': user_url}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_create_template_subsciption(self):\n pass", "def test_not_creator_cannot_update(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url,\n expected_url=expected_url,\n data=self.data)", "def test_upsert_user(self):\n db = database.Database()\n db.upsert_user('nick', 100, 100)\n\n the_args, _ = db._cursor.execute.call_args\n sql = the_args[0]\n expected_sql = 'INSERT INTO quota_violations (username, triggered, last_notified)\\n VALUES (%s, %s, %s)\\n ON CONFLICT (username)\\n DO UPDATE SET\\n (triggered, 
last_notified)\\n = (EXCLUDED.triggered, EXCLUDED.last_notified);\\n '\n\n self.assertEqual(sql, expected_sql)", "def test_delete_other_users_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.delete(self.fixture.user2_template, request=mock_request)", "def test_set_display_name_other_users_template_as_user_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.set_display_name(\n self.fixture.user2_template, \"new_name\", request=mock_request\n )", "def test_delete_global_template_as_staff_saves(self):\n mock_request = create_mock_request(user=self.staff_user1)\n template_api.delete(self.fixture.global_template, request=mock_request)", "def test_exists_false(self):\n self.assertFalse(PrepTemplate.exists(2))", "def test_update_user(self):\n pass", "def test_delete_own_template_as_user_saves(self):\n mock_request = create_mock_request(user=self.user1)\n template_api.delete(self.fixture.user1_template, request=mock_request)", "def test_05_use_private_template_in_project(self):\n # 1. Create a project\n # 2. Verify that in order to use somebody's Private template for vm\n # creation in the project, permission to use the template has to\n # be granted to the Project (use API 'updateTemplatePermissions'\n # with project id to achieve that).\n\n try:\n self.debug(\"Deploying VM for with public template: %s\" %\n self.template.id)\n virtual_machine_1 = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n serviceofferingid=self.service_offering.id,\n projectid=self.project.id\n )\n self.cleanup.append(virtual_machine_1)\n # Verify VM state\n self.assertEqual(virtual_machine_1.state,\n 'Running',\n \"Check VM state is Running or not\")\n virtual_machine_1.stop(self.apiclient)\n # Get the Root disk of VM\n volumes = list_volumes(\n self.apiclient,\n projectid=self.project.id,\n type='ROOT',\n listall=True\n )\n self.assertEqual(\n isinstance(volumes, list),\n True,\n \"Check for list volume response return valid data\"\n )\n volume = volumes[0]\n\n self.debug(\"Creating template from volume: %s\" % volume.id)\n # Create a template from the ROOTDISK\n template_1 = Template.create(\n self.userapiclient,\n self.services[\"template\"],\n volumeid=volume.id\n )\n\n self.cleanup.append(template_1)\n # Verify Template state\n self.assertEqual(\n template_1.isready,\n True,\n \"Check Template is in ready state or not\"\n )\n\n # Update template permissions to grant permission to project\n self.debug(\n \"Updating template permissions:%s to grant access to project: %s\" % (\n template_1.id,\n self.project.id\n ))\n\n template_1.updatePermissions(\n self.apiclient,\n op='add',\n projectids=self.project.id\n )\n self.debug(\"Deploying VM for with privileged template: %s\" %\n self.template.id)\n virtual_machine_2 = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=template_1.id,\n serviceofferingid=self.service_offering.id,\n projectid=self.project.id\n )\n self.cleanup.append(virtual_machine_2)\n # Verify VM state\n self.assertEqual(\n virtual_machine_2.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n except Exception as e:\n self.fail(\"Exception occurred: %s\" % e)\n return", "def test_set_display_name_global_template_as_anonymous_with_access_right_access_raises_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n 
with self.assertRaises(AccessControlError):\n template_api.set_display_name(\n self.fixture.global_template, \"new_name\", request=mock_request\n )", "def test_update_by_non_owner(self):\n # User 1\n saved1 = self.create_article()\n article_url = saved1[0]\n # get user2 details\n token = self.create_article_user2()\n response = self.test_client.put(article_url,self.article_update_data, format='json', HTTP_AUTHORIZATION=token)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_api_user_put(self):\n pass", "def test_patch_user(self):\n pass", "def test_get_all_accessible_by_hash_as_superuser_returns_global_template(\n self,\n ):\n mock_request = create_mock_request(user=self.superuser1)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.global_template.hash, request=mock_request\n )\n self.assertTrue(self.fixture.user1_template not in list(templates))\n self.assertTrue(self.fixture.user2_template not in list(templates))\n self.assertTrue(self.fixture.global_template in list(templates))", "def test_admin_cannot_update_non_existant_user(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Lover',\n username='lover',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.put(\n '/api/v1/users/5',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], \"This user doesn't exist!\")\n self.assertEqual(resp.status_code, 400)", "def test_get_any_template_as_superuser_returns_template(self):\n mock_request = create_mock_request(user=self.superuser1)\n template = template_api.get_by_id(\n self.fixture.user1_template.id, request=mock_request\n )\n self.assertEqual(template, self.fixture.user1_template)\n template = template_api.get_by_id(\n self.fixture.user2_template.id, request=mock_request\n )\n self.assertEqual(template, self.fixture.user2_template)\n template = template_api.get_by_id(\n self.fixture.global_template.id, request=mock_request\n )\n self.assertEqual(template, self.fixture.global_template)", "def test_set_display_name_global_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.set_display_name(\n self.fixture.global_template, \"new_name\", request=mock_request\n )", "def test_set_display_name_global_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.set_display_name(\n self.fixture.global_template, \"new_name\", request=mock_request\n )", "def test_exists_true(self):\n self.assertTrue(PrepTemplate.exists(1))", "def test_update_template_not_found(self):\n template_id = \"foo\"\n\n rv = TEST_CLIENT.patch(f\"/templates/{template_id}\", json={})\n result = rv.json()\n\n expected = {\n \"message\": \"The specified template does not exist\",\n \"code\": \"TemplateNotFound\",\n }\n self.assertDictEqual(expected, result)\n self.assertEqual(rv.status_code, 404)", "def test_customization_template_crud():\n template_crud = pxe.CustomizationTemplate(\n name=generate_random_string(size=8),\n description=generate_random_string(size=16),\n image_type='RHEL-6',\n script_type='Kickstart',\n script_data='Testing the script')\n\n template_crud.create()\n with update(template_crud):\n template_crud.name = template_crud.name + \"_update\"\n 
template_crud.delete(cancel=False)", "def test_customization_template_crud():\n template_crud = pxe.CustomizationTemplate(\n name=generate_random_string(size=8),\n description=generate_random_string(size=16),\n image_type='RHEL-6',\n script_type='Kickstart',\n script_data='Testing the script')\n\n template_crud.create()\n with update(template_crud):\n template_crud.name = template_crud.name + \"_update\"\n template_crud.delete(cancel=False)", "def test_get_global_template_as_anonymous_with_access_right_returns_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n template = template_api.get_by_id(\n self.fixture.global_template.id, request=mock_request\n )\n self.assertEqual(template, self.fixture.global_template)", "def test_sync_incorrect_user_yaml_file(syncer, monkeypatch, db_session):\n path = os.path.join(\n os.path.dirname(os.path.realpath(__file__)), \"data/yaml/incorrect_user.yaml\"\n )\n monkeypatch.setattr(syncer, \"sync_from_local_yaml_file\", path)\n with pytest.raises(AssertionError):\n syncer.sync()\n assert syncer.arborist_client.create_resource.not_called()\n assert syncer.arborist_client.create_role.not_called()\n assert syncer.arborist_client.create_policy.not_called()", "def test_delete_other_users_template_as_user_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.user2_template, request=mock_request\n )", "def test_update_device_template(self):\n pass", "def test_set_display_name_other_users_template_as_staff_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.staff_user1)\n with self.assertRaises(AccessControlError):\n template_api.set_display_name(\n self.fixture.user2_template, \"new_name\", request=mock_request\n )", "def test_not_creator_cannot_update_tab(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n expected_url = reverse('group_view', args=(self.group.pk,))\n\n utils.test_cannot_access(self, self.url, expected_url, self.data)", "def test_04_public_private_template_use_in_project(self):\n # 1. Create a project\n # 2. Verify Public templates can be used without any restriction\n # 3. 
Verify that private template created in project belongs to this project\n # Verify that list template api wth project id list this template\n\n\n\n try:\n self.debug(\"Deploying VM for with public template: %s\" %\n self.template.id)\n virtual_machine_1 = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n serviceofferingid=self.service_offering.id,\n projectid=self.project.id\n )\n self.cleanup.append(virtual_machine_1)\n # Verify VM state\n self.assertEqual(\n virtual_machine_1.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n virtual_machine_1.stop(self.apiclient)\n # Get the Root disk of VM\n volumes = list_volumes(\n self.apiclient,\n projectid=self.project.id,\n type='ROOT',\n listall=True\n )\n self.assertEqual(\n isinstance(volumes, list),\n True,\n \"Check for list volume response return valid data\"\n )\n volume = volumes[0]\n\n self.debug(\"Creating template from volume: %s\" % volume.id)\n # Create a template from the ROOTDISK\n template_1 = Template.create(\n self.apiclient,\n self.services[\"template\"],\n volumeid=volume.id,\n projectid=self.project.id\n )\n\n self.cleanup.append(template_1)\n # Verify Template state\n self.assertEqual(\n template_1.isready,\n True,\n \"Check Template is in ready state or not\"\n )\n # Verify list template with project id is listing this template\n templatelist = Template.list(self.apiclient,projectid=self.project.id,id=template_1.id,templatefilter=\"all\")\n self.assertEqual(templatelist[0].id,template_1.id,\"template created does not belong to the project\")\n\n\n except Exception as e:\n self.fail(\"Exception occurred: %s\" % e)\n return", "def test_set_display_name_user_template_as_anonymous_with_access_right_access_raises_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.set_display_name(\n self.fixture.user1_template, \"new_name\", request=mock_request\n )", "def test_update_template_given_name_already_exists(self):\n template_id = util.MOCK_UUID_1\n template_name = util.MOCK_TEMPLATE_NAME_2\n rv = TEST_CLIENT.patch(\n f\"/templates/{template_id}\", json={\"name\": template_name}\n )\n result = rv.json()\n\n expected = {\n \"message\": \"a template with that name already exists\",\n \"code\": \"TemplateNameExists\",\n }\n self.assertDictEqual(expected, result)\n self.assertEqual(rv.status_code, 400)", "def test_team_template_folders_id_put(self):\n pass", "def test_update_self_fail(self):\n new_user = self.create_user('1')\n url = '/0/chefs/' + str(new_user.pk)\n\n headers = self.login()\n resp = self.client.put(url, **headers)\n self.assertInvalidCredentials(resp)", "def test_no_uid_causes_error():\n empty = create_db()\n with pytest.raises(ValueError):\n o_obj.update_object_in_db(\n empty,\n \"some_uid\",\n INP\n )", "def test_get_global_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.global_template.id, request=mock_request\n )", "def test_set_display_name_user_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.set_display_name(\n self.fixture.user1_template, \"new_name\", request=mock_request\n )", "def test_put_unauthorized(self):\n\n url = reverse('file')\n\n data = 
{\n 'shard_id': self.shard1.id,\n 'link_id': \"b8866161-0b1f-4a8e-acde-07047313ec8f\",\n 'parent_datastore_id': str(self.test_datastore_obj.id),\n 'chunk_count': 1,\n 'size': 512,\n }\n\n self.client.force_authenticate(user=self.test_user2_obj)\n response = self.client.put(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_unregister_template(self):\n pass", "def test_update_useruser_uuid_put(self):\n pass", "def test_delete_global_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.global_template, request=mock_request\n )", "def test_update_privileges_fails(self):\n user = User.create(name='foo', email='foo@bar.com', user_type='user',\n owned_teams=[], owned_organizations=[])\n user.put()\n\n # You get a 200, but the changes you requested don't happen.\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'user_type': 'super_admin', 'owned_teams': ['Team_foo'],\n 'owned_organizations': ['Organization_foo']},\n headers=self.login_headers(user),\n )\n user_dict = json.loads(response.body)\n self.assertEqual(user.user_type, user_dict['user_type'])\n self.assertEqual(user.owned_teams, user_dict['owned_teams'])\n self.assertEqual(user.owned_organizations,\n user_dict['owned_organizations'])\n\n # Also not changed in the db.\n fetched_user = User.get_by_id(user.uid)\n self.assertEqual(user.user_type, fetched_user.user_type)\n self.assertEqual(user.owned_teams, fetched_user.owned_teams)\n self.assertEqual(user.owned_organizations,\n fetched_user.owned_organizations)", "def test_get_other_users_template_as_staff_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.staff_user1)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user2_template.id, request=mock_request\n )", "def test_get_all_accessible_by_hash_as_superuser_returns_other_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.superuser1)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.user2_template.hash, request=mock_request\n )\n self.assertTrue(self.fixture.user1_template not in list(templates))\n self.assertTrue(self.fixture.user2_template in list(templates))\n self.assertTrue(self.fixture.global_template not in list(templates))", "def test_get_all_as_user_returns_accessible_templates(self):\n mock_request = create_mock_request(user=self.user)\n templates = template_api.get_all(request=mock_request)\n self.assertEqual(templates.count(), 2)\n self.assertTrue(self.fixture.user1_template in list(templates))\n self.assertTrue(self.fixture.global_template in list(templates))", "def test_invalidate_template_cache_in_virtualization_realm(self):\n pass", "def test_delete_global_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.global_template, request=mock_request\n )", "def test_delete_other_users_template_as_staff_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.staff_user1)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.user2_template, request=mock_request\n )", "def test_patch_namespaced_template(self):\n pass", "def 
test_update_subscription_template(self):\n pass", "def test_modify_userid_404(self):\n resp = self.app.put('/users/thisuserdoesntexist',\n data=json.dumps(self.test_user1_data))\n assert resp.status_code == 404", "def test_post_update_admin(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n user_url = reverse('user-detail', kwargs={'pk': self.superuser.id})\n self.client.force_authenticate(user=self.superuser)\n title = 'Random New Title'\n body = 'Random New Body'\n response = self.client.put(url, {'title': title, 'body': body, 'user': user_url}, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertIn(title, response.content)\n self.assertIn(body, response.content)\n self.assertIn(user_url, response.content)", "def test_get_other_users_template_raises_access_control_error(self):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user2_template.id, request=mock_request\n )", "def test_get_all_accessible_by_hash_as_superuser_returns_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.superuser1)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.user1_template.hash, request=mock_request\n )\n self.assertTrue(self.fixture.user1_template in list(templates))\n self.assertTrue(self.fixture.user2_template not in list(templates))\n self.assertTrue(self.fixture.global_template not in list(templates))", "def test_user_update_permissions(self):\n userPK = User.objects.get(username='c2e1').pk\n url = reverse('User-detail', kwargs={'pk': userPK})\n data = {'username': 'company1NewTest'}\n response = self.client.put(url, data, format='json')\n #This is 404 instead of 403 because there is no way to view a company\n #that you arent an employee of.\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n self.assertNotEqual(User.objects.get(pk=userPK).username,\n 'company1NewTest')\n data = {'username': 'company1NewTest2'}\n response = self.client.patch(url, data, format='json')\n #This is 404 instead of 403 because there is no way to view a company\n #that you arent an employee of.\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n self.assertNotEqual(User.objects.get(pk=userPK).username,\n 'company1NewTest2')", "def test_put_non_owner(self):\n another_user = CustomUser.objects.create(id=1067, email='another_user1@mail.com', is_active=True)\n another_user.set_password('testpassword')\n another_user.save()\n\n self.client.login(email='another_user1@mail.com', password='testpassword')\n\n data = {\n 'week_day': 3\n }\n\n url = reverse('notification',\n kwargs={'way_id': self.notification.way_id, 'notification_id': self.notification.id})\n response = self.client.put(url, json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')\n self.assertEqual(response.status_code, 403)", "def test_post_partial_update_logged_in(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n title = 'Random New Title'\n self.client.force_authenticate(user=self.user)\n response = self.client.patch(url, {'title': title}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def allowed_topologytemplate_access_create(user):\n return user.has_perm(\"vnswww.add_topologytemplate\")", "def test_post_partial_update_unauthorized(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n title = 'Random New Title'\n response = self.client.patch(url, 
{'title': title}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_profile_route_uses_right_templates(self):\n self.add_testuser()\n response = self.client.get(\"/profile/testuser/\")\n self.assertTemplateUsed(response, \"layout.html\")", "def test_get_all_accessible_by_hash_as_user_returns_global_template(self):\n mock_request = create_mock_request(user=self.user1)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.global_template.hash, request=mock_request\n )\n self.assertTrue(self.fixture.user1_template not in list(templates))\n self.assertTrue(self.fixture.user2_template not in list(templates))\n self.assertTrue(self.fixture.global_template in list(templates))", "def test_user_update_request(self):\n pass", "def test_set_display_name_other_users_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.set_display_name(\n self.fixture.user2_template, \"new_name\", request=mock_request\n )", "def test_set_display_name_own_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.set_display_name(\n self.fixture.user1_template, \"new_name\", request=mock_request\n )", "def test_team_template_folders_id_replace_post(self):\n pass", "def test_get_all_accessible_by_hash_list_as_superuser_returns_global_template(\n self,\n ):\n mock_request = create_mock_request(user=self.superuser1)\n templates = template_api.get_all_accessible_by_hash_list(\n [self.fixture.global_template.hash], request=mock_request\n )\n self.assertTrue(self.fixture.user1_template not in list(templates))\n self.assertTrue(self.fixture.user2_template not in list(templates))\n self.assertTrue(self.fixture.global_template in list(templates))", "def test_anonymous_user_update_user_taskrun(self):\r\n with self.flask_app.test_request_context('/'):\r\n user_taskrun = TaskRunFactory.create()\r\n\r\n assert_raises(Unauthorized,\r\n getattr(require, 'taskrun').update,\r\n user_taskrun)", "def test_team_template_folders_id_templates_fk_put(self):\n pass", "def test_security_on_put(self):\n # test the update url\n product = Product.objects.all()[0]\n url = '/product/xml/%s/' % product.item_number\n response = self.client.put(url,{'description':'my new description'})\n self.failUnlessEqual(response.status_code, 401)" ]
[ "0.8106673", "0.76909196", "0.7503215", "0.7482491", "0.7482349", "0.747998", "0.74361557", "0.7395664", "0.7269864", "0.72538686", "0.72130823", "0.7131496", "0.701815", "0.6556384", "0.65447783", "0.6527001", "0.64378923", "0.6381135", "0.6367743", "0.6320074", "0.63030666", "0.6258095", "0.62367326", "0.6224009", "0.6196587", "0.6176663", "0.613376", "0.6124334", "0.6122764", "0.6072628", "0.6047649", "0.60389596", "0.6021597", "0.6020519", "0.6005275", "0.59444076", "0.593967", "0.5931032", "0.5921061", "0.5919533", "0.5905131", "0.5861943", "0.58574116", "0.58571696", "0.5850721", "0.58443856", "0.58419764", "0.58417815", "0.58090436", "0.5803972", "0.57969123", "0.57815284", "0.57560885", "0.57560885", "0.5739967", "0.5734785", "0.57254946", "0.57253534", "0.57183385", "0.57023907", "0.5698989", "0.5697127", "0.56929207", "0.56921965", "0.5691365", "0.5691177", "0.5676087", "0.5667554", "0.56621933", "0.5660298", "0.5654399", "0.565059", "0.56503135", "0.56450206", "0.56446564", "0.564129", "0.56407344", "0.56364846", "0.5634533", "0.5634202", "0.5633714", "0.5632132", "0.56196743", "0.5618769", "0.5615608", "0.5605639", "0.5604393", "0.5602808", "0.55829954", "0.5563995", "0.55636865", "0.55623466", "0.55598336", "0.55559945", "0.5549932", "0.55391884", "0.553305", "0.5532311", "0.5528668", "0.5525273" ]
0.82474107
0
test upsert own template as staff saves
def test_upsert_own_template_as_staff_saves(self): mock_request = create_mock_request(user=self.staff_user1) template_api.upsert(self.fixture.user1_template, request=mock_request)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_upsert_global_template_as_staff_saves(self):\n mock_request = create_mock_request(user=self.staff_user1)\n template_api.upsert(self.fixture.global_template, request=mock_request)", "def test_upsert_own_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.upsert(self.fixture.user1_template, request=mock_request)", "def test_upsert_own_template_as_user_saves(self):\n mock_request = create_mock_request(user=self.user1)\n template_api.upsert(self.fixture.user1_template, request=mock_request)", "def test_upsert_global_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.upsert(self.fixture.global_template, request=mock_request)", "def test_upsert_other_users_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.upsert(self.fixture.user2_template, request=mock_request)", "def test_update_template_registration(self):\n pass", "def test_is_data_in_template_and_database_same(self):\n SimplePersonFactory.create()\n contact = Person.objects.last()\n self.assertEqual(check_content_in_template(contact), True)\n contact.name = \"test_is_data_in_template_and_database_same\"\n contact.save()\n self.assertEqual(check_content_in_template(contact), True)", "def test_update_activity_template(self):\n pass", "def test_upsert_other_users_template_as_staff_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.staff_user1)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.user2_template, request=mock_request\n )", "def test_delete_own_template_as_staff_saves(self):\n mock_request = create_mock_request(user=self.staff_user1)\n template_api.delete(self.fixture.user1_template, request=mock_request)", "def test_team_template_folders_id_templates_fk_put(self):\n pass", "def test_team_template_folders_id_put(self):\n pass", "def test_no_template(self):\n distillery = Distillery.objects.get_by_natural_key(\n 'mongodb.test_database.test_docs')\n try:\n distillery.save()\n except AttributeError:\n self.fail('put_template() raised AttributeError unexpectedly')", "def test_06_datastore_upsert(self, Mock):\r\n html_request = FakeRequest(json.dumps(self.task_upsert), 200,\r\n {'content-type': 'application/json'})\r\n\r\n record = dict(info=dict(foo=\"bar\"))\r\n Mock.return_value = html_request\r\n with self.flask_app.test_request_context('/'):\r\n out = self.ckan.datastore_upsert(name='task',\r\n records=json.dumps([record]),\r\n resource_id=self.task_resource_id)\r\n err_msg = \"It should return True\"\r\n assert out is True, err_msg\r\n # Check the error\r\n Mock.return_value = self.server_error\r\n try:\r\n self.ckan.datastore_upsert(name='task',\r\n records=json.dumps([record]),\r\n resource_id=self.task_resource_id)\r\n except Exception as out:\r\n type, msg, status_code = out.args\r\n assert \"Server Error\" in msg, msg\r\n assert 500 == status_code, status_code\r\n assert \"CKAN: the remote site failed! 
datastore_upsert failed\" == type, type", "def test_update_template_subscription(self):\n pass", "def test_team_template_folders_id_replace_post(self):\n pass", "def test_upsert_content_file(mocker):\n patched_task = mocker.patch(\"search.tasks.upsert_content_file\")\n content_file = ContentFileFactory.create()\n upsert_content_file(content_file.id)\n patched_task.assert_called_once_with(content_file.id)", "def test_collection_saved(self, mock_template):\n collection = Collection.objects.get_by_natural_key(\n 'elasticsearch', 'test_index', 'test_docs')\n collection.save()\n self.assertEqual(mock_template.call_count, 1)", "def test_distillery_saved(self, mock_template):\n distillery = Distillery.objects.get_by_natural_key(\n 'elasticsearch.test_index.test_docs')\n distillery.save()\n self.assertEqual(mock_template.call_count, 1)", "def upsert_location(self, location):", "def test_update_template_success(self):\n template_id = util.MOCK_UUID_1\n template_name = \"template-3\"\n\n rv = TEST_CLIENT.patch(\n f\"/templates/{template_id}\", json={\"name\": template_name}\n )\n result = rv.json()\n\n expected = {\n \"uuid\": template_id,\n \"name\": template_name,\n \"tasks\": [\n {\n \"uuid\": util.MOCK_UUID_1,\n \"task_id\": util.MOCK_UUID_1,\n \"dependencies\": [],\n \"position_x\": 0.0,\n \"position_y\": 0.0,\n }\n ],\n \"experimentId\": util.MOCK_UUID_1,\n \"deploymentId\": None,\n \"createdAt\": util.MOCK_CREATED_AT_1.isoformat(),\n \"updatedAt\": mock.ANY,\n }\n self.assertEqual(result, expected)\n self.assertEqual(rv.status_code, 200)", "def test_delete_global_template_as_staff_saves(self):\n mock_request = create_mock_request(user=self.staff_user1)\n template_api.delete(self.fixture.global_template, request=mock_request)", "def test_xml_template_set(self):\n xmlTemplateResult = XmlTemplate.objects.get(id=1)\n xmlTemplateResult.set(\"newTemplate\", '''<?xml >\n <project name=\"newTemplate\">\n </project> ''',)\n self.assertEqual(xmlTemplateResult.template_name, \"newTemplate\")\n self.assertEqual(xmlTemplateResult.template_content, '''<?xml >\n <project name=\"newTemplate\">\n </project> ''')", "def test_upsert_global_template_as_user_raises_access_control_error(self):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.global_template, request=mock_request\n )", "def test_upsert_user(self):\n db = database.Database()\n db.upsert_user('nick', 100, 100)\n\n the_args, _ = db._cursor.execute.call_args\n sql = the_args[0]\n expected_sql = 'INSERT INTO quota_violations (username, triggered, last_notified)\\n VALUES (%s, %s, %s)\\n ON CONFLICT (username)\\n DO UPDATE SET\\n (triggered, last_notified)\\n = (EXCLUDED.triggered, EXCLUDED.last_notified);\\n '\n\n self.assertEqual(sql, expected_sql)", "def test_register_template(self):\n pass", "def test_06_datastore_upsert_without_resource_id(self, Mock):\r\n html_request = FakeRequest(json.dumps(self.task_upsert), 200,\r\n {'content-type': 'application/json'})\r\n\r\n record = dict(info=dict(foo=\"bar\"))\r\n Mock.return_value = html_request\r\n with self.flask_app.test_request_context('/'):\r\n out = self.ckan.datastore_upsert(name='task',\r\n records=json.dumps([record]),\r\n resource_id=None)\r\n err_msg = \"It should return True\"\r\n assert out is True, err_msg\r\n # Check the error\r\n Mock.return_value = self.server_error\r\n try:\r\n self.ckan.datastore_upsert(name='task',\r\n records=json.dumps([record]),\r\n resource_id=self.task_resource_id)\r\n except 
Exception as out:\r\n type, msg, status_code = out.args\r\n assert \"Server Error\" in msg, msg\r\n assert 500 == status_code, status_code\r\n assert \"CKAN: the remote site failed! datastore_upsert failed\" == type, type", "def test_delete_own_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.delete(self.fixture.user1_template, request=mock_request)", "def test_set_display_name_own_template_as_staff_saves(self):\n mock_request = create_mock_request(user=self.staff_user1)\n template_api.set_display_name(\n self.fixture.user1_template, \"new_name\", request=mock_request\n )", "def test_collection_put(testapp, execute_counter):\n initial = {\n 'title': \"Testing\",\n 'type': \"object\", # include a non-required field\n 'description': \"This is the initial insert\",\n }\n item_url = testapp.post_json('/embedding-tests', initial).location\n\n with execute_counter.expect(1):\n item = testapp.get(item_url).json\n\n for key in initial:\n assert item[key] == initial[key]\n\n update = {\n 'title': \"New Testing\",\n 'type': \"object\",\n 'description': \"This is the updated insert\",\n }\n testapp.put_json(item_url, update, status=200)\n\n res = testapp.get('/' + item['uuid']).follow().json\n\n for key in update:\n assert res[key] == update[key]", "def test_exists_true(self):\n self.assertTrue(PrepTemplate.exists(1))", "def test_share_template_registration(self):\n pass", "def test_upsert_course(mocker):\n patched_task = mocker.patch(\"search.tasks.upsert_course\")\n course = CourseFactory.create()\n upsert_course(course.id)\n patched_task.assert_called_once_with(course.id)", "def test_upsert_other_users_template_as_user_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.user2_template, request=mock_request\n )", "def test_upsert_profile(mocker, mock_es_profile_serializer, user):\n patched_task = mocker.patch(\"search.search_index_helpers.tasks.upsert_profile\")\n upsert_profile(user.profile.id)\n patched_task.assert_called_once_with(user.profile.id)", "def test_upsert_user_list(mocker, list_type):\n patched_task = mocker.patch(\"search.tasks.upsert_user_list\")\n user_list = UserListFactory.create(list_type=list_type)\n upsert_user_list(user_list.id)\n patched_task.assert_called_once_with(user_list.id)", "def test_update(self):\n # creating a new sample template\n st = SampleTemplate.create(self.metadata, self.new_study)\n # updating the sample template\n st.update(self.metadata_dict_updated)\n\n # validating values\n exp = self.metadata_dict_updated_dict['Sample1'].values()\n obs = st.get('2.Sample1').values()\n self.assertItemsEqual(obs, exp)\n\n exp = self.metadata_dict_updated_dict['Sample2'].values()\n obs = st.get('2.Sample2').values()\n self.assertItemsEqual(obs, exp)\n\n exp = self.metadata_dict_updated_dict['Sample3'].values()\n obs = st.get('2.Sample3').values()\n self.assertItemsEqual(obs, exp)\n\n # checking errors\n with self.assertRaises(QiitaDBError):\n st.update(self.metadata_dict_updated_sample_error)\n with self.assertRaises(QiitaDBError):\n st.update(self.metadata_dict_updated_column_error)", "def test_create_template_subsciption(self):\n pass", "def test_update_record(self):\n pass", "def test_post_update_admin(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n user_url = reverse('user-detail', kwargs={'pk': self.superuser.id})\n 
self.client.force_authenticate(user=self.superuser)\n title = 'Random New Title'\n body = 'Random New Body'\n response = self.client.put(url, {'title': title, 'body': body, 'user': user_url}, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertIn(title, response.content)\n self.assertIn(body, response.content)\n self.assertIn(user_url, response.content)", "def test_update_subscription_template(self):\n pass", "def test_db(self):\n db.tests.insert_one({'name': 'test-name'})\n r = db.tests.find_one({'name': 'test-name'})\n self.assertEqual(r['name'], 'test-name')\n\n db.tests.insert_one({'_id': '_id', 'a': 'A', 'b': 'B', 'c': 'c'})", "def test_delete_own_template_as_user_saves(self):\n mock_request = create_mock_request(user=self.user1)\n template_api.delete(self.fixture.user1_template, request=mock_request)", "def test_workflows_id_templates_fk_put(self):\n pass", "def test_update_one(self):\n pass", "def test_update_case(self):\n pass", "def test_exists_true(self):\n self.assertTrue(SampleTemplate.exists(self.test_study.id))", "def test_update_template_given_name_already_exists(self):\n template_id = util.MOCK_UUID_1\n template_name = util.MOCK_TEMPLATE_NAME_2\n rv = TEST_CLIENT.patch(\n f\"/templates/{template_id}\", json={\"name\": template_name}\n )\n result = rv.json()\n\n expected = {\n \"message\": \"a template with that name already exists\",\n \"code\": \"TemplateNameExists\",\n }\n self.assertDictEqual(expected, result)\n self.assertEqual(rv.status_code, 400)", "def test_retrieve_template_registration(self):\n pass", "def test_team_template_folders_id_templates_post(self):\n pass", "def test_update(self):\n payload = {\n 'id': self.rout1.id,\n 'name': 'Tuesday routine',\n 'exercises': [self.exer1.id]\n }\n response = self.client.put(\n '/routines/{}/'.format(self.rout1.id), data=payload)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(\n Routine.objects.get(id=self.rout1.id).name, payload['name'])", "def test_partial_update(self):\n self.client.force_authenticate(user=self.admin)\n\n data = {\n 'retreat': reverse(\n 'retreat:retreat-detail', args=[self.retreat.id]\n ),\n 'user': reverse('user-detail', args=[self.user2.id]),\n }\n\n response = self.client.put(\n reverse(\n 'retreat:waitqueuenotification-detail',\n kwargs={'pk': 1},\n ),\n data,\n format='json',\n )\n\n self.assertEqual(\n response.status_code,\n status.HTTP_405_METHOD_NOT_ALLOWED\n )", "def test_upsert_video(mocker):\n patched_task = mocker.patch(\"search.tasks.upsert_video\")\n video = VideoFactory.create()\n upsert_video(video.id)\n patched_task.assert_called_once_with(video.id)", "def test_customization_template_crud():\n template_crud = pxe.CustomizationTemplate(\n name=generate_random_string(size=8),\n description=generate_random_string(size=16),\n image_type='RHEL-6',\n script_type='Kickstart',\n script_data='Testing the script')\n\n template_crud.create()\n with update(template_crud):\n template_crud.name = template_crud.name + \"_update\"\n template_crud.delete(cancel=False)", "def test_customization_template_crud():\n template_crud = pxe.CustomizationTemplate(\n name=generate_random_string(size=8),\n description=generate_random_string(size=16),\n image_type='RHEL-6',\n script_type='Kickstart',\n script_data='Testing the script')\n\n template_crud.create()\n with update(template_crud):\n template_crud.name = template_crud.name + \"_update\"\n template_crud.delete(cancel=False)", "def upsert(self):\n\n if self.cluster:\n 
self.cluster.upsert()\n else:\n super().upsert()", "def test_update_collection(self):\n pass", "def test_post_partial_update_admin(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n title = 'Random New Title Patched'\n user_url = reverse('user-detail', kwargs={'pk': self.superuser.id})\n self.client.force_authenticate(user=self.superuser)\n response = self.client.patch(url, {'title': title}, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertIn(title, response.content)\n self.assertIn(user_url, response.content)", "def test_exists_false(self):\n self.assertFalse(SampleTemplate.exists(self.new_study.id))", "def tests_ti_document_update(self, request: FixtureRequest):\n super().group_update(request)", "def test_set_display_name_global_template_as_staff_saves(self):\n mock_request = create_mock_request(user=self.staff_user1)\n template_api.set_display_name(\n self.fixture.global_template, \"new_name\", request=mock_request\n )", "def test_update_update_has_a_value(self):\n self.Person.drop_collection()\n\n author = self.Person.objects.create(name=\"Test User\")\n\n with pytest.raises(OperationError):\n self.Person.objects(pk=author.pk).update({})\n\n with pytest.raises(OperationError):\n self.Person.objects(pk=author.pk).update_one({})", "def test_interest_save(self):\n user1 = get_user_model().objects.get(username='test1@example.com')\n self.client.login(username='test1@example.com', password='1')\n\n office_list = OfficeLocation.objects.all()\n org_list = OrgGroup.objects.filter(parent__isnull=True)\n\n submission1 = Interest()\n submission1.owner = user1\n submission1.for_coffee = True\n submission1.initial_save(locations=office_list, departments=org_list)\n\n self.assertNotEqual(submission1.id, None)\n self.assertEqual(submission1.locations.count(), len(office_list))\n self.assertEqual(submission1.departments.count(), len(org_list))\n self.assertEqual(submission1.match, None)\n\n user2 = random_user()\n submission2 = Interest()\n submission2.owner = user2\n submission2.for_coffee = True\n submission2.save()\n submission2.locations.add(office_list[0])\n submission2.departments.add(org_list[0])\n submission2.save()\n\n\n self.assertNotEqual(submission2.id, None)\n self.assertEqual(submission2.locations.count(), 1)\n self.assertEqual(submission2.departments.count(), 1)\n self.assertEqual(submission2.match, submission1)\n submission1 = Interest.objects.get(id=submission1.id) # refresh\n self.assertEqual(submission1.match, submission2)", "def test_update(self):\n self.my_task.key = self.task_storage.add(self.my_task)\n\n self.my_task.title = 'foo'\n key = self.task_storage.update(self.my_task)\n new_task = self.task_storage.find(key)\n\n self.assertEqual(self.my_task, new_task)", "def test_delete_other_users_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.delete(self.fixture.user2_template, request=mock_request)", "def test_partial_update_services(self):\n\n services = sample_services(user=self.user)\n services.tags.add(sample_tag(user=self.user))\n new_tag = sample_tag(user=self.user, name='Transformer')\n\n payload = {'title' : 'sample service job' , 'tags' : [new_tag.id]}\n url = detail_url(services.id)\n self.client.patch(url, payload)\n\n services.refresh_from_db()\n\n self.assertEqual(services.title, payload['title'])\n tags = services.tags.all()\n\n self.assertEqual(len(tags), 1)\n self.assertIn(new_tag, tags)", "def test_team_template_folders_post(self):\n pass", "def 
test_upsert_user_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.user1_template, request=mock_request\n )", "def test_post_update_regular_user(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n user_url = reverse('user-detail', kwargs={'pk': self.superuser.id})\n self.client.force_authenticate(user=self.user)\n title = 'Random New Title'\n body = 'Random New Body'\n response = self.client.put(url, {'title': title, 'body': body, 'user': user_url}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_prep_new_data(self):\n pass", "def test_upsert(self):\n my_conn = MySQL(*self.conn_params)\n Base = declarative_base()\n current_dir = os.path.dirname(os.path.abspath(__file__))\n\n # IMPORTANT: table logic reuse pattern with mixins\n class PmhMixin(object):\n \"\"\"Auxiliary sqlalchemy table model for the tests.\"\"\"\n\n __tablename__ = 'pmh'\n\n id = Column(Integer, primary_key=True)\n anno = Column(Integer)\n cpro = Column(Integer)\n cmun = Column(Integer)\n csexo = Column(Integer)\n sexo = Column(String(20))\n orden_gredad = Column(Integer)\n gredad = Column(String(30))\n personas = Column(Integer)\n codigo_ine = Column(String(50))\n\n class Pmh(Base, PmhMixin):\n \"\"\"Auxiliary sqlalchemy table model for the tests.\"\"\"\n\n __tablename__ = 'pmh'\n\n class PmhTmp(Base, PmhMixin):\n \"\"\"Auxiliary sqlalchemy table model for the tests.\"\"\"\n\n __tablename__ = 'tmp_pmh'\n\n # table to update/insert\n Pmh.__table__.create(bind=my_conn.engine)\n data = pd.read_csv(f'''{current_dir}/pmh.csv''')\n data.name = 'pmh'\n my_conn.insert(data, if_exists='append')\n # https://github.com/PyCQA/pylint/issues/1161\n # there's an issue with pylint and pandas read methods.\n original_table = pd.DataFrame(\n pd.read_sql_table(data.name, my_conn.conn_string))\n\n # temporary table with data to update/insert\n PmhTmp.__table__.create(bind=my_conn.engine)\n tmp_data = pd.read_csv(f'''{current_dir}/pmh_update.csv''')\n tmp_data.name = 'tmp_pmh'\n\n sql = f'''INSERT INTO pmh\n (id, anno, cpro, cmun, csexo, sexo, orden_gredad, gredad,\n personas, codigo_ine)\n SELECT *\n FROM tmp_pmh\n ON DUPLICATE KEY UPDATE\n id = tmp_pmh.id,\n anno = tmp_pmh.anno,\n cpro = tmp_pmh.cpro,\n cmun = tmp_pmh.cmun,\n csexo = tmp_pmh.csexo,\n orden_gredad = tmp_pmh.orden_gredad,\n gredad = tmp_pmh.gredad,\n personas = tmp_pmh.personas,\n codigo_ine = tmp_pmh.codigo_ine;'''\n expected = 76\n current = original_table.loc[\n original_table['id'] == 5192]['personas'].tolist()[0]\n self.assertEqual(current, expected)\n my_conn.upsert(tmp_data, data.name, sql, if_exists='append')\n updated_table = pd.DataFrame(\n pd.read_sql_table(data.name, my_conn.conn_string))\n expected = 9976\n current = updated_table.loc[\n updated_table['id'] == 5192]['personas'].tolist()[0]\n self.assertEqual(current, expected)\n expected = 46\n current = updated_table.loc[\n updated_table['id'] == 30001]['cmun'].tolist()[0]\n my_conn.drop(data.name)", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_upsert_podcast(mocker):\n patched_task = mocker.patch(\"search.tasks.upsert_podcast\")\n podcast = PodcastFactory.create()\n upsert_podcast(podcast.id)\n patched_task.assert_called_once_with(podcast.id)", "async def test_update(self):\n await self.collection.create({'id': 'foo', 'value': 
'bar'})\n updated = await self.resource.update('foo', {'value': 'new'})\n self.assertEqual({'id': 'foo', 'value': 'new'}, updated)", "def test_upsert_user_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.user1_template, request=mock_request\n )", "def test_update_scenario(self):\n pass", "def test_set_display_name_own_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.set_display_name(\n self.fixture.user1_template, \"new_name\", request=mock_request\n )", "def test_partial_update_recipe(self):\n recipe = sample_recipe(user=self.user)\n recipe.tag.add(sample_tag(user=self.user))\n recipe.ingredient.add(sample_ingredient(user=self.user))\n new_tag = sample_tag(user=self.user,name='curry')\n payload = {\n 'title':'chicken tikka recipe',\n 'tag' : [new_tag.id]\n }\n url = detail_url(recipe.id)\n res = self.client.patch(url,payload)\n recipe.refresh_from_db();\n self.assertEqual(recipe.title,payload['title'])\n self.assertEqual(len(recipe.tag.all()),1)\n self.assertIn(new_tag,recipe.tag.all())", "def test_update_template_not_found(self):\n template_id = \"foo\"\n\n rv = TEST_CLIENT.patch(f\"/templates/{template_id}\", json={})\n result = rv.json()\n\n expected = {\n \"message\": \"The specified template does not exist\",\n \"code\": \"TemplateNotFound\",\n }\n self.assertDictEqual(expected, result)\n self.assertEqual(rv.status_code, 404)", "def save(self, *args, **kwargs):\n if self.pk is None:\n if not self.name.startswith(TEMPLATE_PREFIX):\n self.name = f'{TEMPLATE_PREFIX}{self.name}'\n super(Template, self).save(*args, **kwargs)", "def test_exists_false(self):\n self.assertFalse(PrepTemplate.exists(2))", "def insert(template_version_manager, template, request):\n # save the template in database\n template_api.upsert(template)\n try:\n # insert the initial template in the version manager\n version_manager_api.insert_version(\n template_version_manager, template, request=request\n )\n # insert the version manager in database\n version_manager_api.upsert(template_version_manager, request=request)\n # get template display name\n display_name = get_latest_version_name(template_version_manager)\n # update saved template\n main_template_api.set_display_name(template, display_name, request=request)\n # return version manager\n return template_version_manager\n except Exception as e:\n main_template_api.delete(template, request=request)\n raise e", "def test_update_user(self):\n pass", "def test_add_or_update_case(self):\n pass", "def test_replace_user(self):\n pass", "def upsert(self, obj):\r\n url = '{0}/upsert'.format(self.get_url())\r\n request = http.Request('PUT', url, self.wrap_object(obj))\r\n\r\n return request, parsers.parse_empty", "def test_exists_true(self):\n self.assertTrue(PrepSample.exists(self.sample_id, self.prep_template))", "def test_create_activity_template(self):\n pass", "def test_put_db_fail(self):\n test_data = {\n 'first_name': 'new_first_name',\n 'last_name': 'new_last_name'\n }\n with mock.patch('user_profile.models.UserProfile.update') as update:\n update.return_value = False\n response = self.client.put(self.url, json.dumps(test_data), content_type='application/json')\n self.assertEquals(response.status_code, 400)", "def test_delete_global_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n 
template_api.delete(self.fixture.global_template, request=mock_request)", "def test_upsert_program(mocker):\n patched_task = mocker.patch(\"search.tasks.upsert_program\")\n program = ProgramFactory.create()\n upsert_program(program.id)\n patched_task.assert_called_once_with(program.id)", "def test_client_verification_document_update(self):\n pass", "def post(self):\n self.reqparse.add_argument('templateName', type=str, required=True)\n self.reqparse.add_argument('template', type=str, required=True)\n args = self.reqparse.parse_args()\n\n template = db.Template.find_one(template_name=args['templateName'])\n if template:\n return self.make_response('Template already exists, update the existing template instead', HTTP.CONFLICT)\n\n template = Template()\n template.template_name = args['templateName']\n template.template = args['template']\n\n db.session.add(template)\n db.session.commit()\n auditlog(event='template.create', actor=session['user'].username, data=args)\n\n return self.make_response('Template {} has been created'.format(template.template_name), HTTP.CREATED)", "def test_partial_update_recipe(self):\n recipe = sample_recipe(user=self.user)\n recipe.tags.add(sample_tag(user=self.user))\n new_tag = sample_tag(user=self.user, name= 'curry')\n\n payload = {\n 'title':'chicken tikka', 'tags':[new_tag.id]\n\n }\n url = detail_url(recipe.id)\n\n self.client.patch(url, payload)\n\n recipe.refresh_from_db()\n self.assertEqual(recipe.title, payload['title'])\n\n tags = recipe.tags.all()\n self.assertEqual(len(tags), 1)\n self.assertIn(new_tag, tags)", "def test_save(self):", "def test_save(self):", "def test_meeting_update(self):\n pass", "def test_upsert_w_returning(self, connection):\n\n data = self.tables.data\n\n initial_data = [\n {\"x\": \"x1\", \"y\": \"y1\", \"z\": 4},\n {\"x\": \"x2\", \"y\": \"y2\", \"z\": 8},\n ]\n ids = connection.scalars(\n data.insert().returning(data.c.id), initial_data\n ).all()\n\n upsert_data = [\n {\n \"id\": ids[0],\n \"x\": \"x1\",\n \"y\": \"y1\",\n },\n {\n \"id\": 32,\n \"x\": \"x19\",\n \"y\": \"y7\",\n },\n {\n \"id\": ids[1],\n \"x\": \"x5\",\n \"y\": \"y6\",\n },\n {\n \"id\": 28,\n \"x\": \"x9\",\n \"y\": \"y15\",\n },\n ]\n\n stmt = provision.upsert(\n config,\n data,\n (data,),\n set_lambda=lambda inserted: {\"x\": inserted.x + \" upserted\"},\n )\n\n result = connection.execute(stmt, upsert_data)\n\n eq_(\n result.all(),\n [\n (ids[0], \"x1 upserted\", \"y1\", 4),\n (32, \"x19\", \"y7\", 5),\n (ids[1], \"x5 upserted\", \"y2\", 8),\n (28, \"x9\", \"y15\", 5),\n ],\n )" ]
[ "0.8371305", "0.80961037", "0.8029739", "0.7766117", "0.7704741", "0.70811933", "0.6713066", "0.648494", "0.64786816", "0.6461537", "0.6387213", "0.638419", "0.63225466", "0.6304995", "0.63047177", "0.62928706", "0.62527716", "0.6247133", "0.6220966", "0.62020767", "0.6166314", "0.6128568", "0.61061525", "0.6092912", "0.6081847", "0.6068162", "0.6007954", "0.5989855", "0.5960996", "0.5954581", "0.5943567", "0.59381497", "0.5925859", "0.5889738", "0.5889589", "0.5888746", "0.5885714", "0.58777475", "0.58747137", "0.58663076", "0.5839641", "0.5833316", "0.5832994", "0.5826289", "0.5815017", "0.58069867", "0.58018315", "0.57827854", "0.5762042", "0.5761437", "0.5755084", "0.5744043", "0.5740082", "0.57345545", "0.57345545", "0.571557", "0.5715087", "0.570705", "0.5704829", "0.56901", "0.56894153", "0.5687886", "0.5677415", "0.56753063", "0.5666371", "0.5655381", "0.564289", "0.56413525", "0.56283337", "0.5623507", "0.5620498", "0.5612554", "0.5612554", "0.5612554", "0.56088334", "0.5607579", "0.5602823", "0.5602004", "0.55998445", "0.55952525", "0.55797845", "0.5567875", "0.55677474", "0.5564927", "0.55635303", "0.55624676", "0.5561243", "0.55564106", "0.5553106", "0.55448043", "0.5543613", "0.55424815", "0.55356294", "0.5534295", "0.55340904", "0.5527517", "0.55226314", "0.55226314", "0.5520713", "0.55032647" ]
0.85387015
0
test upsert other users template as staff raises access control error
def test_upsert_other_users_template_as_staff_raises_access_control_error( self, ): mock_request = create_mock_request(user=self.staff_user1) with self.assertRaises(AccessControlError): template_api.upsert( self.fixture.user2_template, request=mock_request )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_upsert_other_users_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.upsert(self.fixture.user2_template, request=mock_request)", "def test_upsert_own_template_as_staff_saves(self):\n mock_request = create_mock_request(user=self.staff_user1)\n template_api.upsert(self.fixture.user1_template, request=mock_request)", "def test_upsert_own_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.upsert(self.fixture.user1_template, request=mock_request)", "def test_upsert_other_users_template_as_user_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.user2_template, request=mock_request\n )", "def test_upsert_own_template_as_user_saves(self):\n mock_request = create_mock_request(user=self.user1)\n template_api.upsert(self.fixture.user1_template, request=mock_request)", "def test_upsert_global_template_as_staff_saves(self):\n mock_request = create_mock_request(user=self.staff_user1)\n template_api.upsert(self.fixture.global_template, request=mock_request)", "def test_upsert_global_template_as_user_raises_access_control_error(self):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.global_template, request=mock_request\n )", "def test_upsert_global_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.upsert(self.fixture.global_template, request=mock_request)", "def test_upsert_user_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.user1_template, request=mock_request\n )", "def test_upsert_user_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.user1_template, request=mock_request\n )", "def test_upsert_global_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.global_template, request=mock_request\n )", "def test_get_other_users_template_as_staff_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.staff_user1)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user2_template.id, request=mock_request\n )", "def test_not_creator_cannot_update(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url,\n expected_url=expected_url,\n data=self.data)", "def test_upsert_global_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.global_template, request=mock_request\n )", "def test_update_template_registration(self):\n pass", "def test_set_display_name_other_users_template_as_staff_raises_access_control_error(\n self,\n ):\n mock_request = 
create_mock_request(user=self.staff_user1)\n with self.assertRaises(AccessControlError):\n template_api.set_display_name(\n self.fixture.user2_template, \"new_name\", request=mock_request\n )", "def test_delete_other_users_template_as_staff_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.staff_user1)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.user2_template, request=mock_request\n )", "def test_delete_other_users_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.delete(self.fixture.user2_template, request=mock_request)", "def test_replace_user(self):\n pass", "def test_delete_own_template_as_staff_saves(self):\n mock_request = create_mock_request(user=self.staff_user1)\n template_api.delete(self.fixture.user1_template, request=mock_request)", "def test_post_update_regular_user(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n user_url = reverse('user-detail', kwargs={'pk': self.superuser.id})\n self.client.force_authenticate(user=self.user)\n title = 'Random New Title'\n body = 'Random New Body'\n response = self.client.put(url, {'title': title, 'body': body, 'user': user_url}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_delete_own_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.delete(self.fixture.user1_template, request=mock_request)", "def test_update_by_non_owner(self):\n # User 1\n saved1 = self.create_article()\n article_url = saved1[0]\n # get user2 details\n token = self.create_article_user2()\n response = self.test_client.put(article_url,self.article_update_data, format='json', HTTP_AUTHORIZATION=token)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_set_display_name_other_users_template_as_user_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.set_display_name(\n self.fixture.user2_template, \"new_name\", request=mock_request\n )", "def test_admin_cannot_update_non_existant_user(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Lover',\n username='lover',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.put(\n '/api/v1/users/5',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], \"This user doesn't exist!\")\n self.assertEqual(resp.status_code, 400)", "def test_not_creator_cannot_update_tab(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n expected_url = reverse('group_view', args=(self.group.pk,))\n\n utils.test_cannot_access(self, self.url, expected_url, self.data)", "def test_update_user(self):\n pass", "async def test_rendering_template_admin(\n hass: HomeAssistant, mock_api_client: TestClient, hass_admin_user: MockUser\n) -> None:\n hass_admin_user.groups = []\n resp = await mock_api_client.post(const.URL_API_TEMPLATE)\n assert resp.status == HTTPStatus.UNAUTHORIZED", "def test_updateview_write_for_wrong_user(self):\n\n for user in self.users:\n updateview = reverse('account_update', args=(user.uuid,))\n other_users = self.users\n other_users.remove(user)\n random_user = 
random.choice(other_users)\n\n self.client.login(email=random_user.email, password='letmein')\n\n valid_data = {'email': user.email, 'first_name': user.first_name,\n 'last_name': user.last_name, 'language': user.language}\n invalid_data = valid_data.copy()\n invalid_data['email'] = 'invalid_email_address'\n valid_data_response = self.client.post(updateview, valid_data)\n invalid_data_response = self.client.post(updateview, invalid_data)\n\n self.assertEqual(valid_data_response.status_code, 403)\n self.assertEqual(invalid_data_response.status_code, 403)", "def test_update_self_fail(self):\n new_user = self.create_user('1')\n url = '/0/chefs/' + str(new_user.pk)\n\n headers = self.login()\n resp = self.client.put(url, **headers)\n self.assertInvalidCredentials(resp)", "def test_user_update_permissions(self):\n userPK = User.objects.get(username='c2e1').pk\n url = reverse('User-detail', kwargs={'pk': userPK})\n data = {'username': 'company1NewTest'}\n response = self.client.put(url, data, format='json')\n #This is 404 instead of 403 because there is no way to view a company\n #that you arent an employee of.\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n self.assertNotEqual(User.objects.get(pk=userPK).username,\n 'company1NewTest')\n data = {'username': 'company1NewTest2'}\n response = self.client.patch(url, data, format='json')\n #This is 404 instead of 403 because there is no way to view a company\n #that you arent an employee of.\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n self.assertNotEqual(User.objects.get(pk=userPK).username,\n 'company1NewTest2')", "def test_post_update_unauthorized(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n user_url = reverse('user-detail', kwargs={'pk': self.superuser.id})\n title = 'Random New Title'\n body = 'Random New Body'\n response = self.client.put(url, {'title': title, 'body': body, 'user': user_url}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_get_all_accessible_by_hash_as_superuser_returns_other_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.superuser1)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.user2_template.hash, request=mock_request\n )\n self.assertTrue(self.fixture.user1_template not in list(templates))\n self.assertTrue(self.fixture.user2_template in list(templates))\n self.assertTrue(self.fixture.global_template not in list(templates))", "def test_update_activity_template(self):\n pass", "def test_get_other_users_template_raises_access_control_error(self):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user2_template.id, request=mock_request\n )", "def test_delete_other_users_template_as_user_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.user2_template, request=mock_request\n )", "def test_delete_global_template_as_staff_saves(self):\n mock_request = create_mock_request(user=self.staff_user1)\n template_api.delete(self.fixture.global_template, request=mock_request)", "def test_set_display_name_global_template_as_user_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.set_display_name(\n self.fixture.global_template, \"new_name\", 
request=mock_request\n )", "def test_delete_own_template_as_user_saves(self):\n mock_request = create_mock_request(user=self.user1)\n template_api.delete(self.fixture.user1_template, request=mock_request)", "def test_update_another_user(self):\n user1_response = self.client.post(reverse('user-list'), {\n 'username': 'aseem', 'password': 'passwrodaosida123'\n })\n update_user_resp = self.client.patch(\n reverse('user-list') + '1/', {\n 'username': 'rakesh', 'password': 'passwrodaosida123'\n })\n\n self.assertEqual(update_user_resp.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_get_all_as_staff_returns_accessible_templates(self):\n mock_request = create_mock_request(user=self.staff_user)\n templates = template_api.get_all(request=mock_request)\n self.assertEqual(templates.count(), 2)\n self.assertTrue(self.fixture.user2_template in list(templates))\n self.assertTrue(self.fixture.global_template in list(templates))", "def test_update_privileges_fails(self):\n user = User.create(name='foo', email='foo@bar.com', user_type='user',\n owned_teams=[], owned_organizations=[])\n user.put()\n\n # You get a 200, but the changes you requested don't happen.\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'user_type': 'super_admin', 'owned_teams': ['Team_foo'],\n 'owned_organizations': ['Organization_foo']},\n headers=self.login_headers(user),\n )\n user_dict = json.loads(response.body)\n self.assertEqual(user.user_type, user_dict['user_type'])\n self.assertEqual(user.owned_teams, user_dict['owned_teams'])\n self.assertEqual(user.owned_organizations,\n user_dict['owned_organizations'])\n\n # Also not changed in the db.\n fetched_user = User.get_by_id(user.uid)\n self.assertEqual(user.user_type, fetched_user.user_type)\n self.assertEqual(user.owned_teams, fetched_user.owned_teams)\n self.assertEqual(user.owned_organizations,\n fetched_user.owned_organizations)", "def test_get_all_accessible_by_hash_as_staff_does_not_return_other_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.staff_user1)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.user2_template.hash, request=mock_request\n )\n self.assertTrue(self.fixture.user1_template not in list(templates))\n self.assertTrue(self.fixture.user2_template not in list(templates))\n self.assertTrue(self.fixture.global_template not in list(templates))", "def test_set_display_name_other_users_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.set_display_name(\n self.fixture.user2_template, \"new_name\", request=mock_request\n )", "def test_put_non_owner(self):\n another_user = CustomUser.objects.create(id=1067, email='another_user1@mail.com', is_active=True)\n another_user.set_password('testpassword')\n another_user.save()\n\n self.client.login(email='another_user1@mail.com', password='testpassword')\n\n data = {\n 'week_day': 3\n }\n\n url = reverse('notification',\n kwargs={'way_id': self.notification.way_id, 'notification_id': self.notification.id})\n response = self.client.put(url, json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')\n self.assertEqual(response.status_code, 403)", "def test_share_template_registration(self):\n pass", "def test_get_all_accessible_by_hash_list_as_superuser_returns_other_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.superuser1)\n templates = template_api.get_all_accessible_by_hash_list(\n [self.fixture.user2_template.hash], request=mock_request\n 
)\n self.assertTrue(self.fixture.user1_template not in list(templates))\n self.assertTrue(self.fixture.user2_template in list(templates))\n self.assertTrue(self.fixture.global_template not in list(templates))", "def test_patch_user(self):\n pass", "def test_get_any_template_as_superuser_returns_template(self):\n mock_request = create_mock_request(user=self.superuser1)\n template = template_api.get_by_id(\n self.fixture.user1_template.id, request=mock_request\n )\n self.assertEqual(template, self.fixture.user1_template)\n template = template_api.get_by_id(\n self.fixture.user2_template.id, request=mock_request\n )\n self.assertEqual(template, self.fixture.user2_template)\n template = template_api.get_by_id(\n self.fixture.global_template.id, request=mock_request\n )\n self.assertEqual(template, self.fixture.global_template)", "def test_get_all_accessible_by_hash_as_staff_returns_user_template(self):\n mock_request = create_mock_request(user=self.staff_user1)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.user1_template.hash, request=mock_request\n )\n self.assertTrue(self.fixture.user1_template in list(templates))\n self.assertTrue(self.fixture.user2_template not in list(templates))\n self.assertTrue(self.fixture.global_template not in list(templates))", "def test_team_template_folders_id_templates_fk_put(self):\n pass", "def test_update(self):\n user = self.custodian_1_user\n user_client = self.custodian_1_client\n urls = [reverse('api:user-detail', kwargs={'pk': user.pk})]\n new_first_name = \"New First Name\"\n data = {\n \"first_name\": new_first_name,\n }\n access = {\n \"forbidden\": [self.anonymous_client, self.readonly_client, self.custodian_2_client],\n \"allowed\": [self.admin_client, user_client]\n }\n\n for client in access['forbidden']:\n for url in urls:\n self.assertIn(\n client.patch(url, data, format='json').status_code,\n [status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]\n )\n\n for client in access['allowed']:\n for url in urls:\n new_first_name += '1'\n data['first_name'] = new_first_name\n self.assertEqual(\n client.patch(url, data, format='json').status_code,\n status.HTTP_200_OK\n )\n user.refresh_from_db()\n self.assertEqual(user.first_name, new_first_name)", "def test_upsert_user(self):\n db = database.Database()\n db.upsert_user('nick', 100, 100)\n\n the_args, _ = db._cursor.execute.call_args\n sql = the_args[0]\n expected_sql = 'INSERT INTO quota_violations (username, triggered, last_notified)\\n VALUES (%s, %s, %s)\\n ON CONFLICT (username)\\n DO UPDATE SET\\n (triggered, last_notified)\\n = (EXCLUDED.triggered, EXCLUDED.last_notified);\\n '\n\n self.assertEqual(sql, expected_sql)", "def test_update_template_subscription(self):\n pass", "def test_get_all_accessible_by_hash_list_as_staff_does_not_return_other_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.staff_user1)\n templates = template_api.get_all_accessible_by_hash_list(\n [self.fixture.user2_template.hash], request=mock_request\n )\n self.assertTrue(self.fixture.user1_template not in list(templates))\n self.assertTrue(self.fixture.user2_template not in list(templates))\n self.assertTrue(self.fixture.global_template not in list(templates))", "def test_modify_userid_404(self):\n resp = self.app.put('/users/thisuserdoesntexist',\n data=json.dumps(self.test_user1_data))\n assert resp.status_code == 404", "def test_post_update_admin(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n user_url = reverse('user-detail', kwargs={'pk': 
self.superuser.id})\n self.client.force_authenticate(user=self.superuser)\n title = 'Random New Title'\n body = 'Random New Body'\n response = self.client.put(url, {'title': title, 'body': body, 'user': user_url}, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertIn(title, response.content)\n self.assertIn(body, response.content)\n self.assertIn(user_url, response.content)", "def test_user_not_in_group_cannot_update(self):\n\n utils.create_user_and_authenticate(self)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url,\n expected_url=expected_url,\n data=self.data)", "def test_delete_global_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.delete(self.fixture.global_template, request=mock_request)", "def test_team_template_folders_id_put(self):\n pass", "def test_updateview_read_for_wrong_user(self):\n\n for user in self.users:\n updateview = reverse('account_update', args=(user.uuid,))\n other_users = self.users\n other_users.remove(user)\n random_user = random.choice(other_users)\n\n self.client.login(email=random_user.email, password='letmein')\n\n response = self.client.get(updateview)\n\n self.assertEqual(response.status_code, 403)", "def test_get_all_accessible_by_hash_as_superuser_returns_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.superuser1)\n templates = template_api.get_all_accessible_by_hash(\n self.fixture.user1_template.hash, request=mock_request\n )\n self.assertTrue(self.fixture.user1_template in list(templates))\n self.assertTrue(self.fixture.user2_template not in list(templates))\n self.assertTrue(self.fixture.global_template not in list(templates))", "def test_get_all_accessible_by_hash_list_as_staff_returns_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.staff_user1)\n templates = template_api.get_all_accessible_by_hash_list(\n [self.fixture.user1_template.hash], request=mock_request\n )\n self.assertTrue(self.fixture.user1_template in list(templates))\n self.assertTrue(self.fixture.user2_template not in list(templates))\n self.assertTrue(self.fixture.global_template not in list(templates))", "def test_set_display_name_own_template_as_staff_saves(self):\n mock_request = create_mock_request(user=self.staff_user1)\n template_api.set_display_name(\n self.fixture.user1_template, \"new_name\", request=mock_request\n )", "def test_set_display_name_user_template_as_anonymous_with_access_right_access_raises_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.set_display_name(\n self.fixture.user1_template, \"new_name\", request=mock_request\n )", "def test_set_display_name_own_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.set_display_name(\n self.fixture.user1_template, \"new_name\", request=mock_request\n )", "def test_update_the_created_user():\n pytest.test_user.name += \"Updated\"\n response = api_helper.update_user(pytest.test_user)\n assert response.status_code == 200", "def test_user_can_change_superuser(self):\n self.assertTrue(self.story.user_can_change(self.superuser))", "def test_unshare_template_registration(self):\n pass", "def test_user_not_in_group_cannot_update_tab(self):\n\n utils.create_user_and_authenticate(self)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url, 
expected_url, self.data)", "def test_mailpiece_put_permissions(self):\n userPK = User.objects.get(username='c2e1').pk\n mailPiecePK = MailPiece.objects.filter(user=userPK)[0].pk\n url = reverse('MailPiece-detail', kwargs={'pk': mailPiecePK})\n self.data['user'] = userPK\n response = self.client.put(url, self.data, format='json')\n #This is 404 instead of 403 because there is no way to view a mail piece\n #that you arent the user on.\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n self.assertNotEqual(MailPiece.objects.get(pk=mailPiecePK).user,\n self.data['user'])", "def test_profile_route_uses_right_templates(self):\n self.add_testuser()\n response = self.client.get(\"/profile/testuser/\")\n self.assertTemplateUsed(response, \"layout.html\")", "def test_user_update_request(self):\n pass", "def test_partial_update(self):\n self.client.force_authenticate(user=self.admin)\n\n data = {\n 'retreat': reverse(\n 'retreat:retreat-detail', args=[self.retreat.id]\n ),\n 'user': reverse('user-detail', args=[self.user2.id]),\n }\n\n response = self.client.put(\n reverse(\n 'retreat:waitqueuenotification-detail',\n kwargs={'pk': 1},\n ),\n data,\n format='json',\n )\n\n self.assertEqual(\n response.status_code,\n status.HTTP_405_METHOD_NOT_ALLOWED\n )", "def test_update_useruser_uuid_put(self):\n pass", "def test_first_user_is_admin(self):\n user = User.objects.create(username='username', email='foo@bar.com')\n self.assertTrue(user.is_staff)\n self.assertTrue(user.is_superuser)\n user = User.objects.create(username='username2', email='foo@bar.com')\n self.assertFalse(user.is_staff)\n self.assertFalse(user.is_superuser)", "def test_groups_group_users_put(self):\n pass", "def test_groups_group_users_put(self):\n pass", "def test_post_partial_update_logged_in(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n title = 'Random New Title'\n self.client.force_authenticate(user=self.user)\n response = self.client.patch(url, {'title': title}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_create_template_subsciption(self):\n pass", "def test_alert_create_for_site_members(self):\n pass", "def test_tiers_update_permission_post(self):\n pass", "def test_api_user_put(self):\n pass", "def test_get_all_accessible_by_hash_list_as_superuser_returns_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.superuser1)\n templates = template_api.get_all_accessible_by_hash_list(\n [self.fixture.user1_template.hash], request=mock_request\n )\n self.assertTrue(self.fixture.user1_template in list(templates))\n self.assertTrue(self.fixture.user2_template not in list(templates))\n self.assertTrue(self.fixture.global_template not in list(templates))", "def test_get_all_accessible_by_id_list_as_staff_returns_accessible_templates(\n self,\n ):\n mock_request = create_mock_request(user=self.staff_user1)\n templates = template_api.get_all_accessible_by_id_list(\n self.template_id_list, request=mock_request\n )\n self.assertTrue(self.fixture.user1_template in list(templates))\n self.assertTrue(self.fixture.user2_template not in list(templates))\n self.assertTrue(self.fixture.global_template in list(templates))", "def test_no_template(self):\n distillery = Distillery.objects.get_by_natural_key(\n 'mongodb.test_database.test_docs')\n try:\n distillery.save()\n except AttributeError:\n self.fail('put_template() raised AttributeError unexpectedly')", "def test_api_video_create_staff_or_user(self):\n for user in 
[factories.UserFactory(), factories.UserFactory(is_staff=True)]:\n self.client.login(username=user.username, password=\"test\")\n response = self.client.post(\"/api/videos/\")\n self.assertEqual(response.status_code, 401)\n self.assertFalse(models.Video.objects.exists())", "def test_admin_approval_nonexistent_id(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n\n user = self.registration_profile.objects.admin_approve_user(\n profile.id, Site.objects.get_current())\n self.assertIs(user, False)", "def test_delete_global_template_as_user_raises_access_control_error(self):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.global_template, request=mock_request\n )", "def test_write(self):\n userEdited = self.env['res.users'].browse(\n self.user.id).write({'user_profile_id': self.user_profile2.id})\n self.assertEqual(userEdited, True)", "def test_modify(modify_requests):\n # 先创建用户\n admin1 = User(username=\"admin1\", email=\"CodeDance@mails.tsinghua.edu.cn\",\n password=\"123456\", is_admin=True, query_json=\"\")\n admin2 = User(username=\"admin2\", email=\"CodeDance@mails.tsinghua.edu.cn\",\n password=\"456789\", is_admin=True, query_json=\"\")\n admin3 = User(username=\"admin3\", email=\"CodeDance@mails.tsinghua.edu.cn\",\n password=\"456789\", is_admin=True, query_json=\"\")\n user1 = User(username=\"user1\", email=\"CodeDance@mails.tsinghua.edu.cn\",\n password=\"123456\", is_admin=False, query_json=\"\")\n user2 = User(username=\"user2\", email=\"CodeDance@mails.tsinghua.edu.cn\",\n password=\"0987654\", is_admin=False, query_json=\"\")\n # user3 = User(username=\"user3\", email=\"CodeDance@mails.tsinghua.edu.cn\",\n # password=\"qwerty\", is_admin=False, query_json=\"\")\n\n for user in [admin1, admin2, admin3, user1, user2]:\n user.full_clean()\n user.save()\n # 再创建文档\n document_exist_1 = Document(id=1, content=\"高温超导(High-temperature superconductivity,High Tc)\\\n 是一种物理现象,指一些具有较其他超导物质相对较高的临界温度的物质在液态氮的环境下产生的超导现象。\",\n status=0, title=\"超导现象\", src=0)\n document_exist_2 = Document(id=2, content=\"2020年新西兰大选(英语:2020 New Zealand general election),\\\n 即第53届新西兰国会选举于该年10月17日举行[1]。本届选举为新西兰自1996年采用混合议员比例代表制(联立单一选区两票制)\\\n 以来的第八次选举。此次大选与有关大麻和安乐死合法化的两个公投一并进行。结果工党取得过半数议席,有权单独执政,\\\n 为现有选举制度开始实施至今的首次。[2]\", status=0, title=\"新西兰大选\", src=0)\n document_exist_3 = Document(id=3, content=\"永州之野产异蛇,黑质而白章,触草木尽死。以啮人,无御之者。然得而腊之以为饵,\\\n 可以已大风、挛、瘘、疠,去死肌,杀三虫。其始,太医以王命聚之,岁赋其二,募有能捕之者,当其租入,永之人争奔走焉。\\\n 有蒋氏者,专其利三世矣。问之,则曰:“吾祖死于是,吾父死于是,今吾嗣为之十二年,几死者数矣。”言之,貌若甚戚者。\\\n 余〈杰按:通“予”,下同。〉悲之,且曰:“若毒之乎?余将告于莅是者,更若役,复若赋,则何如?”蒋氏大戚,\\\n 汪然出涕曰:“君将哀而生之乎?则吾斯役之不幸,未若复吾赋不幸之甚也。向吾不为斯役,则久已病矣。\\\n 自吾氏三世居是乡,积于今六十岁矣,而乡邻之生日蹙。殚其地之出,竭其庐之入,号呼而转徙,饥渴而顿踣,\\\n 触风雨,犯寒暑,呼嘘毒疠,往往而死者相藉也。曩与吾祖居者,今其室十无一焉;与吾父居者,今其室十无二三焉;\\\n 与吾居十二年者,今其室十无四五焉,非死即徙尔。而吾以捕蛇独存。悍吏之吾乡,叫嚣乎东西,隳突乎南北,哗然而骇者,虽鸡狗不得宁焉。\\\n 吾恂恂而起,视其缶,而吾蛇尚存,则弛然而卧。谨食之,时而献焉。退而甘食其土之有,以尽吾齿。盖一岁之犯死者二焉,其馀则熙熙而乐,\\\n 岂若吾乡邻之旦旦有是哉!今虽死乎此,比吾乡邻之则已后矣,又安敢毒耶?”余闻而愈悲。孔子曰:“苛政猛于虎也。”吾尝疑乎是,今以蒋氏观之,\\\n 犹信。呜呼!孰知赋敛之毒,有甚是蛇者乎!故为之说,以俟夫观人风者得焉。\", status=0, title=\"捕蛇者说\", src=0)\n document_deleted_4 = Document(id=4, content=\"采薇采薇,薇亦作止。曰归曰归,岁亦莫止。靡室靡家,玁狁之故。不遑启居,玁狁之故。\\\n 采薇采薇,薇亦柔止。曰归曰归,心亦忧止。忧心烈烈,载饥载渴。我戍未定,靡使归聘。\\\n 采薇采薇,薇亦刚止。曰归曰归,岁亦阳止。王事靡盬,不遑启处。忧心孔疚,我行不来!\\\n 彼尔维何?维常之华。彼路斯何?君子之车。戎车既驾,四牡业业。岂敢定居?一月三捷。\\\n 驾彼四牡,四牡骙骙。君子所依,小人所腓。四牡翼翼,象弭鱼服。岂不日戒?玁狁孔棘!\\\n 昔我往矣,杨柳依依。今我来思,雨雪霏霏。行道迟迟,载渴载饥。我心伤悲,莫知我哀!\", status=1, 
title=\"采薇\", src=0)\n document_deleted_5 = Document(id=5, content=\"氓之蚩蚩,抱布贸丝。匪来贸丝,来即我谋。\\\n 送子涉淇,至于顿丘。匪我愆期,子无良媒。将子无怒,秋以为期。\\\n  乘彼垝垣,以望复关。不见复关,泣涕涟涟。既见复关,载笑载言。尔卜尔筮,体无咎言。以尔车来,以我贿迁。\\\n  桑之未落,其叶沃若。于嗟鸠兮,无食桑葚!于嗟女兮,无与士耽!士之耽兮,犹可说也。女之耽兮,不可说也。\\\n  桑之落矣,其黄而陨。自我徂尔,三岁食贫。淇水汤汤,渐车帷裳。女也不爽,士贰其行。士也罔极,二三其德。\\\n  三岁为妇,靡室劳矣;夙兴夜寐,靡有朝矣。言既遂矣,至于暴矣。兄弟不知,咥其笑矣。静言思之,躬自悼矣。\\\n  及尔偕老,老使我怨。淇则有岸,隰则有泮。总角之宴,言笑晏晏。信誓旦旦,不思其反。反是不思,亦已焉哉!\", status=1, title=\"氓\", src=0)\n\n for document in [document_exist_1, document_exist_2, document_exist_3,\n document_deleted_4, document_deleted_5]:\n document.full_clean()\n document.save()\n\n for request in modify_requests:\n response = modify(request[\"request\"])\n assert response.status_code == request[\"response\"][\"code\"]\n # assert json.loads(response.content)[\"data\"].startswith(request[\"response\"][\"data\"])", "def test_user_belonging_to_more_sites(self):\n self._create_simple_setup()\n foo_site = Site.objects.get(domain='foo.site.com')\n bar_site = Site.objects.get(domain='bar.site.com')\n writer_role = Role.objects.get(name='writer')\n bob = User.objects.get(username='bob')\n foo_master_page = Page.objects.get(\n title_set__title='master',\n site=foo_site)\n writer_role.grant_to_user(bob, foo_site, [foo_master_page])\n news_page = Page.objects.get(\n title_set__title='news',\n parent=foo_master_page)\n PagePermission.objects.create(user=bob, page=news_page)\n writer_role.ungrant_from_user(bob, foo_site)\n writer_users = writer_role.users(foo_site)\n self.assertNotIn(bob, writer_users)\n writer_users = writer_role.users(bar_site)\n self.assertIn(bob, writer_users)", "def test_put_change_user(self):\n new_user = self.make_user('new_user')\n url = reverse(\n 'projectroles:api_role_update',\n kwargs={'roleassignment': self.update_as.sodar_uuid},\n )\n put_data = {\n 'role': PROJECT_ROLE_GUEST,\n 'user': str(new_user.sodar_uuid),\n }\n response = self.request_knox(url, method='PUT', data=put_data)\n self.assertEqual(response.status_code, 400, msg=response.content)", "def allowed_topologytemplate_access_change(user, template):\n try:\n up = user.get_profile()\n except AttributeError:\n return False\n\n return template.owner == user or user.has_perm(\"vnswww.topologytemplate_change_any\") or (user.has_perm(\"vnswww.topologytemplate_change_org\") and template.org == up.org)", "def test_create_account(self):\n response = self.client.post(\"http://localhost:8000/api/signup/\",\n data={\"username\": \"tests\", \"password\": \"TestTest\"})\n first_user = MyUser.objects.get()\n self.assertEqual(response.status_code, HTTP_201_CREATED)\n self.assertEqual(first_user.username, 'tests')\n response = self.client.post(\"http://localhost:8000/api/signup/\",\n data={\"username\": \"tests2\", \"password\": \"TestTest\"})\n self.assertEqual(response.status_code, HTTP_201_CREATED)\n self.assertTrue(MyUser.objects.filter(username=\"tests2\").exists())\n user = MyUser.objects.get(username=\"tests2\")\n response = self.client.put(f\"http://localhost:8000/api/users/{user.pk}/\", data={\"email\": \"tst@test.te\"})\n # Not logged shouldnt change anything\n self.assertEqual(response.status_code, HTTP_401_UNAUTHORIZED)\n user.set_password(\"TestTest\")\n user.save()\n self.assertTrue(self.client.login(username=\"tests2\", password=\"TestTest\"))\n response = self.client.patch(f\"http://localhost:8000/api/users/{user.pk}/\", data={\"email\": \"tst@test.te\"})\n # Logged, should change\n self.assertEqual(response.status_code, HTTP_200_OK)\n self.assertEqual(MyUser.objects.get(username=\"tests2\").email, 
\"tst@test.te\")\n # Dont update others users\n response = self.client.patch(f\"http://localhost:8000/api/users/{first_user.pk}/\", data={\"email\": \"tst@test.te\"})\n self.assertEqual(response.status_code, HTTP_400_BAD_REQUEST)", "def test_expected_users(self):\n print()\n print(\"Testing users whose parameters are safe...\")\n for user_id in self.get_unique_ids(100):\n self.store_expected_user(user_id)\n \n User.objects.all().delete()\n print(\"Testing many users whose parameters are safe with bulk_create...\")\n self.store_many_expected_users()\n\n print(\"-\" * 10)", "def testUpdateAccessAllowed(self):\n for user in (self.contributor, self.delegate, self.owner, self.root):\n response = self.runPut(user, sequencer=self.hiseq2000.sodar_uuid, data=self.post_data)\n self.response_200(response)\n data = json.loads(response.content.decode(\"utf-8\"))\n self.assertEqual(data[\"vendor_id\"], self.post_data[\"vendor_id\"])", "def test_patch_user(self):\n new_user = self.make_user('new_user')\n url = reverse(\n 'projectroles:api_role_update',\n kwargs={'roleassignment': self.update_as.sodar_uuid},\n )\n patch_data = {'user': str(new_user.sodar_uuid)}\n response = self.request_knox(url, method='PATCH', data=patch_data)\n self.assertEqual(response.status_code, 400, msg=response.content)", "def test_createUser_single(self):\n #TODO: this and other tests", "def test_view_update_permissions(self):\n user = self.user\n group = self.test_save()\n group.user_set.add(user)\n group1 = self.test_save('other_group')\n \n c = Client()\n url = '/group/%d/permissions/user/%s/'\n url_post = '/group/%d/permissions/'\n args = (group.id, user.id)\n args_post = group.id\n \n # anonymous user\n response = c.get(url % args, follow=True)\n self.assertEqual(200, response.status_code)\n self.assertTemplateUsed(response, 'registration/login.html')\n \n # unauthorized\n self.assert_(c.login(username=user.username, password='secret'))\n response = c.get(url % args)\n self.assertEqual(403, response.status_code)\n response = c.post(url % args)\n self.assertEqual(403, response.status_code)\n \n # authorized post (perm granted)\n grant(user, 'admin', group)\n response = c.get(url % args, {'user':user.id, 'obj':self.object0.pk})\n self.assertEqual(200, response.status_code)\n self.assertEquals('text/html; charset=utf-8', response['content-type'])\n self.assertTemplateUsed(response, 'object_permissions/permissions/form.html')\n \n # authorized post (superuser)\n revoke(user, 'admin', group)\n user.is_superuser = True\n user.save()\n response = c.get(url % args, {'user':user.id})\n self.assertEqual(200, response.status_code)\n self.assertEquals('text/html; charset=utf-8', response['content-type'])\n self.assertTemplateUsed(response, 'object_permissions/permissions/form.html')\n \n # invalid user (GET)\n response = c.get(url % (group.id, -1))\n self.assertEqual(404, response.status_code)\n \n # invalid group (GET)\n response = c.get(url % (-1, user.id))\n self.assertEqual(404, response.status_code)\n \n # invalid user (POST)\n data = {'permissions':['admin'], 'user':-1, 'obj':self.object0.pk}\n response = c.post(url_post % args_post, data)\n self.assertEqual(200, response.status_code)\n self.assertEquals('application/json', response['content-type'])\n self.assertNotEquals('1', response.content)\n \n # invalid group (POST)\n data = {'permissions':['admin'], 'group':-1, 'obj':self.object0.pk}\n response = c.post(url_post % args_post, data)\n self.assertEqual(200, response.status_code)\n self.assertEquals('application/json', 
response['content-type'])\n self.assertNotEquals('1', response.content)\n \n # user and group (POST)\n data = {'permissions':['admin'], 'user':user.id, 'group':group1.id, 'obj':self.object0.pk}\n response = c.post(url_post % args_post, data)\n self.assertEqual(200, response.status_code)\n self.assertEquals('application/json', response['content-type'])\n self.assertNotEquals('1', response.content)\n \n # invalid permission\n data = {'permissions':['DoesNotExist'], 'user':user.id, 'obj':self.object0.pk}\n response = c.post(url_post % args_post, data)\n self.assertEqual(200, response.status_code)\n self.assertEquals('application/json', response['content-type'])\n self.assertNotEquals('1', response.content)\n \n # setup signal\n self.signal_sender = self.signal_user = self.signal_obj = None\n def callback(sender, user, obj, **kwargs):\n self.signal_sender = sender\n self.signal_user = user\n self.signal_obj = obj\n view_edit_user.connect(callback)\n \n # valid post user\n data = {'permissions':['admin'], 'user':user.id, 'obj':self.object0.pk}\n response = c.post(url_post % args_post, data)\n self.assertEqual(200, response.status_code)\n self.assertEquals('text/html; charset=utf-8', response['content-type'])\n self.assertTemplateUsed(response, 'object_permissions/permissions/user_row.html')\n self.assert_(user.has_perm('admin', group))\n self.assertEqual(['admin'], get_user_perms(user, group))\n \n # check signal fired with correct values\n self.assertEqual(self.signal_sender, user)\n self.assertEqual(self.signal_user, user)\n self.assertEqual(self.signal_obj, group)\n view_edit_user.disconnect(callback)\n \n # valid post no permissions user\n data = {'permissions':[], 'user':user.id, 'obj':self.object0.pk}\n response = c.post(url_post % args_post, data)\n self.assertEqual(200, response.status_code)\n self.assertEqual([], get_user_perms(user, group))\n \n # valid post group\n data = {'permissions':['admin'], 'group':group1.id, 'obj':self.object0.pk}\n response = c.post(url_post % args_post, data)\n self.assertEqual(200, response.status_code)\n self.assertEquals('text/html; charset=utf-8', response['content-type'])\n self.assertTemplateUsed(response, 'object_permissions/permissions/user_row.html')\n self.assertEqual(['admin'], group1.get_perms(group))\n \n # valid post no permissions group\n data = {'permissions':[], 'group':group1.id, 'obj':self.object0.pk}\n response = c.post(url_post % args_post, data)\n self.assertEqual(200, response.status_code)\n self.assertEqual([], group1.get_perms(group))" ]
[ "0.8093766", "0.80680555", "0.80376196", "0.7835847", "0.77455807", "0.7744498", "0.76982164", "0.76218945", "0.736171", "0.7214227", "0.7022283", "0.6874125", "0.68300915", "0.68178946", "0.68173504", "0.6804541", "0.67034173", "0.66750884", "0.6632048", "0.6587507", "0.6538", "0.65097654", "0.6508674", "0.64719915", "0.6470846", "0.6451183", "0.6366788", "0.6325715", "0.6317642", "0.62718457", "0.62587035", "0.62375385", "0.6231884", "0.6216829", "0.6216479", "0.61958313", "0.61722445", "0.61678606", "0.616118", "0.6156584", "0.6153296", "0.6141626", "0.613472", "0.61332726", "0.6123222", "0.61099476", "0.6096489", "0.6091977", "0.6089016", "0.60793656", "0.6077851", "0.6076189", "0.60756034", "0.6052483", "0.604106", "0.60324156", "0.60306233", "0.60240567", "0.6020875", "0.6015926", "0.5997754", "0.59694344", "0.59534025", "0.5936214", "0.59357715", "0.5916624", "0.59102815", "0.59076124", "0.5899863", "0.58900553", "0.5885474", "0.58814836", "0.5880914", "0.5870208", "0.58653456", "0.5854433", "0.58316606", "0.58316606", "0.5825735", "0.5825296", "0.5824374", "0.5817181", "0.58163124", "0.58111376", "0.5809717", "0.580743", "0.5806921", "0.5794203", "0.5792491", "0.57863224", "0.57796544", "0.5776122", "0.5772992", "0.57717377", "0.5770814", "0.5769001", "0.5766917", "0.57636184", "0.5761979", "0.5761939" ]
0.82656085
0
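
The metadata of each row above declares a triplet objective over (query, document, negatives), and each negatives list is followed by an array of score strings, read here as one similarity score per negative; that reading is an assumption. Below is a minimal, illustrative Python sketch of turning one such record into (anchor, positive, negative) training triplets. The example_record dict, its field names, and the build_triplets helper are assumptions made for illustration only; they are not part of the dataset, and the hardest-first ordering by score is one possible choice rather than a prescribed one.

import json
from typing import Iterator, Tuple

# Hypothetical record mirroring the row layout above: a natural-language
# query, one positive code document, a triplet objective in the metadata,
# a list of negative code documents, and one score string per negative.
# Field names and truncated values are illustrative, not copied verbatim
# from the dataset file.
example_record = {
    "query": "test upsert other users template as staff raises access control error",
    "document": "def test_upsert_other_users_template_as_staff_raises_access_control_error(self): ...",
    "metadata": {"objective": {"self": [], "paired": [], "triplet": [["query", "document", "negatives"]]}},
    "negatives": [
        "def test_upsert_other_users_template_as_superuser_saves(self): ...",
        "def test_upsert_own_template_as_staff_saves(self): ...",
    ],
    "negative_scores": ["0.81", "0.80"],
}


def build_triplets(record: dict, max_negatives: int = 5) -> Iterator[Tuple[str, str, str]]:
    """Yield (anchor, positive, negative) triplets from one record.

    Negatives are paired with their scores and emitted hardest-first,
    i.e. the highest-scoring (most confusable) negatives come first.
    """
    anchor = record["query"]
    positive = record["document"]
    scored = zip(record["negatives"], (float(s) for s in record["negative_scores"]))
    ranked = sorted(scored, key=lambda pair: pair[1], reverse=True)
    for negative, _score in ranked[:max_negatives]:
        yield anchor, positive, negative


if __name__ == "__main__":
    for anchor, positive, negative in build_triplets(example_record):
        print(json.dumps({"anchor": anchor, "positive": positive[:60], "negative": negative[:60]}))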