query stringlengths 9–9.05k | document stringlengths 10–222k | negatives sequencelengths 19–20 | metadata dict |
---|---|---|---|
Tests that 'admin' cannot add a product with empty fields | def test_admin_cannot_create_product_with_empty_fields(self):
    resp = self.admin_register()
    reply = self.admin_login()
    token = reply['token']
    product = dict(
        prod_name='',
        category='',
        stock=20,
        price=150
    )
    resp = self.client.post(
        '/api/v1/products',
        content_type='application/json',
        data=json.dumps(product),
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())
    self.assertEqual(reply['message'], 'Please enter all fields!')
    self.assertEqual(resp.status_code, 400) | [
"def test_product_cannot_create_with_invalid_details(self):\n res = self.client().post('/api/v1/products', data=json.dumps(self.empty_product), headers = {\"content-type\": \"application/json\"})\n self.assertEqual(res.status_code, 201)",
"def test_update_product_with_empty_fields(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n product_update = dict(\n prod_name='',\n category='',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'prod_name and category cannot be empty!')\n self.assertEqual(resp.status_code, 400)",
"def test_create_purchase_not_all_required_fields(self):\n data = {\"user_id\": 4, \"product_id\": 3}\n res = self.post(url=\"/purchases\", role=\"admin\", data=data)\n self.assertEqual(res.status_code, 401)\n self.assertException(res, exc.DataIsMissing)\n self.assertEqual(len(Purchase.query.all()), 0)",
"def test_add_product_empty_quantity(self):\n product_url = self.product.get_absolute_url()\n postdata = {'product_slug': self.product.slug, 'quantity': '' }\n response = self.client.post(product_url, postdata )\n expected_error = unicode(ProductAddToCartForm.base_fields['quantity'].error_messages['required'])\n self.assertFormError(response, \"form\", \"quantity\", [expected_error])",
"def test_add_new_product_with_product_name_missing(self):\n response = self.app_test_client.post('{}/products'.format(\n self.BASE_URL), json={\n 'product_price': 300,\n 'category':self.category_id, 'inventory': 10, 'min_quantity': 5\n }, headers=dict(Authorization=self.token),\n content_type='application/json')\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(common_functions.convert_response_to_json(\n response)['message'],\n 'Provide a name for the product')",
"def test_create_product_no_data(self):\n resp = self.app.post(\n \"/products\", json={}, content_type=\"application/json\"\n )\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_update_product_required_fields(self):\n data = {\n 'pk': 1,\n 'name': None,\n 'description': '''\n Yogurt also spelled yoghurt, yogourt or yoghourt,\n is a food produced by bacterial fermentation of milk.\n '''\n }\n url = reverse('products:detail', kwargs={'pk': data['pk']})\n response = self.client.put(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(models.Product.objects.filter(name=None).count(), 0)",
"def test_create_purchase_unknown_field(self):\n data = {\"user_id\": 4, \"product_id\": 3, \"amount\": 4, \"foo\": \"bar\"}\n res = self.post(url=\"/purchases\", role=\"admin\", data=data)\n self.assertEqual(res.status_code, 401)\n self.assertException(res, exc.UnknownField)\n self.assertEqual(len(Purchase.query.all()), 0)",
"def _clean_standalone(self):\n if not self.title:\n raise ValidationError(_(\"Your product must have a title.\"))\n if not self.product_class:\n raise ValidationError(_(\"Your product must have a product class.\"))\n if self.parent_id:\n raise ValidationError(_(\"Only child products can have a parent.\"))",
"def test_admin_cannot_delete_product_from_empty_Inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n \n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'There are no products in Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_create_invalid_product_blank_name(self):\n product_name = \"\"\n size = Size.SIZE_4_5\n colour = \"Red\"\n price = 47.00\n product_type = ProductType.SHOE\n product_code = \"A\"\n department = Department.LADIES\n\n product = Product(\n name=product_name,\n size=size,\n colour=colour,\n price=price,\n product_type=product_type,\n product_code=product_code,\n department=department,\n )\n\n with self.assertRaises(ValidationError):\n product.full_clean()",
"def test_create_invalid_product_no_name(self):\n size = Size.SIZE_4_5\n colour = \"Red\"\n price = 47.00\n product_type = ProductType.SHOE\n product_code = \"A\"\n department = Department.LADIES\n\n product = Product(\n size=size,\n colour=colour,\n price=price,\n product_type=product_type,\n product_code=product_code,\n department=department,\n )\n\n with self.assertRaises(ValidationError):\n product.full_clean()",
"def test_no_change_product_option(self):\r\n self.F.ProductFactory.create()\r\n s = self.F.SuiteFactory()\r\n self.F.SuiteCaseFactory(suite=s)\r\n\r\n f = self.form(instance=s)\r\n self.assertEqual(\r\n [c[0] for c in f.fields[\"product\"].choices],\r\n ['', s.product.id]\r\n )\r\n self.assertTrue(f.fields[\"product\"].readonly)",
"def test_create_not_admin(self):\n self.assertEqual(Product.objects.count(), 2)\n payload = {\n 'name': 'New product',\n 'category': self.category_1.id,\n 'sku': '11111111',\n 'description': 'New product description',\n 'price': 39.99\n }\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n expected = {'detail': 'You do not have permission to perform this action.'}\n response = self.client.post(\n '/api/products/', data=payload,\n content_type='application/json', **headers)\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)\n self.assertEqual(Product.objects.count(), 2)",
"def test_add_new_product_with_min_quantity_missing(self):\n response = self.app_test_client.post('{}/products'.format(\n self.BASE_URL), json={\n 'product_name': \"Hammer\",\n 'product_price': 300, 'inventory': 10, 'category': self.category_id\n }, headers=dict(Authorization=self.token),\n content_type='application/json')\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(common_functions.convert_response_to_json(\n response)['message'],\n 'Please define the minimum quantity')",
"def test_create_purchase_non_existing_product(self):\n data = {\"user_id\": 1, \"product_id\": 5, \"amount\": 1}\n res = self.post(url=\"/purchases\", role=\"admin\", data=data)\n self.assertEqual(res.status_code, 401)\n self.assertException(res, exc.EntryNotFound)\n self.assertEqual(len(Purchase.query.all()), 0)",
"def test_c_dont_adds_product_if_not_in_stock(self, client, product):\n product.stock = 0\n product.save()\n\n response = client.post(\n reverse('shoppingcart:add-product'),\n json.dumps({'id_': product.pk}),\n content_type='application/json',\n HTTP_X_REQUESTED_WITH='XMLHttpRequest'\n )\n\n assert response.status_code == 400\n assert product.line_set.all().exists() is False",
"def test_product_name_is_required(self):\n product = {\n 'name': '',\n 'price': '100.00',\n 'image': ''\n }\n res = self.client.post(PRODUCTS_URL, product)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_cannot_sale_nonexistant_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"Paris_heels\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product is not in the Inventory!')\n self.assertEqual(resp.status_code, 404)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests that product_name field cannot contain a number | def test_Product_name_cannot_contain_a_number(self):
    resp = self.admin_register()
    reply = self.admin_login()
    token = reply['token']
    product = dict(
        prod_name='NY_3',
        category='denims',
        stock=20,
        price=150
    )
    resp = self.client.post(
        '/api/v1/products',
        content_type='application/json',
        data=json.dumps(product),
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())
    self.assertEqual(reply['message'], 'Please enter strings in name and category!')
    self.assertEqual(resp.status_code, 400) | [
"def test_category_cannot_contain_a_number(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='4dens',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Please enter strings in name and category!')\n self.assertEqual(resp.status_code, 400)",
"def prodName(self, pName):\r\n if str(pName).isnumeric() == False:\r\n self.__prodName = pName\r\n else:\r\n raise Exception(\"Product Names cannot be numbers\")",
"def validate_data_product_id(self):\n pid = self.product_id\n if isinstance(pid, str):\n return pid.isdigit() and int(pid) >= 0\n return isinstance(pid, int) and pid >= 0",
"def test_non_numberic_validation(self):",
"def __valid_product(name):\n\tproduct_name = db.product_catalog.name == name\n\tquery = db(product_name).select().first()\n\tif query.amount > 0:\n\t\treturn True\n\treturn False",
"def __validate_alpha_num_fields(self, data, field_name):\n\n errs = []\n if not data[field_name].isalnum():\n errs.append(self.return_field_message(field_name, \"alpha_num\"))\n\n return errs",
"def test_validate_non_valid_numbers(self):\n not_valid = Phone.validate_number(\"xxx-xxx xx xx\")\n self.assertFalse(not_valid)\n\n not_valid = Phone.validate_number(\"073456129-\")\n self.assertFalse(not_valid)\n\n not_valid = Phone.validate_number(\"073-456 12 9a\")\n self.assertFalse(not_valid)",
"def test_add_sale_with_product_name_not_string(self):\n self.register_admin_test_account()\n token = self.login_admin_test()\n\n response = self.app_test_client.post('{}/saleorder'.format(\n self.base_url), json={'name': 1, 'price': 1500, 'quantity': 10, 'totalamt': \"\"},\n headers=dict(Authorization=token),\n content_type='application/json')\n\n self.assertEqual(response.status_code, 400)\n\n self.assertEqual(general_helper_functions.convert_json(\n response)['message'], 'Bad request. The product name should be a string.')",
"def test_add_sale_with_price_not_digit_format(self):\n self.register_admin_test_account()\n token = self.login_admin_test()\n\n response = self.app_test_client.post('{}/saleorder'.format(\n self.base_url), json={'name': \"Hand Bag\", 'price': \"1500\", 'quantity': 3, 'totalamt': \"\"},\n headers=dict(Authorization=token),\n content_type='application/json')\n\n self.assertEqual(response.status_code, 400)\n\n self.assertEqual(general_helper_functions.convert_json(\n response)['message'], 'Bad request. The product price should be an integer.')",
"def validate_product_inputs(self, product_name, quantity, price):\n if not product_name:\n return \"product name is missing\"\n if product_name == \" \":\n return \"product name is missing\"\n if not re.match(r\"^([a-zA-Z]+[-_\\s])*[a-zA-Z]+$\", product_name):\n return \"product name should contain no white spaces\"\n if not re.match(r\"^[0-9]*$\", quantity):\n return \"quantity should only be digits with no white spaces\"\n if not re.match(r\"^[0-9]*$\", price):\n return \"price should only be digits with no white spaces\" \n if len(product_name) < 3:\n return \"product name should be more than 3 characters long\"\n if not quantity:\n return \"quantity is missing\"\n if quantity == \" \":\n return \"quantity is missing\"\n if int(quantity) < 1:\n return \"quantity should be at least 1 item\" \n if not price:\n return \"price is missing\"\n if int(price) < 1:\n return \"price should be greater than zero or more\" \n if price == \" \":\n return \"price is missing\"",
"def test_numeric_names(self):\n # Set up and introduce numeric names\n sample_sheet = CasavaSampleSheet(fp=cStringIO.StringIO(self.sample_sheet_text))\n sample_sheet[3]['SampleID'] = 8861\n sample_sheet[4]['SampleProject'] = 123\n # Check for illegal names\n self.assertEqual(len(sample_sheet.illegal_names),0)\n # Check for empty names\n self.assertEqual(len(sample_sheet.empty_names),0)\n # Check for duplicated names\n self.assertEqual(len(sample_sheet.duplicated_names),0)",
"def not_a_number(name):\n return create_error_message(\n 'Invalid query parameters. {} must be a number.'.format(\n name\n )\n )",
"def test_add_malformed_number(self):\n result = self.env.run('phonebook ' + \\\n ('add \"John Michael\" \"123 456 abcd\" ') + \\\n ('-b %s/phonebook_fixture.pb' % self.prefix))\n expected_output = \"Entry not created: '123 456 abcd' is not a valid phone number.\"\n nose.tools.assert_in(expected_output, result.stdout)\n entry_fields = ['John Michael', '123 456 abcd']\n self.assert_not_added(entry_fields)",
"def test_create_invalid_price_higher_than_999(self):\n product_name = \"Swift Iris\"\n size = Size.SIZE_4_5\n colour = \"Red\"\n price = 1001\n product_type = ProductType.SHOE\n product_code = \"A\"\n department = Department.LADIES\n\n product = Product(\n name=product_name,\n size=size,\n colour=colour,\n price=price,\n product_type=product_type,\n product_code=product_code,\n department=department,\n )\n\n with self.assertRaises(ValidationError):\n product.full_clean()",
"def test_add_new_product_with_product_name_not_string(self):\n response = self.app_test_client.post('{}/products'.format(\n self.BASE_URL), json={\n 'product_name': 300, 'product_price': 300,\n 'category':self.category_id, 'inventory': 10, 'min_quantity': 5\n }, headers=dict(Authorization=self.token),\n content_type='application/json')\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(common_functions.convert_response_to_json(\n response)['message'],\n 'Product name should be a string')",
"def _testValueNonNumeric(self): # pylint: disable=invalid-name\n\n non_numeric_items = ('1.0', (1,), (0.1, None))\n for value in non_numeric_items:\n self._testBadValue(value, '{} must be numeric')",
"def test_agg_notNumber(self):\n f = AssessmentAggregateForm({\n 'aggregate_proficiency':\"at least 80\"\n })\n self.assertFalse(f.is_valid())",
"def validate_sales(sales):\r\n check = re.compile('[0-9]{2,3}')\r\n if re.fullmatch(check, sales):\r\n return True\r\n else:\r\n return False",
"def test_donor_ionization_energy_non_numeric(self):\n non_num = \"this string is non-numeric.\"\n try:\n self.el.donor_ionization_energy = non_num\n except TypeError:\n # Setting `donor_ionization_energy` as a type that isn't numeric should raise a TypeError, so things are working.\n pass\n else:\n self.fail(\"`donor_ionization_energy` attribute can be assigned a non-numeric value.\")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests that category field cannot contain a number | def test_category_cannot_contain_a_number(self):
    resp = self.admin_register()
    reply = self.admin_login()
    token = reply['token']
    product = dict(
        prod_name='NY_denims',
        category='4dens',
        stock=20,
        price=150
    )
    resp = self.client.post(
        '/api/v1/products',
        content_type='application/json',
        data=json.dumps(product),
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())
    self.assertEqual(reply['message'], 'Please enter strings in name and category!')
    self.assertEqual(resp.status_code, 400) | [
"def test_isNumericCategory(self):\n obs = self.overview_map.isNumericCategory('Treatment')\n self.assertEqual(obs, False)\n\n obs = self.overview_map.isNumericCategory('DOB')\n self.assertEqual(obs, True)",
"def test_non_numberic_validation(self):",
"def test_validate_input_value_categorical_integer_nohit(categorical_space):\n namespace = \"x\"\n\n is_valid, casted_value = _validate_input_value(\"15\", categorical_space, namespace)\n assert not is_valid",
"def test_agg_notNumber(self):\n f = AssessmentAggregateForm({\n 'aggregate_proficiency':\"at least 80\"\n })\n self.assertFalse(f.is_valid())",
"def isNumericCategory(self, category):\r\n category_values = self.getCategoryValues(self.SampleIds, category)\r\n\r\n is_numeric = True\r\n for category_value in category_values:\r\n try:\r\n float(category_value)\r\n except ValueError:\r\n is_numeric = False\r\n return is_numeric",
"def _testValueNonNumeric(self): # pylint: disable=invalid-name\n\n non_numeric_items = ('1.0', (1,), (0.1, None))\n for value in non_numeric_items:\n self._testBadValue(value, '{} must be numeric')",
"def test_validate_input_value_categorical_integer_hit(categorical_space):\n namespace = \"x\"\n\n is_valid, casted_value = _validate_input_value(\"11\", categorical_space, namespace)\n assert is_valid\n assert isinstance(casted_value, numbers.Number)\n\n is_valid, casted_value = _validate_input_value(\"11.0\", categorical_space, namespace)\n assert is_valid\n assert isinstance(casted_value, numbers.Number)",
"def test_donor_concentration_non_numeric(self):\n non_num = \"this string is non-numeric.\"\n try:\n self.el.donor_concentration = non_num\n except TypeError:\n # Setting `donor_concentration` as a type that isn't numeric should raise a TypeError, so things are working.\n pass\n else:\n self.fail(\"`donor_concentration` attribute can be assigned a non-numeric value.\")",
"def test_validate_input_value_categorical_string_number(categorical_space):\n namespace = \"x\"\n\n # Make sure integer 12 does not pass\n is_valid, casted_value = _validate_input_value(\"12\", categorical_space, namespace)\n assert not is_valid\n\n # Now test \"12\" as a string\n is_valid, casted_value = _validate_input_value(\"'12'\", categorical_space, namespace)\n assert is_valid\n assert isinstance(casted_value, str)",
"def test_drop_numbers():\n cleaner = TextCleaner()\n assert cleaner.transform([[\"123,123.123\"]])[\"corpus\"][0] == \"\"\n assert not cleaner.drops[\"number\"].dropna().empty",
"def test_uss_num_bad_values(self):\n for val in self.bad_values:\n self.assertRaises(line_format_errors.FieldParseError,\n lambda: self.line._parse_uss_num(val))",
"def testNumberAttribute(self):\n def action(field_class):\n # Check range.\n self.assertRaises(messages.InvalidNumberError,\n field_class,\n 0)\n self.assertRaises(messages.InvalidNumberError,\n field_class,\n -1)\n self.assertRaises(messages.InvalidNumberError,\n field_class,\n messages.MAX_FIELD_NUMBER + 1)\n\n # Check reserved.\n self.assertRaises(messages.InvalidNumberError,\n field_class,\n messages.FIRST_RESERVED_FIELD_NUMBER)\n self.assertRaises(messages.InvalidNumberError,\n field_class,\n messages.LAST_RESERVED_FIELD_NUMBER)\n self.assertRaises(messages.InvalidNumberError,\n field_class,\n '1')\n\n # This one should work.\n field_class(number=1)\n self.ActionOnAllFieldClasses(action)",
"def slug_is_numerical(slug):\r\n try:\r\n float(slug)\r\n except ValueError:\r\n return False\r\n\r\n return True",
"def test_clean_data_non_numeric():\n with pytest.raises(ValueError):\n s = Sudoku(test_data[\"bad_data_invalid_character\"])",
"def is_number(self, c: str) -> bool:\n return \".\" == c or (\"0\" <= c and c <= \"9\")",
"def test_validate_input_value_categorical_real_nohit(categorical_space):\n namespace = \"x\"\n\n is_valid, casted_value = _validate_input_value(\"10\", categorical_space, namespace)\n assert not is_valid\n\n is_valid, casted_value = _validate_input_value(\"10.0\", categorical_space, namespace)\n assert not is_valid\n\n is_valid, casted_value = _validate_input_value(\"10.2\", categorical_space, namespace)\n assert not is_valid",
"def test_category_invalid(self):\n # wiki and questions\n ques = QuestionFactory(title=u'q1 audio')\n ques.tags.add(u'desktop')\n ans = AnswerFactory(question=ques)\n AnswerVoteFactory(answer=ans, helpful=True)\n\n d1 = DocumentFactory(\n title=u'd1 audio',\n locale=u'en-US',\n category=10,\n is_archived=False,\n tags=[u'desktop'])\n ApprovedRevisionFactory(document=d1)\n\n self.refresh()\n\n qs = {'a': 1, 'w': 3, 'format': 'json', 'category': 'invalid'}\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(2, json.loads(response.content)['total'])",
"def test_cartebancaire_number(self):\n cartebancaire_number = 4035501000000008\n self.assertTrue(formatter.is_cartebancaire(cartebancaire_number))",
"def test_validate_input_value_categorical_real_hit(categorical_space):\n namespace = \"x\"\n\n is_valid, casted_value = _validate_input_value(\"10.1\", categorical_space, namespace)\n assert is_valid\n assert isinstance(casted_value, numbers.Number)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests that stock and price fields must be numbers | def test_stock_and_price_must_be_numbers(self):
    resp = self.admin_register()
    reply = self.admin_login()
    token = reply['token']
    product = dict(
        prod_name='NY_denims',
        category='denims',
        stock='stock',
        price='money'
    )
    resp = self.client.post(
        '/api/v1/products',
        content_type='application/json',
        data=json.dumps(product),
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())
    self.assertEqual(reply['message'], 'The Stock and Price must be numbers!')
    self.assertEqual(resp.status_code, 400) | [
"def test_add_sale_with_price_not_digit_format(self):\n self.register_admin_test_account()\n token = self.login_admin_test()\n\n response = self.app_test_client.post('{}/saleorder'.format(\n self.base_url), json={'name': \"Hand Bag\", 'price': \"1500\", 'quantity': 3, 'totalamt': \"\"},\n headers=dict(Authorization=token),\n content_type='application/json')\n\n self.assertEqual(response.status_code, 400)\n\n self.assertEqual(general_helper_functions.convert_json(\n response)['message'], 'Bad request. The product price should be an integer.')",
"def test_non_numberic_validation(self):",
"def test__validate_product_price(self):\n sm = self._mock_supermarket_instance()\n # Test vaild price int\n self.assertIsNone(sm._validate_product_price(1))\n\n # Test vaild price float\n self.assertIsNone(sm._validate_product_price(1.1))\n\n # Test invalid price 0\n with self.assertRaises(ValueError):\n sm._validate_product_price(0)\n\n # Test invalid price < 0\n with self.assertRaises(ValueError):\n sm._validate_product_price(-1)",
"def test_cast_non_numeric_true():\n assert _currency_column_to_numeric(\"foo\", {\"foo\": 42}) == 42",
"def test_cast_non_numeric_false():\n assert _currency_column_to_numeric(\"10 dollars\", {\"foo\": 42}) == \"10\"",
"def test_make_order_with_price_invalid(self):\n response = self.api_test_client.post('{}/orders'.format(\n self.BASE_URL), json={\n 'item_name': 'Watermelon', 'item_price': -50, 'quantity': 3\n }, headers={'Content-Type': 'application/json'})\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(response_as_json(\n response)['message'],\n 'Bad request. Price and quantity must be ints >= 1')",
"def test_price_details_number(self):\n with self.client:\n response = self.add_meal(\"beef\", \"jasmine\")\n data = json.loads(response.data.decode())\n self.assertEqual(data.get('message'),\n \"Price must be a number\")\n self.assertEqual(response.status_code, 400)",
"def test_book_isbn_must_only_be_numbers(self):\n\n\t\twith self.client:\n\t\t\tadd_book = {\n\t\t\t\t'title': 'Hello Books',\n\t\t\t\t'isbn': '56987451Ky'\n\t\t\t}\n\t\t\tlogin_data = self.login_test_user()\n\t\t\ttoken = login_data['auth_token']\n\t\t\tres = self.client.post(\n\t\t\t\tf'{URL_BOOKS}',\n\t\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\t\tcontent_type='application/json',\n\t\t\t\tdata=json.dumps(add_book)\n\t\t\t)\n\t\t\tres2 = json.loads(res.data.decode())\n\t\t\tself.assertIn('isbn must only include numbers', str(res2))\n\t\t\tself.assertEqual(res.status_code, 400)",
"def test_price_float_type(self):\n for i in data:\n self.assertIsInstance(data[i]['PRICE'], float)",
"def _testValueNonNumeric(self): # pylint: disable=invalid-name\n\n non_numeric_items = ('1.0', (1,), (0.1, None))\n for value in non_numeric_items:\n self._testBadValue(value, '{} must be numeric')",
"def test_clean_price_with_pricing_support(self):\n self.test_data['price'] = ''\n self.form.cleaned_data = self.test_data\n\n self.assertRaisesMessage(\n forms.ValidationError,\n _(u'Please, enter a price'),\n self.form.clean_price\n )",
"def clean_stock(self):\n stock = self.cleaned_data.get('stock')\n if stock == 0:\n raise forms.ValidationError(u'Please insert product quantity')\n return stock",
"def test_pricing_info_with_wrong_floats(self):\n sub = {\n 'lastname': 'toto',\n 'subscription_price': 'foo',\n 'membership_price': 'bar'\n }\n retrieved = self._save_and_retrieve_from_lastname(sub, 'toto')\n self.assertEqual(retrieved.subscription_price, 0)",
"def _validate_qty(values: dict):\n\n if not (quantity := values.get('quantity')):\n raise ValueError(\"Quantity attribute is required.\")\n\n if not (symbol := values.get('symbol')):\n raise ValueError(\"Symbol attribute is required.\")\n\n filter = symbol.filters.lot_size_filter\n # if ONE :=1 and not filter.min_qty <= quantity <= filter.max_qty:\n # ValueError(\"The quantity is not in valid range.\")\n\n if filter.step_size and not is_valid_significant_digits(\n quantity,\n symbol.qty_decimal_precision\n ):\n raise ValueError(\"The quantity precision is not valid.\")\n\n return values",
"def test_numeric(self):\n conn = self.database.connection()\n cursor = conn.cursor()\n dialect = self.database.dialect()\n dbapi = self.database.dbapi()\n query = dialect.translate('DROP TABLE test_numeric')\n try:\n cursor.execute(query)\n except dbapi.Error:\n conn.rollback()\n query = dialect.translate('CREATE TABLE test_numeric ' \\\n '( value NUMERIC(100,50) NOT NULL )')\n cursor.execute(query)\n data = []\n query = 'INSERT INTO test_numeric VALUES (%s)'\n for i in range(100):\n int = random.getrandbits(150)\n frac = random.getrandbits(150)\n item = decimal.Decimal('%d.%s' % (int, frac))\n data.append(item)\n cursor.execute(query, (item,))\n query = 'SELECT * FROM test_numeric'\n cursor.execute(query)\n result = cursor.fetchall()\n for row in result:\n item = row[0]\n assert isinstance(item, decimal.Decimal)\n assert item in data\n data.remove(item)\n query = dialect.translate('DELETE FROM test_numeric')\n cursor.execute(query)\n query = dialect.translate('DROP TABLE test_numeric')\n cursor.execute(query)\n conn.commit()",
"def test_custom_valid_input_test_number(self, ucc_smartx_selenium_helper, ucc_smartx_rest_helper):\n custom = Custom(ucc_smartx_selenium_helper, ucc_smartx_rest_helper)\n custom.test_string.set_value(\"test_str\")\n custom.test_number.set_value(\"a\")\n self.assert_util(\n custom.save,\n r\"Field Test Number is not a number\",\n left_args={'expect_error': True}\n )",
"def test_add_with_negative_price(self):\n good = GoodInfo(\"ัะนัะพ 1 ะบะฐั.\", \"-30\", \"40\", \"2020-12-30\", \n \"14\", \"2020-12-30\")\n check_product_data = self.database.add(good)\n\n self.assertFalse(check_product_data)",
"def test_empty_price(self):\n actual = a1.stock_price_summary([])\n expected = (0,0)\n self.assertEqual(actual, expected)",
"def check_for_float_and_int(check):"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests that product already exists in the Inventory | def test_product_exists_in_inventory(self):
    resp = self.admin_register()
    reply = self.admin_login()
    token = reply['token']
    product = dict(
        prod_name='NY_denims',
        category='denims',
        stock=20,
        price=150
    )
    resp = self.client.post(
        '/api/v1/products',
        content_type='application/json',
        data=json.dumps(product),
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    product = dict(
        prod_name='NY_denims',
        category='denims',
        stock=20,
        price=150
    )
    resp = self.client.post(
        '/api/v1/products',
        content_type='application/json',
        data=json.dumps(product),
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())
    self.assertEqual(reply['message'], 'This product exists in the Inventory!')
    self.assertEqual(resp.status_code, 400) | [
"def test_already_existing_product_name(self):\n self.query_with_token(\n self.access_token,\n create_product.format(\n supplier_id=self.supplier_id, backup_id=self.backup_id))\n response = self.query_with_token(\n self.access_token,\n create_product.format(\n supplier_id=self.supplier_id, backup_id=self.backup_id))\n self.assertIn('errors', response)",
"def test_add_new_product_with_product_name_already_existing(self):\n self.app_test_client.post('{}/products'.format(\n self.BASE_URL), json=self.PRODUCT, headers=dict(Authorization=self.token),\n content_type='application/json')\n\n response = self.app_test_client.post('{}/products'.format(\n self.BASE_URL), json=self.PRODUCT, headers=dict(Authorization=self.token),\n content_type='application/json')\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(common_functions.convert_response_to_json(\n response)['message'],\n 'Record already exists in the database')",
"def test_view_product_that_doesnot_exist_in_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product does not exist!')\n self.assertEqual(resp.status_code, 404)",
"def test_cannot_sale_nonexistant_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"Paris_heels\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product is not in the Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_create_duplicate_vendor_product_id_under_same_vendor(self):\n duplicated_vendor_product_id = '1'\n inventory1 = Inventory.objects.create(\n vendor=self.vendor,\n product=self.product,\n vendor_product_id=duplicated_vendor_product_id,\n )\n product2 = Product.objects.create(name='Second Product')\n with self.assertRaises(IntegrityError):\n inventory2 = Inventory.objects.create(\n vendor=self.vendor,\n product=product2,\n vendor_product_id=duplicated_vendor_product_id,\n )",
"def test_update_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], \"This product doesn't exists in the Inventory!\")\n self.assertEqual(resp.status_code, 400)",
"def test_add_already_present(self):\n food_item = self.create_a_food_item()\n # remove an entry from the frozen\n self.shelves['frozen'].food_dict.popitem()\n rc = process_new_item(self.shelves, food_item)\n self.assertEqual(rc, NewItemStatus.ok)\n food_item_dup = self.create_a_food_item()\n food_item_dup.temp = 'hot'\n rc = process_new_item(self.shelves, food_item_dup)\n self.assertEqual(rc, NewItemStatus.already_shelved)",
"def test_create_duplicate_vendor_product_id_under_different_vendors(self):\n duplicated_vendor_product_id = '1'\n inventory1 = Inventory.objects.create(\n vendor=self.vendor,\n product=self.product,\n vendor_product_id=duplicated_vendor_product_id,\n )\n vendor2 = Vendor.objects.create(name='Second Vendor')\n inventory2 = Inventory.objects.create(\n vendor=vendor2,\n product=self.product,\n vendor_product_id=duplicated_vendor_product_id,\n )\n self.assertIsInstance(inventory1, Inventory)\n self.assertIsInstance(inventory2, Inventory)",
"def test_c_dont_adds_if_already_in_cart(self, client, product, admin_user):\n cart = Cart.objects.create(owner=admin_user)\n Line.objects.create(cart=cart, product=product)\n\n client.force_login(admin_user)\n\n response = client.post(\n reverse('shoppingcart:add-product'),\n json.dumps({'id_': product.pk}),\n content_type='application/json',\n HTTP_X_REQUESTED_WITH='XMLHttpRequest'\n )\n\n assert response.status_code == 400\n assert product.line_set.all().exists() is True",
"def check_inventory(self) -> None:\n self.store.check_inventory()",
"def test_add_product(self):\n sm = self._mock_supermarket_instance()\n product = {\n 'BBBB-BBBB-BBBB-BBBB': {\n 'product': 'b',\n 'price': 2\n }\n }\n sm.add_product(product)\n # Test product is added\n self.assertEqual(\n sm.product_codes['BBBB-BBBB-BBBB-BBBB'],\n { 'product': 'b', 'price': 2 }\n )\n\n # Test KeyError is raised if product already exists\n with self.assertRaises(KeyError):\n sm.add_product(product)",
"def test_admin_cannot_delete_product_from_empty_Inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n \n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'There are no products in Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_create_purchase_non_existing_product(self):\n data = {\"user_id\": 1, \"product_id\": 5, \"amount\": 1}\n res = self.post(url=\"/purchases\", role=\"admin\", data=data)\n self.assertEqual(res.status_code, 401)\n self.assertException(res, exc.EntryNotFound)\n self.assertEqual(len(Purchase.query.all()), 0)",
"def test_inventory_1(self):\n sys, ob1, feat1, per1, r1 = self.set_up_1()\n self.assertFalse(sys.inventory())",
"def test_c_dont_adds_product_if_not_in_stock(self, client, product):\n product.stock = 0\n product.save()\n\n response = client.post(\n reverse('shoppingcart:add-product'),\n json.dumps({'id_': product.pk}),\n content_type='application/json',\n HTTP_X_REQUESTED_WITH='XMLHttpRequest'\n )\n\n assert response.status_code == 400\n assert product.line_set.all().exists() is False",
"def test_admin_cannot_delete_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/2',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'This product does not exist in Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_item_creation_twice(self):\n # create an item\n self.test_shoppingitem()\n # create the same item twice\n res2 = self.client().post(\"/shoppinglists/1/items\",\n headers=dict(\n Authorization=\"Bearer \" + self.access_token),\n data=self.shoppingitem)\n self.assertIn(\"Item name already exists\", str(res2.data))",
"def test_product_buy_with_not_exists_name(self):\n result_buy = self.info_list.product_buy(\"ะะพะฒัะดะธะฝะฐ ะะตะผะตัะบะฐั 2ะบะณ\", 3)\n self.assertFalse(result_buy)",
"def test_product_name(self):\n found, name = self.is_found_product_name()\n self.assertTrue(\n found, \"Product name {} not in set {}\".format(name, self.product_name)\n )"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests that a user can view a product in the Inventory | def test_view_a_product(self):
    resp = self.admin_register()
    reply = self.admin_login()
    token = reply['token']
    product = dict(
        prod_name='NY_denims',
        category='denims',
        stock=20,
        price=150
    )
    resp = self.client.post(
        '/api/v1/products',
        content_type='application/json',
        data=json.dumps(product),
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())
    self.assertEqual(reply['message'], 'Product successfully added to Inventory!')
    self.assertEqual(resp.status_code, 201)
    resp = self.client.get(
        '/api/v1/products/1',
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())
    self.assertIn('NY_denims', str(reply['product']))
    self.assertEqual(resp.status_code, 200) | [
"def test_permission_detail(self):\n\n self.client.logout()\n\n error_message = \"Authentication credentials were not provided.\"\n\n data = {\n \"user\": self.user_1,\n \"item\": self.item_1,\n \"quantity\": 90,\n }\n inventory = Inventory.objects.create(**data)\n\n url = reverse(\"inventory-detail\", None, {inventory.id})\n response = self.client.get(url, format=\"json\")\n\n assert response.status_code == status.HTTP_401_UNAUTHORIZED\n assert str(response.data[\"detail\"]) == error_message",
"def test_admin_product_info(self):\n self.add_product(self.TESTPRODUCT1, 1)\n\n # Missing product\n rv = self.app.get('/admin/product/nothing', follow_redirects=True)\n assert b'Produkten existerar inte!' in rv.data\n\n # Existing product\n rv = self.app.get('/admin/product/%s' % self.TESTPRODUCT1['barcode'], follow_redirects=True)\n assert self.TESTPRODUCT1['name'] in rv.data",
"def test_is_product_show(self):\n\n self.selenium.get(\"http://localhost:8000/\")\n response = self.selenium.find_element(By.ID, \"id_product_name\")\n response.send_keys(\"frosties\")\n response.send_keys(Keys.ENTER)\n self.assertTemplateUsed('selected_product.html')",
"def test_list_products_logged_in(self):\n\n # Log in seller\n self.client.login(username=\"test_seller\", password=\"secret\")\n\n # Issue a GET request\n response = self.client.get(reverse('website:products'))\n\n # Check that the response is 200\n self.assertEqual(response.status_code, 200)\n\n # Check that the logged in user does not recieve any products to view because the only products available are the ones they have for sale\n self.assertEqual(len(response.context['products']),0)\n\n # Check that the product title appears in the rendered HTML content\n self.assertNotIn('<h5 class=\"card-title mb-0\">Test Product</h5>'.encode(), response.content)\n self.assertNotIn('<h5 class=\"card-title mb-0\">Test Product2</h5>'.encode(), response.content)",
"def test_view_product_that_doesnot_exist_in_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product does not exist!')\n self.assertEqual(resp.status_code, 404)",
"def test_security_on_get(self):\n # test the listing url\n product = Product.objects.all()[0]\n url = '/product/xml/'\n response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)\n # test the product detail url\n url = '/product/xml/%s/' % product.item_number\n Response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)",
"def test_view_products_from_empty_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'There are no products yet!')\n self.assertEqual(resp.status_code, 404)",
"def test_list_available_product(self):\n view = AvailableProductListView.as_view({'get': 'list'})\n uri = reverse('products:list-available-products')\n request = self.factory.get(uri, HTTP_AUTHORIZATION='Token {}'.format(self.token_user.key))\n request.user = self.user['user']\n response = view(request)\n self.assertEqual(response.status_code, 200,\n f'Expected Response Code 200, received {response.status_code} instead.')",
"def test_view_all_products(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertIn('NY_denims', str(reply['products']))\n self.assertEqual(resp.status_code, 200)",
"def test_detail_odd_product_id_permission(self):\n self.assertEqual(self.product_2.id, 2)\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n response = self.client.get(\n '/api/products/{}/'.format(self.product_2.id), **headers)\n\n expected = {'detail': 'You do not have permission to perform this action.'}\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)",
"def test_product_exists_in_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product exists in the Inventory!')\n self.assertEqual(resp.status_code, 400)",
"def test_get_specific_product(self):\n product = Product.objects.first()\n url, parsed = self.prepare_urls('v1:product-detail', subdomain=self.company.subdomain, kwargs={'pk':product.id})\n\n response = self.client.get(url, HTTP_HOST=parsed.netloc)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n self.authenticate_user()\n response = self.client.get(url, HTTP_HOST=parsed.netloc)\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def test_only_admin_can_create_product(self):\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)",
"def test_cannot_view_a_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def setUp(self):\r\n super(EditProductTest, self).setUp()\r\n self.product = self.F.ProductFactory.create()\r\n self.add_perm(\"manage_products\")",
"def test_admin_product_list(self):\n # No products\n rv = self.app.get('/admin/product')\n assert rv.status_code == 200\n\n # More than 0 products\n self.add_product(self.TESTPRODUCT1, 1)\n self.add_product(self.TESTPRODUCT2, 2)\n rv = self.app.get('/admin/product', follow_redirects=True)\n assert self.TESTPRODUCT1['name'] in rv.data\n assert self.TESTPRODUCT2['name'] in rv.data",
"def test_product_by_category_logged_in_user(self):\n\n # Log In user that is not the seller, check that the products not created by the user do show up\n self.client.login(username=\"test_user\", password=\"secret\")\n\n # Search for product category 1\n response = self.client.get(reverse('website:product_by_category', args=(1,)))\n\n # Check that status code is 200\n self.assertEqual(response.status_code, 200)\n\n # Make sure that only the product associated with product category 1 is displayed\n self.assertIn('<h6 class=\"mb-1\">Test Product</h6>'.encode(), response.content)\n self.assertNotIn('<h6 class=\"mb-1\">Test Product2</h6>'.encode(), response.content)\n\n # Search for product category 2\n response_non_seller = self.client.get(reverse('website:product_by_category', args=(2,)))\n\n # Check that the status code is 200\n self.assertEqual(response_non_seller.status_code, 200)\n\n # Make sure that only the product associated with product category 2 is displayed\n self.assertNotIn('<h6 class=\"mb-1\">Test Product</h6>'.encode(), response_non_seller.content)\n self.assertIn('<h6 class=\"mb-1\">Test Product2</h6>'.encode(), response_non_seller.content)",
"def setUp(self):\r\n super(EditProductVersionEnvironmentsViewTest, self).setUp()\r\n self.productversion = self.F.ProductVersionFactory.create()\r\n self.add_perm(\"manage_products\")",
"def test_items_in_cart(self):\r\n self.add_to_cart()\r\n self.request.user = self.user\r\n context = user_has_cart_context_processor(self.request)\r\n self.assertTrue(context['display_shopping_cart'])"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests that a user cannot view a product in the Inventory with blacklisted token | def test_cannot_view_a_product_with_blacklisted_token(self):
    resp = self.admin_register()
    reply = self.admin_login()
    token = reply['token']
    product = dict(
        prod_name='NY_denims',
        category='denims',
        stock=20,
        price=150
    )
    resp = self.client.post(
        '/api/v1/products',
        content_type='application/json',
        data=json.dumps(product),
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())
    self.assertEqual(reply['message'], 'Product successfully added to Inventory!')
    self.assertEqual(resp.status_code, 201)
    resp = self.client.delete(
        '/api/v1/logout',
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())
    self.assertEqual(reply['message'], 'You are successfully logged out!')
    self.assertEqual(resp.status_code, 200)
    resp = self.client.get(
        '/api/v1/products/1',
        headers={'Authorization': 'Bearer {}'.format(token)}
    )
    reply = json.loads(resp.data.decode())
    self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')
    self.assertEqual(resp.status_code, 401) | [
"def test_cannot_view_all_products_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_cannot_create_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_buy_disabled(self):\n # Make sure we can't buy the product\n self.app.post('/user/jane/buy', data=dict(barcode='0012345678905'), follow_redirects=True)\n with app.test_request_context():\n app.preprocess_request()\n assert streck.models.user.User('jane').debt() == 0.0\n\n # Make sure it appears on the user page\n rv = self.app.get('/user/jane', follow_redirects=True)\n assert b'The Product' not in rv.data",
"def test_remove_not_allowed(self):\n # Make sure we can't remove the product\n self.app.post('/user/%s/buy' % app.config['REMOVE_JOBBMAT_BARCODE'], data=dict(barcode=self.TESTPRODUCT_BAD['barcode']), follow_redirects=True)\n with app.test_request_context():\n app.preprocess_request()\n assert streck.models.user.User(app.config['JOBBMAT_BARCODE']).debt() == 0.0\n\n # Make sure it appears on the user page\n rv = self.app.get('/user/%s' % app.config['JOBBMAT_BARCODE'], follow_redirects=True)\n assert self.TESTPRODUCT_BAD['name'] not in rv.data",
"def test_missing_token(self):\n\n self.register_test_admin_account()\n token = \"\"\n\n response = self.app_test_client.get(\n '{}/products'.format(self.BASE_URL),\n headers=dict(Authorization=token),\n content_type='application/json'\n )\n\n self.assertEqual(response.status_code, 401)\n self.assertEqual(helper_functions.convert_response_to_json(\n response)[\"Message\"], \"You need to login\")",
"def test_cannot_update_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_view_products_from_empty_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'There are no products yet!')\n self.assertEqual(resp.status_code, 404)",
"def test_view_product_that_doesnot_exist_in_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product does not exist!')\n self.assertEqual(resp.status_code, 404)",
"def test_get_cart_items_unauthorized(self):\n with self.assertRaises(ResourceAccessError):\n self.cart_item_manager.get_cart_items('123', '1')",
"def test_attendant_cannot_make_a_sale_with_blacklisted_token(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_cannot_get_sale_record_with_blacklisted_token(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/sales/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_check_permission_list_non_authenticated(self):\n\n self.client.logout()\n\n error_message = \"Authentication credentials were not provided.\"\n\n url = reverse(\"price-list\")\n response = self.client.get(url, format=\"json\")\n\n assert response.status_code == status.HTTP_401_UNAUTHORIZED\n assert str(response.data[\"detail\"]) == error_message",
"def test_admin_cannot_delete_product_from_empty_Inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n \n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'There are no products in Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_check_permission_list_non_authenticated(self):\n\n self.client.logout()\n\n error_message = \"Authentication credentials were not provided.\"\n\n url = reverse(\"offer-list\")\n response = self.client.get(url, format=\"json\")\n\n assert response.status_code == status.HTTP_401_UNAUTHORIZED\n assert str(response.data[\"detail\"]) == error_message",
"def assert_cannot_view(obj):\n selenium_utils.open_url(obj.url)\n assert ui_utils.is_error_403()",
"def test_cannot_sale_nonexistant_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"Paris_heels\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product is not in the Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_buy_missing_user(self):\n rv = self.app.post('/user/nobody/buy', data=dict(barcode='0012345678905'), follow_redirects=True)\n assert b'Anvรคndaren existerar inte!' in rv.data",
"def test_security_on_get(self):\n # test the listing url\n product = Product.objects.all()[0]\n url = '/product/xml/'\n response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)\n # test the product detail url\n url = '/product/xml/%s/' % product.item_number\n Response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)",
"def test_list_products_without_authentication(self):\n response = self.client.get('/api/1.0/products/')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests that a user can view all products in the Inventory | def test_view_all_products(self):
resp = self.admin_register()
reply = self.admin_login()
token = reply['token']
product = dict(
prod_name='NY_denims',
category='denims',
stock=20,
price=150
)
resp = self.client.post(
'/api/v1/products',
content_type='application/json',
data=json.dumps(product),
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'Product successfully added to Inventory!')
self.assertEqual(resp.status_code, 201)
resp = self.client.get(
'/api/v1/products',
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertIn('NY_denims', str(reply['products']))
self.assertEqual(resp.status_code, 200) | [
"def test_list_products_logged_in(self):\n\n # Log in seller\n self.client.login(username=\"test_seller\", password=\"secret\")\n\n # Issue a GET request\n response = self.client.get(reverse('website:products'))\n\n # Check that the response is 200\n self.assertEqual(response.status_code, 200)\n\n # Check that the logged in user does not recieve any products to view because the only products available are the ones they have for sale\n self.assertEqual(len(response.context['products']),0)\n\n # Check that the product title appears in the rendered HTML content\n self.assertNotIn('<h5 class=\"card-title mb-0\">Test Product</h5>'.encode(), response.content)\n self.assertNotIn('<h5 class=\"card-title mb-0\">Test Product2</h5>'.encode(), response.content)",
"def test_view_products_from_empty_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'There are no products yet!')\n self.assertEqual(resp.status_code, 404)",
"def test_get_all_products(self):\n\n self.register_user()\n results = self.login_user()\n access_token = json.loads(results.data.decode())['access_token']\n\n response = self.client().post(\n '/products/',\n headers=dict(Authorization=\"Bearer \" + access_token),\n data=self.product\n )\n self.assertEqual(response.status_code, 201)\n get_req = self.client().get(\n '/products/',\n headers=dict(Authorization=\"Bearer \" + access_token)\n )\n self.assertEqual(get_req.status_code, 200)\n self.assertIn('Product 1', str(get_req.data))",
"def test_admin_product_list(self):\n # No products\n rv = self.app.get('/admin/product')\n assert rv.status_code == 200\n\n # More than 0 products\n self.add_product(self.TESTPRODUCT1, 1)\n self.add_product(self.TESTPRODUCT2, 2)\n rv = self.app.get('/admin/product', follow_redirects=True)\n assert self.TESTPRODUCT1['name'] in rv.data\n assert self.TESTPRODUCT2['name'] in rv.data",
"def test_list_available_product(self):\n view = AvailableProductListView.as_view({'get': 'list'})\n uri = reverse('products:list-available-products')\n request = self.factory.get(uri, HTTP_AUTHORIZATION='Token {}'.format(self.token_user.key))\n request.user = self.user['user']\n response = view(request)\n self.assertEqual(response.status_code, 200,\n f'Expected Response Code 200, received {response.status_code} instead.')",
"def test_view_a_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertIn('NY_denims', str(reply['product']))\n self.assertEqual(resp.status_code, 200)",
"def test_security_on_get(self):\n # test the listing url\n product = Product.objects.all()[0]\n url = '/product/xml/'\n response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)\n # test the product detail url\n url = '/product/xml/%s/' % product.item_number\n Response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)",
"def test_product_list(self):\n self.url = reverse(\"product-list\")\n response = self.client.get(self.url, **self.auth_headers)\n self.assertEqual(200, response.status_code)",
"def test_admin_product_info(self):\n self.add_product(self.TESTPRODUCT1, 1)\n\n # Missing product\n rv = self.app.get('/admin/product/nothing', follow_redirects=True)\n assert b'Produkten existerar inte!' in rv.data\n\n # Existing product\n rv = self.app.get('/admin/product/%s' % self.TESTPRODUCT1['barcode'], follow_redirects=True)\n assert self.TESTPRODUCT1['name'] in rv.data",
"def test_cannot_view_all_products_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_permission_detail(self):\n\n self.client.logout()\n\n error_message = \"Authentication credentials were not provided.\"\n\n data = {\n \"user\": self.user_1,\n \"item\": self.item_1,\n \"quantity\": 90,\n }\n inventory = Inventory.objects.create(**data)\n\n url = reverse(\"inventory-detail\", None, {inventory.id})\n response = self.client.get(url, format=\"json\")\n\n assert response.status_code == status.HTTP_401_UNAUTHORIZED\n assert str(response.data[\"detail\"]) == error_message",
"def test_add_all(self): #SAUCE-LAB-7\n login = LoginPage(self.driver)\n login.open()\n inventory_page = login.login(_DEF_USER, _DEF_PASSWORD)\n first_item = inventory_page.products\n first_item: InventoryItem\n for item in first_item:\n item.add_to_cart()\n if inventory_page.header.get_total_cart_items() == 6:\n print('\\n')\n print(f'Total of products {inventory_page.header.get_total_cart_items()}')\n else:\n print('\\n')\n print('Not all items were added')",
"def test_product_by_category_logged_in_user(self):\n\n # Log In user that is not the seller, check that the products not created by the user do show up\n self.client.login(username=\"test_user\", password=\"secret\")\n\n # Search for product category 1\n response = self.client.get(reverse('website:product_by_category', args=(1,)))\n\n # Check that status code is 200\n self.assertEqual(response.status_code, 200)\n\n # Make sure that only the product associated with product category 1 is displayed\n self.assertIn('<h6 class=\"mb-1\">Test Product</h6>'.encode(), response.content)\n self.assertNotIn('<h6 class=\"mb-1\">Test Product2</h6>'.encode(), response.content)\n\n # Search for product category 2\n response_non_seller = self.client.get(reverse('website:product_by_category', args=(2,)))\n\n # Check that the status code is 200\n self.assertEqual(response_non_seller.status_code, 200)\n\n # Make sure that only the product associated with product category 2 is displayed\n self.assertNotIn('<h6 class=\"mb-1\">Test Product</h6>'.encode(), response_non_seller.content)\n self.assertIn('<h6 class=\"mb-1\">Test Product2</h6>'.encode(), response_non_seller.content)",
"def test_items_in_cart(self):\r\n self.add_to_cart()\r\n self.request.user = self.user\r\n context = user_has_cart_context_processor(self.request)\r\n self.assertTrue(context['display_shopping_cart'])",
"def test_two_inventories_needed(self):\n quantity = 25\n product = Product.objects.get(name='test product')\n\n inventories = Inventory.objects.filter(\n product=product\n ).order_by('input_date')\n\n expected_response = [\n dict(inventory=inventories[1], quantity=5),\n dict(inventory=inventories[2], quantity=20),\n ]\n\n actual_response = get_items_from_inventory(product, quantity)\n\n self._test_list(actual_response, expected_response, quantity)",
"def test_filter_by_product(self):\r\n one = self.factory.create(name=\"Foo 1\")\r\n self.factory.create(name=\"Foo 2\")\r\n\r\n res = self.get(\r\n params={\"filter-product\": str(one.product.id)})\r\n\r\n self.assertInList(res, \"Foo 1\")\r\n self.assertNotInList(res, \"Foo 2\")",
"def available_products(request):\n Product.objects.all()\n return HttpResponse(\"View not implemented!\")",
"def test_vault_get_all_vault_items(self):\n pass",
"def test_is_product_show(self):\n\n self.selenium.get(\"http://localhost:8000/\")\n response = self.selenium.find_element(By.ID, \"id_product_name\")\n response.send_keys(\"frosties\")\n response.send_keys(Keys.ENTER)\n self.assertTemplateUsed('selected_product.html')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests that a user cannot view all products in the Inventory with blacklisted token | def test_cannot_view_all_products_with_blacklisted_token(self):
resp = self.admin_register()
reply = self.admin_login()
token = reply['token']
product = dict(
prod_name='NY_denims',
category='denims',
stock=20,
price=150
)
resp = self.client.post(
'/api/v1/products',
content_type='application/json',
data=json.dumps(product),
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'Product successfully added to Inventory!')
self.assertEqual(resp.status_code, 201)
resp = self.client.delete(
'/api/v1/logout',
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'You are successfully logged out!')
self.assertEqual(resp.status_code, 200)
resp = self.client.get(
'/api/v1/products',
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')
self.assertEqual(resp.status_code, 401) | [
"def test_cannot_view_a_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_cannot_create_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_buy_disabled(self):\n # Make sure we can't buy the product\n self.app.post('/user/jane/buy', data=dict(barcode='0012345678905'), follow_redirects=True)\n with app.test_request_context():\n app.preprocess_request()\n assert streck.models.user.User('jane').debt() == 0.0\n\n # Make sure it appears on the user page\n rv = self.app.get('/user/jane', follow_redirects=True)\n assert b'The Product' not in rv.data",
"def test_get_unauthenticated_items(self):\n\n number_of_items = 5\n create_multiple_items(number_of_items)\n\n url = reverse('sysmon_alert_items')\n\n response = self.client.get(url)\n\n self.assertTrue(response.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_view_products_from_empty_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'There are no products yet!')\n self.assertEqual(resp.status_code, 404)",
"def test_get_cart_items_unauthorized(self):\n with self.assertRaises(ResourceAccessError):\n self.cart_item_manager.get_cart_items('123', '1')",
"def test_cannot_get_all_sale_records_with_blacklisted_token(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n \n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/sales',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_check_permission_list_non_authenticated(self):\n\n self.client.logout()\n\n error_message = \"Authentication credentials were not provided.\"\n\n url = reverse(\"price-list\")\n response = self.client.get(url, format=\"json\")\n\n assert response.status_code == status.HTTP_401_UNAUTHORIZED\n assert str(response.data[\"detail\"]) == error_message",
"def test_check_permission_list_non_authenticated(self):\n\n self.client.logout()\n\n error_message = \"Authentication credentials were not provided.\"\n\n url = reverse(\"offer-list\")\n response = self.client.get(url, format=\"json\")\n\n assert response.status_code == status.HTTP_401_UNAUTHORIZED\n assert str(response.data[\"detail\"]) == error_message",
"def test_list_products_without_authentication(self):\n response = self.client.get('/api/1.0/products/')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_remove_not_allowed(self):\n # Make sure we can't remove the product\n self.app.post('/user/%s/buy' % app.config['REMOVE_JOBBMAT_BARCODE'], data=dict(barcode=self.TESTPRODUCT_BAD['barcode']), follow_redirects=True)\n with app.test_request_context():\n app.preprocess_request()\n assert streck.models.user.User(app.config['JOBBMAT_BARCODE']).debt() == 0.0\n\n # Make sure it appears on the user page\n rv = self.app.get('/user/%s' % app.config['JOBBMAT_BARCODE'], follow_redirects=True)\n assert self.TESTPRODUCT_BAD['name'] not in rv.data",
"def test_missing_token(self):\n\n self.register_test_admin_account()\n token = \"\"\n\n response = self.app_test_client.get(\n '{}/products'.format(self.BASE_URL),\n headers=dict(Authorization=token),\n content_type='application/json'\n )\n\n self.assertEqual(response.status_code, 401)\n self.assertEqual(helper_functions.convert_response_to_json(\n response)[\"Message\"], \"You need to login\")",
"def test_check_permission_list_non_authenticated(self):\n\n self.client.logout()\n\n error_message = \"Authentication credentials were not provided.\"\n\n url = reverse(\"currency-list\")\n response = self.client.get(url, format=\"json\")\n\n assert response.status_code == status.HTTP_401_UNAUTHORIZED\n assert str(response.data[\"detail\"]) == error_message",
"def test_check_permission_list_non_authenticated(self):\n\n self.client.logout()\n\n error_message = \"Authentication credentials were not provided.\"\n\n url = reverse(\"item-list\")\n response = self.client.get(url, format=\"json\")\n\n assert response.status_code == status.HTTP_401_UNAUTHORIZED\n assert str(response.data[\"detail\"]) == error_message",
"def test_admin_cannot_delete_product_from_empty_Inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n \n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'There are no products in Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_with_unnecessary_vulnerability_id_in_allowed_list():",
"def test_security_on_get(self):\n # test the listing url\n product = Product.objects.all()[0]\n url = '/product/xml/'\n response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)\n # test the product detail url\n url = '/product/xml/%s/' % product.item_number\n Response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)",
"def test_attendant_cannot_make_a_sale_with_blacklisted_token(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_cannot_get_sale_record_with_blacklisted_token(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/sales/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests that a user cannot view a product that does not exist in the Inventory | def test_view_product_that_doesnot_exist_in_inventory(self):
resp = self.admin_register()
reply = self.admin_login()
token = reply['token']
product = dict(
prod_name='NY_denims',
category='denims',
stock=20,
price=150
)
resp = self.client.post(
'/api/v1/products',
content_type='application/json',
data=json.dumps(product),
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'Product successfully added to Inventory!')
self.assertEqual(resp.status_code, 201)
resp = self.client.get(
'/api/v1/products/2',
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'This product does not exist!')
self.assertEqual(resp.status_code, 404) | [
"def test_cannot_sale_nonexistant_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"Paris_heels\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product is not in the Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_no_items_in_cart(self):\r\n self.request.user = self.user\r\n context = user_has_cart_context_processor(self.request)\r\n self.assertFalse(context['display_shopping_cart'])",
"def test_view_products_from_empty_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'There are no products yet!')\n self.assertEqual(resp.status_code, 404)",
"def test_admin_cannot_delete_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/2',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'This product does not exist in Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_admin_cannot_delete_product_from_empty_Inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n \n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'There are no products in Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_insufficient_units(self):\n product = Product.objects.get(name='test product')\n other_product = Product.objects.get(name='test other product')\n\n with self.assertRaises(Exception):\n get_items_from_inventory(product, 26)\n\n with self.assertRaises(Exception):\n get_items_from_inventory(product, 100)\n\n with self.assertRaises(Exception):\n get_items_from_inventory(other_product, 1)",
"def test_create_purchase_non_existing_product(self):\n data = {\"user_id\": 1, \"product_id\": 5, \"amount\": 1}\n res = self.post(url=\"/purchases\", role=\"admin\", data=data)\n self.assertEqual(res.status_code, 401)\n self.assertException(res, exc.EntryNotFound)\n self.assertEqual(len(Purchase.query.all()), 0)",
"def test_cannot_view_a_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_buy_disabled(self):\n # Make sure we can't buy the product\n self.app.post('/user/jane/buy', data=dict(barcode='0012345678905'), follow_redirects=True)\n with app.test_request_context():\n app.preprocess_request()\n assert streck.models.user.User('jane').debt() == 0.0\n\n # Make sure it appears on the user page\n rv = self.app.get('/user/jane', follow_redirects=True)\n assert b'The Product' not in rv.data",
"def test_get_cart_items_unauthorized(self):\n with self.assertRaises(ResourceAccessError):\n self.cart_item_manager.get_cart_items('123', '1')",
"def test_remove_not_allowed(self):\n # Make sure we can't remove the product\n self.app.post('/user/%s/buy' % app.config['REMOVE_JOBBMAT_BARCODE'], data=dict(barcode=self.TESTPRODUCT_BAD['barcode']), follow_redirects=True)\n with app.test_request_context():\n app.preprocess_request()\n assert streck.models.user.User(app.config['JOBBMAT_BARCODE']).debt() == 0.0\n\n # Make sure it appears on the user page\n rv = self.app.get('/user/%s' % app.config['JOBBMAT_BARCODE'], follow_redirects=True)\n assert self.TESTPRODUCT_BAD['name'] not in rv.data",
"def test_add_cart_item_unauthorized_user(self):\n cart_id = self.cart_item_manager.create_cart('111', 'test cart', False)\n with self.assertRaises(ResourceAccessError):\n self.cart_item_manager.add_cart_item(catalog=self.catalog,\n user_id='112',\n cart_id=cart_id,\n entity_id='entity_id',\n entity_type='entity_type',\n entity_version='entity_version')",
"def assert_cannot_view(obj):\n selenium_utils.open_url(obj.url)\n assert ui_utils.is_error_403()",
"def test_create_purchase_non_existing_user(self):\n data = {\"user_id\": 6, \"product_id\": 3, \"amount\": 4}\n res = self.post(url=\"/purchases\", role=\"admin\", data=data)\n self.assertEqual(res.status_code, 401)\n self.assertException(res, exc.EntryNotFound)\n self.assertEqual(len(Purchase.query.all()), 0)",
"def test_product_buy_missing_goods(self):\n result_buy = self.info_list.product_buy(\"ั
ะปะตะฑ ัะตััะน ั
ะปะตะฑะพะทะฐะฒะพะด\", 3)\n self.assertFalse(result_buy)",
"def test_non_admin_cannot_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)",
"def test_detail_odd_product_id_permission(self):\n self.assertEqual(self.product_2.id, 2)\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n response = self.client.get(\n '/api/products/{}/'.format(self.product_2.id), **headers)\n\n expected = {'detail': 'You do not have permission to perform this action.'}\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)",
"def test_product_is_not_installed(self):\n portal_quickinstaller = getToolByName(self.portal, 'portal_quickinstaller')\n self.assertFalse(portal_quickinstaller.isProductInstalled('collective.pfg.norobots'),\n 'package appears to be already installed')",
"def test_no_enable_shoppingcart(self):\r\n self.add_to_cart()\r\n self.request.user = self.user\r\n context = user_has_cart_context_processor(self.request)\r\n self.assertFalse(context['display_shopping_cart'])"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests that a user cannot view products from empty Inventory | def test_view_products_from_empty_inventory(self):
resp = self.admin_register()
reply = self.admin_login()
token = reply['token']
resp = self.client.get(
'/api/v1/products',
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'There are no products yet!')
self.assertEqual(resp.status_code, 404) | [
"def test_admin_cannot_delete_product_from_empty_Inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n \n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'There are no products in Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_insufficient_units(self):\n product = Product.objects.get(name='test product')\n other_product = Product.objects.get(name='test other product')\n\n with self.assertRaises(Exception):\n get_items_from_inventory(product, 26)\n\n with self.assertRaises(Exception):\n get_items_from_inventory(product, 100)\n\n with self.assertRaises(Exception):\n get_items_from_inventory(other_product, 1)",
"def test_view_product_that_doesnot_exist_in_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product does not exist!')\n self.assertEqual(resp.status_code, 404)",
"def test_no_items_in_cart(self):\r\n self.request.user = self.user\r\n context = user_has_cart_context_processor(self.request)\r\n self.assertFalse(context['display_shopping_cart'])",
"def test_cannot_sale_nonexistant_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"Paris_heels\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product is not in the Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_get_cart_items_unauthorized(self):\n with self.assertRaises(ResourceAccessError):\n self.cart_item_manager.get_cart_items('123', '1')",
"def test_product_buy_missing_goods(self):\n result_buy = self.info_list.product_buy(\"ั
ะปะตะฑ ัะตััะน ั
ะปะตะฑะพะทะฐะฒะพะด\", 3)\n self.assertFalse(result_buy)",
"def check_inventory(self) -> None:\n self.store.check_inventory()",
"def test_inventory_1(self):\n sys, ob1, feat1, per1, r1 = self.set_up_1()\n self.assertFalse(sys.inventory())",
"def test_app_inventory_not_empty():\n assert app_deployer.app_inventory is not None",
"async def test_product_list_not_found(self, client: aiokatcp.Client) -> None:\n await assert_request_fails(client, \"product-list\", \"product\")",
"def test_buy_missing_product(self):\n rv = self.app.post('/user/john/buy', data=dict(barcode='nothing'), follow_redirects=True)\n assert b'Produkten existerar inte!' in rv.data",
"def test_create_purchase_non_existing_product(self):\n data = {\"user_id\": 1, \"product_id\": 5, \"amount\": 1}\n res = self.post(url=\"/purchases\", role=\"admin\", data=data)\n self.assertEqual(res.status_code, 401)\n self.assertException(res, exc.EntryNotFound)\n self.assertEqual(len(Purchase.query.all()), 0)",
"def test_cannot_view_all_products_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_cannot_view_a_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_buy_disabled(self):\n # Make sure we can't buy the product\n self.app.post('/user/jane/buy', data=dict(barcode='0012345678905'), follow_redirects=True)\n with app.test_request_context():\n app.preprocess_request()\n assert streck.models.user.User('jane').debt() == 0.0\n\n # Make sure it appears on the user page\n rv = self.app.get('/user/jane', follow_redirects=True)\n assert b'The Product' not in rv.data",
"def test_list_products_without_authentication(self):\n response = self.client.get('/api/1.0/products/')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_admin_cannot_create_product_with_empty_fields(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='',\n category='',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Please enter all fields!')\n self.assertEqual(resp.status_code, 400)",
"def test_get_unauthenticated_items(self):\n\n number_of_items = 5\n create_multiple_items(number_of_items)\n\n url = reverse('sysmon_alert_items')\n\n response = self.client.get(url)\n\n self.assertTrue(response.status_code, status.HTTP_401_UNAUTHORIZED)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests that a user cannot view a product with invalid id | def test_view_product_with_invalid_id(self):
resp = self.admin_register()
reply = self.admin_login()
token = reply['token']
product = dict(
prod_name='NY_denims',
category='denims',
stock=20,
price=150
)
resp = self.client.post(
'/api/v1/products',
content_type='application/json',
data=json.dumps(product),
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'Product successfully added to Inventory!')
self.assertEqual(resp.status_code, 201)
resp = self.client.get(
'/api/v1/products/2kk',
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'Try an interger for product id')
self.assertEqual(resp.status_code, 400) | [
"def test_wrong_products_id(self):\n\t\tresponse = self.client.get('/api/V1/products/a', content_type=\"application/json\")\n\t\tself.assertEqual(response.status_code,404)",
"def test_detail_odd_product_id_permission(self):\n self.assertEqual(self.product_2.id, 2)\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n response = self.client.get(\n '/api/products/{}/'.format(self.product_2.id), **headers)\n\n expected = {'detail': 'You do not have permission to perform this action.'}\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)",
"def test_bad_productversion_id(self):\r\n res = self.post({\"productversion\": 75}, status=302)\r\n\r\n self.assertRedirects(\r\n res,\r\n reverse(\r\n \"manage_caseversion_edit\",\r\n kwargs=dict(caseversion_id=self.cv.id)\r\n )\r\n )",
"def test_ProductsDataViewSet_with_get_request_Invalid_id(self):\n # Request the data by API call.\n response = self.client.get('/api/productsdata/{}/'.format(-1))\n\n # Checking the response\n self.assertEqual(response.status_code, 404)\n self.assertEqual(response.json()['detail'], 'Not found.')",
"def test_cant_fetch_a_sale_with_an_invalid_id(self):\n with self.client:\n # post a product\n _ = self.create_product()\n # post a sale\n _ = self.create_sale()\n\n response = self.user_login()\n data = json.loads(response.data.decode())\n token = data['token']\n\n # get sale by id\n response = self.client.get(\n '/api/v2/sales/123456789',\n content_type='application/json',\n headers=dict(Authorization='Bearer ' + token)\n )\n\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 400)\n self.assertEqual(data['status'], 'unsuccessful')\n self.assertEqual(\n data['message'],\n 'sale with ID 123456789 doesnot exist')",
"def test_request_membership_form_with_an_invalid_user_id(self):\n pass",
"def test_get_404_due_invalid_id(self):\n\n result = testing_app.get(f'{CURRENT_API_VER}/user/?user_id=-1')\n print(result.json)\n assert result.status_code == 404\n assert result.json['message'] == \"Instance of <class 'data.models.User'> with id = -1 not found\"",
"def test_view_with_invalid_pk(self):\n response = self.client.get(self.get_url(self.htsv.pk + 1))\n self.assertEqual(response.status_code, 404)",
"def test_security_on_get(self):\n # test the listing url\n product = Product.objects.all()[0]\n url = '/product/xml/'\n response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)\n # test the product detail url\n url = '/product/xml/%s/' % product.item_number\n Response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)",
"def assert_cannot_view(obj):\n selenium_utils.open_url(obj.url)\n assert ui_utils.is_error_403()",
"def test_query_recommendation_by_product_id_bad_request(self):\n service.Recommendation(id=0, product_id=PS4, recommended_product_id=CONTROLLER, recommendation_type=\"accessory\").save()\n service.Recommendation(id=0, product_id=PS4, recommended_product_id=PS5, recommendation_type=\"up-sell\").save()\n\n resp = self.app.get('/recommendations?product_id=' + 'PS5')\n self.assertEqual(resp.status_code, 500)",
"def test_view_product_that_doesnot_exist_in_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product does not exist!')\n self.assertEqual(resp.status_code, 404)",
"def test_retrieve_specific_product_not_found(self):\n response = self.app_test_client.get(\n '{}/product/1000'.format(self.BASE_URL),\n headers=dict(Authorization=self.token),\n content_type='application/json'\n )\n\n self.assertEqual(response.status_code, 404)",
"def test_invalid_aids_cannot_become_reviewable(client, contributor):\n\n form_url = reverse(\"aid_create_view\")\n client.force_login(contributor)\n aids = Aid.objects.filter(author=contributor)\n assert aids.count() == 0\n\n invalid_data = {\"name\": \"Almost empty aid\", \"_status\": \"reviewa\"}\n res = client.post(form_url, data=invalid_data)\n assert res.status_code == 200\n assert aids.count() == 0",
"def test_cannot_sale_nonexistant_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"Paris_heels\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product is not in the Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_view_as_regular_user(self):\n response = self.client.get(self.url)\n self.assertEqual(403, response.status_code)",
"def test_cannot_view_a_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_get_user_bad_id(self):\r\n res = self.backend.get_user(-1)\r\n\r\n self.assertIsNone(res)",
"def test_create_purchase_non_existing_product(self):\n data = {\"user_id\": 1, \"product_id\": 5, \"amount\": 1}\n res = self.post(url=\"/purchases\", role=\"admin\", data=data)\n self.assertEqual(res.status_code, 401)\n self.assertException(res, exc.EntryNotFound)\n self.assertEqual(len(Purchase.query.all()), 0)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests that a product cannot be updated with a blacklisted token | def test_cannot_update_product_with_blacklisted_token(self):
resp = self.admin_register()
reply = self.admin_login()
token = reply['token']
product = dict(
prod_name='NY_denims',
category='denims',
stock=20,
price=150
)
resp = self.client.post(
'/api/v1/products',
content_type='application/json',
data=json.dumps(product),
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'Product successfully added to Inventory!')
self.assertEqual(resp.status_code, 201)
resp = self.client.delete(
'/api/v1/logout',
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'You are successfully logged out!')
self.assertEqual(resp.status_code, 200)
product_update = dict(
prod_name='NY_jeans',
category='denims',
stock=50,
price=180
)
resp = self.client.put(
'/api/v1/products/1',
content_type='application/json',
data=json.dumps(product_update),
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')
self.assertEqual(resp.status_code, 401) | [
"def test_cannot_create_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_cannot_view_a_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_update_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], \"This product doesn't exists in the Inventory!\")\n self.assertEqual(resp.status_code, 400)",
"def test_unauthorized_product_update(self):\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)",
"def test_cannot_view_all_products_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_update_a_product_loyalty_weight_with_invalid_value(self):\n data = {\"product_id\": self.product.id, \"loyalty_value\": -1}\n response = self.query_with_token(\n self.access_token_master,\n update_a_product_loyalty_weight.format(**data))\n self.assertIn(\"errors\", response)",
"def test_invalid_token(self):\n client = APIClient()\n client.credentials(HTTP_TOKEN=\"uuyytttt\")\n response = client.put(\n \"/api/profile/update/\", self.profile_update, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)",
"def test_user_update_failed_no_token(self):\n self.url = reverse('users-detail', kwargs={'id': self.user_id})\n headers = self.headers.copy()\n headers.pop('token')\n data = self.user_data.copy()\n response = self.client.put(self.url, data, format='json', **headers)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(response.data.get('detail'), 'Token is required.')",
"def test_cannot_update_user_with_blacklisted_token(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n user = dict(\n name='Summer Lover',\n username='lover',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.put(\n '/api/v1/users/2',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_user_update_failed_invalid_token(self):\n self.url = reverse('users-detail', kwargs={'id': self.user_id})\n headers = self.headers.copy()\n data = self.user_data.copy()\n response = self.client.put(self.url, data, format='json', **headers)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(response.data.get('detail'), 'Authorization failed due to an Invalid token.')",
"def test_check_permission_update_non_authenticated(self):\n\n error_message = \"Authentication credentials were not provided.\"\n\n data = {\n \"item\": self.item_2.id,\n \"status\": \"SELL\",\n \"entry_quantity\": 700,\n \"price\": Decimal(\"3222.23\"),\n }\n response = self.post_offer(data)\n\n self.client.logout()\n\n url = reverse(\"price-detail\", None, {response.data[\"id\"]})\n new_data = {\n \"item\": self.item_1.id,\n \"status\": \"PURCHASE\",\n \"entry_quantity\": 999,\n \"price\": Decimal(\"1.00\"),\n }\n put_response = self.client.put(url, new_data, format=\"json\")\n\n assert put_response.status_code == status.HTTP_401_UNAUTHORIZED\n assert str(put_response.data[\"detail\"]) == error_message",
"def test_suspend_not_available(self):\n resp = self.app.put(\n \"/customers/{}/suspend\".format(0),\n content_type=\"application/json\",\n )\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)",
"def test_new_bid_miss(self):",
"def testInvalidToken(self):\r\n self._mobile_dev.push_token = 'invalid-scheme:push-token'\r\n self._mobile_dev.Update(self._client, self._OnDeviceUpdate)",
"def test_product_buy_missing_goods(self):\n result_buy = self.info_list.product_buy(\"ั
ะปะตะฑ ัะตััะน ั
ะปะตะฑะพะทะฐะฒะพะด\", 3)\n self.assertFalse(result_buy)",
"def test_update_user_no_token(self):\n\n response = self.update_user(self.user_update)\n self.assertEqual(response.status_code, 403)\n msg = \"Authentication credentials were not provided.\"\n self.assertEqual(response.data[\"detail\"], msg)",
"def test_cannot_get_sale_record_with_blacklisted_token(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/sales/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_security_on_put(self):\n # test the update url\n product = Product.objects.all()[0]\n url = '/product/xml/%s/' % product.item_number\n response = self.client.put(url,{'description':'my new description'})\n self.failUnlessEqual(response.status_code, 401)",
"def test_update_user_invalid_token(self):\n\n self.client.credentials(\n HTTP_AUTHORIZATION='Bearer ' + self.invalid_token)\n response = self.update_user(self.user_update)\n self.assertEqual(response.status_code, 403)\n msg = \"Invalid token. Token decode failed\"\n self.assertEqual(response.data[\"detail\"], msg)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
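Several of the negatives above (the blacklisted-token cases) follow the same pattern: log out, then expect 'Invalid Authentication, Please Login!' with a 401. For orientation only, a minimal sketch of such a token blacklist is given below; the names and structure are assumptions, not the project's actual code.

    # Minimal sketch (assumed): an in-memory token blacklist consulted on each request.
    BLACKLIST = set()

    def logout(token):
        # Mirrors the asserted logout message and 200 status in the tests above.
        BLACKLIST.add(token)
        return {'message': 'You are successfully logged out!'}, 200

    def check_token(token):
        # Returns an error tuple for blacklisted tokens, None if the token is still usable.
        if token in BLACKLIST:
            return {'message': 'Invalid Authentication, Please Login!'}, 401
        return None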
Test that you can't update a nonexistent product | def test_update_nonexistant_product(self):
resp = self.admin_register()
reply = self.admin_login()
token = reply['token']
product_update = dict(
prod_name='NY_jeans',
category='denims',
stock=50,
price=180
)
resp = self.client.put(
'/api/v1/products/1',
content_type='application/json',
data=json.dumps(product_update),
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], "This product doesn't exists in the Inventory!")
self.assertEqual(resp.status_code, 400) | [
"def test_update_product_not_found(self):\n test_product = ProductFactory()\n resp = self.app.put(\n \"/products/0\",\n json=test_product.serialize(),\n content_type=\"application/json\")\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)",
"def test_edit_no_product(self):\r\n mozlogger.info('test_edit_no_product')\r\n\r\n # create fixture\r\n fixture1 = self.factory\r\n backend_obj = self.backend_object(fixture1.id)\r\n obj_id = str(fixture1.id)\r\n fields = self.new_object_data\r\n product = fields.pop(u'product')\r\n\r\n # do put\r\n res = self.put(\r\n self.get_detail_url(self.resource_name, obj_id),\r\n params=self.credentials,\r\n data=fields\r\n )\r\n\r\n # make sure object has been updated in the database\r\n fields[u'product'] = product\r\n fixture1 = self.refresh(fixture1)\r\n backend_data = self.clean_backend_data(fixture1)\r\n\r\n self.maxDiff = None\r\n self.assertEqual(fields, backend_data)",
"def test_unauthorized_product_update(self):\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)",
"def test_c_dont_adds_product_if_not_in_stock(self, client, product):\n product.stock = 0\n product.save()\n\n response = client.post(\n reverse('shoppingcart:add-product'),\n json.dumps({'id_': product.pk}),\n content_type='application/json',\n HTTP_X_REQUESTED_WITH='XMLHttpRequest'\n )\n\n assert response.status_code == 400\n assert product.line_set.all().exists() is False",
"def test_admin_cannot_delete_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/2',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'This product does not exist in Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_update_not_updatable_attr(self):\n usage_to_update = 0\n self.assertRaises(exception.UnableUpdateValue, DB_API.alarming_update,\n self.alarm_platform_vcpus.id,\n {\"usage\": usage_to_update})\n\n settings_uuid_to_update = \"\"\n self.assertRaises(exception.UnableUpdateValue, DB_API.alarming_update,\n self.alarm_platform_vcpus.id,\n {\"settings_uuid\": settings_uuid_to_update})\n\n id_to_update = 0\n self.assertRaises(exception.UnableUpdateValue, DB_API.alarming_update,\n self.alarm_platform_vcpus.id,\n {\"id\": id_to_update})",
"def test_update_a_product_loyalty_weight_with_invalid_value(self):\n data = {\"product_id\": self.product.id, \"loyalty_value\": -1}\n response = self.query_with_token(\n self.access_token_master,\n update_a_product_loyalty_weight.format(**data))\n self.assertIn(\"errors\", response)",
"def test_update_transaction_dispute_item(self):\n pass",
"def test_cannot_sale_nonexistant_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"Paris_heels\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product is not in the Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_cannot_update_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_update_on_unique_field_raises(test_store):\n\n with pytest.raises(NotImplementedError):\n test_store.update(fields={\"name\": \"Andy\"})",
"def testProductUninstalled(self):\n self.failIf(self.qitool.isProductInstalled(\"NuPlone\"))",
"def test_user_update_procedure_failure(self):\n p1 = models.Procedure.objects.create(\n name='temp',\n overview='bla bla bla'\n )\n p1.speciality.set([self.speciality.pk])\n p1.save()\n\n res = self.client.get(PROCEDURE_URL)\n\n url = get_item_url(res.data[0]['id'])\n new_payload = {\n 'other_details': 'new details'\n }\n\n response = self.client.patch(url, new_payload, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)",
"def test_02_product_update(self):\n # Update new product state2 from default draft to sellable\n new_product = self.create_product()\n self.assertEqual(new_product.state2, 'draft')\n new_product.state2 = 'sellable'\n self.assertEqual(new_product.state2, 'sellable')\n\n # Same but to an existing demo product.\n demo_product = self.product_obj.browse(\n self.ref('product_lifecycle.product_product_4g'))\n self.assertEqual(demo_product.state2, 'sellable')\n demo_product.state2 = 'draft'\n self.assertEqual(demo_product.state2, 'draft')\n\n # Update new product invividual field (field defined in product.product\n # model).\n self.assertEqual(new_product.default_code, 'A2330')\n new_product.default_code = 'A2330-1'\n self.assertEqual(new_product.default_code, 'A2330-1')\n\n # Same but to an existing demo product.\n self.assertEqual(demo_product.default_code, 'A2329')\n demo_product.default_code = 'A2329-1'\n self.assertEqual(demo_product.default_code, 'A2329-1')\n\n # Update new product commom characteristic (field defined in\n # product.template) and check that affects the another product\n # variants\n self.assertFalse(new_product.description)\n new_product.description = 'This is a New Product'\n self.assertEqual(new_product.description, 'This is a New Product')\n self.assertEqual(demo_product.description, 'This is a New Product')\n demo_product.description = False\n self.assertFalse(demo_product.description)",
"def test_update_with_no_matches(test_store, andy, pandy, candy):\n n_updated = test_store.update(fields={\"age\": 15}, name=\"Mark\")\n assert n_updated == 0\n\n items = list(test_store.get_by())\n assert len(items) == 3\n assert andy in items\n assert pandy in items\n assert candy in items",
"def test_update_non_existent(cards_db):\n i = 123 # any number will do, db is empty\n with pytest.raises(InvalidCardId):\n cards_db.update_card(i, Card(summary=\"bar\", owner=\"not me\"))",
"def test_key_error_on_update(self, bad_mock):\n bad_mock.side_effect = KeyError()\n wishlist = Wishlist(\"fido\", \"1\")\n wishlist.save()\n wishlist.name = 'Fifi'\n wishlist.update()\n #self.assertEqual(pet.name, 'fido')",
"def test_unique_constraint_with_unset_product(self):\r\n new = self.model.ProductVersion()\r\n\r\n with self.assertRaises(ValidationError):\r\n new.full_clean()",
"def test_api_can_edit_non_existing_item(self):\n item = {'name': 'sugar'}\n # create a shoppinglist\n self.test_shoppinglist()\n # edit non-existing item\n res2 = self.client().put(\"/shoppinglists/1/items/1\",\n headers=dict(\n Authorization=\"Bearer \" + self.access_token),\n data=item)\n self.assertIn(\"No such item\", str(res2.data))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
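The row above asserts an exact message and a 400 status when updating a product that is not in the inventory. A minimal Flask-style sketch of the branch being exercised is shown below; the route shape, storage dict and handler name are assumptions inferred from the assertions, not the project's implementation.

    # Minimal sketch (assumed) of the update branch exercised by the test above.
    from flask import Flask, jsonify, request

    app = Flask(__name__)
    PRODUCTS = {}  # prod_id -> product dict (illustrative in-memory store)

    @app.route('/api/v1/products/<int:prod_id>', methods=['PUT'])
    def update_product(prod_id):
        payload = request.get_json()
        if prod_id not in PRODUCTS:
            # Message text (including its grammar) kept exactly as the test asserts it.
            return jsonify(message="This product doesn't exists in the Inventory!"), 400
        PRODUCTS[prod_id].update(payload)
        return jsonify(message='Product updated!'), 200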
Test that a product cannot be updated by an unauthorized user | def test_unauthorized_product_update(self):
resp = self.admin_create_user()
reply = self.attendant_login()
token = reply['token']
product_update = dict(
prod_name='NY_jeans',
category='denims',
stock=50,
price=180
)
resp = self.client.put(
'/api/v1/products/1',
content_type='application/json',
data=json.dumps(product_update),
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'Unauthorized Access!')
self.assertEqual(resp.status_code, 401) | [
"def test_check_permission_update_not_owner(self):\n\n error_message = \"Authentication credentials were not provided.\"\n\n data = {\n \"item\": self.item_2.id,\n \"status\": \"SELL\",\n \"entry_quantity\": 700,\n \"price\": Decimal(\"3222.23\"),\n }\n response = self.post_offer(data)\n\n self.client.logout()\n User.objects.create(username=\"testuser3\", password=\"testpassword\")\n self.client.login(username=\"testuser3\", password=\"testpassword\")\n\n url = reverse(\"offer-detail\", None, {response.data[\"id\"]})\n new_data = {\n \"item\": self.item_1.id,\n \"status\": \"PURCHASE\",\n \"entry_quantity\": 999,\n \"price\": Decimal(\"1.00\"),\n }\n put_response = self.client.put(url, new_data, format=\"json\")\n\n assert put_response.status_code == status.HTTP_401_UNAUTHORIZED\n assert str(put_response.data[\"detail\"]) == error_message",
"def test_post_update_unauthorized(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n user_url = reverse('user-detail', kwargs={'pk': self.superuser.id})\n title = 'Random New Title'\n body = 'Random New Body'\n response = self.client.put(url, {'title': title, 'body': body, 'user': user_url}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)",
"def test_check_permission_update_non_authenticated(self):\n\n error_message = \"Authentication credentials were not provided.\"\n\n data = {\n \"item\": self.item_2.id,\n \"status\": \"SELL\",\n \"entry_quantity\": 700,\n \"price\": Decimal(\"3222.23\"),\n }\n response = self.post_offer(data)\n\n self.client.logout()\n\n url = reverse(\"price-detail\", None, {response.data[\"id\"]})\n new_data = {\n \"item\": self.item_1.id,\n \"status\": \"PURCHASE\",\n \"entry_quantity\": 999,\n \"price\": Decimal(\"1.00\"),\n }\n put_response = self.client.put(url, new_data, format=\"json\")\n\n assert put_response.status_code == status.HTTP_401_UNAUTHORIZED\n assert str(put_response.data[\"detail\"]) == error_message",
"def test_update_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], \"This product doesn't exists in the Inventory!\")\n self.assertEqual(resp.status_code, 400)",
"def test_check_permission_update_non_authenticated(self):\n\n error_message = \"Authentication credentials were not provided.\"\n\n data = {\n \"currency\": self.currency_2.id,\n \"item\": self.item_2.id,\n \"price\": Decimal(\"2123.01\"),\n \"date\": \"2020-12-23T10:05:00Z\",\n }\n response = self.post_price(data)\n\n self.client.logout()\n\n url = reverse(\"price-detail\", None, {response.data[\"id\"]})\n new_data = {\n \"currency\": self.currency_1.id,\n \"item\": self.item_1.id,\n \"price\": Decimal(\"4234.01\"),\n \"date\": \"2020-10-14T13:05:00Z\",\n }\n put_response = self.client.put(url, new_data, format=\"json\")\n\n assert put_response.status_code == status.HTTP_401_UNAUTHORIZED\n assert str(put_response.data[\"detail\"]) == error_message",
"def test_unauthenticated_user_cannot_update(self):\n collaborator = Collaborator.objects.order_by('?').first()\n # This should be unauthorized as the user has not authenticated\n self.assertUnauthorized(\n \"/collaborators/{}/\".format(collaborator.pk),\n \"PATCH\",\n dict(role = Collaborator.Role.OWNER.name),\n )",
"def test_check_permission_update_not_owner(self):\n\n error_message = \"Authentication credentials were not provided.\"\n\n self.client.logout()\n User.objects.create(username=\"testuser3\", password=\"testpassword\")\n self.client.login(username=\"testuser3\", password=\"testpassword\")\n\n new_data = {\n \"item\": [\n self.item_1.id,\n self.item_2.id,\n self.item_3.id,\n ]\n }\n url = reverse(\"watchlist-detail\", None, {WatchList.objects.first().id})\n response = self.client.put(url, new_data, format=\"json\")\n\n assert response.status_code == status.HTTP_401_UNAUTHORIZED\n assert str(response.data[\"detail\"]) == error_message",
"def test_update_non_existent_user(self):\n\n self.token = self.get_user_token()\n self.email = self.user['email']\n User.objects.get(email=self.email).delete()\n self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self.token)\n response = self.update_user(self.user_update)\n self.assertEqual(response.status_code, 403)\n msg = \"No user matching this token\"\n self.assertEqual(response.data[\"detail\"], msg)",
"def test_check_permission_update_non_authenticated(self):\n\n error_message = \"Authentication credentials were not provided.\"\n\n data = {\n \"code\": \"AAPL\",\n \"name\": \"Apple\",\n \"details\": \"Stocks of Apple Inc.\",\n }\n response = self.post_item(data)\n\n self.client.logout()\n\n url = reverse(\"item-detail\", None, {response.data[\"id\"]})\n new_data = {\n \"code\": \"TSLA\",\n \"name\": \"Tesla\",\n \"details\": \"Stocks of Tesla Inc.\",\n }\n put_response = self.client.put(url, new_data, format=\"json\")\n\n assert put_response.status_code == status.HTTP_401_UNAUTHORIZED\n assert str(put_response.data[\"detail\"]) == error_message",
"def test_cannot_update_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_can_not_update_department(self):\n self.user = User.objects.create_user('user', 'user@cs.local', 'user', is_staff=False)\n self.client.login(username='user', password='user')\n response = self.client.put(reverse(self.url, args=[self.department.id]), self.data)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)",
"def test_can_not_update_company(self):\n self.user = User.objects.create_user('user', 'user@cs.local', 'user', is_staff=False)\n self.client.login(username='user', password='user')\n url = 'stat_app:company-detail'\n response = self.client.put(reverse(url, args=[self.company.id]), self.data)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)",
"def test_security_on_put(self):\n # test the update url\n product = Product.objects.all()[0]\n url = '/product/xml/%s/' % product.item_number\n response = self.client.put(url,{'description':'my new description'})\n self.failUnlessEqual(response.status_code, 401)",
"def test_check_permission_update_non_authenticated(self):\n\n error_message = \"Authentication credentials were not provided.\"\n\n data = {\"code\": \"BYN\", \"name\": \"Belarusian rubles\"}\n response = self.post_currency(data)\n\n self.client.logout()\n\n url = reverse(\"currency-detail\", None, {response.data[\"id\"]})\n new_data = {\n \"code\": \"EUR\",\n \"name\": \"Euro\",\n }\n put_response = self.client.put(url, new_data, format=\"json\")\n\n assert put_response.status_code == status.HTTP_401_UNAUTHORIZED\n assert str(put_response.data[\"detail\"]) == error_message",
"def test_check_permission_update_non_authenticated(self):\n\n error_message = \"Authentication credentials were not provided.\"\n\n self.client.logout()\n\n new_data = {\n \"item\": [\n self.item_1.id,\n self.item_2.id,\n self.item_3.id,\n ]\n }\n url = reverse(\"watchlist-detail\", None, {WatchList.objects.first().id})\n response = self.client.put(url, new_data, format=\"json\")\n\n assert response.status_code == status.HTTP_401_UNAUTHORIZED\n assert str(response.data[\"detail\"]) == error_message",
"def test_unauthenticated_user_cannot_update_article(self):\n response = self.update_article(\"\", \"how-to-train-your-dragon\", self.testArticle1)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)",
"def test_update_user_no_token(self):\n\n response = self.update_user(self.user_update)\n self.assertEqual(response.status_code, 403)\n msg = \"Authentication credentials were not provided.\"\n self.assertEqual(response.data[\"detail\"], msg)",
"def test_non_admin_update_one_equipment(self):\n self.client.login(username='ordinary_user', password='ordinary_password')\n kb = Equipment.objects.get(name=\"kettlebell\")\n url = reverse('equipment_detail', kwargs={'pk': kb.pk})\n data = {\n \"id\": kb.pk,\n \"name\": \"big kettlebell\",\n \"founder\": kb.founder.pk\n }\n response = self.client.put(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)",
"def test_modify_reusableitem_not_authenticated(self):\n self.client.logout()\n \n response = self.client.patch(get_reusable_item_1_url(self), {}, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that product cannot be updated with empty fields | def test_update_product_with_empty_fields(self):
resp = self.admin_register()
reply = self.admin_login()
token = reply['token']
product = dict(
prod_name='NY_denims',
category='denims',
stock=20,
price=150
)
resp = self.client.post(
'/api/v1/products',
content_type='application/json',
data=json.dumps(product),
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'Product successfully added to Inventory!')
self.assertEqual(resp.status_code, 201)
product_update = dict(
prod_name='',
category='',
stock=50,
price=180
)
resp = self.client.put(
'/api/v1/products/1',
content_type='application/json',
data=json.dumps(product_update),
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'prod_name and category cannot be empty!')
self.assertEqual(resp.status_code, 400) | [
"def test_update_product_required_fields(self):\n data = {\n 'pk': 1,\n 'name': None,\n 'description': '''\n Yogurt also spelled yoghurt, yogourt or yoghourt,\n is a food produced by bacterial fermentation of milk.\n '''\n }\n url = reverse('products:detail', kwargs={'pk': data['pk']})\n response = self.client.put(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(models.Product.objects.filter(name=None).count(), 0)",
"def test_update_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], \"This product doesn't exists in the Inventory!\")\n self.assertEqual(resp.status_code, 400)",
"def test_edit_no_product(self):\r\n mozlogger.info('test_edit_no_product')\r\n\r\n # create fixture\r\n fixture1 = self.factory\r\n backend_obj = self.backend_object(fixture1.id)\r\n obj_id = str(fixture1.id)\r\n fields = self.new_object_data\r\n product = fields.pop(u'product')\r\n\r\n # do put\r\n res = self.put(\r\n self.get_detail_url(self.resource_name, obj_id),\r\n params=self.credentials,\r\n data=fields\r\n )\r\n\r\n # make sure object has been updated in the database\r\n fields[u'product'] = product\r\n fixture1 = self.refresh(fixture1)\r\n backend_data = self.clean_backend_data(fixture1)\r\n\r\n self.maxDiff = None\r\n self.assertEqual(fields, backend_data)",
"def test_admin_cannot_create_product_with_empty_fields(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='',\n category='',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Please enter all fields!')\n self.assertEqual(resp.status_code, 400)",
"def test_product_cannot_create_with_invalid_details(self):\n res = self.client().post('/api/v1/products', data=json.dumps(self.empty_product), headers = {\"content-type\": \"application/json\"})\n self.assertEqual(res.status_code, 201)",
"def test_missing_update_fields(self):\n manager = self.manager_class()\n m = self.model.create(value='blah')\n resp = manager.update(dict(id=m.id), dict(value='duh', id='blah'))\n self.assertDictEqual(dict(value='duh', id=m.id), resp)\n m = self.model.filter(id=m.id).get()\n self.assertDictEqual(dict(value=m.value, id=m.id), resp)\n self.assertRaises(DoesNotExist, self.model.filter(id='blah').get)",
"def test_cannot_update_with_empty_field(self):\n\n self.client.login(username='notlogged', password='notlogged')\n group_fields = ['name', 'description']\n\n utils.test_cannot_post_with_empty_fields(self, self.url, group_fields)\n\n # Group is not updated.\n updated_group = Group.objects.get(pk=self.group.pk)\n self.assertEqual(updated_group.name, 'test')\n self.assertEqual(updated_group.description, 'test')\n self.assertIsNone(updated_group.last_edit_date)",
"def test_partial_update(self):\n self.assertEqual(Product.objects.count(), 2)\n self.assertEqual(self.product_1.name, 'Nike Vapor')\n\n payload = {\n 'name': 'Updated name',\n }\n\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(self.token_admin)\n }\n response = self.client.patch(\n '/api/products/{}/'.format(self.product_1.id),\n data=payload, content_type='application/json', **headers)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response['Content-Type'], 'application/json')\n self.assertEqual(Product.objects.count(), 2)\n\n product = Product.objects.get(id=self.product_1.id)\n self.assertEqual(product.name, 'Updated name')",
"def test_update_with_no_matches(test_store, andy, pandy, candy):\n n_updated = test_store.update(fields={\"age\": 15}, name=\"Mark\")\n assert n_updated == 0\n\n items = list(test_store.get_by())\n assert len(items) == 3\n assert andy in items\n assert pandy in items\n assert candy in items",
"def test_update_on_unique_field_raises(test_store):\n\n with pytest.raises(NotImplementedError):\n test_store.update(fields={\"name\": \"Andy\"})",
"def test_update_product_not_found(self):\n test_product = ProductFactory()\n resp = self.app.put(\n \"/products/0\",\n json=test_product.serialize(),\n content_type=\"application/json\")\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)",
"def test_02_product_update(self):\n # Update new product state2 from default draft to sellable\n new_product = self.create_product()\n self.assertEqual(new_product.state2, 'draft')\n new_product.state2 = 'sellable'\n self.assertEqual(new_product.state2, 'sellable')\n\n # Same but to an existing demo product.\n demo_product = self.product_obj.browse(\n self.ref('product_lifecycle.product_product_4g'))\n self.assertEqual(demo_product.state2, 'sellable')\n demo_product.state2 = 'draft'\n self.assertEqual(demo_product.state2, 'draft')\n\n # Update new product invividual field (field defined in product.product\n # model).\n self.assertEqual(new_product.default_code, 'A2330')\n new_product.default_code = 'A2330-1'\n self.assertEqual(new_product.default_code, 'A2330-1')\n\n # Same but to an existing demo product.\n self.assertEqual(demo_product.default_code, 'A2329')\n demo_product.default_code = 'A2329-1'\n self.assertEqual(demo_product.default_code, 'A2329-1')\n\n # Update new product commom characteristic (field defined in\n # product.template) and check that affects the another product\n # variants\n self.assertFalse(new_product.description)\n new_product.description = 'This is a New Product'\n self.assertEqual(new_product.description, 'This is a New Product')\n self.assertEqual(demo_product.description, 'This is a New Product')\n demo_product.description = False\n self.assertFalse(demo_product.description)",
"def test_update_not_updatable_attr(self):\n usage_to_update = 0\n self.assertRaises(exception.UnableUpdateValue, DB_API.alarming_update,\n self.alarm_platform_vcpus.id,\n {\"usage\": usage_to_update})\n\n settings_uuid_to_update = \"\"\n self.assertRaises(exception.UnableUpdateValue, DB_API.alarming_update,\n self.alarm_platform_vcpus.id,\n {\"settings_uuid\": settings_uuid_to_update})\n\n id_to_update = 0\n self.assertRaises(exception.UnableUpdateValue, DB_API.alarming_update,\n self.alarm_platform_vcpus.id,\n {\"id\": id_to_update})",
"def test_update_not_force(self):\n self.client.force_authenticate(user=self.admin)\n\n data = {\n 'period': reverse('period-detail', args=[self.period.id]),\n 'price': '10.00', # Will use Period's price if not provided\n 'start_time': LOCAL_TIMEZONE.localize(datetime(2130, 1, 15, 12)),\n 'end_time': LOCAL_TIMEZONE.localize(datetime(2130, 1, 15, 16)),\n }\n\n response = self.client.put(\n reverse(\n 'timeslot-detail',\n kwargs={'pk': 1},\n ),\n data,\n format='json',\n )\n\n content = {\n 'non_field_errors': [\n 'Trying to push an update that affects users without '\n 'providing `force_update` field.'\n ]\n }\n\n self.assertEqual(json.loads(response.content), content)\n\n self.assertEqual(\n response.status_code,\n status.HTTP_400_BAD_REQUEST,\n )",
"def test_model_fields_with_incorrect_values(self):\n\n self.assertNotEqual(self.warehouse.address, \"Wrong Test address\")\n self.assertNotEqual(self.warehouse.phone, \"+3435342322343\")",
"def test_update_a_product_loyalty_weight_with_invalid_value(self):\n data = {\"product_id\": self.product.id, \"loyalty_value\": -1}\n response = self.query_with_token(\n self.access_token_master,\n update_a_product_loyalty_weight.format(**data))\n self.assertIn(\"errors\", response)",
"def test_invalid_update_kwarg(self):\r\n with self.assertRaises(ValidationError):\r\n TestQueryUpdateModel.objects(partition=uuid4(), cluster=3).update(bacon=5000)",
"def test_invalid_update_kwarg(self):\n with self.assertRaises(ValidationError):\n TestQueryUpdateModel.objects(partition=uuid4(), cluster=3).update(bacon=5000)",
"def test_update_cart_invalid_attributes(self):\n user_id = '123'\n cart_id = self.cart_item_manager.create_cart(user_id, 'Cart1', False)\n self.cart_item_manager.update_cart(user_id, cart_id, {'InvalidAttribute': 'Cart2'})\n self.assertEqual('Cart1', self.cart_item_manager.get_cart(user_id, cart_id)['CartName'])"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
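The empty-field row above pins down a single validation message. Below is a hedged sketch of that check as a standalone function; the field names come from the request payload used in the test, everything else is an assumption.

    # Minimal sketch (assumed) of the empty-field validation the test above exercises.
    def validate_product_update(payload):
        """Return (message, status) on failure, or (None, None) when the payload is acceptable."""
        if not payload.get('prod_name') or not payload.get('category'):
            return 'prod_name and category cannot be empty!', 400
        return None, None

    # The payload from the test would be rejected:
    assert validate_product_update({'prod_name': '', 'category': '', 'stock': 50, 'price': 180}) == \
        ('prod_name and category cannot be empty!', 400)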
Test that admin can delete a product | def test_admin_delete_product(self):
resp = self.admin_register()
reply = self.admin_login()
token = reply['token']
product = dict(
prod_name='NY_denims',
category='denims',
stock=20,
price=150
)
resp = self.client.post(
'/api/v1/products',
content_type='application/json',
data=json.dumps(product),
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'Product successfully added to Inventory!')
self.assertEqual(resp.status_code, 201)
resp = self.client.delete(
'/api/v1/products/1',
content_type='application/json',
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'Product deleted!')
self.assertEqual(resp.status_code, 200) | [
"def test_non_admin_cannot_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)",
"def test_security_on_delete(self):\n # test the delete product url\n product = Product.objects.all()[0]\n url = '/product/xml/%s/' % product.item_number\n response = self.client.delete(url)\n self.failUnlessEqual(response.status_code, 401)",
"def test_get_deleted_product(self):\n product = self.add_product()\n product.is_deleted = True\n product.save()\n\n url = f'{self.url}{product.id}/'\n\n response = self.client.get(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n self.client.force_authenticate(user=self.admin_user)\n response = self.client.get(url, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def test_admin_cannot_delete_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/2',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'This product does not exist in Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_delete_product(self):\n\n # Create a couple test products\n self.test_create_product()\n self.test_create_product()\n\n # Attempt to delete the second product\n url = \"/products/2\"\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)\n response = self.client.delete(url, None, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n\n # Verify product was \"deleted\" (it's soft-deleted, but still won't\n # show in the GET request)\n url = \"/products\"\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)\n response = self.client.get(url, None, format='json')\n json_response = json.loads(response.content)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(json_response), 1)\n self.assertEqual(json_response[0][\"id\"], 1)",
"def test_delete_product_detail(self):\n product = Product(\n id=1,\n title='Beatles Blog',\n description='All the latest Beatles news.',\n published=True\n )\n product.save()\n url = reverse('product_detail', kwargs={'pk': 1})\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(Product.objects.count(), 0)",
"def test_products_ref_users_delete(self):\n pass",
"def test_post_delete_admin(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n self.client.force_authenticate(user=self.superuser)\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)",
"def test_delete_product(self):\n query_string = [('productID', 56)]\n response = self.client.open(\n '/product-master/delete/product/id',\n method='GET',\n content_type='multipart/form-data',\n query_string=query_string)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def test_non_admin_delete_one_equipment(self):\n self.client.login(username='ordinary_user', password='ordinary_password')\n initial_equipments = Equipment.objects.count()\n kb = Equipment.objects.get(name=\"kettlebell\")\n url = reverse('equipment_detail', kwargs={'pk': kb.pk})\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(Equipment.objects.count(), initial_equipments)",
"def test_delete(self):\n pass",
"def test_admin_cannot_delete_product_from_empty_Inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n \n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'There are no products in Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_products_ref_users_user_delete(self):\n pass",
"def test_admin_delete_one_equipment(self):\n self.client.login(username='admin_user', password='admin_password')\n initial_equipments = Equipment.objects.count()\n kb = Equipment.objects.get(name=\"kettlebell\")\n url = reverse('equipment_detail', kwargs={'pk': kb.pk})\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(Equipment.objects.count(), initial_equipments - 1)",
"def test_only_admin_can_create_product(self):\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)",
"def test_destroy_as_admin(self):\n auth_headers = self.get_token()\n resp = self.client.delete(reverse('question-detail', kwargs={'pk': 5}),\n **auth_headers)\n self.assertEqual(resp.status_code, 204)",
"def test_delete_item_using_delete(self):\n pass",
"def test_bulk_undelete(self):\r\n p = self.F.ProductFactory.create(name=\"Firefox\")\r\n p.delete()\r\n form = self.get(self.changelist_url).forms[\"changelist-form\"]\r\n form[\"action\"] = \"undelete\"\r\n form[\"_selected_action\"] = str(p.id)\r\n form.submit(\"index\", 0)\r\n\r\n self.assertEqual(self.refresh(p).deleted_on, None)",
"def test_unit_delete(self):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that a non-admin cannot delete a product | def test_non_admin_cannot_delete_product(self):
resp = self.admin_register()
reply = self.admin_login()
token = reply['token']
product = dict(
prod_name='NY_denims',
category='denims',
stock=20,
price=150
)
resp = self.client.post(
'/api/v1/products',
content_type='application/json',
data=json.dumps(product),
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'Product successfully added to Inventory!')
self.assertEqual(resp.status_code, 201)
resp = self.admin_create_user()
reply = self.attendant_login()
token = reply['token']
resp = self.client.delete(
'/api/v1/products/1',
content_type='application/json',
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'Unauthorized Access!')
self.assertEqual(resp.status_code, 401) | [
"def test_admin_cannot_delete_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/2',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'This product does not exist in Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_security_on_delete(self):\n # test the delete product url\n product = Product.objects.all()[0]\n url = '/product/xml/%s/' % product.item_number\n response = self.client.delete(url)\n self.failUnlessEqual(response.status_code, 401)",
"def test_only_admin_can_create_product(self):\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)",
"def test_admin_cannot_delete_product_from_empty_Inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n \n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'There are no products in Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_get_deleted_product(self):\n product = self.add_product()\n product.is_deleted = True\n product.save()\n\n url = f'{self.url}{product.id}/'\n\n response = self.client.get(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n self.client.force_authenticate(user=self.admin_user)\n response = self.client.get(url, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def test_create_not_admin(self):\n self.assertEqual(Product.objects.count(), 2)\n payload = {\n 'name': 'New product',\n 'category': self.category_1.id,\n 'sku': '11111111',\n 'description': 'New product description',\n 'price': 39.99\n }\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n expected = {'detail': 'You do not have permission to perform this action.'}\n response = self.client.post(\n '/api/products/', data=payload,\n content_type='application/json', **headers)\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)\n self.assertEqual(Product.objects.count(), 2)",
"def test_delete_resource_mutation_when_not_admin(self):\n CommonTestCases.user_token_assert_in(\n self,\n delete_resource,\n \"You are not authorized to perform this action\"\n )",
"def test_delete_prevention(self):\r\n env = self.F.EnvironmentFactory.create()\r\n self.F.ProductVersionFactory.create(environments=[env])\r\n\r\n with self.assertRaises(self.model.ProtectedError):\r\n env.delete()",
"def test_admin_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Product deleted!')\n self.assertEqual(resp.status_code, 200)",
"def test_non_admin_delete_one_equipment(self):\n self.client.login(username='ordinary_user', password='ordinary_password')\n initial_equipments = Equipment.objects.count()\n kb = Equipment.objects.get(name=\"kettlebell\")\n url = reverse('equipment_detail', kwargs={'pk': kb.pk})\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(Equipment.objects.count(), initial_equipments)",
"def test_products_ref_users_delete(self):\n pass",
"def test_post_delete_unauthorized(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)",
"def test_plant_delete_normal_users(self):\n url = reverse('plant-detail', kwargs={\"pk\": self.plant.pk})\n self.client.force_authenticate(user=self.user)\n response = self.client.delete(url, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)",
"def test_remove_not_allowed(self):\n # Make sure we can't remove the product\n self.app.post('/user/%s/buy' % app.config['REMOVE_JOBBMAT_BARCODE'], data=dict(barcode=self.TESTPRODUCT_BAD['barcode']), follow_redirects=True)\n with app.test_request_context():\n app.preprocess_request()\n assert streck.models.user.User(app.config['JOBBMAT_BARCODE']).debt() == 0.0\n\n # Make sure it appears on the user page\n rv = self.app.get('/user/%s' % app.config['JOBBMAT_BARCODE'], follow_redirects=True)\n assert self.TESTPRODUCT_BAD['name'] not in rv.data",
"def test_deletable(self):\r\n env = self.F.EnvironmentFactory.create()\r\n self.F.ProductVersionFactory.create(environments=[env])\r\n\r\n self.assertFalse(env.deletable)",
"def test_products_ref_users_user_delete(self):\n pass",
"def test_delete_prevention(self):\r\n el = self.F.ElementFactory.create(name=\"Debian\")\r\n env = self.F.EnvironmentFactory.create()\r\n env.elements.add(el)\r\n\r\n with self.assertRaises(self.model.ProtectedError):\r\n el.category.delete()",
"def test_user_delete_procedure_failure(self):\n p1 = models.Procedure.objects.create(\n name='temp',\n overview='bla bla bla'\n )\n p1.speciality.set([self.speciality.pk])\n p1.save()\n\n res = self.client.get(PROCEDURE_URL)\n\n url = get_item_url(res.data[0]['id'])\n del_procedure = self.client.delete(url)\n\n self.assertEqual(del_procedure.status_code,\n status.HTTP_403_FORBIDDEN)",
"def test_delete_prevention(self):\r\n el = self.F.ElementFactory.create(name=\"Debian\")\r\n env = self.F.EnvironmentFactory.create()\r\n env.elements.add(el)\r\n\r\n with self.assertRaises(self.model.ProtectedError):\r\n el.delete()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
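The delete rows in this stretch of the dump (admin delete and non-admin delete above, empty-inventory and nonexistent-product cases below) each assert a distinct message and status. A single hedged sketch covering those branches follows; the role lookup and storage are assumptions, not the project's code.

    # Minimal sketch (assumed) of the role-gated delete branch behind these tests.
    def delete_product(prod_id, current_user, products):
        """current_user is a dict with a 'role' key; products maps prod_id -> product."""
        if current_user.get('role') != 'admin':
            return {'message': 'Unauthorized Access!'}, 401
        if not products:
            return {'message': 'There are no products in Inventory!'}, 404
        if prod_id not in products:
            return {'message': 'This product does not exist in Inventory!'}, 404
        del products[prod_id]
        return {'message': 'Product deleted!'}, 200

    # Example: an attendant is rejected before any inventory lookup happens.
    assert delete_product(1, {'role': 'attendant'}, {1: {'prod_name': 'NY_denims'}})[1] == 401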
Test that admin cannot delete a product from an empty Inventory | def test_admin_cannot_delete_product_from_empty_Inventory(self):
resp = self.admin_register()
reply = self.admin_login()
token = reply['token']
resp = self.client.delete(
'/api/v1/products/1',
content_type='application/json',
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'There are no products in Inventory!')
self.assertEqual(resp.status_code, 404) | [
"def test_admin_cannot_delete_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/2',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'This product does not exist in Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_non_admin_cannot_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)",
"def test_non_admin_delete_one_equipment(self):\n self.client.login(username='ordinary_user', password='ordinary_password')\n initial_equipments = Equipment.objects.count()\n kb = Equipment.objects.get(name=\"kettlebell\")\n url = reverse('equipment_detail', kwargs={'pk': kb.pk})\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(Equipment.objects.count(), initial_equipments)",
"def test_admin_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Product deleted!')\n self.assertEqual(resp.status_code, 200)",
"def test_delete_inventory(self):\n # save the current number of inventories for later comparision\n inventory_count = self.get_inventory_count()\n # delete a inventory\n resp = self.app.delete('/inventories/1', content_type='application/json')\n self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(len(resp.data), 0)\n new_count = self.get_inventory_count()\n self.assertEqual(new_count, inventory_count - 1)",
"def test_admin_delete_one_equipment(self):\n self.client.login(username='admin_user', password='admin_password')\n initial_equipments = Equipment.objects.count()\n kb = Equipment.objects.get(name=\"kettlebell\")\n url = reverse('equipment_detail', kwargs={'pk': kb.pk})\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(Equipment.objects.count(), initial_equipments - 1)",
"def test_view_products_from_empty_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'There are no products yet!')\n self.assertEqual(resp.status_code, 404)",
"def test_vault_delete_vault_item(self):\n pass",
"def test_deletehardwares_item(self):\n pass",
"def test_view_product_that_doesnot_exist_in_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product does not exist!')\n self.assertEqual(resp.status_code, 404)",
"def test_bulk_undelete(self):\r\n p = self.F.ProductFactory.create(name=\"Firefox\")\r\n p.delete()\r\n form = self.get(self.changelist_url).forms[\"changelist-form\"]\r\n form[\"action\"] = \"undelete\"\r\n form[\"_selected_action\"] = str(p.id)\r\n form.submit(\"index\", 0)\r\n\r\n self.assertEqual(self.refresh(p).deleted_on, None)",
"def test_security_on_delete(self):\n # test the delete product url\n product = Product.objects.all()[0]\n url = '/product/xml/%s/' % product.item_number\n response = self.client.delete(url)\n self.failUnlessEqual(response.status_code, 401)",
"def test_delete_item_using_delete(self):\n pass",
"def check_and_delete(self, inventory): # used in a transaction block only so dont initiate a transaction here\n try:\n lines = inventory.lines\n for i in lines:\n if i.quantity == 0:\n i.delete((i,))\n # inventory.reload()\n inventory.save()\n chk = inventory.lines\n if len(chk) == 0:\n inventory.state = 'cancel'\n inventory.save()\n inventory.delete((inventory,))\n return True\n except Exception:\n if settings.level == 10:\n logger.exception('raised exception')\n return False",
"def test_cannot_sale_nonexistant_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"Paris_heels\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product is not in the Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_inventory_1(self):\n sys, ob1, feat1, per1, r1 = self.set_up_1()\n self.assertFalse(sys.inventory())",
"def test_update_inventory(self):\n pass",
"def test_update_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], \"This product doesn't exists in the Inventory!\")\n self.assertEqual(resp.status_code, 400)",
"def test_delete_nveto_gain_item(self):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that admin cannot delete a nonexistent product | def test_admin_cannot_delete_nonexistant_product(self):
resp = self.admin_register()
reply = self.admin_login()
token = reply['token']
product = dict(
prod_name='NY_denims',
category='denims',
stock=20,
price=150
)
resp = self.client.post(
'/api/v1/products',
content_type='application/json',
data=json.dumps(product),
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'Product successfully added to Inventory!')
self.assertEqual(resp.status_code, 201)
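        # attempt to delete product id 2, which was never added to the Inventory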
resp = self.client.delete(
'/api/v1/products/2',
content_type='application/json',
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'This product does not exist in Inventory!')
self.assertEqual(resp.status_code, 404) | [
"def test_non_admin_cannot_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)",
"def test_security_on_delete(self):\n # test the delete product url\n product = Product.objects.all()[0]\n url = '/product/xml/%s/' % product.item_number\n response = self.client.delete(url)\n self.failUnlessEqual(response.status_code, 401)",
"def test_admin_cannot_delete_product_from_empty_Inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n \n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'There are no products in Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_non_admin_delete_one_equipment(self):\n self.client.login(username='ordinary_user', password='ordinary_password')\n initial_equipments = Equipment.objects.count()\n kb = Equipment.objects.get(name=\"kettlebell\")\n url = reverse('equipment_detail', kwargs={'pk': kb.pk})\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(Equipment.objects.count(), initial_equipments)",
"def test_admin_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Product deleted!')\n self.assertEqual(resp.status_code, 200)",
"def test_products_ref_users_delete(self):\n pass",
"def test_get_deleted_product(self):\n product = self.add_product()\n product.is_deleted = True\n product.save()\n\n url = f'{self.url}{product.id}/'\n\n response = self.client.get(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n self.client.force_authenticate(user=self.admin_user)\n response = self.client.get(url, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def test_bulk_undelete(self):\r\n p = self.F.ProductFactory.create(name=\"Firefox\")\r\n p.delete()\r\n form = self.get(self.changelist_url).forms[\"changelist-form\"]\r\n form[\"action\"] = \"undelete\"\r\n form[\"_selected_action\"] = str(p.id)\r\n form.submit(\"index\", 0)\r\n\r\n self.assertEqual(self.refresh(p).deleted_on, None)",
"def test_delete_product_detail(self):\n product = Product(\n id=1,\n title='Beatles Blog',\n description='All the latest Beatles news.',\n published=True\n )\n product.save()\n url = reverse('product_detail', kwargs={'pk': 1})\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(Product.objects.count(), 0)",
"def test_admin_delete_one_equipment(self):\n self.client.login(username='admin_user', password='admin_password')\n initial_equipments = Equipment.objects.count()\n kb = Equipment.objects.get(name=\"kettlebell\")\n url = reverse('equipment_detail', kwargs={'pk': kb.pk})\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(Equipment.objects.count(), initial_equipments - 1)",
"def test_unit_delete(self):\n pass",
"def test_delete(self):\n pass",
"def test_products_ref_users_user_delete(self):\n pass",
"def test_delete_item_using_delete(self):\n pass",
"def test_delete_peas_id_delete(self):\n pass",
"def test_unlike_product_does_not_exist(self):\n\n # Attempt to unlike a product that doesn't exist\n url = \"/products/12312312/like\"\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)\n response = self.client.delete(url, None, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)",
"def test_create_not_admin(self):\n self.assertEqual(Product.objects.count(), 2)\n payload = {\n 'name': 'New product',\n 'category': self.category_1.id,\n 'sku': '11111111',\n 'description': 'New product description',\n 'price': 39.99\n }\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n expected = {'detail': 'You do not have permission to perform this action.'}\n response = self.client.post(\n '/api/products/', data=payload,\n content_type='application/json', **headers)\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)\n self.assertEqual(Product.objects.count(), 2)",
"def test_deletehardwares_item(self):\n pass",
"def test_delete_prevention(self):\r\n env = self.F.EnvironmentFactory.create()\r\n self.F.ProductVersionFactory.create(environments=[env])\r\n\r\n with self.assertRaises(self.model.ProtectedError):\r\n env.delete()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test ComBat feature harmonization. | def test_combat():
# Check if example data directory exists
example_data_dir = th.find_exampledatadir()
# Check if example data required exists
features = glob.glob(os.path.join(example_data_dir, 'examplefeatures_Patient*.hdf5'))
if len(features) < 7:
        message = 'Too few example features found for ComBat testing! ' +\
'Run the create_example_data script from the WORC exampledata ' +\
'directory!'
raise WORCValueError(message)
elif len(features) > 7:
        message = 'Too many example features found for ComBat testing! ' +\
'Run the create_example_data script from the WORC exampledata ' +\
'directory!'
raise WORCValueError(message)
objectlabels = os.path.join(example_data_dir, 'objectlabels.csv')
# Python
config = os.path.join(example_data_dir, 'ComBatConfig_python.ini')
features_train_out = [f.replace('examplefeatures_', 'examplefeatures_ComBat_python_') for f in features]
# First run synthetic test
# Synthetictest()
# # Run the Combat function: only for training
# ComBat(features_train_in=features,
# labels_train=objectlabels,
# config=config,
# features_train_out=features_train_out)
# # Run the Combat function: now for train + testing
ComBat(features_train_in=features[0:4],
labels_train=objectlabels,
config=config,
features_train_out=features_train_out[0:4],
features_test_in=features[4:],
labels_test=objectlabels,
features_test_out=features_train_out[4:])
# # Matlab
# config = os.path.join(example_data_dir, 'ComBatConfig_matlab.ini')
# features_train_out = [f.replace('examplefeatures_', 'examplefeatures_ComBat_matlab_') for f in features]
#
# # # Run the Combat function: only for training
# ComBat(features_train_in=features,
# labels_train=objectlabels,
# config=config,
# features_train_out=features_train_out)
#
# # Run the Combat function: now for train + testing
# ComBat(features_train_in=features[0:4],
# labels_train=objectlabels,
# config=config,
# features_train_out=features_train_out[0:4],
# features_test_in=features[4:],
# labels_test=objectlabels,
# features_test_out=features_train_out[4:])
# Remove the feature files
# for i in glob.glob(os.path.join(example_data_dir, '*features_ComBat*.hdf5')):
# os.remove(i) | [
"def test_single_feature_label():\n pass",
"def test_workbench_scenarios(self):\n result_title = 'Adaptive Numeric Input XBlock'\n basic_scenario = \"<adaptivenumericinput />\"\n test_result = self.xblock.workbench_scenarios()\n self.assertEquals(result_title, test_result[0][0])\n self.assertIn(basic_scenario, test_result[0][1])",
"def run_feature_extraction_tests():\n test_feature_extraction()\n test_distributed_feature_extraction()\n test_multimodel_feature_extraction()\n test_distributed_multimodel_feature_extraction()",
"def test_scenario_analysis(self):\n pass",
"def test_category_and_its_feature_dep(self):\n class RunnerBlah(Runner):\n def __init__(self, renv):\n super(RunnerBlah, self).__init__(renv)\n self.register_feature_class('bravo', Feature)\n self.register_feature_category_class(\n 'alpha', features=['bravo'], defaults=['bravo'])\n self.register_feature_class(\n 'foxtrot', Feature, requires=['alpha', 'bravo'])\n self.register_feature_category_class('echo', features=['foxtrot'])\n\n renv = create_runtime(RunnerBlah)\n renv.create_runner('runner')\n\n ctrl = renv.feature_ctrl\n\n total_order, _ = ctrl.get_activation_order(['foxtrot'])\n self.assertEqual(['bravo', 'foxtrot'], total_order)",
"def feature_cli():",
"def test_two_features(self):\n\n self.check_deckfile(\"tests/twofeatures.set\", 2)",
"def test_features(boston):\n assert boston.num_features == 13\n assert boston.feature_names == [\n \"CRIM\",\n \"ZN\",\n \"INDUS\",\n \"CHAS\",\n \"NOX\",\n \"RM\",\n \"AGE\",\n \"DIS\",\n \"RAD\",\n \"TAX\",\n \"PTRATIO\",\n \"B\",\n \"LSTAT\",\n ]",
"def init_features():\n TEST_FEATURES.append(feature_1)",
"def run_bash_with_features(self):\n self.bash_command += \" --use_feature_units\"\n self.run_bash()",
"def test_predictor():",
"def test_holms_test():\n holms_test = apply_holms_test(EXPERIMENT.results_, control_oversampler=None)\n assert set(holms_test.Classifier.unique()) == set(EXPERIMENT.classifiers_names_)\n assert len(holms_test) == len(CLASSIFIERS)",
"def test_features(self):\n assert list(parser.generate_commands(yaml.load(\n '- my_command: {name: my_name}'))) == [('my_command', {'name': 'my_name'})]",
"def test_change_algorithm(self):\n extloader = ExtensionLoader()\n pipeline = Pipeline(extloader.cats_container)\n pipeline.new_category('Preprocessing',1)\n self.assertTrue(pipeline.change_algorithm(1,'Blur'))",
"def test_text_classifier_vaporise(self):\n pass",
"def ConstrTest():\n with open(path.join(MAIN_PATH, TEST)) as f:\n for line in f:\n line = line.strip().split(\"\\t\")\n src, dest = line[1:]\n features = Features(src, dest)\n test_instances.append(features)",
"def test(): \n\t\treturn [\"vice.multizone\", \n\t\t\t[\n\t\t\t\tmig_matrix_row.test(run = False), \n\t\t\t\tmig_matrix.test(run = False), \n\t\t\t\tmig_specs.test(run = False), \n\t\t\t\tzone_array.test(run = False), \n\t\t\t\t_multizone.test(run = False), \n\t\t\t\tsrc_test(run = False) \n\t\t\t]\n\t\t]",
"def main():\n\n scenario = 2\n verbose = 3\n\n ### Generic for all scenario - Data Pre processing -\n ### Removal of ['neutrophil', 'serumLevelsOfWhiteBloodCell', 'lymphocytes'] due to the significant lack of information.\n data = PreProcess(\"./data.csv\", ['neutrophil', 'serumLevelsOfWhiteBloodCell', 'lymphocytes'])\n data.loadDataset()\n data.cleanDataAttributes()\n data.labelEncodings()\n\n if scenario == 1:\n scenario_1(data, verbose)\n elif scenario == 2:\n scenario_2(data, verbose)\n elif scenario == 3:\n scenario_3(data)\n elif scenario == 4:\n scenario_4(data)\n elif scenario == 5:\n scenario_5(data, verbose)\n elif scenario == 6:\n scenario_6(data)\n else:\n help(main)",
"def test_sensitivity_analysis(self):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns true for all hostclasses which aren't tagged as nonZDD hostclasses | def is_deployable(self, hostclass):
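        # deployable when the hostclass is explicitly marked deployable, or when it has no entry at all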
return ((hostclass in self._hostclasses and
is_truthy(self._hostclasses[hostclass].get("deployable"))) or
hostclass not in self._hostclasses) | [
"def IsNoHost(self):\n if self.no_host:\n return True\n return any([node.no_host for node in self.GetAncestorGroups()])",
"def is_opaque(self, classobj):\n try:\n return self.instance_vars[classobj] == []\n except KeyError:\n return False",
"def has_ghosts(self):\n return not np.all(self.mesh.discretization.ghosts == 0)",
"def include_up_hosts(nmap_host):\n if nmap_host.status == 'up':\n return True\n return False",
"def test_no_unlisted_classes_derived_from_Target(self):\n forcebalance_modules=[module[:-3] for module in os.listdir(forcebalance.__path__[0])\n if re.compile(\".*\\.py$\").match(module)\n and module not in [\"__init__.py\"]]\n for module in forcebalance_modules:\n m = __import__('forcebalance.' + module)\n objects = dir(eval('m.' + module))\n for object in objects:\n object = eval('m.'+module+'.'+object)\n if type(object) == abc.ABCMeta:\n implemented = [i for i in forcebalance.objective.Implemented_Targets.itervalues()]\n # list of documented exceptions\n exclude = ['Target',\n 'AbInitio',\n 'Interaction',\n 'Interaction_GMX',\n 'Liquid',\n 'BindingEnergy',\n 'LeastSquares',\n 'Vibration',\n 'Moments']\n if object not in implemented and object.__name__ not in exclude:\n self.fail(\"Unknown class '%s' not listed in Implemented_Targets\" % object.__name__)",
"def test_no_unlisted_classes_derived_from_Target(self):\n self.skipTest(\"Not sure if test is working properly.\")\n forcebalance_modules=[module[:-3] for module in os.listdir(forcebalance.__path__[0])\n if re.compile(\".*\\.py$\").match(module)\n and module not in [\"__init__.py\"]]\n for module in forcebalance_modules:\n # LPW: I don't think dcdlib should be imported this way.\n print(module)\n if module == \"_dcdlib\": continue\n m = __import__('forcebalance.' + module)\n objs = dir(eval('m.' + module))\n print(objs)\n for obj in objs:\n obj = eval('m.'+module+'.'+obj)\n if type(obj) == abc.ABCMeta:\n implemented = [i for i in forcebalance.objective.Implemented_Targets.values()]\n # list of documented exceptions\n # Basically, platform-independent targets are excluded.\n exclude = ['Target',\n 'AbInitio',\n 'Interaction',\n 'Interaction_GMX',\n 'Liquid',\n 'Lipid',\n 'BindingEnergy',\n 'LeastSquares',\n 'Vibration',\n 'Thermo',\n 'Hydration',\n 'Moments']\n print(obj)\n if obj not in implemented and obj.__name__ not in exclude:\n self.fail(\"Unknown class '%s' not listed in Implemented_Targets\" % obj.__name__)",
"def any_public_tests(self):\n return any([not t.hidden for t in self.tests])",
"def test_no_unlisted_classes_derived_from_Target(self):\n forcebalance_modules=[module[:-3] for module in os.listdir(forcebalance.__path__[0])\n if re.compile(\".*\\.py$\").match(module)\n and module not in [\"__init__.py\"]]\n for module in forcebalance_modules:\n # LPW: I don't think dcdlib should be imported this way.\n self.logger.debug(module)\n # Skip over smirnoff_hack because it is not intended to contain any Target implementations.\n if module in [\"_dcdlib\", \"smirnoff_hack\"]: continue\n m = __import__('forcebalance.' + module)\n objs = dir(eval('m.' + module))\n self.logger.debug(objs)\n for obj in objs:\n obj = eval('m.'+module+'.'+obj)\n if inspect.isclass(obj) and issubclass(obj, forcebalance.target.Target):\n implemented = [i for i in forcebalance.objective.Implemented_Targets.values()]\n # list of documented exceptions\n # Basically, platform-independent targets are excluded.\n exclude = ['Target',\n 'AbInitio',\n 'Interaction',\n 'Interaction_GMX',\n 'Liquid',\n 'Lipid',\n 'BindingEnergy',\n 'LeastSquares',\n 'Vibration',\n 'Hessian',\n 'Thermo',\n 'Hydration',\n 'Moments', \n 'OptGeoTarget',\n 'TorsionProfileTarget']\n self.logger.debug(obj)\n if obj not in implemented and obj.__name__ not in exclude:\n pytest.fail(\"Unknown class '%s' not listed in Implemented_Targets\" % obj.__name__)",
"def is_ncar_host():\n hostname = socket.getfqdn()\n \n return any([re.compile(ncar_host).search(hostname) \n for ncar_host in ['cheyenne', 'casper', 'hobart']])",
"def any_public_tests(test_cases):\n return any(not test.hidden for test in test_cases)",
"def _should_reject_unexamined(self, base_cls):\n result = (\n self.serialize_type(base_cls) not in self.classes_examined\n and base_cls.__module__ not in self.modules_examined\n and not qcore.inspection.is_cython_class(base_cls)\n )\n if not result:\n self.unexamined_base_classes.add(base_cls)\n return result",
"def has_nomo_class(self, elem):\n return self.nomo_class in self.get_classes(elem)",
"def _is_pod_host_networked(self, pod_uid):\n for pod in self.pod_list['items']:\n if pod.get('metadata', {}).get('uid', '') == pod_uid:\n return pod.get('spec', {}).get('hostNetwork', False)\n return False",
"def all_same_class(instances):\r\n class_labels = []\r\n for instance in instances:\r\n if instance[-1] not in class_labels:\r\n class_labels.append(instance[-1])\r\n \r\n if len(class_labels) == 1:\r\n return True\r\n else:\r\n return False",
"def is_ssh_up_on_all_instances(self, stack_id):\n instances = self.get_instance_public_ips(\n self.cfn.get_stack_instance_ids(stack_id))\n if not instances:\n return False\n if all([ssh.is_ssh_up(i) for i in instances]):\n return True\n return False",
"def is_virtual_network_host():\n return False",
"def is_builtin_dataclass(_cls: Type[Any]) -> bool:\n import dataclasses\n\n return not hasattr(_cls, '__processed__') and dataclasses.is_dataclass(_cls)",
"def is_process_class(node):\n if isinstance(node, ClassDef):\n for b in node.bases:\n if isinstance(b, Name) and b.id == KW_PROCESS_DEF:\n return True\n return False",
"def __isNXnode(self, node):\n if not hasattr(node, \"h5Class\"):\n return False\n class_ = node.h5Class\n if class_ is None or class_ != silx.io.utils.H5Type.GROUP:\n return False\n nxClass = node.obj.attrs.get(\"NX_class\", None)\n return nxClass is not None"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the integration test for this hostclass, or None if none exists | def get_integration_test(self, hostclass):
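        # falls back to None when the hostclass is unknown or has no integration_test entry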
return (hostclass in self._hostclasses and
self._hostclasses[hostclass].get("integration_test")) or None | [
"def current_test(self) -> IUTest:\n if self._running_test is not None and self._running_test < len(self._tests):\n return self._tests[self._running_test]\n return None",
"def get_test(self,test_id):\n for test in self.suite.get_tests():\n if test.id == test_id:\n return test\n return None",
"def GetHWTestSuite(self):\n hw_tests = self._run.config['hw_tests']\n if not hw_tests:\n # TODO(milleral): Add HWTests back to lumpy-chrome-perf.\n raise unittest.SkipTest('Missing HWTest for %s' % (self._bot_id,))\n\n return hw_tests[0]",
"def get_test(self, t_id: int) -> Optional[Tests]:\n try:\n test = self.session.query(Tests).get(t_id)\n return test\n except Exception as excpt:\n self.session.rollback()\n print(f'Could not get test: {excpt}')\n return None",
"def get_tester(task):\n assert task in TESTER_KITTI_FACTORY\n return TESTER_KITTI_FACTORY[task]",
"def get_existing_health_check_job(self):\n try:\n return TestJob.objects.filter((models.Q(actual_device=self) |\n models.Q(requested_device=self)),\n status__in=[TestJob.SUBMITTED,\n TestJob.RUNNING],\n health_check=True)[0]\n except IndexError:\n return None",
"def last_test(self) -> IUTest:\n if self._last_test is not None and self._last_test < len(self._tests):\n return self._tests[self._last_test]\n return None",
"def tester(self) -> IUTester:\n return self._tester",
"def get_suite(self):\n\t\treturn self.suite",
"def test(self):\n return self._test",
"def get_host(self):\n if not self.host_id:\n return None\n return self.system.hosts.get_by_id_lazy(self.host_id)",
"def get_suite(cls, suite_key: Callable) -> Union[\"JunitTestSuite\", None]:\n if cls.is_suite_exist(suite_key):\n return cls._junit_suites[suite_key]\n return None",
"def get_test_type(self):\n return self.test_type",
"def TestStepApi(self):\n return self.__TestStepApi",
"def get_test(self, index):\n\t\ttry:\n\t\t\treturn self._tests[index]\n\t\texcept IndexError:\n\t\t\traise(IndexError(f'list index out of range: there is no TestData at position {index}'))",
"def get_runner(cfg):\n\n if not cfg.run_ansible_test:\n return None\n\n if cfg.infra_osd:\n return OpenshiftJobTestRunner\n\n if cfg.ansible_test_local_image:\n return LocalImageTestRunner\n\n return LocalAnsibleTestRunner",
"def get_implementation(self):\n return self.__capabilities[\"IMPLEMENTATION\"]",
"def GetTesterName(self):\n callResult = self._Call(\"GetTesterName\", )\n\n if callResult is None:\n return None\n\n return callResult",
"def get_test_ID(self):\n return self.test_id"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Promote the AMI to the specified stage and, conditionally, make it executable by the production account if the AMI is staged as tested. | def _promote_ami(self, ami, stage):
prod_baker = self._disco_bake.option("prod_baker")
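        # promote to production only when the AMI reached 'tested' and was baked by the configured prod baker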
promote_conditions = [
stage == "tested",
prod_baker,
ami.tags.get("baker") == prod_baker,
]
try:
self._disco_bake.promote_ami(ami, stage)
if all(promote_conditions):
self._disco_bake.promote_ami_to_production(ami)
except:
logging.exception("promotion failed") | [
"def _execute_stage(self, stage):\n new_retcode = {\n 'init': self.__preconfigure,\n 'lock': self.__get_lock,\n 'configure': self.core.plugins_configure,\n 'prepare': self.core.plugins_prepare_test,\n 'start': self.core.plugins_start_test,\n 'poll': self.core.wait_for_finish,\n 'end': self.__end,\n 'postprocess': self.__postprocess,\n 'unlock': self.core.release_lock}[stage]()\n if new_retcode is not None:\n self.retcode = new_retcode",
"def stage(self, stage: osbuild.Stage):",
"def deploy():\n stage(branch='live', role='live')",
"def run_stage(self, stage):\n if self._active_prod is None:\n raise ValueError(\"No product has been activated!\")\n\n self.logger.info(f'Opening job for {self._stages[stage].name}'\n f' stage for {self._active_prod.name} product.')\n\n stage_conf = self._active_config.get(stage, {})\n stage_conf = Config(stage_conf)\n instrument = self._active_prod.instrument\n\n self.logger.debug('Freezing instrument and config.')\n stage_conf.freeze()\n instrument.freeze()\n\n # Execute!\n self._stages[stage]._call_pipeline(instrument, stage_conf)\n\n if self._stages[stage].status == 'error':\n self.logger.error(f'Stage {stage} failed.')\n elif self._stages[stage].status == 'done':\n self.logger.info(f'Stage {stage} done without problems.')\n\n self.logger.debug('Unfreezing instrument and config')\n stage_conf.unfreeze()\n instrument.unfreeze()\n self.logger.info(f'Closing job for {stage} stage.')",
"def stage(config, skip_verify, yaml):\n config.skip_verify = skip_verify\n if config.verbose:\n click.echo('Staging {}...'.format(yaml))\n if config.mason.parse_os_config(yaml):\n config.mason.stage(yaml)",
"def devpiserver_stage_created(stage):",
"def stage(ssh, pool, pnfsid):\n cmd = \"\\s \" + pool + \" rh restore \" + pnfsid\n result = execute_admin_command(ssh, cmd)",
"def _stage(self):\n\n local_source_path = join(BespokeGlobals.ABS_LOCAL_TOOLS,\n self._tool.install_properties['source_file'])\n\n self._remote_target_path = join(self._sut.bespoke_root,\n BespokeGlobals.TOOLS,\n self._tool.install_properties['source_file'])\n\n if isfile(local_source_path):\n self._staf_file_copy(local_source_path, self._remote_target_path)\n else:\n raise CoreError('Failed to stage tool \"{0}\" on remote machine! The file/directory '\n '\"{1}\" does not exist!'.format(self._tool.name, local_source_path))",
"def stage():\n _setup_env()\n\n if not 'stage' in _config:\n abort('Could not find \"stage\" in config file')\n\n # Make sure cdn exists\n exists(dirname(env.cdn_path), required=True)\n\n # Ask user for a new version\n _config['version'] = git.prompt_tag('Enter a new version number',\n unique=True)\n\n # Build version\n # use execute to allow for other implementations of 'build'\n execute('build')\n\n # Commit/push/tag\n with lcd(env.project_path):\n with settings(warn_only=True):\n local('git add build')\n # support builds where there's no change; sometimes comes up when\n # reusing a tag because of an unexpected problem\n with settings(warn_only=True):\n msg = local('git commit -m \"Release %(version)s\"' % _config,capture=True)\n if 'nothing to commit' in msg:\n warn(msg)\n warn('continuing anyway')\n elif not msg.startswith('[master'):\n abort(\"Unexpected result: %s\" % msg)\n local('git push')\n\n git.push_tag(_config['version'])\n\n # Copy to local CDN repository\n cdn_path = join(env.cdn_path, _config['version'])\n clean(cdn_path)\n\n for r in _config['stage']:\n static.copy(_config, [{\n \"src\": r['src'],\n \"dst\": cdn_path, \"regex\": r['regex']}])\n\n # Create zip file in local CDN repository\n _make_zip(join(cdn_path, '%(name)s.zip' % _config))",
"def run(cls, stage, gpath, upath):\n assert stage in (1, 2)\n if stage == 1:\n cls.run_stage1(gpath)\n else: # stage == 2:\n cls.run_stage2(gpath, upath)",
"def provision(vm='', env=''):\n local( main_dir + '/vagrant/bin/vm.sh provision ' + str(vm) + ' ' + str(env) )\n #result = local( main_dir + '/vagrant/bin/vm.sh provision ' + str(vm) + ' ' + str(env) )\n #if result != '0'\n # abort( \"Failed test - Aborting\")",
"def launch_instance_nonvpc ( ec2_conn,\n ami,\n base_name,\n instance_type,\n keypair,\n security_group,\n machine_type = 'm1.small',\n user_data = None,\n wait_for_running = True ) :\n instance_r = ami.run( key_name = keypair,\n instance_type = machine_type,\n security_groups = [ security_group ],\n user_data = user_data )\n instance = instance_r.instances[ 0 ];\n aws_cmd( ec2_conn.create_tags,\n [ instance.id, { \"Name\": get_instance_name( base_name, instance_type ) } ] )\n if wait_for_running :\n running = wait_on_object_state( instance, 'running', failure_state = 'terminated' )\n if not running :\n print \"Deployment instance still not up after long period of time! Exiting...\"\n sys.exit( 3 )\n\n return instance",
"def update_stage(conn, imobj):\n cur = conn.cursor()\n\n cur.execute('UPDATE image SET stage = %s WHERE id = %s',\n (imobj.stage, imobj.id))\n\n conn.commit()\n cur.close()",
"def install_sm_local_dependencies(framework, job_type, image, ec2_conn, ec2_instance_ami):\n python_invoker = get_python_invoker(ec2_instance_ami)\n # Install custom packages which need to be latest version\"\n # using virtualenv to avoid package conflicts with the current packages\n ec2_conn.run(f\"sudo apt-get install virtualenv -y \")\n ec2_conn.run(f\"virtualenv env --python {python_invoker}\")\n ec2_conn.run(f\"source ./env/bin/activate\")\n if framework == \"pytorch\":\n # The following distutils package conflict with test dependencies\n ec2_conn.run(\"sudo apt-get remove python3-scipy python3-yaml -y\")\n ec2_conn.run(f\"sudo {python_invoker} -m pip install -r requirements.txt \", warn=True)",
"def install_stage2(self):\n log.info('Installing stage2 (%s)' % self.stage2)\n self.copy(self.stage2, self.inner_stage2)",
"def stage_dev():\n _setup_env()\n\n if not 'stage' in _config:\n abort('Could not find \"stage\" in config file')\n\n # Make sure cdn exists\n exists(dirname(env.cdn_path), required=True)\n\n # Build version\n build()\n\n # Copy to local CDN repository\n cdn_path = join(env.cdn_path, 'dev')\n clean(cdn_path)\n\n for r in _config['stage']:\n static.copy(_config, [{\n \"src\": r['src'],\n \"dst\": cdn_path, \"regex\": r['regex']}])\n\n # Create zip file in local CDN repository\n _make_zip(join(cdn_path, '%(name)s.zip' % _config))",
"def create_ami_from_instance ( aws_account_type,\n ec2_conn,\n instance,\n ami_name,\n ami_description = None,\n wait_for_available = True ) :\n ami_id = instance.create_image( ami_name, ami_description )\n ami = aws_wait( ec2_conn.get_all_images, ami_id, [ ami_id ] )\n if not ami :\n print \"AMI is not available after a long time! \" + ami.name\n return None\n\n if wait_for_available :\n ami_available = wait_on_object_state( ami, 'available' ,max_wait=3600)\n if not ami_available :\n print \"AMI is not available after a long time! \" + ami.name\n return None\n\n # Allow other AWS accounts the ability to see this AMI.\n if aws_account_type == 'esp-nonprod' :\n priv_account_id = esp_prod[ 'accountid' ]\n else :\n priv_account_id = esp_nonprod[ 'accountid' ]\n\n ami.set_launch_permissions( user_ids = [ priv_account_id ] )\n\n return ami",
"def test_sfonly_new_image(self):\n stages = {'source finding' : True, 'source association' : False,\n 'catalog matching' : False}\n opts = {'save to database' : False, 'quality checks' : True,\n 'overwrite' : False, 'reprocess' : False,\n 'redo match' : False, 'update match' : False}\n # Pipeline should stop after source finding\n vdp.process(self.conn, stages, opts, self.dirs, self.files,\n self.catalogs, self.sfparams, self.qaparams)\n self.assertEqual(vdp.branch, 6)",
"def apply_pre_exec(\n *,\n context,\n params,\n ):\n\n # Create an archive of each source that Terraform can upload to S3\n for instance_current in instances:\n path_source = Path(terraform_dir, instance_current)\n path_staging = Path(terraform_dir, 'staging', instance_current)\n\n # Copy source into a staging directory\n shutil.rmtree(path=path_staging, ignore_errors=True)\n shutil.copytree(src=path_source, dst=path_staging)\n\n # Determine whether we need to update the buildspec.yml with environment variables\n if codebuild_environment_variables_factory != None and instance_current in codebuild_environment_variables_factory:\n # Obtain the variables we need to update in the buildspec.yml\n codebuild_environment_variables_current = codebuild_environment_variables_factory[instance_current](context=context)\n\n # Use a parsing object for roundtrip\n yaml_parser = ruamel.yaml.YAML()\n path_buildspec = Path(path_staging, 'buildspec.yml')\n\n # Update the buildspec to add provided environment variables\n with open(path_buildspec) as file_buildspec:\n yaml_buildspec = yaml_parser.load(file_buildspec)\n\n # Ensure the buildspec provides for environment variables\n if 'env' not in yaml_buildspec:\n yaml_buildspec['env'] = {}\n if 'variables' not in yaml_buildspec['env']:\n yaml_buildspec['env']['variables'] = {}\n\n # Add the variables\n for key_current, value_current in codebuild_environment_variables_current.items():\n yaml_buildspec['env']['variables'][key_current] = value_current\n\n # Replace the buildspec\n os.remove(path_buildspec)\n with open(path_buildspec, mode='w') as file_buildspec:\n yaml_parser.dump(yaml_buildspec, file_buildspec)\n\n # Make the archive\n shutil.make_archive(\n base_name=path_staging,\n format='zip',\n root_dir=path_staging\n )\n\n # Remove the staged source directory\n shutil.rmtree(\n path=path_staging,\n ignore_errors=True\n )"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Line is correctly split and missing/corrupted fields are checked. | def test_read_line(self):
expected_data = ['\"lu, jr\"','ming-yuan','\"DRUG,1\"',135.999,True,3]
input_string = '001,\"LU, JR\",MING-YUAN,\"DRUG,1\",135.999\n'
data = read_line(input_string)
self.assertEqual(expected_data[0],data[0])
self.assertEqual(expected_data[1],data[1])
self.assertEqual(expected_data[2],data[2])
self.assertAlmostEqual(expected_data[3],data[3])
self.assertEqual(expected_data[4],data[4])
self.assertAlmostEqual(expected_data[5],data[5])
        #Check for odd numbers of quotation marks
input_string = '001,\"LU\",\"MING-YUAN,DRUG1,135\n'
data = read_line(input_string)
self.assertFalse(data[4])
#Check for missing fields
input_string = '001,,MING-YUAN,DRUG1,135\n'
data = read_line(input_string)
self.assertFalse(data[4])
input_string = '001,LU,MING-YUAN,DRUG1,\n'
data = read_line(input_string)
self.assertFalse(data[4])
#Check for corrupted fields
input_string = '001x,LU,MING-YUAN,DRUG1,135\n'
data = read_line(input_string)
self.assertFalse(data[4])
input_string = '001,LU,MING-YUAN,DRUG1,1ag5\n'
data = read_line(input_string)
self.assertFalse(data[4]) | [
"def check_record(idline,nclline,sepline,qualiline):\n return check_idline(idline) and check_sepline(sepline)",
"def check_separation(self, line):\n # Raising an error, in case the passed object is not a string or not the separation string\n if not isinstance(line, str):\n raise TypeError(\"The passed line is not even a string\")\n if not self.checkup_separation(line):\n raise ValueError(\"The passed line is not the separation string\")",
"def check_line(self):\n if not self.hosts and not self.line:\n self.msg(\"There is no line here. You can create one with +line/createline.\")\n return\n return True",
"def testBadLine(self):\n\n self.assertRaises(\n ValueError,\n tools._trackInfo,\n 'not a real line'\n )",
"def test_malformed_line():\n \n header_parser = get_header()\n # Missing position\n variant_line = \"1\\t.\\tA\\tT\\t100\\tPASS\\tMQ=1\\tGT:GQ\\t0/1:60\\t\"\\\n \"0/1:60\\t1/1:60\"\n \n with pytest.raises(SyntaxError):\n variant = format_variant(\n line = variant_line, \n header_parser=header_parser, \n check_info=True\n )",
"def split_line_robust(line):\n\n line_split0 = [x.rstrip('\\n') for x in line.split(' ') if x]\n line_split1 = [x.split('\\t') for x in line_split0 if x]\n line_split = []\n for l_one in line_split1:\n for l_two in l_one:\n if l_two: line_split.append(l_two)\n return(line_split)",
"def test_parse_no_fields(self):\n received = self._p.parse_line(self._line)\n expected = {}\n msg = 'Line parse with no fields should return None'\n self.assertDictEqual(received, expected, msg)",
"def check_line(self, line):\n line = line.rstrip('\\r\\n')\n try:\n line = line.decode('utf8')\n except:\n pass\n return self.rules['all'].validate(line)",
"def validate_line(self, line):\n line0 = line\n pos = 0\n while line:\n seg_m = self.ft.seg_regex.match(line)\n wsp_m = self.ws_punc_regex.match(line)\n if seg_m:\n length = len(seg_m.group(0))\n line = line[length:]\n pos += length\n elif wsp_m:\n length = len(wsp_m.group(0))\n line = line[length:]\n pos += length\n else:\n msg = 'IPA not valid at position {} in \"{}\".'.format(pos, line0.strip())\n # msg = msg.decode('utf-8')\n print(msg, file=sys.stderr)\n line = line[1:]\n pos += 1",
"def parseLine(self, line):\n try:\n self.parseFields(*line.split())\n except ValueError:\n raise InvalidInetdConfError('Invalid line: ' + repr(line))",
"def test_check_iod_line_regex_none():\n check_iod_line_regex()",
"def is_valid(line):\n\n if line['type'] == 'edge' or line['type'] == 'endova' or \\\n line['type'] == 'gable' or line['type'] == 'roof_fracture':\n\n if line['points'][0]['z'] is not None or line['points'][1]['z'] is not None:\n\n # Line has all the coordinates\n if line['points'][0]['z'] is not None and line['points'][1]['z'] is not None:\n return True\n\n # Line has both lengths\n if line['length_real'] is not None and line['length_plan'] is not None:\n return True\n\n # Line has an angle and at least one height\n if line['angle']:\n return True\n\n elif line['type'] == 'skate' or line['type'] == 'cornice':\n\n if line['points'][0]['z'] is not None or line['points'][1]['z'] is not None:\n return True\n\n\n\n return False",
"def _raise_if_not_line(self, l: float):\n # todo: check, if line exists -> if not, causes crash (raise exception before!)\n pass",
"def _process_text_line(self, line, columns, format, lower_case, num_line,\n fill_missing=0, filter_case=None,\n strict_separator=False):\n if not isinstance(line, list) and not isinstance(\n line, tuple) and not isinstance(line, numpy.ndarray):\n if format != \"tsv\":\n raise Exception(\"unable to process format \" + format)\n line = line.strip(\"\\r\\n \").replace(\"\\n\", \" \")\n line = DatabaseCore2._split_expr.split(line)\n\n if filter_case is not None:\n line = [filter_case(s) for s in line]\n\n try:\n if fill_missing > 0:\n m = max(columns.keys())\n if m >= len(line):\n line = copy.copy(line)\n add = 0\n while m >= len(line) and add < fill_missing:\n a, b = columns[len(line)]\n if b is int:\n line.append(\"0\")\n elif b is float:\n line.append(\"0.0\")\n elif b is decimal.Decimal:\n line.append(\"0\")\n elif b is str:\n line.append(\"\")\n else:\n line.append(\"\")\n add += 1\n\n res = {}\n for c, v in columns.items():\n if \"AUTOFILL\" in v:\n res[v[0]] = \"NULL\"\n elif \"AUTOINCREMENT\" in v:\n continue\n else:\n if c >= len(line):\n self.LOG(\n \"(a)line number \",\n num_line,\n \"*unable to process a line columns \",\n c,\n \"#\",\n line,\n \" columns \",\n columns)\n return None\n\n val = line[c]\n if len(v) > 2 and v[2].lower() not in [\n \"primarykey\", \"autofill\"]:\n val = v[2](val)\n\n try:\n if isinstance(v[1], tuple):\n val = v[1][0](val)\n elif v[1] is datetime.datetime:\n if isinstance(val, datetime.datetime):\n pass\n elif isinstance(val, str):\n val = datetime.datetime.parse(val)\n else:\n raise TypeError(\n \"unable to convert %s into datetime\" % str(\n type(val)))\n else:\n val = v[1](val)\n except ValueError: # as e :\n self.LOG(\n \"(b)line number \",\n num_line,\n \"**unable to process a line columns \",\n c,\n \"#\",\n v[0],\n \" type \",\n v[1],\n \" value \",\n repr(\n line[c]))\n return None\n\n if isinstance(val, str):\n val = val.replace(\"'\", \"''\")\n if lower_case:\n val = val.lower()\n res[v[0]] = val\n\n return res\n except Exception:\n self.LOG(\"(c)line number\", num_line,\n \"***unable to process a line columns:\", line)\n return None",
"def _parse_line(self, line):\n fields = line.split('|', 4) # stop splitting after fourth | found\n line_info = {'raw_message': line}\n if len(fields) == 5:\n line_info.update(dict(zip(self._fieldnames, fields)))\n return line_info",
"def isLineData(self, line):\n\n if line is None or line.strip().startswith('#'):\n return False, None, 0\n\n dataType = self.getDataType()\n\n if dataType == 'Y':\n # Y with 1 column\n try:\n yValue = float(line)\n\n return True, 'Y', 1\n except:\n pass\n\n # Y with comma 2 to 5 column\n try:\n yValueList = []\n yValueList = line.split(',')\n\n if len(yValueList) > 1 and len(yValueList) <= 5:\n newYValues = []\n for yValue in yValueList:\n try:\n yValue = float(yValue)\n newYValues.append(yValue)\n except ValueError:\n pass\n\n return True, 'Y', len(newYValues)\n except:\n pass\n\n # Y with space 2 to 5 column\n try:\n yValueList = []\n yValueList = line.split()\n\n if len(yValueList) > 1 and len(yValueList) <= 5:\n for yValue in yValueList:\n yValue = float(yValue)\n\n return True, 'Y', len(yValueList)\n except:\n pass\n elif dataType == 'XY':\n # XY with comma\n try:\n (xValue, yValue) = line.split(',')\n\n xValue = float(xValue)\n yValue = float(yValue)\n\n return True, 'XY', 2\n except:\n pass\n\n # XY with comma\n try:\n xValue, yValue, dummy = line.split(',')\n\n xValue = float(xValue)\n yValue = float(yValue)\n\n return True, 'XY', 2\n except:\n pass\n\n # XY with space\n try:\n (xValue, yValue) = line.split()\n\n xValue = float(xValue)\n yValue = float(yValue)\n\n return True, 'XY', 2\n except:\n pass\n else:\n # Y with 1 column\n try:\n yValue = float(line)\n\n return True, 'Y', 1\n except:\n pass\n\n # Y with comma 2 to 5 column\n try:\n yValueList = []\n yValueList = line.split(',')\n\n if len(yValueList) > 1 and len(yValueList) <= 5:\n numberValues = 0\n for yValue in yValueList:\n try:\n yValue = float(yValue)\n numberValues += 1\n except ValueError:\n pass\n\n return True, 'Y', numberValues\n except:\n pass\n\n # Y with space 2 to 5 column\n try:\n yValueList = []\n yValueList = line.split()\n\n if len(yValueList) > 1 and len(yValueList) <= 5:\n for yValue in yValueList:\n yValue = float(yValue)\n\n return True, 'Y', len(yValueList)\n except:\n pass\n\n # XY with comma\n try:\n (xValue, yValue) = line.split(',')\n\n xValue = float(xValue)\n yValue = float(yValue)\n\n return True, 'XY', 2\n except:\n pass\n\n # XY with comma\n try:\n xValue, yValue, dummy = line.split(',')\n\n xValue = float(xValue)\n yValue = float(yValue)\n\n return True, 'XY', 2\n except:\n pass\n\n # XY with space\n try:\n (xValue, yValue) = line.split()\n\n xValue = float(xValue)\n yValue = float(yValue)\n\n return True, 'XY', 2\n except:\n pass\n\n return False, None, 0",
"def test_invalid_split():\n split = CHAID.Split(None, None, None, 1, 0)\n assert split.invalid_reason == None\n assert split.column == None",
"def checkup_separation(self, line):\n # The first condition of the line being the separation is it being longer than the sep string alone\n if len(line) > len(self.separation):\n separation_length = len(self.separation)\n if line[:separation_length] == self.separation:\n return True\n # In case one of the conditions was not given, False is returned\n return False",
"def test_rest_of_line(self):\r\n self._check_dispatch('hello x , : ; sdf ,: ',\r\n 'x , : ; sdf ,: ')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Unique drug list dict is correctly returned. | def test_get_unique_drug_list(self):
dict1 = self.test_dict
dict2 = get_unique_drug_list(self.test_sorted_tuple)
self.assertEqual(dict1, dict2) | [
"def _make_data_unique(collected_data: List[Dict]) -> List[Dict]:\n return list(dict(x) for x in set(tuple(x.items()) for x in collected_data))",
"def unique_drugs(self):\n if self.results is not None:\n return tuple(self.results['drug'].unique())",
"def get_drugs(drug_ind_lst, ind2drug):\n\tcur.execute(\"select drug_index, drug_id, drug_name, synonyms, drug_type, drug_group, smiles, pubchem_id, chembl_id, conditions, num_pros, pro_inds, num_paths, path_ids,path_names, go_function, go_process, go_component from \"+ tbl_drug_index + \" where drug_index in \"+drug_ind_lst+\";\")\n\trows = cur.fetchall()\n\tfor row in rows:\n\t\tind2drug[row[0]] = row[1:]\n\treturn ind2drug",
"def get_drug_data(self, drug_info):\n\n drug_dict = self.create_dict(drug_info)\n drug_key = drug_dict[\"drugName\"]\n values = {key: value for key, value in drug_dict.items() if key is not \"drugName\"}\n return drug_key, values",
"def getDrugData(self, moleculeChEMBLIdList):\n oD = {}\n chunkSize = 50\n try:\n for ii in range(0, len(moleculeChEMBLIdList), chunkSize):\n drug = new_client.drug # pylint: disable=no-member\n drug.set_format(\"json\")\n mDL = drug.filter(molecule_chembl_id__in=moleculeChEMBLIdList[ii : ii + chunkSize])\n if mDL:\n logger.info(\"mDL (%d)\", len(mDL))\n for mD in mDL:\n oD.setdefault(mD[\"molecule_chembl_id\"], []).append(mD)\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n return oD",
"def buildAltIdDict(self) :\n altIdDict = dict()\n for GOid in self.GO :\n real_id = GOid[\"id\"][0]\n alt_id = [real_id] + GOid.get(\"alt_id\", [])\n for each in alt_id :\n assert altIdDict.get(each, \"not_recorded\") == \"not_recorded\"\n altIdDict[each] = real_id\n self.altIdDict = altIdDict",
"def unique_val():\n try: \n data = [{\"V\":\"S001\"}, {\"V\": \"S002\"}, {\"VI\": \"S001\"}, {\"VI\": \"S005\"},\n {\"VII\":\"S005\"}, {\"V\":\"S009\"},{\"VIII\":\"S007\"}]\n print(\"Original List: \",data)\n #loop itrate here\n u_value = set( val for dic in data for val in dic.values())\n print(\"Unique Values: \",u_value) \n except ValueError as e:\n logger.error(\"Not find the dictnary\"+str(e))",
"def test_magicdictlist_dedupe():\n d1 = magicdictlist()\n\n d1['key1'].append('1 hello')\n d1['key1'].append('1 world')\n d1['key2'].append('2 hello')\n d1['key1'].append('1 world')\n\n d2 = d1.dedupe()\n assert len(d2) == 2\n assert len(d2['key1']) == 2\n assert len(d2['key2']) == 1\n assert set(d2['key1']) == set(['1 hello', '1 world'])\n assert d2['key2'] == ['2 hello']",
"def dictionize(self):\n dd = {}\n for index in range(0, len(self.__list), 2):\n key = self.__list[index]\n if index == len(self.__list) - 1:\n value = ' '\n else:\n value = self.__list[index + 1]\n dd[key] = value\n return dd",
"def _generate_additional_mutation_return_valies(self) -> Dict[str, any]:\r\n return dict()",
"def question_3(dogs):\n # Add your code here\n colours = COLOURS.keys()\n N = len(colours)\n ans = dict(zip(colours, [set() for _ in range(N)]))\n for d in dogs:\n ans[d.colour_id].add(d.name)\n return ans",
"def _create_dictionary_of_ned_d(\n self):\n self.log.debug(\n 'starting the ``_create_dictionary_of_ned_d`` method')\n\n count = 0\n with open(self.pathToDataFile, 'rb') as csvFile:\n csvReader = csv.reader(\n csvFile, dialect='excel', delimiter=',', quotechar='\"')\n totalRows = sum(1 for row in csvReader)\n csvFile.close()\n totalCount = totalRows\n\n with open(self.pathToDataFile, 'rb') as csvFile:\n csvReader = csv.reader(\n csvFile, dialect='excel', delimiter=',', quotechar='\"')\n theseKeys = []\n dictList = []\n for row in csvReader:\n if len(theseKeys) == 0:\n totalRows -= 1\n if \"Exclusion Code\" in row and \"Hubble const.\" in row:\n for i in row:\n if i == \"redshift (z)\":\n theseKeys.append(\"redshift\")\n elif i == \"Hubble const.\":\n theseKeys.append(\"hubble_const\")\n elif i == \"G\":\n theseKeys.append(\"galaxy_index_id\")\n elif i == \"err\":\n theseKeys.append(\"dist_mod_err\")\n elif i == \"D (Mpc)\":\n theseKeys.append(\"dist_mpc\")\n elif i == \"Date (Yr. - 1980)\":\n theseKeys.append(\"ref_date\")\n elif i == \"REFCODE\":\n theseKeys.append(\"ref\")\n elif i == \"Exclusion Code\":\n theseKeys.append(\"dist_in_ned_flag\")\n elif i == \"Adopted LMC modulus\":\n theseKeys.append(\"lmc_mod\")\n elif i == \"m-M\":\n theseKeys.append(\"dist_mod\")\n elif i == \"Notes\":\n theseKeys.append(\"notes\")\n elif i == \"SN ID\":\n theseKeys.append(\"dist_derived_from_sn\")\n elif i == \"method\":\n theseKeys.append(\"dist_method\")\n elif i == \"Galaxy ID\":\n theseKeys.append(\"primary_ned_id\")\n elif i == \"D\":\n theseKeys.append(\"dist_index_id\")\n else:\n theseKeys.append(i)\n continue\n\n if len(theseKeys):\n count += 1\n if count > 1:\n # Cursor up one line and clear line\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n if count > totalCount:\n count = totalCount\n percent = (float(count) / float(totalCount)) * 100.\n print \"%(count)s / %(totalCount)s (%(percent)1.1f%%) rows added to memory\" % locals()\n rowDict = {}\n for t, r in zip(theseKeys, row):\n rowDict[t] = r\n if t == \"ref_date\":\n try:\n rowDict[t] = int(r) + 1980\n except:\n rowDict[t] = None\n\n if rowDict[\"dist_index_id\"] != \"999999\":\n dictList.append(rowDict)\n\n csvFile.close()\n\n self.log.debug(\n 'completed the ``_create_dictionary_of_ned_d`` method')\n return dictList",
"def get_list(self):\n return self.mydict",
"def get_drugs_name_to_id_dict():\n\n query = \"\"\"SELECT drug.name as name, drug.id AS id\n FROM drug\n\n UNION ALL\n\n SELECT drug_synonym.syn as name, drug_synonym.drug as id\n FROM drug_synonym\"\"\"\n \n cnx = get_connection()\n cursor = cnx.cursor()\n\n cursor.execute(query)\n drugs_dict = {x[0].lower(): x[1] for x in cursor.fetchall()}\n\n cursor.close()\n close_connection(cnx)\n\n return drugs_dict",
"def unique(self):\n dict_util.unique(self)",
"def pop_dic():\n #global taxa, taxa_dic #global variables declaration\n for i in range(len(taxa)):\n taxa_dic[taxa[i][1]] = set()## set all keys and initiallise all values to empty sets\n for i in range(len(taxa)):\n taxa_dic[taxa[i][1]].add(taxa[i][0])## iterate the list and add values to the corresponding sets according to the kay names\n return 0",
"def __init__(self):\n self.dict = {}\n self.drugs = []",
"def _get_hash_list(self, key, uniq):\n arr = None\n try:\n arr = self._dict[key][uniq]\n finally:\n return arr",
"def item_duplicate():\n return {'name':'chair',\n 'value':300}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Total cost of each drug is correct. | def test_get_total_cost_each_drug(self):
list1 = self.test_total_cost_each_drug
list2 = get_total_cost_each_drug(self.test_sorted_tuple, self.test_dict)
self.assertEqual(list1, list2) | [
"def cost(self) -> float:",
"def total_cost(self):\n return sum(partial_solution.cost for partial_solution in self)",
"def totalpartcost(self):\n cost = 0.0\n for (element, myvendor, qty, price) in self.data:\n cost += qty * price\n return cost",
"def totalordercost(self, solution):\n (n, p) = solution.vitalstats()\n totalcost = n * self.costperorder + p\n return totalcost",
"def patrimony_total(self):\n pass",
"def cost(self,solution,problem):\n return 0",
"def calculate_total_cost(state):\n pass",
"def calculate_total_cost(state):\r\n return state.cost()",
"def get_total_cost(self):\n total_cost = sum([item.quantity * item.product.price for item in self.orderitem_set.all()])\n return total_cost - total_cost * (self.discount / Decimal('100'))",
"def total_cost(self):\n return self._total_cost",
"def calculate_total_cost(state):\n return state.cost",
"def cost_of_solution(dragon_knight_assignments,knight_heights): \n return sum([knight_heights[i] for i in dragon_knight_assignments])",
"def calcul_du_total(self):\r\n \r\n df_donnees = self._donnees\r\n self._total = df_donnees['Montant'].sum()",
"def get_overall_cost():\n items = Item.objects.all()\n total_cost = 0 #total cost of an item\n for item in items:\n total_cost += item.price\n return total_cost",
"def calculateCosts(self):\n self.costs = 0\n for house in self.houses:\n if not house.distance == 1000:\n self.costs += house.distance * 9\n for battery in self.batteries:\n self.costs += battery.costs\n return self.costs",
"def total_cost(self):\n if self.goal:\n return self.goal + (self.community_contribution or 0)\n else:\n return 0",
"def totalValue(self):\n\n\t\tvalue = 0\n\t\tfor bottle in self.bottles:\n\t\t\tvalue += bottle.inflatedCost\n\n\t\treturn value",
"def calculate_total_cost(state):\n \n return state.cost + calculate_manhattan_dist_block(state)",
"def calculate_total_cost(state):\n ### STUDENT CODE GOES HERE ###\n total_manhattan_distance = 0\n for i in range(len(state.config)):\n if state.config[i] != 0:\n sum = calculate_manhattan_dist( i, state.config[i], state.n )\n total_manhattan_distance += sum\n return total_manhattan_distance + state.cost"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks if a string is a permutation of a palindrome by populating a map and counting the occurrences of letters. O(N) | def is_palindrome_permutation(string):
letter_to_count = dict()
for letter in string:
letter_to_count[letter] = letter_to_count.get(letter, 0) + 1
residual = 0
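    # residual counts how many letters occur an odd number of times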
for count in letter_to_count.values():
residual += count % 2
    # there can be a single letter with an odd character count when the palindrome is of odd length
return residual <= 1 | [
"def check_palindrome_permutation(string: str) -> bool:\n\n string = string.lower()\n\n char_counter = defaultdict(int)\n for char in string:\n count = char_counter[char]\n if count:\n char_counter[char] -= 1\n else:\n char_counter[char] += 1\n\n return sum(char_counter.values()) <= 1",
"def palindrom_permutation(string: str):\n string = re.sub(r'\\W+', '', string.lower())\n\n chars = dict()\n for c in string:\n chars[c] = chars[c] + 1 if c in chars else 1\n\n almost_not_okey = False\n for val in chars.values():\n if val % 2 == 1:\n if not almost_not_okey:\n almost_not_okey = True\n else:\n return False\n\n if almost_not_okey:\n return len(string) % 2 == 1\n return True",
"def solution1(string):\n if not string:\n return True\n\n string = string.lower()\n # Palindrome based on count\n h_map = {}\n # get character count\n for c in string:\n if not c.isalnum():\n continue\n if c in h_map:\n h_map[c]+=1\n if h_map[c]>2:\n h_map[c]=1\n else:\n h_map[c]=1\n # check palindrome based on the count\n is_first = True\n for k in h_map.keys():\n if h_map[k]==1:\n if is_first:\n is_first=False\n continue\n else:\n return False\n return True",
"def permutation(string):\n i = 0\n j = len(string) - 1\n while i < j:\n if string[i] != string[j]:\n return False\n i += 1\n j -= 1\n return True",
"def has_palindrome_permutation(given_string):\n\n unpaired_characters = set()\n\n for char in given_string:\n if char in unpaired_characters:\n unpaired_characters.remove(char)\n else:\n unpaired_characters.add(char) \n\n return len(unpaired_characters) <= 1",
"def find_permutations(stringa, stringb):\n \n dict_x = {}\n for char in stringa:\n if char not in dict_x.keys():\n dict_x[char] = 1\n else:\n dict_x[char] += 1\n \n for char in stringb:\n if char not in dict_x.keys():\n return \"not permutation\"\n else:\n dict_x[char] -= 1\n if dict_x[char] < 0:\n return \"not permutation\"\n \n return \"permutation\"",
"def is_permutation_palindrome(str):\n for s in permutations(str): # loop through all permutations of str\n if is_palindrome(s):\n return True # successfully find a palindrome permutation\n return False # reach this, then no possible permutation is palindrome",
"def ispseduoPalindrom(string):\n c_string = Counter(string)\n odds = sum([v % 2 for v in c_string.values()])\n return odds < 2",
"def palindrome_permutation(w):\n w = w.strip().replace(' ', '')\n chars = {}\n for c in w:\n try:\n chars[c] += 1\n except KeyError:\n chars[c] = 1\n\n if len(w) % 2 == 0:\n #Check if there is an even number\n #of every character in w.\n return all(x % 2 == 0 for x in chars.values()) \n else:\n #Check if there is an even number\n #of every character in w,\n #except for exactly one character.\n found_odd = False\n for c in chars:\n if chars[c] % 1 == 0:\n if not found_odd:\n found_odd = True\n else:\n return False\n \n if found_odd:\n return True\n else:\n return False",
"def is_anagram_of_palindrome(word):\n\n seen = {}\n\n # Count each letter\n\n for letter in word:\n seen[letter] = seen.get(letter, 0) + 1\n\n seen_an_odd = 0\n\n for count in seen.values():\n if count % 2 != 0:\n seen_an_odd += 1\n\n if seen_an_odd > 1:\n return False\n\n return True",
"def can_form_palindrome(s):\n n_odd = 0\n for count in collections.Counter(s).values():\n if count % 2 == 1:\n n_odd += 1\n return n_odd <= 1",
"def perm(string1, string2):\n if len(string2) != len(string1):\n return False\n\n char_set1 = dict()\n char_set2 = dict()\n\n for char in string1:\n if char in char_set1:\n char_set1[char] += 1\n else:\n char_set1[char] = 1\n\n for char in string2:\n if char in char_set2:\n char_set2[char] += 1\n else:\n char_set2[char] = 1\n\n return char_set1 == char_set2",
"def is_anagram_of_palindrome(word):\n\n counts = {}\n num_of_odd_occurences = 0\n\n for char in word:\n counts[char] = counts.get(char, 0) + 1\n for val in counts.values():\n if val % 2 != 0:\n num_of_odd_occurences += 1\n\n return num_of_odd_occurences <= 1",
"def is_perm_of_pal(str):\n\n odds = set()\n\n for letter in str:\n if letter in odds:\n odds.remove(letter)\n elif letter != \" \":\n odds.add(letter)\n\n\n return len(odds) <= 1",
"def is_permutation(str1, str2):\n\n chars = dict()\n\n def check_chars(ch, can_add, word_index):\n \"\"\"\n\n :param ch: the character we're looking for\n :param can_add: boolean which states if we can add more items to the dict\n :param word_index: int to identify the word\n :return: void\n \"\"\"\n if ch not in chars and can_add:\n chars[ch] = [False, word_index]\n else:\n chars[ch] = [True, word_index]\n\n n1 = len(str1)\n n2 = len(str2)\n for i in range(0, max(n1, n2)):\n if i < n1:\n check_chars(str1[i], i < n1, 1)\n if i < n2:\n check_chars(str2[i], i < n2, 2)\n\n word = None\n for ch in chars:\n if not chars[ch][0]:\n if word is None:\n word = chars[ch][1]\n elif word is not chars[ch][1]:\n return False\n return True",
"def checkPalindrome_1(string, k):\n if not is_valid_palindrome_input(string):\n raise PalindromeInputError\n\n # special case: no characters need to be removed to form a palindrome\n if is_palindrome(string):\n return True\n\n # try every possible permutation of string and see if any is a palindrome\n for substring_length in range(1, k + 1): # convert to 1-based\n for slice_start_index in range(len(string)):\n slice_end_index = slice_start_index + substring_length\n test_string = string[0:slice_start_index] + string[slice_end_index:None]\n if is_palindrome(test_string):\n return True\n\n # no palindrome found in above check\n return False",
"def palindromeIterative(string):\n string = string.lower()\n length = len(string)\n number = 0\n for i in range(length):\n for j in range(i+1, length+1):\n tmp = string[i:j]\n if tmp == tmp[::-1] and len(tmp) > 1:\n number += 1\n return number",
"def is_anagram_of_palindrome(anagram):\n\tif len(anagram) == 0:\n\t\treturn True\n\n\tcounter = {}\n\n\tfor letter in anagram:\n\t\tif letter not in counter:\n\t\t\tcounter[letter] = 1\n\t\telse:\n\t\t\tcounter[letter] += 1\n\n\todd = 0\n\tfor letter in counter:\n\t\tif counter[letter] % 2 != 0:\n\t\t\todd += 1\n\n\tif odd <= 1:\n\t\treturn True\n\n\treturn False",
"def is_permutation(string_1, string_2):\n return sorted(string_1) == sorted(string_2)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compute the cycle consistency loss. L_cyc = lamA [Expectation of L1_norm(F(G(A)) - A)] + lamB [Expectation of L1_norm(G(F(B)) - B)] | def __cycle_consistency_loss(self, reconstructedA, reconstructedB):
loss = self.opt.lamA * tf.reduce_mean(tf.abs(reconstructedA - self.realA)) + \
self.opt.lamB * tf.reduce_mean(tf.abs(reconstructedB - self.realB))
return loss | [
"def directed_cycle_score(A):\n\n # Implement your cycle score given Problem 4 Part 2\n temp_matrix = np.zeros(A.shape)\n alpha = 0.05\n k = 0\n summation_term = 999999\n num_terms = A.shape[0]\n # while change < 0.05:\n for i in range(num_terms):\n summation_term = (1 / np.math.factorial(k)) * expm(A)\n temp_matrix += summation_term\n\n cycle_score = np.trace(temp_matrix) - (A.shape[0] * num_terms)\n return cycle_score",
"def backward_G(self):\n lambda_idt = self.opt.lambda_identity\n lambda_A = self.opt.lambda_A\n lambda_B = self.opt.lambda_B\n # Identity loss\n self.loss_idt_A = self.criterionIdt(self.idt_A, self.real_B) * lambda_B * lambda_idt\n self.loss_idt_B = self.criterionIdt(self.idt_B, self.real_A) * lambda_A * lambda_idt\n\n # GAN loss D_A(G_A(A))\n self.loss_G_A = self.criterionGAN(self.netD_A(self.fake_B), True)\n # GAN loss D_B(G_B(B))\n self.loss_G_B = self.criterionGAN(self.netD_B(self.fake_A), True)\n # Forward cycle loss || G_B(G_A(A)) - A||\n self.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * lambda_A\n # Backward cycle loss || G_A(G_B(B)) - B||\n self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B\n # combined loss and calculate gradients\n self.loss_G = self.loss_G_A + self.loss_G_B + \\\n self.loss_cycle_A + self.loss_cycle_B + \\\n self.loss_idt_A + self.loss_idt_B\n\n if self.opt.online_distillation or self.opt.normal_distillation:\n\n self.Tfake_A = self.teacher_model.fake_A\n self.Tfake_B = self.teacher_model.fake_B\n current_distillation_Afeatures = self.get_distillation_features(AorB='A')\n current_distillation_Bfeatures = self.get_distillation_features(AorB='B')\n\n self.teacher_model.netD_A(self.fake_B.detach()) # output teacher discriminator feature for netG_A\n self.teacher_model.netD_B(self.fake_A.detach()) # output teacher discriminator feature for netG_B\n teacher_discriminator_Afeatures = list(self.teacher_model.total_netD_A_features.values())\n current_distillation_Afeatures[-len(self.discriminator_extract_layers):] = teacher_discriminator_Afeatures[:]\n teacher_discriminator_Bfeatures = list(self.teacher_model.total_netD_B_features.values())\n current_distillation_Bfeatures[-len(self.discriminator_extract_layers):] = teacher_discriminator_Bfeatures[:]\n\n self.loss_content_A = 0.0\n self.loss_gram_A = 0.0\n self.loss_content_B = 0.0\n self.loss_gram_B = 0.0\n self.loss_L1_A = 0.0\n self.loss_L1_B = 0.0\n for i, feature in enumerate(current_distillation_Afeatures):\n\n if i < 4: # generator feature should transform channel dimension consistent with teacher\n feature = self.transform_A_convs[i](feature)\n target_feature = self.target_distillation_A_features[i]\n # import pdb\n # pdb.set_trace()\n # print(feature.size(), target_feature.size())\n self.loss_gram_A += self.criterionMSE(self.gram(feature), self.gram(target_feature.detach()))\n self.loss_content_A += self.criterionMSE(feature, target_feature.detach())\n self.loss_L1_A += self.criterionL1(self.fake_B, self.Tfake_B.detach())\n for i, feature in enumerate(current_distillation_Bfeatures):\n\n if i < 4: # generator feature should transform channel dimension consistent with teacher\n feature = self.transform_B_convs[i](feature)\n target_feature = self.target_distillation_B_features[i]\n self.loss_gram_B += self.criterionMSE(self.gram(feature), self.gram(target_feature.detach()))\n self.loss_content_B += self.criterionMSE(feature, target_feature.detach())\n self.loss_L1_B += self.criterionL1(self.fake_A, self.Tfake_A.detach())\n\n self.loss_gram_A = self.opt.lambda_gram * self.loss_gram_A\n self.loss_content_A = self.opt.lambda_content * self.loss_content_A\n self.loss_L1_A = self.opt.lambda_L1 * self.loss_L1_A\n self.loss_gram_B = self.opt.lambda_gram * self.loss_gram_B\n self.loss_content_B = self.opt.lambda_content * self.loss_content_B\n self.loss_L1_B = self.opt.lambda_L1 * self.loss_L1_B\n self.loss_G += self.loss_gram_A + self.loss_gram_B\n self.loss_G += 
self.loss_content_A + self.loss_content_B\n self.loss_G += self.loss_L1_A + self.loss_L1_B\n\n self.loss_G.backward()\n\n self.L1_sparsity()",
"def compute_ab_cycles(c_cycles, linear_combinations, g, tretkoff_graph):\n lincomb = linear_combinations\n M,N = lincomb.shape\n\n a_cycles = []\n b_cycles = []\n\n for i in range(g):\n a = []\n b = []\n for j in range(N):\n cij = lincomb[i,j]\n c = c_cycles[j] if cij >= 0 else reverse_cycle(c_cycles[j])\n a.extend(abs(cij)*c[:-1])\n\n cij = lincomb[i+g,j]\n c = c_cycles[j] if cij >= 0 else reverse_cycle(c_cycles[j])\n b.extend(abs(cij)*c[:-1])\n\n a = a + [0]\n b = b + [0]\n a = compress_cycle(a, tretkoff_graph)\n b = compress_cycle(b, tretkoff_graph)\n\n a_cycles.append(a)\n b_cycles.append(b)\n\n return a_cycles, b_cycles",
"def backward_G(self):\n lambda_idt = self.opt.lambda_identity\n lambda_A = self.opt.lambda_A\n lambda_B = self.opt.lambda_B\n # lambda_shading_A = self.opt.lambda_shading_A\n # lambda_shading_B = self.opt.lambda_shading_B\n # Identity loss\n if lambda_idt > 0:\n # G_A should be identity if real_B is fed: ||G_A(B) - B||\n self.idt_A = self.netG_A(self.real_B)\n self.loss_idt_A = self.criterionIdt(self.idt_A, self.real_B) * lambda_B * lambda_idt\n # G_B should be identity if real_A is fed: ||G_B(A) - A||\n self.idt_B = self.netG_B(self.real_A)\n self.loss_idt_B = self.criterionIdt(self.idt_B, self.real_A) * lambda_A * lambda_idt\n else:\n self.loss_idt_A = 0\n self.loss_idt_B = 0\n\n # GAN loss D_A(G_A(A))\n self.loss_G_A = self.criterionGAN(self.netD_A(self.fake_B), True)\n # GAN loss D_B(G_B(B))\n self.loss_G_B = self.criterionGAN(self.netD_B(self.fake_A), True)\n # Forward cycle loss || G_B(G_A(A)) - A||\n self.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * lambda_A\n # Backward cycle loss || G_A(G_B(B)) - B||\n self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B\n\n # combined loss and calculate gradients\n self.loss_G = self.loss_G_A + self.loss_G_B + self.loss_cycle_A + self.loss_cycle_B + self.loss_idt_A + self.loss_idt_B\n self.loss_G.backward()",
"def backward_G(self):\n lambda_idt = self.opt.lambda_identity\n lambda_A = self.opt.lambda_A\n lambda_B = self.opt.lambda_B\n # Identity loss\n if lambda_idt > 0:\n # G_A should be identity if real_B is fed: ||G_A(B) - B||\n self.idt_A = self.netG_A(self.real_B)\n self.loss_idt_A = self.criterionIdt(self.idt_A, self.real_B) * lambda_B * lambda_idt\n # G_B should be identity if real_A is fed: ||G_B(A) - A||\n self.idt_B = self.netG_B(self.real_A)\n self.loss_idt_B = self.criterionIdt(self.idt_B, self.real_A) * lambda_A * lambda_idt\n else:\n self.loss_idt_A = 0\n self.loss_idt_B = 0\n\n # GAN loss D_A(G_A(A))\n self.loss_G_A = self.criterionGAN(self.netD_A(self.fake_B), True)\n # GAN loss D_B(G_B(B))\n self.loss_G_B = self.criterionGAN(self.netD_B(self.fake_A), True)\n # Forward cycle loss || G_B(G_A(A)) - A||\n self.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * lambda_A\n # Backward cycle loss || G_A(G_B(B)) - B||\n self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B\n\n\n # ่ฎก็ฎๅๅฒๆๅคฑ๏ผๆฅไฝฟGๅSeg็ฝ็ป๏ผไฝฟๅๅฒๆญฃ็กฎ\n self.CrossEntropyLoss = self.criterionSeg(self.pre, self.label.long().squeeze(1))\n self.L2_loss = self.L2_loss_net(self.netSeg)\n self.loss_Seg = self.CrossEntropyLoss + self.L2_loss\n\n # combined loss and calculate gradients\n self.loss_G = self.loss_G_A + self.loss_G_B + self.loss_cycle_A + self.loss_cycle_B + self.loss_idt_A + self.loss_idt_B + self.loss_Seg\n self.loss_G.backward()",
"def test_cfu_cycles(self):\n # Input: (function, in0, in1, cmd_valid, rsp_ready)\n # Output: (result, rsp_valid, cmd_ready)\n X = None\n DATA = [\n # Nothing\n ((0, 0, 0, 0, 0), (X, 0, 1)),\n # Same cycle instruction, CPU not ready\n ((0, 1, 2, 1, 0), (3, 1, 1)),\n ((0, 0, 0, 0, 1), (3, 1, 0)),\n ((0, 0, 0, 0, 0), (X, 0, 1)),\n # Multi-cycle instruction, CPU ready\n ((3, 3, 0, 1, 1), (X, 0, 1)),\n ((0, 0, 0, 0, 1), (X, 0, 0)),\n ((0, 0, 0, 0, 1), (X, 0, 0)),\n ((0, 0, 0, 0, 1), (6, 1, 0)),\n # Same cycle instruction, CPU ready\n ((0, 5, 3, 1, 1), (8, 1, 1)),\n # Multi-cycle instruction, CPU not ready\n ((3, 2, 0, 1, 0), (X, 0, 1)),\n ((0, 0, 0, 0, 0), (X, 0, 0)),\n ((0, 0, 0, 0, 0), (2, 1, 0)),\n ((0, 0, 0, 0, 1), (2, 1, 0)),\n # Multi-cycle instruction, but always ready next cycle\n ((4, 3, 5, 1, 1), (X, 0, 1)),\n ((0, 0, 0, 0, 1), (8, 1, 0)),\n # CPU not ready\n ((4, 3, 4, 1, 0), (X, 0, 1)),\n ((0, 0, 0, 0, 0), (X, 1, 0)),\n ((0, 0, 0, 0, 0), (X, 1, 0)),\n ((0, 0, 0, 0, 1), (7, 1, 0)),\n # Fallback instruction - same cycle, CPU ready\n ((7, 0, 0, 1, 1), (X, 1, 1)),\n ]\n\n def process():\n for n, (inputs, expected_outputs) in enumerate(DATA):\n func, i0, i1, cmd_valid, rsp_ready = inputs\n exp_result, exp_rsp_valid, exp_cmd_ready = expected_outputs\n yield self.dut.cmd_function_id.eq(func)\n yield self.dut.cmd_in0.eq(i0)\n yield self.dut.cmd_in1.eq(i1)\n yield self.dut.cmd_valid.eq(cmd_valid)\n yield self.dut.rsp_ready.eq(rsp_ready)\n yield Delay(0.1)\n if exp_result is not None:\n self.assertEqual((yield self.dut.rsp_out), exp_result)\n if exp_rsp_valid is not None:\n self.assertEqual((yield self.dut.rsp_valid), exp_rsp_valid)\n # We don't currently support returning non-OK responses, so\n # if our response is valid, it must be OK.\n if exp_rsp_valid:\n self.assertTrue((yield self.dut.rsp_ok))\n if exp_cmd_ready is not None:\n self.assertEqual((yield self.dut.cmd_ready), exp_cmd_ready)\n yield\n self.run_sim(process, False)",
"def test_lcl_convergence():\n with pytest.raises(RuntimeError):\n lcl(1000. * units.mbar, 30. * units.degC, 20. * units.degC, max_iters=2)",
"def mg_cycle(\n x: Tiles,\n b: Tiles,\n prs: multigrid_utils.ProlongRestrictMatrices,\n homogeneous_smoother_fn: Callable[[Tiles, Tiles], Tiles],\n residual_fn: Callable[[Tiles, Tiles], Tiles],\n a_inv_for_coarsest_level: Optional[np.ndarray] = None,\n n_coarse: int = 1,\n replica_id: Union[int, tf.Tensor] = 0,\n replicas: Optional[np.ndarray] = None,\n coordinates: Optional[TensorOrArray] = None,\n num_cycles: int = 1) -> Tiles:\n\n if (replicas is None) or (np.prod(replicas.shape) == 1):\n # Single core.\n dirichlet0_halo_exchange_fn = multigrid_3d_utils.zero_borders\n else:\n dirichlet0_halo_exchange_fn = multigrid_utils.halo_exchange_step_fn(\n replica_id, replicas, halo_exchange_utils.homogeneous_bcs())\n\n def body(i, x):\n x = _mg_cycle_internal(\n x, b, prs, homogeneous_smoother_fn, residual_fn,\n dirichlet0_halo_exchange_fn, a_inv_for_coarsest_level, n_coarse,\n replica_id, replicas, coordinates)\n return i + 1, x\n\n cond = lambda i, _: i < num_cycles\n\n i = 0\n if isinstance(x[0], tf.Tensor):\n _, x = tf.while_loop(\n cond=cond, body=body, loop_vars=[i, x], back_prop=False)\n else:\n while cond(i, x):\n i, x = body(i, x)\n\n return x",
"def cl_alm2d(alm1=None, alm2=None, lmax=100):\n if alm2 is None:\n alm2 = alm1\n cl = np.zeros(lmax+1)\n ls = np.arange(lmax+1)\n for l in ls:\n ms = np.arange(-l,l+1)\n \n cl[l] += ((alm1[l][ms]*np.conjugate(alm2[l][ms])).real).sum()/(2.*l+1.)\n return cl",
"def cycle_loss(self, origin, transformed):\n return self.l1_loss(origin, transformed)",
"def test_reverse_mode_lambda(self):\n\n np.random.seed(4321)\n\n\n N = 5\n B = 5\n A = 0\n T = 0\n D = 3\n\n x0 = np.random.rand(N,D).astype(dtype=np.float64)*2\n\n precision = np.float64\n \n (bond_params, ref_bond), test_bond = prepare_bonded_system(\n x0,\n B,\n A,\n T,\n precision\n )\n\n (restr_params, ref_restr), test_restr = prepare_restraints(\n x0,\n B,\n precision\n )\n\n E = 2\n\n lambda_plane_idxs = np.random.randint(low=0, high=2, size=N, dtype=np.int32)\n lambda_offset_idxs = np.random.randint(low=0, high=2, size=N, dtype=np.int32)\n\n (charge_params, lj_params), ref_nb_fn, test_nb_ctor = prepare_nonbonded_system(\n x0,\n E,\n lambda_plane_idxs,\n lambda_offset_idxs,\n p_scale=10.0,\n cutoff=1000.0,\n precision=precision \n )\n\n test_nb = test_nb_ctor()\n\n masses = np.random.rand(N)\n\n v0 = np.random.rand(x0.shape[0], x0.shape[1])\n N = len(masses)\n\n num_steps = 5\n lambda_schedule = np.random.rand(num_steps)\n cas = np.random.rand(num_steps)\n cbs = np.random.rand(len(masses))/10\n ccs = np.zeros_like(cbs)\n\n step_sizes = np.random.rand(num_steps)\n\n def loss_fn(du_dls):\n return jnp.sum(du_dls*du_dls)/du_dls.shape[0]\n\n def sum_loss_fn(du_dls):\n du_dls = np.sum(du_dls, axis=0)\n return jnp.sum(du_dls*du_dls)/du_dls.shape[0] \n\n def integrate_once_through(\n x_t,\n v_t,\n bond_params,\n restr_params,\n charge_params,\n lj_params):\n\n ref_bond_impl = functools.partial(ref_bond, params=bond_params)\n ref_restr_impl = functools.partial(ref_restr, params=restr_params)\n ref_nb_impl = functools.partial(ref_nb_fn, charge_params=charge_params, lj_params=lj_params)\n\n def ref_total_nrg_fn(*args):\n nrgs = []\n for fn in [ref_bond_impl, ref_restr_impl, ref_nb_impl]:\n nrgs.append(fn(*args))\n return jnp.sum(nrgs)\n\n dU_dx_fn = jax.grad(ref_total_nrg_fn, argnums=(0,))\n dU_dl_fn = jax.grad(ref_total_nrg_fn, argnums=(1,))\n\n all_du_dls = []\n for step in range(num_steps):\n lamb = lambda_schedule[step]\n du_dl = dU_dl_fn(x_t, lamb)[0]\n all_du_dls.append(du_dl)\n dt = step_sizes[step]\n cb_tmp = np.expand_dims(cbs, axis=-1) \n v_t = cas[step]*v_t + cb_tmp*dU_dx_fn(x_t, lamb)[0]\n x_t = x_t + v_t*dt\n # note that we do not calculate the du_dl of the last frame.\n\n all_du_dls = jnp.stack(all_du_dls)\n return loss_fn(all_du_dls)\n\n # when we have multiple parameters, we need to set this up correctly\n ref_loss = integrate_once_through(\n x0,\n v0,\n bond_params,\n restr_params,\n charge_params,\n lj_params\n )\n\n grad_fn = jax.grad(integrate_once_through, argnums=(2, 3))\n ref_dl_dp_bond, ref_dl_dp_restr = grad_fn(\n x0,\n v0,\n bond_params,\n restr_params,\n charge_params,\n lj_params\n )\n\n stepper = custom_ops.AlchemicalStepper_f64(\n [test_bond, test_restr, test_nb],\n lambda_schedule\n )\n\n seed = 1234\n\n ctxt = custom_ops.ReversibleContext_f64(\n stepper,\n x0,\n v0,\n cas,\n cbs,\n ccs,\n step_sizes,\n seed\n )\n\n # run 5 steps forward\n ctxt.forward_mode()\n test_du_dls = stepper.get_du_dl()\n test_loss = sum_loss_fn(test_du_dls)\n loss_grad_fn = jax.grad(sum_loss_fn, argnums=(0,))\n du_dl_adjoint = loss_grad_fn(test_du_dls)[0]\n\n # limit of precision is due to the settings in fixed_point.hpp\n # np.testing.assert_almost_equal(test_loss, ref_loss, decimal=7)\n np.testing.assert_allclose(test_loss, ref_loss, rtol=1e-6)\n stepper.set_du_dl_adjoint(du_dl_adjoint)\n ctxt.set_x_t_adjoint(np.zeros_like(x0))\n ctxt.backward_mode()\n\n test_dl_dp = test_bond.get_du_dp_tangents()\n np.testing.assert_allclose(test_dl_dp, ref_dl_dp_bond, rtol=1e-6)\n\n test_dl_dp = 
test_restr.get_du_dp_tangents()\n np.testing.assert_allclose(test_dl_dp, ref_dl_dp_restr, rtol=1e-6)",
"def scg(a, b, M, reg1, reg2, reg3, beta, f1, f2, f3, df1, df2, df3, j_dist, G0=None, numItermax=10, numInnerItermax=50, \n stopThr=1e-9, stopThr2=1e-9, verbose=False, log=False,amijo=True, C1=None, C2=None, constC=None):\n\n loop = 1\n\n if log:\n log = {'loss': []}\n\n if G0 is None:\n G = np.outer(a, b)\n else:\n G = G0\n\n def cost(G):\n return np.sum(M * G) + reg2 * f2(G) + reg3 * f3(G) + beta * np.linalg.norm(G,'fro')**2 + reg1 * f1(G) - 1e-4 * (np.sum(G * np.log(G)) - np.sum(G * np.log(j_dist)))\n\n f_val = cost(G)\n if log:\n log['loss'].append(f_val)\n\n it = 0\n\n if verbose:\n print('{:5s}|{:12s}|{:8s}|{:8s}'.format(\n 'It.', 'Loss', 'Relative loss', 'Absolute loss') + '\\n' + '-' * 48)\n print('{:5d}|{:8e}|{:8e}|{:8e}'.format(it, f_val, 0, 0))\n\n while loop:\n\n it += 1\n old_fval = f_val\n\n # problem linearization\n Mi = M + reg1 * df1(G)\n # set M positive\n Mi += Mi.min()\n\n # solve linear program with Sinkhorn-knopp\n # MUTAG, PTC-MR, COX2 AND BZR -> 0.5\n # ENZYMES AND PROTEINS -> 0.9\n Gc = sinkhorn(a, b, Mi, 0.5, method='sinkhorn', numItermax=numInnerItermax)\n\n deltaG = Gc - G\n\n # line search\n dcost = Mi + reg2 * df2(G) + reg3 * df3(G) + beta * G - 1e-4 * (1 + np.log(G) - np.log(j_dist))\n # set dcost positive\n dcost += dcost.min()\n alpha, fc, f_val = line_search_armijo(cost, G, deltaG, dcost, f_val)\n\n if alpha is None:\n print(it)\n if alpha is None or np.isnan(alpha) :\n raise NonConvergenceError('Alpha is not converged')\n else:\n G = G + alpha * deltaG\n\n # test convergence\n if it >= numItermax:\n loop = 0\n\n abs_delta_fval = abs(f_val - old_fval)\n \n # computing suboptimality gap by Frobenius inner product\n #delta_i = np.multiply(dcost.T, (G - Gc)).sum()\n delta_i = np.trace(dcost.T @ (G - Gc))\n\n if delta_i <= stopThr or abs_delta_fval <= stopThr2:\n loop = 0\n\n if log:\n log['loss'].append(f_val)\n\n if verbose:\n if it % 20 == 0:\n print('{:5s}|{:12s}|{:8s}|{:8s}'.format(\n 'It.', 'Loss', 'Relative loss', 'Absolute loss') + '\\n' + '-' * 48)\n print('{:5d}|{:8e}|{:8e}|{:8e}'.format(it, f_val, relative_delta_fval, abs_delta_fval))\n\n if log:\n return G, log\n else:\n return G",
"def cLCG(G):\n \n gens = []\n \n for g in G:\n gens.append(LCG(*g))\n \n m0 = G[0][3]-1\n \n while True:\n yield sum([(-1**j)*next(g) for j,g in enumerate(gens)]) % m0",
"def solve_directly(self, cycle='V'):\n g = self.grid[0]\n\n # No need to copy x and b, since they have already been assigned before\n\n g.residual()\n\n normb = g.norm(which='b')\n if self.verbose:\n print('||b|| = %g' % normb)\n\n if normb > 0:\n res0 = g.norm()/normb\n else:\n self.stats = {'normb': 0, 'res': [0], 'blowup': False}\n return 0, 0.\n\n res = res0\n reslist = [res]\n nite = 0\n nite_diverge = 0\n # improve the solution until one of this condition is wrong\n ok = True\n blowup = False\n if normb > 1e6:\n ok = False\n blowup = True\n\n while (nite < self.maxite) and (res0 > self.tol) and ok:\n if cycle == 'V':\n self.Vcycle(0)\n\n elif cycle == 'F':\n self.Fcycle()\n\n else:\n raise ValueError('use cycle V or F')\n\n g.residual()\n\n res = g.norm() / normb\n conv = res0 / res\n\n res0 = res\n reslist += [res]\n nite += 1\n if self.verbose:\n template = ' ite = {} / res = {:.2e} / conv = {:8.4f}'\n print(template.format(nite, res, conv))\n\n if (conv < 1):\n nite_diverge += 1\n\n if (nite_diverge > 4):\n ok = False\n print('solver is not converging')\n print('Abort!')\n blowup = True\n #raise ValueError('solver is not converging')\n\n # No need to copy x back to an external variable\n\n # store the statistics\n self.stats = {'normb': normb, 'res': reslist, 'blowup': blowup}\n\n return nite, res",
"def test_change_quantification():\n # Run loop change detection between matrices with and without loops\n cools = COOLS + COOLS_COMP\n conds = [\"B\"] * len(COOLS) + [\"S\"] * len(COOLS_COMP)\n obs_pos = pah.change_detection_pipeline(\n cools, conds, bed2d_file=str(DATA / \"B_loops.bed2d\"), subsample=False,\n )\n diff = obs_pos.diff_score\n # Check if change was detected in the correct direction (disappearing)\n # some positions\n assert len(diff[diff < 0]) >= len(diff) * 0.3",
"def test_born_gradcheck_2d_cfl():\n run_born_gradcheck_2d(propagator=scalarbornprop,\n dt=0.002,\n atol=2e-7,\n rtol=1e-8,\n nt_add=100)",
"def c_cycles(self):\n return self._c_cycles, self._linear_combinations",
"def cycle_closure(G, steps, verbose=False):\n bad = []\n\n for i in nx.simple_cycles(G):\n if len(i) == steps:\n path = pathway(i)\n total = 0\n errors = []\n for step in path:\n total += get_attr(G,step[0], step[1])\n errors.append(get_attr(G,step[0], step[1], 'ddg'))\n total_error = combine_errors(errors)\n\n if total > total_error:\n total = np.round(total, 1)\n total_error = np.round(total_error, 1)\n print(f\"Cycle {i} does not close\")\n print(f\"Closure: {total}\")\n print(f\"Cycle error: {total_error}\")\n bad.append(i)\n elif verbose:\n # print anyway\n print(f\"Cycle {i}\")\n print(f\"Closure: {total}\")\n print(f\"Cycle error: {total_error}\")\n return bad",
"def bound_sum_iterations_L_on_the_difference(self,dynamic,l,err_C0):\n M = self.bound_on_norms_of_powers(dynamic, project_left = False, project_right = False)\n return M*l*err_C0"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compute the identity loss. L_idt = lambda_idt [lamA [Expectation of L1_norm(F(A) - A)] + lamB [Expectation of L1_norm(G(B) - B)]] | def __identity_loss(self, identA, identB):
loss = self.opt.lambda_ident * (self.opt.lamA * tf.reduce_mean(tf.abs(identB - self.realA)) + \
self.opt.lamB * tf.reduce_mean(tf.abs(identA - self.realB)))
return loss | [
"def l1_loss(D, G, real_data, generated_data, losses, options):\n return torch.nn.L1Loss()(generated_data, real_data)",
"def one_step(i_t, h_tm1):\n h_t = self.activation(T.dot(i_t, self.W) + T.dot(h_tm1, self.W_rec) + self.b)\n return h_t",
"def l1_loss(obs, actual):\n # (tf.Tensor, tf.Tensor, float) -> tf.Tensor\n with tf.name_scope(\"l1_loss\"):\n return tf.reduce_sum(tf.abs(obs - actual) , 1)",
"def ggml_cross_entropy_loss(ctx: ffi.CData, a: ffi.CData, b: ffi.CData) -> ffi.CData:\n ...",
"def forward(self, output, target):\n fake_A, fake_B, idt_A, idt_B = output\n #Generators are trained to trick the discriminators so the following should be ones\n self.adv_loss_A = -torch.mean(self.dualgan.D_A(fake_A)) \n self.adv_loss_B = -torch.mean(self.dualgan.D_B(fake_B))\n \n #Reconstruction loss\n self.rec_loss_A = F.l1_loss(self.dualgan.G_A(fake_B), self.real_A)\n self.rec_loss_B = F.l1_loss(self.dualgan.G_B(fake_A), self.real_B)\n \n #Identity loss\n self.id_loss_A = F.l1_loss(idt_A, self.real_A)\n self.id_loss_B = F.l1_loss(idt_B, self.real_B)\n \n return self.l_adv*(self.adv_loss_A+self.adv_loss_B)+self.l_rec*(self.rec_loss_A+self.rec_loss_B)+self.l_idt*(self.id_loss_A+self.id_loss_B)",
"def opL(x):\n return np.dot(opL_mat(x.size), x)",
"def backward_G(self):\n lambda_idt = self.opt.lambda_identity\n lambda_A = self.opt.lambda_A\n lambda_B = self.opt.lambda_B\n # lambda_shading_A = self.opt.lambda_shading_A\n # lambda_shading_B = self.opt.lambda_shading_B\n # Identity loss\n if lambda_idt > 0:\n # G_A should be identity if real_B is fed: ||G_A(B) - B||\n self.idt_A = self.netG_A(self.real_B)\n self.loss_idt_A = self.criterionIdt(self.idt_A, self.real_B) * lambda_B * lambda_idt\n # G_B should be identity if real_A is fed: ||G_B(A) - A||\n self.idt_B = self.netG_B(self.real_A)\n self.loss_idt_B = self.criterionIdt(self.idt_B, self.real_A) * lambda_A * lambda_idt\n else:\n self.loss_idt_A = 0\n self.loss_idt_B = 0\n\n # GAN loss D_A(G_A(A))\n self.loss_G_A = self.criterionGAN(self.netD_A(self.fake_B), True)\n # GAN loss D_B(G_B(B))\n self.loss_G_B = self.criterionGAN(self.netD_B(self.fake_A), True)\n # Forward cycle loss || G_B(G_A(A)) - A||\n self.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * lambda_A\n # Backward cycle loss || G_A(G_B(B)) - B||\n self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B\n\n # combined loss and calculate gradients\n self.loss_G = self.loss_G_A + self.loss_G_B + self.loss_cycle_A + self.loss_cycle_B + self.loss_idt_A + self.loss_idt_B\n self.loss_G.backward()",
"def test_single_label():\n nml_obj = npairs.NpairsMultilabelLoss()\n # batch size = 4, hidden size = 2\n y_true = tf.constant(\n [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], dtype=tf.int64\n )\n # features of anchors\n f = tf.constant(\n [[1.0, 1.0], [1.0, -1.0], [-1.0, 1.0], [-1.0, -1.0]], dtype=tf.float32\n )\n # features of positive samples\n fp = tf.constant(\n [[1.0, 1.0], [1.0, -1.0], [-1.0, 1.0], [-1.0, -1.0]], dtype=tf.float32\n )\n # similarity matrix\n y_pred = tf.matmul(f, fp, transpose_a=False, transpose_b=True)\n loss = nml_obj(y_true, y_pred)\n\n # Loss = 1/4 * \\sum_i log(1 + \\sum_{j != i} exp(f_i*fp_j^T-f_i*f_i^T))\n # Compute loss for i = 0, 1, 2, 3 without multiplier 1/4\n # i = 0 => log(1 + sum([exp(-2), exp(-2), exp(-4)])) = 0.253846\n # i = 1 => log(1 + sum([exp(-2), exp(-4), exp(-2)])) = 0.253846\n # i = 2 => log(1 + sum([exp(-2), exp(-4), exp(-2)])) = 0.253846\n # i = 3 => log(1 + sum([exp(-4), exp(-2), exp(-2)])) = 0.253846\n # Loss = (0.253856 + 0.253856 + 0.253856 + 0.253856) / 4 = 0.253856\n\n np.testing.assert_allclose(loss, 0.253856, rtol=1e-06, atol=1e-06)\n\n # Test sparse tensor\n y_true = tf.sparse.from_dense(y_true)\n loss = nml_obj(y_true, y_pred)\n np.testing.assert_allclose(loss, 0.253856, rtol=1e-06, atol=1e-06)",
"def logit_fn(args: StepFunctionArgs) -> SingleScorePerStepTensor:\n logits = args.attribution_model.output2logits(args.forward_output)\n target_ids = args.target_ids.reshape(logits.shape[0], 1)\n return logits.gather(-1, target_ids).squeeze(-1)",
"def unlabeled_loss(self, x):\n\tqy_l = dgm.forwardPassCat(x, self.qy_x, self.n_hid, self.nonlinearity, self.bn, scope='qy_x')\n\tx_r = tf.tile(x, [self.n_y,1])\n\ty_u = tf.reshape(tf.tile(tf.eye(self.n_y), [1, tf.shape(self.x_u)[0]]), [-1, self.n_y])\n\tn_u = tf.shape(x)[0] \n\tlb_u = tf.transpose(tf.reshape(self.labeled_loss(x_r, y_u), [self.n_y, n_u]))\n\tlb_u = tf.reduce_sum(qy_l * lb_u, axis=-1)\n\tqy_entropy = -tf.reduce_sum(qy_l * tf.log(qy_l + 1e-10), axis=-1)\n\treturn lb_u + qy_entropy",
"def backward_G(self):\n lambda_idt = self.opt.lambda_identity\n lambda_A = self.opt.lambda_A\n lambda_B = self.opt.lambda_B\n # Identity loss\n if lambda_idt > 0:\n # G_A should be identity if real_B is fed: ||G_A(B) - B||\n self.idt_A = self.netG_A(self.real_B)\n self.loss_idt_A = self.criterionIdt(self.idt_A, self.real_B) * lambda_B * lambda_idt\n # G_B should be identity if real_A is fed: ||G_B(A) - A||\n self.idt_B = self.netG_B(self.real_A)\n self.loss_idt_B = self.criterionIdt(self.idt_B, self.real_A) * lambda_A * lambda_idt\n else:\n self.loss_idt_A = 0\n self.loss_idt_B = 0\n\n # GAN loss D_A(G_A(A))\n self.loss_G_A = self.criterionGAN(self.netD_A(self.fake_B), True)\n # GAN loss D_B(G_B(B))\n self.loss_G_B = self.criterionGAN(self.netD_B(self.fake_A), True)\n # Forward cycle loss || G_B(G_A(A)) - A||\n self.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * lambda_A\n # Backward cycle loss || G_A(G_B(B)) - B||\n self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B\n\n\n # ่ฎก็ฎๅๅฒๆๅคฑ๏ผๆฅไฝฟGๅSeg็ฝ็ป๏ผไฝฟๅๅฒๆญฃ็กฎ\n self.CrossEntropyLoss = self.criterionSeg(self.pre, self.label.long().squeeze(1))\n self.L2_loss = self.L2_loss_net(self.netSeg)\n self.loss_Seg = self.CrossEntropyLoss + self.L2_loss\n\n # combined loss and calculate gradients\n self.loss_G = self.loss_G_A + self.loss_G_B + self.loss_cycle_A + self.loss_cycle_B + self.loss_idt_A + self.loss_idt_B + self.loss_Seg\n self.loss_G.backward()",
"def ap_entropy(X, M, R):",
"def backward_G(self):\n lambda_idt = self.opt.lambda_identity\n lambda_A = self.opt.lambda_A\n lambda_B = self.opt.lambda_B\n # Identity loss\n self.loss_idt_A = self.criterionIdt(self.idt_A, self.real_B) * lambda_B * lambda_idt\n self.loss_idt_B = self.criterionIdt(self.idt_B, self.real_A) * lambda_A * lambda_idt\n\n # GAN loss D_A(G_A(A))\n self.loss_G_A = self.criterionGAN(self.netD_A(self.fake_B), True)\n # GAN loss D_B(G_B(B))\n self.loss_G_B = self.criterionGAN(self.netD_B(self.fake_A), True)\n # Forward cycle loss || G_B(G_A(A)) - A||\n self.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * lambda_A\n # Backward cycle loss || G_A(G_B(B)) - B||\n self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B\n # combined loss and calculate gradients\n self.loss_G = self.loss_G_A + self.loss_G_B + \\\n self.loss_cycle_A + self.loss_cycle_B + \\\n self.loss_idt_A + self.loss_idt_B\n\n if self.opt.online_distillation or self.opt.normal_distillation:\n\n self.Tfake_A = self.teacher_model.fake_A\n self.Tfake_B = self.teacher_model.fake_B\n current_distillation_Afeatures = self.get_distillation_features(AorB='A')\n current_distillation_Bfeatures = self.get_distillation_features(AorB='B')\n\n self.teacher_model.netD_A(self.fake_B.detach()) # output teacher discriminator feature for netG_A\n self.teacher_model.netD_B(self.fake_A.detach()) # output teacher discriminator feature for netG_B\n teacher_discriminator_Afeatures = list(self.teacher_model.total_netD_A_features.values())\n current_distillation_Afeatures[-len(self.discriminator_extract_layers):] = teacher_discriminator_Afeatures[:]\n teacher_discriminator_Bfeatures = list(self.teacher_model.total_netD_B_features.values())\n current_distillation_Bfeatures[-len(self.discriminator_extract_layers):] = teacher_discriminator_Bfeatures[:]\n\n self.loss_content_A = 0.0\n self.loss_gram_A = 0.0\n self.loss_content_B = 0.0\n self.loss_gram_B = 0.0\n self.loss_L1_A = 0.0\n self.loss_L1_B = 0.0\n for i, feature in enumerate(current_distillation_Afeatures):\n\n if i < 4: # generator feature should transform channel dimension consistent with teacher\n feature = self.transform_A_convs[i](feature)\n target_feature = self.target_distillation_A_features[i]\n # import pdb\n # pdb.set_trace()\n # print(feature.size(), target_feature.size())\n self.loss_gram_A += self.criterionMSE(self.gram(feature), self.gram(target_feature.detach()))\n self.loss_content_A += self.criterionMSE(feature, target_feature.detach())\n self.loss_L1_A += self.criterionL1(self.fake_B, self.Tfake_B.detach())\n for i, feature in enumerate(current_distillation_Bfeatures):\n\n if i < 4: # generator feature should transform channel dimension consistent with teacher\n feature = self.transform_B_convs[i](feature)\n target_feature = self.target_distillation_B_features[i]\n self.loss_gram_B += self.criterionMSE(self.gram(feature), self.gram(target_feature.detach()))\n self.loss_content_B += self.criterionMSE(feature, target_feature.detach())\n self.loss_L1_B += self.criterionL1(self.fake_A, self.Tfake_A.detach())\n\n self.loss_gram_A = self.opt.lambda_gram * self.loss_gram_A\n self.loss_content_A = self.opt.lambda_content * self.loss_content_A\n self.loss_L1_A = self.opt.lambda_L1 * self.loss_L1_A\n self.loss_gram_B = self.opt.lambda_gram * self.loss_gram_B\n self.loss_content_B = self.opt.lambda_content * self.loss_content_B\n self.loss_L1_B = self.opt.lambda_L1 * self.loss_L1_B\n self.loss_G += self.loss_gram_A + self.loss_gram_B\n self.loss_G += 
self.loss_content_A + self.loss_content_B\n self.loss_G += self.loss_L1_A + self.loss_L1_B\n\n self.loss_G.backward()\n\n self.L1_sparsity()",
"def loss_fn(pert):\n logits = model_fn(x + pert)[0]\n loss_multiplier = 1 if targeted else -1\n return loss_multiplier * _margin_logit_loss(logits,\n y.expand(len(pert)))",
"def logistic_L1_loss(y, X, w, lamb):\r\n\r\n cost = logistic_L0_loss(y, X, w) + cost_L1_regularizer(w, lamb)\r\n\r\n return cost",
"def update_kl_loss(p, lambdas, T, Cs):\r\n tmpsum = sum([lambdas[s] * np.dot(T[s].T, Cs[s]).dot(T[s])\r\n for s in range(len(T))])\r\n ppt = np.outer(p, p)\r\n\r\n return np.exp(np.divide(tmpsum, ppt))",
"def loss_function(inst):\n return -np.mean(np.log(inst))",
"def identity_block(input_tensor):\n y = tf.keras.layers.Conv2D(filters=12, kernel_size=(3, 3), padding=\"same\")(\n input_tensor\n )\n y = tf.keras.layers.ReLU()(y)\n y = tf.keras.layers.Conv2D(filters=24, kernel_size=(3, 3), padding=\"same\")(y)\n out = tf.keras.layers.Add()([y, input_tensor])\n out = tf.keras.layers.ReLU()(out)\n return out",
"def estimatePseudonormalsCalibrated(I, L):\n\n a=scipy.sparse.csr_matrix(I)\n\n b1 = np.linalg.inv(L@L.T)\n b2 = b1@L\n\n B=b2@a \n return B"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parses a tensorflow.SequenceExample into an image and caption. | def parse_sequence_example(serialized, image_id, image_feature, caption_feature):
context, sequence = tf.parse_single_sequence_example(
serialized,
context_features={
image_id : tf.FixedLenFeature([], dtype=tf.int64),
image_feature: tf.FixedLenFeature([], dtype=tf.string)
},
sequence_features={
caption_feature: tf.FixedLenSequenceFeature([], dtype=tf.int64),
})
encoded_image_id = context[image_id]
encoded_image = context[image_feature]
caption = sequence[caption_feature]
return encoded_image_id, encoded_image, caption | [
"def parse_sequence_example(serialized, image_feature, caption_feature):\n context, sequence = tf.parse_single_sequence_example(\n serialized,\n context_features={\n image_feature: tf.FixedLenFeature([], dtype=tf.string)\n },\n sequence_features={\n caption_feature: tf.FixedLenSequenceFeature([], dtype=tf.int64),\n })\n\n encoded_image = context[image_feature]\n caption = sequence[caption_feature]\n return encoded_image, caption",
"def parse_sequence_example(serialized, image_feature, caption_feature):\n\tcontext, sequence = tf.parse_single_sequence_example(\n\t\t\tserialized,\n\t\t\tcontext_features={\n\t\t\t\t\timage_feature: tf.FixedLenFeature([], dtype=tf.string)\n\t\t\t},\n\t\t\tsequence_features={\n\t\t\t\t\tcaption_feature: tf.FixedLenSequenceFeature([], dtype=tf.int64),\n\t\t\t})\n\n\tencoded_image = context[image_feature]\n\tcaption = sequence[caption_feature]\n\treturn encoded_image, caption",
"def _parse_tfexample(example):\n\n ## parse\n features = tf.parse_single_example(example, KEYS2FEATURES)\n\n image = tf.image.decode_png(features['image/encoded'])\n label = tf.image.decode_png(features['label/encoded'])\n # label is decoded as a 3-D png image\n label = label[..., 0]\n im_path = features['image/path']\n la_path = features['label/path']\n\n return image, label, im_path, la_path",
"def example_parser(serialized_example):\n features = tf.parse_single_example(\n serialized_example,\n features={\n 'bytesImg': tf.FixedLenFeature([], tf.string),\n 'label': tf.FixedLenFeature([], tf.int64),\n })\n image = tf.decode_raw(features['bytesImg'], tf.uint8)\n image.set_shape([init_params['data_params']['image_size'][0] * init_params['data_params']['image_size'][1]])\n\n # Normalize the values of the image from the range [0, 255] to [-0.5, 0.5]\n image = tf.cast(image, tf.float32)\n label = tf.cast(features['label'], tf.int32)\n return image, tf.one_hot(label, init_params['data_params']['num_classes'])",
"def parser(self, serialized_example):\n features = tf.parse_single_example(\n serialized_example,\n features={\n 'image/encoded': tf.FixedLenFeature([], tf.string),\n 'image/format': tf.FixedLenFeature([], tf.string),\n 'image/class/label': tf.FixedLenFeature([], tf.int64),\n 'image/height': tf.FixedLenFeature([1], tf.int64),\n 'image/width': tf.FixedLenFeature([1], tf.int64),\n }\n )\n\n image = tf.image.decode_png(features['image/encoded'], dtype=tf.uint8)\n image = tf.cast(image, tf.float32) / 255\n label = tf.cast(features['image/class/label'], tf.int32)\n\n return image, label",
"def prepare_example(image_path, annotations, label_map_dict):\n print(\"encoding %s\" % image_path)\n with tf.gfile.GFile(image_path, 'rb') as fid:\n encoded_png = fid.read()\n encoded_png_io = io.BytesIO(encoded_png)\n image = pil.open(encoded_png_io)\n\n if image.format != 'PNG':\n raise ValueError('Image format error')\n\n key = hashlib.sha256(encoded_png).hexdigest()\n # obtain attributes\n width, height = image.size\n img_filename = image_path.split('/')[-1]\n\n xmin = []\n ymin = []\n xmax = []\n ymax = []\n classes = []\n classes_text = []\n truncated = []\n occlud = []\n\n xmin.append(int(annotations[2]) / width)\n ymin.append(int(annotations[3]) / height)\n xmax.append(int(annotations[4]) / width)\n ymax.append(int(annotations[5]) / height)\n class_name = annotations[1]\n classes_text.append(class_name)\n classes.append(label_map_dict[class_name])\n classes_text = [class_text.encode('utf-8') for class_text in classes_text]\n trun, occ = annotations[6].split(',')\n truncated.append(int(trun))\n occlud.append(int(occ))\n\n example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': dataset_util.int64_feature(height),\n 'image/width': dataset_util.int64_feature(width),\n 'image/filename': dataset_util.bytes_feature(img_filename.encode('utf8')),\n 'image/source_id': dataset_util.bytes_feature(img_filename.encode('utf8')),\n 'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),\n 'image/encoded': dataset_util.bytes_feature(encoded_png),\n 'image/format': dataset_util.bytes_feature('png'.encode('utf8')),\n 'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),\n 'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),\n 'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),\n 'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),\n 'image/object/class/text': dataset_util.bytes_list_feature(classes_text),\n 'image/object/class/label': dataset_util.int64_list_feature(classes),\n 'image/object/truncated': dataset_util.int64_list_feature(truncated),\n 'image/object/view': dataset_util.int64_list_feature(occlud),\n }))\n return example",
"def read_tfrecord(example):\n features = {\n \"image\": tf.io.FixedLenFeature([], tf.string), # tf.string = bytestring (not text string)\n \"class\": tf.io.FixedLenFeature([], tf.int64), # shape [] means scalar\n }\n # decode the TFRecord\n example = tf.io.parse_single_example(example, features)\n\n image = tf.image.decode_jpeg(example['image'], channels=3)\n image = tf.cast(image, tf.float32)/ 255.0\n image = tf.reshape(image, [TARGET_SIZE,TARGET_SIZE, 3])\n\n class_label = tf.cast(example['class'], tf.int32)\n\n return image, class_label",
"def single_example_parser(serialized_example):\n # Dimensions of the images in the CIFAR-10 dataset.\n # See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the\n # input format.\n features = tf.parse_single_example(\n serialized_example,\n features={\n 'image': tf.FixedLenFeature([], tf.string),\n 'label': tf.FixedLenFeature([], tf.int64),\n })\n image = tf.decode_raw(features['image'], tf.uint8)\n image.set_shape([DEPTH * HEIGHT * WIDTH])\n\n # Reshape from [depth * height * width] to [depth, height, width].\n image = tf.cast(\n tf.transpose(tf.reshape(image, [DEPTH, HEIGHT, WIDTH]), [1, 2, 0]),\n tf.float32)\n label = tf.cast(features['label'], tf.int32)\n \n image = train_preprocess_fn(image)\n label = tf.one_hot(label, NUM_CLASSES)\n \n return image, label",
"def parse_labeled_example(\n example_proto, view_index, preprocess_fn, image_attr_keys, label_attr_keys):\n features = {}\n for attr_key in image_attr_keys:\n features[attr_key] = tf.FixedLenFeature((), tf.string)\n for attr_key in label_attr_keys:\n features[attr_key] = tf.FixedLenFeature((), tf.int64)\n parsed_features = tf.parse_single_example(example_proto, features)\n image_only_keys = [i for i in image_attr_keys if 'image' in i]\n view_image_key = image_only_keys[view_index]\n image = preprocessing.decode_image(parsed_features[view_image_key])\n preprocessed = preprocess_fn(image, is_training=False)\n attributes = [parsed_features[k] for k in label_attr_keys]\n task = parsed_features['task']\n return tuple([preprocessed] + attributes + [task])",
"def parse_record(raw_record):\n keys_to_features = {\n 'image/height':\n tf.FixedLenFeature((), tf.int64),\n 'image/width':\n tf.FixedLenFeature((), tf.int64),\n 'image/encoded':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/format':\n tf.FixedLenFeature((), tf.string, default_value='jpg'),\n 'label/encoded':\n tf.FixedLenFeature((), tf.string, default_value=''),\n #tf.FixedLenFeature([2448 * 2448], tf.float32),\n 'label/format':\n tf.FixedLenFeature((), tf.string, default_value='png'),\n }\n\n parsed = tf.parse_single_example(raw_record, keys_to_features)\n\n # height = tf.cast(parsed['image/height'], tf.int32)\n # width = tf.cast(parsed['image/width'], tf.int32)\n\n image = tf.image.decode_image(\n tf.reshape(parsed['image/encoded'], shape=[]), _DEPTH)\n image = tf.to_float(tf.image.convert_image_dtype(image, dtype=tf.uint8))\n image.set_shape([None, None, 3])\n\n label = tf.image.decode_image(\n tf.reshape(parsed['label/encoded'], shape=[]), 1)\n label = tf.to_int32(tf.image.convert_image_dtype(label, dtype=tf.uint8))\n label.set_shape([None, None, 1])\n\n #label = tf.reshape(parsed['label/encoded'], [2448, 2448, 1])\n #label = tf.to_int32(label)\n return image, label",
"def decode(self, tf_seq_example_string_tensor):\n serialized_example = tf.reshape(tf_seq_example_string_tensor, shape=[])\n decoder = slim_example_decoder.TFSequenceExampleDecoder(\n self._context_keys_to_features, self._sequence_keys_to_feature_lists,\n self._items_to_handlers)\n keys = decoder.list_items()\n tensors = decoder.decode(serialized_example, items=keys)\n tensor_dict = dict(list(zip(keys, tensors)))\n tensor_dict[fields.InputDataFields.groundtruth_boxes].set_shape(\n [None, None, 4])\n tensor_dict[fields.InputDataFields.num_groundtruth_boxes] = tf.cast(\n tensor_dict[fields.InputDataFields.num_groundtruth_boxes],\n dtype=tf.int32)\n tensor_dict[fields.InputDataFields.groundtruth_classes] = tf.cast(\n tensor_dict[fields.InputDataFields.groundtruth_classes], dtype=tf.int32)\n tensor_dict[fields.InputDataFields.original_image_spatial_shape] = tf.cast(\n tf.stack([\n tensor_dict[fields.InputDataFields.image_height],\n tensor_dict[fields.InputDataFields.image_width]\n ]),\n dtype=tf.int32)\n tensor_dict.pop(fields.InputDataFields.image_height)\n tensor_dict.pop(fields.InputDataFields.image_width)\n\n def default_groundtruth_weights():\n \"\"\"Produces weights of 1.0 for each valid box, and 0.0 otherwise.\"\"\"\n num_boxes_per_frame = tensor_dict[\n fields.InputDataFields.num_groundtruth_boxes]\n max_num_boxes = tf.reduce_max(num_boxes_per_frame)\n num_boxes_per_frame_tiled = tf.tile(\n tf.expand_dims(num_boxes_per_frame, axis=-1),\n multiples=tf.stack([1, max_num_boxes]))\n range_tiled = tf.tile(\n tf.expand_dims(tf.range(max_num_boxes), axis=0),\n multiples=tf.stack([tf.shape(num_boxes_per_frame)[0], 1]))\n return tf.cast(\n tf.greater(num_boxes_per_frame_tiled, range_tiled), tf.float32)\n\n tensor_dict[fields.InputDataFields.groundtruth_weights] = tf.cond(\n tf.greater(\n tf.size(tensor_dict[fields.InputDataFields.groundtruth_weights]),\n 0), lambda: tensor_dict[fields.InputDataFields.groundtruth_weights],\n default_groundtruth_weights)\n\n if self._fully_annotated:\n tensor_dict[fields.InputDataFields.is_annotated] = tf.ones_like(\n tensor_dict[fields.InputDataFields.num_groundtruth_boxes],\n dtype=tf.bool)\n else:\n tensor_dict[fields.InputDataFields.is_annotated] = tf.cast(\n tensor_dict[fields.InputDataFields.is_annotated], dtype=tf.bool)\n\n return tensor_dict",
"def _parse_example(self, example, scale_to_0_1: bool = False):\n\n features = {\n 'image': tf.FixedLenFeature([], tf.string),\n 'mask': tf.FixedLenFeature([], tf.string),\n }\n parsed_example = tf.parse_single_example(example, features)\n\n image = tf.decode_raw(parsed_example['image'], self.serialized_image_raw_dtype)\n image = tf.reshape(image, (self.image_width, self.image_width, self.image_channels))\n image = tf.cast(image, tf.float32)\n if scale_to_0_1:\n image /= 255.\n\n mask = tf.decode_raw(parsed_example['mask'], self.serialized_mask_raw_dtype)\n mask = tf.reshape(mask, (self.image_width, self.image_width, self.mask_channels))\n mask = tf.cast(mask, tf.float32) / 255.\n return image, mask",
"def create_sequence_example(inner_image_path,\n inner_sample):\n\n # serialize a pointer to the disk location of the image features\n # copying data for every training example would consume too much storage\n image_path_feature = tf.train.Feature(\n bytes_list=tf.train.BytesList(\n value=[bytes(inner_image_path, \"utf-8\")]))\n\n # add all other tokens to the tf record\n words_feature = tf.train.FeatureList(\n feature=[tf.train.Feature(\n int64_list=tf.train.Int64List(value=[t])) for t in inner_sample.words])\n tags_feature = tf.train.FeatureList(\n feature=[tf.train.Feature(\n int64_list=tf.train.Int64List(value=[t])) for t in inner_sample.tags])\n\n # create the dictionary of features to save\n context_dict = dict(image_path=image_path_feature)\n sequence_dict = dict(words=words_feature, tags=tags_feature)\n\n # create a sequence example\n return tf.train.SequenceExample(\n context=tf.train.Features(feature=context_dict),\n feature_lists=tf.train.FeatureLists(\n feature_list=sequence_dict))",
"def read_tfrecord(example, labeled=True):\n if labeled:\n TFREC_FORMAT = {\n 'image': tf.io.FixedLenFeature([], tf.string), \n 'target': tf.io.FixedLenFeature([], tf.int64), \n }\n else:\n TFREC_FORMAT = {\n 'image': tf.io.FixedLenFeature([], tf.string), \n 'image_name': tf.io.FixedLenFeature([], tf.string), \n }\n example = tf.io.parse_single_example(example, TFREC_FORMAT)\n image = decode_image(example['image'])\n if labeled:\n label_or_name = tf.cast(example['target'], tf.int32)\n else:\n label_or_name = example['image_name']\n return image, label_or_name",
"def get_example(self, filename, label):\n # read matrix data and save its shape \n data = self.fn2video(filename)\n t, h, w, c = data.shape\n \n # save video as list of encoded frames using tensorflow's operation \n img_bytes = [tf.image.encode_jpeg(frame, format='rgb') for frame in data]\n with tf.Session() as sess: \n img_bytes = sess.run(img_bytes)\n \n sequence_dict = {}\n # create a feature for each encoded frame\n img_feats = [tf.train.Feature(bytes_list=\\\n tf.train.BytesList(value=[imgb])) for imgb in img_bytes]\n # save video frames as a FeatureList\n sequence_dict['video_frames'] = tf.train.FeatureList(feature=img_feats)\n\n # also store associated meta-data\n context_dict = {}\n context_dict['filename'] = _bytes_feature(str(filename).encode('utf-8'))\n context_dict['label'] = _int64_feature(label)\n context_dict['temporal'] = _int64_feature(t)\n context_dict['height'] = _int64_feature(h)\n context_dict['width'] = _int64_feature(w)\n context_dict['depth'] = _int64_feature(c)\n\n # combine list + context to create TFRecords example \n sequence_context = tf.train.Features(feature=context_dict)\n sequence_list = tf.train.FeatureLists(feature_list=sequence_dict)\n example = tf.train.SequenceExample(context=sequence_context, \\\n feature_lists=sequence_list)\n\n return example",
"def parse_sequence_example(serialized_example, num_views):\n context_features = {\n 'task': tf.FixedLenFeature(shape=[], dtype=tf.string),\n 'len': tf.FixedLenFeature(shape=[], dtype=tf.int64)\n }\n view_names = ['view%d' % i for i in range(num_views)]\n fixed_features = [\n tf.FixedLenSequenceFeature(\n shape=[], dtype=tf.string) for _ in range(len(view_names))]\n sequence_features = dict(zip(view_names, fixed_features))\n context_parse, sequence_parse = tf.parse_single_sequence_example(\n serialized=serialized_example,\n context_features=context_features,\n sequence_features=sequence_features)\n views = tf.stack([sequence_parse[v] for v in view_names])\n lens = [sequence_parse[v].get_shape().as_list()[0] for v in view_names]\n assert len(set(lens)) == 1\n seq_len = tf.shape(sequence_parse[view_names[-1]])[0]\n return context_parse, views, seq_len",
"def parse_record(raw_record):\n keys_to_features = {\n 'image/height':\n tf.FixedLenFeature((), tf.int64),\n 'image/width':\n tf.FixedLenFeature((), tf.int64),\n 'image/encoded':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/format':\n tf.FixedLenFeature((), tf.string, default_value='jpeg'),\n 'label/encoded':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'label/format':\n tf.FixedLenFeature((), tf.string, default_value='png'),\n }\n\n parsed = tf.parse_single_example(raw_record, keys_to_features)\n\n # height = tf.cast(parsed['image/height'], tf.int32)\n # width = tf.cast(parsed['image/width'], tf.int32)\n\n image = tf.image.decode_image(\n tf.reshape(parsed['image/encoded'], shape=[]), _DEPTH)\n image = tf.to_float(tf.image.convert_image_dtype(image, dtype=tf.uint8))\n image.set_shape([None, None, 3])\n\n label = tf.image.decode_image(\n tf.reshape(parsed['label/encoded'], shape=[]), 1)\n label = tf.to_int32(tf.image.convert_image_dtype(label, dtype=tf.uint8))\n label.set_shape([None, None, 1])\n\n\n return image, label",
"def create_cat_tf_example(label, label_text, img_path, img_name):\n\t\n\twith tf.gfile.FastGFile(img_path + img_name, 'rb') as fid:\n\t encoded_image = fid.read() \n\n\tencoded_image_data = sess.run(resize_image, {encoded_jpg_ph: encoded_image}) # I think this may not be the right way of doing this\n\tb_filename = str.encode(img_name)\n\n\timage_format = b'jpg'\n\txmins = [10.0 / width]\n\txmaxs = [(width - 10) / width]\n\tymins = [10.0 / height]\n\tymaxs = [(height - 10.0) / height]\n\t# classes_text = [str.encode(label_text)]\n\tclasses_text = []\n\tif label_text:\n\t\tclasses_text.append(label_text.encode('utf8'))\n\tclasses = []\n\t# if label == 1:\n\tclasses.append(int(label))\n\t# print(classes_text, classes, b_filename)\n\ttf_example = tf.train.Example(features=tf.train.Features(feature={\n\t\t'image/height': dataset_util.int64_feature(height),\n\t\t'image/width': dataset_util.int64_feature(width),\n\t\t'image/filename': dataset_util.bytes_feature(b_filename),\n\t\t'image/source_id': dataset_util.bytes_feature(b_filename),\n\t\t'image/encoded': dataset_util.bytes_feature(encoded_image_data),\n\t\t# 'image/encoded': dataset_util.bytes_feature(encoded_jpg),\n\t\t'image/format': dataset_util.bytes_feature(image_format),\n\t\t'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),\n\t\t'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),\n\t\t'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),\n\t\t'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),\n\t\t'image/object/class/text': dataset_util.bytes_list_feature(classes_text),\n\t\t'image/object/class/label': dataset_util.int64_list_feature(classes),\n\t}))\n\treturn tf_example",
"def sequence_example_to_model_input(seq_example_string, num_labels, feat_shape):\n import tensorflow as tf\n context_definition = {\n \"target\": tf.io.FixedLenFeature(shape=[num_labels], dtype=tf.float32),\n }\n sequence_definition = {\n \"inputs\": tf.io.FixedLenSequenceFeature(shape=feat_shape[1:], dtype=tf.float32)\n }\n context, sequence = tf.io.parse_single_sequence_example(\n seq_example_string,\n context_features=context_definition,\n sequence_features=sequence_definition\n )\n return sequence[\"inputs\"], context[\"target\"]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Run the demo, testing whether input words are beer-related. | def run_demo():
while True:
embeddings = beer_emb.embed_doc(input("Test if words are beer-related: "),
word_filter=False)
for word_vec in embeddings:
print(is_beer_related(word_vec)) | [
"def test_run():\r\n print(count_words(\"cat bat mat cat bat cat\", 3))\r\n print(count_words(\"betty bought a bit of butter but the butter was bitter\", 3))",
"def test_demo_runs(self):\n self.star.run_demo()",
"def test_run():\n print count_words(\"cat bat mat cat bat cat\", 3)\n print count_words(\"betty bought a bit of butter but the butter was bitter\", 3)",
"def test_Demo(self):\n self._run(self._example_scenarios, \"Demo\")",
"def ShowExamples(word, cfd, corpus):\n print \"Exemples for word: \", word\n flag = 0\n tagged_sentences = corpus.tagged_sents(tagset='universal')\n words_tags = cfd[word]\n examples = []\n for tag in words_tags:\n for sent in tagged_sentences:\n for word_tag in sent:\n if word_tag[0] == word and word_tag[1] == tag:\n print tag, \" ---> \", untag(sent)\n examples.append(sent)\n flag = 1\n break\n if flag == 1:\n break\n flag = 0",
"def test_win(self):\n self.choice.return_value = \"ant\"\n self.input.side_effect = list(\"ant\" \"n\")\n\n gallows.main()\n\n self.xprint.assert_any_call('Yes! The secret word is \"ant\"! '\n 'You have won!')",
"def test_get_words(self):\n\n get_words = getattr(word, 'get_words')\n runner = CliRunner()\n result = runner.invoke(get_words, [])\n\n logger.debug(\"result output: {}\".format(result.output))\n\n self.assertTrue(result.exit_code == 0)",
"def test_choose(self):\n # Run test\n self.word.choose()\n\n # Evaluate test\n self.assertIn(self.word.show(), [w.lower() for w in TESTS_WORDS_LIST])",
"def handle_demo(resp, show_parse, verbose):\n\n # -------------------------------------------------------------------------\n # CUSTOMISE DEMO_FILE CONTENTS TO THE PURPOSE OF THIS BOT (NO CHANGES\n # REQUIRED IN THE CODE HERE)\n # -------------------------------------------------------------------------\n\n demo_delay = 3\n print_settings('Running automated demo')\n for ent in resp['entities']:\n if ent['entity'].lower() == 'speed':\n if ent['value'].lower() in ['quick', 'fast']:\n demo_delay = 0\n prompt_text = '>'\n with open(DEMO_FILE, 'r') as f:\n for line in f:\n say_text(prompt_text + line)\n time.sleep(demo_delay)\n check_input(line, show_parse, verbose)\n time.sleep(demo_delay)\n\n print_settings('Automated demo complete')",
"def demo_mode(capital):\n try:\n if sys.argv[1] == \"--demo\":\n print(capital[1])\n except IndexError:\n pass",
"def ShowExamples_v2(word, cfd, corpus):\n print \"Exemples for word: \", word\n tagged_sentences = corpus.tagged_sents(tagset='universal')\n words_tags = cfd[word]\n examples = {}\n\n for tag in words_tags:\n sent = next((sent for sent in tagged_sentences if (word, tag) in sent))\n print tag, \" ---> \", untag(sent)\n examples[tag] = ' '.join(untag(sent))\n\n return examples",
"def test_example_runs(self):\n run_example(\n verbose=False,\n testapp=self.testapp,\n )",
"def test_words_words(self):\n words = self.page.words\n for word in [\"BEFORE\", \"YOU\", \"BUY\"]:\n self.assertTrue(word in words)",
"def main():\n answers_style = drink_style_input()\n drink = drink_make(answers_style)\n print \"\"\n print \"Your drink includes:\"\n for ingredient in drink:\n print \"A {}\".format(ingredient)",
"def demo():\n from nodebox_linguistics_extended.parser.nltk_lite.corpora import brown\n from nodebox_linguistics_extended.parser.nltk_lite import tag\n import sys\n\n print(\"Training taggers.\")\n\n # Create a default tagger\n t0 = tag.Default(\"nn\")\n\n t1 = tag.Unigram(cutoff=1, backoff=t0)\n t1.train(brown.tagged(\"a\"), verbose=True)\n\n t2 = tag.Affix(-3, 5, cutoff=2, backoff=t0)\n t2.train(brown.tagged(\"a\"), verbose=True)\n\n t3 = tag.Regexp([(r\".*ed\", \"vbd\")], backoff=t0) # no training\n\n t4 = tag.Lookup({\"the\": \"dt\"}, backoff=t0)\n\n test_tokens = []\n num_words = 0\n\n print(\"=\" * 75)\n print(\"Running the taggers on test data...\")\n print(\" Default (nn) tagger: \", end=\" \")\n sys.stdout.flush()\n _demo_tagger(t0, brown.tagged(\"b\"))\n\n print(\" Unigram tagger: \", end=\" \")\n sys.stdout.flush()\n _demo_tagger(t1, list(brown.tagged(\"b\"))[:1000])\n\n print(\" Affix tagger: \", end=\" \")\n sys.stdout.flush()\n _demo_tagger(t2, list(brown.tagged(\"b\"))[:1000])\n\n print(\" Regexp tagger: \", end=\" \")\n sys.stdout.flush()\n _demo_tagger(t3, list(brown.tagged(\"b\"))[:1000])\n\n print(\" Lookup tagger: \", end=\" \")\n sys.stdout.flush()\n _demo_tagger(t4, list(brown.tagged(\"b\"))[:1000])",
"def test_show(self):\n # Prepare test\n self.word.choose()\n\n # Run test\n self.word.show()\n w2 = self.word.show()\n\n # Evaluate test\n self.assertEqual(w2, EMPTY_WORD)",
"def test_demo_runs():\n materials.plot_utils_demo.main()",
"def main(words, s):\n if words:\n words = int(words)\n click.echo(lorem.words(words))\n\n # Returns a lorem ipsum sentence\n elif s:\n click.echo(lorem.sentence())\n\n # Returns a lorem ipsum paragraph by default\n else:\n click.echo(lorem.paragraph())",
"def test_text_classifier_vaporise(self):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load parsed beautifulsoup object holding the full html | def load_parsed(self):
with open(self.fname) as f:
self.parsed = BeautifulSoup(f.read(), features="html.parser") | [
"def load_website(self):\n# r = urllib.request.urlopen(self.url).read()\n r = requests.get(self.url).content \n self.soup = bs(r, \"lxml\")",
"def parse(html):\n\n return BeautifulSoup(html, 'html.parser')",
"def update_html(self):\n self.html = self.driver.page_source\n self.soup = BeautifulSoup(self.html, features=\"lxml\")",
"def beautifulsoup(self):\n file_name = os.path.join(self.data_dir, \"data.html\")\n with open(file_name, \"r\") as f:\n soup = BeautifulSoup(f, \"lxml\")\n soup.prettify()",
"def __get_html_parser(self, url: str):\r\n page = requests.get(url)\r\n soup = BeautifulSoup(page.content, 'html.parser')\r\n return soup",
"def __init__(self):\n\t\tself.bs = BeautifulSoup(\"\", \"lxml\")",
"def get_soup(self):\r\n import BeautifulSoup\r\n return BeautifulSoup.BeautifulSoup(self.data)",
"def _extract_html(self, url):\n self.response = requests.get(url, timeout=5)\n self.html = BeautifulSoup(self.response.content, \"lxml\") if self.response.ok else None\n # return self.html",
"def parse(html, encoding='utf-8'):\n if isinstance(html, unicode):\n return bs4.BeautifulSoup(html, 'html.parser')\n\n return bs4.BeautifulSoup(html, 'html.parser', from_encoding=encoding)",
"def load_and_parse_page(self, url):\n logging.info(\"load \" + url)\n\n if self.is_cached(url):\n html_of_page = self.get_from_cache(url)\n else:\n try:\n web_page_socket = urllib2.urlopen(url)\n html_of_page = decode_html(web_page_socket.read())\n self.put_in_cache(url, html_of_page)\n except urllib2.HTTPError as err:\n logging.critical(err.url)\n logging.critical(dir(err))\n raise\n\n etree_document = self.parser.parse(html_of_page)\n return etree_document",
"def parse_page(url):\n r = requests.get(url, headers=HEADERS)\n r.encoding = 'utf-8'\n return BeautifulSoup(r.text)",
"def load_data(self):\n with open(self.FILE, 'r') as html_file:\n document = html_file.read()\n self.HTML = document",
"def load_html(filename):\n with open(filename, encoding=\"utf-8\") as f:\n html = f.read()\n return BeautifulSoup(html, \"lxml\")",
"def parse_page(url):\n r = requests.get(url, headers=_HEADERS)\n r.encoding = 'utf-8'\n return BeautifulSoup(r.text, 'lxml')",
"def convert_to_soup(html):\n soup = BeautifulSoup(html, 'html.parser')\n \n return soup",
"def get_soup(link):\n request_object = requests.get(link)\n soup = BeautifulSoup(request_object.content, 'html.parser')\n return soup",
"def make_soup(url):\r\n htmlFile = urllib.request.urlopen(url).read()\r\n soup = BeautifulSoup(htmlFile)\r\n return soup",
"def soup(self) -> Soup:\n return Soup(self.html)",
"def convert_content(self, html):\n\n try:\n dom = BeautifulSoup(html, 'html.parser')\n return self.parse_content(dom)\n except:\n return html"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Iterator over maintext paragraph elements; this includes footnotes. | def _paragraphs_raw(self):
for par in self.parsed.find_all("p")[self.PAR_START:]:
yield par | [
"def linked_text_paragraphs(self):\n for par in self._main_paragraphs_raw():\n par_links = par.find_all('a')\n if len(par_links) == 0:\n self.main_count += len(par.text)\n yield par.text\n else:\n for el in par.contents:\n if el.name is None:\n #this is plain text\n self.main_count += len(str(el))\n yield str(el)\n elif el.name == \"a\" and \"href\" in el.attrs:\n id = el[\"href\"].lstrip('#')\n try:\n foot_par = self._get_footnote_par(id)\n except NoFootnoteError:\n self.log(f\"Could not find footnote for {id}, skipping.\")\n self.footnote_count += len(foot_par.text)\n yield foot_par.text",
"def paragraphIterator(text,\t# text (string) to search for paragraphs\n ):\n start = 0\n endPara = text.find(PARAGRAPH_BOUNDARY, start)\n while endPara != -1:\n yield text[start : endPara].strip()\n start = endPara + PARAGRAPH_BOUNDARY_LEN\n endPara = text.find(PARAGRAPH_BOUNDARY, start)\n yield text[start: ].strip()",
"def iter_main_text(self, element):\n if element.tag == 'note':\n return\n if element.text:\n yield element.text\n for e in element:\n for se in self.iter_main_text(e):\n yield se\n if e.tail:\n yield e.tail",
"def paragraphs(self):\n return [el for el in self.elements if isinstance(el, Paragraph)]",
"def testParagraphs(self):\n\n textractor = Textractor(paragraphs=True)\n\n # Extract text as sentences\n paragraphs = textractor(Utils.PATH + \"/article.pdf\")\n\n # Check number of paragraphs is as expected\n self.assertEqual(len(paragraphs), 13)",
"def paragraphs(self):\n return re.findall(\"(<p>.+?</p>)\", self.body, re.I | re.S)",
"def footnotes(self):\n # TODO: Elements (e.g. Tables) can contain nested Footnotes\n return [el for el in self.elements if isinstance(el, Footnote)]",
"def _main_paragraph_text(cls, html):\n\n soup = BeautifulSoup(html, 'html.parser')\n min_length = 30\n feature_text = []\n\n for p in soup.find_all('p'):\n text_content = MainTextExtractor._remove_chars(p.get_text(strip=True))\n if text_content and (len(text_content) > min_length):\n feature_text.append(text_content)\n\n return ' \\n'.join(feature_text)",
"def paragraphs(self, data=True):\n return self.nodes(self.max_depth, data)",
"def get_next_paragraph(self):\n docstring = self.docstring\n lines = []\n index = self.index\n\n for line in docstring[index:]:\n if is_empty(line):\n break\n lines.append(line)\n\n del docstring[index:(index + len(lines))]\n return lines",
"def end_paragraph(self):\n raise NotImplementedError",
"def get_paragraphs():\n soup = get_html()\n paragraphs = []\n for i in soup.findAll('div', {'class': 'faq-list1__hide'}):\n p = str(i.get_text().strip())\n paragraphs.append(p)\n return paragraphs",
"def __split_paragraphs(self):\n split_index = -1\n for index in range(len(self.parts)):\n if type(self.parts[index]) == VParagraph and self.parts[index].list_type is None:\n if self.parts[index].find_new_lines() != -1:\n split_index = index\n break\n if split_index != -1:\n index = self.parts[split_index].find_new_lines()\n if len(self.parts[split_index].parts) == index - 1:\n return\n new_texts = self.parts[split_index][index].text.split(\"\\n\")\n first_paragraph = VParagraph()\n first_paragraph.parts = self.parts[split_index][:index]\n second_paragraph = VParagraph()\n second_paragraph.parts = self.parts[split_index][index:]\n first_paragraph.parts.extend([VText(x) for x in new_texts[:-1]])\n second_paragraph.parts[0] = VText(new_texts[-1])\n parts = self.parts[:split_index]\n parts.append(first_paragraph)\n parts.append(second_paragraph)\n if len(self.parts) > split_index + 1:\n parts.extend(self.parts[split_index+1:])\n self.parts = parts\n self.__split_paragraphs()",
"def generate_new_book(text):\n\n for paragraph in text:\n for sentence in paragraph:\n for word in sentence:\n print(word, end=' ')\n print()\n print()",
"def paragraph(self, on, **kw):\n if self._terse:\n return ''\n FormatterBase.paragraph(self, on)\n tag = 'p'\n if on:\n tagstr = self._open(tag, **kw)\n else:\n tagstr = self._close(tag)\n return tagstr",
"def prop_paragraphs(self):\n # Init variables\n author_names = self.authors\n prop_authors_paragraphs = []\n paragraph_names = []\n i = 1\n for paragraph in self.paragraphs:\n # Initialize a dictionary containing participation proportion for each authors\n prop_authors = {author_name: 0 for author_name in author_names}\n # Only take into account the real paragraphs (not the new lines)\n if not paragraph.new_line:\n # Create the label of the paragraph\n paragraph_names.append('p' + str(i))\n i += 1\n for op in paragraph.operations:\n prop_authors[op.author] += abs(op.context[\n 'proportion_paragraph']) # increment with the corresponding prop\n prop_authors_paragraphs.append(prop_authors)\n return paragraph_names, prop_authors_paragraphs",
"def _get_text(self, element):\n # for text in element.itertext():\n for text in self.iter_main_text(element):\n yield text.strip()",
"def _process_layout(self, layout):\r\n # Here we just group text into paragraphs\r\n elements = []\r\n for lt_obj in layout:\r\n if isinstance(lt_obj, LTTextBox) or isinstance(lt_obj, LTTextLine):\r\n elements.append(Paragraph(lt_obj.get_text().strip()))\r\n elif isinstance(lt_obj, LTFigure):\r\n # Recursive...\r\n elements.extend(self._process_layout(lt_obj))\r\n return elements",
"def get_div_paragraphs(text_divs, namespace=NAMESPACE):\n div_pars = []\n for div in text_divs:\n div_pars.extend(div.findall('tei:p', namespace))\n return div_pars"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks whether an element contains footnote text. | def is_footnote_text(self, par):
return (par is not None) and ("foot" in par.attrs.get("class", [])) | [
"def is_footnote(self, par):\n if par.find_next_sibling('p') is None:\n return False\n return self.is_footnote_text(par) or self.is_footnote_link(par)",
"def is_footnote_link(self, par):\n return self.is_footnote_text(par.find_next_sibling('p'))",
"def is_footnote(self):\n return self.style['float'] == 'footnote'",
"def has_text(self, page: fitz.Page) -> bool:\n return page.get_text(clip=page.trimbox).strip() != \"\"",
"def is_footnote(self, laparams, surrounding_objects):\n first_letter = self._objs[0]\n footnote_font_size = 999\n font_size = max([l.matrix[0] for l in self._objs[:20] if isinstance(l, LTChar)])\n first_letters_is_number = re.search(r'^(\\d{1,4})', self.get_text())\n\n if first_letters_is_number:\n footnote_font_size = max([letter.matrix[0] for letter in self._objs[first_letters_is_number.start(): first_letters_is_number.end()]])\n\n font_size_in_range = font_size <= laparams.footnote_max_def_size + 0.3 and abs(footnote_font_size - laparams.footnote_font_size) < 0.3\n\n too_close_objs = [\n obj for obj in surrounding_objects\n if obj.x0 - first_letter.x1 < laparams.footnote_min_def_distance and abs(obj.y1 - first_letter.y1) < 1 and obj is not self\n ]\n\n return font_size_in_range and first_letters_is_number and len(too_close_objs) == 0",
"def hasText(self):\n return len(self.text_) > 0",
"def contains_text(self, text):\n return bool(self.find_elements_by_text(text))",
"def has_text(p):\n return '' != etree.tostring(p, encoding=str, method='text').strip()",
"def has_text(self):\n try:\n first = self.text_planets()[0]\n except IndexError:\n first = None\n\n return first is not None",
"def hasText(self):\n return self.__text is not None",
"def is_description_empty_tbd(text):\n if text == 'TBD' or text == 'TODO' or text == '<p>TBD</p>\\r\\n' or text == '<p>c</p>\\r\\n' or \\\n text == '<p>...</p>\\r\\n' or text is None or text.lower() == 'tbd' or text.lower() == 'todo':\n return 1\n else:\n return 0",
"def is_plugin_note(self, note):\n return bool(self.regex.match(note))",
"def assert_text_present(self, text, msg=None):\r\n e = driver.find_element_by_tag_name('body')\r\n assert text in e.text",
"def is_plain_text(self):\n return self._tag == 'plain_text'",
"def __is_text_format_tag(self, html, pos):\n return self.__extract_html_tag(html, pos) in cf.TAGS_FORMAT_TEXT",
"def is_text( self ):\n return self.get_main_type() == 'text'",
"def rn_element_text_should_be(self, elementLocator=None, text=''):\n element_text = self.rn_get_element_attribute(elementLocator, 'Text')\n if element_text==text:\n return True\n raise AssertionError(\"The element text is \\\"%s\\\" while expected: \\\"%s\\\"\" %\n (element_text, text))",
"def footnotes(self):\n # TODO: Elements (e.g. Tables) can contain nested Footnotes\n return [el for el in self.elements if isinstance(el, Footnote)]",
"def isHTML(text: unicode) -> bool:\n ..."
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks whether an element is a link adjacent to footnote text. | def is_footnote_link(self, par):
return self.is_footnote_text(par.find_next_sibling('p')) | [
"def is_footnote(self, par):\n if par.find_next_sibling('p') is None:\n return False\n return self.is_footnote_text(par) or self.is_footnote_link(par)",
"def is_link(fragment):\n return (isinstance(fragment, scrapely.htmlpage.HtmlTag) and\n fragment.tag == 'a' and\n fragment.tag_type == scrapely.htmlpage.HtmlTagType.OPEN_TAG)",
"def is_footnote_text(self, par):\n return (par is not None) and (\"foot\" in par.attrs.get(\"class\", []))",
"def is_href_valid(self, link):\n url = str(link['href'])\n # if it doesn't lead to a wiki page\n if not url.startswith(\"/wiki/\"):\n return False\n\n wikipedia_classes = [\"external_text\", \"mw-disambig\", \"infobox-data\"]\n # if the href has a class\n if link.get(\"class\") is not None:\n link_class = \"_\".join(link.get(\"class\"))\n # if the class is an external text class, or a disambiguation link\n if any(wiki_class in link_class for wiki_class in wikipedia_classes):\n return False\n\n if 'wikimedia' in url or 'wiktionary' in url:\n return False\n wikipedia_keywords = [\"Help\", \"Category\", \"Wikipedia\", \"Template\", \"File\", \"Talk\", \"Special\", \"Portal\"]\n if any(keyword + ':' in url for keyword in wikipedia_keywords):\n return False\n if '#' in url:\n return False\n # if the page is a file\n if re.search(\"\\.[a-zA-Z][a-zA-Z][a-zA-Z]$\", url) or re.search(\"\\.[a-zA-Z][a-zA-Z][a-zA-Z][a-zA-Z]$\", url):\n return False\n\n # if the href is enclosed in brackets\n if WikiPage.is_substring_enclosed_in_brackets(link, link.parent.parent):\n return False\n\n wikipedia_not_needed_tags = ['small', 'sup', 'i']\n if link.parent.name in wikipedia_not_needed_tags:\n return False\n\n # if the href shows two different spellings. like in: https://en.wikipedia.org/wiki/Carbon_fibers\n # Carbon fibers ~or~ carbon fibres - here or is the href.\n\n if link.contents == [\"or\"]:\n return False\n\n parents_classes = [p.get(\"class\") for p in link.parents if p.get(\"class\") is not None]\n parents_classes = [str(\"_\".join(p)) for p in parents_classes]\n parents_ids = [p.get(\"id\") for p in link.parents if p.get(\"id\") is not None]\n\n # 'toc' - the Contents menu class\n # 'mw-editsection' - the Edit section\n # 'thumbcaption' - a Photo Caption\n # 'hlist' - a list like in: https://en.wikipedia.org/wiki/January\n wikipedia_classes_to_ignore = [\"thumbcaption\", \"infobox\", \"navigation-not-searchable\", \"sidebar\", \"box-text\",\n \"toc\", \"mw-editsection\", \"thumb\", \"hlist\", \"navbox\"]\n\n for p_class in parents_classes:\n\n if any(class_to_ignore in p_class for class_to_ignore in wikipedia_classes_to_ignore):\n return False\n\n # if it is a coordinates href\n if \"coordinates\" in parents_ids:\n return False\n\n '''\n Update 13.04.2021:\n ------------------\n Someone edited the \"Epistemology\" page. and changed the first link <a>branches<a/>.\n Instead of pointing to the page \"Branches of science\", it was changed to point to \"Outline of philosophy\".\n Which creates a loop. I chose to ignore it manually, and instead click on the next link.\n ( which happens to be Philosophy :) )\n This changed also caused some of the \"paths\" in the PDF files,\n generated before that date to be slightly outdated. But the concept stays the same :)\n \n Update 08.05.2021:\n ------------------\n they fixed it since :)\n \"Epistemology\" -> branches of philosophy : \"https://en.wikipedia.org/wiki/Outline_of_philosophy\" ->\n -> Philosophy.\n \n #if \"Outline_of_philosophy\" in url:\n # return False\n '''\n\n return True",
"def is_link(self):\n return self.__is_link",
"def snippet_is_link(snippet): #{{{\n global RX_LINK\n match=re.search(RX_LINK, snippet)\n return match is not None",
"def is_footnote(self):\n return self.style['float'] == 'footnote'",
"def is_Link(input_list):\n check = ''\n check = list_to_string(input_list,' ')\n if check.lower() == 'link':\n return True\n return False",
"def hasLink(self,nodeA,nodeB):\n nodeNumPrev = -1\n for node in self.n:\n nodeNum = abs(int(node.num))\n if nodeNum == abs(nodeB) and nodeNumPrev == abs(nodeA):\n return True\n nodeNumPrev = nodeNum\n return False",
"def is_link_at(self, x, y, link):\n return (x, y) in self._chips and self._chips[x, y].router.is_link(link)",
"def is_linked(self) -> bool:\n return self.parent_path.is_symlink() \\\n and (not self.child_path.is_symlink()) \\\n and self.child_path.samefile(self.parent_path)",
"def supports_link(self):\n return 'link' in self.config",
"def _is_addr_on_link(ip, link):\n addr_list = IPR.get_addr(label=link)\n for addr in addr_list:\n if ip == addr.get_attr('IFA_ADDRESS'):\n LOG.debug(\n 'Address {} found on link {}'.format(ip, link))\n return True\n LOG.debug('Address {} not found on link {}'.format(ip, link))\n return False",
"def isPostLink(self, rel, type = None): #$NON-NLS-1$\r\n return self._isInRelList(rel, ZAtomRelTypes.ATOM_POST_LINK_REL_LIST)",
"def check_link(self, link):\n false_links = [\"wikipedia:\", \"w:\", \"wikitionary:\", \"wikt:\", \"wikinews:\",\n \"n:\", \"wikibooks:\", \"b:\", \"wikiquote:\", \"q:\", \"wikisource:\",\n \"s:\", \"wikispecies:\", \"species:\", \"wikiversity\", \"v:\", \n \"wikivoyage:\", \"voy:\", \"wikimedia:\", \"foundation:\", \"wmf:\", \n \"commonds:\", \"c:\", \"chapter:\", \"metawikipedia:\", \"meta:\", \n \"m:\", \"incubator:\", \"outreach:\", \"mw:\", \"mediazilla:\", \n \"bugzilla:\", \"testwiki:\", \"wikitech:\", \"wikidata:\", \"d:\",\n \"phabricator:\", \"phab:\", \"talk:\", \"user talk:\", \"file:\", \n \"user:\", \"template:\", \"category:\", \"file talk:\", \n \"category talk:\", \"image:\", \"media:\", \"special:\", \n \"help:\", \"portal:\", \"portal talk:\", \"\\#\"]\n is_bad = any(false_link in link.lower() for false_link in false_links)\n if is_bad or link[0] == \":\":\n return False\n else:\n return True",
"def isLastAnchorLink(self, index):\n beacon_ref = self.beardistChain[index][0][2]\n # check if reference beacon is used\n if beacon_ref != self.referenceBeacon: return False\n # count number of reference beacon occurrences\n count = 0\n for link in self.beardistChain:\n beacon_from = link[0][2]\n if beacon_from == beacon_ref: count += 1\n # check count\n return True if count == 1 else False",
"def is_link(token):\n\n pattern = r'ht{2}p(s|)\\:\\/\\/(w{3}.|)[\\w]+\\.[\\w]+\\/[\\w\\d]+'\n return re.match(pattern, token)",
"def hasEntityLink(self, link):\r\n return self.feed_handler.hasEntityLink(link)",
"def is_printer_link(href, link_text):\r\n if not re.search(r'(?i)\\b(?:print|printer)\\b', link_text):\r\n return False\r\n if re.search(r'(?i)print[\\s-]*(?:edition|advertising|ads)\\b', link_text):\r\n return False\r\n if re.search(r'(?i)\\s*javascript:', href):\r\n return False\r\n return True"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks whether a paragraph element is part of a footnote. | def is_footnote(self, par):
if par.find_next_sibling('p') is None:
return False
return self.is_footnote_text(par) or self.is_footnote_link(par) | [
"def is_footnote_text(self, par):\n return (par is not None) and (\"foot\" in par.attrs.get(\"class\", []))",
"def is_footnote_link(self, par):\n return self.is_footnote_text(par.find_next_sibling('p'))",
"def is_footnote(self):\n return self.style['float'] == 'footnote'",
"def is_footnote(self, laparams, surrounding_objects):\n first_letter = self._objs[0]\n footnote_font_size = 999\n font_size = max([l.matrix[0] for l in self._objs[:20] if isinstance(l, LTChar)])\n first_letters_is_number = re.search(r'^(\\d{1,4})', self.get_text())\n\n if first_letters_is_number:\n footnote_font_size = max([letter.matrix[0] for letter in self._objs[first_letters_is_number.start(): first_letters_is_number.end()]])\n\n font_size_in_range = font_size <= laparams.footnote_max_def_size + 0.3 and abs(footnote_font_size - laparams.footnote_font_size) < 0.3\n\n too_close_objs = [\n obj for obj in surrounding_objects\n if obj.x0 - first_letter.x1 < laparams.footnote_min_def_distance and abs(obj.y1 - first_letter.y1) < 1 and obj is not self\n ]\n\n return font_size_in_range and first_letters_is_number and len(too_close_objs) == 0",
"def _is_single_paragraph(node: nodes.field_body) -> bool:\n if len(node) == 0:\n return False\n elif len(node) > 1:\n for subnode in node[1:]: # type: Node\n if not isinstance(subnode, nodes.system_message):\n return False\n if isinstance(node[0], nodes.paragraph):\n return True\n return False",
"def is_paragraph(self, line, column):\n no_paragraph_x0 = column.x0 + self.citing_space(column)\n return self._parameters.is_paragraph(self.meta, line, no_paragraph_x0) and\\\n not self.is_line(line, column)",
"def check_marked_paragraph(paragraph, number):\n\n\tq = 0 # ััะตััะธะบ ะฝะฐะนะดะตะฝะฝัั
ะผะฐัะบะตัะพะฒ\n\tchars = '<> ' # ะฒะพะทะผะพะถะฝัะต ัะธะผะฒะพะปั ะฒ ะบะฐัะตัะบะต\n\n\tfor i in range(len(paragraph.runs)):\n\t\tif \"<>\" in paragraph.runs[i].text: # ะตัะปะธ ะฒ ัะตะบััะต ะบะฐัะตัะบะธ ะฒัััะตัะฐะตััั ะผะฐัะบะตั\n\t\t\tfor c in paragraph.runs[i].text: # ะฟัะพะฒะตััะตะผ ะบะฐะถะดัะน ัะธะผะฒะพะป ะฒ ะบะฐัะตัะบะต\n\t\t\t\tif c not in chars: # ะตัะปะธ ะพะฝ ะฝะต ะฒั
ะพะดะธั ะฒ ัะฟะธัะพะบ ัะฐะทัะตัะตะฝะฝัั
ัะธะผะฒะพะปะพะฒ\n\t\t\t\t\treturn False\n\t\t\tq += 1 # ะตัะปะธ ะฟัะพะฒะตัะบะฐ ะฟัะพะนะดะตะฝะฐ, ัะฒะตะปะธัะธะฒะฐะตะผ ััะตััะธะบ\n\t\telif \"<\" in paragraph.runs[i].text and \">\" in paragraph.runs[i+1].text: # ะตัะปะธ ะผะฐัะบะตั ัะฐะทะดะตะปะตะฝ ะฝะฐ ะดะฒะต ัะพัะตะดะฝะธะต ะบะฐัะตัะบะธ\n\t\t\tfor c in paragraph.runs[i].text: # ะฟัะพะฒะตััะตะผ ะบะฐะถะดัั ะธะท ะบะฐัะตัะพะบ\n\t\t\t\tif c not in chars:\n\t\t\t\t\treturn False\n\t\t\tfor c in paragraph.runs[i+1].text:\n\t\t\t\tif c not in chars:\n\t\t\t\t\treturn False\n\t\t\tq += 1\n\n\tif q != number: # ะตัะปะธ ะบะพะปะธัะตััะฒะพ ะผะฐัะบะตัะพะฒ ะฝะต ัะพะฒะฟะฐะปะพ ั ัะบะฐะทะฐะฝะฝัะผ ะฒ ะฒัะฒะพะดะต\n\t\treturn False\n\telse:\n\t\treturn True",
"def should_be_compact_paragraph(self, node):\n\n if isinstance(node.parent, nodes.container):\n if 'non-paragraph' not in node.parent.attributes['classes']:\n return False\n\n # noinspection PyUnresolvedReferences\n return super().should_be_compact_paragraph(node)",
"def _has_definition(self):\n footerReference = self._sectPr.get_footerReference(self._hdrftr_index)\n return False if footerReference is None else True",
"def check_paragraph(self, para, links_para):\n #Return False if no paragraphs found\n if para is None:\n return False\n\n links = para.find_all('a')\n #Return False if no links found\n if links is None:\n return False\n\n #Return True if one link is valid in the paragraph\n for link in links:\n if self.check_link(link, links_para):\n return True\n return False",
"def check_paragraph(line):\n if len(line) > 3 and line[:3] == 'โ
โ
โ
':\n return '<p>' + line[3:] + '</p>'\n else:\n return line",
"def footnotes(self):\n # TODO: Elements (e.g. Tables) can contain nested Footnotes\n return [el for el in self.elements if isinstance(el, Footnote)]",
"def testParagraphs(self):\n\n textractor = Textractor(paragraphs=True)\n\n # Extract text as sentences\n paragraphs = textractor(Utils.PATH + \"/article.pdf\")\n\n # Check number of paragraphs is as expected\n self.assertEqual(len(paragraphs), 13)",
"def paragraph_mentions(text: str, keyword: str) -> bool:\n soup = BeautifulSoup(text, \"html5lib\")\n paragraphs = [p.get_text() for p in soup('p')]\n\n return any(keyword.lower() in paragraph.lower()\n for paragraph in paragraphs)",
"def paragraph_mentions(text: str, keyword: str) -> bool:\n \n soup = BeautifulSoup(text, \"html5lib\")\n paragraphs = [p.get_text() for p in soup(\"p\")]\n \n return any(keyword.lower() in paragraph.lower() for paragraph in paragraphs)",
"def has_PTM(self)->bool:\r\n\t\tif '(' in self._peptide:\r\n\t\t\treturn True\r\n\t\treturn False",
"def is_plugin_note(self, note):\n return bool(self.regex.match(note))",
"def is_valid_paragraphs(args, skip=False):\n if is_valid_file_and_directory(args) or skip:\n if args.paragraphs is not None:\n return True\n return False",
"def eligible_for_paragraph_to_block_substitution(node: tinydocutils.nodes.Node) -> bool:\n return (\n isinstance(node, tinydocutils.nodes.paragraph)\n and len(node.children) == 1\n and isinstance(node.children[0], tinydocutils.nodes.substitution_reference)\n )"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Walk over paragraphs in the main text. If a footnote link is found, jump to that paragraph, then back to the main text. | def linked_text_paragraphs(self):
for par in self._main_paragraphs_raw():
par_links = par.find_all('a')
if len(par_links) == 0:
self.main_count += len(par.text)
yield par.text
else:
for el in par.contents:
if el.name is None:
#this is plain text
self.main_count += len(str(el))
yield str(el)
elif el.name == "a" and "href" in el.attrs:
id = el["href"].lstrip('#')
try:
foot_par = self._get_footnote_par(id)
except NoFootnoteError:
self.log(f"Could not find footnote for {id}, skipping.")
self.footnote_count += len(foot_par.text)
yield foot_par.text | [
"def is_footnote_link(self, par):\n return self.is_footnote_text(par.find_next_sibling('p'))",
"def test_forward_paragraph(self):\n before_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year,\n Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000\n tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly\n weather impacts every American. Communities can now rely on the National Weather\n Serviceโs StormReady program to help them guard against the ravages of Mother\n Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading\n to around 500 deaths per year and nearly $14 billion in damage. StormReady, a\n program started in 1999 in Tulsa, OK, helps arm America's communities with the\n communication and safety skills needed to save lives and propertyโ before and\n during the event. StormReady helps community leaders and emergency managers\n strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of\n severe weather through better planning, education, and awareness. No community\n is storm proof, but StormReady can help communities save lives. Does StormReady\n make a difference?\n \"\"\"\n after_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year,\n Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000\n tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly\n weather impacts every American. Communities can now rely on the National Weather\n Serviceโs StormReady program to help them guard against the ravages of Mother\n Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading\n to around 500 deaths per year and nearly $14 billion in damage. StormReady, a\n program started in 1999 in Tulsa, OK, helps arm America's communities with the\n communication and safety skills needed to save lives and propertyโ before and\n during the event. StormReady helps community leaders and emergency managers\n strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of\n severe weather through better planning, education, and awareness. No community\n is storm proof, but StormReady can help communities save lives. Does StormReady\n make a difference?\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"9.0\", \"9.0\"),\n after_sel=(\"15.0\", \"15.0\"),\n command_name=\"forward-paragraph\",\n )",
"def add_paragraph_marks(text, keep_line_endings=True, maxlength=72):\n\n # add # after line that ends with full stop, question and exclamation marks:\n ptrn = r\"([.ุ!] *[\\r\\n]+(?:PageV\\w{2}P\\d+[abAB]?[\\r\\n]+)?)([^\\r\\n#P\\Z])\"\n text = re.sub(ptrn, r\"\\1# \\2\", text)\n\n # add # after section titles (but not before page numbers and sub-titles)\n ptrn = r\"(### .+[\\r\\n]+(?:PageV\\w{2}P\\d+[\\r\\n]+)?)([^\\r\\n#P\\Z])\"\n text = re.sub(ptrn, r\"\\1# \\2\", text)\n\n if keep_line_endings:\n # add the tildas for continued lines:\n new_text = \"\"\n for line in re.split(r\"([\\r\\n]+)\", text):\n if not line.startswith((\"P\", \"#\", \"~~\")) \\\n and not re.match(r\"[\\r\\n]+\", line):\n line = \"~~\"+line\n new_text += line\n else:\n # move page number to the previous line:\n ptrn = r\"([^ \\r\\n.ุ!]) *[\\r\\n]+(PageV[^P]+P[\\w]+) *[\\r\\n]+\"\n text = re.sub(ptrn, r\"\\1 \\2 \", text)\n # Add paragraph signs before every new line:\n ptrn = r\"([\\r\\n]+)([^\\r\\n#P\\s])\"\n text = re.sub(ptrn, r\"\\1# \\2\", text)\n # break long lines into shorter lines:\n new_text = wrap(text, maxlength)\n\n new_text = re.sub(\"~~#\", \"#\", new_text)\n new_text = re.sub(r\"~~([^\\n]+%~%)\", r\"# \\1\", new_text)\n new_text = re.sub(r\"~~\\.\\./\", \"../\", new_text)\n\n return new_text",
"def test_back_paragraph(self):\n before_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year,\n Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000\n tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly\n weather impacts every American. Communities can now rely on the National Weather\n Serviceโs StormReady program to help them guard against the ravages of Mother\n Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading\n to around 500 deaths per year and nearly $14 billion in damage. StormReady, a\n program started in 1999 in Tulsa, OK, helps arm America's communities with the\n communication and safety skills needed to save lives and propertyโ before and\n during the event. StormReady helps community leaders and emergency managers\n strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of\n severe weather through better planning, education, and awareness. No community\n is storm proof, but StormReady can help communities save lives. Does StormReady\n make a difference?\n \"\"\"\n after_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year,\n Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000\n tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly\n weather impacts every American. Communities can now rely on the National Weather\n Serviceโs StormReady program to help them guard against the ravages of Mother\n Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading\n to around 500 deaths per year and nearly $14 billion in damage. StormReady, a\n program started in 1999 in Tulsa, OK, helps arm America's communities with the\n communication and safety skills needed to save lives and propertyโ before and\n during the event. StormReady helps community leaders and emergency managers\n strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of\n severe weather through better planning, education, and awareness. No community\n is storm proof, but StormReady can help communities save lives. Does StormReady\n make a difference?\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"9.0\", \"9.0\"),\n after_sel=(\"6.7\", \"6.7\"),\n command_name=\"back-paragraph\",\n )",
"def textparse(self,\r\n analysetext,\r\n depth=0,\r\n keys=None,\r\n re_entering=False,\r\n newindex=Index(1)):\r\n if keys is None:\r\n keys = set()\r\n if LEFTNOTE not in analysetext \\\r\n or extract.embedded_extract(analysetext)[2] == 0:\r\n return\r\n #test if it contains embedded text\r\n\r\n## ee = extract.embedded_extract(RIGHTNOTE.join(LEFTNOTE.\r\n##join(analysetext.split(LEFTNOTE)[1:]).split(RIGHTNOTE)[:-1]),eliminate = True)\r\n\r\n ee_temp = extract.embedded_extract(analysetext)\r\n embeddedlist = ee_temp[0]\r\n\r\n if depth-1 in self.pass_key_dict:\r\n\r\n self.pass_key_dict[depth] = self.pass_key_dict[depth-1]\r\n else:\r\n self.pass_key_dict[depth] = [[list(keys)], []]\r\n\r\n emb_len = str(len(embeddedlist))\r\n\r\n for a_temp, phrase in enumerate(embeddedlist):\r\n if a_temp<10 or (a_temp>9 and a_temp<100\r\n and a_temp%10 == 0) or (a_temp>99\r\n and a_temp%100==0):\r\n #display counter for embedded notes\r\n print()\r\n print(str(a_temp)+'/'+emb_len)\r\n\r\n\r\n\r\n\r\n\r\n\r\n if extract.embedded_extract(phrase)[2] > 1:\r\n\r\n\r\n if phrase[0] == LEFTNOTE and phrase[-1] == RIGHTNOTE:\r\n newindex = self.textinterpret(\r\n extract.embedded_extract(\r\n RIGHTNOTE.join(LEFTNOTE.join(phrase.split(LEFTNOTE)[1:])\r\n .split(RIGHTNOTE)[:-1]),\r\n eliminate=True)[1],\r\n depth,\r\n re_entering=re_entering,\r\n newindex=newindex)\r\n else:\r\n newindex = self.textinterpret(\r\n extract.embedded_extract(\r\n phrase,\r\n eliminate=True)[1],\r\n depth,\r\n re_entering=re_entering,\r\n newindex=newindex)\r\n newindex = self.textparse(phrase[1:-1],\r\n depth+1,\r\n re_entering=re_entering,\r\n newindex=newindex)\r\n\r\n\r\n else:\r\n\r\n newindex = self.textinterpret(phrase,\r\n depth,\r\n re_entering=re_entering,\r\n newindex=newindex)\r\n print()\r\n return newindex",
"def fix_footnotes(case_el, warnings):\n case_pq = PyQuery(case_el)\n # fix footnotes\n # footnotes look like this (since <small> is already stripped)\n # <p>--------</p>\n # <p>Notes:</p>\n # <p>\n # <sup>\n # <a href=\"#fn1\" name=\"fr1\">1</a>\n # </sup> text text text </p>\n # notes label can look like `<strong><br/> --------</strong>` -- NE2d/990/990ne2d139_12.xml\n notes_el = case_pq('p:contains(\"Notes:\")').filter(lambda i, el: strip_tags(PyQuery(el).text()).strip() == 'Notes:')\n refs = {}\n notes_section = None\n if notes_el:\n notes_section = notes_el.closest('article, section')\n footnote_index = 0\n opinion_index = 1\n footnote_el = None\n\n # before and after footnote sections there is a paragraph of either 8 or 15 hyphens\n footnote_breaks = ['-' * 8, '-' * 15]\n\n # remove footnote break before footnote section\n # can have tags in the footnote break -- A3d/50/50a3d607_29.xml\n prev_notes_el = notes_el.prev()\n if strip_tags(prev_notes_el.text()).strip() not in footnote_breaks:\n warnings.append(\"Unexpected element before notes el.\")\n else:\n prev_notes_el.remove()\n\n # remove \"Notes:\"\n old_footnote_el = notes_el.next()\n notes_el.remove()\n\n # step through each footnote element\n while old_footnote_el:\n # sometimes <a> tag gets out of <p> tag -- SE2d/590/590SE2d53.xml\n # put it inside a new <p>\n if old_footnote_el[0].tag == 'a':\n old_footnote_el = wrap_with(old_footnote_el, PyQuery(etree.Element('p')))\n\n link_el = old_footnote_el('a').eq(0)\n if not link_el:\n # this could be the end of footnotes, in which case stop\n if strip_tags(old_footnote_el.text()).strip() in footnote_breaks:\n old_footnote_el.remove()\n break\n # or could be a second paragraph of the previous footnote, in which case append\n if footnote_el:\n footnote_el.append(old_footnote_el)\n old_footnote_el = footnote_el.next()\n continue\n else:\n # if there's a non-footnote before the first footnote, we don't know what's going on,\n # so quit processing\n warnings.append(\"Unexpected non-footnote element.\")\n break\n label = link_el.text()\n footnote_index += 1\n footnote_id = f'footnote_{opinion_index}_{footnote_index}'\n footnote_el = PyQuery(renderer.make_footnote_el(id=footnote_id, label=label))\n refs[link_el.attr('href').lstrip('#')] = [footnote_id, footnote_el]\n while link_el.parent()[0].tag == 'sup':\n link_el = link_el.parent()\n link_el.remove()\n\n # remove space at beginning of footnote left over from removing footnote number\n if old_footnote_el[0].text:\n old_footnote_el[0].text = old_footnote_el[0].text.lstrip()\n\n wrap_with(old_footnote_el, footnote_el)\n old_footnote_el = footnote_el.next()\n\n # fix footnote references (<small> is already stripped)\n # ...<sup><a href=\"#fr1\" name=\"fn1\">1</a></sup>... typical\n # ...<sup id=\"co_fnRef_B00012045229866_ID0E4F\">1</sup> BR/590/590 B.R. 577.xml\n # ...<a href=\"#1\" name=\"fn1\" id=\"fn1\">1</a>... 
NW2d/781/781NW2d5512010WIApp33_29.xml\n for section in case_pq('.head-matter, .opinion').items():\n for old_ref_pq in section('a, sup[id]').items():\n label = old_ref_pq.text()\n if old_ref_pq[0].tag == 'a':\n ref_name = old_ref_pq.attr('name')\n if not (ref_name and ref_name.startswith('fn')):\n continue\n else:\n ref_name = \"fn\" + label\n ref, footnote_el = refs.get(ref_name, ['orphan', None])\n if footnote_el:\n # move footnotes from end of document to correct section -- see NW2d/906/906 N.W.2d 436_Replace.xml\n if section != notes_section:\n section.append(footnote_el)\n else:\n warnings.append(f\"Unmatched ref {repr(str(old_ref_pq))}\")\n ref_el = etree.Element('a', {'class': 'footnotemark', 'href': '#' + ref, 'id': 'ref_' + ref})\n ref_el.text = label\n while old_ref_pq.parent()[0].tag == 'sup':\n old_ref_pq = old_ref_pq.parent()\n PyQuery(ref_el).insert_before(old_ref_pq)\n old_ref_pq.remove()",
"def is_footnote(self, par):\n if par.find_next_sibling('p') is None:\n return False\n return self.is_footnote_text(par) or self.is_footnote_link(par)",
"def _has_page_jump(text):\n # Determines matches with format strings.\n for format_tuple in _FORMAT_STRINGS:\n jump = _get_jump_with_pattern(text, format_tuple)\n if jump:\n return jump\n\n # Recognizes common OCR for \"From page 1\".\n match = _match_pattern(text, r\"(^Frompagel$){e<=3}\")\n if match and text[-1] == 'l':\n return -1",
"def is_footnote_text(self, par):\n return (par is not None) and (\"foot\" in par.attrs.get(\"class\", []))",
"def test_extend_to_paragraph(self):\n before_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year,\n Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000\n tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly\n weather impacts every American. Communities can now rely on the National Weather\n Serviceโs StormReady program to help them guard against the ravages of Mother\n Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading\n to around 500 deaths per year and nearly $14 billion in damage. StormReady, a\n program started in 1999 in Tulsa, OK, helps arm America's communities with the\n communication and safety skills needed to save lives and propertyโ before and\n during the event. StormReady helps community leaders and emergency managers\n strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of\n severe weather through better planning, education, and awareness. No community\n is storm proof, but StormReady can help communities save lives. Does StormReady\n make a difference?\n \"\"\"\n after_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year,\n Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000\n tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly\n weather impacts every American. Communities can now rely on the National Weather\n Serviceโs StormReady program to help them guard against the ravages of Mother\n Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading\n to around 500 deaths per year and nearly $14 billion in damage. StormReady, a\n program started in 1999 in Tulsa, OK, helps arm America's communities with the\n communication and safety skills needed to save lives and propertyโ before and\n during the event. StormReady helps community leaders and emergency managers\n strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of\n severe weather through better planning, education, and awareness. No community\n is storm proof, but StormReady can help communities save lives. Does StormReady\n make a difference?\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"9.0\", \"9.0\"),\n after_sel=(\"8.0\", \"13.33\"),\n command_name=\"extend-to-paragraph\",\n )",
"def process_paragraph(self):\n emit(r\"\\par \")\n self.process_children()\n emit(\"\\n\\n\")",
"def _create_notes_paragraph(notes):\n para = nodes.paragraph()\n para.append(nodes.strong(text=\"Notes: \"))\n # links could start with http:// or https://\n link_idxs = [m.start() for m in re.finditer('https?://', notes)]\n start_idx = 0\n for link_idx in link_idxs:\n # assume the notes start with text (could be empty)\n para.append(nodes.inline(text=notes[start_idx:link_idx]))\n # create a URL node until the next text or the end of the notes\n link_end_idx = notes.find(\" \", link_idx)\n if link_end_idx == -1:\n # In case the notes end with a link without a blank\n link_end_idx = len(notes)\n uri = notes[link_idx:link_end_idx + 1]\n para.append(nodes.reference(\"\", uri, refuri=uri))\n start_idx = link_end_idx + 1\n\n # get all text after the last link (could be empty) or all of the\n # text if no link was given\n para.append(nodes.inline(text=notes[start_idx:]))\n return para",
"def para_it_belongs(elem_op_to_look_for):\n for para_i, paragraph in enumerate(self.paragraphs):\n if (not paragraph.new_line) \\\n and paragraph.abs_position \\\n <= elem_op_to_look_for.abs_position \\\n <= paragraph.abs_position + paragraph.get_length():\n return para_i\n return -1",
"def moveDownOneParagraph(self):\r\n self.SendScintilla(QsciScintilla.SCI_PARADOWN)",
"def testParagraphs(self):\n\n textractor = Textractor(paragraphs=True)\n\n # Extract text as sentences\n paragraphs = textractor(Utils.PATH + \"/article.pdf\")\n\n # Check number of paragraphs is as expected\n self.assertEqual(len(paragraphs), 13)",
"def __split_paragraphs(self):\n split_index = -1\n for index in range(len(self.parts)):\n if type(self.parts[index]) == VParagraph and self.parts[index].list_type is None:\n if self.parts[index].find_new_lines() != -1:\n split_index = index\n break\n if split_index != -1:\n index = self.parts[split_index].find_new_lines()\n if len(self.parts[split_index].parts) == index - 1:\n return\n new_texts = self.parts[split_index][index].text.split(\"\\n\")\n first_paragraph = VParagraph()\n first_paragraph.parts = self.parts[split_index][:index]\n second_paragraph = VParagraph()\n second_paragraph.parts = self.parts[split_index][index:]\n first_paragraph.parts.extend([VText(x) for x in new_texts[:-1]])\n second_paragraph.parts[0] = VText(new_texts[-1])\n parts = self.parts[:split_index]\n parts.append(first_paragraph)\n parts.append(second_paragraph)\n if len(self.parts) > split_index + 1:\n parts.extend(self.parts[split_index+1:])\n self.parts = parts\n self.__split_paragraphs()",
"def print_poem(self):\n for index, verse in enumerate(self.verses):\n for line in verse:\n print(line)\n if index != len(self.verses) - 1:\n print('')",
"def test_backward_kill_paragraph(self):\n before_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year,\n Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000\n tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly\n weather impacts every American. Communities can now rely on the National Weather\n Serviceโs StormReady program to help them guard against the ravages of Mother\n Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading\n to around 500 deaths per year and nearly $14 billion in damage. StormReady, a\n program started in 1999 in Tulsa, OK, helps arm America's communities with the\n communication and safety skills needed to save lives and propertyโ before and\n during the event. StormReady helps community leaders and emergency managers\n strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of\n severe weather through better planning, education, and awareness. No community\n is storm proof, but StormReady can help communities save lives. Does StormReady\n make a difference?\n \"\"\"\n after_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year,\n Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000\n tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly\n weather impacts every American. Communities can now rely on the National Weather\n Serviceโs StormReady program to help them guard against the ravages of Mother\n Nature.\n to around 500 deaths per year and nearly $14 billion in damage. StormReady, a\n program started in 1999 in Tulsa, OK, helps arm America's communities with the\n communication and safety skills needed to save lives and propertyโ before and\n during the event. StormReady helps community leaders and emergency managers\n strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of\n severe weather through better planning, education, and awareness. No community\n is storm proof, but StormReady can help communities save lives. Does StormReady\n make a difference?\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"9.0\", \"9.0\"),\n after_sel=(\"7.0\", \"7.0\"),\n command_name=\"backward-kill-paragraph\",\n )",
"def _determineParagraphStartPosition(splittedText, currentText):\r\n \r\n begin = 0\r\n for paragraph in splittedText:\r\n if paragraph != currentText:\r\n begin += len(paragraph) + 1\r\n else:\r\n break\r\n return begin"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Material saver. Saves a material and its properties to the JSON file for type building elements. If the Project parent is set, it automatically saves it to the file given in Project.data. Alternatively you can specify a path to a file with Materials. If this file does not exist, a new file is created. | def save_material(material, data_class):
data_class.material_bind["version"] = "0.7"
add_to_json = True
warning_text = ("Material with same name and same properties already "
"exists in JSON, consider this material or revising your "
"properties")
for id, check in data_class.material_bind.items():
if id != "version":
if check["name"] == material.name and \
check["density"] == material.density and \
check["thermal_conduc"] == material.thermal_conduc and \
check["heat_capac"] == material.heat_capac and \
check[
"thickness_default"] == material.thickness_default and \
check["thickness_list"] == material.thickness_list:
warnings.warn(warning_text)
print(material.name)
add_to_json = False
break
if add_to_json is True:
data_class.material_bind[
material.material_id] = collections.OrderedDict()
data_class.material_bind[
material.material_id]["name"] = material.name
data_class.material_bind[
material.material_id]["density"] = material.density
data_class.material_bind[
material.material_id]["thermal_conduc"] = material.thermal_conduc
data_class.material_bind[
material.material_id]["heat_capac"] = material.heat_capac
data_class.material_bind[
material.material_id][
"thickness_default"] = material.thickness_default
data_class.material_bind[
material.material_id]["thickness_list"] = material.thickness_list
data_class.material_bind[
material.material_id]["solar_absorp"] = material.solar_absorp
with open(utilities.get_full_path(data_class.path_mat), 'w') as file:
file.write(json.dumps(
data_class.material_bind,
indent=4,
separators=(',', ': '))) | [
"def save_materials_properties(read_dir, saving_name='Materials_info'):\n info_df = get_materials_properties(read_dir)\n save_this(\n info_df,\n read_dir,\n saving_name,\n authorized=True,\n save_type='csv',\n save_index=False)\n # print('**SAVED**')\n # print(f'path: {read_dir}/Nesr/{saving_name}.CSV')",
"def save_material(filename, mat):\n out = np.array([mat.wav, mat.eps.real, mat.eps.imag,\n mat.mu.real, mat.mu.imag]).T\n header = \"Wavelength\\teps_real\\teps_imag\\tmu_real\\tmu_imag\"\n miepy.array_io.save(filename, out, header=header)",
"def WriteStructuralMaterialsjson(save_path,dic_in_json_format):\n complete_name=os.path.join(save_path,\"StructuralMaterials.json\") \n with open(complete_name, \"w\") as save_file:\n save_file.write(dic_in_json_format)\n if(DEBUG):\n print(\"StructuralMaterials.json written\")",
"def save_and_reload_scene():\n\n flg = logging.getLogger(\"lettuce.xgenSetup.save_and_reload_scene\")\n\n current_file = mc.file(save=True)\n flg.info(\"Current File: {}\".format(current_file))\n mc.file(current_file, ignoreVersion=True, open=True, force=True)",
"def _append_material(mat, tree, buffer_items, mat_hashes):\n # materials are hashable\n hashed = hash(mat)\n # check stored material indexes to see if material\n # has already been added\n if mat_hashes is not None and hashed in mat_hashes:\n return mat_hashes[hashed]\n\n # convert passed input to PBR if necessary\n if hasattr(mat, 'to_pbr'):\n as_pbr = mat.to_pbr()\n else:\n as_pbr = mat\n\n # a default PBR metallic material\n result = {\"pbrMetallicRoughness\": {}}\n try:\n # try to convert base color to (4,) float color\n result['baseColorFactor'] = visual.color.to_float(\n as_pbr.baseColorFactor).reshape(4).tolist()\n except BaseException:\n pass\n\n try:\n result['emissiveFactor'] = as_pbr.emissiveFactor.reshape(3).tolist()\n except BaseException:\n pass\n\n # if name is defined, export\n if isinstance(as_pbr.name, str):\n result['name'] = as_pbr.name\n\n # if alphaMode is defined, export\n if isinstance(as_pbr.alphaMode, str):\n result['alphaMode'] = as_pbr.alphaMode\n\n # if alphaCutoff is defined, export\n if isinstance(as_pbr.alphaCutoff, float):\n result['alphaCutoff'] = as_pbr.alphaCutoff\n\n # if doubleSided is defined, export\n if isinstance(as_pbr.doubleSided, bool):\n result['doubleSided'] = as_pbr.doubleSided\n\n # if scalars are defined correctly export\n if isinstance(as_pbr.metallicFactor, float):\n result['metallicFactor'] = as_pbr.metallicFactor\n if isinstance(as_pbr.roughnessFactor, float):\n result['roughnessFactor'] = as_pbr.roughnessFactor\n\n # which keys of the PBRMaterial are images\n image_mapping = {\n 'baseColorTexture': as_pbr.baseColorTexture,\n 'emissiveTexture': as_pbr.emissiveTexture,\n 'normalTexture': as_pbr.normalTexture,\n 'occlusionTexture': as_pbr.occlusionTexture,\n 'metallicRoughnessTexture': as_pbr.metallicRoughnessTexture}\n\n for key, img in image_mapping.items():\n if img is None:\n continue\n # try adding the base image to the export object\n index = _append_image(\n img=img,\n tree=tree,\n buffer_items=buffer_items)\n # if the image was added successfully it will return index\n # if it failed for any reason, it will return None\n if index is not None:\n # add a reference to the base color texture\n result[key] = {'index': len(tree['textures'])}\n # add an object for the texture\n tree['textures'].append({'source': index})\n\n # for our PBRMaterial object we flatten all keys\n # however GLTF would like some of them under the\n # \"pbrMetallicRoughness\" key\n pbr_subset = ['baseColorTexture',\n 'baseColorFactor',\n 'roughnessFactor',\n 'metallicFactor',\n 'metallicRoughnessTexture']\n # move keys down a level\n for key in pbr_subset:\n if key in result:\n result[\"pbrMetallicRoughness\"][key] = result.pop(key)\n\n # if we didn't have any PBR keys remove the empty key\n if len(result['pbrMetallicRoughness']) == 0:\n result.pop('pbrMetallicRoughness')\n\n # which index are we inserting material at\n index = len(tree['materials'])\n # add the material to the data structure\n tree['materials'].append(result)\n # add the material index in-place\n mat_hashes[hashed] = index\n\n return index",
"def export_material(self, bo, bm):\n\n # Sometimes, a material might need to be single-use. Right now, the most apparent example\n # of that situation is when a lightmap image is baked. Wavesets are in the same boat, but\n # that's a special case as of the writing of this code.\n single_user = self._requires_single_user_material(bo, bm)\n if single_user:\n mat_name = \"{}_AutoSingle\".format(bm.name) if bo.name == bm.name else \"{}_{}\".format(bo.name, bm.name)\n self._report.msg(\"Exporting Material '{}' as single user '{}'\", bm.name, mat_name, indent=1)\n hgmat = None\n else:\n mat_name = bm.name\n self._report.msg(\"Exporting Material '{}'\", mat_name, indent=1)\n hsgmat = self._mgr.find_key(hsGMaterial, name=mat_name, bl=bo)\n if hsgmat is not None:\n return hsgmat\n\n hsgmat = self._mgr.add_object(hsGMaterial, name=mat_name, bl=bo)\n slots = [(idx, slot) for idx, slot in enumerate(bm.texture_slots) if self._can_export_texslot(slot)]\n\n # There is a major difference in how Blender and Plasma handle stencils.\n # In Blender, the stencil is on top and applies to every layer below is. In Plasma, the stencil\n # is below the SINGLE layer it affects. The main texture is marked BindNext and RestartPassHere.\n # The pipeline indicates that we can render 8 layers simultaneously, so we will collect all\n # stencils and apply this arrangement. We're going to limit to 6 stencils however. 1 layer for\n # main texture and 1 piggyback.\n num_stencils = sum((1 for i in slots if i[1].use_stencil))\n if num_stencils > _MAX_STENCILS:\n raise ExportError(\"Material '{}' uses too many stencils. The maximum is {}\".format(bm.name, _MAX_STENCILS))\n stencils = []\n restart_pass_next = False\n\n # Loop over layers\n for idx, slot in slots:\n # Prepend any BumpMapping magic layers\n if slot.use_map_normal:\n if bo in self._bump_mats:\n raise ExportError(\"Material '{}' has more than one bumpmap layer\".format(bm.name))\n du, dw, dv = self.export_bumpmap_slot(bo, bm, hsgmat, slot, idx)\n hsgmat.addLayer(du.key) # Du\n hsgmat.addLayer(dw.key) # Dw\n hsgmat.addLayer(dv.key) # Dv\n\n if slot.use_stencil:\n stencils.append((idx, slot))\n else:\n tex_layer = self.export_texture_slot(bo, bm, hsgmat, slot, idx)\n if restart_pass_next:\n tex_layer.state.miscFlags |= hsGMatState.kMiscRestartPassHere\n restart_pass_next = False\n hsgmat.addLayer(tex_layer.key)\n if slot.use_map_normal:\n self._bump_mats[bo] = (tex_layer.UVWSrc, tex_layer.transform)\n # After a bumpmap layer(s), the next layer *must* be in a\n # new pass, otherwise it gets added in non-intuitive ways\n restart_pass_next = True\n if stencils:\n tex_state = tex_layer.state\n if not tex_state.blendFlags & hsGMatState.kBlendMask:\n tex_state.blendFlags |= hsGMatState.kBlendAlpha\n tex_state.miscFlags |= hsGMatState.kMiscRestartPassHere | hsGMatState.kMiscBindNext\n curr_stencils = len(stencils)\n for i in range(curr_stencils):\n stencil_idx, stencil = stencils[i]\n stencil_name = \"STENCILGEN_{}@{}_{}\".format(stencil.name, bm.name, slot.name)\n stencil_layer = self.export_texture_slot(bo, bm, hsgmat, stencil, stencil_idx, name=stencil_name)\n if i+1 < curr_stencils:\n stencil_layer.state.miscFlags |= hsGMatState.kMiscBindNext\n hsgmat.addLayer(stencil_layer.key)\n\n # Plasma makes several assumptions that every hsGMaterial has at least one layer. 
If this\n # material had no Textures, we will need to initialize a default layer\n if not hsgmat.layers:\n layer = self._mgr.find_create_object(plLayer, name=\"{}_AutoLayer\".format(bm.name), bl=bo)\n self._propagate_material_settings(bm, layer)\n hsgmat.addLayer(layer.key)\n\n # Cache this material for later\n mat_list = self._obj2mat.setdefault(bo, [])\n mat_list.append(hsgmat.key)\n\n # Looks like we're done...\n return hsgmat.key",
"def menu_save_scene(self):\n file_name = QtGui.QFileDialog().getSaveFileName(self, \"Save Scene to File\", get_data_path(), \"*.pkl\")\n with open(file_name, \"wb\") as f:\n pickle.dump(self.scene, f, pickle.HIGHEST_PROTOCOL)",
"def saveCallback(self):\n\n ## TODO // TEST IT\n self._pathsDict[\"sceneFile\"] = self.getSceneFile()\n try:\n openSceneInfo = self.getOpenSceneInfo()\n if not openSceneInfo:\n return\n except TypeError:\n return\n if openSceneInfo[\"jsonFile\"]:\n jsonInfo = self._loadJson(openSceneInfo[\"jsonFile\"])\n if jsonInfo[\"ReferenceFile\"]:\n absRefFile = os.path.join(self._pathsDict[\"projectDir\"], jsonInfo[\"ReferenceFile\"])\n # TODO : ref => Dict\n absBaseSceneVersion = os.path.join(self._pathsDict[\"projectDir\"], jsonInfo[\"Versions\"][int(jsonInfo[\"ReferencedVersion\"]) - 1][\"RelativePath\"])\n # if the refererenced scene file is the saved file (saved or saved as)\n if self._pathsDict[\"sceneFile\"] == absBaseSceneVersion:\n # copy over the forReference file\n try:\n shutil.copyfile(self._pathsDict[\"sceneFile\"], absRefFile)\n print \"Scene Manager Update:\\nReference File Updated\"\n except:\n pass",
"def saveButtonAction(self):\r\n savePath = cmds.fileDialog2(ds=2, fm=1, ff='MAF Files (*.animMAF)')[0]\r\n newMasterDict = {}\r\n topNode = cmds.ls(sl=1)[0]\r\n topNodeShort = topNode.split(\":\")[-1]\r\n\r\n newMasterDict[topNodeShort] = self.loadedData\r\n newMasterDict['_init'] = self.loadedInit\r\n\r\n self.saveNewMAF(savePath, newMasterDict)",
"def save(self, filename=None):\n if filename is None:\n filename = \"morse_smale_complex.json\"\n with open(filename, \"w\") as fp:\n fp.write(self.to_json())",
"def put_custom_material(self, key, material):\n self._load()\n res = None\n\n try:\n if dict_get(material, ['laser_type']) == \"MrBeamII-1.0\":\n material[\"laser_model\"] = '0'\n del material[\"laser_type\"]\n if \"model\" in material:\n material[\"device_model\"] = material.pop(\"model\")\n if \"compatible\" in material:\n material.pop(\"compatible\")\n if \"customBeforeElementContent\" in material:\n material.pop(\"customBeforeElementContent\")\n\n self.custom_materials[key.strip()] = material\n res = True\n except:\n self._logger.exception(\n \"Exception while putting materials: key: %s, data: %s\", key, material\n )\n res = False\n if res:\n res = self._save()\n return res",
"def test_save_materials(temp_dir):\n image1 = [[[0, 0, 0], [0, 0, 0]], [[255, 255, 255], [255, 255, 255]]]\n image2 = [[[0, 0, 0], [255, 255, 255]], [[255, 255, 255], [0, 0, 0]]]\n image3 = [[[255, 255, 255], [255, 255, 255]], [[0, 0, 0], [0, 0, 0]]]\n\n data = [\n (\"image1.png\", Image.fromarray(np.array(image1, dtype=np.uint8))),\n (\"image2.png\", Image.fromarray(np.array(image2, dtype=np.uint8))),\n (\"image3.png\", Image.fromarray(np.array(image3, dtype=np.uint8))),\n ]\n save_materials(temp_dir, data, step=1)\n\n assert os.path.exists(os.path.join(temp_dir, \"images\", \"1\", \"image1.png\"))\n assert os.path.exists(os.path.join(temp_dir, \"images\", \"1\", \"image2.png\"))\n assert os.path.exists(os.path.join(temp_dir, \"images\", \"1\", \"image3.png\"))",
"def write(self, path):\n file_loader = infer_format(path)\n pref_path = f\"designer::{file_loader.name.lower()}\"\n pref.set_default(pref_path, {})\n with open(path, 'w') as dump:\n # generate an upto-date tree first\n self.generate()\n dump.write(file_loader(node=self.root).generate(**pref.get(pref_path)))",
"def add_default_material(self):\r\n self.material = class_from_string(\r\n BaseFramework._configuration._default_material\r\n )()",
"def save_meta(self):\n # jOut = os.path.join(self.meta[\"wdir\"], meta_file)\n with open(self.meta_filepath, \"w\") as f:\n json.dump(self.meta, f)",
"def write_savefile(state: PhysicsState, file: Path):\n if file.suffix.lower() != '.json':\n # Ensure a .json suffix.\n file = file.parent / (file.name + '.json')\n log.info(f'Saving to savefile {file.resolve()}')\n\n savefile_json_dict = google.protobuf.json_format.MessageToDict(\n state.as_proto(),\n including_default_value_fields=False,\n preserving_proto_field_name=True,\n use_integers_for_enums=False,\n )\n\n for i, component in enumerate(savefile_json_dict['engineering']['components']):\n component['name'] = strings.COMPONENT_NAMES[i]\n\n with open(file, 'w') as outfile:\n json.dump(savefile_json_dict, outfile, indent=2)\n\n return file",
"def save(self):\n json.dump(\n {\n \"root\": self.root.sha(),\n \"experiment_arguments\": self.experiment_arguments,\n },\n open(self.directory / \"experiment.json\", \"w\"),\n indent=4,\n )",
"def writeMaterial(mxPointer, mat, mtl_dict):\n\n if not(mat) or not(mxPointer) or (mtl_dict is None):\n return None\n\n if scName(mat):\n self.tobj.pprint (\"**** A MATERIAL NAME HAS BEEN REQUESTED THAT CANNOT BE HANDLED BY THE EXPORTER AND WILL BE IGNORED\")\n return None\n \n mName=mat.name\n self.tobj.pprint (\"Material requested: \", mName)\n if mName[-4:].lower()!=\".mxm\":\n self.tobj.pprint (\"! not a maxwell material, skipping\")\n return None\n\n if mName in mtl_dict:\n self.tobj.pprint (\"already exported - giving reference\")\n return mtl_dict[mName]\n\n mPath = mat.get(\"mxm-path\", None)\n tmpDict ={}\n if mPath: #ok, search the material into the file system\n mPath = bpy.path.abspath(mPath)\n self.tobj.pprint (\"specific search in: \", mPath)\n tmpDict = self.scan4Mat(mPath)\n\n mFullName = None\n if mName in tmpDict:\n mFullName = tmpDict[mName]\n elif mName in self.MXMCache:\n mFullName = self.MXMCache[mName]\n\n if mFullName:\n self.tobj.pprint (\"not yet exported - creating\")\n tmpMat = Mx.readMaterial(mFullName)\n MxMat = Mx.addMaterial(tmpMat)\n mtl_dict[mName] = MxMat\n return MxMat\n\n self.tobj.pprint (\"! no reference found in search directories\")\n return None",
"def save(self):\n\t\tself._merge_light_dict_temp()\n\t\tif self.changed:\n\t\t\tif not os.path.isdir('./resources/lightDict'):\n\t\t\t\tos.makedirs('./resources/lightDict')\n\t\t\theader = json.dumps(\n\t\t\t\t{\n\t\t\t\t\t'forge_index': self._forge_to_index\n\t\t\t\t}\n\t\t\t).encode()\n\t\t\twith open(f'./resources/lightDict/{self.pyUbiForge.game_functions.game_identifier}.ld', 'wb') as f:\n\t\t\t\tnumpy.uint32(len(header)).tofile(f)\n\t\t\t\tf.write(header)\n\t\t\t\t_, index = numpy.unique(self._light_dictionary_numpy[:, :2], axis=0, return_index=True)\n\t\t\t\tself._light_dictionary_numpy[index, :].tofile(f)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a new Settings, reading from a default location for the given domain (~/Library/Preferences/%s.plist). | def __init__(self, domain='com.markfickett.gors'):
settingsDir = os.path.expanduser(self.__SETTINGS_DIR)
if not os.path.isdir(settingsDir):
os.makedirs(settingsDir)
self.__settingsFileName = os.path.join(settingsDir,
domain + '.plist')
if os.path.isfile(self.__settingsFileName):
self.__settings = plistlib.readPlist(
self.__settingsFileName)
else:
self.clear()
self.__currentGroupNames = [] | [
"def create(domain: str) -> None:\n explain_step(f\"Creating default configuration file for website {domain}\")\n run_command(\n f\"webserver genconf -q {domain} | sudo tee /etc/webserver/conf.d/{domain}.conf > /dev/null\"\n )",
"def load_settings(runcard: dict) -> Settings:\n return Settings(**runcard[\"settings\"])",
"def make_pref_file():\r\n pref_dict = {\"default_user\": None}\r\n\r\n with open(os.path.join(os.path.dirname(__file__), \"preferences.json\"), \"w\") as pref:\r\n pref.write(json.dumps(pref_dict, indent=4))\r\n\r\n return pref_dict",
"def create_default_settings():\n from flaskbb.fixtures.settings import fixture\n create_settings_from_fixture(fixture)",
"def create_domain(DomainName=None):\n pass",
"def load_settings(self):\n\n self.domains = []\n self.clear_settings()\n api_keys = self.api_key_instance.get_api_keys()\n if api_keys:\n for domain, api_key in list(api_keys.items()):\n self.domains.append(domain)\n getattr(self.dlg, \"uTextDomain{0}\".format(len(self.domains))).setText(\n domain\n )\n getattr(self.dlg, \"uTextAPIKey{0}\".format(len(self.domains))).setText(\n api_key\n )\n\n # Hide un-populated domain rows\n for entry in range(len(self.domains) + 1, 11):\n getattr(self.dlg, \"uTextDomain{0}\".format(entry)).hide()\n getattr(self.dlg, \"uTextAPIKey{0}\".format(entry)).hide()\n getattr(self.dlg, \"uBtnRemoveDomain{0}\".format(entry)).hide()\n getattr(self.dlg, \"uBtnSaveDomain{0}\".format(entry)).hide()",
"def copy_default_settings(filepath):\n dirname = os.path.dirname(filepath)\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n\n with open(filepath, 'w') as fp:\n key = base64.b64encode(os.urandom(KEY_LENGTH))\n\n output = CONFIG_TEMPLATE % dict(default_key=key)\n fp.write(output)",
"def get_domain_config():\n query = {\n \"type\": \"config\",\n \"action\": \"get\",\n \"xpath\": \"/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/domain\",\n }\n\n return __proxy__[\"panos.call\"](query)",
"def populate_domain_data(self, domain):\n self.domain_resolve(domain)\n domain_data = server.get_domain_data(domain)['data']['userdata']\n\n self.domain_data[domain] = self.domain_resolve(domain)\n\n if domain in self.domain_data.keys():\n try:\n self.domain_data[domain]['documentroot'] = domain_data['documentroot']\n self.domain_data[domain]['ip'] = domain_data['ip']\n except KeyError:\n self.domain_data[domain]['documentroot'] = \"No domain data found, admin should check\"\n self.domain_data[domain]['ip'] = \"No domain data found, admin should check\"",
"def get_preferences_about_site(self, site_domain):\n return dict(WebsitePreferences.objects.filter(site__domain=site_domain).values_list('key__name','val'))",
"def _auth_default_domain(self, config):\n\n identity_version = config.get('identity_api_version', '')\n auth_type = config.get('auth_type', None)\n\n # TODO(mordred): This is a usability improvement that's broadly useful\n # We should port it back up into os-client-config.\n default_domain = config.get('default_domain', None)\n if (identity_version == '3' and\n not auth_type.startswith('v2') and\n default_domain):\n\n # NOTE(stevemar): If PROJECT_DOMAIN_ID or PROJECT_DOMAIN_NAME is\n # present, then do not change the behaviour. Otherwise, set the\n # PROJECT_DOMAIN_ID to 'OS_DEFAULT_DOMAIN' for better usability.\n if (\n auth_type in (\"password\", \"v3password\", \"v3totp\") and\n not config['auth'].get('project_domain_id') and\n not config['auth'].get('project_domain_name')\n ):\n config['auth']['project_domain_id'] = default_domain\n\n # NOTE(stevemar): If USER_DOMAIN_ID or USER_DOMAIN_NAME is present,\n # then do not change the behaviour. Otherwise, set the\n # USER_DOMAIN_ID to 'OS_DEFAULT_DOMAIN' for better usability.\n # NOTE(aloga): this should only be set if there is a username.\n # TODO(dtroyer): Move this to os-client-config after the plugin has\n # been loaded so we can check directly if the options are accepted.\n if (\n auth_type in (\"password\", \"v3password\", \"v3totp\") and\n not config['auth'].get('user_domain_id') and\n not config['auth'].get('user_domain_name')\n ):\n config['auth']['user_domain_id'] = default_domain\n return config",
"def default_user_settings(self) -> pulumi.Input['DomainUserSettingsArgs']:\n return pulumi.get(self, \"default_user_settings\")",
"def load_from_defaults(self):\n default_settings = import_module('mindinsight.conf.defaults')\n for setting in dir(default_settings):\n if setting.isupper():\n setattr(self, setting, getattr(default_settings, setting))\n self._default_settings.add(setting)",
"def setup_domain(domain):\n bucket = BUCKET_MANAGER.get_bucket(domain)\n\n zone = DOMAIN_MANAGER.find_hosted_zone(domain) \\\n or DOMAIN_MANAGER.create_hosted_zone(domain)\n\n endpoint = util.get_endpoint(BUCKET_MANAGER.get_region_name(bucket))\n a_record = DOMAIN_MANAGER.create_s3_domain_record(zone, domain, endpoint)\n print(\"Domain configure: http://{}\".format(domain))\n print(\"A record created: {}\".format(a_record))",
"def set_domain(domain):\n set_hosts(domain)\n click.echo(\n 'Host file was set: {} -> 127.0.0.1'.format(', '.join(domain))\n )",
"def createConfig():\n config = ConfigParser()\n config.add_section(\"database\")\n config.set(\"database\", \"url\", \"../pos.db\")\n\n\n config.add_section(\"firsttime\")\n config.set(\"firsttime\" , \"db-installed\" , \"0\")\n\n\n with open(\"settings.ini\", \"w\") as config_file:\n config.write(config_file)",
"def findSettingsFile():\n settingsName = 'oct-fire-settings.json'\n userPath = os.path.expanduser('~')\n if os.path.exists(settingsName):\n return settingsName\n elif os.path.exists(os.path.join(userPath, settingsName)):\n return os.path.join(userPath, settingsName)\n elif os.path.exists(os.path.join(userPath, 'Desktop', settingsName)):\n return os.path.join(userPath, 'Desktop', settingsName)\n elif os.path.exists(os.path.join(userPath, 'Documents', settingsName)):\n return os.path.join(userPath, 'Documents', settingsName)\n elif os.path.exists(os.path.join(userPath, 'Downloads', settingsName)):\n return os.path.join(userPath, 'Downloads', settingsName)\n raise Exception('Could not locate settings file')",
"def create_default_settings_file(self, filename):\n new_config = configparser.SafeConfigParser()\n\n new_config[self.BLUE_SECTION_NAME] = {}\n new_config[self.BLUE_SECTION_NAME][self.MAX_VALUE_NAME] = \"50\"\n new_config[self.BLUE_SECTION_NAME][self.MIN_VALUE_NAME] = \"20\"\n\n new_config[self.YELLOW_SECTION_NAME] = {}\n new_config[self.YELLOW_SECTION_NAME][self.MAX_VALUE_NAME] = \"70\"\n new_config[self.YELLOW_SECTION_NAME][self.MIN_VALUE_NAME] = \"0\"\n\n new_config[\"Program_1\"] = {}\n new_config[\"Program_1\"][self.START_TIME_NAME] = \"7:30\"\n new_config[\"Program_1\"][self.STOP_TIME_NAME] = \"9:00\"\n new_config[\"Program_1\"][self.SUNRISE_DURATION_NAME] = \"15\"\n new_config[\"Program_1\"][self.SUNSET_DURATION_NAME] = \"0\"\n\n new_config[\"Program_2\"] = {}\n new_config[\"Program_2\"][self.START_TIME_NAME] = \"16:30\"\n new_config[\"Program_2\"][self.STOP_TIME_NAME] = \"21:30\"\n new_config[\"Program_2\"][self.SUNRISE_DURATION_NAME] = \"0\"\n new_config[\"Program_2\"][self.SUNSET_DURATION_NAME] = \"30\"\n\n with open(filename, \"w\") as configfile:\n new_config.write(configfile)\n\n self.myLogger.info (\"Default Configuration File created at \" + filename)\n\n return filename",
"def settings(instance):\n with open(instance.root_dir + '/Config/config.yml') as config:\n config = yaml.load(config)\n instance.name = config['name']\n instance.port = config['web']['port']\n # default host\n instance.host = \"http://localhost\"\n if 'host' in config['web']:\n instance.host = config['web']['host']\n instance.debug = config['debug']\n return instance"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the full name of the given keyName under the current group. If extant is True and the key does not already exist, return None; otherwise return the full key name. | def __getKey(self, keyNameRaw, extant=True):
fullKeyName = self.__DELIMITER.join(
self.__currentGroupNames + [str(keyNameRaw)])
if extant and (fullKeyName not in self.__settings):
return None
return fullKeyName | [
"def extract_key_name(self):\n # quick and dirty regex parsing..\n # consider using gnupg.\n _, out, _ = self.as_user('/usr/bin/gpg --list-keys')\n patterns = [\n 'pub\\s+.*?uid\\s+debrepo.*?sub\\s+\\w+/(\\w+)\\s+[\\w-]+$',\n '^pub.*?\\n\\s+(.*?)\\nuid',\n ]\n keyname = None\n out_str = out.decode('utf8')\n for pattern in patterns:\n m=re.search(pattern, out_str, flags=re.M|re.DOTALL)\n if m:\n keyname=m.group(1)\n break\n return keyname",
"def key_name(self) -> str:\n return pulumi.get(self, \"key_name\")",
"def cmek_key_name(self) -> Optional[str]:\n return pulumi.get(self, \"cmek_key_name\")",
"def get_name(self):\n return m2.x509_extension_get_name(self.x509_ext)",
"def actual_key(self, key):\n key_list = []\n if key.scope == Scope.children:\n key_list.append('children')\n elif key.scope == Scope.parent:\n key_list.append('parent')\n else:\n key_list.append([\"usage\", \"definition\", \"type\", \"all\"][key.scope.block])\n\n if key.block_scope_id is not None:\n key_list.append(key.block_scope_id)\n if key.student_id:\n key_list.append(key.student_id)\n return \".\".join(key_list)",
"def GroupsExtension_getPackageName():\n return _libsbml.GroupsExtension_getPackageName()",
"def getKey(self, namespace, ns_key):\n namespace = self._fixNS(namespace)\n if namespace == BARE_NS:\n return ns_key\n\n ns_alias = self.namespaces.getAlias(namespace)\n\n # No alias is defined, so no key can exist\n if ns_alias is None:\n return None\n\n if ns_alias == NULL_NAMESPACE:\n tail = ns_key\n else:\n tail = '%s.%s' % (ns_alias, ns_key)\n\n return 'openid.' + tail",
"def getKeyPath(self, keyPath):\n parent = self\n parts = keyPath.split(\".\")\n for part in parts[:-1]:\n child = parent.get(part, None)\n if child is None:\n return None\n parent = child\n return parent.get(parts[-1], None)",
"def kms_key_name(self) -> str:\n return pulumi.get(self, \"kms_key_name\")",
"def get_filtered_filename(filename, ext, filename_key):\n try:\n image_name, extension = filename.rsplit('.', 1)\n except ValueError:\n image_name = filename\n extension = 'jpg'\n\n if ext is None:\n ext = extension\n\n return \"%(image_name)s__%(filename_key)s__.%(ext)s\" % ({\n 'image_name': image_name,\n 'filename_key': filename_key,\n 'ext': ext\n })",
"def get_env_key(obj, key=None):\n return str.join('_', [obj.__module__.replace('.','_').upper(),\n key.upper()])",
"def getName(kb):\n\tassertKeyBinder(kb) #PERFORMANCE --> comment this line to increase performances (use carefully)\n\treturn kb[\"name\"]",
"def sub_key(dirname):\n return SUB_PREFIX + dirname",
"def key_pair_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"key_pair_name\")",
"def _get_key(arguments):\n # Get the base path.\n if arguments.get(\"felix\"):\n base = CONFIG_PATH\n elif arguments.get(\"node\"):\n base = BGP_HOST_PATH % {\"hostname\": hostname}\n else:\n base = BGP_GLOBAL_PATH\n\n # Determine the actual name of the field. Look this up from the config\n # data, otherwise just use the name.\n config_data = _get_config_data(arguments)\n name = arguments[\"<NAME>\"]\n if name in config_data:\n name, _ = config_data[name]\n\n return base + name",
"def dic_get_subkeys_value(self, key):\n current_dic = self.dic\n sub_keys = key.split('.')\n for k in sub_keys[:-1]:\n current_dic = current_dic[k]\n return current_dic[sub_keys[-1]]",
"def key_name(self):\n return self._key_name",
"def fname(key):\n return key.rsplit(\"/\", 1)[-1]",
"def GetSubkeyByName(self, name):"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function gets the total occurrences of words and syllables in the original Unicode Garshana corpus. To do this, it opens a .csv file with UTF-16 encoding, and splits on commas, expecting the line of Sumerian text to be in the 8th column. Filters annotations from each line, and tracks the occurrence of each word and syllable. All combinations of unigrams, bigrams, and trigrams are treated as individual syllables. | def get_counts(data):
word_count = {}
syll_count = {}
infile = data.corpus
try:
open_file = codecs.open(infile, 'r', encoding='utf-16')
for line in open_file:
line = line.lower()
# Remove tablet indexing info and line numbers. Grab only text data
line = line.split(',')
text = clean_line(line[7])
# Update the occurrences of the words in the line
for word in text.split():
count = word_count.setdefault(word, 0)
word_count[word] = count + 1
# Track occurrences of syllables
update_syllable_count(word, syll_count)
open_file.close()
except IOError:
print("Cannot open: " + infile)
return (word_count, syll_count) | [
"def total_syllables(target_text):\n\n splited_text = target_text.split()\n count = 0\n for word in splited_text:\n count = count + word_syllables(word)\n return count",
"def get_analyze_per_file(self):\n \"\"\"Exclude tags, exclude binary (img), count words without non literal characters and digits\"\"\"\n filename = f'{self.path}/{self.filename}'\n file = open(filename, 'r', encoding='utf-8')\n df_tmp = pd.DataFrame(columns=['word', 'cnt', 'word_low'])\n w_cnt = 0\n word_counter = {}\n data = file.read()\n head, sep, tail = data.partition('<binary')\n head = re.sub('\\\\s\\\\s*', ' ', (re.sub('\\\\W|\\\\d', ' ', re.sub('<.*?>', '', head))))\n word_list = head.split()\n for word in word_list:\n\n if word not in word_counter:\n word_counter[word] = 1\n else:\n word_counter[word] = word_counter[word] + 1\n w_cnt += 1\n\n for word, occurance in word_counter.items():\n df_tmp = df_tmp.append({'word': '{:15}'.format(word), 'cnt': '{:3}'.format(occurance),\n 'word_low': '{:15}'.format(word).lower()}, ignore_index=True)\n df_tmp = df_tmp.sort_values(by='word_low')\n df_tmp.loc[(df_tmp.word != df_tmp.word_low), 'word'] = df_tmp.cnt\n df_tmp.loc[(df_tmp.word == df_tmp.cnt), 'cnt'] = 0\n df_tmp.loc[(df_tmp.word == df_tmp.word_low), 'word'] = 0\n df_tmp['word'] = df_tmp.word.astype(int)\n df_tmp['cnt'] = df_tmp.cnt.astype(int)\n df_tmp = df_tmp.groupby(['word_low'])['cnt', 'word'].sum().reset_index()\n conn = sqlite3.connect('for_python_ht.db')\n try:\n try:\n sqlite_for_ht.CreateTableSingle.delete_table(f_3, self.filename)\n print(datetime.now(), '-', self.filename, 'Table deleted at the start point')\n except Exception:\n print(datetime.now(), '-', 'Something went wrong')\n traceback.print_exc()\n df_tmp.to_sql(name=self.filename, con=conn, index=False)\n print(datetime.now(), '-', self.filename, 'Table created and filled with data')\n except Exception:\n print(datetime.now(), '-', 'file with name {} already exists'.format(self.filename))\n traceback.print_exc()\n print(datetime.now(), '-', 'word analyse for', self.filename, 'done')\n sqlite_for_ht.HandleTemp.update_table(f_2, 'status', 'Done', self.filename)\n return None",
"def count(self, corenlp_fpath, aliases):\n\n\t\talias_indices = self.get_indices(aliases)\n\t\tcorenlp_xml = ET.parse(corenlp_fpath).getroot()\n\n\t\tunigram_cnts = Counter([(t[1].text, t[4].text[0])\n\t\t for i, t\n\t\t in enumerate(corenlp_xml.iter('token'))\n\t\t if i not in alias_indices])\n\n\t\tcharacter_cnts = self.get_character_cnts(aliases)\n\t\tcharacter_ranks = self.get_character_ranks(aliases)\n\n\t\tfor c, cnt in character_cnts.iteritems():\n\t\t unigram_cnts[('ALIAS-%d' % character_ranks[c], 'N')] = cnt\n\n\t\treturn unigram_cnts",
"def count_syllables_in_line(line):\n ws = line.rstrip('.').split()\n return sum([count_syllables_in_word(w) for w in ws])",
"def get_frequencies(dataset: Iterable[Tuple[str, str]] or str) -> Counter:\n if isinstance(dataset, str):\n with open(dataset) as file:\n dataset = tuple(csv.reader(file))\n\n word_frequencies = Counter()\n\n for sentence, label in dataset:\n for word in whitespace_regex.split(sentence):\n if word is not None and len(word) != 0:\n word_frequencies[word] += 1\n\n return word_frequencies",
"def count_words(filename):",
"def count_ngrams(self, corpus):\n \n self.unigramcounts = {} # might want to use defaultdict or Counter instead\n self.bigramcounts = {} \n self.trigramcounts = {} \n self.all_unigrams = 0\n ##Your code here\n ngrams_list = []\n count_ngrams = 0\n \n for sequence in corpus:\n \n ngrams_list = get_ngrams(sequence,1)\n \n for ngram in ngrams_list:\n if ngram in self.unigramcounts:\n count_ngrams = self.unigramcounts.get(ngram)\n self.unigramcounts[ngram] = count_ngrams + 1\n self.all_unigrams += 1\n else:\n self.unigramcounts[ngram] = 1\n self.all_unigrams += 1\n \n sequence = sequence[1:-1] \n \n ngrams_list = get_ngrams(sequence,2)\n \n\n for ngram in ngrams_list:\n if ngram in self.bigramcounts:\n count_ngrams = self.bigramcounts.get(ngram)\n self.bigramcounts[ngram] = count_ngrams + 1\n else:\n self.bigramcounts[ngram] = 1\n \n sequence = sequence[1:-1]\n \n ngrams_list = get_ngrams(sequence,3)\n \n\n for ngram in ngrams_list:\n if ngram in self.trigramcounts:\n count_ngrams = self.trigramcounts.get(ngram)\n self.trigramcounts[ngram] = count_ngrams + 1\n else:\n self.trigramcounts[ngram] = 1\n \n \n \n return self.unigramcounts, self.bigramcounts, self.trigramcounts, self.all_unigrams",
"def updateWordCounts():\n emaildata = loadEmailData()\n englishwords = importDictionary()\n countAllWords(emaildata, englishwords)",
"def summarize_corpus():\n\t\n\t# get metadata\n\t#get_metadata.from_TEIP5(wdir, corpus_inpath, \"metadata\", md_mode)\n\t\n\t# visualize some metadata\n\t#visualize_metadata.describe_corpus(wdir, md_csv, \"author-continent\")\n\tvisualize_metadata.describe_corpus(wdir, md_csv, \"author-country\")\n\t#visualize_metadata.describe_corpus(wdir, md_csv, \"language\")\n\t#visualize_metadata.describe_corpus(wdir, md_csv, \"subgenre_hist\")\n\t#visualize_metadata.describe_corpus(wdir, md_csv, \"subgenre_x\")\n\tvisualize_metadata.plot_pie(wdir, md_csv, \"subgenre\")\n\n\tvisualize_metadata.describe_corpus(wdir, md_csv, \"subgenre\")\n\t#visualize_metadata.describe_corpus(wdir, md_csv, \"gender\")\n\t\n\t# make some counts\n\tmd_table = pd.DataFrame.from_csv(os.path.join(wdir, md_csv), header=0)\n\tnum_texts = len(md_table)\n\t#num_language = len(md_table.groupby([\"language\"]))\n\t#num_continent = len(md_table.groupby([\"author-continent\"]))\n\t#num_countries = len(md_table.groupby([\"author-country\"]))\n\t#num_authors = len(md_table.groupby([\"author-name\"]))\n\tnum_authors = len(md_table.groupby([\"author-name\"]))\n\tnum_subgenre = len(md_table.groupby([\"subgenre\"]))\n\t#num_subgenre_x = len(md_table.groupby([\"subgenre_x\"]))\n\t#fr_subgenre_hist = md_table.groupby([\"subgenre_hist\"]).count()\n\t#num_historical = fr_subgenre_hist[\"idno\"][\"historical\"]\n\t#num_not_historical = fr_subgenre_hist[\"idno\"][\"not_historical\"]\n\t\n\t\n\td = {\"texts\":[num_texts], \n\t#\"languages\":[num_language],\n\t#\"continents\":[num_continent],\n\t#\"countries\":[num_countries],\n\t\"authors\":[num_authors],\n\t#\"subgenre_x\":[num_subgenre_x],\n\t\"subgenre\":[num_subgenre]}\n\t#\"num_historical\":[num_historical],\n\t#\"num_not_historical\":[num_not_historical]}\n\t\n\t\n\t\n\tcount_fr = pd.DataFrame(d)\n\tcount_fr.to_csv(os.path.join(wdir, \"corpus-description.csv\"), sep=\",\", header=True)\n\tprint(\"Done: summarize corpus\")",
"def _count_vocab(self,raw_documents, fixed_vocab=False):\n if fixed_vocab:\n vocabulary = self.vocabulary_\n else:\n # Add a new value when a new vocabulary item is seen\n vocabulary = defaultdict()\n vocabulary.default_factory = vocabulary.__len__\n\n analyze = super().build_analyzer()\n \n j_indices = []\n indptr = []\n\n values = array.array(str('f'))\n indptr.append(0)\n for doc in raw_documents:\n #doc = tupla[0]\n feature_counter = {}\n #texttlist = doc.split(sep=\" \")\n for feature in analyze(doc):#texttlist:\n try:\n \n # Ignore out-of-vocabulary items for fixed_vocab=True\n feature_idx = vocabulary[feature]\n #print(feature_idx)\n #fti_feature = calc_fti(feature,raw_documents)\n \n if feature_idx not in feature_counter:\n feature_counter[feature_idx] = 1\n else:\n feature_counter[feature_idx] += 1\n #print(feature_counter[feature_idx])\n except KeyError:\n # Ignore out-of-vocabulary items for fixed_vocab=True\n continue\n\n\n j_indices.extend(feature_counter.keys())\n values.extend(feature_counter.values())\n indptr.append(len(j_indices))\n\n if not fixed_vocab:\n # disable defaultdict behaviour\n vocabulary = dict(vocabulary)\n if not vocabulary:\n raise ValueError(\"empty vocabulary; perhaps the documents only\"\n \" contain stop words\")\n\n if indptr[-1] > np.iinfo(np.int32).max: # = 2**31 - 1\n if _IS_32BIT:\n raise ValueError(('sparse CSR array has {} non-zero '\n 'elements and requires 64 bit indexing, '\n 'which is unsupported with 32 bit Python.')\n .format(indptr[-1]))\n indices_dtype = np.int64\n\n else:\n indices_dtype = np.int32\n \n j_indices = np.asarray(j_indices, dtype=indices_dtype)\n indptr = np.asarray(indptr, dtype=indices_dtype)\n \n #print (vocabulary)\n X = sp.csr_matrix((values, j_indices, indptr),\n shape=(len(indptr) - 1, len(vocabulary)),\n dtype=np.float32)\n X.sort_indices() \n \n self.vocabulary_calculated = vocabulary\n\n return vocabulary, X",
"def extractWords(self, inputDataset):\n reviewFile = open(inputDataset, \"r\", encoding=\"utf-8-sig\")\n for record in reviewFile:\n record = record.strip().split(\"\\t\") # tab-delimited .txt file\n self.addUnigrams(int(record[0]), record[1])\n reviewFile.close()",
"def gather_counts(directory):\n\n counts_un = defaultdict(int)\n counts_bi = defaultdict(int)\n counts_tri = defaultdict(int)\n counts_quad = defaultdict(int)\n counts_qui = defaultdict(int)\n for filename in os.listdir(f\"./{directory}\"):\n if \".DS_Store\" in filename:\n continue\n with open(f\"./{directory}/{filename}\", \"r\") as f:\n filetext = f.readlines()\n filetext = [\"<s>\\n\"]*4 + filetext + [\"</s>\\n\"]*4\n filetext = list(filter(lambda t: t.strip() != \"\", filetext))\n for i in range(len(filetext)-4):\n a, b, c = filetext[i].strip()+\"\\n\", filetext[i+1].strip()+\"\\n\", filetext[i+2].strip()+\"\\n\"\n d, e = filetext[i+3].strip()+\"\\n\", filetext[i+4].strip()+\"\\n\"\n counts_un[a] += 1\n counts_bi[a+b] += 1\n counts_tri[a+b+c] += 1\n counts_quad[a+b+c+d] += 1\n counts_qui[a+b+c+d+e] += 1\n counts_un[\"</s>\\n\"] += 4\n counts_bi[\"</s>\\n</s>\\n\"] += 3\n counts_un[\"</s>\\n</s>\\n</s>\\n\"] += 2\n counts_bi[\"</s>\\n</s>\\n</s>\\n</s>\\n\"] += 1\n return counts_un, counts_bi, counts_tri, counts_quad, counts_qui",
"def count_ngrams(self, corpus):\n \n self.unigramcounts = {} # might want to use defaultdict or Counter instead\n self.bigramcounts = {} \n self.trigramcounts = {} \n\n self.total = 2\n ##Your code here\n\n for sentence in corpus:\n temp_1 = get_ngrams(sentence,1)\n temp_2 = get_ngrams(sentence,2)\n temp_3 = get_ngrams(sentence,3)\n for i in range(len(temp_1)):\n if temp_1[i] in self.unigramcounts:\n self.unigramcounts[temp_1[i]] += 1\n else:\n self.unigramcounts[temp_1[i]] = 1\n self.total += 1\n\n for i in range(len(temp_2)):\n if temp_2[i] in self.bigramcounts:\n self.bigramcounts[temp_2[i]] += 1\n else:\n self.bigramcounts[temp_2[i]] = 1\n\n for i in range(len(temp_3)):\n if temp_3[i] in self.trigramcounts:\n self.trigramcounts[temp_3[i]] += 1\n else:\n self.trigramcounts[temp_3[i]] = 1\n return",
"def _count(self):\n words = [word.lower() for word in self.corpus.words()]\n bigrams_words = bigrams(words)\n for bigram in bigrams_words:\n self._bigrams[bigram] += 1",
"def tokenization(self, path_to_corpora):\n print(\"Starting tokenization\")\n train_set_pr_document = []\n document_mapping = []\n total_document_count = 0\n \n for filename in os.listdir(path_to_corpora):\n with open(path_to_corpora + filename, \"r\") as article_file:\n article_text = article_file.readlines()\n if len(article_text) < self.minimum_character_limit:\n print(len(article_text))\n continue\n else:\n print(\"WUP\")",
"def identify_causal_words(df, df_causal_terms, flag='causal', show_data=True):\n# causal_wds = df_top_terms[df_top_terms['causal'] == 1]['term'].values\n# bad_wds = top_term_df[top_term_df['causal'] == 0]['term'].values\n\n df[flag+'_wds'] = df['text'].apply(lambda x: [wd for wd in re.findall('\\w+', x.lower()) if wd in df_causal_terms.term.values])\n df['n_'+flag+'_wds'] = df[flag+'_wds'].apply(lambda x: len(x))\n \n if(show_data):\n print(\"%d out of %d sentences include %s words\" % (df[df['n_'+flag+'_wds']>0].shape[0], df.shape[0], flag))",
"def count_syllables(words):\n\n\n count = 0\n\n for word in words:\n word_count = count_syllables_in_word(word)\n count = count + word_count\n return count",
"def count_ngrams(self, corpus):\n \n self.unigramcounts = defaultdict(int)\n self.bigramcounts = defaultdict(int)\n self.trigramcounts = defaultdict(int)\n\n self.sentence_counts = 0\n self.word_count = 0\n\n for line in corpus:\n sequence = line\n self.sentence_counts +=1\n\n unigrams = get_ngrams(sequence, n=1)\n for gram in unigrams:\n self.word_count += 1\n self.unigramcounts[gram] +=1\n\n bigrams = get_ngrams(sequence, n=2)\n for gram in bigrams:\n self.bigramcounts[gram] +=1\n\n trigrams = get_ngrams(sequence, n=3)\n for gram in trigrams:\n self.trigramcounts[gram] +=1\n\n #self.unigramcounts[('START')] = self.sentence_counts *2\n self.bigramcounts[('START', 'START')] = self.sentence_counts\n\n #return self",
"def get_text_measures(parsed_text: dict) -> dict:\n total_syllables = 0\n total_complex_words = 0\n total_long_words = 0\n total_unique_words = set()\n\n filtered_word_tokens = word_token_filter(parsed_text['tagged'])\n total_sentences = get_sentence_count(parsed_text['sentence_tokens'])\n parts_of_speech = get_parts_of_speech(parsed_text['tagged'])\n total_words = get_words_count(filtered_word_tokens)\n words_frequency = get_words_freq(filtered_word_tokens)\n total_characters = get_characters_count(parsed_text['word_tokens'])\n\n for word_token in filtered_word_tokens:\n total_unique_words.add(word_token)\n syllable = get_syllables_counter(word_token)\n total_syllables += syllable\n\n if len(word_token) >= LONG_WORD_SYLLABLES:\n total_long_words += 1\n\n if syllable >= COMPLEX_WORD_SYLLABLES and not word_token[0].isupper():\n total_complex_words += 1\n\n if not total_words:\n raise ValueError(\"I can't do this, there's no words there!\")\n\n characters_per_word = total_characters / total_words\n syllables_per_word = total_syllables / total_words\n word_per_sentence = total_words / total_sentences\n kincaid = kincaid_grade_level(total_syllables, total_words, total_sentences)\n ari_ = ari(total_characters, total_words, total_sentences)\n coleman_liau = coleman_liau_index(total_characters, total_words, total_sentences)\n flesch = flesch_reading_ease(total_syllables, total_words, total_sentences)\n fog = gunning_fog_index(total_words, total_complex_words, total_sentences)\n absolute_score = get_absolute_score(kincaid, ari_, coleman_liau, flesch, fog)\n print(\"-\" * 20, '\\n', \"ABSOLUTE READABILITY\", \"\\n\", \"-\" * 20)\n print(f\"Text has {absolute_score} general score\")\n\n stats = dict([\n ('Average number of characters per word', characters_per_word),\n ('Average number of syllables per word', syllables_per_word),\n ('Average number of words per sentence', word_per_sentence),\n ('Number of characters', total_characters),\n ('Syllables', total_syllables),\n ('Number of words', total_words),\n ('Unique words', len(total_unique_words)),\n ('Number of sentences', total_sentences),\n ('Number of long words', total_long_words),\n ('Number of complex words', total_complex_words),\n ('Words frequency', words_frequency)\n ])\n\n readability_grades = dict([\n ('Kincaid', kincaid),\n ('ARI', ari_),\n ('Coleman-Liau', coleman_liau),\n ('Flesch Reading Ease', flesch),\n ('Gunning Fog Index', fog)\n ])\n\n return dict([\n ('READABILITY GRADES', readability_grades),\n ('TEXT INFO', stats),\n ('PARTS OF SPEECH', parts_of_speech)\n ])"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the accounting period that is currently valid. An accounting_period is valid when the current date lies between its begin and end dates. | def get_current_valid_accounting_period():
current_valid_accounting_period = None
for accounting_period in AccountingPeriod.objects.all():
if accounting_period.begin < date.today() and accounting_period.end > date.today():
return accounting_period
if not current_valid_accounting_period:
raise AccountingPeriodNotFound() | [
"def getCurrentValidAccountingPeriod():\n currentValidAccountingPeriod = None\n for accountingPeriod in AccountingPeriod.objects.all():\n if accountingPeriod.begin < date.today() and accountingPeriod.end > date.today():\n return accountingPeriod\n if currentValidAccountingPeriod == None:\n raise NoFeasableAccountingPeriodFound()",
"def valid_period(self):\n return self._valid_period",
"def get_current_period(self):\n if not self.next_billing:\n return None\n assert self.type == \"N\", _(\"Subscription must be normal to use this method\")\n start = self.next_billing - relativedelta(months=self.frequency)\n end = self.next_billing\n return start, end",
"def _find_period(self):\n\n from ..detector.auger2014 import (period_1_start, period_1_end, period_2_start, period_2_end,\n period_3_start, period_3_end, period_4_start, period_4_end)\n\n # check dates\n period = []\n for y, d in np.nditer([self.year, self.day]):\n d = int(d)\n test_date = date(y, 1, 1) + timedelta(d)\n\n if period_1_start <= test_date <= period_1_end:\n period.append(1)\n elif period_2_start <= test_date <= period_2_end:\n period.append(2)\n elif period_3_start <= test_date <= period_3_end:\n period.append(3)\n elif test_date >= period_3_end:\n period.append(4)\n else:\n print('Error: cannot determine period for year', y, 'and day', d)\n \n return period",
"def __current_billing_cycle(self):\n end_period_day = self._user_geocoder_config.period_end_date.day\n today = date.today()\n if end_period_day > today.day:\n temp_date = today + relativedelta(months=-1)\n date_from = latest_valid_date(temp_date.year, temp_date.month, end_period_day)\n else:\n date_from = latest_valid_date(today.year, today.month, end_period_day)\n\n return date_from, today",
"def period(self):\n return self._period",
"def get_chart_period(self,req):\n now=int(DATE())\n period=INT(req.period) # allow for it having been a string\n if period>9999: # assume it is a month\n if period<(now//100): # a valid complete previous month\n prior=True# this is a previous month\n else:\n period=now//100 # default to current month\n prior=False\n start=period*100+1\n end=self.nextperiod(period)*100+1\n else: # assume it is a year\n if period and (period<(now//10000)): # a prior year\n prior=True# this is a previous year\n else:\n##\n# period=now//100 # default to current month\n# prior=False\n# start=period*100+1\n# end=self.nextperiod(period)*100+1\n##\n period=now//10000 # default to current year\n prior=False\n start=period*10000+101\n end=self.nextperiod(period)*10000+101\n return period,start,end,prior",
"def getPeriod(self):\n return self.period",
"def reporting_period_ends(instance,dei_namespace):\n\n reporting_period_end_for_legal_entity = {}\n\n dim_LegalEntityAxis = instance.dts.resolve_concept(xml.QName('LegalEntityAxis',dei_namespace))\n concept_DocumentPeriodEndDate = instance.dts.resolve_concept(xml.QName('DocumentPeriodEndDate',dei_namespace))\n for fact in instance.facts.filter(concept_DocumentPeriodEndDate):\n # Amendment: Use the period end date of the context and not the DocumentPeriodEndDate value! \n end_date = fact.period_aspect_value.end\n\n legal_entity = dimension_value(fact,dim_LegalEntityAxis)\n if legal_entity not in reporting_period_end_for_legal_entity or reporting_period_end_for_legal_entity[legal_entity][1] < end_date:\n reporting_period_end_for_legal_entity[legal_entity] = (fact,end_date)\n\n return reporting_period_end_for_legal_entity",
"def planning_period(self):\n return self._planning_period",
"def limit_period(self):\n return self._limit_period",
"def billing_period(self) -> Optional[str]:\n return pulumi.get(self, \"billing_period\")",
"def period_bounds(self, date, period='month'):\n\n if not(isinstance(date, pd.Timestamp)):\n date = pd.Timestamp(self.parse_datetime(date))\n\n return self._start_period(date, period=period), self._last_tick_current_period(date, period=period)",
"def period_end(self):\n return self._period_end",
"def get_period(self):\n # res\n if self._cacheExpiration <= YAPI.GetTickCount():\n if self.load(YAPI._yapiContext.GetCacheValidity()) != YAPI.SUCCESS:\n return YPwmOutput.PERIOD_INVALID\n res = self._period\n return res",
"def getPeriod(self):\n return _yarp.RFModule_getPeriod(self)",
"def remaining_days_in_current_period(self):\n try:\n return self.count_days_from_now(self.current_period_ends_at)\n except AttributeError:\n return 0",
"def period_start(self):\n return self._period_start",
"def valid_period(self, valid_period):\n self._valid_period = valid_period"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Transform an mp3 file into wav format by calling bash and using mpg123 or ffmpeg. | def mp3_to_wav(mp3_file, wav_file, encoder='mpg123'):
if encoder == 'mpg123':
bash_command = ['mpg123', '-w', wav_file, '--mono', mp3_file]
else:
bash_command = ['ffmpeg', '-i', mp3_file, wav_file]
subprocess.run(bash_command) | [
"def mp3_to_wav(input):\n sound = AudioSegment.from_file(input, format='mp3')\n sound.export(name_wav(input), format='wav')\n print(name_wav(input))",
"def wav_to_mp3(input):\n sound = AudioSegment.from_file(input, format='wav')\n sound.export(name_mp3(input), format='mp3')",
"def ogg2wav(oggfile, wavfile):\n process = subprocess.run(['ffmpeg', '-i', oggfile, \"-ar\", \"16000\", wavfile])\n if process.returncode != 0:\n raise Exception(\"something went wrong when converting voice data\")",
"def mp3_to_wav(self, file):\n sound = AudioSegment.from_mp3(file)\n file = file[:-4]\n sound.export(file+\".wav\", format=\"wav\")",
"def convert_to_wav(mp3_filename):\n\n wav_filename = mp3_filename[:-4] + \".wav\"\n complete_mp3FileName = os.path.join(MP3_FOLDER, mp3_filename)\n complete_wavFileName = os.path.join(WAV_FOLDER, wav_filename)\n\n mp3_file = AudioSegment.from_mp3(complete_mp3FileName)\n mp3_file.export(complete_wavFileName, format=\"wav\")\n\n print(f\"The mp3 file {complete_mp3FileName} was successfully converted to \" \\\n + f\"the wav file {complete_wavFileName}.\")",
"def convert_to_mp3(self,path, filename):\n\n codec = \"libmp3lame\"\n mp3_filename = filename + \".mp3\"\n\n command = [self.FFMPEG_BIN,\n \"-n\",\n \"-i\", path,\n \"-acodec\", codec,\n \"-ab\", \"128k\",\n mp3_filename\n ]\n\n return command",
"def _convert_to_wav(self):\n cmd = ' '.join(['ffmpeg -i', self.music_file.name, \n '-y -acodec pcm_s16le -ac 1 -ar 44100', self.wave_file.name])\n self._seek_all()\n ret = call(cmd, shell=True)\n return ret",
"def convert_to_wav(fin, fout):\n temp = subprocess.run([\"ffmpeg\",\n \"-i\", \n fin, \n fout], \n stdout=subprocess.PIPE, \n stderr=subprocess.PIPE)",
"def convert_to_mp3(flac_file=None, output_dir=None):\n file_name = os.path.basename(flac_file).replace('.flac', '.mp3')\n out_file = os.path.join(output_dir, file_name)\n ffmpeg = (\"ffmpeg -i {0} -codec:a libmp3lame\"\n \" -aq 0 -ab 320k -ar 48000 -codec:v mjpeg {1}\")\n ffmpeg = ffmpeg.format(\n shlex.quote(flac_file),\n shlex.quote(out_file))\n\n try:\n return subprocess.call(ffmpeg, shell=True)\n except KeyboardInterrupt:\n print(\"Quitting...\")\n return subprocess.call(['rm', '-fv', out_file])\n return 255",
"def wav2ogg(wavfile, oggfile):\n process = subprocess.run(['ffmpeg', '-i', wavfile, oggfile])\n if process.returncode != 0:\n raise Exception(\"something went wrong when converting voice data\")",
"def wave(args):\n\tfiles = pathlib.Path(args.input)\n\ti = 1\n\tfor file in files.iterdir():\n\t\tparent = str(file.parent)\n\t\tfilename = pathlib.PurePosixPath(file).stem\n\t\toutput_name = \"music\"+str(i).zfill(4)\n\t\tos.system(\"ffmpeg -i \"+str(pathlib.PurePath(parent,file.name))+\" -ar 24000 \"+str(pathlib.PurePath(parent,output_name))+\".wav\")\n\t\ti+=1",
"def avi_to_wav(avi_file, wav_file):\n # Executable program on Windows for AV editing\n ffmpeg = os.path.join(os.path.dirname(__file__), 'ffmpeg', 'bin', 'ffmpeg.exe')\n print(ffmpeg)\n command = ffmpeg + \" -i \" + avi_file + \" -ab 160k -ac 1 -ar 10000 -vn \" + wav_file\n subprocess.call(command, shell=True)",
"def transcode(oggfilename, mp3filename=None):\n try:\n wavfilename = \"%s%d.wav\" % (WAV_FILENAME_PREFIX, getpid())\n if mp3filename is None: mp3filename = \"%s.mp3\" % path.basename(oggfilename)[:-4]\n oggsize = file_size_human(oggfilename)\n stdout.write(\"%s (%s)\\n\" % (oggfilename, oggsize))\n oggdict = ogg_info_dict(oggfilename)\n encode_cmd = lame_cmd_base\n for k in oggdict.keys():\n k = k.upper()\n knote = ''\n if k in ogg_to_id3.keys():\n if k == 'GENRE' and oggdict[k].lower() not in LAME_GENRES:\n knote = \"[WARNING: Unrecognized by LAME so MP3 genre will be 'Other']\"\n encode_cmd = \"%s %s %s\" % (encode_cmd, ogg_to_id3[k], shell_quote(oggdict[k]))\n stdout.write(\" %s: %s %s\\n\" % (str(k), str(oggdict[k]), knote))\n stdout.write(\"%s \" % mp3filename)\n stdout.flush()\n decode_cmd = \"oggdec --quiet -o %s %s 2>/dev/null\" % (shell_quote(wavfilename), shell_quote(oggfilename))\n system(decode_cmd)\n wavsize = 0\n try:\n wavsize = file_size(wavfilename)\n except:\n pass\n if wavsize <= 0:\n stdout.write(\"[FAILED] OGG did not decode to intermediate WAV\\n\\n\")\n return (file_size(oggfilename), 0)\n encode_cmd = \"%s %s %s 2>/dev/null\" % (encode_cmd, wavfilename, shell_quote(mp3filename))\n system(encode_cmd)\n try:\n mp3size = file_size_human(mp3filename)\n except:\n stdout.write(\"[FAILED] OGG decoded but MP3 encoding and/or tagging failed\\n\\n\")\n return (file_size(oggfilename), 0)\n stdout.write(\"(%s)\\n\\n\" % mp3size)\n except Exception, e:\n stdout.write(str(e))\n try:\n unlink(wavfilename)\n except:\n pass\n return (file_size(oggfilename), file_size(mp3filename))",
"def analyze_mp3(mp3filespec):\n \n # Make a temporary working directory for storing the wav file\n # that soundstretch should analyze\n wavfilespec = tempfile.NamedTemporaryFile(suffix='.wav') \n \n # Use lame to make a wav representation of the mp3 file to be analyzed\n wav_command = 'sox %s %s' % (mp3filespec, wavfilespec.name)\n subprocess.call([wav_command], shell=True, stderr=open(os.devnull, 'w'))\n \n # Call soundstretch to analyze the wav file\n bpm_command = 'soundstretch %s -bpm' % wavfilespec.name\n p = subprocess.Popen([bpm_command], shell=True,stdout=subprocess.PIPE)\n output = p.communicate()[0]\n \n # Delete temporary working directory and its contents\n #shutil.rmtree(workingdir)\n\n bpm_suggestion = _get_bpm_from_soundstretch(output)\n\n return fit_bpm_in_window(bpm_suggestion)",
"def encode(self, filename=None, mp3=None):\n if not mp3 and filename.lower().endswith('.wav'):\n mp3 = False\n else:\n mp3 = True\n if mp3:\n foo, tempfilename = tempfile.mkstemp(\".wav\")\n os.close(foo)\n else:\n tempfilename = filename\n fid = open(tempfilename, 'wb')\n # Based on Scipy svn\n # http://projects.scipy.org/pipermail/scipy-svn/2007-August/001189.html\n fid.write('RIFF')\n fid.write(struct.pack('<i', 0)) # write a 0 for length now, we'll go back and add it later\n fid.write('WAVE')\n # fmt chunk\n fid.write('fmt ')\n if self.data.ndim == 1:\n noc = 1\n else:\n noc = self.data.shape[1]\n bits = self.data.dtype.itemsize * 8\n sbytes = self.sampleRate * (bits / 8) * noc\n ba = noc * (bits / 8)\n fid.write(struct.pack('<ihHiiHH', 16, 1, noc, self.sampleRate, sbytes, ba, bits))\n # data chunk\n fid.write('data')\n fid.write(struct.pack('<i', self.data.nbytes))\n self.data.tofile(fid)\n # Determine file size and place it in correct\n # position at start of the file.\n size = fid.tell()\n fid.seek(4)\n fid.write(struct.pack('<i', size - 8))\n fid.close()\n if not mp3:\n return tempfilename\n # now convert it to mp3\n if not filename.lower().endswith('.mp3'):\n filename = filename + '.mp3'\n try:\n bitRate = MP3_BITRATE\n except NameError:\n bitRate = 128\n\n try:\n ffmpeg(tempfilename, filename, bitRate=bitRate, verbose=self.verbose)\n except:\n log.warning(\"Error converting from %s to %s\", tempfilename, filename)\n\n if tempfilename != filename:\n if self.verbose:\n log.warning(sys.stderr, \"Deleting: %s\", tempfilename)\n os.remove(tempfilename)\n return filename",
"def file_to_mp3(file_name: str) -> str:\n split = file_name.split(\".\")\n split[-1] = \"mp3\"\n output = \".\".join(split)\n\n os.system(f'ffmpeg -y -i \"{file_name}\" -vn -c:a libmp3lame \"{output}\" -hide_banner -loglevel panic')\n os.remove(file_name)\n\n return output",
"def play_mp3(self, raw_audio):\r\n # Save MP3 data to a file\r\n with open(\"files/response.mp3\", 'wb') as f:\r\n f.write(raw_audio)\r\n\r\n \r\n subprocess.call(['amixer', 'sset', 'PCM,0', '90%'])\r\n # Convert mp3 response to wave (pyaudio doesn't work with MP3 files)\r\n sound = AudioSegment.from_mp3(\"files/response.mp3\")\r\n\r\n sound.export(\"files/response.wav\", format=\"wav\")\r\n\r\n # Play a wave file directly\r\n self.play_wav('files/response.wav')",
"def all_wav_to_mp3(self):\n for each_file, artist in self.past_songs_db_data:\n self.convert_wav_to_mp3(each_file)",
"def decode_to_wav(self, filename):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Limit arrays of frequencies and features by an upper and a lower frequency limit. | def limit_by_freq(freq, features, upper_limit, lower_limit=None):
# Copy into arrays, in order to apply mask
freq = np.array(freq, dtype=np.float)
features = np.array(features, dtype=np.float)
# Mask for bottom limit
if lower_limit is not None:
bottom_mask = freq >= lower_limit
features = features[bottom_mask]
freq = freq[bottom_mask]
# Mask for upper limit
upper_mask = freq <= upper_limit
features = features[upper_mask]
freq = freq[upper_mask]
return freq, features | [
"def findMaximal(freqSet):",
"def max_filter(counts, filter_size):\n return maximum_filter(counts, size=(filter_size, 1),\n mode='reflect', origin=(filter_size - 1)//2)",
"def truncate(data,mask,freq,minfreq,maxfreq):\n new_data = []\n new_freq = []\n new_mask = []\n for i in range(0,len(freq)):\n if freq[i]>minfreq:\n if freq[i]<maxfreq:\n new_data.append(data[i])\n new_freq.append(freq[i])\n new_mask.append(mask[i])\n new_data = array(new_data)\n new_mask = array(new_mask)\n new_freq = array(new_freq)\n\n return new_data,new_mask,new_freq",
"def trim_by_maximum(self):\n if self.maximum:\n self.peaks = self.peaks[:int(self.maximum)]",
"def __restrict_features_freq(self, min_count=1):\n col_idx = self.X.tocsc().nonzero()[1]\n counter = np.bincount(col_idx)\n print(\"Counter:\", len(counter))\n include_cols = np.where(counter > min_count)[0]\n return include_cols",
"def fit_features(data, max_features):\n ndata = []\n for rec in data:\n rec = list(rec)\n if len(rec) > max_features:\n rec = rec[:max_features]\n elif len(rec) < max_features:\n rec = rec + (max_features - len(rec)) * [0.0]\n ndata.append(rec)\n return np.array(ndata)",
"def _limit_features(self, X, vocabulary, high=None, low=None,\n limit=None):\n if high is None and low is None and limit is None:\n return X, set()\n\n # Calculate a mask based on document frequencies\n \"\"\"\n mask is boolean array [true, true....(word length)...] with which it will filter out the false word\n \"\"\"\n\n # use min_df, max_df to filter word, create mask\n dfs = _document_frequency(X)\n tfs = np.asarray(X.sum(axis=0)).ravel()\n mask = np.ones(len(dfs), dtype=bool)\n if high is not None:\n mask &= dfs <= high\n if low is not None:\n mask &= dfs >= low\n if limit is not None and mask.sum() > limit:\n mask_inds = (-tfs[mask]).argsort()[:limit]\n new_mask = np.zeros(len(dfs), dtype=bool)\n new_mask[np.where(mask)[0][mask_inds]] = True\n mask = new_mask\n\n if self.whitelist:\n # get whitelist words' id in vocabulary\n white_word_ids = list()\n for word in self.whitelist:\n if word in vocabulary.keys():\n wordid = int(vocabulary[word]) # vocabulary is word2id\n white_word_ids.append(wordid)\n\n # change the boolean in mask of whitelist words\n for id in white_word_ids:\n mask[id] = 1\n\n # maps old indices to new, use the mask to filter words in vocabulary\n new_indices = np.cumsum(mask) - 1\n removed_terms = set()\n for term, old_index in list(six.iteritems(vocabulary)):\n if mask[old_index]:\n vocabulary[term] = new_indices[old_index]\n else:\n del vocabulary[term]\n removed_terms.add(term)\n kept_indices = np.where(mask)[0]\n if len(kept_indices) == 0:\n raise ValueError(\"After pruning, no terms remain. Try a lower\"\n \" min_df or a higher max_df.\")\n return X[:, kept_indices], removed_terms # X -- csr_matrix",
"def _cutoff(xdata, ydata, btype, fs, ff):\r\n try:\r\n# print ff\r\n if ff != None:\r\n nPts = int(1./(((xdata.max()-xdata.min())/xdata.shape[0])*(ff/10.)))\r\n else:\r\n nPts = 0\r\n if nPts%2 == 0:\r\n nPts = nPts + 1\r\n if nPts < xdata.shape[0]:\r\n nPts = xdata.shape[0]\r\n# print nPts\r\n window = np.hanning(ydata.shape[0])\r\n freq = FourierFrequency(xdata, nPts)\r\n index = np.argsort(freq)\r\n tdf = FourierTransform(ydata*window, nPts)\r\n tdf = abs(tdf)\r\n pp = _maxima(tdf[index], freq[index], lookahead = 1)\r\n# mm = _minima(tdf[index], freq[index], lookahead=1)\r\n pp, hh = np.array(np.array(pp).T[0]), np.array(np.array(pp).T[1])\r\n# mm = np.array(np.array(mm).T[0])#, np.array(np.array(mm).T[1])\r\n ind = np.where(pp == min(abs(pp)))[0][0]\r\n ind2 = np.where(hh == max(hh[(ind+1):]))[0][0]\r\n for u, i in enumerate(freq):\r\n if i > abs(pp[ind2])*1.5 or i < -abs(pp[ind2])*1.5 or (i < abs(pp[ind2])/2. and i > -abs(pp[ind2])/2.) or (tdf[u] > hh[ind2]*1.05): #(abs(i) < abs(mm[indmin])) or \r\n tdf[u] = 0.\r\n def lor2(x, A0, x0, gamma0):\r\n return A0*(1/np.pi)*(gamma0/2)/((x-x0)**2+(gamma0/2)**2)+A0*(1/np.pi)*(gamma0/2)/((x+x0)**2+(gamma0/2)**2)\r\n lmod2 = lmf.Model(lor2)\r\n lmod2.make_params()\r\n lmod2.set_param_hint('A0', value=max(tdf), min=max(tdf)/1000.)\r\n lmod2.set_param_hint('x0', value=abs(pp[ind2]), min=0.)\r\n lmod2.set_param_hint('gamma0', value=1., min=0.)\r\n result2 = lmod2.fit(tdf[index], x=freq[index])\r\n# print result2.values.get('x0'), result2.values.get('gamma0')\r\n if btype=='high':\r\n if result2.values.get('x0')-result2.values.get('gamma0') > 0.:\r\n# print \"frequency: \", result2.values.get('x0')-result2.values.get('gamma0')\r\n if hh[ind2] != max(hh[(ind+1):]):\r\n print \"False\", \" maximum\", \"\\n\", \"\\n\", \"\\n\"\r\n return result2.values.get('x0')-result2.values.get('gamma0')\r\n else:\r\n# print \"failed: 0\"\r\n return 0.\r\n elif btype=='low':\r\n return result2.values.get('x0')+result2.values.get('gamma0')\r\n except Exception:\r\n pass\r\n finally:\r\n pass",
"def _top_features(self, **kwargs):\r\n file = kwargs.get('file')\r\n num = kwargs.get('num')\r\n min_frequency = kwargs.get('min_frequency')\r\n if num is None and min_frequency is None:\r\n num = 1000\r\n features = []\r\n with open(file, 'r') as fh:\r\n for row in fh:\r\n feature, frequency = row.strip().split('\\t')\r\n frequency = int(frequency)\r\n if ((num is not None and len(features) == num) or\r\n (min_frequency is not None and frequency < min_frequency)):\r\n break\r\n features.append((feature, frequency))\r\n return features",
"def max_frequency(sig, FS):\n\n f, fs = plotfft(sig, FS)\n t = np.cumsum(fs)\n\n try:\n ind_mag = np.where(t > t[-1]*0.95)[0][0]\n except:\n ind_mag = np.argmax(t)\n f_max = f[ind_mag]\n\n return f_max",
"def max_frequency(document):\n max_f = 1\n for w in set(document):\n max_f = max(max_f, augmented_term_frequency(w, document))\n return max_f",
"def filter_freq(self,nu_min=None,nu_max=None,exclude=False):\n \n msk1=np.ones(self.data['nu_data'].size, dtype=bool)\n msk2=np.ones(self.data['nu_data'].size, dtype=bool)\n if nu_min is not None:\n msk1= self.data['nu_data'] >= nu_min\n \n if nu_max is not None:\n msk2= self.data['nu_data'] <= nu_max\n \n msk=msk1*msk2\n \n if exclude==True:\n msk=np.invert(msk)\n self.data=self.data[msk]\n\n print (\"---> data len after filtering=%d\" % len(self.data))",
"def filtermax(f, maxfiltsize=10):\n # Maximum filter to ignore deeper fluxes of absorption lines\n f_maxfilt = maximum_filter1d(f, size=maxfiltsize)\n # Find points selected by maximum filter\n idxmax = np.array([i for i in range(len(f)) if f[i]-f_maxfilt[i] == 0.])\n\n return f_maxfilt, idxmax",
"def FoldChangeFilterBasedOnMaxFC(X, data_headers, cutoff=0.5):\n XX = Linear(X.copy(), data_headers)\n X_ToMin = XX[data_headers] / XX[data_headers].min(axis=0)\n Xidx = np.any(X_ToMin.values >= X_ToMin.max().values * cutoff, axis=1)\n return X.iloc[Xidx, :]",
"def prune(self, min_freq):\n new_forward = {}\n new_backward = [\"OOV\"]\n new_freq = [0]\n j = 1\n for i in xrange(1,len(self.backward)):\n f = self.backward[i]\n if self.freq[i] >= min_freq:\n new_forward[f] = j\n new_backward.append(f)\n new_freq.append(self.freq[i])\n j += 1\n self.forward = new_forward\n self.backward = new_backward\n self.freq = new_freq\n self.counter = j",
"def rough_frequency_samples(m1, m2, flow, fmax, df_min):\n kmin = int(flow / df_min)\n kmax = int(fmax / df_min)\n k = kmin\n ksamples = []\n while k < kmax:\n ksamples.append(k)\n k += int(1.0 / rough_time_estimate(m1, m2, k * df_min) / df_min)\n ksamples.append(kmax)\n return numpy.array(ksamples)",
"def max_output_buffer(self, *args, **kwargs):\n return _ncofdm_swig.FreqOffCalc_sptr_max_output_buffer(self, *args, **kwargs)",
"def limit(self, X, maxVal):\n for vec in X:\n self.limitVec(vec, maxVal)",
"def maximums(self):\n # The maximums are at the peaks position but might be swallowed by \n # other peaks\n maximums = list()\n for func, pos, height, width in zip(self.peaks_function,\n self.peaks_position,\n self.peaks_height,\n self.peaks_width):\n val = func(pos, pos, height, width)\n if val >= self.__call__(pos, count=False)[0]:\n maximums.append((val, pos))\n return sorted(maximums, reverse=True)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if the song has already been transformed into a wav file in the temp folder. | def check_wav(song, source_folder, temp_folder, encoder='mpg123'):
# Name of files
song_name, extension = os.path.splitext(song)
mp3_file = os.path.join(source_folder, song)
if '.wav' != extension:
wav_file = os.path.join(temp_folder, song_name + '.wav')
try:
if not os.path.isfile(wav_file):
mp3_to_wav(
mp3_file=mp3_file,
wav_file=wav_file,
encoder=encoder)
else:
pass
except MemoryError:
logger.error('MemoryError: %s MP3 couldn\'t be transformed into WAV', song_name)
else: # Already a wav file
copyfile(mp3_file, os.path.join(temp_folder, song_name)) | [
"def check_exists(music_file, raw_song, meta_tags):\n log.debug('Cleaning any temp files and checking '\n 'if \"{}\" already exists'.format(music_file))\n songs = os.listdir(const.args.folder)\n for song in songs:\n if song.endswith('.temp'):\n os.remove(os.path.join(const.args.folder, song))\n continue\n # check if a song with the same name is already present in the given folder\n if os.path.splitext(song)[0] == music_file:\n log.debug('Found an already existing song: \"{}\"'.format(song))\n if internals.is_spotify(raw_song):\n # check if the already downloaded song has correct metadata\n # if not, remove it and download again without prompt\n already_tagged = metadata.compare(os.path.join(const.args.folder, song),\n meta_tags)\n log.debug('Checking if it is already tagged correctly? {}',\n already_tagged)\n if not already_tagged:\n os.remove(os.path.join(const.args.folder, song))\n return False\n\n log.warning('\"{}\" already exists'.format(song))\n if const.args.overwrite == 'prompt':\n log.info('\"{}\" has already been downloaded. '\n 'Re-download? (y/N): '.format(song))\n prompt = input('> ')\n if prompt.lower() == 'y':\n os.remove(os.path.join(const.args.folder, song))\n return False\n else:\n return True\n elif const.args.overwrite == 'force':\n os.remove(os.path.join(const.args.folder, song))\n log.info('Overwriting \"{}\"'.format(song))\n return False\n elif const.args.overwrite == 'skip':\n log.info('Skipping \"{}\"'.format(song))\n return True\n return False",
"def isTemp(self,object):\n return (object in self.tempObjects)",
"def previous_song(self) -> bool:\n if self._current_song_id < 1:\n return False\n else:\n self._current_song_id -= 1\n return True",
"def test_transform_track_album_based_on_album_title_no_match_album(self):\n track = Track(artist='Artist', album='Album 3', title='Title',\n tracknum=1, seconds=60)\n transform = Transform(1,\n cond_album=True, pattern_album = 'Album',\n cond_title=True, pattern_title='Title',\n change_album=True, to_album='Album 2')\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.album, 'Album 3')\n self.assertEqual(track.title, 'Title')\n self.assertEqual(track.transformed, False)",
"def song_check(song):\n msg = choose_song(song)\n return msg != ERROR",
"def test_transform_track_album_based_on_album_title_no_match_title(self):\n track = Track(artist='Artist', album='Album', title='Title 2',\n tracknum=1, seconds=60)\n transform = Transform(1,\n cond_album=True, pattern_album = 'Album',\n cond_title=True, pattern_title='Title',\n change_album=True, to_album='Album 2')\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.album, 'Album')\n self.assertEqual(track.title, 'Title 2')\n self.assertEqual(track.transformed, False)",
"def test_apply_transform_single_album_no_match(self):\n album = Album(artist='Artist', album='Album',\n totaltracks=1, totalseconds=120)\n pk = album.insert(self.app.db, self.app.curs)\n tf_pk = self.add_transform(cond_artist=True, pattern_artist='Foo',\n change_artist=True, to_artist='Bar')\n self.assertNotEqual(tf_pk, 0)\n self.app.load_data()\n\n row = self.get_album_by_id(pk)\n self.assertEqual(row['lasttransform'], 0)\n\n for line in self.app.apply_transforms():\n pass\n\n row = self.get_album_by_id(pk)\n self.assertEqual(row['lasttransform'], tf_pk)",
"def test_single_track_with_transform(self):\n tf_pk = self.add_transform(cond_artist=True, pattern_artist='Artist',\n change_artist=True, to_artist='Artist 2')\n self.app.load_data()\n\n self.add_mp3()\n (added, status) = self.app.add_album(self.filenames)\n self.assertEqual(added, True)\n self.assertEqual(self.get_album_count(), 1)\n album = Album.get_by_artist_album(self.app.curs, 'Artist 2', 'Album')\n self.assertNotEqual(album, None)\n self.assertEqual(album.artist, 'Artist 2')\n self.assertEqual(album.album, 'Album')\n self.assertEqual(album.album_type, 'album')\n self.assertEqual(album.totalseconds, 2)\n self.assertEqual(album.totaltracks, 1)\n self.assertEqual(album.last_transform, tf_pk)",
"def test_apply_transform_single_track_no_match(self):\n track = Track(artist='Artist', title='Title')\n pk = track.insert(self.app.db,\n self.app.curs,\n 'xmms',\n datetime.datetime.now())\n tf_pk = self.add_transform(cond_artist=True, pattern_artist='Foo',\n change_artist=True, to_artist='Bar')\n self.assertNotEqual(tf_pk, 0)\n self.app.load_data()\n\n row = self.get_track_by_id(pk)\n self.assertEqual(row['lasttransform'], 0)\n\n for line in self.app.apply_transforms():\n pass\n\n row = self.get_track_by_id(pk)\n self.assertEqual(row['lasttransform'], tf_pk)",
"def test_repair_file(self):\n\n audio_path = self.converter.audio\n self.assertTrue(audio_path.endswith('.wav'))\n # Make sure it can be loaded in moviepy\n clip = AudioFileClip(audio_path)",
"def test_transform_track_album_based_on_artist_album_no_match_album(self):\n track = Track(artist='Artist', album='Album 3', title='Title',\n tracknum=1, seconds=60)\n transform = Transform(1,\n cond_artist=True, pattern_artist='Artist',\n cond_album=True, change_album=True,\n pattern_album='Album', to_album='Album 2')\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.album, 'Album 3')\n self.assertEqual(track.title, 'Title')\n self.assertEqual(track.transformed, False)",
"def __rh_song(self,row):\r\n return self.data[row].song != None",
"def check_exists(music_file, raw_song, islist=True):\n songs = os.listdir(args.folder)\n for song in songs:\n if song.endswith('.temp'):\n os.remove(os.path.join(args.folder, song))\n continue\n # check if any song with similar name is already present in the given folder\n file_name = misc.sanitize_title(music_file)\n if song.startswith(file_name):\n # check if the already downloaded song has correct metadata\n already_tagged = metadata.compare(os.path.join(args.folder, song), generate_metadata(raw_song))\n\n # if not, remove it and download again without prompt\n if misc.is_spotify(raw_song) and not already_tagged:\n os.remove(os.path.join(args.folder, song))\n return False\n\n # do not prompt and skip the current song\n # if already downloaded when using list\n if islist:\n print('Song already exists')\n return True\n # if downloading only single song, prompt to re-download\n else:\n prompt = input('Song with same name has already been downloaded. '\n 'Re-download? (y/n): ').lower()\n if prompt == 'y':\n os.remove(os.path.join(args.folder, song))\n return False\n else:\n return True\n return False",
"def cut_and_eq(song_name):\r\n print(\"[{}] STATUS: Loading...\".format(song_name))\r\n sound_file = AudioSegment.from_mp3(song_name)\r\n print(\"[{}] STATUS: Loaded, now processing...\".format(song_name))\r\n sound_file = match_target_amplitude(sound_file, TARGET_VOLUME) # Amplify beforehand to prevent over-zealous cutting\r\n chunks = split_on_silence(sound_file, SILENCE_CUTOFF, THRESHOLD, keep_silence=ACCEPTABLE_SILENCE)\r\n\r\n if len(chunks) > 1:\r\n print(\"[{}] ERROR: Too many chunks ({}) cannot export\".format(song_name, len(chunks)))\r\n return song_name\r\n else:\r\n output = AudioSegment.empty()\r\n for chunk in chunks:\r\n output += chunk\r\n\r\n new_name = song_name.split(\".\")[0]\r\n print(\"[{}] STATUS: Processed, now exporting...\".format(song_name))\r\n metadata = mediainfo(song_name).get('TAG',{})\r\n output.export(OUTPUT_NAME_FORMAT.format(new_name), format=OUTPUT_FORMAT, tags=metadata)\r\n print(\"[{}] STATUS: Exported to {} - cleaned.{}\".format(song_name, new_name, OUTPUT_FORMAT))\r\n return None",
"def test_transform_track_title_based_on_album_title_no_match_title(self):\n track = Track(artist='Artist', album='Album', title='Title 3',\n tracknum=1, seconds=60)\n transform = Transform(1,\n cond_album=True, pattern_album = 'Album',\n cond_title=True, pattern_title='Title',\n change_title=True, to_title='Title 2')\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.album, 'Album')\n self.assertEqual(track.title, 'Title 3')\n self.assertEqual(track.transformed, False)",
"def _do_move(self, artist, album, song):\n try:\n move_to = \"{0}{1}/{2}/\".format(self.dupe_dir, \n artist, album)\n if not os.path.exists(move_to):\n os.makedirs(move_to)\n \n shutil.move(song['path'], move_to)\n self.moved.append(song)\n return 1\n except:\n self.logger.error(\"Could not move file: {0}\".format(str(song['path'])))\n return 0",
"def test_transform_track_artist_based_on_artist_album_no_match_album(self):\n track = Track(artist='Artist', album='Album 2', title='Title',\n tracknum=1, seconds=60)\n transform = Transform(1,\n cond_artist=True, pattern_artist = 'Artist',\n cond_album=True, pattern_album='Album',\n change_artist=True, to_artist='Artist 2')\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.album, 'Album 2')\n self.assertEqual(track.title, 'Title')\n self.assertEqual(track.transformed, False)",
"def test_apply_transform_single_track_match(self):\n track = Track(artist='Artist', title='Title')\n pk = track.insert(self.app.db,\n self.app.curs,\n 'xmms',\n datetime.datetime.now())\n tf_pk = self.add_transform(cond_artist=True, pattern_artist='Artist',\n change_artist=True, to_artist='New Artist')\n self.assertNotEqual(tf_pk, 0)\n self.app.load_data()\n\n row = self.get_track_by_id(pk)\n self.assertEqual(row['lasttransform'], 0)\n\n for line in self.app.apply_transforms():\n pass\n\n row = self.get_track_by_id(pk)\n self.assertEqual(row['lasttransform'], tf_pk)\n self.assertEqual(row['artist'], 'New Artist')",
"def test_get_all_need_transform_one_track_another_already_applied(self):\n track = Track(artist='Artist', album='Album', title='Title', last_transform=1)\n pk = track.insert(self.app.db,\n self.app.curs,\n 'xmms',\n datetime.datetime.now())\n track = Track(artist='Artist', album='Album', title='Title')\n pk = track.insert(self.app.db,\n self.app.curs,\n 'xmms',\n datetime.datetime.now())\n self.assertEqual(self.get_track_count(), 2)\n\n tracks = Track.get_all_need_transform(self.app.curs, 1)\n self.assertEqual(len(tracks), 1)\n self.assertEqual(tracks[0].pk, pk)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the free space in gigabytes. | def get_free_gb():
mem_info = get_mem_info()
free_gb = float(mem_info['MemAvailable'].value) / 10**6
return free_gb | [
"def free_space(self):\n if not self.partitions:\n free = self.size - self.partition_start\n else:\n free = self.size - self.current_usage\n\n if self.type == GPT:\n free -= Size(GPT_BACKUP_SIZE)\n else:\n free -= Size(1)\n\n log.debug('Free space: %d' % free.bytes)\n return free",
"def get_cgts_vg_free_space():\n\n try:\n # Determine space in cgts-vg in GiB\n vg_free_str = subprocess.check_output( # pylint: disable=not-callable\n ['vgdisplay', '-C', '--noheadings', '--nosuffix',\n '-o', 'vg_free', '--units', 'g', 'cgts-vg'],\n close_fds=True, universal_newlines=True).rstrip()\n cgts_vg_free = int(float(vg_free_str))\n except subprocess.CalledProcessError:\n LOG.error(\"Command vgdisplay failed\")\n raise Exception(\"Command vgdisplay failed\")\n\n return cgts_vg_free",
"def _get_free_space(conversion_dir):\n try:\n out = utils.execute('df', '--portability', '--block-size', '1',\n conversion_dir,\n run_as_root=True)[0]\n out = out.splitlines()[1]\n available = int(out.split()[3])\n except Exception:\n msg = _(\"Failed to get the available free space.\")\n LOG.exception(msg)\n raise exception.GutsException(msg)\n\n return available",
"def _get_free_capacity(self):\n\n capacity = np.ones(len(self.grid.T)) * len(self.grid)\n capacity -= np.count_nonzero(self.grid, axis=0)\n return capacity",
"def get_space_used():\n fs.get_space_used()",
"def get_available_space(self):\n return self.maxsize - len(self)",
"def _get_memory_total_free(self):\n return self.__memory_total_free",
"def RAM_free(self):\n free = self.get_value('1.3.6.1.4.1.10002.1.1.1.1.2.0')\n return int(free)",
"def est_free_ram():\n return psutil.virtual_memory().free",
"def freeRAM():\n return truncateFloat(psutil.virtual_memory().available / (1*(10**9)), 2)",
"def get_space_usage(self):\n raise NotImplementedError",
"def tablespace_free(self, name):\n sql = '''select trunc((tablespace_size - used_space) * 8 / 1024 /1024,2) free_size\n from dba_tablespace_usage_metrics\n where tablespace_name = '{0}' '''.format(name)\n self.cur.execute(sql)\n res = self.cur.fetchall()\n for i in res:\n print i[0]",
"def get_vram_free():\n stats = GPUStats()\n vram = stats.get_card_most_free()\n logger.verbose(\"Using device %s with %sMB free of %sMB\",\n vram[\"device\"],\n int(vram[\"free\"]),\n int(vram[\"total\"]))\n return int(vram[\"card_id\"]), int(vram[\"free\"]), int(vram[\"total\"])",
"def get_vram_free(self):\n stats = GPUStats()\n vram = stats.get_card_most_free()\n if self.verbose:\n print(\"Using device {} with {}MB free of {}MB\".format(\n vram[\"device\"],\n int(vram[\"free\"]),\n int(vram[\"total\"])))\n return int(vram[\"card_id\"]), int(vram[\"free\"]), int(vram[\"total\"])",
"def total_reserved_space(self):\n return self._total_reserved_space",
"def _get_free_space(folder):\n if platform.system() == 'Windows':\n free_bytes = ctypes.c_ulonglong(0)\n ctypes.windll.kernel32.GetDiskFreeSpaceExW(\n ctypes.c_wchar_p(folder), None, None, ctypes.pointer(free_bytes)\n )\n return free_bytes.value\n else:\n return os.statvfs(folder).f_bfree * os.statvfs(folder).f_frsize",
"def get_free(self):\n return int(self.free_cores)",
"def used_storage_space(self):\n return self._used_storage_space",
"def fs_total_reserved_space(self):\n return self._fs_total_reserved_space"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
True if it can't run, False otherwise. The condition is the amount of RAM available in GB. | def ram_condition(min_gb=3):
return get_free_gb() < min_gb | [
"def check_if_sufficient_memory():\n percent_memory = psutil.virtual_memory().percent\n if percent_memory > 75:\n raise ValueError('Please use a device with more CPU ram or a smaller dataset')",
"def memory_check(self) -> bool:\n available_bytes = psutil.virtual_memory().available\n cur_rss = self.mem_status.memory_info().rss\n\n if cur_rss < self.init_mem_rss:\n self.init_mem_rss = cur_rss\n estimated_model_size_mb = (cur_rss - self.init_mem_rss) >> 20\n available_mb = available_bytes >> 20\n model_size_memory_ratio = estimated_model_size_mb / available_mb\n\n early_stop = False\n if model_size_memory_ratio > 1.0:\n logger.warning(f'Warning: Large model size may cause OOM error if training continues')\n early_stop = True\n\n if available_mb < 512: # Less than 500 MB\n logger.warning(f'Warning: Low available memory may cause OOM error if training continues')\n early_stop = True\n\n if early_stop:\n logger.warning('Warning: Early stopped model prior to optimal result to avoid OOM error. '\n 'Please increase available memory to avoid subpar model quality.')\n logger.warning(f'Available Memory: {available_mb} MB, Estimated Model size: {estimated_model_size_mb} MB')\n return True\n elif self.verbose or (model_size_memory_ratio > 0.25):\n logging.debug(f'Available Memory: {available_mb} MB, Estimated Model size: {estimated_model_size_mb} MB')\n return False",
"def check_enough_memory(input_bytes, factor=1, verbose=True):\n mem = virtual_memory()\n load = factor*input_bytes\n if load>mem.total:\n return False # total physical memory is smaller than input\n if verbose: \n print(\"System available memory = {:} bytes\".format(mem.available))\n if load>=mem.available:\n return False # available memory is not enough\n else:\n return True # enough available memory",
"def has_enough_memory(self, required_memory):\r\n if self._memory >= required_memory:\r\n return True\r\n return False",
"def isRestrictToExecuteMemory(program: ghidra.program.model.listing.Program) -> bool:\n ...",
"def isMemSufficient():\n logging.info(f\"ๆฃๆฅๅ
ๅญๆฏๅฆๅ
่ถณ\")\n interface = dbus_interface()\n result = interface.IsMemSufficient()\n logging.info(f'result: {result}')\n if isinstance(result, dbus.Boolean):\n logging.info(f\"ๆฃๆฅๅ
ๅญๆฏๅฆๅ
่ถณ: {bool(result)}\")\n return True\n else:\n logging.info(f\"IsMemSufficient ่ฟๅ็ๆฐๆฎ็ฑปๅไธๆฏ้ขๆ็bus.Boolean๏ผๅฎ้
็ฑปๅไธบ{type(result)}\")\n return False",
"def device_out_of_memory(self) -> bool:\n return pulumi.get(self, \"device_out_of_memory\")",
"def test_mem_available():\n result = _run_metric('mem_available')\n assert result.exit_code == 0",
"def __isMemoryAvailable(self) :\n #mem_free = psutil.phymem_usage()[2]\n\n #print \"Memory free = \" + str(mem_free)\n success = False\n found = False\n almost_size = 0\n size = 0\n self.free = 0\n line = \"\"\n freeMagnitude = None\n\n #####\n # Set up and run the command\n cmd = [\"/usr/bin/top\", \"-l\", \"1\"]\n\n proc = Popen(cmd, stdout=PIPE, stderr=PIPE)\n\n while True:\n line = proc.stdout.readline().strip()\n #####\n # Split on spaces\n line = line.split()\n #####\n # Get the last item in the list\n found = line[-1]\n almost_size = line[:-1]\n size = almost_size[-1]\n\n found = str(found).strip()\n #almost_size = almost_size.strip()\n size = str(size).strip()\n\n self.logger.log(lp.INFO, \"size: \" + str(size))\n self.logger.log(lp.INFO, \"found: \" + str(found))\n\n if re.search(\"unused\", found) or re.search(\"free\", found):\n #####\n # Found the data we wanted, stop the search.\n break\n proc.kill()\n\n #####\n # Find the numerical value and magnitute of the ramdisk\n if size:\n sizeCompile = re.compile(\"(\\d+)(\\w+)\")\n\n split_size = sizeCompile.search(size)\n freeNumber = split_size.group(1)\n freeMagnitude = split_size.group(2)\n \n freeNumber = str(freeNumber).strip()\n freeMagnitude = str(freeMagnitude).strip()\n\n if re.match(\"^\\d+$\", freeNumber.strip()):\n if re.match(\"^\\w$\", freeMagnitude.strip()):\n if freeMagnitude:\n #####\n # Calculate the size of the free memory in Megabytes\n if re.search(\"G\", freeMagnitude.strip()):\n self.free = 1024 * int(freeNumber)\n self.free = str(self.free)\n elif re.search(\"M\", freeMagnitude.strip()):\n self.free = freeNumber\n self.logger.log(lp.DEBUG, \"free: \" + str(self.free))\n self.logger.log(lp.DEBUG, \"Size requested: \" + str(self.diskSize))\n if int(self.free) > int(float(self.diskSize)/(2*1024)):\n success = True\n print(str(self.free))\n print(str(success))\n return success",
"def ram_sanity_check(array=None):\n cost = est_free_ram() - est_array_size(array)\n return {\"available\": bool(cost > 0), \"bytes\": int(cost)}",
"def is_over_threshold(self):\n return self.get_memory_used() > self.memory_threshold",
"def hasmem(mem):\n nonlocal state\n if mem <= state[MEM]:\n return True\n else:\n state[STATUS] = OOM\n return False",
"def service_fits(self, service: Service) -> bool:\n return service.resource_reservation.memory.min < self.node_free_memory()",
"def checkLiveMigrateMemory(self):\n overhead_kb = 0\n if arch.type == \"x86\":\n # 1MB per vcpu plus 4Kib/Mib of RAM. This is higher than \n # the minimum that Xen would allocate if no value were given.\n overhead_kb = self.info['VCPUs_max'] * 1024 + \\\n (self.info['memory_static_max'] / 1024 / 1024) * 4\n overhead_kb = ((overhead_kb + 1023) / 1024) * 1024\n # The domain might already have some shadow memory\n overhead_kb -= xc.shadow_mem_control(self.domid) * 1024\n if overhead_kb > 0:\n balloon.free(overhead_kb, self)",
"def ready_to_fire(self):\n if self.state-self.memory > self.threshold:\n return True",
"def mem_avail():\n return psutil.virtual_memory().available",
"def testExcessiveRamUsage(self):\n c = Simulation()\n c.set_simulation_parameters(\n seed=1,\n task=36,\n output_directory=\"output\",\n min_speciation_rate=0.5,\n sigma=2,\n tau=2,\n deme=100000000000,\n sample_size=0.1,\n max_time=10,\n )\n c.set_map_files(sample_file=\"sample/large_mask.tif\", fine_file=\"sample/large_fine.tif\")\n with self.assertRaises(MemoryError):\n c.optimise_ram(ram_limit=16)",
"def is_out_of_memory(self):\n\n return self._state == \"OUT_OF_MEMORY\"",
"def install_check_memory(self, syscfg):\n if not syscfg.is_enough_ram():\n total_mem = syscfg.get_total_usable_mem()\n print('\\nTotal memory in the system is low: %d MB, installation requires at least 2GB'\n % int(math.ceil(total_mem/1024/1024)))\n\n print('New swap file will be installed in /var')\n print('It will take approximately 2 minutes')\n code, swap_name, swap_size = syscfg.create_swap()\n if code == 0:\n print('\\nNew swap file was created %s %d MB and activated'\n % (swap_name,int(math.ceil(total_mem/1024/1024))))\n else:\n print('\\nSwap file could not be created. Please, inspect the problem and try again')\n return self.return_code(1)\n\n # Recheck\n if not syscfg.is_enough_ram():\n print('Error: still not enough memory. Please, resolve the issue and try again')\n return self.return_code(1)\n print('')\n return 0"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The camera that took the image | def camera(self):
return self.__camera | [
"def camera(self):\r\n return self.__camera",
"def camera(self):\n return self._camera",
"def get_camera(self):\n return self._camera",
"def query_camera(self):\n ok, orig_pic = self.vs.read() # Read video stream\n if ok: # If no errors\n orig_pic = imutils.rotate(orig_pic, angle=self.camera_rot)\n curr_pic = imutils.resize(orig_pic, width=self.image_width)\n return curr_pic, orig_pic\n else:\n return None, None",
"def get_camera_information(self):\n\n if len(pm.ls('render_cam'))>0:\n camera=pm.ls('render_cam')[0]\n else:\n camera=pm.ls('persp')[0]\n\n return camera, camera.depthOfField.get()",
"def camera_entity(self):\n return self._camera",
"def get_image(self):\n return self.camera.getImage()",
"def camera_id(self):\n return self._camera_id",
"def get_camera_parameters(self):\n\t\treturn self._camera_parameters",
"def rightCam(viewCam):\n\n pass",
"def get_image(self, camera):\n # read is the easiest way to get a full image out of a VideoCapture object.\n _, im = camera.read()\n return im",
"def snapshot(self):\n return self.camera.snapshot(0)",
"def capture_image(self) -> Image.Image:\n # get the PIL image from the camera\n return self.camera.capture_image()",
"def camera_info(self):\n info= self.client.simGetCameraInfo(self.camera_name)\n\n return print(info)",
"def read_camera(self):\n _, frame = self.camera.read()\n return self.mirror(frame)",
"def create_camera(self):\n pass",
"def getActiveCamera(self):\n if not self.proxy:\n self.proxy = self.session.service(\"ALVisionRecognition\")\n return self.proxy.getActiveCamera()",
"def get_camera_parameters(self):\n return self._camera_parameters",
"def camera_entity(self):\n return self._camera_entity_id"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
r""" Property for the exterior orientation parameters | def exteriorOrientationParameters(self):
return self.__exteriorOrientationParameters | [
"def exteriorOrientationParameters(self):\r\n return self.__exteriorOrientationParameters",
"def orient(self):\n return self.__ph.get('orient', PH_ORIENT_HORZ)",
"def orientation(self, v=None):\n if v is None:\n v = self._DEFAULT_ORIENTATION\n if isinstance(v, str):\n v = v.lower()\n v = {'horizontal': 'h', 0: 'h', 'left-to-right': 'h',\n 'vertical': 'v', 1: 'v', 'top-to-bottom': 'v',\n 'right-to-left': 'hr', 'bottom-to-top': 'vr'}.get(v, v)\n if v not in ('h', 'v', 'hr', 'vr'):\n raise ValueError('%s.orientation got unknown value %r' % (self.id, v))\n return v",
"def OrientationAngle(self):\r\n return self.OrientationAngle",
"def _set_orientation(self, orientation):\n self.x = orientation['x']\n self.y = orientation['y']\n self.z = orientation['z']\n self.a = orientation['a']\n self.b = orientation['b']\n self.g = orientation['g']\n return",
"def orientation(self):\n o_tmp = self._orientation + 180\n o_tmp %= 360\n return o_tmp - 180",
"def orientation(self):\n return self.slider.orientation()",
"def define_orientation_matrix(self):\n from lmfit import Parameters\n p = Parameters()\n for i in range(3):\n for j in range(3):\n p.add('U%d%d' % (i, j), self.Umat[i, j])\n self.init_p = self.Umat\n return p",
"def give_orientation(pose, orr_array):\n pose.orientation.x = orr_array[0]\n pose.orientation.y = orr_array[1]\n pose.orientation.z = orr_array[2]\n pose.orientation.w = orr_array[3]",
"def orientation(self) -> str:\n return self._origin.orientation",
"def orientation(self) -> str:\n return self._widget._mgui_get_orientation()",
"def setup_orientation_annotation(self) :\n \n # Anatomical directions in LPS convention, numpy order\n directions_anatomical = {\n \"L\" : (0,0,+1),\n \"R\" : (0,0,-1),\n \"P\" : (0,+1,0),\n \"A\" : (0,-1,0),\n \"I\" : (-1,0,0),\n \"S\" : (+1,0,0),\n }\n \n # Index directions, numpy order\n directions_index = {\n \"+x\" : (0,0,+1),\n \"-x\" : (0,0,-1),\n \"+y\" : (0,+1,0),\n \"-y\" : (0,-1,0),\n \"+z\" : (-1,0,0),\n \"-z\" : (+1,0,0),\n }\n \n directions = (directions_anatomical \n if self.display_coordinates in [\"physical\", \"nearest_axis_aligned\"]\n else directions_index)\n \n # Window locations\n locations = {\n \"up\" : (1,0),\n \"down\" : (-1,0),\n \"left\" : (0,-1),\n \"right\" : (0,1)\n }\n \n for location, p in locations.items() :\n matrix = self._3d_world_to_slice\n direction = numpy.dot(self._3d_slice_to_world, numpy.hstack((0, p)))\n \n # Find closest in-slice direction based on dot product\n closest = None\n max_distance = -1\n for name, d in directions.items() :\n distance = numpy.dot(d, direction)\n if distance > max_distance :\n max_distance = distance\n closest = name\n \n # Set text\n index = self._orientation_annotation_index[location]\n self._orientation_annotation.SetText(index, closest)",
"def orientation(self) -> int:\n return self._prefs.get(PREF_ORIENTATION, 1)",
"def get_orientation(self):\n\n if (len(self.orientations) > 1 and\n self.orientations[-1]==self.orientations[-2]): \n orientation = not self.orientations[-1]\n else:\n orientation = random.choice((HORIZONTAL, VERTICAL))\n self.orientations.append(orientation)\n return orientation",
"def exterior_angle(self):\n n = self.__getitem__(2)\n return 2*S.Pi/n",
"def base_orientation_quaternion(self):\n raise NotImplementedError('Not yet implemented!')",
"def rotation_mode():\r\n pass",
"def PerspectiveCenter(self):\r\n return self.exteriorOrientationParameters[0:3]",
"def interior_angle(self):\n if self.interior_angle_l is not None:\n return self.interior_angle_l\n else:\n self.interior_angle_l = ((self.vert_count - 2)*180)/math.pi\n return self.interior_angle_l"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The rotation matrix of the image. Relates to the exterior orientation | def rotationMatrix(self):
R = Compute3DRotationMatrix(self.exteriorOrientationParameters[3], self.exteriorOrientationParameters[4],
self.exteriorOrientationParameters[5])
return R | [
"def rotation_matrix(self):\n return self.affine_matrix[0:3][:, 0:3]",
"def rotation(self):\n\t\treturn self.piv.a.rotate.v",
"def matrix(self):\n return self._rotation",
"def get_rotation_as_rotation_mat(self):\n return self._rotation_mat",
"def rotation(self):\n return self.transform.getRotation() + [0]",
"def rotation_90(image):\n image_rotated = flip_h(image)\n image_rotated = transpose(image_rotated)\n\n return image_rotated",
"def rotation(self):\n return self._rotation * 180. / np.pi",
"def get_rotation():\n return _rotation * 90",
"def _image_rotation(img:np.ndarray, angle:Real, num:int, verbose:int=0) -> np.ndarray:\n l_rotated = []\n nrows,ncols = img.shape[:2]\n _angle = abs(angle)\n angles = uniform(-_angle, _angle, num)\n\n if verbose >= 1:\n print(f\"angles = {angles}\")\n \n for a in angles:\n M = cv2.getRotationMatrix2D((ncols/2,nrows/2),a,1)\n l_rotated.append(cv2.warpAffine(img,M,(ncols,nrows)))\n \n return l_rotated",
"def rotation(self):\n return self.eman.component_for_entity(self.e, Movement).angle",
"def rotation (self):\n return self.eman.component_for_entity (self.e, Movement).angle",
"def rotation_angle(self):\n return self.container['rotation_angle']",
"def rotate(mat,angle):\n return np.dot(Mueller.rotator(angle), np.dot(mat, Mueller.rotator(-angle)))",
"def rotation(self) -> CameraRotationType:\n return self._rotation",
"def physical_rotation(self):\n if getattr(self, '_body', None) is not None:\n return self._body.angle\n\n return -np.deg2rad(self.rotation)",
"def camera_rotation(self) -> CameraRotationType:\n return self._rotation",
"def rotation(self) -> float:\n xs, ys = self.xcoords.data, self.ycoords.data\n rot = 0\n if xs.ndim == 2:\n ddx1 = xs[0, -1] - xs[0, 0]\n ddy1 = ys[0, -1] - ys[0, 0]\n if not np.isclose(ddx1, 0):\n rot = math.degrees(math.atan(ddy1 / ddx1))\n else:\n rot = -90\n if ddx1 < 0:\n rot = 180 + rot\n elif ddy1 < 0:\n rot = 360 + rot\n return rot",
"def rotate(self):\n val = None\n try:\n \"\"\"Get rotation tags\"\"\"\n f = open(self._name, 'rb')\n tags = exifread.process_file(f)\n f.close()\n orientation = tags[\"Image Orientation\"]\n val = orientation.values\n\n except:\n return True\n\n if 3 in val:\n rotation = 180\n\n elif 6 in val:\n rotation = 270\n\n elif 8 in val:\n rotation = 90\n\n else:\n rotation = 0\n\n self._image = pygame.transform.rotate(self._image, rotation)",
"def rotate(mat,angle):\n return np.dot(Jones.rotator(angle), np.dot(mat, Jones.rotator(-angle)))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
r""" Compute inner orientation parameters | def ComputeInnerOrientation(self, imagePoints):
# implementing observation vectors
imagePoints = imagePoints.reshape(np.size(imagePoints), 1)
fMarks = self.camera.fiducialMarks.reshape(np.size(self.camera.fiducialMarks), 1)
n = int(len(imagePoints)) # number of observations
u = 6 # 6 orientation parameters
A = np.zeros((n, u)) # A matrix (n,u)
j = 0
for i in range(len(imagePoints)):
if i % 2 == 0:
A[i, 0] = 1;
A[i, 1] = 0;
A[i, 2] = fMarks[j];
A[i, 3] = fMarks[j + 1];
A[i, 4] = 0
A[i, 5] = 0
else:
A[i, 0] = 0;
A[i, 1] = 1;
A[i, 2] = 0;
A[i, 3] = 0;
A[i, 4] = fMarks[j];
A[i, 5] = fMarks[j + 1]
j += 2
X = np.dot(la.inv(np.dot(np.transpose(A), A)), np.dot(np.transpose(A), imagePoints))
v = np.dot(A, X) - imagePoints
adjustment_results = {"params": X, "residuals": v, "N": np.dot(np.transpose(A), A)}
self.__innerOrientationParameters = X # updating the inner orientation params
return adjustment_results | [
"def ComputeInverseInnerOrientation(self):\r\n inner = self.__innerOrientationParameters\r\n matrix = np.array([[inner['a1'], inner['a2']], [inner['b1'], inner['b2']]])\r\n # inverse matrix\r\n inv_matrix = np.linalg.inv(matrix)\r\n return {'a0*': -inner['a0'], 'a1*': inv_matrix[0, 0], 'a2*': inv_matrix[0, 1],\r\n 'b0*': -inner['b0'], 'b1*': inv_matrix[1, 0], 'b2*': inv_matrix[1, 1]}",
"def ComputeInnerOrientation(self, imagePoints):\r\n if self.camera.fiducialMarks == 'no fiducials': # case of digital camera\r\n pixel_size = 0.0024 # [mm]\r\n a1 = 1 / pixel_size\r\n b2 = -1 / pixel_size\r\n a2 = 0\r\n b1 = 0\r\n a0 = self.camera.principalPoint[0] / pixel_size\r\n b0 = self.camera.principalPoint[1] / pixel_size\r\n self.__innerOrientationParameters = {'a0': a0, 'a1': a1, 'a2': a2, 'b0': b0, 'b1': b1, 'b2': b2,\r\n 'V': 0, 'sigma0': 0, 'sigmaX': 0}\r\n return {'a0': a0, 'a1': a1, 'a2': a2, 'b0': b0, 'b1': b1, 'b2': b2,\r\n 'V': 0, 'sigma0': 0, 'sigmaX': 0}\r\n else:\r\n\r\n # observation vector\r\n l = np.matrix(imagePoints).flatten('F').T\r\n\r\n # fiducial marks - camera system\r\n fc = self.camera.fiducialMarks\r\n\r\n # A matrix (16X6)\r\n j = len(imagePoints[:, 0])\r\n A = np.zeros((len(l), 6))\r\n for i in range(j):\r\n A[i, 0:3] = np.array([1, fc[i, 0], fc[i, 1]])\r\n A[i + j, 3:] = np.array([1, fc[i, 0], fc[i, 1]])\r\n\r\n # N matrix\r\n N = (A.T).dot(A)\r\n # U vector\r\n U = (A.T).dot(l)\r\n # adjusted variables\r\n X = (np.linalg.inv(N)).dot(U)\r\n # v remainders vector\r\n v = A.dot(X) - l\r\n\r\n # sigma posteriory\r\n u = 6\r\n r = len(l) - u\r\n sigma0 = ((v.T).dot(v)) / r\r\n sigmaX = sigma0[0, 0] * (np.linalg.inv(N))\r\n # update field\r\n self.__innerOrientationParameters = {'a0': X[0, 0], 'a1': X[1, 0], 'a2': X[2, 0], 'b0': X[3, 0],\r\n 'b1': X[4, 0],\r\n 'b2': X[5, 0],\r\n 'V': v, 'sigma0': sigma0[0, 0], 'sigmaX': sigmaX}\r\n\r\n return {'a0': X[0, 0], 'a1': X[1, 0], 'a2': X[2, 0], 'b0': X[3, 0], 'b1': X[4, 0], 'b2': X[5, 0],\r\n 'V': v, 'sigma0': sigma0[0, 0], 'sigmaX': sigmaX}",
"def ComputeInverseInnerOrientation(self):\n a0 = self.innerOrientationParameters[0]\n b0 = self.innerOrientationParameters[1]\n a1 = self.innerOrientationParameters[2]\n a2 = self.innerOrientationParameters[3]\n b1 = self.innerOrientationParameters[4]\n b2 = self.innerOrientationParameters[5]\n\n mat = np.array([[a1[0], a2[0]], [b1[0], b2[0]]])\n mat = la.inv(mat)\n\n return np.array([a0[0], b0[0], mat[0, 0], mat[0, 1], mat[1, 0], mat[1, 1]]).T",
"def ComputeGeometricParameters(self):\r\n # algebraic inner orinetation paramters\r\n x = self.__innerOrientationParameters\r\n tx = x['a0']\r\n ty = x['b0']\r\n tetha = np.arctan((x['b1'] / x['b2']))\r\n gamma = np.arctan((x['a1'] * np.sin(tetha) + x['a2'] * np.cos(tetha))\r\n / (x['b1'] * np.sin(tetha) + x['b2'] * np.cos(tetha)))\r\n sx = x['a1'] * np.cos(tetha) - x['a2'] * np.sin(tetha)\r\n sy = (x['a1'] * np.sin(tetha) + x['a2'] * np.cos(tetha)) / (np.sin(gamma))\r\n\r\n return {'translationX': tx, 'translationY': ty, 'rotationAngle': tetha,\r\n 'scaleFactorX': sx, 'scaleFactorY': sy, 'shearAngle': gamma}",
"def ComputeGeometricParameters(self):\n # extracting inner orientation params\n a0 = self.innerOrientationParameters[0]\n b0 = self.innerOrientationParameters[1]\n a1 = self.innerOrientationParameters[2]\n a2 = self.innerOrientationParameters[3]\n b1 = self.innerOrientationParameters[4]\n b2 = self.innerOrientationParameters[5]\n\n # computing algebric params\n tx = a0;\n ty = b0\n theta = np.arctan(b1 / b2)\n gamma = np.arctan((a1 * np.sin(theta) + a2 * np.cos(theta)) / (b1 * np.sin(theta) + b2 * np.cos(theta)))\n sx = a1 * np.cos(theta) - a2 * np.sin(theta)\n sy = (a1 * np.sin(theta) + a2 * np.cos(theta)) / np.sin(gamma)\n\n return {\"translationX\": tx, \"translationY\": ty, \"rotationAngle\": np.rad2deg(theta), \"scaleFactorX\": sx,\n \"scaleFactorY\": sy, \"shearAngle\": np.rad2deg(gamma)}",
"def get_orientation(self, visited):\n #print(visited)\n if visited:\n rot = mathutils.Quaternion(self.helical_axisParam, self.get_angle(2))\n a = self.positions[2]\n b = self.positions[3]\n else:\n rot = mathutils.Quaternion(self.helical_axisParam, self.get_angle(0))\n a = self.positions[0]\n b = self.positions[1]\n n = mathutils.Vector(self.east)\n n.rotate(rot)\n return a, b, n",
"def orientation(p, q, r):\n val = (q.y - p.y) * (r.x - q.x) - (q.x - p.x) * (r.y - q.y)\n if val == 0:\n return 0\n elif val > 0:\n return 1\n else:\n return 2",
"def azizen(self):\n # x0,y0 array pixel coordinates relative to cx,cy\n# ndy0,ndx0=img.shape\n ndy0=self.ndy0\n ndx0=self.ndx0\n x0,y0=np.meshgrid(np.linspace(0,ndx0-1,ndx0)-self.cx,np.linspace(0,ndy0-1,ndy0)-self.cy)\n r0=np.sqrt(x0**2+y0**2)/self.pr0 # fractional radial distance from 0,0\n# self.roi=np.s_[ystart:ystart+self.ny0,xstart:xstart+self.nx0]\n # why not model the zenith angle dependence with polynomial directly\n # rather than linear interpolation between roots.\n roots=np.zeros(51)\n rr=np.arange(51)/100.0\n for i,ref in enumerate(rr):\n roots[i]=np.real(np.roots([self.c3,0,self.c2,0,self.c1,-ref])[-1])\n theta0 = np.interp(r0/2,rr,roots)\n \n phi0 = np.arctan2(x0,y0) - self.rot ####phi (i.e., azimuth) is reckoned with -pi corresponding to north, increasing clockwise, NOTE: pysolar use sub-standard definition\n phi0 = phi0%(2*np.pi)\n\n #####correction for the tilt of the camera\n k=np.array((np.sin(self.azm),np.cos(self.azm),0))\n a=np.array([np.sin(theta0)*np.cos(phi0),np.sin(theta0)*np.sin(phi0),np.cos(theta0)]); \n a = np.transpose(a,[1,2,0])\n b=np.cos(self.beta)*a + np.sin(self.beta)*np.cross(k,a,axisb=2) \\\n + np.reshape(np.outer(np.dot(a,k),k),(self.ndy0,self.ndx0,3))*(1-np.cos(self.beta))\n theta0=np.arctan(np.sqrt(b[:,:,0]**2+b[:,:,1]**2)/b[:,:,2])\n phi0=np.arctan2(b[:,:,1],b[:,:,0])%(2*np.pi)\n# max_theta *= deg2rad \n# valid0 = (theta0<max_theta) & (theta0>0); \n# theta0[valid0]=np.nan;\n self.theta0,self.phi0=theta0,phi0",
"def _get_orientations(self):\n for atom in self.invarioms:\n atom.get_orientation()",
"def exteriorOrientationParameters(self):\n return self.__exteriorOrientationParameters",
"def orientation(p, q, r):\n val = (float(q.y - p.y) * (r.x - q.x)) - (float(q.x - p.x) * (r.y - q.y))\n if val > 0:\n # Clockwise orientation\n return 1\n elif val < 0:\n # Counterclockwise orientation\n return 2\n else:\n # Colinear orientation\n return 0",
"def calc_main_axis(self):\n #Clarify why the above step has been done\n c0, c1, c2 = self.calc_geom_center ()\n M = numpy.zeros ((3, 3), dtype=float)\n M = [[0] * 3, [0] * 3, [0] * 3]\n for x in self.atomcoords:\n xi = x[0] - c0\n yi = x[1] - c1\n zi = x[2] - c2\n M[0][0] = M[0][0] + xi * xi\n M[0][1] = M[0][1] + xi * yi\n M[0][2] = M[0][2] + xi * zi\n M[1][1] = M[1][1] + yi * yi\n M[1][2] = M[1][2] + yi * zi\n M[2][2] = M[2][2] + zi * zi\n M[1][0] = M[0][1]\n M[2][0] = M[0][2]\n M[2][1] = M[1][2]\n M = numpy.array (M)\n d = sum (numpy.diag (M))\n M = -M\n M[0, 0] = M[0, 0] + d\n M[1, 1] = M[1, 1] + d\n M[2, 2] = M[2, 2] + d\n\n eigenVals, eigenVecs = numpy.linalg.eig (M)\n eigenVecs = eigenVecs.transpose ()\n return eigenVecs",
"def __idiv__(self, *args):\n return _almathswig.Pose2D___idiv__(self, *args)",
"def exteriorOrientationParameters(self):\r\n return self.__exteriorOrientationParameters",
"def calc_outer_surface_area(self):\n return pi * self.outer_dia * self.length",
"def define_orientation_matrix(self):\n from lmfit import Parameters\n p = Parameters()\n for i in range(3):\n for j in range(3):\n p.add('U%d%d' % (i, j), self.Umat[i, j])\n self.init_p = self.Umat\n return p",
"def cone_orientation(self):\n if self.is3d:\n orientation = lib.D3DVECTOR()\n _check(\n self._native_buffer3d.GetConeOrientation(ctypes.byref(orientation))\n )\n return orientation.x, orientation.y, orientation.z\n else:\n return 0, 0, 0",
"def cone_orientation(self):\r\n if self.is3d:\r\n orientation = lib.D3DVECTOR()\r\n _check(\r\n self._native_buffer3d.GetConeOrientation(ctypes.byref(orientation))\r\n )\r\n return orientation.x, orientation.y, orientation.z\r\n else:\r\n return 0, 0, 0",
"def calc_inner_surface_area(self):\n return pi * self.inner_dia * self.length"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Computes the geometric inner orientation parameters | def ComputeGeometricParameters(self):
# extracting inner orientation params
a0 = self.innerOrientationParameters[0]
b0 = self.innerOrientationParameters[1]
a1 = self.innerOrientationParameters[2]
a2 = self.innerOrientationParameters[3]
b1 = self.innerOrientationParameters[4]
b2 = self.innerOrientationParameters[5]
# computing algebric params
tx = a0;
ty = b0
theta = np.arctan(b1 / b2)
gamma = np.arctan((a1 * np.sin(theta) + a2 * np.cos(theta)) / (b1 * np.sin(theta) + b2 * np.cos(theta)))
sx = a1 * np.cos(theta) - a2 * np.sin(theta)
sy = (a1 * np.sin(theta) + a2 * np.cos(theta)) / np.sin(gamma)
return {"translationX": tx, "translationY": ty, "rotationAngle": np.rad2deg(theta), "scaleFactorX": sx,
"scaleFactorY": sy, "shearAngle": np.rad2deg(gamma)} | [
"def ComputeGeometricParameters(self):\r\n # algebraic inner orinetation paramters\r\n x = self.__innerOrientationParameters\r\n tx = x['a0']\r\n ty = x['b0']\r\n tetha = np.arctan((x['b1'] / x['b2']))\r\n gamma = np.arctan((x['a1'] * np.sin(tetha) + x['a2'] * np.cos(tetha))\r\n / (x['b1'] * np.sin(tetha) + x['b2'] * np.cos(tetha)))\r\n sx = x['a1'] * np.cos(tetha) - x['a2'] * np.sin(tetha)\r\n sy = (x['a1'] * np.sin(tetha) + x['a2'] * np.cos(tetha)) / (np.sin(gamma))\r\n\r\n return {'translationX': tx, 'translationY': ty, 'rotationAngle': tetha,\r\n 'scaleFactorX': sx, 'scaleFactorY': sy, 'shearAngle': gamma}",
"def ComputeInverseInnerOrientation(self):\r\n inner = self.__innerOrientationParameters\r\n matrix = np.array([[inner['a1'], inner['a2']], [inner['b1'], inner['b2']]])\r\n # inverse matrix\r\n inv_matrix = np.linalg.inv(matrix)\r\n return {'a0*': -inner['a0'], 'a1*': inv_matrix[0, 0], 'a2*': inv_matrix[0, 1],\r\n 'b0*': -inner['b0'], 'b1*': inv_matrix[1, 0], 'b2*': inv_matrix[1, 1]}",
"def magnitude_orientation(gx, gy):\n \n magnitude = np.sqrt(gx**2 + gy**2)\n orientation = (np.arctan2(gy, gx) * 180 / np.pi) % 180\n \n return magnitude, orientation",
"def ComputeInnerOrientation(self, imagePoints):\r\n if self.camera.fiducialMarks == 'no fiducials': # case of digital camera\r\n pixel_size = 0.0024 # [mm]\r\n a1 = 1 / pixel_size\r\n b2 = -1 / pixel_size\r\n a2 = 0\r\n b1 = 0\r\n a0 = self.camera.principalPoint[0] / pixel_size\r\n b0 = self.camera.principalPoint[1] / pixel_size\r\n self.__innerOrientationParameters = {'a0': a0, 'a1': a1, 'a2': a2, 'b0': b0, 'b1': b1, 'b2': b2,\r\n 'V': 0, 'sigma0': 0, 'sigmaX': 0}\r\n return {'a0': a0, 'a1': a1, 'a2': a2, 'b0': b0, 'b1': b1, 'b2': b2,\r\n 'V': 0, 'sigma0': 0, 'sigmaX': 0}\r\n else:\r\n\r\n # observation vector\r\n l = np.matrix(imagePoints).flatten('F').T\r\n\r\n # fiducial marks - camera system\r\n fc = self.camera.fiducialMarks\r\n\r\n # A matrix (16X6)\r\n j = len(imagePoints[:, 0])\r\n A = np.zeros((len(l), 6))\r\n for i in range(j):\r\n A[i, 0:3] = np.array([1, fc[i, 0], fc[i, 1]])\r\n A[i + j, 3:] = np.array([1, fc[i, 0], fc[i, 1]])\r\n\r\n # N matrix\r\n N = (A.T).dot(A)\r\n # U vector\r\n U = (A.T).dot(l)\r\n # adjusted variables\r\n X = (np.linalg.inv(N)).dot(U)\r\n # v remainders vector\r\n v = A.dot(X) - l\r\n\r\n # sigma posteriory\r\n u = 6\r\n r = len(l) - u\r\n sigma0 = ((v.T).dot(v)) / r\r\n sigmaX = sigma0[0, 0] * (np.linalg.inv(N))\r\n # update field\r\n self.__innerOrientationParameters = {'a0': X[0, 0], 'a1': X[1, 0], 'a2': X[2, 0], 'b0': X[3, 0],\r\n 'b1': X[4, 0],\r\n 'b2': X[5, 0],\r\n 'V': v, 'sigma0': sigma0[0, 0], 'sigmaX': sigmaX}\r\n\r\n return {'a0': X[0, 0], 'a1': X[1, 0], 'a2': X[2, 0], 'b0': X[3, 0], 'b1': X[4, 0], 'b2': X[5, 0],\r\n 'V': v, 'sigma0': sigma0[0, 0], 'sigmaX': sigmaX}",
"def ComputeInverseInnerOrientation(self):\n a0 = self.innerOrientationParameters[0]\n b0 = self.innerOrientationParameters[1]\n a1 = self.innerOrientationParameters[2]\n a2 = self.innerOrientationParameters[3]\n b1 = self.innerOrientationParameters[4]\n b2 = self.innerOrientationParameters[5]\n\n mat = np.array([[a1[0], a2[0]], [b1[0], b2[0]]])\n mat = la.inv(mat)\n\n return np.array([a0[0], b0[0], mat[0, 0], mat[0, 1], mat[1, 0], mat[1, 1]]).T",
"def ComputeInnerOrientation(self, imagePoints):\n # implementing observation vectors\n imagePoints = imagePoints.reshape(np.size(imagePoints), 1)\n\n fMarks = self.camera.fiducialMarks.reshape(np.size(self.camera.fiducialMarks), 1)\n\n n = int(len(imagePoints)) # number of observations\n u = 6 # 6 orientation parameters\n\n A = np.zeros((n, u)) # A matrix (n,u)\n\n j = 0\n for i in range(len(imagePoints)):\n if i % 2 == 0:\n A[i, 0] = 1;\n A[i, 1] = 0;\n A[i, 2] = fMarks[j];\n A[i, 3] = fMarks[j + 1];\n A[i, 4] = 0\n A[i, 5] = 0\n else:\n A[i, 0] = 0;\n A[i, 1] = 1;\n A[i, 2] = 0;\n A[i, 3] = 0;\n A[i, 4] = fMarks[j];\n A[i, 5] = fMarks[j + 1]\n j += 2\n\n X = np.dot(la.inv(np.dot(np.transpose(A), A)), np.dot(np.transpose(A), imagePoints))\n v = np.dot(A, X) - imagePoints\n\n adjustment_results = {\"params\": X, \"residuals\": v, \"N\": np.dot(np.transpose(A), A)}\n\n self.__innerOrientationParameters = X # updating the inner orientation params\n\n return adjustment_results",
"def get_orientation(self, visited):\n #print(visited)\n if visited:\n rot = mathutils.Quaternion(self.helical_axisParam, self.get_angle(2))\n a = self.positions[2]\n b = self.positions[3]\n else:\n rot = mathutils.Quaternion(self.helical_axisParam, self.get_angle(0))\n a = self.positions[0]\n b = self.positions[1]\n n = mathutils.Vector(self.east)\n n.rotate(rot)\n return a, b, n",
"def interior_angle(self):\n return (self.edges - 2)*(100/self.edges)",
"def get_rotation_parameters(isometric_matrix):\n if np.linalg.det(isometric_matrix) < 0:\n isometric_matrix = np.dot(np.diag([-1, 1, 1]), isometric_matrix)\n angel = acos((np.sum(np.diag(isometric_matrix)) - 1) / 2) * 180 / pi\n square_diff = (isometric_matrix - isometric_matrix.T) ** 2\n denominator = sqrt(np.sum(square_diff) / 2)\n x = (isometric_matrix[2, 1] - isometric_matrix[1, 2]) / denominator\n y = (isometric_matrix[0, 2] - isometric_matrix[2, 0]) / denominator\n z = (isometric_matrix[1, 0] - isometric_matrix[0, 1]) / denominator\n return isometric_matrix, np.array((x, y, z)), angel",
"def calc_main_axis(self):\n #Clarify why the above step has been done\n c0, c1, c2 = self.calc_geom_center ()\n M = numpy.zeros ((3, 3), dtype=float)\n M = [[0] * 3, [0] * 3, [0] * 3]\n for x in self.atomcoords:\n xi = x[0] - c0\n yi = x[1] - c1\n zi = x[2] - c2\n M[0][0] = M[0][0] + xi * xi\n M[0][1] = M[0][1] + xi * yi\n M[0][2] = M[0][2] + xi * zi\n M[1][1] = M[1][1] + yi * yi\n M[1][2] = M[1][2] + yi * zi\n M[2][2] = M[2][2] + zi * zi\n M[1][0] = M[0][1]\n M[2][0] = M[0][2]\n M[2][1] = M[1][2]\n M = numpy.array (M)\n d = sum (numpy.diag (M))\n M = -M\n M[0, 0] = M[0, 0] + d\n M[1, 1] = M[1, 1] + d\n M[2, 2] = M[2, 2] + d\n\n eigenVals, eigenVecs = numpy.linalg.eig (M)\n eigenVecs = eigenVecs.transpose ()\n return eigenVecs",
"def shape_info(fname):\n VX = np.genfromtxt(fname,comments='#',usecols=0,delimiter=',')\n VY = np.genfromtxt(fname,comments='#',usecols=1,delimiter=',')\n V = np.genfromtxt(fname,comments='#',usecols=(0,1),delimiter=',')\n \n \n nv = np.size(VX)\n\n vxmax = np.amax(VX)\n vxmin = np.amin(VX)\n xspan = abs(vxmax-vxmin)\n \n vymax = np.amax(VY)\n vymin = np.amin(VY)\n yspan = abs(vymax-vymin)\n L = max(xspan,yspan)\n B = min(xspan,yspan)\n\n VX /= L\n VY /= L\n V /= L\n L /= L\n B /= L\n\n elongation = L/B\n A_ratio = polygon_area(VX,VY)/(L*B) \n a = 0.\n for i in range(nv-1):\n I1 = i-1\n I2 = i\n I3 = i+1\n \n if i == 0: I1 = -1\n\n # Vectors to the midpoint of each face connecting vertex i\n M1 = np.array([(VX[I1]+VX[I2])/2.,(VY[I1]+VY[I2])/2.])\n M2 = np.array([(VX[I2]+VX[I3])/2.,(VY[I2]+VY[I3])/2.])\n \n # Vector magnitudes\n m1 = np.linalg.norm(M1)\n m2 = np.linalg.norm(M2)\n \n # Normalise the two midpoint vectors\n U1 = M1/m1\n U2 = M2/m2\n \n # Their average magnitude\n m = (m1+m2)/2.\n # New vectors for use in calculation\n N1 = V[i] + U1*m\n N2 = V[i] + U2*m\n\n # Length of the chord between them\n S = np.linalg.norm(N1-N2)\n \n # Calculate angle between N1 & N2\n cost = np.sum(N1*N2)/(np.linalg.norm(N1)*np.linalg.norm(N2))\n\n # Do not allow angles > pi\n thet = np.arccos(cost)\n if thet > np.pi: thet -= np.pi\n alph = np.pi-thet\n \n # radius = rad of circle that is tangential at N1 & N2\n a += S*.5/(np.sin(alph/2.))\n ac = a/float(nv-1)\n q = B/3.\n roundness = ac/q\n\n return elongation,A_ratio,roundness",
"def get_rotation_axis(bottom,upper):\n\n center_top = np.mean(upper, axis=0)\n center_bottom = np.mean(bottom, axis=0)\n rotation_axis = center_top - center_bottom\n return rotation_axis.ravel()",
"def orientation(p, q, r):\n val = (q.y - p.y) * (r.x - q.x) - (q.x - p.x) * (r.y - q.y)\n if val == 0:\n return 0\n elif val > 0:\n return 1\n else:\n return 2",
"def interior_angle(self):\n n = self.__getitem__(2)\n return (n - 2)*S.Pi/n",
"def azizen(self):\n # x0,y0 array pixel coordinates relative to cx,cy\n# ndy0,ndx0=img.shape\n ndy0=self.ndy0\n ndx0=self.ndx0\n x0,y0=np.meshgrid(np.linspace(0,ndx0-1,ndx0)-self.cx,np.linspace(0,ndy0-1,ndy0)-self.cy)\n r0=np.sqrt(x0**2+y0**2)/self.pr0 # fractional radial distance from 0,0\n# self.roi=np.s_[ystart:ystart+self.ny0,xstart:xstart+self.nx0]\n # why not model the zenith angle dependence with polynomial directly\n # rather than linear interpolation between roots.\n roots=np.zeros(51)\n rr=np.arange(51)/100.0\n for i,ref in enumerate(rr):\n roots[i]=np.real(np.roots([self.c3,0,self.c2,0,self.c1,-ref])[-1])\n theta0 = np.interp(r0/2,rr,roots)\n \n phi0 = np.arctan2(x0,y0) - self.rot ####phi (i.e., azimuth) is reckoned with -pi corresponding to north, increasing clockwise, NOTE: pysolar use sub-standard definition\n phi0 = phi0%(2*np.pi)\n\n #####correction for the tilt of the camera\n k=np.array((np.sin(self.azm),np.cos(self.azm),0))\n a=np.array([np.sin(theta0)*np.cos(phi0),np.sin(theta0)*np.sin(phi0),np.cos(theta0)]); \n a = np.transpose(a,[1,2,0])\n b=np.cos(self.beta)*a + np.sin(self.beta)*np.cross(k,a,axisb=2) \\\n + np.reshape(np.outer(np.dot(a,k),k),(self.ndy0,self.ndx0,3))*(1-np.cos(self.beta))\n theta0=np.arctan(np.sqrt(b[:,:,0]**2+b[:,:,1]**2)/b[:,:,2])\n phi0=np.arctan2(b[:,:,1],b[:,:,0])%(2*np.pi)\n# max_theta *= deg2rad \n# valid0 = (theta0<max_theta) & (theta0>0); \n# theta0[valid0]=np.nan;\n self.theta0,self.phi0=theta0,phi0",
"def getRotationBase( self, direction='NW'):\r\n if( direction == 'center' ):\r\n x0,y0,x1,y1 = self.cavans.bbox( self.handler )\r\n return complex( (x0-x1)/2, (y0-y1)/2 )\r\n \r\n xy = self.getXY() \r\n minVal = 100000\r\n minX = 100000\r\n minY = 100000\r\n if( direction == 'NW' ):\r\n for x,y in xy:\r\n if( x + y < minVal ):\r\n minVal = x+y\r\n minX = x\r\n minY = y\r\n elif( direction == 'NE' ):\r\n for x,y in xy:\r\n if( -x + y < minVal ):\r\n minVal = -x+y\r\n minX = x\r\n minY = y\r\n elif( direction == 'SW' ):\r\n for x,y in xy:\r\n if( x - y < minVal ):\r\n minVal = x-y\r\n minX = x\r\n minY = y\r\n elif( direction == 'SE' ):\r\n for x,y in xy:\r\n if( -x - y < minVal ):\r\n minVal = -x-y\r\n minX = x\r\n minY = y\r\n else:\r\n print 'WARNING: Possible incorrect usage of GraphicalForm.getOffset()'\r\n return complex( 0,0 )\r\n ##print 'rotation base is ', minX, minY\r\n return complex( minX,minY )",
"def polar_decomposition(self):\n return self.polar_unit_vector, self.polar_angle",
"def unit_vectors(exif_obj):\n camera_yaw = exif_obj[\"camera_yaw\"]\n lat, lon = exif_obj[\"img_lat\"], exif_obj[\"img_lon\"]\n declination = geomag.declination(lat, lon)\n camera_yaw += declination # compensate for magnetic variation\n alpha = camera_yaw * np.pi / 180\n n = [-np.cos(alpha), -np.sin(alpha)]\n e = [-np.sin(alpha), np.cos(alpha)]\n return np.array(n), np.array(e)",
"def greenhouse_orientation():\n \n # NEED TO CHECK THIS WITH COMPASS (OR IPHONE)\n orientation_angle = 90 # angle between east-west line and the length of the greenhouse (0-90 degree)\n orientation_angle = float(orientation_angle)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Computes the parameters of the inverse inner orientation transformation | def ComputeInverseInnerOrientation(self):
a0 = self.innerOrientationParameters[0]
b0 = self.innerOrientationParameters[1]
a1 = self.innerOrientationParameters[2]
a2 = self.innerOrientationParameters[3]
b1 = self.innerOrientationParameters[4]
b2 = self.innerOrientationParameters[5]
mat = np.array([[a1[0], a2[0]], [b1[0], b2[0]]])
mat = la.inv(mat)
return np.array([a0[0], b0[0], mat[0, 0], mat[0, 1], mat[1, 0], mat[1, 1]]).T | [
"def ComputeInverseInnerOrientation(self):\r\n inner = self.__innerOrientationParameters\r\n matrix = np.array([[inner['a1'], inner['a2']], [inner['b1'], inner['b2']]])\r\n # inverse matrix\r\n inv_matrix = np.linalg.inv(matrix)\r\n return {'a0*': -inner['a0'], 'a1*': inv_matrix[0, 0], 'a2*': inv_matrix[0, 1],\r\n 'b0*': -inner['b0'], 'b1*': inv_matrix[1, 0], 'b2*': inv_matrix[1, 1]}",
"def pose2DInverse(*args):\n return _almathswig.pose2DInverse(*args)",
"def GetInverseTransformationForDh(params): \n \n transformation = np.empty([4, 4])\n \n #inverse dh transformation matrix\n \n transformation[0][0] = np.cos(params[2])\n transformation[0][1] = np.sin(params[2])\n transformation[0][2] = 0\n transformation[0][3] = -1*params[0]\n \n transformation[1][0] = -1*np.sin(params[2])*np.cos(params[3])\n transformation[1][1] = np.cos(params[2])*np.cos(params[3])\n transformation[1][2] = np.sin(params[3])\n transformation[1][3] = -1*params[1]*np.sin(params[3])\n \n transformation[2][0] = np.sin(params[2])*np.sin(params[3])\n transformation[2][1] = -1*np.cos(params[2])*np.sin(params[3])\n transformation[2][2] = np.cos(params[3])\n transformation[2][3] = -1*params[1]*np.cos(params[3])\n \n transformation[3][0] = 0\n transformation[3][1] = 0\n transformation[3][2] = 0\n transformation[3][3] = 1\n \n return transformation",
"def _inverse(self):\n\t\trotation_matrix = self.pose_mat[:3,:3]\n\t\ttranslation_vector = self.pose_mat[:3,3]\n\t\t\n\t\trot = np.transpose(rotation_matrix)\n\t\ttrans = - np.matmul(np.transpose(rotation_matrix), translation_vector)\n\t\treturn Transformation(rot, trans, ask_inv=False)",
"def inverse(self):\n ((c, ms, x),(s, c2, y), (z1, z2, o)) = self.matrix\n return Transform([[c, s, (-c*x)-(s*y)],\n [-s, c, (s*x)-(c*y)],\n [0, 0, 1]])",
"def inverse(self):\n return _almathswig.Pose2D_inverse(self)",
"def __idiv__(self, *args):\n return _almathswig.Pose2D___idiv__(self, *args)",
"def transformInverse(*args):\n return _almathswig.transformInverse(*args)",
"def ComputeInnerOrientation(self, imagePoints):\n # implementing observation vectors\n imagePoints = imagePoints.reshape(np.size(imagePoints), 1)\n\n fMarks = self.camera.fiducialMarks.reshape(np.size(self.camera.fiducialMarks), 1)\n\n n = int(len(imagePoints)) # number of observations\n u = 6 # 6 orientation parameters\n\n A = np.zeros((n, u)) # A matrix (n,u)\n\n j = 0\n for i in range(len(imagePoints)):\n if i % 2 == 0:\n A[i, 0] = 1;\n A[i, 1] = 0;\n A[i, 2] = fMarks[j];\n A[i, 3] = fMarks[j + 1];\n A[i, 4] = 0\n A[i, 5] = 0\n else:\n A[i, 0] = 0;\n A[i, 1] = 1;\n A[i, 2] = 0;\n A[i, 3] = 0;\n A[i, 4] = fMarks[j];\n A[i, 5] = fMarks[j + 1]\n j += 2\n\n X = np.dot(la.inv(np.dot(np.transpose(A), A)), np.dot(np.transpose(A), imagePoints))\n v = np.dot(A, X) - imagePoints\n\n adjustment_results = {\"params\": X, \"residuals\": v, \"N\": np.dot(np.transpose(A), A)}\n\n self.__innerOrientationParameters = X # updating the inner orientation params\n\n return adjustment_results",
"def getInverseMatrix(self) -> CMatrix4:\n ...",
"def ComputeInnerOrientation(self, imagePoints):\r\n if self.camera.fiducialMarks == 'no fiducials': # case of digital camera\r\n pixel_size = 0.0024 # [mm]\r\n a1 = 1 / pixel_size\r\n b2 = -1 / pixel_size\r\n a2 = 0\r\n b1 = 0\r\n a0 = self.camera.principalPoint[0] / pixel_size\r\n b0 = self.camera.principalPoint[1] / pixel_size\r\n self.__innerOrientationParameters = {'a0': a0, 'a1': a1, 'a2': a2, 'b0': b0, 'b1': b1, 'b2': b2,\r\n 'V': 0, 'sigma0': 0, 'sigmaX': 0}\r\n return {'a0': a0, 'a1': a1, 'a2': a2, 'b0': b0, 'b1': b1, 'b2': b2,\r\n 'V': 0, 'sigma0': 0, 'sigmaX': 0}\r\n else:\r\n\r\n # observation vector\r\n l = np.matrix(imagePoints).flatten('F').T\r\n\r\n # fiducial marks - camera system\r\n fc = self.camera.fiducialMarks\r\n\r\n # A matrix (16X6)\r\n j = len(imagePoints[:, 0])\r\n A = np.zeros((len(l), 6))\r\n for i in range(j):\r\n A[i, 0:3] = np.array([1, fc[i, 0], fc[i, 1]])\r\n A[i + j, 3:] = np.array([1, fc[i, 0], fc[i, 1]])\r\n\r\n # N matrix\r\n N = (A.T).dot(A)\r\n # U vector\r\n U = (A.T).dot(l)\r\n # adjusted variables\r\n X = (np.linalg.inv(N)).dot(U)\r\n # v remainders vector\r\n v = A.dot(X) - l\r\n\r\n # sigma posteriory\r\n u = 6\r\n r = len(l) - u\r\n sigma0 = ((v.T).dot(v)) / r\r\n sigmaX = sigma0[0, 0] * (np.linalg.inv(N))\r\n # update field\r\n self.__innerOrientationParameters = {'a0': X[0, 0], 'a1': X[1, 0], 'a2': X[2, 0], 'b0': X[3, 0],\r\n 'b1': X[4, 0],\r\n 'b2': X[5, 0],\r\n 'V': v, 'sigma0': sigma0[0, 0], 'sigmaX': sigmaX}\r\n\r\n return {'a0': X[0, 0], 'a1': X[1, 0], 'a2': X[2, 0], 'b0': X[3, 0], 'b1': X[4, 0], 'b2': X[5, 0],\r\n 'V': v, 'sigma0': sigma0[0, 0], 'sigmaX': sigmaX}",
"def au_inverse(self):\n if self._au_inverse is None:\n epsilon = 1e-10 * tf.eye(2 * self.filter_inducing.n_values,\n dtype=GLOBAL_DTYPE)\n self._au_inverse = tf.matrix_inverse(\n self.filter_inducing.augmented_covariance() + self._eps_filter,\n name=\"au_inverse\"\n )\n return self._au_inverse",
"def inverse(self):\n return self.transform().inverse().pose()",
"def GetInverse(self, *args):\n return _itkTranslationTransformPython.itkTranslationTransformD2_GetInverse(self, *args)",
"def inverse(self):\r\n \r\n Mi=mat4()\r\n d=self.determinant()\r\n for i in range(4):\r\n for j in range(4):\r\n sign=1-((i+j)%2)*2\r\n m3=self._submat(i,j)\r\n Mi[j,i]=sign*m3.determinant()/d\r\n return Mi",
"def inverse(A):\n## return np.transpose( inverse_transpose(A), (0,2,1))\n return np.swapaxes( inverse_transpose(A), -1,-2)",
"def getInverseLambda(self):\n return self.invShape",
"def inverse(self):\n rotation_matrix = self.pose_mat[:3, :3]\n translation_vector = self.pose_mat[:3, 3]\n\n rot = np.transpose(rotation_matrix)\n trans = - np.matmul(np.transpose(rotation_matrix), translation_vector)\n return Transformation(rot, trans)",
"def __idiv__(self, *args):\n return _almathswig.Rotation3D___idiv__(self, *args)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Transforms camera points to image points | def CameraToImage(self, cameraPoints):
# setting up the required matrices
a0 = self.innerOrientationParameters[0]
b0 = self.innerOrientationParameters[1]
a1 = self.innerOrientationParameters[2]
a2 = self.innerOrientationParameters[3]
b1 = self.innerOrientationParameters[4]
b2 = self.innerOrientationParameters[5]
if np.isscalar(a0):
R = np.array([[a1, a2], [b1, b2]])
T = np.array([[a0], [b0]])
else:
R = np.array([[a1[0], a2[0]], [b1[0], b2[0]]])
T = np.array([[a0[0]], [b0[0]]])
cameraPoints = cameraPoints.T
# computing the transformation to the image system
return (T + np.dot(R, cameraPoints)).T | [
"def CameraToImage(self, cameraPoints):\r\n # get algebric parameters\r\n inner = self.__innerOrientationParameters\r\n\r\n imgPoints = np.zeros((len(cameraPoints[:, 0]), 2))\r\n for i in range(len(cameraPoints[:, 0])):\r\n imgPoints[i, 0] = inner['a0'] + inner['a1'] * cameraPoints[i, 0] + inner['a2'] * cameraPoints[i, 1]\r\n imgPoints[i, 1] = inner['b0'] + inner['b1'] * cameraPoints[i, 0] + inner['b2'] * cameraPoints[i, 1]\r\n\r\n return imgPoints",
"def cam2pixel(points_1, proj_rot_2_1, proj_tr_2_1):\n b, _, h, w = points_1.size()\n cam_coords_flat = points_1.reshape(b, 3, -1) # [B, 3, H*W]\n # apply rotation\n if proj_rot_2_1 is not None:\n pcoords = proj_rot_2_1 @ cam_coords_flat\n else:\n pcoords = cam_coords_flat\n # apply translation\n if proj_tr_2_1 is not None:\n pcoords = pcoords + proj_tr_2_1 # [B, 3, H*W]\n\n x = pcoords[:, 0]\n y = pcoords[:, 1]\n z = pcoords[:, 2].clamp(min=1e-3)\n\n x_norm = 2*(x / z) / (w-1) - 1 # Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1) [B, H*W]\n y_norm = 2*(y / z) / (h-1) - 1 # Idem [B, H*W]\n\n coords = torch.stack([x_norm, y_norm], dim=2) # [B, H*W, 2]\n return coords.reshape(b, h, w, 2)",
"def ImageToCamera(self, imagePoints):\r\n # get the inverse inner orientation param\r\n inv_param = self.ComputeInverseInnerOrientation()\r\n\r\n camPoints = np.zeros((len(imagePoints[:, 0]), 2))\r\n for i in range(len(imagePoints[:, 0])):\r\n camPoints[i, 0] = inv_param['a1*'] * (imagePoints[i, 0] + inv_param['a0*']) + inv_param['a2*'] * (\r\n imagePoints[i, 1] + inv_param['b0*'])\r\n camPoints[i, 1] = inv_param['b1*'] * (imagePoints[i, 0] + inv_param['a0*']) + inv_param['b2*'] * (\r\n imagePoints[i, 1] + inv_param['b0*'])\r\n\r\n return camPoints",
"def camera_transform(image):\n img = np.zeros((image.shape[0], image.shape[1], 3))\n for y in range(image.shape[0]):\n for x in range(image.shape[1]):\n img[y][x] = (x - 320) / 575.5 * image[y, x], (240 - y) / 575.5 * image[y, x], image[\n y, x]\n return img",
"def project_points(points, cam_matrix, trans, rot):\n\n # STEP 1: Transform pointcloud into new reference frame.\n points = np.dot(rot, points) + trans[:, None]\n\n # STEP 2: Project new pointcloud onto image frame using K matrix.\n # gives a 3 x N array of image plane coordinates in homogenous coordinates.\n homo_pixel_coords = np.dot(cam_matrix, points)\n\n # STEP 3: Convert homogenous coordinates to regular 2D coordinates.\n # To do this, you need to divide the first two coordinates of homo_pixel_coords\n # by the third coordinate.\n pixel_coords = homo_pixel_coords[:2] / homo_pixel_coords[2]\n\n # STEP 4: Convert to integers. Take the floor of pixel_coords then cast it\n # to an integer type, like numpy.int32\n pixel_coords = np.int32(np.floor(pixel_coords))\n\n return pixel_coords",
"def project_to_image_plane(self, point_in_world, timestamp):\n\n camera_info = CameraInfo()\n\n fx = self.config['camera_info']['focal_length_x']\n fy = self.config['camera_info']['focal_length_y']\n\n camera_info.width = self.config['camera_info']['image_width']\n camera_info.height = self.config['camera_info']['image_height']\n\n #print(\"fx {}, fy {}\".format(fx, fy))\n\n camera_info.K = np.array([[fx, 0, camera_info.width / 2],\n [0, fy, camera_info.height / 2],\n [0, 0, 1.]], dtype=np.float32)\n camera_info.P = np.array([[fx, 0, camera_info.width / 2, 0],\n [0, fy, camera_info.height / 2, 0],\n [0, 0, 1., 0]])\n camera_info.R = np.array([[1., 0, 0],\n [0, 1., 0],\n [0, 0, 1.]], dtype=np.float32)\n\n camera = PinholeCameraModel()\n camera.fromCameraInfo(camera_info)\n\n #print(\"point_in_world = {}\".format(str(point_in_world)))\n #print(\"camera projection matrix \", camera.P)\n\n # get transform between pose of camera and world frame\n trans = None\n point_in_camera_space = None\n point_in_image = None\n bbox_points_camera_image = []\n\n euler_transforms = (\n math.radians(90), # roll along X to force Y axis 'up'\n math.radians(-90 + -.75), # pitch along Y to force X axis towards 'right', with slight adjustment for camera's 'yaw'\n math.radians(-9) # another roll to orient the camera slightly 'upwards', (camera's 'pitch')\n )\n euler_axes = 'sxyx'\n\n try:\n self.listener.waitForTransform(\"/base_link\",\n \"/world\", timestamp, rospy.Duration(0.1))\n (trans, rot) = self.listener.lookupTransform(\"/base_link\",\n \"/world\", timestamp)\n\n camera_orientation_adj = tf.transformations.quaternion_from_euler(*euler_transforms, axes=euler_axes)\n\n trans_matrix = self.listener.fromTranslationRotation(trans, rot)\n camera_orientation_adj = self.listener.fromTranslationRotation((0, 0, 0), camera_orientation_adj)\n\n #print(\"trans {}, rot {}\".format(trans, rot))\n #print(\"transform matrix {}\".format(trans_matrix))\n\n point = np.array([point_in_world.x, point_in_world.y, point_in_world.z, 1.0])\n\n # this point should match what you'd see from being inside the vehicle looking straight ahead.\n point_in_camera_space = trans_matrix.dot(point)\n\n #print(\"point in camera frame {}\".format(point_in_camera_space))\n\n final_trans_matrix = camera_orientation_adj.dot(trans_matrix)\n\n # this point is from the view point of the camera (oriented along the camera's rotation quaternion)\n point_in_camera_space = final_trans_matrix.dot(point)\n\n #print(\"point in camera frame adj {}\".format(point_in_camera_space))\n\n bbox_points = [(point_in_camera_space[0] - 0.5, point_in_camera_space[1] - 1.1, point_in_camera_space[2], 1.0),\n (point_in_camera_space[0] + 0.5, point_in_camera_space[1] + 1.1, point_in_camera_space[2], 1.0),\n (point_in_camera_space[0] - 0.5, point_in_camera_space[1] - 1.1, point_in_camera_space[2], 1.0),\n (point_in_camera_space[0] + 0.5, point_in_camera_space[1] + 1.1, point_in_camera_space[2], 1.0)]\n\n # these points represent the bounding box within the camera's image\n for p in bbox_points:\n bbox_points_camera_image.append(camera.project3dToPixel(p))\n\n # print(\"point in image {}\".format(bbox_points_camera_image))\n\n except (tf.Exception, tf.LookupException, tf.ConnectivityException):\n rospy.logerr(\"Failed to find camera to map transform\")\n\n return bbox_points_camera_image",
"def convert_image_point_to_global_coordinates(points, camera_location):\n # TODO: The camera should take photos which record the camera_location, and scale factors etc.\n # This should be a method on such an image.\n\n # Convert to numpy object for a clean notation\n points = np.array(points)\n camera_location = np.array(camera_location)\n scale_factors = np.array([config.Y_PIXELS_TO_MILLIMETRE_SCALE, config.X_PIXELS_TO_MILLIMETRE_SCALE])\n camera_resolution = np.array(config.CAMERA_RESOLUTION)\n\n # Do the computation\n image_centre = camera_resolution / 2\n return camera_location + scale_factors * (points - image_centre)",
"def project_to_image_plane(self, point_in_world):\n\n fx = self.config['camera_info']['focal_length_x']\n fy = self.config['camera_info']['focal_length_y']\n image_width = self.config['camera_info']['image_width']\n image_height = self.config['camera_info']['image_height']\n\n # get transform between pose of camera and world frame\n trans = None\n try:\n now = rospy.Time.now()\n self.listener.waitForTransform(\"/base_link\",\n \"/world\", now, rospy.Duration(1.0))\n (trans, rot) = self.listener.lookupTransform(\"/base_link\",\n \"/world\", now)\n\n except (tf.Exception, tf.LookupException, tf.ConnectivityException):\n rospy.logerr(\"Failed to find camera to map transform\")\n\n #TODO Use tranform and rotation to calculate 2D position of light in image\n # KB 10Oct2017\n # commenting line below, and using dummy values, so that tf_classifier.py can be tested \n # x, y = self.project_to_image_plane(light.pose.pose.position)\n x = 0\n y = 0\n\n return (x, y)",
"def project_to_image_plane(self, point_in_world):\n\n fx = self.config['camera_info']['focal_length_x']\n fy = self.config['camera_info']['focal_length_y']\n image_width = self.config['camera_info']['image_width']\n image_height = self.config['camera_info']['image_height']\n\n # get transform between pose of camera and world frame\n trans = None\n try:\n now = rospy.Time.now()\n self.listener.waitForTransform(\"/base_link\",\n \"/world\", now, rospy.Duration(1.0))\n (trans, rot) = self.listener.lookupTransform(\"/base_link\",\n \"/world\", now)\n\n except (tf.Exception, tf.LookupException, tf.ConnectivityException):\n rospy.logerr(\"Failed to find camera to map transform\")\n\n #TODO Use tranform and rotation to calculate 2D position of light in image\n\n x = 0\n y = 0\n\n return (x, y)",
"def project_points(self, points):\n frame_center = self.get_frame_center()\n distance = self.get_distance()\n rot_matrix = self.get_rotation_matrix()\n\n points = points - frame_center\n points = np.dot(points, rot_matrix.T)\n zs = points[:, 2]\n for i in 0, 1:\n if self.exponential_projection:\n # Proper projedtion would involve multiplying\n # x and y by d / (d-z). But for points with high\n # z value that causes weird artifacts, and applying\n # the exponential helps smooth it out.\n factor = np.exp(zs / distance)\n lt0 = zs < 0\n factor[lt0] = distance / (distance - zs[lt0])\n else:\n factor = distance / (distance - zs)\n factor[(distance - zs) < 0] = 10 ** 6\n # clip_in_place(factor, 0, 10**6)\n points[:, i] *= factor\n points = points + frame_center\n return points",
"def imageTransform(self):\n ims = self.imageShape\n acs = self.activeShape\n dx = self.colVector\n dy = self.rowVector\n\n p0 = self.activeOrigin\n p1 = p0 + acs[2] * dx\n p2 = p0 + acs[1] * dy\n\n # print p0, p1, p2\n # print acs, dx, dy\n\n localPts = list(map(pg.Vector, [[0,0], [ims[2],0], [0,ims[1]], [0,0,1]])) # w and h of data of image in pixels.\n globalPts = list(map(pg.Vector, [p0, p1, p2, [0,0,1]]))\n m = pg.solve3DTransform(localPts, globalPts)\n m[:,2] = m[:,3]\n m[2] = m[3]\n m[2,2] = 1\n tr = Qt.QTransform(*m[:3,:3].transpose().reshape(9))\n return tr",
"def project_onto_image(self, points_in_camera_frame: ARRAY_LIKE, image: int = 0,\n temperature: Real = 0) -> np.ndarray:\n\n _, __, picture_locations = self.get_projections(points_in_camera_frame, image, temperature=temperature)\n\n return picture_locations",
"def transformImage(originalImage, oldPoints, newPoints):\n # originalImage = cv2.cvtColor(originalImage, cv2.COLOR_BGR2GRAY)\n\n newImage = np.full(originalImage.shape, fill_value=255, dtype=np.uint8)\n newImage[newPoints[0], newPoints[1]] = originalImage[oldPoints[0], oldPoints[1]]\n\n return newImage",
"def cameraFromSpace(self, points):\n points = np.array(points)\n return np.dot(points - self.t, self.R.T)",
"def apply_transformation(self, points):\n assert (points.shape[0] == 3)\n n = points.shape[1]\n points_ = np.vstack((points, np.ones((1, n))))\n points_trans_ = np.matmul(self.pose_mat, points_)\n points_transformed = np.true_divide(points_trans_[:3, :], points_trans_[[-1], :])\n return points_transformed",
"def _point_scale2img(points, _H, _W):\n # with tf.variable_scope(\"_point_scale2img\", reuse=False):\n points = points * tf.constant([_H - 1, _W - 1], \"float32\")\n return points",
"def transform(x, y, M):\n xy_t = np.squeeze(\n cv2.perspectiveTransform(\n np.dstack(\n [\n x,\n y\n ]\n ),\n np.asarray(M)\n )\n )\n return xy_t[:, 0], xy_t[:, 1]",
"def four_point_transform(image, pts):\n\n\tmax_x, max_y = np.max(pts[:, 0]).astype(np.int32), np.max(pts[:, 1]).astype(np.int32)\n\n\tdst = np.array([\n\t\t[0, 0],\n\t\t[image.shape[1] - 1, 0],\n\t\t[image.shape[1] - 1, image.shape[0] - 1],\n\t\t[0, image.shape[0] - 1]], dtype=\"float32\")\n\n\twarped = cv2.warpPerspective(image, cv2.getPerspectiveTransform(dst, pts), (max_x, max_y))\n\n\treturn warped",
"def project(points, cameras):\n\n #list of projections\n projections = []\n\n if np.ndim(points) == 1:\n points = points[:, np.newaxis]\n for camera in cameras:\n p = np.dot(camera, points)\n projections.append(p / p[2, :])\n\n return projections"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compute exterior orientation parameters. This function can be used in conjunction with ``self.__ComputeDesignMatrix(groundPoints)`` and ``self.__ComputeObservationVector(imagePoints)`` | def ComputeExteriorOrientation(self, imagePoints, groundPoints, epsilon):
# cameraPoints = self.ImageToCamera(imagePoints)
cameraPoints = imagePoints
self.__ComputeApproximateVals(cameraPoints, groundPoints)
l0 = self.__ComputeObservationVector(groundPoints.T)
l0 = np.reshape(l0, (-1, 1))
l = cameraPoints.reshape(np.size(cameraPoints), 1) - l0
A = self.__ComputeDesignMatrix(groundPoints.T)
N = np.dot(A.T, A)
u = np.dot(A.T, l)
deltaX = np.dot(la.inv(N), u)
# update orientation pars
self.__exteriorOrientationParameters = np.add(self.__exteriorOrientationParameters, np.reshape(deltaX, 6))
while la.norm(deltaX) > epsilon:
l0 = self.__ComputeObservationVector(groundPoints.T)
l0 = np.reshape(l0, (-1, 1))
l = cameraPoints.reshape(np.size(cameraPoints), 1) - l0
A = self.__ComputeDesignMatrix(groundPoints.T)
N = np.dot(A.T, A)
u = np.dot(A.T, l)
deltaX = np.dot(la.inv(N), u)
# update orientation pars
self.__exteriorOrientationParameters = np.add(self.__exteriorOrientationParameters, np.reshape(deltaX, 6))
# compute residuals
l_a = np.reshape(self.__ComputeObservationVector(groundPoints.T), (-1, 1))
v = l_a - cameraPoints.reshape(np.size(cameraPoints), 1)
if (np.size(A, 0) - np.size(deltaX)) != 0:
sig = np.dot(v.T, v) / (np.size(A, 0) - np.size(deltaX))
sigmaX = sig[0] * la.inv(N)
else:
sigmaX = None
return [self.exteriorOrientationParameters, sigmaX, v] | [
"def ComputeExteriorOrientation(self, imagePoints, groundPoints, epsilon):\r\n # compute control points in camera system using the inner orientation\r\n camera_points = self.ImageToCamera(imagePoints)\r\n\r\n # compute approximate values for exteriror orientation using conformic transformation\r\n self.ComputeApproximateVals(camera_points, groundPoints)\r\n lb = camera_points.flatten().T\r\n\r\n dx = np.ones([6, 1]) * 100000\r\n itr = 0\r\n # adjustment\r\n while np.linalg.norm(dx) > epsilon and itr < 100:\r\n itr += 1\r\n X = self.exteriorOrientationParameters.T\r\n l0 = self.ComputeObservationVector(groundPoints).T\r\n L = lb - l0\r\n A = self.ComputeDesignMatrix(groundPoints)\r\n N = np.dot(A.T, A)\r\n U = np.dot(A.T, L)\r\n dx = np.dot(np.linalg.inv(N), U)\r\n X = X + dx\r\n self.exteriorOrientationParameters = X.T\r\n\r\n v = A.dot(dx) - L\r\n\r\n # sigma posteriory\r\n u = 6\r\n r = len(L) - u\r\n if r != 0:\r\n sigma0 = ((v.T).dot(v)) / r\r\n sigmaX = sigma0 * (np.linalg.inv(N))\r\n else:\r\n sigma0 = None\r\n sigmaX = None\r\n\r\n return self.exteriorOrientationParameters, sigma0, sigmaX",
"def exteriorOrientationParameters(self):\n return self.__exteriorOrientationParameters",
"def exteriorOrientationParameters(self):\r\n return self.__exteriorOrientationParameters",
"def ComputeInnerOrientation(self, imagePoints):\n # implementing observation vectors\n imagePoints = imagePoints.reshape(np.size(imagePoints), 1)\n\n fMarks = self.camera.fiducialMarks.reshape(np.size(self.camera.fiducialMarks), 1)\n\n n = int(len(imagePoints)) # number of observations\n u = 6 # 6 orientation parameters\n\n A = np.zeros((n, u)) # A matrix (n,u)\n\n j = 0\n for i in range(len(imagePoints)):\n if i % 2 == 0:\n A[i, 0] = 1;\n A[i, 1] = 0;\n A[i, 2] = fMarks[j];\n A[i, 3] = fMarks[j + 1];\n A[i, 4] = 0\n A[i, 5] = 0\n else:\n A[i, 0] = 0;\n A[i, 1] = 1;\n A[i, 2] = 0;\n A[i, 3] = 0;\n A[i, 4] = fMarks[j];\n A[i, 5] = fMarks[j + 1]\n j += 2\n\n X = np.dot(la.inv(np.dot(np.transpose(A), A)), np.dot(np.transpose(A), imagePoints))\n v = np.dot(A, X) - imagePoints\n\n adjustment_results = {\"params\": X, \"residuals\": v, \"N\": np.dot(np.transpose(A), A)}\n\n self.__innerOrientationParameters = X # updating the inner orientation params\n\n return adjustment_results",
"def ComputeInnerOrientation(self, imagePoints):\r\n if self.camera.fiducialMarks == 'no fiducials': # case of digital camera\r\n pixel_size = 0.0024 # [mm]\r\n a1 = 1 / pixel_size\r\n b2 = -1 / pixel_size\r\n a2 = 0\r\n b1 = 0\r\n a0 = self.camera.principalPoint[0] / pixel_size\r\n b0 = self.camera.principalPoint[1] / pixel_size\r\n self.__innerOrientationParameters = {'a0': a0, 'a1': a1, 'a2': a2, 'b0': b0, 'b1': b1, 'b2': b2,\r\n 'V': 0, 'sigma0': 0, 'sigmaX': 0}\r\n return {'a0': a0, 'a1': a1, 'a2': a2, 'b0': b0, 'b1': b1, 'b2': b2,\r\n 'V': 0, 'sigma0': 0, 'sigmaX': 0}\r\n else:\r\n\r\n # observation vector\r\n l = np.matrix(imagePoints).flatten('F').T\r\n\r\n # fiducial marks - camera system\r\n fc = self.camera.fiducialMarks\r\n\r\n # A matrix (16X6)\r\n j = len(imagePoints[:, 0])\r\n A = np.zeros((len(l), 6))\r\n for i in range(j):\r\n A[i, 0:3] = np.array([1, fc[i, 0], fc[i, 1]])\r\n A[i + j, 3:] = np.array([1, fc[i, 0], fc[i, 1]])\r\n\r\n # N matrix\r\n N = (A.T).dot(A)\r\n # U vector\r\n U = (A.T).dot(l)\r\n # adjusted variables\r\n X = (np.linalg.inv(N)).dot(U)\r\n # v remainders vector\r\n v = A.dot(X) - l\r\n\r\n # sigma posteriory\r\n u = 6\r\n r = len(l) - u\r\n sigma0 = ((v.T).dot(v)) / r\r\n sigmaX = sigma0[0, 0] * (np.linalg.inv(N))\r\n # update field\r\n self.__innerOrientationParameters = {'a0': X[0, 0], 'a1': X[1, 0], 'a2': X[2, 0], 'b0': X[3, 0],\r\n 'b1': X[4, 0],\r\n 'b2': X[5, 0],\r\n 'V': v, 'sigma0': sigma0[0, 0], 'sigmaX': sigmaX}\r\n\r\n return {'a0': X[0, 0], 'a1': X[1, 0], 'a2': X[2, 0], 'b0': X[3, 0], 'b1': X[4, 0], 'b2': X[5, 0],\r\n 'V': v, 'sigma0': sigma0[0, 0], 'sigmaX': sigmaX}",
"def ComputeGeometricParameters(self):\r\n # algebraic inner orinetation paramters\r\n x = self.__innerOrientationParameters\r\n tx = x['a0']\r\n ty = x['b0']\r\n tetha = np.arctan((x['b1'] / x['b2']))\r\n gamma = np.arctan((x['a1'] * np.sin(tetha) + x['a2'] * np.cos(tetha))\r\n / (x['b1'] * np.sin(tetha) + x['b2'] * np.cos(tetha)))\r\n sx = x['a1'] * np.cos(tetha) - x['a2'] * np.sin(tetha)\r\n sy = (x['a1'] * np.sin(tetha) + x['a2'] * np.cos(tetha)) / (np.sin(gamma))\r\n\r\n return {'translationX': tx, 'translationY': ty, 'rotationAngle': tetha,\r\n 'scaleFactorX': sx, 'scaleFactorY': sy, 'shearAngle': gamma}",
"def ComputeGeometricParameters(self):\n # extracting inner orientation params\n a0 = self.innerOrientationParameters[0]\n b0 = self.innerOrientationParameters[1]\n a1 = self.innerOrientationParameters[2]\n a2 = self.innerOrientationParameters[3]\n b1 = self.innerOrientationParameters[4]\n b2 = self.innerOrientationParameters[5]\n\n # computing algebric params\n tx = a0;\n ty = b0\n theta = np.arctan(b1 / b2)\n gamma = np.arctan((a1 * np.sin(theta) + a2 * np.cos(theta)) / (b1 * np.sin(theta) + b2 * np.cos(theta)))\n sx = a1 * np.cos(theta) - a2 * np.sin(theta)\n sy = (a1 * np.sin(theta) + a2 * np.cos(theta)) / np.sin(gamma)\n\n return {\"translationX\": tx, \"translationY\": ty, \"rotationAngle\": np.rad2deg(theta), \"scaleFactorX\": sx,\n \"scaleFactorY\": sy, \"shearAngle\": np.rad2deg(gamma)}",
"def _get_end_effector_orientation(self):\n orient_quat = p.getLinkState(self.arm, 5, computeForwardKinematics=True)[1]\n orient_euler = p.getEulerFromQuaternion(orient_quat)\n return np.array(orient_euler)",
"def _get_end_effector_orientation(self):\n orient_quat = p.getLinkState(\n self.robot,\n linkIndex=8,\n computeForwardKinematics=True,\n physicsClientId=self.id)[1]\n # orient_euler = p.getEulerFromQuaternion(orient_quat)\n return np.array(orient_quat)",
"def get_orientation(self, visited):\n #print(visited)\n if visited:\n rot = mathutils.Quaternion(self.helical_axisParam, self.get_angle(2))\n a = self.positions[2]\n b = self.positions[3]\n else:\n rot = mathutils.Quaternion(self.helical_axisParam, self.get_angle(0))\n a = self.positions[0]\n b = self.positions[1]\n n = mathutils.Vector(self.east)\n n.rotate(rot)\n return a, b, n",
"def ComputeInverseInnerOrientation(self):\r\n inner = self.__innerOrientationParameters\r\n matrix = np.array([[inner['a1'], inner['a2']], [inner['b1'], inner['b2']]])\r\n # inverse matrix\r\n inv_matrix = np.linalg.inv(matrix)\r\n return {'a0*': -inner['a0'], 'a1*': inv_matrix[0, 0], 'a2*': inv_matrix[0, 1],\r\n 'b0*': -inner['b0'], 'b1*': inv_matrix[1, 0], 'b2*': inv_matrix[1, 1]}",
"def define_orientation_matrix(self):\n from lmfit import Parameters\n p = Parameters()\n for i in range(3):\n for j in range(3):\n p.add('U%d%d' % (i, j), self.Umat[i, j])\n self.init_p = self.Umat\n return p",
"def _get_orientations(self):\n for atom in self.invarioms:\n atom.get_orientation()",
"def _get_xl_orientation_parameterisation(self, experiment_id):\n\n param_set = self._exp_to_param[experiment_id]\n xl_op = None\n if param_set.xl_ori_param is not None:\n xl_op = self._xl_orientation_parameterisations[param_set.xl_ori_param]\n\n return xl_op",
"def exterior_angle(self):\n n = self.__getitem__(2)\n return 2*S.Pi/n",
"def ComputeInverseInnerOrientation(self):\n a0 = self.innerOrientationParameters[0]\n b0 = self.innerOrientationParameters[1]\n a1 = self.innerOrientationParameters[2]\n a2 = self.innerOrientationParameters[3]\n b1 = self.innerOrientationParameters[4]\n b2 = self.innerOrientationParameters[5]\n\n mat = np.array([[a1[0], a2[0]], [b1[0], b2[0]]])\n mat = la.inv(mat)\n\n return np.array([a0[0], b0[0], mat[0, 0], mat[0, 1], mat[1, 0], mat[1, 1]]).T",
"def orientation(self):\n directions = self._directions_of_edges()[0]\n orientation = []\n for C in self.pd_code():\n if C[0] == C[1] or C[2] == C[3]:\n orientation.append(-1)\n elif C[1] == C[2] or C[0] == C[3]:\n orientation.append(1)\n elif directions[C[1]] == C:\n orientation.append(-1)\n else:\n orientation.append(1)\n return orientation",
"def get_orientation_vector(self):\n return rotate_vector([1, 0, 0][:self.get_dim()], self.orientation)",
"def get_motor_params_ext(self):\r\n steps_per_rev = c_double()\r\n gear_box_ratio = c_double()\r\n pitch = c_double()\r\n self.KCube.CC_GetMotorParamsExt(self.serial, byref(steps_per_rev),\r\n byref(gear_box_ratio), byref(pitch))\r\n return steps_per_rev.value, gear_box_ratio.value, pitch.value"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
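The ComputeExteriorOrientation document above repeats the same normal-equation step once before and once inside its while loop. As a minimal standalone sketch of that iteration, assuming generic callables for the design matrix and the predicted observations (gauss_newton, design_fn and predict_fn are illustrative names, not part of the source class):

import numpy as np

def gauss_newton(x0, lb, design_fn, predict_fn, epsilon=1e-6, max_iter=50):
    # Iteratively refine parameters x so that predict_fn(x) approximates lb.
    x = np.asarray(x0, dtype=float)
    for _ in range(max_iter):
        A = design_fn(x)                        # (n, u) design matrix of partials
        l = lb - predict_fn(x)                  # misclosure vector
        dx = np.linalg.solve(A.T @ A, A.T @ l)  # normal-equation correction
        x = x + dx
        if np.linalg.norm(dx) <= epsilon:
            break
    v = predict_fn(x) - lb                      # residuals at the solution
    return x, v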
Transforming ground points to image points | def GroundToImage(self, groundPoints):
X0 = float(self.exteriorOrientationParameters[0])
Y0 = float(self.exteriorOrientationParameters[1])
Z0 = float(self.exteriorOrientationParameters[2])
xp = float(self.camera.principalPoint[0])
yp = float(self.camera.principalPoint[1])
R = self.rotationMatrix
r11 = float(R[0, 0])
r12 = float(R[0, 1])
r13 = float(R[0, 2])
r21 = float(R[1, 0])
r22 = float(R[1, 1])
r23 = float(R[1, 2])
r31 = float(R[2, 0])
r32 = float(R[2, 1])
r33 = float(R[2, 2])
f = self.camera.focalLength
camPoints = []
for i in range(groundPoints.shape[0]):
x = xp - (f) * (((r11 * (groundPoints[i, 0] - X0) + r21 * (groundPoints[i, 1] - Y0) + r31 * (
groundPoints[i, 2] - Z0)) / (r13 * (groundPoints[i, 0] - X0) + r23 * (
groundPoints[i, 1] - Y0) + r33 * (groundPoints[i, 2] - Z0))))
y = yp - (f) * (((r12 * (groundPoints[i, 0] - X0) + r22 * (groundPoints[i, 1] - Y0) + r32 * (
groundPoints[i, 2] - Z0)) / (r13 * (groundPoints[i, 0] - X0) + r23 * (
groundPoints[i, 1] - Y0) + r33 * (groundPoints[i, 2] - Z0))))
camPoints.append([x, y])
# return self.CameraToImage(np.array(camPoints))
return (np.array(camPoints)) | [
"def GroundToImage(self, groundPoints):\r\n X0_1 = self.exteriorOrientationParameters[0]\r\n Y0_1 = self.exteriorOrientationParameters[1]\r\n Z0_1 = self.exteriorOrientationParameters[2]\r\n O1 = np.array([X0_1, Y0_1, Z0_1]).T\r\n R1 = self.RotationMatrix\r\n x1 = np.zeros((len(groundPoints), 1))\r\n y1 = np.zeros((len(groundPoints), 1))\r\n f = self.camera.focalLength\r\n\r\n for i in range(len(groundPoints)):\r\n lamda1 = -f / (np.dot(R1.T[2], (groundPoints[i] - O1).T)) # scale first image\r\n x1[i] = lamda1 * np.dot(R1.T[0], (groundPoints[i] - O1).T)\r\n y1[i] = lamda1 * np.dot(R1.T[1], (groundPoints[i] - O1).T)\r\n camera_points1 = np.vstack([x1.T, y1.T]).T\r\n # img_points1 = self.CameraToImage(camera_points1)\r\n img_points1 = camera_points1\r\n return img_points1",
"def GroundToImage_RzRyRz(self, groundPoints):\n X0 = float(self.exteriorOrientationParameters[0])\n Y0 = float(self.exteriorOrientationParameters[1])\n Z0 = float(self.exteriorOrientationParameters[2])\n\n xp = float(self.camera.principalPoint[0])\n yp = float(self.camera.principalPoint[1])\n\n R = self.rotationMatrix_RzRyRz\n r11 = float(R[0, 0])\n r12 = float(R[0, 1])\n r13 = float(R[0, 2])\n r21 = float(R[1, 0])\n r22 = float(R[1, 1])\n r23 = float(R[1, 2])\n r31 = float(R[2, 0])\n r32 = float(R[2, 1])\n r33 = float(R[2, 2])\n\n f = self.camera.focalLength\n\n camPoints = []\n\n for i in range(groundPoints.shape[0]):\n x = xp - (f) * (((r11 * (groundPoints[i, 0] - X0) + r21 * (groundPoints[i, 1] - Y0) + r31 * (\n groundPoints[i, 2] - Z0)) / (r13 * (groundPoints[i, 0] - X0) + r23 * (\n groundPoints[i, 1] - Y0) + r33 * (groundPoints[i, 2] - Z0))))\n y = yp - (f) * (((r12 * (groundPoints[i, 0] - X0) + r22 * (groundPoints[i, 1] - Y0) + r32 * (\n groundPoints[i, 2] - Z0)) / (r13 * (groundPoints[i, 0] - X0) + r23 * (\n groundPoints[i, 1] - Y0) + r33 * (groundPoints[i, 2] - Z0))))\n\n camPoints.append([x, y])\n\n # return self.CameraToImage(np.array(camPoints))\n return (np.array(camPoints))",
"def inverse_warping(img_initial, img_final, pts_initial, pts_final): \n \n # YOU SHOULDN'T NEED TO CHANGE THIS\n pts_final = pts_final.astype(int)\n \n projected_img = img_initial.copy()\n for i in range(3):\n sub_img_i = img_initial[:,:,i][pts_initial[:,1], pts_initial[:,0]]\n sub_img_f = img_final[:,:,i][pts_final[:,1], pts_final[:,0]]\n \n sub_img = sub_img_i*0.5 + sub_img_f*0.5\n projected_img[:,:,i][pts_initial[:,1], pts_initial[:,0]] = sub_img\n \n return projected_img",
"def transformImage(originalImage, oldPoints, newPoints):\n # originalImage = cv2.cvtColor(originalImage, cv2.COLOR_BGR2GRAY)\n\n newImage = np.full(originalImage.shape, fill_value=255, dtype=np.uint8)\n newImage[newPoints[0], newPoints[1]] = originalImage[oldPoints[0], oldPoints[1]]\n\n return newImage",
"def project(pos,scale):\n # First 3 points of position vector, for the head, collarbone,\n # and tailbone, are not needed in the points vector which\n # only stores the image position of the joints (those three\n # can be calculated from the rest of the joint positions)\n points = np.copy(pos[3:,:2])*scale \n return points",
"def _point_scale2img(points, _H, _W):\n # with tf.variable_scope(\"_point_scale2img\", reuse=False):\n points = points * tf.constant([_H - 1, _W - 1], \"float32\")\n return points",
"def project_2d(img, points, target_size=Dimension(300, 70)):\n # First, bring the points in the right order for projection.\n tmp = order_polygon(points)\n origin_points = [\n tmp[0],\n tmp[3],\n tmp[1],\n tmp[2],\n ]\n origin_points = np.float32(origin_points)\n destination_points = np.float32([\n [0, 0],\n [target_size.width, 0],\n [0, target_size.height],\n [target_size.width, target_size.height]\n ])\n transformation = cv2.getPerspectiveTransform(\n origin_points, destination_points)\n output = cv2.warpPerspective(\n img,\n transformation,\n (target_size.width, target_size.height))\n return output",
"def transform(self, previousimage):",
"def project_onto_image(self, points_in_camera_frame: ARRAY_LIKE, image: int = 0,\n temperature: Real = 0) -> np.ndarray:\n\n _, __, picture_locations = self.get_projections(points_in_camera_frame, image, temperature=temperature)\n\n return picture_locations",
"def project_points_img(points, proj_mat, width, height):\n pixels = proj_mat.dot(points)\n pixels = np.divide(pixels[:2, :], pixels[2, :]).transpose().astype(np.int)\n\n # Remove pixels that are outside the image\n pixels[:, 0] = np.clip(pixels[:, 0], 0, width)\n pixels[:, 1] = np.clip(pixels[:, 1], 0, height)\n # mask_x = (pixels[:, 0] < width) & (pixels[:, 0] > 0)\n # mask_y = (pixels[:, 1] < height) & (pixels[:, 1] > 0)\n\n # # Return the pixels and points that are inside the image\n # pixels = pixels[mask_x & mask_y]\n return pixels",
"def cam2pixel(points_1, proj_rot_2_1, proj_tr_2_1):\n b, _, h, w = points_1.size()\n cam_coords_flat = points_1.reshape(b, 3, -1) # [B, 3, H*W]\n # apply rotation\n if proj_rot_2_1 is not None:\n pcoords = proj_rot_2_1 @ cam_coords_flat\n else:\n pcoords = cam_coords_flat\n # apply translation\n if proj_tr_2_1 is not None:\n pcoords = pcoords + proj_tr_2_1 # [B, 3, H*W]\n\n x = pcoords[:, 0]\n y = pcoords[:, 1]\n z = pcoords[:, 2].clamp(min=1e-3)\n\n x_norm = 2*(x / z) / (w-1) - 1 # Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1) [B, H*W]\n y_norm = 2*(y / z) / (h-1) - 1 # Idem [B, H*W]\n\n coords = torch.stack([x_norm, y_norm], dim=2) # [B, H*W, 2]\n return coords.reshape(b, h, w, 2)",
"def create_point_cloud(self):\n pixels = []\n colors = []\n my_pixels = []\n for j in range(self.height):\n for i in range(self.width):\n depth = self.depth[j, i]\n pixels.append(\n [i * depth, j * depth, depth]\n )\n my_pixels.append(\n [i, j, 1]\n )\n # make rgb with flip()\n colors.append(np.flip(self.bgr[j, i, :]))\n # colors.append(self.bgr[j, i, :])\n self.my_pixels = my_pixels\n pixels = np.array(pixels)\n\n # project pixels to camera space\n self.xyz_points = self.intrinsics_inv @ np.transpose(pixels)\n self.color_points = colors\n\n # now add 1s to the points for homogenous coordinates\n num_points = self.get_num_xyz_points()\n ones = np.ones((1, num_points))\n self.xyzw_points = np.concatenate((self.xyz_points, ones), axis=0)\n\n self.scene = None\n self.camera_pose = None\n self.nm = None\n self.nl = None\n self.nc = None\n self.create_mesh()",
"def project_to_image(pts_3d, P):\n n = pts_3d.shape[0]\n pts_3d_extend = np.hstack((pts_3d, np.ones((n, 1))))\n # print(('pts_3d_extend shape: ', pts_3d_extend.shape))\n pts_2d = np.dot(pts_3d_extend, np.transpose(P)) # nx3\n pts_2d[:, 0] /= pts_2d[:, 2]\n pts_2d[:, 1] /= pts_2d[:, 2]\n return pts_2d[:, 0:2]",
"def imageTransform(self):\n ims = self.imageShape\n acs = self.activeShape\n dx = self.colVector\n dy = self.rowVector\n\n p0 = self.activeOrigin\n p1 = p0 + acs[2] * dx\n p2 = p0 + acs[1] * dy\n\n # print p0, p1, p2\n # print acs, dx, dy\n\n localPts = list(map(pg.Vector, [[0,0], [ims[2],0], [0,ims[1]], [0,0,1]])) # w and h of data of image in pixels.\n globalPts = list(map(pg.Vector, [p0, p1, p2, [0,0,1]]))\n m = pg.solve3DTransform(localPts, globalPts)\n m[:,2] = m[:,3]\n m[2] = m[3]\n m[2,2] = 1\n tr = Qt.QTransform(*m[:3,:3].transpose().reshape(9))\n return tr",
"def process_warp(src_img, result_img: np.zeros,\n tri_affines: np.matrix, dst_points: np.array,\n delaunay) -> None:\n roi_coords = grid_coordinates(dst_points)\n # indices to vertices. -1 if pixel is not in any triangle\n roi_tri_indices = delaunay.find_simplex(roi_coords)\n\n for simplex in enumerate(delaunay.simplices):\n coords = roi_coords[roi_tri_indices == simplex[0]]\n num_coords = len(coords)\n out_coords = np.dot(tri_affines[simplex[0]],\n np.vstack((coords.T, np.ones(num_coords))))\n x, y = coords.T\n result_img[y, x] = bilinear_interpolate(src_img, out_coords)\n\n return None",
"def project_pc_to_image(points, resolution=64):\n img = []\n for i in range(3):\n canvas = np.zeros((resolution, resolution))\n axis = [0, 1, 2]\n axis.remove(i)\n proj_points = (points[:, axis] + 1) / 2 * resolution\n proj_points = proj_points.astype(np.int)\n canvas[proj_points[:, 0], proj_points[:, 1]] = 1\n img.append(canvas)\n img = np.concatenate(img, axis=1)\n return img",
"def proj(self, X, G):",
"def paste_image2(src, dst, dst_points, border=20, trans_type='similarity',\n elastic=True, border_kernel='cubic'):\n # average positions of face points\n # tranform\n t = compute_similarity_transform\n if trans_type == 'affine':\n t = compute_affine_transform\n patch_h, patch_w, _ = src.shape\n patch_points = []\n patch_points.append([0, 0])\n patch_points.append([patch_w - 1, 0])\n patch_points.append([patch_w - 1, patch_h - 1])\n patch_points.append([0, patch_h - 1])\n # 0. generate mask\n mask = make_rect_mask(patch_w, patch_h, border, elastic=elastic, kernel=border_kernel)\n # 1. crop dst image\n d2s = t(dst_points, patch_points)\n dst_patch = cv2.warpAffine(dst, d2s, (patch_w, patch_h), borderMode=cv2.BORDER_REFLECT)\n # 2. blend\n blend = mask * src + (1.0 - mask) * dst_patch\n np.clip(blend, 0, 255.0)\n blend = blend.astype(np.uint8)\n # cv2.imshow('src', src)\n # cv2.imshow('d_patch', dst_patch)\n cv2.imshow('blend', blend)\n s2d = t(patch_points, dst_points)\n # cvZero(dst_patch)\n cv2.warpAffine(dst_patch, s2d, (dst.shape[1], dst.shape[0]),\n dst=dst, borderMode=cv2.BORDER_TRANSPARENT)\n return dst",
"def geo_transform(self):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Transforming ground points to image points | def GroundToImage_RzRyRz(self, groundPoints):
X0 = float(self.exteriorOrientationParameters[0])
Y0 = float(self.exteriorOrientationParameters[1])
Z0 = float(self.exteriorOrientationParameters[2])
xp = float(self.camera.principalPoint[0])
yp = float(self.camera.principalPoint[1])
R = self.rotationMatrix_RzRyRz
r11 = float(R[0, 0])
r12 = float(R[0, 1])
r13 = float(R[0, 2])
r21 = float(R[1, 0])
r22 = float(R[1, 1])
r23 = float(R[1, 2])
r31 = float(R[2, 0])
r32 = float(R[2, 1])
r33 = float(R[2, 2])
f = self.camera.focalLength
camPoints = []
for i in range(groundPoints.shape[0]):
x = xp - (f) * (((r11 * (groundPoints[i, 0] - X0) + r21 * (groundPoints[i, 1] - Y0) + r31 * (
groundPoints[i, 2] - Z0)) / (r13 * (groundPoints[i, 0] - X0) + r23 * (
groundPoints[i, 1] - Y0) + r33 * (groundPoints[i, 2] - Z0))))
y = yp - (f) * (((r12 * (groundPoints[i, 0] - X0) + r22 * (groundPoints[i, 1] - Y0) + r32 * (
groundPoints[i, 2] - Z0)) / (r13 * (groundPoints[i, 0] - X0) + r23 * (
groundPoints[i, 1] - Y0) + r33 * (groundPoints[i, 2] - Z0))))
camPoints.append([x, y])
# return self.CameraToImage(np.array(camPoints))
return (np.array(camPoints)) | [
"def GroundToImage(self, groundPoints):\r\n X0_1 = self.exteriorOrientationParameters[0]\r\n Y0_1 = self.exteriorOrientationParameters[1]\r\n Z0_1 = self.exteriorOrientationParameters[2]\r\n O1 = np.array([X0_1, Y0_1, Z0_1]).T\r\n R1 = self.RotationMatrix\r\n x1 = np.zeros((len(groundPoints), 1))\r\n y1 = np.zeros((len(groundPoints), 1))\r\n f = self.camera.focalLength\r\n\r\n for i in range(len(groundPoints)):\r\n lamda1 = -f / (np.dot(R1.T[2], (groundPoints[i] - O1).T)) # scale first image\r\n x1[i] = lamda1 * np.dot(R1.T[0], (groundPoints[i] - O1).T)\r\n y1[i] = lamda1 * np.dot(R1.T[1], (groundPoints[i] - O1).T)\r\n camera_points1 = np.vstack([x1.T, y1.T]).T\r\n # img_points1 = self.CameraToImage(camera_points1)\r\n img_points1 = camera_points1\r\n return img_points1",
"def GroundToImage(self, groundPoints):\n X0 = float(self.exteriorOrientationParameters[0])\n Y0 = float(self.exteriorOrientationParameters[1])\n Z0 = float(self.exteriorOrientationParameters[2])\n\n xp = float(self.camera.principalPoint[0])\n yp = float(self.camera.principalPoint[1])\n\n R = self.rotationMatrix\n r11 = float(R[0, 0])\n r12 = float(R[0, 1])\n r13 = float(R[0, 2])\n r21 = float(R[1, 0])\n r22 = float(R[1, 1])\n r23 = float(R[1, 2])\n r31 = float(R[2, 0])\n r32 = float(R[2, 1])\n r33 = float(R[2, 2])\n\n f = self.camera.focalLength\n\n camPoints = []\n\n for i in range(groundPoints.shape[0]):\n x = xp - (f) * (((r11 * (groundPoints[i, 0] - X0) + r21 * (groundPoints[i, 1] - Y0) + r31 * (\n groundPoints[i, 2] - Z0)) / (r13 * (groundPoints[i, 0] - X0) + r23 * (\n groundPoints[i, 1] - Y0) + r33 * (groundPoints[i, 2] - Z0))))\n y = yp - (f) * (((r12 * (groundPoints[i, 0] - X0) + r22 * (groundPoints[i, 1] - Y0) + r32 * (\n groundPoints[i, 2] - Z0)) / (r13 * (groundPoints[i, 0] - X0) + r23 * (\n groundPoints[i, 1] - Y0) + r33 * (groundPoints[i, 2] - Z0))))\n\n camPoints.append([x, y])\n\n # return self.CameraToImage(np.array(camPoints))\n return (np.array(camPoints))",
"def inverse_warping(img_initial, img_final, pts_initial, pts_final): \n \n # YOU SHOULDN'T NEED TO CHANGE THIS\n pts_final = pts_final.astype(int)\n \n projected_img = img_initial.copy()\n for i in range(3):\n sub_img_i = img_initial[:,:,i][pts_initial[:,1], pts_initial[:,0]]\n sub_img_f = img_final[:,:,i][pts_final[:,1], pts_final[:,0]]\n \n sub_img = sub_img_i*0.5 + sub_img_f*0.5\n projected_img[:,:,i][pts_initial[:,1], pts_initial[:,0]] = sub_img\n \n return projected_img",
"def transformImage(originalImage, oldPoints, newPoints):\n # originalImage = cv2.cvtColor(originalImage, cv2.COLOR_BGR2GRAY)\n\n newImage = np.full(originalImage.shape, fill_value=255, dtype=np.uint8)\n newImage[newPoints[0], newPoints[1]] = originalImage[oldPoints[0], oldPoints[1]]\n\n return newImage",
"def project(pos,scale):\n # First 3 points of position vector, for the head, collarbone,\n # and tailbone, are not needed in the points vector which\n # only stores the image position of the joints (those three\n # can be calculated from the rest of the joint positions)\n points = np.copy(pos[3:,:2])*scale \n return points",
"def _point_scale2img(points, _H, _W):\n # with tf.variable_scope(\"_point_scale2img\", reuse=False):\n points = points * tf.constant([_H - 1, _W - 1], \"float32\")\n return points",
"def project_2d(img, points, target_size=Dimension(300, 70)):\n # First, bring the points in the right order for projection.\n tmp = order_polygon(points)\n origin_points = [\n tmp[0],\n tmp[3],\n tmp[1],\n tmp[2],\n ]\n origin_points = np.float32(origin_points)\n destination_points = np.float32([\n [0, 0],\n [target_size.width, 0],\n [0, target_size.height],\n [target_size.width, target_size.height]\n ])\n transformation = cv2.getPerspectiveTransform(\n origin_points, destination_points)\n output = cv2.warpPerspective(\n img,\n transformation,\n (target_size.width, target_size.height))\n return output",
"def transform(self, previousimage):",
"def project_onto_image(self, points_in_camera_frame: ARRAY_LIKE, image: int = 0,\n temperature: Real = 0) -> np.ndarray:\n\n _, __, picture_locations = self.get_projections(points_in_camera_frame, image, temperature=temperature)\n\n return picture_locations",
"def project_points_img(points, proj_mat, width, height):\n pixels = proj_mat.dot(points)\n pixels = np.divide(pixels[:2, :], pixels[2, :]).transpose().astype(np.int)\n\n # Remove pixels that are outside the image\n pixels[:, 0] = np.clip(pixels[:, 0], 0, width)\n pixels[:, 1] = np.clip(pixels[:, 1], 0, height)\n # mask_x = (pixels[:, 0] < width) & (pixels[:, 0] > 0)\n # mask_y = (pixels[:, 1] < height) & (pixels[:, 1] > 0)\n\n # # Return the pixels and points that are inside the image\n # pixels = pixels[mask_x & mask_y]\n return pixels",
"def cam2pixel(points_1, proj_rot_2_1, proj_tr_2_1):\n b, _, h, w = points_1.size()\n cam_coords_flat = points_1.reshape(b, 3, -1) # [B, 3, H*W]\n # apply rotation\n if proj_rot_2_1 is not None:\n pcoords = proj_rot_2_1 @ cam_coords_flat\n else:\n pcoords = cam_coords_flat\n # apply translation\n if proj_tr_2_1 is not None:\n pcoords = pcoords + proj_tr_2_1 # [B, 3, H*W]\n\n x = pcoords[:, 0]\n y = pcoords[:, 1]\n z = pcoords[:, 2].clamp(min=1e-3)\n\n x_norm = 2*(x / z) / (w-1) - 1 # Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1) [B, H*W]\n y_norm = 2*(y / z) / (h-1) - 1 # Idem [B, H*W]\n\n coords = torch.stack([x_norm, y_norm], dim=2) # [B, H*W, 2]\n return coords.reshape(b, h, w, 2)",
"def create_point_cloud(self):\n pixels = []\n colors = []\n my_pixels = []\n for j in range(self.height):\n for i in range(self.width):\n depth = self.depth[j, i]\n pixels.append(\n [i * depth, j * depth, depth]\n )\n my_pixels.append(\n [i, j, 1]\n )\n # make rgb with flip()\n colors.append(np.flip(self.bgr[j, i, :]))\n # colors.append(self.bgr[j, i, :])\n self.my_pixels = my_pixels\n pixels = np.array(pixels)\n\n # project pixels to camera space\n self.xyz_points = self.intrinsics_inv @ np.transpose(pixels)\n self.color_points = colors\n\n # now add 1s to the points for homogenous coordinates\n num_points = self.get_num_xyz_points()\n ones = np.ones((1, num_points))\n self.xyzw_points = np.concatenate((self.xyz_points, ones), axis=0)\n\n self.scene = None\n self.camera_pose = None\n self.nm = None\n self.nl = None\n self.nc = None\n self.create_mesh()",
"def project_to_image(pts_3d, P):\n n = pts_3d.shape[0]\n pts_3d_extend = np.hstack((pts_3d, np.ones((n, 1))))\n # print(('pts_3d_extend shape: ', pts_3d_extend.shape))\n pts_2d = np.dot(pts_3d_extend, np.transpose(P)) # nx3\n pts_2d[:, 0] /= pts_2d[:, 2]\n pts_2d[:, 1] /= pts_2d[:, 2]\n return pts_2d[:, 0:2]",
"def imageTransform(self):\n ims = self.imageShape\n acs = self.activeShape\n dx = self.colVector\n dy = self.rowVector\n\n p0 = self.activeOrigin\n p1 = p0 + acs[2] * dx\n p2 = p0 + acs[1] * dy\n\n # print p0, p1, p2\n # print acs, dx, dy\n\n localPts = list(map(pg.Vector, [[0,0], [ims[2],0], [0,ims[1]], [0,0,1]])) # w and h of data of image in pixels.\n globalPts = list(map(pg.Vector, [p0, p1, p2, [0,0,1]]))\n m = pg.solve3DTransform(localPts, globalPts)\n m[:,2] = m[:,3]\n m[2] = m[3]\n m[2,2] = 1\n tr = Qt.QTransform(*m[:3,:3].transpose().reshape(9))\n return tr",
"def process_warp(src_img, result_img: np.zeros,\n tri_affines: np.matrix, dst_points: np.array,\n delaunay) -> None:\n roi_coords = grid_coordinates(dst_points)\n # indices to vertices. -1 if pixel is not in any triangle\n roi_tri_indices = delaunay.find_simplex(roi_coords)\n\n for simplex in enumerate(delaunay.simplices):\n coords = roi_coords[roi_tri_indices == simplex[0]]\n num_coords = len(coords)\n out_coords = np.dot(tri_affines[simplex[0]],\n np.vstack((coords.T, np.ones(num_coords))))\n x, y = coords.T\n result_img[y, x] = bilinear_interpolate(src_img, out_coords)\n\n return None",
"def project_pc_to_image(points, resolution=64):\n img = []\n for i in range(3):\n canvas = np.zeros((resolution, resolution))\n axis = [0, 1, 2]\n axis.remove(i)\n proj_points = (points[:, axis] + 1) / 2 * resolution\n proj_points = proj_points.astype(np.int)\n canvas[proj_points[:, 0], proj_points[:, 1]] = 1\n img.append(canvas)\n img = np.concatenate(img, axis=1)\n return img",
"def proj(self, X, G):",
"def paste_image2(src, dst, dst_points, border=20, trans_type='similarity',\n elastic=True, border_kernel='cubic'):\n # average positions of face points\n # tranform\n t = compute_similarity_transform\n if trans_type == 'affine':\n t = compute_affine_transform\n patch_h, patch_w, _ = src.shape\n patch_points = []\n patch_points.append([0, 0])\n patch_points.append([patch_w - 1, 0])\n patch_points.append([patch_w - 1, patch_h - 1])\n patch_points.append([0, patch_h - 1])\n # 0. generate mask\n mask = make_rect_mask(patch_w, patch_h, border, elastic=elastic, kernel=border_kernel)\n # 1. crop dst image\n d2s = t(dst_points, patch_points)\n dst_patch = cv2.warpAffine(dst, d2s, (patch_w, patch_h), borderMode=cv2.BORDER_REFLECT)\n # 2. blend\n blend = mask * src + (1.0 - mask) * dst_patch\n np.clip(blend, 0, 255.0)\n blend = blend.astype(np.uint8)\n # cv2.imshow('src', src)\n # cv2.imshow('d_patch', dst_patch)\n cv2.imshow('blend', blend)\n s2d = t(patch_points, dst_points)\n # cvZero(dst_patch)\n cv2.warpAffine(dst_patch, s2d, (dst.shape[1], dst.shape[0]),\n dst=dst, borderMode=cv2.BORDER_TRANSPARENT)\n return dst",
"def geo_transform(self):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
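Both GroundToImage variants above expand the collinearity equations element by element for each ground point. A vectorized restatement is sketched below, assuming the rotation matrix R, camera centre X0, focal length f and principal point pp are passed in explicitly (collinearity_project is an illustrative name, not part of the source class):

import numpy as np

def collinearity_project(ground_points, R, X0, f, pp):
    # Project (n, 3) ground points to (n, 2) image-plane points.
    d = ground_points - X0            # vectors from the camera centre
    num_x = d @ R[:, 0]               # r11*dX + r21*dY + r31*dZ
    num_y = d @ R[:, 1]               # r12*dX + r22*dY + r32*dZ
    den = d @ R[:, 2]                 # r13*dX + r23*dY + r33*dZ
    x = pp[0] - f * num_x / den
    y = pp[1] - f * num_y / den
    return np.column_stack([x, y])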
Transforms Image point to a Ray in world system | def ImageToRay(self, imagePoints):
        pass # delete after implementation | [
"def _cast_ray(self, point):\n\n ray_direction_cam_frame = self.K_inv @ np.hstack([point[0], point[1], 1])\n point_on_image_plane_world_frame = self.C2W @ np.hstack([ray_direction_cam_frame, 1])\n point_on_image_plane_world_frame = point_on_image_plane_world_frame / point_on_image_plane_world_frame[3]\n\n # p1 - point in camera center, p2 - point on image plane\n p1 = self.position\n p2 = point_on_image_plane_world_frame[:2]\n color = (0, 0, 0)\n for wall in self.environment.map.walls:\n # q1, q2 - wall vertices\n q1 = wall.vertex1\n q2 = wall.vertex2\n t = intersect_ray_segment(p1, p2, q1, q2)\n if t is not None:\n # Check that point is in front of the camera\n intersection_point = q1 + t * (q2 - q1)\n direction = np.dot(p2 - p1, intersection_point - p2)\n if direction > 0:\n color = wall.get_color_at(t)\n return color",
"def project_to_image_plane(self, point_in_world):\n\n fx = self.config['camera_info']['focal_length_x']\n fy = self.config['camera_info']['focal_length_y']\n image_width = self.config['camera_info']['image_width']\n image_height = self.config['camera_info']['image_height']\n\n # get transform between pose of camera and world frame\n trans = None\n try:\n now = rospy.Time.now()\n self.listener.waitForTransform(\"/base_link\",\n \"/world\", now, rospy.Duration(1.0))\n (trans, rot) = self.listener.lookupTransform(\"/base_link\",\n \"/world\", now)\n\n except (tf.Exception, tf.LookupException, tf.ConnectivityException):\n rospy.logerr(\"Failed to find camera to map transform\")\n\n #TODO Use tranform and rotation to calculate 2D position of light in image\n\n x = 0\n y = 0\n\n return (x, y)",
"def project_to_image_plane(self, point_in_world):\n\n fx = self.config['camera_info']['focal_length_x']\n fy = self.config['camera_info']['focal_length_y']\n image_width = self.config['camera_info']['image_width']\n image_height = self.config['camera_info']['image_height']\n\n # get transform between pose of camera and world frame\n trans = None\n try:\n now = rospy.Time.now()\n self.listener.waitForTransform(\"/base_link\",\n \"/world\", now, rospy.Duration(1.0))\n (trans, rot) = self.listener.lookupTransform(\"/base_link\",\n \"/world\", now)\n\n except (tf.Exception, tf.LookupException, tf.ConnectivityException):\n rospy.logerr(\"Failed to find camera to map transform\")\n\n #TODO Use tranform and rotation to calculate 2D position of light in image\n # KB 10Oct2017\n # commenting line below, and using dummy values, so that tf_classifier.py can be tested \n # x, y = self.project_to_image_plane(light.pose.pose.position)\n x = 0\n y = 0\n\n return (x, y)",
"def ImageToRay(self, imagePoints):\r\n pass # delete after implementations\r",
"def project_to_image_plane(self, point_in_world):\n\n fx = self.config['camera_info']['focal_length_x']\n fy = self.config['camera_info']['focal_length_y']\n image_width = self.config['camera_info']['image_width']\n image_height = self.config['camera_info']['image_height']\n\n # get principal point (center of image)\n cx = image_width / 2\n cy = image_height / 2\n\n # get transform between pose of camera and world frame\n trans = None\n rot = None\n try:\n now = rospy.Time.now()\n self.listener.waitForTransform(\"/base_link\",\n \"/world\", now, rospy.Duration(1.0))\n (trans, rot) = self.listener.lookupTransform(\"/base_link\",\n \"/world\", now)\n\n except (tf.Exception, tf.LookupException, tf.ConnectivityException):\n rospy.logerr(\"Failed to find camera to map transform\")\n\n # Use tranform and rotation to calculate 2D position of light in image\n\n # create an numpy array containing the 3D world point\n object_point = np.array([[point_in_world.x, point_in_world.y, point_in_world.z]])\n # convert the quaternion returned from lookupTransform into euler rotation\n (roll,pitch,yaw) = tf.transformations.euler_from_quaternion(rot)\n rvec = np.array([roll,pitch,yaw])\n tvec = np.array(trans)\n # create the camera matrix from the focal lengths and principal point\n camera_matrix = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])\n # distortion coefficients - currently not available but per slack will be published soon\n dist_coeffs = None\n # use OpenCv projectPoints to find the corresponding point in image from 3D world point\n img_point, _ = cv2.projectPoints(object_point, rvec, tvec, camera_matrix, dist_coeffs)\n # cast to int to get a pixel value\n pixels = np.int32(img_point).reshape(-1,2)\n x = pixels[0][0]\n y = pixels[0][1]\n\n return (x, y)",
"def project_to_image_plane(self, point_in_world, timestamp):\n\n camera_info = CameraInfo()\n\n fx = self.config['camera_info']['focal_length_x']\n fy = self.config['camera_info']['focal_length_y']\n\n camera_info.width = self.config['camera_info']['image_width']\n camera_info.height = self.config['camera_info']['image_height']\n\n #print(\"fx {}, fy {}\".format(fx, fy))\n\n camera_info.K = np.array([[fx, 0, camera_info.width / 2],\n [0, fy, camera_info.height / 2],\n [0, 0, 1.]], dtype=np.float32)\n camera_info.P = np.array([[fx, 0, camera_info.width / 2, 0],\n [0, fy, camera_info.height / 2, 0],\n [0, 0, 1., 0]])\n camera_info.R = np.array([[1., 0, 0],\n [0, 1., 0],\n [0, 0, 1.]], dtype=np.float32)\n\n camera = PinholeCameraModel()\n camera.fromCameraInfo(camera_info)\n\n #print(\"point_in_world = {}\".format(str(point_in_world)))\n #print(\"camera projection matrix \", camera.P)\n\n # get transform between pose of camera and world frame\n trans = None\n point_in_camera_space = None\n point_in_image = None\n bbox_points_camera_image = []\n\n euler_transforms = (\n math.radians(90), # roll along X to force Y axis 'up'\n math.radians(-90 + -.75), # pitch along Y to force X axis towards 'right', with slight adjustment for camera's 'yaw'\n math.radians(-9) # another roll to orient the camera slightly 'upwards', (camera's 'pitch')\n )\n euler_axes = 'sxyx'\n\n try:\n self.listener.waitForTransform(\"/base_link\",\n \"/world\", timestamp, rospy.Duration(0.1))\n (trans, rot) = self.listener.lookupTransform(\"/base_link\",\n \"/world\", timestamp)\n\n camera_orientation_adj = tf.transformations.quaternion_from_euler(*euler_transforms, axes=euler_axes)\n\n trans_matrix = self.listener.fromTranslationRotation(trans, rot)\n camera_orientation_adj = self.listener.fromTranslationRotation((0, 0, 0), camera_orientation_adj)\n\n #print(\"trans {}, rot {}\".format(trans, rot))\n #print(\"transform matrix {}\".format(trans_matrix))\n\n point = np.array([point_in_world.x, point_in_world.y, point_in_world.z, 1.0])\n\n # this point should match what you'd see from being inside the vehicle looking straight ahead.\n point_in_camera_space = trans_matrix.dot(point)\n\n #print(\"point in camera frame {}\".format(point_in_camera_space))\n\n final_trans_matrix = camera_orientation_adj.dot(trans_matrix)\n\n # this point is from the view point of the camera (oriented along the camera's rotation quaternion)\n point_in_camera_space = final_trans_matrix.dot(point)\n\n #print(\"point in camera frame adj {}\".format(point_in_camera_space))\n\n bbox_points = [(point_in_camera_space[0] - 0.5, point_in_camera_space[1] - 1.1, point_in_camera_space[2], 1.0),\n (point_in_camera_space[0] + 0.5, point_in_camera_space[1] + 1.1, point_in_camera_space[2], 1.0),\n (point_in_camera_space[0] - 0.5, point_in_camera_space[1] - 1.1, point_in_camera_space[2], 1.0),\n (point_in_camera_space[0] + 0.5, point_in_camera_space[1] + 1.1, point_in_camera_space[2], 1.0)]\n\n # these points represent the bounding box within the camera's image\n for p in bbox_points:\n bbox_points_camera_image.append(camera.project3dToPixel(p))\n\n # print(\"point in image {}\".format(bbox_points_camera_image))\n\n except (tf.Exception, tf.LookupException, tf.ConnectivityException):\n rospy.logerr(\"Failed to find camera to map transform\")\n\n return bbox_points_camera_image",
"def transform_to_world(self, line, flag):\n\tif flag == \"right\":\n\t info = self.rightInfo\n\telif flag == \"left\":\n\t info = self.leftInfo\n\tx1 = line[0]\n\ty1 = line[1]\n\tx2 = line[2]\n\ty2 = line[3]\n\t#print \"points\"\n\t#print x1, y1\n\t#print x2, y2\n\tpinhole_model = image_geometry.PinholeCameraModel()\n\tpinhole_model.fromCameraInfo(info)\t\n\tray1 = pinhole_model.projectPixelTo3dRay((x1, y1))\n\tray2 = pinhole_model.projectPixelTo3dRay((x2, y2))\n\t#print flag\n\t#print \"rays\"\n\t#print ray1\n\t#print ray2\n\t#plane_normal = np.cross(ray1, ray2)\t\n \tplane_normal = self.cross_product(ray1, ray2)\n\tnorm = ((plane_normal[0])**2+(plane_normal[1])**2+(plane_normal[2])**2)**0.5\n \tnormalized = []\n\tfor i in range(0, len(plane_normal)):\n\t normalized.append(plane_normal[i]/norm)\n\t#print \"PLANE NORMAL\"\n\t#print normalized\n\treturn (normalized, (0,0,0))",
"def localize_pixel(img_pos,camera : Camera,lidar : Lidar, scan : LaserScan) -> tuple:\n\n # ---OBJ--\n # x r1 /\\ r2 x\n # / \\\n #cam_ray / \\ average_ray\n # / \\\n # / \\\n # CAM ----> LID\n # \n\n # has to be 2d\n assert (img_pos.size == 2)\n\n cam_ray = camera.get_ray_through_image(img_pos)\n\n cam_ray_robot = camera.get_ray_in_robot_frame(cam_ray)\n\n cam_ray_lidar = lidar.get_ray_in_lidar_frame(cam_ray_robot)\n\n # flatten camera ray\n cam_ray_lidar_flat = lidar.get_ray_projection(cam_ray_lidar)\n\n # figure out which lidar rays correspond to the camera ray\n (ray1,ray2) = lidar.get_corresponding_lidar_rays(cam_ray_lidar_flat,scan)\n\n # if no rays found corresponding to scan data\n if ray1 is None or ray2 is None:\n return (None,None)\n\n # get the normal to the lidar hit\n intersection_normal = lidar.get_normal_to_plane(ray1,ray2)\n\n # get the distance data in horizontal plane, from lidar to object\n lidar_to_target_length = lidar.get_camera_ray_length(cam_ray_lidar_flat,ray1,ray2)\n\n # get the vector from camera to lidar (flattened to lidar plane)\n # i.e. origin of lidar frame in camera frame\n lidar_to_cam_vec = cam_ray_lidar_flat.origin\n cam_to_lidar_flat = Ray(lidar_to_cam_vec,-lidar_to_cam_vec,np.linalg.norm(lidar_to_cam_vec))\n \n # now workout the lidar to object ray, i.e. interpolate between ray1's and ray2's tips\n lidar_to_object_flat = interpolated_ray(ray1,ray2,0.5,lidar_to_target_length)\n\n # now finally workout the vector from camera to object (flattened)\n # this lets us access the true z-distance in the camera\n cam_to_object_flat = lidar_to_object_flat.get_vec() + cam_to_lidar_flat.get_vec()\n \n cam_to_object_flat_length = np.linalg.norm(cam_to_object_flat)\n\n # angle from horizontal on camera ray\n cam_ray_theta = angle_between(cam_ray_lidar.get_vec(),cam_to_object_flat)\n\n # length of original camera ray (knowing the length of its projection)\n # will fail if ray is pointing straight up or down\n cam_ray_robot.length = cam_to_object_flat_length / math.cos(cam_ray_theta)\n\n\n object_robot = cam_ray_robot.get_vec()+cam_ray_robot.origin\n\n return (object_robot,intersection_normal)",
"def transform(self, M):\n\n return Ray(M * self.origin, M * self.direction)",
"def scanner_to_world(pose, point):\n pass",
"def model_image_projection(self):\n point_world = PointStamped()\n point_world.header.frame_id = 'world'\n resp = self._get_model_srv('block', 'world')\n point_world.header.stamp = rospy.Time.now()\n point_world.point = resp.pose.position\n\n point_camera = self._get_coords_transform(point_world, 'camera1')\n position = point_camera.pointOut.point\n projected = self._get_image_projection(position.x,\n position.y,\n position.z)\n\n return projected",
"def rays(self):\n pixels = np.array([\n [u, v, 1.]\n for u, v in product(range(self.width), range(self.height))\n ], dtype=np.int32).T\n rays = project(self.camera.P_pinv, pixels)\n\n return self._camera.center, rays.T",
"def ray(self, pixel):\n # Ensure pixel is in homogenous coordinates\n if len(pixel) == 2:\n pixel = np.vstack((pixel, [1]))\n\n ray = project(self._camera.P_pinv, pixel.astype(np.float32))\n assert ray.shape == (4, 1)\n\n return self._camera.center, ray",
"def locationFromRaycast(ob, point, direction):\n\tob_mw = ob.matrix_world\n\tob_mwi = ob_mw.inverted()\n\tpoint_rel = ob_mwi * point\n\thit, loc, norm, face = ob.ray_cast(point_rel, direction)\n\tif loc == Vector((0.0, 0.0, 0.0)):\n\t\tdirection = direction * (-1) # the plane can move in both directions\n\t\thit, loc, norm, face = ob.ray_cast(point_rel, direction)\n\tloc_world = ob_mw * loc\n\treturn loc_world",
"def camera_transform(image):\n img = np.zeros((image.shape[0], image.shape[1], 3))\n for y in range(image.shape[0]):\n for x in range(image.shape[1]):\n img[y][x] = (x - 320) / 575.5 * image[y, x], (240 - y) / 575.5 * image[y, x], image[\n y, x]\n return img",
"def frusrum_ray(self, param_x, param_y):\n l, r, b, t, n, f = self.body.dim\n # convert normalized into near frustum space\n sm = ScaleMat(x=r - l, y=t - b)\n # .5 to compensate origin difference between OpenGL space and pane space\n offset = MoveMat(-.5, -.5, -n)\n frustum_point = sm * offset * Pnt(x=param_x, y=param_y, z=0)\n ray = gt.Ray([0, 0, 0], frustum_point.xyz)\n return self.tripod.plane.TM * ray",
"def test_compute_pixel_rays() -> None:\n u = 12\n v = 2\n img_w = 20\n img_h = 10\n fx = 10\n fy = 10\n\n ray_dir = _compute_pixel_ray_direction(u, v, fx, fy, img_w, img_h)\n\n gt_ray_dir: NDArrayFloat = np.array([2.0, -3.0, 10.0])\n gt_ray_dir /= np.linalg.norm(gt_ray_dir)\n\n assert np.allclose(gt_ray_dir, ray_dir)",
"def cameraToWorld(self, p):\n result = self.camPos\n result += p[2] * self.camZ # result is now in the middle of the view-plane\n result += p[0] * self.camX # result is now in the middle-left of the view-plane\n result += p[1] * self.camY # result is now the world-space equivalent of p\n return result",
"def project_z(r, x, y):\n\td = np.sqrt(x*x + y*y)\n\tif (d < r * 0.70710678118654752440): # Inside sphere\n\t\tz = np.sqrt(r*r - d*d)\n\telse: # On hyperbola\n\t\tt = r / 1.41421356237309504880\n\t\tz = t*t / d\n\treturn z"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generating a grid of points biased by ppa (principal point delta) | def GeneratePointsImg(self, n, ppa):
x = np.linspace(0,self.camera.sensorSize,n)+ppa[0]
y = np.linspace(0,self.camera.sensorSize,n)+ppa[1]
return np.meshgrid(x, y) | [
"def sample_pareto_from_isometric_normal(\n n_points, dimension, center, random_state\n ):\n X = random_state.randn(n_points, dimension)\n Y = pareto_front(X)\n return X + center, Y",
"def _generate_p(self):\n self._values, weights = zip(*self._weights.items())\n cumsum = list(itertools.accumulate(weights))\n total = cumsum[-1]\n self._p = [i / total for i in cumsum]",
"def generate_regular_pyramid_grid(self):\n z = np.arange(0, self.sensor_range+self.resolution, self.resolution)\n points_in_pyramid = np.zeros((0,3))\n\n for zz in z: \n xmax = zz*np.tan(self.cone_angle_x/2); ymax = zz*np.tan(self.cone_angle_y/2)\n NumberOfPointsX = int(2*xmax/self.resolution)+3\n NumberOfPointsY = int(2*ymax/self.resolution)+3\n \n x = np.linspace(-xmax, xmax, NumberOfPointsX)\n y = np.linspace(-ymax, ymax, NumberOfPointsY)\n xx, yy = np.meshgrid(x, y)\n xface = xx.ravel(); yface = yy.ravel()\n zface = np.ones(len(xface))* zz\n \n Pgrid = np.zeros((len(xface),3))\n Pgrid[:,0] = xface\n Pgrid[:,1] = yface\n Pgrid[:,2] = zface\n points_in_pyramid = np.concatenate((points_in_pyramid, Pgrid), 0)\n #for j in range(len(points_in_pyramid)): \n # f7 = open('points_in_cone.txt', 'a')\n # f7.write('%s, %s, %s, %s\\n' %(self.RHP_time, points_in_pyramid[j][0], points_in_pyramid[j][1], points_in_pyramid[j][2]))\n return points_in_pyramid",
"def find_a(a, p):\n t = 0\n while jacobi(t**2 - a, p) != -1:\n t = random.randint(0, p-1)\n \n p_2 = p**2\n \n i = (t**2 - a) % p_2\n j = (t + math.sqrt(i)) % p_2\n \n #x = j**(p+1)/2 % p**2\n \"\"\"x = 1\n for i in range((p+1)//2):\n x = (x * j) % p_2\n \"\"\"\n \n x = bin_ladder(j, int((p+1)/2), p_2)\n \n return x",
"def make_grid(dim=(11,4)):\n x,y = range(dim[0]),range(dim[1])\n p = np.array([[[s,i] for s in x] for i in y], dtype=np.float32)\n p[:,1::2,1] += 0.5\n p = np.reshape(p, (-1,2), 'F')\n\n ###scale height = 1\n x_scale = 1./(np.amax(p[:,0])-np.amin(p[:,0]))\n y_scale = 1./(np.amax(p[:,1])-np.amin(p[:,1]))\n\n p *=x_scale,x_scale/.5\n\n ###center x,y around (0,0)\n x_offset = (np.amax(p[:,0])-np.amin(p[:,0]))/2.\n y_offset = (np.amax(p[:,1])-np.amin(p[:,1]))/2.\n p -= x_offset,y_offset\n return p",
"def generate_points(num_points):\n for i in xrange(0, num_points):\n pass",
"def gen_n_points(n):\r\n return(np.random.uniform(-1, 3, size = n))",
"def add_points(grid, num_points):\n \n for i in range(num_points):\n # Coord for crit point\n rand_x = random.randint(0, GRID_WIDTH - 1)\n rand_y = random.randint(0, GRID_HEIGHT - 1)\n \n # Set value of crit point\n elev = (MAX_HEIGHT - MIN_HEIGHT) * random.random() + MIN_HEIGHT\n grid[rand_x][rand_y] = elev * PEAK_HEIGHT\n \n return grid",
"def project_points(pts, p, n=[0, 0, 1], x0=[0, 0, 0]):\n n = np.array(n)\n # projection matrix\n P = np.eye(3) - np.outer(p, n) / np.dot(p, n)\n return np.dot(pts, P.T) + np.dot(n, np.array(x0)) / np.dot(n, p) * p",
"def test_PRP(initial):\n return plan_route((initial[0],initial[1]), initial[2],\n # Goals:\n [(2,3),(3,2)],\n # Allowed locations:\n [(0,0),(0,1),(0,2),(0,3),\n (1,0),(1,1),(1,2),(1,3),\n (2,0), (2,3),\n (3,0),(3,1),(3,2),(3,3)])",
"def random_projection_split(data, indices, rng_state):\n dim = data.shape[1]\n\n # Select two random points, set the hyperplane between them\n left_index = tau_rand_int(rng_state) % indices.shape[0]\n right_index = tau_rand_int(rng_state) % indices.shape[0]\n right_index += left_index == right_index\n right_index = right_index % indices.shape[0]\n left = indices[left_index]\n right = indices[right_index]\n\n # Compute the normal vector to the hyperplane (the vector between\n # the two points) and the offset from the origin\n hyperplane_offset = 0.0\n hyperplane_vector = np.empty(dim, dtype=np.float32)\n\n for d in range(dim):\n hyperplane_vector[d] = data[left, d] - data[right, d]\n hyperplane_offset -= hyperplane_vector[d] * (\n data[left, d] + data[right, d]) / 2.0\n\n # For each point compute the margin (project into normal vector, add offset)\n # If we are on lower side of the hyperplane put in one pile, otherwise\n # put it in the other pile (if we hit hyperplane on the nose, flip a coin)\n n_left = 0\n n_right = 0\n side = np.empty(indices.shape[0], np.int8)\n for i in range(indices.shape[0]):\n margin = hyperplane_offset\n for d in range(dim):\n margin += hyperplane_vector[d] * data[indices[i], d]\n\n if margin == 0:\n side[i] = tau_rand_int(rng_state) % 2\n if side[i] == 0:\n n_left += 1\n else:\n n_right += 1\n elif margin > 0:\n side[i] = 0\n n_left += 1\n else:\n side[i] = 1\n n_right += 1\n\n # Now that we have the counts allocate arrays\n indices_left = np.empty(n_left, dtype=np.int64)\n indices_right = np.empty(n_right, dtype=np.int64)\n\n # Populate the arrays with indices according to which side they fell on\n n_left = 0\n n_right = 0\n for i in range(side.shape[0]):\n if side[i] == 0:\n indices_left[n_left] = indices[i]\n n_left += 1\n else:\n indices_right[n_right] = indices[i]\n n_right += 1\n\n return indices_left, indices_right",
"def _sample_proportional(self): \n indices = []\n p_total = self.sum_tree.sum(0, len(self) - 1)\n segment = p_total / self.batch_size\n \n for i in range(self.batch_size):\n a = segment * i\n b = segment * (i + 1)\n upperbound = random.uniform(a, b)\n idx = self.sum_tree.retrieve(upperbound)\n indices.append(idx)\n \n return indices",
"def test_random_create_P():\n\n max_step = 100\n n = 50\n low = 1\n tol = 1e-8\n\n P_ฮน = np.random.dirichlet(np.random.randint(low, max_step, size=n))\n P_ฮด = np.random.dirichlet(np.random.randint(low, max_step, size=n))\n P_ฮถ = np.random.dirichlet(np.random.randint(low, high=max_step, size=50),\n size=2)\n\n P = create_P(P_ฮด, P_ฮถ, P_ฮน)\n\n assert abs(P[:, 0, :, :].sum() - 1.) < tol\n assert abs(P[:, 1, :, :].sum() - 1.) < tol",
"def points_generator(self):\n rows, cols = self.game.board.board_size\n points = [Point(i, j) for i, j in product(range(rows), range(cols))]\n for point in points:\n yield point",
"def gausspp(npt):\n if npt <= 0:\n raise ValueError(\"Can't generate grid for <= 0 points\")\n return\n if npt == 1:\n xpt = np.array([0.0])\n wht = np.array([2.0])\n return xpt, wht\n\n # Each mesh is stored as a section of a big array.\n # These store its number and start index is here\n mesh_npts = [2,3,4,5,6,7,8,9,10,11,12,13,14,16,20,24,28,32,40,48,64,96]\n\n # First, look to see if the mesh is stored.\n # If not we take the largest number that is lower than that stored.\n for i in range(len(mesh_npts)):\n mesh_idx = i\n if mesh_npts[i] >= npt:\n break\n npt = mesh_npts[mesh_idx]\n n2 = int((npt+1)/2.0) # Care: Integer division!\n iof = npt\n\n # The stored grid parameters are accessed as a dict of arrays.\n x = {\n 2 : [0.577350269189626e0],\n 3 : [0.774596669241483e0, 0.0e0],\n 4 : [0.861136311594053e0, 0.339981043584856e0],\n 5 : [0.906179845938664e0, 0.538469310105683e0, 0.0e0],\n 6 : [0.932469514203152e0, 0.661209386466265e0, 0.238619186083197e0],\n 7 : [0.949107912342759e0, 0.741531185599394e0, 0.405845151377397e0, 0.0e0],\n 8 : [0.960289856497536e0, 0.796666477413627e0, 0.525532409916329e0, 0.183434642495650e0],\n 9 : [0.968160239507626e0, 0.836031107326636e0, 0.613371432700590e0, 0.324253423403809e0,\n 0.0e0],\n 10 : [0.973906528517172e0, 0.865063366688985e0, 0.679409568299024e0, 0.433395394129247e0,\n 0.148874338981631e0],\n 11 : [0.978228658146057e0, 0.887062599768095e0, 0.730152005574049e0, 0.519096129206812e0,\n 0.269543155952345e0, 0.0e0],\n 12 : [0.981560634246719e0, 0.904117256370475e0, 0.769902674194305e0, 0.587317954286617e0,\n 0.367831498998180e0, 0.125233408511469e0],\n 13 : [0.984183054718588e0, 0.917598399222978e0, 0.801578090733310e0, 0.642349339440340e0,\n 0.448492751036447e0, 0.230458315955135e0, 0.0e0],\n 14 : [0.986283808696812e0, 0.928434883663574e0, 0.827201315069765e0, 0.687292904811685e0,\n 0.515248636358154e0, 0.319112368927890e0, 0.108054948707344e0],\n 16 : [0.989400934991650e0, 0.944575023073232e0, 0.865631202387832e0, 0.755404408355003e0,\n 0.617876244402644e0, 0.458016777657227e0, 0.281603550779259e0, 0.950125098376369e-1],\n 20 : [0.993128599185095e0, 0.963971927277914e0, 0.912234428251326e0, 0.839116971822219e0,\n 0.746331906460151e0, 0.636053680726515e0, 0.510867001950827e0, 0.373706088715419e0,\n 0.227785851141645e0, 0.765265211334969e-1],\n 24 : [0.995187219997021e0, 0.974728555971309e0, 0.938274552002733e0, 0.886415527004401e0,\n 0.820001985973903e0, 0.740124191578554e0, 0.648093651936975e0, 0.545421471388839e0,\n 0.433793507626045e0, 0.315042679696163e0, 0.191118867473616e0, 0.640568928626059e-1],\n 28 : [0.996442497573954e0, 0.981303165370873e0, 0.954259280628938e0, 0.915633026392132e0,\n 0.865892522574395e0, 0.805641370917179e0, 0.735610878013632e0, 0.656651094038865e0,\n 0.569720471811402e0, 0.475874224955118e0, 0.376251516089079e0, 0.272061627635178e0,\n 0.164569282133381e0, 0.550792898840340e-1],\n 32 : [0.997263861849481e0, 0.985611511545268e0, 0.964762255587506e0, 0.934906075937740e0,\n 0.896321155766052e0, 0.849367613732570e0, 0.794483795967942e0, 0.732182118740290e0,\n 0.663044266930215e0, 0.587715757240762e0, 0.506899908932229e0, 0.421351276130635e0,\n 0.331868602282128e0, 0.239287362252137e0, 0.144471961582796e0, 0.483076656877380e-1],\n 40 : [0.998237709710559e0, 0.990726238699457e0, 0.977259949983774e0, 0.957916819213792e0,\n 0.932812808278676e0, 0.902098806968874e0, 0.865959503212259e0, 0.824612230833312e0,\n 0.778305651426519e0, 0.727318255189927e0, 0.671956684614179e0, 0.612553889667980e0,\n 0.549467125095128e0, 
0.483075801686179e0, 0.413779204371605e0, 0.341994090825758e0,\n 0.268152185007254e0, 0.192697580701371e0, 0.116084070675255e0, 0.387724175060510e-1],\n 48 : [0.998771007252426e0, 0.993530172266351e0, 0.984124583722827e0, 0.970591592546247e0,\n 0.952987703160431e0, 0.931386690706554e0, 0.905879136715570e0, 0.876572020274248e0,\n 0.843588261624393e0, 0.807066204029443e0, 0.767159032515740e0, 0.724034130923815e0,\n 0.677872379632664e0, 0.628867396776514e0, 0.577224726083973e0, 0.523160974722233e0,\n 0.466902904750958e0, 0.408686481990717e0, 0.348755886292161e0, 0.287362487355455e0,\n 0.224763790394689e0, 0.161222356068892e0, 0.970046992094629e-1, 0.323801709628690e-1],\n 64 : [0.999305041735772e0, 0.996340116771955e0, 0.991013371476744e0, 0.983336253884626e0,\n 0.973326827789911e0, 0.961008799652054e0, 0.946411374858403e0, 0.929569172131939e0,\n 0.910522137078503e0, 0.889315445995114e0, 0.865999398154093e0, 0.840629296252580e0,\n 0.813265315122797e0, 0.783972358943341e0, 0.752819907260532e0, 0.719881850171611e0,\n 0.685236313054233e0, 0.648965471254657e0, 0.611155355172393e0, 0.571895646202634e0,\n 0.531279464019894e0, 0.489403145707053e0, 0.446366017253464e0, 0.402270157963992e0,\n 0.357220158337668e0, 0.311322871990211e0, 0.264687162208767e0, 0.217423643740007e0,\n 0.169644420423993e0, 0.121462819296120e0, 0.729931217877989e-1, 0.243502926634240e-1],\n 96 : [0.999689503883230e0, 0.998364375863181e0, 0.995981842987209e0, 0.992543900323762e0,\n 0.988054126329623e0, 0.982517263563014e0, 0.975939174585136e0, 0.968326828463264e0,\n 0.959688291448742e0, 0.950032717784437e0, 0.939370339752755e0, 0.927712456722308e0,\n 0.915071423120898e0, 0.901460635315852e0, 0.886894517402420e0, 0.871388505909296e0,\n 0.854959033434601e0, 0.837623511228187e0, 0.819400310737931e0, 0.800308744139140e0,\n 0.780369043867433e0, 0.759602341176647e0, 0.738030643744400e0, 0.715676812348967e0,\n 0.692564536642171e0, 0.668718310043916e0, 0.644163403784967e0, 0.618925840125468e0,\n 0.593032364777572e0, 0.566510418561397e0, 0.539388108324357e0, 0.511694177154667e0,\n 0.483457973920596e0, 0.454709422167743e0, 0.425478988407300e0, 0.395797649828908e0,\n 0.365696861472313e0, 0.335208522892625e0, 0.304364944354496e0, 0.273198812591049e0,\n 0.241743156163840e0, 0.210031310460567e0, 0.178096882367618e0, 0.145973714654896e0,\n 0.113695850110665e0, 0.812974954644249e-1, 0.488129851360490e-1, 0.162767448496020e-1]\n }\n wt = {\n 2 : [0.999999999999999e0],\n 3 : [0.555555555555556e0, 0.888888888888889e0],\n 4 : [0.347854845137454e0, 0.652145154862546e0],\n 5 : [0.236926885056189e0, 0.478628670499366e0, 0.568888888888889e0],\n 6 : [0.171324492379170e0, 0.360761573048139e0, 0.467913934572691e0],\n 7 : [0.129484966168870e0, 0.279705391489277e0, 0.381830050505119e0, 0.417959183673469e0],\n 8 : [0.101228536290376e0, 0.222381034453374e0, 0.313706645877887e0, 0.362683783378362e0],\n 9 : [0.812743883615739e-1, 0.180648160694857e0, 0.260610696402935e0, 0.312347077040003e0,\n 0.330239355001260e0],\n 10 : [0.666713443086879e-1, 0.149451349150581e0, 0.219086362515982e0, 0.269266719309996e0,\n 0.295524224714753e0],\n 11 : [0.556685671161740e-1, 0.125580369464905e0, 0.186290210927734e0, 0.233193764591990e0,\n 0.262804544510247e0, 0.272925086777901e0],\n 12 : [0.471753363865120e-1, 0.106939325995318e0, 0.160078328543346e0, 0.203167426723066e0,\n 0.233492536538355e0, 0.249147045813403e0],\n 13 : [0.404840047653160e-1, 0.921214998377279e-1, 0.138873510219787e0, 0.178145980761946e0,\n 0.207816047536889e0, 0.226283180262897e0, 0.232551553230874e0],\n 
14 : [0.351194603317520e-1, 0.801580871597599e-1, 0.121518570687903e0, 0.157203167158194e0,\n 0.185538397477938e0, 0.205198463721296e0, 0.215263853463158e0],\n 16 : [0.271524594117540e-1, 0.622535239386480e-1, 0.951585116824929e-1, 0.124628971255534e0,\n 0.149595988816577e0, 0.169156519395002e0, 0.182603415044923e0, 0.189450610455068e0],\n 20 : [0.176140071391520e-1, 0.406014298003870e-1, 0.626720483341089e-1, 0.832767415767049e-1,\n 0.101930119817240e0, 0.118194531961518e0, 0.131688638449177e0, 0.142096109318382e0,\n 0.149172986472604e0, 0.152753387130726e0],\n 24 : [0.123412297999870e-1, 0.285313886289340e-1, 0.442774388174200e-1, 0.592985849154370e-1,\n 0.733464814110799e-1, 0.861901615319529e-1, 0.976186521041139e-1, 0.107444270115966e0,\n 0.115505668053726e0, 0.121670472927803e0, 0.125837456346828e0, 0.127938195346752e0],\n 28 : [0.912428259309400e-2, 0.211321125927710e-1, 0.329014277823040e-1, 0.442729347590040e-1,\n 0.551073456757170e-1, 0.652729239669989e-1, 0.746462142345689e-1, 0.831134172289009e-1,\n 0.905717443930329e-1, 0.969306579979299e-1, 0.102112967578061e0, 0.106055765922846e0,\n 0.108711192258294e0, 0.110047013016475e0],\n 32 : [0.701861000947000e-2, 0.162743947309060e-1, 0.253920653092620e-1, 0.342738629130210e-1,\n 0.428358980222270e-1, 0.509980592623760e-1, 0.586840934785350e-1, 0.658222227763619e-1,\n 0.723457941088479e-1, 0.781938957870699e-1, 0.833119242269469e-1, 0.876520930044039e-1,\n 0.911738786957639e-1, 0.938443990808039e-1, 0.956387200792749e-1, 0.965400885147279e-1],\n 40 : [0.452127709853300e-2, 0.104982845311530e-1, 0.164210583819080e-1, 0.222458491941670e-1,\n 0.279370069800230e-1, 0.334601952825480e-1, 0.387821679744720e-1, 0.438709081856730e-1,\n 0.486958076350720e-1, 0.532278469839370e-1, 0.574397690993910e-1, 0.613062424929290e-1,\n 0.648040134566009e-1, 0.679120458152339e-1, 0.706116473912869e-1, 0.728865823958039e-1,\n 0.747231690579679e-1, 0.761103619006259e-1, 0.770398181642479e-1, 0.775059479784249e-1],\n 48 : [0.315334605230600e-2, 0.732755390127600e-2, 0.114772345792340e-1, 0.155793157229440e-1,\n 0.196161604573550e-1, 0.235707608393240e-1, 0.274265097083570e-1, 0.311672278327980e-1,\n 0.347772225647700e-1, 0.382413510658310e-1, 0.415450829434650e-1, 0.446745608566940e-1,\n 0.476166584924900e-1, 0.503590355538540e-1, 0.528901894851940e-1, 0.551995036999840e-1,\n 0.572772921004030e-1, 0.591148396983960e-1, 0.607044391658940e-1, 0.620394231598930e-1,\n 0.631141922862539e-1, 0.639242385846479e-1, 0.644661644359499e-1, 0.647376968126839e-1],\n 64 : [0.178328072169600e-2, 0.414703326056200e-2, 0.650445796897800e-2, 0.884675982636400e-2,\n 0.111681394601310e-1, 0.134630478967190e-1, 0.157260304760250e-1, 0.179517157756970e-1,\n 0.201348231535300e-1, 0.222701738083830e-1, 0.243527025687110e-1, 0.263774697150550e-1,\n 0.283396726142590e-1, 0.302346570724020e-1, 0.320579283548510e-1, 0.338051618371420e-1,\n 0.354722132568820e-1, 0.370551285402400e-1, 0.385501531786160e-1, 0.399537411327200e-1,\n 0.412625632426230e-1, 0.424735151236530e-1, 0.435837245293230e-1, 0.445905581637560e-1,\n 0.454916279274180e-1, 0.462847965813140e-1, 0.469681828162100e-1, 0.475401657148300e-1,\n 0.479993885964580e-1, 0.483447622348030e-1, 0.485754674415030e-1, 0.486909570091400e-1],\n 96 : [0.796792065552010e-3, 0.185396078894692e-2, 0.291073181793495e-2, 0.396455433844469e-2,\n 0.501420274292752e-2, 0.605854550423596e-2, 0.709647079115386e-2, 0.812687692569876e-2,\n 0.914867123078339e-2, 0.101607705350080e-1, 0.111621020998380e-1, 0.121516046710880e-1,\n 
0.131282295669610e-1, 0.140909417723140e-1, 0.150387210269940e-1, 0.159705629025620e-1,\n 0.168854798642450e-1, 0.177825023160450e-1, 0.186606796274110e-1, 0.195190811401450e-1,\n 0.203567971543330e-1, 0.211729398921910e-1, 0.219666444387440e-1, 0.227370696583290e-1,\n 0.234833990859260e-1, 0.242048417923640e-1, 0.249006332224830e-1, 0.255700360053490e-1,\n 0.262123407356720e-1, 0.268268667255910e-1, 0.274129627260290e-1, 0.279700076168480e-1,\n 0.284974110650850e-1, 0.289946141505550e-1, 0.294610899581670e-1, 0.298963441363280e-1,\n 0.302999154208270e-1, 0.306713761236690e-1, 0.310103325863130e-1, 0.313164255968610e-1,\n 0.315893307707270e-1, 0.318287588944110e-1, 0.320344562319920e-1, 0.322062047940300e-1,\n 0.323438225685750e-1, 0.324471637140640e-1, 0.325161187138680e-1, 0.325506144923630e-1]\n }\n\n # Now calculate the grid and weighting from these data chosen by npt\n\n mesh_r = x[npt]\n mesh_wt = wt[npt]\n\n r = np.zeros((2*n2))\n weight = np.zeros((2*n2))\n\n for i in range(n2):\n r[i] = -mesh_r[i]\n r[iof - (i + 1)] = mesh_r[i]\n weight[i] = mesh_wt[i]\n weight[iof - (i + 1)] = mesh_wt[i]\n\n return npt, r, weight",
"def guassian_point_process(x0, y0, xSigma, ySigma, nPoints):\n x = np.random.normal(loc=x0, scale=xSigma, size=(nPoints,))\n y = np.random.normal(loc=y0, scale=ySigma, size=(nPoints,))\n return x, y",
"def generar_polinomio(self):\n\t\tself.poli = 0\n\t\tfor i in range(len(self.v)):\n\t\t\tpoli2 = n(self.diferencias_divididas(self.v[0:i+1]))\n\t\t\tfor j in range(i):\n\t\t\t\tpoli2 *= self.x-self.v[j][0]\n\t\t\tself.poli = self.poli + poli2",
"def _prep_gtilde(self):\n \n # prepare shifted momenta and angles for the symmetric permutation \n self.pitilde=np.empty((self.npoints+1,self.nqpoints+1,self.nx),dtype=np.double) \n self.chitilde=np.empty((self.npoints+1,self.nqpoints+1,self.nx),dtype=np.double) \n \n thetapi=np.empty((self.npoints+1,self.nqpoints+1,self.nx),dtype=np.double)\n thetachi=np.empty((self.npoints+1,self.nqpoints+1,self.nx),dtype=np.double)\n thetapp=np.empty((self.nx),dtype=np.double)\n \n for ix in range(self.nx):\n xval=self.xp[ix] \n thetapp[ix]=np.arccos(xval)\n for jq in range(self.nqpoints+1):\n qval=self.qgrid[jq]\n for jp in range(self.npoints+1):\n if jp == self.npoints: # use onshell point for given q value \n e12=self.ed+0.75*self.qgrid[self.nqpoints]**2/self.mass \\\n -0.75*self.qgrid[jq]**2/self.mass\n if e12 > 0:\n pval=np.sqrt(self.mass*e12)\n else:\n pval=0 \n else: \n pval=self.pgrid[jp]\n \n px=-0.75*qval*np.sqrt(1.0-xval**2)\n py=0.0\n pz=-0.5*pval-0.75*qval*xval \n self.pitilde[jp,jq,ix],thetapi[jp,jq,ix],phi=self._angle(px,py,pz)\n \n px=-0.5*qval*np.sqrt(1.0-xval**2)\n py=0.0\n pz=pval-0.5*qval*xval \n self.chitilde[jp,jq,ix],thetachi[jp,jq,ix],phi=self._angle(px,py,pz)\n\n # prepare spherical harmonics and store based on lmindx \n # number of lam,mu und l,mu combinations \n nlamindx=self._lmindx(self.lammax,self.lammax)+1\n nlindx=self._lmindx(self.lmax,self.lmax)+1\n \n # array for Y_{lam mu}(hat qp) (real is sufficient since phi=0)\n ystarlam=np.empty((nlamindx,self.nx),dtype=np.cdouble)\n for lam in range(self.lammax+1):\n for mu in range(-lam,lam+1):\n ystarlam[self._lmindx(lam,mu),:]=sph_harm(mu,lam, 0, thetapp)\n \n \n # array for Y_{l mu}(-0.5p-0.75q) (real is sufficient since phi=0)\n yl=np.empty((nlindx,self.npoints+1,self.nqpoints+1,self.nx),dtype=np.cdouble)\n for l in range(self.lmax+1):\n for mu in range(-l,l+1):\n yl[self._lmindx(l,mu),:,:,:]=sph_harm(mu,l, 0, thetapi)\n \n # array for Y_{lam mu}(p-0.5q) (real is sufficient since phi=0)\n ylam=np.empty((nlamindx,self.npoints+1,self.nqpoints+1,self.nx),dtype=np.cdouble)\n for lam in range(self.lammax+1):\n for mu in range(-lam,lam+1):\n ylam[self._lmindx(lam,mu),:,:,:]=sph_harm(mu,lam, 0, thetachi)\n \n # now prepare the necessary Clebsch-Gordan coefficients\n # we need (l lam L,0 M M) and (l lam L,mu M-mu,M) for M=0!!!\n # I assume that L is smaller than the lmax or lammax therefore M=-L,L\n # the smallest index for storage \n \n cg=np.zeros((self.nalpha),dtype=np.double)\n cgp=np.zeros((self.nalpha,2*self.lmax+1),dtype=np.double)\n \n for qnset in self.qnalpha: # go through allowed l,lam combinations\n cg[qnset[\"alpha\"]]=float(CG(qnset[\"l\"],0,qnset[\"lam\"],0,self.bl,0).doit())\n for mu in range(-qnset[\"l\"],qnset[\"l\"]+1):\n cgp[qnset[\"alpha\"],mu+qnset[\"l\"]]=float(CG(qnset[\"l\"],mu,qnset[\"lam\"],-mu,self.bl,0).doit())\n\n # now we can perform the mu summation for the combination of coupled spherical harmonics \n ylylam=np.zeros((self.nalpha,self.npoints+1,self.nqpoints+1,self.nx),dtype=np.cdouble)\n for qnset in self.qnalpha: # go through allowed l,lam combinations\n alphap=qnset[\"alpha\"]\n l=qnset[\"l\"]\n lam=qnset[\"lam\"]\n for mu in range(-l,l+1):\n lmindx=self._lmindx(l,mu)\n if abs(-mu)<=lam:\n lamindx=self._lmindx(lam,-mu)\n ylylam[alphap,:,:,:]+=cgp[alphap,mu+l]*yl[lmindx,:,:,:]*ylam[lamindx,:,:,:]\n \n # bm summation then gives G but M=0!\n self.gtilde=np.zeros((self.nalpha,self.nalpha,self.npoints+1,self.nqpoints+1,self.nx),dtype=np.cdouble)\n for qnset in self.qnalpha: # go through allowed l,lam 
combinations\n alpha=qnset[\"alpha\"]\n l=qnset[\"l\"]\n lam=qnset[\"lam\"]\n for qnsetp in self.qnalpha: # go through allowed l,lam combinations\n alphap=qnsetp[\"alpha\"] \n lamindx=self._lmindx(lam,0) \n self.gtilde[alpha,alphap,:,:,:]+=8*m.pi**2*np.sqrt((2*l+1)/(4*m.pi))/(2*self.bl+1) \\\n *ystarlam[lamindx,:]*ylylam[alphap,:,:,:] \\\n *cg[alpha]\n \n # now we assume that there is a function on p on the left defined by p**l and on the right devided by p'**l' \n # that is interpolated using Cubherm to pi and pip \n \n # set spline elements based on grid points and shifted momenta \n self.splpitilde=Cubherm.spl(self.pgrid[0:self.npoints],self.pitilde)\n self.splchitilde=Cubherm.spl(self.qgrid[0:self.nqpoints],self.chitilde)",
"def _sample_proportional(self) -> List[int]:\n indices = []\n p_total = self.sum_tree.sum(0, len(self) - 1)\n segment = p_total / self.batch_size\n \n for i in range(self.batch_size):\n a = segment * i\n b = segment * (i + 1)\n upperbound = random.uniform(a, b)\n idx = self.sum_tree.retrieve(upperbound)\n indices.append(idx)\n \n return indices"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Description When given a directory name that exists Expected Result Shows log that directory was found | def test_has_directory_log(self, check_fn_true, caplog):
#setup
records = caplog.records
has_directory = extractor.make_has_directory(os.path.isdir)
directory_path = "./data/observed"
#when
test1 = has_directory(directory_path)
#result
assert len(records) == 1
assert records[0].message == f"It was found directory {directory_path}" | [
"def test_doesnt_have_directory_log(self, check_fn_false, caplog):\n\n #setup\n records = caplog.records\n has_directory = extractor.make_has_directory(os.path.isdir)\n directory_path = \"./data/tests\"\n \n #when\n test2 = has_directory(directory_path)\n\n #result\n assert len(records) == 1\n assert records[0].message == f\"It wasn't found directory {directory_path}\"",
"def test_log_to_file_when_dir_does_not_exist(self):\n XKNX(log_directory=\"/xknx/is/fun\")\n\n assert not os.path.isfile(\"/xknx/is/fun/xknx.log\")",
"def check_if_dir_exists():\n if not os.path.exists(str(__CURRENT_DIRECTORY) + os.sep + \"..\" + os.sep + \"logs\"):\n try:\n os.mkdir(str(__CURRENT_DIRECTORY) + os.sep + \"..\" + os.sep + \"logs\")\n logger.debug(\"Dir for logs has been created\")\n except OSError:\n logger.debug(f\"Creation of the directory {str(__CURRENT_DIRECTORY) + os.sep + '..' + os.sep + 'logs'} failed\")\n\n if not os.path.exists(str(__CURRENT_DIRECTORY) + os.sep + \"..\" + os.sep + \"db\"):\n try:\n os.mkdir(str(__CURRENT_DIRECTORY) + os.sep + \"..\" + os.sep + \"db\")\n logger.debug(\"Dir for DB has been created\")\n except OSError:\n logger.debug(f\"Creation of the directory {str(__CURRENT_DIRECTORY) + os.sep + '..' + os.sep + 'db'} failed\")",
"def logFolderExist():\n return path.exists(LOG_PATH)",
"def checkDirectory(directory):\n if not os.path.exists(directory):\n printError(\"This \" + directory + \" directory does not exist!\")",
"def test_log_dir(self):\n false_dir = '/tmp/any'\n self.test_config['LOG_DIR'] = false_dir\n self.write_config_to_file()\n self.log = nginx_log_generator()\n self.generate_report()\n # Check our log\n path_to_log = '{}/assets/{}'.format(self.test_dir, 'analyzer.log')\n with open(path_to_log) as f:\n log_content = f.read()\n self.assertTrue(\"Sorry, directory {} wasn't found\".format(false_dir) in log_content)",
"def is_directory_exists(dir_name):\n global current_dir\n \n sql = \"SELECT COUNT(*) FROM info WHERE name = ? AND parent = ?\"\n values = [dir_name,current_dir]\n count = do_sql_query(sql,values,is_select_query=True)[0]\n return True if int(count[0])>0 else False",
"def check_dir(dirname):\n print('Checking directory...{}'.format(dirname))\n if dirname is not None and not is_dir(dirname):\n raise FileNotFoundError('{} is not a valid directory'.format(dirname))",
"def check_folder_path(self, logger_name: str):\n handlers_list = self.config_dict['loggers'][logger_name]['handlers']\n for handle_key in self.config_dict['handlers'].keys():\n if handle_key in handlers_list:\n if 'filename' in self.config_dict['handlers'][handle_key]:\n folder_path = os.path.dirname(\n self.config_dict['handlers'][handle_key]['filename'])\n if len(folder_path) > 0:\n if os.path.exists(folder_path):\n pass\n else:\n os.makedirs(folder_path)\n abnormal_info = (f'The output folder was not exist, '\n f'so it was created automatically:{folder_path}')\n self.abnormal_info_output(abnormal_info)\n return None",
"def assert_directory_exists(path: str):\n __tracebackhide__ = True\n if not os.path.isdir(path):\n _assert_helper(\"Directory\", path)",
"def Directory(self) -> str:",
"def directoryExists( self, directory_path=None ):\n\n try:\n if os.path.exists( directory_path ):\n return True\n else:\n return False\n\n except:\n print( 'Could not test the directory.' )",
"def test_is_summary_directory(self):\n summary_base_dir = tempfile.mkdtemp()\n file_count = 1\n directory_count = 1\n gen_directories_and_files(summary_base_dir, file_count, directory_count)\n\n summary_watcher = SummaryWatcher()\n flag = summary_watcher.is_summary_directory(summary_base_dir, './')\n assert flag\n flag = summary_watcher.is_summary_directory(summary_base_dir, './\\x00')\n assert not flag\n shutil.rmtree(summary_base_dir)",
"def test_is_summary_directory(self):\n summary_base_dir = tempfile.mkdtemp(dir=self.base_dir)\n file_count = 1\n directory_count = 1\n gen_directories_and_files(summary_base_dir, file_count, directory_count)\n\n summary_watcher = SummaryWatcher()\n flag = summary_watcher.is_summary_directory(summary_base_dir, './')\n assert flag\n flag = summary_watcher.is_summary_directory(summary_base_dir, './\\x00')\n assert not flag\n shutil.rmtree(summary_base_dir)",
"def dir_exists(location):\n return run('test -d \"%s\" && echo OK ; true' % (location)).endswith(\"OK\")",
"def check_dir(self):\n\n if not os.path.isdir(self.directory):\n os.mkdir(self.directory, 755)\n if not os.path.exists(self.log_file):\n from pathlib import Path\n Path(self.log_file).touch()",
"def test_doesnt_have_directory(self, check_fn_false):\n\n # setup\n has_directory = extractor.make_has_directory(os.path.isdir)\n\n # when\n test2 = has_directory(\"./data/tests\")\n\n # result\n assert test2 is False",
"def check_dir(param):\n\n if not os.path.exists(param['logs_path']):\n os.makedirs(param['logs_path'])\n\n if not os.path.exists(param['model_path']):\n os.makedirs(param['model_path'])",
"def dir_filter(x):\n return os.path.isdir('logs/{}'.format(x))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Description When given a directory name that doesn't exist Expected Result returns False | def test_doesnt_have_directory(self, check_fn_false):
# setup
has_directory = extractor.make_has_directory(os.path.isdir)
# when
test2 = has_directory("./data/tests")
# result
assert test2 is False | [
"def __is_directory_name(filename):\n return filename[-1] == '/'",
"def is_valid_directory(parser, arg):",
"def test_doesnt_have_directory_log(self, check_fn_false, caplog):\n\n #setup\n records = caplog.records\n has_directory = extractor.make_has_directory(os.path.isdir)\n directory_path = \"./data/tests\"\n \n #when\n test2 = has_directory(directory_path)\n\n #result\n assert len(records) == 1\n assert records[0].message == f\"It wasn't found directory {directory_path}\"",
"def is_dir(self, path):",
"def testIsDir(self):\r\n P=lambda p:ufsi.NativeUnixPath(p)\r\n existingDirPath=P(self.existingDirPathStr)\r\n nonExistingDirPath=P(self.nonExistingDirPathStr)\r\n existingDirNoTrailingSlashPath=P(self.existingDirPathStr[:-1])\r\n existingValidDirSymlinkPath=P(self.existingValidSymlinkDirPathStr)\r\n existingInvalidDirSymlinkPath=P(self.existingInvalidSymlinkDirPathStr)\r\n existingFilePath=P(self.existingFilePathStr)\r\n\r\n # 1\r\n self.assertEquals(existingDirPath.isDir(),True,\r\n '%r is a dir'%str(existingDirPath))\r\n\r\n # 2\r\n self.assertEquals(nonExistingDirPath.isDir(),False,\r\n '%r does not exist'%str(nonExistingDirPath))\r\n\r\n # 3\r\n self.assertEquals(existingDirNoTrailingSlashPath.isDir(),True,\r\n '%r is a dir'%str(existingDirNoTrailingSlashPath))\r\n\r\n # 4\r\n self.assertEquals(existingValidDirSymlinkPath.isDir(),True,\r\n '%r is a dir'%str(existingValidDirSymlinkPath))\r\n\r\n # 5\r\n self.assertEquals(existingInvalidDirSymlinkPath.isDir(),False,\r\n '%r is an invalid symlink'\r\n %str(existingInvalidDirSymlinkPath))\r\n\r\n # 6\r\n self.assertEquals(existingFilePath.isDir(),False,\r\n '%r is a file'%str(existingFilePath))",
"def directory_entry_should_exist(self, path):\n base = os.path.basename(path)\n dir = os.path.dirname(path)\n if not base in os.listdir(dir):\n raise AssertionError(\"Directory entry '%s' does not exist in '%s'.\" % (base, dir))",
"def check_dir(filedir, olddir): # Yasemin's code\r\n\tgoodname = False\r\n\twhile goodname == False:\r\n\t\tif exists(filedir + olddir):\r\n\t\t\tprint(\"Directory already exists! Please pick a knew directory name for old lab files:\")\r\n\t\t\tolddir = input(\"> \")\r\n\t\t\tolddir = name_check(olddir)\r\n\t\telse:\r\n\t\t\tgoodname = True\r\n\treturn olddir",
"def is_directory_exists(dir_name):\n global current_dir\n \n sql = \"SELECT COUNT(*) FROM info WHERE name = ? AND parent = ?\"\n values = [dir_name,current_dir]\n count = do_sql_query(sql,values,is_select_query=True)[0]\n return True if int(count[0])>0 else False",
"def _is_directory(input_data) -> bool:\n # TODO(cezequiel): Implement in phase 2.\n _ = input_data\n return False",
"def is_valid_directory(path):\n\n return os.path.exists(os.path.dirname(path))",
"def test_is_not_dir(self):\n code = 'with open(\"{0}\") as f:\\n\\tpass'\n code = 'os.listdir(\"{0}\")'\n typo, good = __file__, os.path.dirname(__file__)\n sugg = \"'{0}' (calling os.path.dirname)\".format(good)\n bad_code, good_code = format_str(code, typo, good)\n self.throws(bad_code, NOTADIR_OS, sugg)\n self.runs(good_code)",
"def directoryExists( self, directory_path=None ):\n\n try:\n if os.path.exists( directory_path ):\n return True\n else:\n return False\n\n except:\n print( 'Could not test the directory.' )",
"def check_dir(dirname):\n print('Checking directory...{}'.format(dirname))\n if dirname is not None and not is_dir(dirname):\n raise FileNotFoundError('{} is not a valid directory'.format(dirname))",
"def is_valid_dir(ctx, param, value):\n if not os.path.isdir(value) or not os.path.exists(value):\n raise click.BadParameter(f\"Invalid directory path to {value}\")\n return value",
"def directory_exists(self, directory: str = None) -> bool:\n return os.access(directory if directory else self.get_directory(), os.R_OK)",
"def check_is_directory(val, name):\n check_path_exists(val, name)\n if not os.path.isdir(val):\n raise ValueError(name + ' of value ' + val + '\" is not a legal directory.')",
"def isValidDir(localdir):\n # TODO Create a method to validate an archive \n pass",
"def directory_is_present(self, directory_path):\n raise NotImplementedError(\"directory_is_present is not implemented\")",
"def dir_exists(location):\n return run('test -d \"%s\" && echo OK ; true' % (location)).endswith(\"OK\")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Description When given a directory name that doesn't exist Expected Result Shows log that directory wasn't found | def test_doesnt_have_directory_log(self, check_fn_false, caplog):
#setup
records = caplog.records
has_directory = extractor.make_has_directory(os.path.isdir)
directory_path = "./data/tests"
#when
test2 = has_directory(directory_path)
#result
assert len(records) == 1
assert records[0].message == f"It wasn't found directory {directory_path}" | [
"def test_log_to_file_when_dir_does_not_exist(self):\n XKNX(log_directory=\"/xknx/is/fun\")\n\n assert not os.path.isfile(\"/xknx/is/fun/xknx.log\")",
"def testNoSuchDirectory(self):\n\n self.assertRaises(OSError,\n parse_package,\n \"no_such_directory\")",
"def check_dir(dirname):\n print('Checking directory...{}'.format(dirname))\n if dirname is not None and not is_dir(dirname):\n raise FileNotFoundError('{} is not a valid directory'.format(dirname))",
"def test_nonExistentDir(self):\n e = self.assertRaises(\n IOError, logfile.LogFile, self.name, \"this_dir_does_not_exist\"\n )\n self.assertEqual(e.errno, errno.ENOENT)",
"def test_is_not_dir(self):\n code = 'with open(\"{0}\") as f:\\n\\tpass'\n code = 'os.listdir(\"{0}\")'\n typo, good = __file__, os.path.dirname(__file__)\n sugg = \"'{0}' (calling os.path.dirname)\".format(good)\n bad_code, good_code = format_str(code, typo, good)\n self.throws(bad_code, NOTADIR_OS, sugg)\n self.runs(good_code)",
"def test_determine_valid_dirname_doesnotexist(self):\n # Define path that does not exist\n dirname = '/pathdoesnotexist'\n # Assert SystemExit raised\n with self.assertRaises(SystemExit) as err:\n get_disk_usage.determine_valid_dirname(dirname)",
"def test_doesnt_have_directory(self, check_fn_false):\n\n # setup\n has_directory = extractor.make_has_directory(os.path.isdir)\n\n # when\n test2 = has_directory(\"./data/tests\")\n\n # result\n assert test2 is False",
"def checkDirectory(directory):\n if not os.path.exists(directory):\n printError(\"This \" + directory + \" directory does not exist!\")",
"def test_has_directory_log(self, check_fn_true, caplog):\n\n #setup\n records = caplog.records\n has_directory = extractor.make_has_directory(os.path.isdir)\n directory_path = \"./data/observed\"\n \n #when\n test1 = has_directory(directory_path)\n\n #result\n assert len(records) == 1\n assert records[0].message == f\"It was found directory {directory_path}\"",
"def assert_directory_exists(path: str):\n __tracebackhide__ = True\n if not os.path.isdir(path):\n _assert_helper(\"Directory\", path)",
"def check_if_dir_exists():\n if not os.path.exists(str(__CURRENT_DIRECTORY) + os.sep + \"..\" + os.sep + \"logs\"):\n try:\n os.mkdir(str(__CURRENT_DIRECTORY) + os.sep + \"..\" + os.sep + \"logs\")\n logger.debug(\"Dir for logs has been created\")\n except OSError:\n logger.debug(f\"Creation of the directory {str(__CURRENT_DIRECTORY) + os.sep + '..' + os.sep + 'logs'} failed\")\n\n if not os.path.exists(str(__CURRENT_DIRECTORY) + os.sep + \"..\" + os.sep + \"db\"):\n try:\n os.mkdir(str(__CURRENT_DIRECTORY) + os.sep + \"..\" + os.sep + \"db\")\n logger.debug(\"Dir for DB has been created\")\n except OSError:\n logger.debug(f\"Creation of the directory {str(__CURRENT_DIRECTORY) + os.sep + '..' + os.sep + 'db'} failed\")",
"def _raise_if_directory_not_found(self, directory):\n\n if not pathlib.Path(directory).exists():\n raise DirectoryDoesNotExist(\n \"Directory does not exist : {0}\".format(directory)\n )",
"def test_optional_unknown_data_dir(self, caplog):\n\n inst_module = getattr(pysat.instruments,\n '_'.join((self.testInst.platform,\n self.testInst.name)))\n\n # Update settings for this test\n with caplog.at_level(logging.WARNING, logger='pysat'):\n self.testInst = pysat.Instrument(inst_module=inst_module,\n data_dir=\"not_a_directory\")\n\n captured = caplog.text\n assert captured.find(\"data directory doesn't exist\") >= 0\n assert self.testInst.data_dir is None\n return",
"def test_non_existing_dir(self):\n sampletxt = os.path.join(TESTDATA, 'smoothoperator')\n try:\n uploaded_layers = save_to_geonode(sampletxt, user=self.user)\n for uploaded in uploaded_layers:\n print uploaded\n except RisikoException, e:\n pass\n else:\n msg = ('Expected an exception for non existing dir')\n assert False, msg",
"def test_scan_dir_not_found(self, dir_path):\n with self.assertRaises(FileNotFoundError):\n self.file_scanner.scan(dir_path)",
"def check_path(dir_path):\n if not os.path.exists(dir_path):\n print(\"\\n[!] ERROR -> '{}' is NOT a valid Directory ...\\n\".format(dir_path))\n print(\"\\n******* ******* *******\")\n sys.exit(1)",
"def testNotADirectory(self):\n\n self.assertRaises(OSError,\n parse_package,\n \"not_a_directory\")",
"def check_dir(filedir, olddir): # Yasemin's code\r\n\tgoodname = False\r\n\twhile goodname == False:\r\n\t\tif exists(filedir + olddir):\r\n\t\t\tprint(\"Directory already exists! Please pick a knew directory name for old lab files:\")\r\n\t\t\tolddir = input(\"> \")\r\n\t\t\tolddir = name_check(olddir)\r\n\t\telse:\r\n\t\t\tgoodname = True\r\n\treturn olddir",
"def test_log_dir(self):\n false_dir = '/tmp/any'\n self.test_config['LOG_DIR'] = false_dir\n self.write_config_to_file()\n self.log = nginx_log_generator()\n self.generate_report()\n # Check our log\n path_to_log = '{}/assets/{}'.format(self.test_dir, 'analyzer.log')\n with open(path_to_log) as f:\n log_content = f.read()\n self.assertTrue(\"Sorry, directory {} wasn't found\".format(false_dir) in log_content)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Description When given a directory path that has forecast as its parent folder and a csv file with the desired name Expected Result returns a dictionary with the right data | def test_forecast_folder_path(self):
#setup
filepath = ".data/forecast/Kano-KN_-9.09_7.39.json"
expected_result = {
"type": "forecast",
"city": "Kano",
"state": "KN",
"coordinates": ['-9.09', '7.39'],
"forecast": {}
}
#result
assert extractor.get_metadata_from_filepath(filepath) == expected_result | [
"def get_data():\n\n root_dir = os.getcwd()\n csv_path = os.path.join(root_dir, \"data\", \"csv\")\n\n file_names = [f for f in os.listdir(csv_path) if f.endswith(\".csv\")]\n key_names = [\n name.replace(\"olist_\", \"\")\n .replace(\".csv\", \"\")\n .replace(\"_dataset\", \"\")\n for name in file_names\n ]\n \n #create the dictionary\n values = [\n pd.read_csv(path)\n for path in [\n os.path.join(csv_path, file_name) \n for file_name in file_names]\n ]\n \n data = {key:value for (key,value) in zip(key_names, values)}\n return data",
"def songs_csv_file_path() -> Path:\n return data_dir_path().joinpath(\"songs.csv\")",
"def parse_isys_output(path_to_csv,directory_details):\n isys_results=open(path_to_csv).readlines()\n partial_paths_list=[]\n #below we are starting with the second row because the first row has the column\n # headings \n start=1\n for item in isys_results[start:]:\n partial_path=item.split(',')[0]\n partial_paths_list.append(partial_path)\n filing_details=[]\n for partial_path in partial_paths_list:\n temp_dict={}\n split_partial_path=partial_path.split('\\\\')\n temp_dict['cik']=split_partial_path[1]\n temp_dict['date_details']=split_partial_path[2]\n temp_dict['file_type']=split_partial_path[3].split('.')[-1]\n temp_dict['file_path']=directory_details+partial_path\n filing_details.append(temp_dict)\n return filing_details",
"def load_vac_folder(dir: str):\r\n d= {}\r\n for file in os.listdir(dir):\r\n filename = os.fsdecode(file)\r\n if filename.endswith(\".csv\"):\r\n load_vaccine_data(d, file)\r\n return d",
"def files_recuperation(folder_name, skyId):\n\tdata_name = []\n\tfor i in range(len(skyId)):\n\t\tname = skyId[i][0]\n\t\tdata_name.append(folder_name + '/Training_' + name + '.csv')\t\t\n\treturn data_name",
"def csv_path(name):\n return \"./data/%s\" % name",
"def __csvPath__(self):\n return \"%s/%s_%s_%s%s.csv\" % ( self.analysis_dir ,\n self.input_data.input_data.name ,\n self.input_data.name ,\n self.granularity ,\n self.name )",
"def test_when_file_already_exist(self):\n\n # Create a temporary directory for test files\n temp_dir = [\"test_files/observed\", \"test_files/forecast\", \"test_files/output\"]\n for dir in temp_dir:\n os.makedirs(dir, exist_ok=True)\n\n # Create the 1st csv file\n first_csv_filepath = os.path.join(temp_dir[0], \"Abadia-BA_-11.56_-37.52.csv\")\n with open(first_csv_filepath, \"w\", newline=\"\") as csv_file:\n writer = csv.writer(csv_file, delimiter=\";\")\n writer.writerow([\"periods\", \"precipitation\", \"temperature\", \"max_temperature\"])\n writer.writerow([\"2023-01-01\", \"5\", \"25\", \"30\"])\n writer.writerow([\"2023-01-02\", \"10\", \"23\", \"28\"])\n \n # Creating the 2nd csv file in different directory\n second_csv_filepath = os.path.join(temp_dir[1], \"Abadia-BA_-11.56_-37.52.csv\")\n with open(second_csv_filepath, \"w\", newline=\"\") as csv_file:\n writer = csv.writer(csv_file, delimiter=\";\")\n writer.writerow([\"periods\", \"precipitation\", \"temperature\", \"max_temperature\"])\n writer.writerow([\"2023-01-01\", \"5\", \"25\", \"30\"])\n writer.writerow([\"2023-01-02\", \"10\", \"23\", \"28\"])\n\n # Define the expected output JSON file path\n expected_output_filepath = os.path.join(temp_dir[2], \"BA_Abadia.json\")\n\n # Call the function under test\n extractor.csv_to_json(first_csv_filepath, temp_dir[2])\n extractor.csv_to_json(second_csv_filepath, temp_dir[2])\n\n # Verify that the output JSON file exists\n assert os.path.exists(expected_output_filepath)\n\n # Load the output JSON file\n with open(expected_output_filepath, \"r\") as json_file:\n json_data = json.load(json_file)\n\n # Verify the contents of the JSON file\n expected_data = {\n \"city\": \"Abadia\",\n \"state\": \"BA\",\n \"coordinates\": [\"-11.56\", \"-37.52\"],\n \"observed\": {\n \"periods\": [\"2023-01-01\", \"2023-01-02\"],\n \"precipitation\": [\"5\", \"10\"],\n \"temperature\": [\"25\", \"23\"],\n \"max_temperature\": [\"30\", \"28\"]\n },\n \"forecast\": {\n \"periods\": [\"2023-01-01\", \"2023-01-02\"],\n \"precipitation\": [\"5\", \"10\"],\n \"temperature\": [\"25\", \"23\"],\n \"max_temperature\": [\"30\", \"28\"]\n },\n }\n\n # Assertion\n assert json_data == expected_data\n\n # Clean up the temporary directory and files\n os.remove(first_csv_filepath)\n os.remove(second_csv_filepath)\n os.remove(expected_output_filepath)\n for dir in temp_dir:\n os.rmdir(dir)",
"def __csvPath__(self):\n return \"%s/%s_%s_analysis.csv\" % (self.strategy.analysis_dir, self.strategy.input_data.name, self.strategy.name)",
"def __readCsvFileNames(self,CsvFolderPath):\n try:\n\n csvFiles =[ item for item in os.listdir(CsvFolderPath) if ( os.path.isfile(os.path.join(CsvFolderPath,item)) and item.split('.')[1] == 'csv')]\n self.csv_files={file:os.path.join(CsvFolderPath,file) for file in csvFiles}\n\n except:\n self.dtvt_logger.error( sys.exc_info()[0] + \"Module: Configuration.py\" + \"Method:ReadCsvFileNames\")",
"def test_first_time_reading_csv_file(self):\n\n # Create a temporary directory for test files\n temp_dir = \"test_files/observed\"\n os.makedirs(temp_dir, exist_ok=True)\n\n # Create a test CSV file\n csv_filepath = os.path.join(temp_dir, \"Abadia-BA_-11.56_-37.52.csv\")\n with open(csv_filepath, \"w\", newline=\"\") as csv_file:\n writer = csv.writer(csv_file, delimiter=\";\")\n writer.writerow([\"periods\", \"precipitation\", \"temperature\", \"max_temperature\"])\n writer.writerow([\"2023-01-01\", \"5\", \"25\", \"30\"])\n writer.writerow([\"2023-01-02\", \"10\", \"23\", \"28\"])\n\n # Define the expected output JSON file path\n expected_output_filepath = os.path.join(temp_dir, \"BA_Abadia.json\")\n\n # Call the function under test\n extractor.csv_to_json(csv_filepath, temp_dir)\n\n # Verify that the output JSON file exists\n assert os.path.exists(expected_output_filepath)\n\n # Load the output JSON file\n with open(expected_output_filepath, \"r\") as json_file:\n json_data = json.load(json_file)\n\n # Verify the contents of the JSON file\n expected_data = {\n \"city\": \"Abadia\",\n \"state\": \"BA\",\n \"coordinates\": [\"-11.56\", \"-37.52\"],\n \"observed\": {\n \"periods\": [\"2023-01-01\", \"2023-01-02\"],\n \"precipitation\": [\"5\", \"10\"],\n \"temperature\": [\"25\", \"23\"],\n \"max_temperature\": [\"30\", \"28\"]\n }\n }\n assert json_data == expected_data\n\n # Clean up the temporary directory and files\n os.remove(csv_filepath)\n os.remove(expected_output_filepath)\n os.rmdir(temp_dir)",
"def _get_csv_path(name):\n return os.path.join(cwd, 'output/app_info', name)",
"def route_data(route):\n os.chdir(\"../Data/test\") #change to whatever directory your data files are stored in\n with open(\"../Sorted Data/\"+str(route)+\"_data.csv\",\"w\",newline=\"\") as result_file: #storing resulting data in csv file in different directory\n wr=csv.writer(result_file, dialect='excel') #setting up csv writer\n for file in glob.glob(\"*.csv\"): #looping through raw data files\n reader=csv.reader(open(file))\n for line in reader:\n if extract_bus_route(line[3])==route: #extract_bus_route returns the bus route from journey pattern id (col D)\n wr.writerow(line)",
"def update_csv():\n return os.listdir('./data')",
"def collect_data(folder):\n folder = pathlib.Path(folder)\n cases = []\n for case_folder in folder.iterdir():\n print(f'start collecting data for location {case_folder.name}')\n for tr_folder in case_folder.iterdir():\n case = calculate_values(tr_folder)\n cases.append(case)\n \n df = pd.DataFrame(cases)\n print(folder.parent.joinpath(f'{folder.stem}.csv'))\n df.to_csv(folder.parent.joinpath(f'{folder.stem}.csv'), index=False)",
"def read_weatherstations(path_to_data):\n namedict = read_weatherstationnames(path_to_data)\n stations = {}\n for i in namedict:\n filename = namedict[i].replace(' ', '_') + '.csv'\n print(\"Reading\", filename)\n ws = read_station_csv(os.path.join(path_to_data, filename))\n stations[i] = ws\n return stations",
"def _read_station_dir(\n root: Union[IsmnRoot, Path, str],\n stat_dir: Union[Path, str],\n temp_root: Path,\n custom_meta_reader: list,\n) -> Tuple[dict, list]:\n infos = []\n\n if not isinstance(root, IsmnRoot):\n proc_root = True\n root = IsmnRoot(root)\n else:\n proc_root = False\n\n csv = root.find_files(stat_dir, \"*.csv\")\n\n try:\n if len(csv) == 0:\n raise IsmnFileError(\"Expected 1 csv file for station, found 0. \"\n \"Use empty static metadata.\")\n else:\n if len(csv) > 1:\n infos.append(\n f\"Expected 1 csv file for station, found {len(csv)}. \"\n f\"Use first file in dir.\")\n static_meta_file = StaticMetaFile(\n root, csv[0], load_metadata=True, temp_root=temp_root)\n station_meta = static_meta_file.metadata\n except IsmnFileError as e:\n infos.append(f\"Error loading static meta for station: {e}\")\n station_meta = MetaData(\n [MetaVar(k, v) for k, v in CSV_META_TEMPLATE.items()])\n\n data_files = root.find_files(stat_dir, \"*.stm\")\n\n filelist = []\n\n for file_path in data_files:\n try:\n f = DataFile(root, file_path, temp_root=temp_root)\n except Exception as e:\n infos.append(f\"Error loading ismn file: {e}\")\n continue\n\n f.metadata.merge(station_meta, inplace=True, exclude_empty=False)\n\n f.metadata = f.metadata.best_meta_for_depth(\n Depth(\n f.metadata[\"instrument\"].depth.start,\n f.metadata[\"instrument\"].depth.end,\n ))\n\n # If custom metadata readers are available\n if custom_meta_reader is not None:\n for cmr in np.atleast_1d(custom_meta_reader):\n cmeta = cmr.read_metadata(f.metadata)\n if isinstance(cmeta, dict):\n cmeta = MetaData([MetaVar(k, v) for k, v in cmeta.items()])\n if cmeta is not None:\n f.metadata.merge(cmeta, inplace=True)\n\n network = f.metadata[\"network\"].val\n station = f.metadata[\"station\"].val\n\n filelist.append((network, station, f))\n\n infos.append(f\"Processed file {file_path}\")\n\n if proc_root:\n root.close()\n\n return filelist, infos",
"def get_data(self):\r\n\r\n # Find the absolute path for the root dir (04-Decision-Science)\r\n # Uses __file__ as absolute path anchor\r\n root_dir = os.path.abspath('')\r\n\r\n # Use os library for Unix vs. Widowns robustness\r\n xls_path = os.path.join(root_dir, 'data')\r\n\r\n file_names = [f for f in os.listdir(csv_path) if f.endswith('.xls')]\r\n\r\n def key_from_file_name(f):\r\n if f[-4:] == '.xls':\r\n return f[:-4]\r\n\r\n # Create the dictionary\r\n data = {}\r\n for f in file_names:\r\n data[key_from_file_name(f)] = pd.read_excel(os.path.join(xls_path, f))",
"def preprocess_files(file_path):\n # checking your current working directory\n cur_dir = os.getcwd()\n\n # Get your current folder and sub folder event data\n data_dir = os.path.join(cur_dir, 'event_data')\n\n # Create a for loop to create a list of files and collect each\n # file_path\n file_path_list = []\n for root, dirs, files in os.walk(data_dir):\n # join the file path and roots with the subdirectories using\n # glob\n file_path_list = glob.glob(os.path.join(root, '*'))\n\n full_data_rows_list = []\n\n # for every file_path in the file path list collect records\n for f in file_path_list:\n\n # reading csv file\n with open(f, 'r', encoding='utf8', newline='') as csvfile:\n\n # creating a csv reader object\n csvreader = csv.reader(csvfile)\n next(csvreader)\n\n # extracting each data row one by one and append it\n for line in csvreader:\n full_data_rows_list.append(line)\n\n csv.register_dialect('myDialect', quoting=csv.QUOTE_ALL,\n skipinitialspace=True)\n\n # create one file with all the records\n with open(file_path, 'w', encoding='utf8',\n newline='') as f:\n writer = csv.writer(f, dialect='myDialect')\n writer.writerow(\n ['artist', 'firstName', 'gender', 'itemInSession',\n 'lastName', 'length', 'level', 'location', 'sessionId',\n 'song', 'userId'])\n for row in full_data_rows_list:\n if row[0] == '':\n continue\n writer.writerow((row[0], row[2], row[3], row[4], row[5],\n row[6], row[7], row[8], row[12], row[13],\n row[16]))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Description When given a csv_filepath and output_filepath and it's the first time reading it Expected Result creates a json file with the right values | def test_first_time_reading_csv_file(self):
# Create a temporary directory for test files
temp_dir = "test_files/observed"
os.makedirs(temp_dir, exist_ok=True)
# Create a test CSV file
csv_filepath = os.path.join(temp_dir, "Abadia-BA_-11.56_-37.52.csv")
with open(csv_filepath, "w", newline="") as csv_file:
writer = csv.writer(csv_file, delimiter=";")
writer.writerow(["periods", "precipitation", "temperature", "max_temperature"])
writer.writerow(["2023-01-01", "5", "25", "30"])
writer.writerow(["2023-01-02", "10", "23", "28"])
# Define the expected output JSON file path
expected_output_filepath = os.path.join(temp_dir, "BA_Abadia.json")
# Call the function under test
extractor.csv_to_json(csv_filepath, temp_dir)
# Verify that the output JSON file exists
assert os.path.exists(expected_output_filepath)
# Load the output JSON file
with open(expected_output_filepath, "r") as json_file:
json_data = json.load(json_file)
# Verify the contents of the JSON file
expected_data = {
"city": "Abadia",
"state": "BA",
"coordinates": ["-11.56", "-37.52"],
"observed": {
"periods": ["2023-01-01", "2023-01-02"],
"precipitation": ["5", "10"],
"temperature": ["25", "23"],
"max_temperature": ["30", "28"]
}
}
assert json_data == expected_data
# Clean up the temporary directory and files
os.remove(csv_filepath)
os.remove(expected_output_filepath)
os.rmdir(temp_dir) | [
"def create_json_from_csv(csv_file, delimiter, cols_delimiter, keep, dic_types, infer_types, max_docs, json_file, per_line):\n\n # Get header of csv\n header_csv = get_header_csv(csv_file, cols_delimiter)\n\n # Create structure of json\n print(' [INFO] Creating json\\'s structure')\n jstruct = create_json_structure(header_csv, delimiter)\n print(jstruct)\n # Read csv line by line and create list of json\n print(' [INFO] Filling json') \n js_content = []\n with open(csv_file, 'r') as f:\n reader = csv.DictReader(f, delimiter=cols_delimiter)\n i = 0\n beg = True\n end = True\n # Prepare output file if dump in one file\n if max_docs == -1 and not per_line:\n beg = False\n end = False\n with open(json_file, 'w') as jsf:\n jsf.write('[\\n')\n for row in reader:\n if infer_types:\n row = {x: infer_type(row[x]) for x in row}\n jexample = copy.deepcopy(jstruct)\n js_content.append(create_json_example(row, header_csv, jexample, delimiter, keep, dic_types))\n\n i += 1\n # Dump json in streaming\n if (max_docs == -1) and ((i % 10000) == 0):\n dump(json_file, js_content, max_docs, per_line, i // max_docs, beg, end)\n js_content = []\n elif (max_docs != -1) and (i % max_docs) == 0:\n dump(json_file, js_content, max_docs, per_line, i // max_docs, beg, end)\n js_content = []\n\n # Dump last jsons\n if js_content:\n dump(json_file, js_content, max_docs, per_line, i // max_docs, beg, True)\n\n print(' [INFO] Json{} successfully created and dumped'.format('s' if (max_docs != -1) else ''))\n\n return",
"def parse_csv_data_to_json(input_file, output_file):\n with open(input_file) as f:\n # open the output file for writing\n with open(output_file, 'w') as myfile:\n\n # read in the csv\n input_content = csv.reader(f, delimiter=',')\n\n # skip the header and store it to be used with the json objects\n field_names = next(f).strip().split(\",\")\n number_of_records_written = 0\n for x in input_content:\n # make a dictionary of keys and values for json dumping\n dictionary = dict(zip(field_names, x))\n\n # delete an fields that are empty string to suppress errors while uploading\n cleaned_dict = {k: v for k, v in dictionary.items() if v is not \"\"}\n\n # set the id of the index to the ack id\n action_and_meta_data[\"index\"][\"_id\"] = cleaned_dict.get(\"ACK_ID\")\n\n # dump the index and data to file\n json.dump(action_and_meta_data, myfile)\n myfile.write('\\n')\n json.dump(cleaned_dict, myfile)\n myfile.write('\\n')\n number_of_records_written += 1\n\n return number_of_records_written",
"def test_when_file_already_exist(self):\n\n # Create a temporary directory for test files\n temp_dir = [\"test_files/observed\", \"test_files/forecast\", \"test_files/output\"]\n for dir in temp_dir:\n os.makedirs(dir, exist_ok=True)\n\n # Create the 1st csv file\n first_csv_filepath = os.path.join(temp_dir[0], \"Abadia-BA_-11.56_-37.52.csv\")\n with open(first_csv_filepath, \"w\", newline=\"\") as csv_file:\n writer = csv.writer(csv_file, delimiter=\";\")\n writer.writerow([\"periods\", \"precipitation\", \"temperature\", \"max_temperature\"])\n writer.writerow([\"2023-01-01\", \"5\", \"25\", \"30\"])\n writer.writerow([\"2023-01-02\", \"10\", \"23\", \"28\"])\n \n # Creating the 2nd csv file in different directory\n second_csv_filepath = os.path.join(temp_dir[1], \"Abadia-BA_-11.56_-37.52.csv\")\n with open(second_csv_filepath, \"w\", newline=\"\") as csv_file:\n writer = csv.writer(csv_file, delimiter=\";\")\n writer.writerow([\"periods\", \"precipitation\", \"temperature\", \"max_temperature\"])\n writer.writerow([\"2023-01-01\", \"5\", \"25\", \"30\"])\n writer.writerow([\"2023-01-02\", \"10\", \"23\", \"28\"])\n\n # Define the expected output JSON file path\n expected_output_filepath = os.path.join(temp_dir[2], \"BA_Abadia.json\")\n\n # Call the function under test\n extractor.csv_to_json(first_csv_filepath, temp_dir[2])\n extractor.csv_to_json(second_csv_filepath, temp_dir[2])\n\n # Verify that the output JSON file exists\n assert os.path.exists(expected_output_filepath)\n\n # Load the output JSON file\n with open(expected_output_filepath, \"r\") as json_file:\n json_data = json.load(json_file)\n\n # Verify the contents of the JSON file\n expected_data = {\n \"city\": \"Abadia\",\n \"state\": \"BA\",\n \"coordinates\": [\"-11.56\", \"-37.52\"],\n \"observed\": {\n \"periods\": [\"2023-01-01\", \"2023-01-02\"],\n \"precipitation\": [\"5\", \"10\"],\n \"temperature\": [\"25\", \"23\"],\n \"max_temperature\": [\"30\", \"28\"]\n },\n \"forecast\": {\n \"periods\": [\"2023-01-01\", \"2023-01-02\"],\n \"precipitation\": [\"5\", \"10\"],\n \"temperature\": [\"25\", \"23\"],\n \"max_temperature\": [\"30\", \"28\"]\n },\n }\n\n # Assertion\n assert json_data == expected_data\n\n # Clean up the temporary directory and files\n os.remove(first_csv_filepath)\n os.remove(second_csv_filepath)\n os.remove(expected_output_filepath)\n for dir in temp_dir:\n os.rmdir(dir)",
"def create_dataset(input_file_path, output_file_path):\n col_index_map = {'user_id': 0, 'session_id': 1, 'timestamp': 2, 'step': 3, 'action_type': 4, 'reference': 5,\n 'platform': 6, 'city': 7, 'device': 8,\n 'current_filters': 9, 'impressions': 10, 'prices': 11}\n flat_dict = dict()\n with open(input_file_path, 'r') as csvFile:\n reader = csv.reader(csvFile)\n header = next(reader)\n col_names = [col_name for col_name in col_index_map.keys()]\n col_names.pop(0)\n index = 0\n for row in tqdm(reader):\n if len(flat_dict) > 40000:\n index += 1\n with open(output_file_path + \"_\" + str(index) + \".json\", \"w\") as file:\n json.dump(flat_dict, file)\n print(\" JSON : \", index)\n flat_dict = dict()\n col_values = [row[col_index_map[c_n]] for c_n in col_names]\n dict_for_each_row = dict(zip(col_names, col_values))\n to_list = dict_for_each_row['impressions']\n dict_for_each_row['impressions'] = to_list.split('|')\n to_list = dict_for_each_row['prices']\n dict_for_each_row['prices'] = to_list.split('|')\n user_id = row[col_index_map['user_id']]\n if user_id in flat_dict:\n flat_dict[user_id].append(dict_for_each_row)\n else:\n flat_dict[user_id] = [dict_for_each_row]\n\n print(\"Output is Saved\")",
"def csv_to_json(abs_file_path, json_file_path):\n data = []\n\n dir_path = os.path.dirname(abs_file_path)\n os.chmod(dir_path, 0o777)\n csv_file_path = abs_file_path\n if os.path.isfile(csv_file_path):\n with open(csv_file_path, encoding='utf-8-sig') as csv_file:\n csv_reader = csv.DictReader(filter(lambda row: row[0] !='#', csv_file),delimiter=';')\n for rows in csv_reader:\n rows['_id'] = rows.get('timestamp')\n data.append(rows)\n with open(json_file_path, 'w') as json_file:\n json_file.write(json.dumps(data, indent=4))\n return 0\n return 1",
"def read_and_write_file(json_filepath, csv_filepath, column_names):\n if sys.version_info[0] < 3:\n with open(csv_filepath, 'wb+') as fout:\n csv_file = csv.writer(fout)\n csv_file.writerow(list(column_names))\n try:\n with open(json_filepath) as fin:\n for line in fin:\n line_contents = json.loads(line)\n csv_file.writerow(get_row(line_contents, column_names))\n except Exception: # handle single json outer entry files\n line_contents = json.load(open(json_filepath))\n csv_file.writerow(get_row(line_contents, column_names))\n else:\n with open(csv_filepath, 'w+') as fout:\n csv_file = csv.writer(fout)\n csv_file.writerow(list(column_names))\n try:\n with open(json_filepath) as fin:\n for line in fin:\n line_contents = json.loads(line)\n csv_file.writerow(get_row(line_contents, column_names))\n except Exception: # handle single json outer entry files\n line_contents = json.load(open(json_filepath))\n csv_file.writerow(get_row(line_contents, column_names))",
"def obs_csv2json(input_file,output_file,example_path,instrument):\r\n\r\n obs_path = Path(cfg.obs_path)\r\n \r\n with open(example_path,'r') as e:\r\n example = js.load(e)\r\n \r\n #deleting unused categories\r\n del(example['sep_forecast_submission']['forecasts'])\r\n del(example['sep_forecast_submission']['triggers'][2])\r\n del(example['sep_forecast_submission']['triggers'][1])\r\n del(example['sep_forecast_submission']['triggers'][0])\r\n del(example['sep_forecast_submission']['triggers'][0]['particle_intensity']['instrument'])\r\n del(example['sep_forecast_submission']['triggers'][0]['particle_intensity']['last_data_time'])\r\n del(example['sep_forecast_submission']['contacts'])\r\n del(example['sep_forecast_submission']['model'])\r\n del(example['sep_forecast_submission']['issue_time'])\r\n \r\n example['sep_forecast_submission']['mode'] = 'observation'\r\n\r\n #json template for observations\r\n obs_json = example\r\n\r\n fieldnames = ('energy_threshold','flux_threshold','start_time','intensity',\r\n 'peak_time','rise_time','end_time','duration','fluence>10',\r\n 'fluence>100')\r\n\r\n #extracting data from csv file\r\n with open(input_file,'r') as f:\r\n reader = csv.DictReader(f, fieldnames)\r\n out = js.dumps( [ row for row in reader ] )\r\n\r\n obs_data = js.loads(out)\r\n\r\n data={}\r\n (obs_json['sep_forecast_submission']['triggers'][0]['particle_intensity']\r\n ['observatory']) = instrument\r\n\r\n #creating data for all energy levels forecast\r\n for j in range(1,len(obs_data)):\r\n data[j-1]=obs_data[j]\r\n\r\n #recording start and end times for all events\r\n for i in range(len(data)):\r\n data[i]['start_time'] = datetime.strptime(data[i]['start_time'],'%Y-%m-%d %H:%M:%S')\r\n data[i]['start_time'] = data[i]['start_time'].isoformat()\r\n data[i]['end_time'] = datetime.strptime(data[i]['end_time'],'%Y-%m-%d %H:%M:%S')\r\n data[i]['end_time'] = data[i]['end_time'].isoformat()\r\n data[i]['peak_time'] = datetime.strptime(data[i]['peak_time'],'%Y-%m-%d %H:%M:%S')\r\n data[i]['peak_time'] = data[i]['peak_time'].isoformat()\r\n \r\n #recording observed values for all events\r\n if i > 0:\r\n (obs_json['sep_forecast_submission']['triggers'][0]['particle_intensity']\r\n ['ongoing_events']).append({})\r\n\r\n event = (obs_json['sep_forecast_submission']['triggers'][0]['particle_intensity']\r\n ['ongoing_events'][i])\r\n \r\n #start and end times\r\n event['start_time']=data[i]['start_time']\r\n event['threshold'] = data[i]['flux_threshold']\r\n event['energy_min'] = float(data[i]['energy_threshold'][1:])\r\n event['energy_max'] = -1\r\n event['end_time']=data[i]['end_time']\r\n\r\n #peak values\r\n event['peak_intensity']=data[i]['intensity']\r\n event['peak_time'] = data[i]['peak_time']\r\n event['intensity_units']='pfu'\r\n \r\n #fluence values\r\n event['fluence'] = [{'energy_min' : '10','fluence_value' : 'fluence_value',\r\n 'units' : 'MeV [cm^-2]'},\r\n {'energy_min' : '100', 'fluence_value' : 'fluence_value',\r\n 'units' : 'MeV [cm^-2]'}]\r\n event['fluence'][0]['fluence']=data[i]['fluence>10']\r\n event['fluence'][1]['fluence']=data[i]['fluence>100']\r\n\r\n\r\n if float(event['peak_intensity']) >= cfg.pfu_threshold[cfg.energy_threshold.index\r\n (int(event['energy_min']))]:\r\n event['all_clear_boolean'] = 'false'\r\n\r\n else:\r\n event['all_clear_boolean'] = 'true'\r\n\r\n\r\n #building json file\r\n with open(obs_path / output_file, 'w') as s:\r\n js.dump(obs_json,s,indent=1)\r\n print('json file %s created' %output_file)\r\n \r\n return",
"def csv_to_json(csv_file_path: str, json_file_path: str):\n fieldnames = ('last_name', 'first_name', 'second_name')\n\n # read csv file\n try:\n with open(Path(csv_file_path)) as csv_file:\n csv_reader = csv.DictReader(csv_file, fieldnames)\n csv_data = {num: row for num, row in enumerate(csv_reader, start=1)}\n except FileNotFoundError as err:\n raise CustomException() from err\n\n # generate json\n try:\n with open(Path(json_file_path), 'w') as json_file:\n json.dump(csv_data, json_file, indent=2)\n except OSError as err:\n raise CustomException() from err",
"def test_read_csv(self):\r\n expected = {0: {\"ID\": \"A231\", \"Gender\": \"M\", \"Age\": \"23\", \"Sales\": \"245\", \"BMI\": \"Normal\", \"Salary\": \"20\",\r\n \"Birthday\": \"24/06/1994\"}}\r\n dir = path.realpath(path.curdir)\r\n data = dir + \"\\\\filehandler_tests\\\\data.csv\"\r\n self.filehandler = FileHandler(path.normpath(data))\r\n self.filehandler.set_file_type()\r\n result = self.filehandler.read()\r\n self.assertEqual(expected, result)",
"def test_json2csv(test_name, json_data, header_values, row_key, expected):\n json_file = create_file(json_data, 'output.json')\n output_json = json2csv(json_file, \"output_json.csv\",\n header_values=header_values,\n row_key=row_key)\n obs_out = file_2list(output_json)\n os.remove(output_json)\n assert obs_out == expected",
"def convert_csv_to_json():\n result = {}\n try:\n with open(FILE_NAME, 'r', newline='') as csvfile:\n reader = csv.reader(csvfile)\n for row in reader:\n for entry_id in range(len(row)):\n row[entry_id] = row[entry_id].replace(\"'\", \"\\\"\")\n result[row[0]] = [json.loads(row[1])] + [row[2]]\n except FileNotFoundError:\n print('Could not find csv')\n return False\n with open('log.json', 'w+') as output_file:\n json.dump(result, output_file)\n return True",
"def csv_to_json(csv_filename):\n csv_trimmed = csv_filename[:-3]\n json_added = csv_trimmed + 'json'\n return json_added",
"def test_create_csv(self):\n\n # absolute path to xml file to parse\n xml_file = os.path.join(self.xmlfilepath, \"DLTINS_20210117_01of01.xml\")\n\n # absolute path to the csv file to create\n csv_file = os.path.join(self.csvfile, \"DLTINS_20210117_01of01.csv\")\n\n # Test for correct data\n self.assertEqual(create_csv(xml_file, self.csvfile), csv_file)\n\n # Test for incorrect input xml file\n self.assertEqual(create_csv(\"somerandomfile\", self.csvfile), None)\n\n # Test for incorrect path to write csv to\n self.assertEqual(create_csv(xml_file, r\"D:\\kqcA CK j \"), None)",
"def create_manifest_file(csv_file, manifest_file, s3_path):\n logger.info(\"Processing CSV file %s.\", csv_file)\n\n image_count = 0\n anomalous_count = 0\n\n with open(csv_file, newline='', encoding=\"UTF-8\") as csvfile,\\\n open(manifest_file, \"w\", encoding=\"UTF-8\") as output_file:\n\n image_classifications = csv.reader(\n csvfile, delimiter=',', quotechar='|')\n\n # Process each row (image) in the CSV file.\n for row in image_classifications:\n # Skip empty lines.\n if not ''.join(row).strip():\n continue\n\n source_ref = str(s3_path) + row[0]\n classification = 0\n\n if row[1].lower() == 'anomaly':\n classification = 1\n anomalous_count += 1\n\n # Create the JSON line.\n json_line = {}\n json_line['source-ref'] = source_ref\n json_line['anomaly-label'] = str(classification)\n\n metadata = {}\n metadata['confidence'] = 1\n metadata['job-name'] = \"labeling-job/anomaly-classification\"\n metadata['class-name'] = row[1]\n metadata['human-annotated'] = \"yes\"\n metadata['creation-date'] = datetime.now(timezone.utc).strftime('%Y-%m-%dT%H:%M:%S.%f')\n metadata['type'] = \"groundtruth/image-classification\"\n\n json_line['anomaly-label-metadata'] = metadata\n\n output_file.write(json.dumps(json_line))\n output_file.write('\\n')\n image_count += 1\n\n logger.info(\"Finished creating manifest file %s.\\n\"\n \"Images: %s\\nAnomalous: %s\",\n manifest_file,\n image_count,\n anomalous_count)\n return image_count, anomalous_count",
"def main():\n\n # Ensure the output directory exists\n if not os.path.exists(OUTPUT_DIR):\n os.makedirs(OUTPUT_DIR)\n\n process_csv()",
"def main(csv_filename, criteria_field=\"to_include\"):\n\n path_field_selection = {}\n path_field_order = []\n with open(csv_filename, \"rb\") as fw:\n csv_list_dict = csv.DictReader(fw)\n\n c1_field = \"c1\"\n c2_field = \"c2\"\n c3_field = \"c3\"\n path_field = \"path\"\n\n for row_dict in csv_list_dict:\n\n if row_dict[criteria_field] == \"1\":\n path = row_dict[path_field]\n\n if path not in path_field_selection:\n path_field_selection[path] = []\n path_field_order += [path]\n else:\n pass\n\n c1 = row_dict[c1_field]\n c2 = row_dict[c2_field]\n c3 = row_dict[c3_field]\n\n fields_to_select = []\n\n if len(c1.strip()):\n fields_to_select += [c1]\n\n if len(c2.strip()):\n fields_to_select += [c2]\n\n if len(c3.strip()):\n fields_to_select += [c3]\n\n path_field_selection[path] += [fields_to_select]\n\n path_fields_list = []\n for path in path_field_order:\n path_fields_list += [[path] + path_field_selection[path]]\n\n json_file_name = csv_filename + \".json\"\n\n with open(json_file_name, \"wb\") as fw:\n json.dump(path_fields_list, fw, sort_keys=True, indent=4, separators=(',', ': '))",
"def _read_csv(self):\n self.function_name = '_read_csv'\n with open(os.path.join(self.task.downloads, self.csv_name)) as csv_file:\n reader = csv.reader(csv_file, dialect='excel')\n for row in reader:\n self.input_data.append(row)",
"def _get_csv_details(csv_in_filename, csv_out_filename):\n var_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'var'))\n csv_in_path = os.path.join(var_dir, csv_in_filename)\n csv_out_path = os.path.join(var_dir, csv_out_filename)\n\n assert os.access(csv_in_path, os.R_OK), \\\n \"Unable to read CSV path: {}\".format(csv_in_path)\n\n csv_out_dir = os.path.dirname(csv_out_path)\n assert os.access(csv_out_dir, os.W_OK), \\\n \"Unable to write to CSV out dir: {}\".format(csv_out_dir)\n\n return csv_in_path, csv_out_path",
"def to_csv_json_set(self, csv_file_path, json_file_path, write_mode: str = 'w'):\n self.to_csv(csv_file_path)\n with open(json_file_path, write_mode) as f:\n json.dump(self.metadata_dict, f)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
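The rows in this block all target the same extractor.csv_to_json function: it reads a semicolon-delimited CSV named after a city, state and coordinates, and writes a grouped JSON file. The implementation itself is not part of the dump, so the following is only a minimal sketch of a function that would satisfy the expected output above; the filename pattern, the use of the parent directory name as the grouping key, and the helper name are assumptions, not the actual extractor code.

import csv
import json
import os

def csv_to_json_sketch(csv_filepath, output_dir):
    # Derive metadata from a filename such as "Abadia-BA_-11.56_-37.52.csv" (assumed pattern).
    basename = os.path.splitext(os.path.basename(csv_filepath))[0]
    city_state, lat, lon = basename.split("_")
    city, state = city_state.split("-")
    # The grouping key ("observed" or "forecast") is assumed to come from the parent directory name.
    section = os.path.basename(os.path.dirname(csv_filepath))
    # Read the semicolon-delimited CSV into per-column lists of strings.
    with open(csv_filepath, newline="") as csv_file:
        reader = csv.DictReader(csv_file, delimiter=";")
        columns = {name: [] for name in reader.fieldnames}
        for row in reader:
            for name in reader.fieldnames:
                columns[name].append(row[name])
    data = {
        "city": city,
        "state": state,
        "coordinates": [lat, lon],
        section: columns,
    }
    output_path = os.path.join(output_dir, f"{state}_{city}.json")
    with open(output_path, "w") as json_file:
        json.dump(data, json_file, indent=4)
    return output_path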
Description When given a csv_filepath and an output_filepath and the output file already exists Expected Result concatenate the old json file with the values found in the 2nd reading. | def test_when_file_already_exist(self):
# Create a temporary directory for test files
temp_dir = ["test_files/observed", "test_files/forecast", "test_files/output"]
for dir in temp_dir:
os.makedirs(dir, exist_ok=True)
# Create the 1st csv file
first_csv_filepath = os.path.join(temp_dir[0], "Abadia-BA_-11.56_-37.52.csv")
with open(first_csv_filepath, "w", newline="") as csv_file:
writer = csv.writer(csv_file, delimiter=";")
writer.writerow(["periods", "precipitation", "temperature", "max_temperature"])
writer.writerow(["2023-01-01", "5", "25", "30"])
writer.writerow(["2023-01-02", "10", "23", "28"])
# Creating the 2nd csv file in different directory
second_csv_filepath = os.path.join(temp_dir[1], "Abadia-BA_-11.56_-37.52.csv")
with open(second_csv_filepath, "w", newline="") as csv_file:
writer = csv.writer(csv_file, delimiter=";")
writer.writerow(["periods", "precipitation", "temperature", "max_temperature"])
writer.writerow(["2023-01-01", "5", "25", "30"])
writer.writerow(["2023-01-02", "10", "23", "28"])
# Define the expected output JSON file path
expected_output_filepath = os.path.join(temp_dir[2], "BA_Abadia.json")
# Call the function under test
extractor.csv_to_json(first_csv_filepath, temp_dir[2])
extractor.csv_to_json(second_csv_filepath, temp_dir[2])
# Verify that the output JSON file exists
assert os.path.exists(expected_output_filepath)
# Load the output JSON file
with open(expected_output_filepath, "r") as json_file:
json_data = json.load(json_file)
# Verify the contents of the JSON file
expected_data = {
"city": "Abadia",
"state": "BA",
"coordinates": ["-11.56", "-37.52"],
"observed": {
"periods": ["2023-01-01", "2023-01-02"],
"precipitation": ["5", "10"],
"temperature": ["25", "23"],
"max_temperature": ["30", "28"]
},
"forecast": {
"periods": ["2023-01-01", "2023-01-02"],
"precipitation": ["5", "10"],
"temperature": ["25", "23"],
"max_temperature": ["30", "28"]
},
}
# Assertion
assert json_data == expected_data
# Clean up the temporary directory and files
os.remove(first_csv_filepath)
os.remove(second_csv_filepath)
os.remove(expected_output_filepath)
for dir in temp_dir:
os.rmdir(dir) | [
"def prepare_temp_source_csv_file(self, csv_file_path):\n updated_rows = []\n with open(temp_file_dir, \"w\") as ftempout:\n column_processed = False\n for line in open(csv_file_path):\n if not column_processed:\n updated_column_name = self.get_updated_columns(line)\n ftempout.write(updated_column_name)\n column_processed = True\n else:\n ftempout.write(line)\n ftempout.write(\"\\n\")\n # below code will run if source file contains amount in integer & decimal columns\n if updated_column_name.find('amount_integer') != -1:\n with open(temp_file_dir, \"r\") as ftempread:\n reader = csv.DictReader(ftempread) # read rows into a dictionary format\n for row in reader: # read a row as {column1: value1, column2: value2,...}\n row['amount'] = int(row['amount_integer']) + int(row['amount_decimal'])/100\n row.pop('amount_integer', None)\n row.pop('amount_decimal', None)\n updated_rows.append(row)\n if updated_rows:\n keys = updated_rows[0].keys()\n with open(temp_file_dir, 'w') as ftempupdated:\n dict_writer = csv.DictWriter(ftempupdated, keys)\n dict_writer.writeheader()\n dict_writer.writerows(updated_rows)",
"def _read_existing(self):\n\n print(\"Reading file entries of previously created csv file...\")\n with open(self._output_file, 'r', encoding='utf-16') as previous_file:\n # Dismiss the csv header row\n next(previous_file)\n\n for line in previous_file:\n line_split = line.split(';')\n # First column of csv holds the file path\n self._existing_files.append(line_split[0])",
"def read_and_write_file(json_filepath, csv_filepath, column_names):\n if sys.version_info[0] < 3:\n with open(csv_filepath, 'wb+') as fout:\n csv_file = csv.writer(fout)\n csv_file.writerow(list(column_names))\n try:\n with open(json_filepath) as fin:\n for line in fin:\n line_contents = json.loads(line)\n csv_file.writerow(get_row(line_contents, column_names))\n except Exception: # handle single json outer entry files\n line_contents = json.load(open(json_filepath))\n csv_file.writerow(get_row(line_contents, column_names))\n else:\n with open(csv_filepath, 'w+') as fout:\n csv_file = csv.writer(fout)\n csv_file.writerow(list(column_names))\n try:\n with open(json_filepath) as fin:\n for line in fin:\n line_contents = json.loads(line)\n csv_file.writerow(get_row(line_contents, column_names))\n except Exception: # handle single json outer entry files\n line_contents = json.load(open(json_filepath))\n csv_file.writerow(get_row(line_contents, column_names))",
"def parse_csv_data_to_json(input_file, output_file):\n with open(input_file) as f:\n # open the output file for writing\n with open(output_file, 'w') as myfile:\n\n # read in the csv\n input_content = csv.reader(f, delimiter=',')\n\n # skip the header and store it to be used with the json objects\n field_names = next(f).strip().split(\",\")\n number_of_records_written = 0\n for x in input_content:\n # make a dictionary of keys and values for json dumping\n dictionary = dict(zip(field_names, x))\n\n # delete an fields that are empty string to suppress errors while uploading\n cleaned_dict = {k: v for k, v in dictionary.items() if v is not \"\"}\n\n # set the id of the index to the ack id\n action_and_meta_data[\"index\"][\"_id\"] = cleaned_dict.get(\"ACK_ID\")\n\n # dump the index and data to file\n json.dump(action_and_meta_data, myfile)\n myfile.write('\\n')\n json.dump(cleaned_dict, myfile)\n myfile.write('\\n')\n number_of_records_written += 1\n\n return number_of_records_written",
"def create_json_from_csv(csv_file, delimiter, cols_delimiter, keep, dic_types, infer_types, max_docs, json_file, per_line):\n\n # Get header of csv\n header_csv = get_header_csv(csv_file, cols_delimiter)\n\n # Create structure of json\n print(' [INFO] Creating json\\'s structure')\n jstruct = create_json_structure(header_csv, delimiter)\n print(jstruct)\n # Read csv line by line and create list of json\n print(' [INFO] Filling json') \n js_content = []\n with open(csv_file, 'r') as f:\n reader = csv.DictReader(f, delimiter=cols_delimiter)\n i = 0\n beg = True\n end = True\n # Prepare output file if dump in one file\n if max_docs == -1 and not per_line:\n beg = False\n end = False\n with open(json_file, 'w') as jsf:\n jsf.write('[\\n')\n for row in reader:\n if infer_types:\n row = {x: infer_type(row[x]) for x in row}\n jexample = copy.deepcopy(jstruct)\n js_content.append(create_json_example(row, header_csv, jexample, delimiter, keep, dic_types))\n\n i += 1\n # Dump json in streaming\n if (max_docs == -1) and ((i % 10000) == 0):\n dump(json_file, js_content, max_docs, per_line, i // max_docs, beg, end)\n js_content = []\n elif (max_docs != -1) and (i % max_docs) == 0:\n dump(json_file, js_content, max_docs, per_line, i // max_docs, beg, end)\n js_content = []\n\n # Dump last jsons\n if js_content:\n dump(json_file, js_content, max_docs, per_line, i // max_docs, beg, True)\n\n print(' [INFO] Json{} successfully created and dumped'.format('s' if (max_docs != -1) else ''))\n\n return",
"def create_dataset(input_file_path, output_file_path):\n col_index_map = {'user_id': 0, 'session_id': 1, 'timestamp': 2, 'step': 3, 'action_type': 4, 'reference': 5,\n 'platform': 6, 'city': 7, 'device': 8,\n 'current_filters': 9, 'impressions': 10, 'prices': 11}\n flat_dict = dict()\n with open(input_file_path, 'r') as csvFile:\n reader = csv.reader(csvFile)\n header = next(reader)\n col_names = [col_name for col_name in col_index_map.keys()]\n col_names.pop(0)\n index = 0\n for row in tqdm(reader):\n if len(flat_dict) > 40000:\n index += 1\n with open(output_file_path + \"_\" + str(index) + \".json\", \"w\") as file:\n json.dump(flat_dict, file)\n print(\" JSON : \", index)\n flat_dict = dict()\n col_values = [row[col_index_map[c_n]] for c_n in col_names]\n dict_for_each_row = dict(zip(col_names, col_values))\n to_list = dict_for_each_row['impressions']\n dict_for_each_row['impressions'] = to_list.split('|')\n to_list = dict_for_each_row['prices']\n dict_for_each_row['prices'] = to_list.split('|')\n user_id = row[col_index_map['user_id']]\n if user_id in flat_dict:\n flat_dict[user_id].append(dict_for_each_row)\n else:\n flat_dict[user_id] = [dict_for_each_row]\n\n print(\"Output is Saved\")",
"def csv_to_json(abs_file_path, json_file_path):\n data = []\n\n dir_path = os.path.dirname(abs_file_path)\n os.chmod(dir_path, 0o777)\n csv_file_path = abs_file_path\n if os.path.isfile(csv_file_path):\n with open(csv_file_path, encoding='utf-8-sig') as csv_file:\n csv_reader = csv.DictReader(filter(lambda row: row[0] !='#', csv_file),delimiter=';')\n for rows in csv_reader:\n rows['_id'] = rows.get('timestamp')\n data.append(rows)\n with open(json_file_path, 'w') as json_file:\n json_file.write(json.dumps(data, indent=4))\n return 0\n return 1",
"def merge_csv_daily(output_filename, path):\n\n # import csv files from folder\n allFiles = glob.glob(path + \"*.csv\")\n\n with open(output_filename, 'wb+') as outfile:\n for i, fname in enumerate(allFiles):\n with open(fname, 'rb') as infile:\n if i != 0:\n infile.readline() # Throw away header on all but first file\n # Block copy rest of file from input to output without parsing\n shutil.copyfileobj(infile, outfile)\n # print(fname + \" has been imported.\")\n\n # adding MissingObs column back:\n df = pd.read_csv(output_filename, header=0, sep=',', index_col=[0,1], parse_dates=False)\n df.insert(loc=3, column='MissingObs', value=np.zeros((df.shape[0], )))\n df.to_csv(output_filename, sep=',')\n\n return output_filename",
"def test_first_time_reading_csv_file(self):\n\n # Create a temporary directory for test files\n temp_dir = \"test_files/observed\"\n os.makedirs(temp_dir, exist_ok=True)\n\n # Create a test CSV file\n csv_filepath = os.path.join(temp_dir, \"Abadia-BA_-11.56_-37.52.csv\")\n with open(csv_filepath, \"w\", newline=\"\") as csv_file:\n writer = csv.writer(csv_file, delimiter=\";\")\n writer.writerow([\"periods\", \"precipitation\", \"temperature\", \"max_temperature\"])\n writer.writerow([\"2023-01-01\", \"5\", \"25\", \"30\"])\n writer.writerow([\"2023-01-02\", \"10\", \"23\", \"28\"])\n\n # Define the expected output JSON file path\n expected_output_filepath = os.path.join(temp_dir, \"BA_Abadia.json\")\n\n # Call the function under test\n extractor.csv_to_json(csv_filepath, temp_dir)\n\n # Verify that the output JSON file exists\n assert os.path.exists(expected_output_filepath)\n\n # Load the output JSON file\n with open(expected_output_filepath, \"r\") as json_file:\n json_data = json.load(json_file)\n\n # Verify the contents of the JSON file\n expected_data = {\n \"city\": \"Abadia\",\n \"state\": \"BA\",\n \"coordinates\": [\"-11.56\", \"-37.52\"],\n \"observed\": {\n \"periods\": [\"2023-01-01\", \"2023-01-02\"],\n \"precipitation\": [\"5\", \"10\"],\n \"temperature\": [\"25\", \"23\"],\n \"max_temperature\": [\"30\", \"28\"]\n }\n }\n assert json_data == expected_data\n\n # Clean up the temporary directory and files\n os.remove(csv_filepath)\n os.remove(expected_output_filepath)\n os.rmdir(temp_dir)",
"def create_output_file():\n filename = \"spec_list.csv\"\n if os.path.isfile(filename): #if such file exists\n i = 0\n while os.path.isfile(filename): #it will moify it's name and check\n i += 1 #wheather file with modified name exists\n filename = \"spec_list\" + str(i) + \".csv\"\n else: #if no - it will return such fiel\n f = open(filename, 'w')\n return f\n else: #if we are lucky and \"spec_list.csv files do not exist\n f = open(filename, 'w')\n return f",
"def test_two_save_call(fs):\n\n result_list = [\n Result('toto', 0, 0.1, [0.1111, 0.2222], [0.3333, 0.4444]),\n Result('titi', 0, 0.2, [0.5555, 0.6666], [0.7777, 0.8888]),\n ]\n\n output = CSVOutput('toto.csv')\n assert os.path.exists('toto.csv')\n csv_file = open('toto.csv', 'r')\n assert csv_file.readline() == 'label,timestamp,duration,pkg,dram,socket\\n'\n assert csv_file.readline() == '' # end of file\n\n result = result_list[0]\n output.add(result)\n output.save()\n\n line1 = f\"\"\"{result.label},{result.timestamp},{result.duration},{result.pkg[0]},{result.dram[0]},0\\n\"\"\"\n line2 = f\"\"\"{result.label},{result.timestamp},{result.duration},{result.pkg[1]},{result.dram[1]},1\\n\"\"\"\n\n assert line1 == csv_file.readline()\n assert line2 == csv_file.readline()\n assert csv_file.readline() == ''\n csv_file.close()\n\n output.add(result_list[1])\n output.save()\n csv_file = open('toto.csv', 'r')\n\n assert csv_file.readline() == 'label,timestamp,duration,pkg,dram,socket\\n'\n for result in result_list:\n line1 = f\"\"\"{result.label},{result.timestamp},{result.duration},{result.pkg[0]},{result.dram[0]},0\\n\"\"\"\n line2 = f\"\"\"{result.label},{result.timestamp},{result.duration},{result.pkg[1]},{result.dram[1]},1\\n\"\"\"\n\n assert line1 == csv_file.readline()\n assert line2 == csv_file.readline()\n assert csv_file.readline() == ''",
"def test_auto_aggregate_csv_file_add_to_folder(self):\n\n self._test_csv_file_add(new_folder=\"csv_folder\")",
"def merge_csv_files(csvFilenames, outpath=None):\n dataframe = pd.concat([pd.read_csv(fname, header=None) for fname in csvFilenames])\n outFilename = get_outfilename(csvFilenames[0])\n dataframe.to_csv(outFilename)\n # remove_old_files(csvFlenames)\n\n return outFilename",
"def create_manifest_file(csv_file, manifest_file, s3_path):\n logger.info(\"Processing CSV file %s.\", csv_file)\n\n image_count = 0\n anomalous_count = 0\n\n with open(csv_file, newline='', encoding=\"UTF-8\") as csvfile,\\\n open(manifest_file, \"w\", encoding=\"UTF-8\") as output_file:\n\n image_classifications = csv.reader(\n csvfile, delimiter=',', quotechar='|')\n\n # Process each row (image) in the CSV file.\n for row in image_classifications:\n # Skip empty lines.\n if not ''.join(row).strip():\n continue\n\n source_ref = str(s3_path) + row[0]\n classification = 0\n\n if row[1].lower() == 'anomaly':\n classification = 1\n anomalous_count += 1\n\n # Create the JSON line.\n json_line = {}\n json_line['source-ref'] = source_ref\n json_line['anomaly-label'] = str(classification)\n\n metadata = {}\n metadata['confidence'] = 1\n metadata['job-name'] = \"labeling-job/anomaly-classification\"\n metadata['class-name'] = row[1]\n metadata['human-annotated'] = \"yes\"\n metadata['creation-date'] = datetime.now(timezone.utc).strftime('%Y-%m-%dT%H:%M:%S.%f')\n metadata['type'] = \"groundtruth/image-classification\"\n\n json_line['anomaly-label-metadata'] = metadata\n\n output_file.write(json.dumps(json_line))\n output_file.write('\\n')\n image_count += 1\n\n logger.info(\"Finished creating manifest file %s.\\n\"\n \"Images: %s\\nAnomalous: %s\",\n manifest_file,\n image_count,\n anomalous_count)\n return image_count, anomalous_count",
"def test_update_csv(): # ***Incomplete test\n ##########################\n # Arrange.\n outfp = \"outfp\"\n csv_file = \"csv_file\"\n\n ##########################\n # Act.\n #x = update_csv(outfp,\n #\t\tcsv_file)\n\n ##########################\n # Assert.\n assert True == True # ***Temporary.",
"def csv_merge(file1, file2, key_list, file_out):\r\n\timport copy\r\n\tif file1[-2:]=='gz':\r\n\t\tfin1= gzip.open(file1,'rb')\r\n\telse:\r\n\t\tfin1= open(file1,'rb')\r\n\tif file2[-2:]=='gz':\r\n\t\tfin2= gzip.open(file2,'rb')\r\n\telse:\r\n\t\tfin2= open(file2,'rb')\r\n\tif file_out[-2:]=='gz':\r\n\t\tfout= gzip.open(file_out,'wb')\r\n\telse:\r\n\t\tfout= open(file_out,'wb')\r\n\r\n\tinfile1 = csv.DictReader(fin1,delimiter=',')\r\n\tinfile2 = csv.DictReader(fin2,delimiter=',')\r\n\tfield_list = infile1.fieldnames+infile2.fieldnames\r\n\r\n\theader_new=[]\r\n\tempty_row = {}\r\n\tfor key in field_list:\r\n\t\tif not(key in empty_row):\r\n\t\t\tempty_row[key]=''\r\n\t\t\theader_new.append(key)\r\n\t#header_new.append('merge_key') # do not attach merge key and ind\r\n\t#header_new.append('merge_ind')\r\n\toutfile = csv.writer(fout)\r\n\toutfile.writerow(header_new)\r\n\r\n\trow1 = infile1.next()\r\n\tcreate_merge_key(row1, key_list)\r\n\trow2 = infile2.next()\r\n\tcreate_merge_key(row2, key_list)\r\n\tEOF1_flag=0\r\n\tEOF2_flag=0\r\n\twhile EOF1_flag==0 and EOF2_flag==0:\r\n\t\tif row1['merge_key']<row2['merge_key'] or EOF2_flag==1:\r\n\t\t\ttemp=copy.deepcopy(empty_row)\r\n\t\t\ttemp.update(row1)\r\n\t\t\ttemp['merge_ind']=\"10\"\r\n\t\t\toutfile.writerow([temp[var] for var in header_new])\r\n\t\t\ttry:\r\n\t\t\t\trow1 = infile1.next()\r\n\t\t\t\tcreate_merge_key(row1, key_list)\r\n\t\t\texcept:\r\n\t\t\t\tEOF1_flag=1\r\n\t\t\t\r\n\t\tif row1['merge_key']>row2['merge_key']or EOF1_flag==1:\r\n\t\t\ttemp=copy.deepcopy(empty_row)\r\n\t\t\ttemp.update(row2)\r\n\t\t\ttemp['merge_ind']=\"01\"\r\n\t\t\toutfile.writerow([temp[var] for var in header_new])\r\n\t\t\ttry:\r\n\t\t\t\trow2 = infile2.next()\r\n\t\t\t\tcreate_merge_key(row2, key_list)\r\n\t\t\texcept:\r\n\t\t\t\tEOF2_flag=1\r\n\t\t\t\r\n\t\tif row1['merge_key']==row2['merge_key']:\r\n\t\t\ttemp=copy.deepcopy(row2)\r\n\t\t\ttemp.update(row1)\r\n\t\t\ttemp['merge_ind']=\"11\"\r\n\t\t\toutfile.writerow([temp[var] for var in header_new])\r\n\t\t\ttry:\r\n\t\t\t\trow1 = infile1.next()\r\n\t\t\t\tcreate_merge_key(row1, key_list)\r\n\t\t\texcept:\r\n\t\t\t\tEOF1_flag=1\r\n\t\t\ttry:\r\n\t\t\t\trow2 = infile2.next()\r\n\t\t\t\tcreate_merge_key(row2, key_list)\r\n\t\t\texcept:\r\n\t\t\t\tEOF2_flag=1",
"def sync_csv(arg):\n files = os.listdir(arg.input_dir)\n file_map = dict()\n for f in files:\n label = os.path.splitext(f)[0].split('_')\n if len(label) < 2:\n continue\n if file_map.get(label[0], None) is None:\n file_map[label[0]] = dict()\n if label[1] == 'grid':\n file_map[label[0]]['grid'] = f\n else:\n if file_map[label[0]].get('data', None) is None:\n file_map[label[0]]['data'] = dict()\n if file_map[label[0]]['data'].get(label[1], None) is None:\n file_map[label[0]]['data'][label[1]] = []\n file_map[label[0]]['data'][label[1]].append(f)\n tar_name = 't{}'.format(datetime.now().strftime('%Y%m%d'))\n tar_path = os.path.join(arg.output_dir, 'tar', tar_name)\n if not os.path.exists(tar_path):\n os.mkdir(tar_path)\n i = 0\n n = len(file_map)\n for city, v in file_map.items():\n i = i + 1\n print('------ handle city [{}/{}]: {} -------'.format(i, n, city))\n city_csv_path = os.path.join(arg.output_dir, 'csv', city)\n city_sql_path = os.path.join(arg.output_dir, 'sql', city)\n if not os.path.exists(city_csv_path):\n os.mkdir(city_csv_path)\n if not os.path.exists(city_sql_path):\n os.mkdir(city_sql_path)\n grid = v.get('grid', None)\n if grid:\n grid = os.path.splitext(grid)[0]\n print('** handling grid ...')\n tar_sql = os.path.join(tar_path, '{}.sql.gz'.format(grid))\n if os.path.exists(tar_sql):\n print('****** {} exist!'.format(tar_sql))\n else:\n shutil.copyfile(os.path.join(arg.input_dir, '{}.csv'.format(grid)),\n os.path.join(city_csv_path, '{}.csv'.format(grid)))\n trans_grid(city, city_csv_path, city_sql_path)\n print('****** GZIP grid sql')\n with open(os.path.join(city_sql_path, '{}.sql'.format(grid))) as fi, gzip.open(\n os.path.join(tar_path, '{}.sql.gz'.format(grid)), 'wb') as fo:\n fo.write(fi.read().encode())\n data = v.get('data', None)\n if data:\n print('** handling data ...')\n for week, data_files in data.items():\n print('**** week: {}'.format(week))\n tar_detail = os.path.join(tar_path, '{}_{}_detail.sql.tar.gz'.format(city, week))\n if os.path.exists(tar_detail):\n print('****** {} exist!'.format(tar_detail))\n else:\n for data_file in data_files:\n shutil.copyfile(os.path.join(arg.input_dir, data_file), os.path.join(city_csv_path, data_file))\n create_detail(city, week, 30000, city_csv_path, city_sql_path)\n print('****** TAR detail sql')\n with tarfile.open(tar_detail, 'w:gz') as f:\n for city_week_detail in os.listdir(city_sql_path):\n if city_week_detail.startswith('{}_{}_detail'.format(city, week)):\n f.add(os.path.join(city_sql_path, city_week_detail), arcname=city_week_detail)\n print('****** remove csv and sql file...')\n for data_file in data_files:\n os.remove(os.path.join(city_csv_path, data_file))\n sql_files = os.path.join(city_sql_path, '{}_{}_detail*sql'.format(city, week))\n for sql_file in glob.glob(sql_files):\n os.remove(sql_file)",
"def test_json2csv(test_name, json_data, header_values, row_key, expected):\n json_file = create_file(json_data, 'output.json')\n output_json = json2csv(json_file, \"output_json.csv\",\n header_values=header_values,\n row_key=row_key)\n obs_out = file_2list(output_json)\n os.remove(output_json)\n assert obs_out == expected",
"def mergeTwoFiles(file1, file2, out_name):\n with open(file1, 'r') as first_file:\n with open(file2, 'r') as second_file:\n with open(out_name, 'w', newline = '') as output_file:\n reader1, reader2 = csv.reader(first_file), csv.reader(second_file)\n writer = csv.writer(output_file)\n\n current_line1 = next(reader1)\n current_line2 = next(reader2)\n\n # Mege the files as in Merge Sort: Compare the current lower lines of each file and the lowest is written first\n # then another line from the chosen file is retrieven. The process continues until one file runs out of lines\n while current_line1 != None and current_line2 != None:\n # Check that the obtained line is not an empty line or other irregular row\n if len(current_line1) < 11: \n try:\n current_line1 = next(reader1)\n except StopIteration:\n current_line1 = None\n continue\n \n if len(current_line2) < 11:\n try:\n current_line2 = next(reader2)\n except StopIteration:\n current_line2 = None\n continue\n \n if int(current_line1[10]) > int(current_line2[10]):\n writer.writerow(current_line2)\n # Get the next line of the other file\n try:\n current_line2 = next(reader2)\n except StopIteration:\n current_line2 = None\n else:\n writer.writerow(current_line1)\n # Get the next line of the other file\n try:\n current_line1 = next(reader1)\n except StopIteration:\n current_line1 = None\n\n # One of the readers ran out of lines, so write the remaining lines of the other reader as they are\n if current_line1 == None:\n for remaining_line in reader2:\n writer.writerow(remaining_line)\n if current_line2 == None:\n for remaining_line in reader2:\n writer.writerow(remaining_line)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
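The row above adds the merging requirement: a second call must extend an existing JSON file rather than overwrite it. Below is a hedged sketch of that branch only, assuming the same column-dict representation as the sketch earlier in this block; imports are repeated so the snippet stands alone.

import json
import os

def merge_into_existing(output_path, section, columns, base_metadata):
    # If a previous run already produced the JSON file, keep its contents
    # and only attach the new section (e.g. "forecast" next to "observed").
    if os.path.exists(output_path):
        with open(output_path) as json_file:
            data = json.load(json_file)
    else:
        data = dict(base_metadata)
    data[section] = columns
    with open(output_path, "w") as json_file:
        json.dump(data, json_file, indent=4)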
Description When given a csv_filepath and output_filepath and one of the columns has a blank header Expected Result creates a json file ignoring the blank column | def test_blank_column(self):
# Create a temporary directory for test files
temp_dir = "test_files/observed"
os.makedirs(temp_dir, exist_ok=True)
# Create a test CSV file
csv_filepath = os.path.join(temp_dir, "Abadia-BA_-11.56_-37.52.csv")
with open(csv_filepath, "w", newline="") as csv_file:
writer = csv.writer(csv_file, delimiter=";")
writer.writerow(["periods", "precipitation", "temperature", ""])
writer.writerow(["2023-01-01", "5", "25", ""])
writer.writerow(["2023-01-02", "10", "23", ""])
# Define the expected output JSON file path
expected_output_filepath = os.path.join(temp_dir, "BA_Abadia.json")
# Call the function under test
extractor.csv_to_json(csv_filepath, temp_dir)
# Verify that the output JSON file exists
assert os.path.exists(expected_output_filepath)
# Load the output JSON file
with open(expected_output_filepath, "r") as json_file:
json_data = json.load(json_file)
# Verify the contents of the JSON file
expected_data = {
"city": "Abadia",
"state": "BA",
"coordinates": ["-11.56", "-37.52"],
"observed": {
"periods": ["2023-01-01", "2023-01-02"],
"precipitation": ["5", "10"],
"temperature": ["25", "23"]
}
}
assert json_data == expected_data
# Clean up the temporary directory and files
os.remove(csv_filepath)
os.remove(expected_output_filepath)
os.rmdir(temp_dir) | [
"def parse_csv_data_to_json(input_file, output_file):\n with open(input_file) as f:\n # open the output file for writing\n with open(output_file, 'w') as myfile:\n\n # read in the csv\n input_content = csv.reader(f, delimiter=',')\n\n # skip the header and store it to be used with the json objects\n field_names = next(f).strip().split(\",\")\n number_of_records_written = 0\n for x in input_content:\n # make a dictionary of keys and values for json dumping\n dictionary = dict(zip(field_names, x))\n\n # delete an fields that are empty string to suppress errors while uploading\n cleaned_dict = {k: v for k, v in dictionary.items() if v is not \"\"}\n\n # set the id of the index to the ack id\n action_and_meta_data[\"index\"][\"_id\"] = cleaned_dict.get(\"ACK_ID\")\n\n # dump the index and data to file\n json.dump(action_and_meta_data, myfile)\n myfile.write('\\n')\n json.dump(cleaned_dict, myfile)\n myfile.write('\\n')\n number_of_records_written += 1\n\n return number_of_records_written",
"def test_to_csv_with_no_rows_returns_none(self):\n output = row_handling.to_csv(rows=[], csv_path=self.csv_path)\n assert output is None",
"def read_and_write_file(json_filepath, csv_filepath, column_names):\n if sys.version_info[0] < 3:\n with open(csv_filepath, 'wb+') as fout:\n csv_file = csv.writer(fout)\n csv_file.writerow(list(column_names))\n try:\n with open(json_filepath) as fin:\n for line in fin:\n line_contents = json.loads(line)\n csv_file.writerow(get_row(line_contents, column_names))\n except Exception: # handle single json outer entry files\n line_contents = json.load(open(json_filepath))\n csv_file.writerow(get_row(line_contents, column_names))\n else:\n with open(csv_filepath, 'w+') as fout:\n csv_file = csv.writer(fout)\n csv_file.writerow(list(column_names))\n try:\n with open(json_filepath) as fin:\n for line in fin:\n line_contents = json.loads(line)\n csv_file.writerow(get_row(line_contents, column_names))\n except Exception: # handle single json outer entry files\n line_contents = json.load(open(json_filepath))\n csv_file.writerow(get_row(line_contents, column_names))",
"def create_json_from_csv(csv_file, delimiter, cols_delimiter, keep, dic_types, infer_types, max_docs, json_file, per_line):\n\n # Get header of csv\n header_csv = get_header_csv(csv_file, cols_delimiter)\n\n # Create structure of json\n print(' [INFO] Creating json\\'s structure')\n jstruct = create_json_structure(header_csv, delimiter)\n print(jstruct)\n # Read csv line by line and create list of json\n print(' [INFO] Filling json') \n js_content = []\n with open(csv_file, 'r') as f:\n reader = csv.DictReader(f, delimiter=cols_delimiter)\n i = 0\n beg = True\n end = True\n # Prepare output file if dump in one file\n if max_docs == -1 and not per_line:\n beg = False\n end = False\n with open(json_file, 'w') as jsf:\n jsf.write('[\\n')\n for row in reader:\n if infer_types:\n row = {x: infer_type(row[x]) for x in row}\n jexample = copy.deepcopy(jstruct)\n js_content.append(create_json_example(row, header_csv, jexample, delimiter, keep, dic_types))\n\n i += 1\n # Dump json in streaming\n if (max_docs == -1) and ((i % 10000) == 0):\n dump(json_file, js_content, max_docs, per_line, i // max_docs, beg, end)\n js_content = []\n elif (max_docs != -1) and (i % max_docs) == 0:\n dump(json_file, js_content, max_docs, per_line, i // max_docs, beg, end)\n js_content = []\n\n # Dump last jsons\n if js_content:\n dump(json_file, js_content, max_docs, per_line, i // max_docs, beg, True)\n\n print(' [INFO] Json{} successfully created and dumped'.format('s' if (max_docs != -1) else ''))\n\n return",
"def csv_to_json(csv_filename):\n csv_trimmed = csv_filename[:-3]\n json_added = csv_trimmed + 'json'\n return json_added",
"def test_json2csv(test_name, json_data, header_values, row_key, expected):\n json_file = create_file(json_data, 'output.json')\n output_json = json2csv(json_file, \"output_json.csv\",\n header_values=header_values,\n row_key=row_key)\n obs_out = file_2list(output_json)\n os.remove(output_json)\n assert obs_out == expected",
"def convert_from_csv_to_JSON(csv_data, header=False):\n sys.stderr.write(\"NotImplemented yet!\")\n sys.exit(1)",
"def csv_to_json(abs_file_path, json_file_path):\n data = []\n\n dir_path = os.path.dirname(abs_file_path)\n os.chmod(dir_path, 0o777)\n csv_file_path = abs_file_path\n if os.path.isfile(csv_file_path):\n with open(csv_file_path, encoding='utf-8-sig') as csv_file:\n csv_reader = csv.DictReader(filter(lambda row: row[0] !='#', csv_file),delimiter=';')\n for rows in csv_reader:\n rows['_id'] = rows.get('timestamp')\n data.append(rows)\n with open(json_file_path, 'w') as json_file:\n json_file.write(json.dumps(data, indent=4))\n return 0\n return 1",
"def initial_csv_wrangling(csv_file):\n df = pd.read_csv(csv_file)\n df = df.fillna('')\n columns = list(df.columns)\n\n # check that \"url\" column exists (required)\n if 'url' not in columns:\n raise Exception('Input csv file requires a \"url\" column, which does not seem to exist. Exiting.')\n\n # check if \"pos_concepts\" column exists and parse accordingly (not required)\n if 'pos_concepts' in columns:\n print('Found \"pos_concepts\" column. Values will be split by pipe/vertical bar \"|\" into a python list.')\n df['pos_concepts'] = df['pos_concepts'].map(lambda x: list(set(x.split('|'))))\n\n # check if \"neg_concepts\" column exists and parse accordingly (not required)\n if \"neg_concepts\" in columns:\n print('Found \"neg_concepts\" column. Values will be split by pipe/vertical bar \"|\" into a python list.')\n df['neg_concepts'] = df['neg_concepts'].map(lambda x: list(set(x.split('|'))))\n\n # check if \"metadata\" column exists and load accordingly (not required)\n if \"metadata\" in columns:\n print('Found \"metadata\" column. Attempting to ingest.')\n try:\n df['metadata'] = df['metadata'].replace('','{}').map(json.loads)\n except:\n raise Exception('Value in \"metadata\" column does not seem to be a properly JSON formatted str.')\n\n return df",
"def create_csv_with_blank_rows(filepath):\n with open(filepath, 'w') as open_file:\n open_file.write(\n '\"foo\", \"bar\", \"baz\"\\n'\n ',,\\n'\n '1, 2, 3\\n'\n ',,\\n'\n '2, \"b\", \"c\"\\n')",
"def create_dataset(input_file_path, output_file_path):\n col_index_map = {'user_id': 0, 'session_id': 1, 'timestamp': 2, 'step': 3, 'action_type': 4, 'reference': 5,\n 'platform': 6, 'city': 7, 'device': 8,\n 'current_filters': 9, 'impressions': 10, 'prices': 11}\n flat_dict = dict()\n with open(input_file_path, 'r') as csvFile:\n reader = csv.reader(csvFile)\n header = next(reader)\n col_names = [col_name for col_name in col_index_map.keys()]\n col_names.pop(0)\n index = 0\n for row in tqdm(reader):\n if len(flat_dict) > 40000:\n index += 1\n with open(output_file_path + \"_\" + str(index) + \".json\", \"w\") as file:\n json.dump(flat_dict, file)\n print(\" JSON : \", index)\n flat_dict = dict()\n col_values = [row[col_index_map[c_n]] for c_n in col_names]\n dict_for_each_row = dict(zip(col_names, col_values))\n to_list = dict_for_each_row['impressions']\n dict_for_each_row['impressions'] = to_list.split('|')\n to_list = dict_for_each_row['prices']\n dict_for_each_row['prices'] = to_list.split('|')\n user_id = row[col_index_map['user_id']]\n if user_id in flat_dict:\n flat_dict[user_id].append(dict_for_each_row)\n else:\n flat_dict[user_id] = [dict_for_each_row]\n\n print(\"Output is Saved\")",
"def clean_csv(drive, filename):\n filename_json = filename.split('.')[0] + \".json\"\n df = pd.read_csv(filename, skiprows=10, sep='delimiter', engine='python')\n # steps to clean the csv based on sample file\n df.columns = df.columns.str.replace('\"', '')\n df.columns = df.columns.str.replace(\" \", \"_\")\n df.columns = df.columns.str.replace(\"-\", \"_\")\n columns = ''.join(df.columns)\n columns = columns.split(',')\n columns.append(\"extra_column\")\n df = pd.read_csv(filename, skiprows=11, sep=',', names=columns)\n del df['extra_column']\n df.to_json(filename_json, orient=\"records\", lines=True)\n # remove file to reduce storage usage\n os.remove(drive + filename)\n return filename_json",
"def convert_csv_to_json():\n result = {}\n try:\n with open(FILE_NAME, 'r', newline='') as csvfile:\n reader = csv.reader(csvfile)\n for row in reader:\n for entry_id in range(len(row)):\n row[entry_id] = row[entry_id].replace(\"'\", \"\\\"\")\n result[row[0]] = [json.loads(row[1])] + [row[2]]\n except FileNotFoundError:\n print('Could not find csv')\n return False\n with open('log.json', 'w+') as output_file:\n json.dump(result, output_file)\n return True",
"def prepare_temp_source_csv_file(self, csv_file_path):\n updated_rows = []\n with open(temp_file_dir, \"w\") as ftempout:\n column_processed = False\n for line in open(csv_file_path):\n if not column_processed:\n updated_column_name = self.get_updated_columns(line)\n ftempout.write(updated_column_name)\n column_processed = True\n else:\n ftempout.write(line)\n ftempout.write(\"\\n\")\n # below code will run if source file contains amount in integer & decimal columns\n if updated_column_name.find('amount_integer') != -1:\n with open(temp_file_dir, \"r\") as ftempread:\n reader = csv.DictReader(ftempread) # read rows into a dictionary format\n for row in reader: # read a row as {column1: value1, column2: value2,...}\n row['amount'] = int(row['amount_integer']) + int(row['amount_decimal'])/100\n row.pop('amount_integer', None)\n row.pop('amount_decimal', None)\n updated_rows.append(row)\n if updated_rows:\n keys = updated_rows[0].keys()\n with open(temp_file_dir, 'w') as ftempupdated:\n dict_writer = csv.DictWriter(ftempupdated, keys)\n dict_writer.writeheader()\n dict_writer.writerows(updated_rows)",
"def obs_csv2json(input_file,output_file,example_path,instrument):\r\n\r\n obs_path = Path(cfg.obs_path)\r\n \r\n with open(example_path,'r') as e:\r\n example = js.load(e)\r\n \r\n #deleting unused categories\r\n del(example['sep_forecast_submission']['forecasts'])\r\n del(example['sep_forecast_submission']['triggers'][2])\r\n del(example['sep_forecast_submission']['triggers'][1])\r\n del(example['sep_forecast_submission']['triggers'][0])\r\n del(example['sep_forecast_submission']['triggers'][0]['particle_intensity']['instrument'])\r\n del(example['sep_forecast_submission']['triggers'][0]['particle_intensity']['last_data_time'])\r\n del(example['sep_forecast_submission']['contacts'])\r\n del(example['sep_forecast_submission']['model'])\r\n del(example['sep_forecast_submission']['issue_time'])\r\n \r\n example['sep_forecast_submission']['mode'] = 'observation'\r\n\r\n #json template for observations\r\n obs_json = example\r\n\r\n fieldnames = ('energy_threshold','flux_threshold','start_time','intensity',\r\n 'peak_time','rise_time','end_time','duration','fluence>10',\r\n 'fluence>100')\r\n\r\n #extracting data from csv file\r\n with open(input_file,'r') as f:\r\n reader = csv.DictReader(f, fieldnames)\r\n out = js.dumps( [ row for row in reader ] )\r\n\r\n obs_data = js.loads(out)\r\n\r\n data={}\r\n (obs_json['sep_forecast_submission']['triggers'][0]['particle_intensity']\r\n ['observatory']) = instrument\r\n\r\n #creating data for all energy levels forecast\r\n for j in range(1,len(obs_data)):\r\n data[j-1]=obs_data[j]\r\n\r\n #recording start and end times for all events\r\n for i in range(len(data)):\r\n data[i]['start_time'] = datetime.strptime(data[i]['start_time'],'%Y-%m-%d %H:%M:%S')\r\n data[i]['start_time'] = data[i]['start_time'].isoformat()\r\n data[i]['end_time'] = datetime.strptime(data[i]['end_time'],'%Y-%m-%d %H:%M:%S')\r\n data[i]['end_time'] = data[i]['end_time'].isoformat()\r\n data[i]['peak_time'] = datetime.strptime(data[i]['peak_time'],'%Y-%m-%d %H:%M:%S')\r\n data[i]['peak_time'] = data[i]['peak_time'].isoformat()\r\n \r\n #recording observed values for all events\r\n if i > 0:\r\n (obs_json['sep_forecast_submission']['triggers'][0]['particle_intensity']\r\n ['ongoing_events']).append({})\r\n\r\n event = (obs_json['sep_forecast_submission']['triggers'][0]['particle_intensity']\r\n ['ongoing_events'][i])\r\n \r\n #start and end times\r\n event['start_time']=data[i]['start_time']\r\n event['threshold'] = data[i]['flux_threshold']\r\n event['energy_min'] = float(data[i]['energy_threshold'][1:])\r\n event['energy_max'] = -1\r\n event['end_time']=data[i]['end_time']\r\n\r\n #peak values\r\n event['peak_intensity']=data[i]['intensity']\r\n event['peak_time'] = data[i]['peak_time']\r\n event['intensity_units']='pfu'\r\n \r\n #fluence values\r\n event['fluence'] = [{'energy_min' : '10','fluence_value' : 'fluence_value',\r\n 'units' : 'MeV [cm^-2]'},\r\n {'energy_min' : '100', 'fluence_value' : 'fluence_value',\r\n 'units' : 'MeV [cm^-2]'}]\r\n event['fluence'][0]['fluence']=data[i]['fluence>10']\r\n event['fluence'][1]['fluence']=data[i]['fluence>100']\r\n\r\n\r\n if float(event['peak_intensity']) >= cfg.pfu_threshold[cfg.energy_threshold.index\r\n (int(event['energy_min']))]:\r\n event['all_clear_boolean'] = 'false'\r\n\r\n else:\r\n event['all_clear_boolean'] = 'true'\r\n\r\n\r\n #building json file\r\n with open(obs_path / output_file, 'w') as s:\r\n js.dump(obs_json,s,indent=1)\r\n print('json file %s created' %output_file)\r\n \r\n return",
"def test_first_time_reading_csv_file(self):\n\n # Create a temporary directory for test files\n temp_dir = \"test_files/observed\"\n os.makedirs(temp_dir, exist_ok=True)\n\n # Create a test CSV file\n csv_filepath = os.path.join(temp_dir, \"Abadia-BA_-11.56_-37.52.csv\")\n with open(csv_filepath, \"w\", newline=\"\") as csv_file:\n writer = csv.writer(csv_file, delimiter=\";\")\n writer.writerow([\"periods\", \"precipitation\", \"temperature\", \"max_temperature\"])\n writer.writerow([\"2023-01-01\", \"5\", \"25\", \"30\"])\n writer.writerow([\"2023-01-02\", \"10\", \"23\", \"28\"])\n\n # Define the expected output JSON file path\n expected_output_filepath = os.path.join(temp_dir, \"BA_Abadia.json\")\n\n # Call the function under test\n extractor.csv_to_json(csv_filepath, temp_dir)\n\n # Verify that the output JSON file exists\n assert os.path.exists(expected_output_filepath)\n\n # Load the output JSON file\n with open(expected_output_filepath, \"r\") as json_file:\n json_data = json.load(json_file)\n\n # Verify the contents of the JSON file\n expected_data = {\n \"city\": \"Abadia\",\n \"state\": \"BA\",\n \"coordinates\": [\"-11.56\", \"-37.52\"],\n \"observed\": {\n \"periods\": [\"2023-01-01\", \"2023-01-02\"],\n \"precipitation\": [\"5\", \"10\"],\n \"temperature\": [\"25\", \"23\"],\n \"max_temperature\": [\"30\", \"28\"]\n }\n }\n assert json_data == expected_data\n\n # Clean up the temporary directory and files\n os.remove(csv_filepath)\n os.remove(expected_output_filepath)\n os.rmdir(temp_dir)",
"def convert_to_csv():\n for c_file in CSV_FILES:\n # Selects all rows where type belongs to a specific file (e.g. - node --> nodes.csv)\n file_data = [row for row in DATA if ('type' in row) and (row['type'] == c_file[:-5])]\n for row in file_data:\n DATA.remove(row) # Remove selected rows from DATA. We no longer need to iterate through them\n\n # We only need a 'type' column to differentiate between nodes_tags and ways_tags\n # Remove 'type' from the current rows if they don't belong to those files\n if 'tag' not in c_file:\n for row in file_data:\n row.pop('type')\n\n with io.open(c_file, 'wb') as csv_file:\n csv_writer = unicodecsv.writer(csv_file, sql_schema.schema_keys[c_file[:-4]])\n\n # Put the keys in the header of the csv.\n header = []\n for key in sql_schema.schema_keys[c_file[:-4]]: # Specifies order of keys\n if key in file_data[0].keys(): # Only get keys that are actually in the data\n header.append(key.decode('utf-8')) # Converts to unicode\n csv_writer.writerow(header)\n\n # Converts all data into unicode and writes to the csv\n for i, row in enumerate(file_data):\n uni_row = [] # Holds the row with unicode values\n for v in sql_schema.schema_keys[c_file[:-4]]:\n if v in row:\n if isinstance(row[v], unicode): # For unicode, add it as is\n uni_row.append(row[v])\n elif isinstance(row[v], int): # For integers, convert it to a string then to unicode, then add\n uni_row.append(str(row[v]).decode('utf-8'))\n else: # If it's a string, convert it to unicode, then add\n uni_row.append(row[v].decode('utf-8'))\n csv_writer.writerow(uni_row) # Write the modified row to the csv\n\n print 'Loaded '+c_file",
"def csv_to_json(csv_file_path: str, json_file_path: str):\n fieldnames = ('last_name', 'first_name', 'second_name')\n\n # read csv file\n try:\n with open(Path(csv_file_path)) as csv_file:\n csv_reader = csv.DictReader(csv_file, fieldnames)\n csv_data = {num: row for num, row in enumerate(csv_reader, start=1)}\n except FileNotFoundError as err:\n raise CustomException() from err\n\n # generate json\n try:\n with open(Path(json_file_path), 'w') as json_file:\n json.dump(csv_data, json_file, indent=2)\n except OSError as err:\n raise CustomException() from err",
"def test_when_file_already_exist(self):\n\n # Create a temporary directory for test files\n temp_dir = [\"test_files/observed\", \"test_files/forecast\", \"test_files/output\"]\n for dir in temp_dir:\n os.makedirs(dir, exist_ok=True)\n\n # Create the 1st csv file\n first_csv_filepath = os.path.join(temp_dir[0], \"Abadia-BA_-11.56_-37.52.csv\")\n with open(first_csv_filepath, \"w\", newline=\"\") as csv_file:\n writer = csv.writer(csv_file, delimiter=\";\")\n writer.writerow([\"periods\", \"precipitation\", \"temperature\", \"max_temperature\"])\n writer.writerow([\"2023-01-01\", \"5\", \"25\", \"30\"])\n writer.writerow([\"2023-01-02\", \"10\", \"23\", \"28\"])\n \n # Creating the 2nd csv file in different directory\n second_csv_filepath = os.path.join(temp_dir[1], \"Abadia-BA_-11.56_-37.52.csv\")\n with open(second_csv_filepath, \"w\", newline=\"\") as csv_file:\n writer = csv.writer(csv_file, delimiter=\";\")\n writer.writerow([\"periods\", \"precipitation\", \"temperature\", \"max_temperature\"])\n writer.writerow([\"2023-01-01\", \"5\", \"25\", \"30\"])\n writer.writerow([\"2023-01-02\", \"10\", \"23\", \"28\"])\n\n # Define the expected output JSON file path\n expected_output_filepath = os.path.join(temp_dir[2], \"BA_Abadia.json\")\n\n # Call the function under test\n extractor.csv_to_json(first_csv_filepath, temp_dir[2])\n extractor.csv_to_json(second_csv_filepath, temp_dir[2])\n\n # Verify that the output JSON file exists\n assert os.path.exists(expected_output_filepath)\n\n # Load the output JSON file\n with open(expected_output_filepath, \"r\") as json_file:\n json_data = json.load(json_file)\n\n # Verify the contents of the JSON file\n expected_data = {\n \"city\": \"Abadia\",\n \"state\": \"BA\",\n \"coordinates\": [\"-11.56\", \"-37.52\"],\n \"observed\": {\n \"periods\": [\"2023-01-01\", \"2023-01-02\"],\n \"precipitation\": [\"5\", \"10\"],\n \"temperature\": [\"25\", \"23\"],\n \"max_temperature\": [\"30\", \"28\"]\n },\n \"forecast\": {\n \"periods\": [\"2023-01-01\", \"2023-01-02\"],\n \"precipitation\": [\"5\", \"10\"],\n \"temperature\": [\"25\", \"23\"],\n \"max_temperature\": [\"30\", \"28\"]\n },\n }\n\n # Assertion\n assert json_data == expected_data\n\n # Clean up the temporary directory and files\n os.remove(first_csv_filepath)\n os.remove(second_csv_filepath)\n os.remove(expected_output_filepath)\n for dir in temp_dir:\n os.rmdir(dir)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Delete a log file. | def delete_log(file_path):
if os.path.exists(file_path):
print('Deleting log %s...' % file_path)
os.remove(file_path)
else:
raise ValueError("File %r doesn't exists - cannot delete." % file_path) | [
"def remove_log():\n os.remove(_log_filename)",
"def delete_log():\n log_path = Path.cwd() / \"premise.log\"\n if log_path.exists():\n log_path.unlink()",
"def delete_file(fileName):\n os.remove(fileName)\n print (\"Deleteing file: \" + str(fileName))\n write_log()\n read_log()",
"def delete_file(url):\n with open(DAG_PATH + 'log.txt', 'a') as f:\n f.write(f\"{datetime.now()} deleting {url}...\\n\")\n filepath = os.getcwd() + \"/../data/\" + url.split(\"/\")[-1]\n os.remove(filepath)",
"def deleteGmlLoaderLogFile(logFile, command, logger):\n \n if os.path.isfile(logFile) == True:\n reader = open(logFile)\n \n for line in reader:\n if re.search(\"TransactionHandler - Rollback transaction\", line) != None:\n logger.error(\"TransactionHandler - Rollback transaction for \" + command)\n \n reader.close()\n message = \"Delete \" + logFile + \" \" + str(time.strftime(\"%d.%m.%Y %H:%M:%S\", time.gmtime(os.path.getmtime(logFile)))) + \" \" + str(os.path.getsize(logFile)) + \" bytes\"\n logger.info(message)\n os.remove(logFile)",
"def delete_file(file_path):\n pass",
"def delete_record_file(self, record_file, logStat):\n result = self.storage_delete_file(record_file.group, record_file.storage)\n if result:\n logStat(deleted=True, file_obj=record_file)\n record_file.delete()\n return result",
"def delete_file(self):\n target_file = choose_random_file(self.__save.get_root())\n file_log = str(target_file)\n parent_dir = target_file.get_parent()\n parent_dir.remove_entry(target_file)\n target_file.set_parent(self.__save.get_trash())\n self.__save.get_trash().add_entry(target_file)\n virus_id = randint(self.__save.get_virus_files()[0] + 1, self.__save.get_virus_files()[1])\n self.__save.log_deletion(virus_id, file_log)",
"def delete_file(path):\n\n os.remove(path)",
"def delete_file(filename):\n\tprint client.file_delete(filename)",
"def delete_logs(self):\n if self.etw_log is not None:\n files = sorted(glob.glob(self.etw_log + '*'))\n for path in files:\n try:\n os.remove(path)\n except Exception:\n pass",
"def delete_file(path):\n if os.path.isfile(path):\n os.remove(path)",
"def _clear_log(log_path):\n\n\twith logging._lock:\n\t\twith open(log_path, 'w'):\n\t\t\tpass",
"def delete(self, *, log):\n LogSchema().delete(log)",
"def delete_file(self):\n # DELETE /files/{user_id}/{path}\n pass",
"def delete(self, *args, **kwargs):\n self.file.delete(save=False)",
"def delete_file(path) -> None:\n path.unlink()\n print(path.absolute(), 'DELETED')",
"def remove_log_files(self):\n log_files = self.get_log_files()\n for file in log_files:\n os.remove(file)",
"def __delete_tmp_file(self, filename: str) -> None:\n os.remove(filename)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the value in degrees Fahrenheit converted to Celsius | def toCelsius(farenheit):
return (farenheit - 32)*5 / 9 | [
"def convert_f_to_c(temp_in_farenheit): ## ##\n celsiustemp = round((temp_in_farenheit - 32) * 5/9, 1) ##\n return celsiustemp ##",
"def fahr_to_celsius(temp_fahrenheit):\n\n converted_temp=(temp_fahrenheit-32)/1.8\n return converted_temp",
"def fahr_to_celsius(temp):\n tempInCel = (temp - 32) * 5/9\n return tempInCel",
"def fahrenheit_to_celsius(tempf):\n return (tempf-32)*5/9",
"def fahrenheit_to_celsius():\n fahrenheit = ent_temperature.get()\n celsius = (5 / 9) * (float(fahrenheit) - 32)\n lbl_result[\"text\"] = f\"{round(celsius, 2)} \\N{DEGREE CELSIUS}\"",
"def fahrenheit_to_celsius(temp_f):\n temp_c = (temp_f - 32.0) * (5.0/9.0)\n return temp_c",
"def Celsius2Fahrenheit(self,tempC):\n return 1.8*tempC + 32.0",
"def celsius(fahrenheit):\n return 5 / 9 * (fahrenheit - 32)",
"def celsius_to_fahrenheit(tempc):\n return (tempc*(9/5))+32",
"def fahrenheit_to_celsius():\n fahrenheit = entry_temp.get()\n # If entry field is empty, convert temp\n if fahrenheit != \"\":\n celsius = (5 / 9) * (float(fahrenheit) - 32)\n # Rounds celsius to 2 decimal places and converts\n # to text\n label_result[\"text\"] = f\"{round(celsius,2)} \\N{DEGREE CELSIUS}\"\n # Do nothing if entry field is empty\n else:\n pass",
"def convert_to_celsius(fahrenheit):\n return (fahrenheit - 32) * 5 / 9",
"def fahrenheit_to_kelvin():\n\n fahrenheit = entry_temp3.get()\n\n if fahrenheit != \"\":\n celsius = (5 / 9) * (float(fahrenheit) - 32) + 273.15\n\n label_result3[\"text\"] = f\"{round(celsius, 2)} \\N{DEGREE SIGN}K\"\n else:\n pass",
"def convertCelsiusToFahrenheit(celsius):\n fahrenheit = (celsius * 1.8) + 32\n return fahrenheit",
"def kelvin_to_celsius(temp_kelvin):\r\n return temp_kelvin - 275.15",
"def celsius_to_fahrenheit():\n celsius = entry_temp2.get()\n # If entry field is empty, convert temp\n if celsius != \"\":\n fahrenheit = (float(celsius) * (9/5) + 32)\n # Rounds fahrenheit to 2 decimal places and\n # converts to text\n label_result2[\"text\"] = f\"{round(fahrenheit,2)} \\N{DEGREE FAHRENHEIT}\"\n # Do nothing if entry field is empty\n else:\n pass",
"def convert_to_celsius(fahrenheit):\n\n return int(round((fahrenheit - 32) * 5 / 9))",
"def kelvin_to_celsius(temp):\n return temp - 273.15",
"def kelvin_to_farenheit(temp_kelvin):\r\n temp_celsius = kelvin_to_celsius(temp_kelvin)\r\n return temp_celsius * 9 / 5 + 32",
"def fahrenheitToCelcius(fahrenheit:float, ndigits = 2)->float:\n return round((float(fahrenheit) - 32) * 5 / 9, ndigits)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return github API URL as string | def get_api_url(self):
url = 'https://api.{}/repos/{}/{}/git/'.format(HOST_GITHUB, \
self.repo, self.product)
return url | [
"def get_GitHubAPIURL(self) -> str:\r\n return self.githubAPIURL",
"def github_url(self):\n return self.github.replace('.git', '')",
"def github_url(self) -> str:\n remote_url = self._repo.remotes[REMOTE].url\n # if we have an ssh remote, convert it to a URL\n for ssh_login in URL_REPLACEMENTS:\n if remote_url.startswith(ssh_login):\n logging.debug(f\"Remote URL starts with ssh_login: {remote_url}\")\n remote_url = remote_url.lstrip(ssh_login)\n remote_url = URL_REPLACEMENTS[ssh_login] + remote_url\n remote_url = remote_url.rstrip(\".git\")\n return remote_url",
"def api_repo_url(org_name):\n return 'https://api.github.com/orgs/{}/repos'.format(org_name)",
"def github_link(self):\n if self.test_type == TestType.commit:\n test_type = 'commit'\n test_id = self.commit\n else:\n test_type = 'pull'\n test_id = self.pr_nr\n\n return f\"{self.fork.github_url}/{test_type}/{test_id}\"",
"def repo_link(repo):\n return \"https://github.com/\" + repo",
"def url(self):\n\n return maybe_string(C.git_remote_url(self._remote))",
"def repo_url(self) -> str:\n return pulumi.get(self, \"repo_url\")",
"def get_gurl():\n accept_header = (\"Accept\", \"application/vnd.github.v3+json\")\n user_agent_header = constants.USER_AGENT\n gurl = Gurl(headers=(accept_header, user_agent_header))\n return gurl",
"def build_url(self, project_name):\n url = (\"%s/DefaultCollection/%s/_apis/git/repositories?api-version=%s\" % (self.instance, project_name, self.api_version))\n return url",
"def format_url(self, data):\n git_url = urlparse(data[\"git_url\"])\n\n url = \"oauth2:{0}@{1}\".format(data[\"token\"], git_url.netloc)\n return git_url._replace(netloc=url).geturl()",
"def create_link(repository, project_name):\n beginning_url = \"https://api.github.com/repos/\"\n separator_url = \"/\"\n end_url = \"/commits\"\n\n base_url = beginning_url+repository+separator_url+project_name+end_url\n return base_url",
"def test_build_url(self):\n s = self.build_session()\n url = s.build_url(\"gists\", \"123456\", \"history\")\n assert url == \"https://api.github.com/gists/123456/history\"",
"def get_api_url() -> str:\n\n site = pywikibot.Site()\n url = site.protocol() + \"://\" + site.hostname() + site.apipath()\n return url",
"def get_github_url(repo, path=''):\n github_branches = {\n 'IATI-Schemas': 'version-2.03',\n 'IATI-Codelists': 'version-2.03',\n 'IATI-Rulesets': 'version-2.03',\n 'IATI-Extra-Documentation': 'version-2.03',\n 'IATI-Codelists-NonEmbedded': 'master',\n }\n return 'https://github.com/IATI/{0}/blob/{1}/{2}'.format(repo, github_branches[repo], path)",
"def getProjectURL():",
"def get(self):\n return {'url': self._generate_github_auth_uri(g.user)}",
"def get_api_url() -> str:\n\n\tsite = pywikibot.Site()\n\turl = site.protocol() + \"://\" + site.hostname() + site.apipath()\n\treturn url",
"def build_github_homepage_url(purl):\n purl_data = PackageURL.from_string(purl)\n\n namespace = purl_data.namespace\n name = purl_data.name\n version = purl_data.version\n subpath = purl_data.subpath\n\n if not (name and namespace):\n return\n\n url = \"https://github.com/{namespace}/{name}\".format(namespace=namespace, name=name)\n\n if version:\n url = \"{url}/tree/{version}\".format(url=url, version=version)\n\n if subpath:\n url = \"{url}/{subpath}\".format(url=url, subpath=subpath)\n\n return url"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get a specific tag's data from Github API. | def get_tag(self, sha):
return self.get_url_data(self.api_url + 'tags/' + sha) | [
"def remote_tag(tag):\n url = \"%s/git/refs/tags\" % get_github_api_url()\n for result in requests.get(url).json():\n try:\n if result[\"ref\"] == \"refs/tags/%s\" % tag:\n return result\n except TypeError:\n return",
"def get_tag(self, tag):\n resp = self.get(_u.build_uri(\"tag\", domain=self.domain),\n data={'tag': tag})\n return utils.handle_response(resp)",
"def get(self, tag):\n return self._list(url='/tags/%s' % tag, response_key='data')",
"def get(self, endpoint, params=None):\n res = requests.get(\"https://api.github.com/\" + endpoint,\n auth=requests.auth.HTTPBasicAuth(self.credentials['username'], self.credentials['token']),\n params=params)\n return res.json()",
"def info(self, tag):\n\n url = \"https://api.instagram.com/v1/tags/{0}?access_token={1}\".format(tag, self.access_token)\n request = requests.get(url)\n print(request.headers)\n return request.json()",
"def latest_github_tag():\n release_tags_github_url = \"https://api.github.com/repos/rackerlabs/openstack-guest-agents-unix/tags\"\n release_tags_json = urllib2.urlopen(release_tags_github_url)\n release_tags_data = json.load(release_tags_json)\n return str(release_tags_data[0]['name'])[1:]",
"def remote_tags():\n url = \"%s/git/refs/tags\" % get_github_api_url()\n for result in requests.get(url).json():\n ref = result[\"ref\"]\n version = ref.split(\"/\")[-1]\n if version is not None:\n yield version",
"def _get_tag_target(repo, sha1):\n obj = repo.get(sha1)\n if obj.type == pygit2.GIT_OBJ_TAG:\n # Get the tag's target object\n obj = repo.get(obj.target)\n return obj",
"def get_response(self):\n link_param = self.get_params()\n link = f'https://api.github.com/' + link_param\n response = requests.get(link, data={'Accept': 'application/vnd.github.v3+json'}).json()\n return response",
"def search_tag(tag, page):\n search_url = \"https://stockx.com/api/browse?_tags=%s&page=%d\" % (tag, page)\n try:\n response_json = requests.get(search_url, headers=HEADERS).json()\n except (JSON_ERRORS, requests.exceptions.RequestException):\n return None\n return response_json",
"def get_release(client, logger, args):\n check_repo(client, logger, args.repo)\n\n logger.info(\"Getting release for tag {}\".format(args.tag))\n success, release = client.get_release(args.repo, args.tag)\n if not success:\n logger.error(\"Release {} {} not found.\".format(args.repo, args.tag))\n sys.exit(1)\n print(\n json.dumps(release, indent=2, separators=(\",\", \": \"), sort_keys=True)\n )",
"def call_github_api(url, token, method=\"get\", **kargs):\n\n headers = None if token is None else {\"Authorization\": \"token %s\" % token}\n r = getattr(requests, method)(url, headers=headers, **kargs)\n if r.status_code not in (200, 201):\n logging.error(\"Error response: {}\".format(r.content))\n r.raise_for_status()\n return r.json()",
"async def fetch_tag(self, user_id, tag):\n\n c = self.conn.cursor()\n c.execute('''select content from tags where user_id=? and tag=?''',\n (user_id, tag, ))\n\n content = c.fetchall()\n if not content:\n return None\n\n return content[0][0]",
"def __get_efo_github_data(self, efo_release: str) -> Dict[str, str]:\n if efo_release == \"latest\":\n url = self.EFO_RELEASE_API_TEMPLATE.format(efo_release)\n else:\n url = self.EFO_RELEASE_API_TEMPLATE.format(f\"tags/{efo_release}\")\n response = requests.get(url)\n response.raise_for_status() # In case of HTTP errors, this will be caught by the @retry decorator.\n return response.json()",
"def _fetch_latest_version(cls) -> str:\n response = requests.get(\n \"https://api.github.com/repos/datahub-project/datahub/releases/latest\"\n )\n response.raise_for_status()\n return json.loads(response.text)[\"tag_name\"]",
"def github_get_pull_request(n):\n url = 'http://github.com/api/v2/json/pulls/sympy/sympy/%d'\n data = json.load(urlopen(url % n))\n return data[\"pull\"]",
"def run(docker_hub_client, args):\n resp = docker_hub_client.get_tags(args.orgname, args.reponame, args.page)\n if resp['code'] == 200:\n if resp['content']['count'] > 0:\n rows = []\n for repo in resp['content']['results']:\n formatted_date = ''\n if repo['last_updated']:\n formatted_date = dateutil.parser \\\n .parse(repo['last_updated'])\n formatted_date = formatted_date.strftime(\"%Y-%m-%d %H:%M\")\n # Convert full_size in bytes to KB\n size_in_kb = repo['full_size'] / 1024\n formatted_size = readableMemoryFormat(size_in_kb)\n rows.append([repo['name'], formatted_size, formatted_date])\n header = ['Name', 'Size', 'Last updated']\n print_result(args.format, rows, header, resp['content']['count'],\n args.page)\n else:\n print('Error fetching tags for: {0}/{1}'.\n format(args.orgname, args.reponame))",
"def posts_get(self, tag=\"\", dt=\"\", url=\"\", hashes=[], meta=True, **kwds):\n return self.request(\"posts/get\", tag=tag, dt=dt, url=url,\n hashes=hashes, meta=meta, **kwds)",
"def get_tags(docker_hub_client, orgname, args, per_page=PER_PAGE):\n resp = docker_hub_client.get_tags(\n orgname, args.reponame, args.page, per_page=per_page\n )\n if resp['code'] == 200:\n if resp['content']['count'] > 0:\n rows = []\n for repo in resp['content']['results']:\n formatted_date = ''\n if repo['last_updated']:\n formatted_date = dateutil.parser \\\n .parse(repo['last_updated'])\n formatted_date = formatted_date.strftime(\"%Y-%m-%d %H:%M\")\n # Convert full_size in bytes to KB\n size_in_kb = repo['full_size'] / 1024\n formatted_size = readable_memory_format(size_in_kb)\n rows.append([repo['name'], formatted_size, formatted_date])\n header = ['Name', 'Size', 'Last updated']\n print_result(args.format, rows, header, resp['content']['count'],\n args.page)\n total_pages = int(((resp['content']['count'] - 1)/per_page) + 1)\n return total_pages\n print('This repo has no tags')\n return None\n\n code = resp['code']\n print(f'Error {code} fetching tags for: {orgname}/{args.reponame}')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Github API can only return all tags, but we only want the latest. | def get_latest_tags(self):
start = len(self.tags) - self.num_comparisons
tags = self.tags
latest = []
for i in xrange(len(tags)):
if i >= start:
parts = tags[i]['ref'].split('/')
release_num = parts[2]
sha = tags[i]['object']['sha']
tag = [release_num, sha]
latest.append(tag)
return latest | [
"def latest_github_tag():\n release_tags_github_url = \"https://api.github.com/repos/rackerlabs/openstack-guest-agents-unix/tags\"\n release_tags_json = urllib2.urlopen(release_tags_github_url)\n release_tags_data = json.load(release_tags_json)\n return str(release_tags_data[0]['name'])[1:]",
"def remote_tags():\n url = \"%s/git/refs/tags\" % get_github_api_url()\n for result in requests.get(url).json():\n ref = result[\"ref\"]\n version = ref.split(\"/\")[-1]\n if version is not None:\n yield version",
"def get_latest_tag(self) -> str:\n # don't check_returncode as it will exit non-zero if no tags\n latest_tagged_commit = self.rev_list(\n '--tags', '--max-count=1', check_returncode=False)\n if len(latest_tagged_commit) != 1:\n # should return one result, otherwise there are no tags\n return False\n return self.describe('--tags', latest_tagged_commit[0])[0]",
"def do_latest_tag(args, image_name_tag, image_name):\n if args.latest is True:\n if tag(image_name_tag, image_name+':latest'):\n push(args, image_name+':latest')",
"def get_last_tag(github_config, owner, repo):\n tags_url = \"/\".join([github_config.api_url, \"repos\", owner, repo, \"tags\"])\n tags_response = requests.get(tags_url, headers=github_config.headers)\n tags_response.raise_for_status()\n tags_json = tags_response.json()\n return tags_json[0][\"name\"]",
"def get_tags(docker_hub_client, orgname, args, per_page=PER_PAGE):\n resp = docker_hub_client.get_tags(\n orgname, args.reponame, args.page, per_page=per_page\n )\n if resp['code'] == 200:\n if resp['content']['count'] > 0:\n rows = []\n for repo in resp['content']['results']:\n formatted_date = ''\n if repo['last_updated']:\n formatted_date = dateutil.parser \\\n .parse(repo['last_updated'])\n formatted_date = formatted_date.strftime(\"%Y-%m-%d %H:%M\")\n # Convert full_size in bytes to KB\n size_in_kb = repo['full_size'] / 1024\n formatted_size = readable_memory_format(size_in_kb)\n rows.append([repo['name'], formatted_size, formatted_date])\n header = ['Name', 'Size', 'Last updated']\n print_result(args.format, rows, header, resp['content']['count'],\n args.page)\n total_pages = int(((resp['content']['count'] - 1)/per_page) + 1)\n return total_pages\n print('This repo has no tags')\n return None\n\n code = resp['code']\n print(f'Error {code} fetching tags for: {orgname}/{args.reponame}')",
"def get_latest_tag(self, repo):\n latest_tag = None\n tags_name = [self._clean_tag_name(tag.name) for tag in repo.tags]\n sem_versions = [\n semantic_version.Version(tag_name) for tag_name in tags_name\n ]\n if sem_versions:\n latest_tag = max(sem_versions)\n self._logger.debug('Found latest tag %s', latest_tag)\n return latest_tag",
"def _fetch_latest_version(cls) -> str:\n response = requests.get(\n \"https://api.github.com/repos/datahub-project/datahub/releases/latest\"\n )\n response.raise_for_status()\n return json.loads(response.text)[\"tag_name\"]",
"def get_latest_tags(project):\n project.refresh_from_db()\n return [tag.name for tag in project.tags.all()]",
"def latest_tag(self):\n return self._log_template('{latesttag}')",
"def latest_repo_release(url: str) -> str:\n\n resp = requests.get(url)\n try:\n repo_information = resp.json()\n except json.JSONDecodeError:\n raise PluginException(f\"Could not decode API response {resp.text}\")\n\n try:\n tag = re.findall(SEMVER_REGEX, repo_information[\"tag_name\"])[0]\n except (KeyError, IndexError):\n raise PluginException(\n f\"Could not extract repo tag from response {repo_information}\"\n )\n\n return tag",
"def run(docker_hub_client, args):\n resp = docker_hub_client.get_tags(args.orgname, args.reponame, args.page)\n if resp['code'] == 200:\n if resp['content']['count'] > 0:\n rows = []\n for repo in resp['content']['results']:\n formatted_date = ''\n if repo['last_updated']:\n formatted_date = dateutil.parser \\\n .parse(repo['last_updated'])\n formatted_date = formatted_date.strftime(\"%Y-%m-%d %H:%M\")\n # Convert full_size in bytes to KB\n size_in_kb = repo['full_size'] / 1024\n formatted_size = readableMemoryFormat(size_in_kb)\n rows.append([repo['name'], formatted_size, formatted_date])\n header = ['Name', 'Size', 'Last updated']\n print_result(args.format, rows, header, resp['content']['count'],\n args.page)\n else:\n print('Error fetching tags for: {0}/{1}'.\n format(args.orgname, args.reponame))",
"def get_tags_and_dates(repository_name):\n tags_query = \"SELECT t.name, c.commit_author_date \" \\\n \"FROM github_commit c, release_tag t \" \\\n \"where t.commit_url = c.url and t.repository=?\"\n return dbutils.execute_query(tags_query, (repository_name,), DATABASE_FILE)",
"def get_last_tag_by_date(directory=None):\n cmd = \"git for-each-ref --sort='*authordate' \" \\\n \"--format='%(refname:short)' refs/tags/upstream\"\n output = check_output(cmd, shell=True, cwd=directory, stderr=PIPE)\n output = output.splitlines()\n if len(output) == 0:\n return ''\n return output[-1]",
"def pull_latest(image):\n latest_image = image['RepoTags'][0].split(':')[0] + ':latest'\n log.debug(f'Pulling image: {latest_image}')\n cli.api_client.pull(latest_image, auth_config=check_credentials())\n return cli.api_client.inspect_image(latest_image)",
"def remote_tag(tag):\n url = \"%s/git/refs/tags\" % get_github_api_url()\n for result in requests.get(url).json():\n try:\n if result[\"ref\"] == \"refs/tags/%s\" % tag:\n return result\n except TypeError:\n return",
"def major_tags(self, owner, repo):\n cursor = \"null\"\n tags_list = []\n url = \"https://api.github.com/graphql\"\n\n while True:\n query = {\"query\" :\n \"\"\"\n query {\n repository(owner: \"%s\", name: \"%s\") {\n tags: refs(refPrefix: \"refs/tags/\", first: 100, after: \"%s\") {\n edges {\n cursor\n tag: node {\n name\n target {\n ... on Tag {\n tagger {\n date\n }\n }\n }\n }\n }\n }\n }\n }\n \"\"\" % (owner, repo, cursor)\n }\n r = requests.post(url, auth=requests.auth.HTTPBasicAuth('user', self.GITHUB_API_KEY), json=query)\n raw = r.text\n data = json.loads(json.loads(json.dumps(raw)))\n tags = data['data']['repository']['tags']['edges']\n for i in tags:\n try:\n tags_list.append({'date' : i['tag']['target']['tagger']['date'], 'release' : i['tag']['name']})\n except KeyError:\n pass\n if data['data']['repository']['tags']['edges'] == []:\n break\n else:\n cursor = data['data']['repository']['tags']['edges'][-1]['cursor']\n\n major_versions = []\n pattern = re.compile(\"[0-9]+\\.[0]+\\.[0]+$\")\n for i in tags_list:\n try:\n if re.search(pattern, i[\"release\"]) != None:\n major_versions.append(i)\n except AttributeError:\n pass\n\n return pd.DataFrame(major_versions)",
"def test_find_latest_remote_tag(self):\n\n # Create a tag, check that it is found on the right commit\n first_commit = self.create_commit(self.dependency_repo, \"1.txt\", \"Added 1.txt\")\n self.create_commit(self.dependency_repo, \"2.txt\", \"Added 2.txt\")\n v2_release_commit = self.tag_dependency(\"v2.0\")\n self.create_commit(self.dependency_repo, \"3.txt\", \"Added 3.txt\")\n tag_found = find_latest_remote_tag(self.submodule, \"main\", \"v*\")\n self.assertEqual(v2_release_commit.hexsha, tag_found.commit.hexsha)\n\n # Create a tag on an older commit, check that the most recent tag\n # (in branch sequential order) is found, not the most recent one\n # in chronological order\n self.dependency_repo.create_tag(\n \"v1.0\", first_commit.hexsha, message=f\"Release v1.0\"\n )\n tag_found = find_latest_remote_tag(self.submodule, \"main\", \"v*\")\n self.assertEqual(v2_release_commit.hexsha, tag_found.commit.hexsha)\n\n # Check that the wildcard is respected, by looking specifically for v1* tags\n tag_found = find_latest_remote_tag(self.submodule, \"main\", \"v1*\")\n self.assertEqual(first_commit.hexsha, tag_found.commit.hexsha)\n\n # Create a newer tag on another branch, check that it is not found\n self.dependency_repo.create_head(\n \"release/v2.0\", commit=v2_release_commit.hexsha\n )\n self.dependency_repo.git.checkout(\"release/v2.0\")\n self.create_commit(self.dependency_repo, \"2_1.txt\", \"Added 2_1.txt\")\n v2_1_release_commit = self.tag_dependency(\"v2.1\")\n\n tag_found = find_latest_remote_tag(self.submodule, \"main\", \"v*\")\n self.assertEqual(v2_release_commit.hexsha, tag_found.commit.hexsha)\n\n # But the newest tag should be found if we specify the release branch\n tag_found = find_latest_remote_tag(self.submodule, \"release/v2.0\", \"v*\")\n self.assertEqual(v2_1_release_commit.hexsha, tag_found.commit.hexsha)",
"def get_repo_tags(self, repo):\n url = self.BASE_URL + '/repositories/' + self.owner + '/' + repo + '/refs/tags'\n query_string = {\"pagelen\": 100}\n r = requests.get(url, auth=(self.username, self.password), params=query_string)\n result = r.json()\n\n if r.status_code == 200:\n result = r.json()\n return result['values']\n\n return result['error']['message']"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return github tag release URL as string | def get_url_tag_release(self, release_num):
url = 'https://{}/{}/{}/releases/tag/{}'.format(
HOST_GITHUB,
self.repo,
self.product,
release_num
)
return url | [
"def get_url_tag_commit(self, git_sha):\n\n url = 'https://{}/{}/{}/commit/{}'.format(\n HOST_GITHUB,\n self.repo,\n self.product,\n git_sha\n )\n return url",
"def latest_repo_release(url: str) -> str:\n\n resp = requests.get(url)\n try:\n repo_information = resp.json()\n except json.JSONDecodeError:\n raise PluginException(f\"Could not decode API response {resp.text}\")\n\n try:\n tag = re.findall(SEMVER_REGEX, repo_information[\"tag_name\"])[0]\n except (KeyError, IndexError):\n raise PluginException(\n f\"Could not extract repo tag from response {repo_information}\"\n )\n\n return tag",
"def github_url(self):\n return self.github.replace('.git', '')",
"def latest_github_tag():\n release_tags_github_url = \"https://api.github.com/repos/rackerlabs/openstack-guest-agents-unix/tags\"\n release_tags_json = urllib2.urlopen(release_tags_github_url)\n release_tags_data = json.load(release_tags_json)\n return str(release_tags_data[0]['name'])[1:]",
"def repo_link(repo):\n return \"https://github.com/\" + repo",
"def get_github_url(repo, path=''):\n github_branches = {\n 'IATI-Schemas': 'version-2.03',\n 'IATI-Codelists': 'version-2.03',\n 'IATI-Rulesets': 'version-2.03',\n 'IATI-Extra-Documentation': 'version-2.03',\n 'IATI-Codelists-NonEmbedded': 'master',\n }\n return 'https://github.com/IATI/{0}/blob/{1}/{2}'.format(repo, github_branches[repo], path)",
"def ticket_url_or_tag(tag: str) -> str:\n url = _url_if_url(get_url_from_tag, tag)\n return _value_with_url(tag, url) if url else tag",
"def infer_release_repo_from_env(repository):\n base = os.environ.get('BLOOM_RELEASE_REPO_BASE', None)\n if base is None:\n return None\n url = base + repository + '-release.git'\n try:\n urlopen(Request(url))\n except URLError:\n return None\n except HTTPError:\n return None\n return url",
"def github_link(self):\n if self.test_type == TestType.commit:\n test_type = 'commit'\n test_id = self.commit\n else:\n test_type = 'pull'\n test_id = self.pr_nr\n\n return f\"{self.fork.github_url}/{test_type}/{test_id}\"",
"def get_GitHubAPIURL(self) -> str:\r\n return self.githubAPIURL",
"def get_public_url(self, doc_id, branch='master'):\n name, path_frag = self.get_repo_and_path_fragment(doc_id)\n return 'https://raw.githubusercontent.com/OpenTreeOfLife/' + name + '/' + branch + '/' + path_frag",
"def get_api_url(self):\n\n url = 'https://api.{}/repos/{}/{}/git/'.format(HOST_GITHUB, \\\n self.repo, self.product)\n return url",
"def url(self):\n\n return maybe_string(C.git_remote_url(self._remote))",
"def repo_url(self) -> str:\n return pulumi.get(self, \"repo_url\")",
"def latest_stable_release_url(self):\n\n return self.release_url(self.latest_stable_release_version())",
"def pypi_url_to_github_url(pypi_package_url):\n project_response = requests.get(pypi_package_url)\n soup = BeautifulSoup(project_response.content, \"html5lib\", from_encoding=\"UTF8\")\n for element in soup.select(\"div.sidebar-section a i.fa-github\"):\n github_url = element.parent.get(\"href\")\n if is_github_project_url(github_url):\n return github_url\n return None",
"def get_github_library_version(name, url):\n while True:\n # For the release, make sure the default versions do not include \"-dev\"\n version = raw_input(\"Version of %s?: \" % name)\n if not url_exists(\"%s/releases/tag/%s\" % (url, version)):\n print_warning(\"The version of %s is not valid. Ensure you've chosen a correct value by checking the \"\n \"GitHub releases for exact naming at \"\n \"%s/releases before you continue.\" % (name, url))\n return version",
"def repo_tag(self):\n return '%s/gcloud/%s' % (constants.APPENGINE_REGISTRY, self._tag)",
"def _infer_tarball_url():\n try:\n with click.open_file('app.json', 'r') as f:\n contents = f.read()\n\n app_json = json.loads(contents)\n except IOError:\n return None\n\n repository = app_json.get('repository')\n\n if not repository:\n return None\n else:\n return app_json.get('repository') + '/tarball/master/'"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return github tag commit SHA URL as string | def get_url_tag_commit(self, git_sha):
url = 'https://{}/{}/{}/commit/{}'.format(
HOST_GITHUB,
self.repo,
self.product,
git_sha
)
return url | [
"def github_url(self):\n return self.github.replace('.git', '')",
"def get_github_hash(owner: str, repo: str, path: str) -> str:\n url = FILE_API_URL.format(owner=owner, repo=repo, path=path.lstrip('/'))\n res = requests.get(url)\n res_json = res.json()\n most_recent_commit = res_json[0]\n return most_recent_commit['sha']",
"def _get_github_sha(github_install_url: str):\n repository = Path(github_install_url).stem.split('#egg', 1)[0]\n organisation = Path(github_install_url).parent.stem\n with urllib.request.urlopen(f'https://api.github.com/repos/{organisation}/{repository}/commits/master') as response:\n return json.loads(response.read())['sha']",
"def repo_link(repo):\n return \"https://github.com/\" + repo",
"def get_github_url(repo, path=''):\n github_branches = {\n 'IATI-Schemas': 'version-2.03',\n 'IATI-Codelists': 'version-2.03',\n 'IATI-Rulesets': 'version-2.03',\n 'IATI-Extra-Documentation': 'version-2.03',\n 'IATI-Codelists-NonEmbedded': 'master',\n }\n return 'https://github.com/IATI/{0}/blob/{1}/{2}'.format(repo, github_branches[repo], path)",
"def url_for(self: Self, commit_sha: str, path: str, lnum: int | None = None) -> str:\n # Default to main branch\n url = f\"https://github.com/{self.org}/{self.repo}/blob/{commit_sha}/{path}\"\n if lnum:\n url += f\"#L{lnum}\"\n return url",
"def get_commit_for_tag(github_config, owner, repo, tag):\n tag_url = \"/\".join(\n [\n github_config.api_url,\n \"repos\",\n owner,\n repo,\n \"git\",\n \"refs\",\n \"tags\",\n tag,\n ]\n )\n tag_json = {}\n\n while \"object\" not in tag_json or tag_json[\"object\"][\"type\"] != \"commit\":\n tag_response = requests.get(tag_url, headers=github_config.headers)\n tag_json = tag_response.json()\n\n if tag_response.status_code != 200:\n raise GitHubError(\n \"Unable to get tag {}. {}\".format(tag, tag_json[\"message\"])\n )\n\n # If we're given a tag object we have to look up the commit\n if tag_json[\"object\"][\"type\"] == \"tag\":\n tag_url = tag_json[\"object\"][\"url\"]\n\n return tag_json[\"object\"][\"sha\"]",
"def github_svn_rev2hash(tag: str, rev): # pragma: no cover\n uri = f'https://github.com/wikimedia/{tag}/!svn/vcc/default'\n request = fetch(uri, method='PROPFIND',\n data=\"<?xml version='1.0' encoding='utf-8'?>\"\n '<propfind xmlns=\\\"DAV:\\\"><allprop/></propfind>',\n headers={'label': str(rev),\n 'user-agent': 'SVN/1.7.5 {pwb}'})\n dom = xml.dom.minidom.parse(BytesIO(request.content))\n hsh = dom.getElementsByTagName('C:git-commit')[0].firstChild.nodeValue\n date = dom.getElementsByTagName('S:date')[0].firstChild.nodeValue\n date = time.strptime(date[:19], '%Y-%m-%dT%H:%M:%S')\n return hsh, date",
"def _get_ref_sha(repo, ref):\n \n if _sha_pat.match(ref):\n return ref\n out = check_output(['git', 'ls-remote', repo, ref])\n return out.decode('utf8').split()[0]",
"def latest_github_tag():\n release_tags_github_url = \"https://api.github.com/repos/rackerlabs/openstack-guest-agents-unix/tags\"\n release_tags_json = urllib2.urlopen(release_tags_github_url)\n release_tags_data = json.load(release_tags_json)\n return str(release_tags_data[0]['name'])[1:]",
"def github_link(self):\n if self.test_type == TestType.commit:\n test_type = 'commit'\n test_id = self.commit\n else:\n test_type = 'pull'\n test_id = self.pr_nr\n\n return f\"{self.fork.github_url}/{test_type}/{test_id}\"",
"def cmd_get_sha(ref):\n return ['git', 'rev-parse', ref]",
"def get_url_tag_release(self, release_num):\n\n url = 'https://{}/{}/{}/releases/tag/{}'.format(\n HOST_GITHUB,\n self.repo,\n self.product,\n release_num\n )\n return url",
"def _getGitShaString(dist=None):\r\n if dist==None:\r\n shaStr='n/a'\r\n else:\r\n import subprocess\r\n #see if we're in a git repo and fetch from there\r\n proc = subprocess.Popen('git rev-parse --short HEAD',\r\n stdout=subprocess.PIPE,\r\n stderr=subprocess.PIPE,\r\n cwd='.', shell=True)\r\n repo_commit, _ = proc.communicate()\r\n del proc#to get rid of the background process\r\n if repo_commit:\r\n shaStr=repo_commit.strip()#remove final linefeed\r\n else:\r\n shaStr='n/a'\r\n return \"__git_sha__='%s'\" %shaStr",
"def get_version(git_repo, commit):\n version = git_repo.rev_parse(commit, short=7)\n try:\n version = \"%s@%s\" % (git_repo.find_tag(commit), version)\n except GitRepositoryError:\n pass\n\n return version",
"def url(self):\n\n return maybe_string(C.git_remote_url(self._remote))",
"def get_public_url(self, doc_id, branch='master'):\n name, path_frag = self.get_repo_and_path_fragment(doc_id)\n return 'https://raw.githubusercontent.com/OpenTreeOfLife/' + name + '/' + branch + '/' + path_frag",
"def get_hash(repo, ref='HEAD'):\n return subprocess.check_output(['git', 'rev-parse', '--verify', ref],\n cwd=repo).rstrip()",
"def get_tag(self, sha):\n return self.get_url_data(self.api_url + 'tags/' + sha)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse CHANGELOG for latest tag. | def get_changelog(self, commit_sha):
url = 'https://{}/{}/{}/' + commit_sha + '/CHANGELOG'
url = url.format(HOST_GITHUB_RAW, self.repo, self.product)
req = requests.get(url)
lines = req.text
first = self.latest_tags[self.num_comparisons - 1][VERS]
last = self.latest_tags[self.num_comparisons - 2][VERS]
flag = False
log = ''
for line in lines.splitlines():
if first in line:
flag = True
if last in line:
flag = False
if flag:
log += line + '\n'
return log | [
"def parse_log():\n\thistory = {}\n\ttemp_commit = ''\n\tlogs = open('gda_git_logs_temp.txt', 'r')\n\n\tfor line in logs:\n\t\twords = line.split(' ')\n\t\tif words[0] == 'commit':\n\t\t\ttemp_commit = words[1].rstrip()\n\t\telif words[0] == 'Date:':\n\t\t\tyear = words[7]\n\t\t\tmonth = month_to_num(words[4])\n\t\t\tyymm = year + '-' + month\n\t\t\tif yymm in history:\n\t\t\t\ttemp = history[yymm]\n\t\t\t\ttemp.append(temp_commit)\n\t\t\t\thistory[yymm] = temp\n\t\t\telse:\n\t\t\t\thistory[yymm] = [temp_commit]\n\t\telse:\n\t\t\tpass\n\n\treturn history",
"def _parse_version(self, doc):\n return latest_version",
"def latest_tag(self):\n return self._log_template('{latesttag}')",
"def semver_from(changelog: Path) -> Version:\n with open(changelog) as f:\n matches = SEMVER_RE.finditer(f.read())\n versions: List[Version] = []\n is_unreleased = False\n for match in matches:\n version = match.groupdict()[\"version\"]\n if version.lower() == \"unreleased\":\n is_unreleased = True\n else:\n versions.append(Version.parse(version))\n\n versions.sort()\n latest = versions[-1]\n print(latest)\n return latest.bump_prerelease() if is_unreleased else latest",
"def parse(self, chalog):\n totalv = 0\n with open(chalog) as f:\n prevline = ''\n version = ''\n for line in f:\n lline = line.lower().strip()\n if len(lline) == 0:\n continue\n if lline.find('-*- coding: utf-8 -*-') != -1:\n continue\n if lline.startswith('changes with apache'):\n #start of a version\n lline = lline.replace('changes with apache', '')\n version = lline.strip()\n totalv += 1 \n print version\n elif lline.startswith('*)'):\n if len(prevline) > 0:\n #handle the previous one\n cha = {}\n cha['version'] = version\n cha['changes'] = prevline\n self.cmts.append(cha)\n prevline = line\n else:\n prevline += line\n else:\n prevline += ' ' + line\n #the last one\n cha = {}\n cha['version'] = version\n cha['changes'] = prevline\n self.cmts.append(cha) \n print len(self.cmts)\n print totalv",
"def parse_svn_log(xml):\n logs = []\n tree = Xml_parser.fromstring(xml)\n for log in tree:\n logs.append({\n 'revision': log.attrib['revision'],\n 'author': log[0].text,\n 'date': log[1].text,\n 'comment': log[2].text\n })\n return logs",
"def parse_changelog(filename):\n with open(filename, 'r') as changelog:\n for line in changelog.readlines():\n if re.match(r'^ .*<.*@.*> [A-Z][a-z][a-z], [0-9][0-9]', line):\n return re.split(r'^ .*<.*@.*>', line)[1].strip()",
"def parse_tag(self, tag):\n \n mytag = \"latest\"\n mydigest = None\n\n regex = \"([\\w\\d\\.\\-]+)@?([\\w\\d\\.\\-]*)$\"\n\n regex_matched = re.match(regex, tag)\n mytag = regex_matched.group(1)\n mydigest = regex_matched.group(2)\n \n if regex_matched is None:\n mytag = \"latest\"\n\n return (mytag, mydigest)",
"def parse(self, text):\n \n self.clear()\n lines = text.split(\"\\n\")\n self.logger.info(\"Parsing Git history\")\n \n for line in lines:\n if len(line) == 0:\n # Line is a spacer\n pass\n \n elif line[0] == ' ':\n # Line is part of a commit message\n pass\n \n else:\n # Line is part of a commit header\n spaceIdx = line.find(' ')\n if spaceIdx == -1:\n self.logger.warn(\"Skipping unrecognizable history line: \" + line)\n continue\n \n keyword = line[:spaceIdx]\n content = line[spaceIdx+1:]\n self.logger.debug(\"Found key-value pair: {0} {1}\".format(keyword, content))\n \n self._handleKeyValue(keyword, content)\n \n # Grab the last commit\n self._commits[self._currentCommit.hashKey] = self._currentCommit\n self._currentCommit = None\n \n # Finalize the commit tree\n self._resolveCommits()",
"def parse_changelog(changelog):\n parsed_changelog = {}\n first_line = changelog[:changelog.find('\\n')+1]\n\n parsed_changelog['comment'] = changelog[changelog.find('\\n')+1:]\n parsed_changelog['date'] = first_line[first_line.find('*')+1:first_line.find('20')+4]\n parsed_changelog['mark'] = re.findall(r'\\s+\\-\\s[\\s\\S]*', first_line)\n if parsed_changelog['mark'] == []:\n parsed_changelog['mark'] = re.findall(r'[\\d\\.\\s-]*', first_line)\n if parsed_changelog['mark'] == []:\n parsed_changelog['mark'] = ''\n else:\n parsed_changelog['mark'] = parsed_changelog['mark'][-2]\n else:\n parsed_changelog['mark'] = parsed_changelog['mark'][0]\n\n parsed_changelog['author'] = first_line[first_line.find(parsed_changelog['date'])+(len(parsed_changelog['date'])):first_line.rfind(parsed_changelog['mark'])]\n\n return parsed_changelog",
"def gettime(self, tag):\n cmd = ['git', 'log', '--pretty=format:\"%ct\"', \"-1\", tag]\n p = Popen(cmd, cwd=self.filename, stdout=PIPE)\n data, res = p.communicate()\n if data == b'':\n return [], []\n time_stamp = []\n this_tag = []\n for seconds in data.decode(\"utf-8\").split(\"\\n\"):\n month = round((int(seconds.strip('\"')) - ReleaseTime.base) / ReleaseTime.month_time)\n if month not in time_stamp:\n time_stamp.append(month)\n this_tag.append(tag[0:4])\n else:\n pass\n return time_stamp, this_tag",
"def parse_svn_log_xml(xml_string):\r\n l = []\r\n tree = ET.fromstring(xml_string)\r\n for entry in tree.findall('logentry'):\r\n d = {}\r\n d['revision'] = int(entry.get('revision'))\r\n # Some revisions don't have authors, most notably\r\n # the first revision in a repository.\r\n author = entry.find('author')\r\n d['author'] = author is not None and author.text or None\r\n d['date'] = svn_date_to_timestamp(entry.find('date').text)\r\n # Some revisions may have empty commit message\r\n message = entry.find('msg')\r\n message = message is not None and message.text is not None \\\r\n and message.text.strip() or \"\"\r\n # Replace DOS return '\\r\\n' and MacOS return '\\r' with unix return '\\n'\r\n d['message'] = message.replace('\\r\\n', '\\n').replace('\\n\\r', '\\n'). \\\r\n replace('\\r', '\\n')\r\n paths = d['changed_paths'] = []\r\n for path in entry.findall('.//path'):\r\n copyfrom_rev = path.get('copyfrom-rev')\r\n if copyfrom_rev:\r\n copyfrom_rev = int(copyfrom_rev)\r\n paths.append({\r\n 'path': path.text,\r\n 'action': path.get('action'),\r\n 'copyfrom_path': path.get('copyfrom-path'),\r\n 'copyfrom_revision': copyfrom_rev,\r\n })\r\n l.append(d)\r\n return l",
"def _parseHistoryUpdate(self, msg):\n logging.debug(\"Parsing a history update message\")\n\n try:\n self.source = msg.findtext(\"src\")\n self.days_since_birth = msg.findtext(\"dsb\")\n # Ignore the unit timestamp in preference for a\n # computer generated timestamp that can more\n # easily be used when updating data points at\n # sites like Cosm.\n #timestamp = msg.findtext(\"time\")\n if self.config.use_utc_timestamps:\n timestamp = datetime.datetime.utcnow()\n else:\n timestamp = datetime.datetime.now()\n\n history = msg.find(\"hist\")\n self.days_since_wiped = history.findtext(\"dsw\")\n sensor_type = int(history.findtext(\"type\"))\n\n # Add a new key for the sensor type if one does not yet exist.\n if sensor_type not in self.historicSensorData:\n self.historicSensorData[sensor_type] = {}\n\n # Add a new key for the timeout handler\n if sensor_type not in self.historicalDataUpdateCompleteForSensorType:\n self.historicalDataUpdateCompleteForSensorType[sensor_type] = None\n\n # Start a callback timer, for this particular sensor type, that will be\n # used to detect the completion of the historic data message cycle and\n # pass the collected historic data to the handleHistoryUpdate method.\n # If a message is received within the timeout window then delay the\n # timer and additional timeout period.\n #\n if self.historicalDataUpdateCompleteForSensorType[sensor_type] is None:\n self.historicalDataUpdateCompleteForSensorType[sensor_type] = reactor.callLater(self.historicDataMessageTimeout,\n self._historicalDataUpdateCompleted,\n sensor_type)\n else:\n # delay history data completed job another timeout period.\n self.historicalDataUpdateCompleteForSensorType[sensor_type].delay(self.historicDataMessageTimeout)\n\n sensor_units = history.findtext(\"units\")\n\n for data_element in history.findall(\"data\"):\n sensor_instance = int(data_element.findtext(\"sensor\"))\n\n if sensor_instance not in self.historicSensorData[sensor_type]:\n sensorHistoricalData = txcurrentcost.SensorHistoryData(sensor_type, sensor_instance, sensor_units)\n self.historicSensorData[sensor_type][sensor_instance] = sensorHistoricalData\n\n logging.debug(\"Processing historical data for sensor %s\" % sensor_instance)\n\n datapoints = []\n for historical_element in data_element:\n tag = historical_element.tag\n value = historical_element.text\n if tag == \"sensor\":\n # ignore the sensor element that has already been inspected.\n continue\n datapoints.append((tag, value))\n\n historicalSensorData = self.historicSensorData[sensor_type][sensor_instance]\n historicalSensorData.storeDataPoints(timestamp, datapoints)\n\n except Exception, ex:\n logging.exception(ex)\n logging.error(\"Problem processing history update message\")\n return",
"def __parse(self):\n lines = self.file.readlines()\n name_idx = 2\n name_idx_found = False\n pathre = re.compile(r\"^[A-Z]:[\\\\/]\\w+\")\n for i in range(0, len(lines)):\n line = lines[i]\n if line.strip() != \"\": # check if line isn't empty\n if pathre.match(line):\n self.path = line.strip()\n continue\n tokens = line.split()\n time_str = tokens[0] + \" \" + tokens[1]\n try:\n time = datetime.strptime(time_str, \"%m/%d/%y %H:%M:%S\")\n except ValueError:\n raise LogParseError('Invalid log format. Date must be first \\\n token for each log event.') \n if not name_idx_found:\n name_idx = tokens.index('Monitoring')\n name_idx_found = True\n name = \"\"\n if tokens[name_idx].strip() == 'Monitoring':\n name = tokens[name_idx].lower() + \" \" + tokens[name_idx + 1].lower()\n duration = 0.0\n else:\n name = tokens[name_idx].lower()\n duration = tokens[name_idx + 1]\n self.events[name] = Event(time, name, duration)\n self.start = self.events['monitoring started']\n self.end = self.events['monitoring stopped']",
"def get_changelogs(current_ver: str) -> Union[str, None]:\n from .. import asset_dir\n with open(os.path.join(asset_dir, \"changelog\"), \"r\") as f:\n in_block = False\n changelog = \"\"\n\n for line in f.readlines(): # iterate through changelog file\n line = line.strip(\"\\n\") # strip from newline characters and whitespace\n if line.startswith(\"#\"): # if current line indicates the start of a new version block:\n version_str = line[1:] # actual version number comes after a number sign\n if in_block:\n changelog += \"</ul>\" # close ul tag if we were in a block previously\n in_block = False\n if parse_version(version_str) > parse_version(current_ver): # Check if there are any change logs for our version\n in_block = True\n changelog += f\"<h3>{version_str}</h3>\" # add a headline stating the version number for this block\n changelog += \"<ul>\" # open ul tag (gets closed later)\n elif in_block:\n if len(line) > 2:\n changelog += f\"<li>{line[2:]}</li>\" # add a changelog bullet point; slice removes \"bullet point\" from raw config string\n if len(changelog) > 0:\n if not changelog.endswith(\"</ul>\"):\n changelog += \"</ul>\" # close last ul tag after we've iterated over all options in the changelog\n return changelog\n else:\n return None # return None if there are no changelogs",
"def process_git_tag(regex, inputtag):\n\ttry: \n\t\tgitre = re.compile(regex)\n\t\tmatch = gitre.search(inputtag)\n\t\tgroups = match.groupdict()\n\t\tversion = groups.get('version', '.unknown')\n\t\tdate = groups.get('date', '')\n\t\tgitmeta = groups.get('gitmeta', '')\n\t\tif date:\n\t\t\tversion = '.'.join([version, ''.join(date.split('-'))])\n\texcept (AttributeError, EnvironmentError, OSError):\n\t\tversion, gitmeta = '.unknown', ''\n\n\treturn version, gitmeta",
"def _parse_commit_log(base_commit, tip_commit):\n\n class LogState(object):\n SEPARATOR_LINE = 0\n COMMIT_SHA1_LINE = 1\n MERGE_LINE = 2\n AUTHOR_LINE = 3\n COMMITTER_LINE = 4\n MIDDLE_SEPARATOR_LINE = 5\n TITLE_LINE = 6\n BLANK_LINE = 7\n BODY_LINES = 8\n\n commit_info = {}\n check_churn = True\n check_move = True\n\n git_log_cmd = shlex.split(\n 'git log --format=full --reverse {base_commit}..{tip_commit}'.format(\n base_commit=base_commit, tip_commit=tip_commit))\n git_log_output = subprocess.check_output(git_log_cmd)\n\n log_line_state = LogState.SEPARATOR_LINE\n commit_sha1 = None\n merge = None\n author = None\n committer = None\n title = None\n separator = None\n body = []\n git_log_output_lines = git_log_output.splitlines()\n for idx, line in enumerate(git_log_output_lines, 1):\n # commit line\n if (\n log_line_state == LogState.SEPARATOR_LINE and\n line.startswith('commit ')):\n commit_sha1 = line.split(' ')[1]\n log_line_state = LogState.COMMIT_SHA1_LINE\n continue\n\n # Merge: line\n if (\n log_line_state == LogState.COMMIT_SHA1_LINE and\n line.startswith('Merge: ')):\n merge = line.split(' ', 1)[1]\n log_line_state = LogState.MERGE_LINE\n continue\n\n # Author: line\n if (\n log_line_state in [\n LogState.COMMIT_SHA1_LINE, LogState.MERGE_LINE] and\n line.startswith('Author: ')):\n author = line.split(' ', 1)[1]\n log_line_state = LogState.AUTHOR_LINE\n continue\n\n # Commit: line\n if log_line_state == LogState.AUTHOR_LINE and line.startswith('Commit: '):\n committer = line.split(' ', 1)[1]\n log_line_state = LogState.COMMITTER_LINE\n continue\n\n # empty line after Commit: line\n if log_line_state == LogState.COMMITTER_LINE and line == '':\n log_line_state = LogState.MIDDLE_SEPARATOR_LINE\n continue\n\n # Title line of commit message\n if (\n log_line_state == LogState.MIDDLE_SEPARATOR_LINE and\n line.startswith(' ')):\n title = line.lstrip(' ')\n log_line_state = LogState.TITLE_LINE\n\n if idx < len(git_log_output_lines):\n continue\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n break\n\n # Blank line between title and body (still contains 4 space prefix)\n if log_line_state == LogState.TITLE_LINE and line.startswith(' '):\n separator = line.lstrip(' ')\n log_line_state = LogState.BLANK_LINE\n\n if idx < len(git_log_output_lines):\n continue\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n 
commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n break\n\n # Body lines\n if (\n log_line_state in [LogState.BLANK_LINE, LogState.BODY_LINES] and\n line.startswith(' ')):\n body.append(line.lstrip(' '))\n log_line_state = LogState.BODY_LINES\n\n if idx < len(git_log_output_lines):\n continue\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n break\n\n # End of commit message\n if (\n log_line_state in [\n LogState.TITLE_LINE, LogState.BLANK_LINE,\n LogState.BODY_LINES] and\n line == ''):\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n\n log_line_state = LogState.SEPARATOR_LINE\n commit_sha1 = None\n merge = None\n author = None\n committer = None\n 
title = None\n separator = None\n body = []\n\n return commit_info",
"def changes(self):\n changes = deque(next(self.git_log).strip(\"\\x00\").split(\"\\x00\"))\n while changes:\n status = changes.popleft()\n if status.startswith(\"R\"):\n # matched R status change\n status = \"R\"\n old = changes.popleft()\n new = changes.popleft()\n if (mo := self._ebuild_re.match(old)) and (mn := self._ebuild_re.match(new)):\n try:\n old_pkg = atom_cls(f\"={mo.group('category')}/{mo.group('package')}\")\n new_pkg = atom_cls(f\"={mn.group('category')}/{mn.group('package')}\")\n yield status, [old_pkg, new_pkg]\n except MalformedAtom:\n continue\n else:\n # matched ADM status change\n path = changes.popleft()\n if mo := self._ebuild_re.match(path):\n try:\n pkg = atom_cls(f\"={mo.group('category')}/{mo.group('package')}\")\n yield status, [pkg]\n except MalformedAtom:\n continue",
"def getVersionHistory(self, text):\n #if self.group == \"Core\":\n # import pdb; pdb.set_trace()\n extractor =r'.*\\+node\\S+?\\<\\< %s \\>\\>.*?\\#\\@\\+at(.*)\\#\\@\\-at.*\\-node.*?\\<\\< %s \\>\\>.*'\n for name in (\"version history\", \"change log\"):\n searcher = re.compile(extractor % (name, name), re.DOTALL+re.M)\n match = searcher.match(text)\n if match:\n version_text = match.groups()[0]\n self.versions = version_text.replace(\"#\", \"\")\n return"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Constructs release notes for Bugzilla service deployment ticket. | def get_release_notes(self):
notes = self.output.get_header('RELEASE NOTES')
notes += 'https://{}/{}/{}/releases'.format(HOST_GITHUB, \
self.repo, self.product) + '\n'
notes += self.output.get_sub_header('COMPARISONS')
notes += self.get_comparison(self.latest_tags[0][VERS],
self.latest_tags[1][VERS])
if len(self.latest_tags) >= (MAX_COMPARISONS_TO_SHOW - 1):
notes += self.get_comparison(self.latest_tags[1][VERS],
self.latest_tags[2][VERS])
if len(self.latest_tags) >= MAX_COMPARISONS_TO_SHOW:
notes += self.get_comparison(self.latest_tags[2][VERS],
self.latest_tags[3][VERS])
tag_data = self.get_tag(self.latest_tags[3][SHA])
notes += self.output.get_sub_header('TAGS')
notes += self.get_url_tag_release(self.latest_tags[3][VERS]) + '\n'
notes += self.get_url_tag_commit(tag_data["object"]["sha"]) + '\n'
changelog = self.get_changelog(tag_data["object"]["sha"])
if changelog:
notes += self.output.get_sub_header('CHANGELOG')
notes += changelog
return notes | [
"def release_notes(version, author, git_ref_target, git_ref_source, build_type):\n print('generating release notes')\n if git_ref_source:\n if git_ref_source != 'HEAD':\n git_ref_source = 'origin/{}'.format(git_ref_source)\n changelog = run('git log origin/{}..{}'.format(git_ref_target,\n git_ref_source))\n else:\n git_ref_source = 'origin/master'\n changelog = run('git log {}..origin/{}'.format(git_ref_source, git_ref_target))\n notes = {\n 'version': version,\n 'author': author,\n 'build_type': build_type,\n 'date': datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S'),\n 'changelog': changelog.stdout\n }\n return notes",
"def release_notes(self) -> str | None:\n return \"Release notes\"",
"def main():\n parser = argparse.ArgumentParser(description='Creates tickets for release certification')\n parser.add_argument('-u', '--username', help='jira username', default='admin')\n parser.add_argument('-p', '--password', help='jira password', default='admin')\n parser.add_argument('-c', '--config', help='path to config file', default='./options.ini')\n parser.add_argument('-j', '--jira', help='url of jira server', default='http://localhost:8080')\n\n args = parser.parse_args()\n\n jira_user = args.username\n jira_pass = args.password\n jira_server = args.jira\n config_file_path = args.config\n CONFIG.read(config_file_path)\n\n parent_ticket = config_map('JiraOptions')['parent_ticket']\n apprenda_version = config_map('VersionInfo')['to_version']\n jira_project = config_map('JiraOptions')['project']\n jira_issue_type = config_map('JiraOptions')['issue_type']\n jira = JIRA(jira_server, basic_auth=(jira_user, jira_pass))\n\n parent_issue = jira.issue(parent_ticket)\n ticket_list = []\n\n # create clean install tickets\n clean_strings = config_map('CleanInstallSection')\n for cloud in ['single', 'hybrid']:\n ticket_to_add = ticket.Ticket(jira_project, jira_issue_type)\n ticket_to_add.format_summary(clean_strings['summary'], apprenda_version, cloud)\n ticket_to_add.format_description(clean_strings['description'])\n ticket_list.append(ticket_to_add.__dict__)\n\n # create upgrade tickets\n from_versions = json.loads(config_map('VersionInfo')['from_versions'])\n upgrade_strings = config_map('UpgradeSection')\n\n # single cloud\n for version in from_versions:\n ticket_to_add = ticket.Ticket(jira_project, jira_issue_type)\n ticket_to_add.format_summary(upgrade_strings['summary'], apprenda_version, version,\n \"single\")\n ticket_to_add.format_description(upgrade_strings['description'])\n ticket_list.append(ticket_to_add.__dict__)\n\n # hybrid cloud\n for version in from_versions:\n ticket_to_add = ticket.Ticket(jira_project, jira_issue_type)\n ticket_to_add.format_summary(upgrade_strings['summary'], apprenda_version, version,\n \"hybrid\")\n ticket_to_add.format_description(upgrade_strings['description'])\n ticket_list.append(ticket_to_add.__dict__)\n\n # create testing tickets for other tasks\n for section in CONFIG.sections():\n if 'Ticket' in section:\n strings = config_map(section)\n ticket_to_add = ticket.Ticket(jira_project, jira_issue_type)\n ticket_to_add.format_summary(strings['summary'], apprenda_version)\n ticket_to_add.format_description(strings['description'])\n ticket_list.append(ticket_to_add.__dict__)\n\n print 'Created {0} tickets, now sending them to Jira'.format(len(ticket_list))\n # send issues to jira and create tickets and links\n issues = jira.create_issues(field_list=ticket_list)\n\n for item in issues:\n jira.create_issue_link(\n type=\"Task of Story\",\n outwardIssue=item['issue'].key,\n inwardIssue=parent_issue.key,\n )\n\n print 'Finished linking issues, exiting.'",
"def generate_release_notes(project_id, endstr = ' <br>', **config):\n\n gl = gitlab.Gitlab(**config)\n project = gl.projects.get(project_id)\n\n if not project.mergerequests.list(state='merged'):\n raise ValueError(f\"There is not merged merge request for project {project_id} {project.name}\")\n\n if not project.releases.list():\n log = f\"Changelog of {project.name}:{endstr}\"\n last_date = '0000-01-01T00:00:00Z'\n else:\n last_release = project.releases.list()[0]\n log = f\"Changelog since release {last_release.name} of {project.name}:{endstr}\"\n last_date = last_release.released_at\n\n page = 1\n list_mrs = project.mergerequests.list(state='merged',\n order_by='updated_at',\n updated_after=last_date,\n page=page)\n if not list_mrs:\n log += f\"There is no merged merge request after {last_date}\"\n return log\n\n while list_mrs:\n for mr in list_mrs:\n line = f\" * {mr.title} (@{mr.author['username']}){endstr}\"\n log += line\n\n page += 1\n list_mrs = project.mergerequests.list(state='merged',\n order_by='updated_at',\n updated_after=last_date,\n page=page\n )\n\n return log",
"def show_release_notes(self, data):\n notes = json.load(open(RELEASE_NOTES))\n message = ''\n for version, notes in notes.items():\n message += '\\n\\n**' + version + '**\\n\\n' + notes\n self.create_message(\n message,\n roomId=data['roomId']\n )",
"def show_release_notes_for(self, data):\n target = re.search('show release notes for (.*)', self.message_text).group(1)\n raw_notes = json.load(open(RELEASE_NOTES))\n message = ''\n for version, notes in raw_notes.items():\n if version == target:\n message += '\\n\\n**' + version + '**\\n\\n' + notes\n break\n else:\n message = '\"' + str(target) + '\" is not a valid release. Please use one of:\\n\\n- ' + '\\n- '.join(raw_notes.keys())\n self.create_message(\n message,\n roomId=data['roomId']\n )",
"def GitHub_release_text(version):\n shortversion = get_sympy_short_version(version)\n htmltable = table(version)\n out = \"\"\"\\\nSee https://github.com/sympy/sympy/wiki/release-notes-for-{shortversion} for the release notes.\n\n{htmltable}\n\n**Note**: Do not download the **Source code (zip)** or the **Source code (tar.gz)**\nfiles below.\n\"\"\"\n out = out.format(shortversion=shortversion, htmltable=htmltable)\n print(blue(\"Here are the release notes to copy into the GitHub release \"\n \"Markdown form:\"))\n print()\n print(out)\n return out",
"def make_release_notes(src, dst) -> str:\n result = _subprocess(['git', 'log', '--pretty=format:\"%s\"', f\"origin/{src}...origin/{dst}\"])\n commits = \"\\n\".join([f\"- {i[1:-1]}\" for i in result.split(\"\\n\")])\n\n if args.release_notes:\n with open(args.release_notes, 'w') as f:\n f.write(commits)\n\n return commits",
"def prepare_changelog() -> None:\n global config\n print(f\"Generating changelog for release {config.new_version}\")\n print(\"\")\n\n # clone repository\n checkout_code()\n # generate changelog with new release list of commits\n generate_changelog()\n # commit\n check_run(\n [\"git\", \"commit\", \"-a\", \"-m\", f\"Update changelog for release {config.new_version}\"], cwd=config.source_dir\n )",
"def add_bug_reporting():\n section = \"\"\"\nEvery care is taken to try to ensure that this documentation comes to you error free.\nIf you do find an error - please report the problem on :\n`GitHub <{{cookiecutter.project_gh}}>`_\nor\nby email to : `{{cookiecutter.author}} <mailto:{{cookiecutter.author_email}}?Subject={{cookiecutter.project_repo}}%20Error>`_\n\"\"\".split(\"\\n\")\n\n max_len = max(map(len,section))\n max_len = max_len + (max_len % 2)\n\n with open(os.path.join(PROJECT_DIRECTORY,\"README.rst\"), \"a\") as readme:\n readme.write(\"+\" + \"-\"*max_len + \"+\\n\")\n readme.write(\"|\" + \" \"*(max_len/2-2) + \"Bugs\" + \" \"*(max_len/2-2) + \"+\\n\")\n readme.write(\"+\" + \"=\"*max_len + \"+\\n\")\n for l in section:\n readme.write(\"|\" + l + \" \"*(max_len-len(l)) + \"|\\n\")\n readme.write(\"+\" + \"-\"*max_len + \"+\\n\")",
"def get_release_note(comments):\n release_note = \"\"\n i = 0\n for comment in comments:\n #pprint.pprint(comment)\n #print \"**** Comment-{0}: {1}\".format(i, comment['body'])\n #print \"**** Comment-{index}: {body}\".format(\n # index=i,\n # body=comment['body']\n # )\n #print \"\\tURL: {0}\".format(comment['html_url'])\n #print \"\\tURL: {url}\".format(url=comment['html_url'])\n #comment['body'].index('Changed make')\n if comment['body'].lower().find('changed make') >= 0:\n #print \"Found 'Release Note'\"\n release_note = comment['body']\n #else:\n #print \"No 'Release Note' found\"\n\n i += 1\n # print \"----------------------------------------------------------\\n\"\n return release_note",
"def create_release(release_files, changelog=\"\", output=\"\") -> str:\n release_notes = \"\"\n if 'TRAVIS_TAG' not in os.environ or not os.environ['TRAVIS_TAG']:\n print('No git tag: not deploying anything')\n return release_notes\n elif os.environ['TRAVIS_SECURE_ENV_VARS'] != 'true':\n print('No secure environment variables: not deploying anything')\n return release_notes\n elif len(release_files) == 0:\n print('No file to release')\n return release_notes\n else:\n print('Creating release from tag {}'.format(os.environ['TRAVIS_TAG']))\n\n headers = {\n 'User-Agent': 'Deploy-Script',\n 'Authorization': 'token {}'.format(os.environ['GH_TOKEN'])\n }\n\n changelog_content = ''\n if changelog:\n with open(changelog, 'r') as changelog_file:\n changelog_content = changelog_file.read()\n\n create_raw_data = {\n \"tag_name\": os.environ['TRAVIS_TAG'],\n \"body\": \"\\n\\n{}\".format(changelog_content)\n }\n\n # if a release exist with this tag_name delete it first\n # this allows to create the release from github website\n url = '/repos/{repo_slug}/releases/tags/{tag}'.format(\n repo_slug=os.environ['TRAVIS_REPO_SLUG'],\n tag=os.environ['TRAVIS_TAG'])\n conn = http.client.HTTPSConnection('api.github.com')\n conn.request('GET', url, headers=headers)\n response = conn.getresponse()\n release = json.loads(response.read().decode())\n\n if 'upload_url' not in release:\n print('Failed to create release!')\n print('Github API replied:')\n print('{} {}'.format(response.status, response.reason))\n print(repr(release))\n exit(-1)\n\n conn = http.client.HTTPSConnection('uploads.github.com')\n for release_file in release_files:\n _, filename = os.path.split(release_file)\n headers['Content-Type'] = 'application/zip'\n url = '{release_url}?name={filename}'.format(release_url=release['upload_url'][:-13], filename=filename)\n print('Upload to {}'.format(url))\n\n with open(release_file, 'rb') as f:\n data = f.read()\n conn.request('POST', url, data, headers)\n\n response = conn.getresponse()\n result = response.read()\n if response.status != 201:\n print('Failed to upload filename {filename}'.format(filename=filename))\n print('Github API replied:')\n print('{} {}'.format(response.status, response.reason))\n print(repr(json.loads(result.decode())))\n print('File:')\n print(' Size: {}'.format(os.path.getsize(release_file)))\n\n if output:\n with open(output, 'w') as f:\n print(\"Writing release notes\")\n print(release_notes)\n f.write(release_notes)",
"def freshservice_release_task_create(\n self,\n ticket_id: int,\n due_date: str,\n notify_before: int,\n title: str,\n description: str,\n status: int = None,\n ) -> Dict[str, Any]:\n data = remove_empty_elements({\n \"description\": description,\n \"due_date\": due_date,\n \"notify_before\": notify_before,\n \"title\": title,\n \"status\": status\n })\n\n return self._http_request('POST',\n f'api/v2/releases/{ticket_id}/tasks',\n json_data=data)",
"def CompileRelease(self, repo_settings):\n release_file = \"Origin: \" + repo_settings['name'] + \"\\n\"\n release_file += \"Label: \" + repo_settings['name'] + \"\\n\"\n release_file += \"Suite: stable\\n\"\n release_file += \"Version: 1.0\\n\"\n release_file += \"Codename: ios\\n\"\n release_file += \"Architectures: iphoneos-arm\\n\"\n release_file += \"Components: main\\n\"\n release_file += \"Description: \" + repo_settings['description'].replace(\"\\n\\n\", \"\\n .\\n \").replace(\"\\n\", \"\\n \") + \"\\n\"\n\n return release_file",
"def release_notes(self, release_notes):\n self._release_notes = release_notes",
"def release_candidate(args: RCArguments) -> None:\n with VERSION.open('r') as f:\n version = f.read().rstrip('-devel')\n major, minor, _ = version.split('.')\n date = _calculate_release_start(major, minor)\n\n data = read_calendar()\n\n with CALENDAR_CSV.open('w', newline='') as f:\n writer = csv.writer(f)\n writer.writerows(data)\n\n writer.writerow([f'{major}.{minor}', date.isoformat(), f'{major}.{minor}.0-rc1', args.manager])\n for row in range(2, 4):\n date = date + datetime.timedelta(days=7)\n writer.writerow([None, date.isoformat(), f'{major}.{minor}.0-rc{row}', args.manager])\n date = date + datetime.timedelta(days=7)\n writer.writerow([None, date.isoformat(), f'{major}.{minor}.0-rc4', args.manager, OR_FINAL.format(f'{major}.{minor}')])\n\n commit(f'docs: Add calendar entries for {major}.{minor} release candidates.')",
"def default_changelog(release_link_format: str, breaking_change_token: str = \"BREAKING\"):\n return Changelog(\n header=\"\"\"# Changelog\nAll notable changes to this project will be documented in this file.\n\nThe format is based on [Keep a Changelog] and this project adheres to\n[Semantic Versioning].\n\nTypes of changes are:\n* **Security** in case of vulnerabilities.\n* **Deprecated** for soon-to-be removed features.\n* **Added** for new features.\n* **Changed** for changes in existing functionality.\n* **Removed** for now removed features.\n* **Fixed** for any bug fixes.\"\"\",\n config=ChangelogConfig(\n release_link_format=release_link_format,\n breaking_change_token=breaking_change_token,\n ),\n releases=OrderedDict(\n {\n ReleaseTag(\"Unreleased\"): ReleaseSection(entries={}, timestamp=None),\n }\n ),\n links=OrderedDict(\n {\n \"Unreleased\": release_link_format.format(previous_tag=\"initial\", tag=\"HEAD\"),\n \"Keep a Changelog\": \"http://keepachangelog.com/en/1.0.0/\",\n \"Semantic Versioning\": \"http://semver.org/spec/v2.0.0.html\",\n },\n ),\n )",
"def test_get_release_with_notes(self):\n release_id = self._create_release()\n response = self._post_releases_notes(release_id, \"this is a test message\")\n self.assertEqual(204, response.status_code)\n\n output = self._get_releases(release_id=release_id)\n self.assertIn('notes', output['releases'][0])\n\n notes = output['releases'][0]['notes']\n self.assertIn(\n 'test note lorem ipsum',\n notes\n )",
"def create_release_notes(yaml_file, realease_notes_file, application_name):\n try:\n with open(yaml_file) as input_file: # read yaml file AND CONVERT IT INTO DICTIONARY\n release_dict=yaml.load(input_file, Loader=yaml.FullLoader)\n logging.info(\"FILE CONVERTED TO DICTIONARY SUCCESSFULLY\")\n \n \n except (FileNotFoundError,FileExistsError) as error: #file doesn't exist\n logging.warning(\"yaml file is not exist or damaged\")\n return None\n \n except yaml.scanner.ScannerError as error: # yaml file syntax error\n logging.warning(\"wrong yaml format\")\n return None\n \n\n with open(realease_notes_file,\"w\") as output_file :# create release note and write on it\n for key,value in release_dict.items():\n output_file.write(f\"{key}: \\n\")\n if type(value) == dict:\n for key2,value2 in value.items():\n output_file.write(f\" {key2}: {value2} \\n\")\n else:\n for value2 in value:\n output_file.write(f\" {value2} \\n\")\n output_file.write(\"\\n\")\n logging.info(\"RELEASE NOTES FILE CREATED SUCCESSFULLY\") \n return release_dict"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the sampling_time of this PcrTestRecordResult. Nucleic acid test sampling time. | def sampling_time(self):
return self._sampling_time | [
"def samplingTime(self):\n return self._AWG.samplingTime_ns()",
"def _get_sampling_time(self):\n return 1/self.sampling_frequency*TIME_CONVERSION['s'][self.time_unit]",
"def sampling_rate(self):\n return self.track.sampling_rate",
"def sampling_rate(self):\n return self.librarycall('get_sampleRate')",
"def GetTimeRecorded(self):\n return self.time",
"def test_time(self):\n return self._test_time",
"def test_time(self) -> float:\n return self._test_time",
"def getRecordingTime(self) :\n tmpRecordingTime = 0\n \n if self.hasStartedRecording :\n # the signal is still recording\n self.stopRecordingTime = t.time()\n tmpRecordingTime = (self.stopRecordingTime - self.startRecordingTime)\n \n return round(self.recordingTime + tmpRecordingTime, 2)",
"def samples(self):\n\n return self._timeBase__samples.value",
"def time_profile(self):\n return self.__time_profile",
"def start_time(self):\n return RPR.GetAudioAccessorStartTime(self.id)",
"def pc_work_time(self) -> \"float\":\n return _beamforming_swig.randomsampler_sptr_pc_work_time(self)",
"def sample(self):\n return self._sample",
"def min_time(self):\n #{{{ function to return time of first sample\n\n return self.mintime",
"def get_sample_rate(self):\n time_diffs = np.diff(self.get_time()).mean()\n return 1000/time_diffs",
"def sampling_time(self, sampling_time):\n self._sampling_time = sampling_time",
"def sample_rate(self):\n if self.has_data():\n try:\n return round(\n 1.0\n / np.float64(\n (\n np.median(\n np.diff(self.dataset.coords[\"time\"].to_index())\n / np.timedelta64(1, \"s\")\n )\n )\n ),\n 0,\n )\n except AttributeError:\n self.logger.warning(\n \"Something weird happend with xarray time indexing\"\n )\n\n raise ValueError(\n \"Something weird happend with xarray time indexing\"\n )\n return self.run_metadata.sample_rate",
"def _get_second(self):\n return self.datetime.second",
"def get_time(self):\n if not self.simulated:\n return datetime.now()\n else:\n return self.simulated_time"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the sampling_time of this PcrTestRecordResult. Nucleic acid test sampling time. | def sampling_time(self, sampling_time):
self._sampling_time = sampling_time | [
"def setSamplingTime(self, time):\n return self._AWG.setSamplingTime_ns(time)",
"def sampling_time(self):\n return self._sampling_time",
"def set_sample_interval(self, secs):\n self._sample_interval_secs = secs",
"def _reset_sampling_time(self):\n member = self.get_member(str('sampling_time')) # HINT C API\n member.reset(self)",
"def sampling_date(self, sampling_date):\n\n self._sampling_date = sampling_date",
"def _get_sampling_time(self):\n return 1/self.sampling_frequency*TIME_CONVERSION['s'][self.time_unit]",
"def update_sample_time(self, NewSampleTime):\n if NewSampleTime > 0:\n ratio = NewSampleTime / self.sampleTime\n self.ki *= ratio\n self.kd /= ratio\n self.sampleTime = NewSampleTime",
"def set_sample_delay(self, delay):\n self.sampleDelay = delay/1000.",
"def samplingTime(self):\n return self._AWG.samplingTime_ns()",
"def time_to_sample(self, time):\n return time * self.freq",
"def test_time(self, test_time):\n self._test_time = test_time",
"def sample_rate(self, sample_rate):\n\n self._sample_rate = sample_rate",
"def sample(self, sample):\n self._sample = sample",
"def startSampling(self,calTime, maxTime):\n self.DEVICE.pyStart(calTime,maxTime)",
"def sample_to(self, sample_to):\n self._sample_to = sample_to",
"def timing(self, timing):\n\n self._timing = timing",
"def configure_timing(self, sample_rate, acquisition_time,\n pretrigger_time, sampling_mode):\n sample_rate = strict_range(sample_rate, (15260, 1e9))\n acquisition_time = strict_discrete_range(\n acquisition_time, (1e-09, 68.711), 1e-09)\n # acquisition is also limited by buffer size,\n # which depends on sample rate as well as acquisition time\n pretrigger_time = strict_range(pretrigger_time, (0, 10))\n try:\n pyvb.MsoSamplingMode(sampling_mode)\n except Exception:\n try:\n sampling_mode = pyvb.MsoSamplingMode[sampling_mode.upper()]\n except Exception:\n raise ValueError(\n \"Sampling Mode may be 0, 1, 'SAMPLE' or 'PEAK_DETECT'\")\n\n self.mso.configure_timing(\n sample_rate, acquisition_time, pretrigger_time, sampling_mode)",
"def on_sampling_timer(self, event):\n self.sampling_timer.Stop()",
"def set_timebase(self, dt, duration, segment_index=0, oversample=0):\n if len(self._channels_dict) == 0:\n self.raise_exception('Must call set_channel(...) before setting the timebase')\n\n self._oversample = oversample\n self._timebase_index = int(round(self._get_timebase_index(float(dt))))\n num_samples_requested = int(round(duration/dt))\n if self.IS_PS2000 or self.IS_PS3000:\n ret = self.get_timebase(self._timebase_index, num_samples_requested, oversample)\n self._sampling_interval, self._max_samples, self._time_units = ret\n else:\n ret = self.get_timebase2(self._timebase_index, num_samples_requested, segment_index, oversample)\n self._sampling_interval, self._max_samples = ret\n\n self._num_samples = int(round(duration/self._sampling_interval))\n\n # determine the TimeUnits enum from the sample interval\n for unit in enums.PS5000ATimeUnits:\n num_seconds_float = self._sampling_interval / (10 ** (3 * unit.value) * 1e-15)\n if num_seconds_float < 1e9: # use <9 digits to specify the streaming sampling interval\n self._streaming_sampling_interval = int(round(num_seconds_float))\n self._streaming_time_units = unit\n break\n\n self._allocate_buffer_memory()\n\n if abs(dt - self._sampling_interval) / dt > 1e-6:\n self.log_warning('The sampling interval is %.6e seconds, requested %.6e seconds',\n self._sampling_interval, dt)\n\n return self._sampling_interval, self._num_samples"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the test_time of this PcrTestRecordResult. Nucleic acid test result update time. | def test_time(self):
return self._test_time | [
"def test_time(self) -> float:\n return self._test_time",
"def update_time(self):\n return self._update_time",
"def get_update_time(self):\n return self._utime",
"def updated_time(self):\n return self._updated_time",
"def GetTimeRecorded(self):\n return self.time",
"def last_update_time(self):\n return self._last_update_time",
"def last_update(self: DetailedForecast) -> datetime:\n return self.update_time",
"def get_time_stamp(self):\n return self.__measurement_utc_time",
"def test_time(self):\n result = self.test_client.time\n\n assert result == \"15093049123\"",
"def last_check_time(self):\n last_check = Check.query.filter_by(monitor_id=self.id).order_by(Check.timestamp.desc()).first()\n if last_check is None:\n return datetime.utcfromtimestamp(0)\n return last_check.timestamp",
"def test_time(self, test_time):\n self._test_time = test_time",
"def get_time(self):\n if not self.simulated:\n return datetime.now()\n else:\n return self.simulated_time",
"def settled_time(self):\n return self._settled_time",
"def get_time(self):\n return self.block.create_time",
"def modified_time(self) -> float:\n return self._modified_time",
"def get_last_successful_run_time(self):\n last_run_time = self.job_run_dao.get_last_successful_runtime(self.job)\n return last_run_time if last_run_time else self.from_date",
"async def fetch_time(self, params={}):\n response = await self.publicGetTimestamp(params)\n return self.safe_integer(response, 'serverTime')",
"def query_time(self):\n return self._query_time",
"def check_last_run_table(self, component):\n logging.info(\"Getting the last run time in seconds for component: {0}\".format(component))\n last_record_time = '2000-01-01 00:00:00'\n last_run = LastRun.objects.filter(component=component).values('last_run')\n for last_run in last_run:\n last_record_time = (timezone.now() - last_run['last_run']).total_seconds()\n return last_record_time"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the test_time of this PcrTestRecordResult. Nucleic acid test result update time. | def test_time(self, test_time):
self._test_time = test_time | [
"def setTestTime(self, testTime):\r\n return self._domInstance.setAttribute('time', testTime)",
"def latest_test_usage_record_time(self, latest_test_usage_record_time):\n\n self._latest_test_usage_record_time = latest_test_usage_record_time",
"def test_time(self) -> float:\n return self._test_time",
"def set_time(self, time):\n pass",
"def update_testcase_duration(self, report):\n total = self.durations.get(report.nodeid, 0.0)\n total += getattr(report, 'duration', 0.0)\n self.durations[report.nodeid] = total\n\n testcase = self.tests_by_nodeid.get(report.nodeid)\n if testcase is not None:\n testcase.attr.time = total",
"def set_time(self, time):\n self._time = time",
"def test_time(self):\n return self._test_time",
"def set_time(self, set_time):\n\n self._set_time = set_time",
"def test_result(self, test_result):\n self._test_result = test_result",
"def test_set(self, test: base.DataType) -> None:\n self.test = test",
"def track_start_time(self, test_class, test_name, start_time):\n if test_class is None or test_name is None:\n return\n\n test_key = \"{}.{}\".format(test_class, test_name)\n self.start_time_by_test[test_key] = start_time",
"def set_test(self, test):\n\n self.test = test",
"def update_time(self, update_time):\n self._update_time = update_time",
"def update_time(self, update_time):\n\n self._update_time = update_time",
"def mark_timeout(self, test):\n if not test:\n LOGGER.warn('Empty or None test name passed to standard_json_util')\n return\n\n if test in self.tests:\n self.tests[test]['actual'] = self.tests[test]['actual'] + \" TIMEOUT\"\n self.tests[test]['is_unexpected'] = True\n else:\n self.tests[test] = {\n 'expected': 'PASS',\n 'actual': 'TIMEOUT',\n 'is_unexpected': True\n }",
"def settled_time(self, settled_time):\n\n self._settled_time = settled_time",
"def set_contest_date_time(contest, date_time, field):\n c = biv.load_obj(contest)\n assert type(c) == pem.E15Contest\n dt = _local_date_time_as_utc(c, date_time)\n assert hasattr(c, field), \\\n '{}: has no attr {}'.format(c, field)\n setattr(c, field, dt)\n _add_model(c)",
"def performTest(test_id, lname, tresult):\n#update the test_record with the test result and the current date (test_date)\n try:\n tdate = time.strftime('%d/%m/%Y')\n except:\n return \"Error! Date not in requested format.\"\n tdate = 'TO_DATE(\\'{}\\', \\'dd/mm/yyyy\\')'.format(tdate)\n updateStr=('UPDATE test_record SET result = \\'{}\\', test_date = {} WHERE test_id = {}').format(tresult, tdate, test_id)\n try:\n cur.execute(updateStr)\n except:\n return \"Invalid entry for one or more fields. Please check that all your responses are valid values.\"\n con.commit()\n\n # find the record that was updated to display\n selectStr=('SELECT * FROM test_record WHERE test_id = {}').format(test_id) \n try:\n cur.execute(selectStr)\n except:\n return \"Invalid entry for one or more fields. Please check that all your responses are valid values.\"\n return \"Test record updated successfully.\"",
"def test_upload_time(self):\n time_val = 1345427105 # just a realistic time val, ~ 8/19/12 6:45pm\n self.assertIsNone(self.model.last_uploaded(),\n \"Did not return None even though upload entry doesn't exist\")\n self.model.set_last_uploaded(time_val)\n self.assertEquals(time_val, self.model.last_uploaded(),\n \"Created initial val, but appears incorect\")\n self.model.set_last_uploaded(time_val + 10)\n self.assertEquals(time_val + 10, self.model.last_uploaded(),\n \"Updated value, but appears incorrect\")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the test_result of this PcrTestRecordResult. Nucleic acid test result; possible values include: \"positive\" (positive), \"negative\" (negative), \"unknown\" (unknown). | def test_result(self):
return self._test_result | [
"def get_test_result(self):\n failure_info_list = self._outcome.errors[1][1]\n if not failure_info_list:\n return 'OK'\n elif failure_info_list[0].__name__ == 'AssertionError':\n return 'FAIL'\n else: # 'NameError'\n return 'ERROR'",
"def get_test_status(self) -> str:\n return self.__test_result[Result.__RESULT]",
"def getTestResult(self):\r\n if self._testResult:\r\n return self._testResult\r\n\r\n nodes = self._domInstance.findall('testresult')\r\n\r\n if len(nodes) == 0:\r\n self._testResult = None\r\n else:\r\n self._testResult = TestResultNode(nodes[0], self._testSetModel)\r\n\r\n return self._testResult",
"def parse_verifier_result(self):\n stat = self.get_verifier_result(self.verification_id)\n try:\n num_executed = stat['num_tests'] - stat['num_skipped']\n try:\n self.result = 100 * stat['num_success'] / num_executed\n except ZeroDivisionError:\n self.result = 0\n if stat['num_tests'] > 0:\n LOGGER.info(\"All tests have been skipped\")\n else:\n LOGGER.error(\"No test has been executed\")\n return\n\n with open(os.path.join(self.res_dir, \"rally.log\"),\n 'r', encoding='utf-8') as logfile:\n output = logfile.read()\n\n success_testcases = []\n for match in re.findall(r'.*\\{\\d{1,2}\\} (.*?) \\.{3} success ',\n output):\n success_testcases.append(match)\n failed_testcases = []\n for match in re.findall(r'.*\\{\\d{1,2}\\} (.*?) \\.{3} fail',\n output):\n failed_testcases.append(match)\n skipped_testcases = []\n for match in re.findall(r'.*\\{\\d{1,2}\\} (.*?) \\.{3} skip(?::| )',\n output):\n skipped_testcases.append(match)\n\n self.details = {\"tests_number\": stat['num_tests'],\n \"success_number\": stat['num_success'],\n \"skipped_number\": stat['num_skipped'],\n \"failures_number\": stat['num_failures'],\n \"success\": success_testcases,\n \"skipped\": skipped_testcases,\n \"failures\": failed_testcases}\n except Exception: # pylint: disable=broad-except\n self.result = 0\n\n LOGGER.info(\"Tempest %s success_rate is %s%%\",\n self.case_name, self.result)",
"def getResult(self):\r\n return self._domInstance.getAttribute('result')",
"def get_result(test: TestRun):\n\n try:\n results = test.results\n results['results_log'] = test.results_log.as_posix()\n\n except (TestRunError, TestRunNotFoundError) as err:\n results = {'id': test.full_id}\n for field in BASE_FIELDS[1:]:\n results[field] = None\n\n results['result'] = \"Test not found: {}\".format(err)\n\n return results",
"def get_test_results(self):\n element = self.find_element_by_id(self.results_id, wait=True)\n\n if element:\n return element.text\n else:\n return False",
"def test_status(self) -> str:\n return self._test_status",
"def GetResult(self, playerjm):\n return self.score / len(self.scores)",
"def create_test_result(\n cls,\n result: bool,\n reason: str=None,\n text_tokens: Union[Tuple[Any], List[Any], Set[Any]]=(),\n tooltip: Union[int, str, CommonLocalizationUtils.LocalizedTooltip]=None,\n tooltip_tokens: Iterator[Any]=(),\n icon=None,\n influence_by_active_mood: bool=False\n ) -> CommonTestResult:\n return CommonTestResult(\n result,\n reason=reason.format(*text_tokens) if reason is not None else reason,\n tooltip_text=tooltip,\n tooltip_tokens=tooltip_tokens,\n icon=icon,\n influenced_by_active_mood=influence_by_active_mood\n )",
"def get_test(self):\n return self.id_test, self.x_test",
"def get_results(self):\n error_dict = {'error_code_test': self.error_code_test,\n 'error_text_test': self.error_text_test}\n\n return self.testresults, error_dict, self.checkstats",
"def testReport(self):\n return self._server.get_build_test_report(self._job.name, self.number)",
"def test_result_color(cls, test_result):\n\n return cls._RESULT_COLOR_MAPS_[test_result]",
"async def compare_result(self, query_type, test, result):\n if self.per_test_insertion:\n expected = self._translate_column_names(test['expected'])\n else:\n expected = test['expected']\n\n for key, value in expected.items():\n if value == 'ignore':\n continue\n\n if not isinstance(result, dict) or key not in result:\n self.num_fails += 1\n self.failed_tests.append(test['name'])\n\n print(' Expected: \"{}\": {}'.format(key, value))\n print(' Result: \"{}\": {}'.format(key, result))\n print(' Status: Failed')\n return\n\n if not self.compare_values(value, result[key]):\n time.sleep(self.sleep_time * 3)\n query_ = test['query']\n if isinstance(query_, dict):\n query_.update({\"bypass-cache\": True})\n try:\n result2 = await self.execute_query(query_type, test)\n result2 = ujson.loads(result2)\n if self.compare_values(value, result2[key]):\n print(\" Passed at second try\")\n continue\n except SlicingDiceException as e:\n print(str(e))\n\n self.num_fails += 1\n self.failed_tests.append(test['name'])\n\n print(' Expected: \"{}\": {}'.format(key, value))\n print(' Result: \"{}\": {}'.format(key, result[key]))\n print(' Status: Failed')\n return\n\n self.num_successes += 1\n\n print(' Status: Passed')",
"def result(self):\n if self.__json:\n return self.__json[\"result\"]\n else:\n return {}",
"def result_code(self):\n return self._result_code",
"def results(self) -> str:\n if self.passed:\n passfail_str = \"PASS\"\n else:\n passfail_str = \"FAIL\"\n\n string = f\"{self._result_header}\\nTest Results (Tol. +/-{self._tolerance*100:2.2}%): {passfail_str}\\n\"\n\n string += f\"Max Deviation: {self.max_r_deviation:2.3}%\\nAbsolute Mean Deviation: {self.avg_abs_r_deviation:2.3}%\"\n return string",
"def get_value(self, result):\n rel_value = result.norm_acc\n abs_value = result.accuracy\n return rel_value, abs_value"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the test_result of this PcrTestRecordResult. Nucleic acid test result; possible values include: \"positive\" (positive), \"negative\" (negative), \"unknown\" (unknown). | def test_result(self, test_result):
self._test_result = test_result | [
"def SetTestResult(test_info: paranoid_pb2.TestInfo,\n test_result: paranoid_pb2.TestResultsEntry):\n if not test_info.paranoid_lib_version:\n # Stores version value in test_info. As checks can be updated and become\n # stronger, this attribute can be useful to know when it makes sense to\n # re-execute a check against a crypto artifact.\n test_info.paranoid_lib_version = version.__version__\n\n if test_result.result:\n # When a key/signature is vulnerable to at least one test,\n # paranoid_pb2.TestInfo.weak should reflect that. We never set it to False,\n # as unknown weaknesses may exist.\n test_info.weak = True\n\n old_test_result = GetTestResult(test_info, test_result.test_name)\n if old_test_result:\n old_test_result.result |= test_result.result # update\n old_test_result.severity = max(old_test_result.severity,\n test_result.severity)\n else:\n test_info.test_results.append(test_result) # add new",
"def setTestResult(self, rlt):\n self.__testResult = rlt\n\n total_count = TestScriptSymbolTable.get_value_from_sym_tab(\"total_count\", TestScriptSymbolTable.test_result_tab) + 1\n TestScriptSymbolTable.insert_sym_tab(\"total_count\", total_count, TestScriptSymbolTable.test_result_tab)\n #if rlt == 'PASS':\n if 'PASS' in rlt:\n pass_count = TestScriptSymbolTable.get_value_from_sym_tab(\"pass_count\", TestScriptSymbolTable.test_result_tab) + 1\n TestScriptSymbolTable.insert_sym_tab(\"pass_count\", pass_count, TestScriptSymbolTable.test_result_tab)\n else:\n fail_count = TestScriptSymbolTable.get_value_from_sym_tab(\"fail_count\", TestScriptSymbolTable.test_result_tab) + 1\n TestScriptSymbolTable.insert_sym_tab(\"fail_count\", fail_count, TestScriptSymbolTable.test_result_tab)\n \n #self.generateFinalResult()",
"def SetReturnResult(self, result):\n callResult = self._Call(\"SetReturnResult\", result)",
"def test_results(self, test_results):\n\n self._test_results = test_results",
"def setResult(self, result):\r\n \r\n #Updates the parent statistics \r\n \r\n return self._domInstance.setAttribute('result', result)",
"def SetUnexpectedFailure(test_result):\n test_result['status'] = 'FAIL'\n test_result['expected'] = False\n logging.error('Processing failed for test %s', test_result['testPath'])",
"def set_test_failed(self):\n self.set_result(Status.FAILED)",
"def set_test_passed(self):\n self.set_result(Status.PASSED)",
"def set_result(self, result, update=False):\n self._result = result\n if update is False:\n self._set_done()",
"def set_result(self, result):\n self._result = result\n self._result_set = True\n self._invoke_callbacks(self)",
"def add_test_from_result(self, dbtestresult):\n testclass = module.get_class(dbtestresult.testimplementation)\n testclass.set_test_options()\n args, kwargs = parse_args(dbtestresult.arguments)\n testinstance = testclass(self.config)\n entry = TestEntry(testinstance, args, kwargs, False)\n self._add_with_prereq(entry)",
"def result_code(self, result_code):\n\n self._result_code = result_code",
"def __call__(self, result, test):\n if not self.__prepared:\n self.__prepared = True\n plug_result = self.config.plugins.prepareTestResult(result)\n if plug_result is not None:\n self.__result = result = plug_result\n if self.__result is not None:\n result = self.__result\n return ResultProxy(result, test, config=self.config)",
"def result_code(self, result_code):\n self._result_code = result_code",
"def set_result_data(self, result, score, normal_time=\"NT\", match_report_url=\"\"):\n self.result = result\n self.score = score\n self.normal_time = normal_time\n self.match_report_url = match_report_url\n self.excitement_index = self.calc_excitement_index()",
"def result(self, result: Item):\n\n self._result = result",
"def _AddResult(self):\n if not self._results:\n result = analyzer_result.AnalyzerResult()\n result.attribute_name = 'test_result'\n result.attribute_value = 'is_vegetable'\n self._results.append(result)",
"def set_test(self, test):\n\n self.test = test",
"async def compare_result(self, query_type, test, result):\n if self.per_test_insertion:\n expected = self._translate_column_names(test['expected'])\n else:\n expected = test['expected']\n\n for key, value in expected.items():\n if value == 'ignore':\n continue\n\n if not isinstance(result, dict) or key not in result:\n self.num_fails += 1\n self.failed_tests.append(test['name'])\n\n print(' Expected: \"{}\": {}'.format(key, value))\n print(' Result: \"{}\": {}'.format(key, result))\n print(' Status: Failed')\n return\n\n if not self.compare_values(value, result[key]):\n time.sleep(self.sleep_time * 3)\n query_ = test['query']\n if isinstance(query_, dict):\n query_.update({\"bypass-cache\": True})\n try:\n result2 = await self.execute_query(query_type, test)\n result2 = ujson.loads(result2)\n if self.compare_values(value, result2[key]):\n print(\" Passed at second try\")\n continue\n except SlicingDiceException as e:\n print(str(e))\n\n self.num_fails += 1\n self.failed_tests.append(test['name'])\n\n print(' Expected: \"{}\": {}'.format(key, value))\n print(' Result: \"{}\": {}'.format(key, result[key]))\n print(' Status: Failed')\n return\n\n self.num_successes += 1\n\n print(' Status: Passed')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the confidence of this PcrTestRecordResult. | def confidence(self):
return self._confidence | [
"def confidence(self):\n return self.__confidence",
"def confidence(self) -> float:\n return self._confidence",
"def confidence(self) -> float:\n return float(self.class_scores[self.class_num])",
"def confidence(self):\n score_uncertanity = 1-self.score\n clarity_uncertainty = 1-self.clarity\n return 1 - ((score_uncertanity * clarity_uncertainty) ** 0.5)",
"def detection_confidence(self):\n return self._detection_confidence",
"def confidence_rating_in_percentage(self) -> pulumi.Output[float]:\n return pulumi.get(self, \"confidence_rating_in_percentage\")",
"def get_medie_confidence(self):\n return self.__medie_confidence",
"def confidence_level(self):\n z = self.z_score\n if isinstance(z, string_types):\n return z\n\n z = abs(round(z, 3))\n\n if z == 0.0:\n return \"No Change\"\n elif z < 1.65:\n return \"No Confidence\"\n elif z < 2.33:\n return \"95% Confidence\"\n elif z < 3.08:\n return \"99% Confidence\"\n return \"99.9% Confidence\"",
"def confidence(self):\n return np.prod([edge.confidence for edge in self])",
"def predicted_confidence(self):\n sorted_clips = sorted(self.clips, key=lambda x: x.classifier_best_score)\n best_guess = sorted_clips[-1].classifier_best_score\n return best_guess",
"def get_min_confidence(self):\n return self.__min_confidence",
"def min_confidence(self) -> float:\n return self._min_confidence",
"def get_confidence(t):\n return t[1]",
"def confidence(self):\n\n choices = self.choices\n\n # Get the chi-squared between the top two choices, if more than two choices exist\n if len(choices) >= 2:\n csq = chi_squared(*choices)\n confident = is_confident(csq, len(choices)) if len(choices) <= 10 else None\n else:\n csq = None\n confident = False\n\n return (csq, confident)",
"def landmarking_confidence(self):\n return self._landmarking_confidence",
"def ocr_result(self):\n return self._ocr_result",
"def confidence_multiplier(self):\n pass",
"def confidence(self, filename):\n f = open(filename, 'rb')\n content = list(f.read())\n f.close()\n\n file_entropy = self.entropy(content)\n\n return (round(file_entropy / 8 * 100), filename)",
"def key_confidence(self):\n return self.h5.root.analysis.songs.cols.key_confidence[self.songidx]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the words_block_count of this PcrTestRecordResult. The number of text blocks detected and recognized. | def words_block_count(self):
return self._words_block_count | [
"def getblockcount(self):\n return self.call('getblockcount')",
"def word_count(self):\n return self._word_count",
"def word_count(self):\n return self.index.word_count()",
"def words_block_count(self, words_block_count):\n self._words_block_count = words_block_count",
"def size_words(self):\n assert self.is_block()\n hd = self.hd()\n if hd is None:\n return None\n return hd >> 10",
"def _raw_word_count(self, job):\n return sum(len(sentence.words) for sentence in job)",
"def total_words(self):\n return len(strip_tags('%s %s' % (self.lead, self.content)).split())",
"def test_wordCount(self):\n words = []\n for line in self.output:\n words.extend(string.split(line))\n wordCount = len(words)\n sampleTextWordCount = len(self.sampleSplitText)\n self.failUnlessEqual(wordCount, sampleTextWordCount)",
"def wordcount(self):\n return int(self._fetch_element('user_wordcount'))",
"def get_total_words(self):\n return self.total_words",
"def nblocks(self):\n return count(self)",
"def kv_block_count(self):\n return self._kv_block_count",
"def wordCount(self) -> int:\n return len(self.wordList)",
"def num_blocks(self) -> int:\n return len(self._blocks)",
"def total_height_blocks(validator):\n res = 0\n for bhash, b in validator.processed.items():\n if isinstance(b, Block):\n res += 1\n return res",
"def getBlockedWords(self):\n return self.blocklist[\"words\"]",
"def test_block_size(self):\n return self._test_block_size",
"def get_length(self):\n\t\treturn len(self._blocks)",
"def paragraph_count(self):\n return len(self.units)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the words_block_count of this PcrTestRecordResult. The number of text blocks detected and recognized. | def words_block_count(self, words_block_count):
self._words_block_count = words_block_count | [
"def words_block_count(self):\n return self._words_block_count",
"def block_count(self, block_count):\n\n self._block_count = block_count",
"def words_count(self, words_count):\n\n self._words_count = words_count",
"def words_block_list(self, words_block_list):\n self._words_block_list = words_block_list",
"def kv_block_count(self, kv_block_count):\n self._kv_block_count = kv_block_count",
"def word_count(self, word_count):\n\n self._word_count = word_count",
"def blocks(self, blocks: int):\n\n self._blocks = blocks",
"def locked_words_count(self, locked_words_count):\n\n self._locked_words_count = locked_words_count",
"def setSplitCount(self, count):\n pass",
"def test_wordCount(self):\n words = []\n for line in self.output:\n words.extend(string.split(line))\n wordCount = len(words)\n sampleTextWordCount = len(self.sampleSplitText)\n self.failUnlessEqual(wordCount, sampleTextWordCount)",
"def word_count(self, word_count):\n if word_count is None:\n raise ValueError(\"Invalid value for `word_count`, must not be `None`\")\n\n self._word_count = word_count",
"def part_count(self, part_count):\n\n self._part_count = part_count",
"def test_block_size(self, test_block_size):\n self._test_block_size = test_block_size",
"def set_max_tweet_response(self, count):\n self.count = count",
"def blocks_percentage(self, blocks_percentage):\n\n self._blocks_percentage = blocks_percentage",
"def test_words_count(self):\n self.assertEqual(52, len(self.page.words))",
"def result_count(self, result_count):\n\n self._result_count = result_count",
"def completed_words_count(self, completed_words_count):\n\n self._completed_words_count = completed_words_count",
"def confirmed_words_count(self, confirmed_words_count):\n\n self._confirmed_words_count = confirmed_words_count"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the words_block_list of this PcrTestRecordResult. List of recognized text blocks, output in order from left to right and top to bottom. | def words_block_list(self):
return self._words_block_list | [
"def getBlockedWords(self):\n return self.blocklist[\"words\"]",
"def getBlocklist(self):\n return self.blocklist",
"def words_block_list(self, words_block_list):\n self._words_block_list = words_block_list",
"def get_blocks(self):\n return self.blocks",
"def get_blocks(self):\n cmd = \"\"\" SELECT * FROM %s; \"\"\" %(TABLE_BLOCKCHAIN)\n\n self.__dbcursor.execute(cmd)\n return self.__dbcursor.fetchall()",
"def kv_block_list(self):\n return self._kv_block_list",
"def getBlocks(self):\n return self.blocks",
"def get_blocks(self) -> list:\n self.clingo = ClingoBridge() # reset clingo\n\n base = ('base', '')\n self.clingo.add_file('initial-states.lp')\n self.clingo.run([base], n=1)\n output = self.clingo.output[0]\n\n blocks = []\n for atom in output:\n if atom.name == 'block':\n blocks.append(atom)\n\n return blocks",
"def content_blocks_list(self):\n\n subtitle_blocks_list = self._get_article_subtitle_blocks(self.article_id)\n paragraph_blocks_list = self._get_article_paragraph_blocks(self.article_id)\n image_blocks_list = self._get_article_image_blocks(self.article_id)\n audio_blocks_list = self._get_article_audio_blocks(self.article_id)\n video_blocks_list = self._get_article_video_blocks(self.article_id)\n sorted_content_blocks_list = sorted(subtitle_blocks_list + paragraph_blocks_list\n + image_blocks_list + audio_blocks_list\n + video_blocks_list, key=lambda block: block['content_order'])\n return sorted_content_blocks_list",
"def get_block_names(self)->List[str]:\n\n # also a nice idea: self._block_names = list(filter(lambda x: not x.startswith(\"_\") and x.isupper(), vars(imd).keys()))list(self._blocks.keys())\n\n return list(filter(lambda x: not x.startswith(\"_\") and x.isupper(), vars(self).keys()))",
"def blocks(self):\n rng = (self.start, self.end-self.start)\n return foundry.text_blocks(self._parent, rng)",
"def format_blocks(self):\n\n block_text = []\n for el, text in self._block_text.items():\n self.soft_break(el, text)\n content = ''.join(text)\n if content:\n block_text.append((content, self.additional_context + self.construct_selector(el)))\n return block_text",
"def get_block_names(self) -> List[str]:\n\n # also a nice idea: self._block_names = list(filter(lambda x: not x.startswith(\"_\") and x.isupper(), vars(imd).keys()))list(self._blocks.keys())\n\n return list(filter(lambda x: not x.startswith(\"_\") and x.isupper(), vars(self).keys()))",
"def blocks(text):\n return text.split(\"\\n\\n\")",
"def word_list(self) -> List[str]:\n return self._word_list",
"def get_block_names(self):\n return self._parse_response(self.client.service.GetBlockNames())",
"def getBlockList(userID) -> list:\n return getUserData(userID)[\"block_list\"]",
"def words_block_count(self):\n return self._words_block_count",
"def GetBlocks(state):\n result = []\n last_pos = 0\n for entry in state:\n pos = entry['pos']\n # Calculate block start points from the beginning of individual lines.\n blocks = [(s[0]-last_pos, s[1]-s[0]) for s in entry['blocks']]\n # Add one end marker block.\n blocks.append((pos-last_pos, 0))\n result.append(blocks)\n last_pos = pos\n return result"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the words_block_list of this PcrTestRecordResult. List of recognized text blocks, output in order from left to right and top to bottom. | def words_block_list(self, words_block_list):
self._words_block_list = words_block_list | [
"def words_block_list(self):\n return self._words_block_list",
"def kv_block_list(self, kv_block_list):\n self._kv_block_list = kv_block_list",
"def words_block_count(self, words_block_count):\n self._words_block_count = words_block_count",
"def add_blocks(self, block_list):\n blocks = block_list.copy() ## Fixes bug??\n for block in block_list:\n x, y = block\n self[x, y] = True",
"def setup_blocklist(self):\n \"\"\"TODO the parsing seems pretty ineffective. maybe i should do something.\"\"\"\n blocklist = Blocklist()\n\n self.statusbar.emit(\"%s - getting & parsing blocklist\" % self.status)\n blocklist.setup_rules()\n rules = blocklist.get_rules()\n\n self.statusbar.emit(\"%s - setting blocklist\" % self.status)\n filter = lt.ip_filter()\n exceptions = 0\n for rule in rules:\n try:\n filter.add_rule(rule['from'], rule['to'], rule['block'])\n except:\n exceptions += 1\n if exceptions > 10:\n return False\n self.session.set_ip_filter(filter)\n self.statusbar.emit(\"%s\" % self.status)",
"def AddWords(cls, word_list, words):\n entity = WordList.get_by_id(word_list)\n if not entity:\n return \"word list {} does not exist\".format(word_list)\n entity.words = list(set(entity.words) | set(words))\n entity.numWords = len(entity.words)\n entity.put()\n return None",
"def blocks(self, blocks: int):\n\n self._blocks = blocks",
"def RemoveWords(cls, word_list, words):\n entity = WordList.get_by_id(word_list)\n if not entity:\n return \"word list {} does not exist\".format(word_list)\n entity.words = list(set(entity.words) - set(words))\n entity.numWords = len(entity.words)\n entity.put()\n return None",
"def words_block_count(self):\n return self._words_block_count",
"def set_workflow_steps(self, steps_list):\n self._data_dict[self.KEY_WF_STEPS] = steps_list",
"def SetKeyWords(self, kw_lst):\n # Parse Keyword Settings List simply ignoring bad values and badly\n # formed lists\n self._code['keywords'] = list()\n kwlist = \"\"\n for keyw in kw_lst:\n if len(keyw) != 2:\n continue\n else:\n if not isinstance(keyw[0], int) or \\\n not isinstance(keyw[1], basestring):\n continue\n else:\n kwlist += keyw[1]\n super(EditraBaseStc, self).SetKeyWords(keyw[0], keyw[1])\n\n # Can't have ? in scintilla autocomp list unless specifying an image\n # TODO: this should be handled by the autocomp service\n if '?' in kwlist:\n kwlist.replace('?', '')\n\n kwlist = kwlist.split() # Split into a list of words\n kwlist = list(set(kwlist)) # Remove duplicates from the list\n kwlist.sort() # Sort into alphabetical order\n\n self._code['keywords'] = kwlist",
"def decode(self, string_list):\n assert isinstance(string_list, list)\n blocks_args = []\n for block_string in string_list:\n blocks_args.append(self._decode_block_string(block_string))\n return blocks_args",
"def words(self, value):\n self._words = value\n for w in self._words:\n w.parent_token = self",
"def getBlockedWords(self):\n return self.blocklist[\"words\"]",
"def words(self, value):\n self._words = value",
"def getBlocklist(self):\n return self.blocklist",
"def blockWord(self, word):\n if word.lower() not in self.getBlockedWords():\n self.blocklist[\"words\"].append(word.lower())\n return True\n return False",
"def extract_blocks(pool, listnos, num_blocks):\n assert len(listnos) % num_blocks == 0, \"The number of lists to append must be divisable by the number of blocks\"\n\n wordlists = {}\n for word in pool:\n wordlists[word['listno']] = wordlists.get(word['listno'], []) + [word]\n\n blocks = []\n for i in range(len(listnos)):\n listno = listnos[i]\n wordlist = [word.copy() for word in wordlists[listno]]\n for word in wordlist:\n word['blockno'] = i//num_blocks\n word['block_listno'] = i\n blocks += wordlist\n\n return blocks",
"def setBlockedCookies(self, list_):\n if not self.__loaded:\n self.__load()\n \n self.__exceptionsBlock = list_[:]\n self.__exceptionsBlock.sort()\n self.__saveTimer.changeOccurred()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Prompt user for input and continue to do so until input is valid. This function takes two required inputs, the message to display, and the limit of characters required. If the user enters something too long, they are prompted again until the input is correct. If the optional isNumber parameter is True, then it will also continue to prompt the user until a valid number is input. | def LimitedInput(message, limit, isNumber=False):
    keepAsking = True
    while keepAsking:
        answer = input(message)
        if len(answer) > limit:
            print("The input must be", limit, "characters or less.")
        else:
            keepAsking = False
            if isNumber is True and CheckNumber(answer) is False:
                print("The input must be a number.")
                keepAsking = True
    return answer | [
"def ask_num(message, min, max):\n while True:\n try:\n number = int(input(message))\n except:\n print(\"that was not a number\")\n continue\n if max >= number >= min:\n break\n return number",
"def ask_user():\r\n password_lenght = 0\r\n while password_lenght == 0:\r\n try:\r\n password_lenght = int(input(\"How long password you want? Enter the number... \"))\r\n if password_lenght <= 0:\r\n print(\"Try to enter any number greater than 0...\")\r\n continue\r\n return password_lenght\r\n except Exception:\r\n continue",
"async def input_number(ctx, client:discord.Client,\n message:str=\"Please enter a number within 10 seconds.\"):\n await ctx.send(message)\n try:\n message = await client.wait_for(\"message\", timeout=10,\n check=lambda message: message.author == ctx.message.author)\n except asyncio.TimeoutError:\n raise commands.UserInputError(\"Timed out waiting.\")\n try:\n return int(message.clean_content.lower())\n except ValueError:\n raise commands.UserInputError(\"Not a valid number.\")",
"def get_number_as_string(message, min_length):\r\n invalid_number = True\r\n # Loop Sequence\r\n while invalid_number:\r\n # Remove trailing, leading and spaces in-bewteen\r\n number_text = input(message).strip().replace(\" \",\"\")\r\n # Check if input is a number\r\n if not number_text.isdigit():\r\n print(INVALID_NUMBER_MSG)\r\n # Check if input is a valid length\r\n elif len(number_text) < min_length:\r\n print(f\"The number must be at least {min_length} digits long.\")\r\n else:\r\n invalid_number = False\r\n return number_text",
"def validacionEntero(message='Ingrese un numero entero: ', min=0,max=sys.maxsize):\n validacion = False\n while validacion == False:\n try: \n numero = inputNumber('entero',message)\n if numero >= min and numero <= max:\n validacion = True\n return numero\n else: \n print(\"Ingrese una opcion entre {} y {}\".format(min,max))\n except:\n return 'Error, argumento invalido'",
"def getSecretMessage(limit):\n\n\tsecret = None\n\twhile secret == None or len(secret) not in range(1, limit+1):\n\t\tsecret = raw_input(\"Enter the secret message (Max length %d): \" % limit)\n\t\tif len(secret) > limit:\n\t\t\tprint \"Invalid message: too long!\"\n\t\telif len(secret) < 1:\n\t\t\tprint \"Invalid message: empty input!\"\n\n\treturn secret",
"def validate_num_input(self, user_input, max_num) -> bool:\n\n try:\n num = int(user_input)\n if num < 1 or num > max_num:\n raise ValueError(\n f'This should be a number between 1 and {max_num}!')\n except ValueError as e:\n print(f'Invalid data: {e}, please try again.\\n')\n return False\n return True",
"def validate_correct_hint(self):\n is_response_hint_valid = False\n while is_response_hint_valid is False:\n hint_value = self.ask_user_input(\"Enter maximum hint threshold\")\n if not hint_value.isdigit():\n print(\"Not a number, please try again\")\n elif 0 <= int(hint_value) <= 81:\n is_response_hint_valid = True\n self.current_response = hint_value\n else:\n print(\"Number is out of the valid range, please try again\")\n return is_response_hint_valid",
"def integer_input( min_value=0, max_value=999, default=0, \n prompt=\"please type number and press ENTER\"):\n while True:\n raw = input(prompt)\n if not raw.isdigit():\n print(\"please enter a number\")\n continue\n raw = int(raw)\n if min_value <= raw <= max_value:\n return raw\n print(\"please enter value between {} and {}\".format(min_value,\n max_value))",
"def confirm():\n end_loop = False\n while not end_loop:\n confirmation = input(\"\"\"Would you like to continue with your choice?\n[1] No [2] Yes\nEnter a number please: \"\"\")\n if not confirmation or confirmation.isspace():\n print(\"You have not entered anything!\")\n try_again()\n elif confirmation.isnumeric() == True:\n if 0 < int(confirmation) < 3:\n if int(confirmation) == 1:\n confirmation = False\n return confirmation\n else:\n confirmation = True\n return confirmation\n end_loop = True\n else:\n print(\"You have not entered a valid number. Please enter a number between 1 and 2.\")\n else:\n print(\"Please enter a number only.\")\n try_again()",
"def get_integer(prompt: str, error_prompt: str, limits_prompt: str, min_num: int = -float('inf'),\n max_num: int = float('inf')) -> int:\n while True:\n try:\n integer = int(input(prompt))\n if max_num >= integer >= min_num:\n return integer\n print(limits_prompt)\n except ValueError:\n print(error_prompt)",
"def handling_exceptions():\n while True:\n try:\n int(input(\"Please enter a number\"))\n break\n except ValueError:\n print(\"Oops ! That was no valid number. Try again . . .\")\n return",
"def check_input():\n err_mes = 'Invalid input! Input must not be even and must be greater than 5.'\n while True:\n board_size = int(input('Enter board size: '))\n if not input:\n print(err_mes)\n elif board_size % 2 == 0 or board_size <= 5:\n print(err_mes)\n else:\n break\n return board_size",
"def get_user_number():\n while True:\n try:\n user_number = int(input(\"Choose the number: \"))\n break\n except ValueError:\n print(\"It's not a number\")\n return user_number",
"def clean_input(prompt='Error'): # A special input function that will reject a\r\n # user's input of text when a number is requested -- if no prompt is\r\n # specified in the program, it will display \"Error\"\r\n text = True\r\n phrase = '0'\r\n while text:\r\n phrase = input(prompt + '\\n')\r\n try: # Adapted from an example in the ThinkPython textbook (15.7) -\r\n # Checks whether the input is a number, positive or negative. If\r\n # not, rejects the input and user gets to try again\r\n float(phrase)\r\n text = False\r\n except ValueError:\r\n print(\"Error: Non-Numeric Entry Detected\")\r\n # if phrase.isnumeric(): # Checks for a positive number (negative\r\n # rejected as well as text) - replaced with superior form from textbook\r\n # example\r\n # return float(phrase) # Return the number the user entered\r\n # else:\r\n # print(\"Error: Non-Numeric Entry Detected\")\r\n return float(phrase) # Return the number the user entered\r",
"def input_num(number=0, m_type=False):\r\n\r\n if number == 0:\r\n number = \"\"\r\n while True:\r\n try:\r\n if m_type:\r\n res = float(input(f\"ะะฒะตะดะธัะต ัะธัะปะพ {number}:\"))\r\n else:\r\n res = int(input(f\"ะะฒะตะดะธัะต ัะธัะปะพ {number}:\"))\r\n except ValueError:\r\n print(\"Error! ะญัะพ ะฝะต ัะธัะปะพ, ะฟะพะฟัะพะฑัะนัะต ัะฝะพะฒะฐ.\")\r\n else:\r\n break\r\n return res",
"def force_number(user_input):\n while True:\n try:\n # Checks if value is an interger.\n number = int(input(user_input))\n break\n except ValueError:\n print(\"Please enter a valid number\")\n return number",
"def get_number():\n\n while True:\n user_number_str = input('Digite um nรบmero para saber o seu fatorial: ').strip()\n\n if user_number_str.isnumeric():\n return int(user_number_str)\n else:\n print('Valor invรกlido.')",
"def pwd_len():\r\n while True:\r\n password_length = input('How much length for password u want ? Minimum length is 6 and Maximum length is 25 : ')\r\n try:\r\n password_length = int(password_length)\r\n if 6 <= password_length <= 25:\r\n break\r\n else:\r\n print('{} is not in range'.format(password_length))\r\n except ValueError:\r\n print('{} is not an integer'.format(password_length))\r\n return password_length"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function returns True if userInput can be converted to a number and returns False if it cannot. | def CheckNumber(userInput):
try:
float(userInput)
return True
    except ValueError:
return False | [
"def validate_num_input(self, user_input, max_num) -> bool:\n\n try:\n num = int(user_input)\n if num < 1 or num > max_num:\n raise ValueError(\n f'This should be a number between 1 and {max_num}!')\n except ValueError as e:\n print(f'Invalid data: {e}, please try again.\\n')\n return False\n return True",
"def is_number(n):\n try:\n float(n)\n return True\n except ValueError:\n return False",
"def check_if_input_is_int(self):\n try:\n int(self.input)\n except ValueError:\n return False\n else:\n return True",
"def check_one_digit():\n if tokenize_user_input[1].isdigit:\n return True\n else:\n print(\"Please try with one digit number.\")",
"def is_num(var):\n try:\n int(var)\n return True\n except ValueError:\n return False",
"def isnumber(x):\n try:\n float(x)\n return True\n except:\n return False",
"def isnumber(x):\n try:\n float(x)\n return True\n except ValueError:\n return False",
"def could_be_number(val):\n if val == None:\n return False\n\n if isinstance(val, (float, int, long)):\n return True\n\n # allow coercion from str\n if isinstance(val, (str, unicode)):\n try:\n n = float(val)\n if not isinstance(n, float):\n raise ValueError\n else:\n return True\n except:\n return False\n\n #otherwise\n return False",
"def force_number(user_input):\n while True:\n try:\n # Checks if value is an interger.\n number = int(input(user_input))\n break\n except ValueError:\n print(\"Please enter a valid number\")\n return number",
"def input_isvalid(numstr, target):\n try:\n numstr = int(numstr)\n except ValueError:\n return False\n return numstr - 1 >= 0 and numstr - 1 <= len(target) - 1",
"def _is_number(s) -> bool:\n try:\n float(s)\n except ValueError:\n return False\n else:\n return True",
"def _is_number(string: str) -> bool:\n try:\n int(string)\n return True\n except ValueError:\n return False",
"def _is_numeric(some_num):\n try:\n float(some_num)\n return True\n except:\n return False",
"def _check_message_is_number(message):\n try:\n float(message)\n return True\n except ValueError:\n return False",
"def is_number(n):\n return isinstance(n, (int, float))",
"def __has_numbers(self, input_string):\n return bool(re.search(r'\\d', input_string))",
"def _is_num(self, s):\n try:\n float(s)\n return True\n except ValueError:\n return False",
"def _is_number(obj):\n try:\n float(obj)\n return True\n except:\n pass\n return False",
"def is_num(self, text) -> bool:\n try:\n int(text)\n return True\n except ValueError:\n return False"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function prompts the user for a date using the message variable. The user will continue to be prompted until the format is correct. The date format is very specific, in the format DD/MM/YYYY. This function will confirm there are the right number of characters, the / are in the right place, the inputs are numbers, the days are between 1 and 31, the months are between 1 and 12, and the year is between 2000 and 3000 (roll on year 3k bug!) | def DateInput(message):
askAgainMessage = "The date must be in the format DD/MM/YYYY"
keepAsking = True
while keepAsking:
answer = input(message)
# First we check if there are two / by splitting using / and looking
# for 3 items in the returned list.
dateCheck = answer.split(sep="/")
        if len(dateCheck) != 3:
print(askAgainMessage)
else:
# If all is order, we can assign the 3 items to day, month, year
day = dateCheck[0]
month = dateCheck[1]
year = dateCheck[2]
# Next we check each item has the right amount of characters
# and they can all be converted into numbers.
if (len(day) == 2 and len(month) == 2 and len(year) == 4 and
CheckNumber(day) and CheckNumber(month) and
CheckNumber(year)):
day = int(day)
month = int(month)
year = int(year)
if (day > 0 and day < 32 and month > 0 and month < 13 and
year > 2000 and year < 3000):
keepAsking = False
else:
print(askAgainMessage)
else:
print(askAgainMessage)
return answer | [
"def check_date(message, param):\n while True:\n try:\n day, month, year = input(message).split(param)\n return str(datetime.datetime(int(year), int(month), int(day)).strftime(\"%d/%m/%Y\"))\n except ValueError:\n continue",
"def read_date(type_date):\n \n correct_date = False\n while not correct_date :\n correct_type = False\n while not correct_type: \n try:\n d=int(input(\"dia: \"))\n if(d>0 and d<=31):\n dia= str(d)\n dia = dia.rjust(2,\"0\")\n correct_type = True\n else: \n raise \n except:\n print('Entrada invalida, intentelo de nuevo')\n \n correct_type = False \n while not correct_type: \n try:\n m=int(input(\"mes: \"))\n if(m>0 and m<=12):\n mes = str(m)\n mes= mes.rjust(2,\"0\")\n correct_type = True\n else:\n raise\n except:\n print('Entrada invalida, intentelo de nuevo')\n \n \n correct_type = False \n while not correct_type: \n try:\n \n a=int(input(\"ano: \"))\n if(a>0):\n ano = str(a)\n ano= ano.rjust(4,\"0\")\n correct_type = True\n else:\n raise \n except:\n print('Entrada invalida, intentelo de nuevo') \n \n date_aux =ano+\"-\"+mes+\"-\"+dia\n \n if (type_date =='despues'):\n if date.fromisoformat(date_aux) < date.today():\n print('La fecha ingresada es anterior a la fecha actual, intentelo de nuevo.')\n else:\n correct_date = True\n elif (type_date =='antes'):\n \n if date.fromisoformat(date_aux) > date.today():\n print('La fecha ingresada es posterior a la fecha actual, intentelo de nuevo.')\n else:\n correct_date = True\n \n date_aux =dia+\"/\"+mes+\"/\"+ano \n return date_aux",
"def validate_input(date_string):\n #I decided to make sure the input was valid by checking each individual piece. I did this by splitting the input string by the dashes.\n #I checked first that the month value was between 1 and 12. I then checked depending on the month if the day value was valid.\n #I also made sure to check that the year was greater than 1000.\n #For February, I made a specific check for if it was a leap year or not. If the year inputted is not a leap year and the user entered\n #29 as the day value, it throws an error. Finally, once all values are checked and are valid, they are put into a tuple.\n splitdate = date_string.split(\"-\")\n if splitdate[0] != '' and splitdate[1] != '' and splitdate[2] != '':\n if int(splitdate[0]) >= 1 and int(splitdate[0]) <= 12:\n if int(splitdate[0]) == 1 or int(splitdate[0]) == 3 or int(splitdate[0]) == 5 or int(splitdate[0]) == 7 or int(splitdate[0]) == 8 or int(splitdate[0]) == 10 or int(splitdate[0]) == 12:\n if int(splitdate[1]) >= 1 and int(splitdate[1]) <= 31:\n if int(splitdate[2]) >= 1000:\n date = (int(splitdate[0]), int(splitdate[1]), int(splitdate[2]))\n return date\n elif int(splitdate[0]) == 4 or int(splitdate[0]) == 6 or int(splitdate[0]) == 9 or int(splitdate[0]) == 11:\n if int(splitdate[1]) >= 1 and int(splitdate[1]) <= 30:\n if int(splitdate[2]) >= 1000:\n date = (int(splitdate[0]), int(splitdate[1]), int(splitdate[2]))\n return date\n elif int(splitdate[0]) == 2:\n if int(splitdate[2]) % 4 == 0 or int(splitdate[2]) % 1000 == 0:\n if int(splitdate[1]) >= 1 and int(splitdate[1]) <= 29:\n if int(splitdate[2]) >= 1000:\n date = (int(splitdate[0]), int(splitdate[1]), int(splitdate[2]))\n return date\n elif int(splitdate[1]) >= 1 and int(splitdate[1]) <= 28:\n if int(splitdate[2]) >= 1000:\n date = (int(splitdate[0]), int(splitdate[1]), int(splitdate[2]))\n return date\n return None",
"def ask_for_date(self, year_text, month_text, day_text):\n year = input(year_text)\n month = input(month_text)\n day = input(day_text)\n return f\"{day}/{month}/{year}\"",
"def is_valid_date(date):\n\n try:\n parse(date)\n return date\n except:\n new_date = raw_input(\"Invalid date, try again: YYYY-MM-DD \")\n return is_valid_date(new_date)",
"def validate(date_text): \n try:\n datetime.datetime.strptime(date_text, '%Y-%m-%d')\n #print(date_text)\n except ValueError:\n print('Error')\n raise ValueError('Invalid date input') \n #export_historical_data()\n #raise ValueError(\"Incorrect data format, should be YYYY-MM-DD\")",
"def validate_date(input_date):\n\n\t# Date to be in format: DD/MM/YYYY -> len() = 10\n\tif len(input_date) != 10:\n\t\treturn False\n\t\n\t# Parse string for specific date/month/year\n\ttry:\n\t\ttest_date = int(input_date[0:2])\n\t\ttest_month = int(input_date[3:5])\n\t\ttest_year = int(input_date[6:10])\n\texcept ValueError:\n\t\treturn False\n\t\t\n\ttoday = date.today()\n\ttest_date = date(test_year, test_month, test_date)\n\t\n\tdays_difference = test_date - today\n\t\t\n\t# Date can't be before today or beyond 6 months from today\n\tif test_date < today or days_difference.days > 185:\n\t\treturn False\n\t\t\n\treturn True",
"def get_new_dob():\n error_msg = \"Invalid. Please use ddmmyyyy format\\n\\te.g.: 01121990 for 01 Dec 1990\"\n while True:\n dob = input(\"Please enter date of birth for account recovery (ddmmyyyy): \")\n if len(dob) != 8:\n # checks if there are enough characters\n print(error_msg)\n else:\n try:\n # attempts to convert slices into integers\n day = int(dob[0:2])\n month = int(dob[2:4])\n year = int(dob[4:8])\n except:\n print(error_msg)\n continue\n \n if 0 < day <= 31 and 0 < month <= 12 and year < 2020:\n # checks that the day, month, and year contain valid numbers\n return dob\n else:\n print(\"Please enter a valid date.\")",
"def ex8() :\r\n print(\" - Date Calculator - \")\r\n import datetime\r\n today = datetime.date.today()\r\n print(today)\r\n try : #try catch method, in case user enters non-date, or 31st Feb etc.\r\n userDate = input(\"Please enter the date to check in a dd/mm/yy format: \") #userDate is string\r\n userDate = datetime.datetime.strptime(userDate, '%d/%m/%Y').date() #userDate is date_object\r\n if userDate < today : print(\"Invalid input, date is in the past\")\r\n elif userDate == today: print(\"That's today you dum-dum, answer is 0 days.\")\r\n else:\r\n delta = userDate - today #calculate difference\r\n delta = str(delta) #date_object don't work with split only str\r\n delta = delta.split(\",\") #unorthodox method to delete time (0:00:0) from the days\r\n print(\"The number of days between today (\",today,\") and entered date (\",userDate,\") are \",delta[0],\".\")\r\n except ValueError as e :\r\n print(\"Not a valid date.\")",
"def verifying_date_format(input_date):\n\timport datetime\n\n\tinput_date = input_date\n\n\ttry:\n\t\tyear = int(input_date[:4])\n\t\tmonth = int(input_date[5:7])\n\t\tday = int(input_date[-2:])\n\t\tconverted_date = str(datetime.date(year,month,day))\n\t\tcorrect_date = True\n\texcept ValueError:\n\t\tcorrect_date = False\n\n\treturn (correct_date, converted_date)",
"def getdatefromuser():\n date_str = raw_input(\"Enter the date cutoff in mm/dd/yyyy format: \")\n date_parts = re.split('[-/]', date_str)\n return date(*[int(elt) for elt in [date_parts[2], date_parts[0], date_parts[1]]])",
"def getdate(s=\"\"):\n global parseoutput, YearGuess\n nums=re.findall(reuf,s)\n nums=map(float,nums) #Convert from strings to floats\n if len(nums)==2:\n month=0\n #May be broken Canopus UTdate with month name\n s=string.upper(s)\n for i in range(12):\n if string.find(s, months[i])>=0:\n month=i+1\n assert month, \"Only two numbers, and month name not found in '\"+s+\"'\"\n nums=[nums[0],month,nums[1]]\n monthname=1\n else:\n monthname=0\n\n assert len(nums)==3, \"Too many/few numbers in '\"+s+\"'\"\n\n #Now either nums[0] is the year and nums[2] is the day, or vice-versa\n #either way, nums[1] is the month\n\n month=int(nums[1])\n assert (month>=1) and (month<=12), \"Month invalid in '\"+s+\"'\"\n\n if nums[0]>100:\n #If first number is >100, it must be a 4-digit-year, so the third must be a day\n assert (nums[2]>=1) and (nums[2]<=mlen[month-1]) and (nums[0]>1980) and (nums[0]<2050), \"YMD, Day invalid in '\"+s+\"'\"\n day=nums[2]\n year=nums[0]\n if YearGuess:\n if year <> YearGuess:\n parseoutput += \"Best guess at year is \"+`YearGuess`+\", clashes with \"+`year`+\" from \"+s\n else:\n YearGuess = int(year)\n return (year,month,day),1 #confident it's YMD\n\n if nums[2]>100:\n #If third number is >100, it must be a 4-digit-year, so the first must be a day\n assert (nums[0]>=1) and (nums[0]<=mlen[month-1]) and (nums[2]>1980) and (nums[2]<2050), \"DMY, Day invalid in '\"+s+\"'\"\n day=nums[0]\n year=nums[2]\n if YearGuess:\n if year <> YearGuess:\n parseoutput += \"Best guess at year is \"+`YearGuess`+\", clashes with \"+`year`+\" from \"+s\n else:\n YearGuess = int(year)\n return (year,month,day),1 #confident it's DMY\n\n #OK, at this point all three numbers are less than or equal to 100\n\n if nums[0]>50:\n #If first number is >50, it must be a pre-2000 2-digit-year, so the third must be a day\n assert (nums[2] >= 1) and (nums[2] <= mlen[month-1]), \"YMD, Day invalid in '\"+s+\"'\"\n day=nums[2]\n if nums[0]<100:\n year=nums[0]+1900\n else:\n year=nums[0]\n if YearGuess:\n if year <> YearGuess:\n parseoutput += \"Best guess at year is \"+`YearGuess`+\", clashes with \"+`year`+\" from \"+s\n else:\n YearGuess = int(year)\n return (year,month,day),1 #confident it's YMD\n\n if nums[2]>50:\n #If third number is >50, it must be a pre-2000 2-digit-year, so the first must be a day\n assert (nums[0] >= 1) and (nums[0] <= mlen[month-1]), \"DMY, Day invalid in '\"+s+\"'\"\n day=nums[0]\n if nums[2]<100:\n year=nums[2]+1900\n else:\n year=nums[2]\n if YearGuess:\n if year <> YearGuess:\n parseoutput += \"Best guess at year is \"+`YearGuess`+\", clashes with \"+`year`+\" from \"+s\n else:\n YearGuess = int(year)\n return (year,month,day),1 #confident it's DMY\n\n #At this point, all numbers are <=50, so could conceivably be in either order. 
Try yearguess first\n\n if YearGuess:\n yg = int(str(int(YearGuess))[-2:])\n if (nums[0] == YearGuess) or (nums[0] == yg):\n assert (nums[2] >= 1) and (nums[2] <= mlen[month-1]), \"YMD, Day invalid in '\"+s+\"'\"\n year = YearGuess\n day = nums[2] \n return (year,month,day),1 #confident it's YMD\n elif (nums[2] == YearGuess) or (nums[2] == yg):\n assert (nums[0] >= 1) and (nums[0] <= mlen[month-1]), \"DMY, Day invalid in '\"+s+\"'\"\n year = YearGuess\n day = nums[0] \n return (year,month,day),1 #confident it's DMY\n\n #First and last number are both valid days, so we hope the LARGER one is the day\n\n if ( (nums[2]>nums[0]) and (not monthname) ) or (dateorder=='YMD'):\n assert (nums[2] >= 1) and (nums[2] <= mlen[month-1]), \"guess YMD, Day invalid in '\"+s+\"'\"\n day=nums[2]\n year=nums[0]+2000\n if dateorder == 'YMD':\n return (year,month,day),0.5 #Relatively sure since we have a specified date order\n else:\n print \"Warning - guessing at YMD order for '\"+s+\"'\"\n parseoutput += \"Warning - guessing at YMD order for '\"+s+\"'\\n\"\n return (year,month,day),0 #Only guess it's YMD\n else:\n assert (nums[0] >= 1) and (nums[0] <= mlen[month-1]), \"guess DMY, Day invalid in '\"+s+\"'\"\n day=nums[0]\n year=nums[2]+2000\n if dateorder == 'DMY':\n return (year,month,day),0.5 #Relatively sure since we have a specified date order\n else:\n print \"Warning - guessing at DMY order for '\"+s+\"'\"\n parseoutput += \"Warning - guessing at DMY order for '\"+s+\"'\\n\"\n return (year,month,day),0 #Only guess it's DMY",
"def get_date(prompt,format):\n while True:\n value = input(prompt)\n if (len(value) == 0):\n return None\n else:\n try:\n value = value.rstrip()\n output = datetime.strptime(value, format)\n return output\n except ValueError:\n print(\"Cannot parse time: {0}\\n\".format(value))",
"def generate_calendar():\n\n while True:\n user_cal = input(\"Enter a month and year as numbers, separated by a space: \")\n contains_letters = bool([letter for letter in user_cal if letter in letters])\n\n if contains_letters == True:\n print(\"Invalid input. Try again. \\n\")\n\n else: \n if user_cal == '':\n print(\"Current month:\" , datetime.now().month)\n break\n\n elif len(user_cal) == 1:\n return c.formatmonth(2019, int(user_cal))\n break\n \n elif len(user_cal) > 1:\n list_user_input = user_cal.split(\" \")\n month = int(list_user_input[0])\n year = int(list_user_input[1])\n \n return c.formatmonth(year, month)\n break",
"def date_validate(self, date): # Is it a poem?)\n date = list(map(lambda x: str(x), date))\n if len(date[1]) == 1:\n date[1] = '0' + date[1]\n if len(date[2]) == 1:\n date[2] = '0' + date[2]\n if len(date[3]) == 1:\n date[3] = '0' + date[3]\n if len(date[4]) == 1:\n date[4] = '0' + date[4]\n return \"{}.{}.{} {}:{}\".format(date[2],date[1],date[0], date[3], date[4])",
"def user_input():\n birthday = input(\"What is your birthday (Please format it by mm/dd/yyyy): \")\n year = int(birthday.strip().split(\"/\")[2])\n day = int(birthday.strip().split(\"/\")[1])\n month = int(birthday.strip().split(\"/\")[0])\n return year, month, day",
"def validate_date(ctx, param, value):\n\ttry:\n\t\tdate = datetime.datetime.strptime(value, '%Y-%m-%d').strftime('%d/%m/%Y')\n\t\treturn date\n\texcept ValueError:\n\t\traise click.BadParameter(\"Incorrect format, datetime format should match %Y-%m-%d\")",
"def opt_user_input_date(prompt, value, do_ask_test=None, is_valid_test=None, invalid_msg=None, error_on_none=True, **kwargs):\n do_ask_test = _get_do_ask_fxn(do_ask_test)\n if is_valid_test is None:\n is_valid_test = lambda value: isinstance(value, (dt.date, dt.datetime))\n elif is_valid_test is not None and invalid_msg is None:\n UIErrorWrapper.raise_error(UIValueError('If is_valid_test is given, invalid_msg should be to so that the error '\n 'message matches the test'))\n elif invalid_msg is None:\n invalid_msg = 'The given value must be an instance of datetime.date or datetime.datetime' if invalid_msg is None else invalid_msg\n\n response = _optional_input(user_input_date, prompt, value, do_ask_test, is_valid_test, invalid_msg, **kwargs)\n if response is None and error_on_none:\n UIErrorWrapper.raise_error(UIOptNoneError('The user failed to provide a value'))\n\n return response",
"def get_borndied(summary):\n\n result = re.findall(\"\\d{4}\", summary)\n\n if len(result) >= 2:\n borndied = result[0] + \"-\" + result[1]\n if not raw_input(\"Accept %s? [y]/n: \" % (borndied)):\n return borndied\n\n else:\n borndied = result[0] + \"-\"\n if not raw_input(\"Accept %s? [y]/n: \" % (borndied)):\n return borndied\n\n elif len(result) >= 1:\n borndied = result[0] + \"-\"\n\n resp = raw_input(\"Accept %s? [y]/n: \" % (borndied))\n if not resp:\n return borndied\n\n else:\n return raw_input(\"Enter Dates: \")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Takes the first row of a two-row belief np array and converts it to a dict indexed by label of positive beliefs | def np_to_belief(np_array,labels):
return dict((l,np_array[0,i]) for i,l in enumerate(labels)) | [
"def faces_to_dict(faces):\n label_list = np.unique(faces)\n adj_dict = dict()\n for label in label_list:\n adj_list = list(np.unique(faces[np.where(faces == label)[0]]))\n adj_list.remove(label)\n adj_dict[label] = adj_list\n return adj_dict",
"def label_counts(self):\n d={}\n for item in self._y:\n if item in d:\n d[item] += 1\n else:\n d[item] = 1\n \n #for key,value in d.items():\n # print(str(k)+':'+str(v)) \n \n #easier way to do\n #unique,counts = np.unique(self._y,return_counts=True) \n #d = dict(zip(unique,counts))\n #return dict({1:0, -1:0})\n\n return d",
"def elan_annotation_to_binary(annotation_data):\n label_dict = {}\n for annotation in annotation_data:\n label = 1 if annotation[2] == 'Engaged' else 0\n label_dict[\"{0},{1}\".format(annotation[0], annotation[1])] = label\n return label_dict",
"def MAP_labeling(beliefs):\r\n return np.argmin(beliefs, axis=2)",
"def one_hot_encoding(raw_feats, ohe_dict_broadcast, num_ohe_feats):\n return SparseVector(num_ohe_feats,[(ohe_dict_broadcast.value.get((feat[0],feat[1])),1) for feat in raw_feats])",
"def one_hot_encoding(raw_feats, ohe_dict_broadcast, num_ohe_feats):\n return SparseVector(num_ohe_feats,[(ohe_dict_broadcast.value.get((feat[0],feat[1])),1) for feat in raw_feats if (feat[0],feat[1]) in ohe_dict_broadcast.value])",
"def init_label_dict(num_classes):\n label_dict={}\n for i in range(num_classes):\n label_dict[i]=(0,0,0)\n return label_dict",
"def feature_dict(sent, i):\n # WORK HERE!!\n return {}",
"def one_hot_vocab_encoding(w2vp: W2VPreprocessor \n ) -> Dict[str, np.ndarray]:\n return {\n w: i for i, w in enumerate(w2vp.vocabulary)\n }",
"def coherent_subsequent_states(Infomap_labels):\r\n unique_labels= np.unique(Infomap_labels)\r\n dictionary= {}\r\n for i in range(len(unique_labels)):\r\n label_index=[]\r\n for j in range(len(Infomap_labels)):\r\n if unique_labels[i]==Infomap_labels[j]:\r\n label_index.append(j)\r\n subsequent=groupSequence(label_index)\r\n \r\n dictionary[i]=subsequent\r\n \r\n return dictionary",
"def make_represented_genders(metric_df, label_lang):\n return dict(metric_df[['bias_value', 'bias_label']].drop_duplicates().to_dict('split')['data'])",
"def index_dict(self):\n msk = self.load_mask()\n mski = enumerate(msk)\n ifiltered = (i for (i, m) in mski if m == 1)\n return {i: j for (j, i) in enumerate(ifiltered)}",
"def _get_classify_labels(df):\n labels = np.ones((len(df), 1), dtype=dtype) * 2\n labels[df['A-coref']] = 0\n labels[df['B-coref']] = 1\n return labels",
"def get_label_map(labels):\n label_map = dict()\n for i,v in enumerate(np.ravel(labels.data)):\n if v in label_map.keys():\n label_map.get(v).append(i)\n else:\n label_map[v] = [i]\n return label_map",
"def feature_key_and_substance_value(self, as_labels=False):\n dict_ = {}\n for feature_key, feature_value in self.features().items():\n if as_labels:\n dict_.update({feature_value['label']: feature_value['substance']})\n else:\n dict_.update({feature_key: feature_value['substance']})\n return dict_",
"def encode_ST_labels(labels):\n return np.array([1 if sentiment == 'bullish' else 0 for sentiment in labels])",
"def get_training_labels():\n\n\tmapping = dict()\n\tmapping[constants.ASCause.apsp] = 0\n\tmapping[constants.ASCause.bl] = 1\n\tmapping[constants.ASCause.ce] = 2\n\tmapping[constants.ASCause.dfl] = 3\n\tmapping[constants.ASCause.lrssi] = 4\n\tmapping[constants.ASCause.pwr_state] = 5\n\treturn mapping",
"def labels_dict(self):\n return dict(zip(self.labels, self.gens))",
"def onehot_encoding(labels):\n #dataframe = self.cf\n unique = np.unique(labels).tolist()\n one_hot_labels = np.zeros((len(labels), len(unique)))\n for i in range(len(labels)):\n #print(cf.iloc[i,loc])\n pitch = labels[i]\n ind = unique.index(pitch)\n one_hot_labels[i, ind] = 1\n return one_hot_labels, unique"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Takes a list of votes and predicts based on a threshold; returns True iff the fraction of true votes >= f | def thresh_vote(lst, f):
if len(lst) == 0: # guess 0 by default (appropriate for our dataset)
q = 0
else:
q = float(sum(lst)) / len(lst)
return q >= f | [
"def _fit_threshold(self):\n self.threshold = 0\n current_best = 0\n for i in range(1000):\n old = self.threshold\n self.threshold = i/1000\n f = f1_score(self.y, self.predict(self.pred, self.X_text))\n if f <= current_best:\n self.threshold = old\n else:\n current_best = f",
"def returnPreds(preds, threshold):\n predicted_labels = []\n for pred in preds:\n if pred >= threshold:\n predicted_labels.append(1)\n else:\n predicted_labels.append(0)\n\n return predicted_labels",
"def ensemble_vote(int_img, classifiers):\n return 1 if sum([c.get_vote(int_img) for c in classifiers]) >= 0 else 0",
"def classify(self, predictions_list, threshold, predicates):\n classifications = []\n\n for i in range(len(predictions_list)):\n if predictions_list[i][0] >= threshold[predicates[i]]:\n classifications.append(1)\n else:\n classifications.append(0)\n\n return classifications",
"def predictRating(toPredict, candidateList):\n\n ratingRelevantCandidates = []\n\n #Remove candidates with no rating specified\n for candidate in candidateList:\n currentCandidate = candidate[1]\n\n if float(currentCandidate['vote_avg']) > 0:\n ratingRelevantCandidates.append((float(currentCandidate['vote_avg']), candidate))\n\n #print(\"ratings::::::::\",currentCandidate['vote_avg'])\n\n #Remove outlier candidates based on rating\n ratingMean = np.mean([x[0] for x in ratingRelevantCandidates])\n print(\"ratingMean\", ratingMean)\n ratingSD = np.std([x[0] for x in ratingRelevantCandidates])\n print(\"ratingSD\", ratingSD)\n\n finalRatings = [x for x in ratingRelevantCandidates if (float(x[0]) < ratingMean + ratingSD)]#1.5 *\n finalRatings = [x for x in finalRatings if (float(x[0]) > ratingMean - ratingSD)]#.75 *\n\n finalRatingCandidatesWithWeight = []\n\n #Weight each candidate based on vote count, direct and actor popularity and matching score from part 1\n for candidate in finalRatings:\n directorPoints = compareDirectorPoints(toPredict['director'], candidate[1][1]['director'])\n actorPoints = compareActorPoints(toPredict['cast'], candidate[1][1]['cast'])\n voteCountPoints = int(candidate[1][1]['vote_count'])\n matchPoints = candidate[1][0] / np.max([float(x[1][0]) for x in finalRatings]) * 100\n candidateWeight = PREDICTION_MATCHPOINTS_WEIGHT * matchPoints \\\n + PREDICTION_ACTOR_WEIGHT * actorPoints \\\n + PREDICTION_DIRECTOR_WEIGHT * directorPoints \\\n + PREDICTION_VOTECOUNT_WEIGHT * voteCountPoints\n\n finalRatingCandidatesWithWeight.append((candidateWeight, candidate[0]))\n\n #Calculate the prediction\n sumRatingCandidateWeights = np.sum([float(x[0]) for x in finalRatingCandidatesWithWeight])\n sumRatingTimesCandidateWeight = np.sum([float(x[0]) * float(x[1]) for x in finalRatingCandidatesWithWeight])\n\n ratingPrediction = float(sumRatingTimesCandidateWeight / sumRatingCandidateWeights)\n\n return ratingPrediction",
"def adjusted_classes(pred_prob, threshold):\n return [1 if y >= threshold else 0 for y in pred_prob]",
"def apply_threshold(pred_prob_labels, threshold):\n\n return list(map(lambda pred: 1 if pred >= threshold else 0,\n pred_prob_labels))",
"def thresh_pred(in_pred: np.ndarray, thresh_pred: float = .5):\n out_pred = in_pred.copy()\n out_pred[in_pred >= thresh_pred] = 1\n out_pred[in_pred < thresh_pred] = 0\n\n return out_pred",
"def evaluate(labels, predictions):\n positive_count = 0\n positive = 0\n negative_count = 0\n negative = 0\n for i in range(len(labels)):\n if labels[i] == 1:\n positive_count+=1\n if predictions[i] == 1:\n positive +=1\n else:\n negative_count+=1\n if predictions[i] == 0:\n negative +=1\n\n sensitivity = positive / positive_count\n specificity = negative / negative_count\n\n return (sensitivity, specificity)",
"def threshold_predictions(predictions, thr=0.5):\n thresholded_preds = np.copy(predictions)[:]\n low_values_indices = thresholded_preds < thr\n thresholded_preds[low_values_indices] = 0\n low_values_indices = thresholded_preds >= thr\n thresholded_preds[low_values_indices] = 1\n return thresholded_preds.astype(np.int)",
"def adjust_predictions_by_threshold(y_predprob,threshold = 0.5,meta = None):\n\n # we want to achieve a high recall with the first build_model -> lower threshold\n if meta is None:\n\n # adjust the threshold\n threshold = threshold #- 0.4\n\n # safety:\n if threshold <= 0.2:\n\n threshold = 0.2\n\n print(f\"The new adjusted threshold in order to gain high recall is: {round(threshold, 2)}.\")\n print(\"-\" * 10)\n\n else:\n pass\n\n # empty list to save the predictions\n predictions = []\n\n # loop through predictions\n for x in y_predprob:\n\n # if probability for positive return bigger then ceratin threshold declare return as positive\n if x[1] > threshold:\n\n return_ = 1\n\n else:\n\n if meta is None:\n return_ = -1\n\n else:\n return_ = 0\n\n # append to list\n predictions.append(return_)\n\n return predictions",
"def _get_one_prediction(threshold, value):\n if value < threshold[0]:\n return 0\n elif threshold[0] <= value < threshold[1]:\n return 1\n elif threshold[1] <= value < threshold[2]:\n return 2\n elif threshold[2] <= value < threshold[3]:\n return 3\n else:\n return 4",
"def far_score(target: torch.Tensor, preds: torch.Tensor, threshold: float = 0.5) -> float:\n\ttarget = target.cpu().detach().numpy()\n\tpreds = (preds.cpu().detach().numpy() > threshold).astype(int)\n\ttn, fp, fn, tp = confusion_matrix(target, preds).ravel()\n\treturn fp / (fp + tn)",
"def accuracy(targets: List[int], preds: Union[List[float], List[List[float]]], \n threshold: float = 0.5) -> float:\n if type(preds[0]) == list: # multiclass\n hard_preds = [p.index(max(p)) for p in preds]\n else:\n hard_preds = [1 if p > threshold else 0 for p in preds] # binary prediction\n\n return accuracy_score(targets, hard_preds)",
"def apply_threshold(probs, thred):\n preds = [1 if p >= thred else -1 for p in probs]\n return np.array(preds)",
"def SoftVotingClassifier(probabilities1, probabilities2, probabilities3):\n probabilities1 = np.array(probabilities1)\n probabilities2 = np.array(probabilities2)\n probabilities3 = np.array(probabilities3)\n preds_ensemble=[]\n for i in range(probabilities1.shape[0]): #aquรญ et vindrร , per cada imatge, un vector\n list_probs=[]\n for j in range(probabilities1.shape[1]):\n list_probs.append((probabilities1[i][j]+probabilities2[i][j]+probabilities3[i][j])/3)\n print('list_probs',list_probs)\n preds_ensemble.append(list_probs.index(max(list_probs))) #et torna la posiciรณ del mร xim, s'ha de comprovar que vagi de 0 a 6\n\n return preds_ensemble",
"def accuracy(targets: List[int],\n preds: Union[List[float], List[List[float]]],\n threshold: float = 0.5) -> float:\n if type(preds[0]) == list: # multiclass\n hard_preds = [p.index(max(p)) for p in preds]\n else:\n hard_preds = [1 if p > threshold else 0 for p in preds] # binary prediction\n\n return accuracy_score(targets, hard_preds)",
"def specificity(\n targets: List[int], preds: List[float], threshold: float = 0.5\n) -> float:\n hard_preds = [1 if p > threshold else 0 for p in preds]\n tn, fp, _, _ = confusion_matrix(targets, hard_preds).ravel()\n return tn / float(tn + fp)",
"def ensembleVote(x, classes, ensemble):\n votes = np.array([0 for kk in range(len(classes))])\n for i in ensemble:\n votes = votes + classProbs(x, ensemble[i][\"tree\"], classes)\n maxVote = 0\n loc = None\n for ind, vote in enumerate(votes):\n if vote > maxVote:\n maxVote = vote\n loc = ind\n prediction = classes[loc]\n return prediction"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Takes dictionaries of predicted and ground-truth labels and returns the confusion matrix entries (tp, tn, fp, fn) | def confusion_matrix(predicted, gt):
tp = [k for k in predicted if predicted[k] and gt[k]]
tn = [k for k in predicted if not predicted[k] and not gt[k]]
fp = [k for k in predicted if predicted[k] and not gt[k]]
    fn = [k for k in predicted if not predicted[k] and gt[k]]
return tp, tn, fp, fn | [
"def confusion_matrix(actual, predictions):\n\n if predictions.shape[0] != actual.shape[0]:\n raise ValueError(\"predictions and actual must be the same length!\")\n\n confuse_matrix = np.random.uniform(0, 10, size=(2, 2))\n # True Positive (TP): we predict a label of 1 (positive), and the true label is 1.\n TP = np.sum(np.logical_and(predictions == 1, actual == 1))\n\n # True Negative (TN): we predict a label of 0 (negative), and the true label is 0.\n TN = np.sum(np.logical_and(predictions == 0, actual == 0))\n\n # False Positive (FP): we predict a label of 1 (positive), but the true label is 0.\n FP = np.sum(np.logical_and(predictions == 1, actual == 0))\n\n # False Negative (FN): we predict a label of 0 (negative), but the true label is 1.\n FN = np.sum(np.logical_and(predictions == 0, actual == 1))\n\n confuse_matrix[0, 0] = TN\n confuse_matrix[0, 1] = FP\n confuse_matrix[1, 0] = FN\n confuse_matrix[1, 1] = TP\n\n return confuse_matrix",
"def confusion_matrix(\n pred: np.ndarray, gt: np.ndarray, num_classes: int\n) -> np.ndarray:\n # row: ground truth\n # column: prediction\n coding = gt * num_classes + pred\n counting = np.bincount(coding, minlength=num_classes * num_classes)\n return counting.reshape(num_classes, num_classes)",
"def confusion_matrix(prediction, truth):\n\n confusion_vector = prediction / truth\n true_positives = torch.sum(confusion_vector == 1).item()\n false_positives = torch.sum(confusion_vector == float('inf')).item()\n true_negatives = torch.sum(torch.isnan(confusion_vector)).item()\n false_negatives = torch.sum(confusion_vector == 0).item()\n\n return true_positives, false_positives, true_negatives, false_negatives",
"def get_confusion_matrix(predictions, y):\n matrix = [0,0,0,0]\n #iterate predictions and compare with y data\n for i in range(len(predictions)):\n if(predictions[i] == 1 and y[i] == 1): #true positive\n matrix[0] += 1\n elif(predictions[i] == 0 and y[i] == 0): #true negative\n matrix[1] += 1\n elif(predictions[i] == 1 and y[i] == 0): #false positive\n matrix[2] += 1\n elif(predictions[i] == 0 and y[i] == 1): #false negative\n matrix[3] += 1\n return matrix",
"def Confusion_Matrix(predicted_labels: list, actual_labels: list):\n labels = set(actual_labels)\n\n predicted_labels = list(map(custom_round, predicted_labels))\n\n matrix = pd.DataFrame(index=labels, columns=labels)\n\n matrix = matrix.fillna(0)\n\n for i in range(len(actual_labels)):\n matrix[actual_labels[i]][predicted_labels[i]] += 1\n m = matrix.values\n\n plt.matshow(m, cmap=plt.cm.Blues)\n\n for i in range(2):\n for j in range(2):\n c = m[j, i]\n plt.text(i, j, str(c), va='center', ha='center')\n\n plt.show()",
"def confusion_matrix(expected, predicted):\n\n retval = numpy.zeros((10,10), dtype=float)\n\n for k in range(10):\n pred_k = predicted[expected==k] # predictions that are supposed to be 'k'\n retval[:,k] = numpy.array([len(pred_k[pred_k==p]) for p in range(10)])\n retval[:,k] /= len(pred_k)\n\n return retval",
"def confusion_mat(model: keras.Model, evaluation_data: np.ndarray, evaluation_labels: np.ndarray):\n pred = np.zeros([1,5])\n # let the model predict the output for the data\n for d in evaluation_data:\n pred = np.concatenate((pred, model.predict(np.expand_dims(d,axis=0))), axis=0)\n\n # print(pred[1:,...])\n # print(evaluation_labels)\n # compute the confusion matrix\n mat = sklearn.metrics.confusion_matrix(evaluation_labels, pred[1:,...].argmax(axis=1)+1)\n\n return mat/mat.sum(axis=1, keepdims=True)",
"def _confusion_matrix(self, y_true, y_pre):\n return confusion_matrix(y_true, y_pre)",
"def Evaluate_Prediction(prediction_mask, true_mask, feature_dict, \n test_name = 'Test'):\n \n # true_mask has 3 layers but they are redundant\n true_mask = true_mask[:,:,0]\n \n # Convert from Prob to 0,1,2...\n prediction_mask = prediction_mask.argmax(axis = 2) + 1 \n\n # Compute confusion matrix -- subtract 1 so that first label is \"0\" \n conf = custom_confusion_matrix(prediction_mask.flatten(), true_mask.flatten(), feature_dict)\n \n # Convert mask to proper shape for loss function - shape should have 4 dimensions with one-hot encoding\n true_mask = Expand_Mask(mask = true_mask, num_class = len(feature_dict)) ## to 0,1\n true_mask = np.expand_dims(true_mask, axis=0)\n true_mask = true_mask.astype(np.float)\n\n # Convert prediction into proper shape for loss function\n prediction_mask = Expand_Mask(mask = prediction_mask, num_class = len(feature_dict)) #to 0,1\n prediction_mask = np.expand_dims(prediction_mask, axis=0) \n prediction_mask = prediction_mask.astype(np.float)\n \n score = {'Test':test_name, \n 'Dice':Dice_Coef_Multilabel(true_mask, prediction_mask).numpy(), \n 'Accuracy':np.mean(tf.metrics.categorical_accuracy(true_mask, prediction_mask)), \n 'CE':np.mean(tf.metrics.categorical_crossentropy(true_mask, prediction_mask))}\n \n return [score, conf]",
"def confusion_matrix(pred, gt, thres=0.5):\n TP = np.sum((gt == 1) & (pred > thres))\n FP = np.sum((gt == 0) & (pred > thres))\n TN = np.sum((gt == 0) & (pred <= thres))\n FN = np.sum((gt == 1) & (pred <= thres))\n return (TP, FP, TN, FN)",
"def confusion_matrix(\n true_labels,\n predicted_labels\n ) -> np.array:\n n_samples_true, n_samples_predicted = len(true_labels), len(predicted_labels)\n if n_samples_true != n_samples_predicted:\n raise ValueError()\n n_classes = len(set(true_labels))\n matrix = np.zeros((n_classes,n_classes))\n for i in range(len(true_labels)):\n true_label = true_labels[i]\n predicted_label = predicted_labels[i]\n matrix[predicted_label][true_label] += 1\n return matrix",
"def radio_confusion_matrix(ground_truth: Radio,\n prediction: Radio) -> ConfusionMatrixMetricValue:\n key = get_identifying_key([prediction.answer], [ground_truth.answer])\n prediction_id = getattr(prediction.answer, key)\n ground_truth_id = getattr(ground_truth.answer, key)\n return [1, 0, 0, 0] if prediction_id == ground_truth_id else [0, 1, 0, 1]",
"def make_metrics(self, predictions):\n\n pred_idx = []\n pred_classes = []\n\n target_idx = []\n target_classes = []\n target_count = len(self._dataset.class_idx2text)\n\n for data_id, pred in predictions.items():\n target = self._dataset.get_ground_truth(data_id)\n\n pred_idx.append(pred[\"class_idx\"])\n pred_classes.append(self._dataset.class_idx2text[pred[\"class_idx\"]])\n\n target_idx.append(target[\"class_idx\"])\n target_classes.append(target[\"class_text\"])\n\n metrics = {\n \"accuracy\": simple_accuracy(pred_idx, target_idx),\n }\n\n if target_count == 2:\n # binary class\n f1_metric = f1(pred_idx, target_idx)\n metrics.update(f1_metric)\n\n matthews_corr_metric = matthews_corr(pred_idx, target_idx)\n metrics.update(matthews_corr_metric)\n return metrics",
"def confusion_matrix(df):\n rows, true_counts = np.unique(df[\"label\"].values, return_counts=True)\n cols, predicted_counts = np.unique(df[\"label\"].values, return_counts=True)\n\n matrix = np.ndarray(shape=(len(rows), len(cols)), dtype=float)\n for ri, row in enumerate(rows):\n for ci, col in enumerate(cols):\n matrix[ri][ci] = len(df[(df.label == row) & (df.classification == col)])\n\n return matrix, rows, cols",
"def confusionMatrix(model, x_test, y_true):\n\n if model == None:\n _confusion_matrix = confusion_matrix(y_true, x_test).ravel()\n else:\n _confusion_matrix = confusion_matrix(y_true, model.predict(x_test)).ravel()\n\n return _confusion_matrix",
"def create_confusion_matrix(actual, predicted, category):\n conf_matrix = dict()\n conf_matrix['TP'], conf_matrix['FP'], conf_matrix['TN'], conf_matrix['FN'] = 0, 0, 0, 0\n\n print('The category is: {}'.format(category))\n for sentence in predicted:\n if sentence in actual[predicted[sentence]] and predicted[sentence] == category:\n print('TP: Actual: {}, Predicted: {}'.format(category, category))\n conf_matrix['TP'] += 1\n elif sentence in actual[predicted[sentence]]:\n print('TN: Actual: not category, Predicted: not category'.format(predicted[sentence]))\n conf_matrix['TN'] += 1\n elif sentence not in actual[predicted[sentence]] and predicted[sentence] == category:\n print('FP: Actual: not category, Predicted: {}'.format(category))\n conf_matrix['FP'] += 1\n else:\n print('FN: Actual: {}, Predicted: {}'.format(category, predicted[sentence]))\n conf_matrix['FN'] += 1\n\n return conf_matrix",
"def confusion_matrix(y_true, y_pred):\n skplt.plot_confusion_matrix(y_true, y_pred, normalize=True)\n plt.show()",
"def _prep_confusion_matrix(self, y_test, y_pred, labels):\n\n # Calculate confusion matrix and flatten it to a simple array\n if len(y_test.shape) == 1:\n confusion_array = metrics.confusion_matrix(y_test, y_pred).ravel()\n\n # Structure into a DataFrame suitable for Qlik\n result = []\n i = 0\n for t in labels:\n for p in labels:\n result.append([str(t), str(p), confusion_array[i]])\n i = i + 1\n self.model.confusion_matrix = pd.DataFrame(result, columns=[\"true_label\", \"pred_label\", \"count\"])\n self.model.confusion_matrix.insert(0, \"model_name\", self.model.name)\n # Handle confusion matrix format for multi-label classification\n else:\n confusion_array = metrics.multilabel_confusion_matrix(y_test, y_pred)\n result = pd.DataFrame(confusion_array.reshape(-1, 4), columns=[\"true_negative\", \"false_positive\", \"false_negative\", \"true_positive\"])\n self.model.confusion_matrix = pd.DataFrame(np.arange(len(confusion_array)), columns=[\"step\"])\n self.model.confusion_matrix = pd.concat([self.model.confusion_matrix, result], axis=1)\n self.model.confusion_matrix.insert(0, \"model_name\", self.model.name)",
"def get_confusion_matrix_elements(groundtruth_list, predicted_list):\n _assert_valid_lists(groundtruth_list, predicted_list)\n\n if _all_class_1_predicted_as_class_1(groundtruth_list, predicted_list) is True:\n tn, fp, fn, tp = 0, 0, 0, np.float64(len(groundtruth_list))\n\n elif _all_class_0_predicted_as_class_0(groundtruth_list, predicted_list) is True:\n tn, fp, fn, tp = np.float64(len(groundtruth_list)), 0, 0, 0\n\n else:\n tn, fp, fn, tp = sklearn.metrics.confusion_matrix(groundtruth_list, predicted_list).ravel()\n tn, fp, fn, tp = np.float64(tn), np.float64(fp), np.float64(fn), np.float64(tp)\n\n return tn, fp, fn, tp"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns argmax, max of dictionary | def argmax(d):
    return max(d.items(), key=operator.itemgetter(1))
"def keywithmaxval(d):\n\treturn max(d, key=lambda k: d[k])",
"def max(self):\n try:\n res = {\n 'target': self.target.max(),\n 'params': dict(\n zip(self.keys, self.params[self.target.argmax()])\n )\n }\n except ValueError:\n res = {}\n return res",
"def keyMax(d): \n\tv=list(d.values())\n\tk=list(d.keys())\n\treturn k[v.index(max(v))]",
"def keyWithMaxVal(d): \n v=list(d.values())\n k=list(d.keys())\n return k[v.index(max(v))]",
"def keywithmaxval(d): \n v=list(d.values())\n k=list(d.keys())\n return k[v.index(max(v))]",
"def keywithmaxval(d):\n v=list(d.values())\n k=list(d.keys())\n return k[v.index(max(v))]",
"def argmax(self, values: pdarray) -> Tuple[groupable, pdarray]:\n k, v = self.aggregate(values, \"argmax\")\n return k, cast(pdarray, v)",
"def _max_key(self, data):\n dict_max = max([key for sub_data in data.values()\n for key in sub_data])\n key_max = self.x_max\n if dict_max > self.x_max:\n key_max = dict_max\n return key_max",
"def most_occured(dict):\n\n max = dict['e']\n max_alpha = 'e'\n\n for i, j in zip(dict.values(), dict.keys()):\n\n if max < i:\n max = i\n max_alpha = j\n \n return max_alpha",
"def max_in_dic(value_as_number_dic):\n\n is_init = False\n max_val = 0\n max_key = -1\n for (key, val) in value_as_number_dic.items():\n if not is_init:\n max_val = val\n max_key = key\n is_init = True\n elif val > max_val:\n max_val = val\n max_key = key\n return max_key, max_val",
"def get_max(prob_matrix):\n maxi = 0\n max_guard = \"\"\n max_exit = \"\"\n for (guard, exits_dic) in prob_matrix.items():\n new_maxi = max(maxi, max(exits_dic.values()))\n if new_maxi > maxi:\n maxi = new_maxi\n max_guard = guard\n max_exit = max(exits_dic, key=exits_dic.get)\n\n return (maxi, max_guard, max_exit)",
"def data_dict_max(data_dict, feature):\n name = max(filter(lambda k: isinstance(data_dict[k][feature],\n (int, float)), data_dict), key=lambda k: data_dict[k][feature])\n\n return name, data_dict[name][feature]",
"def argmax(self, evidence={}):\n if len(evidence)==0:\n return self.v.ind2sub(self.t.argmax())\n ax = tuple([ evidence[v] if v in evidence else slice(None) for v in self.v ])\n return self.v.ind2sub( self.t[ax].argmax() )",
"def key_of_max(d):\n keys = list(d.keys())\n keys.sort()\n return max(keys, key=lambda x: d[x])",
"def max_map(freq_map):\n\n max_val = max(freq_map.values())\n return max_val",
"def maximumLikelihood(self):\n _, val = max((prob, val) for val, prob in self.items())\n return val",
"def get_max_power(data_dic):\n print max(data_dic, key=data_dic.get), max(data_dic.values())",
"def argmax(self) -> int:\n return self.actuator_values.index(self.max)",
"def max_argmax(self, iterable):\r\n\t\treturn max(enumerate(iterable), key=lambda x: x[1])"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Produce nboot bootstrap samples from applying func to data | def bootstrap(data,func,nboot):
n = len(data)
resamples = np.array([[random.choice(data) for i in range(n)]
for j in range(nboot)])
return np.apply_along_axis(func, 1, resamples) | [
"def bootstrap_replicate_1d(data, func):\r\n bs_sample = np.random.choice(data, len(data))\r\n return func(bs_sample)",
"def bootstrap_replicate_1d(data, func):\n bs_sample = np.random.choice(data, len(data))\n return func(bs_sample)",
"def bootstrap_replicate_1d(data, func):\n bs_sample = np.random.choice(data, len(data))\n return func(bs_sample)",
"def bootstrap_statistic(data, stats_fn, num_samples):\n return [stats_fn(bootstrap_sample(data))\n for _ in range(num_samples)]",
"def bootstrap(dataset, n=20):\n\n # load in file\n x = read_pproc_dataset(dataset)\n\n bootstrap_samples = []\n data = np.asarray(list(docbin_to_docs(x)), dtype=object)\n length = len(data)\n for i in range(n):\n bootstrap_samples.append(np.random.choice(data, replace=True, size=length))\n\n return bootstrap_samples",
"def bootstrap_sample(data):\n return [random.choice(data) for _ in data]",
"def bootstrap_data(self):\n for i in range(self.bootstraps):\n df_i = self.training_df.groupby(\n self.random_effect, group_keys=False\n ).apply(\n lambda x: x.sample(len(x), replace=True)\n )\n self.models.append(self.convert(df=df_i))",
"def standard_bootstrap(dataset):\n randseed=np.random.randint(0,10000)\n np.random.seed(randseed)\n \n n = dataset.shape[0]\n b = np.random.randint(0, high=n-1, size=n)\n return dataset[b]",
"def bootstrap_replicate_1d(data, func):\n\treturn func(np.random.choice(data, size=len(data)))",
"def bootstrap(f, vs, num, seed=None):\n\n resamples = samplebootstrap(vs, num, seed)\n boot = evalbootstrap(f, resamples, num)\n\n return boot, resamples",
"def dataset_augmentation(data_start, bootstrapping = 1, epurate = 1, shuffle = True):\n data = data_start\n for ii in range(bootstrapping):\n data = data.append(data_start.apply(bootstrap_sample, axis=1), ignore_index=True)\n\n#Bugged version that weirdly works well....\n# for ii in range(bootstrapping):\n # data = data.append(bootstrap_sample(data_start), ignore_index=True)\n\n for ii in range(epurate):\n data = data.append(data_start.apply(epurate_sample, axis=1), ignore_index=True)\n\n # Shuffling (Important)\n if shuffle == True:\n data = data.sample(frac=1)\n return data",
"def bootstrap_resample(X, n_boots=1000):\n return np.vstack(\n [X[np.floor(np.random.rand(len(X)) * len(X)).astype(int)] for ii in np.arange(n_boots)]).T",
"def bootstrap_sample(data: List[X]) -> List[X]:\n\treturn [random.choice(data) for _ in data]",
"def bootstrap_statistic(data: List[X],\n stats_fn: Callable[[List[X]], Stat],\n num_samples: int) -> List[Stat]:\n\treturn [stats_fn(bootstrap_sample(data)) for _ in range(num_samples)]",
"def bootstrap_statistic(data: List[X],\r\n stats_fn: Callable[[List[X]], Stat],\r\n num_samples: int) -> List[Stat]:\r\n return [stats_fn(bootstrap_sample(data)) for _ in range(num_samples)]",
"def bootstrap_train(model, X, y, bootstraps=1000, **kwargs):\n bootstrap_models = []\n for i in range(bootstraps):\n boot_idxs = np.random.choice(X.shape[0], size=X.shape[0], replace=True)\n X_boot = X[boot_idxs, :]\n y_boot = y[boot_idxs]\n M = model(**kwargs)\n M.fit(X_boot, y_boot)\n bootstrap_models.append(M)\n return bootstrap_models",
"def bootstrap_sample(test_x, test_y, model, n):\n aucs = []\n for sample in range(n):\n ind_pos = np.where(test_y.values > 0)\n ind_neg = np.where(test_y.values <= 0)\n pos_x = test_x[ind_pos[0], ]\n neg_x = test_x[ind_neg[0], ]\n pos_y = test_y.iloc[ind_pos[0]]\n neg_y = test_y.iloc[ind_neg[0]]\n resampled_pos_x, resampled_pos_y = resample(pos_x, pos_y)\n resampled_neg_x, resampled_neg_y = resample(neg_x, neg_y)\n resampled_x = scipy.sparse.vstack((resampled_pos_x, resampled_neg_x))\n resampled_y = pd.concat((resampled_pos_y, resampled_neg_y), axis=0)\n probs = model.predict_proba(resampled_x)\n aucs.append(roc_auc_score(resampled_y.replace(\n to_replace=-1, value=0), probs[:, 1]))\n # Return 95% confidence interval\n CI = (np.percentile(aucs, 2.5), np.percentile(aucs, 97.5))\n return CI",
"def generate_samples(self, n_samples):",
"def bootstrap(series, func=statistics.mean, confidence=0.9):\n n = len(series)\n n_bootstrap = 250\n digests = []\n for j in range(n_bootstrap):\n bootstrap_sample = [\n random.choice(series)\n for _ in range(n)\n ]\n digest = func(bootstrap_sample)\n digests.append(digest)\n digests.sort()\n low, mid, high = (1.0-confidence)/2.0, 0.5, (1.0+confidence)/2.0\n low, mid, high = int(low*n_bootstrap), int(mid*n_bootstrap), int(high*n_bootstrap)\n return digests[low], digests[mid], digests[high]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
trace finds the line, the filename and the error message and returns them to the user | def trace():
import traceback
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
# script name + line number
line = tbinfo.split(", ")[1]
# Get Python syntax error
#
synerror = traceback.format_exc().splitlines()[-1]
return line, __file__, synerror | [
"def trace():\n import traceback, inspect,sys\n tb = sys.exc_info()[2]\n tbinfo = traceback.format_tb(tb)[0]\n filename = inspect.getfile(inspect.currentframe())\n # script name + line number\n line = tbinfo.split(\", \")[1]\n # Get Python syntax error\n #\n synerror = traceback.format_exc().splitlines()[-1]\n return line, filename, synerror",
"def trace():\n import inspect\n import traceback\n import sys\n tb = sys.exc_info()[2]\n tbinfo = traceback.format_tb(tb)[0]\n # script name + line number\n line = tbinfo.split(\", \")[1]\n filename = inspect.getfile(inspect.currentframe())\n # Get Python syntax error\n #\n synerror = traceback.format_exc().splitlines()[-1]\n return line, filename, synerror",
"def trace():\n import traceback, inspect\n tb = sys.exc_info()[2]\n tbinfo = traceback.format_tb(tb)[0]\n filename = inspect.getfile(inspect.currentframe())\n # script name + line number\n line = tbinfo.split(\", \")[1]\n # Get Python syntax error\n #\n synerror = traceback.format_exc().splitlines()[-1]\n return line, filename, synerror",
"def process_stack_trace(trace):\n trace = str(trace)\n str_list = trace.splitlines()\n # Filter out bad re\n for expr in config.LINE_FILTERS:\n str_list = list(filter(lambda s: not expr.search(s), str_list)) # pylint: disable=cell-var-from-loop\n # Ensure the re matches\n for expr in config.LINE_MATCHES:\n str_list = list(filter(expr.search, str_list))\n # We only \"care\" about the message following the first ':' I.E.\n str_list = [word[word.find(':') + 1:] for word in str_list]\n msg = '\\n'.join(str_list)\n return msg",
"def debugger_backtrace():",
"def _ecl_trace(self, *args):\n if erract.trace:\n s = \"\"\n for a in args:\n s += str(a) + \" \"\n sys.stderr.write(s + \"\\n\")\n sys.stderr.flush()",
"def log_trace(errmsg=None):\n from traceback import format_exc\n from twisted.python import log\n print errmsg\n\n tracestring = format_exc()\n if tracestring:\n for line in tracestring.splitlines():\n log.msg('[::] %s' % line) \n if errmsg:\n try:\n errmsg = to_str(errmsg)\n except Exception, e:\n errmsg = str(e)\n for line in errmsg.splitlines():\n log.msg('[EE] %s' % line)",
"def traceback(level=0):\n string = 'Traceback:\\n'\n while not (func_name(level=level) == '<module>'):\n string += line_no(level=level+1)+', '+func_name(level=level+1)+', '+module_name(level=level+1)+'\\n'\n level += 1\n return string.rstrip()",
"def gettrace(): # real signature unknown; restored from __doc__\n pass",
"def _describe_frame(frame):\n filename, name = frame.f_code.co_filename, frame.f_code.co_name\n lineno = frame.f_lineno\n\n with open(filename) as f:\n for no, line in enumerate(f):\n if no + 1 == lineno:\n break\n\n return ' File \"%s\", line %d, in %s\\n %s\\n' % (filename, lineno, name,\n line.strip())",
"def get_trace(self):\n tblist = traceback.extract_tb(sys.exc_info()[2])\n tblist = filter(self.__filter_not_pexpect, tblist)\n tblist = traceback.format_list(tblist)\n return ''.join(tblist)",
"def trace_on_err():\n type, value, tb = sys.exc_info()\n traceback.print_exc()\n last_frame = lambda tb=tb: last_frame(tb.tb_next) if tb.tb_next else tb\n frame = last_frame().tb_frame\n ns = dict(frame.f_globals)\n ns.update(frame.f_locals)\n code.interact(local=ns)",
"def GetTraceback():\n#-------------------------------------------------------------------------------\n return traceback.format_exc()",
"def showtraceback(self):\n try:\n import traceback\n if sys.exc_info()[2] is not None:\n exc_info = traceback.extract_tb(sys.exc_info()[2])\n txt = 'Unexpected Error:'\n txt = txt + '\\n Type : '+str(sys.exc_type) \n if sys.exc_info()[1] is None: \n txt = txt + '\\n Description: '+ 'Undefined\\n'\n\n else: \n txt = txt + '\\n Description: '+str(sys.exc_info()[1]) + '\\n'\n\n if len(exc_info) > 2:\n # Ignore the first 2 tuples- in the context of the interpreter,\n # these are just the console and the exec statement and are\n # irrelevant\n txt = txt + '\\n Traceback:\\n'\n for ctuple in exc_info[2:]:\n txt = txt + '\\n file : '+os.path.basename(ctuple[0])\n txt = txt + '\\n line number: '+str(ctuple[1])\n txt = txt + '\\n function : '+str(ctuple[2])\n txt = txt + '\\n line : '+str(ctuple[3])+'\\n'\n\n else:\n txt = 'Unexpected Error:'\n txt = txt + '\\n No description available.\\n'\n except:\n txt = 'Unexpected Error:'\n txt = txt + '\\n No description available.\\n'\n\n self.write(txt)\n\n # Status of last line pushed via toplevel shell's echo function\n self.last_err = 1",
"def _exceptionStackTTB(self,methodName,exc,depth=10):\n stack = \"\"\n # Reconstruct the call stack from where the trace of the exception was initiated by invoking \n # Trace.error() or Trace.severe().\n stackList = traceback.extract_stack()\n try:\n for stackData in stackList:\n sourcefile,line,function,text = stackData\n if (sourcefile.endswith(\"Trace.py\") and (function == \"error\" or function == \"severe\")): break\n sepIndex = sourcefile.rfind(os.sep)\n if (sepIndex >=0 and Trace.SourceFileStyle == Trace.NameOnly):\n sourcefile = sourcefile[sepIndex+1:]\n #endIf\n if (text == None):\n if (not stack):\n # Leave out the newline for the bottom line on the stack\n stack = \"\\t%s(%s) [%s]\" % (sourcefile,line,function)\n else:\n stack = \"\\t%s(%s) [%s]\\n%s\" % (sourcefile,line,function,stack)\n #endIf\n else:\n if (not stack):\n # Leave out the newline for the bottom line on the stack\n stack = \"\\t%s(%s) [%s] - %s\" % (sourcefile,line,function,text)\n else:\n stack = \"\\t%s(%s) [%s] - %s\\n%s\" % (sourcefile,line,function,text,stack)\n #endIf\n #endIf\n #endFor\n stack = \"\\tFrame stack (most recent call first):\\n%s\" % stack\n except:\n # This shouldn't happen, but in case it does...\n exc_type,exc_value = sys.exc_info()[:2]\n stack = \"\\tException getting frame stack. Type: %s, Value: %s\\n%s\" % (exc_type,exc_value,stack)\n #endTry\n\n try:\n tb = sys.exc_info()[2]\n stackList = traceback.extract_tb(tb,depth)\n for stackData in stackList:\n sourcefile,line,function,text = stackData\n sepIndex = sourcefile.rfind(os.sep)\n if (sepIndex >=0 and Trace.SourceFileStyle == Trace.NameOnly):\n sourcefile = sourcefile[sepIndex+1:]\n #endIf\n if (text == None):\n stack = \"\\t%s(%s) [%s]\\n%s\" % (sourcefile,line,function,stack)\n else:\n stack = \"\\t%s(%s) [%s] - %s\\n%s\" % (sourcefile,line,function,text,stack)\n #endIf\n #endFor\n stack = \"\\tException stack (most recent call first):\\n%s\" % stack\n except:\n # This shouldn't happen, but in case it does...\n exc_type,exc_value = sys.exc_info()[:2]\n stack = \"\\tException getting exception stack. Type: %s, Value: %s\\n%s\" % (exc_type,exc_value,stack)\n #endTry\n \n # At the very top - put the exception string\n stack = \"\\t%s\\n%s\" % (exc,stack)\n \n return stack",
"def print_exception(sanitize: bool) -> None:\n if sanitize:\n tb = traceback.TracebackException(*sys.exc_info())\n if tb.exc_type == InstrProcessingFailure and tb.__cause__:\n tb = tb.__cause__\n for frame in tb.stack:\n frame.lineno = 0\n frame.filename = Path(frame.filename).name\n for line in tb.format(chain=False):\n print(line, end=\"\")\n else:\n traceback.print_exc(file=sys.stdout)",
"def StandViz_ReportError( errorobj, args, Header = None ): # error reporting and traceback function\n (MyPath, MyFile) = os.path.split( args[0] ) # retrieve filename and path of running python script\n (MyBaseName, MyExt) = os.path.splitext( MyFile ) # separate basefilename from extension\n errorfilename = \"{}.txt\".format(MyBaseName) # create new error filename based on base of script filename\n ERRFILE = open( errorfilename, 'w' ) # open text file for writting\n if( Header != None ): ERRFILE.write( '%s\\n' % Header ) # if Header defined, write Header to file\n ERRFILE.write( \"Error running '{}'\\n\".format(MyFile) ) # write error message with filename\n MyTrace = errorobj[2] # retrieve error object\n while( MyTrace != None ): # loop through stack trace\n (line, file, name) = ( MyTrace.tb_lineno, MyTrace.tb_frame.f_code.co_filename, MyTrace.tb_frame.f_code.co_name ) # extract line, file, and error name\n F = open( file, 'r' ) # open source file of Python script\n L = F.readlines() # read scripot source into memory\n F.close() # close script file\n code = L[line-1].strip() # extract line of source code that caused error\n ERRFILE.write( \" File '{}', line {}, in {}\\n {}\\n\".format(file, line, name, code) ) # write filename, source code line, error name, and error code\n MyTrace = MyTrace.tb_next # step to next level of call stack trace\n ERRFILE.write( \"errorobj: {}\\n\".format(errorobj) ) # write error object and arguments for call\n ERRFILE.write( \"Calling Argument Vector: {}\\n\".format(args) ) # write calling arguments\n ERRFILE.close() # close text file with error stack trace\n os.system( \"notepad.exe {}\".format(errorfilename) ) # display error log file with notepad.exe",
"def _trace_apply(self, chunkstr, verbose):\n ...",
"def trace(msg):\n if debug_level >= 2:\n print msg"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
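The negatives in the row above all hand-roll a "most recent call first" stack string from traceback frames. A minimal sketch of the same layout using only the standard library; the helper name and exact formatting are illustrative choices, not taken from any of the snippets:

import traceback

def format_exception_bottom_up(exc, depth=10):
    """Render an exception's traceback as text, most recent call first."""
    frames = traceback.extract_tb(exc.__traceback__, limit=depth)
    lines = ["\t%s(%s) [%s] - %s" % (f.filename, f.lineno, f.name, f.line or "")
             for f in reversed(frames)]
    return "\t%s\n%s" % (exc, "\n".join(lines))

try:
    1 / 0
except ZeroDivisionError as err:
    print(format_exception_bottom_up(err))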
In this function, you will create several class instances from the data provided. Then, you will open "sh_additional_info.csv" and, for each line in that file, perform an operation using one of the methods of one of your classes. Follow the commented instructions in this main() function. Refer to the Problem Set 07 README.md for instructions and tips. | def main():
# Refer to Problem Set 07 README.md for instructions and tips.
# 6.1: Read in < sh_basic_info.csv >
basic_info = read_csv_file('sh_basic_info.csv')
# 6.2: Create instances of < SuperHeroine >
heroines = {}
for hero in basic_info:
heroines[hero['name']] = SuperHeroine(hero['name'], hero['full_name'], hero['team'],
hero['eye_color'], hero['hair_color'], hero['base'])
print(heroines)
# 6.3: Read in < sh_additional_info.csv >
additional_info = read_csv_file('sh_additional_info.csv')
# 6.4: Add powers and nemesis
for row in additional_info:
name = row["Heroine Name"]
instance_affected = heroines[name]
how_affected = row["Category"]
value = row['Value']
if how_affected == 'power':
instance_affected.add_power(value)
else:
instance_affected.add_nemesis(value)
# 6.5: Write to file
write_to_file('storm.txt',heroines['Storm'])
write_to_file('scarlet_witch.txt',heroines['Scarlet Witch'])
write_to_file('jessica_jones.txt',heroines['Jessica Jones']) | [
"def main():\n\n scenario = 2\n verbose = 3\n\n ### Generic for all scenario - Data Pre processing -\n ### Removal of ['neutrophil', 'serumLevelsOfWhiteBloodCell', 'lymphocytes'] due to the significant lack of information.\n data = PreProcess(\"./data.csv\", ['neutrophil', 'serumLevelsOfWhiteBloodCell', 'lymphocytes'])\n data.loadDataset()\n data.cleanDataAttributes()\n data.labelEncodings()\n\n if scenario == 1:\n scenario_1(data, verbose)\n elif scenario == 2:\n scenario_2(data, verbose)\n elif scenario == 3:\n scenario_3(data)\n elif scenario == 4:\n scenario_4(data)\n elif scenario == 5:\n scenario_5(data, verbose)\n elif scenario == 6:\n scenario_6(data)\n else:\n help(main)",
"def main():\n # Load in original data\n origin_data = pd.read_csv('/Users/apple/Desktop/CSE_163/cse163_project/'\n + 'Admission_Predict_Ver1.1.csv',\n sep=r'\\s*,\\s*', header=0, encoding='ascii',\n engine='python')\n\n # Research question 1\n lasso_regression(origin_data)\n\n # Research question 2\n # We drop the 'Serial No.' column because it is unrelated to our analysis.\n df = origin_data.drop(columns=['Serial No.'])\n find_correlation(df)\n boxplots_testscores_vs_admission(df)\n\n # Research question 3\n university_rating_analysis(origin_data)",
"def main():\n filename = \"data/exercise.csv\"\n analyze(filename)",
"def main():\n\n #get the csv file into a data-frame\n universities_df = pd.read_csv('universities_data.csv', encoding = 'utf-8-sig')\n universities_names_list = universities_df['name'].tolist()\n\n #get list of university objects\n url = 'http://universities.hipolabs.com/search?country=Israel'\n api_universities = Get_universities(url)\n list_of_universities = api_universities.get_universities_info()\n\n #to see if we got new entities or not for exporting to csv later..\n is_new_entities = False\n\n for university in list_of_universities:\n if university.name not in universities_names_list:\n is_new_entities = True\n universities_df= universities_df.append(pd.DataFrame({\n 'alpha_two_code': [university.alpha_two_code], \n 'country': [university.country],\n 'web_pages': [str(university.web_pages)],\n 'domains': [str(university.domains)],\n 'name': [university.name],\n 'state_province':[str(university.state_province)]}) , ignore_index = True)\n\n #export back to csv if true\n if is_new_entities: \n print('we got new entities!') \n universities_df.to_csv('universities_data.csv', encoding = 'utf-8-sig', index = False)\n else:print('no new universities for now!')",
"def main():\n\n dataframes = importing(['admissions_data', 'patient_data',\n 'diagnoses_icd_data', 'services_data',\n 'icustays'])\n merged_data = merging_data(dataframes)\n cleaned = data_cleaning(merged_data)\n\n cleaned.to_csv('raw_hospital_data.csv')",
"def main():\n\t# Load data\n\tdf = pd.read_pickle('all_adviser_data.pkl').reset_index(drop=True)\n\n\t# Determine whether advisor has complaint\n\tdf['complaint'] = np.where(df['hasCustComp'] == 'Y', 1, 0)\n\n\t# Grab where every advisor currently works\n\tdf['currEmpName'] = df.apply(helper, axis=1)\n\n\t# Perform calculations on complaints\n\tdf = df.groupby('currEmpName')['complaint'].agg(['sum', 'count', 'mean'])\n\n\t# Rename columns for clarity\n\tdf = df.rename(\n\t\tindex=str,\n\t\tcolumns={\n\t\t\t'currEmpName': 'Current Employer Name',\n\t\t\t'sum': 'Number of Complaints',\n\t\t\t'count': 'Number of Employees',\n\t\t\t'mean': 'Complaints to Employees Ratio'\n\t\t}\n\t)\n\n\t# Sort\n\tdf = df.sort_values(by='Number of Employees')\n\tdf = df.reset_index()\n\n\t# Output\n\tdf.to_csv('Employer Complaint Ratios.csv', index=False)",
"def __init__(self, \n cohort_list = [],\n dataset_ind_list = [], \n class_list = [], \n subtype_list = []):\n \n self.cohort_list = cohort_list\n self.dataset_ind_list = dataset_ind_list\n self.class_list = class_list\n self.subtype_list = subtype_list\n \n self.df = pd.read_csv(\"/nfs/ywkim/GreenCross/GitHub/NIHdataset_DX/TCGA_dataset_0531_2fold.csv\", index_col = [0])\n #๋ฐ์ดํฐ์
์ ๋ฆฌ๋ csv ๋ฅผ pandas ๋ก read\n \n assert len(self.cohort_list) > 0\n assert len(self.class_list) > 0\n assert len(self.dataset_ind_list) > 0\n #๋ฆฌ์คํธ๋ก์ ๊ธฐ๋ณธ ์ธํ๋ค์ด ๋ค์ด์๋์ง ์ฒดํฌ\n \n if len(self.subtype_list) == 0:\n self.df_called = self.df.query('cohort == @self.cohort_list & dataset_ind == @self.dataset_ind_list')\n print(\"cohort : \", self.cohort_list,\n \"\\ndataset ind : \", self.dataset_ind_list,\n \"\\nNon-subtype\")\n else:\n self.df_called = self.df.query('cohort == @self.cohort_list & dataset_ind == @self.dataset_ind_list & subtype == @self.subtype_list')\n print(\"cohort : \", self.cohort_list, \n \"\\ndataset ind : \", self.dataset_ind_list,\n \"\\nsubtype : \", self.subtype_list)\n #subtype์ ๋ณ๋ ์ง์ ํ์ง ์์ ๊ฒฝ์ฐ ๋ชจ๋ subtype ์ ๋ํด์ ์งํ\n \n \n #class_list (์์ฑ ์์ฑ) ์ค ํ๋๋ง ์ถ๋ ฅํ ๊ฒ์ธ์ง, ๋๋ค ์ถ๋ ฅํ ๊ฒ์ธ์ง ์ ํ, annotation region > 0 ์ด์๋ง ๊ฐ๊ฐ ํด๋์ค ๋ฆฌ์คํธ๋ก\n \n self.svs_list = {}\n self.access_coord = {}\n self.raw_path = {}\n\n \n for class_pn in self.class_list:\n \n self.svs_list[class_pn] = list((self.df_called.query('{}_area > 0'.format(class_pn[:3])).index))\n #์ผํฐ ๋ฒํธ๊ฐ ์ํ๋ฒณ ๋์๋ฆฌ์ธ๋ฐ ,์ ํ์๋ฆฌ๋ง์ ๋ผ์ด์์ ์ฒดํฌ์ฌ์ผ๋ก ์ด์ฉ\n #svs \n self.access_coord[class_pn] = {}\n \n for svs_name in tqdm(self.svs_list[class_pn]):\n svs_class = self.df_called.loc[svs_name]['cohort']\n svs_base_path = '/ssd5/NIH/TCGA_DX/{}/processed/patch/positive/'.format(svs_class)\n raw_base_path = '/ssd5/NIH/TCGA_DX/{}/raw/positive/'.format(svs_class)\n mask = imread(svs_base_path + svs_name + '/0000_{}_tissue_mask.png'.format(class_pn))\n self.access_coord[class_pn][svs_name] = np.where(mask > 128)\n self.raw_path[svs_name] = raw_base_path + svs_name\n \n print(class_pn, 'Memory Loaded')\n \n \n print(\"\\nclass load_no checksum\") \n for class_pn in self.class_list:\n checksum_list = [x[5] for x in self.svs_list[class_pn]]\n print(class_pn, len(self.svs_list[class_pn]), ''.join(checksum_list))",
"def __init__(self):\n self.file_name = 'data.csv'\n # Column of interest\n self._col = ['product_name', 'url', 'quantity', 'packaging']\n self._col += ['brands', 'origins', 'countries_fr', 'allergens']\n self._col += ['traces_fr', 'additives_n', 'additives_fr']\n self._col += ['nutrition_grade_fr', 'categories_fr']\n self._col += ['main_category_fr']\n\n # Check if the csv is already in the file\n try:\n with open(self.file_name, 'r'):\n pass\n except FileNotFoundError:\n CsvAnalysis.download_file()\n finally:\n # Read the csv file, and create a dataframe\n self.food_cat = pandas.read_csv(self.file_name,\n sep=\"\\t\",\n low_memory=False,\n usecols=self._col,\n encoding=\"utf8\")\n\n # Remove countries which aren't France\n mask = self.food_cat['countries_fr']\n self.food_cat = self.food_cat[mask == 'France']\n\n # Delete column countries_fr\n del self.food_cat['countries_fr']\n\n # Remove empty row countries_fr from dataframe\n columns = ['main_category_fr', 'product_name', 'nutrition_grade_fr']\n for column in columns:\n self.food_cat = self.food_cat[~self.food_cat[column].isnull()]\n\n # Remove empty row from product_name\n self.food_cat.sort_values(by='categories_fr')\n\n # Select the last value from categories_fr\n # to use it as a subcategory\n col = 'categories_fr'\n self.food_cat[col] = self.food_cat[col].str.split(',').str.get(-1)\n self.food_cat.sort_values(by='categories_fr')",
"def start_data():\n add_furniture('invoice_file.csv', 'Vinodh', 'AB123', 'Walkman', 50.00)\n add_furniture('invoice_file.csv', 'Ram', 'BC345', 'Horse', 25.00)\n add_furniture('invoice_file.csv', 'Shiva', 'KY890', 'Book',\n 10.00)\n add_furniture()",
"def main():\n print('Loading raw dataset')\n df = extract_df('agencyperformance.zip', 'finalapi.csv')\n load_df(df, 'agency_performance', if_exists='append')\n \n print('Loading star schema')\n fact_df = df.copy()\n fact_df = fact_df.astype(object).where(notnull(df), None)\n fact_df, agency_df = transform_df(\n fact_df, 'agency', ['AGENCY_ID', 'PRIMARY_AGENCY_ID'], \n id_column='AGENCY_ID', drop_columns=['PRIMARY_AGENCY_ID']\n )\n fact_df, product_df = transform_df(fact_df, 'product', \n ['PROD_ABBR', 'PROD_LINE'])\n fact_df, state_df = transform_df(fact_df, 'state', ['STATE_ABBR'])\n fact_df, vendor_df = transform_df(fact_df, 'vendor', ['VENDOR'])\n fact_df['id'] = fact_df.index + 1\n load_df(fact_df, 'insurance')\n load_df(agency_df, 'agency')\n load_df(product_df, 'product')\n load_df(state_df, 'state')\n load_df(vendor_df, 'vendor')",
"def part1():\n print('=== Starting Part 1 ===')\n data = pd.read_csv(DATA)\n\n print('Number of species:', hw2_pandas.species_count(data))\n print('Highest level pokemon:', hw2_pandas.max_level(data))\n print('Low-level Pokemon', hw2_pandas.filter_range(data, 1, 9))\n print('Average attack for fire types',\n hw2_pandas.mean_attack_for_type(data, 'fire'))\n print('Count of each Pokemon type:')\n print(hw2_pandas.count_types(data))\n print('Highest stage for each Pokemon type')\n print(hw2_pandas.highest_stage_per_type(data))\n print('Average attack for each Pokemon type')\n print(hw2_pandas.mean_attack_per_type(data))",
"def ReadData(self, fname):\n self.datafile=path.join(path.realpath('.'),fname)\n self.logLikelihood=0\n self.instanceMap = {}\n datatype=np.dtype([('ps_id', int),('trial_no', int),\\\n ('session', int),('condition', int),('length',float),\\\n ('actualCat',int),('idealCat',int),('responseCat',int),\\\n ('modelledCat', int)])\n data=[] # A temporary data structure which we are using before we\n # transfer all to numpy.\n self.testData = {} # initialise it here\n self.usedTestData = {} # The data structure we use in the code, with\n # selected instances\n self.catA=set() # Boolean saying that we don't have members in\n # category A\n self.catB=set() # same for cat B\n self.reEstimated=0\n self.changedCats = 0\n self.presentedOrder=[] # This will remember the order of presentation\n # of stimuli, for modelling of recency and forgetting. \n # We don't put the data in categories yet - we do that in the modelling\n # phase, where we build the model incrementally.\n if path.exists(fname):\n with open(fname) as f:\n for line in f:\n try:\n a=line.split(',')\n if a[5]=='D': # this is how it is represented in the\n # data from Texas\n actualcat=1\n #self.catB.append(instNo)\n else:\n actualcat=-1\n #self.catA.append(instNo)\n # ^ this is the category which is given to the Ps as\n # feedback. This is what we model as what they\n # remember / forget / use for category inference.\n pscat=-1\n if a[6]=='D':\n pscat=1\n # ^ this is the category the Ps responded. Most relevant in\n # test set.\n idealcat=-1\n if int(a[4])>30:\n idealcat=1\n # ^ this is the category which the ideal classifier\n # would put the stimulus in.\n data.append((int(a[0]), int(a[3]), int(a[1]), int(a[2]),\\\n self.AddNoise(int(a[4])), actualcat, idealcat,\\\n pscat, actualcat))\n # Here we are using actualCat as if it was modelledCat,\n # to simplify choosing instances which are presented in\n # category A or B.\n # ps_id, trial_no, session, condition, length,\n # actualCat, idealCat, responseCat, modelledCat\n except Exception, e:\n continue # say, if first line or something wrong\n self.trainingData = np.array(data,dtype=datatype)\n self.usedTrainingData = self.trainingData.copy()\n # Populate instanceMap\n for i, instance in enumerate(data):\n self.instanceMap[instance[0:4]]=i\n else:\n if self.verbose > 0:\n print \"The filename \"+fname+\" is invalid!\"",
"def test_add_data():\n add_furniture(\"invoice_file.csv\", \"Elisa Miles\", \"LR04\", \"Leather Sofa\", 25.00)\n add_furniture(\"invoice_file.csv\", \"Edward Data\", \"KT78\", \"Kitchen Table\", 10.00)\n add_furniture(\"invoice_file.csv\", \"Alex Gonzales\", \"BR02\", \"Queen Mattress\", 17.00)",
"def main():\n s = content.DataFiles()\n \n date_list = generate.get_list_dates(2016, 2016, 500)\n prod_list = list(s.get_collist_by_name(os.path.join(content.data_fldr,'food','garden_produce.csv'), 'name')[0])\n \n tbl_cust = generate.TableGenerator(8, ['STRING','PEOPLE', 'PEOPLE', 'PLACE'], ['Customer ID', 'First Name', 'Surname', 'Country'])\n tbl_cust.save_table('customers.csv')\n cust_list = list(s.get_collist_by_name('customers.csv', 'Customer ID')[0])\n \n tbl_sales = generate.TableGenerator(25, [date_list, cust_list, prod_list, 'CURRENCY'], ['Date of sale', 'Customer ID', 'Product', 'Amount'])\n tbl_sales.save_table('sales.csv')",
"def getData(university, files):\n\n if not isinstance(files, list): \n \"\"\"Check to make sure files is a list\"\"\"\n raise InvalidArgumentError('Format should be getData(\"ABC University\", [\"file1.csv\", \"file2.csv\"]')\n\n if not isinstance(university, str): \n \"\"\"Check to make sure university is a string\"\"\"\n raise InvalidArgumentError('Format should be getData(\"ABC University\", [\"file1.csv\", \"file2.csv\"]')\n \n if not all([isinstance(f, str) for f in files]):\n \"\"\"Check to make sure that all the files are strings\"\"\"\n raise InvalidArgumentError('Format should be getData(\"ABC University\", [\"file1.csv\", \"file2.csv\"]')\n\n if not all([f[-4:] == \".csv\" for f in files]):\n \"\"\"Check to make sure that all the files are in .csv format\"\"\"\n raise FileError('This program requires all files to have a .csv extension')\n\n #We will temporarily put the student test scores in a dictionary\n data = {}\n\n #Below is the code to open and read the csv files\n for this_file in files:\n try:\n f = open(this_file)\n\n #if the file did not open, an exception will be raised, and the following code will not run.\n #Assuming the file did open, there still may be problems with the data in the csv file\n #And much of the following code checks for that\n error_string = \"The file \"+str(this_file)+\" is not formatted in the correct format.\"\n for line in f:\n data_line = line.strip().split(\",\")\n if(len(data_line) != 5):\n raise FileError(error_string+\" Length != 5\")\n try:\n student_id = int(data_line[0])\n score = float(data_line[4])\n except ValueError:\n raise FileError(error_string+\" student_id or score not numeric\")\n \n #The following lines have been commented out, \n #because requiring that the school name have quotes seemed too restrictive\n #\n #if((data_line[2][0] != '\"') or (data_line[2][-1] != '\"')):\n # raise FileError(error_string+\" The school name is not surrounded by quotes as defined in the API\")\n \n #Now that we have checked that the data is in the correct form\n #We are ready to parse it, and collect the data for each student\n school = data_line[2].strip('\"')\n if(school == university):\n try:\n data[student_id].append(score)\n except KeyError:\n data[student_id] = [score]\n\n #We catch the raised errors\n except FileError as e:\n f.close()\n raise(e) \n \n except IOError:\n error_string = \"Could not open \"+ str(this_file)+\". Check that the path is correct.\"\n raise FileError (error_string)\n\n else:\n f.close()\n\n if(len(data) == 0):\n raise FileError (\"Did not find any data for \"+university)\n\n\n #Now that we have all of the student data, we will want to get the average for each student\n output = [] \n for student in data:\n output.append(sum(data[student])/len(data[student]))\n\n #Finally, we return the sorted data\n return sorted(output)",
"def load(cls):\n \n # Loop through procedures and build patient procedure lists:\n procs = csv.reader(file(PROCEDURES_FILE,'U'),dialect='excel-tab')\n header = procs.next() \n for proc in procs:\n cls(dict(zip(header,proc))) # Create a procedure instance ",
"def __init__(self):\n \n try:\n self.microbiome = input(\"Which microbiome data do you want to compile results for (CORAL or GUT)?: \").upper() \n self.directory = input(\"Full path to where CORAL and GUT folders are located in your computer: \")\n except FileNotFoundError:\n print(\"ERROR!:\\nFile or directory does not exist. Recheck your directory path and try again.\")\n # /Users/punitsundar/Documents/Metagenomic_Data\n self.no_of_tools = 2\n self.tools_list = [\"MetaSPAdes\",\"SPAdes\"]",
"def main():\n\n # parse command-line arguments using 'argparse' module\n args_dict = parse_args()\n\n # parse name & file format of input data file\n infile_name = os.path.splitext(args_dict['input_file'])[0]\n infile_format = os.path.splitext(args_dict['input_file'])[1].replace('.', '')\n\n # import data to Pandas DataFrame\n if infile_format == 'csv':\n input_df = pd.read_csv(\n args_dict['input_file'],\n header=0,\n index_col='LIBID',\n dtype=str\n )\n\n elif infile_format == 'xlsx':\n input_df = pd.read_excel(\n args_dict['input_file'],\n header=0,\n index_col='LIBID',\n dtype=str,\n engine='openpyxl'\n )\n\n # remove any empty rows\n input_df = input_df[input_df.index.notna()]\n\n outfile_name = infile_name\n\n # add requested data & set output file name\n if args_dict['geocode']:\n input_df = add_geocode_data(input_df)\n outfile_name += '_geo'\n\n if args_dict['regions']:\n input_df = add_region_data(input_df)\n outfile_name += '_reg'\n\n out_df = input_df\n\n # output to selected file format\n if args_dict['out_format'] == 'csv':\n out_df.to_csv(outfile_name + '.csv')\n\n elif args_dict['out_format'] == 'xlsx':\n out_df.to_excel(outfile_name + '.xlsx', sheet_name='wi_library_directory')",
"def __init__(self, sc, dataset_path):\n\n logger.info(\"Starting up the Recommendation Engine: \")\n\n self.sc = sc\n\n\t#Load cusomer data for later use\n\t\n logger.info(\"Loading Customer data...\")\n customer_file_path = os.path.join(dataset_path, 'tpo_customer.csv')\n customer_raw_RDD = self.sc.textFile(customer_file_path)\n customer_raw_data_header = customer_raw_RDD.take(1)[0]\n self.customer_RDD = customer_raw_RDD.filter(lambda line: line!=customer_raw_data_header)\\\n .map(lambda line: line.split(\",\")).map(lambda tokens: ((tokens[0]),(tokens[1]),(tokens[2]),(tokens[3]),(tokens[4]),(tokens[5]),(tokens[6]),(tokens[7]))).cache()\n\tlogger.info(\"Loading Customer data success...\")\n\t#CUSTOMCUSTOMER_NAME,CUSTOMER_ADDRESS1,CUSTOMER_ADDRESS2,CUSTOMER_CITY,CUSTOMER_STATE,CUSTOMER_COUNTRY,CUSTOMER_ZIPCODE,CREATED_BY,CREATION_DATE,LAST_UPDATED_BY,LAST_UPDATE_DATE\n \n\n\n\t\n\t#Load turbine data for later use\t\n logger.info(\"Loading Turbine data...\")\n turbine_file_path = os.path.join(dataset_path, 'test_tpo_unit_config.csv')\n turbine_raw_RDD = self.sc.textFile(turbine_file_path)\n turbine_raw_data_header = turbine_raw_RDD.take(1)[0]\n self.turbine_RDD = turbine_raw_RDD.filter(lambda line: line!=turbine_raw_data_header)\\\n .map(lambda line: line.split(\",\")).map(lambda tokens: ((tokens[0]),(tokens[1]),(tokens[2]),(tokens[3]),(tokens[5]),(tokens[34]),(tokens[51]),(tokens[35]))).cache()\n\tlogger.info(\"Loading Turbine data success...\")\n \n\t\n\t\n\t\n\t#Load site data for later use\t\n logger.info(\"Loading Site data...\")\n site_file_path = os.path.join(dataset_path, 'tpo_site.csv')\n site_raw_RDD = self.sc.textFile(site_file_path)\n site_raw_data_header = site_raw_RDD.take(1)[0]\n self.site_RDD = site_raw_RDD.filter(lambda line: line!=site_raw_data_header)\\\n .map(lambda line: line.split(\",\")).map(lambda tokens: ((tokens[0]),(tokens[1]),(tokens[2]),(tokens[3]),(tokens[4]),(tokens[5]),(tokens[6]),(tokens[7]),(tokens[16]))).cache()\n\tlogger.info(\"Loading Site data success...\")\n\t\n\n\n\n\t# Load ratings data for later use\n logger.info(\"Loading Ratings data...\")\n ratings_file_path = os.path.join(dataset_path, 'ratings.csv')\n ratings_raw_RDD = self.sc.textFile(ratings_file_path)\n ratings_raw_data_header = ratings_raw_RDD.take(1)[0]\n self.ratings_RDD = ratings_raw_RDD.filter(lambda line: line!=ratings_raw_data_header)\\\n .map(lambda line: line.split(\",\")).map(lambda tokens: (int(tokens[0]),int(tokens[1]),float(tokens[2]))).cache()\n # Load movies data for later use\n logger.info(\"Loading Movies data...\")\n movies_file_path = os.path.join(dataset_path, 'movies.csv')\n movies_raw_RDD = self.sc.textFile(movies_file_path)\n movies_raw_data_header = movies_raw_RDD.take(1)[0]\n self.movies_RDD = movies_raw_RDD.filter(lambda line: line!=movies_raw_data_header)\\\n .map(lambda line: line.split(\",\")).map(lambda tokens: (int(tokens[0]),tokens[1],tokens[2])).cache()\n self.movies_titles_RDD = self.movies_RDD.map(lambda x: (int(x[0]),x[1])).cache()\n # Pre-calculate movies ratings counts\n self.__count_and_average_ratings()\n\n # Train the model\n self.rank = 8\n self.seed = 5L\n self.iterations = 10\n self.regularization_parameter = 0.1\n self.__train_model()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
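The main() document in the row above depends on a read_csv_file helper, a write_to_file helper and a SuperHeroine class defined elsewhere in the problem set. A minimal sketch of what those pieces could look like, inferred only from how main() calls them; all field names and method bodies here are assumptions:

import csv

def read_csv_file(filepath):
    """Read a CSV file into a list of dicts keyed by the header row."""
    with open(filepath, 'r', newline='', encoding='utf-8') as f:
        return list(csv.DictReader(f))

class SuperHeroine:
    """One heroine's basic info plus powers and nemeses added later."""
    def __init__(self, name, full_name, team, eye_color, hair_color, base):
        self.name = name
        self.full_name = full_name
        self.team = team
        self.eye_color = eye_color
        self.hair_color = hair_color
        self.base = base
        self.powers = []
        self.nemeses = []

    def add_power(self, power):
        self.powers.append(power)

    def add_nemesis(self, nemesis):
        self.nemeses.append(nemesis)

    def __str__(self):
        return (f"{self.name} ({self.full_name}), team {self.team}, "
                f"powers: {', '.join(self.powers) or 'none'}, "
                f"nemeses: {', '.join(self.nemeses) or 'none'}")

def write_to_file(filepath, heroine):
    """Write the string form of a heroine instance to a text file."""
    with open(filepath, 'w', encoding='utf-8') as f:
        f.write(str(heroine))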
Updates the memory-shared x, y coordinates with the actual mouse position at a given frequency. | def stream(bus, address, frequency, x, y, stop_trigger):
mouse = Mouse.list_connected(bus=bus, address=address)[0]
delay = 1./frequency
while not stop_trigger:
x1, y1 = mouse.get_position_change()
x.value += x1
y.value += y1
time.sleep(delay) | [
"def update_coordinates(self):\n self.x, self.y = pygame.mouse.get_pos()",
"def refresh_pos(self):\r\n\r\n self.mouse_pos = pygame.mouse.get_pos()",
"def update(self, event):\n self.xy = [event.x, event.y]",
"def update_pointer(self):\n pointer_length = -self.pointer_frac * self.radius\n # Add pi/2 to the angle because we consider 0 radians to be pi/2 in standard position.\n x = pointer_length * math.cos(self._radians + math.pi / 2)\n y = pointer_length * math.sin(self._radians + math.pi / 2)\n self.coords(self.pointer, 0, 0, x, y)",
"def update_mouse_click(mouse_pos):\n global cur_slider_ix\n global cur_control_ix\n global mouse_pressed\n x = (mouse_pos[0] - sliders_x)\n y = (mouse_pos[1] - sliders_y)\n\n if 0 <= x < sliders_w and 0 <= y < sliders_h:\n cur_slider_ix = int(x / slider_w)\n mouse_pressed = 1\n\n x = (mouse_pos[0] - controls_x)\n y = (mouse_pos[1] - controls_y)\n if 0 <= x < controls_w and 0 <= y < controls_h:\n cur_control_ix = int(x / control_w)\n mouse_pressed = 2",
"def on_mouse_motion(self, x, y, delta_x, delta_y):\r\n pass",
"def set_position(self, x, y):\n pygame.mouse.set_pos([x, y])",
"def set_position(self, updated):\n self.buff_x = updated[0]\n self.buff_y = updated[1]",
"def move_mouse(x, y):\n current_location = Mouse.Instance.Location\n point = Point(int(x) + current_location.X, int(y) + current_location.Y)\n Mouse.Instance.Location = point",
"def on_mouse_move(self, event):\n\n try:\n x = float(event.xdata)\n y = float(event.ydata)\n except TypeError:\n return\n\n self.mouse = [event.xdata, event.ydata]\n\n self.canvas.restore_region(self.background)\n\n # Update pan view on mouse move\n if self.panning is True:\n for a in self.pan_axes:\n a.drag_pan(1, event.key, event.x, event.y)\n\n # x_pan, y_pan = self.app.geo_editor.snap(event.xdata, event.ydata)\n # self.draw_cursor(x_pos=x_pan, y_pos=y_pan)\n\n # Async re-draw (redraws only on thread idle state, uses timer on backend)\n self.canvas.draw_idle()\n\n # #### Temporary place-holder for cached update #####\n # self.update_screen_request.emit([0, 0, 0, 0, 0])\n\n if self.app.defaults[\"global_cursor_color_enabled\"] is True:\n self.draw_cursor(x_pos=x, y_pos=y, color=self.app.cursor_color_3D)\n else:\n self.draw_cursor(x_pos=x, y_pos=y)\n # self.canvas.blit(self.axes.bbox)",
"def live_plot(filename, x_value, y_value, scroll=True, refresh_rate=1000): #default is 1 sample per second\n data_file, dc_ps_dev, device_1, device_2 = setup(filename, x_value, y_value)\n absolute_time = time.time()\n # Create figure for plotting\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n x = []\n y = []\n y1 = []\n f = open(data_file, \"r\")\n lines = f.readlines()\n title = lines[0]\n x_axis, y_axis = lines[1].split(\",\")[0], lines[1].split(\",\")[1]\n f.close()\n \n f = open(data_file, \"a\")\n def animate(i):\n \"\"\"Sub-function that updates data for each new frame.\n \n \"\"\"\n # Take measurements\n item1, item2 = take_measurement(x_value, absolute_time, device_1, 102), take_measurement(y_value, absolute_time, device_2)\n save_to_file(f, item1, item2) # Save to file\n y_vals = str(item2).split(\",\") \n x.append(item1)\n y.append(float(y_vals[0]))\n if len(y_vals)>1: # Handles case with two voltages vs. time\n y1.append(float(y_vals[1]))\n\n ## DEPRECATED: Slows down code\n # Parse data file for x and y\n #f = open(data_file, \"r\")\n #lines = f.readlines()\n #f.close()\n #if len(lines) > len(x)+2:\n # for line in lines[len(x)+2:]:\n # x.append(float(line.split(\",\")[0]))\n # y.append(float(line.split(\",\")[1]))\n \n # Plot data\n if scroll and len(x)> 20: # Window length for scroll mode\n x_plot, y_plot = x[-20:], y[-20:]\n if len(y_vals)>1:\n y1_plot = y1[-20:]\n else:\n x_plot, y_plot, y1_plot = x, y, y1\n ax.clear()\n ax.plot(x_plot, y_plot)\n if len(y_vals)>1: # Handles case with two voltages vs. time\n ax.plot(x_plot, y1_plot)\n plt.title(title)\n plt.xlabel(x_axis)\n plt.ylabel(y_axis)\n\n ani = animation.FuncAnimation(fig, animate, interval=int(refresh_rate))\n plt.show()\n f.close()",
"def timer_tick(self, *args):\n # Generate a new number and increment the tick count\n new_val = self._generator(self.mean, self.stddev)\n self.num_ticks += 1\n \n # grab the existing data, truncate it, and append the new point.\n # This isn't the most efficient thing in the world but it works.\n cur_data = self.viewer.data\n new_data = hstack((cur_data[-self.max_num_points+1:], [new_val]))\n new_index = arange(self.num_ticks - len(new_data) + 1, self.num_ticks+0.01)\n \n self.viewer.index = new_index\n self.viewer.data = new_data\n return",
"def mouse_move(self, x, y, modifiers):",
"def setPosition(pos):\r\n time.sleep(0.05)\r\n mouse.position = pos",
"def setX(X):\r\n time.sleep(0.05)\r\n pos = (X, mouse.position[1])\r\n mouse.position = pos",
"def xy(self, xy_position):\n print(f\"xy: {xy_position}\")\n self.device_control.xy = xy_position\n yield",
"def handle_mouse(self, x, y):\n # we are in aperture mode\n if self.aperture_id:\n if self.aperture_id not in self.aperture_model.aperture_models.keys():\n pass\n model = self.aperture_model.aperture_models[self.aperture_id]\n location = model.source.data['location'][0]\n\n if self.mode == 'width':\n width = abs(location - x)\n model.update_values(start=location - width,\n end=location + width)\n elif self.mode == 'left':\n if x < location:\n model.update_values(start=x)\n elif self.mode == 'right':\n if x > location:\n model.update_values(end=x)\n elif self.mode == 'location':\n diff = x - location\n model.update_values(location=x,\n start=model.source.data['start'][0] + diff,\n end=model.source.data['end'][0] + diff)\n\n self.last_x = x\n self.last_y = y\n return False",
"def on_mouse_move(self, event):\n self.mouse = [event.xdata, event.ydata]\n\n # Update pan view on mouse move\n if self.panning is True:\n for a in self.pan_axes:\n a.drag_pan(1, event.key, event.x, event.y)\n\n # Async re-draw (redraws only on thread idle state, uses timer on backend)\n self.canvas.draw_idle()\n\n ##### Temporary place-holder for cached update #####\n self.update_screen_request.emit([0, 0, 0, 0, 0])",
"def update_mouse_move(mouse_pos):\n global needs_update\n\n if mouse_pressed == 1:\n # change sliders\n y = (mouse_pos[1] - sliders_y)\n if 0 <= y <= slider_h:\n val = (float(y) / slider_h - 0.5) * (num_sigmas * 2)\n current_params[int(cur_slider_ix)] = val\n needs_update = True\n elif mouse_pressed == 2:\n # change controls\n x = (mouse_pos[0] - (controls_x + cur_control_ix * control_w))\n if control_pad <= x <= control_w - control_pad:\n val = float(x - control_pad) / (control_w - control_pad * 2)\n cur_controls[int(cur_control_ix)] = val\n apply_controls()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
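The stream() document in the row above accumulates mouse deltas into shared x and y values and polls until stop_trigger becomes truthy; the Mouse class it uses is an external dependency. A self-contained sketch of the surrounding multiprocessing plumbing, with a dummy producer standing in for mouse.get_position_change() and a StopFlag wrapper assumed so that `while not stop_trigger` behaves as written:

import multiprocessing as mp
import time

class StopFlag:
    """Shared boolean whose truthiness can be tested inside the child loop."""
    def __init__(self):
        self._flag = mp.Value('b', 0)

    def set(self):
        self._flag.value = 1

    def __bool__(self):
        return bool(self._flag.value)

def dummy_stream(frequency, x, y, stop_trigger):
    """Same loop shape as stream(), without the hardware dependency."""
    delay = 1.0 / frequency
    while not stop_trigger:
        x.value += 1.0    # stand-in for the dx from mouse.get_position_change()
        y.value += 2.0    # stand-in for the dy
        time.sleep(delay)

if __name__ == "__main__":
    x, y = mp.Value('d', 0.0), mp.Value('d', 0.0)
    stop = StopFlag()
    worker = mp.Process(target=dummy_stream, args=(50, x, y, stop))
    worker.start()
    time.sleep(0.5)       # let the child update the shared coordinates briefly
    stop.set()
    worker.join()
    print(x.value, y.value)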
Returns the focal length of the telescope. | def focal_length(self):
return self.f * self.diameter | [
"def get_focal_length(self):\n return self.focal_length",
"def get_focal_length(self):\n return self._calibration_mat[0][0]",
"def focal_length(self):\n if not hasattr(self, \"_focal_length\"):\n if (self.spacecraft_name == \"VIKING ORBITER 1\"):\n if (self.sensor_name == \"VISUAL_IMAGING_SUBSYSTEM_CAMERA_A\"):\n self._focal_length = 474.398\n elif (self.sensor_name == \"VISUAL_IMAGING_SUBSYSTEM_CAMERA_B\"):\n self._focal_length = 474.448\n elif (self.spacecraft_name == \"VIKING ORBITER 2\"):\n if (self.sensor_name == \"VISUAL_IMAGING_SUBSYSTEM_CAMERA_A\"):\n self._focal_length = 474.610\n elif (self.sensor_name == \"VISUAL_IMAGING_SUBSYSTEM_CAMERA_B\"):\n self._focal_length = 474.101\n else:\n raise Exception(f\"Unknown viking instrument to get focal length: {self.spacecraft_name}, {self.sensor_name}\")",
"def estimate_focal_length(self):\n fl = (self.fiber_diameter / 2) / np.tan(np.deg2rad(self.fov / 2))\n\n return fl",
"def get_focal_len(self):\n _fov_x_half_rad = float(math.radians(self._fov_x / 2.0))\n result = float(self._width / (2.0 * math.tan(_fov_x_half_rad)))\n return result",
"def getEstimatedKinectFocalLength(width, height):\r\n verticalAngle = 43\r\n horizontalAngle = 57\r\n \r\n # focal length along x direction\r\n # fx = (w/2) / tan(hori_angle / 2)\r\n fx = (width / 2.0) / math.tan(math.radians(horizontalAngle/2.0))\r\n \r\n # focal length along y direction\r\n # fy = (h/2) / tan(vertical_angle / 2)\r\n fy = (height / 2.0) / math.tan(math.radians(verticalAngle/2.0))\r\n \r\n return fx, fy",
"def length(self) -> ir.FloatingValue:\n return ops.GeoLength(self).to_expr()",
"def getFocalLength(dist, width, pixWidth):\r\n focalLength=(pixWidth*dist)/width\r\n return(focalLength)",
"def bspb_focalLength():\n shotCam = pm.PyNode('shot_cam').getShape()\n return str(shotCam.focalLength.get())",
"def getLength(self):\n return self.road.getLength()",
"def get_focal_length(image: Image) -> Optional[Tuple[int, int, int, int]]:\r\n exif = image.getexif()\r\n if ExifTag.FOCAL_LENGTH_IN_MM not in exif:\r\n # no focal length\r\n return None\r\n if ExifTag.PIXEL_WIDTH not in exif or ExifTag.PIXEL_HEIGHT not in exif:\r\n width_px, height_px = image.size\r\n else:\r\n width_px = exif[ExifTag.PIXEL_WIDTH]\r\n height_px = exif[ExifTag.PIXEL_HEIGHT]\r\n # swap width and height if orientation is rotate 90ยฐ or 270ยฐ\r\n if ExifTag.ORIENTATION in exif and exif[ExifTag.ORIENTATION] > 4:\r\n width_px, height_px = height_px, width_px\r\n focal_length_in_mm = exif[ExifTag.FOCAL_LENGTH_IN_MM]\r\n if ExifTag.FOCAL_LENGTH_35MM in exif:\r\n focal_length_35mm = exif[ExifTag.FOCAL_LENGTH_35MM]\r\n else:\r\n model = exif[ExifTag.MODEL] if ExifTag.MODEL in exif else None\r\n if model is not None and len(_MODEL_DATABASE) == 0:\r\n _load_database()\r\n if model not in _MODEL_DATABASE:\r\n return None\r\n crop_factor = _MODEL_DATABASE[model]\r\n focal_length_35mm = round(focal_length_in_mm * crop_factor)\r\n return focal_length_35mm, focal_length_in_mm, width_px, height_px",
"def getLength(self) -> float:\n return self.length",
"def set_camera_focal_length(self, vcserver, camera_name, focal_length):\r\n\r\n cmds.setAttr(camera_name+'.focalLength', focal_length)",
"def fairing_length(self):\n return self.motor.diameter * 1.5",
"def set_focal_length(self, focal_length):\n self.focal_length = focal_length",
"def getVocalized(self,):\n\t\treturn self.vocalized;",
"def length(self):\n return self.magnitude()",
"def lengte(self):\n return self._lengte.get_waarde()",
"def auxiliary_trail_length(self):\n return self.attributes[\"_aux_length\"]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
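In the focal_length row above, self.f reads as a focal ratio and self.diameter as the aperture diameter, so the return value is the f-number times the aperture. A small worked example with astropy units; the attribute meanings are inferred from the expression, not stated in the row:

import astropy.units as u

f_ratio = 10              # dimensionless focal ratio (f/10); assumed meaning of self.f
diameter = 200 * u.mm     # aperture diameter; assumed meaning of self.diameter

focal_length = f_ratio * diameter
print(focal_length)       # 2000.0 mm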
Returns the plate scale as an `~astropy.units.Quantity`. | def plate_scale(self):
return 206265 * uu.arcsec / (self.diameter.to('mm') * self.f) | [
"def platescale(self):\n return None if self['fratio'] is None or self['diameter'] is None \\\n else 206265/self['fratio']/self['diameter']/1e3",
"def getScale(self):\n return _libsbml.Unit_getScale(self)",
"def plate_scale(platescale):\n if platescale.unit.is_equivalent(si.arcsec / si.m):\n platescale_val = platescale.to_value(si.radian / si.m)\n elif platescale.unit.is_equivalent(si.m / si.arcsec):\n platescale_val = (1 / platescale).to_value(si.radian / si.m)\n else:\n raise UnitsError(\"The pixel scale must be in angle/distance or distance/angle\")\n\n return Equivalency(\n [(si.m, si.radian, lambda d: d * platescale_val, lambda a: a / platescale_val)],\n \"plate_scale\",\n {\"platescale\": platescale},\n )",
"def unit_scale(quantity):\n scales = {\n 'rate': 1.0,\n 'dt': 1.0,\n 'fluence': 1e39,\n 'peak': 1e38,\n }\n return scales.get(quantity, 1.0)",
"def GetScale(self):\n ...",
"def _get_min_scale(self) -> int:",
"def getnscale(self):\n return self.nscale",
"def get_scale(resolution, units=\"degrees\"):\r\n return resolution * INCHES_PER_UNIT[units] * DOTS_PER_INCH",
"def get_scale_factor(self):\n return self._scale_factor",
"def scale(self):\n return self._transform.scale",
"def find_scale(self):\n\n width = self.face['width'] * 2\n\n print \"hat size, \", hat.size\n print \"width: \", width\n print \"scale factor: \", width / hat.size[0]\n return map(int, (width, hat.size[1] * (width / hat.size[0])))",
"def _getQuickSetupChangeSpaceScale(self):\n \n return self._quickSetupChangeSpaceScale.value",
"def scale(spec='1', kb=1000):\n hits = re.findall(\"(\\d+)\\s*(\\w*)\", spec)\n if hits:\n (mag, unit) = hits[0]\n factor = map_size_unit(unit, kb=kb)\n rval = int(mag) * factor\n else:\n rval = 0\n return rval",
"def get_scale(units, compartmentId, volume, extracellularVolume):\r\n if compartmentId == 'c':\r\n V = volume\r\n else:\r\n V = extracellularVolume\r\n\r\n if units == 'uM':\r\n return 1. / N_AVOGADRO / V * 1e6\r\n elif units == 'mM':\r\n return 1. / N_AVOGADRO / V * 1e3\r\n elif units == 'molecules':\r\n return 1.\r\n else:\r\n raise Exception('Invalid units \"%s\"' % units)",
"def plate_scale(pixel_pitch):\n return np.arctan(pixel_pitch / FOCAL_LENGTH).to(u.arcsec)",
"def getScale(self):\n \n dag_node = OpenMaya.MFnDagNode(self.thisObj)\n transform_node = OpenMaya.MFnTransform(dag_node.parent( 0 ))\n \n util = OpenMaya.MScriptUtil()\n util.createFromDouble(0.0, 0.0, 0.0)\n pointeur = util.asDoublePtr()\n transform_node.getScale(pointeur)\n \n sx = util.getDoubleArrayItem(pointeur, 0)\n sy = util.getDoubleArrayItem(pointeur, 1)\n sz = util.getDoubleArrayItem(pointeur, 2)\n\n return sx, sy, sz",
"def scale(self):\n return self._a",
"def getScale(self):\n\t\tif self._scaleX != self._scaleY:\n\t\t\twarnings.warn(\"scaleX and scaleY have different values. Returning scaleX.\")\n\t\treturn self._scaleX",
"def getScale(self):\n return self.factor**self.turnOn"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
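The plate_scale row above divides 206265 arcsec (one radian) by the focal length expressed in millimetres, giving arcseconds of sky per millimetre at the focal plane. A worked example continuing the numbers from the previous sketch (the row imports astropy.units as uu; plain u is used here):

import astropy.units as u

f_ratio = 10
diameter = 200 * u.mm
focal_length = (f_ratio * diameter).to('mm')     # 2000 mm, as in the focal_length sketch

plate_scale = 206265 * u.arcsec / focal_length   # arcsec of sky per mm at the focal plane
print(plate_scale)                               # 103.1325 arcsec / mm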