query (string, lengths 9 to 9.05k) | document (string, lengths 10 to 222k) | negatives (list, lengths 19 to 20) | metadata (dict) |
---|---|---|---|
Test that choice list exists on initialization.
|
def test_initialization_has_choices(self):
forms = self.get_forms(self.data)
for dummy, form in forms.items():
for item in form.questionnaire:
if isinstance(item, OdkPrompt):
if item.odktype in item.select_types:
msg = 'No choices found in \'{}\'.'.format(item)
self.assertTrue(item.choices is not None, msg=msg)
|
[
"def test_creation_good():\n value = \"boo\"\n choices = [\"boo\", \"foo\"]\n choice = param.Choice(value=value, choices=choices)\n assert choice.value == value\n assert choice.choices == choices",
"def test_choice_validation_success(self):\n x = TestListFieldModel()\n x.list_field_choices = ['HELLO', 'WORLD']\n x.save()",
"def test_create_choices_objects_exist(self):\n # set selected model class with existing objects\n self.widget.model_class = TestModel\n # call the _create_choices method\n widgets.ObjectIdSelect._create_choices(self.widget)\n # check whether the list contains an empty choice\n self.assertIn(BLANK_CHOICE_DASH[0], self.widget.choices)\n # create choices\n choice1 = (str(self.object1.pk), self.object1)\n choice2 = (str(self.object2.pk), self.object2)\n # check whether the list contains both TestModel objects\n self.assertIn(choice1, self.widget.choices)\n self.assertIn(choice2, self.widget.choices)\n # check whether there are 3 choices so the list contains nothing\n # but two objects of the TestModel and an empty choice\n self.assertEqual(len(self.widget.choices), 3)",
"def test_create_choices_objects_do_not_exist(self):\n # set selected model class without existing objects\n self.widget.model_class = AnotherTestModel\n # call the _create_choices method\n widgets.ObjectIdSelect._create_choices(self.widget)\n # check whether the list contains only one choice\n self.assertEqual(len(self.widget.choices), 1)\n # check whether an empty choice presents in the list\n self.assertIn(BLANK_CHOICE_DASH[0], self.widget.choices)",
"def test_choice_validation_error(self):\n x = TestListFieldModel()\n\n with self.assertRaises(ValidationError):\n x.list_field_choices = ['HELLO', 'NOT_A_VALID_CHOICE']",
"def assert_choices(self, choices):\n self.assert_in_help('choices: %s' % choices)",
"def test_choice_defined_name(self):\n name = \"What's in a name?\"\n\n class ChoiceObj(smartchoices.Choices):\n MY_CHOICE = smartchoices.Choice(name=name)\n\n # We need the second element of the only tuple\n # from the choices.\n actual_name = ChoiceObj.choices[0][1]\n self.assertEqual(name, actual_name)",
"def test_fields_lazy_choice_field():\n field = LazyChoiceField(\n name=\"lazy_choice_field\",\n choices=[(\"option1\", \"Option 1\"), (\"option2\", \"Option 2\")],\n max_length=200,\n )\n\n errors = field.check()\n assert len(errors) == 0\n\n assert field.choices == []",
"def test_unhashable_choice_data():\n combo = widgets.ComboBox()\n assert not combo.choices\n combo.choices = (\"a\", \"b\", \"c\")\n assert combo.choices == (\"a\", \"b\", \"c\")\n combo.choices = ((\"a\", [1, 2, 3]), (\"b\", [1, 2, 5]))\n assert combo.choices == ([1, 2, 3], [1, 2, 5])\n combo.choices = (\"x\", \"y\", \"z\")\n assert combo.choices == (\"x\", \"y\", \"z\")\n combo.close()",
"def test_choice(self):\n \n iterations = 1000000\n \n choices = WeightedChoice()\n choices.add_choice(1, 1)\n choices.add_choice(2, 5)\n s = [ choices.choice() for x in range(iterations) ]\n self.assertAlmostEqual(s.count(1)/len(s), 0.1667, places=2)\n \n # add another choice, then check that all of the choices have been\n # sampled at the expecetd proportions\n choices.add_choice(3, 4)\n s = [ choices.choice() for x in range(iterations) ]\n self.assertAlmostEqual(s.count(1)/len(s), 0.100, places=2)\n self.assertAlmostEqual(s.count(2)/len(s), 0.500, places=2)\n self.assertAlmostEqual(s.count(3)/len(s), 0.400, places=2)\n \n # check that all the choices have been made from the inserted values\n self.assertEqual(set(s), set([1, 2, 3]))",
"def test_choice_name_default(self):\n\n class ChoiceObj(smartchoices.Choices):\n MY_CHOICE = smartchoices.Choice()\n\n # We need the second element of the only tuple\n # from the choices.\n actual_name = ChoiceObj.choices[0][1]\n self.assertEqual('MY_CHOICE', actual_name)",
"def test_choices(self):\n result = ColorTheme.get_color_themes_choices()\n\n # skip\n if not result:\n return\n self.assertIn(('default', 'Default'), result)",
"def test_choice_validation_error(self):\n x = TestIntegerListFieldModel()\n\n with self.assertRaises(ValidationError):\n x.int_list_field_choices = [1, 2]",
"def test_filter_choices(self):\n # create a choice of TestModel (gallery_visible=True)\n ctype = ContentType.objects.get_for_model(TestModel)\n test_choice = (str(ctype.pk), ctype.name)\n # create a choice of AnotherTestModel (gallery_visible=False)\n ctype = ContentType.objects.get_for_model(AnotherTestModel)\n another_choice = (str(ctype.pk), ctype.name)\n # create a choice of WrongTestModel (has not gallery_visible)\n ctype = ContentType.objects.get_for_model(WrongTestModel)\n wrong_choice = (str(ctype.pk), ctype.name)\n # create a mock widget object\n widget = mock.MagicMock(spec=widgets.ContentTypeSelect)\n # set initial choices\n widget.choices = [\n (\"\", \"----\"),\n test_choice,\n another_choice,\n wrong_choice\n ]\n # call the _filter_choices method\n widgets.ContentTypeSelect._filter_choices(widget)\n # check whether an empty choice is in the list\n self.assertIn((\"\", \"----\"), widget.choices)\n # check whether the TestModel choice is in the list\n self.assertIn(test_choice, widget.choices)\n # check whether the AnotherTestModel choice is not in the list\n self.assertNotIn(another_choice, widget.choices)\n # check whether the WrongTestModel choice is not in the list\n self.assertNotIn(wrong_choice, widget.choices)",
"def test_resource_available_languages(self):\r\n self.assertEqual(len(self.resource.available_languages), 3)\r\n self.assertEqual(len(self.resource.available_languages_without_teams), 2)",
"def validate(candidates, choices: List[str]):\n if isinstance(candidates, List):\n for candidate in candidates:\n assert candidate in choices, f\"Specified {candidate}, but not in available list: {choices}.\"\n else:\n assert candidates in choices, f\"Specified {candidates}, but not in available list: {choices}.\"\n return candidates",
"def __init__(self, choices, num_choices):\n self._real_choices = choices\n self._last_contains_check = None\n self._num_choices = num_choices",
"def test_survey_init() -> None:\n q1 = NumericQuestion(1, \"Pick num\", 1, 5)\n q2 = MultipleChoiceQuestion(2, \"Pick text\", [\"opt 1\", \"opt 2\"])\n q3 = CheckboxQuestion(3, \"Pick multiple\", [\"a\", \"b\", \"c\"])\n q4 = YesNoQuestion(4, \"T or F\")\n q_list = [q1, q2, q3, q4]\n\n my_survey = Survey(q_list)\n\n assert isinstance(my_survey._questions, Dict)\n assert isinstance(my_survey._criteria, Dict)\n assert isinstance(my_survey._weights, Dict)\n assert isinstance(my_survey._default_criterion, HomogeneousCriterion)\n assert my_survey._default_weight == 1\n\n assert q1.id in my_survey._questions\n assert q2.id in my_survey._questions\n assert q3.id in my_survey._questions\n assert q4.id in my_survey._questions",
"def test_init():\n instance = rule.RuleList()\n assert instance.rules == []",
"def test_initialization(self):\n list_field = ['hello', 'world', 5]\n list_field_not_required = ['again', 'testing', 10]\n\n x = TestListFieldModel(\n list_field=list_field,\n list_field_not_required=list_field_not_required\n )\n\n self.assertEqual(x.list_field, list_field)\n self.assertEqual(x.list_field_not_required, list_field_not_required)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test expected results of converted questionnaire based on position.
|
def test_questionnaire(self):
forms = self.get_forms(self.data)
for datum in self.data:
expected_output = datum['outputs']
output = \
forms[datum['inputs']['file']].questionnaire[datum['position']]
# - Check Object Representation
got = str(output)
expected = expected_output['repr']
msg = '\nGot: {}\nExpected: {}'.format(got, expected)
self.assertEqual(got, expected, msg)
# - Check Object Class
got = output
expected = expected_output['class']
msg = '\nGot: {}\nExpected: {}'.format(got, expected)
# noinspection PyTypeChecker
self.assertTrue(isinstance(got, expected), msg)
|
[
"def test_positions_my_position(self):\n pass",
"def test_correct_estimates(self):\n self.assertEqual(self.ajive.common.rank, 1)\n self.assertEqual(self.ajive.blocks['x'].individual.rank, 1)\n self.assertEqual(self.ajive.blocks['y'].individual.rank, 2)",
"def test_rank_translations(self):\n self.assertPyxformXform(\n md=\"\"\"\n | survey | | | | |\n | | type | name | label | label::French (fr) |\n | | rank mylist | order | Rank | Ranger |\n | choices| | | |\n | | list_name | name | label | label::French (fr) |\n | | mylist | a | A | AA |\n | | mylist | b | B | BB |\n \"\"\",\n xml__xpath_match=[\n xpc.model_instance_choices_itext(\"mylist\", (\"a\", \"b\")),\n xpq.body_odk_rank_itemset(\"order\"), # also an implicit test for xmlns:odk\n \"/h:html/h:head/x:model/x:bind[@nodeset='/test_name/order' and @type='odk:rank']\",\n # All itemset translations.\n xpc.model_itext_choice_text_label_by_pos(\"default\", \"mylist\", (\"A\", \"B\")),\n xpc.model_itext_choice_text_label_by_pos(\n \"French (fr)\", \"mylist\", (\"AA\", \"BB\")\n ),\n # No non-itemset translations.\n xpc.model_itext_no_text_by_id(\"default\", \"/test_name/order/a:label\"),\n xpc.model_itext_no_text_by_id(\"default\", \"/test_name/order/b:label\"),\n xpc.model_itext_no_text_by_id(\"French (fr)\", \"/test_name/order/a:label\"),\n xpc.model_itext_no_text_by_id(\"French (fr)\", \"/test_name/order/b:label\"),\n ],\n )",
"def _test_positions(self):\n if not self.test_antenna_positions:\n return\n if self._is_base_subset:\n for pos in self.antenna_positions:\n if pos[2]>0:\n raise ValueError(\"Antenna placed outside of ice may cause \"\n +\"unexpected issues\")\n else:\n for sub in self.subsets:\n if hasattr(sub, '_test_positions'):\n sub._test_positions()\n elif isinstance(sub, Iterable):\n for ant in sub:\n if ant.position[2]>0:\n raise ValueError(\"Antenna placed outside of ice \"+\n \"may cause unexpected issues\")\n else:\n if sub.position[2]>0:\n raise ValueError(\"Antenna placed outside of ice \"+\n \"may cause unexpected issues\")",
"def test_projection_logic(self):",
"def get_test_question_answer(self):\n query_string = \"\"\"\n {\n \"query\": {\n \"term\" : {\"test_completed\": false}\n }\n }\n \"\"\"\n answer_doc = None\n test_answer_es = Elasticsearch([self.application.es_test_host])\n search_results = test_answer_es.search(self.application.es_test_index,\n self.application.es_test_type,\n body=query_string, size=10)\n if search_results['hits']['total'] > 0:\n answer_doc = random.choice(search_results['hits']['hits'])\n\n if not answer_doc:\n return self.generate_done_message()\n\n answer = answer_doc['_source']['answer']\n test_answer_id = answer_doc['_id']\n c_id = answer_doc['_source']['c_id']\n\n query_string = \"\"\"\n {\n \"query\": {\n \"term\" : {\"c_id\": %s}\n }\n }\n \"\"\" % c_id\n test_question_es = Elasticsearch([self.application.es_test_question_host])\n search_results = test_question_es.search(\n self.application.es_test_question_index,\n self.application.es_test_question_type, body=query_string, size=1)\n question = search_results['hits']['hits'][0]['_source']['question']\n\n return (question, answer, test_answer_id)",
"def test_pos_2():\n assert meets_criteria(111123) == True, \"Should be True\"",
"def test_position_creation(self):\n self.assertEqual(self.posi1.name, 'Job1')\n self.assertEqual(self.posi1.project, self.proj1)\n self.assertEqual(self.posi1.user, self.user1)\n self.assertIn(self.skill1, self.posi1.skills.all())",
"def quiz(location, score):\n # Initialize lists of answers and questions\n quiz_questions = [\n [\n \"\"\"What is the correct formula to find the sum of the internal\n angles of a polygon:\"\"\",\n \"\"\"What is the correct formula to find the sum of the external\n angles of a polygon:\"\"\",\n \"\"\"Substiute u = 3 and t = 5 into the following equation:\n d = ut + 3t²\"\"\"\n ],\n [\n \"What part of speech is the word jump:\",\n \"\"\"What language feature is this:\n Go clean your room right now this instance you naughty little\n devil child!\"\"\", \"\"\"What type of poem is this:\n Go clean your room right\n now this instance you naughty\n little devil child!\"\"\"\n ],\n [\n \"How many credits does a Level 1 student in 2020 need:\",\n \"How many credits will a Level 2 student need next year:\"\n ]\n ]\n quiz_answers = [[[\"n - 2 * 180\", \"(n - 2)180\", \"n - 2 * 60\", \"360\", 1],\n [\"n * 60\", \"n + 3 * 180\", \"(n + 3)180\", \"360\", 3],\n [\"15\", \"30\", \"100\", \"90\", 3]],\n [[\"Noun\", \"Verb\", \"Adjective\", \"Adverb\", 1],\n [\"Hyperbole\", \"Rhetoric\", \"Imperative\", \"Sonnet\", 2],\n [\"Sonnet\", \"Haiku\", \"Limerick\", \"Free verse\", 1]],\n [[\"80\", \"60\", \"72\", \"70\", 3], [\"80\", \"60\", \"72\", \"52\", 1]]]\n # get the question answer, and score values to use based on the users\n # location\n current_questions = quiz_questions[location - 1]\n current_answers = quiz_answers[location - 1]\n score[location - 1] = 0\n while len(current_questions) > 0:\n # Run while there are still questions left\n rand_choice = rand(0, len(current_questions) - 1)\n # pick a random question and answer\n answer = current_answers[rand_choice][\n 4] # get the integer that 'points' to the correct answer\n answer_text = current_answers[rand_choice][\n answer] # feed this integer back in to get the text of the answer\n try:\n user_input = int(\n input(f\"\"\"\n {current_questions.pop(rand_choice)}\n 1) {current_answers[rand_choice].pop(0)}\n 2) {current_answers[rand_choice].pop(0)}\n 3) {current_answers[rand_choice].pop(0)}\n 4) {current_answers[rand_choice].pop(0)}\n [1-4]: \"\"\")) # give the user the randomly selected question and possible\n # answers\n except ValueError: # if the user doesn't put in an interger, skip the\n # question and give them the error message\n out_of_range_error(4)\n user_input = None # set user_input so the program doesn't break\n # delete the question from the master list, and take user input\n current_answers.pop(rand_choice)\n # get the answers to the randomly selected question\n if user_input in (1, 2, 3, 4): # check if the users input is valid\n if user_input - 1 == answer:\n input(\"\"\"\n You got it right\"\"\")\n score[location - 1] += 1\n else:\n input(f\"\"\"\n You got it wrong.\n The answer was:\n {answer_text}\"\"\")\n return score",
"def test_question_in_question_file(self):\n answer_file = run.read_questions()\n self.assertIn(\"What is always coming but never arrives?\", answer_file)\n self.assertIn(\"I am black when clean and white when dirty. What am I?\", answer_file)\n self.assertIn(\"Sometimes I walk in front of you. Sometimes I walk behind you. It is only in the dark that I ever leave you. What am I?\", answer_file)\n self.assertIn(\"What comes down but never goes up?\", answer_file)\n self.assertIn(\"What has a bed but doesn't sleep and a mouth but never eats?\", answer_file)\n self.assertIn(\"I'm always hungry and must be fed, the finger I touch will soon turn red, what am I?\", answer_file)\n self.assertIn(\"I can't be seen but I'm not a ghost. I can crack but I don't break. I can clap but I don't have any hands. I happen after a flash but I'm not a photo. I'm loud but I'm not music. What am I?\", answer_file)\n self.assertIn(\"Who makes it, has no need of it. Who buys it, has no use for it. Who uses it doesn't know it. What is it?\", answer_file)\n self.assertIn(\"I have seas without water, coasts without sand, towns without people and mountains without land. What am I?\", answer_file)\n self.assertIn(\"I don't have eyes, but once I did see. Once I had thoughts, but now I'm white and empty. What am I?\", answer_file)\n \n self.assertNotIn(\"This is not in\", answer_file )",
"def test_convert_document(self) -> None:\n # Each tuple contains an input for `BaseTestCase.convert_document()` and the\n # expected output\n documents = (\n ([\"a\", \"b\", \"c\"], [\"a\", \"b\", \"c\"]),\n ([\"a\", (\"b\", 2), \"a\"], [\"a\", \"b\", \"b\", \"a\"]),\n ([(\"a\", 10), (\"b\", 10)], [\"a\"] * 10 + [\"b\"] * 10),\n ([], []),\n ([(\"a\", 1), (\"b\", 1)], [\"a\", \"b\"]),\n ([(\"a\", 0)], []),\n )\n for document, expected in documents:\n with self.subTest(document=document, expected=expected):\n self.assertEqual(self.convert_document(document), expected)",
"def test_convert(scitype, from_mtype, to_mtype, fixture_index):\n # retrieve from/to fixture for conversion\n from_fixture = get_examples(\n mtype=from_mtype, as_scitype=scitype, return_lossy=True\n ).get(fixture_index)\n\n to_fixture = get_examples(\n mtype=to_mtype, as_scitype=scitype, return_lossy=True\n ).get(fixture_index)\n\n # retrieve indicators whether conversion makes sense\n # to-fixture is in example dict and is not None\n cond1 = to_fixture is not None and to_fixture[0] is not None\n # from-fixture is in example dict and is not None\n cond2 = from_fixture is not None and from_fixture[0] is not None\n # from-fixture is not None and not lossy\n cond3 = cond2 and from_fixture[1] is not None and not from_fixture[1]\n\n msg = (\n f\"conversion {from_mtype} to {to_mtype} failed for fixture {fixture_index}, \"\n \"expected result (y) and converted result (x) are not equal because: \"\n )\n\n # test that converted from-fixture equals to-fixture\n if cond1 and cond2 and cond3:\n converted_fixture_i = convert(\n obj=from_fixture[0],\n from_type=from_mtype,\n to_type=to_mtype,\n as_scitype=scitype,\n )\n\n equals, deep_equals_msg = deep_equals(\n converted_fixture_i,\n to_fixture[0],\n return_msg=True,\n )\n assert equals, msg + deep_equals_msg",
"def test_area_rec(self,rec,area,points):\n\n print \"Testing %s\" % rec.rule.name\n self.assertEqual(points,rec.points,\"Error: Wrong number of points for %s. Should have been %s, was %s\" % (rec.rule.name,points,rec.points))",
"def result_check(original_ans, player_answer, stage_no):\n\n feedback_score = 0\n question_ordinal = \"\"\n feedback = {0: \"\",\n 1: \"\"}\n no_of_result = get_element_amount_from_list(list(map(compare_answer, original_ans, player_answer)), element=True)\n print(\"Number of correct: \", no_of_result)\n\n if stage_no == 1:\n if no_of_result >= 2:\n feedback_score = 1\n question_ordinal = \"first\"\n\n elif stage_no == 2:\n no_of_result = len(list(set(original_ans) & set(player_answer)))\n if no_of_result >= 3:\n feedback_score = 1\n question_ordinal = \"second\"\n\n else:\n if no_of_result == 1:\n feedback_score = 1\n\n good_result = \"Congrat! you pass our \"+question_ordinal+\" test. You did a good job\"\n bad_result = \"Unfortunately, you failed the \" + question_ordinal + \" test. No worries, you still have \" + str(\n 3 - stage_no) + \" tests left.\"\n if stage_no == 3:\n bad_result = \"Unfortunately, you failed the\" + question_ordinal + \"test\"\n\n feedback.update({0: bad_result, 1: good_result})\n\n return feedback_score, feedback.get(feedback_score)",
"def test_linear_positions_my_position(self):\n pass",
"def test_split_pos(self):\n self.func2()\n for x in self.list_algo:\n rand1 = r.randint(-100, 100)*100\n rand2 = r.randint(-100, 100)*100\n check = [rand1,rand2]\n my_list = [rand1,rand2]\n if check[0] < 0:\n check[0] += 0.04\n else:\n check[0] -= 0.04\n if check[1] < 0:\n check[1] += 0.04\n else:\n check[1] -= 0.04\n\n result = x.checkValue(my_list)\n self.assertEqual(result[0] == check[0],result[1] == check[1])",
"def test_pests_problem_type(self):\n ble = SimulatorCulture.objects.filter(display_text='Blé dur').first()\n mais = SimulatorCulture.objects.filter(display_text='Maïs').first()\n chanvre = SimulatorCulture.objects.filter(display_text='Chanvre').first()\n\n answers = {\"problem\":\"DESHERBAGE\", \"rotation\": [ble.external_id, chanvre.external_id, mais.external_id]}\n engine = Engine(answers, [], [])\n practices = engine.calculate_results()\n response_items = engine.get_suggestions(practices)\n\n # We ensure there are three suggestions, and all three address weeds\n self.assertEqual(len(response_items), 3)\n for response_item in response_items:\n suggestion = response_item.practice\n self.assertIn(Problem['DESHERBAGE'].value, suggestion.problems_addressed)",
"def administer(self):\n\n score = 0\n total_questions = 0\n\n for question in self.questions:\n evaluation = question.ask_and_evaluate()\n total_questions += 1\n\n if evaluation:\n score += 1\n\n if score >= (total_questions / 2.0):\n return \"Pass\"\n else:\n return \"Fail\"",
"def test_score_per_pos(self):\r\n MPRA_tiles_input_file = \"InputTestFilesSection3/TrainingSets/tile_prom_region/HepG2/input_data/infile.txt\"\r\n output_file = \"InputTestFilesSection3/baseprediction_expected_outcome.txt\"\r\n experiment_cell_name = 'HepG2'\r\n x = WeightFeatures.score_per_pos(MPRA_tiles_input_file, output_file, experiment_cell_name, tile_length=145,\r\n region_pos_col=0,\r\n region_chr_index=3, cell_name_index=0, region_center_index=4,\r\n region_name_sep='_',\r\n values_start_index=1, sep='\\t')\r\n\r\n assert compare_files(output_file, \"InputTestFilesSection3/baseprediction_expected_outcome.txt\")\r\n return",
"def eval(self):\n\t\tcorrect = 0\n\t\ttry:\n\t\t\ttotal = self._analogy_questions.shape[0]\n\t\texcept AttributeError as e:\n\t\t\traise AttributeError(\"Need to read analogy questions.\")\n\t\tstart = 0\n\t\twhile start < total:\n\t\t\tlimit = start + 2500\n\t\t\tsub = self._analogy_questions[start:limit, :]\n\t\t\tidx = self._predict(sub)\n\t\t\tstart = limit\n\t\t\tfor question in xrange(sub.shape[0]):\n\t\t\t\tfor j in xrange(4):\n\t\t\t\t\tif idx[question, j] == sub[question, 3]:\n\t\t\t\t\t\tcorrect += 1\n\t\t\t\t\t\tbreak\n\t\t\t\t\telif idx[question, j] in sub[question, :3]:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\telse:\n\t\t\t\t\t\tbreak\n\t\tprint()\n\t\tprint(\"Eval %4d/%d accuracy = %4.1f%%\" % (correct, total, correct * 100.0 / total))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
takes a msg object from Python's email parser and formats it into a dictionary (which then becomes JSON that we can put in Redis)
|
def parse_msg(msg):
subject = msg.get("Subject")
return {
"subject": subject,
"sender": msg.get("Sender"),
"date": msg.get("Date"),
"size": len(bytes(msg)),
}
|
[
"def parse_message(message):\n return {\n \"msg\": message.message,\n \"sender\": message.sender.name,\n \"sent_on\": message.sent_on.strftime(\"%b %d %y - %H:%M\"),\n }",
"def parse_email(message):\n\n pass",
"def json_dumps(msg):\n return json.dumps(msg)",
"def parse_email(msg):\n # Process Art-Battle messages:\n if msg.subject.find(u'У вас новое письмо') != -1:\n m = re.match(u'.*Вам пришло новое письмо от пользователя <a href=\"http://tabun\\\\.everypony\\\\.ru/profile/(?P<user>.+?)/\".*'+\n u'Тема письма: <b>(?P<topic>.+?)</b>.*\"(?P<art_url>https?://.+?\\.(png|jpg)).*', msg.body_html, re.UNICODE|re.DOTALL)\n if m and is_art_battle_topic(m.group('topic')):\n ab = get_state().current_battle.get()\n if ab:\n art_url = imgurify(m.group('art_url'))\n ab.add_participant(m.group('user'), art_url, msg.time, msg.key)\n msg.read = True\n msg.put()\n logging.info(\"Successfully parsed Tabun email %d\" % msg.key.id())\n else:\n logging.error('No current Art-Battle')\n else:\n logging.error(\"Couldn't parse Tabun email %d\" % msg.key.id())",
"def getMsgDict(self):\n return self._msgDict or {}",
"def msg(self, msg_type, content=None, parent=None):\n msg = {}\n msg['header'] = self.msg_header()\n msg['parent_header'] = {} if parent is None else extract_header(parent)\n msg['msg_type'] = msg_type\n msg['content'] = {} if content is None else content\n return msg",
"def _parse_body(self, msg):\n # get the msg\n parts = msg.get_payload()\n msg = parts[-1]\n raw = msg.get_payload().strip()\n\n if self.regex:\n if isinstance(self.regex, re._pattern_type):\n m = self.regex.search(raw)\n if not m:\n raise ValueError('Bad regex!')\n body = m.groupdict()\n self._check_required(body)\n return body\n\n else:\n raise ValueError('Regex failed to compile!')\n\n else:\n return {'raw':raw}",
"def crds_decode(msg):\n if isinstance(msg, dict) and \"crds_encoded\" in msg:\n ascii = msg[\"crds_payload\"]\n b64 = ascii.encode(\"ascii\")\n compressed = base64.b64decode(b64)\n utf8 = gzip.decompress(compressed)\n json_str = utf8.decode()\n obj = json.loads(json_str)\n return obj\n else:\n return msg",
"def mogrify(topic, msg):\n return str(topic) + ' ' + json.dumps(msg)",
"def _parse_sns_message(self, sns_message):\n splitted_list = sns_message.split(PATTERN_LINESPLITTER)\n # Workaround for when the last parameter is not terminated with\n # the same separator pattern, then a closing quote might remain.\n if splitted_list[-1] != '' and splitted_list[-1][-1] == '\\'':\n # Cut the last character from the last item\n splitted_list[-1] = splitted_list[-1][:-1]\n result_dict = {}\n for line_item in splitted_list:\n line_item = line_item.strip()\n if PATTERN_KEYSPLITTER not in line_item:\n # Unparseable line, do not parse\n continue\n key, value = line_item.split(PATTERN_KEYSPLITTER, 1)\n result_dict[key] = self._cast_type(value)\n return result_dict",
"def _prepare_message(msg):\n msg_mime = MIMEText(msg, 'text', 'utf-8')\n msg_mime['From'] = Header(infomail.fromaddr, charset='utf-8')\n msg_mime['To'] = Header(', '.join(infomail.toaddrs),\n charset='utf-8')\n msg_mime['Subject'] = Header(\"VirtualBox images built\",\n charset='utf-8')\n return msg_mime",
"def get_message_dict(self):\n message_dict = {\n \"subject\": self.get_message_subject(),\n \"message\": self.get_message_body(),\n \"from_email\": defs.CONTACTWARE_DEFAULT_FROM_EMAIL,\n \"recipient_list\": defs.CONTACTWARE_DEFAULT_TO_EMAILS,\n }\n return message_dict",
"def CreateMessage(sender, to, subject, message_text):\n #message = MIMEText(message_text)\n message = MIMEText(message_text,'html')\n message['to'] = to\n message['from'] = sender\n message['subject'] = subject\n return {'raw': base64.urlsafe_b64encode(message.as_bytes()).decode()}",
"def serialize(self, msg):\n msg_header = msg.get_header()\n data = msg.get_data()\n json_str_obj = jsonpickle.encode(\n {\n 'header': msg_header,\n 'data': data,\n })\n return json_str_obj.encode() # return a byte stream",
"def jsonify(topic, msg):\n return topic + ' ' + json.dumps({'message':msg})",
"def json_load(msg):\n return json.loads(msg)",
"def process(email_msg: dict) -> dict:\n logger.debug('bn notification: %s', email_msg)\n\n # get template and fill in parts\n template = Path(f'{current_app.config.get(\"TEMPLATE_PATH\")}/BC-BN.html').read_text()\n filled_template = substitute_template_parts(template)\n\n # get filing and business json\n business = Business.find_by_identifier(email_msg['identifier'])\n filing_type = 'incorporationApplication'\n if business.legal_type in [Business.LegalTypes.SOLE_PROP.value, Business.LegalTypes.PARTNERSHIP.value]:\n filing_type = 'registration'\n filing = (Filing.get_a_businesses_most_recent_filing_of_a_type(business.id, filing_type))\n corp_type = CorpType.find_by_id(business.legal_type)\n\n # render template with vars\n jnja_template = Template(filled_template, autoescape=True)\n html_out = jnja_template.render(\n business=business.json(),\n entityDescription=corp_type.full_desc if corp_type else ''\n )\n\n # get recipients\n recipients = get_recipients(email_msg['option'], filing.filing_json, filing_type=filing_type)\n return {\n 'recipients': recipients,\n 'requestBy': 'BCRegistries@gov.bc.ca',\n 'content': {\n 'subject': f'{business.legal_name} - Business Number Information',\n 'body': html_out,\n 'attachments': []\n }\n }",
"def msg_structure(status=\"\", msg=\"\"):\n return {\n \"status\": status,\n \"msg\": msg\n }",
"def _read_message(data, msg):\n if msg.type in IGNORED_MESSAGES:\n data = _ignore(data, msg)\n elif msg.type == 'time_signature':\n # NOTE: right now we're only handling fours\n if msg.numerator == 4 and msg.denominator == 4:\n data = _dict_update(\n data,\n clocks_per_click=msg.clocks_per_click,\n notated_32nd_notes_per_beat=msg.notated_32nd_notes_per_beat)\n else:\n raise TimeSignatureException('not 4/4')\n elif msg.type == 'note_on':\n data = _note_on_update(data, msg)\n elif msg.type == 'note_off':\n data = _note_off_update(data, msg)\n\n return data"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
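A minimal usage sketch for the parse_msg document in the row above, assuming parse_msg is in scope and using only Python's standard-library email parser; the sample headers below are hypothetical and not taken from the dataset.

from email.parser import BytesParser
from email.policy import default
import json

raw = (
    b"Subject: Hello\r\n"
    b"Sender: alice@example.com\r\n"
    b"Date: Mon, 01 Jan 2024 00:00:00 +0000\r\n"
    b"\r\n"
    b"Body text\r\n"
)
msg = BytesParser(policy=default).parsebytes(raw)  # email.message.EmailMessage
record = parse_msg(msg)       # {'subject': 'Hello', 'sender': ..., 'date': ..., 'size': ...}
payload = json.dumps(record)  # JSON string that could then be stored in Redis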
parse the given list of Models to Document instances
|
def parse_to_documents(self, models):
return map(self.parse_to_document, models)
|
[
"def parse(cls, path: str) -> List[QuoteModel]:\n if not cls.can_ingest(path):\n file_type = path.split(\".\")[-1]\n raise Exception(f\"Documents of file type {file_type} cannot be ingested\")\n\n outputs = []\n document = docx.Document(path)\n\n for paragraph in document.paragraphs:\n body, author = paragraph.text.split(\" - \")\n outputs.append(QuoteModel(body, author))\n\n return outputs",
"def create_instances_from_document( # 新增的方法\n # 目标按照RoBERTa的思路,使用DOC-SENTENCES,并会去掉NSP任务: 从一个文档中连续的获得文本,直到达到最大长度。如果是从下一个文档中获得,那么加上一个分隔符\n # document即一整段话,包含多个句子。每个句子叫做segment.\n # 给定一个document即一整段话,生成一些instance.\n all_documents, document_index, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, vocab_words, rng):\n document = all_documents[document_index]\n\n # Account for [CLS], [SEP], [SEP]\n\n instances = []\n raw_text_list_list = get_raw_instance(document, max_seq_length) # document即一整段话,包含多个句子。每个句子叫做segment.\n for j, raw_text_list in enumerate(raw_text_list_list):\n raw_text_list = get_new_segment(raw_text_list) # 结合分词的中文的whole mask设置即在需要的地方加上“##”\n # 1、设置token, segment_ids\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in raw_text_list:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n # 2、调用原有的方法\n (tokens, masked_lm_positions,\n masked_lm_labels) = create_masked_lm_predictions(\n tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng)\n instance = TrainingInstance(\n tokens=tokens,\n segment_ids=segment_ids,\n masked_lm_positions=masked_lm_positions,\n masked_lm_labels=masked_lm_labels)\n instances.append(instance)\n\n return instances",
"def docParse(self):\n text = self.text\n text = self.simplify(text)\n nlp = self.nlp\n full_doc = nlp(text)\n \n # Slit into sentences and find Simple sentences\n sent_doc_ls = list(sent for sent in full_doc.sents)\n spl_ls = self.simple_find(sent_doc_ls)\n doc_ls = list(nlp.pipe(spl_ls))\n\n print(\"Finding triples (Subject-Verb-Object) from your doc...\\n\")\n # Our triples will be (ent1, rel, ent2)\n triples = self.all_triples(doc_ls) \n return triples",
"def assemble_model_pipeline(\n dataset: SummDataset, model_list: List[SummModel] = SUPPORTED_SUMM_MODELS\n) -> List[Tuple[SummModel, str]]:\n\n dataset = dataset if isinstance(dataset, SummDataset) else dataset()\n\n single_doc_model_list = list(\n filter(\n lambda model_cls: not (\n model_cls.is_dialogue_based\n or model_cls.is_query_based\n or model_cls.is_multi_document\n ),\n model_list,\n )\n )\n single_doc_model_instances = [\n model_cls(get_lxr_train_set(dataset))\n if model_cls == LexRankModel\n else model_cls()\n for model_cls in single_doc_model_list\n ]\n\n multi_doc_model_list = list(\n filter(lambda model_cls: model_cls.is_multi_document, model_list)\n )\n\n query_based_model_list = list(\n filter(lambda model_cls: model_cls.is_query_based, model_list)\n )\n\n dialogue_based_model_list = list(\n filter(lambda model_cls: model_cls.is_dialogue_based, model_list)\n )\n dialogue_based_model_instances = (\n [model_cls() for model_cls in dialogue_based_model_list]\n if dataset.is_dialogue_based\n else []\n )\n\n matching_models = []\n if dataset.is_query_based:\n if dataset.is_dialogue_based:\n for query_model_cls in query_based_model_list:\n for dialogue_model in dialogue_based_model_list:\n full_query_dialogue_model = query_model_cls(\n model_backend=dialogue_model\n )\n matching_models.append(\n (\n full_query_dialogue_model,\n f\"{query_model_cls.model_name} ({dialogue_model.model_name})\",\n )\n )\n else:\n for query_model_cls in query_based_model_list:\n for single_doc_model in single_doc_model_list:\n full_query_model = (\n query_model_cls(\n model_backend=single_doc_model,\n data=get_lxr_train_set(dataset),\n )\n if single_doc_model == LexRankModel\n else query_model_cls(model_backend=single_doc_model)\n )\n matching_models.append(\n (\n full_query_model,\n f\"{query_model_cls.model_name} ({single_doc_model.model_name})\",\n )\n )\n return matching_models\n\n if dataset.is_multi_document:\n for multi_doc_model_cls in multi_doc_model_list:\n for single_doc_model in single_doc_model_list:\n full_multi_doc_model = (\n multi_doc_model_cls(\n model_backend=single_doc_model, data=get_lxr_train_set(dataset)\n )\n if single_doc_model == LexRankModel\n else multi_doc_model_cls(model_backend=single_doc_model)\n )\n matching_models.append(\n (\n full_multi_doc_model,\n f\"{multi_doc_model_cls.model_name} ({single_doc_model.model_name})\",\n )\n )\n return matching_models\n\n if dataset.is_dialogue_based:\n return list(\n map(\n lambda db_model: (db_model, db_model.model_name),\n dialogue_based_model_instances,\n )\n )\n\n return list(\n map(lambda s_model: (s_model, s_model.model_name), single_doc_model_instances)\n )",
"def parse(cls, path: str) -> List[QuoteModel]:\n if not cls.can_ingest(path):\n file_type = path.split(\".\")[-1]\n raise Exception(f\"Documents of file type {file_type} cannot be ingested\")\n\n bodies = []\n authors = []\n with open(path, \"r\") as f:\n lines = f.readlines()\n\n for line in lines:\n bodies.append(line.split(\" - \")[0])\n authors.append(line.split(\" - \")[1])\n\n return [QuoteModel(body, author) for body, author in zip(bodies, authors)]",
"def store_document(dataset: List[Dict[str, str]]) -> List[Document]:\n documents: List[Document] = []\n for row in dataset:\n doc_row: Document = Document(\n text=row[\"text\"],\n meta={\"name\": row[\"title\"] or \"\"},\n )\n documents.append(doc_row)\n\n return documents",
"def test_all_docs_result_serialization(self):\n\n # Construct dict forms of any model objects needed in order to build this model.\n\n attachment_model = {} # Attachment\n attachment_model['content_type'] = 'testString'\n attachment_model['data'] = 'VGhpcyBpcyBhIG1vY2sgYnl0ZSBhcnJheSB2YWx1ZS4='\n attachment_model['digest'] = 'testString'\n attachment_model['encoded_length'] = 0\n attachment_model['encoding'] = 'testString'\n attachment_model['follows'] = True\n attachment_model['length'] = 0\n attachment_model['revpos'] = 1\n attachment_model['stub'] = True\n\n revisions_model = {} # Revisions\n revisions_model['ids'] = ['testString']\n revisions_model['start'] = 1\n\n document_revision_status_model = {} # DocumentRevisionStatus\n document_revision_status_model['rev'] = 'testString'\n document_revision_status_model['status'] = 'available'\n\n document_model = {} # Document\n document_model['_attachments'] = {}\n document_model['_conflicts'] = ['testString']\n document_model['_deleted'] = True\n document_model['_deleted_conflicts'] = ['testString']\n document_model['_id'] = 'testString'\n document_model['_local_seq'] = 'testString'\n document_model['_rev'] = 'testString'\n document_model['_revisions'] = revisions_model\n document_model['_revs_info'] = [document_revision_status_model]\n document_model['foo'] = { 'foo': 'bar' }\n\n docs_result_row_value_model = {} # DocsResultRowValue\n docs_result_row_value_model['rev'] = 'testString'\n\n docs_result_row_model = {} # DocsResultRow\n docs_result_row_model['caused_by'] = 'testString'\n docs_result_row_model['error'] = 'testString'\n docs_result_row_model['reason'] = 'testString'\n docs_result_row_model['doc'] = document_model\n docs_result_row_model['id'] = 'testString'\n docs_result_row_model['key'] = 'testString'\n docs_result_row_model['value'] = docs_result_row_value_model\n\n # Construct a json representation of a AllDocsResult model\n all_docs_result_model_json = {}\n all_docs_result_model_json['total_rows'] = 0\n all_docs_result_model_json['rows'] = [docs_result_row_model]\n all_docs_result_model_json['update_seq'] = 'testString'\n\n # Construct a model instance of AllDocsResult by calling from_dict on the json representation\n all_docs_result_model = AllDocsResult.from_dict(all_docs_result_model_json)\n assert all_docs_result_model != False\n\n # Construct a model instance of AllDocsResult by calling from_dict on the json representation\n all_docs_result_model_dict = AllDocsResult.from_dict(all_docs_result_model_json).__dict__\n all_docs_result_model2 = AllDocsResult(**all_docs_result_model_dict)\n\n # Verify the model instances are equivalent\n assert all_docs_result_model == all_docs_result_model2\n\n # Convert model instance back to dict and verify no loss of data\n all_docs_result_model_json2 = all_docs_result_model.to_dict()\n assert all_docs_result_model_json2 == all_docs_result_model_json",
"def save_document_vectors(url, database, posts, texts, model):\n client = MongoClient(url)\n db = client[database]\n posts[\"prepared_body\"] = texts\n for index in tqdm(posts.index):\n post = posts.loc[index]\n inferred_vector = model.infer_vector(post[\"prepared_body\"], steps=DOC2VEC_STEPS, alpha=DOC2VEC_ALPHA)\n db.comment.update_one({'_id': post[\"post_permlink\"][1:]}, {'$set': {'inferred_vector': inferred_vector.tolist()}})",
"def parse_docs(self):\n self.docs = []\n self.fstoi = {UNK_TOKEN: 0}\n self.fitos = {0: UNK_TOKEN}\n self.idx2multi = {}\n self.multi2idx = {}\n for line in self.text:\n line = line.strip()\n if self.byte_fmt:\n line = line.decode(\"utf-8\")\n doc_words = []\n doc_feats = []\n doc_multifeats = []\n for tok in line.split(\" \"):\n word, *feats = tok.split(\"|\")\n word_n = self.stoi.get(word.lower(), self.stoi[\"UNK\"])\n feats = dict(zip(self.cnames, feats))\n feats_p = []\n multifeats_p = []\n for fn, f in feats.items():\n if self.is_multi(fn):\n fs = f.split(\";\")\n fs_n = []\n for f in fs:\n # First assign global feature id\n f = f\"{fn}:{f}\"\n if f not in self.fstoi:\n new_n = len(self.fstoi)\n self.fstoi[f] = new_n\n self.fitos[new_n] = f\n f_n = self.fstoi[f]\n\n # Next map it to a one hot index\n if f_n not in self.multi2idx:\n new_n = len(self.multi2idx)\n self.multi2idx[f_n] = new_n\n self.idx2multi[new_n] = f\n\n fs_n.append(f_n)\n self.cnames2fis[fn].add(f_n)\n multifeats_p.append(fs_n)\n else:\n if fn == \"lemma\":\n # Lowercase lemmas\n f = f.lower()\n if not f:\n f = UNK_TOKEN\n else:\n f = f\"{fn}:{f}\"\n if f not in self.fstoi:\n new_n = len(self.fstoi)\n self.fstoi[f] = new_n\n self.fitos[new_n] = f\n f_n = self.fstoi[f]\n feats_p.append(f_n)\n # Update feature name\n self.cnames2fis[fn].add(f_n)\n doc_words.append(word_n)\n doc_feats.append(feats_p)\n doc_multifeats.append(multifeats_p)\n self.docs.append((doc_words, doc_feats, doc_multifeats))",
"def get_documents(corpus, list_doc_ids):\n # XML parse code adapted from\n # https://stackabuse.com/reading-and-writing-xml-files-in-python/\n corpus_filename = config.CORPUS[corpus]['corpusxml']\n if not os.path.isfile(corpus_filename):\n print(corpus_filename + ' does not exist')\n return []\n tree = xml.parse(corpus_filename)\n root = tree.getroot()\n doc_list = []\n #list_doc_ids is a list of (doc_id, score) pairs\n for doc in list_doc_ids:\n doc_id = doc[0]\n # print(doc_id)\n # print(doc[1])\n # print(root[doc_id][0].text)\n if root[doc_id][1].text == None:\n root[doc_id][\n 1].text = ' // There is no title information available. Reuters did not supply any title information for this article. //'\n if root[doc_id][2].text == None:\n root[doc_id][\n 2].text = '// There is no text body information available. Reuters did not supply any body text for this article. //'\n # print(root[doc_id][1].text)\n # print(root[doc_id][2].text)\n if corpus==config.UOTTAWA:\n doc_to_add = Document(doc_id, doc[1],\n root[doc_id][0].text + ' ' + root[doc_id][1].text,\n root[doc_id][2].text, [])\n doc_list.append(doc_to_add)\n elif corpus ==config.REUTERS:\n if root[doc_id][3].text == None:\n root[doc_id][\n 3].text = '// There is no topic information available. Reuters did not supply any body text for this article. //'\n\n doc_to_add = Document(doc_id, doc[1],\n root[doc_id][0].text + ' ' + root[doc_id][1].text,\n root[doc_id][2].text,root[doc_id][3].text)\n doc_list.append(doc_to_add)\n\n\n\n return doc_list",
"def test_event_dbmodels_to_es_documents(es):\n events = EventFactory.create_batch(2)\n\n result = ESEvent.db_objects_to_es_documents(events)\n\n assert len(list(result)) == len(events)",
"def import_doc(client: Client, input: list[str]):\n if not client.is_connected:\n ctx = click.get_current_context()\n ctx.fail(\"Import failed: Not connected to a neo4j instance.\")\n for fp in input:\n graph = read_doc(fp)\n client.import_doc(graph)",
"def train_model():\n documents = _get_all_articles()\n\n model = Doc2Vec(documents, vector_size=DOC2VEC_VECTOR_DIMENSION, window=2, min_count=1, workers=4)\n model.delete_temporary_training_data(True, True)\n model.save(DOC2VEC_MODEL_FILE_PATH)",
"def read_models():\n unigram = open(unigram_file, 'rb')\n bigram = open(bigram_file, 'rb')\n delete = open(del_file, 'rb')\n insert = open(ins_file, 'rb')\n subs = open(sub_file, 'rb')\n trans = open(trans_file, 'rb')\n count = open(count_file, 'rb')\n unigram_counts = cPickle.load(unigram)\n bigram_counts = cPickle.load(bigram)\n term_count = len(unigram_counts)\n del_dic = cPickle.load(delete)\n ins_dic = cPickle.load(insert)\n sub_dic = cPickle.load(subs)\n trans_dic = cPickle.load(trans)\n count_dic = cPickle.load(count)",
"def parse(cls, path: str) -> List[QuoteModel]:\n if not cls.can_ingest(path):\n file_type = path.split(\".\")[-1]\n raise Exception(f\"Documents of file type {file_type} cannot be ingested\")\n\n with open(path, \"rb\") as f:\n pdf = pdftotext.PDF(f)\n\n quotes = pdf[0].split(\"\\n\")\n quotes = [quote for quote in quotes if \" - \" in quote]\n\n outputs = []\n for quote in quotes:\n try:\n body, author = quote.split(\" - \")\n outputs.append(QuoteModel(body, author))\n return outputs\n except Exception as e:\n print(e)",
"def doc_transform(doc_batch):\n docs = []\n for d in doc_batch:\n words = []\n for s in d:\n words += s\n docs.append(words)\n # nw = len(words)\n return docs",
"def get_documents():\n\n DB_USER = app.config.get('DB_USER', 'postgres')\n DB_PASSWORD = app.config.get('DB_PASSWORD', 'dbpass')\n DB_NAME = app.config.get('DB_NAME', 'envirolens')\n\n DB.connect(\n database=DB_NAME,\n user=DB_USER,\n password=DB_PASSWORD\n )\n\n if DB.cursor is None:\n return jsonify({'Error' : 'The connection could not be established'})\n\n document_ids = request.json.get('document_ids', None)\n\n # If the \"document_ids\" parameter was not set:\n if document_ids is None:\n return jsonify(\n {'Message' : 'You need to provide json with \"document_ids\" : [list of documents ids] value'}\n )\n\n statement = \"\"\"SELECT * FROM documents WHERE document_id IN %s;\"\"\"\n DB.cursor.execute(statement, (tuple(document_ids), )) \n\n # Enumerating the fields\n num_fields = len(DB.cursor.description)\n field_names = [i[0] for i in DB.cursor.description]\n documents = [{ field_names[i]: row[i] for i in range(num_fields) } for row in DB.cursor.fetchall()]\n \n # Cleaning the ouput:\n # - removing fulltext field\n # - slicing down the fulltext_cleaned field to 500 chars\n # - we return only the first 10 results\n for i in range(len(documents)):\n if documents[i]['fulltext_cleaned'] is not None:\n documents[i]['fulltext_cleaned'] = documents[i]['fulltext_cleaned'][:500]\n documents[i].pop('fulltext')\n\n DB.disconnect()\n\n return jsonify(documents[:10])",
"def parse_doc(self, doc_as_list):\n\n tweet_id = doc_as_list[0]\n tweet_date = doc_as_list[1]\n full_text = doc_as_list[2]\n url = doc_as_list[3]\n retweet_text = doc_as_list[4]\n retweet_url = doc_as_list[5]\n quote_text = doc_as_list[6]\n quote_url = doc_as_list[7]\n term_dict = {}\n tokenized_text = self.parse_sentence(full_text,tweet_id)\n #tokenized_text = [x for x in tokenized_text if x]\n strtemp = \"\"\n '''for i in range(len(tokenized_text)):\n if ((tokenized_text[i][0]).isupper()):\n if (strtemp == \"\"):\n strtemp = tokenized_text[i]\n else:\n strtemp += \" \" + tokenized_text[i]\n else:\n if (strtemp != \"\"):\n if (self.personadic.get(strtemp)):\n if(tweet_id not in self.personadic[strtemp]):\n self.personadic[strtemp].append(tweet_id)\n else:\n self.personadic[strtemp] = [tweet_id]\n strtemp = \"\"\n if (self.personadic.get(strtemp)):\n if (tweet_id not in self.personadic[strtemp]):\n self.personadic[strtemp].append(tweet_id)\n else:\n self.personadic[strtemp] = [tweet_id]'''\n\n\n doc_length = len(tokenized_text) # after text operations.\n ''' if self.doc2bowcount==0:\n self.dictionary = Dictionary([tokenized_text])\n else:\n self.dictionary.add_documents([tokenized_text])\n\n self.tweet2doc[self.tweetcount]=tweet_id\n self.tweetcount+=1\n self.tweetlist+=[tokenized_text]\n self.doc2bowcount+=1\n if(self.doc2bowcount==100000):\n self.dictionary.save('dictionary'+str(self.tweetcount))\n utils.save_obj(self.tweetlist,'tweetlist'+str(self.tweetcount))\n self.tweetlist=[]\n self.doc2bowcount=0'''\n\n maxFrecinDoc= 0\n docWordCount=0\n for term in tokenized_text:\n if(len(term)<2):\n continue\n if term not in term_dict.keys():\n term_dict[term] = 1\n else:\n term_dict[term] += 1\n if(maxFrecinDoc<term_dict[term]):\n maxFrecinDoc= term_dict[term]\n infoForDoc = maxFrecinDoc\n document = Document(tweet_id, tweet_date, full_text, url, retweet_text, retweet_url, quote_text,\n quote_url, term_dict, doc_length, infoForDoc)\n\n return document",
"def _extract_models(self, name, from_dict):\n\n # Extract all the model list\n mlist = self._obj.get(name, [])\n\n # Convert the model from dictionary to concreate\n # python class for the model.\n mlist = [from_dict(d) for d in mlist]\n\n # Dictionaries for file mappings\n mmap = {}\n\n # For each experiment, check the model is not specified by\n # a path, if it is then get the dictionary of the model\n # and insert it into the list. Replace the path reference\n # with an index\n for eobj in self._obj[\"experiment\"]:\n value = eobj.get(name)\n if value is None:\n continue\n elif isinstance(value, str):\n if value not in mmap:\n mmap[value] = len(mlist)\n mlist.append(\n from_dict(_experimentlist_from_file(value, self._directory))\n )\n eobj[name] = mmap[value]\n elif not isinstance(value, int):\n raise TypeError(\"expected int or str, got %s\" % type(value))\n\n return mlist"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
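A small self-contained sketch of how parse_to_documents from the row above might be used; the Document and Parser classes here are stand-ins invented for illustration. The one behavioural point worth noting is that in Python 3 map() is lazy, so callers usually wrap the result in list().

class Document:
    def __init__(self, title):
        self.title = title

class Parser:
    def parse_to_document(self, model):
        # turn one model (here just a dict) into a Document instance
        return Document(title=model["title"])

    def parse_to_documents(self, models):
        # same body as the document above: map each model through the parser
        return map(self.parse_to_document, models)

docs = list(Parser().parse_to_documents([{"title": "a"}, {"title": "b"}]))  # materialise the lazy map
assert [d.title for d in docs] == ["a", "b"]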
given a model, a field name (which can include lookups like 'client__name', 'client__goal__name', etc.), and the field_meta object for the immediate field related to field_name (for the simple case of 'name' this is the 'name' field meta object; for the complex case of 'client__name' it is the 'client' field meta object, and for 'client__goal__name' it is also the 'client' field meta object), parse the value of the field given by field_name from the model and return it
|
def parse_field(self, model, field_name, field_meta):
if field_meta.concrete and not (field_meta.is_relation or field_meta.one_to_one or field_meta.many_to_one or field_meta.one_to_many or field_meta.many_to_many):
# concrete field
return getattr(model, field_name)
elif field_meta.many_to_many:
# many to many
return list(getattr(model, field_name).values_list('id', flat=True))
elif field_meta.one_to_many:
# one to many
return list(getattr(model, field_name).values_list('id', flat=True))
elif field_meta.one_to_one or field_meta.many_to_one or field_meta.related_model:
# can be one-to-one, many-to-one, these we have to look for related lookups on
field_parts = self._related_lookup_parts(field_name)
if field_parts:
related_model = getattr(model, field_parts[0])
return self.parse_field(related_model, '__'.join(field_parts[1:]), related_model._meta.get_field(field_parts[1]))
else:
return getattr(model, '%s_id' % field_name)
|
[
"def _get_field_by_name(model, field):\n field_dict = {x.name: x for x in model._meta.get_fields()} # noqa\n return field_dict[field]",
"def _get_field_from_name(model, field_name):\n try:\n return model._meta.get_field(field_name)\n except FieldDoesNotExist:\n return getattr(model, field_name)",
"def get_field(model, name):\n return model._meta.get_field(name)",
"def get_field_by_relation_path(model, field_path):\n def get_related_model(model, field_name):\n related_model = model._meta.get_field(field_name).related_model\n return related_model\n\n hops = field_path.split('__')\n relation_hops, dst_field = hops[:-1], hops[-1]\n for field_name in relation_hops:\n model = get_related_model(model, field_name)\n found_field = model._meta.get_field(dst_field)\n return found_field",
"def get_field(self, field_name):\n for field in self.fields:\n if field.name == field_name:\n return field\n\n return None",
"def get_model_field(model, fieldspec):\n opts = model._meta\n if not isinstance(fieldspec, tuple):\n fieldspec = fieldspec.split(LOOKUP_SEP)\n rel = None\n for (i, name) in enumerate(fieldspec):\n if (i > 0):\n if not isinstance(rel, (ForeignKey, OneToOneField)):\n return None\n opts = rel.related_model._meta\n try:\n rel = opts.get_field(name)\n except FieldDoesNotExist:\n return None\n return rel",
"def get_recursed_field_value(obj, field=\"\"):\n fields = field.split(\"__\")\n while fields:\n field = fields.pop(0)\n if field:\n try:\n obj = getattr(obj, field)\n except AttributeError:\n return \"\"\n\n return obj",
"def get_field(obj, fieldspec):\n for f in fieldspec.split(LOOKUP_SEP):\n if (obj is None):\n return AbnormalValues.NULL_REF\n if not isinstance(obj, models.Model):\n raise TypeError('Expected a Django model')\n obj = getattr(obj, f, None)\n return obj",
"def _get_field(self, field_name):\n if field_name.startswith(\"extra__\"):\n raise ValueError(\n f\"Got prefixed name {field_name}; please remove the 'extra__kubernetes__' prefix \"\n f\"when using this method.\"\n )\n if field_name in self.conn_extras:\n return self.conn_extras[field_name] or None\n prefixed_name = f\"extra__kubernetes__{field_name}\"\n return self.conn_extras.get(prefixed_name) or None",
"def get_custom_field_value(obj, field_slug):\r\n if not callable(getattr(obj, 'custom_fields')):\r\n return None\r\n\r\n if not obj.custom_fields():\r\n return None\r\n\r\n return obj.custom_fields().get(field_slug)",
"def __extractField(self, raw: dict, name: str):\n if not 'fields' in raw:\n return None\n fields = raw['fields']\n if not name in fields:\n return None\n return fields[name]",
"def try_get_field(self, field_name: str) -> Optional[fields.Field]:\n prefix = field_name.split(\"$\")[0]\n if prefix not in self.field_prefix_map:\n return None\n\n field = self.field_prefix_map[prefix]\n if isinstance(field, fields.BaseTemplateField):\n # We use the regex here since we want to also match template fields.\n if \"$\" in field_name and not re.match(field.get_regex(), field_name):\n return None\n return field",
"def get_field(self, field_name):\n\n field_names = field_name.split('.')\n return _find_field(self.__msg, field_names)",
"def _get_field(extras: dict, field_name: str):\n backcompat_prefix = \"extra__dataprep__\"\n if field_name.startswith(\"extra__\"):\n raise ValueError(\n f\"Got prefixed name {field_name}; please remove the '{backcompat_prefix}' prefix \"\n \"when using this method.\"\n )\n if field_name in extras:\n return extras[field_name] or None\n prefixed_name = f\"{backcompat_prefix}{field_name}\"\n return extras.get(prefixed_name) or None",
"def get_field(self, name):\n return self._fields[name]",
"def _getModelField(self):\r\n if not self.instance:\r\n return None\r\n modelField = getattr(self.instance.__class__._meta, \"get_field_by_name\")(self.instanceFieldName)[0]\r\n return modelField",
"def _meta(self, field, **kwargs):\n try:\n return self.meta[field][0]\n except (KeyError, IndexError):\n if 'default' in kwargs:\n return kwargs['default']\n else:\n raise KeyError('Required metadata not found: %s' % field)",
"def field(self):\n\n _field = self.model._meta.fields.get(self.field_name, None)\n\n if isinstance(self._accessor, six.text_type):\n spec = self._accessor\n if spec[0] == ':':\n key_paths = spec[1:].split('.')\n # can be used to access nested JSONField\n for p in key_paths:\n try:\n p = int(p)\n except ValueError:\n pass\n _field = _field[p]\n elif callable(self._accessor):\n _field = self._accessor(_field)\n\n ctx = self.model._meta.database.get_sql_context()\n if self.field_type:\n _field = _field.cast(self.field_type().ddl_datatype(ctx).sql)\n\n return _field",
"def get_field_value(instance, field_name):\n return str(getattr(instance, field_name))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
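To make the field_meta convention described in the query above concrete, here is a hypothetical sketch; the Client and Invoice models are invented for illustration and the snippet assumes Django is installed. For the lookup 'client__name', the caller passes the meta object of the first segment ('client'), and parse_field then resolves the relation and recurses with the remaining 'name' part.

import django
from django.conf import settings

settings.configure(INSTALLED_APPS=[], DATABASES={})
django.setup()

from django.db import models

class Client(models.Model):
    name = models.CharField(max_length=100)
    class Meta:
        app_label = "demo"

class Invoice(models.Model):
    client = models.ForeignKey(Client, on_delete=models.CASCADE)
    class Meta:
        app_label = "demo"

# field_meta for the lookup 'client__name' is the meta of its first segment
field_meta = Invoice._meta.get_field("client")
print(field_meta.is_relation, field_meta.many_to_one)  # True True
# parse_field() would then resolve invoice.client and recurse with 'name'
# against Client._meta.get_field('name'), a plain concrete field.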
search through the project's installed apps, looking in each for the presence of a jekyll.py file (or whatever the overridden name is in config.JEKYLL_COLLECTIONS_FILENAME)
|
def discover_collections():
collections = []
apps = config.JEKYLL_COLLECTIONS_INCLUDE_APPS or settings.INSTALLED_APPS
for app in apps:
try:
jekyll_collection_module = __import__('%s.%s' % (app, config.JEKYLL_COLLECTIONS_MODULE), fromlist=[app])
except ImportError:
continue
for name, cls in inspect.getmembers(jekyll_collection_module):
if inspect.isclass(cls) and cls != JekyllCollection and issubclass(cls, JekyllCollection):
collections.append(cls())
return collections
|
[
"def detect_flask_apps():\n\n matches = []\n for root, dirnames, filenames in os.walk(os.getcwd()):\n for filename in fnmatch.filter(filenames, \"*.py\"):\n full = os.path.join(root, filename)\n if \"site-packages\" in full:\n continue\n\n full = os.path.join(root, filename)\n\n with io.open(full, \"r\", encoding=\"utf-8\") as f:\n lines = f.readlines()\n for line in lines:\n app = None\n\n # Kind of janky..\n if \"= Flask(\" in line:\n app = line.split(\"= Flask(\")[0].strip()\n if \"=Flask(\" in line:\n app = line.split(\"=Flask(\")[0].strip()\n\n if not app:\n continue\n\n package_path = full.replace(os.getcwd(), \"\")\n package_module = package_path.replace(os.sep, \".\").split(\".\", 1)[1].replace(\".py\", \"\")\n app_module = package_module + \".\" + app\n\n matches.append(app_module)\n\n return matches",
"def apps() -> List[str]:\n with Configuration() as config:\n return config.get_apps()",
"def _load_all_apps(self, ctx: Context) -> None:\n for path in self.apps_dir.iterdir():\n if path.stem.startswith('_'):\n continue\n\n self.load_app(path.stem)",
"def getAppsOnPage(url, baseUrl):\n soup = requestHTML(url)\n\n apps = soup.find_all('div', {'class': 'category-template-title'})\n for app in apps:\n link = app.find('a', href=True)\n link = baseUrl + link.get('href')\n appName = scrapeAppData(link)\n if not appName:\n continue\n\n collectAllVersions(baseUrl, link, './apks/')\n collectAllReviews(baseUrl, appName, './reviews/')\n\n return apps is not None",
"async def display_available_apps(ctx):\n # get all available application files.\n description = ''\n for file in os.listdir('cogs'):\n if file.endswith('.py') and not file.startswith('bot'):\n description += f'- {file.replace(\".py\", \"\")}\\n'\n\n await send_embed(ctx, title=get_dev_title(), text=description)",
"def _has_pyproject_file() -> bool:\n return any(\"pyproject.toml\" in path for path in glob.glob(\"*\"))",
"def GetListOfAvailableApplications():\n kratos_path = GetKratosMultiphysicsPath()\n import os, re\n\n apps = [\n f.split('.')[0] for f in os.listdir(kratos_path) if re.match(r'.*Application*', f)\n ]\n\n return apps",
"def detect_django_settings():\n\n matches = []\n for root, dirnames, filenames in os.walk(os.getcwd()):\n for filename in fnmatch.filter(filenames, \"*settings.py\"):\n full = os.path.join(root, filename)\n if \"site-packages\" in full:\n continue\n full = os.path.join(root, filename)\n package_path = full.replace(os.getcwd(), \"\")\n package_module = package_path.replace(os.sep, \".\").split(\".\", 1)[1].replace(\".py\", \"\")\n\n matches.append(package_module)\n return matches",
"def load_apps(self):\n apps_list = self.db.find(API.APPS_COLLECTION_NAME, {})\n for app in apps_list:\n self.load_app(app)",
"def get_app_list():\n for name, data in sorted(app_data.APPS.iteritems()):\n yield {'name': name, 'explanation': data.get('short_explanation', name)}",
"def list():\n click.echo()\n for name, (_, doc) in app.scripts.items():\n click.echo(' {:8} {}'.format(name, doc))\n click.echo()",
"def get_enabled_apps():\n return ditto_apps.enabled()",
"def get_project_list(config):\r\n eggs_dir = config.get('eggs_dir', 'eggs')\r\n if os.path.exists(eggs_dir):\r\n projects = os.listdir(eggs_dir)\r\n else:\r\n projects = []\r\n try:\r\n projects += [x[0] for x in config.cp.items('settings')]\r\n except NoSectionError:\r\n pass\r\n return projects",
"def find_assets():\n for name in ['static', 'templates']:\n for entry in os.scandir('project'):\n if entry.is_file():\n yield entry.path",
"def get_apps(verbose=True):\n from balsam.core.models import ApplicationDefinition as App\n from balsam.scripts import postgres_control\n import os\n try:\n apps = App.objects.all()\n if verbose:\n print(f'Found {len(apps)} apps in {os.environ[\"BALSAM_DB_PATH\"]}:')\n for i,app in enumerate(apps):\n print(f'{i}: {app.name}')\n return apps\n except Exception as e:\n activate_database(db=os.environ[\"BALSAM_DB_PATH\"])\n return None",
"def _print_projects():\n project_dir = projects_path()\n print(' '.join(\n ['aeriscloud'] +\n [\n pro\n for pro in os.listdir(project_dir)\n if os.path.exists(os.path.join(project_dir, pro,\n '.aeriscloud.yml'))\n ]\n ))",
"def get_installed_modules_to_add_to_installed_apps():\n\treturn [item for item in os.listdir(BASE_DIR) if item.startswith('module_')]",
"def get_plugin_apps(self, multiplexer, logdir):\n raise NotImplementedError()",
"def getAvailablePrograms():\r\n repoPaths = paths.getRepoPaths()\r\n availablePrograms = []\r\n for path in repoPaths:\r\n availablePrograms += os.listdir(path)\r\n return availablePrograms"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
given a collection, atomically write the collection's data to its location; that is, if any document in the collection fails to generate or write, the entire operation aborts
|
def atomic_write_collection(collection, build_dir):
    counter = 0
    collection_dir = os.path.join(build_dir, collection.location)
    try:
        for doc in collection.docs:
            doc.write(collection_dir)
            counter += 1
    except (exceptions.DocGenerationFailure, exceptions.CollectionSizeExceeded) as exc:
        # Any failure rolls back the whole collection: remove whatever was
        # written so far, then re-raise for the caller to handle.
        logger.error('atomic write failed! (%s)' % str(exc))
        fs.remove_dir(collection_dir)
        raise
    return counter
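# Hedged usage sketch: build every discovered collection, skipping ones whose
# write fails. `discover_collections` is the helper defined elsewhere in this
# project; `write_all_collections` itself is illustrative, not original code.
def write_all_collections(build_dir):
    written = 0
    for collection in discover_collections():
        try:
            written += atomic_write_collection(collection, build_dir)
        except (exceptions.DocGenerationFailure, exceptions.CollectionSizeExceeded):
            # atomic_write_collection has already cleaned up its partial output.
            continue
    return written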
|
[
"def _sync_collection_mp(self, dbname, collname):\n self._logger.info('>>>> %s.%s' % (dbname, collname))\n doc_q = multiprocessing.Queue()\n ev = multiprocessing.Event()\n ev.clear()\n processes = []\n for i in range(0, 4):\n p = multiprocessing.Process(target=self._write_document, args=(dbname, collname, doc_q, ev))\n p.start()\n processes.append(p)\n n = 0\n cursor = self._src_mc[dbname][collname].find(\n filter=self._query,\n cursor_type=pymongo.cursor.CursorType.EXHAUST,\n no_cursor_timeout=True,\n modifiers={'$snapshot': True})\n for doc in cursor:\n while doc_q.qsize() > 10000:\n time.sleep(0.2) # wait subprocess consume\n doc_q.put(doc)\n n += 1\n if n % 10000 == 0:\n self._logger.info('[%s] push %d, size: %d' % (self._current_process_name, n, doc_q.qsize()))\n ev.set()\n for p in processes:\n p.join()\n self._logger.info('==== %s.%s %d, qsize %d' % (dbname, collname, n, doc_q.qsize()))",
"def _sync_collection(self, dbname, collname):\n self._logger.info(\"[%s] sync collection '%s.%s'\" % (self._current_process_name, dbname, collname))\n while True:\n try:\n n = 0\n #docs = [] \n reqs = []\n batchsize = 1000\n cursor = self._src_mc[dbname][collname].find(filter=self._query,\n cursor_type=pymongo.cursor.CursorType.EXHAUST,\n no_cursor_timeout=True,\n modifiers={'$snapshot': True})\n count = cursor.count()\n if count == 0:\n self._logger.info('[%s] \\t skip empty collection' % (self._current_process_name))\n return\n for doc in cursor:\n #docs.append(doc)\n #if len(docs) == batchsize:\n # self._dst_mc[dbname][collname].insert_many(docs)\n # docs = []\n reqs.append(ReplaceOne({'_id': doc['_id']}, doc, upsert=True))\n if len(reqs) == batchsize:\n self._bulk_write(dbname, collname, reqs, ordered=False)\n reqs = []\n n += 1\n if n % 10000 == 0:\n self._logger.info('[%s] \\t %s.%s %d/%d (%.2f%%)' % (self._current_process_name, dbname, collname, n, count, float(n)/count*100))\n #if len(docs) > 0:\n # self._dst_mc[dbname][collname].insert_many(docs)\n if len(reqs) > 0:\n self._bulk_write(dbname, collname, reqs, ordered=False)\n self._logger.info('[%s] \\t %s.%s %d/%d (%.2f%%)' % (self._current_process_name, dbname, collname, n, count, float(n)/count*100))\n return\n except pymongo.errors.AutoReconnect:\n self._src_mc.close()\n self._src_mc = self.reconnect(self._src_host,\n self._src_port,\n username=self._src_username,\n password=self._src_password,\n w=self._w)",
"def _bulk_write(self, dbname, collname, requests, ordered=True, bypass_document_validation=False):\n while True:\n try:\n self._dst_mc[dbname][collname].bulk_write(requests,\n ordered=ordered,\n bypass_document_validation=bypass_document_validation)\n except pymongo.errors.AutoReconnect:\n self._dst_mc.close()\n self._dst_mc = self.reconnect(self._dst_host,\n self._dst_port,\n username=self._dst_username,\n password=self._dst_password,\n w=self._w)\n else:\n return",
"def __exit__(self, exc_type, exc_value, exc_tb):\n if exc_type is None:\n self._db.Write(self.batch, self.write_sync)",
"def flush(self):\n self._assert_open()\n if self._requires_flush:\n if self._file is None:\n logger.debug(\"Flushed collection.\")\n else:\n logger.debug(\"Flush collection to file '{}'.\".format(self._file))\n self._file.truncate(0)\n self.dump(self._file)\n self._file.flush()\n self._requires_flush = False\n else:\n logger.debug(\"Flushed collection (no changes).\")",
"def update_collection(self, collection, doc):\n\n\t\ttry:\t\n\t\t\tself.db[collection].update({'_id' : ObjectId(doc['_id'])},\n\t\t\t\t\t\t\t\t\tdoc,\n\t\t\t\t\t\t\t\t\tupsert = False)\n\t\texcept Exception as e:\n\t\t\tlogging.error(\"[{}] : {}\".format(sys._getframe().f_code.co_name,e))\n\t\t\texit(1)",
"def can_write_collection(view):\n \n @wraps(view)\n def inner_decorator(request, collection, *args, **kwargs):\n \n out = createBaseResponseObject()\n database = kwargs.get(database, settings.MONGO_SERVER_DEFAULT_DB)\n\n try:\n #check user and collection\n collectionInstance = SketchCollection.objects.get(name=collection, database=database)\n wa = collectionInstance.hasWriteAccess(request.user)\n if wa:\n return view(request, collection, database=database, *args, **kwargs)\n \n except SketchCollection.DoesNotExist:\n #TODO: we could limit the number of collections here\n return view(request, collection, database=database, *args, **kwargs)\n \n \n except Exception, e:\n out['status'] = 0\n out['errors'] = [str(e)]\n return HttpResponse(json.dumps(out))\n \n out['status'] = 0\n out['errors'] = ['You must own collection %s or have the right to write to it.' % collection]\n return HttpResponse(json.dumps(out))\n\n return inner_decorator",
"async def set_data_in_db(self):\n try:\n result = await self._data_table.bulk_write(self._data[0], ordered=False)\n print('Insertion result %s' % repr(result.bulk_api_result))\n except pymongo.errors.BulkWriteError as bwe:\n result = bwe.details",
"def insert_one_to_collection(self, collection, doc):\n\n\t\ttry:\n\t\t\tself.db[collection].insert_one(doc)\n\t\texcept Exception as e:\n\t\t\tlogging.error(\"[{}] : {}\".format(sys._getframe().f_code.co_name,e))\n\t\t\texit(1)",
"def save_cache(self, collection, data, cache_key):\n cache = self.__create_cache(data, cache_key)\n collection.insert_one(cache)",
"def write_and_print_content(media_db, collection, file_name):\n media_collection = media_db[collection]\n\n json_content = read_json_file(file_name)\n media_collection.insert_many(json_content)\n\n print_mdb_collection(media_collection)\n\n return media_collection",
"def update_data_collection(self, mx_collection, wait=False): \n\tprint \"*************************\"\n\tprint \"mx collection DATA\"\n\tprint mx_collection\n \tlogging.getLogger(\"ispyb_client\").debug(\"asdlfjksdh f\")\n if self.__disabled:\n return\n\tmx_collection['collection_id'] = 8818\n if self.__collection:\n if 'collection_id' in mx_collection:\n try:\n # Update the data collection group\n self.store_data_collection_group(mx_collection)\n \n data_collection = ISPyBValueFactory().\\\n from_data_collect_parameters(mx_collection)\n \n self.__collection.service.\\\n storeOrUpdateDataCollection(data_collection)\n except WebFault:\n logging.getLogger(\"ispyb_client\").\\\n exception(\"ISPyBClient: exception in update_data_collection\")\n except URLError:\n logging.getLogger(\"ispyb_client\").exception(_CONNECTION_ERROR_MSG)\n else:\n logging.getLogger(\"ispyb_client\").error(\"Error in update_data_collection: \" + \\\n \"collection-id missing, the ISPyB data-collection is not updated.\")\n \n else:\n logging.getLogger(\"ispyb_client\").\\\n exception(\"Error in update_data_collection: could not connect\" + \\\n \" to server\")",
"def set_attachment(self, doc, blob, name, collection=None):\r\n\r\n # If there is already a file with the given name for this doc, then we will delete it\r\n # after writing the new file\r\n attachments = doc.get('_attachments', [])\r\n name_matches = [a for a in attachments if a[0] == name]\r\n\r\n # the filename is set to something so that fs.list() will display the file\r\n new_file_id = self.gfs.put(blob, filename='%s_%s' % (doc['_id'], name))\r\n logger.info('stored blob of %i bytes with id=%s and filename %s_%s' % (\r\n len(blob), str(new_file_id), doc['_id'], name))\r\n\r\n new_attachments = ([a for a in attachments if a[0] != name]\r\n + [(name, new_file_id)])\r\n\r\n try:\r\n ii = 0\r\n doc = self.update(doc, {'_attachments': new_attachments},\r\n collection=collection)\r\n # there is a database leak until we actually delete the files that\r\n # are no longer pointed to by new_attachments\r\n while ii < len(name_matches):\r\n self.gfs.delete(name_matches[ii][1])\r\n ii += 1\r\n except:\r\n while ii < len(name_matches):\r\n logger.warning(\"Leak during set_attachment: old_file_id=%s\" % (\r\n name_matches[ii][1]))\r\n ii += 1\r\n raise\r\n assert len([n for n in self.attachment_names(doc) if n == name]) == 1\r\n #return new_file_id\r",
"def commit(\n self,\n batch: aimrocks.WriteBatch\n ):\n self.writable_db.write(batch)",
"def set_attachment(self, doc, blob, name, collection=None):\n\n # If there is already a file with the given name for this doc, then we will delete it\n # after writing the new file\n attachments = doc.get(\"_attachments\", [])\n name_matches = [a for a in attachments if a[0] == name]\n\n # the filename is set to something so that fs.list() will display the file\n new_file_id = self.gfs.put(blob, filename=\"{}_{}\".format(doc[\"_id\"], name))\n logger.info(\n \"stored blob of %i bytes with id=%s and filename %s_%s\"\n % (len(blob), str(new_file_id), doc[\"_id\"], name)\n )\n\n new_attachments = [a for a in attachments if a[0] != name] + [\n (name, new_file_id)\n ]\n\n try:\n ii = 0\n doc = self.update(\n doc, {\"_attachments\": new_attachments}, collection=collection\n )\n # there is a database leak until we actually delete the files that\n # are no longer pointed to by new_attachments\n while ii < len(name_matches):\n self.gfs.delete(name_matches[ii][1])\n ii += 1\n except:\n while ii < len(name_matches):\n logger.warning(\n \"Leak during set_attachment: old_file_id=%s\" % (name_matches[ii][1])\n )\n ii += 1\n raise\n assert len([n for n in self.attachment_names(doc) if n == name]) == 1\n # return new_file_id",
"def update_cluster_by_collection(collection):\n boundary_type = BoundaryType.objects.all().order_by('-level')[0]\n boundaries = Boundary.objects.filter(\n type=boundary_type).filter(\n geometry__contains=collection.site.get_geometry()).values_list(\n 'id', flat=True)\n boundaries = list(set(boundaries))\n\n task_update_cluster.delay(boundaries)",
"def save(self, coll_to_save):\n with open(LOCAL_FILE, 'w') as f:\n pickle.dump(coll_to_save, f)",
"def save(self, coll_to_save):\n # Serialize collections\n id_list = []\n for collection in coll_to_save:\n coll_dict = {}\n coll_dict['jp_collection'] = jsonpickle.encode(collection,\n keys=True)\n\n new_id = self._dbcollection.save(coll_dict)\n\n # Add _id if it exists\n if collection.db_id not in (None, ''):\n coll_dict['_id'] = ObjectId(collection.db_id)\n id_list.append(coll_dict['_id'])\n else:\n # new entry in cloud, update id_list\n id_list.append(new_id)\n\n # Delete documents that are in cloud but not in local\n to_del = [doc_id['_id'] for doc_id in\n self._dbcollection.find(fields=['_id'])\n if doc_id['_id'] not in id_list]\n\n if len(to_del) > 0:\n for doc_id in to_del:\n self._dbcollection.remove({'_id': ObjectId(doc_id)})",
"def update(self):\n if self.col:\n time0 = time.time()\n gen = self.datasets()\n if not self.col.count():\n try: # perform bulk insert operation\n while True:\n if not self.col.insert(\\\n itertools.islice(gen, self.cache_size)):\n break\n except InvalidOperation:\n pass\n else: # we already have records, update their ts\n for row in gen:\n spec = dict(dataset=row['dataset'])\n self.col.update(spec, {'$set':{'ts':time0}})\n # remove records with old ts\n self.col.remove({'ts':{'$lt':time0-self.expire}})\n print \"%s DBSDaemon updated %s collection in %s sec, nrec=%s\" \\\n % (dastimestamp(), self.dbcoll, time.time()-time0, self.col.count())"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Pic of day access. .pod recent gives you the most recent POD, .pod with no arguments gives you a random POD, and .pod followed by either a date (MM/DD/YY format only) or a number returns either the POD closest to that date or the POD with that number. .pod list gives you a link to a dump of all PODs.
|
def command_pod(bot, user, channel, args):
settings = _import_yaml_data()
    db_user = settings['db']['user']
    db_pass = settings['db']['pass']
    conn = psycopg2.connect(host="localhost", database="quassel", user=db_user, password=db_pass)
cursor = conn.cursor()
## Once again, this query is specially crafted to only return things I've said
## in a specific channel that begin with a specific string ("PIC OF DAY:").
cursor.execute("SELECT message,time FROM backlog WHERE message ~ 'PIC OF DAY:' AND bufferid=11 AND senderid=6")
result = cursor.fetchall()
length = len(result)
conn.close()
try:
        ## This block handles the case where args is a POD number
        pod_num = int(args)
        text = "POD \x02%s/%s\x02 \x02\x0313|\x03\x02 " % (pod_num, length) + result[pod_num - 1][0] + " \x02\x0313|\x03\x02 Date posted: \x02%s\x02" % (result[pod_num - 1][1].strftime("%x %X"))
except ValueError:
## If args wasn't a number, we come down here to where the real fun is
if args.strip(): ## if it wasn't blank
if args.strip() == "recent":
text = "Most recent POD \x02\x0313|\x03\x02 " + result[-1][0] + " \x02\x0313|\x03\x02 Date posted: \x02%s\x02" % (result[-1][1].strftime("%x %X"))
elif args.strip() == "list":
text = "POD list \x02\x0313|\x03\x02 " + _list(result) # goes and gets the list
else:
try:
## checks the date syntax.
## TODO: look into dateutil library to make arbitrary datestrings possible
date_object = datetime.datetime.strptime(args.strip(), "%m/%d/%y")
except ValueError:
bot.say(channel, "Invalid date syntax.")
## This block searches for the closest match to the input time by comparing
## timedeltas. We initialize delta_Match to be the max timedelta possible,
## and then iterate through looking for the closest (least absolute value)
## timedelta.
match = 0
delta_match = datetime.timedelta.max
for x in range(len(result)):
delta = date_object - result[x][1]
if abs(delta.total_seconds()) < abs(delta_match.total_seconds()):
match = x
delta_match = delta
text = "Search result | POD \x02%s/%s\x02 \x02\x0313|\x03\x02 " % (match + 1, length) + result[match][0] + " \x02\x0313|\x03\x02 Date posted: \x02%s\x02" % (result[match][1].strftime("%x %X"))
else:
## Here is the boringest part, where we just return the specified POD.
            res_num = random.randint(1, length)  # randint is inclusive, so cap at length
text = "POD \x02%s/%s\x02 \x02\x0313|\x03\x02 " % (res_num, length) + result[res_num - 1][0] + " \x02\x0313|\x03\x02 Date posted: \x02%s\x02" % (result[res_num - 1][1].strftime("%x %X"))
if channel == user:
bot.say(bot.factory.getNick(user), text)
else:
bot.say(channel, text)
return
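# Hedged sketch of the expected invocations (nick/channel values illustrative):
#   command_pod(bot, 'nick', '#channel', '42')        -> POD number 42
#   command_pod(bot, 'nick', '#channel', 'recent')    -> most recent POD
#   command_pod(bot, 'nick', '#channel', 'list')      -> pastebin link to all PODs
#   command_pod(bot, 'nick', '#channel', '01/15/14')  -> POD closest to that date
#   command_pod(bot, 'nick', '#channel', '')          -> a random POD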
|
[
"def pod_finder(pod_list):\n pod_scores = {}\n\n for pod in pod_list:\n # convert pod to dict\n pod = dict(pod)\n\n if pod.get('@title') in RESULT_PODS:\n return pod\n\n score = 0\n\n # meh pods\n if pod.get('@title') in NOT_PODS:\n score -= 100\n\n if isinstance(pod['subpod'], list):\n # subpod has an image\n score += 10 + (len(pod['subpod']) * 10)\n else:\n # subpod is singular\n\n # plain text\n if pod['subpod'].get('plaintext'):\n score += 50\n\n # image\n if pod['subpod'].get('img'):\n score += 30\n\n pod_scores[pod['@id']] = score\n\n # return pod with highest score\n best_id = max(pod_scores, key=pod_scores.get)\n return discord.utils.find(lambda pod: pod['@id'] == best_id, pod_list)",
"def podinfo(bot, trigger):\n # Input check\n # TODO: Return a random podcast if none provided? From top 10?\n user_input = trigger.group(2)\n if not user_input:\n return bot.reply(\"I need a podcast to lookup!\")\n # Fetch\n try:\n results = _fetch(bot, API_SEARCH_URL.format(\n query = \"\\\"{}\\\"\".format(quote_plus(user_input)),\n type = \"podcast\"\n ))\n genres = _fetch(bot, API_GENRES_URL)\n genres = genres[\"genres\"]\n except APIKeyMissing:\n return bot.reply(\"I'm missing the API Key\")\n if not results[\"results\"]:\n # broaden the search\n results = _fetch(bot, API_SEARCH_URL.format(\n query = quote_plus(user_input),\n type = \"podcast\"\n ).replace(\"&only_in=title\",''))\n if not results[\"results\"]:\n return bot.reply(\"Sorry, I couldn't find anything by that query\")\n # Parse\n # for now let's assume first result is what we want\n result = results[\"results\"][0]\n pod_id = result[\"id\"] # let's get some details\n details = _fetch(bot, API_DETAIL_URL.format(\n type = \"podcasts\",\n id = pod_id,\n sort = \"recent_first\"\n ))\n if details:\n latest_ep_info = \"{4} {1} ({0}) | published {2} | Listen @ {3}\".format(\n pendulum.duration(seconds=details[\"episodes\"][0][\"audio_length_sec\"]).in_words(),\n bold(details[\"episodes\"][0][\"title\"]),\n pendulum.from_timestamp(details[\"episodes\"][0][\"pub_date_ms\"]/1000).diff_for_humans(),\n _shorten(bot, details[\"episodes\"][0][\"listennotes_url\"]),\n bold(color(\"[Latest Episode]\", \"orange\"))\n )\n else:\n latest_ep_info = None\n name = result[\"title_original\"]\n author = result[\"publisher_original\"]\n desc = _normalizeWhitespace(result[\"description_original\"]).strip()\n eps = result[\"total_episodes\"]\n link = details.get(\"website\") or result[\"listennotes_url\"]\n pod_genres = []\n for id_ in result[\"genre_ids\"]:\n for genre in genres:\n if id_ == genre[\"id\"]:\n pod_genres.append(genre[\"name\"])\n first_ep = pendulum.from_timestamp(result[\"earliest_pub_date_ms\"]/1000)\n latest_ep = pendulum.from_timestamp(result[\"latest_pub_date_ms\"]/1000)\n replies = []\n replies.append(f\"{bold(color('[Podcast]', 'orange'))} {bold(name)} by {author} | {desc}\")\n replies.append(f\"{bold(color('[Info]', 'orange'))} {eps} episodes\"\n f\" | Genre(s): {', '.join(pod_genres)}\"\n f\" | First Published: {first_ep.format('YYYY')} | \"\n f\"Most Recent: {latest_ep.format('M/D/YYYY')} | \"\n f\"See more @ {_shorten(bot, link)}\")\n if latest_ep_info:\n replies.append(latest_ep_info)\n for reply in replies:\n bot.say(reply, max_messages=2)\n return",
"def list_pod(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method list_pod\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/api/v1/pods'.replace('{format}', 'json')\n method = 'GET'\n\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = {}\n files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, method,\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=files,\n response_type='V1PodList',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def pod( self, namespace, name, current_time=None ):\n return self._pods.lookup( namespace, name, current_time )",
"def _list(db_tuples):\r\n ret_str = \"\"\r\n hashy = hashlib.sha224() # sha224 hashes to ascii characters only, which makes them serializable.\r\n ## Builds the POD list up, along with the hash query.\r\n for x in range(len(db_tuples)):\r\n build_str = 'POD %s: \"%s\", posted on %s\\n' % (x + 1, db_tuples[x][0], db_tuples[x][1].strftime(\"%x %X\"))\r\n hashy.update(build_str)\r\n ret_str = ret_str + build_str\r\n\r\n ## pods.txt stores a dict(str -> str)\r\n ## dict(hash of POD list -> url of pastebin)\r\n hash_dict = yaml.load(file(\"modules/pods.txt\"))\r\n hashy_str = hashy.digest() # Do the hashing\r\n\r\n with open(\"modules/pods.txt\", \"a\") as hash_file:\r\n try:\r\n url = hash_dict[hashy_str] # If it's present, fantastic!\r\n except KeyError:\r\n # If not, go do things with the internet\r\n ret_str = urllib.quote(ret_str)\r\n results = requests.post(\"http://ix.io\", \"f:1=%s\" % ret_str)\r\n url = results.content.encode('utf-8').strip()\r\n hash_dict[hashy_str] = url\r\n yaml.dump(hash_dict, hash_file)\r\n\r\n return url",
"def pageviews_of(title, date):\n url = 'https://de.wikipedia.org/w/api.php?action=query&format=json&prop=pageviews&titles=' + title\n\n query = requests.get(url).json()['query']\n pagehitlist = list(query['pages'].values())\n return pagehitlist[0]['pageviews'][date]",
"def iter_pod_images(pod):\n metadata = pod.obj.get(\"metadata\", {})\n containers = pod.obj.get(\"spec\", {}).get(\"containers\", [])\n for container in containers:\n yield {\n \"namespace\": metadata.get(\"namespace\", \"default\"),\n \"pod_name\": metadata.get(\"name\", \"\"),\n \"container_name\": container.get(\"name\", \"\"),\n \"image\": container.get(\"image\", \"\"),\n }",
"def pod_discover(cfg,namespace):\n url = \"https://\"+cfg['endpoint']+\":\"+cfg['port']+'/api/v1/namespaces/'+namespace+'/pods'\n r = s.request('GET',url,headers={'Authorization': 'Bearer '+cfg['token']},verify=False)\n result = json.loads(r.text)\n data={\"data\":[]}\n for item in result[\"items\"]:\n obj={\"{#NAME}\":item['metadata']['name'].encode('utf-8'),\"{#NAMESPACE}\":namespace,\"{#SELFLINK}\":item['metadata']['selfLink'].encode('utf-8')}\n data[\"data\"].append(obj)\n print json.dumps(data,indent=2)",
"def pod_table(pods):\n\n def id_and_containers(pod):\n \"\"\"Extract the pod ID and container names from the given pod JSON.\n\n :param pod: the pod JSON to read\n :type pod: {}\n :returns: the entry for the ID+CONTAINER column of the pod table\n :rtype: str\n \"\"\"\n\n pod_id = pod['id']\n container_names = sorted(container['name'] for container\n in pod['spec']['containers'])\n\n container_lines = ('\\n |-{}'.format(name) for name in container_names)\n return pod_id + ''.join(container_lines)\n\n key_column = 'ID+TASKS'\n fields = OrderedDict([\n (key_column, id_and_containers),\n ('INSTANCES', lambda pod: len(pod.get('instances', []))),\n ('VERSION', lambda pod: pod['spec'].get('version', '-')),\n ('STATUS', lambda pod: pod['status']),\n ('STATUS SINCE', lambda pod: pod['statusSince']),\n ('WAITING', lambda pod: pod.get('overdue', False))\n ])\n\n tb = table(fields, pods, sortby=key_column)\n tb.align[key_column] = 'l'\n tb.align['VERSION'] = 'l'\n tb.align['STATUS'] = 'l'\n tb.align['STATUS SINCE'] = 'l'\n tb.align['WAITING'] = 'l'\n\n return tb",
"def pods_all(ctx):\n ctx.run(KUBERNETES_GET_PODS_ALL_CMD)",
"def get_podspec(podname):\n command = \"pod spec cat '%s'\" % (podname)\n return_code, out, _ = shell.cmd(command)\n if return_code != 0:\n if 'Unable to find a pod with name matching' in out:\n logging.error(\"Make sure you have 'pod setup' executed.\")\n return {}\n return json.loads(out)",
"def watch_pod_list(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method watch_pod_list\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/api/v1/watch/pods'.replace('{format}', 'json')\n method = 'GET'\n\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = {}\n files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, method,\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=files,\n response_type='JsonWatchEvent',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"async def latest(self, ctx):\n\t\tr = requests.get('http://rain.thecomicseries.com/comics/')\n\t\tif(r.status_code != 200):\n\t\t\treturn await ctx.send('Could not find the most recent comic')\n\t\thtml = r.content\n\t\tstart = html.find(b'class=\"heading\">Comic ')\n\t\tstop = html.find(b' ',start+23)\n\t\tcurUpdate = int(html[start+22:stop].decode(\"utf-8\"))\n\t\toutput = 'Comic Fury: http://rain.thecomicseries.com/comics/\\nSmackJeeves: http://rainlgbt.smackjeeves.com/comics/\\n'\n\t\tif curUpdate == len(self.lookup['CF']):\n\t\t\tif self.lookup['CF'][str(curUpdate)][3] != '':\n\t\t\t\toutput += 'Deviant art: https://www.deviantart.com/jocelynsamara/art/' + self.lookup['CF'][str(curUpdate)][3] + '\\n'\n\t\t\toutput += '\\nPage list is up to date'\n\t\t\toutput = 'Title: {}\\n'.format(self.lookup['CF'][str(curUpdate)][0]) + output\n\t\telse:\n\t\t\toutput += '\\nPage list is out of date by {} updates'.format(curUpdate-len(self.lookup['CF']))\n\t\tem = discord.Embed(title=\"Latest Page\", description=output, colour=cfg.colors['green'])\n\t\treturn await ctx.send(embed=em)",
"def query_pod( self, namespace, pod ):\n if not pod or not namespace:\n return {}\n\n query = '/api/v1/namespaces/%s/pods/%s' % (namespace, pod)\n return self.query_api( query )",
"def get_daily_devotion():\n resp = requests.get(\"https://devotionalium.com/api/v2?lang=en\")\n \n data = resp.json()\n text = data['1']['text']\n\n ref = data['1']['referenceLong']\n # readUrl = data['1']['readingUrl']\n photoUrl = data['photo']['url']\n # date = data['date']\n\n return render_template(\"devotion.html\", text=text, ref=ref, photoUrl=photoUrl )",
"def get_latest_imagery_date(overlay):\n meta = _sources[overlay]\n uri = _server_uri + _dir_info['path']\n \n # find a good date to start from, assuming tomorrow\n search_date = datetime.now() + timedelta(days=1)\n assert search_date > datetime(2015, 8, 1) # start of imagery (ignoring 2012)\n last_pub_date = None\n for i in range(7):\n r = requests.get(uri.format(subdir=meta['subdir'], date=search_date))\n if r.status_code != 404:\n n = len(get_overlay_image_list(overlay, date=search_date))\n if n == 48:\n last_pub_date = search_date\n break\n search_date += timedelta(days=-1) \n return last_pub_date",
"def read_pod_events(self, pod):\n try:\n return self._client.list_namespaced_event(\n namespace=pod.metadata.namespace, field_selector=f\"involvedObject.name={pod.metadata.name}\"\n )\n except HTTPError as e:\n raise AirflowException(f\"There was an error reading the kubernetes API: {e}\")",
"def read_pod_logs(\n self,\n pod: V1Pod,\n tail_lines: int | None = None,\n timestamps: bool = False,\n since_seconds: int | None = None,\n ):\n additional_kwargs = {}\n if since_seconds:\n additional_kwargs[\"since_seconds\"] = since_seconds\n\n if tail_lines:\n additional_kwargs[\"tail_lines\"] = tail_lines\n\n try:\n return self._client.read_namespaced_pod_log(\n name=pod.metadata.name,\n namespace=pod.metadata.namespace,\n container=\"base\",\n follow=True,\n timestamps=timestamps,\n _preload_content=False,\n **additional_kwargs,\n )\n except HTTPError as e:\n raise AirflowException(f\"There was an error reading the kubernetes API: {e}\")",
"def bestpodcasts(bot, trigger):\n results = _fetch(bot, API_BEST_URL)\n podcasts = results.get(\"podcasts\")\n if not podcasts:\n return bot.say(\"Sorry, I couldn't retreive anything from Listennotes.com\")\n genres = _fetch(bot, API_GENRES_URL)\n genres = genres[\"genres\"]\n replies = []\n link = results.get(\"listennotes_url\")\n replies.append(f\"{bold(color('[Top 5 Podcasts]', 'orange'))} via {link}\")\n for idx,pod in enumerate(podcasts[:5]):\n desc = _normalizeWhitespace(pod[\"description\"])\n if len(desc) >= 200:\n desc = desc[:199].strip() + \"…\" \n pod_genres = []\n for id_ in pod[\"genre_ids\"]:\n for genre in genres:\n if id_ == genre[\"id\"]:\n pod_genres.append(genre[\"name\"])\n replies.append(f\"{bold(color('[#' + str(idx+1) + ']', 'orange'))} \"\n f\"{bold(pod['title'])} by {pod['publisher']}\"\n f\" | {desc} {_shorten(bot, pod['listennotes_url'])} | \"\n f\"{', '.join(pod_genres)}\"\n )\n for reply in replies:\n bot.say(reply)\n return"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The most convoluted way to do this kind of thing ever invented, I think. The objective is to store the pastebin links for POD lists so we don't waste time re-sending them every time somebody asks. Instead, we waste time hashing the results every time. I'm honestly not sure which is better. Probably just re-sending them.
|
def _list(db_tuples):
ret_str = ""
    hashy = hashlib.sha224()  # hexdigest() gives an ASCII-only hex string, which makes it a safe YAML key.
## Builds the POD list up, along with the hash query.
for x in range(len(db_tuples)):
build_str = 'POD %s: "%s", posted on %s\n' % (x + 1, db_tuples[x][0], db_tuples[x][1].strftime("%x %X"))
hashy.update(build_str)
ret_str = ret_str + build_str
## pods.txt stores a dict(str -> str)
## dict(hash of POD list -> url of pastebin)
    hash_dict = yaml.load(file("modules/pods.txt")) or {}  # an empty file yields None, so fall back to a fresh dict
    hashy_str = hashy.hexdigest()  # Do the hashing
    try:
        url = hash_dict[hashy_str]  # If it's present, fantastic!
    except KeyError:
        # If not, go do things with the internet
        ret_str = urllib.quote(ret_str)
        results = requests.post("http://ix.io", "f:1=%s" % ret_str)
        url = results.content.encode('utf-8').strip()
        hash_dict[hashy_str] = url
        # Rewrite the whole mapping so the file always holds exactly one YAML
        # document (appending would pile up stale copies that yaml.load cannot parse).
        with open("modules/pods.txt", "w") as hash_file:
            yaml.dump(hash_dict, hash_file)
    return url
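# Hedged sketch of the cache round trip (values illustrative):
#   first call:  _list(rows) -> POSTs the list to ix.io, stores {hash: url} in
#                modules/pods.txt, returns the url
#   later calls: _list(rows) -> the same hash is found in modules/pods.txt, so
#                the cached url is returned without hitting the network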
|
[
"def get_links(subreddit_list: Dict[str, int]) -> List[str]:\n global driver\n\n assert driver is not None\n\n driver.get(\"https://old.reddit.com\")\n\n # prompt the user to log in\n print(\"Logged in accounts see 100 posts instead of 25\")\n input(\"Log into your reddit account in the chromedriver. Press enter when you're done...\")\n\n\n for subreddit_name in subreddit_list:\n subreddit_base = f\"https://old.reddit.com/r/{subreddit_name}/\"\n print(f\"Making sure {subreddit_base} exists...\")\n driver.get(subreddit_base)\n random_wait()\n assert driver.current_url.casefold() == subreddit_base.casefold()\n\n # may be some links that arent images, those can be dealt with later/handled manually\n image_links = []\n for subreddit_name, number_of_pages in subreddit_list.items():\n # first top page, sorted by all\n driver.get(f\"https://old.reddit.com/r/{subreddit_name}/top/?sort=top&t=all\")\n pages_left = int(number_of_pages)\n while pages_left > 0:\n images_found = 0\n for post in driver.find_elements_by_css_selector(\"#siteTable > div.link\"):\n # if this is a promoted post/advertisement\n if len(post.find_elements_by_css_selector(\".promoted-tag\")) == 0:\n image_links.append(\n post.find_element_by_css_selector(\"a.title\").get_attribute(\"href\")\n )\n images_found += 1\n print(f\"Added {images_found} possible images from {driver.current_url}\")\n random_wait()\n # dont need to go to the next page when we're on the last one (1 page left)\n if pages_left != 1:\n # go to the next page\n driver.find_element_by_css_selector(\"span.next-button\").click()\n pages_left -= 1\n\n driver.quit()\n\n with open(os.path.join(this_dir, \"links.txt\"), \"w\") as link_cache:\n link_cache.write(\"\\n\".join(image_links))\n\n return image_links",
"def get_latest_post():\n red = redis.Redis(host = 'localhost', db = config.subfeed_db)\n reddit = praw.Reddit(user_agent=config.my_user_agent,\n client_id=config.my_client_id,\n client_secret=config.my_client_secret) \n subfeed_key = \"sorted_lfc\"\n unique_new_list = []\n subreddit = reddit.subreddit(config.subfeed_subreddit)\n for submission in subreddit.hot(limit=config.subfeed_limit):\n current_time = int(time.time())\n present_in_db = red.zadd(subfeed_key, submission.id, current_time)\n if present_in_db == 1:\n submission_link = \"https://www.reddit.com\" + submission.permalink\n unique_new_list.append(submission_link)\n return unique_new_list",
"def link_gever_document(gever_doc_uid):",
"def prepare_links(links_in_post):\r\n aggregated_links = []\r\n for link in links_in_post:\r\n aggregated_links.append(link)\r\n if not aggregated_links:\r\n all_links = '[]'\r\n else:\r\n all_links = list(set(aggregated_links))\r\n all_links = str(all_links)\r\n return all_links",
"def compute_hashes(\n links: list[DistributionMetadata],\n temporary_directory: Union[Path, str],\n old_package_list_json: Union[Path, str, None] = None,\n) -> list[DistributionMetadata]:\n raise NotImplementedError(\"sorry :(\")",
"def get_url(url='http://', fileglob='', prompt_name='', repo_chk='', contains=[],\n excludes=[], filelist=[]):\n print(f'Enter {prompt_name} URL. (\"sss\" at end of URL to skip)')\n if fileglob:\n print('Do not include filenames in the URL. A search of the URL')\n print('will be made up to 10 levels deep')\n while True:\n url = rlinput(f'Enter URL: ', url)\n if url.endswith('sss'):\n url = None\n break\n if repo_chk:\n url = url if url.endswith('/') else url + '/'\n try:\n # Basic response test\n cmd = f'curl --max-time 2 -I {url}'\n url_info, err, rc = sub_proc_exec(cmd)\n except:\n pass\n else:\n if 'http:' in url or 'https:' in url:\n response = re.search(r'HTTP\\/\\d+.\\d+\\s+200\\s+ok', url_info, re.IGNORECASE)\n if response:\n repo_mrkr = {'yum': '/repodata/', 'ana': 'repodata.json',\n 'pypi': '/simple/'}\n print(response.group(0))\n if repo_chk:\n ss = repo_mrkr[repo_chk]\n elif fileglob:\n ss = fileglob\n elif url[-1] != '/':\n ss = os.path.basename(url)\n url = os.path.dirname(url)\n cmd = ('wget -r -l 10 -nd -np --spider '\n f'--accept={ss} {url}')\n reply, err, rc = sub_proc_exec(cmd)\n err = err.replace('%2B', '+')\n if rc == 0:\n if repo_chk:\n regx = 'http.+' + repo_mrkr[repo_chk]\n elif fileglob:\n regx = fileglob_to_regx(fileglob)\n _found = re.findall(regx, err)\n # remove dups\n _found = list(set(_found))\n\n found = []\n # Include items containing any element of 'contains'\n # and exclude items containing any element of 'excludes'\n # If no item meets criteria, then use any / all\n # items but include a warning.\n if repo_chk:\n for _url in _found:\n if (any([item for item in contains if item in\n _url]) and not any([item for item in\n excludes if item\n in _url])):\n found.append(_url)\n\n if found:\n _list = found\n elif _found:\n _list = _found\n if repo_chk:\n print(bold('\\nWarning. The following url(s) were '\n 'found but do not match the '\n 'search criteria'))\n else:\n _list = []\n if _list:\n ch, sel = get_selection(_list, allow_none=True)\n if ch != 'N':\n if repo_chk:\n sel = sel.rstrip('/')\n url = os.path.dirname(sel)\n if files_present(url, filelist):\n break\n else:\n print('\\nChosen URL does not appear to '\n 'be valid. File check failed.')\n if get_yesno('Use selection anyway'):\n break\n else:\n url = sel\n break\n\n else:\n print('No match found.')\n else:\n print(f'Error reading url. {reply}')\n\n else:\n print('Invalid url')\n err = re.search('curl: .+', err)\n if err:\n print(err.group(0))\n tmp = re.search(r'HTTP\\/\\d+.\\d+\\s+.+', url_info)\n if tmp:\n print(tmp.group(0))\n\n elif 'file:///' in url:\n response = re.search(r'Content-Length:\\s+\\d+', url_info)\n if response:\n if repo_chk == 'yum':\n ss = '/repodata'\n elif repo_chk == 'ana':\n ss = '/repodata.json'\n elif repo_chk == 'pypi':\n ss = '/simple'\n if repo_chk:\n ss = url + ss\n elif fileglob:\n ss = url + fileglob\n ss = '/' + ss.lstrip('file:/')\n files = glob(ss, recursive=True)\n\n if files:\n ch, sel = get_selection(files, allow_none=True)\n if ch != 'N':\n url = 'file://' + os.path.dirname(sel) + '/'\n break\n else:\n print('No match found.')\n\n elif 'file:' in url:\n print('Proper file url format: \"file:///path/to/file')\n response = ''\n else:\n response = ''\n return url",
"def link(oldver='r12',dates='58*',newver='r13',fields='*', tels=['lco25m','apo1m','apo25m'] ) :\n\n # exposure/TELESCOPE/MJD directories\n dirs=glob.glob(os.environ['APOGEE_REDUX']+'/'+oldver+'/exposures/*/'+dates+'/')\n mklinks(dirs,-4,-2,oldver=oldver)\n\n # cal/TELESCOPE/MJD directories\n dirs=glob.glob(os.environ['APOGEE_REDUX']+'/'+oldver+'/cal/*/'+dates+'/')\n mklinks(dirs,-4,-2,oldver=oldver)\n\n # visit/TELESCOPE/FIELD/PLATE/MJD directories and visit/TELESCOPE/FIELD/*VisitSum files\n for tel in tels :\n if tel == 'apo1m' :\n dirs=glob.glob(os.environ['APOGEE_REDUX']+'/'+oldver+'/visit/'+tel+'/*/'+dates+'/*')\n mklinks(dirs,-5,-1,oldver=oldver,newver=newver)\n else :\n dirs=glob.glob(os.environ['APOGEE_REDUX']+'/'+oldver+'/visit/'+tel+'/'+fields+'/*/'+dates+'/*')\n mklinks(dirs,-6,-1,oldver=oldver,newver=newver)\n files=glob.glob(os.environ['APOGEE_REDUX']+'/'+oldver+'/visit/'+tel+'/'+fields+'/*VisitSum*'+dates+'*')\n mklinks(files,-4,-1,oldver=oldver)\n\n # stars/TELESCOPE/FIELD/apStar and apField\n for tel in tels :\n files=glob.glob(os.environ['APOGEE_REDUX']+'/'+oldver+'/stars/'+tel+'/'+fields+'/a?Star*')\n mklinks(files,-4,-1,oldver=oldver,newver=newver)\n files=glob.glob(os.environ['APOGEE_REDUX']+'/'+oldver+'/stars/'+tel+'/'+fields+'/a?Field*')\n mklinks(files,-4,-1,oldver=oldver,newver=newver)\n files=glob.glob(os.environ['APOGEE_REDUX']+'/'+oldver+'/stars/'+tel+'/'+fields+'/plots/*.gif')\n mklinks(files,-5,-1,oldver=oldver,newver=newver)\n files=glob.glob(os.environ['APOGEE_REDUX']+'/'+oldver+'/stars/'+tel+'/'+fields+'/plots/*.jpg')\n mklinks(files,-5,-1,oldver=oldver,newver=newver)\n\n # calibration files\n for caldir in ['bpm', 'darkcorr','detector','flatcorr','flux','littrow','lsf','persist','psf','telluric','trace','wave'] :\n try : os.makedirs('cal/'+caldir)\n except : pass\n files =glob.glob(os.environ['APOGEE_REDUX']+'/'+oldver+'/cal/'+caldir+'/*')\n mklinks(files,-3,-1,oldver=oldver)",
"def getAllUniqueImgLinks(baseUrl):\n\tgetBodyLinks = getAllImages(baseUrl) \n\t#remove all pound sign elements\n\tsubList = removeFromList(getBodyLinks, '#')\t\n\t#remove all non base url links\n\t#mainList = removeNonSubUrlLinks(subList)\n\tmainList = subList\n\t#remove all duplicates\n\tfinalList = list(set(mainList))\t\t\n\t#Strip out similar duplicates by removing all / from urls that need it\n\tmasterList = set(map(lambda url: url.rstrip('/'), finalList))\n\tlistOfLinks = list(masterList)\n\treturn listOfLinks",
"def insert_links(content: str):\n url = repo.html_url + \"/pull\"\n content = re.sub(r\"#(\\d+)\", rf\"[#\\1]({url}/\\1)\", content)\n return re.sub(r\"@(\\S+)\", rf\"[@\\1]({GITHUB_URL}/\\1)\", content)",
"def fetch_links(self, normalized_name):\n package_url = f\"{self.index_url}/{normalized_name}\"\n text = CACHE.get(\n path_or_url=package_url,\n as_text=True,\n force=not self.use_cached_index,\n )\n links = collect_urls(text)\n # TODO: keep sha256\n links = [l.partition(\"#sha256=\") for l in links]\n links = [url for url, _, _sha256 in links]\n return links",
"def citing_me():",
"def join():\n\n f_links = film_links()\n character = char_names()\n for i in character:\n if i['url'] in f_links: # PERFORMING THE CONDITION CHECK\n del i['url'] \n return character",
"def getArchiveURLFor(weblogentry):",
"def getAllUniqueLinks(baseUrl):\n\tgetBodyLinks = getVisibleLinks(baseUrl) \n\t#remove all pound sign elements\n\tsubList = removeFromList(getBodyLinks, '#')\t\n\t#remove all non base url links\n\t#mainList = removeNonBaseUrlLinks(subList)\n\tmainList = subList\n\t#remove all duplicates\n\tfinalList = list(set(mainList))\t\t\n\t#Strip out similar duplicates by removing all / from urls that need it\n\tmasterList = set(map(lambda url: url.rstrip('/'), finalList))\n\tlistOfLinks = list(masterList)\n\treturn listOfLinks",
"def get_fresh(old_issue_list, new_issue_list):\n old_urls = {x['url'] for x in old_issue_list}\n return [x for x in new_issue_list if x['url'] not in old_urls]",
"def get_obs_for_link(link, start_date=None, end_date=None, reset_cache=False, cache_folder='cache'):\n observations = []\n # the file name is quite absurd, but hopefully unique\n file_name = './' + cache_folder + '/' + \"\".join(re.split(\"[^a-zA-Z0-9]*\", link)) + '.json'\n # just parsing some dates\n today = str(datetime.datetime.utcnow())\n def utcparse(x): return date_parser.parse(x).replace(tzinfo=tzutc())\n parsed_start_date = (utcparse(start_date)\n if start_date\n else date_parser.parse(DEFAULT_START_DATE))\n parsed_end_date = (utcparse(end_date)\n if end_date\n else datetime.datetime.now(datetime.timezone.utc))\n # whether or not to write out a new cache\n do_update = True\n if not reset_cache:\n # if there is no folder to store caches in, create one\n if not os.path.isdir('./' + cache_folder):\n os.mkdir('./' + cache_folder)\n try:\n with open(file_name) as cache_file:\n # load and parse all the observations\n observations = list(map(lambda x: (x[0], utcparse(x[1])), json.load(cache_file)))\n # no observations? time to rebuild the cache\n if len(observations) < 1: \n \traise FileNotFoundError \n # the last cached observation\n end_observations = observations[-1][1]\n if parsed_end_date > end_observations:\n \"\"\"\n if the requested end date goes beyond the cache, we need more data\n note the [:-6]. This is a hacky bandaid because \n for some reason the date format wasn't working\n also note the [1:]\n this is because the first element is duplicated \n in the cache and when getting new data\n \"\"\"\n observations += get_obs_for_link_uncached(link, str(end_observations)[:-6], str(today))[1:]\n else:\n do_update = False\n except FileNotFoundError:\n # if there is no existing cache, create one with all the data you can get\n observations = get_obs_for_link_uncached(link, DEFAULT_START_DATE, today)\n else:\n # or if the cache is requested to be reset\n observations = get_obs_for_link_uncached(link, DEFAULT_START_DATE, today)\n\n # write the data back out to the cache\n if do_update:\n with open(file_name, 'w') as cache_file:\n # convert to string because json can't write otherwise\n jsonible_observations = list(map(lambda x: (x[0], str(x[1])), observations))\n json.dump(jsonible_observations, cache_file)\n\n # now to actually give the requester what they wanted\n start_index = bs.search(observations, (None, parsed_start_date), key=lambda x: x[1])\n end_index = bs.search(observations, (None, parsed_end_date), key=lambda x: x[1])\n\n # slice all the observations to what the request was\n return observations[start_index:end_index]",
"def getUrls(domain):\n wayback_urls = set()\n history = requests.get(API_URL + domain).text.splitlines()\n for line in history:\n record = parse_wayback_record(line)\n if record.mimetype == \"text/html\":\n url = domain + record.path\n wayback_url = BASE_URL + record.timestamp + \"/\" + url\n wayback_urls.add(wayback_url)\n return wayback_urls",
"def link_modifier(self, search_string, page_location):\n for response in self.response['results']:\n if response['type'] != 'page':\n continue\n\n # copy the response\n response_copy = {'id': response['id'], 'type': response['type'],\n 'title': response['title'], 'version': {}, 'body': {}}\n response_copy['body']['storage'] = {}\n response_copy['body']['storage']['representation'] = response['body']['storage'][\n 'representation']\n response_copy['body']['storage']['value'] = response['body']['storage']['value']\n response_copy['version']['number'] = response['version']['number'] + 1\n response_body = response_copy['body']['storage']['value']\n\n bs = BeautifulSoup(response_body, \"html.parser\")\n matches = bs.findAll(text=re.compile(r'\\b' + search_string + r'\\b'))\n\n if not matches:\n return\n\n change_count = 0\n for match in matches:\n grand_parent = match.parent.parent.name\n\n # check if word is part of a markdown\n if \"ac:\" in grand_parent:\n if grand_parent == \"ac:link\":\n try:\n existing_link = match.parent.previous_sibling['ri:content-title']\n except:\n print \"Error: detected self referencing link at: {}\"\\\n .format(response['title'])\n continue\n if existing_link != page_location:\n match.parent.previous_sibling['ri:content-title'] = page_location\n change_count += 1\n else:\n continue\n else:\n continue\n else:\n # don't add links in tables\n # for parent in match.parents:\n # if \"table\" in parent:\n # continue\n substituted = re.sub(r'\\b' + search_string + r'\\b',\n self.LINK1 + page_location + self.LINK2 +\n search_string + self.LINK3, match)\n match.replaceWith(BeautifulSoup(substituted, \"html.parser\"))\n change_count += 1\n\n if change_count:\n # do replacement\n response_copy['body']['storage']['value'] = bs.encode('utf-8')\n self.to_be_updated.append(response_copy)\n self.responses.append(response)\n else:\n continue",
"def link_hashtag(hashtag, tag_links):\r\n\t\r\n\tglobal c, db\r\n\r\n\ttry:\r\n\t\tfor tag in tag_links:\r\n\t\t\tif tag.name != hashtag:\r\n\t\t\t\tc.execute(\"SELECT * FROM ig_hashtags_link WHERE hashtag1='%s' AND hashtag2='%s' LIMIT 1\" % (hashtag, tag.name))\r\n\t\t\t\tdb_hashtag = c.fetchone()\r\n\t\t\t\tif not db_hashtag:\r\n\t\t\t\t\tc.execute(\"SELECT * FROM ig_hashtags_link WHERE hashtag1='%s' AND hashtag2='%s' LIMIT 1\" % (tag.name, hashtag))\r\n\t\t\t\t\tdb_hashtag = c.fetchone()\r\n\t\t\t\t\tif not db_hashtag:\r\n\t\t\t\t\t\tc.execute(\"INSERT IGNORE INTO ig_hashtags_link(hashtag1,hashtag2,frequency) VALUES('%s', '%s', '%s')\" % (hashtag, tag.name, 1))\r\n\t\t\t\t\t\tcontinue\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tc.execute(\"UPDATE ig_hashtags_link SET frequency=frequency + 1 WHERE hashtag1='%s' AND hashtag2='%s'\" % (tag.name, hashtag))\r\n\t\t\t\telse:\r\n\t\t\t\t\tc.execute(\"UPDATE ig_hashtags_link SET frequency=frequency + 1 WHERE hashtag1='%s' AND hashtag2='%s'\" % (hashtag, tag.name))\r\n\t\t\t\tif int(db_hashtag[2]) > min_freq: #remember to change min_freq in view.py (class search)\r\n\t\t\t\t\tc.execute(\"INSERT IGNORE INTO ig_hashtags(hashtag) VALUES('%s')\" % (tag.name))\r\n\t\t\tdb.commit()\r\n\texcept Exception as e:\r\n\t\ttry:\r\n\t\t\tdb.rollback()\r\n\t\texcept:\t\r\n\t\t\tdbConnect()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Override to indicate that a test has finished (it may already have failed or errored)
|
def notifyTestFinished(self, test):
pass
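# Hedged sketch of a typical override in a reporter subclass (the counter is
# illustrative; the base class is whatever class defines this hook):
#
#   def notifyTestFinished(self, test):
#       self.finished_count += 1  # the test may already have failed or errored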
|
[
"def end_test(self):",
"def wait_test_done(self):\n self.test_thread.join()\n self.logger.info('Test thread is done')",
"def testResultDone(self):\n ray.init(num_cpus=1, num_gpus=1)\n runner = TrialRunner(BasicVariantGenerator())\n kwargs = {\n \"stopping_criterion\": {\n \"training_iteration\": 2\n },\n \"resources\": Resources(cpu=1, gpu=1),\n }\n runner.add_trial(Trial(\"__fake\", **kwargs))\n trials = runner.get_trials()\n\n runner.step()\n self.assertEqual(trials[0].status, Trial.RUNNING)\n runner.step()\n self.assertNotEqual(trials[0].last_result[DONE], True)\n runner.step()\n self.assertEqual(trials[0].last_result[DONE], True)",
"def finishedTestcase(self):\n\n print('We have now finished the testcase, {:s}'.format(self.testcase))\n\n if self.movies:\n print('You can find the png and movie output of the simulation within ./{0:s}/png and ./{0:s}'\n .format(self.testcase))\n else:\n print('You can find the png output of the simulation within ./{:s}/png'.format(self.testcase))",
"def _succeed(self):\n print(self.test_case + ': succeeded')\n exit(0)",
"def EndTest(self):\n self.end_time = self._GetTimeString()\n self._SummaryTestToRecord()\n self._WriteToReport()",
"def finished(self, result):\n if result:\n #logger(\"Finished\")\n QgsMessageLog.logMessage(\n 'RandomTask \"{name}\" completed\\n'.format(\n name=self.description(),\n ),\n MESSAGE_CATEGORY, Qgis.Success)\n \n else:\n if self.exception is None:\n QgsMessageLog.logMessage(\n 'RandomTask \"{name}\" not successful but without '\\\n 'exception (probably the task was manually '\\\n 'canceled by the user)'.format(\n name=self.description()),\n MESSAGE_CATEGORY, Qgis.Warning)\n else:\n QgsMessageLog.logMessage(\n 'RandomTask {name} Exception: {exception}'.format(\n name = self.description(),\n exception = self.exception),\n MESSAGE_CATEGORY, Qgis.Critical)\n raise self.exception",
"def test_case_passed(self):\n self.__set_test_case_result(result='PASSED', message='')",
"def end_test(self, line):\n if self._testcase is None:\n raise Exception(\"Invalid current testcase\")\n if self._test is None:\n raise Exception(\"Invalid current test\")\n failed = \"[ FAILED ]\" in line\n\n # windows crash is a failure\n seh = False\n for line in self._output:\n if \"error: SEH exception\" in line:\n seh = True\n break\n outcome = PASSED\n if seh:\n outcome = CRASHED\n self._output = [\"SEH Exception\"] + self._output\n elif failed:\n outcome = FAILED\n\n self._tests[self._test] = (\n outcome,\n self._output[:-1], # cut the [ OK/FAILED ] line\n self._error[:],\n )\n\n if failed:\n self._fail_count += 1\n self.out(\"X\" if seh else \"F\", end=\"\", verbose=0)\n else:\n self._pass_count += 1\n self.out(\".\", end=\"\", verbose=0)\n self._test = None\n self._output = []\n self._error = []",
"def end_run(self):\n self.complete_a_run()",
"def end(self):\n self.remove_phases()\n self.unsuccessful_run()",
"def assertFinished(self, msg):\n finishStates = [u'finish']\n state = self.macro_executor.getState()\n #TODO buffer is just for debugging, attach only the last state\n state_buffer = self.macro_executor.getStateBuffer()\n msg = msg + '; State history=%s' % state_buffer\n self.assertIn(state, finishStates, msg)",
"def complete(self):\n self.state = 2\n self.finished = datetime.now()",
"def async_tests(self):\n self._testingfinished.clear() # Clear the event flag\n self.thread = Thread(target=self.dotests) # Create the thread\n self.thread.start() # And start it",
"def set_successful_finish(self):\n\n self.successful = True",
"def finished(self, finished):\n \n self._finished = finished",
"def finish(self):\n for action in self._pending_actions:\n if not action.is_done:\n action.done('failed')\n self._pending_actions = []",
"def finish_trial(self):\n success = self.cur_trial.finish(time.time())\n self.trial_seq = []\n self.trial_seq.append(self.cur_trial)\n self.trial_amount += 1\n self.cur_trial = None\n return success",
"def job_done(self, job):\n self.job_set_status(job, 'DONE', \"exitCode=0\")",
"def run_finalize_test():\n sys.addaudithook(TestFinalizeHook())"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the contours of image X (a 2D matrix); v is the intensity threshold value.
|
from skimage import measure

def get_contours(X, v):
    return measure.find_contours(X, v)
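# Hedged usage sketch (the synthetic square below is illustrative):
if __name__ == '__main__':
    import numpy as np
    img = np.zeros((64, 64))
    img[16:48, 16:48] = 1.0
    contours = get_contours(img, 0.5)
    # find_contours returns a list of (N, 2) arrays of (row, column) coordinates.
    print(len(contours), contours[0].shape)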
|
[
"def find_contours(x, level=0.8, fully_connected='low', positive_orientation='low'):\n return skimage.measure.find_contours(\n x, level, fully_connected=fully_connected, positive_orientation=positive_orientation\n )",
"def find_contours(img):\n img_copy = img.copy()\n im2, contours, h = cv2.findContours(img_copy, 1, 2)\n return contours",
"def find_contours(image):\n thresh = cv2.adaptiveThreshold(image, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 101, 10)\n #finds the contours in the mask of the thresholded image.\n contours = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]\n return contours",
"def contours(image,debug=False):\n\timgray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\n\tif debug: cv2.imshow('gray_scale_contour',imgray)\n\tim2, contours, hierarchy = cv2.findContours(imgray,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n\n\treturn contours,hierarchy",
"def draw_contours(self):\n copy = self.img.copy()\n if self.contours is not None:\n cv2.drawContours(copy, self.contours, -1, (0, 255, 0), 7)\n\n return copy",
"def get_contours(image, factor=3):\n _, image_thresh = cv2.threshold(\n image, image.mean() + image.std()*factor, 255, cv2.THRESH_TOZERO)\n _, contours, _ = cv2.findContours(image_thresh, 1, 2)\n return contours",
"def point_contour(x, y, data):\n try:\n from scipy.ndimage import label, binary_fill_holes\n from skimage.measure import find_contours\n except ImportError:\n raise ImportError(\"Image processing in Glue requires SciPy and scikit-image\")\n\n # Find the intensity of the selected pixel\n inten = data[y, x]\n\n # Find all 'islands' above this intensity\n labeled, nr_objects = label(data >= inten)\n\n # Pick the object we clicked on\n z = (labeled == labeled[y, x])\n\n # Fill holes inside it so we don't get 'inner' contours\n z = binary_fill_holes(z).astype(float)\n\n # Pad the resulting array so that for contours that go to the edge we get\n # one continuous contour\n z = np.pad(z, 1, mode='constant')\n\n # Finally find the contours around the island\n xy = find_contours(z, 0.5, fully_connected='high')\n\n if not xy:\n return None\n\n if len(xy) > 1:\n warnings.warn(\"Too many contours found, picking the first one\")\n\n # We need to flip the array to get (x, y), and subtract one to account for\n # the padding\n return xy[0][:, ::-1] - 1",
"def findContours(image):\n _, contours, _ = cv2.findContours(image ,cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n return contours",
"def _gen_contours(self):\n # check to see if the number of factors to contour is > 1, otherwise \n if self.Y_.ndim < 2:\n z = np.asarray(self.Y_)\n # get the values of the manifold embedding\n x1 = self.X_[:, 0]\n x2 = self.X_[:, 1]\n x1g, x2g, zg = self._gen_contour(x1, x2, z)\n self.contours_[0] = np.nan_to_num(zg)\n else:\n col = 0\n while col < self.Y_.shape[self.Y_.ndim-1]:\n z = np.asarray(self.Y_)[:, col]\n # get the values of the manifold embedding\n x1 = self.X_[:, 0]\n x2 = self.X_[:, 1]\n x1g, x2g, zg = self._gen_contour(x1, x2, z)\n self.contours_[col] = np.nan_to_num(zg) # zero out the non-contoured points in the 2D space\n col += 1 # go to the next column",
"def _get_box_by_contours(contour):\n rect = cv2.minAreaRect(contour)\n box = cv2.boxPoints(rect)\n box = resort_points(box)\n return box",
"def get_Contours(self, img,t1, t2, opp, opp2):\n _, thresh1 = cv2.threshold(img, 122, 51, cv2.THRESH_BINARY)\n contours, hierarchy = cv2.findContours(thresh1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n big_cnt = []\n for cnt in contours:\n area = cv2.contourArea(cnt)\n if area > 25000 and area < 65000:\n # print(cnt)\n big_cnt.append(cnt)\n return big_cnt",
"def create_mask(img, contours):\n binary_mask = np.zeros(np.shape(img), dtype=np.uint8)\n cv2.drawContours(binary_mask, contours, -1, (255,255,255), -1)\n return binary_mask",
"def filter_contours(contours, low=400, high=10000):\n return [cnt for cnt in contours \\\n if low < cv2.contourArea(cnt) < high]",
"def contourf(self, x, y, v, zdir='z', offset=0, levels=None, **kw):\n # pylint: disable=unused-argument\n # Don't use contour for now (although we might want to later)\n self.pcolor(x, y, v, zdir='z', offset=offset, **kw)",
"def find_object(img):\r\n\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n _, thresh = cv2.threshold(gray,250,255,cv2.THRESH_BINARY_INV)\r\n\r\n contours, _ = cv2.findContours(thresh, cv2.RETR_TREE,\r\n cv2.CHAIN_APPROX_SIMPLE)\r\n return contours",
"def convertANDthreshold(roi, threshold_value = 230):\n \n # convert to hsv and extract value channel\n hsv_image = cv2.cvtColor(roi,cv2.COLOR_BGR2HSV)\n _,_,value_channel = cv2.split(hsv_image)\n \n # apply filters and threshold\n blurred = cv2.GaussianBlur(value_channel, (11, 11), 0)\n _, thresh = cv2.threshold(blurred, threshold_value, 255,\n cv2.THRESH_BINARY)\n thresh = cv2.erode(thresh, None, iterations=2)\n thresh = cv2.dilate(thresh, None, iterations=4)\n \n # find contours\n _,contours, _ = cv2.findContours(thresh, cv2.RETR_LIST,\n cv2.CHAIN_APPROX_SIMPLE)\n contours = sorted(contours, key=lambda x: cv2.contourArea(x), reverse=True)\n \n return thresh, contours",
"def __idiv__(self, v: 'stdcomplexF') -> \"vnl_diag_matrixCF &\":\n return _vnl_diag_matrixPython.vnl_diag_matrixCF___idiv__(self, v)",
"def draw_contour_on_image(source, points_x, points_y):\n\n # Copy the image source to prevent modifying the original image\n src = np.copy(source)\n\n points = []\n for px, py in zip(points_x, points_y):\n points.append([px, py])\n\n points = np.array(points, np.int32)\n points = points.reshape((-1, 1, 2))\n\n image = cv2.polylines(src, [points], isClosed=True, color=(0, 255, 0), thickness=2)\n\n return image",
"def rebuild_from_vector(vector, tree, image_size, contour=False, threshold=0):\n image_bit_level = int(np.log2(image_size))\n current_image = np.zeros([image_size, image_size])\n for path in sorted(tree):\n value = vector[tree[path]]\n (x, y, prefix_len) = binary_path_to_coordinates(path)\n (x_bot, x_top, y_bot,\n y_top) = transform_region_to_coordinates(x, y, prefix_len, image_bit_level)\n if value < threshold:\n value = 0\n count = value / 2**(2 * (image_bit_level - prefix_len))\n\n # Build a grid image without filling the regions.\n if contour:\n current_image[x_bot:x_top + 1, y_bot:y_bot + 1] += 1\n current_image[x_bot:x_top + 1, y_top:y_top + 1] += 1\n current_image[x_bot:x_bot + 1, y_bot:y_top + 1] += 1\n current_image[x_top:x_top + 1, y_bot:y_top + 1] += 1\n else:\n current_image[x_bot:x_top + 1, y_bot:y_top + 1] = count\n return current_image"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Given a 2D array X, where each row represents a 2D point, interpolate between these points using a B-spline. Return x and y coordinates with n points each.
|
import numpy as np
from scipy.interpolate import splprep, splev

def interpolate(X, n, s=2.0):
    # Fit a parametric B-spline through the rows of X (smoothing factor s),
    # then resample the fitted curve at n evenly spaced parameter values.
    tck, u = splprep(X.T, u=None, s=s, per=0)
    u_new = np.linspace(u.min(), u.max(), n)
    x_new, y_new = splev(u_new, tck, der=0)
    return x_new, y_new
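
# A minimal usage sketch (the sample points below are illustrative only).
if __name__ == '__main__':
    pts = np.array([[0.0, 0.0], [1.0, 2.0], [2.0, 3.0], [3.0, 3.0], [4.0, 2.0], [5.0, 0.0]])
    xs, ys = interpolate(pts, n=100, s=2.0)
    print(xs.shape, ys.shape)  # (100,) (100,)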
|
[
"def bspline(x, n):\n ax = -abs(asarray(x))\n # number of pieces on the left-side is (n+1)/2\n funclist, condfuncs = _bspline_piecefunctions(n)\n condlist = [func(ax) for func in condfuncs]\n return piecewise(ax, condlist, funclist)",
"def interpolate_1d(array, x):\r\n array = np.asarray(array)\r\n if x <= 0:\r\n y = array[0]\r\n elif x >= array.size - 1:\r\n y = array[-1]\r\n else:\r\n x_fractional, x_lower = np.modf(x)\r\n x_lower = int(x_lower)\r\n y_lower, y_upper = array[x_lower : x_lower + 2]\r\n y = y_lower + x_fractional * (y_upper - y_lower)\r\n return y",
"def Interpolate(x, y, data):\n\n widthToUse = data.shape[1]\n heightToUse = data.shape[0]\n\n ix=numba.int32(x)\n iy=numba.int32(y)\n\n xIndex = np.zeros((4,), dtype=numba.int32)\n yIndex = np.zeros((4,), dtype=numba.int32)\n\n# Set X indexes\n# p is the index of the rightmost influencing spline\n p = (ix + 2) if (0.0 <= x) else (ix + 1)\n for k in range(4):\n xIndex[k] = -1 if (p<0 or p>=widthToUse) else p\n p -= 1\n\n# Set Y indexes\n p = (iy + 2) if (0.0 <= y) else (iy + 1)\n for k in range(4):\n yIndex[k] = -1 if (p<0 or p>=heightToUse) else p\n p -= 1\n\n\n# Compute how much the sample depart from an integer position\n# [ conditional because int rounds down for positive numbers and up for negative numbers ]\n\n ex = x - ((ix) if (0.0 <= x) else (ix - 1))\n ey = y - ((iy) if (0.0 <= y) else (iy - 1))\n\n xWeight = np.zeros((4,), dtype=numba.float64)\n yWeight = np.zeros((4,), dtype=numba.float64)\n\n\n# Set X weights for the image and derivative interpolation\n for (weight, e) in [(xWeight, ex), (yWeight, ey)]:\n s = 1.0 - e\n weight[0] = 0.5 * e * e * e / 3.0 \n weight[1] = 2.0 / 3.0 - (2.0 - s) * 0.5 * s * s\n weight[2] = 2.0 / 3.0 - (2.0 - e) * 0.5 * e * e \n weight[3] = 0.5 * s * s * s / 3.0 \n\n\n\n ival = 0.0\n for j in range(4):\n s = 0.0\n iy=yIndex[j]\n if iy != -1:\n for i in range(4):\n ix=xIndex[i]\n if ix!=-1:\n s += xWeight[i]*data[iy][ix]\n ival+=yWeight[j] * s\n return ival",
"def spline_liniara(X, Y, pointx):\n # PAS 1 Initializari\n n = X.shape[0] - 1\n a = np.zeros([n])\n b = np.zeros([n])\n\n # PAS 2 Calcul coeficienti\n for j in range(n):\n a[j] = Y[j]\n b[j] = (Y[j+1] - Y[j]) / (X[j+1] - X[j])\n\n # PAS 3 Gasire interval si intoarcere valoare\n for j in range(n):\n if X[j] <= pointx <= X[j+1]:\n\n return a[j] + b[j] * (pointx - X[j])\n\n return -1",
"def interpolate(self, x, fval):\n return np.einsum('i...,i...', self.lagrange(x), self.extract_fvec(fval))",
"def Interpolate_derivs(x, y, data):\n \n widthToUse = data.shape[1]\n heightToUse = data.shape[0]\n\n ix=numba.int32(x)\n iy=numba.int32(y)\n\n xIndex = np.zeros((4,), dtype=numba.int32)\n yIndex = np.zeros((4,), dtype=numba.int32)\n\n# Set X indexes\n# p is the index of the rightmost influencing spline\n p = (ix + 2) if (0.0 <= x) else (ix + 1)\n for k in range(4):\n xIndex[k] = -1 if (p<0 or p>=widthToUse) else p\n p -= 1\n\n# Set Y indexes\n p = (iy + 2) if (0.0 <= y) else (iy + 1)\n for k in range(4):\n yIndex[k] = -1 if (p<0 or p>=heightToUse) else p\n p -= 1\n\n\n# Compute how much the sample depart from an integer position\n# [ conditional because int rounds down for positive numbers and up for negative numbers ]\n\n ex = x - ((ix) if (0.0 <= x) else (ix - 1))\n ey = y - ((iy) if (0.0 <= y) else (iy - 1))\n\n xWeight = np.zeros((4,), dtype=numba.float64)\n dxWeight = np.zeros((4,), dtype=numba.float64)\n yWeight = np.zeros((4,), dtype=numba.float64)\n dyWeight = np.zeros((4,), dtype=numba.float64)\n\n# Set X weights for the image and derivative interpolation\n for (weight, e) in [(xWeight, ex), (yWeight, ey)]:\n s = 1.0 - e\n weight[0] = 0.5 * e * e * e / 3.0 \n weight[1] = 2.0 / 3.0 - (2.0 - s) * 0.5 * s * s\n weight[2] = 2.0 / 3.0 - (2.0 - e) * 0.5 * e * e \n weight[3] = 0.5 * s * s * s / 3.0 \n\n for (dweight, e) in [(dxWeight, ex), (dyWeight, ey)]:\n s = 1.0 - e\n dweight[0] = 0.5 * e * e \n dweight[1] = - ( 1.5 * s * s - 2 * s)\n dweight[2] = 1.5 * e * e - 2 * e\n dweight[3] = - 0.5 * s * s \n\n\n\n ival = 0.0\n ival_dx = 0.0\n ival_dy = 0.0\n for j in range(4):\n s = 0.0\n s_dx = 0.0\n# s_dy = 0.0\n iy=yIndex[j]\n if iy != -1:\n for i in range(4):\n ix=xIndex[i]\n if ix!=-1:\n s += xWeight[i]*data[iy][ix]\n s_dx += dxWeight[i]*data[iy][ix]\n #s_dy += xWeight[i]*data[iy][ix]\n s_dy = s\n ival += yWeight[j] * s\n ival_dx += yWeight[j] * s_dx\n ival_dy += dyWeight[j] * s_dy\n return ival, ival_dx, ival_dy",
"def interpolate1d(x_new, Y, X):\n\n out = np.interp(x=x_new.flatten(), xp=X, fp=Y)\n\n return np.reshape(out, x_new.shape)",
"def get_interpolated_value(self, x):\n if len(self.ydim) == 1:\n return get_linear_interpolated_value(self.x, self.y, x)\n else:\n return [get_linear_interpolated_value(self.x, self.y[:, k], x)\n for k in range(self.ydim[1])]",
"def get_weights(x, n):\n \n widthToUse = n+3\n\n ix=numba.int32(x)\n\n xIndex = np.zeros((4,), dtype=numba.int32)\n# Set X indexes\n# p is the index of the rightmost influencing spline\n p = (ix + 2) if (0.0 <= x) else (ix + 1)\n for k in range(4):\n xIndex[k] = -1 if (p<0 or p>=widthToUse) else p\n p -= 1\n\n# Compute how much the sample depart from an integer position\n# [ conditional because int rounds down for positive numbers and up for negative numbers ]\n\n ex = x - ((ix) if (0.0 <= x) else (ix - 1))\n\n xWeight = np.zeros((4,), dtype=numba.float64)\n# dxWeight = np.zeros((4,), dtype=numba.float64)\n\n# Set X weights for the image and derivative interpolation\n e = ex\n s = 1.0 - e\n xWeight[0] = 0.5 * e * e * e / 3.0 \n xWeight[1] = 2.0 / 3.0 - (2.0 - s) * 0.5 * s * s\n xWeight[2] = 2.0 / 3.0 - (2.0 - e) * 0.5 * e * e \n xWeight[3] = 0.5 * s * s * s / 3.0 \n\n \"\"\"\n for (dweight, e) in [(dxWeight, ex), (dyWeight, ey)]:\n s = 1.0 - e\n dweight[0] = 0.5 * e * e \n dweight[1] = - ( 1.5 * s * s - 2 * s)\n dweight[2] = 1.5 * e * e - 2 * e\n dweight[3] = - 0.5 * s * s \n \"\"\"\n\n\n return xIndex, xWeight",
"def _evaluate_spline(self, x):\n\n mu = index(x, self.t)\n B = evaluate_non_zero_basis_splines(x, mu, self.t, self.p)\n C = self.c[mu - self.p:mu - self.p + len(B)]\n B = np.reshape(B, (len(B), 1))\n\n # TODO: Dot product here? More elegant\n result = sum([c * b for c, b in zip(C, B)])\n return result",
"def line_sample2d(x,y,z,x1,y1):\n from scipy.interpolate import RectBivariateSpline as rbs\n # Extract the values along the line, using cubic interpolation\n f = rbs(x,y,z.T)\n return f.ev(x1,y1)\n #return scipy.ndimage.map_coordinates(z, np.vstack((y,x)))",
"def bsplinefunc(knots, points):\n m, n, degree, dim = get_lengths_and_degree(knots, points)\n try:\n dummy = points[0][0]\n pts = points\n except IndexError:\n pts = [[i] for i in points]\n def c(t):\n pt = np.zeros(dim)\n for i in range(n+1):\n basis = bsplinebasis(knots, i, degree)(t)\n pt += map(lambda x: x * basis, pts[i])\n return pt\n return c",
"def Cubic_Spline(self, x_values, y_values, n_points, x_start=np.log(1e-10), x_end=0):\n\t\tTemp_interp = interpolate.splrep(x_values, y_values)\n\t\tx_new = np.linspace(x_start, x_end, n_points)\n\t\ty_new = interpolate.splev(x_new, Temp_interp, der=0)\n\t\treturn x_new, y_new",
"def Piecewise_Linear_Interpolation_Function(x,data):\n #print(x)\n if x>data[-1][0]:\n return data[-1][1]\n for i in range(len(data)):\n #print(i,data[i][0])\n if (data[i][0]<=x and data[i+1][0]>=x):\n index=i\n break\n x1=data[index][0]\n y1=data[index][1]\n x2=data[index+1][0]\n y2=data[index+1][1]\n return y1*(x-x2)/(x1-x2)+y2*(x-x1)/(x2-x1)",
"def _interpolate(self, x, y):\n d = int(np.sqrt(self.window_.shape[0]))\n square_proj_win = self.window_.reshape(d, d)\n \n # snap to grid dimensions\n x_snap = max(0, min(x, d-1))\n y_snap = max(0, min(x, d-1))\n p_snap = np.array([x_snap, y_snap])\n\n # get min and max coords\n x_min = np.floor(x_snap)\n y_min = np.floor(y_snap)\n x_max = min(x_min + 1, d-1)\n y_max = min(y_min + 1, d-1)\n\n points = np.zeros([4, 2])\n points[0,:] = np.array([x_min, y_min])\n points[1,:] = np.array([x_min, y_max])\n points[2,:] = np.array([x_max, y_min])\n points[3,:] = np.array([x_max, y_max])\n \n # compute the value using bilinear interpolation\n val = 0.0\n num_pts = 4\n for i in range(num_pts):\n p = points[i,:].astype(np.uint16)\n u = square_proj_win[p[1], p[0]]\n w = np.prod(-np.abs(p - p_snap) + 1)\n val = val + w * u\n return val",
"def interp_spline(x, xvals, yvals, nochecks=False):\n spl = CubicSpline(xvals, yvals, nochecks=nochecks)\n return spl(x)",
"def phi_spline(i, x, N):\n if x < t_x(i + 1, N) and x >= t_x(i, N):\n return g2((x - t_x(i, N)) / (t_x(i + 1, N) - t_x(i, N)))\n elif x < t_x(i, N) and x >= t_x(i - 1, N):\n return g1((x - t_x(i - 1, N)) / (t_x(i, N) - t_x(i - 1, N)))\n else:\n return 0",
"def smooth(y, sigma, axis=-1, interpolation='spline'):\n \n if axis == -1:\n axis = y.ndim - 1\n elif axis == 0 or axis == 1:\n pass\n else:\n raise ValueError('axis has to be 0, 1 or -1')\n \n y = y.copy()\n x = np.arange(y.shape[axis]) \n\n if y.ndim == 1:\n w = np.isnan(y)\n \n if w.any():\n \n if interpolation == 'spline':\n y[w] = 0.\n spl = UnivariateSpline(x, y, w=np.logical_not(w), k=3)\n y[w] = spl(x[w])\n elif interpolation == 'linear':\n cregs = contiguous_regions(w, minlen=0)\n for cr in cregs:\n if cr[0] > 0:\n y0 = y[cr[0]-1]\n \n if cr[1] < y.shape[axis]-1:\n y1 = y[cr[1]+1]\n ynew = np.linspace(y0, y1, cr[1]+1-cr[0]+2, endpoint=True)\n y[cr[0]: cr[1]+1] = ynew[1:-1] \n\n else: # cr[1] is last value\n y[cr[0]:] = y0 \n else: # cr[0] is first value\n y[:cr[1]+1] = y[cr[1]+1]\n\n elif y.ndim == 2:\n \n if axis == 0:\n y = y.T\n \n for i in range(y.shape[0]):\n w = np.isnan(y[i])\n if w.any():\n if interpolation == 'spline':\n y[i, w] = 0.\n spl = UnivariateSpline(x, y[i], w=(np.logical_not(w)).astype(int), k=3)\n y[i, w] = spl(x[w])\n elif interpolation == 'linear':\n cregs = contiguous_regions(w, minlen=0)\n for cr in cregs:\n if cr[0] > 0:\n y0 = y[i, cr[0]-1]\n \n if cr[1] < y.shape[1]-1:\n y1 = y[i, cr[1]+1]\n ynew = np.linspace(y0, y1, cr[1]+1-cr[0]+2, endpoint=True)\n y[i, cr[0]: cr[1]+1] = ynew[1:-1]\n else: # cr[1] is last value\n y[i, cr[0]:] = y0 \n else: # cr[0] is first value\n y[i, :cr[1]+1] = y[i, cr[1]+1]\n\n else:\n raise ValueError('Only 1 or 2 dimensional input arrays are supported.')\n \n return gaussian_filter1d(y, sigma, axis=axis)",
"def psi_spline(i, x, N):\n if i > 0 and i < N:\n if x < t_x(i + 1, N) and x >= t_x(i, N):\n return g4((x - t_x(i, N)) / (t_x(i + 1, N) - t_x(i, N))) * (t_x(i + 1, N) - t_x(i, N))\n elif x < t_x(i, N) and x >= t_x(i - 1, N):\n return g3((x - t_x(i - 1, N)) / (t_x(i, N) - t_x(i - 1, N))) * (t_x(i, N) - t_x(i - 1, N))\n else:\n return 0\n elif i == 0:\n if x < t_x(1, N) and x >= 0:\n return g4(x / t_x(1, N)) * t_x(1, N)\n else:\n return 0\n elif i == N:\n if x < 1 and x >= t_x(N - 1, N):\n return g3((x - t_x(N - 1, N)) / (1 - t_x(N - 1, N))) * (1 - t_x(N - 1, N))\n else:\n return 0"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Given an image X (2D matrix), extract a contour consisting of n points. s controls the smoothness of the contour, where s=0 gives a sharp interpolation and higher values of s make it smoother.
|
import numpy as np

# Relies on helpers assumed to be defined elsewhere:
#   get_contours(X, v)   -> list of contours (e.g. a wrapper around skimage.measure.find_contours)
#   interpolate(X, n, s) -> B-spline resampling of a sequence of 2D points
def get_shape(X, n=50, s=5):
    v = X.mean()  # contour level: mean value of all entries
    cs = get_contours(X, v)
    if len(cs) == 0:
        raise ValueError('Unable to extract contour.')
    # keep only the outermost (longest) contour
    c = sorted(cs, key=len, reverse=True)[0]
    # rotate (row, col) contour coordinates into (x, y) and flip the vertical axis;
    # the hard-coded 28 assumes a 28-pixel-tall image
    R = np.array([[0, -1], [1, 0]])
    c = c.dot(R) + np.array([[0, 28]])
    x, y = interpolate(c, n=n, s=s)
    return np.array([[x[i], y[i]] for i in range(len(x))])
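
# A minimal usage sketch (illustrative only): a synthetic 28x28 image with a bright
# square, matching the 28-pixel height assumed by the offset above; it assumes the
# get_contours and interpolate helpers noted above are available.
if __name__ == '__main__':
    img = np.zeros((28, 28))
    img[8:20, 8:20] = 1.0
    outline = get_shape(img, n=50, s=5)
    print(outline.shape)  # (50, 2)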
|
[
"def find_contours(x, level=0.8, fully_connected='low', positive_orientation='low'):\n return skimage.measure.find_contours(\n x, level, fully_connected=fully_connected, positive_orientation=positive_orientation\n )",
"def point_contour(x, y, data):\n try:\n from scipy.ndimage import label, binary_fill_holes\n from skimage.measure import find_contours\n except ImportError:\n raise ImportError(\"Image processing in Glue requires SciPy and scikit-image\")\n\n # Find the intensity of the selected pixel\n inten = data[y, x]\n\n # Find all 'islands' above this intensity\n labeled, nr_objects = label(data >= inten)\n\n # Pick the object we clicked on\n z = (labeled == labeled[y, x])\n\n # Fill holes inside it so we don't get 'inner' contours\n z = binary_fill_holes(z).astype(float)\n\n # Pad the resulting array so that for contours that go to the edge we get\n # one continuous contour\n z = np.pad(z, 1, mode='constant')\n\n # Finally find the contours around the island\n xy = find_contours(z, 0.5, fully_connected='high')\n\n if not xy:\n return None\n\n if len(xy) > 1:\n warnings.warn(\"Too many contours found, picking the first one\")\n\n # We need to flip the array to get (x, y), and subtract one to account for\n # the padding\n return xy[0][:, ::-1] - 1",
"def get_contour_sample(img_orig, sample_interval=5):\n img = img_orig.copy()\n img[img > 10] = 255\n element = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (31, 31))\n mask = pad_image_cnt(img, None, (0, 0), (300, 300), bg=(1500,1500))\n# print mask\n for j in range(99):\n mask = cv2.dilate(mask, element)\n# cv2.imshow(\"mask_dilate\", mask)\n# cv2.waitKey()\n cnts, hier = cv2.findContours(mask.copy(), cv2.cv.CV_RETR_EXTERNAL, \n cv2.cv.CV_CHAIN_APPROX_TC89_L1)\n if len(cnts) == 1:\n break\n# cv2.imshow(\"mask_dilate\", mask)\n# cv2.waitKey()\n \n mask = cv2.erode(mask, element, iterations=j+1)\n# cv2.imshow(\"mask\", mask)\n# cv2.waitKey()\n# cv2.destroyWindow(\"mask\")\n cnts, hier = cv2.findContours(mask.copy(), cv2.cv.CV_RETR_EXTERNAL, \n cv2.cv.CV_CHAIN_APPROX_NONE)\n img_contour_pts = np.squeeze(np.vstack(cnts))\n img_contour_sample = img_contour_pts[range(0, img_contour_pts.shape[0], sample_interval), :]\n img_contour_sample = img_contour_sample - np.matlib.repmat((300,300),\n img_contour_sample.shape[0], 1)\n# draw_contours(img_contour_sample, (800,800), show=True)\n return img_contour_sample",
"def contour(self, bit_diameter, count=1, overlap=0.5):\n if self.depth != 'f' or self.channels != 1:\n raise ValueError('Invalid image type for contour cut '+\n '(requires floating-point, 1-channel image)')\n\n max_distance = max(self.array.flatten())\n levels = [bit_diameter/2]\n step = bit_diameter * overlap\n if count == -1:\n while levels[-1] < max_distance:\n levels.append(levels[-1] + step)\n levels[-1] = max_distance\n else:\n for i in range(count-1):\n levels.append(levels[-1] + step)\n levels = (ctypes.c_float*len(levels))(*levels)\n\n ptr = ctypes.POINTER(ctypes.POINTER(Path_))()\n path_count = libfab.find_paths(\n self.width, self.height, self.pixels,\n 1./self.pixels_per_mm, len(levels),\n levels, ptr)\n\n paths = [Path.from_ptr(ptr[i]) for i in range(path_count)]\n libfab.free_paths(ptr, path_count)\n\n return Path.sort(paths)",
"def get_contours(image, factor=3):\n _, image_thresh = cv2.threshold(\n image, image.mean() + image.std()*factor, 255, cv2.THRESH_TOZERO)\n _, contours, _ = cv2.findContours(image_thresh, 1, 2)\n return contours",
"def get_Skeletonized_contour(image, RegionProposalMask, smallest_size, contour_thres, contour_dilationparameter, cell_region_opening_factor, \r\n cell_region_closing_factor, scanning_voltage, points_per_contour, sampling_rate):\r\n cleared = RegionProposalMask.copy()\r\n clear_border(cleared)\r\n # label image regions, prepare for regionprops\r\n label_image = label(cleared)\r\n \r\n CellSequenceInRegion = 0\r\n CellSkeletonizedContourDict = {}\r\n# dtype = [('No.', int), ('Mean intensity', float), ('Mean intensity in contour', float), ('Contour soma ratio', float)]\r\n \r\n for region in regionprops(label_image,intensity_image = image): # USE first image in stack before perfusion as template \r\n \r\n # skip small images\r\n if region.area > smallest_size:\r\n \r\n # draw rectangle around segmented coins\r\n minr, minc, maxr, maxc = region.bbox\r\n \r\n #region_mean_intensity = region.mean_intensity #mean intensity of the region, 0 pixels in label are omitted.\r\n \r\n # Based on the boundingbox for each cell from first image in the stack, raw image of slightly larger region is extracted from each round.\r\n RawRegionImg = image[max(minr-4,0):min(maxr+4, image[0].shape[0]), max(minc-4,0):min(maxc+4, image[0].shape[0])] # Raw region image \r\n \r\n RawRegionImg_for_contour = RawRegionImg.copy()\r\n \r\n #---------Get the cell filled mask-------------\r\n filled_mask_bef, MeanIntensity_Background = imageanalysistoolbox.get_cell_filled_mask(RawRegionImg = RawRegionImg, region_area = region.area, \r\n cell_region_opening_factor = cell_region_opening_factor, \r\n cell_region_closing_factor = cell_region_closing_factor)\r\n \r\n filled_mask_convolve2d = imageanalysistoolbox.smoothing_filled_mask(RawRegionImg, filled_mask_bef = filled_mask_bef, region_area = region.area, threshold_factor = 2)\r\n \r\n # Set the edge lines to zero so that we don't have the risk of unclosed contour at the edge of image.\r\n if minr == 0 or minc == 0:\r\n filled_mask_convolve2d[0,:] = False\r\n filled_mask_convolve2d[:,0] = False\r\n if maxr == image[0].shape[0] or maxc == image[0].shape[0]:\r\n filled_mask_convolve2d[filled_mask_convolve2d.shape[0]-1, :] = False\r\n filled_mask_convolve2d[:, filled_mask_convolve2d.shape[1]-1] = False\r\n \r\n # Find contour along filled image\r\n contour_mask_thin_line = imageanalysistoolbox.contour(filled_mask_convolve2d, RawRegionImg_for_contour.copy(), contour_thres) \r\n# plt.figure()\r\n# plt.imshow(contour_mask_thin_line)\r\n# plt.show()\r\n # after here intensityimage_intensity is changed from contour labeled with number 5 to binary image\r\n# contour_mask_of_cell = imageanalysistoolbox.inwarddilationmask(contour_mask_thin_line.copy() ,filled_mask_convolve2d, contour_dilationparameter)\r\n #--------------------------------------------------------------\r\n# print(len(np.where(contour_mask_thin_line == 1)[0]))\r\n if len(np.where(contour_mask_thin_line == 1)[0]) > 0:\r\n #-------------------Sorting and filtering----------------------\r\n clockwise_sorted_raw_trace = ProcessImage.sort_index_clockwise(contour_mask_thin_line)\r\n [X_routine, Y_routine], filtered_cellmap = ProcessImage.tune_contour_routine(contour_mask_thin_line, clockwise_sorted_raw_trace, filtering_kernel = 1.5)\r\n #--------------------------------------------------------------\r\n \r\n #----------Put contour image back to original image.-----------\r\n ContourFullFOV = np.zeros((image.shape[0], image.shape[1]))\r\n ContourFullFOV[max(minr-4,0):min(maxr+4, image[0].shape[0]), max(minc-4,0):min(maxc+4, 
image[0].shape[0])] = filtered_cellmap.copy()\r\n \r\n X_routine = X_routine + max(minr-4,0)\r\n Y_routine = Y_routine + max(minc-4,0)\r\n #--------------------------------------------------------------\r\n \r\n figure, (ax1, ax2) = plt.subplots(2, 1, figsize=(10,10))\r\n ax1.imshow(ContourFullFOV, cmap = plt.cm.gray)\r\n ax2.imshow(filtered_cellmap*2+RawRegionImg, cmap = plt.cm.gray)\r\n # ax2.imshow(ContourFullFOV*2+image, cmap = plt.cm.gray)\r\n # ax2.imshow(filled_mask_convolve2d, cmap = plt.cm.gray) \r\n # figure.tight_layout()\r\n plt.show()\r\n \r\n #------------Organize for Ni-daq execution---------------------\r\n voltage_contour_routine_X = (X_routine/ContourFullFOV.shape[0])*scanning_voltage*2-scanning_voltage\r\n voltage_contour_routine_Y = (Y_routine/ContourFullFOV.shape[1])*scanning_voltage*2-scanning_voltage\r\n \r\n #--------------interpolate to get 500 points-------------------\r\n x_axis = np.arange(0,len(voltage_contour_routine_X))\r\n f_x = interpolate.interp1d(x_axis, voltage_contour_routine_X, kind='cubic')\r\n newx = np.linspace(x_axis.min(), x_axis.max(), num=points_per_contour)\r\n X_interpolated = f_x(newx)\r\n \r\n y_axis = np.arange(0,len(voltage_contour_routine_Y))\r\n f_y = interpolate.interp1d(y_axis, voltage_contour_routine_Y, kind='cubic')\r\n newy = np.linspace(y_axis.min(), y_axis.max(), num=points_per_contour)\r\n Y_interpolated = f_y(newy)\r\n \r\n #-----------speed and accelation check-------------------------\r\n # contour_x_speed = np.diff(X_interpolated)/time_gap\r\n # contour_y_speed = np.diff(Y_interpolated)/time_gap\r\n time_gap = 1/sampling_rate\r\n contour_x_acceleration = np.diff(X_interpolated, n=2)/time_gap**2\r\n contour_y_acceleration = np.diff(Y_interpolated, n=2)/time_gap**2\r\n \r\n if AccelerationGalvo < np.amax(abs(contour_x_acceleration)):\r\n print(np.amax(abs(contour_x_acceleration)))\r\n if AccelerationGalvo < np.amax(abs(contour_y_acceleration)):\r\n print(np.amax(abs(contour_y_acceleration)))\r\n \r\n X_interpolated = np.around(X_interpolated, decimals=3)\r\n Y_interpolated = np.around(Y_interpolated, decimals=3)\r\n \r\n ContourArray_forDaq = np.vstack((X_interpolated,Y_interpolated))\r\n \r\n CellSkeletonizedContourDict['DaqArray_cell{}'.format(CellSequenceInRegion)] = ContourArray_forDaq\r\n CellSkeletonizedContourDict['ContourMap_cell{}'.format(CellSequenceInRegion)] = ContourFullFOV\r\n CellSequenceInRegion += 1\r\n #--------------------------------------------------------------\r\n \r\n \r\n return CellSkeletonizedContourDict",
"def get_contours(X, v):\n return measure.find_contours(X, v)",
"def show_contours(image):\n global coords\n global corners\n io.imshow(image)\n for point in coords:\n plt.scatter([point[1]], [point[0]], s=2, c='r')\n for point in corners:\n plt.scatter([point[1]], [point[0]], s=2, c='w')\n plt.show()",
"def red_contour(image):\n b, g, r = cv2.split(image)\n bw0 = (r[:,:]>150).astype(np.uint8)*255\n\n bw1 = cv2.divide(r, g[:, :] + 1)\n bw1 = (bw1[:, :] > 1.5).astype(np.uint8)*255\n bw1 = np.multiply(bw1, bw0).astype(np.uint8) * 255\n bw2 = cv2.divide(r, b[:,:]+1)\n bw2 = (bw2[:, :] > 1.5).astype(np.uint8)*255\n\n bw = np.multiply(bw1, bw2).astype(np.uint8) * 255\n kernel = np.ones((5, 5), np.uint8)\n bw = cv2.morphologyEx(bw, cv2.MORPH_OPEN, kernel)\n bw = cv2.dilate(bw, kernel, iterations=1)\n _, bw = cv2.threshold(bw,0,255,0)\n\n # Now get the actual contours. Note that contour detection requires a\n # single channel image. Also, we only want the max one as that should be\n # where the sewn patch is located.\n (_, cnts, _) = cv2.findContours(bw, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n cnt_largest = max(cnts, key = lambda cnt: cv2.contourArea(cnt))\n\n # Find the centroid in _pixel_space_. Draw it.\n try:\n M = cv2.moments(cnt_largest)\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n return (cX,cY)\n except:\n print(\"PROBLEM: CANNOT FIND CORNER ...\")",
"def contours(image,debug=False):\n\timgray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\n\tif debug: cv2.imshow('gray_scale_contour',imgray)\n\tim2, contours, hierarchy = cv2.findContours(imgray,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n\n\treturn contours,hierarchy",
"def contourApproximation(approx_input,step_size=0.005):\r\n\r\n\timg = approx_input\r\n\tret,thresh = cv2.threshold(img,127,255,0)\r\n\timc2,contours,hierarchy = cv2.findContours(thresh, 1, 2)\r\n\tcurr_len=0\r\n\tfor item in contours:\r\n\t\t# Pick the longest contour if there are many.\r\n\t\tif len(item)>curr_len:\r\n\t\t\tcurr_len=len(item)\r\n\t\t\tcnt=item\r\n\t# Epsilon constrols the approximation accurarcy\r\n\tepsilon = step_size*cv2.arcLength(cnt,True)\r\n\tapprox = cv2.approxPolyDP(cnt,epsilon,True)\r\n\tzeros_image=np.zeros(imc2.shape)\r\n\tuseable_points=[]\r\n\tfor num in range(len(approx)):\r\n\t useable_points.append((approx[num][0][0],approx[num][0][1]))\r\n\tuseable_points_initial=useable_points\r\n\tuseable_points = np.array(useable_points, np.int32)\r\n\tuseable_points = useable_points.reshape((-1,1,2))\r\n\tif testing==True:\r\n\t\tprint(\"Reordered the points to reflect the pixel IDs of the points on the countour.\")\r\n\t\tprint(useable_points);\r\n\t# Method 1: Using cv2.polylines\r\n\timg_approximation1=cv2.polylines(0*zeros_image.copy(),[useable_points],True,(255,255,255),10,1)\r\n\t# Method 2: Using cv2.drawContours - works too\r\n\t#img_approximation2=cv2.drawContours(0*zeros_image.copy(),[approx],0,(255,255,255),10)\r\n\tif view_plots==True:\r\n\t\tplt.subplot('121')\r\n\t\tplt.title(\"Approximated contour\")\r\n\t\tplt.imshow(img_approximation1)\r\n\t\tplt.show()\r\n\r\n\treturn useable_points_initial",
"def draw_contour_on_image(source, points_x, points_y):\n\n # Copy the image source to prevent modifying the original image\n src = np.copy(source)\n\n points = []\n for px, py in zip(points_x, points_y):\n points.append([px, py])\n\n points = np.array(points, np.int32)\n points = points.reshape((-1, 1, 2))\n\n image = cv2.polylines(src, [points], isClosed=True, color=(0, 255, 0), thickness=2)\n\n return image",
"def find_contours(img):\n img_copy = img.copy()\n im2, contours, h = cv2.findContours(img_copy, 1, 2)\n return contours",
"def isocontour(im, isovalue=None):\n \n # Check image\n if not isinstance(im, np.ndarray) or (im.ndim != 2):\n raise ValueError('im should be a 2D numpy array.')\n \n # Get isovalue\n if isovalue is None:\n isovalue = 0.5 * (im.min() + im.max())\n isovalue = float(isovalue) # Will raise error if not float-like value given\n \n # Get the contours\n data = find_contours(im, isovalue)\n \n # Build the contour as we used to return it. It is less rich, but easier\n # to visualize.\n data2 = []\n for contour in data:\n n = contour.shape[0] * 2 - 2\n contour2 = np.empty((n, 2), np.float32)\n contour2[0::2] = contour[:-1]\n contour2[1::2] = contour[1:]\n data2.append(np.fliplr(contour2))\n \n # Return as pointset\n return vv.Pointset(np.row_stack(data2))",
"def stack_contours(self, interesting_contour):",
"def find_contours(image):\n thresh = cv2.adaptiveThreshold(image, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 101, 10)\n #finds the contours in the mask of the thresholded image.\n contours = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]\n return contours",
"def contour(container, imtype, component, filter_size): # noqa\n intensity_colormap = ColorPalette.fromPreset('mmi')\n imtdict = container.getIMTGrids(imtype, component)\n gridobj = imtdict['mean']\n grid = gridobj.getData()\n metadata = gridobj.getGeoDict().asDict()\n if imtype == 'MMI':\n sgrid = grid\n units = 'mmi'\n elif imtype == 'PGV':\n sgrid = np.exp(grid)\n units = 'cms'\n else:\n sgrid = np.exp(grid) * 100.0\n units = 'pctg'\n if filter_size > 0:\n fgrid = median_filter(sgrid, size=filter_size)\n else:\n fgrid = sgrid\n\n interval_type = 'log'\n if imtype == 'MMI':\n interval_type = 'linear'\n intervals = getContourLevels(\n np.min(fgrid), np.max(fgrid), itype=interval_type)\n\n lonstart = metadata['xmin']\n latstart = metadata['ymin']\n lonspan = np.abs(metadata['xmax'] - lonstart)\n latspan = np.abs(metadata['ymax'] - latstart)\n nlon = metadata['nx']\n nlat = metadata['ny']\n\n line_strings = [] # dictionary of MultiLineStrings and props\n\n for cval in intervals:\n contours = measure.find_contours(fgrid, cval)\n #\n # Convert coords to geographic coordinates; the coordinates\n # are returned in row, column order (i.e., (y, x))\n #\n new_contours = []\n plot_contours = []\n for ic, coords in enumerate(contours): # coords is a line segment\n if len(coords) <= 20: # skipping little contour islands?\n continue\n\n mylons = coords[:, 1] * lonspan / nlon + lonstart\n mylats = (nlat - coords[:, 0]) * latspan / nlat + latstart\n contours[ic][:, 0] = mylons[:]\n contours[ic][:, 1] = mylats[:]\n plot_contours.append(contours[ic])\n new_contours.append(contours[ic].tolist())\n\n if len(new_contours):\n mls = MultiLineString(new_contours)\n props = {\n 'value': cval,\n 'units': units\n }\n if imtype == 'MMI':\n color_array = np.array(intensity_colormap.getDataColor(cval))\n color_rgb = np.array(\n color_array[0:3] * 255, dtype=int).tolist()\n props['color'] = '#%02x%02x%02x' % tuple(color_rgb)\n if (cval * 2) % 2 == 1:\n props['weight'] = 4\n else:\n props['weight'] = 2\n line_strings.append(\n {\n 'geometry': mapping(mls),\n 'properties': props\n }\n )\n return line_strings",
"def _gen_contours(self):\n # check to see if the number of factors to contour is > 1, otherwise \n if self.Y_.ndim < 2:\n z = np.asarray(self.Y_)\n # get the values of the manifold embedding\n x1 = self.X_[:, 0]\n x2 = self.X_[:, 1]\n x1g, x2g, zg = self._gen_contour(x1, x2, z)\n self.contours_[0] = np.nan_to_num(zg)\n else:\n col = 0\n while col < self.Y_.shape[self.Y_.ndim-1]:\n z = np.asarray(self.Y_)[:, col]\n # get the values of the manifold embedding\n x1 = self.X_[:, 0]\n x2 = self.X_[:, 1]\n x1g, x2g, zg = self._gen_contour(x1, x2, z)\n self.contours_[col] = np.nan_to_num(zg) # zero out the non-contoured points in the 2D space\n col += 1 # go to the next column",
"def findContours(image):\n _, contours, _ = cv2.findContours(image ,cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n return contours"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Replace the existing cls.__init__() method with a new one which also initialises the field generators and similar bookkeeping.
|
def augment_init_method(cls):
    """Replace cls.__init__ with a new version that also initialises the field generators and related bookkeeping."""
    orig_init = cls.__init__

    def new_init(self, *args, **kwargs):
        super(CustomGenerator, self).__init__()  # TODO: does this behave correctly with longer inheritance chains?
        orig_init(self, *args, **kwargs)
        # Keep the original constructor arguments around as plain attributes.
        self.orig_args = args
        self.orig_kwargs = kwargs
        # Collect field generator templates declared on the class and on the instance.
        self.ns_gen_templates = TohuNamespace()
        self.ns_gen_templates.update_from_dict(self.__class__.__dict__)
        self.ns_gen_templates.update_from_dict(self.__dict__)
        self.ns_gen_templates.set_owner(self.__class__)
        self._mark_field_generator_templates()
        # Spawn concrete field generators from the templates, owned by this instance.
        self.ns_gens = self.ns_gen_templates.spawn()
        self.ns_gens.set_owner(self)
        # Remaining bookkeeping: expose generators, field names and the item name/class.
        self._update_namespace_with_field_generators()
        self._set_field_names()
        self._set_tohu_items_name()
        self._set_tohu_items_cls()

    cls.__init__ = new_init
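
# Minimal sketch of the same __init__-wrapping idea on a plain class, without the
# tohu-specific namespace bookkeeping above (all names here are made up for illustration).
def _sketch_remember_init_args(cls):
    orig_init = cls.__init__
    def new_init(self, *args, **kwargs):
        orig_init(self, *args, **kwargs)
        # Extra bookkeeping wrapped around the original __init__.
        self.orig_args = args
        self.orig_kwargs = kwargs
    cls.__init__ = new_init
    return cls

@_sketch_remember_init_args
class _SketchExample:
    def __init__(self, a, b=2):
        self.a, self.b = a, b

# _SketchExample(1, b=3).orig_kwargs == {'b': 3}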
|
[
"def _init_fields(self) -> None:\n ...",
"def __init__(self):\n for field in self.get_fields():\n setattr(self, field, None)",
"def _init_fields(self, schema_name, containing_cls):\n self._schema_name = schema_name\n self.containing_cls = containing_cls",
"def __init__(self):\n\n super().__init__()\n\n # a dictionary containing information of registered validators.\n # example: dict(type[BaseEntity] |\n # str domain: dict(str name: AbstractValidatorBase instance))\n self._validators = Context()\n\n # a dictionary containing all registered for find validators.\n # example: dict(type[BaseEntity] |\n # str domain: dict(str name: AbstractValidatorBase instance))\n self._for_find_validators = Context()\n\n # a dict containing a map between all python types and form field types.\n # for example: {type | tuple[type] python_type: str form_field_type}\n self._type_map = self._get_python_to_field_type_map()",
"def __post_init__(self):\n for name, field_type in self.__annotations__.items():\n if not isinstance(self.__dict__[name], field_type):\n setattr(self, name, field_type(self.__dict__[name]))",
"def _init(self, base_class, translator_class, app=None):\r\n if app is None:\r\n self.app = _CustomSphinx(srcdir=None, confdir=None, outdir=None, doctreedir=None,\r\n buildername='memoryhtml')\r\n else:\r\n self.app = app\r\n builder = self.app.builder\r\n builder.fignumbers = {}\r\n base_class.__init__(self, builder)\r\n self.translator_class = translator_class\r\n self.builder.secnumbers = {}\r\n self.builder._function_node = []\r\n self.builder.current_docname = None\r\n self.base_class = base_class",
"def __init__(self):\n self._field, self.ships = create_field()\n self._withships = field_with_ships(self._field)",
"def __init__ ( self ) :\n\n None",
"def _add_auto_defining(init):\n\n @functools.wraps(init)\n def decorated(self, *args, **kwargs):\n \"\"\"The decorated initializer\"\"\"\n\n # Get the names of the defining fields.\n argnames = _get_argnames(init)\n\n # Assign all the values given to the initializer.\n for field, value in itertools.chain(\n zip(argnames[1:], args),\n kwargs.items()\n ):\n setattr(self, field, value)\n\n # Invoke the actual initializer.\n init(self, *args, **kwargs)\n\n return decorated",
"def __init__(\n self,\n field: SourcesField,\n *,\n for_sources_types: Iterable[type[SourcesField]] = (SourcesField,),\n enable_codegen: bool = False,\n ) -> None:\n object.__setattr__(self, \"field\", field)\n object.__setattr__(self, \"for_sources_types\", tuple(for_sources_types))\n object.__setattr__(self, \"enable_codegen\", enable_codegen)\n\n self.__post_init__()",
"def _create_fields(self):\r\n pass",
"def __init__(self):\n self.basename = self.basename or self.__class__.__name__.lower()\n self.set_fields()",
"def __init__(self) -> None:\n # Lazy-initialize serializers\n self._ser_handle: Optional[TypeSerializer] = None\n self._deser_handle: Optional[TypeDeserializer] = None",
"def __init__(self):\n self._factory_methods = {\n Noise.SALT_AND_PEPPER: PreprocessorFactory._create_salt_and_pepper,\n Noise.MISSING_BLOCK: PreprocessorFactory._create_missing_block,\n Noise.UNIFORM: PreprocessorFactory._create_uniform,\n Noise.GAUSSIAN: PreprocessorFactory._create_gaussian,\n }",
"def __init__(self, *args):\n super(Base, self).__init__()",
"def constructor(self, **kwargs):\n if len(kwargs) > 0:\n self.__dict__.update(kwargs)",
"def _doInstanceInit(self, instance, insField):\r\n if insField != '' or insField is not None:\r\n self.instanceFieldName = insField\r\n self.instance = instance",
"def _pre_init(self, **kwargs) -> None:\n raise NotImplementedError",
"def __init__(self, obj, **adapted_methods):\n self.object = obj\n self.__dict__.update(adapted_methods)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Mark field generator templates as such so that an indication of this is included in the tohu_name. This is purely a convenience for easier debugging.
|
def _mark_field_generator_templates(self):
for g in self.ns_gen_templates.all_generators:
g.is_custom_generator_template = True
|
[
"def gen_fake(self, field_name, fake):\r\n ...",
"def custom_template_formatters(self):\n return sorted(f'{k}{self.TEMPLATE_ASSIGNER}{v.template}'\n for k, v in self.custom_template_items)",
"def template_field_names(self, template_field_names):\n\n self._template_field_names = template_field_names",
"def special_format_field(self, obj, format_spec):\n raise NotImplementedError()",
"def __format_names(self):\n self.formats = list(map(lower_and_add_dot, self.formats))\n self.lop = list(filter(lambda x: x[-4:].lower() in self.formats, self.lop))",
"def template_name_field(self):\n return '%s/%s_detail.html' % (\n self.content_type.app_label, self.content_type.model\n )",
"def generate_name(self):\n return self._generate_name",
"def _gen_defining_field_names(cls):\n return itertools.islice(\n cls.__fields__.keys(), 0, cls.__defining_count__\n )",
"def name(self):\n return self.field.name if self.field else 'unknown_%d' % self.def_num",
"def template_formatters(self):\n return sorted(f'{k}{self.TEMPLATE_ASSIGNER}{v.template}'\n for k, v in self.template_map.items())",
"def get_template_names(self):\n tpl = super(Teacher_previous_employmentView, self).get_template_names()[0]\n app = self.model._meta.app_label\n mdl = 'teacher_previous_employment'\n #self.template_name = tpl.replace(app, '{0}/{1}'.format(app, mdl))\n self.template_name = tpl[:8]+'teacher_previous_employment/'+tpl[8:]\n return [self.template_name]",
"def _get_from_field(self):\n return '{} AS {}'.format(\n self.table.name, self.name)",
"def set_field_html_name(self, field, new_name):\n old_render = field.widget.render\n field.widget.render = lambda name, value, **kwargs: \\\n old_render(new_name, value, **kwargs)",
"def generate_name(self, generate_name):\n\n self._generate_name = generate_name",
"def __str__(self):\n # TODO: ideally this should just loop through the ATTRIBUTES so it doesn't need touching for new ones\n output = \"------ FIELD {} ({}/{}/{}): {}(type), {}(datatype), {}(role), {}(aggregation)\".format(\n self.name, self.caption, self.alias, self.id, self.type, self.datatype, self.role, self.default_aggregation)\n return output",
"def _generate_title(setup):\n groom_str = \" Groomed\" if \"groomed\" in setup.region['name'] else ''\n return '%s %s%s %s' % (setup.jet_algo, setup.region['label'], groom_str, setup.angle.name)",
"def RenameTagTemplateField(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")",
"def custom_template_formatter(self):\n return self.FORMATTER_DELIMITER.join(self.custom_template_formatters)",
"def render_record_annotation(self):\n return '{}: {}'.format(self.name, self.type_str)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Build a trie from all patterns and append a "$" as a stop sign at the end of each pattern.
|
def build_trie(patterns):
    # The trie is stored as a dict: node id -> {edge character: child node id}.
    # Node 0 is the root; the key "$" marks the end of a pattern.
    tree = dict()
    tree[0] = {}
    idx = 1
    for pattern in patterns:
        cur = tree[0]
        for char in pattern:
            if char in cur:
                # Follow the existing edge.
                cur = tree[cur[char]]
            else:
                # Create a new node reachable via this character.
                cur[char] = idx
                tree[idx] = {}
                cur = tree[idx]
                idx += 1
        cur["$"] = None
    return tree
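
# A minimal usage sketch (the patterns below are illustrative only).
if __name__ == '__main__':
    trie = build_trie(["ana", "and", "bat"])
    print(trie[0])                    # root edges, e.g. {'a': 1, 'b': 5}
    node = trie[trie[0]['a']]         # node reached after reading 'a'
    print('n' in node, '$' in node)   # True False, since "a" on its own is not a pattern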
|
[
"def make_pattern(paths, _main=True):\n patterns = {}\n flag = False\n for path in paths:\n if path.startswith('/'):\n path = path[1:]\n splitted = path.split('/',1)\n if len(splitted)==1:\n if patterns:\n assert flag,`flag,paths,patterns, path,splitted`\n flag = True\n word = splitted[0]\n i = 0\n while i<len(word):\n if word[i].isdigit():\n break\n i += 1\n \n splitted = [word[:i], word[i:]]\n l = patterns.get(splitted[0], None)\n if l is None:\n l = patterns[splitted[0]] = set ([])\n map(l.add, splitted[1:])\n r = []\n for prefix in sorted(patterns.keys()):\n lst = list(patterns[prefix])\n if len (lst)==1:\n if flag:\n r.append(prefix + lst[0])\n else:\n r.append(prefix +'/'+ lst[0])\n elif lst:\n if prefix:\n subpattern = make_pattern(lst, _main=False)\n if subpattern is None:\n if _main:\n return ','.join(paths)\n raise NotImplementedError (`lst, prefix, paths, patterns`)\n else:\n return None\n if ',' in subpattern:\n subpattern = '{%s}' % (subpattern)\n if flag:\n r.append(prefix+subpattern)\n else:\n r.append(prefix+'/'+subpattern)\n else:\n slst = sorted(map(int,lst))\n #assert slst == range(slst[0], slst[-1]+1),`slst, lst`\n if len (slst)==1:\n r.append(str (slst[0]))\n elif slst == range (slst[0], slst[-1]+1):\n r.append('%s:%s' % (slst[0],slst[-1]))\n else:\n return None\n raise NotImplementedError(`slst`,`prefix`,`paths`)\n else:\n r.append(prefix)\n return ','.join(r)",
"def build_trie(lexicon, include_only=False):\n trie = {}\n for pattern, category_names in lexicon.items():\n if include_only:\n category_names = [c for c in category_names if c in include_only]\n if category_names:\n cursor = trie\n for char in pattern:\n if char == \"*\":\n cursor[\"*\"] = category_names\n break\n if char not in cursor:\n cursor[char] = {}\n cursor = cursor[char]\n cursor[\"$\"] = category_names\n return trie",
"def __init__(self):\n patterns = [(r\"œ\", \"oe\"), (r\"æ\", \"ae\"), (r\"Œ\", \"OE\"), (r\"Æ\", \"AE\")]\n self.patterns = [(re.compile(regex), repl) for (regex, repl) in patterns]",
"def __init__(self, patterns):\n\n self.wildcard_patterns = {}\n literals = []\n \n for j, pattern in enumerate(patterns):\n pattern = pattern.strip()\n wildcards = pattern.count('*')\n \n if wildcards == 0:\n literals.append(pattern)\n\n elif wildcards == 1:\n if not pattern.endswith('*'):\n ve = \"Pattern %i invalid: * can only appear at end of pattern.\" % j\n raise ValueError(ve)\n\n self.add_wildcard_pattern(pattern)\n \n else:\n ve = \"Pattern %i invalid: * can only appear once.\" % j\n raise ValueError(ve)\n\n self.literals = self.make_literal_set(literals)\n self.optimize_wildcard_patterns()\n self.pattern_lengths = sorted(self.wildcard_patterns.keys())",
"def build_suffix_tree(text):\r\n result = []\r\n # Implement this function yourself\r\n #text = text[:-1]\r\n #print(\"text is\", text)\r\n tree = []\r\n tree.append([])\r\n counter = 0\r\n for i in range(len(text)):\r\n pat = text[i:]\r\n #print()\r\n #print(i, \"pat is\", pat)\r\n cn, flag = 0, 0\r\n head = 0\r\n while not flag:\r\n cnprev = cn\r\n #print(\"cn is now\", cn)\r\n for j in range(len(tree[cn])):\r\n (pos, l, d) = tree[cn][j]\r\n if text[pos] == pat[0 + head]:\r\n #print(\"Match!\")\r\n lab = text[pos:pos + l]\r\n n = strcmp(pat[head:], lab)\r\n #print(\"n is\", n)\r\n if n == len(pat) == len(lab):\r\n flag = 1\r\n elif n == len(lab) and n != len(pat):\r\n if d > 0:\r\n cn = d\r\n head += len(lab)\r\n #print(\"Moved over\", lab)\r\n else:\r\n #print(\"ACHTUNG!\")\r\n pass # Undefined behavior\r\n else:\r\n #print(\"Common part is\", pat[head:head + n])\r\n counter += 1\r\n tree[cn][j] = (pos, n, counter)\r\n cn = counter\r\n tree.append([])\r\n tree[cn].append((head + i + n, len(pat[head + n:]), 0))\r\n tree[cn].append((pos + n, len(lab[n:]), d))\r\n #print(\"First part is\", pat[head + n:])\r\n #print(\"Second part is\", lab[n:])\r\n flag = 1\r\n break\r\n if cn == cnprev and not flag:\r\n tree[cn].append((i + head, len(pat[head:]), 0))\r\n flag = 1\r\n #print(i, \"tree is\", tree)\r\n #tree[0].append((len(text), 0, 0))\r\n for i in range(len(tree)):\r\n for j in range(len(tree[i])):\r\n (pos, l, d) = tree[i][j]\r\n result.append(text[pos:pos + l])\r\n return result",
"def __init__(self):\n patterns = [(r\"j\", \"i\"), (r\"v\", \"u\"), (r\"J\", \"I\"), (r\"V\", \"U\")]\n self.patterns = [(re.compile(regex), repl) for (regex, repl) in patterns]",
"def pattern_table():\n dnnl_patterns = list()\n dnnl_patterns.append(make_qnn_conv2d_pattern())\n dnnl_patterns.append(make_qnn_dense_pattern())\n dnnl_patterns.append(make_dense_bias_sum_pattern())\n dnnl_patterns.append(\n (\n \"dnnl.conv2d_bias_sum_relu\",\n make_conv_bias_sum_relu_pattern(\"nn.conv2d\"),\n make_sum_pattren_predicate(add_checker),\n )\n )\n dnnl_patterns.append(\n (\n \"dnnl.conv2d_bias_sum\",\n make_conv_bias_sum_relu_pattern(\"nn.conv2d\", False),\n make_sum_pattren_predicate(add_checker),\n )\n )\n\n elt_list = [\"nn.relu\", \"tanh\", \"sigmoid\", \"clip\", \"gelu\", \"swish\", \"mish\", None]\n for with_bias in [True, False]:\n for elt in elt_list:\n if not with_bias and not elt:\n continue\n for conv_name in [\n \"nn.conv1d\",\n \"nn.conv2d\",\n \"nn.conv3d\",\n \"nn.conv2d_transpose\",\n \"nn.conv3d_transpose\",\n ]:\n dnnl_patterns.append(make_dnnl_pattern(conv_name, with_bias, elt))\n dnnl_patterns.append(make_dnnl_pattern(\"nn.dense\", with_bias, elt))\n return dnnl_patterns",
"def string_reconstruction(patterns):\n adj_list = de_bruijn_graph_fromkmer(patterns)\n path = euler_path(adj_list)\n text = genome_path(path)\n return text",
"def build_suffix_tree(text):\r\n result = []\r\n # Implement this function yourself\r\n trie_root = Node(-1, -1, 0)\r\n for i in range(len(text)):\r\n current_node = trie_root\r\n add_node_to_tree(current_node, i, text)\r\n cat_to_result(trie_root, text, result) \r\n return result",
"def constructPattern(self, strg):\n\t\tgroups = self.__groups\n\t\ttry:\n\t\t\twords = strg.split(' ')\n\t\texcept:\n\t\t\twords = [str(strg)]\n\t\tresult = [[] for index in words]\n\t\twords = pd.Series(words)\n\t\twhile (words.size > 0):\n\t\t\tfor key in groups:\n\t\t\t\tpattern = '(?:^['+groups[key]+']+)'\n\t\t\t\tmatch = words.str.contains(pattern)\n\t\t\t\tfor index, cond in match.iteritems():\n\t\t\t\t\tif cond:\n\t\t\t\t\t\tif key == 'num':\n\t\t\t\t\t\t\tp = regex.compile('([0-9]{1,})')\n\t\t\t\t\t\t\tnum_match = p.findall(words[index])\n\t\t\t\t\t\t\tsize = len(num_match[0])\n\t\t\t\t\t\telif key == 'sym':\n\t\t\t\t\t\t\tp = regex.compile('(['+groups['sym']+']+)')\n\t\t\t\t\t\t\tsym_match = p.findall(words[index])\n\t\t\t\t\t\t\tsymbol = sym_match[0]\n\t\t\t\t\t\tentry = size if key=='num' else key\n\t\t\t\t\t\tentry = symbol if key=='sym' else entry\n\t\t\t\t\t\tresult[index].append(entry)\n\t\t\t\twords = words.str.replace(pattern, '', 1)\n\t\t\t\twords.replace('', pd.np.nan, inplace=True)\n\t\t\t\twords.dropna(inplace=True)\n\t\t\t\tif words.size == 0:\n\t\t\t\t\tbreak;\n\t\treturn result",
"def buildTries(path=\".\"):\n with open(getLtwa(path), \"r\", encoding=\"utf-16_le\") as ltwa_file:\n # Skip first line with field names\n ltwa_file.readline()\n pt = Trie()\n st = Trie()\n lwt = Trie()\n\n for line in ltwa_file:\n word, abbrv, lang = line.split(\"\\t\")\n word = cleanWord(word)\n\n words = word.split()\n # check if it is a compound expression\n if len(words) > 1:\n lastword = words.pop()\n if lastword.endswith(\"-\"):\n lastword = lastword[:-1] + \"*\"\n ct = lwt.search(lastword)[0]\n words.reverse()\n if ct is None:\n ct = Trie()\n ct.insert(words, abbrv)\n lwt.insert(lastword, ct)\n else:\n ct.insert(words, abbrv)\n words.reverse()\n\n else:\n # replaces - at the end of the words with * to\n # not confuse with hyphenated words\n if word.endswith(\"-\"):\n word = word[:-1] + \"*\"\n if word.startswith(\"-\"):\n st.insert(word[1:], abbrv)\n else:\n pt.insert(word, abbrv)\n\n return pt, st, lwt",
"def optimize_wildcard_patterns(self):\n\n ct = 0\n for size in self.wildcard_patterns:\n self.wildcard_patterns[size] = self.make_literal_set(self.wildcard_patterns[size])\n ct += len(self.wildcard_patterns[size])",
"def _insert_patterns(self):\n self._add_finders()\n self._add_separators()\n self._add_alignment_patterns()\n self._add_timing_pattern()\n self._add_reserved_areas()\n self._add_version_info()",
"def _store ( self , defs , nowarn ):\n\n while True:\n l = defs.readline() # next macro rule\n# print \"rule input=\" , l\n if len(l) == 0: break # EOF check\n dl = definitionLine.DefinitionLine(l,False)\n left = dl.left # pattern to be matched\n tail = dl.tail # transformation to apply to match\n if left == None or tail == None:\n self._err(l=l)\n continue\n mp = ellyWildcard.convert(left)\n if mp == None:\n self._err('bad wildcards',l)\n continue\n pe = mp[-1]\n if pe != ellyWildcard.cALL and pe != ellyWildcard.cEND:\n mp += ellyWildcard.cEND # pattern must end in $ if it does not end in *\n if not _checkBindings(mp,tail):\n self._err('bad bindings in substitution',l)\n continue\n if not nowarn and not _checkExpansion(mp,tail):\n self._err('substitution longer than original string',l,0)\n r = [ mp , tail ]\n# print \"rule =\" , [ left , tail ]\n pat = r[0] # get coded pattern\n if pat == None:\n self._err('no pattern',l)\n continue\n c = pat[0] # first char of pattern\n # check type to see how to index rule\n# print 'c=' , ord(c)\n p = pat\n while c == ellyWildcard.cSOS: # optional sequence?\n k = p.find(ellyWildcard.cEOS) # if so, find the end of sequence\n if k < 0 or k == 1: break # if no end or empty sequence, stop\n k += 1\n if k == len(pat): break # should be something after sequence\n m = ellyChar.toIndex(pat[1]) # index by first char of optional sequence\n self.index[m].append(r) # (must be non-wildcard)\n p = p[k:] # move up in pattern\n c = p[0] # but check for another optional sequence\n\n if c == ellyWildcard.cSOS:\n self._err(l=l)\n continue # bad sequence, skip this rule\n\n# print 'c=' , ord(c)\n if ellyChar.isLetterOrDigit(c): # check effective first char of pattern\n m = ellyChar.toIndex(c)\n self.index[m].append(r) # add to index under alphanumeric char\n elif ellyChar.isText(c):\n self.index[0].append(r) # add to index under punctuation\n elif not c in ellyWildcard.Matching:\n if c == ellyWildcard.cEND:\n print >> sys.stderr , '** macro warning: pattern can have empty match'\n print >> sys.stderr , '* at [' , l , ']'\n else:\n dc = '=' + str(ord(c) - ellyWildcard.X)\n self._err('bad wildcard code' , dc)\n continue\n elif c == ellyWildcard.cANY or c == ellyWildcard.cALL:\n self.anyWx.append(r) # under general wildcards\n elif c == ellyWildcard.cCAN:\n self.index[0].append(r) # under punctuation\n elif c == ellyWildcard.cDIG or c == ellyWildcard.cSDG:\n self.digWx.append(r) # under digit wildcards\n elif c == ellyWildcard.cSAN:\n self.digWx.append(r) # under both digit and\n self.letWx.append(r) # letter wildcards\n elif c == ellyWildcard.cSPC or c == ellyWildcard.cEND:\n self._err('bad wildcard in context',l)\n continue # wildcards unacceptable here\n else:\n self.letWx.append(r) # everything else under letter wildcard\n\n self.count += 1 # count up macro substitution\n\n if self._errcount > 0:\n print >> sys.stderr , '**' , self._errcount , 'macro errors in all'\n print >> sys.stderr , 'macro table definition FAILed'\n raise ellyException.TableFailure",
"def build_trie(self):\n\t\ttry:\n\t\t\twith open(self.corpus_filename, 'r+') as f:\n\t\t\t\tphrases = f.read().splitlines()\n\t\texcept FileNotFoundError:\n\t\t\traise\n\t\tfor phrase in phrases:\n\t\t\tself.trie.insert(phrase)",
"def build_regexp(self, numbers):\n exp = '\\[*'\n for i in numbers:\n exp += str(i) + '\\W*'\n exp += '\\]*'\n return exp",
"def compilePatterns(self):\n if self.verbose > 0:\n print(\"compile patterns...\"); sys.stdout.flush()\n for tagId in self.tags:\n tag = self.tags[tagId]\n self.patterns[tagId] = re.compile(tag + self.regexpMotif)",
"def make_trie(trie, filename):\n\n def dot_node(node):\n label = \"\"\n bad_node = ',shape=\"ellipse\",color=\"red\"'\n style = \"\"\n\n if len(node.succs) != 26:\n style = bad_node\n label = \"len(node.succs) != 26\"\n elif node.succ_count != ib002_count_not_none(node.succs):\n style = bad_node\n label = \"succ_count == \" + str(node.succ_count)\n elif not node.accepting and node.succ_count == 0:\n style = bad_node\n label = \"non-accepting leaf\"\n elif node.accepting:\n style = ',shape=\"doublecircle\"'\n\n f.write('{} [label=\"{}\"{}]\\n'.format(id(node), label, style))\n\n for i, s in enumerate(node.succs):\n if s is not None:\n dot_node(s)\n char = chr(i + ord('a'))\n f.write('{} -> {} [label=\" {}\"]\\n'.format(id(node),\n id(s), char))\n\n with open(filename, 'w') as f:\n f.write(\"digraph {\\n\")\n f.write('node [shape=\"circle\",ordering=\"out\"]\\n')\n if trie.root is not None:\n dot_node(trie.root)\n f.write(\"}\\n\")",
"def __init__(self, patterns):\r\n self.patterns = patterns\r\n self.edges = []\r\n self.nodes = dict()\r\n self.stringPath = []\r\n self.sequence = ''"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the first entry of the table whose given field equals the given value
|
def get_entry(table, field, value):
    # Only `value` is bound as a query parameter; `table` and `field` are interpolated
    # into the SQL string, so they must come from trusted code, not user input.
    return query_read('SELECT * FROM ' + table + ' WHERE ' + field + ' = ?', [value], one=True)
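
# get_entry assumes a query_read(sql, params, one) helper that is not shown in this
# snippet. The sketch below is one hypothetical sqlite3-based implementation, with a
# made-up DATABASE path; the real helper may differ.
import sqlite3

DATABASE = 'app.db'  # hypothetical database file

def query_read(sql, params, one=False):
    with sqlite3.connect(DATABASE) as conn:
        conn.row_factory = sqlite3.Row
        rows = conn.execute(sql, params).fetchall()
        return (rows[0] if rows else None) if one else rows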
|
[
"def get_table_row(self, key_field, key_value):\n\n if self.table == []:\n self._get_table_list()\n for row in self.table:\n if row[key_field] == key_value:\n return row\n return None",
"def select_single(self, table, rownum):\n\n with self.connection:\n return self.cursor.execute(f'SELECT * FROM {table} WHERE id = ?',\n (rownum,)).fetchall()[0]",
"def first(self):\n return self.asc__id().get_one()",
"def get_by(session, model, primary_key_value, primary_key=None):\r\n result = query_by_primary_key(session, model, primary_key_value,\r\n primary_key)\r\n return result.first()",
"def query_and_return_the_first_row_where(statement):\n\n db = current.db\n s3db = current.s3db\n\n cmd = \"db(%s).select(\\\n limitby=(0,1) ).first()\" % statement\n logger.info(\"Executing query %s\" % cmd)\n\n output = eval(cmd)\n return output",
"def get_single_value(self, header_to_get: str, match_header: str, match_value):\n\t\treturn self._database_api.execute_custom_query_one_result('SELECT ' + header_to_get + ' FROM ' + self.table_name + ' WHERE ' + match_header + ' = ' + self._validify_value(match_value))[0]",
"def first(self):\n try:\n row = self.cursor_strategy.fetchone()\n except BaseException as e:\n self.connection._handle_dbapi_exception(\n e, None, None, self.cursor, self.context\n )\n\n try:\n if row is not None:\n return self.process_rows([row])[0]\n else:\n return None\n finally:\n self.close()",
"def get_entries(table, field, value):\n\n return query_read('SELECT * FROM ' + table + ' WHERE ' + field + ' = ?', [value])",
"def get_one(cur, query):\n\tnummatches = cur.execute(query)\n\treturn cur.fetchone()",
"def find_one(self, where_dict):\n result = self.find(where_dict)\n return result[0] if result else None",
"def get_by_column(self, session: Session, column_name: str, column_value):\n return cast(BaseModel, self.model).get_one(session, column_name, column_value)",
"def query_item(self, *args):\n table = args[0]\n column = args[1]\n value = args[2]\n query_item = \"\"\"\n SELECT * FROM {} WHERE {} = '{}';\n \"\"\".format(table, column, value)\n cursor.execute(query_item)\n item = cursor.fetchone()\n return item",
"def read_table_item(table, pk_name, pk_value):\n response = table.get_item(Key={pk_name: pk_value})\n\n return response",
"def first(query, default=None):\r\n def inner(model, *args, **kwargs):\r\n val = model.engine.execute(query, *args, **kwargs).first()\r\n\r\n if default is not None and val is None:\r\n return default\r\n else:\r\n return val\r\n\r\n return inner",
"def row_extract_bq(self, row, value):\n mask = self.df_bq_history.job_id == row[\"qid\"]\n out = self.df_bq_history.loc[mask, value]\n return out.values[0]",
"def query_one(self, conditions):\n rows = self.query_all(conditions, limit=1)\n try:\n return next(rows)\n except StopIteration:\n return None, None",
"def first_index(L, value):\n val = next(iter(filter(lambda x: x[1] == value, enumerate(L))))\n\n if val:\n return(val[0])\n else:\n raise(ValueError(\"{} is not in the list.\".format(value)))",
"async def typed_retrieve_one_query(\n database: str, data_type: Type[T], query: str, values: Optional[Tuple[Any, ...]] = None,\n) -> T:\n\n rows = await typed_retrieve_query(database, data_type, query, values)\n\n try:\n return rows[0]\n except KeyError as e:\n bot_logger.error(f'Retrieve One Query (\"{query}\"). {e}.')\n raise aiosqlite.Error(f'Failed to fetch any rows')",
"def _value_by_key(row, key):\n value = row[key].iloc[0]\n if pd.isna(value):\n return None\n return value"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get all entries of the table whose field equals the given value
|
def get_entries(table, field, value):
return query_read('SELECT * FROM ' + table + ' WHERE ' + field + ' = ?', [value])
|
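Both get_entry and get_entries above delegate to a query_read helper that is not shown in this section. Below is a minimal sketch of such a helper, assuming the usual Flask + sqlite3 setup in which get_db() returns a connection whose row_factory is sqlite3.Row; names and behaviour are illustrative only.

def query_read(query, args=(), one=False):
    # Run a read-only statement and return the matching rows.
    cur = get_db().execute(query, args)
    rows = cur.fetchall()
    cur.close()
    # With one=True, return the first row or None instead of a list.
    return (rows[0] if rows else None) if one else rows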
[
"def get_entry(table, field, value):\n\n return query_read('SELECT * FROM ' + table + ' WHERE ' + field + ' = ?', [value], one=True)",
"def find_by_fieldname(self, name, value):\n response = self.table.scan(\n FilterExpression=Attr(name).eq(value)\n )\n items = response.get(\"Items\", [])\n return items",
"def filter_by(self, data, field_name, value):\n return [note for note in data if note.get(field_name) == value]",
"def get_all(self, table, **query):\n if query:\n name, query = query.popitem()\n self._cursor.execute(f\"\"\"\nSELECT * FROM {table} WHERE {name}=={query!r}\n\"\"\")\n else:\n self._cursor.execute(f\"\"\"\nSELECT * FROM {table}\n\"\"\")\n return self._cursor.fetchall()",
"def get_fields_by_value(self, value, trans, other_values):\n rval = []\n val_index = self.columns['value']\n for fields in self.get_fields(trans, other_values):\n if fields[val_index] == value:\n rval.append(fields)\n return rval",
"def get_all_entries(self, table, topic=None):\n t_entries = self._db.table(table)\n res = []\n for entry in t_entries.all():\n res.append(entry['value'])\n return res",
"def find_by_field_value(self, field_name, value, op='=', collate=False):\n query = self._select().where(self.where(field_name, value, op))\n entities = yield self._query_for_entities(query)\n if collate:\n entities = self.collate(entities)\n returnValue(entities)",
"def get_records(self, **kwargs):\n sql = 'SELECT * FROM %s WHERE' % (self.table)\n for key in kwargs:\n if not kwargs[key]:\n continue\n sql += ' %s=\"%s\" AND' % (key, kwargs[key])\n sql = sql[:-4]\n print(sql)\n return self.curs.execute(sql).fetchall()",
"def search(table_name, value, limit=None): # TODO: partial searches\n \n db = sqlite3.connect(\"family.db\") # TODO build a ROOT and relative file structure\n c = db.cursor()\n \n # TODO build a global cache for ever-searches like this \n tables = [_[0] for _ in c.execute(\"SELECT name FROM sqlite_master WHERE type='table';\").fetchall()] # TODO make less hacky \n\n if table_name not in tables:\n print \"No table %s to search\" % tables\n return\n \n fields = [_[0] for _ in c.execute('select * from %s' % table_name).description]\n \n if type(value) is not dict:\n global table_glob\n \n for field in table_glob[table_name].search_order:\n result_set = c.execute(\"select * from %s where %s = ?\" % (table_name, field), [value]).fetchall() # TODO: make injection-safe\n if len(result_set) > 0:\n return result_set # TODO: make limit work\n\n else:\n for i in range(len(fields)):\n pass # TODO something",
"def find_matching_objects(self, field, value):\n model = field.model\n name = get_name(model)\n field_name = field.field_name\n objects = [model_object for model_object in self.objects_tree[ \\\n name].values() if getattr(model_object, field_name) == value]\n return objects",
"def query_item(self, *args):\n table = args[0]\n column = args[1]\n value = args[2]\n query_item = \"\"\"\n SELECT * FROM {} WHERE {} = '{}';\n \"\"\".format(table, column, value)\n cursor.execute(query_item)\n item = cursor.fetchone()\n return item",
"def _1_select(self, field = \"googlePlaceid\", value = None, printinstances = True):\n self.dbInstances = []\n for instance in self.json:\n if instance[field] == value:\n self.dbInstances.append(instance)\n if printinstances:\n for item in self.dbInstances:\n print(\"id:\",item[\"id\"],\"..locationName:\",item[\"locationName\"])",
"def get_table_records(self, table):\n sql = 'SELECT * FROM %s' % table\n self.c.execute(sql)\n records = [tup[0] for tup in self.c.fetchall()]\n return records",
"def collection(self, value=None):\n qs = self.attribute_set.all()\n if not value:\n return qs\n else:\n ids = [a.id for a in qs if a.value==value]\n return qs.filter(id__in=ids)",
"def get_table_row(self, key_field, key_value):\n\n if self.table == []:\n self._get_table_list()\n for row in self.table:\n if row[key_field] == key_value:\n return row\n return None",
"def query_many(self, *args):\n table = args[0]\n column = args[1]\n value = args[2]\n query_item = \"\"\"\n SELECT sale_id, prod_id, quantity FROM {} WHERE {} = '{}';\n \"\"\".format(table, column, value)\n cursor.execute(query_item)\n items = cursor.fetchall()\n return items",
"def values(self):\n for item in self.table:\n if item:\n yield item.value",
"def get_items(self, data, table):\n return rdb.table(table).get_all(data)",
"def getValueIds(self, valueTable, constrain):\n\n records = (\n r\n for r in getattr(self, valueTable, {}).values()\n if G(r, constrain[0]) == constrain[1]\n )\n return {G(r, N._id) for r in records}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Add a ballot from a user, or update it if it already exists
|
def add_update_ballot(voter, poll, choices):
db = get_db()
with db:
poll = get_poll(poll)
if poll is None or poll['closed'] is True or len(poll['choices']) != len(choices):
return False
ballot = []
        for choice, grade in choices.items():
ballot.append((voter, poll['uid'], choice, grade))
get_db().executemany("INSERT OR REPLACE INTO ballots (voter, poll, choice, grade) VALUES (?, ?, ?, ?)", ballot)
return True
|
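A hypothetical call to the function above; the voter id, poll uid and grades are made up for illustration, and grades are assumed to run from 0 to 6 as in the results computation later in this file.

ok = add_update_ballot(
    voter="alice",
    poll="a3f9c2",                  # uid of an open poll (hypothetical)
    choices={1: 5, 2: 3, 3: 0},     # one grade per choice id of the poll
)
if not ok:
    print("unknown poll, closed poll, or incomplete ballot")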
[
"def add_points(user_id, points):\n db = sqlite3.connect('Ranking/Rankings.db')\n adder = db.cursor()\n if int(points) < 0:\n adder.execute('UPDATE ranks SET Points = {} WHERE UserID = {}'.format(0, user_id))\n else:\n cur_points = get_points(user_id)\n adder.execute('UPDATE ranks SET Points = {} WHERE UserID = {}'.format(points + int(cur_points), user_id))\n\n db.commit()\n db.close()",
"def userSellShipObj(self, user : basedUser.BasedUser, ship : shipItem.Ship):\n user.credits += ship.getValue()\n self.shipsStock.addItem(ship)\n user.inactiveShips.removeItem(ship)",
"def add_bet(self, bet):\n\n self.bets.append(bet)\n self.users.append(bet.user)",
"def addballot(self, ls):\n self.ballots.append(ls)",
"def add_user(self, user):\n if not user.id in self.queue:\n self.queue.append(user.id)",
"def add_user(self, user_id, ranking):\n if type(ranking) is not list:\n ranking = list(ranking)\n self.lists[user_id] = ranking",
"async def give(ctx, user: discord.Member, name: str):\n org_user = ctx.message.author\n guild = org_user.guild\n\n user_info = db.user(user)\n user_badges = await user_info.badges()\n badges = await db.guild(guild).badges()\n\n badge_id = f\"{name}_{guild.id}\"\n\n if name not in badges:\n return await ctx.send(\"**That badge doesn't exist in this guild!**\")\n if badge_id in user_badges:\n return await ctx.send(f\"**{get_user_name(user)} already has that badge!**\")\n\n user_badges[badge_id] = badges[name]\n await user_badges.badges.set(user_badges)\n await ctx.send(f\"**{get_user_name(org_user)} has just given `{get_user_name(user)}` the `{name}` badge!**\")",
"def add(cls, user):\n cls.users[user['id']] = CachedUser(user)",
"def add_hobby():\n username = get_jwt_identity()\n\n title = request.form['title']\n\n hobbies.insert(username=username, title=title)\n\n return good_json_response('success')",
"def add_user(self, username, vec):\n self.__add_row_to_data(username, vec)\n self.__save_current_user_data()\n self.build_annoy_index()",
"def update(self, request, pk=None):\n supports = request.data.get('supports', False)\n user = request.user\n if user:\n try:\n vote = next(vote for vote in user.votes if vote.bill_id == pk)\n vote.supports = supports\n except StopIteration:\n vote = Vote.objects.create(bill_id=pk, supports=supports)\n user.votes.append(vote)\n user.save()\n return self.respond(status=201)\n return self.respond(status=412)",
"def brother_add(request, position_slug):\r\n form = BrotherForm(request.POST or None)\r\n\r\n if request.method == 'POST':\r\n if form.is_valid():\r\n instance = form.clean()\r\n user = User.objects.create_user(instance['case_ID'], instance['case_ID'] + \"@case.edu\",\r\n instance['password'])\r\n user.last_name = instance['last_name']\r\n user.save()\r\n\r\n brother = form.save(commit=False)\r\n brother.user = user\r\n brother.save()\r\n return HttpResponseRedirect('/' + position_slug)\r\n\r\n context = {\r\n 'title': 'Add New Brother',\r\n 'form': form,\r\n }\r\n return render(request, 'model-add.html', context)",
"def adduser(self, nick):\n # add user\n if not self.users.has_key(nick):\n i = GtkListItem(nick)\n i.show()\n self.list.append_items([i])\n self.users[nick] = i\n if len(self.users) == 1:\n # select the user if it's the first / only in the list\n self.setselection(nick)\n i.connect(\"button-press-event\", self.item_handler, nick)",
"def addOrEditBid(self, newBid):\n try:\n # Get attributes of the new bid\n new_plantIdentifier = newBid.getPlantIdentifier()\n new_number = newBid.getNumber()\n # Data to edit\n new_amount = newBid.getAmount()\n new_price = newBid.getPrice()\n # Get the list of bids for the player\n playerBids = self.game_obj.player.getBids()\n row = 0\n for i, bid in enumerate(playerBids):\n # Look for that particular bid (ie. check bid number and plant identifer)\n existing_plantIdentifier = bid.getPlantIdentifier()\n existing_number = bid.getNumber()\n if existing_plantIdentifier == new_plantIdentifier and new_number == existing_number:\n # Found the particular bid. Edit it and return\n self.game_obj.player.editBid(i, new_amount, new_price)\n self.warningCountdown(\"bid saved\")\n return\n elif existing_plantIdentifier == new_plantIdentifier and new_price == bid.price:\n # A bid with the same price already exists so append that bid instead of creating a new one, but only if max capacity is not reached..\n if self.game_obj.player.accumulatedPlantProduction(newBid.plant) + new_amount <= newBid.plant.getActualCapacity(self.game_obj.weather_effect):\n if globals.DEBUGGING:\n print(\"Bid should be appended\")\n self.game_obj.player.bids[i].amount = bid.amount + new_amount\n # Set labels for the corrected bid\n if row == 0:\n self.market_lineEdit_dispatch_bid1.setText(str(self.game_obj.player.bids[i].amount))\n elif row == 1:\n self.market_lineEdit_dispatch_bid2.setText(str(self.game_obj.player.bids[i].amount))\n elif row == 2:\n self.market_lineEdit_dispatch_bid3.setText(str(self.game_obj.player.bids[i].amount))\n # Set labels for the one actually saved but dismised\n if new_number == 0:\n self.market_lineEdit_dispatch_bid1.setText(\"\")\n self.market_lineEdit_price_bid1.setText(\"\")\n elif new_number == 1:\n self.market_lineEdit_dispatch_bid2.setText(\"\")\n self.market_lineEdit_price_bid2.setText(\"\")\n elif new_number == 2:\n self.market_lineEdit_dispatch_bid3.setText(\"\")\n self.market_lineEdit_price_bid3.setText(\"\")\n self.warningCountdown(\"bid already exists\")\n else:\n # New bid has amount higher than the capacity when the other bid is included\n if new_number == 0:\n self.market_lineEdit_dispatch_bid1.setText(\"\")\n self.market_lineEdit_price_bid1.setText(\"\")\n elif new_number == 1:\n self.market_lineEdit_dispatch_bid2.setText(\"\")\n self.market_lineEdit_price_bid2.setText(\"\")\n elif new_number == 2:\n self.market_lineEdit_dispatch_bid3.setText(\"\")\n self.market_lineEdit_price_bid3.setText(\"\")\n self.warningCountdown(\"overBid\")\n return\n # The bid is not found so keep looking and increment row\n row += 1\n # Went through for loop without finding the bid\n if self.game_obj.player.accumulatedPlantProduction(newBid.plant) +new_amount <= newBid.plant.getActualCapacity(self.game_obj.weather_effect):\n self.game_obj.player.appendBid(newBid)\n self.warningCountdown(\"bid saved\")\n else:\n # Bid amount is higher than the power plants capacity so drop it\n if row == 0:\n self.market_lineEdit_dispatch_bid1.setText(\"\")\n self.market_lineEdit_price_bid1.setText(\"\")\n elif row == 1:\n self.market_lineEdit_dispatch_bid2.setText(\"\")\n self.market_lineEdit_price_bid2.setText(\"\")\n elif row == 2:\n self.market_lineEdit_dispatch_bid3.setText(\"\")\n self.market_lineEdit_price_bid3.setText(\"\")\n self.warningCountdown(\"overBid\")\n except Exception as e:\n print(\"Exception in addOrEditBid():\")\n print(e)",
"def _add_user_to_list(self, user):\n self._user_list.append(user)",
"def add_user(self, version=None):\n # since id is auto-generated, need to grab the most recent added and simply increment\n # if this is the first user added, start at 1\n if not self.users:\n u_id = 1\n else:\n # otherwise, get the length of the dict (num of keys) & our new user_id is +1\n u_id = len(self.users) + 1\n\n new_user = User(version)\n new_user.id = u_id\n # user_id as key and obj as val in graph's users dict\n self.users[u_id] = new_user\n self.total_users += 1",
"def _add(player, amount):\r\n bidVars = bids[player[\"name\"]]\r\n newBid = bidVars[0].get()+amount\r\n if newBid <= player[\"cash\"]:\r\n bidVars[0].set(newBid)\r\n bidVars[1].set(f\"Current Bid: {bidVars[0].get()}\")\r\n if bidVars[0].get() > topBid.get():\r\n topBid.set(bidVars[0].get())\r\n topBidString.set(f\"Highest Bid: ${topBid.get()}\")\r\n topBidder.set(f\"Highest Bidder: {player['name']}\")\r\n else:\r\n self._log(f\"{player['name']} doesn't have ${newBid}\")",
"def submit_stv_votes(self, user, ballot_dict):\n assert self.is_preference_vote, \"Only preference votes can have ballots\"\n self.user_can_vote_now(user)\n\n self.users_voted.add(user)\n if ballot_dict == {}:\n # Empty ballot == blank vote\n # Then adding user to self.users_voted is sufficient\n # Do not create BallotContainer instance\n # Blank votes affects the quota\n return\n\n alt_pks = [int(ballot_dict[pri]) for pri in ballot_dict]\n if len(alt_pks) != len(set(alt_pks)):\n raise DuplicatePriorities(\"Ballot contains duplicate(s) of candidates\")\n alt1_pk = int(ballot_dict[1])\n alt1 = self.alternatives.get(pk=alt1_pk)\n # create BallotContainer instance\n new_ballot = BallotContainer.objects.create(\n voting=self, current_alternative=alt1\n )\n new_ballot.save()\n # Create BallotEntry instances\n for (alt_pk, pri) in zip(alt_pks, ballot_dict):\n alt = Alternative.objects.get(pk=alt_pk)\n new_entry = BallotEntry.objects.create(\n container=new_ballot, priority=pri, alternative=alt\n )\n new_entry.save()\n\n # Reset ballots/stv-results\n for alt in self.alternatives.all():\n alt.is_winner = False\n # Inital dist is called in the stv procedure, but could perhaps\n # consider calling it here too",
"def add_user(self, user: User) -> None:\n\t\tpass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get a poll from the database
|
def get_poll(poll):
poll = get_entry('polls', 'uid', poll)
if poll is None:
return None
poll = dict(poll)
poll['choices'] = []
for choice in get_entries('choices', 'poll', poll['uid']):
poll['choices'].append(dict(choice))
poll['choices'].sort(key=lambda x: x['id'])
poll['closed'] = poll['end_date'] < datetime.now()
return poll
|
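For illustration, the dict returned by get_poll above looks roughly like the following; every column name other than uid, end_date, the computed closed flag and the choices list is an assumption about the schema.

poll = {
    'uid': 'a3f9c2',
    'title': 'Where should we eat?',            # assumed column of the polls table
    'end_date': datetime(2024, 1, 1),
    'closed': True,                              # computed from end_date, not stored
    'choices': [                                 # one dict per row of the choices table, sorted by id
        {'id': 1, 'poll': 'a3f9c2', 'name': 'Pizza'},   # 'name' is an assumed column
        {'id': 2, 'poll': 'a3f9c2', 'name': 'Sushi'},
    ],
}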
[
"def get_single_poll(request_ctx, id, **request_kwargs):\n\n path = '/v1/polls/{id}'\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.get(request_ctx, url, **request_kwargs)\n\n return response",
"def poll_by_uid(self, uid, context=None):\n if uid == 'latest':\n results = self.recent_polls(context=context, show_all=False, limit=1)\n else:\n results = api.content.find(UID=uid)\n if results:\n poll = results[0].getObject()\n return poll",
"async def fetch_last_poll_response(sid: str, course_name: str, poll_id: str) -> str:\n query = (\n select(Useinfo.act)\n .where(\n (Useinfo.sid == sid)\n & (Useinfo.course_id == course_name)\n & (Useinfo.div_id == poll_id)\n )\n .order_by(Useinfo.id.desc())\n )\n async with async_session() as session:\n res = await session.execute(query)\n return res.first()",
"def create_poll_instance():\n\tif can_poll_device():\n\t\treturn select.poll()\n\treturn PollSelectAdapter()",
"def _poll_from_doc(doc):\n return Poll(str(doc['_id']), doc['text'])",
"def get_poll_plugin(name, url, extra_data=None):\n try:\n return POLLS[name.lower()].Poll(url, extra_data)\n except KeyError as e:\n raise PluginNotFound(e)",
"def get_ballot_voters(poll):\n voters = query_read(\"SELECT voter FROM ballots WHERE poll = ? ORDER BY voter;\", [poll])\n\n if not voters:\n return None\n\n return [voter[0] for voter in voters]",
"def exam_poll_attempt(self):\n attempt_id = self.attempt_id\n\n self.client.get(\n '{url}/{attempt_id}'.format(url=self.attempt_api_path, attempt_id=attempt_id),\n name='timed_exam:poll_attempt'\n )",
"def get_polls(self):\n docs = self.collection.find()\n polls = [_poll_from_doc(doc) for doc in docs]\n return polls",
"def add_poll(request):\n try:\n name = request.data['poll_name']\n start_date = request.data['start_date']\n end_date = request.data['end_date']\n description = request.data['description']\n poll = Poll(name=name, start_date=start_date, end_date=end_date, \\\n description=description)\n poll.save()\n except Exception as ex:\n return exception_response(ex, f'bad poll data: {request.data}')\n\n return Response({})",
"def create_poll_record(db_url, db_user, db_password, db_name,\n team_id, channel_id, token, user_id, user_name,\n question, answers, channel_to_poll_id):\n db = connect(db_url, db_user, db_password, db_name)\n cursor = db.cursor()\n\n sql_insert = \"INSERT INTO poll (team_id, channel_id, token, \" + \\\n \"user_id, user_name, question, answers, channel_to_poll_id) \" + \\\n \"VALUES ('\" + team_id + \"', '\" + channel_id + \"', '\" + \\\n token + \"', '\" + user_id + \"', '\" + user_name + \"', '\" + \\\n question + \"', '\" + answers + \"', '\" + channel_to_poll_id + \"')\"\n\n sql_fetch = 'SELECT MAX(poll_id) FROM poll WHERE user_id = \"' + user_id + '\"'\n\n out = 0\n try:\n cursor.execute(sql_insert)\n db.commit()\n cursor.execute(sql_fetch)\n out = cursor.fetchone()[0]\n except:\n out = 0\n\n db.close()\n return out",
"def poll_id(self):\n return self._poll_id",
"def _get(self, params):\n return database_qs(Algorithm.objects.filter(pk=params['id']))[0]",
"def get_voter_ballot(voter, poll):\n ballot = query_read(\"SELECT choices.id, ballots.grade FROM choices JOIN ballots ON ballots.poll = ? and choices.id = ballots.choice and ballots.voter = ? ORDER BY choices.id;\", [poll, voter])\n\n if not ballot:\n return None\n\n return dict(ballot)",
"def get_one_note(self,idt):\n q=\"select * from note where id=%d\"%(idt)\n try:\n NoteDB.cursor.execute(q)\n result=NoteDB.cursor.fetchall()\n obj=Note(idt=result[0],msg=result[1],time=result[2])\n return obj\n except Exception as e:\n raise",
"async def get_single(self, slug: str):\n q = 'SELECT postable.id, {table}.title, {table}.slug, {table}.author_id, {table}.body, postable.created_at, ' \\\n 'postable.updated_at FROM {schema}{table} INNER JOIN {schema}postable ON {table}.id = postable.id WHERE ' \\\n '{table}.slug = $1'\n\n return await self.fetch(q, args=(slug,), single=True, flatten=True)",
"def get_poll_JSON_obj(week):\n\tif datestamp_correct_form_p(week):\n\t\tdatabase_filepath = get_database_filepath()\n\t\tcurrent_races_filepath = database_filepath + \"polldata_\" + week + \".json\"\n\t\tif os.path.isfile(current_races_filepath):\n\t\t\twith open(current_races_filepath, 'r') as infile:\n\t\t\t\tdata = json.load(infile)\n\t\t\t\treturn data\n\t\telse:\n\t\t\treturn \"No data stored for that week.\"\n\telse:\n\t\treturn \"Invalid Week Format\"",
"def create_poll(self, options):\n\n if self.poll is not None:\n return \"Can't start poll, one is running try !poll stop first\"\n try:\n self.poll = Poll(options)\n except SyntaxError:\n return \"You probably want to start the poll correctly. Nice try.\"\n return self.poll.get_current_state()",
"def create_poll_result_record(db_url, db_user, db_password, db_name, poll_id, answer):\n db = connect(db_url, db_user, db_password, db_name)\n cursor = db.cursor()\n\n sql_insert = 'INSERT INTO poll_result (poll_id, answer, votes) ' + \\\n 'VALUES (' + str(poll_id) + ', \"' + answer + '\", 0)'\n\n sql_fetch = 'SELECT MAX(poll_result_id) FROM poll_result WHERE poll_id = ' + str(poll_id)\n\n out = 0\n try:\n cursor.execute(sql_insert)\n db.commit()\n cursor.execute(sql_fetch)\n out = cursor.fetchone()[0]\n except:\n out = 0\n\n db.close()\n return out"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get all polls owned by a user from the database
|
def get_own_polls(owner):
polls_db = get_entries('polls', 'owner', owner)
if polls_db is None:
return None
polls = []
for poll in polls_db:
poll = dict(poll)
poll['closed'] = poll['end_date'] < datetime.now()
polls.append(poll)
return polls
|
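A small hypothetical usage of the function above, keeping only the polls that are still open:

polls = get_own_polls("alice") or []
open_polls = [p for p in polls if not p['closed']]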
[
"def get_polls(self):\n docs = self.collection.find()\n polls = [_poll_from_doc(doc) for doc in docs]\n return polls",
"def get_all_by_user(username):\n id = username_to_id(username)\n\n if id == 6: #user is admin, return all jobs\n return {'jobs': [clean_job(job) for job in jobs.find({'creator':{'$gte':0}})]}\n else: #only return the user's jobs and public jobs\t\n return {'jobs': [clean_job(job) for job in jobs.find({\n '$or': [\n {'creator': username_to_id(username)},\n {'publicflag': 1}\n ]\n })]}",
"def list(self, request) -> QuerySet:\n if request.user.has_perm(\"user.can_retrieve_all_users\"):\n return self.get_queryset().all()\n elif request.user.has_perm(\"user.can_retrieve_users_in_school\"):\n #TODO: implment this\n pass \n else:\n raise PermissionError(\"You cannot retrieve users that way.\")",
"def get_queryset(self):\n user = self.request.user\n return Trip.objects.filter(owner=user)",
"def all_circles(self, user):\n return Circle.objects.filter(owner=user)",
"def list_polls(request_ctx, **request_kwargs):\n\n path = '/v1/polls'\n url = request_ctx.base_api_url + path.format()\n response = client.get(request_ctx, url, **request_kwargs)\n\n return response",
"def _user_playlists(request):\n user = request.user if request is not None else None\n return mpmodels.Playlist.objects.all().viewable_by_user(user)",
"def get_queryset(self, * args, ** kwargs):\n return self.request.user.households.all()",
"def get_all_room_users(self) -> QueryType[User]:\n return self.users.all().order_by('membership__id')",
"async def get_users():\n session: Session = Session()\n count_table = session.query(\n BorrowingUserTable.user_key,\n functions.count(\n BorrowingUserTable.key).label(\"borrowed_books\")\n ).filter(\n BorrowingUserTable.return_date == None\n ).group_by(\n BorrowingUserTable.user_key\n ).subquery()\n ret = session.query(\n UserTable,\n functions.coalesce(\n count_table.c.borrowed_books, 0\n ).label(\"borrowed_books\")\n ).outerjoin(\n count_table,\n UserTable.key == count_table.c.user_key\n ).order_by(\n UserTable.lastname,\n UserTable.firstname,\n UserTable.classname\n ).all()\n logger.info(ret)\n return ret",
"def get_user_playlists():\n playlists_info = sp.current_user_playlists(limit=50, offset=0)['items']\n playlists = []\n for playlist in playlists_info:\n playlist_dict = {}\n playlist_dict['uri'] = playlist['uri']\n playlist_dict['name'] = playlist['name']\n playlists.append(playlist_dict)\n return playlists",
"def get_user_watchlist(user):\r\n\r\n user_watchlist = Watchlist.query.filter(User.id).all() \r\n\r\n return user_watchlist",
"def get_user_playlists(user_id):\n u = User.load(user_id)\n show_collection(Playlist.for_user(u))",
"def get_voters_per_group(poll):\n\n return User.objects.filter(groups=poll.valid_groups).count()",
"def list(self, subcmd):\n\n for user in self.db.get_users():\n print(user.name)",
"def getPlaylists(self, user=None):\n pass",
"def get_pricings_for_list(event, users):\n pricings = RegConfPricing.objects.none()\n \n for user in users:\n pricings = pricings | get_available_pricings(event, user)\n pricings = pricings | get_available_pricings(event, AnonymousUser())\n \n # return the QUERYSET\n return pricings",
"def get_users(self):\r\n sql = \"SELECT * FROM user WHERE auth <> 'root' LIMIT \" + str(self.user_per_page) + \" OFFSET \" + str(self.offset)\r\n self.cur.execute(sql)\r\n data = self.cur.fetchall()\r\n return data",
"def get_all():\n return list(User.objects.all())"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Delete a poll from the database
|
def delete_poll(poll):
get_db().execute('DELETE FROM ballots WHERE poll = ?;', [poll])
get_db().execute('DELETE FROM results WHERE poll = ?;', [poll])
get_db().execute('DELETE FROM choices WHERE poll = ?;', [poll])
get_db().execute('DELETE FROM polls WHERE uid = ?;', [poll])
get_db().commit()
|
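The four DELETE statements above are only made durable by the final commit. As a sketch of an alternative (not the code used by the application), the same cleanup can use the sqlite3 connection as a context manager, so that an exception in any statement rolls the whole cleanup back:

def delete_poll_atomic(poll):
    db = get_db()
    with db:   # commits on success, rolls back on exception
        db.execute('DELETE FROM ballots WHERE poll = ?;', [poll])
        db.execute('DELETE FROM results WHERE poll = ?;', [poll])
        db.execute('DELETE FROM choices WHERE poll = ?;', [poll])
        db.execute('DELETE FROM polls WHERE uid = ?;', [poll])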
[
"def test_delete_poll(self):\n response = self.client.delete(f\"/api/poll/{self.poll.pk}/delete/\", format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def cancel_poll_record(db_url, db_user, db_password, db_name, poll_id):\n db = connect(db_url, db_user, db_password, db_name)\n cursor = db.cursor()\n\n sql_delete_poll = \"DELETE FROM poll WHERE poll_id = \" + \\\n str(poll_id) + \" AND published = 0\"\n sql_delete_poll_result = \"DELETE FROM poll_result WHERE poll_id = \" + \\\n str(poll_id)\n\n rows_affected = 0\n try:\n cursor.execute(sql_delete_poll)\n db.commit()\n rows_affected = cursor.rowcount\n\n if rows_affected > 0:\n cursor.execute(sql_delete_poll_result)\n db.commit()\n rows_affected = rows_affected + cursor.rowcount\n except:\n rows_affected = 0\n\n db.close()\n return rows_affected",
"def delete(self, sql):",
"def delete(self, pk):",
"def delete_record():",
"def delquestion():\n try:\n id = request.form['id']\n models.Question.objects.get(id=id).delete()\n return \"\"\n except:\n print traceback.print_exc()",
"def delete_survey(self,iSurveyID):",
"def query_delete_exercise(exoId):\n\n exo = MetalExercise.query.get(exoId)\n if exo:\n db.session.delete(exo)\n db.session.commit()\n lg.warning('Deleted exercise !')",
"def delete_tag_from_db():\n tag_id = request.form['tag_id']\n tag = models.Tag.objects(id=tag_id).first()\n tag.delete()\n return \"\"",
"def delete_question(self,iQuestionID):",
"def delete_polls(context: CallbackContext, session: scoped_session) -> None:\n try:\n context.job.enabled = False\n\n # Only delete a few polls at a time to prevent RAM usage spikes\n polls_to_delete = (\n session.query(Poll)\n .filter(Poll.delete.isnot(None))\n .order_by(Poll.updated_at.asc())\n .limit(20)\n .all()\n )\n for poll in polls_to_delete:\n if poll.delete == PollDeletionMode.DB_ONLY.name:\n delete_poll(session, context, poll)\n elif poll.delete == PollDeletionMode.WITH_MESSAGES.name:\n delete_poll(session, context, poll, True)\n session.commit()\n\n except Exception as e:\n sentry.capture_job_exception(e)\n\n finally:\n context.job.enabled = True",
"def delete(self):\n for index,delTask in enumerate(self.db): \n if self.task.delete in delTask.add:\n del self.db[index]\n self.delete() ##function must be recursive to update self.db indexes",
"def delete_feedback(token):\r\n Feedback.query.filter_by(id=token).delete()\r\n db.session.commit()",
"def delete_ride(offer_id): \n db.cursor.execute(\n \"\"\"\n DELETE FROM ride_offers\n where offer_id = (%s) \n \"\"\",\n (offer_id,)\n )\n db.commit()\n return {\"message\": \"ride deleted\"}",
"def delete_record(self, identifier):\n connection = self.start_connection()\n cursor = connection.cursor()\n cursor.execute('INSERT INTO completed (Task) SELECT Task from todo WHERE Task=?', [identifier])\n cursor.execute('DELETE FROM todo WHERE Task=?', [identifier])\n self.commit_close_connection(connection)",
"def delete(self, **kwargs):\n if self.doc_id:\n doc = self._connection(self.server, self.database)[self.doc_id]\n self._connection(self.server, self.database).delete(doc)",
"def delete(self, proxy):\n self.db.delete(proxy)",
"def delete_resource(self):\r\n results = ResponsesREST.SERVER_ERROR.value\r\n query = \"DELETE FROM Resource WHERE routeSave = %s; \"\r\n param = [self.route_save]\r\n result = self.connect.send_query(query, param)\r\n if result:\r\n results = ResponsesREST.SUCCESSFUL.value\r\n return results",
"def delete(reminder_id: int, app: Flask, db: SQLAlchemy) -> int:\n reminder: ReminderModel = ReminderModelService.retrieve_by_reminder_id(\n reminder_id, app\n )\n if reminder:\n\n RemindersTimeSlotModelService.delete_all_by_reminder_id(\n reminder_id, app, db\n )\n\n if app.config[\"DEBUG\"] or app.config[\"TESTING\"]:\n\n query = \"DELETE FROM Reminders WHERE reminder_id = ?;\"\n DBMan.execute_sql_query(app, query, (reminder_id,))\n\n else:\n db.session.delete(reminder)\n db.session.commit()\n return reminder_id\n return None"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get a user's ballot for a poll from the database
|
def get_voter_ballot(voter, poll):
ballot = query_read("SELECT choices.id, ballots.grade FROM choices JOIN ballots ON ballots.poll = ? and choices.id = ballots.choice and ballots.voter = ? ORDER BY choices.id;", [poll, voter])
if not ballot:
return None
return dict(ballot)
|
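Since the rows selected above are (choice id, grade) pairs, the return value of get_voter_ballot is simply a mapping from choice id to the grade the voter gave it; the ids below are hypothetical.

ballot = get_voter_ballot("alice", "a3f9c2")
# e.g. {1: 5, 2: 3, 3: 0} for a three-choice poll, or None if no ballot was cast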
[
"def get_user_bid(self):\n if self.good.deal == AUCTION:\n bid = AuctionBids.objects.filter(good=self.good, user=self.user).latest('updated')\n return bid.user_price",
"def get_points(user_id):\n db = sqlite3.connect('Ranking/Rankings.db')\n adder = db.cursor()\n adder.execute('SELECT * FROM ranks ORDER BY Points DESC')\n db_result = adder.fetchall()\n for values in db_result:\n values = str(values).replace(\"(\", \"\").replace(\")\", \"\").split(',') # Formats the returned value into ID\n if values[0] == user_id: # Checks for the user ID\n return values[1] # Returns the user points\n print(\"ERROR: NO USER ID FOUND\")\n db.close()",
"def get_ballot_voters(poll):\n voters = query_read(\"SELECT voter FROM ballots WHERE poll = ? ORDER BY voter;\", [poll])\n\n if not voters:\n return None\n\n return [voter[0] for voter in voters]",
"def test_get_user_1_bonds(self):\n with current_app.test_client() as c:\n user_1_jwt = self.generate_jwt(1)\n response = c.get('/api/bonds', query_string=dict(api_key=user_1_jwt))\n assert response.json.get('data') == [{'id': 1,\n 'isin':'TEST1',\n 'size': 100,\n 'currency': 'EUR',\n 'maturity': '1970-01-01',\n 'legal_name': 'TEST_LEI_1'}]",
"def getaccount(user):\n id = user.user_id() \n name = user.nickname()\n email = user.email()\n\n logging.info('lookup account for %s %s %s' % (id,name,email))\n # we're using id to get the account \n q = BankAccount.gql('WHERE owner = :1', id) \n r = q.get()\n logging.info('after datastore query, we have %s' % r)\n if r: # they have a bank account \n return r\n else: # we have to make them a new one \n b = BankAccount(\n id=id, # here's where we'd construct good acct numbers\n owner=id,\n balance=150,\n ownername = user.nickname()\n ) \n b.put()\n return b",
"def get_producto_bodega(ip, ib):\n producto_bodega = ProductoBodega.objects.get(producto=ip, bodega=ib)\n return producto_bodega",
"def loadBasket():\n if isUser(session.get(\"UserID\")):\n return getBasketAsJsonString(session[\"UserID\"])\n return {}",
"def user_profile():\n user_id = session[\"user_id\"]\n picks = Pick.query.filter_by(author=user_id).all()\n return render_template(\n \"profile.html\",\n picks=picks\n )",
"def get_user(user_id):\n\n try:\n return Stakeholder.objects.all().filter(pk=user_id)[0]\n except LookupError:\n # if connector supports exception, should raise userDoesNotExist\n return None\n\n # all() -> SELECT * FROM stakeholder;\n # all().filter(nick_name = 'a') -> SELECT * FROM stakeholder WHERE nick_name = 'a';",
"def get_my_latest_blood_pressure(self):\n return self.get_user_latest_blood_pressure(self.request.user)",
"def get_current_user_points(self, user_num):\n c = self.db.cursor()\n try:\n c.execute(\"\"\"\n SELECT\n points\n FROM\n Users\n WHERE\n Users.id = ?\n \"\"\",\n (user_num,))\n values = c.fetchone()\n return values[0]\n except sqlite3.Error as e:\n log.error(e)\n raise Exception",
"def test_get_user_2_bonds(self):\n with current_app.test_client() as c:\n user_2_jwt = self.generate_jwt(2)\n response = c.get('/api/bonds', query_string=dict(api_key=user_2_jwt))\n assert response.json.get('data') == [{'id': 2,\n 'isin': 'TEST2',\n 'size': 1000,\n 'currency': 'EUR',\n 'maturity': '1970-01-02',\n 'legal_name': 'TEST_LEI_2'}]",
"def get_seeker_by_username(db_file, username):\n conn = sqlite3.connect(db_file)\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM seekers_personal WHERE username = ?;\",(username,))\n row = cur.fetchone()\n conn.commit()\n cur.close()\n conn.close()\n if row == None:\n return None\n id, fname, lname, birth_date, phone, email, city, education, hobbies, skills, username, password, bio = row\n user = Seeker(fname, lname, birth_date, phone, email, city, education, hobbies, skills, username, password, bio, id)\n return user",
"def getBorrowedBooksByUserID(self, lmsUserID):\n self.cursor.execute(\n \"SELECT * FROM BookBorrowed WHERE LmsUserID = %s AND status = 'borrowed'\", (lmsUserID,))\n res = self.cursor.fetchall()\n return res",
"def get_user_details(name=\"\"):\n try:\n # connect to the database\n conn = psycopg2.connect(\"dbname='yelp_db' user='sadipgiri'\")\n\n # we're gonna use a fancy cursor that gives us back a dictionary for each row!\n cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n\n # pull out the user given the name we have - we use the (name,) tuple combined with\n # the %s token in our query string such that name gets put in the place of %s\n cur.execute(\"\"\"SELECT * FROM \"user\" WHERE name=%s\"\"\", (name,))\n\n # there should only be one result for this query, but either way we only want one row back\n user = cur.fetchone()\n\n # make a User object and return it\n my_user = User(user)\n return my_user\n except Exception as e:\n print(\"Unable to connect to database: {0}\".format(e))",
"def verifier_bilan(pseudo, password):\r\n\r\n conn = psycopg2.connect(database=DATABASE,\r\n user=USER,\r\n host=HOST,\r\n password=PASSWORD) \r\n\r\n cur = conn.cursor()\r\n \r\n cur.execute(\"\"\"select bilan.bilan\r\n from bilan, users\r\n where pseudo = '{0}' AND\r\n password = '{1}' AND\r\n users.id = bilan.id_user\"\"\".format(pseudo, password))\r\n \r\n\r\n conn.commit() \r\n\r\n rows = cur.fetchall()\r\n liste = [i for i in rows]\r\n\r\n \r\n return liste[0][0], liste[0][1]",
"def get_single_user():",
"def leaderboard(request):\n\n # Access control - check user is logged in\n try:\n user_id = request.session['user_id']\n except:\n return render(request, 'login.html')\n\n # Get the total points of each user\n with connection.cursor() as cursor:\n cursor.execute(\"SELECT name, SUM(value) AS sum FROM has_ach JOIN achievements \"\n \"ON has_ach.ach_id = achievements.ach_id JOIN users \"\n \"ON has_ach.user_id = users.user_id \"\n \"GROUP BY users.user_id ORDER BY sum DESC\")\n data = dictfetchall(cursor)\n\n #\n pic_url = getProfile(request)\n user_id = request.session['user_id']\n context= {'data': data,'user_id': user_id,\n 'pfp': pic_url}\n return render(request, 'leaderboard.html', context)",
"def blabbit_get_object(self):\n from django.shortcuts import get_object_or_404\n queryset = self.get_queryset()\n try:\n obj = User.objects.get(username__iexact=self.kwargs['username'])\n except User.DoesNotExist:\n obj = get_object_or_404(queryset, pk=self.kwargs['username'])\n \n # May raise a permission denied\n self.check_object_permissions(self.request, obj)\n\n return obj"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return the list of all the voters from a poll
|
def get_ballot_voters(poll):
voters = query_read("SELECT voter FROM ballots WHERE poll = ? ORDER BY voter;", [poll])
if not voters:
return None
return [voter[0] for voter in voters]
|
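A hypothetical call to the function above, for example to display who has already voted:

voters = get_ballot_voters("a3f9c2")   # e.g. ['alice', 'bob'], or None if nobody voted yet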
[
"def get_voters(self) -> List['Voter']:\n return self.voters",
"def candidate_votes():\n for name in votedCandidates: \n candidateVotes.append(votes(name))\n return candidateVotes",
"def get_sorted_votes(poll: Poll, votes: List[Vote]) -> InstrumentedList:\n\n def get_user_name(vote):\n \"\"\"Get the name of user to sort votes.\"\"\"\n return vote.user.name\n\n if poll.user_sorting == UserSorting.name.name:\n votes.sort(key=get_user_name)\n\n return votes",
"def fetch_all_votes(self,office_id):\n query = \"\"\" SELECT * FROM votes \"\"\"\n # query = \"\"\"SELECT offices.name AS office, users.firstname AS firstname, users.lastname AS lastname,\n # COUNT (votes.candidate) AS votes FROM votes JOIN offices ON offices.office_id = votes.office\n # JOIN users ON users.user_id = votes.candidate GROUP BY users.firstname, users.lastname, offices.name\n # \"\"\"\n\n candidates_query = \"\"\"SELECT * FROM candidates JOIN users ON candidates.candidate=users.user_id WHERE office= '{}'\"\"\".format(office_id)\n\n candidates = database.select_from_db(candidates_query)\n\n size = len(candidates)\n\n votes = []\n\n if size > 0:\n for candidate in candidates:\n vote_query = \"\"\"SELECT candidate , COUNT(*) as votes FROM votes WHERE candidate = '{}' GROUP by candidate\"\"\".format(candidate[\"candidate_id\"])\n candidate_votes = database.select_from_db(vote_query)\n votes.append({\"candidate\":candidate[\"firstname\"], \"votes\":candidate_votes})\n\n return votes",
"def getEventListByVoter(voterUserID):\n\tquery = EventVote.query(EventVote.userid==voterUserID).order(-Event.createTime)\n\tresult = query.fetch()\n\teventlist = []\n\tfor eventvote in result:\n\t\tevent = eventvote.key.parent().get()\n\t\tmostVotedTime = getMostVotedTime(event)\n\t\teventlist.append([event.name, event.location,\n\t\t\tdatetime2str(mostVotedTime), event.key.id(), event.cancelled,\n\t\t\tevent.finalized, datetime2str(event.finaltime),\n\t\t\tgetUserInfo(event.ownerid).name\n\t\t\t])\n\treturn eventlist",
"def getVoteList(eventid):\n\tancestor_key = ndb.Key('Event', eventid)\n\tquery = EventVote.query(ancestor=ancestor_key)\n\tresult = query.fetch()\n\treturn result",
"def get_polls(self):\n docs = self.collection.find()\n polls = [_poll_from_doc(doc) for doc in docs]\n return polls",
"def list_pollutants() -> [str]:\n return [str(p) for p in Pollutant]",
"def getVoteData():\n\tvote_query \t\t= url_base + \"votes?fields=voter_ids,bill_id,vote_type\" + api_key\n\tvotes\t\t\t= urlopen(vote_query)\t\t#instance\n\tvote_data \t\t= votes.read()\t\t\t\t#JSON\n\tvote_dict \t\t= json.loads(vote_data)\t\t#dict\n\tvote_count \t\t= vote_dict[\"count\"]\t\t#int\n\tpage_count\t\t= (vote_count/50) + 1\n\tthreads \t\t= []\n\n\tfor page in range(page_count):\t\t\t\t#spawn 1 thread per page here\n\t\tthread = VoteWhip(page+1)\n\t\tthread.start()\n\t\tthreads.append(thread)\n\n\tfor thread in threads:\n\t\tthread.join()",
"async def poll_list_respondents_by_option(self, ctx, *poll_ids: str):\n\n id_fetch_point, poll_ids = await get_poll_context_channel(ctx, poll_ids)\n if id_fetch_point is None:\n return\n\n async with ctx.typing():\n async for poll, pid in gen_polls_from_ids(ctx, poll_ids, id_fetch_point):\n result_embed = discord.Embed(\n title=poll.embeds[0].title\n )\n\n try:\n for _, desc, reaction in gen_poll_options(poll):\n respondents = []\n async for user in reaction.users():\n if not user.bot:\n user_line = escape_markdown(user.name) + \"#\" + user.discriminator\n user_nick = getattr(user, 'nick', None)\n if user_nick is not None:\n user_line += \" _\" + escape_markdown(user_nick) + \"_\"\n respondents.append(user_line)\n\n if len(respondents) == 0:\n respondents = \"None\"\n else:\n respondents = \"\\n\".join(respondents)\n\n field_title = reaction.emoji + \" \" + desc\n result_embed.add_field(name=field_title, value=respondents, inline=False)\n except KeyError:\n await ctx.send(f'Error processing poll: `{pid}`')\n continue\n\n await ctx.send(embed=result_embed)",
"def get_voters_per_group(poll):\n\n return User.objects.filter(groups=poll.valid_groups).count()",
"def take_votes(self,node_list,poll_func=None):\n self.vote_handler.take_votes(node_list,poll_func)",
"def get_viewers(streamer_obj) -> list:\r\n session = requests.Session()\r\n retry = Retry(connect=500, backoff_factor=0.5)\r\n adapter = HTTPAdapter(max_retries=retry)\r\n session.mount(\"http://\", adapter)\r\n session.mount(\"https://\", adapter)\r\n\r\n try:\r\n channel_json = session.get(url=(f\"https://tmi.twitch.tv/group/user/\"\r\n f\"{streamer_obj}/chatters\")).json()\r\n broadcaster = channel_json[\"chatters\"][\"broadcaster\"]\r\n viewers = channel_json[\"chatters\"][\"viewers\"]\r\n moderators = (channel_json['chatters']['moderators'])\r\n staff = (channel_json['chatters']['staff'])\r\n vips = (channel_json['chatters']['vips'])\r\n global_mods = (channel_json['chatters']['global_mods'])\r\n admins = (channel_json['chatters']['admins'])\r\n viewers_list = viewers + staff + vips + global_mods + admins\r\n viewers_and_mods = [viewers_list, moderators, broadcaster]\r\n return viewers_and_mods\r\n except TypeError as e:\r\n print(47, \"viewer_data\", e)\r\n return []",
"def show_all_vote(message):\n keywords = r.smembers(VOTE_WORDS)\n counts = r.mget(VOTE_PREFIX.format(keyword) for keyword in keywords)\n counts = sorted(zip(keywords, map(lambda x: x or 0, counts)))\n message.send(\n '\\n'.join('{} is voted to {}'.format(k, c) for k, c in counts)\n )",
"def vol_list(server, virt=\"KVM\", pool_name=None):\n\n cmd = 'virsh -c %s vol-list %s 2>/dev/null | sed -e \"1,2 d\" -e \"$ d\"' \\\n % (virt2uri(virt), pool_name)\n ret, out = utils.run_remote(server, cmd)\n if ret != 0:\n return None\n\n return out",
"def load_poll_data():\n polls = []\n \n with open('./cogs/polls.json', 'r', encoding='utf-8') as poll_file:\n try:\n polls = json.load(poll_file)\n except json.JSONDecodeError:\n pass\n return polls",
"def _getVotes(self):\n votes = {'options': [],\n 'total': 0}\n for option in self.getOptions():\n index = option.get('option_id')\n description = option.get('description')\n option_votes = self.annotations.get(VOTE_ANNO_KEY % index, 0) # noqa: S001\n votes['options'].append({'description': description,\n 'votes': option_votes,\n 'percentage': 0.0})\n votes['total'] = votes['total'] + option_votes\n for option in votes['options']:\n if option['votes']:\n option['percentage'] = option['votes'] / float(votes['total'])\n return votes",
"def get_voter_ballot(voter, poll):\n ballot = query_read(\"SELECT choices.id, ballots.grade FROM choices JOIN ballots ON ballots.poll = ? and choices.id = ballots.choice and ballots.voter = ? ORDER BY choices.id;\", [poll, voter])\n\n if not ballot:\n return None\n\n return dict(ballot)",
"def getVotingChoiceByVID(self, vID):\n\n cursor = self.conn.cursor()\n query = \"SELECT altid, choice, votes \" \\\n \"FROM VotingQuestion inner join VotingChoice \" \\\n \"ON VotingQuestion.vID = VotingChoice.vID \" \\\n \"WHERE VotingQuestion.vID = %s;\"\n cursor.execute(query, (vID,))\n result= []\n for row in cursor:\n result.append(row)\n return result"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get cached results from the poll or compute them. After computation, ballots are destroyed.
|
def get_results(poll):
assert poll is not None, "Invalid poll: None"
if not poll['closed']:
return None
results = {}
# Get cached results
results_db = get_entries('results', 'poll', poll['uid'])
# If no cache, compute the results and store them
if len(results_db) == 0:
ballots = get_entries('ballots', 'poll', poll['uid'])
        # If no ballots were provided, there are no results
if len(ballots) == 0:
return None
# Number of ballots cast
ballots_count = len(ballots) / len(poll['choices'])
# Build data structures
choices = {}
results = {}
for choice in poll['choices']:
choices[choice['id']] = {'votes': [0] * 7}
results[choice['id']] = {'ballots': ballots_count}
        # Count the number of votes for each grade of each choice
for ballot in ballots:
choices[ballot['choice']]['votes'][ballot['grade']] += 1
        # Store the counts as percentages for display purposes
for choice in choices:
results[choice]['percentages'] = []
for vote in choices[choice]['votes']:
results[choice]['percentages'].append(100 * vote / ballots_count)
        # Transform the vote counts into a flat list of votes
for _, choice in choices.items():
votes = []
for i in range(len(choice['votes'])):
votes.extend([i] * choice['votes'][i])
choice['votes'] = votes
        # Compute the median and the number of better and worse votes
for _, choice in choices.items():
choice_compute(choice)
# Apply the grade for each choice
for choice in choices:
if choices[choice]['median'] == 0:
results[choice]['grade'] = "To reject"
elif choices[choice]['median'] == 1:
results[choice]['grade'] = "Poor"
elif choices[choice]['median'] == 2:
results[choice]['grade'] = "Acceptable"
elif choices[choice]['median'] == 3:
results[choice]['grade'] = "Fair"
elif choices[choice]['median'] == 4:
results[choice]['grade'] = "Good"
elif choices[choice]['median'] == 5:
results[choice]['grade'] = "Very Good"
elif choices[choice]['median'] == 6:
results[choice]['grade'] = "Excellent"
if choices[choice]['better'] > choices[choice]['worse']:
results[choice]['grade'] += "+"
else:
results[choice]['grade'] += "-"
        # Sort the votes to establish the ranks
ranks = rank_choices(choices, ballots_count)
for choice in results:
results[choice]['rank'] = ranks[choice]
# Store the results
results_db = []
for choice, result in results.items():
            rank = result['rank']
            rank_str = ";".join(str(r) for r in rank) if isinstance(rank, list) else str(rank)
            percentages_str = ";".join(str(p) for p in result['percentages'])
            results_db.append((poll['uid'], choice, rank_str, result['grade'], percentages_str, result['ballots']))
get_db().executemany("INSERT INTO results (poll, choice, rank, grade, percentages, ballots) VALUES (?, ?, ?, ?, ?, ?)", results_db)
# Destroy the ballots
get_db().execute('DELETE FROM ballots WHERE poll = ?', [poll['uid']])
else:
for result in results_db:
            rank = result['rank']
            results[result['choice']] = {
                'rank': int(rank) if ';' not in rank else [int(vote) for vote in rank.split(';')],
                'grade': result['grade'],
                'percentages': [int(percentage) for percentage in result['percentages'].split(';')],
                'ballots': result['ballots'],
            }
return results
|
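get_results above relies on choice_compute and rank_choices, which are defined elsewhere in the application and are not shown in this section. Below is a minimal sketch of what choice_compute is expected to do, assuming choice['votes'] is the flat list of grades built above; the exact tie-breaking used by the application may differ.

def choice_compute(choice):
    votes = sorted(choice['votes'])
    # Lower median, the usual choice in majority judgment.
    median = votes[(len(votes) - 1) // 2]
    choice['median'] = median
    choice['better'] = sum(1 for vote in votes if vote > median)
    choice['worse'] = sum(1 for vote in votes if vote < median)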
[
"def poll(self):\n data = self.get_data()\n if data:\n self.add_metrics(data)",
"def _result(self, res):\n worker_info = self.workers[res.source]\n worker_info.pending_results -= 1\n self.pending_results -= 1\n assert worker_info.pending_results >= 0\n assert self.pending_results >= 0\n if worker_info.pending_results == 0:\n self.available.append(worker_info)\n self._log_available()\n self.results.put(res)\n\n # Try to process as many queued requests as possible.\n assert len(self.requests) == 0 or self.requests[0].target == ANY\n while len(self.requests) > 0:\n req = self.requests.popleft()\n if req.target == ANY:\n if len(self.available) > 0:\n self._request(req, force_processing=True)\n else:\n self.requests.appendleft(req)\n break\n else:\n self._request(req, force_processing=True)\n assert len(self.requests) == 0 or self.requests[0].target == ANY",
"def _load_previous_results(self):\n # Reload volumes into geometry\n self.prev_res[-1].transfer_volumes(self.model)\n\n # Store previous results in operator\n # Distribute reaction rates according to those tracked\n # on this process\n if comm.size != 1:\n prev_results = self.prev_res\n self.prev_res = Results()\n mat_indexes = _distribute(range(len(self.burnable_mats)))\n for res_obj in prev_results:\n new_res = res_obj.distribute(self.local_mats, mat_indexes)\n self.prev_res.append(new_res)",
"def __call__(self, *args: Any, **kwargs: Any) -> Any:\n # Load.\n if self.cache_file_exists():\n return self.load()\n # Compute.\n result = self.compute(*args, **kwargs)\n # Store.\n write_to_cache = self.write_to_cache\n if write_to_cache == \"auto\":\n compute_time = self.time_to_result(memoize=False)\n estimated_load_time = self.estimate_load_time(result)\n write_to_cache = estimated_load_time < compute_time\n logger.debug(\n f'{\"Going\" if write_to_cache else \"Not going\"} to cache {self}'\n f\" because estimated_load_time={estimated_load_time} \"\n f'{\"<\" if write_to_cache else \">=\"} '\n f\"compute_time={compute_time}\"\n )\n if write_to_cache:\n self.store(result)\n return result",
"def _maybe_update_curve_pools(self) -> Optional[list[gevent.Greenlet]]:\n if should_update_protocol_cache(GeneralCacheType.CURVE_LP_TOKENS) is False:\n return None\n\n return [self.greenlet_manager.spawn_and_track(\n after_seconds=None,\n task_name='Update curve pools cache',\n exception_is_error=True,\n method=self.update_curve_pools_cache,\n )]",
"def reload_result_to_cache():\n fib_objs = FibSeries.objects.all()\n with transaction.atomic():\n for obj in fib_objs:\n cache.set(obj.num_key, obj.result, timeout=CACHE_TTL)",
"def _fetch(self):\n if self._cache:\n return self._cache\n\n query_string = urlencode(self._filters)\n url = '%s?%s' % (self.base_url, query_string)\n self._cache = requests.get(url).json()\n return self._cache",
"def _ComputationWorkThread(self, withVideo):\n if not os.path.exists(PATH_TO_STATISTIC_RESULTS + '/individual'):\n os.makedirs(PATH_TO_STATISTIC_RESULTS + '/individual')\n if not os.path.exists(PATH_TO_STATISTIC_RESULTS + '/users'):\n os.makedirs(PATH_TO_STATISTIC_RESULTS + '/users')\n if not os.path.exists(PATH_TO_STATISTIC_RESULTS + '/videos'):\n os.makedirs(PATH_TO_STATISTIC_RESULTS + '/videos')\n if not os.path.exists(PATH_TO_STATISTIC_RESULTS + '/byAge'):\n os.makedirs(PATH_TO_STATISTIC_RESULTS + '/byAge')\n if not os.path.exists(PATH_TO_STATISTIC_RESULTS + '/total'):\n os.makedirs(PATH_TO_STATISTIC_RESULTS + '/total')\n vmax = 0\n step = 0.03\n\n self.userManager.StoreUserStats(PATH_TO_STATISTIC_RESULTS +\n '/total/users')\n\n pool = ProcessingPool()\n for resultId in self.resultsContainers:\n rc = self.resultsContainers[resultId]\n userId = rc.user.uid\n videoId = rc.videoId\n rc.user.ParseFormAnswers()\n sex = rc.user.sex\n age = rc.user.age\n\n self.resultsByUser[userId].append(\n rc)\n self.resultsByVideo[videoId].append(\n rc)\n self.resultsByAge[age - age % self.ageStep].append(\n rc)\n self.resultsBySex[sex].append(\n rc)\n # vmax = \\\n # max(vmax,\n # self.resultsById[resultId].positionMatrix.max()\n # )\n\n print('\\r\\033[2KProcess individual results')\n self.PrintProgress()\n def WorkerResults(step, rc):\n if rc.isNew:\n processedResult = rc.GetProcessedResult(step)\n processedResult.StorePositions(\n PATH_TO_STATISTIC_RESULTS+'/individual/{}'.format(rc.resultId),\n vmax=None\n )\n processedResult.StoreAngularVelocity(\n PATH_TO_STATISTIC_RESULTS+'/individual/{}.txt'.format(rc.resultId)\n )\n return rc\n async_result = [\n pool.apipe(\n WorkerResults,\n step, self.resultsContainers[resultId]\n ) for resultId in self.resultsContainers\n ]\n for r in async_result:\n rc = r.get()\n self.resultsContainers[rc.resultId] = rc\n self.progressBar['value'] += 1\n self.PrintProgress()\n aggrUserResults = dict()\n aggrVideoResults = dict()\n aggrAgeResults = dict()\n print('\\r\\033[2KProcess results by user')\n self.PrintProgress()\n def WorkerUsers(resultsByUser, userId, step):\n aggSize = len(resultsByUser)\n if aggSize > 0:\n dumpPath = \\\n PATH_TO_STATISTIC_RESULTS+'/users/uid-{}.dump'.format(userId)\n ac = AggregateContainer.Load(dumpPath, step, aggSize)\n if ac.isNew:\n aggResult = sum(resultsByUser)\n aggResult.StorePositions(\n PATH_TO_STATISTIC_RESULTS+'/users/uid-{}'.format(userId),\n vmax=None\n )\n aggResult.StoreVision(\n PATH_TO_STATISTIC_RESULTS+'/users/uid-{}_vision'.format(userId)\n )\n aggResult.StoreAngularVelocity(\n PATH_TO_STATISTIC_RESULTS+'/users/uid-{}.txt'.format(userId)\n )\n aggResult.StoreOrthodromicDistance(\n PATH_TO_STATISTIC_RESULTS +\n '/users/uid-{}_orthoDist.txt'.format(userId)\n )\n # vmax = max(vmax,\n # aggResult.aggPositionMatrix.max())\n Store(ac, dumpPath)\n return None\n async_result = [\n pool.apipe(\n WorkerUsers,\n self.resultsByUser[userId], userId, step\n ) for userId in self.resultsByUser\n ]\n for r in async_result:\n r.get()\n self.progressBar['value'] += 1\n self.PrintProgress()\n\n print('\\r\\033[2KProcess results by age')\n self.PrintProgress()\n def WorkerAge(resultsByAge, ageStep, age, step):\n aggSize = len(resultsByAge)\n if aggSize > 0:\n dumpPath = PATH_TO_STATISTIC_RESULTS+'/byAge/{}_{}.dump'.format(\n age, age + ageStep\n )\n ac = AggregateContainer.Load(dumpPath, step, aggSize)\n if ac.isNew:\n aggResult = sum(resultsByAge)\n aggResult.StorePositions(\n PATH_TO_STATISTIC_RESULTS+'/byAge/{}_{}'.format(\n age, age + 
ageStep),\n vmax=None\n )\n aggResult.StoreVision(\n PATH_TO_STATISTIC_RESULTS+'/byAge/{}_{}_vision'.format(\n age, age + ageStep)\n )\n aggResult.StoreAngularVelocity(\n PATH_TO_STATISTIC_RESULTS+'/byAge/{}_{}.txt'.format(\n age, age + ageStep)\n )\n aggResult.StoreOrthodromicDistance(\n PATH_TO_STATISTIC_RESULTS +\n '/byAge/{}_{}_orthoDist.txt'.format(age, age + ageStep)\n )\n # vmax = max(vmax,\n # aggResult.aggPositionMatrix.max())\n Store(ac, dumpPath)\n return None\n\n async_result = [\n pool.apipe(\n WorkerAge,\n self.resultsByAge[age], self.ageStep, age, step\n ) for age in self.resultsByAge\n ]\n for r in async_result:\n r.get()\n self.progressBar['value'] += 1\n self.PrintProgress()\n\n print('\\r\\033[2KProcess results by video')\n self.PrintProgress()\n def WorkerVideo(resultsByVideo, videoId, step, withVideo):\n aggSize = len(resultsByVideo)\n if aggSize > 0:\n dumpPath = PATH_TO_STATISTIC_RESULTS+'/videos/{}.dump'.format(videoId)\n ac = AggregateContainer.Load(dumpPath, step, aggSize)\n if ac.isNew:\n aggResult = sum(resultsByVideo)\n aggResult.StoreVisionDistance(PATH_TO_STATISTIC_RESULTS + \\\n '/videos/'\n '{}_visionDistance'.format(\n videoId))\n # DEBUG\n aggResult.StorePositions(\n PATH_TO_STATISTIC_RESULTS+'/videos/{}'.format(videoId),\n vmax=None\n )\n aggResult.StoreVision(\n PATH_TO_STATISTIC_RESULTS+'/videos/{}_vision'.format(videoId)\n )\n aggResult.StoreAngularVelocity(\n PATH_TO_STATISTIC_RESULTS+'/videos/{}.txt'.format(videoId)\n )\n aggResult.StoreOrthodromicDistance(\n PATH_TO_STATISTIC_RESULTS +\n '/videos/{}_orthoDist.txt'.format(videoId)\n )\n for segmentSize in [1, 2, 3]:\n aggResult.StoreAngularVelocityPerSegment(\n segmentSize=segmentSize,\n filePath=PATH_TO_STATISTIC_RESULTS+'/videos/' +\n '{}_angVelPerSegment_{}s.txt'.format(videoId,\n segmentSize)\n )\n if withVideo:\n # aggResult.WriteVideo(\n aggResult.WriteVideoVision(\n PATH_TO_STATISTIC_RESULTS+'/videos/{}.mkv'.format(videoId),\n fps=5,\n segmentSize=1/5,\n widthVideo=960,\n heightVideo=480,\n widthEqui=100,\n heightEqui=50,\n horizontalFoVAngle=110,\n verticalFoVAngle=90\n )\n # vmax = max(vmax,\n # aggResult.aggPositionMatrix.max())\n Store(ac, dumpPath)\n return None\n\n async_result = [\n pool.apipe(\n WorkerVideo,\n self.resultsByVideo[videoId], videoId, step, withVideo\n ) for videoId in self.resultsByVideo\n ]\n for r in async_result:\n r.get()\n self.progressBar['value'] += 1\n self.PrintProgress()\n\n print('\\r\\033[2KProcess results total')\n self.PrintProgress()\n aggSize = len(self.resultsContainers)\n dumpPath = PATH_TO_STATISTIC_RESULTS+'/total/{}.dump'.format('total')\n ac = AggregateContainer.Load(dumpPath, step, aggSize)\n if ac.isNew:\n aggTotal = sum(self.resultsContainers.values())\n aggTotal.StorePositions(\n PATH_TO_STATISTIC_RESULTS+'/total/{}'.format('total'),\n vmax=None\n )\n aggTotal.StoreVision(\n PATH_TO_STATISTIC_RESULTS+'/total/{}_vision'.format('total')\n )\n aggTotal.StoreAngularVelocity(\n PATH_TO_STATISTIC_RESULTS+'/total/{}.txt'.format('total')\n )\n aggTotal.StoreOrthodromicDistance(\n PATH_TO_STATISTIC_RESULTS+'/total/{}.txt'.format('orthoDist')\n )\n for segmentSize in [1, 2, 3]:\n aggTotal.StoreAngularVelocityPerSegment(\n segmentSize=segmentSize,\n filePath=PATH_TO_STATISTIC_RESULTS+'/total/' +\n '{}_angVelPerSegment_{}s.txt'.format('total',\n segmentSize),\n useRealTimestamp=False\n )\n Store(ac, dumpPath)\n self.progressBar['value'] += 1\n self.PrintProgress()\n\n # def worker(videoId, processedResult):\n # processedResult.WriteVideo(\n # 
'results/statistics/videos/{}.mkv'.format(videoId),\n # fps=5,\n # segmentSize=1/5,\n # width=480,\n # height=480\n # )\n # return None\n # async_result = [\n # pool.apipe(\n # worker,\n # videoId, aggrVideoResults[videoId]\n # ) for videoId in aggrVideoResults\n # ]\n # for r in async_result:\n # r.get()\n # self.progressBar['value'] += 1\n # pool.close()\n # pool.join()\n # del self.workingThread\n\n listProcessedResult = list()\n for resultId in self.resultsContainers:\n rc = self.resultsContainers[resultId]\n videoId = resultId.split('_')[-1]\n userId = resultId[:-len(videoId)-1]\n listProcessedResult.append((userId, videoId,\n rc.GetProcessedResult(step)))\n ProcessedResult.StoreAngVelStats(listProcessedResult,\n PATH_TO_STATISTIC_RESULTS +\n '/total/stats')\n self.done = True\n self.progressBar = None\n self.workingThread = None\n if self.doneCallback is not None:\n self.doneCallback()",
"def test_caching(self):\n dev = qml.device(\"default.qubit\", wires=2)\n\n def circuit(x, c=None):\n qml.RX(x, wires=0)\n\n for i in range(c.val):\n qml.RX(x, wires=i)\n\n return qml.expval(qml.PauliZ(0))\n\n circuit = qml.QNode(circuit, dev, cache=True)\n\n # first evaluation\n circuit(0, c=0)\n # check structure\n assert len(circuit.queue) == 1\n\n # second evaluation\n circuit(0, c=1)\n # check structure\n assert len(circuit.queue) == 1",
"def _perform_wait_any(self):\n\n if len(self._results_waiting) > 0:\n return self._extract_result()\n\n all_results = []\n for client in itervalues(self._dispatcher_name_to_client):\n all_results.extend(client.get_results_all_queues())\n\n if len(all_results) > 0:\n\n for task in all_results:\n self._results_waiting.append(task)\n else:\n\n # if the queues are all empty, wait some time for things to\n # fill up. constantly pinging dispatch servers wastes their\n # time, and inhibits task server communication. the good\n # thing about queues_to_check is that it simultaneously\n # grabs information for any queues with results => one\n # client query can yield many results.\n\n # TBD: We really need to parameterize the time-out value,\n # but it isn't clear how to propagate this though the\n # solver manager interface layers.\n time.sleep(0.01)",
"def polling_results(self, script):\n if self.ret.get(\"status\", None) in [\"ok\", \"up\"]:\n results = self.calculation_results(script)\n\n else:\n results = False\n results_is_poll = self.is_polling(results)\n return results_is_poll",
"def _evaluate_parallel(self, system, state, t, dt):\n # Set the arguments to the stepper calls\n self._pool.set_state(state)\n self._pool.set_args('all', (t, dt))\n\n # Notify the threads to process\n self._pool.notify()\n\n # Access the pool data, blocking until synchronized\n self._pool.synchronize()\n\n # Combine the partially extrapolated thread worker results\n return sum([output.read() for output in self._outputs])",
"def fetch_and_accumulate(self):\n self.exercise.fetch_operations()\n for operation in self.exercise.operations:\n self.subject_accumulator.accumulate(operation)\n self.option_accumulator.accumulate(operation)",
"def get_results(self, check):\n self.lock.acquire()\n res = self.results[check]\n self.lock.release()\n return res",
"def _cached_call(self, args, kwargs, shelving=False):\n func_id, args_id = self._get_output_identifiers(*args, **kwargs)\n metadata = None\n msg = None\n\n # Whether or not the memorized function must be called\n must_call = False\n\n if self._verbose >= 20:\n logging.basicConfig(level=logging.INFO)\n _, name = get_func_name(self.func)\n location = self.store_backend.get_cached_func_info([func_id])[\n 'location']\n _, signature = format_signature(self.func, *args, **kwargs)\n\n self.info(\n dedent(\n f\"\"\"\n Querying {name} with signature\n {signature}.\n\n (argument hash {args_id})\n\n The store location is {location}.\n \"\"\"\n )\n )\n\n # Compare the function code with the previous to see if the\n # function code has changed and check if the results are present in\n # the cache.\n if self._is_in_cache_and_valid([func_id, args_id]):\n try:\n t0 = time.time()\n if self._verbose:\n msg = _format_load_msg(func_id, args_id,\n timestamp=self.timestamp,\n metadata=metadata)\n\n if not shelving:\n # When shelving, we do not need to load the output\n out = self.store_backend.load_item(\n [func_id, args_id],\n msg=msg,\n verbose=self._verbose)\n else:\n out = None\n\n if self._verbose > 4:\n t = time.time() - t0\n _, name = get_func_name(self.func)\n msg = '%s cache loaded - %s' % (name, format_time(t))\n print(max(0, (80 - len(msg))) * '_' + msg)\n except Exception:\n # XXX: Should use an exception logger\n _, signature = format_signature(self.func, *args, **kwargs)\n self.warn('Exception while loading results for '\n '{}\\n {}'.format(signature, traceback.format_exc()))\n\n must_call = True\n else:\n if self._verbose > 10:\n _, name = get_func_name(self.func)\n self.warn('Computing func {0}, argument hash {1} '\n 'in location {2}'\n .format(name, args_id,\n self.store_backend.\n get_cached_func_info([func_id])['location']))\n must_call = True\n\n if must_call:\n out, metadata = self.call(*args, **kwargs)\n if self.mmap_mode is not None:\n # Memmap the output at the first call to be consistent with\n # later calls\n if self._verbose:\n msg = _format_load_msg(func_id, args_id,\n timestamp=self.timestamp,\n metadata=metadata)\n out = self.store_backend.load_item([func_id, args_id], msg=msg,\n verbose=self._verbose)\n\n return (out, args_id, metadata)",
"def contract(self, jobs, result):\r\n for j in jobs:\r\n WorkerPool.put(self, j)\r\n\r\n r = []\r\n for i in xrange(len(jobs)):\r\n r.append(result.get())\r\n\r\n return r",
"def evaluate(self):\n self._evaluation = self._condition()\n if self.poll():\n self._target(self)\n if self._count >= 0:\n self._count -= 1\n self._previous_evaluation = self._evaluation",
"def result_group_cached(group_id, failures=False, wait=0, count=None, broker=None):\n if not broker:\n broker = get_broker()\n start = time()\n if count:\n while True:\n if (\n count_group_cached(group_id) == count\n or wait\n and (time() - start) * 1000 >= wait > 0\n ):\n break\n sleep(0.01)\n while True:\n group_list = broker.cache.get(f\"{broker.list_key}:{group_id}:keys\")\n if group_list:\n result_list = []\n for task_key in group_list:\n task = SignedPackage.loads(broker.cache.get(task_key))\n if task[\"success\"] or failures:\n result_list.append(task[\"result\"])\n return result_list\n if (time() - start) * 1000 >= wait >= 0:\n break\n sleep(0.01)",
"def _call(self, merged_args, refresh=False):\n\n cache_key = self._build_cache_key(*merged_args[1:])\n\n if refresh or not self.is_using_cache():\n output = None\n else:\n output = self._cache_get(cache_key)\n\n # First viewlet execution, forced refresh or cache timeout\n if output is None:\n output = self.viewlet_func(*merged_args)\n if self.is_using_cache():\n self._cache_set(cache_key, output)\n\n return output"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Parses alternates, like 'a{b,c}d{e,f}' in a pkgdesc str. Returns ['a', ('b','c'), 'd', ('e','f')] for the given example.
|
def parse_alternates(pkgdesc):
    assert(isinstance(pkgdesc, str))
    parsed_pkgdesc = []
    while len(pkgdesc) > 0:
        i = pkgdesc.find('{')
        if i == -1:
            # No more alternate groups; the rest is a literal chunk.
            parsed_pkgdesc.append(pkgdesc)
            break
        # Literal text before the '{'.
        parsed_pkgdesc.append(pkgdesc[:i])
        pkgdesc = pkgdesc[i+1:]
        # Comma-separated alternates up to the matching '}'.
        i = pkgdesc.find('}')
        parsed_pkgdesc.append(tuple(pkgdesc[:i].split(',')))
        pkgdesc = pkgdesc[i+1:]
    return parsed_pkgdesc
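
A minimal REPL sketch of parse_alternates, assuming the function above has been loaded; the first call reproduces the example from the description, and the second shows a pkgdesc with no alternates.

>>> parse_alternates('a{b,c}d{e,f}')
['a', ('b', 'c'), 'd', ('e', 'f')]
>>> parse_alternates('plainpkg')
['plainpkg']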
|
[
"def gen_alternates_recurse(pkgdesc):\n assert(isinstance(pkgdesc, list))\n if len(pkgdesc) <= 1:\n yield ''.join(pkgdesc)\n else:\n prefix = pkgdesc[0]\n alternates = pkgdesc[1]\n pkgdesc = pkgdesc[2:]\n for alt in alternates:\n for x in gen_alternates_recurse(pkgdesc):\n yield prefix + alt + x",
"def parse_alt_text(alt):\n match = re.match(r'([^\\{]*)(\\{(.*)\\})$', alt)\n if match:\n alt = match.group(1)\n args = parse_arglist(match.group(3))\n else:\n args = {}\n\n return alt, args",
"def constructDescription(self, pattern):\n\t\tresult = []\n\t\tfor word in pattern:\n\t\t\tdesc = '';\n\t\t\tif len(word) > 1:\n\t\t\t\tdesc += 'mixed '\n\t\t\t\tindex = 0\n\t\t\t\tfor chars in word:\n\t\t\t\t\tif isinstance(chars, int):\n\t\t\t\t\t\tdesc += 'numeric (len. ' + str(chars) + ')'\n\t\t\t\t\telif chars in self.__languages.keys():\n\t\t\t\t\t\tdesc += self.__languages[chars]\n\t\t\t\t\telse:\n\t\t\t\t\t\tdesc += 'symbol (' + chars + ')'\n\t\t\t\t\tif index < len(word) - 2:\n\t\t\t\t\t\tdesc += ', '\n\t\t\t\t\telif index == len(word) - 2:\n\t\t\t\t\t\tdesc += ' and '\n\t\t\t\t\telse:\n\t\t\t\t\t\tdesc += ' characters'\n\t\t\t\t\tindex += 1\n\t\t\telse:\n\t\t\t\tchars = word[0]\n\t\t\t\tif isinstance(chars, int):\n\t\t\t\t\tdesc += 'length ' + str(chars) + ' numeric'\n\t\t\t\telif chars in self.__languages.keys():\n\t\t\t\t\tdesc += self.__languages[chars] + ' word(s)'\n\t\t\t\telse:\n\t\t\t\t\tdesc += 'symbol(s): ' + chars\n\t\t\tresult.append(desc)\n\t\treturn result",
"def gen_alternates(pkgdesc):\n pkgdesc = parse_alternates(pkgdesc)\n for x in gen_alternates_recurse(pkgdesc):\n yield x",
"def parse_pkgdesc(pkgdesc):\n assert(isinstance(pkgdesc, str))\n # Find version comparisions.\n split_points = [pkgdesc.find(c) for c in '<>']\n split_points = [i for i in split_points if i != -1]\n split_points.sort()\n # Split the str.\n parsed_pkgdesc = []\n j = 0\n for i in split_points:\n parsed_pkgdesc.append(pkgdesc[j:i])\n j = i\n parsed_pkgdesc.append(pkgdesc[j:])\n \n if len(parsed_pkgdesc) == 1:\n # Do not use Dewey-style version comparision. Use glob matching.\n m = re.match('^([A-Za-z0-9_-]+)-([][!a-z0-9*?.-]+?)$', pkgdesc)\n if m:\n return m.groups()\n # Version pattern not found. Match any version.\n return (pkgdesc, '*')\n \n return tuple(parsed_pkgdesc)",
"def parse_pattern(pattern):\n pattern = strip_optional(pattern)\n parts = outer_split(pattern, sep='()')\n if len(parts) % 2 == 1 and not parts[-1]:\n parts = parts[:-1]\n names = [RE_SPLIT.split(p)[1] for p in parts[1::2]]\n parts[1::2] = ['%%(%s)s' % p for p in names]\n return ''.join(parts), names",
"def _split_basic(string):\r\n tuples = []\r\n for word in string.split(','):\r\n # Attempt to split on colon\r\n parts = word.split(':', 2)\r\n key, modifier_fn, explode = parts[0], _identity, False\r\n if len(parts) > 1:\r\n modifier_fn = functools.partial(\r\n _truncate, num_chars=int(parts[1]))\r\n if word[len(word) - 1] == '*':\r\n key = word[:len(word) - 1]\r\n explode = True\r\n tuples.append((key, modifier_fn, explode))\r\n return tuples",
"def _get_tips_from_string(tips_str):\n return [tip.strip() for tip in tips_str.split('*') if tip]",
"def parse_description(description):\n return \"\\n\".join(\n [\n a for a in description.split(\"\\n\")\n if (\"figure::\" not in a) and (\":alt:\" not in a)\n ])",
"def generate_bracketed_list(items): # TODO: rename?\n\t_list = []\n\tfor item in items:\n\t\tif \" \" in item:\n\t\t\titem = \"[[%s]]\" % item\n\t\t_list.append(item)\n\treturn \" \".join(_list)",
"def helper_function_triangle_brackets(triangle_brackets_email):\n\n formatted_triangle_bracket_list = []\n formatted_triangle_bracket_list.append(triangle_brackets_email[1].strip())\n formatted_triangle_bracket_list.append(triangle_brackets_email[2])\n formatted_triangle_bracket_list.append(triangle_brackets_email[0])\n return formatted_triangle_bracket_list",
"def extract_templates_and_params_regex_simple(text: str):\n result = []\n\n for match in NESTED_TEMPLATE_REGEX.finditer(text):\n name, params = match[1], match[2]\n\n # Special case for {{a}}\n if params is None:\n params = []\n else:\n params = params.split('|')\n\n numbered_param_identifiers = iter(range(1, len(params) + 1))\n\n params = OrderedDict(\n arg.split('=', 1)\n if '=' in arg\n else (str(next(numbered_param_identifiers)), arg)\n for arg in params)\n\n result.append((name, params))\n\n return result",
"def get_for_pair_pattern(self, pattern):",
"def parse_fancy_diary(str_: str) -> Tuple[DayRecord, ...]:\n retval: List[DayRecord] = []\n # split string on empty lines to get a list of strings representing days\n days = re.split(r\"\\n[ \\t]*\\n\", str_)\n for day in days:\n # split into header and body (body may be empty)\n # note that splitting into days may have removed terminating new line\n # if there is a holiday but no tags or entries.\n split = re.split(r\"\\n={5,}\\n?\", day, 1)\n date, holidays = _parse_header(split[0])\n # process the body if it exists\n if len(split) == 2:\n tags, entries = _parse_body(split[1])\n retval.append(DayRecord(date, holidays, tags, entries))\n else:\n retval.append(DayRecord(date, holidays, (), ()))\n return tuple(retval)",
"def scan(string):\n words = string.split(' ')\n result = []\n for word in words:\n if word in directions:\n result.append(('direction', word))\n elif word in verbs:\n result.append(('verb', word))\n elif word in stops:\n result.append(('stop', word))\n elif word in nouns:\n result.append(('noun', word))\n elif re.match(r'\\d', word):\n result.append(('number', word))\n else:\n result.append(('error', word))\n \n return result",
"def parse_sample(sample):\n return list(sample.split(\",\"))",
"def _alternative_name_guesses(self, name):\n return [\n name,\n re.sub(r\"([^\\[]+)\\[The\\]\", r\"The \\1\", name).strip(),\n re.sub(r\"([^\\[]+)\\[The\\]\", r\"\\1\", name).strip(),\n re.sub(r\"^The(.*)\", r\"\\1\", name).strip(),\n re.sub(r\"\\([^\\)]+\\)\", r\"\", name).strip(),\n re.sub(r\"United Kingdom\", r\"UK\", name).strip(),\n ]",
"def collect_string_fields(format_string) -> Iterable[Optional[str]]:\n formatter = string.Formatter()\n try:\n parseiterator = formatter.parse(format_string)\n for result in parseiterator:\n if all(item is None for item in result[1:]):\n # not a replacement format\n continue\n name = result[1]\n nested = result[2]\n yield name\n if nested:\n yield from collect_string_fields(nested)\n except ValueError as exc:\n # Probably the format string is invalid.\n if exc.args[0].startswith(\"cannot switch from manual\"):\n # On Jython, parsing a string with both manual\n # and automatic positions will fail with a ValueError,\n # while on CPython it will simply return the fields,\n # the validation being done in the interpreter (?).\n # We're just returning two mixed fields in order\n # to trigger the format-combined-specification check.\n yield \"\"\n yield \"1\"\n return\n raise IncompleteFormatString(format_string)",
"def separate_list_types(s: str) -> List[str]:\n\n types = split_high_level(\n s,\n ',',\n [\"(\", \"[\", \"{\"],\n [\")\", \"]\", \"}\"]\n )\n return types"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Recurse through the already-parsed pkgdesc list, generating alternates.
|
def gen_alternates_recurse(pkgdesc):
    assert(isinstance(pkgdesc, list))
    if len(pkgdesc) <= 1:
        # Base case: at most one literal chunk left.
        yield ''.join(pkgdesc)
    else:
        # The parsed list alternates literal prefixes and tuples of choices.
        prefix = pkgdesc[0]
        alternates = pkgdesc[1]
        pkgdesc = pkgdesc[2:]
        for alt in alternates:
            # Combine each choice with every expansion of the remainder.
            for x in gen_alternates_recurse(pkgdesc):
                yield prefix + alt + x
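
A brief REPL sketch, assuming gen_alternates_recurse above has been loaded; the input is the parsed form of 'a{b,c}d{e,f}' as produced by parse_alternates in the earlier entry.

>>> list(gen_alternates_recurse(['a', ('b', 'c'), 'd', ('e', 'f')]))
['abde', 'abdf', 'acde', 'acdf']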
|
[
"def gen_alternates(pkgdesc):\n pkgdesc = parse_alternates(pkgdesc)\n for x in gen_alternates_recurse(pkgdesc):\n yield x",
"def parse_alternates(pkgdesc):\n assert(isinstance(pkgdesc, str))\n parsed_pkgdesc = []\n while len(pkgdesc) > 0:\n i = pkgdesc.find('{')\n if i == -1:\n parsed_pkgdesc.append(pkgdesc)\n break\n parsed_pkgdesc.append(pkgdesc[:i])\n pkgdesc = pkgdesc[i+1:]\n i = pkgdesc.find('}')\n parsed_pkgdesc.append(tuple(pkgdesc[:i].split(',')))\n pkgdesc = pkgdesc[i+1:]\n return parsed_pkgdesc",
"def parse_pkgdesc(pkgdesc):\n assert(isinstance(pkgdesc, str))\n # Find version comparisions.\n split_points = [pkgdesc.find(c) for c in '<>']\n split_points = [i for i in split_points if i != -1]\n split_points.sort()\n # Split the str.\n parsed_pkgdesc = []\n j = 0\n for i in split_points:\n parsed_pkgdesc.append(pkgdesc[j:i])\n j = i\n parsed_pkgdesc.append(pkgdesc[j:])\n \n if len(parsed_pkgdesc) == 1:\n # Do not use Dewey-style version comparision. Use glob matching.\n m = re.match('^([A-Za-z0-9_-]+)-([][!a-z0-9*?.-]+?)$', pkgdesc)\n if m:\n return m.groups()\n # Version pattern not found. Match any version.\n return (pkgdesc, '*')\n \n return tuple(parsed_pkgdesc)",
"def vuln_alternate_iterator(filename):\n for (pkgdesc, vulntype, vulnurl) in vuln_iterator(filename):\n for x in gen_alternates(pkgdesc):\n yield (x, pkgdesc, vulntype, vulnurl)",
"def gen_task_desc(**kwargs):\n logger = logging.getLogger(__name__)\n\n suppressempty = kwargs[\"suppressempty\"]\n blend = kwargs[\"blend_info\"][\"blend\"]\n tasksprefix = kwargs[\"blend_info\"][\"tasksprefix\"]\n blend_dependencies = kwargs[\"blend_dependencies\"]\n\n\n task_desc_path = \"taskdesc-sec.template\"\n logger.debug(\"Opening file {0} to write\".format(task_desc_path))\n with open(task_desc_path,'w') as fout:\n\n for task in sorted(blend_dependencies.keys()): \n\n if blend_dependencies[task]['Leaf'] == 'false':\n continue\n\n if suppressempty and blend_dependencies[task][\"haspackages\"] == 0:\n if blend_dependencies[task]['test_always_lang']:\n logger.debug(\"Print empty task {0} because Test-always-lang is set\\n\".format(task))\n else:\n logger.debug(\"The metapackage {2} will not be created because {0} dependant are in the pool and suppressempty was set {1}\\n\".format(blend_dependencies[task][\"haspackages\"], suppressempty, task))\n continue\n\n fout.write(\"Task: {0}-{1}\\n\".format(tasksprefix, task))\n fout.write(\"Section: {0}\\n\".format(blend));\n fout.write(\"Description: {0}\\n\".format(blend_dependencies[task][\"description\"]))\n fout.write(\"{0}\".format(blend_dependencies[task][\"long_description\"])) #Already contains a newline\n fout.write(\"Relevance: 10\\n\")\n\n if blend_dependencies[task][\"Enhances\"]:\n fout.write(\"Enhances: {0}\\n\".format(blend_dependencies[task][\"Enhances\"]))\n\n if blend_dependencies[task][\"metapackage\"]:\n #No use listing a metapackage as a key package, if no metapackage exist.\n fout.write(\"Key: \\n\");\n fout.write(\" {0}-{1}\\n\".format(tasksprefix, task))\n\n fout.write(\"Packages: list\\n \")\n for header in [\"Depends\", \"Recommends\"]:\n if not blend_dependencies[task][header]:\n continue \n fout.write(\"{0}\".format(\"\\n \".join(sorted(blend_dependencies[task][header]))))\n fout.write(\"\\n\")\n\n fout.write(\"\\n\")",
"def dpkgPackages(cls, packager):\n # ask {dpkg} for my options\n alternatives = sorted(packager.alternatives(group=cls), reverse=True)\n # the supported versions\n versions = Default,\n # go through the versions\n for version in versions:\n # scan through the alternatives\n for name in alternatives:\n # if it is match\n if name.startswith(version.flavor):\n # build an instance and return it\n yield version(name=name)\n\n # out of ideas\n return",
"def add_missing_descriptions(self, desclist):\n if self.showinfo:\n self.page.dialog_data = {'descdict': dict(desclist), 'cmddict': self.cmddict}\n if show_dialog(self.page, DcCompleteDialog):\n desclist = list(self.page.dialog_data.items())\n for command, description in desclist:\n if command not in self.cmddict or not self.cmddict[command]:\n self.cmddict[command] = description\n self.desclist = desclist\n return desclist",
"def get_pkg_description(pkg):\n\n if hasattr(pkg, 'description'):\n return pkg.description or ''\n if hasattr(pkg, 'installed'):\n installedpkg = pkg.installed\n if installedpkg:\n # Use installed version description\n return installedpkg.description or ''\n\n # Get first description found in all versions.\n desc = ''\n for ver in pkg.versions:\n if ver.description:\n desc = ver.description\n break\n return desc\n\n return ''",
"def assert_all_get_desc_work(self):\n for info in self.kb.get_all_findings():\n if isinstance(info, InfoSet):\n info.get_desc()",
"def get_package_info(pkg_name):\n global package_info\n if pkg_name in package_info:\n return package_info.get(pkg_name)\n else:\n try:\n yaml_stream = check_output(['apt-cache','show',pkg_name])\n except:\n print \"Unable to find info for package: '%s'\" % pkg_name\n package_info[pkg_name] = {}\n return {}\n d = Deb822(yaml_stream)\n package_info[pkg_name] = d\n return d",
"def format_desc(self):\n return '\\nDescription:\\n{}\\n'.format(\n C(\n FormatBlock(get_pkg_description(self.package)).format(\n width=76,\n newlines=True,\n prepend=' '\n ),\n fore='green'\n )\n )",
"def dict_2_list(desc):\n\n # this function is kinda pointless if you don't give it\n # a dictionary\n assert isinstance(\n desc, dict), \"Stop right there, only dictionaries are allowed in these parts\"\n\n # generates an initial list from the coefficients\n desc_ls = list(desc['coefficients' + '-' + str(cd)])\n desc_ls.append(desc['name' +'-' +str(cd)])\n # determines whether or not there are peaks in the datasent\n if 'peakSIGMA'+'-' +str(cd) in desc.keys():\n # iterates over the number of peaks\n for i in np.arange(len(desc['peakSIGMA' +'-' +str(cd)])):\n # appends peak descriptors to the list in order of peak number\n desc_ls.append(desc['peakLocation(V)' +'-' + str(cd)][i])\n desc_ls.append(desc['peakHeight(dQdV)' +'-' + str(cd)][i])\n desc_ls.append(desc['peakSIGMA'+'-' + str(cd)][i])\n else:\n pass\n #print('here is the desc_ls with peakloc in the dict_2_list definition: ')\n #print(desc_ls)\n return desc_ls",
"def __iter_entries(self, last=False, ordered=False, pubs=EmptyI):\n\n self.load()\n if ordered:\n stems = self.pkg_names(pubs=pubs)\n else:\n stems = (\n (pub, stem)\n for pub in self.publishers(pubs=pubs)\n for stem in self.__data[pub]\n )\n\n if last:\n return (\n (pub, stem, self.__data[pub][stem][-1])\n for pub, stem in stems\n )\n\n if ordered:\n return (\n (pub, stem, entry)\n for pub, stem in stems\n for entry in reversed(self.__data[pub][stem])\n )\n return (\n (pub, stem, entry)\n for pub, stem in stems\n for entry in self.__data[pub][stem]\n )",
"def load_multiple_versions(app_path,package):\n for sublabel in package.__all__:\n submodule = get_module('%s.%s' % (package.__name__,sublabel))\n load_descriptors(app_path,submodule)",
"def parse_package(string_package):\n res = {'dsc': string_package}\n current_field = None\n for line in string_package.splitlines():\n if not line.startswith(' '):\n field, contents = line.split(\":\", 1)\n current_field = field\n contents = contents.strip()\n if field in [\"Package\", \"Source\"]:\n res[\"Source\"] = contents\n # Single line string\n elif field in [\"Format\", \"Version\", \"Maintainer\",\n \"Homepage\", \"Vcs-Browser\", \"Testsuite\",\n \"Standards-Version\", \"Section\", \"Priority\",\n \"Directory\"]:\n res[field] = contents\n elif field in [\"Vcs-Arch\", \"Vcs-Bzr\", \"Vcs-Cvs\", \"Vcs-Darcs\",\n \"Vcs-Git\", \"Vcs-Hg\", \"Vcs-Mtn\", \"Vcs-Svn\"]:\n res[field] = contents\n res[\"Vcs\"] = contents\n # Single line comma separated\n elif field in [\"Binary\", \"Uploaders\", \"Testsuite-Triggers\",\n \"Build-Depends\", \"Build-Depends-Indep\",\n \"Build-Depends-Arch\", \"Build-Conflicts\",\n \"Build-Conflicts-Indep\", \"Build-Conflicts-Arch\"]:\n res[field] = list(map(lambda x: x.strip(),\n contents.split(\",\")))\n # Single line space separated\n elif field in [\"Architecture\"]:\n res[field] = list(map(lambda x: x.strip(),\n contents.split(\" \")))\n elif field in [\"Dgit\"]:\n res[field] = contents.split(\" \")[0]\n elif field in [\"Package-List\", \"Checksums-Sha1\",\n \"Checksums-Sha256\", \"Files\"]:\n res[field] = []\n else:\n contents = line.strip()\n if current_field == \"Package-List\":\n contents = contents.split(\" \")\n contents = {\n \"name\": contents[0],\n \"type\": contents[1],\n \"section\": contents[2],\n \"priority\": contents[3],\n \"arch\": [] if len(contents) < 5 else\n contents[4].split(\"=\")[1].split(\",\")}\n res[current_field].append(contents)\n elif current_field in [\"Files\", \"Checksums-Sha1\",\n \"Checksums-Sha256\"]:\n contents = contents.split(\" \")\n contents = {\n \"checksum\": contents[0],\n \"size\": contents[1],\n \"filename\": contents[2]}\n res[current_field].append(contents)\n return res",
"def vuln_pkg_matcher_iterator(filename, pkg_list, unmatched_callback=None):\n assert(isinstance(pkg_list, list))\n for (pkgdesc, orig_pkgdesc, vulntype, vulnurl) in vuln_alternate_iterator(filename):\n pkgdesc = parse_pkgdesc(pkgdesc)\n pkgnames = fnmatch.filter(pkg_list, pkgname_filter(pkgdesc[0]))\n for pkgname in pkgnames:\n yield (pkgname, pkgdesc[1:], orig_pkgdesc, vulntype, vulnurl)\n if len(pkgnames) == 0 and unmatched_callback != None:\n unmatched_callback((pkgdesc, orig_pkgdesc, vulntype, vulnurl))",
"def get_package_info(package_name):\n log_helper = logging_helper.logging_helper.Logger()\n log_helper.logger.debug(\"Getting additional package info for %s\" % package_name)\n command = \"smart info \" + package_name\n output = shell_ops.run_command(command)\n description = ''\n version = ''\n if output.count('Name:') > 1:\n # Multiple versions available. Narrow down smart info scope to get accurate info for the current version\n response = shell_ops.run_command(\"smart query --installed \" + package_name + \" --show-format=$version\")\n version = response[response.index('[100%]') + 6:response.index('@')].replace('\\n', '')\n if 'not' in version: # Workaround for \"(not installed)\" case\n version = 'Unknown'\n\n output = output[output.rindex(version):]\n\n if 'Name' in output:\n if output.index('Name') > output.index('Description'):\n # Additional entry after description\n description = output[output.rindex(\"Description:\") + 14: output.index(\"Name\")].replace('\\n', '').strip()\n else:\n description = output[output.rindex(\"Description:\") + 14:].replace('\\n', '').strip()\n else:\n version = output[output.index(\"Version:\") + 9: output.index(\"Priority:\")].replace('\\n', '')\n version = version[:version.index('@')]\n if 'not' in version: # Workaround for \"(not installed)\" case\n version = 'Unknown'\n description = output[output.rindex(\"Description:\") + 14:].replace('\\n', '').strip()\n\n url = output[output.index(\"Reference URLs:\") + 16: output.index(\"Flags:\")].replace('\\n', '')\n my_license = output[output.index(\"License:\") + 9: output.index(\"Installed Size:\")].replace('\\n', '')\n size = output[output.index(\"Installed Size:\") + 16: output.index(\"Reference URLs:\")].replace('\\n', '')\n group = output[output.index(\"Group:\") + 7: output.index(\"License:\")].replace('\\n', '')\n summary = output[output.index(\"Summary:\") + 9: output.index(\"Description:\")].replace('\\r\\n', '')\n\n # escape special JSON charater (\") if any in description and summary\n summary = summary.replace('\"', '\\\\\"')\n description = description.replace('\"', '\\\\\"')\n\n package = {\n 'url': url,\n 'license': my_license,\n 'size': size,\n 'description': description,\n 'summary': summary,\n 'group': group,\n 'version': version\n }\n log_helper.logger.debug(\"Returning package info: \" + str(package))\n return json.dumps(package)",
"def load(cls, setup: str = \"default\") -> dict():\n\n # TODO is this necessary? idk it just feels like it will be easier to change later\n if setup != \"default\":\n print(\"ERROR: non-default setup for descs not yet implemented\")\n # TODO actual exception\n return False\n\n if DescDataHandler.descs_loaded:\n print(\"ERROR: descs already loaded\")\n # TODO exception\n return False\n\n # begin desc definition --------------------\n # descs = Dict[str, Desc] FIXME ahhhhhhhh return type\n descs = dict()\n\n def include_desc(key: str, text: str, has_condition: bool = False):\n \"\"\"\n This is a special version of include to save me time and avoid repeating the same information. This version\n handles instantiating the desc itself.\n\n :param key: the key for the new desc\n :param text: the text for the new desc\n :param has_condition: whether the text has condition(s) in it. Defaults to False\n :return:\n \"\"\"\n\n if key in descs:\n print(\"ERROR: attempt to add room with duplicate key\", key)\n # TODO actually throw exception or something\n else:\n descs[key] = Desc(key, text, has_condition)\n\n # end include method\n\n # utility / basic / critical Descs\n include_desc(\"blank_desc\", \"\")\n include_desc(\"default_main_text_desc\", \"This is the default main text.\")\n include_desc(\"default_event_text_desc\", \"\")\n include_desc(\"default_prompt_text_desc\", \"What will you do?\")\n include_desc(\"didnt_understand_desc\", \"I didn't understand that.\")\n\n include_desc(\"initial_main_text_desc\", \"This the initial main text.\")\n include_desc(\"initial_event_text_desc\", \"\")\n include_desc(\"initial_prompt_text_desc\", \"\")\n\n # \"specific\" descs - rooms, events, etc FOR TESTS --------------------\n include_desc(\"test_rm_1_desc\", \"This is the main text for test room 1.\")\n include_desc(\"test_rm_2_desc\", \"This is the main text for test room 2.\")\n include_desc(\"test_rm_3_desc\", \"This is the main text for test room 3.\")\n\n include_desc(\"test_ev_1_to_2_desc\", \"This is the event text for moving from test room 1 to test room 2.\")\n include_desc(\"test_ev_2_to_3_desc\", \"This is the event text for moving from test room 2 to test room 3.\")\n include_desc(\"test_ev_3_to_1_desc\", \"This is the event text for moving from test room 3 to test room 1.\")\n include_desc(\"test_ev_dance_desc\",\n \"You do a jig 'cuz your code works. [current_location == test_rm_3 :Yay!|No, you're in <current_location>]\", True)\n include_desc(\"test_look_at_toggle_desc\",\n \"Value of toggle: <test_toggle>, [test_toggle == True:cool!|lame.]\", True)\n include_desc(\"test_rm_3_toggle_desc\",\n \"room 3 AND toggle? [current_location == test_rm_3,test_toggle == True :Both are true. ]Nice.\"\n , True)\n\n DescDataHandler.descs_loaded = True\n return descs",
"def parse_description_into_dictionary(description):\n out_dict = {}\n pieces = description.split('--')\n for piece in pieces:\n content = piece.split('__')\n if len(content) == 2: #Not everything in the description meets the dictionary format.\n out_dict.update({content[0]: content[1]})\n return out_dict"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Yields all possible alternates from the pkgdesc str.
|
def gen_alternates(pkgdesc):
pkgdesc = parse_alternates(pkgdesc)
for x in gen_alternates_recurse(pkgdesc):
yield x
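
A short sketch tying the two helpers together, assuming parse_alternates and gen_alternates_recurse from the earlier entries are in scope; the package name is a placeholder.

>>> list(gen_alternates('foo{1,2}-bar'))
['foo1-bar', 'foo2-bar']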
|
[
"def gen_alternates_recurse(pkgdesc):\n assert(isinstance(pkgdesc, list))\n if len(pkgdesc) <= 1:\n yield ''.join(pkgdesc)\n else:\n prefix = pkgdesc[0]\n alternates = pkgdesc[1]\n pkgdesc = pkgdesc[2:]\n for alt in alternates:\n for x in gen_alternates_recurse(pkgdesc):\n yield prefix + alt + x",
"def parse_alternates(pkgdesc):\n assert(isinstance(pkgdesc, str))\n parsed_pkgdesc = []\n while len(pkgdesc) > 0:\n i = pkgdesc.find('{')\n if i == -1:\n parsed_pkgdesc.append(pkgdesc)\n break\n parsed_pkgdesc.append(pkgdesc[:i])\n pkgdesc = pkgdesc[i+1:]\n i = pkgdesc.find('}')\n parsed_pkgdesc.append(tuple(pkgdesc[:i].split(',')))\n pkgdesc = pkgdesc[i+1:]\n return parsed_pkgdesc",
"def vuln_alternate_iterator(filename):\n for (pkgdesc, vulntype, vulnurl) in vuln_iterator(filename):\n for x in gen_alternates(pkgdesc):\n yield (x, pkgdesc, vulntype, vulnurl)",
"def parse_pkgdesc(pkgdesc):\n assert(isinstance(pkgdesc, str))\n # Find version comparisions.\n split_points = [pkgdesc.find(c) for c in '<>']\n split_points = [i for i in split_points if i != -1]\n split_points.sort()\n # Split the str.\n parsed_pkgdesc = []\n j = 0\n for i in split_points:\n parsed_pkgdesc.append(pkgdesc[j:i])\n j = i\n parsed_pkgdesc.append(pkgdesc[j:])\n \n if len(parsed_pkgdesc) == 1:\n # Do not use Dewey-style version comparision. Use glob matching.\n m = re.match('^([A-Za-z0-9_-]+)-([][!a-z0-9*?.-]+?)$', pkgdesc)\n if m:\n return m.groups()\n # Version pattern not found. Match any version.\n return (pkgdesc, '*')\n \n return tuple(parsed_pkgdesc)",
"def constructDescription(self, pattern):\n\t\tresult = []\n\t\tfor word in pattern:\n\t\t\tdesc = '';\n\t\t\tif len(word) > 1:\n\t\t\t\tdesc += 'mixed '\n\t\t\t\tindex = 0\n\t\t\t\tfor chars in word:\n\t\t\t\t\tif isinstance(chars, int):\n\t\t\t\t\t\tdesc += 'numeric (len. ' + str(chars) + ')'\n\t\t\t\t\telif chars in self.__languages.keys():\n\t\t\t\t\t\tdesc += self.__languages[chars]\n\t\t\t\t\telse:\n\t\t\t\t\t\tdesc += 'symbol (' + chars + ')'\n\t\t\t\t\tif index < len(word) - 2:\n\t\t\t\t\t\tdesc += ', '\n\t\t\t\t\telif index == len(word) - 2:\n\t\t\t\t\t\tdesc += ' and '\n\t\t\t\t\telse:\n\t\t\t\t\t\tdesc += ' characters'\n\t\t\t\t\tindex += 1\n\t\t\telse:\n\t\t\t\tchars = word[0]\n\t\t\t\tif isinstance(chars, int):\n\t\t\t\t\tdesc += 'length ' + str(chars) + ' numeric'\n\t\t\t\telif chars in self.__languages.keys():\n\t\t\t\t\tdesc += self.__languages[chars] + ' word(s)'\n\t\t\t\telse:\n\t\t\t\t\tdesc += 'symbol(s): ' + chars\n\t\t\tresult.append(desc)\n\t\treturn result",
"def dpkgPackages(cls, packager):\n # ask {dpkg} for my options\n alternatives = sorted(packager.alternatives(group=cls), reverse=True)\n # the supported versions\n versions = Default,\n # go through the versions\n for version in versions:\n # scan through the alternatives\n for name in alternatives:\n # if it is match\n if name.startswith(version.flavor):\n # build an instance and return it\n yield version(name=name)\n\n # out of ideas\n return",
"def parse_description(description):\n return \"\\n\".join(\n [\n a for a in description.split(\"\\n\")\n if (\"figure::\" not in a) and (\":alt:\" not in a)\n ])",
"def vuln_pkg_matcher_iterator(filename, pkg_list, unmatched_callback=None):\n assert(isinstance(pkg_list, list))\n for (pkgdesc, orig_pkgdesc, vulntype, vulnurl) in vuln_alternate_iterator(filename):\n pkgdesc = parse_pkgdesc(pkgdesc)\n pkgnames = fnmatch.filter(pkg_list, pkgname_filter(pkgdesc[0]))\n for pkgname in pkgnames:\n yield (pkgname, pkgdesc[1:], orig_pkgdesc, vulntype, vulnurl)\n if len(pkgnames) == 0 and unmatched_callback != None:\n unmatched_callback((pkgdesc, orig_pkgdesc, vulntype, vulnurl))",
"def _format_description(ctx: click.Context) -> ty.Generator[str, None, None]:\n help_string = ctx.command.help or ctx.command.short_help\n if help_string:\n yield from _format_help(help_string)",
"def gen_task_desc(**kwargs):\n logger = logging.getLogger(__name__)\n\n suppressempty = kwargs[\"suppressempty\"]\n blend = kwargs[\"blend_info\"][\"blend\"]\n tasksprefix = kwargs[\"blend_info\"][\"tasksprefix\"]\n blend_dependencies = kwargs[\"blend_dependencies\"]\n\n\n task_desc_path = \"taskdesc-sec.template\"\n logger.debug(\"Opening file {0} to write\".format(task_desc_path))\n with open(task_desc_path,'w') as fout:\n\n for task in sorted(blend_dependencies.keys()): \n\n if blend_dependencies[task]['Leaf'] == 'false':\n continue\n\n if suppressempty and blend_dependencies[task][\"haspackages\"] == 0:\n if blend_dependencies[task]['test_always_lang']:\n logger.debug(\"Print empty task {0} because Test-always-lang is set\\n\".format(task))\n else:\n logger.debug(\"The metapackage {2} will not be created because {0} dependant are in the pool and suppressempty was set {1}\\n\".format(blend_dependencies[task][\"haspackages\"], suppressempty, task))\n continue\n\n fout.write(\"Task: {0}-{1}\\n\".format(tasksprefix, task))\n fout.write(\"Section: {0}\\n\".format(blend));\n fout.write(\"Description: {0}\\n\".format(blend_dependencies[task][\"description\"]))\n fout.write(\"{0}\".format(blend_dependencies[task][\"long_description\"])) #Already contains a newline\n fout.write(\"Relevance: 10\\n\")\n\n if blend_dependencies[task][\"Enhances\"]:\n fout.write(\"Enhances: {0}\\n\".format(blend_dependencies[task][\"Enhances\"]))\n\n if blend_dependencies[task][\"metapackage\"]:\n #No use listing a metapackage as a key package, if no metapackage exist.\n fout.write(\"Key: \\n\");\n fout.write(\" {0}-{1}\\n\".format(tasksprefix, task))\n\n fout.write(\"Packages: list\\n \")\n for header in [\"Depends\", \"Recommends\"]:\n if not blend_dependencies[task][header]:\n continue \n fout.write(\"{0}\".format(\"\\n \".join(sorted(blend_dependencies[task][header]))))\n fout.write(\"\\n\")\n\n fout.write(\"\\n\")",
"def parse_packages_arg(names):\n did_stdin = False\n for pname in names:\n if pname.strip() == '-':\n if did_stdin:\n print_err('Already read from stdin.')\n continue\n if sys.stdin.isatty() and sys.stdout.isatty():\n print_status('\\nReading package names from stdin...\\n')\n did_stdin = True\n for word in sys.stdin.read().split():\n yield word.strip()\n elif os.path.isfile(pname):\n try:\n with open(pname, 'r') as f:\n for line in f:\n for word in line.split():\n yield word.strip()\n except EnvironmentError as ex:\n print_err(\n '\\nError reading from file: {}\\n{}'.format(pname, ex)\n )\n continue\n else:\n yield pname",
"def get_for_pair_pattern(self, pattern):",
"def format_desc(self):\n return '\\nDescription:\\n{}\\n'.format(\n C(\n FormatBlock(get_pkg_description(self.package)).format(\n width=76,\n newlines=True,\n prepend=' '\n ),\n fore='green'\n )\n )",
"def _format_epilog(ctx: click.Context) -> ty.Generator[str, None, None]:\n if ctx.command.epilog:\n yield from _format_help(ctx.command.epilog)",
"def _collapse_single_via(self, lines):\n if len(lines) == 3:\n matchobj = self._RE_PACKAGE_COMMENT.match(lines[2])\n if matchobj:\n package = matchobj.group(1)\n return [lines[0], lines[1] + ' ' + package]\n return lines",
"def get_pkg_description(pkg):\n\n if hasattr(pkg, 'description'):\n return pkg.description or ''\n if hasattr(pkg, 'installed'):\n installedpkg = pkg.installed\n if installedpkg:\n # Use installed version description\n return installedpkg.description or ''\n\n # Get first description found in all versions.\n desc = ''\n for ver in pkg.versions:\n if ver.description:\n desc = ver.description\n break\n return desc\n\n return ''",
"def __iter__(self):\n\n for s in self.specs:\n\n yield s",
"def descriptions(self):\r\n return [\r\n (m, SafeString('%s (%s)' % (v['description'], v['file-extensions'])))\r\n for m, v in self.methods.items()\r\n if m != 'POT' and m != 'HTML_LEGACY' and m != 'XHTML_LEGACY'\r\n ]",
"def getOptionDescriptions(self) -> List[unicode]:\n ..."
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Same as vuln_iterator, but takes alternates into account. Yields (pkgdesc, original_pkgdesc, vulntype, vulnurl).
|
def vuln_alternate_iterator(filename):
for (pkgdesc, vulntype, vulnurl) in vuln_iterator(filename):
for x in gen_alternates(pkgdesc):
yield (x, pkgdesc, vulntype, vulnurl)
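
vuln_iterator itself is not shown here, so the sketch below stands in for a single record it might yield; the package description, vulnerability type, and URL are invented purely for illustration. It mirrors what vuln_alternate_iterator yields for that one record.

>>> record = ('php{5,7}-curl', 'remote-code-execution', 'http://example.org/advisory')  # hypothetical record
>>> for alt in gen_alternates(record[0]):
...     print((alt, record[0]) + record[1:])
('php5-curl', 'php{5,7}-curl', 'remote-code-execution', 'http://example.org/advisory')
('php7-curl', 'php{5,7}-curl', 'remote-code-execution', 'http://example.org/advisory')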
|
[
"def vuln_pkg_matcher_iterator(filename, pkg_list, unmatched_callback=None):\n assert(isinstance(pkg_list, list))\n for (pkgdesc, orig_pkgdesc, vulntype, vulnurl) in vuln_alternate_iterator(filename):\n pkgdesc = parse_pkgdesc(pkgdesc)\n pkgnames = fnmatch.filter(pkg_list, pkgname_filter(pkgdesc[0]))\n for pkgname in pkgnames:\n yield (pkgname, pkgdesc[1:], orig_pkgdesc, vulntype, vulnurl)\n if len(pkgnames) == 0 and unmatched_callback != None:\n unmatched_callback((pkgdesc, orig_pkgdesc, vulntype, vulnurl))",
"def gen_alternates(pkgdesc):\n pkgdesc = parse_alternates(pkgdesc)\n for x in gen_alternates_recurse(pkgdesc):\n yield x",
"def gen_alternates_recurse(pkgdesc):\n assert(isinstance(pkgdesc, list))\n if len(pkgdesc) <= 1:\n yield ''.join(pkgdesc)\n else:\n prefix = pkgdesc[0]\n alternates = pkgdesc[1]\n pkgdesc = pkgdesc[2:]\n for alt in alternates:\n for x in gen_alternates_recurse(pkgdesc):\n yield prefix + alt + x",
"def dpkgPackages(cls, packager):\n # ask {dpkg} for my options\n alternatives = sorted(packager.alternatives(group=cls), reverse=True)\n # the supported versions\n versions = Default,\n # go through the versions\n for version in versions:\n # scan through the alternatives\n for name in alternatives:\n # if it is match\n if name.startswith(version.flavor):\n # build an instance and return it\n yield version(name=name)\n\n # out of ideas\n return",
"def __iter__(self) -> Generator[tuple[str, str, Types], None, None]:\n for cluster, namespaces in self._inv.items():\n for namespace, types in namespaces.items():\n yield cluster, namespace, types",
"def __iter__(self):\n\n for s in self.specs:\n\n yield s",
"def artifact_tester(artifacts):\n for artifact in artifacts:\n if artifact.has_number_and_year_releases:\n print(\"Warning: year and number based versions: \\n\")\n yield artifact",
"def _iter_packed_refs(cls, repo: \"Repo\") -> Iterator[Tuple[str, str]]:\n try:\n with open(cls._get_packed_refs_path(repo), \"rt\", encoding=\"UTF-8\") as fp:\n for line in fp:\n line = line.strip()\n if not line:\n continue\n if line.startswith(\"#\"):\n # \"# pack-refs with: peeled fully-peeled sorted\"\n # the git source code shows \"peeled\",\n # \"fully-peeled\" and \"sorted\" as the keywords\n # that can go on this line, as per comments in git file\n # refs/packed-backend.c\n # I looked at master on 2017-10-11,\n # commit 111ef79afe, after tag v2.15.0-rc1\n # from repo https://github.com/git/git.git\n if line.startswith(\"# pack-refs with:\") and \"peeled\" not in line:\n raise TypeError(\"PackingType of packed-Refs not understood: %r\" % line)\n # END abort if we do not understand the packing scheme\n continue\n # END parse comment\n\n # skip dereferenced tag object entries - previous line was actual\n # tag reference for it\n if line[0] == \"^\":\n continue\n\n yield cast(Tuple[str, str], tuple(line.split(\" \", 1)))\n # END for each line\n except OSError:\n return None\n # END no packed-refs file handling\n # NOTE: Had try-finally block around here to close the fp,\n # but some python version wouldn't allow yields within that.\n # I believe files are closing themselves on destruction, so it is\n # alright.",
"def packages_from_many_paths_or_urls(cls, paths_or_urls):\n dists = PypiPackage.dists_from_paths_or_urls(paths_or_urls)\n if TRACE_ULTRA_DEEP:\n print(\"packages_from_many_paths_or_urls: dists:\", dists)\n\n dists = NameVer.sorted(dists)\n\n for _projver, dists_of_package in itertools.groupby(\n dists,\n key=NameVer.sortable_name_version,\n ):\n package = PypiPackage.package_from_dists(dists_of_package)\n if TRACE_ULTRA_DEEP:\n print(\"packages_from_many_paths_or_urls\", package)\n yield package",
"def iter_versions(self):\r\n aux_field = self._field_name('aux')\r\n count_field = self._field_name('cnt')\r\n next_field = self._field_name('next')\r\n\r\n entry_offset = self['sh_offset']\r\n for _ in range(self.num_versions()):\r\n entry = struct_parse(\r\n self.version_struct,\r\n self.stream,\r\n stream_pos=entry_offset)\r\n\r\n elf_assert(entry[count_field] > 0,\r\n 'Expected number of version auxiliary entries (%s) to be > 0'\r\n 'for the following version entry: %s' % (\r\n count_field, str(entry)))\r\n\r\n version = Version(entry)\r\n aux_entries_offset = entry_offset + entry[aux_field]\r\n version_auxiliaries_iter = self._iter_version_auxiliaries(\r\n aux_entries_offset, entry[count_field])\r\n\r\n yield version, version_auxiliaries_iter\r\n\r\n entry_offset += entry[next_field]",
"def iterate(self, evtype, evsrc):\n for d in self._sub.iterate(evtype):\n for v in d.iterate(evsrc):\n yield v\n return",
"def cleaned_artifacts(importer, keep=2):\n for artifact in importer:\n if artifact.has_number_and_year_releases:\n print(\"Warning: year and number based versions: \\n\")\n artifact.clean_releases(keep=keep)\n artifact.clean_snapshots(keep=keep)\n yield artifact",
"def iter_dependencies(self, item):\n if 'ACTIVE_PAPER_DEPENDENCIES' in item.attrs:\n for dep in item.attrs['ACTIVE_PAPER_DEPENDENCIES']:\n yield self.file[dep]",
"def dev_clone_iter(xs, dev_strs):\n if isinstance(dev_strs, str):\n dev_strs = [dev_strs]\n return DevClonedIter([dev_clone(x, dev_strs) for x in xs], dev_strs)",
"def _iter_version_auxiliaries(self, entry_offset, count):\r\n name_field = self._field_name('name', auxiliary=True)\r\n next_field = self._field_name('next', auxiliary=True)\r\n\r\n for _ in range(count):\r\n entry = struct_parse(\r\n self.version_auxiliaries_struct,\r\n self.stream,\r\n stream_pos=entry_offset)\r\n\r\n name = self.stringtable.get_string(entry[name_field])\r\n version_aux = VersionAuxiliary(entry, name)\r\n yield version_aux\r\n\r\n entry_offset += entry[next_field]",
"def iterator(self) -> \"swig::SwigPyIterator *\":\n return _itkImagePython.vectoritkImageUL2_iterator(self)",
"def get_packages(package_or_collection, path):\n if isinstance(package_or_collection, Collection):\n npath = path / NormFolder(package_or_collection.name)\n yield package_or_collection, path\n for npc in package_or_collection:\n yield from get_packages(npc, npath)\n else:\n log.debug(f'{path} {package_or_collection}')\n yield package_or_collection, path",
"def iterator(self) -> \"swig::SwigPyIterator *\":\n return _itkImagePython.vectoritkImageULL2_iterator(self)",
"def parse_alternates(pkgdesc):\n assert(isinstance(pkgdesc, str))\n parsed_pkgdesc = []\n while len(pkgdesc) > 0:\n i = pkgdesc.find('{')\n if i == -1:\n parsed_pkgdesc.append(pkgdesc)\n break\n parsed_pkgdesc.append(pkgdesc[:i])\n pkgdesc = pkgdesc[i+1:]\n i = pkgdesc.find('}')\n parsed_pkgdesc.append(tuple(pkgdesc[:i].split(',')))\n pkgdesc = pkgdesc[i+1:]\n return parsed_pkgdesc"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Parse pkgdesc, splitting the package name pattern from the version constraints. Returns ('pkgname', '>ver1', '<ver2') for Dewey-style comparison. Returns ('pkgname', 'ver') for glob version matching.
|
def parse_pkgdesc(pkgdesc):
    assert(isinstance(pkgdesc, str))
    # Find version comparisons.
    split_points = [pkgdesc.find(c) for c in '<>']
    split_points = [i for i in split_points if i != -1]
    split_points.sort()
    # Split the str.
    parsed_pkgdesc = []
    j = 0
    for i in split_points:
        parsed_pkgdesc.append(pkgdesc[j:i])
        j = i
    parsed_pkgdesc.append(pkgdesc[j:])
    if len(parsed_pkgdesc) == 1:
        # Do not use Dewey-style version comparison. Use glob matching.
        m = re.match('^([A-Za-z0-9_-]+)-([][!a-z0-9*?.-]+?)$', pkgdesc)
        if m:
            return m.groups()
        # Version pattern not found. Match any version.
        return (pkgdesc, '*')
    return tuple(parsed_pkgdesc)
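
A short sketch of the three return shapes (Dewey-style, glob, and match-any), assuming re has been imported and parse_pkgdesc above is loaded; the package names are placeholders.

>>> parse_pkgdesc('openssl>1.0.1<1.0.2')
('openssl', '>1.0.1', '<1.0.2')
>>> parse_pkgdesc('openssl-1.0.*')
('openssl', '1.0.*')
>>> parse_pkgdesc('openssl')
('openssl', '*')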
|
[
"def getCurrentVerData(self, pkg):\n pkgpat = re.compile(r'%s\\s' % pkg)\n\n cururl = \"%s/current.list\" % os.environ[\"EUPS_PKGROOT\"]\n curlist = urllib2.urlopen(cururl)\n desc = filter(lambda x: pkgpat.match(x), curlist.readlines())\n curlist.close()\n \n if len(desc) == 0:\n raise Error(pkg + \": unknown package\")\n desc = desc[0].split()\n if len(desc) <= 2:\n raise Error(pkg + \": current version not set\")\n if len(desc) <= 3: desc.append('')\n if len(desc) <= 4: desc.append('')\n return (desc[2], desc[3], desc[4])",
"def parse_package(string_package):\n res = {'dsc': string_package}\n current_field = None\n for line in string_package.splitlines():\n if not line.startswith(' '):\n field, contents = line.split(\":\", 1)\n current_field = field\n contents = contents.strip()\n if field in [\"Package\", \"Source\"]:\n res[\"Source\"] = contents\n # Single line string\n elif field in [\"Format\", \"Version\", \"Maintainer\",\n \"Homepage\", \"Vcs-Browser\", \"Testsuite\",\n \"Standards-Version\", \"Section\", \"Priority\",\n \"Directory\"]:\n res[field] = contents\n elif field in [\"Vcs-Arch\", \"Vcs-Bzr\", \"Vcs-Cvs\", \"Vcs-Darcs\",\n \"Vcs-Git\", \"Vcs-Hg\", \"Vcs-Mtn\", \"Vcs-Svn\"]:\n res[field] = contents\n res[\"Vcs\"] = contents\n # Single line comma separated\n elif field in [\"Binary\", \"Uploaders\", \"Testsuite-Triggers\",\n \"Build-Depends\", \"Build-Depends-Indep\",\n \"Build-Depends-Arch\", \"Build-Conflicts\",\n \"Build-Conflicts-Indep\", \"Build-Conflicts-Arch\"]:\n res[field] = list(map(lambda x: x.strip(),\n contents.split(\",\")))\n # Single line space separated\n elif field in [\"Architecture\"]:\n res[field] = list(map(lambda x: x.strip(),\n contents.split(\" \")))\n elif field in [\"Dgit\"]:\n res[field] = contents.split(\" \")[0]\n elif field in [\"Package-List\", \"Checksums-Sha1\",\n \"Checksums-Sha256\", \"Files\"]:\n res[field] = []\n else:\n contents = line.strip()\n if current_field == \"Package-List\":\n contents = contents.split(\" \")\n contents = {\n \"name\": contents[0],\n \"type\": contents[1],\n \"section\": contents[2],\n \"priority\": contents[3],\n \"arch\": [] if len(contents) < 5 else\n contents[4].split(\"=\")[1].split(\",\")}\n res[current_field].append(contents)\n elif current_field in [\"Files\", \"Checksums-Sha1\",\n \"Checksums-Sha256\"]:\n contents = contents.split(\" \")\n contents = {\n \"checksum\": contents[0],\n \"size\": contents[1],\n \"filename\": contents[2]}\n res[current_field].append(contents)\n return res",
"def compare_package(pkgname, pkgvertuple, use_system_db=False):\n # Path to the downloaded PKGBUILD of the base package\n path_base = os.path.join(BASE_PACKAGES_DIR, pkgname)\n pkgbuild_base = os.path.join(path_base, 'PKGBUILD')\n\n # Path to the PKGBUILD of the -selinux package\n path_selinux = os.path.join(SELINUX_PACKAGES_DIR, pkgname + '-selinux')\n pkgbuild_selinux = os.path.join(path_selinux, 'PKGBUILD')\n\n if not os.path.exists(path_selinux):\n logger.error(\"SELinux package directory doesn't exist ({})\".format(path_selinux))\n return False\n\n if not os.path.exists(pkgbuild_selinux):\n logger.error(\"PKGBUILD for {}-selinux doesn't exist ({})\".format(pkgname, pkgbuild_selinux))\n return False\n\n # Get current version of the SElinux package, to validate pkgvertuple\n pkgver_selinux = get_pkgbuild_pkgver(pkgbuild_selinux)\n if pkgver_selinux is None:\n logger.error(\"Failed to get the package version of {}-selinux\".format(pkgname))\n return False\n if pkgver_selinux[0] != pkgvertuple[0]:\n logger.error(\"{} is out of sync: package {}-selinux has version {} in its PKGBUILD but {} in the list\".format(\n BASE_PKGLIST_FILE, pkgname, pkgver_selinux[0], pkgvertuple[0]))\n logger.error(\"You need to update {} for example with '{} = {}-1'\".format(\n BASE_PKGLIST_FILE, pkgname, pkgver_selinux[0]))\n return False\n del pkgver_selinux\n\n # Get latest version of the base package\n pkgver_base = get_pacman_pkgver(pkgname, use_system_db)\n if pkgver_base is None:\n logger.error(\"Failed to get the package version of {} with pacman\".format(pkgname))\n return False\n\n if pkgver_base == pkgvertuple:\n logger.info(\"Package {0}-selinux is up to date (version {1[0]}-{1[1]})\".format(pkgname, pkgver_base))\n return True\n\n logger.info(\"Package {0}-selinux needs an update from {1[0]}-{1[1]} to {2[0]}-{2[1]}\".format(\n pkgname, pkgvertuple, pkgver_base))\n\n # Download the PKGBUILD of the base package, if needed\n if not os.path.exists(pkgbuild_base):\n if os.path.exists(path_base):\n logger.error(\"PKGBUILD for {} has been deleted. Please remove {}\".format(pkgname, path_base))\n return False\n if not os.path.exists(BASE_PACKAGES_DIR):\n os.makedirs(BASE_PACKAGES_DIR)\n logger.info(\"Running 'yaourt -G {}'\".format(pkgname))\n p = subprocess.Popen(\n ['yaourt', '-G', pkgname],\n cwd=BASE_PACKAGES_DIR)\n retval = p.wait()\n if retval:\n logger.error(\"yaourt exited with code {}\".format(retval))\n return False\n\n if not os.path.exists(pkgbuild_base):\n logger.error(\"yaourt hasn't created {}\".format(pkgbuild_base))\n return False\n\n pkgver_base2 = get_pkgbuild_pkgver(pkgbuild_base)\n if pkgver_base > pkgver_base2:\n logger.error(\"PKGBUILD for {} is out of date. Please remove {}\".format(pkgname, path_base))\n return False\n elif pkgver_base < pkgver_base2:\n logger.warn(\"Downloaded PKGBUILD for {} is in testing. Beware!\".format(pkgname))\n\n logger.info(\"You can now compare {} and {} to update the SELinux package\".format(path_selinux, path_base))\n logger.info(\"... git log of Arch package : {}\".format(ARCH_GITLOG_URL.format(pkgname)))\n return True",
"def discover_stack_version(project, desc):\n known_versions = (\"grizzly\", \"havana\", \"icehouse\", \"juno\",\n \"kilo\", \"liberty\", \"mitaka\", \"newton\", \"ocata\", \"pike\")\n #\n # the ideal version is Openstack Version: ....\n matches = (\n \"(^|\\n)openstack\\s*version\\s*:(?P<version>.*)\", # ideal version\n \"(^|\\n)%s(\\s*version)?\\s*:(?P<version>.*)\" % project, # nova version\n \"(^|\\n)openstack-%s-common-(?P<version>.*)\" % project, # rhel version\n \"(^|\\n)openstack-%s-compute-(?P<version>.*)\" % project, # rhel version\n r\"\\b%s-common\\s+\\d\\:(?P<version>.*)\" % project, # ubuntu dpkg\n # -l version\n r\"(?P<version>\\b(%s)\\b)\" % (\"|\".join(known_versions)), # keywords\n )\n found_version = None\n for attempt in matches:\n m = re.search(attempt, desc, re.IGNORECASE)\n if m:\n found_version = m.group('version')\n if found_version:\n break\n return version_normalize(found_version)",
"def parse_package_string(path):\n parts = path.split('.')\n\n # Is the last entry in the path capitalized?\n if parts[-1][0].isupper():\n return \".\".join(parts[:-1]), parts[-1]\n\n return path, \"\"",
"def parsed_package_spec(self) -> PackageSpec:\n m = self.package_spec_regex.match(self.package_spec)\n if not m:\n raise RuntimeError(f\"Could not parse package spec {self.package_spec}\")\n return PackageSpec(\n name=m.group('name'),\n version_comparator=m.group('comparator'),\n version_string=m.group('version_string')\n )",
"def split_version(regid):\n m = re.match(r'^(.+)-(rev_[0-9a-f]+|[0-9]+(\\.[0-9]+)?(\\.[0-9]+)?|dev|develop|master|latest)$', regid)\n if m is not None:\n return (m.group(1), m.group(2))\n return (regid, None)",
"def extract_pkg_info(pkg_name):\n logger.info('Extracting information of package \"{0}\".'.format(pkg_name))\n data = _pkg_json_info(pkg_name)\n # Extracting names which can be imported.\n if not data or not data['urls']:\n logger.warning('Package \"{0}\" no longer available.'.format(pkg_name))\n return\n\n urls = [item['url'] for item in data['urls']\n if item['filename'].endswith(ACCEPTABLE_EXT)]\n # Has not satisfied compressed package.\n if not urls:\n logger.warning('Package \"{0}\" can not unpack.'.format(pkg_name))\n return\n url = urls[0]\n\n top_levels = top_level(url, download(url))\n # Maybe package is a project, not importable...\n if not top_levels:\n logger.warning(\n 'Maybe package \"{0}\" is not importable.'.format(pkg_name))\n return\n\n # Insert into database.\n with database() as db:\n db.insert_package(pkg_name)\n package = db.query_package(pkg_name)\n for top in top_levels:\n top = top or pkg_name # empty top_level.txt\n db.insert_name(top, package.id)",
"def get_sdist_name_ver_ext(filename):\n name_ver = None\n extension = None\n\n for ext in EXTENSIONS_SDIST:\n if filename.endswith(ext):\n name_ver, extension, _ = filename.rpartition(ext)\n break\n\n if not extension or not name_ver:\n return False\n\n name, _, version = name_ver.rpartition(\"-\")\n\n if not name or not version:\n return False\n\n # weird version\n if any(\n w in version\n for w in (\n \"x86_64\",\n \"i386\",\n )\n ):\n return False\n\n # all char versions\n if version.isalpha():\n return False\n\n # non-pep 440 version \n if \"-\" in version:\n return False\n\n # single version \n if version.isdigit() and len(version) == 1:\n return False\n\n # r1 version \n if len(version) == 2 and version[0]==\"r\" and version[1].isdigit():\n return False\n\n # dotless version (but calver is OK)\n if \".\" not in version and len(version) < 3:\n return False\n\n # version with dashes selenium-2.0-dev-9429.tar.gz\n if name.endswith((\"dev\",)) and \".\" not in version:\n return False\n # version pre or post, old legacy\n if version.startswith((\"beta\", \"rc\", \"pre\", \"post\", \"final\")):\n return False\n\n return name, version, extension",
"def convert_dep(req, operator, version):\n \n deps = []\n\n #any version will do\n if not version or version == '*':\n deps.append(req)\n\n #any prefix but ~ makes things dead simple\n elif operator in ['>', '<', '<=', '>=', '=']:\n deps.append(' '.join([req, operator, version]))\n\n #oh boy, here we go...\n else:\n #split the dotted portions into a list (handling trailing dots properly)\n parts = [part if part else 'x' for part in version.split('.')]\n parts = [int(part) if part != 'x' and not '-' in part\n else part for part in parts]\n\n # 1 or 1.x or 1.x.x or ~1 or ^1\n if len(parts) == 1 or parts[1] == 'x':\n if parts[0] != 0:\n deps.append('{0} >= {1}'.format(req, parts[0]))\n deps.append('{0} < {1}'.format(req, parts[0]+1))\n\n # 1.2.3 or 1.2.3-4 or 1.2.x or ~1.2.3 or ^1.2.3 or 1.2\n elif len(parts) == 3 or operator != '~':\n # 1.2.x or 1.2\n if len(parts) == 2 or parts[2] == 'x':\n deps.append('{0} >= {1}.{2}'.format(req, parts[0], parts[1]))\n deps.append('{0} < {1}.{2}'.format(req, parts[0], parts[1]+1))\n # ~1.2.3 or ^0.1.2 (zero is special with the caret operator)\n elif operator == '~' or (operator == '^' and parts[0] == 0 and parts[1] > 0):\n deps.append('{0} >= {1}'.format(req, version))\n deps.append('{0} < {1}.{2}'.format(req, parts[0], parts[1]+1))\n #^1.2.3\n elif operator == '^' and parts[0:1] != [0,0]:\n deps.append('{0} >= {1}'.format(req, version))\n deps.append('{0} < {1}'.format(req, parts[0]+1))\n # 1.2.3 or 1.2.3-4 or ^0.0.3\n else:\n deps.append('{0} = {1}'.format(req, version))\n\n # ~1.2\n elif operator == '~':\n deps.append('{0} >= {1}'.format(req, version))\n deps.append('{0} < {1}'.format(req, parts[0]+1))\n \n #^1.2\n elif operator == '^':\n deps.append('{0} >= {1}'.format(req, version))\n deps.append('{0} < {1}'.format(req, parts[0]+1))\n \n\n return deps",
"def get_req_pkg_name(r):\n return r.replace('<', '=').replace('>', '=').replace(';', '=').split(\"=\")[0]",
"def parse_alternates(pkgdesc):\n assert(isinstance(pkgdesc, str))\n parsed_pkgdesc = []\n while len(pkgdesc) > 0:\n i = pkgdesc.find('{')\n if i == -1:\n parsed_pkgdesc.append(pkgdesc)\n break\n parsed_pkgdesc.append(pkgdesc[:i])\n pkgdesc = pkgdesc[i+1:]\n i = pkgdesc.find('}')\n parsed_pkgdesc.append(tuple(pkgdesc[:i].split(',')))\n pkgdesc = pkgdesc[i+1:]\n return parsed_pkgdesc",
"def get_pkg_description(pkg):\n\n if hasattr(pkg, 'description'):\n return pkg.description or ''\n if hasattr(pkg, 'installed'):\n installedpkg = pkg.installed\n if installedpkg:\n # Use installed version description\n return installedpkg.description or ''\n\n # Get first description found in all versions.\n desc = ''\n for ver in pkg.versions:\n if ver.description:\n desc = ver.description\n break\n return desc\n\n return ''",
"def parse_ver(version_raw):\n version_list = version_raw.split('.')\n if len(version_list) > 4:\n raise ValueError(\n 'Invalid version format, upto 4 versions required: '\n 'bit.train.data.sth'\n )\n return version_list",
"def parse_javadoc_file(fn):\n contents = open(fn, 'r').read()\n pkg = _re_package.search(contents).group(1)\n if _re_public.search(contents):\n return pkg",
"def get_package_version_and_system_architecture(pmdk_path):\n rpm_directory = path.join(pmdk_path, 'rpm')\n version = ''\n architecture = ''\n for elem in listdir(rpm_directory):\n if '.src.rpm' in elem:\n # looks for the version number of rpm package in rpm package name\n version = re.search(r'[\\s]*pmdk-([\\S]+).src.rpm', elem).group(1)\n else:\n architecture = elem\n return version, architecture",
"def strip_package_and_version(package_manager: str, package_str: str) -> Tuple[str, Optional[str]]:\n if package_manager not in ['pip3', 'pip2', 'pip', 'apt', 'conda', 'conda2', 'conda3']:\n raise ValueError(f'Unsupported package manager: {package_manager}')\n\n if package_manager in ['pip', 'pip2', 'pip3']:\n if '==' in package_str:\n t = package_str.split('==')\n return t[0], t[1]\n else:\n return package_str, None\n\n if package_manager == 'apt' or package_manager in ['conda', 'conda2', 'conda3']:\n if '=' in package_str:\n t = package_str.split('=')\n return t[0], t[1]\n else:\n return package_str, None\n\n raise ValueError(f'Unsupported package manager: {package_manager}')",
"def dpkgPackages(cls, packager):\n # ask {dpkg} for my options\n alternatives = sorted(packager.alternatives(group=cls), reverse=True)\n # the supported versions\n versions = Default,\n # go through the versions\n for version in versions:\n # scan through the alternatives\n for name in alternatives:\n # if it is match\n if name.startswith(version.flavor):\n # build an instance and return it\n yield version(name=name)\n\n # out of ideas\n return",
"def _suggest_semantic_version(s):\n result = s.strip().lower()\n for pat, repl in _REPLACEMENTS:\n result = pat.sub(repl, result)\n if not result:\n result = '0.0.0'\n\n # Now look for numeric prefix, and separate it out from\n # the rest.\n #import pdb; pdb.set_trace()\n m = _NUMERIC_PREFIX.match(result)\n if not m:\n prefix = '0.0.0'\n suffix = result\n else:\n prefix = m.groups()[0].split('.')\n prefix = [int(i) for i in prefix]\n while len(prefix) < 3:\n prefix.append(0)\n if len(prefix) == 3:\n suffix = result[m.end():]\n else:\n suffix = '.'.join([str(i) for i in prefix[3:]]) + result[m.end():]\n prefix = prefix[:3]\n prefix = '.'.join([str(i) for i in prefix])\n suffix = suffix.strip()\n if suffix:\n #import pdb; pdb.set_trace()\n # massage the suffix.\n for pat, repl in _SUFFIX_REPLACEMENTS:\n suffix = pat.sub(repl, suffix)\n\n if not suffix:\n result = prefix\n else:\n sep = '-' if 'dev' in suffix else '+'\n result = prefix + sep + suffix\n if not is_semver(result):\n result = None\n return result"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Filters pkgname before matching.
|
def pkgname_filter(pkgname):
    if re.search(r'^py\d{2}-', pkgname):
# Strip Python version from pkgname, as it's present in the binary package name,
# but is not present in the pkgsrc package name.
return 'py-' + pkgname[5:]
return pkgname
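# Usage sketch (not part of the original corpus entry). Assumes `import re` at
# module level, since pkgname_filter relies on it; the sample names below are
# illustrative only.
import re

assert pkgname_filter('py27-requests') == 'py-requests'  # embedded Python version stripped
assert pkgname_filter('curl') == 'curl'                   # non-pyXX names pass through unchanged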
|
[
"def test_filter_app_by_name_pattern_no_pattern(self) -> None:\n self.assertEqual(filter_app_by_name_pattern(self.COMPATIBLE_APPS), self.COMPATIBLE_APPS)",
"def check_package_name(package_name):\n m = re.match('[a-z0-9_]{3,30}', package_name)\n return (m != None and m.group(0) == package_name)",
"def filter_pkgs(self, pkglst):\n\n return filter(\n lambda pkg: pkg_install_state(pkg, expected=self),\n pkglst\n )",
"def extract_pkg_from_import(name):\n num_letters = _import_map_num_letters()\n original_name = name\n while True:\n try:\n fllt = name[:min(len(name), num_letters)]\n import_map = _import_map_cache(fllt)\n supplying_artifacts = import_map[name]\n except KeyError:\n if '.' not in name:\n return original_name, {}, {}\n name = name.rsplit('.', 1)[0]\n pass\n else:\n break\n import_to_artifact = {name: supplying_artifacts}\n # TODO: launder supplying_pkgs through centrality scoring so we have one thing\n # but keep the rest for the more detailed reports\n supplying_pkgs = {ARTIFACT_TO_PKG[k] for k in supplying_artifacts}\n import_to_pkg = {name: supplying_pkgs}\n\n return next(iter(k for k in hubs_auths if k in supplying_pkgs), original_name), import_to_artifact, import_to_pkg",
"def test_filter_app_by_name_pattern_not_matched_pattern(self) -> None:\n self.assertEqual(filter_app_by_name_pattern(self.COMPATIBLE_APPS,\n 'not_matched_pattern'), [])",
"def filter_name(ctx, regex, input, output):\n with GoogleBenchmark(stream=input) as b:\n output.write(b.keep_name_regex(regex).json())",
"def pkg_names(self):\n return set(p['product'] for p in self.each_cpe())",
"def is_valid_package_name(package_name):\n return package_name and bool(PACKAGE_NAME_RE.match(package_name))",
"def cmd_contains_file(name, shortnamesonly=False):\n\n try:\n repat = re.compile(name)\n except Exception as ex:\n print_err('\\nInvalid search term!: {}\\n{}'.format(name, ex))\n return 1\n\n print_status(\n 'Looking for packages by file pattern',\n value=repat.pattern,\n )\n\n # Setup filename methods (long or short, removes an 'if' from the loop.)\n def getfilenameshort(s):\n return os.path.split(s)[-1]\n # Pick filename retrieval function..\n filenamefunc = getfilenameshort if shortnamesonly else str\n\n # Iterate all packages...\n totalpkgs = 0\n totalfiles = 0\n\n for pkgname in cache_main.keys():\n pkg = cache_main[pkgname]\n matchingfiles = []\n if not pkg_install_state(pkg):\n continue\n if not hasattr(pkg, 'installed_files'):\n print_err(\n '\\n'.join((\n '\\nUnable to retrieve installed files for {},',\n 'apt/apt_pkg may be out of date!'\n )).format(pkgname)\n )\n return 1\n\n for installedfile in (pkg.installed_files or []):\n shortname = filenamefunc(installedfile)\n rematch = repat.search(shortname)\n if rematch:\n # Save match for report,\n # (report when we're finished with this package.)\n matchingfiles.append(installedfile)\n\n # Report any matches.\n if matchingfiles:\n totalpkgs += 1\n totalfiles += len(matchingfiles)\n print(pkg_format(pkg, no_desc=True, no_marker=True))\n print(' {}'.format('\\n '.join(matchingfiles)))\n\n pluralfiles = 'file' if totalfiles == 1 else 'files'\n pluralpkgs = 'package.' if totalpkgs == 1 else 'packages.'\n print_status(\n '\\nFound',\n C(totalfiles, fore='blue', style='bright'),\n pluralfiles,\n 'in',\n C(totalpkgs, fore='blue', style='bright'),\n pluralpkgs,\n )\n return 0",
"def allow_all_package_names(self) -> bool:\n return pulumi.get(self, \"allow_all_package_names\")",
"def _sanitize_package(orig):\n line = orig\n # python3.3 packages (pygit2, P4Python)\n # Do these BEFORE general python3.3, since python3.3 prefix would match site-packages prefix\n # /Library/Frameworks/Python.framework/Versions/3.3/lib/python3.3/site-packages/p4python-2015.2.1205721-py3.3-macosx-10.6-intel.egg/P4.py:569(run)\n # /Library/Frameworks/Python.framework/Versions/3.3/lib/python3.3/site-packages/p4python-2015.2.1205721-py3.3-macosx-10.6-intel.egg/P4.py:749(__flatten)\n # /Library/Frameworks/Python.framework/Versions/3.3/lib/python3.3/site-packages/p4python-2015.2.1205721-py3.3-macosx-10.6-intel.egg/P4.py:877(insert)\n # /Library/Frameworks/Python.framework/Versions/3.3/lib/python3.3/site-packages/pygit2/repository.py:58(__init__)\n # /Library/Frameworks/Python.framework/Versions/3.3/lib/python3.3/site-packages/pygit2/repository.py:71(_common_init)\n # /Library/Frameworks/Python.framework/Versions/3.3/lib/python3.3/site-packages/pytz/__init__.py:245(__str__)\n python_packages_re = re.compile(r'.*/site-packages/(.*)')\n m = python_packages_re.match(orig)\n if m:\n line = m.group(1)\n package_module_re = re.compile(r'([^/]+)/(.*)')\n m = package_module_re.match(line)\n if m:\n package = m.group(1)\n module = m.group(2)\n for p in ['p4python', 'pygit2']:\n if p in package:\n package = p\n line = package + \"/\" + module\n return line\n\n # python3.3 library:\n # /Library/Frameworks/Python.framework/Versions/3.3/lib/python3.3/multiprocessing/synchronize.py:296(is_set)\n # /Library/Frameworks/Python.framework/Versions/3.3/lib/python3.3/os.py:671(__getitem__)\n # /Library/Frameworks/Python.framework/Versions/3.3/lib/python3.3/os.py:694(__iter__)\n # /Library/Frameworks/Python.framework/Versions/3.3/lib/python3.3/re.py:158(search)\n # /Library/Frameworks/Python.framework/Versions/3.3/lib/python3.3/re.py:212(compile)\n # /Library/Frameworks/Python.framework/Versions/3.3/lib/python3.3/tempfile.py:386(__del__)\n python33_re = re.compile(r'.*/python3.3/(.*)')\n m = python33_re.match(line)\n if m:\n line = m.group(1)\n return line\n\n # Git Fusion\n # /Users/zig/Dropbox/git-fusion-main/bin/p4gf_atomic_lock.py:177(update_all_gf_reviews)\n # /Users/zig/Dropbox/git-fusion-main/bin/p4gf_atomic_lock.py:202(update_repo_reviews)\n # /Users/zig/Dropbox/git-fusion-main/bin/p4gf_util_p4run_logged.py:49(_log_p4_request)\n # /Users/zig/Dropbox/git-fusion-main/bin/p4gf_util_p4run_logged.py:55(_log_p4_results)\n git_fusion_re = re.compile(r'.*/(p4gf_[^/]+)')\n m = git_fusion_re.match(line)\n if m:\n line = m.group(1)\n return line\n\n # Built-in (leave unchanged)\n # {built-in method chdir}\n # {built-in method discover_repository}\n # {built-in method getcwd}\n # {built-in method getfilesystemencoding}\n # {built-in method hasattr}\n # {built-in method isinstance}\n # {built-in method len}\n # {built-in method max}\n # {built-in method poll}\n # {built-in method proxy}\n # {built-in method sorted}\n # {built-in method time}\n # {method 'acquire' of '_multiprocessing.SemLock' objects}\n # {method 'add' of 'set' objects}\n # {method 'append' of 'collections.deque' objects}\n # {method 'append' of 'list' objects}\n # {method 'as_array' of 'P4API.P4Map' objects}\n # {method 'decode' of 'bytes' objects}\n\n return line",
"def update_list_of_names(self):\n ls = self.__apk_info.find({}, {'_id': 0, \"packageName\": 1})\n names = list(set(sorted([i[\"packageName\"] for i in ls])))\n names = [{'_id': i} for i in names if len(i) > 0]\n self.__package_names.insert(names)",
"def non_top_pkg_name(req, pkg):\n vers = []\n req_ver = req_version(req)\n if req_ver:\n vers.append(('required', req_ver))\n if pkg:\n vers.append(('installed', pkg.version))\n if not vers:\n return req.key\n ver_str = ', '.join(['{}: {}'.format(k, v) for k, v in vers])\n return '{} [{}]'.format(pkg.project_name, ver_str)",
"def query_by_pkgnames(pkgnames, backends):\n names = CaseInsensitiveStringList(dedup(pkgnames))\n buildables = list()\n for backend in backends:\n new_buildables = backend(names.get())\n buildables += new_buildables\n names.remove_strings([buildable.package_info.pkgname for buildable in new_buildables])\n return buildables",
"def matches_pkg(self, pkg):\n return pkg_install_state(pkg, expected=self)",
"def _is_ignored_package(self, package_name):\n\n if package_name in self._required_packages:\n return True\n\n for ignored in self._configs.ignored_packages:\n if package_name.startswith(ignored) or self._is_equal(ignored, package_name):\n return True\n\n return False",
"def top_pkg_name(pkg):\n return '{}=={}'.format(pkg.project_name, pkg.version)",
"def _filter_applications(self, filter_query):\n\n return self.clients(\"murano\").packages.filter(**filter_query)",
"def normalize_project_name(name):\r\n name = re.sub(\"_\", \"-\", name).lower()\r\n if not PACKAGE_REGEX[\"permitted_characters\"].match(name):\r\n raise ValueError(\"name contains illegal characters! (See PEP-426)\")\r\n if not (PACKAGE_REGEX[\"start_with_alphanumeric\"].match(name) and\r\n PACKAGE_REGEX[\"end_with_alphanumeric\"].match(name)):\r\n raise ValueError(\r\n \"Distribution names MUST start with and end with \" +\r\n \"an ASCII letter or digit (See PEP-426)\"\r\n )\r\n return name"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Same as vuln_alternate_iterator, but matches pkgnames against a package list, and splits up the version patterns. Yields (pkgname, (version_pattern,), original_pkgdesc, vulntype, vulnurl).
|
def vuln_pkg_matcher_iterator(filename, pkg_list, unmatched_callback=None):
assert(isinstance(pkg_list, list))
for (pkgdesc, orig_pkgdesc, vulntype, vulnurl) in vuln_alternate_iterator(filename):
pkgdesc = parse_pkgdesc(pkgdesc)
pkgnames = fnmatch.filter(pkg_list, pkgname_filter(pkgdesc[0]))
for pkgname in pkgnames:
yield (pkgname, pkgdesc[1:], orig_pkgdesc, vulntype, vulnurl)
        if len(pkgnames) == 0 and unmatched_callback is not None:
unmatched_callback((pkgdesc, orig_pkgdesc, vulntype, vulnurl))
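# Sketch of the matching step in isolation (package names below are made up):
# fnmatch.filter applies glob-style patterns, which is what lets a filtered
# pkgdesc such as 'py-requests-*' fan out to every matching binary package.
import fnmatch

pkg_list = ['py-requests-2.28.1', 'curl-7.85.0', 'py-urllib3-1.26.12']
print(fnmatch.filter(pkg_list, 'py-requests-*'))  # ['py-requests-2.28.1']
print(fnmatch.filter(pkg_list, 'openssl-*'))      # [] -> unmatched_callback would fire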
|
[
"def vuln_alternate_iterator(filename):\n for (pkgdesc, vulntype, vulnurl) in vuln_iterator(filename):\n for x in gen_alternates(pkgdesc):\n yield (x, pkgdesc, vulntype, vulnurl)",
"def dpkgPackages(cls, packager):\n # ask {dpkg} for my options\n alternatives = sorted(packager.alternatives(group=cls), reverse=True)\n # the supported versions\n versions = Default,\n # go through the versions\n for version in versions:\n # scan through the alternatives\n for name in alternatives:\n # if it is match\n if name.startswith(version.flavor):\n # build an instance and return it\n yield version(name=name)\n\n # out of ideas\n return",
"def package_version_generator():\n # the inner for-loop generates a finite sequence of all valid\n # ecosystem+package+version combinations, but we need infinite sequence.\n # Thence we use outer infinite loop here\n while True:\n for ecosystem, packages in GremlinPackageGenerator.PACKAGES.items():\n yield from GremlinPackageGenerator.generate_ecosystem_package_version(ecosystem,\n packages)",
"def packages_from_many_paths_or_urls(cls, paths_or_urls):\n dists = PypiPackage.dists_from_paths_or_urls(paths_or_urls)\n if TRACE_ULTRA_DEEP:\n print(\"packages_from_many_paths_or_urls: dists:\", dists)\n\n dists = NameVer.sorted(dists)\n\n for _projver, dists_of_package in itertools.groupby(\n dists,\n key=NameVer.sortable_name_version,\n ):\n package = PypiPackage.package_from_dists(dists_of_package)\n if TRACE_ULTRA_DEEP:\n print(\"packages_from_many_paths_or_urls\", package)\n yield package",
"def iter_versions(self):\r\n aux_field = self._field_name('aux')\r\n count_field = self._field_name('cnt')\r\n next_field = self._field_name('next')\r\n\r\n entry_offset = self['sh_offset']\r\n for _ in range(self.num_versions()):\r\n entry = struct_parse(\r\n self.version_struct,\r\n self.stream,\r\n stream_pos=entry_offset)\r\n\r\n elf_assert(entry[count_field] > 0,\r\n 'Expected number of version auxiliary entries (%s) to be > 0'\r\n 'for the following version entry: %s' % (\r\n count_field, str(entry)))\r\n\r\n version = Version(entry)\r\n aux_entries_offset = entry_offset + entry[aux_field]\r\n version_auxiliaries_iter = self._iter_version_auxiliaries(\r\n aux_entries_offset, entry[count_field])\r\n\r\n yield version, version_auxiliaries_iter\r\n\r\n entry_offset += entry[next_field]",
"def gen_alternates(pkgdesc):\n pkgdesc = parse_alternates(pkgdesc)\n for x in gen_alternates_recurse(pkgdesc):\n yield x",
"def parse_pkgdesc(pkgdesc):\n assert(isinstance(pkgdesc, str))\n # Find version comparisions.\n split_points = [pkgdesc.find(c) for c in '<>']\n split_points = [i for i in split_points if i != -1]\n split_points.sort()\n # Split the str.\n parsed_pkgdesc = []\n j = 0\n for i in split_points:\n parsed_pkgdesc.append(pkgdesc[j:i])\n j = i\n parsed_pkgdesc.append(pkgdesc[j:])\n \n if len(parsed_pkgdesc) == 1:\n # Do not use Dewey-style version comparision. Use glob matching.\n m = re.match('^([A-Za-z0-9_-]+)-([][!a-z0-9*?.-]+?)$', pkgdesc)\n if m:\n return m.groups()\n # Version pattern not found. Match any version.\n return (pkgdesc, '*')\n \n return tuple(parsed_pkgdesc)",
"def gen_alternates_recurse(pkgdesc):\n assert(isinstance(pkgdesc, list))\n if len(pkgdesc) <= 1:\n yield ''.join(pkgdesc)\n else:\n prefix = pkgdesc[0]\n alternates = pkgdesc[1]\n pkgdesc = pkgdesc[2:]\n for alt in alternates:\n for x in gen_alternates_recurse(pkgdesc):\n yield prefix + alt + x",
"def package_version_generator_for_ecosystem(ecosystem='pypi'):\n packages = GremlinPackageGenerator.PACKAGES[ecosystem]\n # the inner for-loop generates a finite sequence of all valid\n # ecosystem+package+version combinations, but we need infinite sequence.\n # Thence we use outer infinite loop here\n while True:\n yield from GremlinPackageGenerator.generate_ecosystem_package_version(ecosystem,\n packages)",
"def wrong_files_lines(cls, package_lines):\n release = Test.Fixtures.rhel_release()\n for line in package_lines:\n line = line.strip()\n if len(line) > 0:\n # RHEL5 has 8 dots\n # RHEL6 has 9 dots\n if (not line.startswith(\".........\") and release.major == 6) or (not line.startswith(\"........\") and release.major == 5):\n yield line",
"def get_tool_versions() -> Generator:\n for name, func in [\n (\"blast+\", get_blast_version),\n (\"nucmer\", get_nucmer_version),\n (\"blastall\", get_blastall_version),\n (\"fastani\", get_fastani_version),\n ]:\n yield (name, func())",
"def packages(self) -> Iterator[StubRepositoryPackage]:\n yield from self.packages_index.values()",
"def list(self, package_name: str, include_pre: bool = False):\n versions = find_versions(package_name, include_pre)\n for v in sorted(set(v.version for v in versions)):\n print(v)",
"def _iter_version_auxiliaries(self, entry_offset, count):\r\n name_field = self._field_name('name', auxiliary=True)\r\n next_field = self._field_name('next', auxiliary=True)\r\n\r\n for _ in range(count):\r\n entry = struct_parse(\r\n self.version_auxiliaries_struct,\r\n self.stream,\r\n stream_pos=entry_offset)\r\n\r\n name = self.stringtable.get_string(entry[name_field])\r\n version_aux = VersionAuxiliary(entry, name)\r\n yield version_aux\r\n\r\n entry_offset += entry[next_field]",
"def get_package_names():\n pypi_packages_url = 'https://pypi.python.org/simple/'\n response = requests.get(pypi_packages_url)\n if response.status_code != 200:\n raise Exception('Error fetching URL: {url}'.format(url=pypi_packages_url))\n\n soup = BeautifulSoup(response.content, 'html.parser')\n for link in soup.find_all('a'):\n path = link.get('href')\n package = path.split('/')[2]\n yield package",
"def get_matching_versions(specs, num_versions=1):\n matching = []\n for spec in specs:\n pkg = spec.package\n\n # Skip any package that has no known versions.\n if not pkg.versions:\n tty.msg(\"No safe (checksummed) versions for package %s\" % pkg.name)\n continue\n\n pkg_versions = num_versions\n\n version_order = list(reversed(sorted(pkg.versions)))\n matching_spec = []\n if spec.concrete:\n matching_spec.append(spec)\n pkg_versions -= 1\n if spec.version in version_order:\n version_order.remove(spec.version)\n\n for v in version_order:\n # Generate no more than num_versions versions for each spec.\n if pkg_versions < 1:\n break\n\n # Generate only versions that satisfy the spec.\n if spec.concrete or v.satisfies(spec.versions):\n s = spack.spec.Spec(pkg.name)\n s.versions = VersionList([v])\n s.variants = spec.variants.copy()\n # This is needed to avoid hanging references during the\n # concretization phase\n s.variants.spec = s\n matching_spec.append(s)\n pkg_versions -= 1\n\n if not matching_spec:\n tty.warn(\"No known version matches spec: %s\" % spec)\n matching.extend(matching_spec)\n\n return matching",
"def get_package_pairs(check_pypi=False, check_github=False):\n self_packages = []\n pair_packages = []\n if check_pypi:\n # Get pypi packages for single checks\n self_packages.extend(configs.PKG_LIST)\n # Get pypi packages for pairwise checks\n pypi_pairs = list(itertools.combinations(configs.PKG_LIST, 2))\n pair_packages.extend(pypi_pairs)\n if check_github:\n # Get github head packages for single checks\n self_packages.extend(list(configs.WHITELIST_URLS.keys()))\n # Get github head packages for pairwise checks\n for gh_url in configs.WHITELIST_URLS:\n pairs = []\n gh_name = configs.WHITELIST_URLS[gh_url]\n for pypi_pkg in configs.PKG_LIST:\n if pypi_pkg != gh_name:\n pairs.append((gh_url, pypi_pkg))\n pair_packages.extend(pairs)\n\n return self_packages, pair_packages",
"def load_multiple_versions(app_path,package):\n for sublabel in package.__all__:\n submodule = get_module('%s.%s' % (package.__name__,sublabel))\n load_descriptors(app_path,submodule)",
"def iter_setup_packages(srcdir, packages):\n\n for packagename in packages:\n package_parts = packagename.split('.')\n package_path = os.path.join(srcdir, *package_parts)\n setup_package = os.path.relpath(\n os.path.join(package_path, 'setup_package.py'))\n\n if os.path.isfile(setup_package):\n module = import_file(setup_package,\n name=packagename + '.setup_package')\n yield module"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Apply GitHub matrix `include` and `exclude` transformations.
|
def expand_gh_matrix(matrix):
raw = dict(matrix)
include = raw.pop("include", [])
exclude = raw.pop("exclude", [])
merged = [
dict(collections.ChainMap(*p))
for p in [*itertools.product(*[[{k: i} for i in raw[k]] for k in raw])]
]
for m in merged:
to_yield = dict(m)
should_yield = True
for inc in include or []:
might_add = {}
should_add = True
for k, v in inc.items():
mk = m.get(k)
if mk is None:
might_add[k] = v
elif mk != v:
should_add = False
if should_add:
to_yield.update(might_add)
# if any of these match, skip yield
for exc in exclude or []:
should_yield = should_yield and not (
all(m.get(k) == v for k, v in exc.items())
)
if should_yield:
yield to_yield
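# Usage sketch; the matrix below is a made-up example, and the imports are the
# ones expand_gh_matrix itself needs (collections, itertools).
import collections
import itertools

matrix = {
    "os": ["ubuntu", "windows"],
    "python": ["3.9", "3.10"],
    "include": [{"os": "ubuntu", "coverage": True}],
    "exclude": [{"os": "windows", "python": "3.9"}],
}
combos = list(expand_gh_matrix(matrix))
assert len(combos) == 3                                            # windows/3.9 excluded
assert all(c["coverage"] for c in combos if c["os"] == "ubuntu")   # include rule applied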
|
[
"def clean_ids(include, exclude):\n\n # deal with nothing to exclude\n if (exclude is None) or (len(exclude) < 1):\n return include\n\n # check if nested\n if pyto.util.nested.is_nested(exclude):\n nested = True\n else:\n nested = False\n include = [include]\n exclude = [exclude]\n\n # work\n ids = [list(set(inc).difference(set(ex))) \\\n for inc, ex in zip(include, exclude)]\n ids = [x for x in ids if len(x) > 0]\n \n # return in the form arguments were given \n if nested:\n return ids\n else:\n return ids[0]",
"def _make_intermediates_match_xla(branch_graphs, branch_intermediates):\n new_branch_intermediates = []\n for i, branch_graph in enumerate(branch_graphs):\n other_fakeparams = _create_fakeparams(\n branch_graph,\n sum((bi for bi in branch_intermediates\n if bi is not branch_intermediates[i]), []))\n num_preceding = sum(len(bi) for bi in branch_intermediates[:i])\n new_branch_intermediates.append(other_fakeparams[:num_preceding] +\n branch_intermediates[i] +\n other_fakeparams[num_preceding:])\n return new_branch_intermediates",
"def addExtraMocIncludes(tgen):\n\n includes = utils.toList(getattr(tgen, 'includes', []))\n for task in tgen.compiled_tasks:\n node = task.inputs[0]\n if node.is_src(): # ignore dynamic inputs from tasks like the 'moc' task\n # The generated .moc files are always in the build directory\n includes.append(node.parent.get_bld())\n tgen.includes = utils.uniqueListWithOrder(includes)",
"def skip_examined_projects(df):\n pandarallel.initialize()\n df['Examined'] = df['repo_path'].parallel_apply(lambda x: os.path.isfile(x))\n df.drop(df.loc[df['Examined'] is True].index, inplace=True)\n return df",
"def filter_annotation_instances(annotations_df, include=None, exclude=None):\n df = annotations_df.drop([\"meta\", \"pointLabels\"], axis=1)\n\n if include is not None:\n included_dfs = []\n for include_rule in include:\n df_new = df.copy()\n if \"className\" in include_rule:\n df_new = df_new[df_new[\"className\"] == include_rule[\"className\"]\n ]\n if \"attributes\" in include_rule:\n for attribute in include_rule[\"attributes\"]:\n df_new = df_new[(\n df_new[\"attributeGroupName\"] == attribute[\"groupName\"]\n ) & (df_new[\"attributeName\"] == attribute[\"name\"])]\n if \"type\" in include_rule:\n df_new = df_new[df_new[\"type\"] == include_rule[\"type\"]]\n if \"error\" in include_rule:\n df_new = df_new[df_new[\"error\"] == include_rule[\"error\"]]\n included_dfs.append(df_new)\n\n df = pd.concat(included_dfs)\n\n if exclude is not None:\n for exclude_rule in exclude:\n df_new = df.copy()\n # with pd.option_context('display.max_rows', None):\n # print(\"#\", df_new[\"className\"])\n if \"className\" in exclude_rule:\n df_new = df_new[df_new[\"className\"] == exclude_rule[\"className\"]\n ]\n if \"attributes\" in exclude_rule:\n for attribute in exclude_rule[\"attributes\"]:\n df_new = df_new[\n (df_new[\"attributeGroup\"] == attribute[\"groupName\"]) &\n (df_new[\"attributeName\"] == attribute[\"name\"])]\n if \"type\" in exclude_rule:\n df_new = df_new[df_new[\"type\"] == exclude_rule[\"type\"]]\n if \"error\" in exclude_rule:\n df_new = df_new[df_new[\"error\"] == exclude_rule[\"error\"]]\n\n df = df.drop(df_new.index)\n\n result = annotations_df.loc[df.index]\n return result",
"def merge(self):",
"def git_exclude() -> List[str]:\n return []",
"def hilite(objects, unHilite=bool, replace=bool, toggle=bool):\n pass",
"def exclude(self, metadata):\n self.exclude_columns = [] #metadata.get_columns_with_semantic_type(\"https://metadata.datadrivendiscovery.org/types/CategoricalData\")\n cols = metadata.get_columns_with_semantic_type(\"http://schema.org/DateTime\")\n timecols = metadata.get_columns_with_semantic_type(\"https://metadata.datadrivendiscovery.org/types/Time\")\n for col in cols:\n self.exclude_columns.append(col)\n for col in timecols:\n self.exclude_columns.append(col)\n\n targets = metadata.get_columns_with_semantic_type(\"https://metadata.datadrivendiscovery.org/types/SuggestedTarget\")\n for t in targets:\n if t in self.exclude_columns:\n self.exclude_columns.remove(t)",
"def _processIncludes(\n src: Union[TextIO, pathlib.Path],\n out,\n includes: List[Tuple[pathlib.Path, FileMark]],\n root: pathlib.Path,\n indentation=0,\n currentFile=\"<stream>\",\n):\n\n def _beginningOfContent(line: str) -> int:\n \"\"\"\n Return the position of the first \"content\" character.\n\n This follows the YAML spec at https://yaml.org/spec/current.html#id2519916\n\n In short, it will return the position of the first character that is not\n whitespace or one of the special \"block collection\" markers (\"-\", \"?\", and \":\")\n \"\"\"\n m = _INDENT_RE.match(line)\n if m and m.group(1) is not None:\n return m.start(1)\n else:\n return 0\n\n indentSpace = \" \" * indentation\n if hasattr(src, \"getvalue\"):\n # assume stringIO\n lines = [ln + \"\\n\" for ln in src.getvalue().split(\"\\n\")]\n else:\n # assume file stream or TextIOBase, and it has a readlines attr\n lines = src.readlines()\n for i, line in enumerate(lines):\n leadingSpace = indentSpace if i > 0 else \"\"\n m = _INCLUDE_RE.match(line)\n if m:\n # this line has an !include on it\n if m.group(1) is not None:\n out.write(leadingSpace + m.group(1))\n fName = pathlib.Path(os.path.expandvars(m.group(2)))\n path = root / fName\n if not path.exists():\n raise ValueError(\n \"The !included file, `{}` does not exist from {}!\".format(\n fName, root\n )\n )\n includes.append((fName, FileMark(currentFile, i, m.start(2), root)))\n\n with open(path, \"r\") as includedFile:\n firstCharacterPos = _beginningOfContent(line)\n newIndent = indentation + firstCharacterPos\n _processIncludes(\n includedFile,\n out,\n includes,\n path.parent,\n indentation=newIndent,\n currentFile=path,\n )\n else:\n out.write(leadingSpace + line)",
"def addCoordToExclude (self, dep2ignore):\n self.exclusions.append (MavenCoord (dep2ignore))\n return",
"def addnewincludes(inclist1,inclist2):\n #come up with better names!!\n inclist1[0] = inclist1[0] | inclist2[0]\n inclist1[1] = inclist1[1] | inclist2[1]\n inclist1[2] = addnewifdefs(inclist1[2],inclist2[2])\n return(inclist1)",
"def test_multiple_includes(self):\n np = self.compile_test('multiple_includes.sv', includes = [os.path.join(defs.TEST_SRC_PREFIX, \"include_a\"), os.path.join(defs.TEST_SRC_PREFIX, \"include_b\")])\n path = np.get_any_path(Waypoints('data_i', 'data_o'))\n self.assertTrue(not path.empty())",
"def BuildNumericPipeline(control_sheet):\n #Check columns exist\n needed_cols = [\"FeatureName\", \"TransformedName\", \\\n \"Include\", \"Raw\", \"Numeric\", \"Missing_Values\", \\\n \"Numeric_Unk_Max\", \"Numeric_Cap_Max\", \"Numeric_Cap_Min\", \"Numeric_Unk_Min\", \\\n \"Impute_Strategy\", \"Impute_Value\"]\n for need_col in needed_cols:\n if need_col not in control_sheet.columns:\n raise ValueError(f\"{need_col} must be in control_sheet.columns\")\n\n # Loop over all columns\n for ii in range(0, control_sheet.shape[0]):\n if ii == 0: # create output list\n col_transformers_list = []\n\n # Check values for boolen columns\n for check_col in [\"Include\", \"Raw\", \"Shadow_Col\", \"Numeric\"]:\n if control_sheet.iloc[ii][check_col] not in [\"Y\", \"N\", \"\"]:\n raise ValueError(f'for feature-{in_feat_ii} column-{check_col} must be in [\"Y\", \"N\", \"\"]. Is {control_sheet.iloc[ii][check_col]}')\n\n # Only include Features which are to include\n if control_sheet.iloc[ii][\"Include\"]==\"Y\" and control_sheet.iloc[ii][\"Raw\"]==\"Y\" and control_sheet.iloc[ii][\"Numeric\"]==\"Y\":\n in_feat_ii = control_sheet.iloc[ii][\"FeatureName\"]\n out_feat_ii = control_sheet.iloc[ii][\"TransformedName\"]\n \n # Start with a empty list (no transformations)\n feature_union_list = []\n\n # Removing known missings ---------------------------------------------------\n # Extract string of know missing\n str_unks = control_sheet.iloc[0][\"Missing_Values\"].split(\";\")\n num_unks = [float(unk) for unk in str_unks]\n\n # If there are some values of known missings (e.g. -1) add transformer to replace them with NAs\n if len(num_unks) > 0:\n feature_union_list.append((\"unk_levels\", IdentifyUnknowns(unk_levels=[num_unks])))\n\n\n # Applying caps and collars to values ---------------------------------------------------\n # Read in caps\n unk_max = control_sheet.iloc[ii][\"Numeric_Unk_Max\"]\n cap_max = control_sheet.iloc[ii][\"Numeric_Cap_Max\"]\n cap_min = control_sheet.iloc[ii][\"Numeric_Cap_Min\"]\n unk_min = control_sheet.iloc[ii][\"Numeric_Unk_Min\"]\n # Check if any aren't NA. 
If so add capping transformer\n if unk_max==unk_max or cap_max==cap_max or cap_min==cap_min or cap_max==cap_max: \n feature_union_list.append((\"capping\", NumericCapping(unk_max=[unk_max], cap_max=[cap_max], cap_min=[cap_min], unk_min=[unk_min])))\n\n # Impute missing values -------------------------------------------------------\n # Impute missing values\n impute_strategy = control_sheet.iloc[ii][\"Impute_Strategy\"]\n impute_value = float(control_sheet.iloc[ii][\"Impute_Value\"])\n \n # Check value of impute_strategy\n if impute_strategy not in [\"mean\", \"median\", \"most_frequent\", \"constant\"]:\n raise ValueError(f'For feature-{in_feat_ii} Impute_Strategy must be in [\"mean\", \"median\", \"most_frequent\", \"constant\"] is {impute_strategy}')\n # Check constant value given if needed\n if impute_strategy==\"constant\" and impute_value!=impute_value:\n raise ValueError(f'For feature-{in_feat_ii} if Impute_Strategy is \"constant\", impute_value can not be NA.') \n\n feature_union_list_no_impute = feature_union_list.copy() # needed to get shadow matrix\n feature_union_list.append((\"impute\", SimpleImputer(missing_values=np.nan, strategy=impute_strategy, fill_value=impute_value)))\n\n # Add Additional encodings ---------------------------------------------------\n one_hot_encoding = control_sheet.iloc[ii][\"One_Hot_Encoding\"]\n\n if one_hot_encoding==one_hot_encoding: #Is not missing\n if re.search(\"^(uniform|quantile|kmeans)\\W*(\\d+)\", one_hot_encoding.lower()):\n strategy = re.search(\"^(uniform|quantile|kmeans)\\W*(\\d+)\", one_hot_encoding.lower()).group(1)\n n_bins = int(re.search(\"^(uniform|quantile|kmeans)\\W*(\\d+)\", one_hot_encoding.lower()).group(2))\n\n feature_union_list.append((\"one_hot_encoding\", KBinsDiscretizer(n_bins=n_bins, strategy=strategy)))\n\n elif re.search(\"^(\\s*\\d+(\\.\\d*)?\\s*;)+\\s*$\", one_hot_encoding):\n bin_edges = [float(ii) for ii in one_hot_encoding.split(\";\")[:-1]]\n\n feature_union_list.append((\"one_hot_encoding\", SetBinDiscretizer(bin_edges_internal=[bin_edges], input_features=[in_feat_ii])))\n else:\n raise ValueError(f'For feature-{in_feat_ii} if one_hot_encoding must be blank or match either \"^(uniform|quantile|kmeans)\\W*(\\d+)\" or \"^(\\s*\\d+(\\.\\d*)?\\s*;)+\\s*$\" it is {one_hot_encoding}')\n\n\n # Combine all features -------------------------------------------------------\n\n if control_sheet.iloc[ii][\"Shadow_Col\"]==\"Y\":\n # If shadow column needed as second pipeline to give extra column\n pre_col_ii = FeatureUnion([(out_feat_ii, Pipeline(feature_union_list)), \\\n (out_feat_ii + \"_NA\", Pipeline([(out_feat_ii + \"_imp\", Pipeline(feature_union_list_no_impute)), \\\n (out_feat_ii + \"_shadow\", MissingIndicator(missing_values=np.nan, features=\"all\"))]) \\\n ) \\\n ])\n else:\n # If shadow column not needed use existing pipeline\n pre_col_ii = Pipeline(feature_union_list)\n \n # Add column to transformation list\n col_transformers_list.append((out_feat_ii, pre_col_ii, [in_feat_ii]))\n\n\n return(col_transformers_list)",
"def main(git_log):\n df = pd.read_csv(git_log, sep = '|', names = ['commit', 'message', 'author', 'email'])\n df['area'] = df['message'].apply(define_area)\n df['message'] = df['message'].apply(delete_prefix)\n\n # Split commits by areas\n core = df[df['area']==Area.core.value]\n tests = df[df['area']==Area.tests.value]\n build = df[df['area']==Area.build.value]\n apps = df[df['area']==Area.apps.value]\n docs = df[df['area']==Area.docs.value]\n other = df[df['area'].isna()]\n\n # Define individual contributors\n contributors = df.groupby(['author', 'email'])\n contributors = list(contributors.groups.keys())\n\n with open('release-notes.md', 'w') as f:\n f.write('# Release Notes\\n')\n\n f.write('\\n## API / ABI / Integration Changes\\n')\n f.write('\\n**API/ABI version: 1.x.**\\n')\n\n f.write('\\n## New Features and Improvements\\n')\n f.write('\\n## Important Bug Fixes\\n')\n f.write('\\n## Build\\n')\n f.write('\\n## Documentation\\n')\n\n f.write('\\n## Contributors\\n')\n for name, email in contributors:\n f.write(f'\\n{name} <{email}>')\n f.write('\\n')\n\n f.write('\\n## Changelog\\n')\n f.write('\\n<details><summary>Click to expand/collapse</summary>')\n f.write('\\n<p>')\n f.write('\\n')\n\n if not core.empty:\n f.write('\\n### Core Functionality')\n write_into_changelog(core, f)\n\n if not tests.empty:\n f.write('\\n### Unit Tests')\n write_into_changelog(tests, f)\n\n if not build.empty:\n f.write('\\n### Build Scripts (CMake, etc.)')\n write_into_changelog(build, f)\n\n if not apps.empty:\n f.write('\\n### Sample Applications')\n write_into_changelog(apps, f)\n\n if not docs.empty:\n f.write('\\n### Documentation')\n write_into_changelog(docs, f)\n\n if not other.empty:\n f.write('\\n### Other')\n write_into_changelog(other, f)\n\n f.write('\\n</p>')\n f.write('\\n</details>')",
"def driverpreprocess(repo, ms, wctx, labels=None):\n return True",
"def test_include_exclude_basic(self):\n self.root = Path(\"testfiles/select2\")\n self.ParseTest([(\"--include\", \"testfiles/select2/3/3sub3/3sub3sub2/3sub3sub2_file.txt\"),\n (\"--exclude\", \"testfiles/select2/3/3sub3/3sub3sub2\"),\n (\"--include\", \"testfiles/select2/3/3sub2/3sub2sub2\"),\n (\"--include\", \"testfiles/select2/3/3sub3\"),\n (\"--exclude\", \"testfiles/select2/3/3sub1\"),\n (\"--exclude\", \"testfiles/select2/2/2sub1/2sub1sub3\"),\n (\"--exclude\", \"testfiles/select2/2/2sub1/2sub1sub2\"),\n (\"--include\", \"testfiles/select2/2/2sub1\"),\n (\"--exclude\", \"testfiles/select2/1/1sub3/1sub3sub2\"),\n (\"--exclude\", \"testfiles/select2/1/1sub3/1sub3sub1\"),\n (\"--exclude\", \"testfiles/select2/1/1sub2/1sub2sub3\"),\n (\"--include\", \"testfiles/select2/1/1sub2/1sub2sub1\"),\n (\"--exclude\", \"testfiles/select2/1/1sub1/1sub1sub3/1sub1sub3_file.txt\"),\n (\"--exclude\", \"testfiles/select2/1/1sub1/1sub1sub2\"),\n (\"--exclude\", \"testfiles/select2/1/1sub2\"),\n (\"--include\", \"testfiles/select2/1.py\"),\n (\"--include\", \"testfiles/select2/3\"),\n (\"--include\", \"testfiles/select2/1\"),\n (\"--exclude\", \"testfiles/select2/**\")],\n self.expected_restored_tree)",
"def make_exclude():\n # Simple utility to make IPython paths more readably, we need a lot of\n # these below\n ipjoin = lambda *paths: pjoin('IPython', *paths)\n\n exclusions = [ipjoin('external'),\n ipjoin('quarantine'),\n ipjoin('deathrow'),\n # This guy is probably attic material\n ipjoin('testing', 'mkdoctests'),\n # Testing inputhook will need a lot of thought, to figure out\n # how to have tests that don't lock up with the gui event\n # loops in the picture\n ipjoin('lib', 'inputhook'),\n # Config files aren't really importable stand-alone\n ipjoin('config', 'profile'),\n # The notebook 'static' directory contains JS, css and other\n # files for web serving. Occasionally projects may put a .py\n # file in there (MathJax ships a conf.py), so we might as\n # well play it safe and skip the whole thing.\n ipjoin('html', 'static'),\n ipjoin('html', 'fabfile'),\n ]\n if not have['sqlite3']:\n exclusions.append(ipjoin('core', 'tests', 'test_history'))\n exclusions.append(ipjoin('core', 'history'))\n if not have['wx']:\n exclusions.append(ipjoin('lib', 'inputhookwx'))\n \n if 'IPython.kernel.inprocess' not in sys.argv:\n exclusions.append(ipjoin('kernel', 'inprocess'))\n \n # FIXME: temporarily disable autoreload tests, as they can produce\n # spurious failures in subsequent tests (cythonmagic).\n exclusions.append(ipjoin('extensions', 'autoreload'))\n exclusions.append(ipjoin('extensions', 'tests', 'test_autoreload'))\n\n # We do this unconditionally, so that the test suite doesn't import\n # gtk, changing the default encoding and masking some unicode bugs.\n exclusions.append(ipjoin('lib', 'inputhookgtk'))\n exclusions.append(ipjoin('kernel', 'zmq', 'gui', 'gtkembed'))\n\n #Also done unconditionally, exclude nbconvert directories containing\n #config files used to test. 
Executing the config files with iptest would\n #cause an exception.\n exclusions.append(ipjoin('nbconvert', 'tests', 'files'))\n exclusions.append(ipjoin('nbconvert', 'exporters', 'tests', 'files'))\n\n # These have to be skipped on win32 because the use echo, rm, cd, etc.\n # See ticket https://github.com/ipython/ipython/issues/87\n if sys.platform == 'win32':\n exclusions.append(ipjoin('testing', 'plugin', 'test_exampleip'))\n exclusions.append(ipjoin('testing', 'plugin', 'dtexample'))\n\n if not have['pexpect']:\n exclusions.extend([ipjoin('lib', 'irunner'),\n ipjoin('lib', 'tests', 'test_irunner'),\n ipjoin('terminal', 'console'),\n ])\n\n if not have['zmq']:\n exclusions.append(ipjoin('lib', 'kernel'))\n exclusions.append(ipjoin('kernel'))\n exclusions.append(ipjoin('qt'))\n exclusions.append(ipjoin('html'))\n exclusions.append(ipjoin('consoleapp.py'))\n exclusions.append(ipjoin('terminal', 'console'))\n exclusions.append(ipjoin('parallel'))\n elif not have['qt'] or not have['pygments']:\n exclusions.append(ipjoin('qt'))\n\n if not have['pymongo']:\n exclusions.append(ipjoin('parallel', 'controller', 'mongodb'))\n exclusions.append(ipjoin('parallel', 'tests', 'test_mongodb'))\n\n if not have['matplotlib']:\n exclusions.extend([ipjoin('core', 'pylabtools'),\n ipjoin('core', 'tests', 'test_pylabtools'),\n ipjoin('kernel', 'zmq', 'pylab'),\n ])\n\n if not have['cython']:\n exclusions.extend([ipjoin('extensions', 'cythonmagic')])\n exclusions.extend([ipjoin('extensions', 'tests', 'test_cythonmagic')])\n\n if not have['oct2py']:\n exclusions.extend([ipjoin('extensions', 'octavemagic')])\n exclusions.extend([ipjoin('extensions', 'tests', 'test_octavemagic')])\n\n if not have['tornado']:\n exclusions.append(ipjoin('html'))\n\n if not have['jinja2']:\n exclusions.append(ipjoin('html', 'notebookapp'))\n\n if not have['rpy2'] or not have['numpy']:\n exclusions.append(ipjoin('extensions', 'rmagic'))\n exclusions.append(ipjoin('extensions', 'tests', 'test_rmagic'))\n\n if not have['azure']:\n exclusions.append(ipjoin('html', 'services', 'notebooks', 'azurenbmanager'))\n\n if not all((have['pygments'], have['jinja2'], have['sphinx'])):\n exclusions.append(ipjoin('nbconvert'))\n\n # This is needed for the reg-exp to match on win32 in the ipdoctest plugin.\n if sys.platform == 'win32':\n exclusions = [s.replace('\\\\','\\\\\\\\') for s in exclusions]\n \n # check for any exclusions that don't seem to exist:\n parent, _ = os.path.split(get_ipython_package_dir())\n for exclusion in exclusions:\n if exclusion.endswith(('deathrow', 'quarantine')):\n # ignore deathrow/quarantine, which exist in dev, but not install\n continue\n fullpath = pjoin(parent, exclusion)\n if not os.path.exists(fullpath) and not glob.glob(fullpath + '.*'):\n warn(\"Excluding nonexistent file: %r\" % exclusion)\n\n return exclusions",
"def filter_images_by_comments(\n annotations_df,\n include_unresolved_comments=True,\n include_resolved_comments=False,\n include_without_comments=False\n):\n images = set()\n df = annotations_df[annotations_df[\"type\"] == \"comment\"]\n if include_unresolved_comments:\n images.update(\n df[df[\"commentResolved\"] == False][\"imageName\"].dropna().unique()\n )\n if include_resolved_comments:\n images.update(\n df[df[\"commentResolved\"] == True][\"imageName\"].dropna().unique()\n )\n if include_without_comments:\n all_images = set(annotations_df[\"imageName\"].dropna().unique())\n with_comments = set(df[\"imageName\"].dropna().unique())\n images.update(all_images - with_comments)\n\n return list(images)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Generate a very explicit env from a lock.
|
def lock_to_env(lock: Path, env: Path):
env.write_text(
P.ENV_TMPL.render(
deps=lock.read_text(encoding="utf-8")
.split(EXPLICIT)[1]
.strip()
.splitlines()
)
)
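# Rough sketch of the parsing step only. EXPLICIT is assumed here to be the
# "@EXPLICIT" marker of a conda explicit lockfile; P.ENV_TMPL is a
# project-specific template that is not reproduced, and the URLs below are
# placeholders, not real lock entries.
EXPLICIT = "@EXPLICIT"
lock_text = (
    "# generated by conda-lock\n"
    "@EXPLICIT\n"
    "https://conda.anaconda.org/conda-forge/noarch/pkg-a-1.0-0.conda\n"
    "https://conda.anaconda.org/conda-forge/linux-64/pkg-b-2.0-0.conda\n"
)
deps = lock_text.split(EXPLICIT)[1].strip().splitlines()
print(deps)  # the dependency URLs that get rendered into the env template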
|
[
"def fixture_env_object(env_manager):\n env = Environment(\n env_id=COMMIT_HASH,\n created=multiprocessing.Event(),\n creating=multiprocessing.Event(),\n location=os.path.join(env_manager.base_dir, COMMIT_HASH),\n site_packages=os.path.join(env_manager.base_dir, COMMIT_HASH, VENV_SITE_PKGS),\n )\n return env",
"def BuildEnv(env=None, inherit_env=[]):\n if env == None:\n env = {}\n fixed_env = env.copy()\n for varname in inherit_env:\n fixed_env[varname] = os.environ[varname]\n if sys.platform == \"win32\":\n # Win32 requires certain environment variables be present\n for k in (\"COMSPEC\", \"SystemRoot\"):\n if k in os.environ and k not in fixed_env:\n fixed_env[k] = os.environ[k]\n return fixed_env",
"def expanded_env_dict():\n return generate_expanded_env_dict()",
"def create_environment(env_name):\n env = gym.make(env_name)\n\n # Fix for certain OpenAI Gym environments,\n # requiring to be reset prior to initial rendering\n if env_name in GYM_ENVS['classical_control']:\n env.reset()\n\n return env",
"def args_to_env(args):\n result = Env()\n result.set_env(args.build, args.host, args.target)\n return result",
"def build_env_name(task, shared_memory, use_image_obs):\n del task\n env_name = \"BlockPushDiscontinuous\"\n\n if use_image_obs:\n env_name = env_name + \"Rgb\"\n\n if shared_memory:\n env_name = \"Shared\" + env_name\n\n env_name = env_name + \"-v0\"\n\n return env_name",
"def _env_get(target):\n if target == 'vagrant':\n # vagrant specific settings\n env.user = 'vagrant'\n raw_ssh_config = subprocess.Popen([\"vagrant\", \"ssh-config\"], stdout=subprocess.PIPE).communicate()[0]\n ssh_config = dict([l.strip().split() for l in raw_ssh_config.split(\"\\n\") if l])\n env.user = ssh_config[\"User\"]\n env.hosts = [\"127.0.0.1:%s\" % (ssh_config[\"Port\"])]\n env.host_string = env.hosts[0] # We need to explicitly specify this for sudo and run.\n env.key_filename = ssh_config[\"IdentityFile\"]\n return\n elif target == 'localhost':\n # all environment variables relating to a developer's localhost\n env.project_home = os.getenv(\"PROJECT_HOME\")\n env.project_path = '%(project_home)s/%(project_name)s' % env\n env.user = env.local_user\n return\n elif target not in list(env.project_sites.viewkeys()):\n # handle environment that isn't specified\n print (\"Oops. There's no such site. try `fab _env_get:dev` or `fab env_get:prod`\")\n return\n\n # handle environment that was specified\n env.user = 'web'\n env.hosts = [env.project_sites[target]['NAME']]\n env.host_string = env.hosts[0]\n env.path = '/var/www/%s/%s' % (target, env.project_name)\n env.path_releases = '/var/www/%s/%s/releases' % (target, env.project_name)\n env.path_release_current = '/var/www/%s/%s/releases/current' % (target, env.project_name)\n env.project_path = '%(path_release_current)s/%(project_name)s' % env # slash prepended",
"def svn_fs_generate_lock_token(*args) -> \"char const **\":\n return _fs.svn_fs_generate_lock_token(*args)",
"def env_creator(config: dict):\n \n from gridworld import MultiAgentEnv\n\n return MultiAgentEnv(**config)",
"def _build_environment(func, bound_args):\n spec = [(\"arg\" + str(i), t) for i, t in enumerate(bound_args)]\n\n exec_glbls = dict(spec=spec)\n exec_glbls[\"jitclass\"] = jitclass\n assign_env = \"; \".join(f\"self.arg{i} = arg{i}\" for i, t in enumerate(bound_args))\n env_args = \", \".join(f\"arg{i}\" for i, t in enumerate(bound_args))\n src = f\"\"\"\n@jitclass(spec)\nclass Environment():\n def __init__(self, {env_args}):\n {assign_env}\n pass\n\"\"\"\n exec_in_file(f\"{func.__name__}_Environment_{id(func)}\", src, exec_glbls)\n return exec_glbls[\"Environment\"]",
"def get_env():\n env.output_prefix = False\n run('export | sed -e \"s/declare -x/export/g\"')",
"def generate_env(self):\n for key in sorted(list(self.spec.keys())):\n if self.spec[key]['type'] in (dict, list):\n value = f\"\\'{json.dumps(self.spec[key].get('example', ''))}\\'\"\n else:\n value = f\"{self.spec[key].get('example', '')}\"\n print(f\"export {self.env_prefix}_{key.upper()}={value}\")",
"def setup_environment():\n global GIVEN_ENV\n GIVEN_ENV['env'] = env.copy()",
"def env(name, default=None):\n name = name.upper()\n mname = 'MORPH_' + name.upper()\n return os.environ.get(mname, os.environ.get(name, default))",
"def _prepare_environment(self):\n env = {'HOME': self._make_mapping(HOME)}\n\n return env",
"def make_env(args):\r\n scenario = scenarios.load(args.env_name + \".py\").Scenario()\r\n world = scenario.make_world()\r\n done_callback = None\r\n\r\n env = MultiAgentEnv(\r\n world,\r\n reset_callback=scenario.reset_world,\r\n reward_callback=scenario.reward,\r\n observation_callback=scenario.observation,\r\n done_callback=done_callback)\r\n\r\n assert env.discrete_action_space is False, \"For cont. action, this flag must be False\"\r\n\r\n return env",
"def BuildEnv(self, unused_configuration=None):\n return os.environ.copy()",
"def patch_using_env(self):\n if self.cred_properties:\n credentials_config = self.cred_properties\n\n user = getenv(\"HERE_USER_ID\") or credentials_config[\"user\"]\n client = getenv(\"HERE_CLIENT_ID\") or credentials_config[\"client\"]\n key = (\n getenv(\"HERE_ACCESS_KEY_ID\")\n or getenv(\"HERE_ACCESS_KEY\")\n or credentials_config[\"key\"]\n )\n secret = (\n getenv(\"HERE_ACCESS_KEY_SECRET\")\n or getenv(\"HERE_ACCESS_SECRET\")\n or credentials_config[\"secret\"]\n )\n endpoint = (\n getenv(\"HERE_TOKEN_ENDPOINT_URL\")\n or getenv(\"HERE_TOKEN_ENDPOINT\")\n or credentials_config[\"endpoint\"]\n )\n credentials_config[\"user\"] = user\n credentials_config[\"client\"] = client\n credentials_config[\"key\"] = key\n credentials_config[\"secret\"] = secret\n credentials_config[\"endpoint\"] = endpoint",
"def environInject(shellName):"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test to disable all the workers in the modjk load balancer
|
def test_worker_disabled(list_not_str):
name = "loadbalancer"
ret = {"name": name, "result": False, "comment": "", "changes": {}}
ret.update({"comment": list_not_str})
assert modjk.worker_disabled(name, "app1") == ret
|
[
"def test_get_all_workers(self):\n print(self.api.get_all_workers())\n pass",
"def test_disable_agent(self):\n pass",
"def testE_WhiteListBlackList(self):\n nRunning = getCondorRunningJobs(self.user)\n self.assertEqual(nRunning, 0, \"User currently has %i running jobs. Test will not continue\" % (nRunning))\n\n workloadName = \"basicWorkload\"\n myThread = threading.currentThread()\n workload = self.createTestWorkload()\n config = self.getConfig()\n changeState = ChangeState(config)\n\n nSubs = 2\n nJobs = 10\n cacheDir = os.path.join(self.testDir, 'CacheDir')\n\n jobGroupList = self.createJobGroups(nSubs = nSubs, nJobs = nJobs,\n task = workload.getTask(\"ReReco\"),\n workloadSpec = os.path.join(self.testDir,\n 'workloadTest',\n workloadName),\n bl = ['T2_US_Florida', 'T2_TW_Taiwan', 'T1_CH_CERN'])\n\n for group in jobGroupList:\n changeState.propagate(group.jobs, 'created', 'new')\n\n \n\n\n jobSubmitter = JobSubmitterPoller(config = config)\n\n # Actually run it\n jobSubmitter.algorithm()\n\n if os.path.isdir('CacheDir'):\n shutil.rmtree('CacheDir')\n shutil.copytree('%s' %self.testDir, os.path.join(os.getcwd(), 'CacheDir'))\n\n\n # Check to make sure we have running jobs\n nRunning = getCondorRunningJobs(self.user)\n self.assertEqual(nRunning, nJobs * nSubs)\n\n getJobsAction = self.daoFactory(classname = \"Jobs.GetAllJobs\")\n result = getJobsAction.execute(state = 'Executing', jobType = \"Processing\")\n self.assertEqual(len(result), nSubs * nJobs)\n\n # All jobs should be at UCSD\n submitFile = None\n for file in os.listdir(config.JobSubmitter.submitDir):\n if re.search('submit', file):\n submitFile = file\n self.assertTrue(submitFile != None)\n #submitFile = os.listdir(config.JobSubmitter.submitDir)[0]\n self.checkJDL(config = config, cacheDir = cacheDir,\n submitFile = submitFile, site = 'T2_US_UCSD')\n\n\n # Now clean-up\n command = ['condor_rm', self.user]\n pipe = Popen(command, stdout = PIPE, stderr = PIPE, shell = False)\n pipe.communicate()\n\n\n\n\n\n\n # Run again and test the whiteList\n jobGroupList = self.createJobGroups(nSubs = nSubs, nJobs = nJobs,\n task = workload.getTask(\"ReReco\"),\n workloadSpec = os.path.join(self.testDir,\n 'workloadTest',\n workloadName),\n wl = ['T2_US_UCSD'])\n\n for group in jobGroupList:\n changeState.propagate(group.jobs, 'created', 'new')\n\n nRunning = getCondorRunningJobs(self.user)\n self.assertEqual(nRunning, 0, \"User currently has %i running jobs. 
Test will not continue\" % (nRunning))\n\n\n jobSubmitter = JobSubmitterPoller(config = config)\n\n # Actually run it\n jobSubmitter.algorithm()\n\n if os.path.isdir('CacheDir'):\n shutil.rmtree('CacheDir')\n shutil.copytree('%s' %self.testDir, os.path.join(os.getcwd(), 'CacheDir'))\n\n\n # Check to make sure we have running jobs\n nRunning = getCondorRunningJobs(self.user)\n self.assertEqual(nRunning, nJobs * nSubs)\n\n # You'll have jobs from the previous run still in the database\n result = getJobsAction.execute(state = 'Executing', jobType = \"Processing\")\n self.assertEqual(len(result), nSubs * nJobs * 2)\n\n # All jobs should be at UCSD\n submitFile = None\n for file in os.listdir(config.JobSubmitter.submitDir):\n if re.search('submit', file):\n submitFile = file\n self.assertTrue(submitFile != None)\n self.checkJDL(config = config, cacheDir = cacheDir,\n submitFile = submitFile, site = 'T2_US_UCSD', noIndex = True)\n\n\n # Now clean-up\n command = ['condor_rm', self.user]\n pipe = Popen(command, stdout = PIPE, stderr = PIPE, shell = False)\n pipe.communicate()\n\n\n\n\n\n\n # Run again with an invalid whitelist\n # NOTE: After this point, the original two sets of jobs will be executing\n # The rest of the jobs should move to submitFailed\n jobGroupList = self.createJobGroups(nSubs = nSubs, nJobs = nJobs,\n task = workload.getTask(\"ReReco\"),\n workloadSpec = os.path.join(self.testDir,\n 'workloadTest',\n workloadName),\n wl = ['T2_US_Namibia'])\n\n for group in jobGroupList:\n changeState.propagate(group.jobs, 'created', 'new')\n\n nRunning = getCondorRunningJobs(self.user)\n self.assertEqual(nRunning, 0, \"User currently has %i running jobs. Test will not continue\" % (nRunning))\n\n\n jobSubmitter = JobSubmitterPoller(config = config)\n\n # Actually run it\n jobSubmitter.algorithm()\n\n\n # Check to make sure we have running jobs\n #nRunning = getCondorRunningJobs(self.user)\n #self.assertEqual(nRunning, 0)\n\n # Jobs should be gone\n getJobsAction = self.daoFactory(classname = \"Jobs.GetAllJobs\")\n result = getJobsAction.execute(state = 'Executing', jobType = \"Processing\")\n self.assertEqual(len(result), nSubs * nJobs * 2)\n result = getJobsAction.execute(state = 'SubmitFailed', jobType = \"Processing\")\n self.assertEqual(len(result), nSubs * nJobs)\n\n\n\n # Now clean-up\n command = ['condor_rm', self.user]\n pipe = Popen(command, stdout = PIPE, stderr = PIPE, shell = False)\n pipe.communicate()\n\n\n\n\n\n\n # Run again with all sites blacklisted\n jobGroupList = self.createJobGroups(nSubs = nSubs, nJobs = nJobs,\n task = workload.getTask(\"ReReco\"),\n workloadSpec = os.path.join(self.testDir,\n 'workloadTest',\n workloadName),\n bl = self.sites)\n\n for group in jobGroupList:\n changeState.propagate(group.jobs, 'created', 'new')\n\n nRunning = getCondorRunningJobs(self.user)\n self.assertEqual(nRunning, 0, \"User currently has %i running jobs. 
Test will not continue\" % (nRunning))\n\n\n jobSubmitter = JobSubmitterPoller(config = config)\n\n # Actually run it\n jobSubmitter.algorithm()\n\n\n # Check to make sure we have running jobs\n nRunning = getCondorRunningJobs(self.user)\n self.assertEqual(nRunning, 0)\n\n # Jobs should be gone\n getJobsAction = self.daoFactory(classname = \"Jobs.GetAllJobs\")\n result = getJobsAction.execute(state = 'Executing', jobType = \"Processing\")\n self.assertEqual(len(result), nSubs * nJobs * 2)\n result = getJobsAction.execute(state = 'SubmitFailed', jobType = \"Processing\")\n self.assertEqual(len(result), nSubs * nJobs * 2)\n\n\n\n # Now clean-up\n command = ['condor_rm', self.user]\n pipe = Popen(command, stdout = PIPE, stderr = PIPE, shell = False)\n pipe.communicate()\n\n del jobSubmitter\n return",
"def __shutdown_workers__():\n\n #shutdown the worker executors\n for service_id in __replication_workers_executor__.keys():\n __replication_workers_executor__[service_id].shutdown(wait=True)",
"def discovery_disable_all(status=0): \n logger.info('Disabling all network discoveries')\n druleids = zapi.drule.get(output=[ 'druleid', 'iprange', 'name', 'proxy_hostid', 'status' ],\n selectDChecks='extend', filter={ 'status': 0 })\n if ( druleids.__len__() == 0 ):\n logger.info('Done')\n return\n bar = ProgressBar(maxval=druleids.__len__(),widgets=[Percentage(), ReverseBar(), ETA(), RotatingMarker(), Timer()]).start()\n i = 0\n for x in druleids:\n params_disable = {\n 'druleid': x['druleid'],\n 'iprange': x['iprange'],\n 'name': x['name'],\n 'dchecks': x['dchecks'],\n 'status': 1\n }\n out = zapi.drule.update(**params_disable)\n logger.echo = False\n if out:\n logger.debug('\\tNew status: %s (%s) --> %d' % (x['name'],out['druleids'],status))\n else:\n logger.warning('\\tFAILED to change status: %s (%s) --> %d' % (x['name'],out['druleids'],status))\n i += 1\n bar.update(i)\n logger.echo = True\n bar.finish()\n logger.info('Done')\n return",
"async def do_check_workers(self, workers):\n raise NotImplementedError",
"def test_get_all_worker_nodes(self, setup_params):\n node_api_obj = setup_params[\"node_api_obj\"]\n # Get all worker nodes\n api_response = node_api_obj.get_worker_nodes()\n node_count = len(api_response.items)\n logger.info(\"{} worker nodes returned in the list\".format(node_count))\n if len(api_response.items) > 0:\n for worker_node in api_response.items:\n assert \"node-role.kubernetes.io/worker\" in worker_node.metadata.labels.keys()\n assert api_response.kind == \"NodeList\"",
"def testTurbiniaWorkerJobsLists(self, _, __, ___, ____, _____, mock_config):\n mock_config.PSQ_TOPIC = 'foo'\n manager.JobsManager._job_classes = {}\n manager.JobsManager.RegisterJob(manager_test.TestJob1)\n manager.JobsManager.RegisterJob(manager_test.TestJob2)\n manager.JobsManager.RegisterJob(manager_test.TestJob3)\n\n # Check denylist\n TurbiniaPsqWorker(['testjob1'], [])\n self.assertListEqual(\n sorted(list(manager.JobsManager.GetJobNames())),\n ['testjob2', 'testjob3'])\n manager.JobsManager.RegisterJob(manager_test.TestJob1)\n\n # Check denylist with DISABLED_JOBS config\n mock_config.DISABLED_JOBS = ['testjob1']\n TurbiniaPsqWorker(['testjob2'], [])\n self.assertListEqual(list(manager.JobsManager.GetJobNames()), ['testjob3'])\n manager.JobsManager.RegisterJob(manager_test.TestJob1)\n manager.JobsManager.RegisterJob(manager_test.TestJob2)\n mock_config.DISABLED_JOBS = ['']\n\n # Check allowlist\n TurbiniaPsqWorker([], ['testjob1'])\n self.assertListEqual(list(manager.JobsManager.GetJobNames()), ['testjob1'])\n manager.JobsManager.RegisterJob(manager_test.TestJob2)\n manager.JobsManager.RegisterJob(manager_test.TestJob3)\n\n # Check allowlist of item in DISABLED_JOBS config\n mock_config.DISABLED_JOBS = ['testjob1', 'testjob2']\n TurbiniaPsqWorker([], ['testjob1'])\n self.assertListEqual(list(manager.JobsManager.GetJobNames()), ['testjob1'])\n manager.JobsManager.RegisterJob(manager_test.TestJob2)\n manager.JobsManager.RegisterJob(manager_test.TestJob3)",
"def test_internal_config(start_connected_longer_cluster):\n cluster = start_connected_longer_cluster\n worker = cluster.add_node()\n cluster.wait_for_nodes()\n\n cluster.remove_node(worker)\n time.sleep(1)\n assert ray.global_state.cluster_resources()[\"CPU\"] == 2\n\n time.sleep(2)\n assert ray.global_state.cluster_resources()[\"CPU\"] == 1",
"def disable_autofailover(self):\n for server in self.servers:\n rest = RestConnection(server)\n rest.update_autofailover_settings(False, 120)",
"def _reset_workers(self):\n for rpc_client in self._all_rpc_clients:\n rpc_client.reset()",
"def label_worker_nodes(self):\n # to use the cache dropping pod, worker nodes need to be labeled.\n log.info(\"Labeling the worker nodes for cache-dropping enable.\")\n try:\n helpers.label_worker_node(\n self.worker_nodes, label_key=BMO_LABEL, label_value=\"yes\"\n )\n except CommandFailed:\n # this is probably because of the nodes are already labeled, so,\n # checking if nodes are labeled and continue anyway.\n result = self.pod_obj.exec_oc_cmd(f\"get node -l {BMO_LABEL}\")\n found = [node for node in self.worker_nodes if re.search(node, result)]\n if len(found) == len(self.worker_nodes):\n log.info(\"All worker nodes are labeled\")\n else:\n log.warning(\"Labeling nodes failed, Not all workers node are labeled !\")",
"def check_workers(config):\n assert isinstance(config, ClusterConfiguration)\n config.run_on_all_aggregators(lambda rh: check_worker(config, rh))\n config.run_on_all_workers(lambda rh: check_worker(config, rh))",
"def disable_compute_neurons(self): \n self.compute_critical_neurons = False",
"def _wait_workers(self):\n self.client = get_client(self.master_address)\n logging.debug(\"client scheduler info: {}\".format(self.client.scheduler_info()))\n if int(self.world_size) <= 1:\n self.worker_portion = 1\n worker_count_min = int(self.world_size * self.worker_portion)\n\n for _ in range(100):\n time.sleep(1)\n n_workers = len(self.client.scheduler_info()[\"workers\"])\n logging.info(\"Accessed Workers: {}\".format(n_workers))\n if n_workers >= worker_count_min:\n workers = self.client.scheduler_info()[\"workers\"]\n workers_list = []\n workers_port = {}\n for k, _ in workers.items():\n workers_list.append(k)\n (ip, port) = k.replace(\"//\", \"\").split(\":\")[1:]\n if ip in workers_port:\n workers_port[ip].append(port)\n else:\n workers_port[ip] = [port]\n os.environ[\"vega_workers_list\"] = json.dumps(workers_port)\n logging.info(\"worker list: {}\".format(workers_list))\n slave_ips = list(set([item[6:].split(\":\")[0] for item in workers_list]))\n slave_ips.remove(General.cluster.master_ip)\n General.cluster.salves = slave_ips\n return 1\n return 0",
"def test_distributed_stop_with_stopping_state(self):\n\n class TestUser(User):\n @task\n def my_task(self):\n pass\n\n with mock.patch(\"locust.runners.WORKER_REPORT_INTERVAL\", new=0.3):\n master_env = Environment(user_classes=[TestUser])\n master = master_env.create_master_runner(\"*\", 0)\n\n workers = []\n for i in range(3):\n worker_env = Environment(user_classes=[TestUser])\n worker = worker_env.create_worker_runner(\"127.0.0.1\", master.server.port)\n workers.append(worker)\n\n for worker in workers:\n worker.send_message(\"client_stopped\", None)\n\n sleep(1)\n for worker in workers:\n self.assertEqual(STATE_INIT, worker.state, \"Worker sent a client_stopped, should be ready once stopped\")\n self.assertEqual(STATE_STOPPED, master.state)",
"def test_servicedef_disable(self):\n self.check_result('W0002',\n 'http://support.riverbed.com/apis/test/1.0',\n Result.DISABLED,\n 'tags:\\n'\n ' relint-disable: [ W0002 ] ')",
"def test_list_all_response_descriptor_machines_workers_worker_worker_resource(self):\n pass",
"def test_disable_job(schedule):\n schedule.opts.update({\"schedule\": {\"name\": {\"enabled\": \"foo\"}}, \"pillar\": {}})\n Schedule.disable_job(schedule, \"name\")\n assert not schedule.opts[\"schedule\"][\"name\"][\"enabled\"]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test to recover all the workers in the modjk load balancer
|
def test_worker_recover(list_not_str):
name = "loadbalancer"
ret = {"name": name, "result": False, "comment": "", "changes": {}}
ret.update({"comment": list_not_str})
assert modjk.worker_recover(name, "app1") == ret
|
[
"def test_get_all_workers(self):\n print(self.api.get_all_workers())\n pass",
"def _wait_workers(self):\n self.client = get_client(self.master_address)\n logging.debug(\"client scheduler info: {}\".format(self.client.scheduler_info()))\n if int(self.world_size) <= 1:\n self.worker_portion = 1\n worker_count_min = int(self.world_size * self.worker_portion)\n\n for _ in range(100):\n time.sleep(1)\n n_workers = len(self.client.scheduler_info()[\"workers\"])\n logging.info(\"Accessed Workers: {}\".format(n_workers))\n if n_workers >= worker_count_min:\n workers = self.client.scheduler_info()[\"workers\"]\n workers_list = []\n workers_port = {}\n for k, _ in workers.items():\n workers_list.append(k)\n (ip, port) = k.replace(\"//\", \"\").split(\":\")[1:]\n if ip in workers_port:\n workers_port[ip].append(port)\n else:\n workers_port[ip] = [port]\n os.environ[\"vega_workers_list\"] = json.dumps(workers_port)\n logging.info(\"worker list: {}\".format(workers_list))\n slave_ips = list(set([item[6:].split(\":\")[0] for item in workers_list]))\n slave_ips.remove(General.cluster.master_ip)\n General.cluster.salves = slave_ips\n return 1\n return 0",
"def test_failed_worker_pod(self):\n task_d = _TaskDispatcher({\"f\": (0, 10)}, {}, {}, 1, 1)\n task_d.recover_tasks = MagicMock()\n instance_manager = InstanceManager(\n task_d,\n job_name=\"test-failed-worker-pod-%d-%d\"\n % (int(time.time()), random.randint(1, 101)),\n image_name=\"ubuntu:18.04\",\n worker_command=[\"/bin/bash\"],\n worker_args=[\"-c\", \"badcommand\"],\n namespace=\"default\",\n num_workers=3,\n restart_policy=\"Never\",\n )\n instance_manager.start_workers()\n max_check_num = 20\n for _ in range(max_check_num):\n time.sleep(3)\n counters = instance_manager.get_worker_counter()\n if counters[\"Failed\"] == 3:\n break\n\n instance_manager.stop_relaunch_and_remove_workers()\n for _ in range(max_check_num):\n time.sleep(3)\n counters = instance_manager.get_worker_counter()\n if not counters:\n break\n task_d.recover_tasks.assert_has_calls(\n [call(0), call(1), call(2)], any_order=True\n )",
"def test_01_read_all_workers(self):\n workers = self.client.get(WORKER_PATH)['results']\n for worker in workers:\n for key, val in worker.items():\n with self.subTest(key=key):\n self.assertIsNotNone(val)\n self.worker.update(choice(workers))",
"def test_reconnect_all(self):\n pass",
"def test_load_response_descriptor_machines_workers_worker_worker_resource(self):\n pass",
"async def do_check_workers(self, workers):\n raise NotImplementedError",
"def test_crash_only_one_worker_during_sampling_but_recreate(self):\n config = (\n pg.PGConfig()\n .rollouts(\n env_runner_cls=ForwardHealthCheckToEnvWorker,\n num_rollout_workers=2,\n rollout_fragment_length=10,\n num_envs_per_worker=3,\n # Re-create failed workers (then continue).\n recreate_failed_workers=True,\n )\n .training(train_batch_size=60)\n .environment(\n env=CartPoleCrashing,\n env_config={\n \"crash_after_n_steps\": 10,\n # Crash prob=100%, so test is deterministic.\n \"p_crash\": 1.0,\n # Only crash on worker with index 2.\n \"crash_on_worker_indices\": [2],\n # Make sure nothing happens during pre-checks.\n \"skip_env_checking\": True,\n },\n )\n )\n # Pre-checking disables, so building the Algorithm is save.\n algo = config.build()\n # Try to re-create for infinite amount of times.\n # The worker recreation/ignore tolerance used to be hard-coded to 3, but this\n # has now been\n for _ in range(10):\n # Expect some errors being logged here, but in general, should continue\n # as we recover from all worker failures.\n algo.train()\n # One worker has been removed.\n self.assertEqual(algo.workers.num_healthy_remote_workers(), 1)\n algo.stop()",
"def test_failover(self):\n pass",
"def test_get_all_worker_nodes(self, setup_params):\n node_api_obj = setup_params[\"node_api_obj\"]\n # Get all worker nodes\n api_response = node_api_obj.get_worker_nodes()\n node_count = len(api_response.items)\n logger.info(\"{} worker nodes returned in the list\".format(node_count))\n if len(api_response.items) > 0:\n for worker_node in api_response.items:\n assert \"node-role.kubernetes.io/worker\" in worker_node.metadata.labels.keys()\n assert api_response.kind == \"NodeList\"",
"def __shutdown_workers__():\n\n #shutdown the worker executors\n for service_id in __replication_workers_executor__.keys():\n __replication_workers_executor__[service_id].shutdown(wait=True)",
"def test_eperf_rebalance(self):\n self.spec(\"test_eperf_rebalance\")\n\n self.gated_start(self.input.clients)\n if self.parami(\"load_phase\", 0):\n self.load_phase(self.parami(\"num_nodes\", PerfDefaults.num_nodes))\n\n num_clients = self.parami(\"num_clients\", len(self.input.clients) or 1)\n\n if not self.parami(\"nru_task\", PerfDefaults.nru_task) and \\\n not self.parami(\"reb_no_fg\", PerfDefaults.reb_no_fg):\n rebalance_after = self.parami(\"rebalance_after\",\n PerfDefaults.rebalance_after)\n self.level_callbacks = [('cur-creates', rebalance_after // num_clients,\n getattr(self, \"latched_rebalance\"))]\n\n reb_cons_view = self.parami(\"reb_cons_view\", PerfDefaults.reb_cons_view)\n if reb_cons_view != PerfDefaults.reb_cons_view:\n self.set_reb_cons_view(self.input.servers[0],\n disable=(reb_cons_view == 0))\n\n reb_index_waiting = self.parami(\"reb_index_waiting\", PerfDefaults.reb_index_waiting)\n if reb_index_waiting != PerfDefaults.reb_index_waiting:\n self.set_reb_index_waiting(self.input.servers[0],\n disable=(reb_index_waiting == 0))\n\n if self.parami(\"access_phase\", 1) == 1:\n if self.parami(\"cb_stats\", PerfDefaults.cb_stats) == 1:\n # starts cbstats collection\n cbStatsCollector = CBStatsCollector()\n cb_exc = self.param(\"cb_stats_exc\", PerfDefaults.cb_stats_exc)\n frequency = self.parami(\"cb_stats_freq\",\n PerfDefaults.cb_stats_freq)\n cbStatsCollector.collect_cb_stats(servers=self.input.servers,\n cb_exc=cb_exc,\n frequency=frequency)\n\n if self.parami(\"reb_no_fg\", PerfDefaults.reb_no_fg):\n max_creates = 1\n else:\n max_creates = \\\n self.parami(\"max_creates\", PerfDefaults.max_creates)\n\n self.access_phase(ratio_sets=self.paramf('ratio_sets',\n PerfDefaults.ratio_sets),\n ratio_misses=self.paramf('ratio_misses',\n PerfDefaults.ratio_misses),\n ratio_creates=self.paramf('ratio_creates',\n PerfDefaults.ratio_creates),\n ratio_deletes=self.paramf('ratio_deletes',\n PerfDefaults.ratio_deletes),\n ratio_hot=self.paramf('ratio_hot',\n PerfDefaults.ratio_hot),\n ratio_hot_gets=self.paramf('ratio_hot_gets',\n PerfDefaults.ratio_hot_gets),\n ratio_hot_sets=self.paramf('ratio_hot_sets',\n PerfDefaults.ratio_hot_sets),\n ratio_expirations=self.paramf('ratio_expirations',\n PerfDefaults.ratio_expirations),\n max_creates=max_creates,\n proto_prefix=\"memcached-binary\",\n port=\"11211\")\n\n if self.parami(\"reb_no_fg\", PerfDefaults.reb_no_fg):\n\n start_time = time.time()\n\n if not self.parami(\"prefix\", 0):\n self.latched_rebalance(delay=0, sync=True)\n\n if self.parami(\"loop_wait_until_drained\",\n PerfDefaults.loop_wait_until_drained):\n self.wait_until_drained()\n\n if self.parami(\"loop_wait_until_repl\",\n PerfDefaults.loop_wait_until_repl):\n self.wait_until_repl()\n\n if self.parami(\"collect_stats\", 1):\n ops = {\"tot-sets\": max_creates,\n \"tot-gets\": 0,\n \"tot-items\": 0,\n \"tot-creates\": max_creates,\n \"tot-misses\": 0,\n \"start-time\": start_time,\n \"end-time\": time.time()}\n\n self.end_stats(self.sc, ops, self.spec_reference + \".loop\")\n\n if self.parami(\"cb_stats\", PerfDefaults.cb_stats) == 1:\n cbStatsCollector.stop()",
"def test_list_all_response_descriptor_machines_workers_worker_worker_resource(self):\n pass",
"def test_04_restart_network_wo_cleanup(self):\n\n # Validate the following\n # 1. When cleanup = false, router is restarted and\n # all services inside the router are restarted\n # 2. check 'uptime' to see if the actual restart happened\n\n timeout = 10\n # Network should be in Implemented or Setup stage before restart\n while True:\n networks = list_networks(\n self.apiclient,\n account=self.account.name,\n domainid=self.account.domainid\n )\n self.assertEqual(\n isinstance(networks, list),\n True,\n \"Check list response returns a valid list\"\n )\n network = networks[0]\n if network.state in [\"Implemented\", \"Setup\"]:\n break\n elif timeout == 0:\n break\n else:\n time.sleep(self.services[\"sleep\"])\n timeout = timeout - 1\n\n self.debug(\n \"Restarting network with ID: %s, Network state: %s\" % (\n network.id,\n network.state\n ))\n cmd = restartNetwork.restartNetworkCmd()\n cmd.id = network.id\n cmd.cleanup = False\n self.apiclient.restartNetwork(cmd)\n\n # Get router details after restart\n list_router_response = list_routers(\n self.apiclient,\n account=self.account.name,\n domainid=self.account.domainid\n )\n self.assertEqual(\n isinstance(list_router_response, list),\n True,\n \"Check list response returns a valid list\"\n )\n router = list_router_response[0]\n\n hosts = list_hosts(\n self.apiclient,\n zoneid=router.zoneid,\n type='Routing',\n state='Up',\n id=router.hostid\n )\n self.assertEqual(\n isinstance(hosts, list),\n True,\n \"Check list response returns a valid list\"\n )\n host = hosts[0]\n\n if self.hypervisor.lower() in ('vmware', 'hyperv'):\n res = get_process_status(\n self.apiclient.connection.mgtSvr,\n 22,\n self.apiclient.connection.user,\n self.apiclient.connection.passwd,\n router.linklocalip,\n \"uptime\",\n hypervisor=self.hypervisor\n )\n else:\n try:\n host.user, host.passwd = get_host_credentials(\n self.config, host.ipaddress)\n res = get_process_status(\n host.ipaddress,\n 22,\n host.user,\n host.passwd,\n router.linklocalip,\n \"uptime\"\n )\n except KeyError:\n self.skipTest(\n \"Marvin configuration has no host credentials\\\n to check router services\")\n # res = 12:37:14 up 1 min, 0 users, load average: 0.61, 0.22, 0.08\n # Split result to check the uptime\n result = res[0].split()\n self.debug(\"Router Uptime: %s\" % result)\n self.assertEqual(\n str(result[1]),\n 'up',\n \"Check router is running or not\"\n )\n if str(result[3]) == \"min,\":\n self.assertEqual(\n (int(result[2]) < 20),\n True,\n \"Check uptime is less than 20 mins or not\"\n )\n else:\n self.assertEqual(\n str(result[3]),\n 'sec,',\n \"Check uptime is in seconds\"\n )\n return",
"def identify_failures():\n global heartbeat_slaves\n while True:\n for slave_ip in heartbeat_slaves.keys():\n if heartbeat_slaves[slave_ip] != -1 and heartbeat_slaves[slave_ip] < time.time():\n print \"%s failed. Expected at time %s but current time is %s\" % (slave_ip, heartbeat_slaves[slave_ip], time.time())\n if heartbeat_slaves[slave_ip] != -1 and heartbeat_slaves[slave_ip] + 30 < time.time(): # 30 second grace period for testing\n heartbeat_lock.acquire()\n slave_ips.remove(slave_ip)\n del heartbeat_slaves[slave_ip]\n print \"Deleted %s backup\" % (slave_ip)\n heartbeat_lock.release()\n time.sleep(1)",
"def test_load_response_descriptor_machines_workers_worker_worker_resource_spaces(self):\n pass",
"def _reset_workers(self):\n for rpc_client in self._all_rpc_clients:\n rpc_client.reset()",
"def test_recover_many_partitions(self, vector, unique_database):\n\n TBL_NAME = \"test_recover_partitions\"\n FQ_TBL_NAME = unique_database + \".\" + TBL_NAME\n DB_LOCATION = '%s/%s.db/' % (WAREHOUSE, unique_database)\n\n self.execute_query_expect_success(self.client,\n \"CREATE TABLE %s (c int) PARTITIONED BY (s string)\" % (FQ_TBL_NAME))\n\n # Create 700 partitions externally\n try:\n SRC_DIR = os.path.join(\"/tmp\", unique_database, TBL_NAME)\n if os.path.exists(SRC_DIR):\n shutil.rmtree(SRC_DIR)\n os.makedirs(SRC_DIR)\n for i in range(1, 700):\n partition_dir = os.path.join(SRC_DIR, \"s=part%d/\" % i)\n os.makedirs(partition_dir)\n with open(os.path.join(partition_dir, \"test\"), 'w') as f:\n f.write(\"666\")\n self.filesystem_client.copy_from_local(SRC_DIR, DB_LOCATION)\n finally:\n shutil.rmtree(SRC_DIR)\n\n result = self.execute_query_expect_success(self.client,\n \"SHOW PARTITIONS %s\" % FQ_TBL_NAME)\n for i in range(1, 700):\n PART_DIR = \"part%d\\t\" % i\n assert not self.has_value(PART_DIR, result.data)\n self.execute_query_expect_success(self.client,\n \"ALTER TABLE %s RECOVER PARTITIONS\" % FQ_TBL_NAME)\n result = self.execute_query_expect_success(self.client,\n \"SHOW PARTITIONS %s\" % FQ_TBL_NAME)\n for i in range(1, 700):\n PART_DIR = \"part%d\\t\" % i\n assert self.has_value(PART_DIR, result.data)",
"def label_worker_nodes(self):\n # to use the cache dropping pod, worker nodes need to be labeled.\n log.info(\"Labeling the worker nodes for cache-dropping enable.\")\n try:\n helpers.label_worker_node(\n self.worker_nodes, label_key=BMO_LABEL, label_value=\"yes\"\n )\n except CommandFailed:\n # this is probably because of the nodes are already labeled, so,\n # checking if nodes are labeled and continue anyway.\n result = self.pod_obj.exec_oc_cmd(f\"get node -l {BMO_LABEL}\")\n found = [node for node in self.worker_nodes if re.search(node, result)]\n if len(found) == len(self.worker_nodes):\n log.info(\"All worker nodes are labeled\")\n else:\n log.warning(\"Labeling nodes failed, Not all workers node are labeled !\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The order will try to sell at the specified limit price until it is reached. If not successful, the order will be canceled.
|
def sell_limit(symbol, quantity, orderId, sell_price, last_price):
invalidAttempts = 0
while invalidAttempts < INVALID_ATTEMPTS_LIMIT:
order = client.sell_limit(symbol, quantity, sell_price)
if 'msg' in order:
message(order['msg'])
print ("Profit: %%%s. Buy: %.8f Sell: %.8f" % (PROFIT, float(order["price"]), sell_price))
sell_id = order['orderId']
if sell_id != None:
time.sleep(WAIT_TIME_BUY_SELL)
"""
            If all sell attempts fail,
            fall back to the stop-loss.
"""
if STOP_LOSS > 0:
stop_order = get_order(symbol, sell_id)
stopprice = calc(float(stop_order['price']))
lossprice = stopprice - (stopprice * STOP_LOSS / 100)
status = stop_order['status']
# Order status
if status == "NEW":
if cancel_order(symbol, sell_id) == True:
# Stop loss
if last_price <= lossprice:
sell = client.sell_market(symbol, quantity)
if 'msg' in sell:
message(sell['msg'])
print ("Stop-loss, sell market, %s" % (lossprice))
if sell == True:
break
else:
continue
                            # After a stop-loss sale, wait a while before continuing.
time.sleep (WAIT_TIME_STOP_LOSS)
else:
break
elif status == "FILLED":
print("Order filled")
break
elif status == "PARTIALLY_FILLED":
print("Order partially filled")
break
else:
continue
invalidAttempts = 0
break
else:
invalidAttempts += 1
continue
if invalidAttempts != 0:
cancel_order(symbol, orderId)
|
[
"def limit_order(self, instrument, action, qty, limit_price):\r\n # Verify action\r\n if action != 'BUY' and action != 'SELL':\r\n raise ValueError(\"Invalid action () for market order. Must be \"\r\n \"'BUY' or 'SELL'.\".format(action))\r\n \r\n limit_order = LimitOrder(\r\n action=action,\r\n totalQuantity=float(qty),\r\n lmtPrice=float(limit_price)\r\n )\r\n self.log('{}ING {} units of {} at {} LIMIT'.format(\r\n action, qty, instrument.symbol, limit_price))\r\n self.ib.placeOrder(instrument, limit_order)",
"def order_sell_stop_limit(symbol, quantity, limit_price, stop_price, time_in_force='gtc'):\n try:\n symbol = symbol.upper().strip()\n latest_price = helper.round_price(stocks.get_latest_price(symbol)[0])\n stop_price = helper.round_price(stop_price)\n limit_price = helper.round_price(limit_price)\n except AttributeError as message:\n print(message)\n return None\n\n if latest_price < stop_price:\n print('Error: stop_price must be below the current price.')\n return None\n\n payload = {\n 'account': profiles.load_account_profile(info='url'),\n 'instrument': stocks.get_instruments_by_symbols(symbol, info='url')[0],\n 'symbol': symbol,\n 'price': limit_price,\n 'quantity': quantity,\n 'ref_id': str(uuid4()),\n 'type': 'limit',\n 'stop_price': stop_price,\n 'time_in_force': time_in_force,\n 'trigger': 'stop',\n 'side': 'sell'\n }\n\n url = urls.orders()\n data = helper.request_post(url, payload)\n\n return data",
"def limit_nb_orders_displayed(self):\n logging.info('limit_nb_orders_displayed(self):')\n new_buy_orders, new_sell_orders = api.get_orders(self.currency_pair)\n\n # check sell orders\n # When sell_price_max is reached\n if self.sell_orders == []:\n self.sell_orders.append([0, Decimal('0'), self.sell_price_max \\\n + self.increment])\n new_sell_orders = self.sell_orders[:]\n\n log = 'Buy limit reached limit_nb_orders_displayed, sell_orders : ', \\\n self.sell_orders, 'new_sell_orders : ', new_sell_orders\n logging.warning(log)\n # When sell_price_max have been reached earlier\n elif self.sell_orders[0][0] == 0:\n logging.info('self.sell_orders[0][0] == 0:')\n pass\n\n else:\n # In case of a sell occured during compare_orders()\n if new_sell_orders == []:\n logging.warning('sell orders not ok, waiting for the next round')\n pass\n\n else:\n log = 'new_sell_orders[0][2]', new_sell_orders[0][2], \\\n 'self.sell_orders[-1][2]', self.sell_orders[-1][2]\n \n logging.info(log)\n # Remove sell orders if there is too much of them.\n if new_sell_orders[-1][2] - new_sell_orders[0][2] \\\n > self.increment * self.nb_orders_to_display:\n\n log = (self.sell_orders[-1][2] - new_sell_orders[0][2] \\\n > self.increment * self.nb_orders_to_display), \\\n 'sell orders to remove'\n logging.info(log)\n\n i = int((new_sell_orders[-1][2] - \\\n (new_sell_orders[0][2] + self.increment \\\n * self.nb_orders_to_display)) / self.increment)\n\n log = 'Nb of sell to remove :', i, 'from : ', self.sell_orders[-1][2]\n logging.warning(log)\n\n while i > 0:\n log = 'SELL to cancel :', self.sell_orders[-1]\n logging.info(log)\n # Remove fake order if needed\n if self.sell_orders[-1][0] == 0:\n del self.sell_orders[-1]\n\n else:\n resp = api.cancel_order(self.currency_pair, self.sell_orders[-1][0])\n\n log = 'Order canceled : ', resp\n logging.info(log)\n\n del self.sell_orders[-1]\n\n i -= 1\n # Add sell orders if there is less than nb_orders_to_display\n elif new_sell_orders[-1][2] - new_sell_orders[0][2] \\\n < self.increment * self.nb_orders_to_display:\n # Set the number of orders to execute\n if new_sell_orders[0][2] + self.nb_orders_to_display \\\n * self.increment <= self.sell_price_max:\n\n i = int((new_sell_orders[0][2] + self.nb_orders_to_display \\\n * self.increment - new_sell_orders[-1][2]) \\\n / self.increment)\n\n else:\n\n i = int((self.sell_price_max - new_sell_orders[-1][2]) \\\n / self.increment)\n logging.warning('Sell price max almost reached')\n\n price_start = self.sell_orders[-1][2] + self.increment\n\n log = 'Nb of sell orders to put : i =', i, 'from :', price_start\n logging.warning(log)\n\n sell_order_executed = api.set_several_sell_orders(self.currency_pair, \\\n price_start, \\\n self.amount, \\\n i, \\\n self.increment)\n\n for item in sell_order_executed:\n self.sell_orders.append(item)\n\n else:\n logging.warning('sell orders ok')\n\n # check buy orders\n # When buy_price_min is reached\n if self.buy_orders == []:\n self.buy_orders.append([0, Decimal('0'), self.buy_price_min - self.increment])\n new_buy_orders = self.buy_orders[:]\n\n log = 'Buy limit reached , buy_orders : ', self.buy_orders, \\\n ' new_sell_orders : ', new_sell_orders\n logging.warning(log)\n # When buy_price_min have been reached earlier.\n elif self.buy_orders[-1][0] == 0:\n logging.warning('self.buy_orders[-1][0] == 0 :')\n pass\n\n else:\n # In case of a buy occured during compare_orders()\n if new_buy_orders == []:\n logging.warning('Buy orders not ok, waiting for the next round')\n\n else:\n log = 
'new_buy_orders[-1][2]', new_buy_orders[-1][2], \\\n 'new_buy_orders[0][2]', new_buy_orders[0][2]\n logging.info(log)\n # Remove orders if there is too much of them\n if new_buy_orders[-1][2] - new_buy_orders[0][2] \\\n > self.increment * self.nb_orders_to_display:\n\n log = (new_buy_orders[-1][2] - self.buy_orders[0][2] \\\n > self.increment * self.nb_orders_to_display), \\\n 'buy orders to remove'\n logging.info(log)\n\n i = int((new_buy_orders[-1][2] - \\\n (new_buy_orders[0][2] + self.increment * self.nb_orders_to_display)) \\\n / self.increment)\n\n log = 'Nb of buy order to remove : ', i, 'from : ', self.buy_orders[0][2]\n logging.warning(log)\n\n while i > 0:\n log = 'BUY to cancel :', self.buy_orders[0]\n logging.info(log)\n # Remove fake order\n if self.buy_orders[0][0] == 0:\n del self.buy_orders[0]\n\n else:\n resp = api.cancel_order(self.currency_pair, \\\n self.buy_orders[0][0])\n\n log = 'Order canceled : ', resp\n logging.info(log)\n\n del self.buy_orders[0]\n\n i -= 1\n\n elif new_buy_orders[-1][2] - new_buy_orders[0][2] \\\n < self.increment * self.nb_orders_to_display:\n # Set the good amount of orders to execute\n if new_buy_orders[-1][2] - self.nb_orders_to_display \\\n * self.increment >= self.buy_price_min:\n\n i = int((new_buy_orders[0][2] + self.nb_orders_to_display \\\n * self.increment - new_buy_orders[-1][2]) \\\n / self.increment)\n\n else:\n\n i = int((self.buy_orders[0][0] - self.buy_price_min) \\\n / self.increment)\n logging.warning('buy_price_min almost reached')\n\n price_start = self.buy_orders[0][2] - self.increment\n\n log = 'nb of buy orders to put : i =', i, 'from :', price_start\n logging.warning(log)\n\n buy_order_executed = api.set_several_buy_orders(self.currency_pair, \\\n price_start, \\\n self.amount, \\\n i, \\\n self.increment)\n\n i = 0\n for item in buy_order_executed:\n self.buy_orders.insert(i, item)\n i += 1\n\n else:\n logging.warning('buy orders ok')",
"def poll(self):\n amount, currency = get_bitcoin_sell_price()\n\n if not self.min_limit <= amount <= self.max_limit:\n message = \"Alert: Current price of 1 BTC is {:.2f} {}\".format(amount, currency)\n send_sms(self.recipient, message)\n print(message)\n else:\n print(\"Current price of 1 BTC is {:.2f} {}. Within bounds, doing nothing.\".format(amount, currency))",
"def test_place_order_limit_regular(kiteconnect):\n updated_params, order_id, order = setup_order_place(\n kiteconnect=kiteconnect,\n product=kiteconnect.PRODUCT_MIS,\n variety=kiteconnect.VARIETY_REGULAR,\n order_type=kiteconnect.ORDER_TYPE_LIMIT,\n price=True\n )\n\n assert order[-1][\"product\"] == kiteconnect.PRODUCT_MIS\n assert order[-1][\"variety\"] == kiteconnect.VARIETY_REGULAR\n\n try:\n cleanup_orders(kiteconnect, order_id)\n except Exception as e:\n warnings.warn(UserWarning(\"Error while cleaning up orders: {}\".format(e)))",
"def order_buy_stop_limit(symbol, quantity, limit_price, stop_price, time_in_force='gtc'):\n try:\n symbol = symbol.upper().strip()\n latest_price = helper.round_price(stocks.get_latest_price(symbol)[0])\n stop_price = helper.round_price(stop_price)\n limit_price = helper.round_price(limit_price)\n except AttributeError as message:\n print(message)\n return None\n\n if latest_price > stop_price:\n print('Error: stop_price must be above the current price.')\n return None\n\n payload = {\n 'account': profiles.load_account_profile(info='url'),\n 'instrument': stocks.get_instruments_by_symbols(symbol, info='url')[0],\n 'symbol': symbol,\n 'price': limit_price,\n 'quantity': quantity,\n 'ref_id': str(uuid4()),\n 'type': 'limit',\n 'stop_price': stop_price,\n 'time_in_force': time_in_force,\n 'trigger': 'stop',\n 'side': 'buy'\n }\n\n url = urls.orders()\n data = helper.request_post(url, payload)\n\n return data",
"def futures_place_limit_orders(self):\n limit_price = 0\n symbol = None\n order = None\n orders_list = list()\n\n try:\n open_positions = self.client.futures_position_information(recvWindow=RECV_WINDOW)\n\n for position in open_positions:\n if float(position['positionAmt']) == NOTHING or position['symbol'] == HEDGE_SYMBOL:\n continue\n \n symbol = position['symbol']\n entry_price = float(position['entryPrice'])\n quantity = float(position['positionAmt'])\n tick_size, step_size = self.futures_get_tick_and_step_size(symbol)\n side = SideType.SIDE_BUY\n limit_price = entry_price - (entry_price * CLOSE_PERCENT)\n\n if quantity > 0:\n limit_price = entry_price + (entry_price * CLOSE_PERCENT)\n side = SideType.SIDE_SELL\n\n limit_price = '{:.{precision}f}'.format(limit_price, precision=tick_size)\n\n # we only want to cancel the order if the new price if different than the old price\n result = self.futures_cancel_managed_limit_order(symbol, limit_price)\n\n if result:\n \"\"\"if there are no open orders that AL is managing, you are free to put in any limit order that you want!\"\"\"\n order = self.futures_create_limit_order(symbol, side, quantity, limit_price)\n if order != -1:\n orders_list.append(order)\n else:\n continue\n else:\n # self.print_log(f\"Not placing new limit order for {symbol}.\")\n pass\n\n except Exception as e:\n self.handle_exception(e, f\"Could not set limit order for {symbol}.\")\n \n if len(orders_list) != 0:\n self.futures_update_limit_orders_file(orders_list)",
"def cross_limit_order(self):\n long_cross_price = self.tick.ask_price_1\n short_cross_price = self.tick.bid_price_1\n long_best_price = long_cross_price\n short_best_price = short_cross_price\n\n for order in list(self.active_limit_orders.values()):\n # Push order update with status \"not traded\" (pending).\n if order.vt_symbol != self.tick.vt_symbol:\n continue\n\n # Check whether limit orders can be filled.\n buy_cross = (\n order.direction == Direction.LONG and order.offset == Offset.OPEN\n and order.price >= long_cross_price > 0\n )\n sell_cross = (\n order.direction == Direction.LONG and order.offset == Offset.CLOSE\n and 0 < order.price <= short_cross_price\n )\n\n short_cross = (\n order.direction == Direction.SHORT and order.offset == Offset.OPEN\n and 0 < order.price <= short_cross_price\n )\n cover_cross = (\n order.direction == Direction.SHORT and order.offset == Offset.CLOSE\n and order.price >= long_cross_price > 0\n )\n\n if not buy_cross and not sell_cross and not short_cross and not cover_cross:\n continue\n\n # Push order udpate with status \"all traded\" (filled).\n order.traded = order.volume\n order.status = Status.ALLTRADED\n\n self.active_limit_orders.pop(order.vt_client_oid)\n self.engines['St'].processOrderEvent(order)\n\n # Push trade update\n self.trade_count += 1\n\n if buy_cross:\n trade_price = min(order.price, long_best_price)\n long_qty = order.volume\n short_qty = 0\n\n if sell_cross:\n trade_price = max(order.price, short_best_price)\n long_qty = - order.volume\n short_qty = 0\n\n if short_cross:\n trade_price = max(order.price, short_best_price)\n long_qty = 0\n short_qty = order.volume\n\n if cover_cross:\n trade_price = min(order.price, long_best_price)\n long_qty = 0\n short_qty = - order.volume\n\n trade = TradeData(\n symbol=order.symbol,\n exchange=order.exchange,\n vt_client_oid=order.vt_client_oid,\n tradeid=str(self.trade_count),\n direction=order.direction,\n offset=order.offset,\n price=trade_price,\n volume=order.volume,\n long_qty=long_qty,\n short_qty=short_qty,\n time=self.datetime.strftime(\"%H:%M:%S\"),\n gateway_name=self.gateway_name,\n )\n trade.datetime = self.datetime\n self.engines['St'].processTradeEvent(trade)\n self.trades[trade.vt_tradeid] = trade",
"async def limit(symbol, side, time_in_force, quantity, price, new_client_order_id,\n iceberg_qty, recv_window, new_order_resp_type):\n payload = {\n 'symbol': symbol,\n 'side': side,\n 'type': \"LIMIT\",\n 'timeInForce': time_in_force,\n 'price': price,\n 'quantity': quantity,\n 'newOrderRespType': new_order_resp_type,\n 'recvWindow': recv_window,\n 'timestamp': get_timestamp()\n }\n\n builder = LimitOrderBuilder(endpoint='api/v3/order', payload=payload, method='POST') \\\n .add_optional_params_to_payload(new_client_order_id=new_client_order_id,\n iceberg_qty=iceberg_qty) \\\n .set_security()\n\n await builder.send_http_req()\n\n builder.handle_response().generate_output()",
"def cross_limit_order(self):\n if self.mode == BacktestingMode.BAR:\n long_cross_price = self.bar.low_price\n short_cross_price = self.bar.high_price\n long_best_price = self.bar.open_price\n short_best_price = self.bar.open_price\n else:\n long_cross_price = self.tick.ask_price_1\n short_cross_price = self.tick.bid_price_1\n long_best_price = long_cross_price\n short_best_price = short_cross_price\n\n for order in list(self.active_limit_orders.values()):\n\n #增加多策略测试\n strategy = self.orderStrategyDict[order.vt_orderid]\n\n # Push order update with status \"not traded\" (pending).\n if order.status == Status.SUBMITTING:\n order.status = Status.NOTTRADED\n strategy.on_order(order)\n\n # Check whether limit orders can be filled.\n long_cross = (\n order.direction == Direction.LONG\n and order.price >= long_cross_price\n and long_cross_price > 0\n )\n\n short_cross = (\n order.direction == Direction.SHORT\n and order.price <= short_cross_price\n and short_cross_price > 0\n )\n\n if not long_cross and not short_cross:\n continue\n\n # Push order udpate with status \"all traded\" (filled).\n order.traded = order.volume\n order.status = Status.ALLTRADED\n strategy.on_order(order)\n\n self.active_limit_orders.pop(order.vt_orderid)\n\n # Push trade update\n self.trade_count += 1\n\n if long_cross:\n trade_price = min(order.price, long_best_price)\n pos_change = order.volume\n else:\n trade_price = max(order.price, short_best_price)\n pos_change = -order.volume\n\n trade = TradeData(\n symbol=order.symbol,\n exchange=order.exchange,\n orderid=order.orderid,\n tradeid=str(self.trade_count),\n direction=order.direction,\n offset=order.offset,\n price=trade_price,\n volume=order.volume,\n time=self.datetime.strftime(\"%H:%M:%S\"),\n gateway_name=self.gateway_name,\n )\n trade.datetime = self.datetime\n #add trade strategy name, 以便于区别多策略混合效果\n trade.name = strategy.strategy_name\n\n strategy.pos += pos_change\n strategy.on_trade(trade)\n\n self.trades[trade.vt_tradeid] = trade",
"async def place_stop_limit_sell_order(\n self,\n symbol: str,\n price: Union[int, float],\n quantity: int,\n stop_price: Union[int, float],\n time_in_force: models.OrderTimeInForce = models.OrderTimeInForce.GFD,\n extended_hours: bool = False,\n ) -> str:\n instruments = await self.get_instruments(symbol=symbol)\n return await self.place_order(\n extended_hours=extended_hours,\n instrument=instruments[0][\"url\"],\n price=price,\n quantity=quantity,\n side=\"sell\",\n stop_price=stop_price,\n symbol=symbol,\n time_in_force=time_in_force.value,\n trigger=\"stop\",\n type=\"limit\",\n )",
"def list_purchases_limit(self, limit=50):\n self.connection = self.connection or self._get_connection()\n return db.get_purchases_limit(self.connection, limit)",
"def sell(self, amount):\n\n if self.qty >= amount:\n # If there are enough desserts, sell them and reduce the qty\n self.qty = self.qty - amount\n elif self.qty == 0:\n # If there are 0 desserts, indicate they're sold out\n print(f\"Sorry, these {self.dessert_type}s are sold out\")\n else:\n # There are desserts but not enough\n # Sell what's available and update qty to 0\n self.qty = 0",
"def insufficient_limit(context: Context) -> str or None:\n account = context.account\n transaction = context.transaction\n\n if account.is_active() and (account.available_limit - transaction.amount < 0):\n return 'insufficient-limit'",
"def set_limit(self, limit):\n self.limits[self.api_key] = limit",
"def test_sell_to_take_profit(self):\n scalp = ScalpingStrategy(self.LOSS_LIMIT, self.SCALP_THRESHOLD)\n self.assertIsNone(scalp.handle_tick(tick=Tick(self.AAPL, Decimal('543.5'), 100)))\n self.assertIsNone(scalp.handle_tick(tick=Tick(self.AAPL, Decimal('543.7'), 100)))\n self.assertIsNone(scalp.handle_tick(tick=Tick(self.AAPL, Decimal('544.1'), 200)))\n self.assertIsNotNone(scalp.handle_tick(tick=Tick(self.AAPL, Decimal('544.2'), 100)))\n self.assertIsNone(scalp.handle_tick(tick=Tick(self.AAPL, Decimal('587.1'), 100)))\n order = scalp.handle_tick(tick=Tick(self.AAPL, Decimal('743.2'), 200))\n self.assertEqual(Order.SIDE_SELL, order.side)\n self.assertEqual(100, order.quantity)\n self.assertEqual(self.AAPL, order.symbol)",
"def take_item(self):\n if (self.quantity > 0):\n self.quantity -= 1\n else:\n raise OutOfStock",
"def place_orders_for_market(self, market_info):\n market: ExchangeBase = market_info.market\n curr_order_amount = min(self._order_step_size, self._quantity_remaining)\n quantized_amount = market.quantize_order_amount(market_info.trading_pair, Decimal(curr_order_amount))\n quantized_price = market.quantize_order_price(market_info.trading_pair, Decimal(self._order_price))\n\n self.logger().debug(\"Checking to see if the incremental order size is possible\")\n self.logger().debug(\"Checking to see if the user has enough balance to place orders\")\n\n if quantized_amount != 0:\n if self.has_enough_balance(market_info, quantized_amount):\n if self._is_buy:\n order_id = self.buy_with_specific_market(market_info,\n amount=quantized_amount,\n order_type=OrderType.LIMIT,\n price=quantized_price)\n self.logger().info(\"Limit buy order has been placed\")\n else:\n order_id = self.sell_with_specific_market(market_info,\n amount=quantized_amount,\n order_type=OrderType.LIMIT,\n price=quantized_price)\n self.logger().info(\"Limit sell order has been placed\")\n self._time_to_cancel[order_id] = self.current_timestamp + self._cancel_order_wait_time\n\n self._quantity_remaining = Decimal(self._quantity_remaining) - quantized_amount\n\n else:\n self.logger().info(\"Not enough balance to run the strategy. Please check balances and try again.\")\n else:\n self.logger().warning(\"Not possible to break the order into the desired number of segments.\")",
"def execute_trade(self, btc_sym, eth_sym, purchase, btc_trade_limit):\n amount_btc = math.floor(btc_trade_limit/float(purchase[0][btc_sym][0]['askPrice']))\n eth_capital = (btc_trade_limit / float(purchase[0][btc_sym][0]['askPrice'])) * float(purchase[0][btc_sym][1]['bidPrice'])\n amount_eth = math.floor(eth_capital / float(purchase[1][eth_sym][1]['askPrice']))\n if(amount_btc*float(purchase[0][btc_sym][0]['askPrice']) > 0.001 and amount_eth * float(purchase[1][eth_sym][0]['bidPrice'])>0.001):\n if self.order_buy_alt(purchase[0][btc_sym][0]['symbol'], amount_btc, purchase[0][btc_sym][0]['askPrice'], 1) is True:\n print(\"1: \" + purchase[0][btc_sym][0]['symbol'] + \" \" + str(amount_btc) + \" \" + purchase[0][btc_sym][0]['askPrice'])\n \n self.order_sell_alt(purchase[0][btc_sym][1]['symbol'], amount_btc, purchase[0][btc_sym][1]['bidPrice'], 2)\n print(\"2: \" + purchase[0][btc_sym][1]['symbol'] + \" \" + str(amount_btc) + \" \" + purchase[0][btc_sym][1]['bidPrice'])\n \n if self.order_buy_alt(purchase[1][eth_sym][1]['symbol'], amount_eth, purchase[1][eth_sym][1]['askPrice'], 3) is True:\n print(\"3: \" + purchase[1][eth_sym][1]['symbol'] + \" \" + str(amount_eth) + \" \" + purchase[1][eth_sym][1]['askPrice'])\n \n self.order_sell_alt(purchase[1][eth_sym][0]['symbol'], amount_eth, purchase[1][eth_sym][0]['bidPrice'], 4)\n print(\"4: \" + purchase[1][eth_sym][0]['symbol'] + \" \" + str(amount_eth) + \" \" + purchase[1][eth_sym][0]['bidPrice'])\n\n self.remove_any_open_orders([purchase[0][btc_sym][0]['symbol'],purchase[0][btc_sym][1]['symbol'],purchase[1][eth_sym][1]['symbol'],\n purchase[1][eth_sym][0]['symbol']])",
"def setLimit(self, limit):\n self.limit = limit\n return self"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The function returns the value f[i,j] when (i,j) is an index 'in the image'; otherwise it returns 0.
|
def value(i,j):
if i<0 or i>=M or j<0 or j>=N:
return 0
return f[i,j]
|
[
"def getValue(self, i, j):\n return self.A[i][j] if j in self.A[i] else 0",
"def non_zero_func(x):\n\n inds = x.nonzero()[0]\n\n return inds",
"def feature_index_for_cell_face(self, cell_kji0, axis, p01):\n\n self.cache_arrays()\n if self.feature_indices is None: return None\n cell = self.grid_list[0].natural_cell_index(cell_kji0)\n face = self.face_index_map[axis, p01]\n cell_matches = np.stack(np.where(self.cell_index_pairs == cell)).T\n for match in cell_matches[:]:\n if self.face_index_pairs[match[0], match[1]] == face: return self.feature_indices[match[0]]\n return None",
"def get_value(x, y):\r\n\r\n # If array indices are out of bound, return zero\r\n if x < 0 or x >= size or y < 0 or y >= size:\r\n return False\r\n\r\n # Otherwise, return true value\r\n return height_map[x][y]",
"def nonzero_indices(x):\n return jnp.nonzero(x)[0]",
"def findIndex(p, start, finish, f):\n for i in range(start, finish):\n if f(p[i]):\n return i\n return -1",
"def apply_on_image(image, patch, idx, function):\n patch = patch[0] // 2, patch[1] // 2\n for (v, u) in zip(idx[0], idx[1]):\n if (v + patch[0] > image.shape[0]) or (v - patch[0]) < 0:\n continue\n if (u + patch[1] > image.shape[1]) or (u - patch[1]) < 0:\n continue\n image[v - patch[0]:v + patch[0], u - patch[1]:u + patch[1]] = function(\n image[v - patch[0]:v + patch[0], u - patch[1]:u + patch[1]])\n\n return image",
"def value_at(self, index):\n index = np.where(self.indices == index)[0]\n return self.data[index] if index.size != 0 else 0",
"def index2D(l, elem):\r\n for row, i in enumerate(l):\r\n for column, val in enumerate(i):\r\n if elem == val:\r\n return row, column\r\n print (\"element \" + str(elem) + \" was not found\")\r\n return -1, -1",
"def internal_virial(r,f):\n virial = 0\n for i in range(r.shape[0]):\n for dim in range(r.shape[1]):\n virial += r[i,dim]*f[i,dim]\n\n return virial",
"def sub2ind(array_shape, i, j):\n idx = i*array_shape[1] + j\n return idx",
"def getValue (self, row, column):\n value = 0\n try:\n value = __image__ [row, column]\n if value > 255 or value < 0:\n value = 0\n except:\n value = 0\n return value",
"def __nonzero__(self) -> \"bool\":\n return _itkImagePython.vectoritkImageFAF33___nonzero__(self)",
"def gridCoord2index( iparlist ) :\n iz,icp,icl,ilp,ipm,ifs = iparlist \n isn = iz + nlogz*(icp + ncolorpar*(icl + ncolorlaw*(ilp + nlumipar*(ipm + npkmjd*(ifs) ) ) ) )\n return( isn )",
"def __nonzero__(self) -> \"bool\":\n return _itkImagePython.vectoritkImageF2___nonzero__(self)",
"def _indexFromPsfxy(self, psfx, psfy):\n\n if not psfx.shape==psfy.shape:\n raise ValueError(\"psfx and psfy arrays are not same shape\")\n\n # First, shift psfy, psfx to reference a 0-indexed array\n y = psfy + self._origin[0]\n x = psfx + self._origin[1]\n # Mark references to invalid pixels with nopsf array\n # First note which pixels are referenced outside of grid:\n nopsf = (y < 0) | (y >= self.ny) | (x < 0) | (x >= self.nx)\n # Set them to reference pixel 0\n x = np.where(nopsf, 0, x)\n y = np.where(nopsf, 0, y)\n # Then read all indices, setting invalid ones to -1\n return np.where(nopsf, -1, self._indices[y, x])",
"def FI(image):\n a = iulib.floatarray()\n iulib.narray_of_numpy(a,transpose(image[::-1,...]))\n return a",
"def __nonzero__(self) -> \"bool\":\n return _itkImagePython.vectoritkImageFAF22___nonzero__(self)",
"def get_central_slice_index_of_nonzero_region(image, view):\n [d_index, h_index, w_index] = np.where(image)\n if(view == 0):\n index_list = d_index\n elif(view == 1):\n index_list = w_index\n else:\n index_list = h_index\n idx_min = min(index_list)\n idx_max = max(index_list)\n i_cen = int((idx_min + idx_max)/2)\n return i_cen"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Given image NVR, wait for the build that produced it to show up in koji. If it doesn't within the timeout, raise an error.
|
def wait_for_parent_image_build(self, nvr):
self.log.info('Waiting for Koji build for parent image %s', nvr)
poll_start = time.time()
while time.time() - poll_start < self.poll_timeout:
build = self.koji_session.getBuild(nvr)
if build:
self.log.info('Parent image Koji build found with id %s', build.get('id'))
return build
time.sleep(self.poll_interval)
raise KojiParentBuildMissing('Parent image Koji build NOT found for {}!'.format(nvr))
|
[
"def testWaitForPushImageError(self):\n stage = self.ConstructStage()\n stage.board_runattrs.SetParallel(\n 'instruction_urls_per_channel', None)\n\n self.assertEqual(stage.WaitUntilReady(), False)",
"def _wait_for_image_to_become_active(\n self, image_id: \"std::uuid\", timeout: int = 60\n ) -> None:\n if timeout < 0:\n raise ValueError(f\"Timeout cannot be negative: {timeout}\")\n start_time = time.time()\n image = None\n while time.time() < start_time + timeout:\n image = self._get_image_by_id(image_id)\n if image and image.status == \"active\":\n return\n time.sleep(0.1)\n raise Exception(\n f\"A timeout occurred while waiting for image {image_id} to enter the `active` state \"\n f\"(status={image.status if image else None})\"\n )",
"def wait(build_id):\n api = Heroku()\n\n while True:\n if api.check_build_status(build_id):\n break\n sleep(3)",
"def _assertVolumeBuildSucceededWithTimeout(\n self, volume_id, timeout, msg=None):\n\n try:\n self.verify_volume_build_suceeded(volume_id, timeout)\n except StatusProgressionError as e:\n self.fail(self._formatMessage(msg, str(e)))",
"def testWaitForPushImageSuccess(self):\n stage = self.ConstructStage()\n stage.board_runattrs.SetParallel(\n 'instruction_urls_per_channel', self.INSNS_URLS_PER_CHANNEL)\n\n self.assertEqual(stage.WaitUntilReady(), True)\n self.assertEqual(stage.instruction_urls_per_channel,\n self.INSNS_URLS_PER_CHANNEL)",
"def block_until_build(data, qi, delay=5):\n name, _ = parse_params(get_args(data))\n logger.info(\"Waiting for {} to start building\".format(name))\n while True:\n try:\n qi.poll()\n return qi.get_build()\n except (JenkinsAPIException, HTTPError):\n logger.debug(\"Waiting for {} to start...\".format(name))\n time.sleep(delay)",
"def waitTillReady(timeout=10):\n\n timeout = MOD.secCounter() + timeout\n\n while (MOD.secCounter() < timeout):\n if (isReady()):\n return\n MOD.sleep(2)\n\n raise TimeoutException(\"Timeout reached while waiting for the SIM to become ready\")",
"def _assertVolumeBuildErroredWithTimeout(\n self, volume_id, timeout, msg=None):\n\n try:\n self.verify_volume_build_has_errored(volume_id, timeout)\n except StatusProgressionError as e:\n self.fail(self._formatMessage(msg, str(e)))",
"def _assertSnapshotBuildSucceededWithTimeout(\n self, snapshot_id, timeout, msg=None):\n\n try:\n self.verify_snapshot_build_succeeded(snapshot_id, timeout)\n except StatusProgressionError as e:\n self.fail(self._formatMessage(msg, str(e)))",
"def wait_not_ready(self, timeout=None):\n\n if not self.ready_for_testing:\n return True\n\n return self.wait_not_ready_event.wait(timeout)",
"def wait_for_release(self, timeout = 0):\n target = 2 ** 31 if timeout == 0 else time.time() + timeout\n while (self.shm.touch_p > 1) and (time.time() < target): \n time.sleep(0.01)\n self._check_host_ts()\n return (self.shm.touch_p < 1)",
"def wait_for_bootup(self, timeout=10):\n end_time = time.time() + timeout\n while True:\n now = time.time()\n with self.state_update:\n self._state_received = None\n self.state_update.wait(end_time - now + 0.1)\n if now > end_time:\n raise NmtError(\"Timeout waiting for boot-up message\")\n if self._state_received == 0:\n break",
"def testTakeImageBottom(self):\r\n nVis = NAOVision(IP, PORT)\r\n nVis._subscribeToVideoProxy(1)\r\n nVis._takeImage(0)\r\n self.assertNotEquals(nVis._alImage, None)\r\n nVis._unsubscribeAll()",
"def wait(time=-1.):\n ierr = c_int()\n lib.gmshFltkWait(\n c_double(time),\n byref(ierr))\n if ierr.value != 0:\n raise ValueError(\n \"gmshFltkWait returned non-zero error code: \",\n ierr.value)",
"def wait(self, timeout=None):\n if hasattr(self, '_result'):\n return\n try:\n self.get(timeout)\n except Exception:\n pass",
"def wait_for_key_response():\n timeout = 30.0\n while len(key_input) < 20 and timeout > 0:\n logging.debug(\"Waiting for encryption key...\")\n sleep(0.25)\n timeout -= 0.25\n if timeout == 0:\n logging.error(\n \"Error: timeout reached waiting for encryption key response.\")\n quit(2)",
"def _assertSnapshotBuildErroredWithTimeout(\n self, snapshot_id, timeout, msg=None):\n\n try:\n self.verify_snapshot_build_has_errored(snapshot_id, timeout)\n except StatusProgressionError as e:\n self.fail(self._formatMessage(msg, str(e)))",
"def on_build_failure():\n\n message_title = \"FireSim Vitis FPGA Build Failed\"\n\n message_body = \"Your FPGA build failed for quintuplet: \" + self.build_config.get_chisel_quintuplet()\n\n rootLogger.info(message_title)\n rootLogger.info(message_body)\n\n build_farm.release_build_host(self.build_config)",
"def waitForKubernetesSupportedVersionIsoReadyState(cls, version_id, retries=30, interval=60):\n\n while retries > 0:\n time.sleep(interval)\n list_versions_response = cls.listKubernetesSupportedVersion(version_id)\n if not hasattr(list_versions_response, 'isostate') or not list_versions_response or not list_versions_response.isostate:\n retries = retries - 1\n continue\n if 'Ready' == list_versions_response.isostate:\n return\n elif 'Failed' == list_versions_response.isostate:\n raise Exception( \"Failed to download template: status - %s\" % template.status)\n retries = retries - 1\n raise Exception(\"Kubernetes supported version Ready state timed out\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Construct the result dict to be preserved in the build metadata.
|
def make_result(self):
result = {}
if self._base_image_build:
result[BASE_IMAGE_KOJI_BUILD] = self._base_image_build
if self._parent_builds:
result[PARENT_IMAGES_KOJI_BUILDS] = self._parent_builds
return result if result else None
|
[
"def __call__(self, results):\n\n data = {}\n img_meta = {}\n for key in self.meta_keys:\n img_meta[key] = results[key]\n data['img_metas'] = DC(img_meta, cpu_only=True)\n \n for key in self.keys:\n data[key] = results[key] \n return data",
"def build_json(self) -> Iterable[Dict[str, Union[str, bool]]]:\n\n for arch in self.archs:\n tag_suffix = f\"-{arch}\"\n yield {\n \"dockerfile\": self.dockerfile,\n \"image_name\": self.image_name,\n \"benchmark\": self.benchmark,\n \"env_var\": self.env_var,\n \"tag_suffix\": tag_suffix,\n \"arch\": arch,\n \"changed\": self.changed,\n \"tags\": \" \".join([f\"{tag}{tag_suffix}\" for tag in self.tags]),\n }",
"def metadata(self) -> dict[str, Any]:",
"def _construct_transform_metadata(self):\n return {\"TransformJob\": {\"Arn\": self.properties[\"TransformJobName\"]}}",
"def _export_result_dict(context, steps=None, messages=None):\n return {'steps': steps,\n 'messages': messages,\n 'tarball': context.getArchive(),\n 'filename': context.getArchiveFilename()}",
"def collect(self):\n\n # NOTE: we could run each command inside a try/except block to have a\n # more granular protection and be able to save data from those commands\n # that didn't fail. Otherwise, if one command fails, all the data for\n # this Build is lost.\n\n data = {}\n data[\"config\"] = {\"user\": self.config.source_config}\n data[\"os\"] = self._get_operating_system()\n data[\"python\"] = self._get_python_version()\n\n user_apt_packages, all_apt_packages = self._get_apt_packages()\n conda_packages = (\n self._get_all_conda_packages() if self.config.is_using_conda else {}\n )\n data[\"packages\"] = {\n \"pip\": {\n \"user\": self._get_user_pip_packages(),\n \"all\": self._get_all_pip_packages(),\n },\n \"conda\": {\n \"all\": conda_packages,\n },\n \"apt\": {\n \"user\": user_apt_packages,\n \"all\": all_apt_packages,\n },\n }\n data[\"doctool\"] = self._get_doctool()\n\n return data",
"def _gen_meta(self):\n meta = {\"encode_dict\" : self.encode_dict,\n \"word_length\" : self.word_len,\n \"data_length\" : self.data_length,\n \"magic_number\" : MAGIC_NUMBER}\n return meta",
"def to_dict(self, include_meta=False):\n result = super(JackalDoc, self).to_dict(include_meta=include_meta)\n if include_meta:\n source = result.pop('_source')\n return {**result, **source}\n else:\n return result",
"def reconstruct_meta_file(self):\n meta_file_content = {}\n\n # Check if `NestedMap` were saved\n map_path = os.path.join(self.objects_dir_path, 'map', 'dictionary.log')\n if os.path.isfile(map_path):\n meta_file_content['dictionary.log'] = {\n 'name': 'dictionary',\n 'type': ['map', 'nested_map'],\n 'data': None,\n 'data_path': 'map',\n }\n\n # Collect metrics meta info\n metrics_info = self.records_storage.get_artifacts_names()\n for metric_name, context_items in metrics_info.items():\n meta_file_content[metric_name] = {\n 'name': metric_name,\n 'type': 'metrics',\n 'data': None,\n 'data_path': '__AIMRECORDS__',\n 'format': {\n 'artifact_format': 'aimrecords',\n 'record_format': 'protobuf',\n },\n 'context': [list(c.items()) for c in context_items],\n }\n return meta_file_content",
"def default_metadata(self):\n\n metadata = {\n \"__template_source__\": self.ctx[\"git_url\"],\n \"__template_ref__\": self.ctx[\"branch\"],\n \"__template_id__\": self.ctx[\"identifier\"],\n \"__namespace__\": self.ctx[\"project_namespace\"],\n \"__repository__\": self.ctx[\"project_repository\"],\n \"__sanitized_project_name__\": self.ctx[\"project_name_stripped\"],\n \"__project_slug__\": self.ctx[\"project_slug\"],\n \"__project_description__\": self.ctx[\"project_description\"],\n }\n\n cli_version = os.environ.get(\"RENKU_PROJECT_DEFAULT_CLI_VERSION\") or __version__\n if is_release(cli_version):\n metadata[\"__renku_version__\"] = cli_version\n\n return metadata",
"def create_metadata_dict(data_dict, key_dict, data_dir):\n meta_dict = dict()\n # Get the project name\n project = data_dict[key_dict['Project']][0]\n\n meta_dict['Project'] = project\n # Date\n date = data_dict[key_dict['Date']][0]\n meta_dict['Date'] = date\n # Construct the datasetID\n start_times = data_dict[key_dict['Start Time']]\n # Have a regular expression filter malformed timestamps\n time_pattern = re.compile(r'\\d{2}:\\d{2}:\\d{2}')\n start_times = [time for time in start_times if time_pattern.match(time)]\n start_times.sort()\n min_start_time = start_times[0]\n dataset_id = '{0}_{1}_{2}'.format(project, date, min_start_time)\n meta_dict['Dataset ID'] = dataset_id\n # Starttime\n meta_dict['Start Time'] = min_start_time\n # Stoptime\n stop_times = data_dict[key_dict['Stop Time']]\n stop_times = [time for time in stop_times if time_pattern.match(time)]\n stop_times.sort()\n meta_dict['Stop Time'] = stop_times[-1]\n\n # Upwelling instrument\n instrument_str = data_dict[key_dict['Instrument']][0]\n instrument_name, snumber, fov = get_instrument_info(instrument_str)\n meta_dict['Upwelling Instrument Name'] = instrument_name\n meta_dict['Upwelling Instrument Serial Number'] = snumber\n meta_dict['Upwelling Instrument FOV'] = fov\n\n # Cal panel\n meta_dict['Calibration Panel'] = list(set(data_dict[key_dict['Calibration Panel']]))\n # Software\n meta_dict['Software Version'] = data_dict[key_dict['Acquisition Software']][0]\n # Target information\n meta_dict['Target'] = reps_to_targets(data_dict[key_dict['Replication']])\n # Legacy Path\n meta_dict['Legacy Path'] = data_dir[data_dir.find('sf_') + 3:] # Remove the /media/sf_ prefix from using the VM.\n # Calibration Mode\n if key_dict['Calibration Mode']:\n meta_dict['Calibration Mode'] = data_dict[key_dict['Calibration Mode']][0]\n\n # Min and Max solar zenith\n zeniths = data_dict[key_dict['Solar Zenith']]\n zeniths = filter_floats(zeniths)\n zeniths.sort()\n meta_dict['Min Solar Zenith'] = zeniths[0]\n meta_dict['Max Solar Zenith'] = zeniths[-1]\n # Min and Max solar Elevatoin\n elevs = data_dict[key_dict['Solar Elevation']]\n elevs = filter_floats(elevs)\n elevs.sort()\n meta_dict['Min Solar Elevation'] = elevs[0]\n meta_dict['Max Solar Elevation'] = elevs[-1]\n # Min & max solar azimuth\n azimuths = data_dict[key_dict['Solar Azimuth']]\n azimuths = filter_floats(azimuths)\n azimuths.sort()\n meta_dict['Min Solar Azimuth'] = azimuths[0]\n meta_dict['Max Solar Azimuth'] = azimuths[-1]\n # Min and Max lat/lon\n lats = data_dict[key_dict['Latitude']]\n if lats and not all(val == '-9999' or val == '' for val in lats): # check if GPS was active\n lats = filter_floats(lats)\n lats.sort()\n meta_dict['Min Latitude'] = lats[0]\n meta_dict['Max Latitude'] = lats[-1]\n meta_dict['Average Latitude'] = mean(lats)\n # Min and max lon (only do this if there were lats)\n lons = data_dict[key_dict['Longitude']]\n lons = filter_floats(lons)\n lons.sort()\n meta_dict['Min Longitude'] = lons[0]\n meta_dict['Max Longitude'] = lons[-1]\n meta_dict['Average Longitude'] = mean(lons)\n\n # Aux related metadata\n if 'Temperature 1' in key_dict.keys():\n temp1 = data_dict[key_dict['Temperature 1']]\n if temp1 and not all(val == '-9999' for val in temp1):\n temp1 = filter_floats(temp1)\n\n temp1.sort()\n meta_dict['Min Temperature 1'] = temp1[0]\n meta_dict['Max Temperature 1'] = temp1[-1]\n if 'Temperature 2' in key_dict.keys():\n temp2 = data_dict[key_dict['Temperature 2']]\n if temp2 and not all(val == '-9999' for val in temp2):\n temp2 = 
filter_floats(temp2)\n temp2.sort()\n meta_dict['Min Temperature 2'] = temp2[0]\n meta_dict['Max Temperature 2'] = temp2[-1]\n if 'Pyronometer' in key_dict.keys():\n pyro = data_dict[key_dict['Pyronometer']]\n if pyro and not all(val == '-9999' for val in pyro):\n pyro = filter_floats(pyro)\n pyro.sort()\n meta_dict['Min Pyronometer'] = pyro[0]\n meta_dict['Max Pyronometer'] = pyro[-1]\n if 'Quantum Sensor' in key_dict.keys():\n quant = data_dict[key_dict['Quantum Sensor']]\n if quant and not all(val == '-9999' for val in quant):\n quant = filter_floats(quant)\n quant.sort()\n meta_dict['Min Quantum Sensor'] = quant[0]\n meta_dict['Max Quantum Sensor'] = quant[-1]\n\n # Number of scans (cal and data)\n meta_dict['Scans Count'] = len(data_dict[key_dict['File Name']])\n\n # Finally, we maintain a set of info on project names, dates, reps, and scan numbers that go along with\n # this data so we can copy the proper files from the original directory (pics, etc.)\n\n meta_dict['scans_info'] = [data_dict['File Name'], data_dict[key_dict['Stop Time']]]\n\n return meta_dict",
"def GetResultMap(content):\n if content is None or content.get('results') is None:\n return\n\n build_result_map = {}\n for r in content.get('results'):\n if r.get('build_id') is not None:\n build_id = r.pop('build_id')\n build_result_map[build_id] = r\n\n return build_result_map",
"def make_config(self):\n if not self.search_terms:\n self.make_search_terms()\n if not self.stmts:\n self.make_gene_statements()\n config = dict()\n config['name'] = self.name\n config['human_readable_name'] = self.human_readable_name\n config['search_terms'] = [st.to_json() for st in self.search_terms]\n config['assembly'] = {\n 'belief_cutoff': 0.8,\n 'filter_ungrounded': True\n }\n if self.description:\n config['description'] = self.description\n return config",
"def _construct_metadata(self):\n if self.properties:\n return self._step_type_to_output_format_map[self.type]()\n return None",
"def build_image_objects(result_dict):\n\n for file in os.listdir(IMG_DIR):\n print(\"Current image -> \", file)\n image_obj = construct_image_object(IMG_DIR + file)\n meta_obj = construct_meta_object(IMG_DIR + file)\n\n image_obj.file_path = IMG_DIR + \"/\" + file\n image_obj.metadata = meta_obj\n\n # add image object to the object list for later iteration\n IMG_OBJS.append(image_obj)\n IMG_PATHS.append(image_obj.file_path)\n result_dict[os.path.splitext(file)[0]] = [] # file name to be replace with shared guid\n\n return result_dict\n # show_obj_prop_debug()",
"def getAnalysesDict(self):\n results = self._resultDict\n results['bug_found'] = self.foundBug\n results['failed'] = self.failed\n results['timeout_hit'] = self.ranOutOfTime\n\n\n return results",
"def _construct_processing_metadata(self):\n return {\"ProcessingJob\": {\"Arn\": self.properties[\"ProcessingJobName\"]}}",
"def build_return_dict_optim(train_losses, test_losses, train_accuracies,\n test_accuracies, model_state_dict):\n return_values = {}\n return_values['train_losses']=train_losses\n return_values['test_losses']=test_losses\n return_values['train_accuracies']=train_accuracies\n return_values['test_accuracies']=test_accuracies\n return_values['model_state_dict']=model_state_dict\n return return_values",
"def _result_to_resource(result):\n targets = [dnsutils.srv_target_to_dict(tgt)\n for tgt in result]\n return {'targets': targets}",
"def build_meta(proxy, qparams, by_entity_type):\n\n schemas = [s for s in get_schemas(proxy, qparams)]\n\n meta = {\n 'beaconId': conf.beacon_id,\n 'apiVersion': conf.api_version,\n 'returnedSchemas': schemas,\n 'returnedGranularity': conf.beacon_granularity,\n 'receivedRequestSummary': build_received_request(qparams, schemas, by_entity_type),\n }\n\n return meta"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Rename an existing overlay instance
|
def rename_overlay(self, old_lbl, new_lbl):
# NOTE: the overlay will call _on_overlay_rename after updating
self.overlay.rename_choice(old_lbl, new_lbl)
|
[
"def test_instance_rename(self):\n # create the instance\n ret_val = self.run_cloud(\n \"-p ec2-test {} --no-deploy\".format(self.instance_name), timeout=TIMEOUT\n )\n # check if instance returned\n self.assertInstanceExists(ret_val)\n\n changed_name = self.instance_name + \"-changed\"\n\n rename_result = self.run_cloud(\n \"-a rename {} newname={} --assume-yes\".format(\n self.instance_name, changed_name\n ),\n timeout=TIMEOUT,\n )\n self.assertFalse(\n self._instance_exists(),\n \"Instance wasn't renamed: |\\n{}\".format(rename_result),\n )\n self.assertInstanceExists(instance_name=changed_name)\n\n self.assertDestroyInstance(changed_name)",
"def do_rename(self, args):\n lb = self.findlb(args.loadbalancer)\n lb.name = args.name\n lb.update()",
"def rename(self):\n\n dialog_rename = Rename(self.name)\n\n if dialog_rename.exec():\n self.__update_image_name(dialog_rename.new_name)",
"def rename(cls, client, resource, new_name) :\n try :\n renameresource = service()\n if type(resource) == cls :\n renameresource.name = resource.name\n else :\n renameresource.name = resource\n return renameresource.rename_resource(client,new_name)\n except Exception as e :\n raise e",
"def set_name(self, new_name):\n\n self.img.attrib['Name'] = new_name",
"def change_image_name(self, img, newname):\r\n return self.update(img, {\"name\": newname})",
"def rename(server, name):\r\n server.update(name)",
"def change_name(self, name):\n if not utils.is_target(name):\n return\n logging.info('Changing target to {}'.format(name))\n self.name = name\n # Update the HSV variables to match the new target\n self.hsv_handler.name = name\n self.hsv_handler.reload()\n # Stop the current loop\n self.stop = True",
"def _rename_machine(self, machine, node, name):\n self.connection.ex_rename_container(node, name)",
"def renameUI():\n pass",
"def renameNamespace(*args, **kwargs):\n \n pass",
"def run(self, imap_obj: imaplib.IMAP4):\n typ, data = imap_obj.rename(self.__old_name, self.__new_name)\n self.check_response(typ, data)\n return self.__new_name",
"def make_original(self, name = None):\n self._clone = False\n self._operations = []\n self._is_original = True\n if name is not None:\n self._original_name = name\n self._image_name = str(self)",
"def _rename_node_in_reaction(self, reaction, old, new):\n reac = Reaction(reaction)\n reac.rename_species({old:new})\n return reac.name",
"def renameRegister(self,reg_cls,reg_idx,name):\n return HopperLowLevel.renameRegister(self.__segment_internal__,self.__procedure_index__,reg_cls,reg_idx,name)",
"def rename(nitro, service):\n __service = NSService()\n __service.set_name(service.get_name())\n __service.set_newname(service.get_newname())\n return __service.perform_operation(nitro, \"rename\")",
"def __nameChanged(self,ippool_obj,old_name):\n self.unloadIPpoolByName(old_name)\n self.__keepObj(ippool_obj)",
"def click_rename_icon(self, file_name):\n return self",
"def modify_instance_placement(InstanceId=None, Tenancy=None, Affinity=None, HostId=None):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Add a new overlay instance, and set it as selected. Once selected, the traitlets will then control the options of the new overlay.
|
def add_overlay(self, lbl):
# TODO: ability to pass options which would avoid updating the marks until all are set,
# probably by setattr(self.user_api, k, v) (and checks in advance that all are valid?)
self.overlay.add_choice(lbl, set_as_selected=True)
|
[
"def useOverlay(self, overlay: 'SbBool'=1) -> \"void\":\n return _coin.SoExtSelection_useOverlay(self, overlay)",
"def add_selection(self, pi):\n # pi: \"pick_info\", i.e. an incomplete selection.\n ann = pi.artist.axes.annotate(\n _pick_info.get_ann_text(*pi),\n xy=pi.target,\n **default_annotation_kwargs)\n ann.draggable(use_blit=True)\n extras = []\n if self._highlight:\n extras.append(self.add_highlight(pi.artist))\n if not self._multiple:\n while self._selections:\n self._remove_selection(self._selections[-1])\n sel = pi._replace(annotation=ann, extras=extras)\n self._selections.append(sel)\n self._callbacks.process(\"add\", sel)\n sel.artist.figure.canvas.draw_idle()\n return sel",
"def useOverlay(self, overlay = 1):\n return _coin.SoExtSelection_useOverlay(self, overlay)",
"def addOverlay(self, overlayObj, callback=None):\n \n self.__addOverlayLock.acquire()\n\n # first check that the widget hasn't been added already\n if hasattr(overlayObj, \"_added\"):\n if overlayObj._added:\n self.__addOverlayLock.release()\n return\n else:\n overlayObj._added = True\n\n # if this is the only thing to add, just do it now\n # otherwise it will be added by the __onObjectInfo when the previous one gets added\n self.__addOverlayQueue.append((overlayObj, callback))\n if len(self.__addOverlayQueue) == 1:\n winId = overlayObj.getWindowId()\n widgetId = overlayObj.getWidgetId()\n self.sageGate.addOverlay(overlayObj.getType(), widgetId, winId, \n overlayObj._dataToXml(overlayObj.getType()))\n\n # add to top level parents... if appropriate\n #if (not overlayObj._hasParent() and not overlayObj._eventTransparent) or \\\n # (overlayObj._hasParent() and overlayObj.parent._eventTransparent):\n if not overlayObj._hasParent():\n if overlayObj not in self.__topLevelParents:\n self.__topLevelParents.append(overlayObj)\n\n self.__addOverlayLock.release()",
"def _add_selection(self, item):\n outline = self._selection_poly(item)\n path = self._transform.map(outline)\n ppath = self._create_path(item, path)\n label = self._create_label(len(self._selection))\n selection_item = self.SelectionItem(self, ppath, outline, label)\n selection_item.setPos(self.contentsRect().topLeft())\n self._selection[item] = selection_item",
"def create_selection(self, ai_settings, action=None, text=None):\n rect = pygame.Rect(2, 176, 197, 56)\n surface = self.menu_source.subsurface(rect)\n selection = Selection(surface, ai_settings, action, text)\n self.selections.append(selection)\n return selection",
"def make_selection(self, selected, add):\n if self.data and self.lsx and self.lsy:\n if selected is None and not add:\n self.selection_group *= 0 # set all to False\n elif selected is not None:\n if not add:\n self.selection_group *= 0\n self.selection_group[selected] = 1\n self.refresh_img_selection()\n self.send_selection()",
"def _replace_selection(self):\n try:\n new = self.current_sprite_class.create_for_editor(self.selection.points)\n except (ValueError, TypeError, IndexError):\n pass\n else:\n self.hole.groups['all'].add(new)\n self.hole.groups['collidibles'].add(new)\n self.selection.kill()\n self.selection = new",
"def add(self, entry):\n\t\tif not(entry in self.entries):\n\t\t\tif entry.data.name in FIXEDENTRIES: return\n\t\t\telse:\n\t\t\t\tself.entries.append(entry)\n\t\t\t\tlog().debug('adding to selection ' + entry.data.name)\n\t\t\t\tentry.canBeSelected = False\n#\t\t\t\tself.multiselectNode.showBoundingBox(True)\n\t\tif len(self.entries) == 1 : \n\t\t\tself.multiselectNode.setPosition(entry.node._getDerivedPosition())\n\t\t\tself._ogreWindow.axis.attachTo(self.multiselectNode)\n\t\t\tself._entry.OnPositionChanging.append(self.translateSelection)\n\t\tentry.changeParentTo(self.multiselectNode)\n\t\tentry.node.showBoundingBox(True)",
"def polySelect(self, event):\n\n # .seletion: [(poly,attr), ...]\n selection = event.selection\n\n # turn any previous selection off\n if self.sel_poly_layer:\n self.pyslip.DeleteLayer(self.sel_poly_layer)\n self.sel_poly_layer = None\n\n # box OR single selection\n if selection:\n # get selected polygon points into form for point display layer\n points = []\n for (poly, d) in selection:\n try:\n del d['colour']\n except KeyError:\n pass\n try:\n del d['radius']\n except KeyError:\n pass\n for (x, y) in poly:\n points.append((x, y, d))\n\n self.sel_poly_layer = \\\n self.pyslip.AddPointLayer(points, map_rel=True,\n colour='#ff00ff',\n radius=5, visible=True,\n show_levels=[3,4],\n name='<sel_poly>')\n\n return True",
"def polylineSelect(self, event):\n\n # .seletion: [(poly,attr), ...]\n selection = event.selection\n relsel = event.relsel\n\n # turn any previous selection off\n if self.sel_polyline_layer:\n self.pyslip.DeleteLayer(self.sel_polyline_layer)\n self.sel_polyline_layer = None\n if self.sel_polyline_layer2:\n self.pyslip.DeleteLayer(self.sel_polyline_layer2)\n self.sel_polyline_layer2 = None\n\n # box OR single selection\n if selection:\n # show segment selected first, if any\n if relsel:\n self.sel_polyline_layer2 = \\\n self.pyslip.AddPointLayer(relsel, map_rel=True,\n colour='#40ff40',\n radius=5, visible=True,\n show_levels=[3,4],\n name='<sel_polyline2>')\n\n # get selected polygon points into form for point display layer\n points = []\n for (poly, d) in selection:\n try:\n del d['colour']\n except KeyError:\n pass\n try:\n del d['radius']\n except KeyError:\n pass\n for (x, y) in poly:\n points.append((x, y, d))\n\n self.sel_polyline_layer = \\\n self.pyslip.AddPointLayer(points, map_rel=True,\n colour='#ff00ff',\n radius=3, visible=True,\n show_levels=[3,4],\n name='<sel_polyline>')\n return True",
"def on_select(self):\n self.selected = True\n for tile in self.divisions:\n if tile is not self:\n tile.group_selected = True",
"def reSelected(self):\n self.isSelected = True\n self.health -= 1\n selectedSprites.add(self)",
"def overlay_add(self, overlay_id, x, y, file_or_fd, offset, fmt, w, h, stride):\n self.command('overlay_add', overlay_id, x, y, file_or_fd, offset, fmt, w, h, stride)",
"def next_on_click(layer, event):\n if layer.mode == 'add':\n next_label()\n\n # by default, napari selects the point that was just added\n # disable that behavior, as the highlight gets in the way\n layer.selected_data = []",
"def remove_overlay(self, lbl):\r\n # NOTE: the overlay will call _on_overlay_remove after updating\r\n self.overlay.remove_choice(lbl)",
"def loadHPSelectedLayer(self, posn):\n\n self.selected_hp_layer = \\\n self.pyslip.addMonoPointLayer((posn,),\n colour=SelectHPPointColour,\n size=SelectHPPointSize,\n name='selected HP')",
"def setSelected(self, selected):\n if selected:\n self.graphics_item.setZValue(2000.0)\n self.graphics_item.setPen(self.selected_pen)\n else:\n self.graphics_item.setZValue(1000.0)\n self.graphics_item.setPen(self.deselected_pen)",
"def pointSelect(self, event):\n\n selection = event.selection\n\n if selection == self.sel_point:\n # same point(s) selected again, turn point(s) off\n self.pyslip.DeleteLayer(self.sel_point_layer)\n self.sel_point_layer = None\n self.sel_point = None\n elif selection:\n # some other point(s) selected, delete previous selection, if any\n if self.sel_point_layer:\n self.pyslip.DeleteLayer(self.sel_point_layer)\n\n # remember selection (need copy as highlight modifies attributes)\n self.sel_point = copy.deepcopy(selection)\n\n # choose different highlight colour for different type of selection\n selcolour = '#00ffff'\n if event.type == pyslip.EventSelect:\n selcolour = '#0000ff'\n\n # get selected points into form for display layer\n # delete 'colour' and 'radius' attributes as we want different values\n highlight = []\n for (x, y, d) in selection:\n del d['colour'] # AddLayer...() ensures keys exist\n del d['radius']\n highlight.append((x, y, d))\n\n # layer with highlight of selected poijnts\n self.sel_point_layer = \\\n self.pyslip.AddPointLayer(highlight, map_rel=True,\n colour=selcolour,\n radius=5, visible=True,\n show_levels=MRPointShowLevels,\n name='<sel_pt_layer>')\n\n # make sure highlight layer is BELOW selected layer\n self.pyslip.PlaceLayerBelowLayer(self.sel_point_layer,\n self.point_layer)\n # else: we ignore an empty selection\n\n return True"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Remove an overlay instance. If the removed overlay was selected, the selection will default to the first entry in the list.
|
def remove_overlay(self, lbl):
# NOTE: the overlay will call _on_overlay_remove after updating
self.overlay.remove_choice(lbl)
|
[
"def deSelected(self):\n self.isSelected = False\n selectedSprites.remove(self)",
"def remove(self):\n self.layers.pop()",
"def remove_selected(self):\n idx = 0\n for i in list(self.selection):\n idx = self.index(i)\n self.remove(i)\n new = max(0, (idx - 1))\n if len(self) > new:\n self.selection.add(self[new])",
"def deleteHPSelectedLayer(self):\n\n if self.selected_hp_layer:\n self.pyslip.deleteLayer(self.selected_hp_layer)\n self.selected_hp_layer = None",
"def remove_point(self):\n\t\tselection = self.list_widget.selectedItems()[0]\n\t\tindex = selection.data(QtCore.Qt.UserRole)\n\n\t\tprint(index)\n\t\tself.calibration_manager.delete_point(index)\n\n\t\tself.update_point_list()\n\t\tself.set_calibration_data()",
"def remove(self, entry=None):\n\t\tlog().debug('removing selection')\n\t\tif self._entry:\n\t\t\ttry:\n\t\t\t\tidx = self._entry.OnPositionChanging.index(self.translateSelection)\n\t\t\t\tdel self._entry.OnPositionChanging[idx]\n\t\t\texcept ValueError:\n\t\t\t\tpass\n\t\t\t\n\t\tremoveAll = entry == None\n\t\tif not removeAll:\n\t\t\ttry:\n\t\t\t\tidx = self.entries.index(entry)\n\t\t\texcept ValueError:\n\t\t\t\treturn # not found\n\t\t\tself._unselectItem(self.entries[idx])\n\t\t\tdel self.entries[idx]\n\t\telse:\n\t\t\tfor e in self.entries:\n\t\t\t\tself._unselectItem(e)\n\t\t\tself.entries = []",
"def remove_selected_element(self) -> str:\r\n index_to_delete = self.lb_sel_params.curselection()[0]\r\n value_to_delete = self.lb_sel_params.get(index_to_delete)\r\n self.lb_sel_params.delete(index_to_delete)\r\n return value_to_delete",
"def delete_entry(self, *args):\n if len(self.value) > 1 and self.recycle_view_class_pool.selected:\n label = self.recycle_view_class_pool.selected[\"text\"]\n idx = self.imagenet_labels[label]\n self.value.remove(idx)\n self.set_value()",
"def removeItem(self):\r\n\t\t\r\n\t\tself.enterItem = None\r\n\t\tself.scene().removeSelItem()",
"def pop_layer(self, index):\n self._layers.pop(index)",
"def delete_position(self):\n\n # Get information on the selected position\n label = self.gui.posSelect.currentText()\n index = self.gui.posSelect.currentIndex()\n\n # Check if the selected position is the \"--None--\" label.\n if index == 0:\n return None\n\n self.gui.posSelect.removeItem(index)\n del self.gui.savedPos[label]\n save_pos_config(path=\"saved_positions.json\", data=self.gui.savedPos)\n\n # Print output statement.\n self.append_text(f\"Position deleted: {label}\")",
"def on_deselect(self):\n for tile in self.divisions:\n tile.group_selected = False\n tile.selected = False",
"def remove_selected(self):\n\n if not self.selected:\n required_field_empty_warning(self, \"Select item for removal.\")\n\n # on (row, 0) placed entity ID\n model_id = int(self.table_widget.item(self.selected[0], 0).text())\n\n if not DeleteDialog(\n \"item with ID = {0}\".format(model_id), self.model.__tablename__\n ).exec_() == QDialog.Accepted:\n return\n\n session = db.get_session()\n session.query(self.model).filter(self.model.id == model_id).delete()\n session.commit()\n self.show_table(self.model)",
"def on_remove_resource(self, event):\r\n resource_index = self.listbox_resources.GetSelection()\r\n if resource_index != wx.NOT_FOUND:\r\n resource_type = self.choice_type.GetSelection()\r\n self.resource_lists[ resource_type ].pop( resource_index )\r\n self.listbox_resources.Delete( resource_index )",
"def __onLabelRemove(self, ev):\n\n idx = self.__labelList.GetSelection()\n lut = self.__selectedLut\n value = lut[idx].value\n\n with lut.skip(self.name, 'removed'):\n lut.delete(value)\n self.__labelList.Delete(idx)",
"def pop_layer(self, index=-1):\n\n return self._layers.pop(index)",
"def deleteDeagLayer(self):\n\n if self.deag_layer:\n # deregister a box select callback for deag zones\n self.pyslip.setBoxSelectCallback(self.deag_layer, None)\n\n self.pyslip.deleteLayer(self.deag_layer)\n self.deag_layer = None\n\n if self.deag_label_layer:\n self.pyslip.deleteLayer(self.deag_label_layer)\n self.deag_label_layer = None",
"def unhighlight(self, index):\n if index == self.selected:\n self.labels[index].color_fg = self.color_bg\n self.labels[index].color_bg = self.color_fg\n else:\n self.labels[index].color_fg = self.color_fg\n self.labels[index].color_bg = self.color_bg\n self.highlighted.remove(index)",
"def _RemoveAnchor(self, anchor):\n self.Detach()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Center the values of RA and DEC based on the current zoom limits of a viewer.
|
def center_on_viewer(self, viewer_ref=None):
if viewer_ref is None:
if not len(self.viewer.selected): # pragma: nocover
raise ValueError("no viewers selected, provide viewer reference")
viewer_ref = self.viewer.selected[0]
viewer = self.app.get_viewer(viewer_ref)
center_coord = viewer._get_center_skycoord()
self._ignore_traitlet_change = True
self.ra = center_coord.ra.to_value('deg')
self.dec = center_coord.dec.to_value('deg')
self._ignore_traitlet_change = False
self._preset_args_changed() # process ra/dec simultaneously
|
[
"def reset_zoom_and_center(self):\n self._send_to_ztv('reset-zoom-and-center')",
"def setup_limits(self):\r\n #odmakni x granice za specificni interval ovisno o tipu\r\n tmin, tmax = self.prosiri_granice_grafa(self.pocetnoVrijeme, self.zavrsnoVrijeme, 4)\r\n #set granice za max zoom out\r\n self.xlim_original = (tmin, tmax)\r\n self.ylim_original = self.axes.get_ylim()\r\n y1, y2 = self.ylim_original\r\n c = abs(y2 - y1) * 0.1\r\n self.ylim_original = (y1, y2 + c)\r\n #set granice grafa\r\n self.axes.set_xlim(self.xlim_original)\r\n self.axes.set_ylim(self.ylim_original)\r\n #set limite prilikom crtanja na zoom stack\r\n self.zoomStack.append((self.xlim_original, self.ylim_original))",
"def setup_limits(self):\r\n #odmakni x granice za specificni interval ovisno o tipu\r\n tmin, tmax = self.prosiri_granice_grafa(self.pocetnoVrijeme, self.zavrsnoVrijeme, 60)\r\n #set granice za max zoom out\r\n self.xlim_original = (tmin, tmax)\r\n self.ylim_original = self.axes.get_ylim()\r\n y1, y2 = self.ylim_original\r\n c = abs(y2 - y1) * 0.1\r\n self.ylim_original = (y1, y2 + c)\r\n #set granice grafa\r\n self.axes.set_xlim(self.xlim_original)\r\n self.axes.set_ylim(self.ylim_original)\r\n #set limite prilikom crtanja na zoom stack\r\n self.zoomStack.append((self.xlim_original, self.ylim_original))",
"def view_limits(self, vmin, vmax):\n b = self._base\n\n vmin, vmax = self.nonsingular(vmin, vmax)\n\n if mpl.rcParams['axes.autolimit_mode'] == 'round_numbers':\n vmin = _decade_less_equal(vmin, self._base)\n vmax = _decade_greater_equal(vmax, self._base)\n\n return vmin, vmax",
"def _set_lim_and_transforms(self):\n\n\n one = mtrans.one\n zero = mtrans.zero\n Point = mtrans.Point\n Bbox = mtrans.Bbox\n if self._sharex is not None:\n left=self._sharex.viewLim.ll().x()\n right=self._sharex.viewLim.ur().x()\n else:\n left=zero()\n right=one()\n if self._sharey is not None:\n bottom=self._sharey.viewLim.ll().y()\n top=self._sharey.viewLim.ur().y()\n else:\n bottom=zero()\n top=one()\n\n\n\n self.viewLim = Bbox(Point(left, bottom), Point(right, top))\n self.dataLim = mtrans.unit_bbox()\n\n self.transData = mtrans.get_bbox_transform(\n self.viewLim, self.bbox)\n self.transAxes = mtrans.get_bbox_transform(\n mtrans.unit_bbox(), self.bbox)\n\n if self._sharex:\n self.transData.set_funcx(self._sharex.transData.get_funcx())\n\n if self._sharey:\n self.transData.set_funcy(self._sharey.transData.get_funcy())",
"def zoom_and_center(self, amt):\n x,y = self.GetClientSize()[0] / 2, self.GetClientSize()[1] / 2\n x,y = self.CalcUnscrolledPosition((x,y))\n old_scale = self.scale\n # If the image gets too small, an EVT_SIZE event is fired, which\n # expands image to max-fit -- annoying when zooming out a lot.\n self.Unbind(wx.EVT_SIZE)\n if amt >= 1.0:\n self.increase_scale(amt - 1.0)\n else:\n self.decrease_scale(1.0 - amt)\n\n new_virt_pos = (x*(self.scale / old_scale), y*(self.scale / old_scale))\n self.center_viewport(new_virt_pos, (x,y))\n self.Refresh()\n self.Bind(wx.EVT_SIZE, self.onSize)",
"def _set_axes_limits(self):\n x_ax_min = max(self.data_stats.x_min,\n self.pos.x - self.data_stats.x_delta)\n x_ax_max = min(self.data_stats.x_max,\n self.pos.x + self.data_stats.x_delta)\n y_ax_min = max(self.data_stats.y_min,\n self.pos.y - self.data_stats.y_delta)\n y_ax_max = min(self.data_stats.y_max,\n self.pos.y + self.data_stats.y_delta)\n\n if x_ax_min >= x_ax_max or y_ax_min >= y_ax_max:\n raise ValueError(\"Invalid axes bounds generated - change \"\n \"scaling parameters.\")\n\n self.axes.axis([x_ax_min, x_ax_max, y_ax_min, y_ax_max])",
"def set_zoom_when_center(self, client, cnxn_id, entry, data):\n config.set(\"geography.zoom_when_center\", int(entry))",
"def afrange(self):\n if self.AFX: #if mouse event has happend\n start = self.AFX - self.AFRange #center minus the radius\n end = self.AFX + self.AFRange #center plus the radius\n else: #if no center has been chosen, take whole spectrum\n start = 0\n end = 1023\n if self.nmscale: #if scale in in nm show start and end in nm\n print \"AF will maximize signal in the range from \"+str(self.scaleinnm[start])+\"nm to \"+str(self.scaleinnm[end])+\"nm\"\n else: # if scale is in pxl\n print \"AF will maximize signal in the range from \"+str(start)+\"px to \"+str(end)+\"px\"\n return start,end",
"def setCenter(self, *args, **kwargs):\n cen = galsim.utilities.parse_pos_args(args, kwargs, 'xcen', 'ycen', integer=True)\n self._shift(cen - self.image.bounds.center())",
"def _set_lim_and_transforms(self):\n\n # the lim are theta, r\n\n Bbox = mtrans.Bbox\n Value = mtrans.Value\n Point = mtrans.Point\n self.dataLim = Bbox( Point( Value(5/4.*math.pi), Value(math.sqrt(2))),\n Point( Value(1/4.*math.pi), Value(math.sqrt(2))))\n self.viewLim = Bbox( Point( Value(5/4.*math.pi), Value(math.sqrt(2))),\n Point( Value(1/4.*math.pi), Value(math.sqrt(2))))\n\n self.transData = mtrans.NonseparableTransformation(\n self.viewLim, self.bbox,\n mtrans.FuncXY(mtrans.POLAR))\n self.transAxes = mtrans.get_bbox_transform(\n mtrans.unit_bbox(), self.bbox)",
"def center_camera(self, point: QPoint) -> None:\r\n\t\tself.camera = QPoint(point.x() - self.width()/(2*self.scale), point.y() - self.height()/(2*self.scale))\r\n\t\tself.constrain_camera()",
"def calculateCenter(self, factor):\n if (factor > 0):\n scalefactor = factor/100.0 -1\n center = vertex(0, 0, 0)\n for v in self.pts:\n center += v\n center = center / len(self.pts) \n else:\n print(\"Input must be greater than 0!\")\n scalefactor = 1\n return scalefactor, center",
"def RecalcViewLonLatLimits(self):\n\n self.view_llon = self.map_llon + self.view_offset_x / self.ppd_x\n self.view_rlon = self.view_llon + self.view_width / self.ppd_x\n\n self.view_tlat = self.map_tlat - self.view_offset_y / self.ppd_y\n self.view_blat = self.view_tlat - self.view_height / self.ppd_y",
"def center_axes(ax):\n ax.spines[\"bottom\"].set_position(\"zero\")\n ax.spines[\"left\"].set_position(\"zero\")\n ax.spines[\"right\"].set_visible(False)\n ax.spines[\"top\"].set_visible(False)\n return ax",
"def on_resize(self, width, height):\n\n # call overrided function\n super().on_resize(width, height)\n\n # update camera value\n (width, height) = self.get_size()\n self.left = -self.zoom_level * width/2\n self.right = self.zoom_level * width/2\n self.bottom = -self.zoom_level * height/2\n self.top = self.zoom_level * height/2\n self.zoomed_width = self.zoom_level * width\n self.zoomed_height = self.zoom_level * height",
"def center( self, center=True ):\n\tself.centerAround = center",
"def change_offset_and_zoom(self, new_center_x, new_center_y,zoom_on_click): \n xabsoluteoffset = (new_center_x - self.width/2)*(self.xabsoluteend - self.xabsolutestart)/self.width\n yabsoluteoffset = (new_center_y - self.height/2)*(self.yabsoluteend - self.yabsolutestart)/self.height\n\n\tself.xabsolutestart += xabsoluteoffset\n\tself.yabsolutestart += yabsoluteoffset\n self.xabsoluteend += xabsoluteoffset\n self.yabsoluteend += yabsoluteoffset \n\n self.change_zoom(zoom_on_click)",
"def center(self):\n\n if self.c is not None:\n return self.c\n else:\n # compute c by factoring\n self.c = -np.dot(self.projection.R.T, self.projection.t)\n return self.c"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Given a path to a pickle file that contains a set of premade cross-validation folds, this function will load the file and return the contained data. This function also performs simple checks on the loaded object to be sure it is a well-formed CV-folds object.
|
def load_cv_folds(filepath):
folds = pickle.load(open(filepath, "rb"))
if not isinstance(folds, list):
raise RuntimeError("Loaded a non-list item as CV folds.")
if not isinstance(folds[0], tuple) or not len(folds[0]) == 3:
print(type(folds[0]))
print(len(folds))
raise RuntimeError("CV-folds object is malformed")
return folds
|
[
"def load_classifier():\n with open(\"classifier.pik\", 'rb') as f:\n return pickle.load(f)",
"def loadClassifier(filename):\n with open(filename, 'rb') as fid:\n return cPickle.load(fid)",
"def read_classifier(file_path):\n import cPickle\n if file_path.endswith('.classifier'):\n cls = cPickle.load(file(file_path, 'rb'))\n return cls\n else:\n print 'This file is not a classifier file!'",
"def load_objects(saved_path):\n objects = None\n if os.path.exists(saved_path):\n with open(saved_path, 'rb') as file:\n objects = pickle.load(file)\n return objects",
"def poincare_load(filename):\n with open(filename, 'rb') as input:\n data = pickle.load(input)\n return data",
"def trained_classifier_load(self, path=\"../../../datasets/multinomial_nb_classifier\"):\n \n with open(path, 'rb') as f:\n \n self.classifier = pickle.load(f)",
"def loadPickledMDP(load_from_file):\n mdp_file = os.path.join(mdp_obj_path, load_from_file)\n print \"Loading file {}.\".format(mdp_file)\n with open(mdp_file) as _file:\n list_to_unpack = pickle.load(_file)\n if not isinstance(list_to_unpack, list):\n list_to_unpack = [list_to_unpack]\n list_to_unpack.append(mdp_file)\n return list_to_unpack",
"def load_fold_data(base_folder, model_name, model_class, save_path=\"save/\"):\n base_folder = save_path + base_folder\n\n task_list = []\n for task_folder in sorted(os.listdir(base_folder)):\n if \".json\" in task_folder:\n continue\n task_folder = base_folder + \"/\" + task_folder\n\n fold_result_list = []\n for fold_folder in sorted(os.listdir(task_folder)):\n curr_folder = task_folder + \"/\" + fold_folder + \"/\"\n pred = pd.read_csv(curr_folder + \"pred.csv\")\n with open(curr_folder + \"miss_data.pkl\", \"rb\") as handle:\n miss_data = pickle.load(handle)\n \n loss_detail = load_json(curr_folder + \"loss_detail.json\")\n model = model_class.load_from_path(\n curr_folder + model_name\n )\n result_fold = FoldWalkForewardResult(\n pred=pred, missing_data=miss_data, model=model, loss_detail=loss_detail\n )\n fold_result_list.append(result_fold)\n \n task_list.append(fold_result_list)\n \n return task_list",
"def pickle_load(file_path):\n with open(file_path, 'rb') as file_ptr:\n data = pickle.load(file_ptr)\n return data",
"def parse_cv_folds(filepath):\n with open(filepath) as f:\n folds_string = f.readlines() # 1 line\n\n parens = pyparsing.nestedExpr('(', ')', content=pyparsing.Word(pyparsing.alphanums))\n folds = parens.parseString(folds_string[0]).asList()[0]\n fold_list = []\n\n for fold in folds:\n test_indices = fold[0]\n train_indices = fold[1]\n\n assert train_indices[0] == 'TRAIN' and test_indices[0] == 'TEST'\n\n fold_list += [(np.array(train_indices[1], dtype=int),\n np.array(test_indices[1], dtype=int))]\n\n return fold_list",
"def load_recipe_container(filename_pickle):\n with open(filename_pickle + '_train.pk', 'rb') as f:\n train = pickle.load(f)\n with open(filename_pickle + '_validation.pk', 'rb') as f:\n validation = pickle.load(f)\n with open(filename_pickle + '_test.pk', 'rb') as f:\n test = pickle.load(f)\n return DataContainer(train, validation, test)",
"def load_obj(path):\n with open(path, \"rb\") as f:\n return pickle.load(f)",
"def load(self):\n if os.path.exists(self.loaded_data):\n with open(self.loaded_data, 'rb') as f:\n preloaded_data = pickle.load(f)\n # Train part\n self.class2imgid = preloaded_data['class2imgid']\n self.path2class_sketch = preloaded_data['path2class_sketch']\n self.class2path_sketch = preloaded_data['class2path_sketch']\n self.path2class_image = preloaded_data['path2class_image']\n self.class2path_image = preloaded_data['class2path_image']\n self.id2path = preloaded_data['id2path']\n # Test part\n self.class2id = preloaded_data['class2id']\n self.id2class = TEST_CLASS\n self.class2imgid_test = preloaded_data['class2imgid_test']\n self.class2path_sketch_test = preloaded_data['class2path_sketch_test']\n self.class2path_image_test = preloaded_data['class2path_image_test']\n self.path2class_sketch_test = preloaded_data['path2class_sketch_test']\n self.path2class_image_test = preloaded_data['path2class_image_test']\n # Shared part\n self.loaded_image = preloaded_data['loaded_image']\n return\n self.id2class = TEST_CLASS\n self.class2id = dict()\n for idx, cls in enumerate(self.id2class):\n self.class2id[cls] = idx\n\n self.class2imgid, self.path2class_sketch, self.class2path_sketch, self.path2class_image, self.class2path_image = \\\n self.load_stats(self.stats_file_train, TRAIN_CLASS, self.sketch_files_train, self.image_files_train)\n \n self.class2imgid_test, self.path2class_sketch_test, self.class2path_sketch_test, self.path2class_image_test, self.class2path_image_test = \\\n self.load_stats(self.stats_file_test, TEST_CLASS, self.sketch_files_test, self.image_files_test)\n\n for path in self.path2class_sketch.keys():\n self.loaded_image[path] = self.load_each_image(path)\n self.id2path.append(path)\n\n for path in self.path2class_image.keys():\n self.loaded_image[path] = self.load_each_image(path)\n \n for path in self.path2class_sketch_test.keys():\n self.loaded_image[path] = self.load_each_image(path)\n\n for path in self.path2class_image_test.keys():\n self.loaded_image[path] = self.load_each_image(path)\n \n assert len(self.id2path) == len(self.path2class_sketch.keys())\n preloaded_data = dict()\n # Train part\n preloaded_data['class2imgid'] = self.class2imgid\n preloaded_data['path2class_sketch'] = self.path2class_sketch\n preloaded_data['class2path_sketch'] = self.class2path_sketch\n preloaded_data['path2class_image'] = self.path2class_image\n preloaded_data['class2path_image'] = self.class2path_image\n preloaded_data['id2path'] = self.id2path\n # Test part\n preloaded_data['class2id'] = self.class2id\n preloaded_data['class2imgid_test'] = self.class2imgid_test\n preloaded_data['class2path_sketch_test'] = self.class2path_sketch_test\n preloaded_data['class2path_image_test'] = self.class2path_image_test\n preloaded_data['path2class_sketch_test'] = self.path2class_sketch_test\n preloaded_data['path2class_image_test'] = self.path2class_image_test\n # Shared part\n preloaded_data['loaded_image'] = self.loaded_image\n \n with open(self.loaded_data, 'wb') as f:\n pickle.dump(preloaded_data, f)\n return",
"def pickle_load(path):\n return pickle.load(open(path, \"rb\"))",
"def loadPickledPolicyInferenceMDP(load_from_file):\n infered_mdp_file = os.path.join(infered_mdps_path, load_from_file)\n print \"Loading infered policy data file {}.\".format(infered_mdp_file)\n with open(infered_mdp_file) as _file:\n list_to_unpack = pickle.load(_file)\n if not isinstance(list_to_unpack, list):\n list_to_unpack = [list_to_unpack]\n list_to_unpack.append(infered_mdp_file)\n return list_to_unpack",
"def load_set():\r\n with open('mnist_data', 'rb') as f:\r\n training, validation, testing = pickle.load(f)\r\n f.close()\r\n return training, validation, testing",
"def trained_classifier_load(self, path=\"../../../../datasets/logistic_regression_classifier\"):\n \n with open(path, 'rb') as f:\n \n self.classifier = pickle.load(f)",
"def loadResources(self):\n AbstractSelection.loadResources(self)\n if self.resource_pool.has_key(data_sources.CVFOLDS):\n #fs = self.resource_pool[data_sources.CVFOLDS]\n #self.folds = fs.readFolds()\n self.folds = self.resource_pool[data_sources.CVFOLDS]\n elif self.resource_pool.has_key(data_sources.TRAIN_QIDS):\n self.folds = self.resource_pool[data_sources.TRAIN_QIDS]\n #self.folds = qsource.readFolds()",
"def loadpickle(fname):\r\n return pickle.load(open(fname, 'rb'))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Given the filename of an ablation file, this function parses out the identifier of the classifier used and then returns the print name that matches the identifier.
|
def resolve_model_name(filename):
first_ = filename.find("_")
second_ = filename.find("_", first_ + 1)
model_name = filename[:second_]
return get_classifier_print_name(model_name)
|
[
"def get_classid_by_filename(filename):\n filename = os.path.basename(filename)\n return filename.split('_')[-1].split('.')[0]",
"def parse_id(filename):\n match = re.search('B[0-9]{2}-[0-9]{3}', filename) \n if match:\n return match.group()\n return None",
"def classifier_document_name(self, content):\n return translit(''.join([char for char in content if char.isalpha()]) + '.txt', reversed=True)",
"def getidname(ontofilename):\n\tidname={}\n\tnumtot=0\n\tprint('initializing idname from file %s' % ontofilename)\n\tparser=oboparse.Parser(open(ontofilename))\n\tfor citem in parser:\n\t\tnumtot+=1\n\t\ttry:\n\t\t\tcid=citem.tags[\"id\"][0]\n\t\t\tcname=citem.tags[\"name\"][0]\n\t\t\tif cid in idname:\n\t\t\t\tprint(\"id %s already exists!\" % cid)\n\t\t\tidname[cid]=cname\n\t\texcept:\n\t\t\tcontinue\n\tprint('loaded %d ids out of %d entries' % (len(idname),numtot))\n\treturn idname",
"def makeLabelFromFileName(self, filename):\n self.fsLabel = os.path.splitext(filename)[0]\n self.fsLabel = filter(lambda c: c.isalnum() or c in \"-_ \", self.fsLabel)\n if self.extended:\n self.fsLabel = self.fsLabel[0:16]\n else:\n self.fsLabel = self.fsLabel.upper()[0:11]",
"def GetTestNameAndISAFromFileName(filename):\n # Strip the \".json\" extension\n stripped_basename = os.path.splitext(os.path.basename(filename))[0]\n # The ISA is the last element in the filename, seperated with \"-\".\n if stripped_basename.endswith(('-a32', '-t32')):\n isa = [stripped_basename[-3:]]\n test_name = stripped_basename[:-4]\n else:\n # If the ISA is ommitted, support both.\n isa = [\"a32\", \"t32\"]\n test_name = stripped_basename\n\n return (test_name, isa)",
"def get_classifier_filename(self):\n\n # Classifier filename is parametrized by important experiment parameters.\n return 'classifier_{}_lr{}_rr{}_m{}_c{}_a{}.pkl'.format(self.classifier_params['input_feature'],\n self.classifier_params['learning_rate'],\n self.classifier_params['random_restarts'],\n self.classifier_params['num_measurements'],\n self.classifier_params['counter'],\n self.classifier_params['a_index'])",
"def _extract_identifier(identifier, path):\r\n start_pos = path.find(identifier)\r\n start_cut = path[start_pos:]\r\n \r\n end_pos_under = start_cut.find(\"_\")\r\n if end_pos_under == - 1:\r\n end_pos = float(\"inf\")\r\n else:\r\n end_pos = end_pos_under\r\n \r\n end_pos_dash = start_cut.find(\"-\")\r\n if end_pos_dash != -1 and end_pos_dash < end_pos:\r\n end_pos = end_pos_dash\r\n \r\n end_pos_dot = start_cut.find(\".\")\r\n if end_pos_dot != -1 and end_pos_dot < end_pos:\r\n end_pos = end_pos_dot\r\n \r\n if end_pos == float(\"inf\"):\r\n raise ValueError(\"Data format identifier in file path not terminated \"\r\n \"by '-', '_' or '.'.\")\r\n \r\n full_identifier = start_cut[0:end_pos]\r\n \r\n return full_identifier",
"def parse_component_name(filename):\n name, ext = os.path.splitext(os.path.split(filename)[-1])\n if ext.lower() in ['.c', '.cpp', '.cc', '.h']:\n return name\n return None",
"def get_type_and_id_from_file(file_path):\n label, sig_id = -1, -1\n if 'Genuine' in file_path:\n label = 1\n sig_id = int(file_path[-10:-7])\n elif 'Forgeries' in file_path:\n label = 0\n sig_id = int(file_path[-10:-7])\n else:\n label = 1 if file_path[-8] == '_' else 0\n sig_id = int(file_path[-7:-4])\n assert label != -1 and sig_id != -1\n return label, sig_id",
"def file_text_name(filename):\n\n file_basename = basename(filename)\n\n codefeedback = PreText(text=f\"{file_basename} file is loaded\", width=500, style={\"color\": \"black\"})\n\n return codefeedback",
"def extract_visit(filename):\n # First, make sure we are only working with a filename, not a full path+filename\n filename = os.path.basename(filename)\n if filename.startswith('hst_'):\n rootname = filename.split('_')[-2]\n visit_id = rootname[:6]\n else:\n visit_id = filename[:6]\n\n return visit_id",
"def _get_model_name_from_file(path: str):\n return os.path.basename(path).split(\".\")[0]",
"def _anno_from_nomefile(self, nomefile):\n anno = nomefile[1:3]\n if anno[0] > '5':\n anno = '19' + anno\n else:\n anno = '20' + anno\n return anno",
"def get_obs_id_from_file_name(file_name):\n bits = file_name.split('.')\n return f'{bits[0]}.{bits[1]}.{bits[3]}.{bits[4]}'",
"def title_from_filename(self, filename):\n for cpl in self.dcp._list_cpl:\n if cpl['FileName'] == filename:\n desc = \"({})\".format(\n cpl['Info']['CompositionPlaylist'].get(\n 'ContentTitleText', ''))\n return desc\n\n for pkl in self.dcp._list_pkl:\n if pkl['FileName'] == filename:\n desc = \"({})\".format(\n pkl['Info']['PackingList'].get('AnnotationText', ''))\n return desc\n\n return ''",
"def get_lab_title(fname):\n # We need to do this to preserve any .virl extension to to tell CML this\n # is an older file.\n title = os.path.basename(fname)\n if not fname.lower().endswith(\".virl\"):\n title = os.path.splitext(fname)[0]\n # Load the lab YAML to try and extract its title property\n try:\n lab_stub = CachedLab(\"bogusid\", fname)\n except Exception:\n # Someone may be trying to load a 1.x file without the .virl extension.\n click.secho(\n \"File {} does not appear to be a YAML-formatted CML topology file.\"\n \"If this is a CML/VIRL 1.x file, it must end with '.virl'\".format(fname),\n fg=\"red\",\n )\n exit(1)\n else:\n title = lab_stub.title\n\n return title",
"def get_catalog_name(filename):\n tree = ET.ElementTree()\n try:\n tree.parse(filename)\n except ET.ParseError:\n raise ValueError(\"File '{}' is not a valid XML document\"\n .format(filename))\n root = tree.getroot()\n\n try:\n return root.attrib[\"name\"]\n except KeyError:\n # Make up a name based on filename if catalog has no 'name' attribute\n basename = os.path.basename(filename)\n if basename.endswith(\".xml\"):\n return basename[:-4]\n return basename",
"def plantname_from_filename(f):\n f = Path(f)\n f = f.with_suffix('')\n name = f.name\n splits = name.split(\" - \")\n if len(splits) == 2:\n plantname = splits[0]\n el = splits[1]\n elif len(splits) == 3:\n plantname = splits[1]\n el = splits[2]\n return plantname, el"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Given a list of features, this function expands the list by creating a min, max, and avg feature for each original feature. This is used to retain data from our features when creating a row grouping to represent a single collection of EventContext pairs.
|
def expanded_features(feats):
results = list()
for feat in feats:
results.extend(["{}_min".format(feat),
"{}_avg".format(feat),
"{}_max".format(feat)])
return results
|
[
"def _aggregate_features(self, feature_dict: dict) -> tuple:\n aggregated_features = {}\n aggregation_functions = {'mean': np.mean, 'min': np.min, 'max': np.max, 'std': np.std}\n feature_names = []\n for feature, data in feature_dict.items():\n # Aggregate the feature data and store the aggregations in the aggregated features dictionary\n if feature != 'mfcc':\n for aggregation in self.aggregations:\n if aggregation not in aggregation_functions:\n raise ValueError(\n \"aggregation {0} is not associated with a valid aggregation function\".format(aggregation))\n # Apply the aggregation result and store it\n aggregation_function = aggregation_functions[aggregation]\n feature_name = '{0}-{1}'.format(feature, aggregation)\n aggregated_features[feature_name] = aggregation_function(data)\n feature_names.append(feature_name)\n else:\n # Other features can't be aggregated\n for mfcc_col in range(self.config.NUMBER_OF_MFCC_COLS):\n feature_name = 'mfcc{0}'.format(mfcc_col)\n aggregated_features[feature_name] = aggregation_functions['mean'](data[mfcc_col])\n feature_names.append(feature_name)\n return aggregated_features, feature_names",
"def apply_features(self, features):\n # feature_values is a multi-dimensional list\n # 1st dimension: Feature (class)\n # 2nd dimension: token\n # 3rd dimension: values (for this token and feature, usually just one value, sometimes more,\n # e.g. \"w2vc=975\")\n features_values = [feature.convert_window(self) for feature in features]\n\n for token in self.tokens:\n token.feature_values = []\n\n # After this, each self.token.feature_values will be a simple list\n # of feature values, e.g. [\"w2v=875\", \"bc=48\", ...]\n for feature_value in features_values:\n assert isinstance(feature_value, list)\n assert len(feature_value) == len(self.tokens), (len(feature_value), len(self.tokens))\n for token_idx in range(len(self.tokens)):\n self.tokens[token_idx].feature_values.extend(feature_value[token_idx])",
"def row_average_features(x):\r\n n_samples, m, n = x.shape\r\n row_avg = np.mean(x, axis=1)\r\n return row_avg.T",
"def normalize_features(features):\n temp_feats = np.array([])\n\n for count, f in enumerate(features):\n if f.shape[0] > 0:\n if count == 0:\n temp_feats = f\n else:\n temp_feats = np.vstack((temp_feats, f))\n count += 1\n\n mean = np.mean(temp_feats, axis=0) + 1e-14\n std = np.std(temp_feats, axis=0) + 1e-14\n\n features_norm = []\n for f in features:\n ft = f.copy()\n for n_samples in range(f.shape[0]):\n ft[n_samples, :] = (ft[n_samples, :] - mean) / std\n features_norm.append(ft)\n return features_norm, mean, std",
"def feature_collapsing(features):\n new_features = []\n group_number = 0\n mzrtgroup = []\n for feature in features:\n if feature.mzrtgroup == group_number:\n mzrtgroup.append(feature)\n else:\n # assert feature.mzrtgroup == group_number + 1 # to do: there are a case, when borders are empty\n new_features.extend(collapse_mzrtgroup(mzrtgroup, group_number))\n mzrtgroup = [feature]\n group_number = feature.mzrtgroup\n new_features.extend(collapse_mzrtgroup(mzrtgroup, group_number))\n return new_features",
"def adapt_features_list(self, features_list):\n\t\tresult = []\n\t\tfor features in features_list:\n\t\t\tfeatures = list(features)\n\t\t\tfeatures[1] = \"\"\n\t\t\tfeatures = tuple(features)\n\t\t\tresult.append(features)\n\t\treturn result",
"def _calculateAggregateFeatures(self) -> None:\n return",
"def gen_avg_transaction(self, x: list[float],\n feature_range: tuple = (0, 100_000)) -> None:\n np.random.seed(2021)\n rv = stat.expon(-0.1)\n # add noise to random sampling\n noise = stat.norm(0, 1).rvs(len(x))\n noise = np.where(noise > 1.5, noise, -1.5 * noise)\n random_vars = 3 - rv.pdf(x) + noise\n idx_reduce = np.random.choice(range(len(random_vars)),\n size=int(0.75*len(x)))\n random_vars[idx_reduce] /= 10.\n # scale\n scaler = MinMaxScaler(feature_range=feature_range)\n random_vars = np.array(random_vars).reshape(-1, 1)\n random_vars = scaler.fit_transform(random_vars).ravel()\n self._investor_data['avgTransaction'] = np.round(random_vars, 1)",
"def featurize_samples(samples, fs):\n features = []\n for sample in samples:\n # extract each sample to each sensor time & freq data\n time_data = sample[0]\n freqs = np.abs(sample[1][0][0])\n freq_data=[np.abs(sensor_freq_power[1]) for sensor_freq_power in sample[1]]\n # average freq power for all accel axes\n # Time features\n min_vals = [np.min(col_data) for col_data in time_data]\n max_vals = [np.max(col_data) for col_data in time_data]\n mean_vals = [np.mean(col_data) for col_data in time_data]\n median_vals=[np.median(col_data) for col_data in time_data]\n std_vals = [np.std(col_data) for col_data in time_data]\n var_vals = [np.var(col_data) for col_data in time_data]\n percentile_5=[np.percentile(col_data, 5) for col_data in time_data]\n percentile_10=[np.percentile(col_data, 10) for col_data in time_data]\n percentile_25=[np.percentile(col_data, 25) for col_data in time_data]\n percentile_75=[np.percentile(col_data, 75) for col_data in time_data]\n percentile_90=[np.percentile(col_data, 90) for col_data in time_data]\n percentile_95=[np.percentile(col_data, 95) for col_data in time_data]\n time_features =[]\n time_features.extend(min_vals)\n time_features.extend(max_vals)\n time_features.extend(median_vals)\n time_features.extend(mean_vals)\n time_features.extend(std_vals)\n time_features.extend(var_vals)\n time_features.extend(percentile_5)\n time_features.extend(percentile_10)\n time_features.extend(percentile_25)\n time_features.extend(percentile_75)\n time_features.extend(percentile_90)\n time_features.extend(percentile_95)\n\n total_features = time_features\n features.append(np.array(total_features))\n return(features)",
"def normalize_meanminmax(feature, feature_scale=None):\n scale = feature_scale if feature_scale is not None else (feature.mean(), feature.min(), feature.max())\n t = (feature-scale[0])/(scale[2]-scale[1])\n return t, scale",
"def col_average_features(x):\r\n n_samples, m, n = x.shape\r\n col_avg = np.mean(x, axis=2)\r\n return col_avg.T",
"def scaleFeatures():\n numFeatures = X.shape[1]-1\n for i in range(numFeatures):\n xmeans.append(np.mean(X[:,i+1]))\n xstddevs.append(np.nanstd(X[:,i+1]))\n X[:,i+1] -= xmeans[i]\n X[:,i+1] /= xstddevs[i]",
"def __convert_features(self, raw_features, convert_func):\n for i, feature in enumerate(raw_features):\n raw_features[i] = convert_func(feature)\n\n return raw_features",
"def exteact_all_features(x):\n\t_mean = mean(x)\n\tcam = count_above_mean(x)\n\tcbm = count_below_mean(x)\n\tmad = mean_abs_diff(x)\n\tsad = sum_abs_diff(x)\n\t_median = median(x)\n\t_sum = sum(x)\n\t_abs_energy = abs_energy(x)\n\t_std = std(x)\n\tvariation_coeff = variation_coefficient(x)\n\t_var = var(x)\n\t_skew = skew(x)\n\t_kurtosis = kurtosis(x)\n\tnum_peaks = number_peaks(x)\n\t_max = max(x)\n\t_min = min(x)\n\tquantile25 = quantile(x, .25)\n\tquantile75 = quantile(x, .75)\n\t_cid = cid(x)\n\t# ent = entropy(x)\n\n\treturn np.array([_mean, cam, cbm, mad, sad, _median, _sum, _abs_energy, _std, variation_coeff,\n\t\t\t\t\t _var, _skew, _kurtosis, num_peaks, _max, _min, quantile25, quantile75, _cid])",
"def apply_features(self, features):\n # feature_values is a multi-dimensional list\n # 1st dimension: Feature (class)\n # 2nd dimension: token\n # 3rd dimension: values (for this token and feature, usually just one value, sometimes more,\n # e.g. \"w2vc=975\")\n features_values = [feature.convert_window(self) for feature in features]\n\n for link in self.relations:\n link.feature_values = []\n\n # After this, each self.token.feature_values will be a simple list\n # of feature values, e.g. [\"w2v=875\", \"bc=48\", ...]\n for feature_value in features_values:\n assert isinstance(feature_value, list)\n assert len(feature_value) == len(self.relations), (len(feature_value), len(self.relations))\n for link_idx in range(len(self.relations)):\n self.relations[link_idx].feature_values.extend(feature_value[link_idx])",
"def infer_feature_range(X_train: np.ndarray) -> np.ndarray:\n X_train = X_train.reshape(X_train.shape[0], -1)\n return np.vstack((X_train.min(axis=0), X_train.max(axis=0))).T",
"def _build_feature_queries(self, features, feature_type, query_type, boost, max_clauses):\n\n features = sorted(features, key=lambda x: x[1], reverse=True)[:max_clauses]\n queries = []\n for feature, score in features:\n q = {\n query_type: {\n feature_type: {\n 'value': feature,\n 'boost': score * boost\n }\n }\n }\n queries.append(q)\n return queries",
"def transform_df_features_minmax_scaled(df_features):\n # execute transformation pipeline\n features_minmax_scaled_pipeline = Pipeline([\n ('minmax_scaler', MinMaxScaler()),\n ])\n\n # this pipeline reuses the results of the standard df_features, and adds standard scaling\n df_features_minmax_scaled_nparray = features_minmax_scaled_pipeline.fit_transform(df_features.copy())\n\n # the SciKitLearn preprocessors like StandardScaler seem to transform back into a NumPy array. We can always make\n # a DataFrame a NumPy array, and vice versa. Lets put this back into a Pandas DataFrame and put back on the feature\n # labels\n df_features_minmax_scaled = pd.DataFrame(df_features_minmax_scaled_nparray, columns = feature_map.values())\n \n # return the dataframe with all features scaled using min-max xcaling\n return df_features_minmax_scaled",
"def _resize_average(feature_maps, align_corners, index=-1, resize_size=None):\n\n if feature_maps is None:\n return None\n feature_maps_avg = 0\n\n feature_map_list = _resize_concate(\n feature_maps, align_corners, index=index, resize_size=resize_size)\n for feature_map in feature_map_list:\n feature_maps_avg += feature_map\n\n feature_maps_avg /= len(feature_map_list)\n return [feature_maps_avg]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Given a list of features, this function returns a list of all possible combinations of the features. This is akin to taking the power set of the original feature list; however, we make an exception to group all context-dependency-tail features into a single feature and all event-dependency-tail features into a single feature, since either all or none of the data contained in each of these must be used to be meaningful for classification.
|
def feature_power_set(data_features):
# Find all context-dep-tail/event-dep-tail features
ctx_dep_cols = [c for c in data_features if "ctxDepTail" in c]
evt_dep_cols = [c for c in data_features if "evtDepTail" in c]
# Remove dep-tail features from overall list
reg_cols = list(set(data_features) - set(ctx_dep_cols + evt_dep_cols))
# Add lists of dep-tail features as single elements
reg_cols.append(ctx_dep_cols)
reg_cols.append(evt_dep_cols)
# Finds the power set of all features in the cleaned version of data_features
pow_set = chain.from_iterable(combinations(reg_cols, r)
for r in range(len(reg_cols)+1))
# Returns the grouped stat variant of a feature
def get_feature_stats(f):
return [f + "_min", f + "_avg", f + "_max"]
# Flatten lists in power set so that feature sets that include dep-tail
# features do not have a nested list as a member of their feature set
expanded_pow_set = list()
for feat_set in pow_set:
if len(feat_set) > 0:
new_feat_set = list()
for item in feat_set:
if isinstance(item, list):
for feat in item:
new_feat_set.extend(get_feature_stats(feat))
else:
new_feat_set.extend(get_feature_stats(item))
expanded_pow_set.append(new_feat_set)
return expanded_pow_set
|
[
"def get_original_features(features):\n # Remove _max, _min, _avg, etc. endings and remove duplicates. (Duplicates\n # are caused by the removal of the endings)\n names = list(set([feat[:feat.rfind(\"_\")] for feat in features]))\n\n # Group dep-tail features\n ctx_dep_cols = [c for c in names if \"ctxDepTail\" in c]\n evt_dep_cols = [c for c in names if \"evtDepTail\" in c]\n\n # Remove dep-tail features\n reg_names = list(set(names) - set(ctx_dep_cols + evt_dep_cols))\n\n # Add label for context-dep-tail features if any ctx-dep-tail features were\n # found in the original list of features\n if len(ctx_dep_cols) > 0:\n reg_names.append(\"CTX_DEP_TAIL_FEATS\")\n\n # Add label for event-dep-tail features if any evt-dep-tail features were\n # found in the original list of features\n if len(evt_dep_cols) > 0:\n reg_names.append(\"EVT_DEP_TAIL_FEATS\")\n\n return reg_names",
"def combine_feature_sets(features):\n return pd.concat(features, axis=1)",
"def select_features(self, features):\n return list(compress(features, self.variance_selector.get_support()))",
"def feature_collapsing(features):\n new_features = []\n group_number = 0\n mzrtgroup = []\n for feature in features:\n if feature.mzrtgroup == group_number:\n mzrtgroup.append(feature)\n else:\n # assert feature.mzrtgroup == group_number + 1 # to do: there are a case, when borders are empty\n new_features.extend(collapse_mzrtgroup(mzrtgroup, group_number))\n mzrtgroup = [feature]\n group_number = feature.mzrtgroup\n new_features.extend(collapse_mzrtgroup(mzrtgroup, group_number))\n return new_features",
"def get_deforming_components(self):\n # initialise a list\n structureArray = [ ]\n for loadpath in self.listLoadpaths:\n # append a list to structureArray\n structureArray.append(loadpath.valid_components())\n # here structureArray is\n # [[comps from loadpath 1],\n # [comps from loadpath 2],\n # ...]\n\n # return a list with all the possible combinations\n return list(itertools.product(*structureArray))",
"def eval_all_combinations(docs, labels, punct_vals,\n feature_fns, min_freqs):\n ###TODO\n\n result = []\n for punc in punct_vals:\n tokens = [tokenize(doc, punc) for doc in docs]\n for i in range(1, len(feature_fns)+1):\n combs = combinations(feature_fns, i)\n for comb in combs:\n for min_fr in min_freqs:\n dic = {}\n model = LogisticRegression()\n matr = vectorize(tokens, comb, min_fr, vocab=None)[0]\n accuracies = cross_validation_accuracy(\n model, matr, labels, 5)\n dic['punct'] = punc\n dic['features'] = comb\n dic['min_freq'] = min_fr\n dic['accuracy'] = accuracies\n #print(dic['punct'], dic['features'],dic['min_freq'], dic['accuracy'])\n result.append(dic)\n\n results_sorted = sorted(result, key=lambda k: k['accuracy'], reverse=True)\n\n return results_sorted",
"def topological_sort(derived_feature_list: List[DerivedFeature]) -> List[DerivedFeature]:\n ret = []\n # We don't want to destroy the input list\n input = derived_feature_list.copy()\n \n # Each round add the most downstream features into `ret`, so `ret` is in reversed order\n while input:\n # Process all remaining features\n current = input.copy()\n \n # In Python you should not alter content while iterating\n current_copy = current.copy()\n \n # Go over all remaining features to see if some feature depends on others\n for f in current_copy:\n for i in f.input_features:\n if i in current:\n # Someone depends on feature `i`, so `i` is **not** the most downstream\n current.remove(i)\n \n # Now `current` contains only the most downstream features in this round\n ret.extend(current)\n \n # Remove one level of dependency from input\n for f in current:\n input.remove(f)\n \n # The ret was in a reversed order when it's generated\n ret.reverse()\n \n if len(set(ret)) != len (set(derived_feature_list)):\n raise ValueError(\"Cyclic dependency detected\")\n return ret",
"def compute_all_features(self, grasps):\n num_digits = len(str(len(grasps)-1)) # for padding with zeros\n features = []\n for i, grasp in enumerate(grasps):\n logging.info('Computing features for grasp %d' %(i))\n\n feature = self._compute_feature_rep(grasp)#, '%s_%s' %(self.graspable_.key, str(i).zfill(num_digits)))\n features.append(feature)\n return features",
"def adapt_features_list(self, features_list):\n\t\tresult = []\n\t\tfor features in features_list:\n\t\t\tfeatures = list(features)\n\t\t\tfeatures[1] = \"\"\n\t\t\tfeatures = tuple(features)\n\t\t\tresult.append(features)\n\t\treturn result",
"def _generate_all_comb(seed_interactions: list, seed_interaction_order: int,\n allow_self_inter: Optional[bool] = False,\n highest_poly_order: Optional[int] = None):\n\n def get_interactions(list1, list2):\n \"\"\"Get combinatorial list of tuples\n \"\"\"\n new_list = []\n for i in list1:\n for j in list2:\n # each interaction is sorted. E.g. after sorting\n # 'abc' 'cba' 'bca' are all 'abc'\n # this is done to ensure we can use the config as the signature\n # of the trial, i.e., trial id.\n new_interaction = ''.join(sorted(i + j))\n if new_interaction not in new_list:\n new_list.append(new_interaction)\n return new_list\n\n def strip_self_inter(s):\n \"\"\"Remove duplicates in an interaction string\n \"\"\"\n if len(s) == len(set(s)):\n return s\n else:\n # return ''.join(sorted(set(s)))\n new_s = ''\n char_list = []\n for i in s:\n if i not in char_list:\n char_list.append(i)\n new_s += i\n return new_s\n\n interactions = seed_interactions.copy()\n all_interactions = []\n while seed_interaction_order > 1:\n interactions = get_interactions(interactions, seed_interactions)\n seed_interaction_order -= 1\n all_interactions += interactions\n if not allow_self_inter:\n all_interactions_no_self_inter = []\n for s in all_interactions:\n s_no_inter = strip_self_inter(s)\n if len(s_no_inter) > 1 and s_no_inter not in all_interactions_no_self_inter:\n all_interactions_no_self_inter.append(s_no_inter)\n all_interactions = all_interactions_no_self_inter\n if highest_poly_order is not None:\n all_interactions = [c for c in all_interactions if len(c) <= highest_poly_order]\n logger.info('all_combinations %s', all_interactions)\n return all_interactions",
"def get_cartesian_product(lists):\n return [i for i in itertools.product(*lists)]",
"def expanded_features(feats):\n results = list()\n for feat in feats:\n results.extend([\"{}_min\".format(feat),\n \"{}_avg\".format(feat),\n \"{}_max\".format(feat)])\n\n return results",
"def feature_list(self):\n return self.features.features()",
"def get_combinations():\n combs = list()\n for p in get_projects():\n project = get_project(p)\n vendor_info = project[\"vendors\"]\n project_name = project[\"name\"]\n for t in get_toolchains():\n vendors = get_vendors(t)\n for vendor in vendors:\n if vendor not in vendor_info:\n continue\n\n board_info = vendor_info[vendor]\n for b in get_boards():\n if b not in get_vendors()[vendor][\"boards\"]:\n continue\n\n if board_info is None or b not in board_info:\n continue\n combs.append((project_name, t, b))\n return combs",
"def generate_polynomial_feature(dataframe, features=[], target='', one_hot_encode=True):\r\n NUMBER2NOMINAL_NUM = 30\r\n NUMBER2NOMINAL_RATIO = 1/10\r\n df = dataframe\r\n\r\n # convert nominal type feature to number type feature\r\n if len(features)==0:\r\n features = df.columns\r\n if one_hot_encode == True:\r\n# cat_feats = [f for f in features if df[f].dtype == object and f!=target]\r\n cat_feats = df.select_dtypes('object').columns.tolist()\r\n cat_feats = list(set(cat_feats).intersection(set(features)))\r\n if target in cat_feats:\r\n cat_feats.remove(target)\r\n\r\n df,new_cols = one_hot_encoder(dataframe, True)\r\n df = pd.concat([df, dataframe[cat_feats]],axis=1)\r\n # convert number type feature to nominal type feature\r\n# numeric_feats = [f for f in features if df[f].dtype != object \\\r\n# and f not in new_cols and f!=target]\r\n numeric_feats = df.select_dtypes('number').columns.tolist()\r\n numeric_feats = list(set(numeric_feats).intersection(set(features)))\r\n if target in numeric_feats:\r\n numeric_feats.remove(target)\r\n numeric_feats = list(set(numeric_feats).difference(set(new_cols))) \r\n \r\n if target in cat_feats:\r\n cat_feats.remove(target)\r\n \r\n unique = df[numeric_feats].nunique()\r\n for f_ in numeric_feats:\r\n if unique[f_] <= NUMBER2NOMINAL_NUM \\\r\n and unique[f_]/df.shape[0] <= NUMBER2NOMINAL_RATIO:\r\n df[f_+'_cat'] = df[f_].astype(str)\r\n\r\n# cat_feats = [f for f in features if df[f].dtype == object and f!=target]\r\n# cat_feats2 = [f for f in features if df[f].dtype == object and f!=target]\r\n cat_feats = df.select_dtypes('object').columns.tolist()\r\n cat_feats = list(set(cat_feats).intersection(set(features)))\r\n if target in cat_feats:\r\n cat_feats.remove(target)\r\n cat_feats2 = cat_feats[:]\r\n\r\n for f_1 in cat_feats:\r\n for f_2 in cat_feats2:\r\n if f_1!=f_2:\r\n df[f_1+'_'+f_2] =df[f_1]+'_'+df[f_2]\r\n cat_feats2.remove(f_1)\r\n \r\n# numeric_feats = [f for f in features if df[f].dtype != object]\r\n# numeric_feats2 = [f for f in features if df[f].dtype != object]\r\n numeric_feats = df.select_dtypes('number').columns.tolist()\r\n numeric_feats = list(set(numeric_feats).intersection(set(features)))\r\n if target in numeric_feats:\r\n numeric_feats.remove(target)\r\n numeric_feats2 = numeric_feats[:] \r\n for f_1 in numeric_feats:\r\n for f_2 in numeric_feats2:\r\n df[f_1+'x'+f_2] = np.multiply(df[f_1],df[f_2])\r\n if f_1 != f_2:\r\n df[f_1+'/'+f_2] = np.divide(df[f_1], df[f_2])\r\n numeric_feats2.remove(f_1)\r\n\r\n return df",
"def match_cross(lsts):\n return list(map(list, zip(*itertools.product(*lsts))))",
"def list_present_features(self) -> List[str]:\n features = set(feature_path.name.replace('.encrypted', '')\n for feature_path in self.data_path.glob('features_*.parquet*'))\n features = [feature for feature in features if f'-{self.subset}' in feature]\n return sorted(features)",
"def generate_combinations(filters):\n combinations = []\n for i in range(len(filters) + 1):\n combinations += (list(itertools.combinations(filters, i)))\n\n return combinations",
"def _group_feature_refs(\n features: List[str],\n all_feature_views: List[FeatureView],\n all_request_feature_views: List[RequestFeatureView],\n all_on_demand_feature_views: List[OnDemandFeatureView],\n) -> Tuple[\n List[Tuple[FeatureView, List[str]]],\n List[Tuple[OnDemandFeatureView, List[str]]],\n List[Tuple[RequestFeatureView, List[str]]],\n Set[str],\n]:\n\n # view name to view proto\n view_index = {view.projection.name_to_use(): view for view in all_feature_views}\n\n # request view name to proto\n request_view_index = {\n view.projection.name_to_use(): view for view in all_request_feature_views\n }\n\n # on demand view to on demand view proto\n on_demand_view_index = {\n view.projection.name_to_use(): view for view in all_on_demand_feature_views\n }\n\n # view name to feature names\n views_features = defaultdict(set)\n request_views_features = defaultdict(set)\n request_view_refs = set()\n\n # on demand view name to feature names\n on_demand_view_features = defaultdict(set)\n\n for ref in features:\n view_name, feat_name = ref.split(\":\")\n if view_name in view_index:\n view_index[view_name].projection.get_feature(feat_name) # For validation\n views_features[view_name].add(feat_name)\n elif view_name in on_demand_view_index:\n on_demand_view_index[view_name].projection.get_feature(\n feat_name\n ) # For validation\n on_demand_view_features[view_name].add(feat_name)\n # Let's also add in any FV Feature dependencies here.\n for input_fv_projection in on_demand_view_index[\n view_name\n ].source_feature_view_projections.values():\n for input_feat in input_fv_projection.features:\n views_features[input_fv_projection.name].add(input_feat.name)\n elif view_name in request_view_index:\n request_view_index[view_name].projection.get_feature(\n feat_name\n ) # For validation\n request_views_features[view_name].add(feat_name)\n request_view_refs.add(ref)\n else:\n raise FeatureViewNotFoundException(view_name)\n\n fvs_result: List[Tuple[FeatureView, List[str]]] = []\n odfvs_result: List[Tuple[OnDemandFeatureView, List[str]]] = []\n request_fvs_result: List[Tuple[RequestFeatureView, List[str]]] = []\n\n for view_name, feature_names in views_features.items():\n fvs_result.append((view_index[view_name], list(feature_names)))\n for view_name, feature_names in request_views_features.items():\n request_fvs_result.append((request_view_index[view_name], list(feature_names)))\n for view_name, feature_names in on_demand_view_features.items():\n odfvs_result.append((on_demand_view_index[view_name], list(feature_names)))\n return fvs_result, odfvs_result, request_fvs_result, request_view_refs"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Given an expanded set of features, this function returns the list of features that are contained in the original pandas DataFrame, with the exception that dependency-tail features are represented as a single string.
|
def get_original_features(features):
# Remove _max, _min, _avg, etc. endings and remove duplicates. (Duplicates
# are caused by the removal of the endings)
names = list(set([feat[:feat.rfind("_")] for feat in features]))
# Group dep-tail features
ctx_dep_cols = [c for c in names if "ctxDepTail" in c]
evt_dep_cols = [c for c in names if "evtDepTail" in c]
# Remove dep-tail features
reg_names = list(set(names) - set(ctx_dep_cols + evt_dep_cols))
# Add label for context-dep-tail features if any ctx-dep-tail features were
# found in the original list of features
if len(ctx_dep_cols) > 0:
reg_names.append("CTX_DEP_TAIL_FEATS")
# Add label for event-dep-tail features if any evt-dep-tail features were
# found in the original list of features
if len(evt_dep_cols) > 0:
reg_names.append("EVT_DEP_TAIL_FEATS")
return reg_names
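
A small usage sketch with made-up column names, showing how expanded stat columns map back to their base feature names and how ctxDepTail columns collapse into a single label (assumes get_original_features above is in scope).

expanded = ["sentenceDistance_min", "sentenceDistance_max", "ctxDepTail_verb_avg"]
print(sorted(get_original_features(expanded)))
# -> ['CTX_DEP_TAIL_FEATS', 'sentenceDistance']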
|
[
"def list_present_features(self) -> List[str]:\n features = set(feature_path.name.replace('.encrypted', '')\n for feature_path in self.data_path.glob('features_*.parquet*'))\n features = [feature for feature in features if f'-{self.subset}' in feature]\n return sorted(features)",
"def extract_features(df, features):\n functions_dict = get_functions_dictionary()\n features_df = pd.DataFrame(columns=['writer'])\n features_df['writer'] = df['writer'].tolist()\n for feature in features:\n features_df = pd.merge(features_df, functions_dict[feature](df), on='writer')\n return features_df",
"def combine_feature_sets(features):\n return pd.concat(features, axis=1)",
"def feature_list(self):\n return self.features.features()",
"def get_features(data: pd.DataFrame) -> List[str]:\n feature_columns = [\n column\n for column in data.columns\n if column\n not in [\n \"data\",\n \"stato\",\n \"codice_regione\",\n \"denominazione_regione\",\n \"lat\",\n \"long\",\n \"note\",\n ]\n ]\n return feature_columns",
"def get_target_features(input_df):\n\n target = input_df.fire_bool\n features = input_df.drop('fire_bool', axis=1)\n \n return target, features",
"def get_feature_names(df, feature_name_substring):\n return [\n col_name\n for col_name in df.columns\n if col_name.find(feature_name_substring) != -1\n ]",
"def expanded_features(feats):\n results = list()\n for feat in feats:\n results.extend([\"{}_min\".format(feat),\n \"{}_avg\".format(feat),\n \"{}_max\".format(feat)])\n\n return results",
"def get_feature_names(df, include_c):\n names = [f for f in df.columns if not f.startswith('y')]\n if not include_c:\n names = [f for f in names if not f.startswith('c')]\n return names",
"def select_features(self, features):\n return list(compress(features, self.variance_selector.get_support()))",
"def feature_power_set(data_features):\n # Find all context-dep-tail/event-dep-tail features\n ctx_dep_cols = [c for c in data_features if \"ctxDepTail\" in c]\n evt_dep_cols = [c for c in data_features if \"evtDepTail\" in c]\n\n # Remove dep-tail features from overall list\n reg_cols = list(set(data_features) - set(ctx_dep_cols + evt_dep_cols))\n\n # Add lists of dep-tail features as single elements\n reg_cols.append(ctx_dep_cols)\n reg_cols.append(evt_dep_cols)\n\n # Finds the power set of all features in the cleaned version of data_features\n pow_set = chain.from_iterable(combinations(reg_cols, r)\n for r in range(len(reg_cols)+1))\n\n # Returns the grouped stat variant of a feature\n def get_feature_stats(f):\n return [f + \"_min\", f + \"_avg\", f + \"_max\"]\n\n # Flatten lists in power set so that feature sets that include dep-tail\n # features do not have a nested list as a member of their feature set\n expanded_pow_set = list()\n for feat_set in pow_set:\n if len(feat_set) > 0:\n new_feat_set = list()\n for item in feat_set:\n if isinstance(item, list):\n for feat in item:\n new_feat_set.extend(get_feature_stats(feat))\n else:\n new_feat_set.extend(get_feature_stats(item))\n\n expanded_pow_set.append(new_feat_set)\n\n return expanded_pow_set",
"def features_descriptions(self):\n return self.features.descriptions()",
"def get_feature_names(selector, dataBinaryIngredients):\n mask = selector.get_support(indices=True) #list of booleans\n column_names = dataBinaryIngredients.columns\n feature_names = column_names[mask].values\n return feature_names",
"def extract_features_from_df(df):\n X_list = []\n for i in range(1, 3 + 1):\n X_list.append([\n df[f'emb{i}'].to_numpy()[np.newaxis, :],\n # np.array(df[f'emb{i}'].values.tolist()).T,\n np.array(df[f'cm{i}'].values.tolist()).T,\n df[f'pr{i}'].to_numpy()[np.newaxis, :],\n df[f'rc{i}'].to_numpy()[np.newaxis, :],\n df[f'iou{i}'].to_numpy()[np.newaxis, :],\n df[f'fpr{i}'].to_numpy()[np.newaxis, :],\n df[f'fnr{i}'].to_numpy()[np.newaxis, :],\n df[f'f1{i}'].to_numpy()[np.newaxis, :],\n df[f'a_exp{i}'].to_numpy()[np.newaxis, :],\n df[f'a_smp{i}'].to_numpy()[np.newaxis, :],\n df[f'fp_edt{i}'].to_numpy()[np.newaxis, :],\n df[f'fn_edt{i}'].to_numpy()[np.newaxis, :],\n # df[f'i_prf{i}'].to_numpy()[np.newaxis, :],\n # df[f'i_suf{i}'].to_numpy()[np.newaxis, :],\n ])\n\n X = None\n for x_l in X_list:\n x = np.vstack(x_l)[:, np.newaxis, :]\n X = x if X is None else np.append(X, x, 1)\n X = X.astype(np.float32).transpose([2, 1, 0]) # [n_samples, 3, n_features]\n\n # add one-hot encoded sample-type feature\n X = np.append(X, np.zeros((*X.shape[:2], X.shape[1])), -1)\n for i in range(X.shape[1]):\n X[:, i, -X.shape[1] + i] = 1\n\n X = X.reshape((-1, X.shape[-1])) # [n_samples * 3, n_features + 3]\n\n return X",
"def preprocess_features(master_dataframe, used_features):\n\n #Select features to be used here\n selected_features = master_dataframe[used_features]\n\n #copy to processed feature list\n processed_features = selected_features.copy()\n\n #NOTE: If synthetic features are desired, you can add them HERE\n\n return processed_features",
"def get_feature_names(self):\n return self.features_",
"def choose_features(df):\n X = df.drop('quality', axis=1)\n return X",
"def extract_features(data_set):\n return data_set[[f'hist_{i}' for i in range(27)]]",
"def iterate_features(self) -> Iterator[Tuple[str, pd.DataFrame]]:\n for features_name in self.list_present_features():\n features_name = features_name.partition('.')[0].replace(f'-{self.subset}', '')\n yield features_name, getattr(self, features_name)()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Two solution instances are equal if their x-vectors are roughly the same. There is logically no need to check the y-vectors as well, since there is a many-to-one mapping. "Roughly the same" is defined by the class static attribute Solution.eps, which sets the relative and absolute tolerance allowed between individual coordinates. Testing for equality is done using numpy's built-in function "isclose", which returns, element-wise, the boolean of absolute(x1 - x2) <= (atol + rtol * absolute(x2)).
|
def __eq__(self, other):
if isinstance(other, Solution):
equalities = np.isclose(self.x, other.x, rtol=Solution.eps, atol=Solution.eps)
return np.all(equalities)
else:
        raise InvalidComparison('Attempted to compare instance with non-Solution instance.')
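
A self-contained sketch of the same tolerance-based equality, with a placeholder InvalidComparison exception and an assumed eps value; it only illustrates the np.isclose pattern described above, not the full Solution class.

import numpy as np

class InvalidComparison(Exception):
    """Raised when a Solution is compared against a non-Solution value."""

class Solution:
    eps = 1e-9  # relative and absolute tolerance per coordinate (assumed value)

    def __init__(self, x):
        self.x = np.asarray(x, dtype=float)

    def __eq__(self, other):
        if isinstance(other, Solution):
            return bool(np.all(np.isclose(self.x, other.x,
                                          rtol=Solution.eps, atol=Solution.eps)))
        raise InvalidComparison('Attempted to compare instance with non-Solution instance.')

# Coordinates that differ by far less than eps compare equal.
assert Solution([1.0, 2.0]) == Solution([1.0, 2.0 + 1e-12])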
|
[
"def are_points_equal(a, b, epsilon = 1e-9):\n try:\n x1, y1 = a.x, a.y\n x2, y2 = b.x, b.y\n except AttributeError:\n x1, y1 = a[0], a[1]\n x2, y2 = b[0], b[1]\n return (abs(x1-x2) < epsilon) and (abs(y1-y2) < epsilon)",
"def same_point(a, b):\n return math.fabs(b[0] - a[0]) < _s_epsilon and \\\n math.fabs(b[1] - a[1]) < _s_epsilon",
"def _all_isclose(left, right):\n return _all(np.isclose, left, right)",
"def isEqualToByTolerance(self, *args) -> \"bool\" :\n return _core.Point2D_isEqualToByTolerance(self, *args)",
"def is_equal_np(\n x: (np.ndarray, List), y: (np.ndarray, List), atol: float = 1.0e-7\n) -> bool:\n x = np.asarray(x, dtype=np.float32)\n y = np.asarray(y, dtype=np.float32)\n return np.all(np.isclose(x, y, atol=atol))",
"def IsApproximatelyEqual(x, y, epsilon):\n # Check absolute precision.\n if -epsilon <= x - y <= epsilon:\n return True\n\n # Is x or y too close to zero?\n if -epsilon <= x <= epsilon or -epsilon <= y <= epsilon:\n return False\n\n # Check relative precision.\n return (-epsilon <= (x - y) / x <= epsilon\n or -epsilon <= (x - y) / y <= epsilon)",
"def _any_isclose(left, right):\n return _any(np.isclose, left, right)",
"def approx_equals(a, b):\n return (a - b) < 1.5e-16",
"def exact_compare(m1: Minutia, m2: Minutia):\n if m1.x == m2.x and m1.y == m2.y and m1.theta == m2.theta:\n return True\n else:\n return False",
"def consistent_with(self, other):\n for wcs1, wcs2 in zip(self.wcs, other.wcs):\n try:\n ra, dec = at.get_center_of_projection(wcs1)\n except TypeError: # if this returns None\n return False\n x, y = wcs1.invert(ra, dec)\n x2, y2 = wcs2.invert(ra, dec)\n dx = other.xoffset - self.xoffset\n dy = other.yoffset - self.yoffset\n distsq = dx * dx + dy * dy\n if distsq > 100 and (x-x2)**2 + (y-y2)**2 < 0.25 * distsq:\n return False\n return True",
"def approximately_equal(graph1, graph2, tolerance=0.1):\n lines1 = list(graph1.as_quads())\n lines2 = list(graph2.as_quads())\n if len(lines1) != len(lines2):\n return False\n \n cutoff = 2 * tolerance * tolerance\n for li in lines1:\n lines = _np.asarray(lines2).T\n distsq = ((lines[0] - li[0])**2 + (lines[1] - li[1])**2 +\n (lines[2] - li[2])**2 + (lines[3] - li[3])**2)\n index = _np.argmin(distsq)\n if distsq[index] < cutoff:\n del lines2[index]\n else:\n return False\n return True",
"def is_equal(x, y, tol=0.001):\n return x <= y + tol and x >= y - tol",
"def test_exam_lsolve2a(self):\n\n a = symbol('a');\n b = symbol('b');\n x = symbol('x');\n y = symbol('y');\n eqns = [a*x + b*y == 3, x-y==b];\n solution = lsolve(eqns, [x,y]);\n solx = solution[0].rhs();\n soly = solution[1].rhs();\n realx = (3+pow(b,2))/(a+b);\n realy = (3-a*b)/(a+b);\n result = (solx-realx).normal().is_zero() and (soly-realy).normal().is_zero() \n self.assertEqual(result,1)",
"def same(x, y):\n if is_float(x) and isnan(x) and is_float(y) and isnan(y):\n return True\n elif is_datetime(x) and isnat(x) and is_datetime(y) and isnat(y):\n return True\n else:\n return x == y",
"def test_Point_can_be_compared_for_equality_equality():\n position1 = Point(10, 5)\n position2 = Point(10, 5)\n position3 = Point(15, 8)\n assert position1 == position2\n assert position2 == position1\n assert position1 != position3\n assert position3 == Point(15, 8)\n assert position3 == (15, 8)",
"def weakly_dominates(self, other_solution) -> bool:\n for val1, val2 in zip(self.value, other_solution.value):\n if val2 < val1:\n return False\n return True",
"def could_sym(bounds, eps):\n \n n = len(bounds) - 1 # number of voxels\n for i in range(1, n):\n ileft_l = i - 1\n ileft_r = ileft_l + 1\n \n irght_l = n - i\n irght_r = irght_l + 1\n \n dleft = bounds[ileft_r] - bounds[ileft_l]\n drght = bounds[irght_r] - bounds[irght_l]\n \n delta = math.fabs(drght - dleft)\n if (delta > eps):\n return False\n\n # bins are equal within eps\n return True",
"def assert_geometries_equal(\n x,\n y,\n tolerance=1e-7,\n equal_none=True,\n equal_nan=True,\n normalize=False,\n err_msg=\"\",\n verbose=True,\n):\n __tracebackhide__ = True # Hide traceback for py.test\n if normalize:\n x = shapely.normalize(x)\n y = shapely.normalize(y)\n x = np.array(x, copy=False)\n y = np.array(y, copy=False)\n\n is_scalar = x.ndim == 0 or y.ndim == 0\n\n # Check the shapes (condition is copied from numpy test_array_equal)\n if not (is_scalar or x.shape == y.shape):\n msg = build_err_msg(\n [x, y],\n err_msg + f\"\\n(shapes {x.shape}, {y.shape} mismatch)\",\n verbose=verbose,\n )\n raise AssertionError(msg)\n\n flagged = False\n if equal_none:\n flagged = _assert_none_same(x, y, err_msg, verbose)\n\n if not np.isscalar(flagged):\n x, y = x[~flagged], y[~flagged]\n # Only do the comparison if actual values are left\n if x.size == 0:\n return\n elif flagged:\n # no sense doing comparison if everything is flagged.\n return\n\n is_equal = _equals_exact_with_ndim(x, y, tolerance=tolerance)\n if is_scalar and not np.isscalar(is_equal):\n is_equal = bool(is_equal[0])\n\n if np.all(is_equal):\n return\n elif not equal_nan:\n msg = build_err_msg(\n [x, y],\n err_msg + f\"\\nNot equal to tolerance {tolerance:g}\",\n verbose=verbose,\n )\n raise AssertionError(msg)\n\n # Optionally refine failing elements if NaN should be considered equal\n if not np.isscalar(is_equal):\n x, y = x[~is_equal], y[~is_equal]\n # Only do the NaN check if actual values are left\n if x.size == 0:\n return\n elif is_equal:\n # no sense in checking for NaN if everything is equal.\n return\n\n is_equal = _assert_nan_coords_same(x, y, tolerance, err_msg, verbose)\n if not np.all(is_equal):\n msg = build_err_msg(\n [x, y],\n err_msg + f\"\\nNot equal to tolerance {tolerance:g}\",\n verbose=verbose,\n )\n raise AssertionError(msg)",
"def grid_equal (grid1, grid2):\r\n for x in range(4):\r\n for y in range(4):\r\n if grid1[x][y]!=grid2[x][y]:\r\n return False\r\n return True"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Logs the given message every n calls to a logger.
|
def _log_every_n_to_logger(n, logger, level, message, *args): # pylint: disable=invalid-name
logger = logger or logging.getLogger()
def _gen(): # pylint: disable=missing-docstring
while True:
for _ in xrange(n):
yield False
logger.log(level, message, *args)
yield True
gen = _gen()
return lambda: next(gen)
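
A self-contained re-sketch of the same generator-based throttling pattern; the helper name make_throttled_logger is hypothetical, and range replaces the Python 2 xrange used above so the sketch runs on Python 3.

import logging

def make_throttled_logger(n, level, message):
    logger = logging.getLogger()
    def _gen():
        while True:
            for _ in range(n):
                yield False
            logger.log(level, message)
            yield True
    gen = _gen()
    return lambda: next(gen)

logging.basicConfig(level=logging.INFO)
heartbeat = make_throttled_logger(3, logging.INFO, 'still alive')
print([heartbeat() for _ in range(8)])
# -> [False, False, False, True, False, False, False, True]; the message is
#    logged once per four calls.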
|
[
"def log_every_n(n, level, message, *args): # pylint: disable=invalid-name\n return _log_every_n_to_logger(n, None, level, message, *args)",
"def log(self, message: str) -> bool:\n self.count += 1\n if self.count == self.frequency:\n self.logger.info(message)\n self.count = 0\n return True\n return False",
"def stats_log(self, metric, msg, *args, **kwargs):\n level = kwargs.pop('level', logging.DEBUG)\n log_message = '%s: ' % metric + msg\n self.logger.log(level, log_message, *args, **kwargs)\n self.stats[metric] += 1",
"def retry_and_log(retry_attempts):\r\n # type: (int, Callable[..., Any]) -> Callable[..., Any]\r\n def wrapper(f):\r\n # type: (Callable[..., Any]) -> Callable[..., Any]\r\n @functools.wraps(f)\r\n def inner(*args, **kwargs):\r\n # type: (*Any, **Any) -> Any\r\n for i in range(retry_attempts):\r\n \r\n try:\r\n \r\n return f(*args, **kwargs)\r\n except Exception as ex:\r\n print('Retrying....') \r\n print(ex)\r\n time.sleep(i+1)\r\n return inner\r\n\r\n return wrapper",
"def retry(n, errors, wait=0.0, logger_name=None):\n\n def wrapper(func):\n @functools.wraps(func)\n def new_func(*args, **kwargs):\n retries = 0\n while True:\n try:\n result = func(*args, **kwargs)\n if retries and logger_name:\n logger = logging.getLogger(logger_name)\n logger.debug('Retry of `%s` successful' % func.__name__)\n return result\n except errors:\n if retries >= n:\n if logger_name:\n logger = logging.getLogger(logger_name)\n logger.exception('I could not execute `%s` with args %s and kwargs %s, '\n 'starting next try. ' % (func.__name__,\n str(args),\n str(kwargs)))\n raise\n elif logger_name:\n logger = logging.getLogger(logger_name)\n logger.debug('I could not execute `%s` with args %s and kwargs %s, '\n 'starting next try. ' % (func.__name__,\n str(args),\n str(kwargs)))\n retries += 1\n if wait:\n time.sleep(wait)\n return new_func\n\n return wrapper",
"def CountCalls(f):\n f.count = 0\n\n def wrapper(*args, **kwargs):\n \"\"\"log calls to a function, and the return value\"\"\"\n module_logger.debug('%s called. %i', f.__name__, f.count)\n f.count += 1\n ret = f(*args, **kwargs)\n module_logger.debug('returned %s', ret)\n return ret\n return wrapper",
"def n_bounce(self, n:int=10):\n for _ in range(n):\n self.bounce()",
"def apply_n_times(f, x, n, step=None):\n assert callable(f),'f must be a function!'\n assert isinstance(x,int),\"x must be an integer!\"\n assert isinstance(n,int),\"n must be an integer!\"\n\n\n for i in range(n):\n \tif f == increment_by:\n \t\tx = f(x,step)\n \telse:\n \t\tx = f(x)\n return x",
"def log_i(func):\n def log_wrapper(*args, **kwargs):\n \"\"\"send function call to kivy log\"\"\"\n log_entry = \"{}()\".format(func.__name__)\n kivy.logger.Logger.info(log_entry)\n return func(*args, **kwargs)\n return log_wrapper",
"def repeated(test_fn):\n def repeat_decorator(*args, **kwargs):\n for i in range(0, 100):\n test_fn(*args, **kwargs)\n return repeat_decorator",
"def _mllog_print(logger, *args, **kwargs):\n if kwargs.pop('sync', False):\n barrier()\n if 'value' not in kwargs:\n kwargs['value'] = None\n if get_rank() == 0:\n logger(*args, **kwargs, stack_offset=3)",
"def log_list(msg, items, level=logging.INFO):\n max_len = 1024 - len(msg % \"\")\n cur_len = 0\n cur_items = list()\n\n while [i[:max_len] for i in items]:\n i = items.pop()\n if cur_len + len(i) + 2 > max_len:\n logging.info(msg, \", \".join(cur_items))\n cur_len = 0\n cur_items = list()\n\n cur_items.append(i)\n cur_len += len(i) + 2\n\n logging.log(level, msg, \", \".join(cur_items))",
"def repeat_melody(self, melody, n, offset=0):\n # parent_melody = Melody([melody] * n)\n parent_melody = Melody([melody.clone().step(offset*j) for j in range(n)], key=self.key)\n self.play_melody(parent_melody)",
"def log_n_queries():\n if not settings.DEBUG:\n logger.debug(\"DEBUG is False, will not count queries\")\n yield\n else:\n try:\n reset_queries()\n yield\n finally:\n logger.info(\"%s queries performed\", len(connection.queries))",
"def benchmark_logger(filepath, elapsed, n, d, eta, M, r, c, trial_number, num_trials, data_filepath, rows=None, columns=None):\n reset_log(filepath)\n \n msg1 = (\n \"Finished trial {0}/{1}, \".format(trial_number+1, num_trials) +\n \"elapsed={}, \".format(elapsed)\n )\n msg2 = (\n \"n={0}, d={1}, eta={2}, M={3}, \".format(n,d,eta,M) +\n \"r={0}, c={1}, \".format(r,c) +\n \"data={0}, rows={1}, columns={2}\".format(data_filepath, rows, columns)\n )\n\n msg = msg1 + msg2\n \n logging.info(msg)\n print(msg1)\n print(\"Recorded log to {}\".format(filepath))\n return None",
"def addLogMsg(self, msg):\n\n if len(self.log) >= 25:\n self.log = []\n\n self.log.append(msg)",
"def retry(times, exceptions):\n def decorator(func):\n def newfn(*args, **kwargs):\n attempt = 0\n while attempt < times:\n try:\n return func(*args, **kwargs)\n except exceptions as e:\n print(\n 'Exception {} thrown when attempting to run {}, attempt {} of {}' .format(\n e, func, attempt, times)\n )\n attempt += 1\n return func(*args, **kwargs)\n return newfn\n return decorator",
"def get_logging_tensor_hook(every_n_iter=100, tensors_to_log=None, **kwargs):\n if tensors_to_log is None:\n tensors_to_log = _TENSORS_TO_LOG\n\n return tensorflow.estimator.LoggingTensorHook(\n tensors=tensors_to_log,\n every_n_iter=every_n_iter)",
"def run_for_n(self,steps):\n for step in range(steps):\n self.step_all()\n self.steps += 1"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Logs a message every n calls. See _log_every_n_to_logger.
|
def log_every_n(n, level, message, *args): # pylint: disable=invalid-name
return _log_every_n_to_logger(n, None, level, message, *args)
|
[
"def _log_every_n_to_logger(n, logger, level, message, *args): # pylint: disable=invalid-name\n logger = logger or logging.getLogger()\n def _gen(): # pylint: disable=missing-docstring\n while True:\n for _ in xrange(n):\n yield False\n logger.log(level, message, *args)\n yield True\n gen = _gen()\n return lambda: next(gen)",
"def n_bounce(self, n:int=10):\n for _ in range(n):\n self.bounce()",
"def log(self, message: str) -> bool:\n self.count += 1\n if self.count == self.frequency:\n self.logger.info(message)\n self.count = 0\n return True\n return False",
"def stats_log(self, metric, msg, *args, **kwargs):\n level = kwargs.pop('level', logging.DEBUG)\n log_message = '%s: ' % metric + msg\n self.logger.log(level, log_message, *args, **kwargs)\n self.stats[metric] += 1",
"def once_per_hour():\n log.info('2 minutes passed')",
"def CountCalls(f):\n f.count = 0\n\n def wrapper(*args, **kwargs):\n \"\"\"log calls to a function, and the return value\"\"\"\n module_logger.debug('%s called. %i', f.__name__, f.count)\n f.count += 1\n ret = f(*args, **kwargs)\n module_logger.debug('returned %s', ret)\n return ret\n return wrapper",
"def retry_and_log(retry_attempts):\r\n # type: (int, Callable[..., Any]) -> Callable[..., Any]\r\n def wrapper(f):\r\n # type: (Callable[..., Any]) -> Callable[..., Any]\r\n @functools.wraps(f)\r\n def inner(*args, **kwargs):\r\n # type: (*Any, **Any) -> Any\r\n for i in range(retry_attempts):\r\n \r\n try:\r\n \r\n return f(*args, **kwargs)\r\n except Exception as ex:\r\n print('Retrying....') \r\n print(ex)\r\n time.sleep(i+1)\r\n return inner\r\n\r\n return wrapper",
"def run_for_n(self,steps):\n for step in range(steps):\n self.step_all()\n self.steps += 1",
"def retry(n, errors, wait=0.0, logger_name=None):\n\n def wrapper(func):\n @functools.wraps(func)\n def new_func(*args, **kwargs):\n retries = 0\n while True:\n try:\n result = func(*args, **kwargs)\n if retries and logger_name:\n logger = logging.getLogger(logger_name)\n logger.debug('Retry of `%s` successful' % func.__name__)\n return result\n except errors:\n if retries >= n:\n if logger_name:\n logger = logging.getLogger(logger_name)\n logger.exception('I could not execute `%s` with args %s and kwargs %s, '\n 'starting next try. ' % (func.__name__,\n str(args),\n str(kwargs)))\n raise\n elif logger_name:\n logger = logging.getLogger(logger_name)\n logger.debug('I could not execute `%s` with args %s and kwargs %s, '\n 'starting next try. ' % (func.__name__,\n str(args),\n str(kwargs)))\n retries += 1\n if wait:\n time.sleep(wait)\n return new_func\n\n return wrapper",
"def log_n_queries():\n if not settings.DEBUG:\n logger.debug(\"DEBUG is False, will not count queries\")\n yield\n else:\n try:\n reset_queries()\n yield\n finally:\n logger.info(\"%s queries performed\", len(connection.queries))",
"def __init__(self, every_n_steps=100, first_n_steps=1):\n super(EveryN, self).__init__()\n self._every_n_steps = every_n_steps\n self._first_n_steps = first_n_steps\n # Last step in the model.\n self._last_successful_step = None\n # Last step at which we called one of the every_n methods\n self._last_active_step = 0\n self._every_n_step_begin_called = False",
"def eventsPerTrigger(self, per):\n if per < 1 or per > 3:\n print \"must be between 1 and 3\"\n return\n self.dev.labc.repeat_count(per-1)",
"def add_tensorboard_image_logging(self,\n name,\n interval,\n num_images,\n resize=256,\n seed=None,\n truncation_psi=None,\n truncation_cutoff=None,\n label=None,\n pixel_min=-1,\n pixel_max=1):\n if self.rank:\n return\n def callback(seen):\n if seen % interval == 0:\n images = self.generate_images(\n num_images=num_images,\n seed=seed,\n truncation_psi=truncation_psi,\n truncation_cutoff=truncation_cutoff,\n label=label,\n pixel_min=pixel_min,\n pixel_max=pixel_max\n )\n self.log_images_tensorboard(\n images=images,\n name=name,\n resize=resize\n )\n self.callbacks.append(callback)",
"def get_logging_tensor_hook(every_n_iter=100, tensors_to_log=None, **kwargs):\n if tensors_to_log is None:\n tensors_to_log = _TENSORS_TO_LOG\n\n return tensorflow.estimator.LoggingTensorHook(\n tensors=tensors_to_log,\n every_n_iter=every_n_iter)",
"def apply_n_times(f, x, n, step=None):\n assert callable(f),'f must be a function!'\n assert isinstance(x,int),\"x must be an integer!\"\n assert isinstance(n,int),\"n must be an integer!\"\n\n\n for i in range(n):\n \tif f == increment_by:\n \t\tx = f(x,step)\n \telse:\n \t\tx = f(x)\n return x",
"def meter(self, name, count):\n self.calls.append((\"meter\", name, count))",
"def repeated(test_fn):\n def repeat_decorator(*args, **kwargs):\n for i in range(0, 100):\n test_fn(*args, **kwargs)\n return repeat_decorator",
"def get_logging_metric_hook(tensors_to_log=None, every_n_secs=600, **kwargs):\n if tensors_to_log is None:\n tensors_to_log = _TENSORS_TO_LOG\n return metric_hook.LoggingMetricHook(\n tensors=tensors_to_log,\n metric_logger=logger.get_benchmark_logger(),\n every_n_secs=every_n_secs)",
"def log_i(func):\n def log_wrapper(*args, **kwargs):\n \"\"\"send function call to kivy log\"\"\"\n log_entry = \"{}()\".format(func.__name__)\n kivy.logger.Logger.info(log_entry)\n return func(*args, **kwargs)\n return log_wrapper"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return the version string of the 'openhtf' package.
|
def get_version():
version = 'Unknown'
try:
version = get_distribution('openhtf')
except DistributionNotFound:
version = 'Unknown - Perhaps openhtf was not installed via setup.py or pip.'
return version
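
A minimal variant of the same pkg_resources lookup that returns a plain string by reading the Distribution's .version attribute; the helper name get_version_string is hypothetical.

from pkg_resources import get_distribution, DistributionNotFound

def get_version_string(package='openhtf'):
    try:
        return get_distribution(package).version
    except DistributionNotFound:
        return 'Unknown - Perhaps %s was not installed via setup.py or pip.' % package

print(get_version_string())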
|
[
"def get_version() -> str:\n return VERSION",
"def get_version_str():\n return pkg_resources.get_distribution(\"lando_messaging\").version",
"def get_version():\n from pkg_resources import get_distribution\n return get_distribution('funkload').version",
"def get_version():\n return 'PyS2OPC v' + VERSION + ' on ' + ffi.string(libsub.SOPC_LibSub_GetVersion()).decode()",
"def get_version_string():\n version = ffi.string(C.blosc_get_version_string())\n if not isinstance(version, str):\n version = version.decode()\n return version",
"def _get_version_string() -> str:\n return \" GDM Version: {}. Registered extension packages: {}.\".format(\n _version.version, extensions.get_registered_package_info())",
"def _get_version() -> str:\n spec = spec_from_file_location('ops.version', 'ops/version.py')\n if spec is None:\n raise ModuleNotFoundError('could not find /ops/version.py')\n if spec.loader is None:\n raise AttributeError('loader', spec, 'invalid module')\n module = module_from_spec(spec)\n spec.loader.exec_module(module)\n\n return module.version",
"def version(url):\n v = m.version(url) or r'1.0-u' # Unknown\n return ''.join(v)",
"def version():\n return 'v%s' % ninecms.__version__",
"def get_required_webots_version_short():\n return make_short_version(get_required_webots_version())",
"def make_plugin_version_string():\n with open(version_filesystem_location(), encoding='utf-8') as fp:\n version = json.load(fp)\n return '{major}.{minor}.{revision}'.format(**version)",
"def libVersion(self) -> str:\n return self._libVersion",
"def protocolVersion():",
"def getPackageVersion():\n cmd = locations.DPKG + \" -l \" + ' | grep surfids-sensor | awk \\'{print $3}\\''\n pversion = os.popen(cmd)\n ver = pversion.readline().strip()\n if ver == \"\":\n return \"Unknown\"\n else:\n return ver",
"def discover_lhotse_version() -> str:\n\n version = VERSION\n if not IS_DEV_VERSION:\n # This is a PyPI public release -- return a clean version string.\n return version\n\n version = version + \".dev\"\n\n # This is not a PyPI release -- try to read the git commit\n try:\n git_commit = (\n run(\n [\"git\", \"rev-parse\", \"--short\", \"HEAD\"],\n check=True,\n stdout=PIPE,\n stderr=DEVNULL,\n )\n .stdout.decode()\n .rstrip(\"\\n\")\n .strip()\n )\n dirty_commit = (\n len(\n run(\n [\"git\", \"diff\", \"--shortstat\"],\n check=True,\n stdout=PIPE,\n stderr=DEVNULL,\n )\n .stdout.decode()\n .rstrip(\"\\n\")\n .strip()\n )\n > 0\n )\n git_commit = git_commit + \".dirty\" if dirty_commit else git_commit + \".clean\"\n source_version = f\"+git.{git_commit}\"\n except Exception:\n source_version = \".unknownsource\"\n # See the format:\n # https://packaging.python.org/guides/distributing-packages-using-setuptools/#local-version-identifiers\n version = version + source_version\n\n return version",
"def get_version_from_package() -> str:\n\n path = os.path.join(os.path.dirname(__file__), \"pdchaoskit/__init__.py\")\n path = os.path.normpath(os.path.abspath(path))\n with open(path) as f:\n for line in f:\n if line.startswith(\"__version__\"):\n token, version = line.split(\" = \", 1)\n version = version.replace(\"'\", \"\").strip()\n print(version)\n return version",
"def get_version() -> str:\n config = configparser.ConfigParser()\n path = Path(__file__).parent.parent / \"setup.cfg\"\n config.read(path)\n return str(config[\"metadata\"][\"version\"])",
"def f_version(self):\n return self._f_version",
"def get_version(self, fingerprint):\n\n try:\n desc = self.control.get_server_descriptor(fingerprint)\n return str(desc.tor_version)\n except stem.ControllerError:\n return ''",
"def get_distribution():\n\ts = platform.system() + ' ' + platform.version()\n\treturn s"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Takes a path to a file with a single word on each line. Returns a list of those words.
|
def get_words(file_path):
words = []
with open(file_path) as f:
for line in f:
words.append(line.strip())
return words
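
A quick usage sketch (assumes get_words above is in scope): write a few words to a temporary file and read them back.

import os
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as tmp:
    tmp.write('alpha\nbeta\ngamma\n')
    word_file = tmp.name

assert get_words(word_file) == ['alpha', 'beta', 'gamma']
os.remove(word_file)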
|
[
"def get_wordle_list(filename: string) -> string:\n with open(filename, \"r\", encoding=\"utf-8\") as file_handler:\n return file_handler.read()",
"def get_wordlist():\n with open(WORDLIST_FILE) as english:\n wordlist = english.readlines()\n return [word.strip() for word in wordlist]",
"def brit_spelling(file_path):\n #https://www.lexico.com/grammar/british-and-spelling\n \n f = open(file_path)\n Lines = f.readlines()\n \n b = []\n \n for l in Lines:\n w = l.split('\\t')[0].lower()\n word = w.split()\n for x in word: \n if x != '\\n':\n b.append(x)\n \n return b",
"def load_doc_words(filename):\n with open(filename, 'r', encoding = 'ascii') as file:\n words = [word.lower() for word in re.findall(r'[A-Za-z]+', file.read())]\n return words",
"def file_with_all_words(path, word_list):\r\n return traverse_tree(path, word_list)",
"def read_word_list(filename):\n with open(filename) as f:\n words = frozenset(word.strip() for word in f)\n return words",
"def get_words(filename):\n file_words = []\n # your code goes here\n with open(filename) as f:\n for line in f:\n word_list = line.split()\n for word in word_list:\n file_words.append(delete_punctuation(word.lower()))\n return file_words",
"def get_words_theme(path):\r\n\r\n list_words = []\r\n text_list = os.listdir(path)\r\n for text in text_list:\r\n list_words.extend(get_words_in_file(path + \"/\" + text))\r\n\r\n list_words = list(set(list_words)) # Remove duplicate words\r\n return list_words",
"def _get_word_list():\n with open(static_path(NOUN_FILE)) as file:\n nouns = file.readlines()\n\n with open(static_path(ADJECTIVE_FILE)) as file:\n adjectives = file.readlines()\n\n return nouns, adjectives",
"def _generate_word_list(self, dictFile):\n word_list = []\n try:\n dict_f = open(str(dictFile))\n except FileNotFoundError:\n raise FileNotFoundError(\"Text file required in the same directory as anagram.py\")\n for entry in dict_f.readlines():\n word_list.append(entry.strip())\n return word_list",
"def grep(word, filename):\n lines = open(filename).readlines()\n return [line for line in lines if word in line]",
"def get_words(str_or_file):\n return [word.lower()\n for sent in get_sents(str_or_file)\n for word in word_tokenize(sent)]",
"def load_words():\n dict_list = []\n with open(DICTIONARY, 'rt') as fin:\n for word in fin:\n dict_list.append(word.strip('\\n'))\n # print(dict_list)\n return dict_list",
"def load_words(word_select):\n\twords = ()\n\t\n\twith open(f\"WORDS_{word_select}.txt\", \"r\") as file:\n\t\tfile_contents = file.readlines()\n\t\n\tfor line in file_contents:\n\t\tword = line.strip()\n\t\tif word != '':\n\t\t\twords += (word,)\n\n\treturn words",
"def __getWords(self,line):\n\n l=string.expandtabs(string.lower(line),1)\n words=string.split(string.lstrip(l))\n \n return words",
"def readf(self, file_path):\n if self._empty_file_check(file_path):\n with open(file=file_path, mode='r') as text_file:\n for word in text_file:\n self._word_container.append(self._format_word(word))\n else:\n raise IOError\n return self._word_container",
"def get_word_list(file_name):\n f = open(file_name, 'r')\n lines = f.readlines()\n curr_line = 0\n while lines[curr_line].find('START OF THIS PROJECT GUTENBERG EBOOK') == -1:\n curr_line += 1\n lines = lines[curr_line+1:]\n #print(lines)\n wordList = []\n\n for line in lines:\n if line in string.whitespace:\n lines.remove(line)\n else:\n words = line.split()\n for word in words:\n wordList.append(word)\n\n#only uses first 10 lines of book\n\n for line in wordList[0:10]:\n index = 0\n for word in wordList:\n a = word.strip(string.punctuation)\n wordList[index] = a.lower()\n index += 1;\n return wordList",
"def _load_word_list(self, at_file):\n words = []\n with open(at_file) as atf:\n for line in atf.readlines():\n word = line.strip()\n if word.startswith(\"#\"):\n continue\n if word.startswith(\"@\"):\n more = self._load_word_list(word[1:])\n else:\n more = word.split()\n words.extend(more)\n return self.get_words(words) # another pass to fix paths",
"def _get_words_by_file_paths(self, file_paths):\n word_names = []\n for file_path in file_paths:\n with open(file_path, 'r') as f:\n lines = f.readlines()\n\n def format_word(word): return word.strip().replace('\\n', '')\n word_names_of_lines = [format_word(word) for word in lines]\n word_names.extend(word_names_of_lines)\n\n return word_names"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Given a list of (word, count, percentage) tuples, return the top two word counts.
|
def top_two_word(counts):
limited_counts = counts[0:2]
count_data = [count for (_, count, _) in limited_counts]
return count_data
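
A tiny usage sketch with made-up tuples; the list is assumed to be pre-sorted by count in descending order, since the function simply takes the first two entries.

counts = [('the', 120, 0.12), ('and', 90, 0.09), ('of', 60, 0.06)]
assert top_two_word(counts) == [120, 90]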
|
[
"def top_word(word_count):\n\n top_words = []\n last_count = 0\n for each in word_count:\n if len(each.split()) < 2:\n # we compress spaces in our word count but sometimes they still\n # show up as a count, we won't list them as a top word though.\n continue\n word = each.split()[1]\n count = int(each.split()[0])\n if count >= last_count:\n top_words.append(word)\n last_count = count\n else:\n break\n return top_words, last_count",
"def get_top_words(data_list, n_top_words=160):\n top_words = []\n \n d = Counter(concatenate_all_text(data_list))\n d_sorted = sorted(d.items(), key=itemgetter(1), reverse=True)\n \n assert len(d_sorted) >= n_top_words, 'Too many top words'\n \n for i in range(n_top_words):\n top_words.append(d_sorted[i][0])\n \n return top_words",
"def get_top_n_words(word_list, n):\n\tword_frequencies = {}\n\tfor word in word_list:\n\t\tword_frequencies[word.lower()] = word_frequencies.get(word.lower(), 0) + 1\n\n\ttop_words = sorted(word_frequencies, key=word_frequencies.get, reverse=True)[:n]\n\treturn [(word_frequencies[word], word) for word in top_words]",
"def give_score(word_list, sentence_list):\n\n word_count = FreqDist(word_list)\n len_sent = len(sentence_list)\n top_dict = defaultdict(int)\n for i in range(len_sent):\n for word in word_tokenize(sentence_list[i].lower()):\n if word in word_count:\n top_dict[i] += word_count[word]\n\n return top_dict",
"def wcount(lines, topn=10):\n dcount = defaultdict(int)\n for line in lines.splitlines():\n lst = [i.strip(string.punctuation) for i in line.split()]\n for word in lst:\n word = word.lower()\n dcount[word] += 1\n sor = sorted(dcount.items(), key=lambda t: t[1], reverse=True)\n if len(sor) >= topn:\n top = sor[:topn]\n else:\n top = sor\n for u in top:\n print(\"{}\\t{}\".format(*u))",
"def getWordPercentage(wordList: list, tokens) -> str:\n tokenList = nltk.FreqDist(tokens)\n sumOfFreq = 0.0\n wordListAsString = \"\"\n count = 1\n for word in wordList:\n wordListAsString += word\n if count != len(wordList):\n wordListAsString += \", \"\n count += 1\n sumOfFreq += tokenList.freq(word)\n percentWord = round(sumOfFreq * 100, 5)\n return \"Percentage of (\" + wordListAsString + \") is \" + \"{:5f}\".format(percentWord)",
"def getWordSuggestionsV1(word, fileName, n, commonPercent, topN):\n \n \n wordlist=getWordsOfSimLength(word,fileName,n)#gives a list of words with almost similar length\n \n winners=getWordsWithCommonPercent(word, wordlist,commonPercent)#words with commonletters from the list provided\n \n similarityDictionary=getSimilarityDict(word,winners)#gives the words that meets the similarity criteria\n return getBestWords(similarityDictionary, topN)#returns the tobN best words",
"def get_top_n_counts(all_data, n):\r\n top10_counter=Counter(all_data).most_common(n)\r\n top10_names=[name for (name, count) in top10_counter]\r\n top10_count=[count for (name, count) in top10_counter]\r\n top10_perc=[\"{0:.1%}\".format(count/len(all_data)) for count in top10_count]\r\n # zip them together into one list\r\n top_10_stats=list(zip(*[top10_names, top10_count, top10_perc]))\r\n return(top_10_stats)",
"def sort_by_count_word(people):\n return sorted(people, key=count_word)",
"def top_10_words(hist, num = 10):\n t = most_common(hist)\n for freq, word in t[:num]:\n print(word,\"\\t\", freq)",
"def most_common_words(df, sentence, cl, label, **kwargs):\n\n df_ = df[df[cl]==label]\n df_ = df_[sentence].tolist()\n docx = ' '.join(str(x) for x in df_)\n docx = docx.split()\n word_counter = Counter(docx)\n\n top = 10\n\n for key, value in kwargs.items():\n if key == 'top':\n top = value\n\n for word, count in word_counter.most_common(top):\n print(word, ': ', count)",
"def print_top_s(filename):\n word_count = words_count(filename)\n\n # Each item is a (word, count) tuple.\n # Sort them so the big counts are first using key=get_count() to extract count.\n items = sorted(word_count.items(), key= lambda w : w[1], reverse=True)\n\n # Print the first 20\n for item in items[:20]:\n print (item[0], item[1])",
"def top_ngrams(tokenized_words, n=2, top=10):\n\tall_ngrams = []\n\tfor each in tokenized_words:\n\t\tall_ngrams += ngrams(each, n)\n\treturn FreqDist(all_ngrams).most_common(top)",
"def top_words_by_tf_idf(document, counts, n):\n words = list(analyze(document))\n idfs = get_idf_dict(words, counts)\n tfs = FreqDist(words)\n words.sort(key=lambda w: -(tfs[w] * idfs[w]))\n return words[0:n]",
"def extract_topn_from_vector(feature_names, sorted_items, topn=10):\n \n #use only topn items from vector\n sorted_items = sorted_items[:topn]\n \n score_vals = []\n feature_vals = []\n \n # word index and corresponding tf-idf score\n for idx, score in sorted_items:\n \n #keep track of feature name and its corresponding score\n score_vals.append(round(score, 3))\n feature_vals.append(feature_names[idx])\n \n \n results= {}\n for idx in range(len(feature_vals)):\n results[feature_vals[idx]]=score_vals[idx]\n \n return results, feature_vals",
"def top_hashtags(tags_and_counts, top_tags):\n top_ten_if_changed = top_tags.update(tags_and_counts)\n return (top_ten_if_changed, top_ten_if_changed != None)",
"def extract_most_freq_terms(counter, vocab_list):\r\n\tword_list=[]\r\n\tlist_of_counts=counter.most_common(50)\r\n\tfor i in range(len(list_of_counts)):\r\n\t\tif list_of_counts[i][0] in vocab_list:\r\n\t\t\tword_list.append(list_of_counts[i][0])\r\n\treturn word_list",
"def score(self, lst):\n words = []\n score = 0\n for each in lst:\n words.append(each[1])\n\n for word in words:\n if word in UNIGRAM_COUNTS:\n score = score + log10((UNIGRAM_COUNTS[word] / 1024908267229.0))\n else:\n score = score + log10((10.0 / (1024908267229.0 * 10 ** len(word))))\n\n return score",
"def _score_by_len(self, lst):\n words = []\n score = 0\n if isinstance(lst, tuple):\n words = [lst[1]]\n else:\n for each in lst:\n words.append(each[1])\n\n for word in words:\n if word in UNIGRAM_COUNTS:\n score = score + len(word)\n else:\n score = score + len(word)\n\n return score",
"def wcount(lines, topn=10):\n\n for i in lines:\n if i>='A' and i<=\"Z\":\n continue\n elif i>=\"a\" and i<='z':\n continue\n elif i==' ':\n continue\n else:\n lines=lines.replace(i,' ')\n n1=lines.split(' ')\n for i in range(n1.count('')):\n n1.remove('')\n n2=set()\n for i in n1:\n n2.add((i,n1.count(i)))\n \n n3=[]\n for (i,o) in n2:\n n3.append(o)\n for i in range(topn):\n x=max(n3)\n n3.remove(x)\n y=max(n3)\n n4={}\n for (i,o) in n2:\n if o>y:\n n4[o]=i\n n5=list((n4.keys()))\n n5.sort(reverse=True)\n for i in n5:\n print (n4[i], '\\t', i)\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
method to list the entity owners
|
def list_entity_owners(ipaddress, entity_owner_list):
entity = ":8181/restconf/operational/entity-owners:entity-owners"
url = "http://" + ipaddress + entity
resp = requests.get(url, headers=con_header, auth=authentication)
if resp.status_code != RESP_GET_SUCCESS:
print("controller is down, resp_code", resp.status_code)
print("response content", resp.content)
sys.exit(1)
data = json.loads(resp.content)
ovsdb = data["entity-owners"]["entity-type"]
print("\n\n=================== Entity Details ===================\n")
for e_type in ovsdb:
entities = e_type["entity"]
for entity in entities:
id = entity["id"]
if len(entity["owner"]) > 0:
print("NODE ID", str(id[id.rindex("=") + 2 : len(id) - 2]))
print("OWNER", str(entity["owner"]))
for owner in entity_owner_list:
owner_role = owner.split(":")
if entity["owner"] == owner_role[1]:
print("IP Address", str(owner_role[0]))
print("\n")
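
A self-contained sketch of just the owner/IP matching step used above: each entity_owner_list entry is assumed to have the form "ip:member-name", and the member name reported by the controller is matched back to its IP address (all values here are made up).

entity_owner_list = ['10.0.0.1:member-1', '10.0.0.2:member-2']
owner = 'member-2'  # e.g. entity["owner"] returned by the controller
for entry in entity_owner_list:
    ip_address, member = entry.split(':')
    if owner == member:
        print('IP Address', ip_address)  # prints: IP Address 10.0.0.2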
|
[
"def get_owners(conn):\n c = conn.cursor()\n sql = \"\"\"SELECT * FROM owners;\"\"\"\n c.execute(sql)\n return c.fetchall()",
"def get_all_owners():\n owners = []\n for owner in query_db('SELECT * FROM owner'):\n owners.append({\n 'id': owner['id'],\n 'firstName': owner['firstName'],\n 'lastName': owner['lastName'],\n\n })\n return owners",
"def owner_name_lists(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"owner_name_lists\")",
"def owner_id_lists(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"owner_id_lists\")",
"def owner_name_lists(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"owner_name_lists\")",
"def test_list_image_param_owner(self):\n image_id = self.created_images[0]\n # Get image metadata\n image = self.client.show_image(image_id)\n\n params = {\"owner\": image['owner']}\n self._list_by_param_value_and_assert(params)",
"def get(self):\n user = users.get_current_user()\n if user:\n if users.is_current_user_admin():\n owner_json = self._GetAllOwnerDataJson()\n else:\n owner_json = self._GetOwnerDataForUserJson(user)\n else:\n self.RenderHtml('result.html', {\n 'errors': ['Log in to edit test owners.']})\n return\n\n self.RenderHtml('edit_test_owners.html',\n {'owner_info': owner_json})",
"def read_owners_of_dog(self, dog_id) -> list:\n return {\n \"command\": \"read_owners_of_dog\",\n \"kwargs\": {\n \"id\": user_id\n }\n }",
"def owner_id_lists(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"owner_id_lists\")",
"def list_accounts(*args):\n for account, owner in bank.get_all_accounts():\n print(str(account), \"/ Owner:\", owner.name)",
"def getAffiliations(entity):",
"def mentors_all(self):\n return self.all()\\\n .select_related('user').\\\n order_by('display_nr')",
"def list_by_owner(self, owner_id: int, skip: int, limit: int) -> List[Item]:\n return (\n self.session.query(Item)\n .filter_by(owner_id=owner_id)\n .order_by(Item.id)\n .offset(skip)\n .limit(limit)\n .all()\n )",
"async def _get_owners_access_task(self, client):\n try:\n owners_access = await client.owners_access('applications.commands.permissions.update')\n except GeneratorExit:\n raise\n \n except BaseException as err:\n if isinstance(err, ConnectionError):\n return None\n \n await client.events.error(\n client,\n f'{self!r}._get_owners_access_task',\n SlasherSyncError(None, err),\n )\n return None\n \n return owners_access",
"def get_owner(conn, owner_id):\n c = conn.cursor()\n sql = \"\"\"SELECT * FROM owners\n WHERE owner_id=?;\"\"\"\n c.execute(sql, (owner_id,))\n return c.fetchall()",
"def get_permission_owners_query():\n owners_query = \"\"\"\n {group_table_name!s} gug \n LEFT JOIN {owner_table_name!s} op \n ON gug.group_id = op.owner_object_id \n AND gug.group_content_type_id = op.owner_content_type_id \n AND (gug.roles & op.roles) != 0\n LEFT JOIN {global_table_name!s} gl \n ON gl.content_type_id = gug.group_content_type_id \n AND (gl.roles & gug.roles) != 0\n \"\"\"\n OwnerToPermission = apps.get_model('protector', 'OwnerToPermission')\n GenericUserToGroup = apps.get_model('protector', 'GenericUserToGroup')\n GenericGlobalPerm = apps.get_model('protector', 'GenericGlobalPerm')\n return owners_query.format(\n owner_table_name=OwnerToPermission._meta.db_table,\n group_table_name=GenericUserToGroup._meta.db_table,\n global_table_name=GenericGlobalPerm._meta.db_table,\n )",
"def getAllDevelopers(server,repo):\n nameDevelopers=[]\n #Get all contributors of repository\n url=server+\"/repos/\"+repo+\"/stats/contributors\"\n res=conn.get(url)\n dicres=json.loads(res.text)\n for developer in dicres:\n nameDevelopers.append(developer.get(\"author\").get(\"login\"))\n return nameDevelopers",
"def _GetAllOwnerDataJson(self):\n owner_dict = test_owner.GetMasterCachedOwner()\n results = []\n for test_suite_path in sorted(owner_dict):\n owners = owner_dict[test_suite_path]\n item = {\n 'name': test_suite_path,\n 'sub_items': []\n }\n for owner in owners:\n item['sub_items'].append({\n 'name': owner\n })\n results.append(item)\n return json.dumps(results)",
"def cob_by_owner(self, owner):\n srv = couchdb.Server(self.config['db']['url'])\n db = srv[self.config['db']['name']]\n dat = []\n\n\n view = self.config['all']['cob_by_owner']\n for item in db.view(view, key=owner):\n dat.append(item.value)\n\n return dat"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
compile .osl file with given filepath to temporary .oso file
|
def my_osl_compile(self, input_path):
output_file = tempfile.NamedTemporaryFile(mode='w', suffix=".oso", delete=False)
output_path = output_file.name
output_file.close()
ok = _cycles.osl_compile(input_path, output_path)
print("osl compile output = %s" % output_path)
if ok:
print("OSL shader compilation succeeded")
return ok, output_path
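
A self-contained sketch of the temporary-output-path pattern used above; the Blender-internal _cycles.osl_compile call is replaced by a hypothetical compile_fn callback so the snippet can run anywhere.

import tempfile

def compile_to_temp(input_path, compile_fn):
    # Allocate a named .oso path, close the handle, then hand both paths to the compiler.
    output_file = tempfile.NamedTemporaryFile(mode='w', suffix='.oso', delete=False)
    output_path = output_file.name
    output_file.close()
    ok = compile_fn(input_path, output_path)
    return ok, output_path

ok, oso_path = compile_to_temp('shader.osl', lambda src, dst: True)
print(ok, oso_path)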
|
[
"def make_source_fs():\n return open_fs('temp://')",
"def _compile_file(engine, path, e_file):\n # Change directory and execute engine\n os.chdir(path)\n os.system(engine + e_file)",
"def compile_assembly(filename: str, cmd: str, temp: str):\n assembly = os.path.basename(filename).partition(\".\")[0]\n assembly = os.path.join(temp, f\"{assembly}.s\")\n with open(assembly, \"w\") as fp:\n sp.run([cmd, filename], stdout=fp)\n return assembly",
"def lcf_generate(output_path):\n\n import module0\n\n # load symbols from compiled files\n symbols = []\n for archive in ARCHIVES:\n symbols.extend(load_archive(archive))\n\n # load object files from the 'build/o_files', this way we need no list of\n # object files in the python code.\n with open(\"build/o_files\", 'r') as content_file:\n o_files = content_file.read().strip().split(\" \")\n\n for o_file in o_files:\n with open(o_file, 'rb') as file:\n obj = libelf.load_object_from_file(None, o_file, file)\n symbols.extend(get_symbols_from_object_file(obj))\n\n # write the file\n with output_path.open(\"w\") as file:\n file.write(\"MEMORY {\\n\")\n file.write(\"\\ttext: origin = 0x80003100\\n\")\n file.write(\"}\\n\")\n file.write(\"\\n\")\n\n file.write(\"SECTIONS {\\n\")\n file.write(\"\\tGROUP:{\\n\")\n\n for name, align in SECTIONS:\n file.write(\"\\t\\t%s ALIGN(0x%X):{}\\n\" % (name, align))\n\n # strip .dead section\n file.write(\"\\t\\t/DISCARD/ : { *(.dead) }\\n\")\n\n file.write(\"\\t} > text\\n\")\n file.write(\n \"\\t_stack_addr = (_f_sbss2 + SIZEOF(.sbss2) + 65536 + 0x7) & ~0x7;\\n\")\n file.write(\"\\t_stack_end = _f_sbss2 + SIZEOF(.sbss2);\\n\")\n file.write(\"\\t_db_stack_addr = (_stack_addr + 0x2000);\\n\")\n file.write(\"\\t_db_stack_end = _stack_addr;\\n\")\n file.write(\"\\t__ArenaLo = (_db_stack_addr + 0x1f) & ~0x1f;\\n\")\n file.write(\"\\t__ArenaHi = 0x81700000;\\n\")\n file.write(\"\\n\")\n file.write(\"\\t/* missing symbols */\\n\")\n\n # improve decompilation workflow by making so that function\n # which, for what ever reason, cannot be named the same as\n # the expected name to work. This will happen for all symbols\n # with weird characters.\n base_names = set(module0.SYMBOL_NAMES.keys())\n main_names = set([sym.name for sym in symbols])\n names = base_names - main_names\n for name in names:\n symbol = module0.SYMBOLS[module0.SYMBOL_NAMES[name]]\n if symbol['type'] == \"StringBase\": # @stringBase0 is handled below\n continue\n if symbol['type'] == \"LinkerGenerated\": # linker handles these symbols\n continue\n\n file.write(f\"\\t\\\"{symbol['label']}\\\" = 0x{symbol['addr']:08X};\\n\")\n file.write(\"\\n\")\n\n # @stringBase0 is generated by the compiler. 
The dol2asm is using a trick to\n # simulate the stringBase0 by creating another symbol (at the same location)\n # that is used instead, as it is impossible to reference the \"@stringBase0\" (because of the @).\n # So all references will be to the new symbol, thus the linker will think\n # that the @stringBase0 symbol is never used and strip it.\n file.write(\"\\t/* @stringBase0 */\\n\")\n for x in module0.SYMBOLS:\n if x['type'] == \"StringBase\":\n file.write(\"\\t\\\"%s\\\" = 0x%08X;\\n\" % (x['label'], x['addr']))\n\n file.write(\"}\\n\")\n file.write(\"\\n\")\n\n file.write(\"FORCEACTIVE {\\n\")\n for f in FORCE_ACTIVE:\n file.write(\"\\t\\\"%s\\\"\\n\" % f)\n file.write(\"\\n\")\n\n file.write(\"\\t/* unreferenced symbols */\\n\")\n for x in module0.SYMBOLS:\n k = x['label']\n if x['type'] == \"StringBase\":\n continue\n\n require_force_active = False\n\n # if the symbol is not reachable from the __start add it as forceactive\n if not x['is_reachable'] or sum(x['r']) == 0:\n require_force_active = True\n\n if require_force_active:\n file.write(f\"\\t\\\"{x['label']}\\\"\\n\")\n if not x['label'] in main_names:\n file.write(f\"\\t\\\"{x['name']}\\\"\\n\")\n\n for x in module0.SYMBOLS:\n if x['type'] == \"StringBase\":\n continue\n\n if x['is_reachable']:\n if x['label'] != x['name']:\n file.write(f\"\\t\\\"{x['name']}\\\"\\n\")\n\n for symbol in symbols:\n if not symbol.name:\n continue\n\n if \"__template\" in symbol.name:\n file.write(\"\\t\\\"%s\\\"\\n\" % (symbol.name))\n\n file.write(\"\\n\")\n file.write(\"}\\n\")\n file.write(\"\\n\")",
"def make_source_fs(self):\n return open_fs('temp://')",
"def render_stl(openscad_path, code_filename, output_name):\n with open(\"%s.scad\" % output_name, \"wb\") as f:\n f.write(code_filename)\n\n subprocess.call([openscad_path, \n \"-o\", \"%s.stl\" % output_name, \"%s.scad\" % output_name])",
"def test_convert_stl_file_binary():\n path_stl = join(dirname(__file__), \"tests/in/stl/box_binary.stl\")\n target_folder = join(dirname(__file__), \"tests/out/box_binary\")\n convert_stl_file(path_stl, target_folder, remove_original=False)\n assert isfile(_descriptor_filename(target_folder, basename(path_stl)))\n rmtree(target_folder, ignore_errors=True)",
"def _make_o_file(self, cfile, ext):\n ofile = cfile.new(ext=ext)\n if ofile.relto(udir):\n return ofile\n assert ofile.relto(rpythonroot), (\n \"%r should be relative to either %r or %r\" % (\n ofile, rpythonroot, udir))\n ofile = udir.join(ofile.relto(rpythonroot))\n ofile.dirpath().ensure(dir=True)\n return ofile",
"def l_write_locust_file(appName):\n global filename\n appName = appName if appName else \"AUT\"\n filename = appName + \"_\" + str(datetime.datetime.now().strftime(\n \"%d%m%Y_%H%M%S\")) + \".py\" # filename formatted to demo_DateMonthYear_HourMinuteSeconds\n with open(filename, \"w\") as f:\n f.write(code)",
"def make_source(name, content):\n src = source(name)\n with open(src._filename, 'w') as out:\n out.write(content)\n return src",
"def test_save_filename_path_object(self, prog, tmpdir):\n filename = tmpdir.join(\"test.xir\")\n sf.save(filename, prog, ir=\"xir\")\n\n with open(filename, \"r\") as f:\n res = f.read()\n\n assert res == test_xir_prog_not_compiled",
"def genOperationFile(self, filePath, dbname, fileNamePrefix, fileNameSuffix, incFileName):\r\n if not os.path.exists(ResultSrcDir + \"/db_\" + str(dbname).lower() + \"/src/\"):\r\n os.makedirs(ResultSrcDir + \"/db_\" + str(dbname).lower() + \"/src/\")\r\n\r\n logger.debug('writing '+ResultSrcDir + \"/db_\" + str(dbname).lower() + \"/src/\" + fileNamePrefix+\"convert\"+fileNameSuffix)\r\n convf=open(ResultSrcDir + \"/db_\" + str(dbname).lower() + \"/src/\" + fileNamePrefix+\"convert\"+fileNameSuffix, 'wb')\r\n #addf=open(ResultSrcDir + \"/db_\" + str(dbname).lower() + \"/src/\" + fileNamePrefix+\"add\"+fileNameSuffix, 'wb')\r\n #delf=open(ResultSrcDir + \"/db_\" + str(dbname).lower() + \"/src/\" + fileNamePrefix+\"del\"+fileNameSuffix, 'wb')\r\n #getf=open(ResultSrcDir + \"/db_\" + str(dbname).lower() + \"/src/\" + fileNamePrefix+\"get\"+fileNameSuffix, 'wb')\r\n #setf=open(ResultSrcDir + \"/db_\" + str(dbname).lower() + \"/src/\" + fileNamePrefix+\"set\"+fileNameSuffix, 'wb')\r\n\r\n #write the include\r\n convf.write('#include \"dm_internal.h\"\\n')\r\n convf.write('#include \\\"'+ incFileName + '\\\"\\n\\n')\r\n\r\n #addf.write('#include \"dm_internal.h\"\\n')\r\n #addf.write('#include \\\"'+ incFileName + '\\\"\\n\\n')\r\n\r\n #delf.write('#include \"dm_internal.h\"\\n')\r\n #delf.write('#include \\\"'+ incFileName + '\\\"\\n\\n')\r\n\r\n #getf.write('#include \"dm_internal.h\"\\n')\r\n #getf.write('#include \\\"'+ incFileName + '\\\"\\n\\n')\r\n\r\n #setf.write('#include \"dm_internal.h\"\\n')\r\n #setf.write('#include \\\"'+ incFileName + '\\\"\\n\\n')\r\n\r\n convf.write('/*----------beginning of convert function list----------*/\\n\\n')\r\n #gen the convert func for BITMAP and Enum\r\n for tmpItem in self.itemList[dbname]:\r\n if tmpItem.cType == \"UINT32_BITMAP\":\r\n self.convertBitmapToString(convf,tmpItem)\r\n self.convertStringToBitmap(convf,tmpItem)\r\n if tmpItem.cType == \"UINT32_ENUM\":\r\n self.convertEnumToString(convf,tmpItem)\r\n self.convertStringToEnum(convf,tmpItem)\r\n convf.write('/*----------end of convert function list----------*/\\n\\n')\r\n\r\n convf.close()\r\n\r\n #get Func Generation of each item\r\n #tmpFuncGen = 0\r\n #for tmpItem in self.itemList[dbname][::-1]:\r\n # if re.search('A', tmpItem.genOperationFlg, re.IGNORECASE): #add operation\r\n # if tmpItem.isObject:\r\n # self.genAddFunc(addf,tmpItem, dbname)\r\n # if re.search('G', tmpItem.genOperationFlg, re.IGNORECASE): #get operation\r\n # self.genGetFunc(getf,tmpItem, dbname)\r\n # if re.search('S', tmpItem.genOperationFlg, re.IGNORECASE): #set operation\r\n # self.genSetFunc(setf,tmpItem, dbname)\r\n # if re.search('D', tmpItem.genOperationFlg, re.IGNORECASE): #delete operation\r\n # if tmpItem.isObject:\r\n # self.genDelFunc(delf,tmpItem, dbname)\r\n\r\n #addf.close()\r\n #delf.close()\r\n #getf.close()\r\n #setf.close()\r",
"def svn_fs_make_file(*args) -> \"svn_error_t *\":\n return _fs.svn_fs_make_file(*args)",
"def test_generate_envoy_file__replaced(self) -> None:\n requested_out_file = os.path.join(self._config.envoy_config_dir, 'x.txt')\n with open(requested_out_file, 'w') as f:\n f.write('y')\n generate.generate_envoy_file(self._config, 'x.txt', 'x')\n self.assertTrue(os.path.isfile(requested_out_file))\n with open(requested_out_file, 'r') as f:\n self.assertEqual('x', f.read())",
"def optimize(filepath):\n command = [shared.LLVM_OPT, '-o=-', filepath] + shared.pick_llvm_opts(3, True)\n with get_temp_file('.bc') as out: ret = subprocess.call(command, stdout=out)\n if ret != 0: raise RuntimeError('Could not optimize %s.' % filepath)\n return out.name",
"def prepare_odx_file (subdir, odx_file):\n tmp_file = \"tmp_\" + os.path.basename(odx_file)\n with open (odx_file, \"r\") as odx_in, open (tmp_file, \"w\") as odx_out:\n for l in odx_in:\n if \"<FLASHDATA ID=\" in l:\n binary_file = l.split()[1].split(\".\")[-1][:-1] + \".bin\"\n elif \"<DATA>\" in l:\n # write binary data\n with open (os.path.join(subdir, binary_file), \"wb\") as bin_f:\n # write in chunks of 10 MB\n start_offset = 20\n end_offset = 20\n final_offset = len(l) - len(\"</DATA>\\n\")\n while (end_offset < final_offset):\n end_offset = end_offset + (10*1024*1024)\n if end_offset > final_offset:\n end_offset = final_offset\n bin_f.write(binascii.a2b_hex(l[start_offset:end_offset]))\n start_offset = end_offset\n # Attention! The tmp odx file only contains (at most) the last\n # 100 bytes of the data. This is enough to contain the complete trailer\n if not \"<DATA>\" in l[-100:] :\n l = \"<DATA>\" + l[-100:]\n odx_out.write(l)\n return tmp_file",
"def test_save_filename_string(self, prog, tmpdir):\n filename = str(tmpdir.join(\"test.xir\"))\n sf.save(filename, prog, ir=\"xir\")\n\n with open(filename, \"r\") as f:\n res = f.read()\n\n assert res == test_xir_prog_not_compiled",
"def create_local_py(python_dir, python_filename, lib_dir, python_file_to_run, case_config_path, render_config_path):\n with open(python_dir + \"/\" + python_filename, \"w\") as python_file:\n python_file.write(\"import os\\n\")\n python_file.write(\"os.chdir(\\\"\" + lib_dir + \"\\\")\\n\")\n python_file.write(\"os.system(\\\"python3 \" + lib_dir + \"/\" + python_file_to_run + \" -c \" + case_config_path + \" -r \" + render_config_path + \"\\\")\")",
"def remake_file(constants):\n file_names = [x for x in os.listdir(constants.CURRENT_DIR) if 'part' in x]\n\n # _ = [(x.split('.')[:-1], x.split('.')[-1]) for x in file_names]\n # _.sort(key = lambda (x,y): int(y))\n # file_names = ['.'.join(['.'.join(x), str(y)]) for (x,y) in _]\n file_names.sort(key=lambda x: int(x.split('.')[-1]))\n\n final_file = open(file_names[0].split('.')[0], \"wb\")\n\n for part_name in file_names:\n print \"Start joining\", part_name\n part_file = open(part_name, \"rb\")\n _copy_in_chunks(constants, part_file, final_file)\n part_file.close()\n\n final_file.close()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Scrapes website for info on a specific company
|
def scrape_company_info(driver, site, company_name):
source = get_page_source(driver, site, company_name)
soup = BeautifulSoup(source, "html.parser")
company_curr = Company(soup)
name = company_curr.get_name()
desc = company_curr.get_desc()
location = company_curr.get_location()
size = company_curr.get_size()
url = company_curr.get_url()
domain = company_curr.get_domains()
return name, desc, location, size, url, domain
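A minimal, hedged call-site sketch for the scraper above; `driver` is assumed to be a Selenium WebDriver, the directory URL is purely illustrative, and `get_page_source` plus `Company` are assumed to be helpers defined elsewhere in the same module:
# Hypothetical invocation: scrape one company's profile and unpack the fields.
fields = scrape_company_info(driver, "https://example-directory.test", "Acme Corp")
name, desc, location, size, url, domain = fields
print(name, location, size)   # spot-check a few of the scraped values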
|
[
"def get_company_info(name, session):\n escaped_name = urllib.parse.quote_plus(name)\n\n response = session.get(('https://www.linkedin.com'\n '/voyager/api/organization/companies?'\n 'q=universalName&universalName=' + escaped_name))\n\n if response.status_code == 404:\n print(\"[!] Could not find that company name. Please double-check LinkedIn and try again.\")\n sys.exit()\n\n if response.status_code != 200:\n print(\"[!] Unexpected HTTP response code when trying to get the company info:\")\n print(f\" {response.status_code}\")\n sys.exit()\n\n # Some geo regions are being fed a 'lite' version of LinkedIn mobile:\n # https://bit.ly/2vGcft0\n # The following bit is a temporary fix until I can figure out a\n # low-maintenance solution that is inclusive of these areas.\n if 'mwlite' in response.text:\n print(\"[!] You are being served the 'lite' version of\"\n \" LinkedIn (https://bit.ly/2vGcft0) that is not yet supported\"\n \" by this tool. Please try again using a VPN exiting from USA,\"\n \" EU, or Australia.\")\n print(\" A permanent fix is being researched. Sorry about that!\")\n sys.exit()\n\n try:\n response_json = json.loads(response.text)\n except json.decoder.JSONDecodeError:\n print(\"[!] Yikes! Could not decode JSON when getting company info! :(\")\n print(\"Here's the first 200 characters of the HTTP reply which may help in debugging:\\n\\n\")\n print(response.text[:200])\n sys.exit()\n\n company = response_json[\"elements\"][0]\n\n found_name = company.get('name', \"NOT FOUND\")\n found_desc = company.get('tagline', \"NOT FOUND\")\n found_staff = company['staffCount']\n found_website = company.get('companyPageUrl', \"NOT FOUND\")\n\n # We need the numerical id to search for employee info. This one requires some finessing\n # as it is a portion of a string inside the key.\n # Example: \"urn:li:company:1111111111\" - we need that 1111111111\n found_id = company['trackingInfo']['objectUrn'].split(':')[-1]\n\n print(\" Name: \" + found_name)\n print(\" ID: \" + found_id)\n print(\" Desc: \" + found_desc)\n print(\" Staff: \" + str(found_staff))\n print(\" URL: \" + found_website)\n print(f\"\\n[*] Hopefully that's the right {name}! If not, check LinkedIn and try again.\\n\")\n\n return (found_id, found_staff)",
"def getCompanies(biztype, limit=10):\n\n html = queryHoovers(biztype, limit)\n #with open('body.html') as f:\n # html = f.read()\n\n soup = BeautifulSoup(html)\n #print(\"made the soup\")\n interleave = lambda a,b: list(filter(None, reduce(add, izip_longest(a,b))))\n rows = interleave(soup.find_all('tr', 're-grey', limit=(limit+1)/2), soup.find_all('tr', 'sr-white', limit=limit/2))\n\n \n\n companies = [{'Name':row('td', 'company_name')[0].text, 'Location': row('td', 'company-location')[0].text, 'Sales': row('td', 'company-sales')[0].text} for row in rows]\n\n #print(companies)\n out = str(json.dumps(companies)) # from unicode to ascii\n out = out.replace('\\\\u00a0', ' ') # replace non-breaking spaces\n return(out)",
"def indeed_company(bsoup):\n company = []\n for div in bsoup.find_all(name=\"div\", attrs={\"class\": \"row\"}):\n try:\n company.append(div.find(\"span\", attrs={\"class\": \"company\"}).text)\n except:\n company.append(\"Nothing_found\")\n return company",
"def parse_company(url, api_key=API_KEY):\n response = requests.get(url, data=api_key)\n resp = dict()\n if response.status_code == 200:\n if response.headers['content-type'] == 'application/json':\n resp = response.json()['response']\n else:\n print('Error in the type of answer received: {} with the URL: {}'.format(response.headers['content-type'], url))\n else:\n print('Error {} in accessing service with the URL: {}'.format(response.status_code, url))\n return response.status_code, resp",
"def queryHoovers(biztype, limit=10):\n\n params = {'type': 'company', 'term': biztype, 'formstep': 0, 'sort': 'sales', 'sortDir': 'desc'}\n response = urllib2.urlopen('http://www.hoovers.com/search/company-search-results/100003765-1-1NJHZ8.html?%s' % urllib.urlencode(params))\n\n for line in response:\n if re.search('<h1>Company Results</h1>', line):\n filtered = '<html><body>' + line + response.read()\n break\n\n return filtered",
"def fetch_500companies(self):\n # get the list of startup urls\n # normally i would use lxml, but their html is horribly broken\n # not even beautifulsoup can handle it\n r = requests.get('http://500.co/companies/')\n urls = list(set(re.findall(r'http://500\\.co/startup-profiles/.+/', r.text)))\n\n # fetch each indidual startup\n results = []\n logger.info(\"Got %s urls from 500companies:\", len(urls))\n for url in urls:\n results.append(self.fetch_single_500startup(url))\n time.sleep(1) # be (a little) nice\n\n return results",
"def get_job_listings_from_website():\r\n title_location = get_title_location_from_cl()\r\n URL = \"https://www.monster.ca/jobs/search/?q={}&where={}\".format(title_location[0],title_location[1].capitalize())\r\n page = requests.get(URL)\r\n # content_bytes_to_string = (page.content).decode('utf-8')\r\n # with open(\"monster_site_content.txt\",'w') as job_content:\r\n # job_content.write(content_bytes_to_string)\r\n # pprint.pprint(page.content)\r\n soup = BeautifulSoup(page.content, 'html.parser')\r\n results = soup.find(id=\"ResultsContainer\") #results variable is a 'bs4.element.Tag'\r\n job_elements = results.find_all('section',class_='card-content') #job_elements variable is a 'bs4.element.ResultSet'\r\n for job_element in job_elements:\r\n element_title = job_element.find('h2', class_=\"title\")\r\n element_company = job_element.find('div', class_=\"company\")\r\n element_location = job_element.find('div', class_=\"location\")\r\n if None in (element_title,element_company,element_location):\r\n continue\r\n formatted_element_company = (element_company.text).rstrip()\r\n formatted_element_title = (element_title.text).replace('\\n',\"\")\r\n new_formatted_element_company = formatted_element_company.lstrip()\r\n formatted_element_location = (element_location.text).lstrip()\r\n print(formatted_element_title)\r\n print(new_formatted_element_company)\r\n print(formatted_element_location)\r\n print()\r\n return results",
"def get_company_links(url_str):\n scraper = get_scraper(url_str)\n\n companies = []\n # Get a list containing html describing the companies.\n company_html_list = scraper.find_all(\"li\", class_=\"mrgn-bttm-sm\")\n for company_html in company_html_list:\n url = company_html.a[\"href\"]\n # The url from the page is relative, so make it absolute.\n url = \"http://www.ic.gc.ca\" + url\n\n companies.append(url)\n\n return companies",
"def get_company_name(url_str):\n scraper = get_scraper(url_str)\n\n h1 = scraper.find_all(\"h1\", {\"id\": \"cn-cont\"})[0]\n return h1.contents[0].strip()",
"def dh_company_search(request, access_token, company_name):\n companies = []\n url = settings.DATA_HUB_COMPANY_SEARCH_URL\n payload = {\"name\": company_name}\n\n response = dh_request(request, access_token, \"POST\", url, payload)\n\n # It is not an error for us if the request fails, this can happen if the\n # Access token is invalid, consider that there are no matches however\n # user is notified of the error to take appropriate action\n if not response.ok:\n return companies, response.json()\n\n for company in response.json()[\"results\"]:\n address = company[\"address\"]\n companies.append(\n {\n \"datahub_id\": company[\"id\"],\n \"name\": company[\"name\"],\n \"company_number\": company[\"company_number\"],\n \"duns_number\": company[\"duns_number\"],\n \"address\": {\n \"line_1\": address[\"line_1\"],\n \"line_2\": address[\"line_2\"],\n \"town\": address[\"town\"],\n \"county\": address[\"county\"],\n \"postcode\": address[\"postcode\"],\n \"country\": address[\"country\"][\"name\"],\n },\n }\n )\n\n return companies, None",
"def SearchingCompanyLocation(url):\n location=[]\n response=requests.get(url, timeout=5)# grabbing the url\n html=response.content# turning it into content\n soup=BeautifulSoup(html,'html.parser')# creating out soup\n\n locations={\"location\":[]}\n # if the company gives useful info in the paragraph tag\n content_in_paragraph=[paragraph.getText() for paragraph in soup.find_all('p')]# grabbing the infromation from the paragraph tags\n\n #removing the new line or tab\n paragraph=[element.replace('\\n',\" \").replace('\\t',\"\") for element in content_in_paragraph]\n # removing wierd excess space https://stackoverflow.com/questions/2077897/substitute-multiple-whitespace-with-single-whitespace-in-python\n paragraph=[' '.join(element.split()) for element in paragraph]\n\n # from the paragraph tags seeing if there is a location being used by passing it to our RegexLocation() funciton\n location_with_paragraph_tag=(list(filter(RegexLocation, paragraph)))# next just gives us the first one, filter turns the value from the is_location() funtion no longer boolean\n\n # getting rid of the html jargen\n #reference https://stackoverflow.com/questions/328356/extracting-text-from-html-file-using-python\n # getting rid of the script sytle in html\n for script in soup([\"script\", \"style\"]):\n (script.extract()) # rip it out\n\n # get text\n # grabbing the first chunk of text\n text = soup.get_text()\n\n # creating a list of the text that shows up by splitting the text at the new line of each section then stripping away the new lines\n final_required = [t for t in text.split('\\n') if t.strip()]\n\n # removing wierd excess space https://stackoverflow.com/questions/2077897/substitute-multiple-whitespace-with-single-whitespace-in-python\n c=[' '.join(element.split()) for element in final_required]\n\n location_without_paragraph_tag=(list(filter(RegexLocation, c)))#filter turns the value from the is_location() funtion no longer boolean\n\n # find which list is longer if there is only one thing found in the lists\n # if the list size is equal and equals 1\n if len(location_without_paragraph_tag) == len(location_with_paragraph_tag) and len(location_with_paragraph_tag)==1:\n # print(\"size of lists is 1\")\n if len(location_without_paragraph_tag[0])==len(location_with_paragraph_tag[0]):\n # print(\"the two lists are the exact same \")\n # if there is the word copyright get rid of it bc it is a problem with the geocode\n location.append((location_without_paragraph_tag[0]).replace('Copyright',\"\"))\n elif len(location_without_paragraph_tag[0])>len(location_with_paragraph_tag[0]):\n # print(\"location_without_paragraph_tag is bigger\")\n location.append((location_without_paragraph_tag[0]).replace('Copyright',\"\"))\n else:\n # print(\"location_with_paragraph_tag is bigger\")\n location.append((location_with_paragraph_tag[0]).replace('Copyright',\"\"))\n # if the list size is equal and equals zero\n if len(location_without_paragraph_tag) == len(location_with_paragraph_tag) and len(location_with_paragraph_tag)==0:\n # there is no location found\n # print(\"there is no location found\")\n location.append((location_with_paragraph_tag))\n\n # if the sizes arn't equal we want to find the size of the string of the lists\n location_without_paragraph_tag_list=[]\n location_with_paragraph_tag_list=[]\n if len(location_without_paragraph_tag) != len(location_with_paragraph_tag):\n # print(\"the sizes of the two lists arn't equal\")\n location_without_paragraph_tag_str=' '.join(location_without_paragraph_tag)\n 
location_with_paragraph_tag_str=' '.join(location_with_paragraph_tag)\n\n\n location_without_paragraph_tag_list.append(location_without_paragraph_tag_str)\n\n location_with_paragraph_tag_list.append(location_with_paragraph_tag_str)\n\n if len(location_without_paragraph_tag_str)>len(location_with_paragraph_tag_str):\n location.append(location_without_paragraph_tag_str.replace('Copyright',\"\"))\n else:\n location.append(location_with_paragraph_tag_str.replace('Copyright',\"\"))\n\n # if the sizes of the tags are equal and are larger then 0 and 1 ind the size of the string of the lists\n location_without_paragraph_tag_list=[]\n location_with_paragraph_tag_list=[]\n if len(location_without_paragraph_tag) == len(location_with_paragraph_tag):\n location_without_paragraph_tag_str=' '.join(location_without_paragraph_tag)\n location_with_paragraph_tag_str=' '.join(location_with_paragraph_tag)\n\n location_without_paragraph_tag_list.append(location_without_paragraph_tag_str)\n\n location_with_paragraph_tag_list.append(location_with_paragraph_tag_str)\n\n if len(location_without_paragraph_tag_str)>len(location_with_paragraph_tag_str):\n location.append(location_without_paragraph_tag_str.replace('Copyright',\"\"))\n elif len(location_without_paragraph_tag_str)<len(location_with_paragraph_tag_str):\n location.append(location_with_paragraph_tag_str.replace('Copyright',\"\"))\n else:\n location.append(location_with_paragraph_tag_str.replace('Copyright',\"\"))\n\n return(location)",
"def test_get_soup(self):\n url = 'http://techcrunch.com/'\n header = ['company name', 'company website']\n test_csv = 'test.csv'\n tcs = TechCrunchScraper(test_csv, header)\n\n soup = tcs.get_soup(url)\n self.assertIsNotNone(soup)",
"def html_job_reader():\n with open(\"index.html\") as fp:\n soup = BeautifulSoup(fp, features=\"html.parser\")\n all_companies = soup.find_all(\"div\", class_=\"card\")\n data = []\n for company in all_companies:\n data.append(process_company_html(company))\n return data",
"def get_company(self, company_id):\n\n index = 1\n finding_titles = True\n while finding_titles:\n request = f'https://www.imdb.com/search/title/?companies={company_id}&view=simple&start={index}'\n try:\n tree = self._get_tree(request)\n except requests.exceptions.HTTPError as e:\n if e.response.status_code == 404:\n finding_titles = False\n else:\n raise e\n # Check if this was a valid company ID\n company_title_node = tree.css_first('div.article > h1.header')\n if company_title_node:\n company_title = company_title_node.text().replace('(Sorted by Popularity Ascending)', '').strip()\n if len(company_title) == 0:\n raise InvalidCompanyId(f'Invalid company ID: {company_id}')\n\n title_list_node = tree.css_first('div.lister-list')\n if not title_list_node:\n finding_titles = False\n else:\n for title_info_node in title_list_node.css('span.lister-item-header'):\n title_id = None\n start_year = None\n end_year = None\n notes = None\n\n year_info_node = None\n # Check if this is a TV episode\n episode_node = title_info_node.css_first('small')\n if episode_node and 'Episode' in episode_node.text():\n episode_link_node = title_info_node.css_first('small ~ a')\n title_id = get_title_id(episode_link_node)\n year_info_node = title_info_node.css_first('small ~ a ~ span.lister-item-year')\n else:\n title_info_node = title_info_node.css_first('span.lister-item-index ~ span')\n if title_info_node:\n title_link_node = title_info_node.css_first('a')\n title_id = get_title_id(title_link_node)\n year_info_node = title_info_node.css_first('span.lister-item-year')\n\n if year_info_node:\n year_info_text = year_info_node.text().strip('()')\n years_match = re.search(r'(\\d|–|-)+', year_info_text)\n notes_match = re.search(r'([A-Za-z]+\\s*)+', year_info_text)\n if years_match:\n year_info = re.sub(r'[–\\-]+', '\\t', years_match.group(0)).split('\\t')\n if len(year_info) > 1:\n start_year, end_year = year_info\n # Handle shows that are still on-air (ex: '2005- ')\n if len(end_year.strip()) == 0:\n end_year = None\n else:\n start_year, = year_info\n if notes_match:\n notes = notes_match.group(0)\n\n yield CompanyScrape(\n company_id=company_id,\n title_id=title_id,\n start_year=start_year,\n end_year=end_year,\n notes=notes\n )\n index += 50",
"def get_response(self, company_name):\r\n request_url = \"https://en.wikipedia.org/w/api.php?action=query&titles=\"+ company_name +\"&prop=revisions&rvprop=content&format=json\"\r\n print(request_url)\r\n wiki_response = requests.get(request_url)\r\n print(wiki_response)\r\n wiki_response_json = json.loads(wiki_response.text)\r\n # print(wiki_response_json)\r\n wiki_query = wiki_response_json['query']\r\n wiki_query_pages = wiki_query['pages']\r\n\r\n if str(wiki_response) == \"<Response [404]>\":\r\n print(\"404 Error\")\r\n return None\r\n else:\r\n print(\"Page Found\")\r\n return wiki_query_pages",
"async def create_company_page(self, data: Dict[str, Any]) -> Tuple[str, str]:\n # Generate URL of company logo\n url = LOGO_URL.format(**{\"image_id\": data[\"logo\"][\"image_id\"] if \"logo\" in data else \"\"})\n\n # Try to get found date of company\n founded = dt.utcfromtimestamp(data[\"start_date\"]).date() if \"start_date\" in data else \"?\"\n\n # Generate list of games, that company have developed or published\n developed = \", \".join(game[\"name\"] for game in data[\"developed\"]) if \"developed\" in data else \"?\"\n published = \", \".join(game[\"name\"] for game in data[\"published\"]) if \"published\" in data else \"?\"\n\n formatting = {\n \"name\": data[\"name\"],\n \"url\": data[\"url\"],\n \"description\": f\"{data['description']}\\n\\n\" if \"description\" in data else \"\\n\",\n \"founded\": founded,\n \"developed\": developed,\n \"published\": published\n }\n page = COMPANY_PAGE.format(**formatting)\n\n return page, url",
"def get_company_info():\n return _get(\"info\")",
"def get_html_for_item(self, item):\n with splinter.Browser(self.browser_type) as b:\n # Visit home page\n b.visit('https://www.numbeo.com/cost-of-living/')\n # Fill search form with city\n rand_wait_for_element(b, '//*[@id=\"dispatch_form\"]')\n search_form = b.driver.find_element_by_xpath('//*[@id=\"city_selector_city_id\"]')\n search_form.send_keys(item)\n time.sleep(5)\n search_form.send_keys(Keys.TAB)\n # Close signup popup if exists\n try:\n b.find_by_xpath('/html/body/div[6]/div[1]/button').first.click()\n except splinter.exceptions.ElementDoesNotExist:\n pass\n # Return search result\n return str(b.html)",
"def search_company(query):\n lookup = requests.get(SEARCH_QUERY, params={'query': query, 'limit': 10})\n if 200 <= lookup.status_code < 300:\n if len(lookup.json()) == 0:\n return None # Nothing found\n else:\n # Create dict with company name as key\n company_dict = {c['name'].lower(): c for c in lookup.json()}\n info, confidence = match_one(query.lower(), company_dict)\n # Return result if confidence is high enough, or query string\n # contained in company name eg Cisco > Cisco Systems\n if confidence > 0.5 or query.lower() in info['name'].lower():\n return info['symbol']\n else:\n # HTTP Status indicates something went wrong\n raise requests.HTTPError('API returned status code: '\n '{}'.format(lookup.status_code))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return all wave files (recursively) from the provided directory in sorted order
|
def get_all_waves(directory: str) -> list:
    files = glob.glob(directory + '/**/*.wav', recursive=True)
    if not files:
        logging.warning('No WAVE files found in %s', directory)
else:
files.sort()
return files
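A minimal usage sketch for the helper above, assuming `glob` and `logging` are imported at module level and that the corpus path below is purely illustrative:
# Collect every .wav under a (hypothetical) corpus directory in sorted order,
# e.g. before batching files for feature extraction.
wave_paths = get_all_waves('/data/speech_corpus')
logging.info('Found %d WAVE files', len(wave_paths))
for path in wave_paths[:3]:
    print(path)   # spot-check the first few entries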
|
[
"def _get_wav_files(dir_path):\n files = []\n for file in os.listdir(dir_path):\n if file.endswith(\".wav\"):\n files.append(file)\n return files",
"def collect_files_from_dir(directory, prefix=\"\", suffix=\"\", recursive=True):\n files = []\n _collect_files_from_dir(directory, prefix, suffix, recursive, files)\n return files",
"def get_audios(path):\n supported_formats = [\".wav\", \".mp3\", \".ogg\", \".flac\", \".m4a\"]\n return [\n item\n for sublist in [[os.path.join(dir, file) for file in files]\n for dir, _, files in list(os.walk(path))]\n for item in sublist if os.path.splitext(item)[1] in supported_formats\n ]",
"def recursive_wav_paths(path):\n absolute_paths = []\n for folder, subs, files in os.walk(path):\n for file in files:\n extension = os.path.splitext(file)[1]\n if extension.lower() == '.wav':\n file_path = os.path.join(folder, file)\n absolute_paths.append(os.path.abspath(file_path))\n\n return absolute_paths",
"def mp3files():\n\tBase_dir = os.path.dirname(os.path.dirname((os.path.abspath(__file__))))\n\tmp3_source = os.path.join(Base_dir,'raw_data','mp3_files')\n\tmp3list = []\n\tfor paths,dirs,files in scandir.walk(mp3_source):\n\t#for paths,dirs,files in scandir.walk(r'D:\\Audio\\forJarvis'):\n\t\t\"\"\"if want to search mp3 files from all you HDD then \n\t\tprovide all drives path postions instead of D:\\\\Audio\n\t\tadd extra back slash where ever back slash occur. \n\t\t\"\"\"\n\t\tfor file in files:\n\t\t\tif file.endswith('.mp3'):\n\t\t\t\tfullpath =mp3list.append(os.path.join(paths,file))\n\t#print mp3list\n\t#print len(mp3list)\n\treturn mp3list",
"def traverse_dir(directory):\n files = []\n for file in os.listdir(directory):\n full_path = directory + file\n if os.path.isdir(full_path):\n files.extend(traverse_dir(full_path + \"/\"))\n else:\n files.append(full_path)\n return files",
"def get_all_audio(self):\n return [x.file for x in self.audio_data.values()]",
"def sort_files(dir_path: str) -> List:\n entries = Path(dir_path)\n files = []\n for entry in entries.iterdir():\n files.append(entry.name)\n sorted_files = natsort.natsorted(files, reverse=False)\n return sorted_files",
"def read_all_files_directory(self, path):\n check = Apios.check_files_in_directory(self, path)\n if check:\n src = path + \"*\"\n files = glob.iglob(src)\n for name in files:\n try:\n with open(name) as f:\n sys.stdout.write(f.read())\n except IOError:\n print_exc()",
"def get_all_fastq_files(data_dir):\r\n\r\n pattern = fn_translate(\"*.fastq.*\")\r\n fastq_files_path = path.join(data_dir, \"Data\", \"Intensities\", \"BaseCalls\")\r\n\r\n try:\r\n file_list = listdir(fastq_files_path)\r\n fastq_file_list = [path.join(fastq_files_path, file)\r\n for file in file_list if re.match(pattern, file)]\r\n fastq_file_list.sort()\r\n\r\n except OSError:\r\n msg = \"Invalid directory \" + fastq_files_path\r\n print(msg)\r\n\r\n return fastq_file_list",
"def get_filenames(dir_name):\n return os.listdir(dir_name)",
"def _collect_files_from_dir(directory, prefix, suffix, recursive, result):\n\n # Iterate through the entries of the directory.\n for name in sorted(os.listdir(directory)):\n path = os.path.abspath(os.path.join(directory, name))\n if os.path.isdir(path) and recursive is True:\n _collect_files_from_dir(path, prefix, suffix, recursive, result)\n elif os.path.isfile(path):\n # Check, if prefix/suffix matches.\n if not name.startswith(prefix):\n continue\n if not name.endswith(suffix):\n continue\n result.append(path)",
"def list_all_files(src_directory):\n cwd = os.getcwd()\n os.chdir(src_directory)\n files = []\n\n for file in glob.glob(\"*\"):\n files.append(file)\n os.chdir(cwd)\n\n return files",
"def read_sample_dir(db_dir, sample_dir):\n # save startdir\n startdir = os.getcwd()\n\n # specify set of samples using dir name\n os.chdir(os.path.join(db_dir, sample_dir))\n cwd = os.getcwd()\n\n # read files in cwd\n files = os.listdir('.')\n\n # make sure all files are sample files and sort before return\n def check_sample_file(filename):\n if filename.startswith('sample') and filename.endswith('pickle'):\n return True\n else:\n return Fale\n\n sample_files = [f for f in sorted(files) if check_sample_file(f)]\n\n # change directory back to start dir\n os.chdir(startdir)\n\n return sample_files",
"def get_dat(dir):\n os.chdir(dir)\n all_dat = []\n for file in glob.glob(\"*.dat\"):\n all_dat.append(file)\n return all_dat",
"def _file_list(self, directory, excluded=\"\"):\n for dirname, dirnames, filenames in os.walk(directory):\n for filename in filenames:\n if filename not in excluded:\n yield os.path.join(dirname, filename)",
"def json_files(rootpath):\n result = list()\n for root, _, filenames in os.walk(rootpath):\n goodfiles = fnmatch.filter(filenames, '*.json')\n result.extend(os.path.join(root, f) for f in goodfiles)\n return result",
"def sub_files(directory, file_suffix, prefix=None):\n file_list = []\n if prefix is not None:\n for filename in sorted(glob.glob('{}/{}*{}'.format(directory, prefix, file_suffix)), key=numerical_sort):\n file_list.append(filename)\n else:\n for filename in sorted(glob.glob('{}/*{}'.format(directory, file_suffix)), key=numerical_sort):\n file_list.append(filename)\n return file_list",
"def get_all_files(directory_str):\n ret_list = []\n\n # only reading .out files as they contain the input parameters we need (func name, precision),\n # along with the output parameters we need (gflops)\n for f in os.listdir(os.fsencode(directory_str)):\n filename = os.fsdecode(f)\n if filename.endswith(\".out\"):\n ret_list.append(os.path.join(directory_str, filename))\n\n return ret_list"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets the original_filename of this Job.
|
def original_filename(self, original_filename):
self._original_filename = original_filename
|
[
"def set_filename(self, filename):\n return",
"def setfilename(self, filename):\n if os.path.exists(filename):\n self._filename = filename\n self.backup_filename = filename + self.backup_ext\n else:\n raise ValueError(\"Invalid path '%s' passed to ScriptFile\" % (filename))",
"def SetFileName(self, fileName):\n self.fileName = os.path.basename(fileName)\n self.fileName = os.path.splitext(self.fileName)[0]",
"def setFilename(self, filename):\n name = __file__\n\n if filename in [\"default.log\", \"\"]:\n # Don't use default.log. Instead use the name of the script.log\n name = name[name.rfind(\"/\")+1:name.find(\".py\")]\n filename = name + \".log\"\n self.Filename = filename\n else:\n self.Filename = filename",
"def restoreOriginalName(self) -> None:\n ...",
"def __setFileName(self, fileName):\n # fileName name of file\n\n self.__fileName = fileName # store audio file name",
"def set_filename(self, filename):\r\n self.filename = filename\r\n self.logger = logging.getLogger(filename)",
"def filename(self, filename: \"str\"):\n self._attrs[\"filename\"] = filename",
"def SetFileName(self, *args) -> \"void\":\n return _ITKIOImageBaseBasePython.itkImageIOBase_SetFileName(self, *args)",
"def set_experiment_file(self, filename):\n self.experiment_file = filename",
"def setFilename(self, filename: 'char const *') -> \"void\":\n return _coin.ScXMLDocument_setFilename(self, filename)",
"def set_default_filename(cls, default_filename: str) -> None:\n if default_filename:\n filename_dict = {'default_filename': default_filename}\n cls.__save(filename_dict)",
"def _rename_filename(self, filename):\n directory = os.path.dirname(self._filepath) # keep the same path\n extension = os.path.splitext(self._filepath)[1] # keep the extension\n\n # Concatenate the new path for the file, rename the file and update the\n # _filepath variable.\n new_path = os.path.join(directory, filename + extension)\n os.rename(self._filepath, new_path)\n self._filepath = new_path",
"def _getModifiedFileName(self, originalFileName, suffix):\n originalFileName = originalFileName.split(self._GEOM_FILE_EXTENSION)[0]\n suffix = suffix.split(self._GEOM_FILE_EXTENSION)[0]\n self.modifiedFileName = originalFileName + suffix + self._GEOM_FILE_EXTENSION",
"def _update_filename(self):\n self.string_time = h.time_for_name()\n self.filename = self.filename_root + self.string_time",
"def set_inp_filename(self, filename=None):\n if filename is not None:\n p = Path(filename).absolute()\n self.inp_script = p.parent / f'{p.stem}.inp'\n self.inp_template = p.parent / f'{p.stem}_tmp.inp'\n self.Script.set_template_filename(self.inp_script)\n return",
"def set_filename(self, name):\n\t\tself.cfg.set_str(ROOTKEY, 'filename', os.path.basename(name))",
"def file_name(self, file_name: str):\n self._occurrence_data['fileName'] = file_name",
"def get_filename_old(self):\n\n # NOTE: This is just the filename, not the absolute filename path\n return self.validfilenameold",
"def setName(self, new_name):\n self.__NAME = new_name\n self.__file = self.deepCopy(self.__NAME, self.__DIR).__file"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets the source_url of this Job.
|
def source_url(self, source_url):
self._source_url = source_url
|
[
"def set_source_path(self, source_path):\n\n self.source_path = source_path",
"def campaign_source(self, campaign_source):\n \n self._campaign_source = campaign_source",
"def url(self, url):\n self._url = url",
"def set_server_url(self, url: str):\n self.url = url",
"async def set_url(self, url: str):\n self.preview_embed.url = url",
"def source(self, source):\n allowed_values = [\"None\", \"Embedded\", \"File\", \"Template\"]\n if source not in allowed_values:\n raise ValueError(\n \"Invalid value for `source` ({0}), must be one of {1}\"\n .format(source, allowed_values)\n )\n\n self._source = source",
"def setSrcAttribute(self, src: 'char const *') -> \"void\":\n return _coin.ScXMLParallelElt_setSrcAttribute(self, src)",
"def set_url(self, url):\n self._attributes[VOPROV['url']] = {url}",
"def preview_url(self, value):\n self.logger.warn(\n \"Setting values on preview_url will NOT update the remote Canvas instance.\"\n )\n self._preview_url = value",
"def bundle_url(self, bundle_url):\n self._bundle_url = bundle_url",
"def setUrl(self, url):\n self.__url = url\n self.__fileExt = url.split('.')[-1]",
"def source_client(self, source_client):\n\n self._source_client = source_client",
"def set_external_url(url):",
"def instance_url(self, instance_url):\n\n self._instance_url = instance_url",
"def set_source(self, source):\n self.widget().setHtml(source)",
"def source_provider(self, source_provider):\n self._source_provider = source_provider",
"def event_url(self, event_url):\n\n self._event_url = event_url",
"def set(self, url):\n self.crawled_url.append(url)",
"def set_apk_source(self, source: str):\n\n if not os.path.exists(source):\n raise Exception('Source {0} not found.'.format(source))\n\n self.apk_source = source\n\n return self",
"def setSrcAttribute(self, src: 'char const *') -> \"void\":\n return _coin.ScXMLStateElt_setSrcAttribute(self, src)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets the progress of this Job.
|
def progress(self, progress):
self._progress = progress
|
[
"def update_progress(self, value):\n self.progress.setValue(value)",
"def setProgress(self, n, m):\n pass",
"def set_progress(self, value):\n\n if self.active_socket is not None:\n msg = 'PROGRESS %f\\n' % float(value)\n try:\n self.active_socket.send(msg)\n except socket.error:\n pass",
"def SetProgress(self, percent):\r\n self.gauge.SetValue(percent)",
"def update_progress(self, progress):\n ## Modify this to change the length of the progress bar\n barLength = 10\n status = \"\"\n if isinstance(progress, int):\n progress = float(progress)\n if not isinstance(progress, float):\n progress = 0\n status = \"error: progress var must be float\\r\\n\"\n if progress < 0:\n progress = 0\n status = \"Halt...\\r\\n\"\n if progress >= 1:\n progress = 1\n status = \"Done...\\r\\n\"\n block = int(round(barLength*progress))\n text = \"\\rPercent: [{0}] {1}% {2}\".format( \"#\"*block + \"-\"*(barLength-block), progress*100, status)\n sys.stdout.write(text)\n sys.stdout.flush()",
"def update(self, progress):\n raise NotImplementedError()",
"def progress(self):\n now = time()\n if now - self.last_progress_t > 1:\n self.last_progress_t = now\n if self.status:\n self.uplink.status(self.status)\n self.status = None\n self.__spill_counters()\n self.uplink.progress(self.progress_value)\n self.uplink.flush()",
"def _update_progress(self, data):\n self._progress_bars.set_value(sample=data.sample,\n project=data.project,\n value=data.progress)",
"def setGameInProgress(self, value):\n self.inProgress = value",
"def emit(self, progress):\n socketio.emit('update',\n {\n 'model_id': self._model_id,\n 'update': 'progress',\n 'progress': progress,\n },\n namespace='/jobs',\n room='job_management'\n )\n # micro sleep so that emit is broadcast to the client\n time.sleep(0.001)",
"def test_set_progress(self):\n pass",
"def progress(self):\n \n return 100 if self.status == STATUS_COMPLETE else 0",
"def set_progress_step(n: int):\n global _progress_N, _progress_i \n if n > 0:\n _progress_N = n\n _progress_i = 0\n return",
"def update_RunProgressBar(self,run):\n self.progBarRun.setValue(run)",
"def progress(self, message):\n\n self._sysroot.progress(message, component=self)",
"def __progress(to_download, downloaded, to_upload, uploaded):\n\n del to_upload\n del uploaded\n\n if to_download != 0 and downloaded != 0:\n\n percent_completed = float(downloaded) / to_download\n rate = round(percent_completed * 100, ndigits=2)\n completed = \"#\" * int(rate)\n spaces = \" \" * (100 - int(rate))\n\n sys.stdout.write('\\r[%s%s] %s%%' % (completed, spaces, rate))\n sys.stdout.flush()",
"def progress(self, message):\n\n self._message_handler.progress_message(message)",
"def progress(self):\n self.remaining_duration -= 1",
"def progress(self) -> int:\n return pulumi.get(self, \"progress\")",
"def set_RunProgressBar(self):\n self.progBarRun.setRange(0,int(self.numRunsBox.text()))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets the bitrate of this Job.
|
def bitrate(self, bitrate):
self._bitrate = bitrate
|
[
"def setBitrate(self, bitrate):\n try:\n # bypassed by request from Ivan\n if (pu.pxpconfig.IgnoreVideoSettings()):\n dbg.prn(dbg.TDK,\"td -- SetBitrate BYBASSED\")\n return\n \n url = \"http://\"+self.ip+\"/cgi-bin/api.cgi\"\n if(not self.tdSession):\n self.login()\n #end if not tdSession\n dbg.prn(dbg.TDK,\"logged in: \", self.tdSession)\n url +=\"?session=\"+self.tdSession\n # bitrate for teradek should be in bps not in kbps:\n bitrate = bitrate * 1000\n dbg.prn(dbg.TDK,\"NEW BITRATE:.....................................\", bitrate)\n setcmd = \"&VideoEncoder.Settings.1.bitrate=\"+str(bitrate)\n savecmd = \"&q=VideoEncoder.Settings.1.bitrate\"\n\n dbg.prn(dbg.TDK,\"setting...\")\n iter1=5\n answer = False\n while(not answer and iter1>=0):\n answer = pu.io.url(url+\"&command=set\"+setcmd,timeout=10)\n iter1 -= 1\n dbg.prn(dbg.TDK,answer)\n # apply settings\n dbg.prn(dbg.TDK,\"applying...\")\n iter1=5\n answer = False\n while(not answer and iter1>=0):\n answer = pu.io.url(url+\"&command=apply\"+savecmd,timeout=10)\n iter1 -= 1\n dbg.prn(dbg.TDK,answer)\n # save the settings\n dbg.prn(dbg.TDK,\"saving...\")\n iter1=5\n answer = False\n while(not answer and iter1>=0):\n answer = pu.io.url(url+\"&command=save\"+savecmd,timeout=10)\n iter1 -= 1\n dbg.prn(dbg.TDK,answer)\n self.updatedb() \n dbg.prn(dbg.TDK,\"td_cube.setbitrate:\", bitrate, self.ip) \n except Exception as e:\n dbg.prn(dbg.TDK|dbg.ERR,\"[---]encTeradek.setBitrate:\", e, sys.exc_info()[-1].tb_lineno)",
"def bitrate(self):\n b = 0\n if 'bit_rate' in self.__dict__:\n try:\n b = int(self.__dict__['bit_rate'])\n except Exception as e:\n pass\n return b",
"def blockJobSetSpeed(self, disk, bandwidth, flags=0):\n ret = libvirtmod.virDomainBlockJobSetSpeed(self._o, disk, bandwidth, flags)\n if ret == -1: raise libvirtError ('virDomainBlockJobSetSpeed() failed', dom=self)\n return ret",
"def mpeg_bit_rate_test(self, mpeg_bit_rate_test):\n\n self._mpeg_bit_rate_test = mpeg_bit_rate_test",
"def video_bitrate(self, video_bitrate):\n allowed_values = [\"high\"] # noqa: E501\n if self.local_vars_configuration.client_side_validation and video_bitrate not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `video_bitrate` ({0}), must be one of {1}\" # noqa: E501\n .format(video_bitrate, allowed_values)\n )\n\n self._video_bitrate = video_bitrate",
"def set_video_stream_param(\n self, streamtype, resolution, bitrate, framerate, gop, isvbr,\n callback=None):\n params = {\n \"streamType\": streamtype,\n \"resolution\": resolution,\n \"bitRate\": bitrate,\n \"frameRate\": framerate,\n \"GOP\": gop,\n \"isVBR\": isvbr}\n return self.execute_command(\n \"setVideoStreamParam\", params, callback=callback)",
"def set_blocksize(blocksize):\n return C.blosc_set_blocksize(blocksize)",
"def __set_baudrate(self, baud_rate):\n response = subprocess.call([\"sudo\", \"ip\", \"link\", \"set\", canSend.can_interface, \"down\"])\n if response != 0:\n print(\"Error: Cannot deactivate '{0}' interface\".format(canSend.can_interface))\n print(response)\n response = subprocess.call(\n [\"sudo\", \"ip\", \"link\", \"set\", canSend.can_interface, \"type\", \"can\", \"bitrate\", str(baud_rate)])\n if response != 0:\n print(\"Error: Cannot set {0} baudrate for interface '{1}'\".format(baud_rate, canSend.can_interface))\n print(response)\n response = subprocess.call([\"sudo\", \"ip\", \"link\", \"set\", canSend.can_interface, \"up\"])\n if response != 0:\n print(\"Error: Cannot eactivate '{0}' interface\".format(canSend.can_interface))\n print(response)\n self.__print_actual_baudrate()",
"def set_bandwidth_limit(self, value='BWFULL'):\n #CMD$=“BWL C1,ON”\n print debug_msg.TBD_MSG",
"def set_bandwidth(self, bandwidth):\r\n self.obs.bandwidthHz = float(bandwidth)\r\n self.ave.bandwidthHz = float(bandwidth)\r\n self.hot.bandwidthHz = float(bandwidth)\r\n self.cold.bandwidthHz = float(bandwidth)\r\n self.ref.bandwidthHz = float(bandwidth)\r\n deltaNu = self.obs.bandwidthHz/float(self.vlen)\r\n n0 = self.obs.centerFreqHz - (self.obs.bandwidthHz/2.)\r\n nu = n0\r\n if len(self.ave.xdata) != self.vlen:\r\n self.update_len(self.ave)\r\n if len(self.hot.xdata) != self.vlen:\r\n self.update_len(self.hot)\r\n if len(self.cold.xdata) != self.vlen:\r\n self.update_len(self.cold)\r\n if len(self.ref.xdata) != self.vlen:\r\n self.update_len(self.ref)\r\n print(\"Setting Bandwidth: %10.0f Hz\" % (self.obs.bandwidthHz))\r\n for iii in range(self.vlen):\r\n self.obs.xdata[iii] = nu\r\n self.ave.xdata[iii] = nu\r\n self.hot.xdata[iii] = nu\r\n self.cold.xdata[iii] = nu\r\n self.ref.xdata[iii] = nu\r\n nu = nu + deltaNu",
"def set_buffer_size(self, buffer_size):\n self.buffer_size = buffer_size",
"def cmd_set_media_and_quality(self):\n logger.debug(\"--> cmd_set_media_and_quality\")",
"def set_bunit(self,bunit):\n self.bunit = bunit",
"def setB(self,b):\n\n if b not in [25, 50, 100, 200, 400, 800, 1600]:\n raise KeccakError.KeccakError('b value not supported - use 25, 50, 100, 200, 400, 800 or 1600')\n\n # Update all the parameters based on the used value of b\n self.b=b\n self.w=b//25\n self.l=int(math.log(self.w,2))\n self.nr=12+2*self.l",
"def SetBaud (self, baud=115200):\n\t\tif baud == 19200: output = chr(0) # Set to 19200 baud\n\t\telse: output = chr(1) # Set to 115200 baud\n\t\tself.SendCommand (33,output)",
"def video_frame_rate(self, video_frame_rate):\n\n self._video_frame_rate = video_frame_rate",
"def setFramerate(self, framerate):\n\n self.framerate = framerate",
"def set_bandwidth(self, out_bw, in_bw):\n self.m_outbound_bw = out_bw\n self.m_inbound_bw = in_bw",
"def mcuSetBaudRate(self, baudrate):\r\n if baudrate >= 0 and baudrate <= 12:\r\n self.mcuserial.write('b' + chr(baudrate) + chr(0))\r\n eb = \"\"\r\n while eb != \"eb\":\r\n eb = self.mcuserial.read(2)\r\n else:\r\n sys.stderr.write('Invalid Baud Rate Value, Valid Values are [0-6] See Help For More Details\\n')",
"def band(self, band: float):\n\n self._band = band"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets the sample_rate of this Job.
|
def sample_rate(self, sample_rate):
self._sample_rate = sample_rate
|
[
"def change_sampling_rate(self, sampling_rate):\n\n self.sampling_rate = sampling_rate",
"def _set_sample_rate(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'2..16777215']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(32768), is_leaf=True, yang_name=\"sample-rate\", rest_name=\"sample-rate\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Interface sampling rate', u'cli-full-command': None}}, namespace='urn:brocade.com:mgmt:brocade-sflow', defining_module='brocade-sflow', yang_type='uint32', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"sample_rate must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'2..16777215']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(32768), is_leaf=True, yang_name=\"sample-rate\", rest_name=\"sample-rate\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Interface sampling rate', u'cli-full-command': None}}, namespace='urn:brocade.com:mgmt:brocade-sflow', defining_module='brocade-sflow', yang_type='uint32', is_config=True)\"\"\",\n })\n\n self.__sample_rate = t\n if hasattr(self, '_set'):\n self._set()",
"def set_sample_rate(self, sample_rate):\n self.dtg.write('TBAS:FREQ {0:e}'.format(sample_rate))\n return self.get_sample_rate()",
"def sample_rate(self) -> int:\n logger.debug(\"'self._sample_rate' is set to %.1f [Hz].\", self._sample_rate)\n return self._sample_rate",
"def sample_rate(self) -> float:\n return self._rate",
"def _set_samplerate(self, samplerate):\r\n raise NotImplementedError",
"def change_rate(self, new_rate):\n with self._lock:\n self.update_rate = new_rate",
"def sample_format(self, sample_format):\n self._sample_format = sample_format",
"def set_sample_time(self, sample_time):\n self.validate_sample_time(sample_time)\n self.sample_time = sample_time",
"def audio_sampling_rate_test(self, audio_sampling_rate_test):\n\n self._audio_sampling_rate_test = audio_sampling_rate_test",
"def set_learning_rate(self, learning_rate):\n self.learning_rate = learning_rate",
"def setFaceRecognitionRate(self, face_rate):\n self.face_recognition_rate = face_rate",
"def setRate(self, rate):\n if rate > 10:\n self.printError(\"Too high repetition rate\")\n return -1\n if rate == 0:\n pulse_division = 0\n else:\n pulse_division = int(10 / rate)\n\n self.setParameter(\"setPulseDivision\", format(pulse_division, \"03\"))",
"def setGenderRecognitionRate(self, gender_rate):\n self.gender_recognition_rate = gender_rate",
"def data_rate(self, data_rate):\n\n self._data_rate = data_rate",
"def sampling_rate(self):\n return self.file.sampling_rate",
"def sample_rate(self, idx):\n return int(self.audio_streams[idx][\"sample_rate\"])",
"def setScheduleRate(self, rate, unit='hz'):\n DPxSetDoutSchedRate(rate, unit)",
"def set_repetition_rate(self, rate):\n if not (type(rate) == int or type(rate) == float) or rate < 1 or rate > 5:\n raise ValueError(\"Laser repetition rate must be a positive number from 1 to 5!\")\n\n response = self._send_command(\"RR \" + str(rate))\n if response == b\"ok\\r\\n\":\n self.repRate = rate\n self.burstDuration = self.burstCount / self.repRate\n return True\n raise LaserCommandError(Laser.get_error_code_description(response))",
"def set_sampling_rate(address, name, sampling_rate):\n explore = explorepy.explore.Explore()\n explore.connect(mac_address=address, device_name=name)\n explore.set_sampling_rate(int(sampling_rate))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets the file_size of this Job.
|
def file_size(self, file_size):
self._file_size = file_size
|
[
"def set_file_size(self, file_size):\n self.file_size = file_size",
"def setFilmSize(self, size):\n self.filmSize = size",
"def set_cache_size(self, size):\n self.__cache_max_size = size",
"def set_cache_size(self, size):\n pass",
"def model_size(self, model_size):\n\n self._model_size = model_size",
"def f_bsize(self, f_bsize):\n \n self._f_bsize = f_bsize",
"def set_buffer_size(self, buffer_size):\n self.buffer_size = buffer_size",
"def set_size_threshold(self, size_threshold: int) -> 'FileCompactStrategy.Builder':\n self._j_builder.setSizeThreshold(size_threshold)\n return self",
"def setStepSize(self,size):\n self.stepSize = size",
"def add(self, file, fsize):\r\n if (self.size + fsize) > self.max_size:\r\n raise FilesTooBigError()\r\n\r\n self.files[file] = fsize\r\n self.size += fsize",
"def send_file_size(self, filename):\n\t\tlength = os.path.getsize(filename)\n\t\tlength_bytes = pack('>Q', length)\n\t\twith BytesIO(length_bytes) as f:\n\t\t\tself.send_data(f, 8)",
"def uploaded_file_size(self, uploaded_file_size):\n if uploaded_file_size is None:\n raise ValueError(\"Invalid value for `uploaded_file_size`, must not be `None`\") # noqa: E501\n\n self._uploaded_file_size = uploaded_file_size",
"def update_size(self, size):\n self.batch_size_estimation = size\n self.trust_batch_estimation = True",
"def set_minibatch_size(self, size):\n self.batch_size = size",
"def set_size(self, value):\n self._rect.size = value",
"def chunk_size_in(self, chunk_size_in):\n\n self._chunk_size_in = chunk_size_in",
"def set_size(self, size):\n self.height = int(size)\n self.width = int(size) * 2",
"def set_page_size(self, size):\n self._set_paging_property(\"limit\", size)\n if self.has_start():\n self._set_start(0)\n else:\n self.add_start(0)\n return self",
"def get_file_size(self):\n return self.file_size",
"def set_point_size(self, point_size):\n self._point_size = point_size"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets the custom_words of this Job.
|
def custom_words(self, custom_words):
self._custom_words = custom_words
|
[
"def setWords(self, words, style, substyle=-1):\n if substyle >= 0:\n # only supported for sub-styles\n self.__lex.setSubstyleWords(words, style, substyle)",
"def customs(self, customs):\n\n self._customs = customs",
"def setKeywords(self) -> None:\n # Add any new user keywords to leoKeywordsDict.\n d = self.keywordsDict\n keys = list(d.keys())\n for s in g.globalDirectiveList:\n key = '@' + s\n if key not in keys:\n d[key] = 'leokeyword'\n # Create a temporary chars list. It will be converted to a dict later.\n chars = [z for z in string.ascii_letters + string.digits]\n chars.append('_') # #2933.\n for key in list(d.keys()):\n for ch in key:\n if ch not in chars:\n chars.append(g.checkUnicode(ch))\n # jEdit2Py now does this check, so this isn't really needed.\n # But it is needed for forth.py.\n for ch in (' ', '\\t'):\n if ch in chars:\n # g.es_print('removing %s from word_chars' % (repr(ch)))\n chars.remove(ch)\n # Convert chars to a dict for faster access.\n self.word_chars: dict[str, str] = {}\n for z in chars:\n self.word_chars[z] = z",
"def set_words(self):\n words = possible_words(self.letters)\n self.word_set = {word for word in words if self.letters[0] in word}",
"def addStopWords(self, words):\n self.stopWords = self.stopWords.union(set(words))",
"def customattributes(self, customattributes):\n\n self._customattributes = customattributes",
"def remove_custom_words(text, custom_wordlist):\n result = [word for word in text.split() if word.lower() not in custom_wordlist]\n return \" \".join(result)",
"def set_custom_cwe_text(self, cwe_id, custom_text):\n params = {'customText' : custom_text}\n return self._request('POST', 'rest/cwe/' + str(cwe_id) + '/setCustomText')",
"def setWordContext(self, wordContext): #$NON-NLS-1$\r",
"def custom_labels(self, custom_labels):\n \n self._custom_labels = custom_labels",
"def custom_voice(self, custom_voice):\n\n self._custom_voice = custom_voice",
"def set_side_words(self, before_word = 'before', after_word = 'after'):\n self.before_word = before_word\n self.after_word = after_word",
"def buildDict(self, words):\n for w in words:\n self.add(w)",
"def set_spoken(self, spoken_forms):\n for ii in range(len(spoken_forms)):\n self.words[ii][1] = spoken_forms[ii]",
"def custom_data(self, custom_data):\n if not isinstance(custom_data, CustomData):\n custom_data = CustomData(custom_data)\n self['custom_data'] = custom_data",
"def load_words(self):\n for grade in c.GRADES:\n self.add_words_from_grade(grade)",
"def custom_workhours(self, custom_workhours):\n\n self._custom_workhours = custom_workhours",
"def new_words(self, words):\n self.solutions = [self.solve_word(word) for word in words]",
"def load_word_set(self, file_name=\"words.txt\"):\n file_path = self._find_word_set_path(file_name)\n\n # Remove leading and trailing whitespace from the words, and seperate them by line.\n self.word_set = set(self._load_text_from_file(file_path).strip(' ').split('\\n'))",
"def set_pre_tokenizer(self, custom_pre_tokenizer: CPT):\n self.pre_tokenizer = PreTokenizer.custom(custom_pre_tokenizer)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Whether or not this provider supports a given URI.
|
def supports(uri: str) -> bool:
is_remote, is_git = check_url(uri)
return is_remote and is_git
|
[
"def supports(uri):\n is_doi_ = is_doi(uri)\n\n is_dataverse_uri = is_doi_ is None and check_dataverse_uri(uri)\n is_dataverse_doi = is_doi_ and check_dataverse_doi(is_doi_.group(0))\n\n return is_dataverse_uri or is_dataverse_doi",
"def get_supported_uri(self, uris):\n transports = _conf.get(None, 'transports')\n for transport in transports:\n for uri in uris:\n if transport in uri:\n return uri\n return None",
"def is_uri(val: str = None) -> bool:\n is_valid = False\n validator = validators.Validator().allow_schemes(\n \"http\", \"https\", \"ftp\"\n ).require_presence_of(\n \"scheme\", \"host\"\n ).check_validity_of(\n \"scheme\", \"host\", \"path\"\n )\n uri = uri_reference(val)\n try:\n validator.validate(uri)\n is_valid = True\n except (InvalidComponentsError, MissingComponentError, UnpermittedComponentError) as ex:\n logger.debug(ex)\n return is_valid",
"def detect(self, uri):\n return uri.startswith(self.scheme + ':')",
"def isUri( uri ):\n\n # check uri\n return 'gs://' in uri",
"def is_uri(uri):\n scheme, netloc, path, params, query, fragment = urlparse(uri)\n if scheme and netloc and path:\n return True\n return False",
"def can_read(uri):\n\tif uri is None:\n\t\traise ValueError(\"Provided URI is None.\")\n\ttry:\n\t\tparsed = urllib.parse.urlparse(uri)\n\texcept ValueError: #Badly-formed IPv6 address.\n\t\treturn False #We don't care. We can only read locally anyway.\n\n\tif parsed.scheme != \"file\": #Can only read from file names.\n\t\treturn False\n\tif not parsed.path or parsed.path[-1] == \"/\": #Must have a file name, not a directory.\n\t\treturn False\n\treturn True",
"def has_valid_scheme(uri: ParseResult) -> bool:\n scheme = uri.scheme\n return scheme == 'ws' or scheme == 'warp'",
"def _resource_is_supported(self, resource_type):\n return resource_type == self.SUPPORTED_RESOURCE_TYPE",
"def supports_provider_query(self):\n return # boolean",
"def can_parse_url(self, url):\n raise NotImplementedError",
"def is_supported(cls, path):\n raise NotImplementedError",
"def is_referenced(self, uri):",
"def has_provider_for_url(url):\n registry = get_oembed_providers()\n return registry.provider_for_url(url) is not None",
"def supports(self, lookup_str: str) -> bool:\n return lookup_str.startswith('resolve:')",
"def _isUriMultiple(uri):\n if len(uri) > 1:\n return True\n return False",
"def test_check_uri(self):\n # OK\n self.assertTrue(SiteService.check_uri(\"localhost:12345\"))\n self.assertTrue(SiteService.check_uri(\"www.google.com:12345\"))\n self.assertTrue(SiteService.check_uri(\"127.0.0.1:12345\"))\n # Missing Port\n self.assertFalse(SiteService.check_uri(\"localhost:\"))\n # Missing seperator\n self.assertFalse(SiteService.check_uri(\"localhost\"))\n self.assertFalse(SiteService.check_uri(\"localhost12345\"))\n self.assertFalse(SiteService.check_uri(\"localhost@12345\"))\n # Starts with invalid char\n self.assertFalse(SiteService.check_uri(\"_localhost:12345\"))\n self.assertFalse(SiteService.check_uri(\".localhost:12345\"))\n # Non-numeric port\n self.assertFalse(SiteService.check_uri(\"localhost:bah\"))",
"def can_create_url(self):\n return (self.allowed_postfixes is not None)",
"def is_file_uri(self, uri):\n return(re.match('file:', uri) or not re.match('\\w{3,4}:', uri))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Update dataset files from the remote provider.
|
def update_files(
self,
files: List[DynamicProxy],
dry_run: bool,
delete: bool,
context: Dict[str, Any],
ref: Optional[str] = None,
**kwargs,
) -> List["DatasetUpdateMetadata"]:
from renku.core.dataset.providers.models import DatasetUpdateAction, DatasetUpdateMetadata
if "visited_repos" not in context:
context["visited_repos"] = {}
progress_text = "Checking git files for updates"
results: List[DatasetUpdateMetadata] = []
try:
communication.start_progress(progress_text, len(files))
for file in files:
communication.update_progress(progress_text, 1)
if not file.based_on:
continue
based_on = file.based_on
url = based_on.url
if url in context["visited_repos"]:
remote_repository = context["visited_repos"][url]
else:
communication.echo(msg="Cloning remote repository...")
path = get_cache_directory_for_repository(url=url)
remote_repository = clone_repository(url=url, path=path, checkout_revision=ref)
context["visited_repos"][url] = remote_repository
checksum = remote_repository.get_object_hash(path=based_on.path, revision="HEAD")
found = checksum is not None
changed = found and based_on.checksum != checksum
src = remote_repository.path / based_on.path
dst = project_context.metadata_path.parent / file.entity.path
if not found:
if not dry_run and delete:
delete_dataset_file(dst, follow_symlinks=True)
project_context.repository.add(dst, force=True)
results.append(DatasetUpdateMetadata(entity=file, action=DatasetUpdateAction.DELETE))
elif changed:
if not dry_run:
# Fetch file if it is tracked by Git LFS
pull_paths_from_storage(remote_repository, remote_repository.path / based_on.path)
if is_linked_file(path=src, project_path=remote_repository.path):
delete_dataset_file(dst, follow_symlinks=True)
create_external_file(target=src.resolve(), path=dst)
else:
shutil.copy(src, dst)
file.based_on = RemoteEntity(
checksum=checksum, path=based_on.path, url=based_on.url # type: ignore
)
results.append(DatasetUpdateMetadata(entity=file, action=DatasetUpdateAction.UPDATE))
finally:
communication.finalize_progress(progress_text)
return results
|
[
"def pull_all_data_dependencies(self):\n self._overwrite_dvc_config()\n\n # checkout dvc pull files according to git checkout\n subprocess.check_call([\"dvc\", \"pull\", \"-r\", self.remote_repo])\n logging.getLogger(__name__).info(\"Pulling right data version from remote dvc storage... \"\n \"Done\")",
"def update_all_datasets(\n self, plan_year: str, date: Date = Date.today(), only_file: Optional[str] = None\n ) -> None:\n logger.info(f\"Updating {self.env} datasets on Socrata for {date}\")\n self.load_tracker_table()\n with create_data_dir():\n # Fetch and unzip zip file for this env and date\n zip_file_path: Path = self.fetch_zip_file(plan_year, date)\n extract_dir_path: Path = self.unzip_zip_file(zip_file_path)\n\n # Iterate over all existing datasets in tracker\n failed_updates: List[Tuple[str, str]] = []\n for file_name, dataset_id in self.tracker_table[self.env].items():\n data_file_path = extract_dir_path / file_name\n\n # If only_file is specified, skip files not matching the supplied filename\n if only_file is not None and data_file_path.name != only_file:\n continue\n\n # Skip files not included in this date's release or not in files table\n if not data_file_path.exists() or data_file_path.name not in self.files_table:\n continue\n\n # Update dataset, skipping this one if we get an error\n try:\n dataset_id: str = self.update_dataset(dataset_id, data_file_path, date)\n except Exception:\n logger.exception(f\"Failed to update dataset {dataset_id}: {data_file_path}\")\n failed_updates.append((file_name, dataset_id))\n continue\n logger.info(\n f\"Finished updating {self.env} datasets for {date}; {len(failed_updates)} failures\"\n )\n for file_name, dataset_id in failed_updates:\n logger.info(f\"Failed to update {file_name} [{self.env}] ({dataset_id})\")",
"def update_datapackage(self):\n\n if self.source == \"git\":\n file_content = _get_data_from_url(self.metadata_uri)\n\n if not file_content.status_code == 200:\n file_error_msg = \"Could not fetch remote file: {}; {}\".format(\n self.url, file_content.status_code\n )\n click.ClickException(file_error_msg)\n # file_content = json.dumps([{\"url\": self.url, \"error\": file_error_msg}])\n else:\n file_content = file_content.json() # .decode(self.decode)\n elif self.source == \"s3\":\n raise NotImplementedError(\n \"Directly get dataherb.json from S3 is not yet implemented.\"\n )\n\n self.datapackage_meta = file_content\n\n self.herb_meta_json[\"datapackage\"] = self.datapackage_meta\n\n self.datapackage = Package(self.datapackage_meta)\n\n return self.datapackage",
"def syncFileUris(self):\n \n fileDirs = []\n for fileUri in self.fileUris:\n if XnatSlicerUtils.isDICOM(fileUri):\n fileDir = os.path.dirname(fileUri)\n if not fileDir in fileDirs:\n fileDirs.append(fileDir)\n\n \n self._oldSrc = None\n if len(fileDirs) == 1 and self._dst != None:\n self._oldSrc = self._src\n self._oldDst = self._dst\n self._src = self._src.split('/data/')[0] + fileDirs[0] # + '?format=zip'\n self._dst = self._dstBase + fileDirs[0] + '.zip'\n\n #--------------------\n # Remove any folders that \n # exist after the 'files' level in self._src\n # We don't need them.\n #-------------------- \n splitter = 'files'\n self._src = self._src.split(splitter)[0] + splitter + '?format=zip'\n \n \n\n #--------------------\n # Update the download popup\n #-------------------- \n if self.MODULE.Workflow_Load.XnatDownloadPopup and self._oldSrc:\n self.MODULE.Workflow_Load.XnatDownloadPopup.changeRowKey(\\\n self._oldSrc.replace('?format=zip', ''),\\\n self._src.replace('?format=zip', ''))",
"def fetch(self):\n\n # Safely call since idempotent\n self.initialize()\n\n self._logger.info('Syncing analysis metadata from Anchore service')\n old_processed = self._images_processed\n result = self.resource_cache.get(self.remote_sync_url)\n if self.remote_sync_url.endswith('.tar.gz'):\n # TODO adding this temporarily while new feed service is in progress\n tar = tarfile.open(result['content'])\n try:\n member = tar.getmember('engine.json')\n tar.extract(member, path=self.sync_dir)\n except:\n pass\n tar.close()\n\n # as opposed to this\n # tarfile.open(result['content']).extractall(path=self.sync_dir)\n\n self._images_processed = self._load_processed_map()",
"def update_cache(self):\n data_dir = self.get_download_data_dir()\n if self.exists_dataset_in_cache():\n self.update_dataset_info_in_cache(data_dir)\n else:\n self.add_dataset_info_to_cache(data_dir)",
"def download_and_place(uid, data_path, replace=False):\n if not data_sources.get(uid):\n print(f\"Data download failed, no datasource named {uid}\")\n return\n\n # link_path = os.path.join(data_path, data_sources[uid][\"link\"])\n link_path = pathlib.Path(data_sources[uid][\"link\"])\n version_tag = data_sources[uid][\"version\"]\n version_dir = os.path.join(data_path, \"versioned_data/\" + uid + \"_\" + version_tag)\n\n # check for current version\n if os.path.exists(version_dir):\n print(\n f\"Existing data source ({uid}) version ({version_tag}) is current. Data located: '{version_dir}'. Symblink: '{link_path}'.\"\n )\n replace_existing = (\n replace if replace else prompt_yes_no(\"Replace versioned data?\")\n )\n\n if replace_existing:\n clean_data(uid, data_path)\n else:\n print(\"=======================================================\")\n print(\n f\"Not replacing data, generating symlink ({link_path}) and aborting download.\"\n )\n print(\"=======================================================\")\n # create a symlink to the versioned data\n if link_path.exists():\n os.unlink(link_path)\n elif not link_path.parent.exists():\n link_path.parent.mkdir(parents=True, exist_ok=True)\n os.symlink(src=version_dir, dst=link_path, target_is_directory=True)\n assert link_path.exists(), \"Failed, no symlink generated.\"\n return\n\n # download new version\n download_pre_args = data_sources[uid].get(\"download_pre_args\", \"\")\n download_post_args = data_sources[uid].get(\"download_post_args\", \"\")\n\n download_command = (\n \"wget --continue \"\n + download_pre_args\n + data_sources[uid][\"source\"]\n + \" -P \"\n + data_path\n + download_post_args\n )\n # print(download_command)\n os.system(download_command)\n assert os.path.exists(\n os.path.join(data_path, data_sources[uid][\"package_name\"])\n ), \"Download failed, no package found.\"\n\n # unpack\n package_name = data_sources[uid][\"package_name\"]\n if package_name.endswith(\".zip\"):\n with zipfile.ZipFile(data_path + package_name, \"r\") as zip_ref:\n zip_ref.extractall(version_dir)\n else:\n # TODO: support more compression types as necessary\n print(f\"Data unpack failed for {uid}. Unsupported filetype: {package_name}\")\n return\n\n assert os.path.exists(version_dir), \"Unpacking failed, no version directory.\"\n\n # create a symlink to the new versioned data\n if link_path.exists():\n os.unlink(link_path)\n elif not link_path.parent.exists():\n link_path.parent.mkdir(parents=True, exist_ok=True)\n os.symlink(src=version_dir, dst=link_path, target_is_directory=True)\n\n assert link_path.exists(), \"Unpacking failed, no symlink generated.\"\n\n # clean-up\n os.remove(data_path + package_name)\n\n print(\"=======================================================\")\n print(f\"Dataset ({uid}) successfully downloaded.\")\n print(f\"Source: '{version_dir}'\")\n print(f\"Symlink: '{link_path}'\")\n print(\"=======================================================\")",
"def _patch_file(self, download=True):\n dataset_id = '04abb580-e14b-4716-9ff2-f7b95740b99f'\n dataset = self.one.alyx.rest('datasets', 'read', id=dataset_id)\n # download\n local_file_path = self.one.load(dataset['session'],\n dataset_types=dataset['dataset_type'],\n download_only=True, clobber=True)[0]\n # change it\n np.save(local_file_path, ~np.load(local_file_path))\n new_check_sum = hashfile.md5(local_file_path)\n # try once with dry\n self.patcher.patch_dataset(local_file_path, dset_id=dataset['url'][-36:], dry=True)\n self.patcher.patch_dataset(local_file_path, dset_id=dataset['url'][-36:], dry=False)\n # the dataset hash should have been updated\n dataset = self.one.alyx.rest('datasets', 'read', id=dataset_id)\n self.assertEqual(uuid.UUID(dataset['hash']), uuid.UUID(new_check_sum))\n self.assertEqual(dataset['version'], __version__)\n if download:\n # download again and check the hash\n local_file_path.unlink()\n local_file_path = self.one.load(dataset['session'],\n dataset_types=dataset['dataset_type'],\n download_only=True, clobber=True)[0]\n self.assertEqual(hashfile.md5(local_file_path), new_check_sum)",
"def pull_data_dependency(self, remote_file):\n self._overwrite_dvc_config()\n\n # checkout dvc pull file according to git checkout\n subprocess.check_call([\"dvc\", \"pull\", \"-r\", self.remote_repo,\n \"{0}.dvc\".format(remote_file)])\n logging.getLogger(__name__).info(\"Pulling right data version of file {0} from remote dvc \"\n \"storage... Done\".format(remote_file))",
"def update(self):\n\n self.__check_update_ok()\n self.db.update_dataset_record(self.dataset_dict)",
"def upd_data(self, remote_id, data):\n self.remote_list[hex(remote_id)] = data\n self._do_save()",
"def update(self, data_source_id, update_data):\n\n if self.version >= 2:\n UPDATE_FUNC = self._patch\n else:\n UPDATE_FUNC = self._update\n\n return UPDATE_FUNC('/data-sources/%s' % data_source_id,\n update_data)",
"def move_files(dataset_gateway: IDatasetGateway, files: Dict[Path, Path], to_dataset_name: Optional[str] = None):\n datasets = [d.copy() for d in dataset_gateway.get_all_active_datasets()]\n\n to_dataset: Optional[Dataset] = None\n if to_dataset_name:\n # NOTE: Use the same dataset object or otherwise a race happens if dataset is in both source and destination\n to_dataset = next(d for d in datasets if d.name == to_dataset_name)\n modified_datasets: Dict[str, Dataset] = {}\n\n progress_name = \"Updating dataset metadata\"\n communication.start_progress(progress_name, total=len(files))\n try:\n checksums = project_context.repository.get_object_hashes(\n [file.relative_to(project_context.path) for file in files.values()]\n )\n for src, dst in files.items():\n src = src.relative_to(project_context.path)\n dst = dst.relative_to(project_context.path)\n # NOTE: Files are moved at this point, so, we can use dst\n new_dataset_file = DatasetFile.from_path(dst, checksum=checksums.get(dst))\n\n for dataset in datasets:\n removed = dataset.unlink_file(src, missing_ok=True)\n if removed:\n modified_datasets[dataset.name] = dataset\n new_dataset_file.based_on = removed.based_on\n new_dataset_file.source = removed.source\n\n if not to_dataset and (\n new_dataset_file.linked\n or is_subpath(project_context.path / dst, project_context.path / dataset.get_datadir())\n ):\n dataset.add_or_update_files(new_dataset_file)\n\n # NOTE: Update dataset if it contains a destination that is being overwritten\n modified = dataset.find_file(dst)\n added = is_subpath(project_context.path / dst, project_context.path / dataset.get_datadir())\n if modified or added:\n modified_datasets[dataset.name] = dataset\n dataset.add_or_update_files(new_dataset_file)\n\n if to_dataset:\n to_dataset.add_or_update_files(new_dataset_file)\n\n communication.update_progress(progress_name, amount=1)\n finally:\n communication.finalize_progress(progress_name)\n\n datasets_provenance = DatasetsProvenance()\n modified_dataset_values = list(modified_datasets.values())\n creator = get_git_user(repository=project_context.repository)\n for modified_dataset in modified_dataset_values:\n datasets_provenance.add_or_update(modified_dataset, creator=creator)\n if to_dataset and to_dataset not in modified_dataset_values:\n datasets_provenance.add_or_update(to_dataset, creator=creator)",
"def update_pull(self):\n \n file_path = os.path.join(self.script_dir,'pull list.json') \n if not os.path.isfile(file_path)or os.path.getsize(file_path) == 0 :\n with open(file_path,'w') as out:\n json.dump(self.pull_list,out)\n else:\n with open(file_path) as infile:\n data = json.load(infile)\n data.update(self.pull_list)\n\n with open(file_path,'w') as out:\n json.dump(self.pull_list,out)",
"def _overwrite_dvc_config(self):\n logging.getLogger(__name__).info(\n \"Pulling right data version from remote dvc storage...\")\n # add/overwrite technical user in dvc config\n subprocess.check_call(\n [\"dvc\", \"remote\", \"modify\", self.remote_repo, \"user\", self.user])\n\n # unset ask for password option to avoid prompt\n subprocess.check_call([\"dvc\", \"remote\", \"modify\", self.remote_repo, \"ask_password\",\n \"False\"])\n\n # set password\n subprocess.check_call([\"dvc\", \"remote\", \"modify\", self.remote_repo, \"password\",\n self.password])",
"def update_data_set_buffers(self):\n for dset_name, dset in self.data_sets.items():\n # Update only the data that are needed\n if dset.selected_for_preview or dset_name == self.selected_file_name:\n dset.update_buffers()",
"def update_dataset(self, dataset_id: str, data_file_path: Path, date: Date) -> str:\n # Delete old hanging drafts for this dataset (e.g. from previous failures)\n self.delete_old_hanging_drafts(dataset_id)\n\n # Load dataframe\n dataframe: pd.DataFrame = self.load_dataframe(data_file_path)\n dataset_name = f\"{data_file_path.stem} [{self.env}]\"\n description = f\"Plan Finder dataset {dataset_name}, released on {date}.\"\n\n # Create replace revision on Socrata and publish\n logger.info(f\"Updating dataset {dataset_id} on Socrata: {data_file_path}\")\n view: View = self.client.views.lookup(dataset_id)\n revision: Revision = view.revisions.create_replace_revision(\n metadata={\"description\": description}, permission=SOCRATA_DATASET_PERMISSION\n )\n upload: Source = revision.create_upload(data_file_path.name)\n source: Source = upload.df(dataframe)\n source.wait_for_finish()\n output_schema: OutputSchema = source.get_latest_input_schema().get_latest_output_schema()\n output_schema.wait_for_finish()\n revision.apply(output_schema=output_schema)\n logger.info(f\"Updated dataset: {dataset_id}\")\n return dataset_id",
"def _get_update_data_set_url(self, data_set_id):\n return self.api_url+'/data_sets/'+str(data_set_id)+'/update'",
"def download_dataset(self):\n data_dir = self.get_download_data_dir()\n cache_dir = self.get_download_cache_dir()\n self.download_dataset_files(data_dir, cache_dir)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Verify if server contains the file
|
def verify_if_server_contains_file(self, section="cs_url", path=""):
server = TESTDATA[section][u'server_address']
command = 'ssh {} [ -f {} ] && echo "Found" || echo "Not found"'.format(server, path)
if self.run_bash_command(command, True).strip() != "Found":
self.fail("File not found!")
|
[
"def exist_remote_file(target, path):\n cmd = 'test -f %s' % path\n res = run_ssh(target, cmd)\n if res == 0:\n return True\n return False",
"def file_exists(self, resource: GenomicResource, filename: str) -> bool:",
"def file_exists(self, path):\n path = path.strip('/')\n file_collection = self._get_fs_instance().list()\n if path == '':\n return False\n if path in file_collection:\n return True\n return False",
"def _check_local_exists(direct_path, tail):\n file_name = os.path.join(direct_path, tail)\n if os.path.exists(file_name):\n logging.info(\"%s existed\", tail)\n return True\n return False",
"def check_for_local_file(self, filename=None):\n files = glob.glob(filename)\n return bool(files)",
"def file_exist(file):\n\n if path.isfile(file):\n return True\n else:\n return False",
"def verify_source_exists(origin_file):\n\n return os.path.exists(origin_file)",
"def isavailable(file): # pragma: no cover\n return file",
"def file_exists(self) -> bool:\n return self._path.exists()",
"def check(self):\n exploitable = True\n try:\n # Create file o\n cmd = \"echo 1>o\"\n cmd = cmd.replace(\" \", \"${IFS}\") \n request = \"http://%s:%s/language/Swedish${IFS}&&\" % (self.target, str(self.port))\n request += cmd + \"&&tar${IFS}/string.js\"\n # Send cmd to server\n self.raw_url_request(request)\n # Next create symlink to WebSites dir\n cmd = \"ln o WebSites/o\"\n cmd = cmd.replace(\" \", \"${IFS}\") # Trick to use whitespaces\n cmd = cmd.replace(\"/\", \"${HOME}\") # Trick to use slash\n request = \"http://%s:%s/language/Swedish${IFS}&&\" % (self.target, str(self.port))\n request += cmd + \"&&tar${IFS}/string.js\"\n self.raw_url_request(request)\n # Check if file was correctly created\n response = self.raw_url_request(\"http://%s:%s/o\" % (self.target, str(self.port)))\n if response is None:\n exploitable = False\n elif response.text == \"\" or (response.text)[0] != '1': \n print_error(\"Expected response content first char to be '1' got %s. \" % response.text)\n exploitable = False\n \n except (ConnectionError, Timeout, timeout) as e:\n print_error(\"Unable to connect. reason: %s.\" % e.message)\n exploitable = False\n \n if exploitable:\n print_success(\"Exploitable!\")\n else:\n print_error(\"Not Exploitable.\")\n return(exploitable)",
"def sExists(self, filename):\r\n return self.__msg.sExists([self.__dir, filename])",
"def verify_file_on_transport(self,\n source_file,\n destination_path,\n port=0,\n method=VERIFY_METHOD_MD5SUM):",
"def is_file_suitable(file_path: str) -> bool:\n pass",
"def _credfile_exists(self):\n return os.path.exists(self.credfile_loc)",
"def execute_verification(self):\n client_socket = self.open_connection_with_the_cloud()\n client_socket.send(SEPARATOR.join([MY_IP, VERIFICATION, NONE]))\n directory_name = os.path.join(os.path.dirname(os.path.abspath(__file__)), FOLDER_NAME)\n for exists_file in os.listdir(directory_name):\n client_socket.send(exists_file)\n with open(os.path.join(directory_name, exists_file), READING) as file_handler:\n data = file_handler.read()\n if client_socket.recv(BUFFER) == EXISTS:\n if exists_file.endswith(tuple(MEDIA_EXTS)) or exists_file.endswith(tuple(PHOTO_EXTS)):\n client_socket.send(str(len(data)))\n to_send = SEND\n else:\n client_socket.send(str(data))\n time.sleep(0.3)\n to_send = NOT_SEND\n if client_socket.recv(BUFFER) != EXISTS and to_send == SEND:\n client_socket.send(data)\n time.sleep(0.3)\n else:\n client_socket.send(data)\n time.sleep(0.3)\n\n client_socket.send(DONE_VERIFICATION)",
"def is_response_file(param):\n # type: (str) -> bool\n return param[0] == '@' and os.path.isfile(param[1:])",
"async def pidfile_check(self):\n return self.pidfile.is_file()",
"def is_remote(path: str) -> bool:\n return split(path)[0] == \"cloud\"",
"def test_file_contents(self):\n self.assertTrue(valet.view(self.test_file)\n .find(\"file contents present\") >= 0)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Find exceptions from logs and save them
|
def find_exception_from_logs_and_save(self, start_time, stop_time, name_prefix="", copy_location=""):
self.run_folder = get_config_value("reporting_folder_run")
self.report_folder = get_config_value("reporting_folder")
error_log_file = open(self.report_folder + os.sep + "error_logs.txt", "w")
error_log_file.write("\nLOG START TIME: " + start_time + "\n")
has_error = False
for log_file in strings.ss_all_logs:
log_file_name = log_file.split("/")[-1]
try:
log_content = get_file_content(os.path.join(self.run_folder, log_file_name))
except:
continue
for line in log_content:
if "] ERROR" in line.upper():
has_error = True
print(log_file_name + ": " + line)
error_log_file.write(log_file_name + ": " + line)
elif ".EXCEPTION" in line.upper():
has_error = True
error_log_file.write(log_file_name + ": " + line)
elif "HTTPERROR" in line.upper():
has_error = True
error_log_file.write(log_file_name + ": " + line)
error_log_file.write("\nLOG STOP TIME: " + stop_time)
error_log_file.close()
if has_error:
self.warning("Error log has errors")
for log_file in strings.ss_all_logs:
log_file_name = log_file.split("/")[-1]
copy_location = copy_location.split("error_logs.txt")[0]
print(copy_location)
try:
if not os.path.exists(copy_location):
os.makedirs(copy_location)
command = "sudo cp " + self.run_folder + os.sep + log_file_name + " " + copy_location + name_prefix + "_" + log_file_name
self.run_bash_command(command, False)
except AssertionError:
self.warning("Could not copy file " + log_file_name)
return has_error
|
[
"def _extract_exceptions_from_logs(start_time, end_time, module_versions):\n if start_time and end_time and start_time >= end_time:\n raise webob.exc.HTTPBadRequest(\n 'Invalid range, start_time must be before end_time.')\n try:\n for entry in logservice.fetch(\n start_time=start_time or None,\n end_time=end_time or None,\n minimum_log_level=logservice.LOG_LEVEL_ERROR,\n include_incomplete=True,\n include_app_logs=True,\n module_versions=module_versions):\n # Merge all error messages. The main reason to do this is that sometimes\n # a single logging.error() 'Traceback' is split on each line as an\n # individual log_line entry.\n msgs = []\n log_time = None\n for log_line in entry.app_logs:\n # TODO(maruel): Specifically handle:\n # 'Request was aborted after waiting too long to attempt to service your\n # request.'\n # For an unknown reason, it is logged at level info (!?)\n if log_line.level < logservice.LOG_LEVEL_ERROR:\n continue\n msg = log_line.message.strip('\\n')\n if not msg.strip():\n continue\n # The message here is assumed to be utf-8 encoded but that is not\n # guaranteed. The dashboard does prints out utf-8 log entries properly.\n try:\n msg = msg.decode('utf-8')\n except UnicodeDecodeError:\n msg = msg.decode('ascii', 'replace')\n msgs.append(msg)\n log_time = log_time or log_line.time\n\n message = '\\n'.join(msgs)\n # Creates a unique signature string based on the message.\n signature, exception_type = _signature_from_message(message)\n if exception_type:\n yield _ErrorRecord(\n entry.request_id,\n entry.start_time, log_time, entry.latency, entry.mcycles,\n entry.ip, entry.nickname, entry.referrer, entry.user_agent,\n entry.host, entry.resource, entry.method, entry.task_queue_name,\n entry.was_loading_request, entry.version_id, entry.module_id,\n entry.url_map_entry, entry.app_engine_release, entry.instance_key,\n entry.status, message, signature, exception_type)\n except logservice.Error as e:\n # It's not worth generating an error log when logservice is temporarily\n # down. Retrying is not worth either.\n logging.warning('Failed to scrape log:\\n%s', e)",
"def upsert_exception(self, exception_log):\n self._repo.upsert_exception(exception_log)",
"def backup_failed_log(self, d):\n logs = [x for x in os.listdir(d) if x.endswith('.log')]\n for lg in logs:\n shutil.copy(os.path.join(d, lg), self.d_failed_nicad_logs)",
"def write_scan_error(msg):\n with open(os.path.join('persist', 'scan_err.log'), 'a') as scan_errors_file:\n scan_errors_file.write(msg + '\\n')",
"def handle_exception() -> None:\n\n config = read_config()\n log = open_log_file(config)\n print_exc(file=log)\n log.close()\n print('Si è verificato un errore. Ho creato un file chiamato \\'log\\' nella cartella Download. Inviamelo')\n print('Premi ENTER per terminare...')\n input()\n exit()",
"def save_logs(self, log_count):\n with open(self.FILE_PATH) as log_file:\n with tqdm(total=log_count, desc='save to database', ) as pbar:\n record_list = []\n record_count = 1\n for line in log_file:\n\n record = self.parse_line(line)\n if record is None:\n continue\n record_list.append(LogItem(\n ip=record['ip'],\n datetime=self.parse_date(record['date']),\n method=record['method'],\n uri=record['uri'],\n status_code=record['status'],\n body_size=record['body_size'],\n user_agent=record['agent']\n ))\n\n if record_count == self.MASS_SAVE_COUNT:\n LogItem.objects.bulk_create(record_list)\n record_list = []\n record_count = 0\n pbar.update(1)\n record_count += 1",
"def write_exception(exc):\n\n desktop_folder = u'%s/Desktop' % (expanduser(u'~%s' % CONFIG[u'desktop_user']))\n msg = u'Command line arguments: %s\\n' % u' '.join(sys.argv)\n msg += format_exc()\n\n # Create new file for each error\n i = 1\n fname = u'%s/BALANCE-HISTORY-FAILED.txt' % desktop_folder\n while isfile(fname):\n fname = u'%s/BALANCE-HISTORY-FAILED-%d.txt' % (desktop_folder, i)\n i += 1\n\n with open(fname, u'w') as f:\n f.write(msg)",
"def check_exceptions(self):\n if self.exc_counter:\n lines = self._lines\n self._lines = []\n exc_counter = self.exc_counter\n self.exc_counter = 0\n last_exc = self.last_exc\n self.last_exc = 0\n\n self._logger.critical(\"The following unhandled exceptions where raised during this test's execution:\")\n for line in lines:\n self._logger.critical(line)\n\n raise Exception(\"Test raised %d unhandled exceptions, last one was: %s\" % (exc_counter, last_exc))",
"def _log_errors(errors):\n # NOTE: DataCiteError is a tuple with the errors on the first\n errors = json.loads(errors.args[0])[\"errors\"]\n for error in errors:\n field = error[\"source\"]\n reason = error[\"title\"]\n logging.warning(f\"Error in {field}: {reason}\")",
"def load_failures(self):\n\n for pkgspec in self.logdict:\n logpath = self.logdict[pkgspec]\n try:\n with open(get_kpr_path(logpath), 'r') as kp:\n for line in kp:\n (where, problem) = self.parse_kpr_line(line)\n self.failures.append(make_failure(where, problem, pkgspec))\n except IOError:\n logging.error(\"Error processing %s\" % get_kpr_path(logpath))",
"def _parse_logs(self):\n BuildError = self.env['runbot.build.error']\n # only parse logs from builds in error and not already scanned\n builds_to_scan = self.search([('id', 'in', self.ids), ('local_result', 'in', ('ko', 'killed', 'warn')), ('build_error_ids', '=', False)])\n ir_logs = self.env['ir.logging'].search([('level', 'in', ('ERROR', 'WARNING', 'CRITICAL')), ('type', '=', 'server'), ('build_id', 'in', builds_to_scan.ids)])\n return BuildError._parse_logs(ir_logs)",
"def saveLogResults(self):\n try:\n # print(csvReportFolder)\n logPicklePath = os.path.join(self.getCurrentCsvReportFolder(), 'LogResults.pkl')\n with open(logPicklePath, 'wb') as f:\n pickle.dump(self.logDict, f)\n except:\n print(traceback.format_exc())",
"def test_exception_in_extra():\n tree = parse(dedent(\"\"\"\\\n import logging\n\n try:\n pass\n except Exception as e:\n logging.exception('Exception occurred: {exc}', extra=dict(exc=e))\n \"\"\"))\n visitor = LoggingVisitor()\n visitor.visit(tree)\n\n assert_that(visitor.violations, has_length(1))\n assert_that(visitor.violations[0][1], is_(equal_to(EXCEPTION_VIOLATION)))",
"def save_all_logs(self):\n\n # First get all unique properties\n # Obtain information on simulations\n simulation_dict = {\"SimulationID\": [], \"SimulationName\": []}\n self.get_unique_properties(\"simulations\", simulation_dict)\n\n # Obtain information on activities\n activity_dict = {\n \"ActivityID\": [],\n \"ActivityName\": [],\n \"EquipmentID\": [],\n \"ActivityFunction\": [],\n }\n self.get_unique_properties(\"activities\", activity_dict)\n\n # Obtain information on equipment\n equipment_dict = {\"EquipmentID\": [], \"EquipmentName\": []}\n self.get_unique_properties(\"equipment\", equipment_dict)\n\n # Obtain information on locations\n location_dict = {\n \"LocationID\": [],\n \"LocationName\": [],\n \"Longitude\": [],\n \"Latitude\": [],\n }\n self.get_unique_properties(\"location\", location_dict)\n\n # Obtain information on events\n event_dict = {\"EventID\": [], \"EventName\": []}\n self.get_unique_properties(\"events\", event_dict)\n\n # Continue with obtaining the logs, energy use and dredging spill\n self.get_equipment_log()\n self.get_energy()\n self.get_spill()\n self.get_results()\n\n # Save all as csv files\n self.generic_results.to_csv(self.location + \"generic_results.csv\", index=False)\n self.dredging_spill.to_csv(self.location + \"dredging_spill.csv\", index=False)\n self.energy_use.to_csv(self.location + \"energy_use.csv\", index=False)\n self.equipment_log.to_csv(self.location + \"equipment_log.csv\", index=False)\n self.unique_events.to_csv(self.location + \"events.csv\", index=False)\n self.unique_activities.to_csv(self.location + \"activities.csv\", index=False)\n self.unique_equipment.to_csv(self.location + \"equipment.csv\", index=False)\n self.unique_locations.to_csv(self.location + \"locations.csv\", index=False)\n self.unique_simulations.to_csv(self.location + \"simulations.csv\", index=False)",
"def search_log(logFileLoc):\n problem_messages = []\n\n try:\n log = logFile(logFileLoc)\n log.open()\n except FileNotFoundError as e:\n print('Could not find file: {}'.format(e))\n return []\n\n scanner.input_file = logFileLoc\n problem_messages = scanner.search_log()\n\n return problem_messages",
"def _log_exception():\n exc = traceback.format_exception(*sys.exc_info())\n rospy.logerr(\"\".join(exc))",
"def _log_exception(\n self, error: str, extra: Optional[dict] = None, save_to_data_object: bool = True\n ):\n if extra is None:\n extra = dict()\n extra.update({\"data_id\": self.data.id})\n logger.exception(error, extra=extra)\n if save_to_data_object:\n self._save_error(error)",
"def logging_logfile_recovery(issue: FileNotFoundError) -> None:\n print(\n f\"There was an error locating the log file or directory: {issue}\", file=sys.stderr,\n )\n try:\n os.mkdir(\"logs\")\n except FileExistsError as inst:\n raise ShanghaiError(error=f\"Unexpected exception, contact maintainer - {inst}\")\n else:\n print(\"Log directory was absent, created and continuing\", file=sys.stderr)",
"def scan_logs(self):\n self.log.debug('Scan log files')\n pattern = self.cfg['remote'][self.url_key + '.log.pattern']\n log_dir = self.cfg['remote'][self.url_key + '.log.dir']\n log_files = self.cfg['remote'][self.url_key + '.log.files']\n if pattern is None or log_dir is None or log_files is None:\n self.log.info('No log file scan configured or incomplete')\n self.__add_to_ssh_message('No log file scan configured or incomplete')\n return\n cmd = 'grep -i \"{0}\" {1}/{2}'.format(pattern, log_dir, log_files)\n grep = self.__ssh_command(cmd)\n self.record['logs'] = grep"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Verify audit log file in server.
|
def verify_audit_log(self, section=u'ss1_url', event="Log in user"):
# Sleep waiting log
sleep(1)
server_log_address = TESTDATA[section][u'server_address']
user = TESTDATA[section][u'j_username']
log_output = self.read_server_file(server_log_address, strings.audit_log)
# Prints full log string
newest_log_string = self.parse_log_file_tail(log_output, row_count=1)
print(newest_log_string)
newest_log = newest_log_string[newest_log_string.find("{"):]
print(newest_log)
if not newest_log:
self.fail(errors.audit_log_is_empty)
newest_log = eval(newest_log, {"null":None})
print("Log event:", newest_log["event"])
print("Log user:", newest_log["user"])
if not isinstance(newest_log, dict):
self.fail(errors.string_is_not_dict + "\n" + self.parse_log_file_tail(log_output))
if not event == newest_log["event"]:
log_file_tail = self.parse_log_file_tail(log_output)
if event in log_file_tail:
self.warning("newest log event is not {}, but found in file\n".format(newest_log["event"]) + log_file_tail)
else:
self.fail(errors.log_event_fail(event) + "\n" + log_file_tail)
if not user == newest_log["user"]:
log_file_tail = self.parse_log_file_tail(log_output)
if event in log_file_tail:
self.warning("newest log user is not {}, but found in file\n".format(newest_log["user"]) + log_file_tail)
else:
self.fail(errors.log_user_fail(user) + "\n" + log_file_tail)
|
[
"def CheckLog(file): \n lines_list = open(file).read().splitlines()\n rejects = []\n\n for l in lines_list:\n record = LogRecord({ 'record' : l})\n if record.action == \"REJECT\":\n rejects.append(l)\n return rejects",
"def run_audit(audits=None, plateform=None):\n from utils.plateform import update_plateform\n for audit in audits:\n import time\n start_time = time.time()\n audit.generate_scripts()\n print_step('Auditing %s with profile %s' % (audit.host.name, audit.profile.name))\n print_step('Initiate SSH Session on %s (%s@%s)\\n' % (audit.host.name, audit.host.account, audit.host.ip))\n\n audit.host = configure_host(audit.host, audit.config_script_path)\n update_plateform(plateform) # Updating yml file with new account and status values\n\n remove_generated_scripts(audit.config_script_path)\n\n if not audit.host.distribution:\n audit.host = get_distribution_info(audit.host)\n\n if not audit.host.kernel_version:\n audit.host = get_kernel_info(audit.host)\n update_plateform(plateform)\n\n host = deploy_audit_script(audit.host, audit.audit_script_path)\n\n if host: # if host need to be reconfigured, update account and status and exit\n audit.host = host\n update_plateform(plateform)\n exit(1)\n\n if audit.scripts_needed:\n deploy_script_needed(audit.host, audit.scripts_needed)\n\n execute_audit_script(audit.host, audit.audit_script_path)\n remove_generated_scripts(audit.audit_script_path)\n update_plateform(plateform)\n\n get_results(audit.host, audit.pickle_name)\n\n remove_script_needed(audit.host)\n from core.report import Report\n audit.report = Report(data=load_report(audit.pickle_name))\n print_step('Generating html report from audit results\\n')\n audit.generate_report()\n print_step('HTML report successfully generated -> %s\\n' % audit.report_path)\n print_step('Auditing %s with profile %s successfully finished in %d seconds\\n' % (\n audit.host.name, audit.profile.name, int(time.time() - start_time)))\n return True",
"def log_file_stat(self, file):\n if os.path.exists(file):\n stat = os.stat(file)\n size = os.path.getsize(file)\n message = '{0} stat is {1}, size is {2}'\n self.logger.info(message.format(file, stat.st_mtime, size))",
"def watchdog(self, bot):\n logging.debug(u\"Watchdog is checking audit status\")\n label = 'audit.switch.{}'.format(bot.id)\n if self.engine.get(label, 'off') == 'off':\n logging.debug(u\"- restarting audit\")\n self.audit_on(bot)",
"def test_simple_audit(capsys, monkeypatch):\n def mockreturn(path):\n return '9c3bb3efa8095f36aafd9bf3a698efe439505021'\n monkeypatch.setattr(toolaudit.readers, 'sha1_file', mockreturn)\n app = toolaudit.application.ToolauditApp()\n try:\n app.run(kitlist_file='test/example.yaml')\n except SystemExit:\n pass\n out, err = capsys.readouterr()\n returned_yaml = yaml.load(out)\n assert returned_yaml['tools'][0]['checksum'] == '9c3bb3efa8095f36aafd9bf3a698efe439505021'",
"def checkForTodaysLogfile(index):\n today = datetime.datetime.now().strftime(\"%Y.%m.%d\")\n if index.find(today) > -1:\n print(\"Warning: you are trying to transform todays logfile.\"\n \"This may corrupt the database since new data may come in. Transform today's data\"\n \" tomorrow\", file=sys.stderr)\n return False\n else:\n return True",
"def test_upload_run_logs(self):\n pass",
"def run(self):\n if self.log_file: # if path of SSH-log file is valid\n # Rotate & parse the log file\n self.parse_log_file()\n # Analyze the log for brute-force\n self.check_ssh_bruteforce()\n # Empty the dict to rotate the log-file\n self.username_dict.clear()",
"def check_log(logfilename, errors, warnings, requirements):\n\n # check for errors\n if grep_file(logfilename, errors):\n return False\n\n # check for warnings\n grep_file(logfilename, warnings)\n\n # check for requirements\n if requirements != None:\n if not grep_file(logfilename, requirements, verbose=False):\n print \"Requirements not found in logfile %s: \" % ( logfilename )\n return False\n\n # inform user\n return True",
"def verify(self, verifier: verify_mod.SnapshotVerifier) -> None:\n with self.edenfs() as eden:\n eden.start()\n print(\"Verifing snapshot data:\")\n print(\"=\" * 60)\n self.verify_snapshot_data(verifier, eden)\n print(\"=\" * 60)",
"def test_file_handler(self):\n\n # Initialize logger in a test directory\n args = {\"stream_level\": logging.CRITICAL,\n \"log_dir\": \"/var/log/bgp_mrt_test\"}\n self.logger = self._initialize_logger(args=args)\n log_me = \"hello\"\n self.logger.error(log_me)\n self.assertTrue(log_me in self._read_file())\n logging.shutdown()\n self.assertTrue(os.path.exists(self.file_path))\n # Had to comment this line out because of the server permissions\n # shutil.rmtree(args.get(\"log_dir\"))",
"def test_log_file(self):\n if self.log_file is not None and not os.access(self.log_file, os.W_OK):\n msg = 'Cannot open log file {} for writing.'.format(self.log_file)\n raise ConfigException(msg)",
"def host_audit(self, host):\n if self._sw_update is not None:\n self._sw_update.handle_event(\n strategy.STRATEGY_EVENT.HOST_AUDIT, host)",
"def audit(self, files_to_audit, db, audit_type=\"rom\"):\n system, roms = self.read_system_xml(db)\n have = os.listdir(files_to_audit)\n for fname in have:\n for rom in roms:\n if rom[\"name\"] == os.path.splitext(fname)[0]:\n rom[audit_type] = True\n\n return roms",
"def check_failures(log_file_name, number_lines, mail_creds_file):\n try:\n last_n_status = os.popen(\"tail -{} {}\".format(number_lines, log_file_name)).read()\n last_n_status = last_n_status.split(\"\\n\")[:number_lines]\n last_n_status = list({i.split(\": \")[1] for i in last_n_status})\n\n if \"SUCCESSFUL\" not in last_n_status:\n send_failure_alert_mail(mail_creds_file)\n except Exception as error:\n print(error)\n system_exit_error(\"CheckFailureError\")",
"def test_personality_file_created_on_rebuild(self):\n\n remote_client = self.server_behaviors.get_remote_instance_client(\n self.server, self.servers_config, password=self.password,\n key=self.key.private_key)\n self.assertTrue(remote_client.is_file_present('/rebuild.txt'))\n self.assertEqual(\n remote_client.get_file_details('/rebuild.txt').content,\n self.file_contents)",
"def test_filelog():\n\tlgr = simplelog.make_logger (handler='test/out/test.log')\n\tlgr.info (\"what?\")",
"def log_check(url, flag=\"Dropped intermediate tables.\", delay=15, log=LOGGER, line=0):\n log.debug(\"Begin log check at line %i.\", line)\n complete = False\n count = 0\n\n while not complete:\n server_log = urllib2.urlopen(url).read()\n server_log = server_log.strip().split(\"\\n\")\n\n if (len(server_log) > 0) and \\\n (server_log[-1] != \"\") and \\\n (server_log[-1][-1] != \".\"):\n server_log.pop(-1)\n\n if (len(server_log[line + count:]) > 0) and \\\n (server_log[line + count:] != [\"\"]):\n for entry in server_log[line + count:]:\n log.debug(\"Parsing log entry: %s.\", repr(entry))\n msg_type, msg = entry.split(\",\")[1:]\n relogger(log, msg_type, msg)\n count = count + 1\n if msg.strip() == flag.strip():\n log.debug(\"Detected log check exit message.\")\n complete = True\n if len(server_log[line + count:]) > 0:\n log.warn(\"Unwritten messages on the remote log.\")\n break\n\n else:\n log.info(\"Please wait.\")\n time.sleep(delay)\n\n line = line + count\n log.debug(\"End log check at line %i.\", line)\n\n return line",
"def test_get_audit_logs_using_get(self):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Calculate log probabilities of a batch of observations & actions Calculates log probabilities using previous step's model parameters and new parameters being trained.
|
def _logprob(self):
logp = -0.5 * tf.reduce_sum(self.log_vars)
logp += -0.5 * tf.reduce_sum(tf.square(self.old_actions - self.means) /
tf.exp(self.log_vars), axis=-1)
self.logp = logp
logp_old = -0.5 * tf.reduce_sum(self.log_vars)
logp_old += -0.5 * tf.reduce_sum(tf.square(self.old_actions - self.old_means) /
tf.exp(self.log_vars), axis=-1)
self.logp_old = logp_old
|
[
"def logprob(self, action_sample, policy_params):\n return self.action_head.logprob(action_sample, policy_params)",
"def posteriorLikelihood(self, step):",
"def sample(self):\n observations, rewards, actions, logprobs, dones, values = [], [], [], [], [], []\n done = False\n for step in range(self.config['steps_per_rollout']):\n value, action, logprob, mean = self.net.forward(to_variable(self.obs[np.newaxis], self.config['cuda']))\n action = action.cpu().data.numpy()[0] if self.config['cuda'] else action.data.numpy()[0]\n next_obs, reward, done, _ = self.env.step(action)\n\n if self.config['cuda']:\n # torch has an additional dimension for batch size, so we need to select that batch\n value, logprob, mean = value.data.cpu().numpy()[0], logprob.data.cpu().numpy()[0], \\\n mean.data.cpu().numpy()[0]\n else:\n value, logprob, mean = value.data.numpy()[0], logprob.data.numpy()[0], \\\n mean.data.numpy()[0]\n\n observations.append(self.obs)\n actions.append(action)\n rewards.append(reward)\n logprobs.append(logprob)\n values.append(value)\n dones.append(done)\n\n self.obs = next_obs\n\n if done:\n # reset the environment\n self.obs = self.env.reset()\n\n if done:\n last_value = 0.0\n else:\n # bootstrap, we only need the last value to do this\n value, action, logprob, mean = self.net.forward(to_variable(self.obs[np.newaxis], self.config['cuda']))\n\n if self.config['cuda']:\n # torch has an additional dimension for batch size, so we need to select that batch\n value, = value.data.cpu().numpy()[0]\n else:\n value, = value.data.numpy()[0]\n last_value = value\n\n # same as ppo_single/model/ppo.py\n observations = np.asarray(observations)\n rewards = np.asarray(rewards)\n logprobs = np.asarray(logprobs)\n dones = np.asarray(dones)\n values = np.asarray(values)\n actions = np.asarray(actions)\n returns = calculate_returns(rewards, dones, last_value, self.config['gamma'])\n return SampleBatch({'observations': observations,\n 'rewards': rewards,\n 'logprobs': logprobs,\n 'dones': dones,\n 'values': values,\n 'actions': actions,\n 'returns': returns[:-1]})",
"def generate_probs_from_log_action_probs(self):\n total_prob = self.locals().log_action_probs + self.locals().log_priors\n assert total_prob.max() > -1e4, \"We need at least one allowed action!\"\n tmp = np.exp(total_prob - total_prob.max())\n self.locals().probs = tmp / tmp.sum()\n assert not np.isnan(self.locals().probs.sum())",
"def forward(log_emlik, log_startprob, log_transmat):",
"def log_parameters_prob(self) -> torch.Tensor:\n return Dirichlet(self.prior).log_prob(self.probs)",
"def priorLikelihood(self, step):",
"def update_loglike(self, iteration):\n # todo: implement log-like\n # Hint: use scipy.special.gammaln (imported as gammaln) for log(gamma)\n\n #theta prior and phi prior\n theta_prior = self.n_docs * (gammaln(self.n_topics * self.alpha) - self.n_docs * self.n_topics * gammaln(self.alpha)) #gammaln is the log of gamma function\n phi_prior = self.n_topics * (gammaln(self.n_words * self.beta) - self.n_topics * self.n_words * gammaln(self.beta))\n\n # p(z_id) and p(x_id)\n p_zid = np.sum(np.multiply((self.A_dk + self.alpha -1) , np.log(self.theta)))\n #print('p_zid',self.p_zid)\n p_xid = np.sum(np.multiply((self.B_kw + self.beta -1) , np.log(self.phi)))\n # print(\"p_xid\",self.p_xid)\n self.loglike[iteration+1] = theta_prior + phi_prior + p_zid + p_xid #plus one since the first one is the initial loglike\n #print(\"loglikelihood is \", self.loglike)\n '''\n ll = 0\n ll += self.n_topics * gammaln(self.n_words * self.beta)\n ll += - self.n_words * self.n_topics * gammaln(self.beta)\n ll += self.n_docs * gammaln(self.n_topics * self.alpha)\n ll += - self.n_topics * self.n_docs * gammaln(self.alpha)\n ll += ((self.A_dk + self.alpha -1 ) * np.log(self.theta)).sum()\n ll += ((self.B_kw + self.beta - 1) * np.log(self.phi)).sum()\n self.loglike[iteration + 1] = ll\n '''\n\n pass",
"def log_diagnostics(self, iteration, batch):\n\n feeds = {self._observations_ph: batch['observations']}\n sess = tf_utils.get_default_session()\n probs = sess.run(self.distribution.p_all, feeds)\n\n logger.record_tabular('policy-prob-sum', np.mean([np.mean(np.sum(prob,1)) for prob in probs]))",
"def log_parameters_prob(self) -> torch.Tensor:\n ll = Gamma(self.prec_alpha_prior,\n self.prec_beta_prior).log_prob(self.precs.abs()).sum()\n ll += MultivariateNormal(\n loc=self.means_prior,\n precision_matrix=(\n self.n0 * self.precs.abs()).diag()).log_prob(self.means)\n return ll",
"def log_prob(self, ts):\n self.k_inv = np.linalg.inv(self.k)\n self.k_det = np.linalg.det(self.k)\n\n # calculate predictions at each time point\n predictors = self.munge(ts, order=self.order)\n predictions = self.a_full.dot(predictors.T)\n truths = ts[self.order:, :].T\n\n log_probs = self.log_prob_mvn(truths, means=predictions, cov_inv=self.k_inv, cov_det=self.k_det)\n return log_probs.sum()",
"def update_ppo(\r\n self,\r\n states: torch.Tensor,\r\n actions: torch.Tensor,\r\n rewards: torch.Tensor,\r\n dones: torch.Tensor,\r\n log_pis: torch.Tensor,\r\n next_states: torch.Tensor,\r\n writer: SummaryWriter\r\n ):\r\n with torch.no_grad():\r\n values = self.critic(states)\r\n next_values = self.critic(next_states)\r\n\r\n targets, gaes = calculate_gae(\r\n values, rewards, dones, next_values, self.gamma, self.lambd)\r\n\r\n for _ in range(self.epoch_ppo):\r\n self.learning_steps_ppo += 1\r\n self.update_critic(states, targets, writer)\r\n self.update_actor(states, actions, log_pis, gaes, writer)",
"def log_prob(self, inputs, context=None):\n\n # Get necessary quantities.\n logits, means, precisions, sumlogdiag, _ = self.get_mixture_components(context)\n\n batch_size, n_mixtures, output_dim = means.size()\n inputs = inputs.view(-1, 1, output_dim)\n\n # Split up evaluation into parts.\n a = logits - torch.logsumexp(logits, dim=-1, keepdim=True)\n b = -(output_dim / 2.0) * np.log(2 * np.pi)\n c = sumlogdiag\n d1 = (inputs.expand_as(means) - means).view(\n batch_size, n_mixtures, output_dim, 1\n )\n d2 = torch.matmul(precisions, d1)\n d = -0.5 * torch.matmul(torch.transpose(d1, 2, 3), d2).view(\n batch_size, n_mixtures\n )\n\n return torch.logsumexp(a + b + c + d, dim=-1)",
"def hook_post_train(self) -> None:\n self.logger.info(f\"In total, training {self.state.current_epoch} epochs took \"\n f\"{self.state.time_total:.3f}s ({self.state.time_total - self.state.time_val:.3f}s \"\n f\"train / {self.state.time_val:.3f}s val)\")",
"def likelihoods(self, step):",
"def on_train_end(self, *args, **kwargs):\n self._model.set_weights(self._best_weights)\n x, y = self._train\n train_res = self._model.evaluate(x=x, y=y)\n for name, value in zip(self._model.metrics_names, train_res):\n mlflow.log_metric(f\"train_{name}\", value)\n x, y = self._valid\n valid_res = self._model.evaluate(x=x, y=y)\n for name, value in zip(self._model.metrics_names, valid_res):\n mlflow.log_metric(f\"valid_{name}\", value)\n signature = infer_signature(x, y)\n log_model(keras_model=self._model, signature=signature, **self._pyfunc_params)",
"def train(self, observations, actions, rewards_list, next_observations, terminals):\n # step 1: calculate q values of each (s_t, a_t) point, using rewards (r_0, ..., r_t, ..., r_T)\n q_values = self.calculate_q_vals(rewards_list)\n\n # step 2: calculate advantages that correspond to each (s_t, a_t) point\n advantages = self.estimate_advantage(observations, q_values)\n\n # DONE: step 3: use all datapoints (s_t, a_t, q_t, adv_t) to update the PG actor/policy\n # HINT: `train_log` should be returned by your actor update method\n train_log = self.actor.update(observations, actions, advantages, q_values)\n\n return train_log",
"def log(self):\n\t\tnp.save(os.path.join(self.experiment_dir, 'train_eval_iters.npy'), self.train_eval_iters)\n\t\tnp.save(os.path.join(self.experiment_dir, 'train_losses.npy'), self.train_losses)\n\t\tnp.save(os.path.join(self.experiment_dir, 'train_errors.npy'), self.train_errors)\n\t\tnp.save(os.path.join(self.experiment_dir, 'total_train_errors.npy'), self.total_train_errors)\n\t\tnp.save(os.path.join(self.experiment_dir, 'val_eval_iters.npy'), self.val_eval_iters)\n\t\tnp.save(os.path.join(self.experiment_dir, 'val_errors.npy'), self.val_errors)\n\t\tnp.save(os.path.join(self.experiment_dir, 'learning_rates.npy'), self.learning_rates)",
"def _update_predict_log(y_pred,y_proba,query,runtime):\n\n ## name the logfile using something that cycles with date (day, month, year) \n today = date.today()\n logfile = \"example-predict-{}-{}.log\".format(today.year, today.month)\n\n ## write the data to a csv file \n header = ['unique_id','timestamp','y_pred','y_proba','x_shape','model_version','runtime']\n write_header = False\n if not os.path.exists(logfile):\n write_header = True\n with open(logfile,'a') as csvfile:\n writer = csv.writer(csvfile, delimiter=',', quotechar='|')\n if write_header:\n writer.writerow(header)\n\n to_write = map(str,[uuid.uuid4(),time.time(),y_pred,y_proba,query.shape,MODEL_VERSION,runtime])\n writer.writerow(to_write)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Parses the itype argument and returns a set of strings with all the selected interaction types
|
def parse_itypes(itype_argument):
    # The special value "all" expands to every supported interaction type.
    # Both branches return a set, matching the stated contract.
    if "all" in itype_argument:
        return {"sb", "pc", "ps", "ts", "vdw", "hb", "lhb", "hbbb", "hbsb",
                "hbss", "wb", "wb2", "hls", "hlb", "lwb", "lwb2"}
    return set(itype_argument.split(","))
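
A brief usage sketch (the selections below are hypothetical; the type codes come from the hard-coded list above):

selected = parse_itypes("hb,sb,vdw")
assert selected == {"hb", "sb", "vdw"}
everything = parse_itypes("all")
assert "lwb2" in everything and len(everything) == 16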
|
[
"def get_input_types():\n return [\"normal\", \"string_all\", \"string_upper\", \"string_lower\",\n \"specials\", \"integer\", \"float\", \"version\", \"nothing\"]",
"def getTypeInfo():",
"def get_result_type_ids():\n try:\n from pydoas import _LIBDIR\n except:\n raise\n with open(join(_LIBDIR, join(\"data\", \"import_info.txt\"))) as f:\n types = []\n for line in f: \n spl = line.split(\":\", 1)\n if spl[0] == \"type\":\n tp = spl[1].split(\"#\")[0].strip()\n if len(tp) > 0:\n types.append(tp)\n return types",
"def arg_types(self) -> List[ast.Type]:",
"def _get_feature_types_for_featuretools(self, X):",
"def get_shapeof_metatypes() -> List[OperatorMetatype]:",
"def argtypes(self):\n if self.dimension is not None:\n result = []\n if \"in\" in self.direction:\n #The only complication here is that the 'known' dimensionality could actually\n #be a function like \"size\" that needs information about other variables.\n #If we choose to ignore known shapes, we lose the error checking for the passed\n #in variables from the python side.\n if self.direction == \"(inout)\" and \":\" not in self.dimension:\n wstr = \", writeable\"\n else:\n wstr = \"\"\n\n if \":\" in self.dimension or \"size\" in self.dimension:\n template = 'ndpointer(dtype={}, ndim={}, flags=\"F{}\")'\n result.append(template.format(self.pytype, self.D, wstr))\n else:\n template = 'ndpointer(dtype={}, ndim={}, shape=({}), flags=\"F{}\")'\n sdim = self.dimension + (\"\" if self.D > 1 else \",\")\n result.append(template.format(self.pytype, self.D, sdim, wstr))\n elif self.direction == \"(out)\":\n result.append(\"c_void_p\")\n\n if self.D > 0 and \":\" in self.dimension:\n result.extend([\"c_int_p\" for i in range(self.D)])\n if (self.direction == \"(inout)\" and \":\" in self.dimension and\n (\"allocatable\" in self.modifiers or \"pointer\" in self.modifiers)):\n result.append(\"c_void_p\")\n return result\n else:\n ctype = self.ctype\n if ctype is not None:\n return [\"{}_p\".format(ctype.lower())]",
"def interaction_types(self):\n raise NotImplementedError(\"Implement in your subclass!\")",
"def get_const_metatypes() -> List[OperatorMetatype]:",
"def get_string_match_types(self):\n raise errors.Unimplemented()",
"def data_type_attrs():\n return itertools.product([\"point\", \"cell\"], [\"scalars\", \"vectors\"])",
"def valid_types(self):\n types = re.sub(r'[ ]?,[ ]?', ',', self.node.content_types).split(',')\n return [t.lower() for t in types]",
"def optstr2types(ostr):\n\n typeconv = {None:'',int:'',float:''}\n typemap = {'s':None,'i':int,'f':float}\n opt_re = re.compile(r'([\\w]*)([^:=]*:?=?)([sif]?)')\n\n for w in ostr.split():\n oname,alias,otype = opt_re.match(w).groups()\n if otype == '' or alias == '!': # simple switches are integers too\n otype = 'i'\n typeconv[typemap[otype]] += oname + ' '\n return typeconv",
"def GetIntronType(sequence):\n\n for name, prime5, prime3 in param_intron_types:\n if sequence[:len(prime5)].upper() == prime5 and \\\n sequence[-len(prime3):].upper() == prime3:\n return name\n else:\n return \"unknown-\" + sequence[:5] + \"-\" + sequence[-5:]",
"def get_effect_types(types=True, groups=False):\n effect_types = [\n \"tRNA:ANTICODON\",\n \"splice-site\",\n \"frame-shift\",\n \"nonsense\",\n \"no-frame-shift-newStop\",\n \"noStart\",\n \"noEnd\",\n \"missense\",\n \"no-frame-shift\",\n \"CDS\",\n \"synonymous\",\n \"coding_unknown\",\n \"3'UTR\",\n \"5'UTR\",\n \"intron\",\n \"non-coding\",\n \"5'UTR-intron\",\n \"3'UTR-intron\",\n \"promoter\",\n \"non-coding-intron\",\n \"unknown\",\n \"intergenic\",\n \"no-mutation\",\n \"CNV-\",\n \"CNV+\",\n ]\n\n effect_groups = [\n \"LGDs\", \"LoF\", \"nonsynonymous\", \"coding\", \"introns\", \"UTRs\", \"CNVs\"]\n\n if types:\n if not groups:\n return effect_types\n result = list(effect_groups)\n result.extend(effect_types)\n return result\n if groups:\n return effect_groups\n return []",
"def atom_type_library(self):\n return list(set(self.atom_type))",
"def list_multiple_data_types():\n return [93, 77, 'fiftyfive', 54, 44, 31, 26, 20, 17, 3]",
"def get_quantizer_metatypes() -> List[OperatorMetatype]:",
"def getInputExtension(self):\n validExtensions = ('i', 'I')\n return validExtensions",
"def _get_cmds_of_type(self, state, types=None):\n if(state == 0):\n self.__current_command_list = []\n self.__logger.debug(\"_get_cmds_of_type\")\n if(not types): # Return commands of all types\n types = ['app', 'dev_all', 'dev_rx', 'dev_tx']\n for cmd in dir(self):\n fn = getattr(self, cmd)\n if(hasattr(fn, 'dev_type')):\n if(fn.dev_type in types):\n self.__current_command_list.append(cmd)\n return self.__current_command_list"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Generates a traceplot from the contact frames and writes a figure to an image file.
|
def write_trace(contact_frames, labels, output_file):
assert len(contact_frames) == len(labels)
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
num_interactions = len(contact_frames)
num_frames = max(map(max, contact_frames)) + 1
    f, axs = plt.subplots(num_interactions, sharex=True, sharey=True)
    if num_interactions == 1:
        axs = [axs]  # plt.subplots returns a single Axes object when only one row is requested
# Do actual plotting
for ax, contacts, label in zip(axs, contact_frames, labels):
contact_set = set(contacts)
x = range(num_frames)
y = [1 if c in contact_set else 0 for c in range(num_frames)]
ax.bar(x, y, width=1.0, linewidth=0, color="#76b8cb")
ax.set_yticks([])
ax.set_ylabel(label, rotation=0, va='center', ha='left')
ax.yaxis.set_label_coords(1.05, 0.5)
plt.xlim((-0.5, num_frames - 0.5))
plt.ylim((0, 1))
# for ax in axs:
# ax.get_yaxis().set_visible(False)
for ax in axs[:-1]:
ax.get_xaxis().set_visible(False)
plt.tight_layout()
f.subplots_adjust(hspace=0)
# plt.setp([a.get_xticklabels() for a in axs[:-1]], visible=False)
axs[-1].xaxis.set_major_locator(MaxNLocator(integer=True))
print("Writing trace-plot to", output_file)
f.savefig(output_file)
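
A minimal usage sketch (the data, labels and output file name are made up); each inner list holds the frame indices at which that interaction is present:

contact_frames = [[0, 1, 2, 5], [3, 4, 5]]
labels = ["A:ARG12 - A:GLU45", "A:LYS8 - A:ASP30"]
write_trace(contact_frames, labels, "trace.png")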
|
[
"def save_frame(frame_num, frame_path, frame_plot):\n # frame plot\n frame_plot()\n plt.savefig(frame_path + str(frame_num) + '.png')\n plt.close()",
"def PlotGeometry(self,plot_file):\n #for each beam, get the (x,y) coordinates and plot\n max_x,max_y = np.max(self.joints_arr[:,0]),np.max(self.joints_arr[:,1])\n min_x,min_y = np.min(self.joints_arr[:,0]),np.min(self.joints_arr[:,1])\n for i in np.arange(self.nBeams):\n joints = np.where(self.beams_arr[:,i] == 1)[0]\n x_coord = self.joints_arr[joints,0]\n y_coord = self.joints_arr[joints,1]\n plt.plot(x_coord,y_coord,'b-') #plot each beam one at a time\n plt.title(\"2D Truss Geometry\")\n plt.xlim((min_x-0.5,max_x+0.5))\n plt.ylim((min_y-0.5,max_y+0.5))\n plt.savefig(plot_file)",
"def write_frames_ascii(filename,trace,size,sx,sy,A,n,b):\n \n file = str(filename)\n f=open(file,'w') #opens in only writing mode first, to overwrite the file if it already exists\n f.close()\n f=open(file, 'a')\n \n t = trace[:,0] # the trace array contains the 2D coordinates and the time points\n for i in t:\n xc = trace[i,1]\n yc = trace[i,2]\n frame = gaussian_frame(xc,yc,size,sx,sy,A,n,b) #appends every new frame to the file\n np.savetxt(f,frame)\n f.close() \n \n #writes a metafile about the frames (as a dictionary), containing the size and the length:\n trace_length = len(trace)\n Info = {\"trace length\": trace_length, \"size frames\": size} \n writer = csv.writer(open(filename + 'Info', 'wb'))\n for key, value in Info.items():\n writer.writerow([key, value])",
"def render(self, ba_list):\n # initialized for storing figure labels with decoded hex values\n decoded_hex = list()\n\n print(\"Rendering Spatial Bitmaps:\")\n\n for steps in range(len(ba_list)):\n # encode bitarray into list of Spatial bits\n frame = self.encode(ba_list[steps])\n print(\"Encoded frame: \" + str(steps))\n # Add the new trace to the scatter\n tx = frame.x\n ty = frame.y\n tz = frame.z\n self.fig.add_trace(go.Scatter3d(visible=True, x=tx,y=ty,z=tz))\n\n # decode Frame object back into bitarray\n ba = self.decode(frame)\n # append decoded bitarray to decoded hex list for figure labelling\n decoded_hex.append(ba.tobytes().hex())\n print(\"Decoded frame: \" + str(steps))\n\n # clear arrays for next frame\n tx.clear()\n ty.clear()\n tz.clear()\n\n steps = []\n\n for i in range(len(self.fig.data)):\n step = dict(\n method=\"restyle\",\n args=[\"visible\", [False] * len(self.fig.data)],\n label=decoded_hex[i],\n )\n step[\"args\"][1][i] = True # Toggle i'th trace to \"visible\"\n steps.append(step)\n\n sliders = [dict(\n active=0,\n currentvalue={\"prefix\": \"Frame: \"},\n pad={\"t\": 50},\n steps=steps\n )]\n\n self.fig.update_layout(\n sliders=sliders,\n )\n\n self.fig.show()",
"def createplot(ntraces, npoints, fmin, fmax):\n global direction, f, axarr, x, y, l, lfit\n direction = [\"synch. osz.\", r\"$q_x$\", r\"$q_y$\"]\n color = ['g', 'r', 'b']\n f, axarr = plt.subplots(3, 1)#, sharex='col') # 2 plots in one\n plt.rcParams['font.size'] = 14\n plt.rcParams['savefig.format'] = 'pdf'\n plt.rcParams['mathtext.default'] = 'regular'\n plt.xlabel(\"f / kHz\" )\n plt.ion() \n f.suptitle(\"Beam Spectra\")\n x = [[fmin + n * (fmax - fmin) / npoints for n in range(2*npoints)], [fmin + n * (fmax - fmin) / npoints for n in range(npoints)], [fmin + n * (fmax - fmin) / npoints for n in range(npoints)]] # Fill x-data\n y = [[[0] * 2*npoints], [[0] * npoints], [[0] * npoints]] # Fill y-data for plots with zeros\n l = [[0] * (ntraces + 1), [0] * (ntraces + 1), [0] * (ntraces + 1)]\n lfit = [[0], [0], [0]]\n for j, ax in enumerate(axarr):\n for i in range(ntraces):\n y[j].append([0] * len(x[j]))\n l[j][i], = ax.plot(x[j], y[j][i], c = str( 0.1 + 0.1 * float(i)), ls = '-') # Append empty history spectra\n l[j][ntraces], = ax.plot(x[j], [0] * len(x[j]), '%s-' %color[j], lw = 2, label = \"current %s tune\" % direction[j]) # Last trace for empty, current sprectrum\n lfit[j], = ax.plot(x[j], [0] * len(x[j]), 'k--', lw = 2, label = \"Lorentzian\") # Add empty fit function \n ax.legend(fancybox=True, framealpha=0.5)\n ax.set_ylabel(\"Amplitude %s\" % direction[j])\n ax.ticklabel_format(style='sci', axis='x', scilimits=(-3, 3))\n ax.ticklabel_format(style='sci', axis='y', scilimits=(-3, 3))\n plt.show()\n #plt.tight_layout()\n return",
"def plotSaveROISignals(dataDir, saveFig = False, mode='Stimulus'):\n \n # Get parent dir\n t_series_dir = os.path.dirname(dataDir)\n\n # Get signal file if exists if not extract\n try:\n signalFile_path = os.path.join(dataDir, '*.csv')\n signalFile = (glob.glob(signalFile_path))[0] \n print('Signal file found...')\n except:\n print('Signal file not found proceeding with extraction from ROIs...')\n print('File: %s'% dataDir)\n (signalFile, chNames, usedChannel,\n roiKeys, usedRoiKey, usedExtLabel) = extractRawSignal(motCorrDir\n =dataDir)\n \n if mode == 'MarkPoints':\n xmlPath = os.path.join(t_series_dir, '*_MarkPoints.xml')\n xmlFile = (glob.glob(xmlPath))[0]\n \n \n # Read the file and organize the data frame for plotting\n # Data comes from the bg subtracted traces and tags comes from the csv\n # file which has the no bg subtracted traces. \n ROI_data = pd.read_csv(signalFile,sep='\\t',header=2,dtype='float')\n ROI_data = ROI_data.drop(['Unnamed: 0', 'tags'], axis=1) \n \n # Background subtraction by finding the 'bg' tag as background\n bg_data = ROI_data['bg']\n signal_data = ROI_data.subtract(bg_data, axis=0)\n signal_data = signal_data.drop(['bg'], axis=1) # Get rid of bg\n \n # dF/F by mean of traces\n mean_data = signal_data.mean(axis=0)\n signal_data = (signal_data - mean_data)/mean_data\n \n # Checking the tags of ROIs, while importing a pandas df the column names\n # which in our case are tags,that have the same name are altered with\n # a dot '.' and a number -> e.g. Layer1 & Layer1 -> Layer1 & Layer1.1\n # Here label the same type of ROIs the same again for convenient indexing\n signal_data = signal_data.T\n signal_data.index = [this.split(\".\")[0] for this in signal_data.index]\n signal_data = signal_data.T\n \n # Finding the unique tags and their occurences for plotting\n unique_columns, column_occurences = np.unique(signal_data.columns.values,\n return_counts=True)\n \n if mode == 'Stimulus':\n # Finding stimulus\n stimOutPath = os.path.join(t_series_dir, '_stimulus_output_*')\n stimOutFile_path = (glob.glob(stimOutPath))[0]\n (stimType, rawStimData) = readStimOut(stimOutFile=stimOutFile_path, \n skipHeader=1)\n stim_name = stimType.split('\\\\')[-1] \n \n stim_frames = rawStimData[:,7] # Frame information\n stim_vals = rawStimData[:,3] # Stimulus value\n uniq_frame_id = np.unique(stim_frames,return_index=True)[1]\n stim_vals = stim_vals[uniq_frame_id]\n stim_vals = stim_vals[:signal_data.shape[0]]\n stim_df = pd.DataFrame(stim_vals,columns=['Stimulus'],dtype='float')\n # Make normalized values of stimulus values for plotting\n stim_df = (stim_df/np.max(np.unique(stim_vals)))*5 \n elif mode == 'MarkPoints':\n a=5\n \n \n # Some color maps for plotting\n cmaps = OrderedDict()\n cmaps['Sequential'] = [\n 'Greys', 'Purples', 'Blues', 'Greens', 'Oranges', 'Reds',\n 'YlOrBr', 'YlOrRd', 'OrRd', 'PuRd', 'RdPu', 'BuPu',\n 'GnBu', 'PuBu', 'YlGnBu', 'PuBuGn', 'BuGn', 'YlGn']\n \n \n # Figure size etc.\n fig = plt.figure(1,figsize=(14, 12), facecolor='w', edgecolor='k')\n fig.suptitle('%s, stim: %s' % (os.path.basename(t_series_dir),stim_name),\n fontsize=16)\n \n subPlotNumbers = unique_columns.shape[0]\n nrows = round(float(subPlotNumbers)/float(2))\n if nrows == 1:\n ncols = 1\n else:\n ncols = 2\n for iSubplot, column_name in enumerate(unique_columns):\n # Add linear values 1-2-3 to traces to shift them for visualization\n add_to_shift_traces = np.linspace(1,column_occurences[iSubplot],\n column_occurences[iSubplot])\n curr_plot_data = signal_data[[column_name]] + 
add_to_shift_traces\n iSubplot = iSubplot +1\n ax = fig.add_subplot(nrows, ncols, iSubplot)\n curr_plot_data.plot(ax=ax,legend=False,\n colormap = cmaps['Sequential'][iSubplot],alpha=0.8)\n stim_df.plot(dashes=[6, 2],ax=ax, color='k')\n plt.title(column_name)\n plt.ylabel('dF/F')\n \n a = int(raw_input(\"How long you want to inspect this image?\"))\n plt.pause(a)\n \n # Save the figure if desired\n if saveFig:\n # Saving figure\n exp_ID = os.path.split(os.path.split(t_series_dir)[0])[1]\n save_name = 'dF-%s-%s' % (exp_ID, os.path.basename(t_series_dir))\n os.chdir(dataDir)\n plt.savefig('%s.png'% save_name, bbox_inches='tight')\n print('Figure saved')\n plt.close(fig)\n return None",
"def __draw_3d_trajectories(self, trace1, trace2, path):\n title = 'Two Aircraft Trajectory Plot'\n fig = plt.figure(figsize=(10, 10))\n ax = fig.gca(projection='3d')\n plt.grid(True)\n plt.title(title, loc='left')\n plt.axis('equal')\n \n ax.set_xlabel('X (m)')\n ax.set_ylabel('Y (m)')\n ax.set_zlabel('Z (m)')\n \n ta1, xa1, ya1, za1 = ([], [], [], [])\n for timeslice in trace1:\n t, x, y, z, psi, theta, phi, v, weight, fuel = timeslice\n ta1.append(t)\n xa1.append(x)\n ya1.append(y)\n za1.append(z)\n \n xmin = min(xa1)\n xmax = max(xa1)\n ymin = min(ya1)\n ymax = max(ya1)\n zmin = min(za1)\n zmax = max(za1)\n \n ta2, xa2, ya2, za2 = ([], [], [], [])\n for timeslice in trace2:\n t, x, y, z, psi, theta, phi, v, weight, fuel = timeslice\n ta2.append(t)\n xa2.append(x)\n ya2.append(y)\n za2.append(z)\n \n xmin = min(min(xa2), xmin)\n xmax = max(max(xa2), xmax)\n ymin = min(min(ya2), ymin)\n ymax = max(max(ya2), ymax)\n zmin = min(min(za2), zmin)\n zmax = max(max(za2), zmax)\n \n # Fix aspect ratio\n max_range = np.array([xmax - xmin, ymax - ymin, zmax - zmin]).max() / 2.0\n mid_x = (xmax + xmin) * 0.5\n mid_y = (ymax + ymin) * 0.5\n mid_z = (zmax + zmin) * 0.5\n ax.set_xlim(mid_x - max_range, mid_x + max_range)\n ax.set_ylim(mid_y - max_range, mid_y + max_range)\n ax.set_zlim(mid_z - max_range, mid_z + max_range)\n \n # Plot Trajectories\n plt.plot(xa1, ya1, za1, color='C0')\n plt.plot(xa2, ya2, za2, color='C3')\n \n # Draw t=0.0 marker for trace1\n #ax.text(xa1[0], ya1[0], za1[0]+1, \"t = %2.1f\" % ta1[0], color='b', alpha=0.5)\n #ax.scatter(xa1[0], ya1[0], za1[0], color='b', marker='o', s=100, alpha=0.5)\n # and now trace2\n #ax.text(xa2[0], ya2[0], za2[0]+1, \"t = %2.1f\" % ta2[0], color='r', alpha=0.5)\n #ax.scatter(xa2[0], ya2[0], za2[0], color='r', marker='o', s=100, alpha=0.5)\n \n # Draw t=tmax marker for trace1\n ax.text(xa1[-1]+50, ya1[-1]+50, za1[-1]+750, \"t = %2.1f s\" % ta1[-1], color='C0', alpha=0.9)\n ax.scatter(xa1[-1], ya1[-1], za1[-1], color='C0', marker='>', s=100, alpha=0.5)\n # and now for trace 2\n ax.text(xa2[-1]+50, ya2[-1]+50, za2[-1]+750, \"t = %2.1f s\" % ta2[-1], color='C3', alpha=0.9)\n ax.scatter(xa2[-1], ya2[-1], za2[-1], color='C3', marker='>', s=100, alpha=0.5)\n plt.savefig(path)\n plt.close()",
"def plot_patterned_input_individual_trial_traces(rec_t, vm_array, theta_traces, ramp_traces, index=None,\n svg_title=None):\n if svg_title is not None:\n remember_font_size = mpl.rcParams['font.size']\n mpl.rcParams['font.size'] = 8\n if index is not None:\n index_range = [index]\n else:\n index_range = list(range(len(vm_array)))\n for i in index_range:\n fig, axes = plt.subplots(3, sharey=True, sharex=True)\n label_handles = []\n axes[0].plot(rec_t, vm_array[i], color='k', label='Raw Vm')\n axes[0].set_axis_off()\n label_handles.append(mlines.Line2D([], [], color='k', label='Raw Vm'))\n axes[1].plot(rec_t, ramp_traces[i], color='r', label='Subthreshold Vm')\n axes[1].set_axis_off()\n label_handles.append(mlines.Line2D([], [], color='r', label='Subthreshold Vm'))\n axes[2].plot(rec_t, theta_traces[i], color='c', label='Theta Vm')\n label_handles.append(mlines.Line2D([], [], color='c', label='Theta Vm'))\n axes[2].set_xlim(0., 7500.)\n axes[2].set_ylim(-67., 30.)\n clean_axes(axes)\n if svg_title is not None:\n #axes[1].legend(handles=label_handles, loc='best', frameon=False, framealpha=0.5,\n # fontsize=mpl.rcParams['font.size'])\n fig.set_size_inches(2.7696, 3.1506)\n fig.savefig(data_dir+svg_title+str(i)+' - example traces.svg', format='svg', transparent=True)\n plt.show()\n plt.close()\n if svg_title is not None:\n mpl.rcParams['font.size'] = remember_font_size",
"def AtlasPlots(cf, p, atlas, m_array, EnergyHistory): \n\n fig = plt.figure(1)\n fig.patch.set_facecolor('white')\n\n TE = [sum(x) for x in EnergyHistory] \n VE = [row[0] for row in EnergyHistory] \n IE = [row[1] for row in EnergyHistory] \n\n plt.subplot(1,3,1)\n plt.plot(TE)\n plt.title('Total Energy')\n plt.hold(False)\n plt.subplot(1,3,2)\n plt.plot(VE)\n plt.title('Vector Energy')\n plt.hold(False)\n plt.subplot(1,3,3)\n plt.plot(IE)\n plt.title('Image Energy')\n plt.hold(False)\n plt.draw()\n plt.show()\n\n if cf.io.outputPrefix != None: \n energyFilename = cf.io.outputPrefix + \"Energy.pdf\"\n plt.savefig(energyFilename)",
"def PlotToFilePath(self) -> str:",
"def plot_collection(self):\n frame = self.cards[0]\n pad = np.zeros((frame.shape[0], 20, frame.shape[2]), dtype=np.uint8)\n for card in self.cards[1:]:\n frame = np.append(frame, pad, axis=1)\n frame = np.append(frame, card, axis=1)\n\n im = Image.fromarray(frame)\n im.save(f\"{self.output_dir}/FrameCollection.png\")",
"def create_picture():\n result = Data.query.all()\n\n date = []\n cases = []\n death = []\n for r in result:\n date.append(str(r.date))\n cases.append(r.cases)\n death.append(r.death)\n\n pic_path = 'daily_report_{}.png'.format(date[-1])\n\n fig = plt.figure(figsize=(100, 50))\n\n ax1 = fig.add_subplot(111)\n ax1.set_title(\"Covid-19 Daily Report\", fontsize=70)\n ax1.set_ylabel('Cases', fontsize=40)\n\n plt.xticks(rotation=270, fontsize=40)\n plt.yticks(fontsize=50)\n plot1 = ax1.plot(date, cases, '-*', color='r', label='cases')\n\n ax2 = ax1.twinx() # this is the important function\n\n plot2 = ax2.plot(date, death, '-o', color='g', label='death')\n lines = plot1 + plot2\n\n ax2.set_ylabel('Death', fontsize=40)\n ax2.set_xlabel('Date', fontsize=70)\n ax2.tick_params(axis='y', labelsize=50)\n\n plt.gca().xaxis.set_major_locator(ticker.MultipleLocator(12))\n ax1.legend(lines, [l.get_label() for l in lines], fontsize=50)\n\n plt.savefig(pic_path)\n plt.cla()\n plt.clf()\n plt.close()\n return pic_path",
"def make_traceplot(samples, var_name: str):\n fig, ax = plt.subplots()\n fig.set_size_inches([16, 8])\n ax.set_title(f'Trace Plot of {var_name}')\n ax.plot(samples, alpha=0.3, color='b')\n ax.grid()",
"def plot_footprint(img_file_name, camera_file, \n reference_dem, output_directory=None,\n basemap='ctx', cam_on=True,\n verbose=False):\n # TODO\n # - Add tsai camera plotting.\n \n out_dir_abs = bare.io.create_dir(output_directory)\n img_base_name = os.path.splitext(os.path.split(img_file_name)[-1])[0]\n cam_extension = os.path.splitext(camera_file)[-1] \n \n footprint_polygon = prepare_footprint(img_file_name,\n camera_file,\n reference_dem,\n verbose=verbose)\n \n if type(footprint_polygon) == gpd.geodataframe.GeoDataFrame:\n print('Plotting camera footprint.')\n if basemap == 'ctx':\n footprint_polygon = footprint_polygon.to_crs(epsg=3857)\n \n footprint_polygon = bare.geospatial.extract_polygon_centers(footprint_polygon)\n\n fig, ax = plt.subplots(1,figsize=(10,10))\n footprint_polygon.plot(ax=ax,\n facecolor=\"none\",\n edgecolor='b')\n \n if cam_on == True:\n if cam_extension == '.xml':\n ax.set_title('camera footprint and scanner positions')\n camera_positions = bare.core.wv_xml_to_gdf(camera_file)\n if basemap == 'ctx':\n camera_positions = camera_positions.to_crs(epsg=3857)\n # add coordinates as seperate columns to gdf\n bare.geospatial.extract_gpd_geometry(camera_positions)\n # annotate start and end of aquisition\n plt.annotate(s='start',\n xy=(camera_positions.iloc[0].x, camera_positions.iloc[0].y),\n horizontalalignment='center')\n \n plt.annotate(s='end',\n xy=(camera_positions.iloc[-1].x, camera_positions.iloc[-1].y),\n horizontalalignment='center')\n \n elif cam_extension == '.tsai':\n ax.set_title('camera footprint and position')\n camera_positions = bare.core.tsai_to_gdf(camera_file)\n if basemap == 'ctx':\n camera_positions = camera_positions.to_crs(epsg=3857)\n \n # # Not sure if this is useful to be labeled for tsai.\n # # add coordinates as seperate columns to gdf\n # bare.geospatial.extract_gpd_geometry(camera_positions)\n # # annotate camera position\n # plt.annotate(s='camera position',\n # xy=(camera_positions.iloc[-1].x, camera_positions.iloc[-1].y),\n # horizontalalignment='center')\n \n if basemap == 'ctx':\n camera_positions = camera_positions.to_crs(epsg=3857)\n camera_positions.plot(ax=ax,marker='.',color='b')\n \n line0, line1, line2, line3 = plot_cam(footprint_polygon, \n camera_positions, \n basemap=basemap, \n camera_type='.xml')\n line0.plot(ax=ax,color='b')\n line1.plot(ax=ax,color='b')\n line2.plot(ax=ax,color='b')\n line3.plot(ax=ax,color='b')\n \n else:\n ax.set_title('camera footprint')\n \n if basemap == 'ctx':\n add_ctx_basemap(ax)\n\n for idx, row in footprint_polygon.iterrows():\n plt.annotate(s=row['file_name'],\n xy=row['polygon_center'],\n horizontalalignment='center')\n \n \n\n if out_dir_abs is not None:\n out = os.path.join(out_dir_abs, img_base_name+'_footprint.png')\n fig.savefig(out, bbox_inches = \"tight\")\n plt.close()\n else:\n plt.show()\n else:\n pass",
"def save_scatter_frame(state, file_name, animate_CM, animate_2D_scatter, animate_3D_scatter, init_HII, size_viewing_window, folder):\n # creating ticks on axis\n amount_of_pc = int(size_viewing_window / pc) + 1\n max_amount_ticks = 21\n factor_pc = int(amount_of_pc / max_amount_ticks) + 1\n amount_of_ticks = int(amount_of_pc / factor_pc) + 1\n middle_tick = int(amount_of_ticks / 2) # should be +1 but since python starts counting at 0, i is the (i+1)th item\n distance_values = []\n axis_labels = []\n for i in range(amount_of_ticks):\n axis_labels.append((i - middle_tick) * factor_pc)\n distance_values.append((i - middle_tick) * factor_pc * pc)\n\n # if the simulation is in 2D\n if animate_2D_scatter:\n fig = plt.figure()\n fig.set_size_inches(10, 10) # 10 inches wide and long\n ax = fig.add_subplot(111)\n\n # Plot the BGG\n if state.init_BGG:\n plt.scatter(0, 0, s=1.24e6 * state.outer_radius_cloud**2\\\n * state.size_viewing_window**(-2), label = \"Background gas\", \\\n facecolor = \"#0390fc\", alpha=0.5)\n\n # plot HII region\n if init_HII and state.star and state.init_BGG:\n ax.scatter(0, 0, s=1.24e6 * state.HII_radius**2 * \\\n state.size_viewing_window**(-2), label = \"HII region\", \\\n facecolor = \"white\")\n\n # plot clumps\n for clump in state.clumps:\n plt.scatter(clump.x, clump.y, s=1.24e6 * clump.R**2 * \\\n state.size_viewing_window**(-2), label = \"Clump\", \\\n facecolor = \"#0303fc\")\n\n # plot star\n if state.star:\n plt.scatter(state.star.x, state.star.y, label=\"Star\",\\\n facecolor=\"red\")\n\n # plot centre of mass\n if animate_CM:\n plt.scatter(state.CM[0], state.CM[1], label = \"Centre of Mass\", \\\n facecolor = \"green\")\n\n # settings that apply for both 2D and 3D\n # ax.set_xlabel('Distance (pc)')\n # ax.set_ylabel('Distance (pc)')\n\n ax.set_xticks(distance_values)\n ax.set_xticklabels(axis_labels)\n ax.set_yticks(distance_values)\n ax.set_yticklabels(axis_labels)\n\n ax.set_xlim(-size_viewing_window / 2, size_viewing_window / 2)\n ax.set_ylim(-size_viewing_window / 2, size_viewing_window / 2)\n\n ax.set_xlabel('Distance (pc)')\n ax.set_ylabel('Distance (pc)')\n plt.title(\"State of cloud after %.2f Myr\" %(state.time / Myr))\n plt.grid()\n\n fig.savefig(my_path + folder + \"/scatter_frames_2D/\" + file_name + \".png\")\n plt.close(fig)\n\n # if the simulation is in 3D\n if animate_3D_scatter:\n fig = plt.figure()\n fig.set_size_inches(10, 10) # 10 inches wide and long\n ax = fig.add_subplot(111, projection='3d')\n\n # Plot the BGG\n if state.init_BGG:\n if state.HII_radius < size_viewing_window:\n ax.scatter(0, 0, s=0.33e6 * state.outer_radius_cloud**2\\\n * state.size_viewing_window**(-2), label = \"Background gas\", \\\n facecolor = \"#0390fc\", alpha=0.5)\n\n # plot HII region\n if init_HII and state.star and state.init_BGG:\n ax.scatter(0, 0, 0, s=0.33e6 * state.HII_radius**2\\\n * state.size_viewing_window**(-2), label = \"HII region\", \\\n facecolor = \"white\", alpha=0.5)\n\n # plot star\n if state.star:\n ax.scatter(state.star.x, state.star.y, state.star.z, label=\"Star\",\\\n facecolor=\"red\")\n\n # plot clumps\n for clump in state.clumps:\n ax.scatter(clump.x, clump.y, clump.z, s=1.24e6 * clump.R**2 * \\\n state.size_viewing_window**(-2), label = \"Clump\", \\\n facecolor = \"#0303fc\")\n\n # plot centre of mass\n if animate_CM:\n ax.scatter(state.CM[0], state.CM[1], state.CM[2], label = \"Centre of Mass\", \\\n facecolor = \"green\")\n\n # settings that apply for both 2D and 3D\n ax.set_xlabel('Distance (pc)')\n ax.set_ylabel('Distance (pc)')\n 
ax.set_zlabel('Distance (pc)')\n\n ax.set_xticks(distance_values)\n ax.set_xticklabels(axis_labels)\n ax.set_yticks(distance_values)\n ax.set_yticklabels(axis_labels)\n ax.set_zticks(distance_values)\n ax.set_zticklabels(axis_labels)\n\n ax.set_xlim(-size_viewing_window / 2, size_viewing_window / 2)\n ax.set_ylim(-size_viewing_window / 2, size_viewing_window / 2)\n ax.set_zlim(-size_viewing_window / 2, size_viewing_window / 2)\n\n ax.set_xlabel('Distance (pc)')\n ax.set_ylabel('Distance (pc)')\n plt.title(\"State of cloud after %.2f Myr\" %(state.time / Myr))\n plt.grid()\n\n fig.savefig(my_path + folder + \"/scatter_frames_3D/\" + file_name + \".png\")\n plt.close(fig)",
"def plot_frame(source, spectral_lines=None, plot_fit_points=False, plot_circ_fit=False,\n plot_line_fit=False, window_name='Frame plot', control=None):\n\n frame_ds = None\n if source[P.naming_frame_data] is not None:\n frame = source[P.naming_frame_data]\n frame_ds = source\n else:\n frame = source\n\n height = source[P.dim_y].size\n\n _,ax = plt.subplots(num=window_name,nrows=2, figsize=plotting.get_figure_size())\n ax[0].imshow(frame, origin='lower')\n\n if spectral_lines is not None:\n # Colormap\n cmap = cm.get_cmap('PiYG')\n\n x_offset = 0\n y_offset = 0\n\n if control is not None:\n x_offset = control[P.ctrl_scan_settings][P.ctrl_width_offset]\n y_offset = control[P.ctrl_scan_settings][P.ctrl_height_offset]\n\n for i,sl in enumerate(spectral_lines):\n # Change color for every circle\n color = cmap(1 / (i+1) )\n if plot_circ_fit:\n ax[0].add_artist(plt.Circle((sl.circ_cntr_x + x_offset, sl.circ_cntr_y + y_offset),\n sl.circ_r, color=color, fill=False))\n if plot_fit_points:\n xx = sl.x + x_offset\n yy = sl.y + y_offset\n ax[0].plot(xx,yy,'.',linewidth=1,color=color)\n if plot_line_fit:\n liny = (sl.line_a*sl.x+sl.line_b) + y_offset\n liny = np.clip(liny, 0, frame[P.dim_y].size)\n ax[0].plot(sl.x, liny, linewidth=1,color=color)\n\n if frame_ds is not None and len(frame_ds.attrs) >= 1:\n print(f\"Frame metadata from Dataset:\")\n for key,val in frame_ds.attrs.items():\n print(f\"\\t{key} : \\t{val}\")\n\n if len(frame.attrs) >= 1:\n print(f\"Frame metadata from DataArray:\")\n for key,val in frame.attrs.items():\n print(f\"\\t{key} : \\t{val}\")\n\n ### Spectrogram\n row_selection = np.linspace(height * 0.1, height * 0.9, num=3, dtype=np.int)\n rows = frame.isel({P.dim_y:row_selection}).values\n rows = rows.transpose()\n ax[1].plot(rows)\n\n plt.show()",
"def create_scatter_plot(self):\n xy = self.get_x_and_y_as_dict()\n x = xy[\"x\"]\n y = xy[\"y\"]\n plt.scatter(x, y)\n plt.xlabel(\"x\")\n plt.ylabel(\"y\")\n plt.title(\"Scatter plot of x and y values\")\n plt.savefig(f\"{self.save_directory}/task_2_scatter_plot.png\")",
"def save_plot(dataframe, filename):\n plt.clf()\n dataframe.plot()\n plt.savefig(filename)",
"def mk_raw_vel_trace_figures():\n # use the same data as in mk_eyegaze_classification_figures()\n # (no need for file retrieval, should be there)\n datalad_get(op.join('data', 'raw_eyegaze'), get_data=False)\n infiles = [\n op.join(\n 'data',\n 'raw_eyegaze', 'sub-32', 'beh',\n 'sub-32_task-movie_run-5_recording-eyegaze_physio.tsv.gz'),\n op.join(\n 'data',\n 'raw_eyegaze', 'sub-02', 'ses-movie', 'func',\n 'sub-02_ses-movie_task-movie_run-5_recording-eyegaze_physio.tsv.gz'\n ),\n ]\n # we need the sampling rate for plotting in seconds and velocity calculation\n sr = 1000\n # load data\n for i, f in enumerate(infiles):\n # read data\n datalad_get(f)\n data = np.recfromcsv(f,\n delimiter='\\t',\n names=['x', 'y', 'pupil', 'frame'])\n\n # subset data. Hessels et al., 2017 display different noise levels on 4\n # second time series (ref. Fig 10). That still looks a bit dense, so we\n # go with 2 seconds, from start of 10sec excerpt to make it easier to\n # associate the 2 sec excerpt in to its place in the 10 sec excerpt\n # above\n data_subset = data[15000:17000]\n px2deg, ext = (0.0266711972026, 'lab') if '32' in f \\\n else (0.0185581232561, 'mri')\n # take raw data and convert it to velocity: euclidean distance between\n # successive coordinate samples. Note: no entry for first datapoint!\n # Will plot all but first data point in other time series\n velocities = cal_velocities(data_subset, sr, px2deg)\n vel_color = 'xkcd:gunmetal'\n # prepare plotting - much manual setup, quite ugly - sorry\n fig, ax1 = plt.subplots()\n fig.set_figheight(2)\n fig.set_figwidth(7)\n fig.set_dpi(120)\n time_idx = np.linspace(0, len(data_subset) / sr, len(data_subset))[1:]\n max_x = float(len(data_subset) / sr)\n ax1.set_xlim(0, max_x)\n ax1.set_xlabel('time (seconds)')\n ax1.set_ylabel('coordinates')\n # left y axis set to max screensize in px\n ax1.set_ylim(0, 1280)\n # plot gaze trajectories (not preprocessed)\n ax1.plot(time_idx,\n data_subset['x'][1:],\n color='black', lw=1)\n ax1.plot(\n time_idx,\n data_subset['y'][1:],\n color='black', lw=1)\n # right y axis shows velocity \"as is\" (not preprocessed)\n ax2 = ax1.twinx()\n ax2.set_ylabel('velocity (deg/sec)', color=vel_color)\n ax2.tick_params(axis='y', labelcolor=vel_color)\n #ax2.set_yscale('log') ## TODO: Log scale or not?\n ax2.set_ylim(1, 2000)\n ax2.plot(time_idx,\n velocities,\n color=vel_color, lw=1)\n plt.savefig(\n op.join('img', 'rawtrace_{}.svg'.format(ext)),\n transparent=True,\n bbox_inches=\"tight\",\n metadata={'Date': None})\n plt.close()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Given the feature set xFeat, predict what class the values will have.
|
def predict(self, xFeat):
    yHat = []
    for row in xFeat:
        row_df = pd.DataFrame(row)
        votes = []
        # Each tree votes using only the feature subset it was trained on.
        for tree, features in zip(self.trees, self.features):
            xtest = row_df.iloc[features]
            votes.append(tree.predict(xtest.T)[0])
        # Majority vote across the ensemble decides the predicted class label.
        yHat.append(np.argmax(np.bincount(votes)))
    return yHat
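
For intuition, the voting step can be looked at in isolation; this assumes class labels are small non-negative integers, which np.bincount requires:

import numpy as np

votes = [1, 0, 1, 1, 2]          # one predicted label per tree in the ensemble
counts = np.bincount(votes)      # array([1, 3, 1]): one vote for 0, three for 1, one for 2
print(np.argmax(counts))         # 1, the majority class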
|
[
"def classify1(self,X):\n prediction = self.classify.predict(X)\n \n return prediction",
"def predict(self, X: np.ndarray) -> np.ndarray:\n return np.array([self._classify(x) for x in X])",
"def predict(self, x_set):\n def classify(x):\n # Pick top-voted label among the k nearest neighbors.\n label_votes = self.knn_label_votes(x)\n return max(label_votes, key=label_votes.get)\n\n return np.array(list(map(classify, x_set)))",
"def predict(self, X):\n # predict the class of y with classifier\n classes = self.clf.predict(X)\n \n # create default regressor predictions - zeros\n y_pred = np.zeros(X.shape[0])\n \n for lbl, r in zip(self._class_labels, self.regs):\n # use the portion of X with the given label \n mask = (classes == lbl)\n \n if sum(mask) > 0:\n # fit the regressor for this class\n y_pred[np.array(mask)] = r.predict(X[mask])\n \n return y_pred",
"def predict(self, X):\n probabilities = self.predict_probability(X)\n\n def classForProbability(probability):\n if probability > 0.5:\n return self.classOneLabel\n return self.classZeroLabel\n\n return numpy.array([\n classForProbability(p) for p in probabilities\n ])",
"def predict(self, X):\n\n # List with size X.shape[0] and each value is a dict too,\n # Ex: [{0:0.2, 1:0.7}, {1:0.3, 2:0.5}]\n list_label_instance = []\n\n # For each classifier in self.models, predict the labels for X\n for model in self.models:\n clf = model.clf\n pred = clf.predict(X)\n weight = model.weight\n for i, label in enumerate(pred.tolist()):\n if i == len(list_label_instance): # maintain the dictionary\n list_label_instance.append({label: weight})\n else:\n try:\n list_label_instance[i][label] += weight\n except:\n list_label_instance[i][label] = weight\n\n predict_weighted_voting = []\n for dic in list_label_instance:\n max_value = max(dic.items(), key=operator.itemgetter(1))[0] # return the key of max value in a dict\n predict_weighted_voting.append(max_value)\n\n return predict_weighted_voting",
"def predict(self, X):\n\n predicted_probabilitiy = self.predict_proba(X)\n return self.classes_.take((np.argmax(predicted_probabilitiy, axis=1)),\n axis=0)",
"def predict(self, x, **kwargs):\n # compute predictions from different models of the ensemble\n predictions_list = np.array([classifier.predict(x) for classifier in self.ensemble_classifiers])\n # sum the probabilities across all predictors\n ensemble_predictions = np.sum(predictions_list, axis=0)\n return ensemble_predictions",
"def predict(self, X):\n proba = {}\n total_probabilities = np.array([])\n for classifier_index in range(1, 5):\n clf = self.classifiers[classifier_index]\n proba[classifier_index] = clf.predict_proba(X)[:, 1]\n for class_index in range(1, 6):\n if class_index == 1:\n # probability = 1 - probability(bigger than 1)\n total_probabilities = np.vstack(1 - proba[class_index])\n elif 1 < class_index < 5:\n # probability = probabillity(bigger than i) - probability(bigger than i-1)\n total_probabilities = np.column_stack((total_probabilities, (proba[class_index-1]-proba[class_index])))\n elif class_index == 5:\n # probability = probability(bigger than 4)\n total_probabilities = np.column_stack((total_probabilities, (proba[class_index-1])))\n # add one to the results because indexes start at 0, but classes range in (1 - 5)\n results = np.argmax(total_probabilities, axis=1) + 1\n return results",
"def predict(self, X):\n return predicted_value",
"def predict(self, features):\n features = self.normalise(features)\n return self.clf.predict(features)",
"def supervised_predict(self, x):\n\n z_ik = self.get_posterior(x)\n N = x.shape[0]\n y_hat = np.zeros((1,N)).reshape(N,)\n\n cluster_index = np.argmax(z_ik, axis=1)\n\n n_components = self._n_components\n print('cluster_index', cluster_index)\n print('cluster map', self.cluster_label_map)\n for i in range(N):\n y_hat[i] = self.cluster_label_map[cluster_index[i]]\n\n\n return np.array(y_hat)",
"def predict(self, x):\n # Create an array to store predictions in. Add an extra dimension if this\n predictions = []\n # Loop over the cross-validation models\n for i, model in enumerate(self._models):\n\n # Make and store predictions\n predictions.append(model.predict(x).flatten())\n predictions=np.asarray(predictions)\n # Get the mean and standard deviation of predictions\n mean_preds = np.mean(predictions, axis = 0)\n stdev_preds = np.std(predictions, axis = 0)\n # Return the mean predictions and standard deviation of predictions\n return mean_preds, stdev_preds",
"def predict(self, x):\n predictionList=[]\n if self._model.loaded:\n for xValue in x:\n systemLabel=self._model.infer_topic(xValue)\n result=self._model.topicLabelling[systemLabel]\n predictionList.append(int(result))\n else:\n self._util.logError('TopicClusteringPredictionModel','Model needs to be loaded before prediction')\n\n return predictionList",
"def predict(self, x_data):\n \n # Filtering data if required\n if self.filter_variables is not None:\n x_data = x_data[:,self.filter_variables]\n \n # Initialization of predictions\n predictions = np.zeros(x_data.shape[0])\n \n # Prediction for each subject\n for subject_index in range(x_data.shape[0]):\n \n # Computing the log likelihoods\n log_likelihood = np.array(\n [\n self.kde[0].score(x_data[subject_index,:].reshape(1, -1)),\n self.kde[1].score(x_data[subject_index,:].reshape(1, -1))\n ]\n )\n \n # Computing the log posteriori unnormalized\n log_posteriori_unnormalized = log_likelihood + self.classes_log_distribution\n predictions[subject_index] = 1 if log_posteriori_unnormalized[1] > log_posteriori_unnormalized[0] else 0\n \n # Return the predictions made by the model\n return predictions",
"def _classify(self, X: np.ndarray) -> int:\n assert len(X) == self.num_features\n posteriors = []\n for c in self.classes:\n prior = self.priors[c] # Pr(c)\n likelihood = 1\n for i, dist in enumerate(self.distributions[c]):\n likelihood *= norm(dist.mean, dist.std).pdf(X[i]) # Pr(X | c)\n posteriors.append(prior * likelihood)\n # Normalize to add up to 1\n posteriors = normalize(posteriors)\n # Return class with highest posterior\n return self.classes[np.argmax(posteriors)]",
"def predict_XGB(self):\r\n pass",
"def predict_meta(self, X):\n # Check parameters and run approriate prediction\n if self.use_probability:\n\n probs = np.asarray([clf.predict_probs(X) for clf in self.clfs_])\n\n if self.average_probs:\n preds = np.average(probs, axis=0)\n\n else:\n preds = np.concatenate(probs, axis=1)\n\n else:\n preds = np.column_stack([clf.predict(X) for clf in self.clfs_])\n \n return preds",
"def predict(self, X):\n n = X.shape[0]\n m = self.num_obj\n Y_m = np.ndarray((n, m))\n Y_v = np.ndarray((n, m))\n for i in xrange(m):\n if self.denoised:\n if hasattr(self.surrogates[i],'likelihood') and hasattr(self.surrogates[i].likelihood,'variance'):\n noise = self.surrogates[i].likelihood.variance\n else:\n noise = 0.\n else:\n noise = 0.\n m, v = self.surrogates[i].predict(X)\n Y_m[:, i] = m.flatten()\n Y_v[:, i] = v.flatten() - noise\n return Y_m, Y_v"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Read an input file and convert it to numpy
|
def file_to_numpy(filename):
df = pd.read_csv(filename)
return df.to_numpy()
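
A small usage sketch (the file and its contents are made up); note that pd.read_csv treats the first line as a header by default, so only the data rows end up in the array:

with open("tiny.csv", "w") as fh:
    fh.write("a,b\n1,2\n3,4\n")
arr = file_to_numpy("tiny.csv")
print(arr)   # [[1 2]
             #  [3 4]]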
|
[
"def _file_to_array(self, file, type=int):\n\n mlist = []\n for line in open(file):\n mlist.append(line)\n return np.asarray(mlist, dtype=type)",
"def txt_to_array(pathname, shape):\n import numpy as np\n f = open(pathname, 'r')\n data = np.array(\n [float(i) for i in f.read().split()]).reshape(shape)\n f.close()\n return data",
"def csv_to_ndarray(fname): \n\t\ttry:\n\t\t\treturn np.genfromtxt(fname, delimiter=\",\")\t\n\t\texcept Exception, e:\n\t\t\tprint \"Error loading file %s:\" % fname\n\t\t\traise",
"def fileInput(fileName):\r\n #arr = pd.read_csv(fileName)\r\n file = np.genfromtxt(fileName, delimiter=',') \r\n arr = DataFrame(data = file, columns=['y','x1','x2'])\r\n arr = arr.as_matrix()\r\n return arr",
"def load_data_array(fname):\n data = np.genfromtxt(fname)\n #data = np.load(fname)\n return data",
"def data2array(filepath):\n file = open(filepath, 'r')\n skip_bill = file.readline() #skip over column name\n lines = file.readlines()\n\n lst = []\n #iterate through the lines and append to list\n for line in lines:\n line = line.strip() #get rid of the \\n\n value = float(line) #get the float value\n lst.append(value)\n\n arr = np.asarray(lst)\n return arr",
"def FileOpen(filename):\n if filename[-4:] != \".txt\":\n filename = filename + \".txt\"\n\n data = np.array([])\n\n nlines = 0\n\n file = open(filename, \"r\") # opens on 'read' mode\n\n for line in file:\n nlines += 1\n data = np.append(data, np.fromstring(line, dtype=np.float, sep=','))\n\n file.close\n\n data = np.reshape(data, (nlines, int(data.size / nlines)))\n\n return data",
"def run_numpy(in_file, out_file):\n data = np.loadtxt(in_file, delimiter=',')\n data = np.array([[1, 2, 3], [1, 4, 9]])\n np.savetxt(out_file, data, delimiter=',')",
"def readmask(name) :\n f=open(name)\n data=[]\n for line in f :\n data.append(float(line))\n return np.array(data)",
"def read_matrix(transfo_file):\n from numpy import loadtxt\n lines = loadtxt(transfo_file)\n return np.asarray(lines)",
"def file_to_array(file_path):\n res_array = []\n if os.path.isfile(file_path):\n with open(file_path, 'r', encoding='utf-8') as file_content:\n for line in file_content:\n res_array.append(int(line))\n return res_array\n else:\n print(file_path, 'file does not exist.')",
"def readspec(name) :\n f=open(name)\n data=[]\n for line in f :\n spec=np.array(line.split())\n spec=spec.astype(float)\n data.append(spec)\n return np.array(data)",
"def load_np_file(file_name: str, full_path=False) -> np.array:\n if not full_path:\n file_name = os.path.join(NUMPY_DIR, file_name)\n\n if not os.path.isfile(file_name):\n raise OSError(\"{0} does not exist!\".format(file_name))\n f = open(file_name, \"rb\")\n arr = np.load(f)\n f.close()\n return arr",
"def readdata(filename):\n\n infile = open(filename, \"r\")\n lines = infile.readlines()\n infile.close()\n \n xlist = []\n ylist = []\n for line in lines:\n coor = line.split()\n x = float(coor[1])\n y = float(coor[2])\n xlist.append(x)\n ylist.append(y)\n \n xarr = np.array(xlist)\n yarr = np.array(ylist)\n \n return xarr, yarr",
"def _csv_to_numpy(string_like, dtype=None): # type: (str) -> np.array\n stream = StringIO(string_like)\n return np.genfromtxt(stream, dtype=dtype, delimiter=\",\")",
"def image_as_numpy(filename):\n return np.array(Image.open(filename), dtype=np.float)",
"def load_ascii_data(fname):\n return np.genfromtxt(fname)",
"def read_array(filename, dtype, separator='\\t'):\n cast = numpy.cast\n data = [[] for dummy in xrange(len(dtype))]\n for line in open(filename, 'r'):\n fields = line.strip().split(separator)\n for i, number in enumerate(fields):\n data[i].append(number)\n for i in xrange(len(dtype)):\n data[i] = cast[dtype[i]](data[i])\n return numpy.rec.array(data, dtype=dtype)",
"def extract(filepath):\r\n with open(filepath, \"r\") as f:\r\n dataset = f.readlines()\r\n dataset = map(lambda i: i.strip('\\n').split(';'), dataset)\r\n dataset = np.array(list(dataset))\r\n return dataset",
"def load_npy(name):\n\twith open(name, \"rb\") as fr:\n\t\treturn np.load(fr)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Distributes position over a lattice given the type
|
def position_lattice(type='cubic'):
    x = np.zeros(system.N)
    y = np.zeros(system.N)
    z = np.zeros(system.N)
    # z planes for the rectangular lattice: evenly spaced along L[2] and
    # shifted by half a spacing so particles sit at the cell centres.
    places_z = np.linspace(0, system.L[2], num=system.n[2], endpoint=False)
    places_z += places_z[1] * 0.5
    n_part = 0
    for i, j, k in itertools.product(list(np.arange(system.n[0])), list(np.arange(system.n[1])), list(np.arange(system.n[2]))):
        x[n_part] = system.alat * i
        y[n_part] = system.alat * j
        if type == 'cubic':
            z[n_part] = system.alat * k
        elif type == 'rectangular-z':
            z[n_part] = places_z[k]
        else:
            print("Error: unknown lattice type.")
        n_part += 1
    system.pos[:, 0] = x
    system.pos[:, 1] = y
    system.pos[:, 2] = z
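
A sketch of the module-level state the function above relies on. The `System` class and its values here are made up; only the attribute names (N, L, n, alat, pos) are taken from the code itself, and numpy/itertools are assumed to be imported alongside the function:

import itertools
import numpy as np

class System:
    n = np.array([4, 4, 4])      # particles per axis
    N = int(np.prod(n))          # total particle count
    alat = 1.0                   # lattice constant
    L = alat * n                 # box lengths
    pos = np.zeros((N, 3))

system = System()
position_lattice(type='cubic')
print(system.pos[:4])            # first few lattice positions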
|
[
"def position_generator(lattice_structure, nx = 20, ny = 20):\n\n a1 = None\n a2 = None\n phi = None\n\n if lattice_structure=='hexagonal':\n a1 = a2 = random.uniform(0.8, 2)\n phi = 2*(math.pi/3)\n \n elif lattice_structure=='square':\n a1 = a2 = random.uniform(0.8, 2)\n phi = math.pi/2\n \n elif lattice_structure=='rectangular':\n a2 = random.uniform(0.8,2)\n a1 = a2*random.uniform(1.09,1.5)\n phi = math.pi/2\n \n elif lattice_structure=='oblique':\n a1 = random.uniform(0.8, 2)\n a2 = random.uniform(0.8, 2)\n phi = random.uniform(0, math.pi)\n while(math.isclose(phi, math.pi/2, abs_tol=1e-3)):\n phi = random.uniform(0, math.pi)\n \n elif lattice_structure=='noise':\n a1 = random.uniform(0.8, 2)\n a2 = random.uniform(0.8, 2)\n phi = random.uniform(0, math.pi)\n while(math.isclose(phi, math.pi/2, abs_tol=1e-3)):\n phi = random.uniform(0, math.pi)\n \n else:\n a1 = a2 = random.uniform(0.8, 2)\n phi = random.uniform(0, math.pi)\n while(math.isclose(phi, math.pi/2, abs_tol=1e-1)):\n phi = random.uniform(0, math.pi)\n \n\n\n nx = ny = 20\n nx, ny = np.meshgrid(np.arange(nx), np.arange(ny))\n atom_pos = []\n for nxx, nyy in zip(nx.ravel(), ny.ravel()):\n x_ind = nxx*a1 +nyy*a2*np.cos(phi)\n y_ind = nyy*a2*np.sin(phi)\n atom_pos.append((x_ind, y_ind))\n atom_pos = np.array(atom_pos)\n return atom_pos, a1, a2",
"def make_lattice(self, latt_type = 'cubic', lat_parms):\n\n if latt_type = 'cubic':\n lx, ly, lz = lat_parms\n latt = {}\n latt['box'] = ['cubic', lx, ly, lz]\n latt['xyzs'] = []\n\n # box dimensions in lattice units\n\n # layer number\n for iz in range(lz):\n # layer structure\n for iy in range(ly):\n for ix in range(lx):\n latt['xyzs'].append([ix, iy, iz,1])\n\n elif latt_type = 'bcc':\n lx, ly, lz = lat_parms\n latt = {}\n latt['box'] = ['bcc', lx, ly, lz]\n latt['xyzs'] = []\n\n # box dimensions in lattice units\n\n # layer number\n for iz in range(lz):\n # layer structure\n for iy in range(ly):\n for ix in range(lx):\n if ix + 0.5 <= (lx - 1) and iy + 0.5 <= (ly - 1) and iz + 0.5 <= (lz - 1):\n latt['xyzs'].append([ix + 0.5, iy + 0.5, iz + 0.5, 1])\n latt['xyzs'].append([1 * ix, 1 * iy, 1 * iz, 1])\n\n\n\n elif latt_type = 'fcc':\n lx, ly, lz = lat_parms\n\n latt = {}\n latt['nat'] = lx * ly * lz\n latt['box'] = ['fcc', 2 * lx, ly, lz]\n latt['xyzs'] = []\n\n # box dimensions in lattice units\n\n # layer number\n for iz in range(lz):\n # layer structure\n for iy in range(ly):\n for ix in range(lx):\n rx = 2 * ix + (iy + iz) % 2\n latt['xyzs'].append([rx, iy, iz,1])\n\n return latt",
"def compute_distance_field(self, entity_type):\n grid_width = poc_grid.Grid.get_grid_width(self)\n grid_height = poc_grid.Grid.get_grid_height(self)\n self._visited = poc_grid.Grid(grid_height, grid_width)\n self._distance_field = [[grid_width*grid_height for dummy_col in range(0, grid_width)] for dummy_row in range(0, grid_height)]\n self._boundary_list = poc_queue.Queue()\n if entity_type == ZOMBIE:\n for entity in self._zombie_list:\n self._boundary_list.enqueue(entity)\n elif entity_type == HUMAN:\n for entity in self._human_list:\n self._boundary_list.enqueue(entity)\n else:\n print \"Invalid Entity\"\n return\n\n\n #set all initial distance to 0\n for boundary in self._boundary_list:\n self._distance_field[boundary[0]][boundary[1]] = 0\n\n #each step outward of unoccupied space gets +1 distance to their\n #corresponding field position\n while len(self._boundary_list)>0:\n #if DEBUG_CDF:\n # print \"len(self._boundary_list)\", len(self._boundary_list)\n boundary = self._boundary_list.dequeue()\n if boundary == None:\n return self._distance_field\n self._visited.set_full(boundary[0], boundary[1])\n #self._distance_field[boundary[0], boundary[1]] = distance\n neighbors = self.four_neighbors(boundary[0], boundary[1])\n for neighbor in neighbors:\n #check if already iterated over tile this calculation, if not add distance calculation\n #if self._visited.is_empty(neighbor[0], neighbor[1]) and self.is_empty(neighbor[0], neighbor[1]):\n #modified version, checks if neighbor distance > current cell distance and also adds it to the calculation\n if self._visited.is_empty(neighbor[0], neighbor[1]) and self.is_empty(neighbor[0], neighbor[1]) \\\n or self._distance_field[neighbor[0]][neighbor[1]] > self._distance_field[boundary[0]][boundary[1]] and self.is_empty(neighbor[0], neighbor[1]):\n self._distance_field[neighbor[0]][neighbor[1]] = self._distance_field[boundary[0]][boundary[1]] + self.get_weight(boundary[0], boundary[1])\n self._boundary_list.enqueue(neighbor)\n self._visited.set_full(neighbor[0], neighbor[1])\n if DEBUG_CDF:\n for line in self._distance_field:\n print line\n return self._distance_field\n\n\n #print \"w\", grid_width\n #print \"h\", grid_height\n #for line in self._visited:\n # print line",
"def compute_distance_field(self, entity_type):\n \n MAX_DISTANCE = (self.get_grid_height() * self.get_grid_width())\n \n # a new grid visited of the same size \n # as the original grid \n visited = poc_grid.Grid(self.get_grid_height(), self.get_grid_width())\n \n # and initialize its cells to be empty\n for row in range(self.get_grid_height()):\n for col in range(self.get_grid_width()):\n visited[row][col] = EMPTY\n \n # Create a 2D list distance_field of the same size as the original \n # grid \n distance_field = poc_grid.Grid(self.get_grid_height(), self.get_grid_width())\n \n # initialize each of its entries to be the product of\n # the height times the width of the grid.\n for row in range(self.get_grid_height()):\n for col in range(self.get_grid_width()):\n distance_field[row][col] = MAX_DISTANCE\n \n # boundary that is a copy of either the zombie list \n # or the human list. \n entity_list = self._human_list if entity_type == HUMAN else self._zombie_list\n \n boundary = poc_grid.Queue()\n \n for elm in entity_list:\n boundary.enqueue(elm)\n \n # For cells in the queue, initialize\n # visited to be FULL and distance_field to be zero. \n for cell in boundary:\n visited[cell[0]][cell[1]] = FULL\n distance_field[cell[0]][cell[1]] = 0\n \n # BFS implementation \n \n while len(boundary) > 0 \n current_cell = boundary.dequeue()\n neighbors = self.four_neighbors(current_cell[0], current_cell[1])\n distance_field[current_cell[0]][current_cell[1]] + 1\n for neighhor in neighbors:\n # if neighbor_cell is not in visited\n if visited.is_empty(neighbor[0], neighbor[1]):\n visited.set_full(neighbor[0], neighbor[1])\n boundary.enqueue(neighhor)\n \n return distance_field",
"def generate_lattice(n_points):\n grid = np.zeros(tuple(4 * [4] + [4, 3, 3]), dtype=np.complex64)\n for t in range(n_points):\n for x in range(n_points):\n for y in range(n_points):\n for z in range(n_points):\n grid[t, x, y, z, :, :, :] = generate_site()\n return grid",
"def particleDistribution(latticeList):\n\n\n particlesRemaining = particleNumber\n \n # the following while loop attempts to place a particle each iteration, if the spot\n # picked is full, it finds a new random spot on the next iteration. It consumes\n # particles as it goes, until it runs out. If a spot is full, nothing is consumed.\n\n while particlesRemaining != 0:\n \n # these randomly select a row and column\n randomColumn = random.randint(0, containerSize-1)\n randomRow = random.randint(0, containerSize-1)\n\n # this statement finds the number of spots left in the target, and which index\n # values they have\n spotsRemaining, openSpots = latticeList.spots_remaining(randomColumn, randomRow)\n \n\n # this if statement ensures that the only time a particle is added is when there\n # are spots remaining.\n if spotsRemaining > 0:\n positionChosen = random.choice(openSpots)\n\n latticeList.array[randomRow][randomColumn][positionChosen] = 1\n\n particlesRemaining -= 1\n\n return latticeList",
"def lattice(self, perturb):\n\n # Check if perturbation is below maximum allowed. If not, default to maximum perturbation.\n if perturb > 1:\n print('Warning: Random perturbation must not exceed 1. Setting perturb = 1.')\n perturb = 1 # Maximum perturbation\n\n print('Initializing particles with maximum random perturbation of {} times the lattice spacing.'.format(\n perturb * 0.5))\n\n # Determining number of particles per side of simple cubic lattice\n part_per_side = self.total_num_ptcls ** (1. / 3.) # Number of particles per side of cubic lattice\n\n # Check if total number of particles is a perfect cube, if not, place more than the requested amount\n if round(part_per_side) ** 3 != self.total_num_ptcls:\n part_per_side = np.ceil(self.total_num_ptcls ** (1. / 3.))\n print('\\nWARNING: Total number of particles requested is not a perfect cube.')\n print('Initializing with {} particles.'.format(int(part_per_side ** 3)))\n\n dx_lattice = self.pbox_lengths[0] / (self.total_num_ptcls ** (1. / 3.)) # Lattice spacing\n dz_lattice = self.pbox_lengths[1] / (self.total_num_ptcls ** (1. / 3.)) # Lattice spacing\n dy_lattice = self.pbox_lengths[2] / (self.total_num_ptcls ** (1. / 3.)) # Lattice spacing\n\n # Create x, y, and z position arrays\n x = np.arange(0, self.pbox_lengths[0], dx_lattice) + 0.5 * dx_lattice\n y = np.arange(0, self.pbox_lengths[1], dy_lattice) + 0.5 * dy_lattice\n z = np.arange(0, self.pbox_lengths[2], dz_lattice) + 0.5 * dz_lattice\n\n # Create a lattice with appropriate x, y, and z values based on arange\n X, Y, Z = np.meshgrid(x, y, z)\n\n # Perturb lattice\n X += self.rnd_gen.uniform(-0.5, 0.5, np.shape(X)) * perturb * dx_lattice\n Y += self.rnd_gen.uniform(-0.5, 0.5, np.shape(Y)) * perturb * dy_lattice\n Z += self.rnd_gen.uniform(-0.5, 0.5, np.shape(Z)) * perturb * dz_lattice\n\n # Flatten the meshgrid values for plotting and computation\n self.pos[:, 0] = X.ravel() + self.box_lengths[0]/2 - self.pbox_lengths[0]/2\n self.pos[:, 1] = Y.ravel() + self.box_lengths[1]/2 - self.pbox_lengths[1]/2\n self.pos[:, 2] = Z.ravel() + self.box_lengths[2]/2 - self.pbox_lengths[2]/2",
"def updateLattice(lattice):\n z=0.\n for i in range(len(lattice)):\n lattice[i]['z']=z\n z=z+lattice[i]['length']",
"def grow_cluster(self):\n rn = np.random.rand\n\n # Set the lattice size\n lattice = np.zeros([self.L * 2 + 1, self.L * 2 + 1], dtype=int)\n # center of the lattice\n self.center = self.L\n # the center of lattice is occupied by a particle from start.\n lattice[self.center, self.center] = 1\n\n # visualization\n if self.view:\n self.c(\n (2 * self.center - self.L)*self.rsize + self.margin,\n (2 * self.center - self.L)*self.rsize + self.margin,\n (2 * (self.center + 1) - self.L)*self.rsize + self.margin - 1,\n (2 * (self.center + 1) - self.L)*self.rsize + self.margin - 1,\n outline='black',\n fill='black'\n )\n self.update()\n\n def reset_particle_postion():\n \"\"\"Initialise the postion of the particle.\"\"\"\n theta = 2 * np.pi * rn()\n x = int((self.R + 2) * np.cos(theta)) + self.center\n y = int((self.R + 2) * np.sin(theta)) + self.center\n return x, y\n\n def diffusion(x, y):\n \"\"\"Set a partcle at outer circle and move it as random walk.\n Then, if it contacts the existing cluster, the cluster grows.\n \"\"\"\n\n def get_distance_from_center(x, y):\n \"\"\"Get the distance from the center to the particle position\"\"\"\n return np.sqrt((x - self.center)**2 + (y - self.center)**2)\n\n # increase the step size of RW when it is far from the center.\n # r: distance from the center to the particle\n r = get_distance_from_center(x, y)\n\n # l: step size of the random walk of the particle\n l = int(r - self.R - 2) if int(r - self.R - 2) > 0 else 1\n\n # Random walk\n p = rn() * 4\n if p < 1:\n x += l\n elif p < 2:\n x -= l\n elif p < 3:\n y += l\n else:\n y -= l\n\n # if the particle is far from the center, reset the possition.\n r = get_distance_from_center(x, y)\n if r >= 2 * self.R:\n return 2\n\n # if there is no occupied site near the particle, continue.\n # if judge == 0:\n if not (lattice[x-1, y] == 1 or lattice[x+1, y] == 1 or\n lattice[x, y-1] == 1 or lattice[x, y+1] == 1):\n return x, y\n\n # else, the particle is occupied to the DLA cluster.\n lattice[x, y] = 1\n\n # visualise\n if self.view:\n if self.color:\n colors = ['#ff0000', '#ff8000', '#ffff00', '#80ff00',\n '#00ff00', '#00ff80', '#00ffff', '#0080ff',\n '#0000ff', '#8000ff', '#ff00ff', '#ff0080']\n len_colors = 12\n n_samecolor = (self.N / len_colors) + 1\n color = colors[n / n_samecolor]\n else:\n color = \"black\"\n\n self.c(\n (2 * x - self.L) * self.rsize + self.margin,\n (2 * y - self.L) * self.rsize + self.margin,\n (2 * (x + 1) - self.L) * self.rsize + self.margin - 1,\n (2 * (y + 1) - self.L) * self.rsize + self.margin - 1,\n outline=color,\n fill=color\n )\n self.update()\n\n # Update R\n self.R = int(r) + 1 if int(r) + 1 > self.R else self.R\n # Update sum_rxr\n self.sum_rxr += r*r\n # Update R_g\n self.R_g.append(np.sqrt(self.sum_rxr/(len(self.R_g)+1.)))\n # Finish the random walk of the particle\n return 0\n\n n = 0\n while n < self.N:\n x, y = reset_particle_postion()\n while True:\n res = diffusion(x, y)\n # 0: process successfully done\n # 2: restart process\n if res == 0:\n # increment n\n n += 1\n break\n elif res == 2:\n x, y = reset_particle_postion()\n else:\n x, y = res\n else:\n if self.view:\n # Save the canvas image\n # filename = \"img/\" + str(time.time()) + \".eps\"\n # self.canvas.postscript(file=filename)\n # print \"Save the figure to \" + filename\n\n # Print the time\n self.end_time = time.time()\n t = self.end_time - self.start_time\n print \"done; N = %d, time = \" % self.N + str(t) + ' (s)'\n\n self.lattice = lattice\n return self.lattice",
"def generatorSpatialLattice(self, scale_factor=1.0):\n # nested for loop in one line\n latticeCoordToPositionXY = self.latticeCoordToPositionXY\n for latticeCoord in product(range(self._max_row), range(self._max_col)):\n row, col = latticeCoord\n x, y = latticeCoordToPositionXY(row, col, scale_factor)\n yield x, y, row, col",
"def compute_distance_field(self, entity_type):\n #while boundary is not empty:\n #\tcurrent_cell ← dequeue boundary\n #\tfor all neighbors neighbor_cell of current_cell:\n #\t\tif neighbor_cell is not in visited:\n #\t\t\tadd neighbor_cell to visited\n #\t\t\tenqueue neighbor_cell onto boundary\n visited = poc_grid.Grid(self.get_grid_height(), self.get_grid_width())\n distance_field= []\n boundary = poc_queue.Queue()\n for _dum_x in range(self.get_grid_height()):\n distance_field.append([])\n for _dum_y in range(self.get_grid_width()):\n distance_field[-1].append(self.get_grid_height()*self.get_grid_width()) \n if entity_type=='zombie':\n for zom in self.zombies():\n boundary.enqueue(zom)\n visited.set_full(zom[0],zom[1])\n distance_field[zom[0]][zom[1]]=0\n elif entity_type=='human':\n for hum in self.humans():\n boundary.enqueue(hum)\n visited.set_full(hum[0],hum[1])\n distance_field[hum[0]][hum[1]]=0\n \n while boundary.__len__() > 0:\n curr_cell = boundary.dequeue()\n distance=distance_field[curr_cell[0]][curr_cell[1]]+1\n neighbors = self.four_neighbors(curr_cell[0], curr_cell[1])\n #neighbors = self.eight_neighbors(curr_cell[0], curr_cell[1])\n for neighbor in neighbors:\n if visited.is_empty(neighbor[0], neighbor[1]):\n visited.set_full(neighbor[0], neighbor[1])\n if self.is_empty(neighbor[0],neighbor[1]):\n boundary.enqueue(neighbor)\n distance_field[neighbor[0]][neighbor[1]] = distance\n return distance_field",
"def generate_lattice(self, verbose=False):\n if not self._lattice:\n lat = StrictOrders().get_orders(xrange(1, self.set_n + 1), verbose)\n self._lattice = lat",
"def pos2dis(pos, boxsize, Ng):\n cellsize = boxsize / Ng\n lattice = np.arange(Ng) * cellsize\n\n pos[..., 0] -= lattice.reshape(-1, 1, 1)\n pos[..., 1] -= lattice.reshape(-1, 1)\n pos[..., 2] -= lattice\n\n pos -= np.rint(pos / boxsize) * boxsize\n\n return pos",
"def broadcast(self, symbol, domains, broadcast_type):\n domain = domains[\"primary\"]\n primary_domain_size = self.mesh[domain].npts\n secondary_domain_size = self._get_auxiliary_domain_repeats(\n {\"secondary\": domains[\"secondary\"]}\n )\n tertiary_domain_size = self._get_auxiliary_domain_repeats(\n {\"tertiary\": domains[\"tertiary\"]}\n )\n auxiliary_domains_size = self._get_auxiliary_domain_repeats(domains)\n full_domain_size = primary_domain_size * auxiliary_domains_size\n if broadcast_type.endswith(\"to edges\"):\n # add one point to each domain for broadcasting to edges\n primary_domain_size += 1\n full_domain_size = primary_domain_size * auxiliary_domains_size\n secondary_domain_size += 1\n tertiary_domain_size += 1\n\n if broadcast_type.startswith(\"primary\"):\n # Make copies of the child stacked on top of each other\n sub_vector = np.ones((primary_domain_size, 1))\n if symbol.shape_for_testing == ():\n out = symbol * pybamm.Vector(sub_vector)\n else:\n # Repeat for secondary points\n matrix = csr_matrix(kron(eye(symbol.shape_for_testing[0]), sub_vector))\n out = pybamm.Matrix(matrix) @ symbol\n elif broadcast_type.startswith(\"secondary\"):\n # Make copies of the child stacked on top of each other\n identity = eye(symbol.shape[0])\n matrix = vstack([identity for _ in range(secondary_domain_size)])\n out = pybamm.Matrix(matrix) @ symbol\n elif broadcast_type.startswith(\"tertiary\"):\n # Make copies of the child stacked on top of each other\n identity = eye(symbol.shape[0])\n matrix = vstack([identity for _ in range(tertiary_domain_size)])\n out = pybamm.Matrix(matrix) @ symbol\n elif broadcast_type.startswith(\"full\"):\n out = symbol * pybamm.Vector(np.ones(full_domain_size), domains=domains)\n\n out.domains = domains.copy()\n return out",
"def shiftElements(self, geometry_type, offset):\n # Make sure the geometry is not read only.\n if self.isReadOnly():\n raise hou.GeometryPermissionError()\n\n if not isinstance(offset, int):\n raise TypeError(\n \"Got '{}', expected 'int'.\".format(type(offset).__name__)\n )\n\n if geometry_type in (hou.geometryType.Points, hou.geometryType.Primitives):\n attrib_owner = _get_attrib_owner_from_geometry_type(geometry_type)\n _cpp_methods.shiftList(self, attrib_owner, offset)\n\n else:\n raise hou.OperationFailed(\n \"Geometry type must be points or primitives.\"\n )",
"def place_spawn_items(width, height, cell_size, xmin, ymin, punishement=True):\n #Still the original function\n #Initialize\n item_start = 20;\n num_items = 4\n output=[]\n items = []\n locations=[]\n punishements=[]\n \"\"\"\n #Place spawn at random\n spawn_i = random.randint(0, width-1)\n spawn_j = random.randint(0, height-1) \n locations.append((spawn_i,spawn_j))\"\"\"\n #Place spawn not randomly\n spawn_i = 0\n spawn_j = height//2\n \n #Generate output\n spawn = (xmin + spawn_i*cell_size + cell_size/2, ymin + spawn_j*cell_size + cell_size/2)\n \n #this chooses which items \n item_tids = [2018, 2019, 2012, 2013]\n colors = ['g','r', 'b', 'c']\n\n #let's place the items\n for i in range(num_items):\n item_i = random.randint(0, width-1)\n #item_j = random.randint(0, height-1)\n item_j = random.randint(0, 1)*(height-1)\n \n #This avoid items superposition\n while (item_i, item_j) in locations:\n item_i = random.randint(0, width-1)\n item_j = random.randint(0, 1)*(height-1) \n locations.append((item_i, item_j))\n \n #add in the wad output\n item_x = xmin + item_i*cell_size + cell_size/2\n item_y = ymin + item_j*cell_size + cell_size/2\n output += create_object(item_x, item_y, item_tids[i], item_start + i)\n \n #add the item\n items.append((item_x, item_y, colors[i]))\n \n #let's place the punishement items\n for i in range(width):\n #place in upper combs\n for j in range(height//2):\n if not (0,j) in locations:\n idx=100*len(punishements)+2-j%((height//2)/2)\n item_x = xmin + i*cell_size + cell_size/2\n item_y = ymin + j*cell_size + cell_size/2\n tid=2013\n output += create_object(item_x, item_y, tid, idx) # unvisible=True)\n items.append((item_x, item_y, colors[2]))\n #place in lower combs\n for j in range(h//2+1,height):\n if not (width, j) in locations:\n idx=100*len(punishements)+(j-height//2)%((height-height//2)/2)\n item_x = xmin + i*cell_size + cell_size/2\n item_y = ymin + j*cell_size + cell_size/2\n tid=2013\n output += create_object(item_x, item_y, tid, idx) # unvisible=True)\n items.append((item_x, item_y, colors[2]))\n \n return items, output, spawn",
"def interval(generators, lattice_pos):\n if isinstance(lattice_pos, sp.Matrix):\n output = sp.ones(lattice_pos.shape[0], 1)\n\n for i in range(lattice_pos.rows):\n row = lattice_pos.row(i)\n #print(row)\n #print(generators)\n for j in range(len(row)):\n output[i] *= generators[j] ** row[j]\n while output[i] < 1 or output[i] > 2:\n if output[i] < 1:\n output[i] *= 2\n else:\n output[i] /= 2\n elif isinstance(lattice_pos, sp.Point2D):\n output = sp.ones(1, 1)\n output *= generators[0] ** lattice_pos.x\n output *= generators[1] ** lattice_pos.y\n while output[0] < 1 or output[0] > 2:\n if output[0] < 1:\n output *= 2\n else:\n output /= 2\n\n\n return output",
"def _position_nodes(self, partition, **kwargs):\n\n communities = dict()\n for node, community in partition.items():\n try:\n communities[community] += [node]\n except KeyError:\n communities[community] = [node]\n\n pos = dict()\n for ci, nodes in communities.items():\n subgraph = self.subgraph(nodes)\n pos_subgraph = nx.spring_layout(subgraph, **kwargs)\n pos.update(pos_subgraph)\n\n return pos",
"def mlvl_point_generator__single_level_grid_priors__tensorrt(\n self,\n featmap_size,\n level_idx,\n dtype=torch.float32,\n device='cuda',\n with_stride=False):\n feat_h, feat_w = featmap_size\n stride_w, stride_h = self.strides[level_idx]\n shift_x = (torch.arange(0, feat_w, device=device) + self.offset) * stride_w\n # keep featmap_size as Tensor instead of int, so that we\n # can convert to ONNX correctly\n shift_x = shift_x.to(dtype)\n\n shift_y = (torch.arange(0, feat_h, device=device) + self.offset) * stride_h\n # keep featmap_size as Tensor instead of int, so that we\n # can convert to ONNX correctly\n shift_y = shift_y.to(dtype)\n shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)\n if not with_stride:\n shifts = torch.stack([shift_xx, shift_yy], dim=-1)\n else:\n # use `feat_w * feat_h` instead of `shift_xx.shape[0]` for TensorRT\n stride_w = shift_xx.new_full((feat_w * feat_h, ), stride_w).to(dtype)\n stride_h = shift_xx.new_full((feat_w * feat_h, ), stride_h).to(dtype)\n shifts = torch.stack([shift_xx, shift_yy, stride_w, stride_h], dim=-1)\n all_points = shifts.to(device)\n return all_points"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Randomly generates velocities in the range (-1, 1)
|
def velocity_random(type = 'uniform'):
if(type == 'uniform'):
system.vel = np.random.uniform(-1.0, 1.0, (system.N,system.dim))
#Velocities are shifted to avoid unwanted momenta
for dim in range(system.vel.shape[1]):
system.vel[:,dim] -= np.mean(system.vel[:,dim])
elif(type == 'boltzmann'):
sigma = system.T*const.KB/system.mass
system.vel = np.sqrt(sigma)*np.random.normal(0, 1, size=(system.N, system.dim))
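The function above mutates a module-level `system` object and reads `const.KB`, neither of which is shown in this row. A minimal self-contained sketch of the same two initialisation modes (uniform with the centre-of-mass drift removed, and Maxwell-Boltzmann), with those globals replaced by explicit illustrative arguments, is:

import numpy as np

def init_velocities(n_particles, dim, T=1.0, kb=1.0, mass=1.0, kind='uniform'):
    if kind == 'uniform':
        # uniform draw in (-1, 1); subtract the per-component mean so the
        # total momentum of the sample is zero
        vel = np.random.uniform(-1.0, 1.0, (n_particles, dim))
        vel -= vel.mean(axis=0)
    else:
        # Maxwell-Boltzmann: each component is Gaussian with variance kb*T/mass
        sigma = T * kb / mass
        vel = np.sqrt(sigma) * np.random.normal(0.0, 1.0, (n_particles, dim))
    return vel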
|
[
"def generate_random_velocity():\n return random.randrange(5), random.randrange(5), random.randrange(5)",
"def __random_velocity():\n heading = random.randint(0, 360)\n magnitude = random.randint(ASTEROID_MIN_VEL, ASTEROID_MAX_VEL)\n return heading, magnitude",
"def _get_random_velocity(self):\n\t\t# create random matrix v where each row is velocity vector of each point\n\t\tv = np.random.uniform(-1, 1, (self.N, 2))\n\t\t# turn each vector in v into a unit vector\n\t\tmag = v**2\n\t\tmag = (mag[:,[0]] + mag[:,[1]])**0.5\n\t\tv_unit = v / mag\n\t\t# multiply each row in v by some constant speed \n\t\tv_new = self.speed * v_unit\n\t\treturn v_new",
"def random_step(self):\n\t\t# calculate new positions\n\t\tself.positions = self.positions + self.v * self.dt\n\n\t\t# detect any points that are put of bounds\n\t\t# negate their original velocities to keep them in bounds\n\t\toutofbounds = self._boundary_detection()\n\t\tself.positions = self.positions - outofbounds * self.v * self.dt\n\t\t\n\t\t# generate new random velocities\n\t\tself.v = self._get_random_velocity()",
"def init_vel():\n x = random.randint(-Controller.MAX_RADIUS, Controller.MAX_RADIUS+1)\n y = random.randint(-Controller.MAX_RADIUS, Controller.MAX_RADIUS+1)\n z = random.randint(-Controller.MAX_RADIUS, Controller.MAX_RADIUS+1)\n\n return Vector((x, y, z))",
"def _default_initial_velocity_sample_func(self):\n initial_speed = random.gauss(60, 20)\n initial_theta = random.uniform(0, np.pi * 2)\n initial_velocity = initial_speed * np.array([np.cos(initial_theta), np.sin(initial_theta)])\n return initial_velocity",
"def set_ball_velocity(self):\n self.__dx = random.randint(1, MAX_X_SPEED)\n self.__dy = INITIAL_Y_SPEED\n\n if random.random( ) > 0.5:\n self.__dx = -self.__dx",
"def set_ball_velocity(self):\n # set up dx random velocity in range\n self.__dx = random.randint(1, MAX_X_SPEED)\n # ball had one half chance to run in opposite x direction\n if random.random() > 0.5:\n self.__dx = -self.__dx",
"def init_star():\n\n # TODO\n example = 1\n # TODO\n\n dir = random.randrange(100000)\n\n # get a random number between 0 and 1\n velocity = random.random()\n\n if example == 1:\n velmult = velocity\n elif example == 2:\n velmult = velocity * 10\n else:\n velmult = velocity * 100\n\n # print \"velmult = \", velmult\n\n vel = [math.sin(dir) * velmult, math.cos(dir) * velmult]\n\n return vel, WINCENTER[:]",
"def initialise_new_swarm_velocities(self):\n\n reject = 0\n temp_vel = self.mn - 1\n while np.sum(temp_vel < self.mn) > 0 or np.sum(temp_vel > self.mx) > 0:\n temp_vel = self.mode_location + \\\n nmmso.Nmmso.uniform_sphere_points(1, self.num_dimensions)[0] * \\\n (self.dist / 2)\n reject += 1\n\n if reject > 20:\n temp_vel = (np.random.rand(self.num_dimensions) * (self.mx-self.mn)) + self.mn\n\n self.velocities[0, :] = temp_vel",
"def random_vector():\n X = random.randint(VECTOR_LEN*(-1), VECTOR_LEN)\n Y = random.randint(VECTOR_LEN*(-1), VECTOR_LEN)\n return [X, Y]",
"def random_gen(self):\n\t\ttypes = [\"Normal\", \"Robot\", \"Ninja\", \"Fire\", \"Water\", \"Dinosaur\", \"Earth\", \"Sound\", \"Wind\", \"Darkness\", \"Light\", \"Plasma\", \"Solar\", \"Lunar\", \"Meme\", \"Magic\"]\n\t\tself._name_gen()\n\t\tself.speed = random.randint(1, 6) # All ranges here are balanced using eyeballs and hopes. And wishes.\n\t\tself.attk_pw = random.randint(0, 5)\n\t\tself.attk_type = random.choice(['physical', 'emotional'])\n\t\tself.moveType = random.choice(types)\n\t\tif self.attk_type == 'emotional':\n\t\t\tself.fp = random.randint(1, 5)",
"def set_initial_location(self):\n self.changed = True\n self.new_location = (np.random.rand(self.num_dimensions) * (self.mx-self.mn)) + self.mn\n # random initial velocities of swarm\n self.velocities[0, :] = (np.random.rand(self.num_dimensions) * (self.mx-self.mn)) + self.mn",
"def _randomize_asteroid(self):\n def randomize(vel):\n return vel * rand.choice([1, -1]) * rand.uniform(.5, 2)\n # randomly choose an image\n self.base_img, self.rect = self.fleet.get_random_image()\n # randomize size\n self.scale = rand.uniform(.25, 1.25)\n # randomize velocities\n self.vel_x = randomize(self.base_vel)\n self.vel_y = randomize(self.base_vel)\n self.rotation_vel = randomize(self.base_rotation_vel)",
"def random_move(self):\n\t\toptions = [90, 180, 270]\n\t\tang = randint(0,2)\n\t\tn = randint(2, self.length - 1)\n\t\tself.rotate(n, radians(options[ang]))",
"def reset(self):\n self.x = self.config.screen_width / 2\n self.y = self.config.screen_height / 2\n self.speed = 8.0\n # Choose a side to serve to\n if randrange(2) == 0:\n # random initial velocities\n self.velocity_x = -randrange(self.speed / 2, self.speed)\n self.velocity_y = randrange(-self.speed, self.speed)\n else:\n self.velocity_x = randrange(self.speed / 2, self.speed)\n self.velocity_y = randrange(-self.speed, self.speed)",
"def velocity(self, t):\n pass",
"def init_pos():\n x = random.randint(0, Controller.POSITION_RANGE+1)\n y = random.randint(0, Controller.POSITION_RANGE+1)\n z = random.randint(0, Controller.POSITION_RANGE+1)\n\n return Vector((x, y, z))",
"def randomise(self):\n self.timer = self.period * random.random()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Calls a Numba-compiled routine to rescale the velocities
|
def velocity_rescale():
system.vel = v_res(system.vel, system.T, const.KB, system.mass)
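`v_res` is evidently a Numba-compiled kernel defined elsewhere in the module; its body is not shown in this row. A hedged sketch of what such a plain velocity-rescaling kernel could look like, matching the call signature above, is:

import numpy as np
from numba import njit

@njit
def v_res(vel, T_target, kb, mass):
    # hypothetical implementation: simple rescaling to the target temperature
    n, dim = vel.shape
    # instantaneous temperature from the kinetic energy, T = m*<v^2>/(dim*kb)
    t_inst = mass * np.sum(vel * vel) / (dim * n * kb)
    return vel * np.sqrt(T_target / t_inst)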
|
[
"def updateVelocities(self) -> None:\r\n for idx1 in range(self.size() - 1):\r\n for idx2 in range(idx1 + 1, self.size()):\r\n self.updateVelocity(idx1, idx2)",
"def action_scaling_vecs(self):\n vel_vec = np.arange(1, self.specs['velocity_limits'][1] + 1, 1)\n\n acc_pos_vec = self.calc_acceleration_from_power(\n vel_vec, self.specs['power_limits'][1])\n acc_neg_vec = self.calc_acceleration_from_power(\n vel_vec, self.specs['power_limits'][0])\n acc_0_vec = self.calc_acceleration_from_power(vel_vec, 0)\n\n acc_pos_vec = np.min([\n acc_pos_vec,\n np.ones(len(acc_pos_vec)) * self.specs['acceleration_limits'][1]\n ],\n axis=0)\n acc_neg_vec = np.max([\n acc_neg_vec,\n np.ones(len(acc_neg_vec)) * self.specs['acceleration_limits'][0]\n ],\n axis=0)\n\n # TODO: Find better solution :)\n # This is kind of a workaround. Roman got the values for 0 from the\n # data, which seems difficult to implement here. So the added 1.0 in\n # acc_pos_vec is handcrafted.\n self.vel_vec = np.append(0, vel_vec)\n self.acc_pos_vec = np.append(1.0, acc_pos_vec)\n self.acc_neg_vec = np.append(0.0, acc_neg_vec)\n self.acc_0_vec = np.append(0.0, acc_0_vec)",
"def velocity_bamber(args, nc_insar, nc_base, trans):\n insar_y = nc_insar.variables['y']\n insar_ny = insar_y[:].shape[0]\n\n insar_x = nc_insar.variables['x']\n insar_nx = insar_x[:].shape[0]\n\n base_data = np.ndarray( (trans.ny,trans.nx) )\n\n\n for vv in ['vy','vx','ey','ex'] :\n insar_data[:,:] = 0.\n base_data[:,:] = 0.\n \n insar_var = nc_insar.variables[ vv ]\n insar_data = np.ma.masked_values( nc_bamber.variables[var_list[1]][:,:], -2.e9)\n data_min = insar_data.min() \n data_max = insar_data.max() \n\n\n speak.verbose(args,\" Interpolating \"+vv+\".\")\n insar_to_base = scipy.interpolate.RectBivariateSpline( insar_y[:], insar_x[:], insar_data, kx=1, ky=1, s=0) # regular 2d linear interp. but faster\n\n for ii in range(0, trans.nx):\n base_data[:,ii] = insar_to_base.ev(trans.y_grid[:,ii], trans.x_grid[:,ii] )\n \n base_data[base_data < data_min] = -2.e9\n base_data[base_data > data_max] = -2.e9\n \n speak.verbose(args,\" Writing \"+vv+\" to base.\")\n base_var = nc_base.createVariable( vv, 'f4', ('y','x',) )\n base_var[:,:] = base_data[:,:]\n copy_atts(insar_var, base_var)",
"def rescale_velocity(velocities, masses, kT):\n alpha = np.sqrt(kT / utils.compute_temperature(velocities, masses))\n rescaled_velocities = velocities * alpha\n return rescaled_velocities",
"def apply_velocity(self):\n for moon in self.moons:\n for axis, vel in moon['vel'].items():\n moon['pos'][axis] += vel",
"def update(self, **kwargs):\n self.apply_velocity()",
"def update_relative_velocities(self, msg):\t\n\t\tself.sen.emu.rel_vel = ROS_list_to_np_array(msg.data)",
"def _update_intermediate_vel_bc_(self, u, w, mask, time, _bc):\n\n # Interior boundaries\n # Apply no-slip boundary conditions to obstacles.\n # Setup masks that are 0 where velocities need to be updated,\n # and 1 where they stay unmodified.\n # Note that (mask & 1) has 1 in the ghost cells.\n u_mask = ( mask[:-1,:] | mask[1:,:] ) & 1\n w_mask = ( mask[:,:-1] | mask[:,1:] ) & 1\n\n # zero velocity inside and on the boundary of obstacles\n u[:,:] *= ( mask[:-1,:] & mask[1:,:] & 1 )\n # negate velocities inside obstacles\n u[:,1:-2] -= ( 1 - u_mask[:,1:-2] ) * u[:,2:-1]\n u[:,2:-1] -= ( 1 - u_mask[:,2:-1] ) * u[:,1:-2]\n\n # zero velocity inside and on the boundary of obstacles\n w[:,:] *= ( mask[:,:-1] & mask[:,1:] & 1 )\n # nullify velocities inside obstacles\n w[1:-2,:] -= ( 1 - w_mask[1:-2,:] ) * w[2:-1,:]\n w[2:-1,:] -= ( 1 - w_mask[2:-1,:] ) * w[1:-2,:] \n\n # top boundary\n _bc_ = _bc[self.UP]\n if 'w' in _bc_:\n fun_ = _bc_['w']\n if callable(fun_):\n for i in range(w.shape[0]):\n node = self.grid[i-0.5, w.shape[1]-1]\n w[i,-1] = fun_(node[0], node[1], time) * (mask[i,-2] & 1)\n else:\n w[:,-1] = fun_ \n\n # bottom boundary\n _bc_ = _bc[self.DOWN]\n if 'w' in _bc_:\n fun_ = _bc_['w']\n if callable(fun_):\n for i in range(w.shape[0]):\n node = self.grid[i-0.5, 0]\n w[i,0] = fun_(node[0], node[1], time) * (mask[i,1] & 1)\n else:\n w[:,0] = fun_ \n\n # left boundary\n _bc_ = _bc[self.LEFT]\n if 'u' in _bc_:\n fun_ = _bc_['u']\n if callable(fun_):\n for i in range(u.shape[1]):\n node = self.grid[u.shape[0]-1, i-0.5]\n u[-1,i] = fun_(node[0], node[1], time) * (mask[-2,i] & 1)\n else:\n u[-1,:] = fun_\n\n # west boundary\n _bc_ = _bc[self.RIGHT]\n if 'u' in _bc_:\n fun_ = _bc_['u']\n if callable(fun_):\n for i in range(u.shape[1]):\n node = self.grid[0, i-0.5]\n u[0,i] = fun_(node[0], node[1], time) * (mask[1,i] & 1)\n else:\n u[0,:] = fun_",
"def calibrate(self, vp: 'SbViewportRegion') -> \"void\":\n return _coin.SoVectorizeAction_calibrate(self, vp)",
"def _update_a(self, fs_updated: bool) -> None:\n\t\tif fs_updated:\n\t\t\tself.A = np.exp(as_col(self.tvec) * 2 * np.pi * 1j * r(self.f_mat))\n\t\t\tself.a = as_col(self.A[-1, :])\n\t\telse:\n\t\t\ttval = self.t[self.t_stop - 1]\n\t\t\tself.a = np.exp(as_col(2 * np.pi * 1j * r(self.f_mat)) * tval)\n\t\t\tself.A = np.roll(self.A, -1, axis=0)\n\t\t\tself.A[-1, :] = r(self.a)",
"def test_mc_radial_velocity_float_vs_array_args2():\n npts = 100\n conc = 10\n mass = 1e12\n scaled_radius = 0.4\n scaled_radius_array = np.zeros(npts) + scaled_radius\n concarr = np.zeros_like(scaled_radius_array) + conc\n galbias = 1.\n galbiasarr = np.zeros_like(scaled_radius_array) + galbias\n\n nfw = BiasedNFWPhaseSpace(concentration_bins=conc_bins, conc_gal_bias_bins=gal_bias_bins)\n\n mc_vr_from_arr = nfw.mc_radial_velocity(scaled_radius_array, mass, concarr, galbiasarr, seed=43)\n mc_vr_from_float = nfw.mc_radial_velocity(scaled_radius_array, mass, conc, galbias, seed=43)\n assert np.allclose(mc_vr_from_arr, mc_vr_from_float)",
"def get_velocity( b ):\n v = []\n for i in range(1,len(b)-1):\n D2 = b[i+1] - 2.0*b[i] + b[i-1]\n D1 = (b[i+1] - b[i-1])/2.0\n D1norm2 = D1[0]**2.0 + D1[1]**2.0\n v.append( D2/D1norm2 )\n return np.array(v)",
"def update_angular_velocity(self, msg):\n\t\tself.ekf.ang_vel = enu_to_ned(np.array([[msg.twist.angular.x], [msg.twist.angular.y], [msg.twist.angular.z]]))",
"def init_evelocity_diag13():\n s1.mtv = int((nloop - 1)/in1.ntv) + 1; s1.itv = 0\n# fv = global electron velocity distribution functions\n s1.fv = numpy.empty((2*in1.nmv+2,in1.ndim),float_type,'F')\n# sfv = electron velocity distribution functions in tile\n s1.sfv = numpy.empty((2*in1.nmv+2,in1.ndim,mx1+1),float_type,'F')\n# fvm = electron vdrift, vth, entropy for global distribution\n s1.fvm = numpy.empty((in1.ndim,3),float_type,'F')\n# fvtm = time history of electron vdrift, vth, and entropy\n s1.fvtm = numpy.zeros((s1.mtv,in1.ndim,3),float_type,'F')\n ws[0] = 2.0*max(4.0*in1.vtx+abs(in1.vx0),4.0*in1.vtdx+abs(in1.vdx))\n ws[0] = max(ws[0],2.0*max(4.0*in1.vty+abs(in1.vy0),\n 4.0*in1.vtdy+abs(in1.vdy)))\n ws[0] = max(ws[0],2.0*max(4.0*in1.vtz+abs(in1.vz0),\n 4.0*in1.vtdz+abs(in1.vdz)))\n s1.sfv[0,0,:] = ws[0]\n s1.sfv[0,1,:] = ws[0]\n s1.sfv[0,2,:] = ws[0]",
"def update_velocity_body(self, msg):\n\t\tself.ekf.vel_body = enu_to_ned(np.array([[msg.twist.linear.x], [msg.twist.linear.y], [msg.twist.linear.z]]))",
"def update_velocity(self, msg):\n\t\tself.ekf.vel = enu_to_ned(np.array([[msg.twist.linear.x], [msg.twist.linear.y], [msg.twist.linear.z]]))",
"def ConvectiveVelocity(self):\n\n T = self.T[:,:,75]; P = self.P[:,:,75]; rho = self.rho[:,:,75]\n uy = self.vy[:,:,75]; ux = self.vx[:,:,75]\n d = 1.0 # delta\n self.vel_conv = np.nan_to_num(np.sqrt(self.F*d*self.dTdy(P,T,rho)/T)*self.dx)\n\n xx,yy = np.meshgrid(self.y, self.x)\n fig = plt.figure('conv vel')\n ax = fig.gca(projection='3d')\n ax.plot_surface(xx, yy, rho, cmap=cm.plasma)\n ax.set_xlabel('x [m]')\n ax.set_ylabel('y [m]')\n ax.set_zlabel('z [m/s]')\n plt.tight_layout()\n plt.savefig('Density.png')\n\n print '------'\n print 'The convective velocity is,'\n print self.vel_conv\n print '------'\n print 'Difference between convective velocity and vertical velocity'\n print self.vel_conv - uy\n\n \"\"\"\n Mass fraction moving with convective velocity +/- 10%. For each cell, the\n mass moving up with the given velocity range needs to be summed up. This\n gives the mass fraction moving with the given velocity range.\n \"\"\"\n\n mass_y = []; mass_x = []\n for i in range(self.nx):\n for j in range(self.ny):\n if uy[i,j] >= self.vel_conv[i,j]*0.9 and uy[i,j] <= self.vel_conv[i,j]*1.1:\n mass_y.append(rho[i,j])\n if ux[i,j] >= self.vel_conv[i,j]*0.9 and ux[i,j] <= self.vel_conv[i,j]*1.1:\n mass_x.append(rho[i,j])\n\n MassFraction_y = np.sum(mass_y)/np.sum(rho)\n MassFraction_x = np.sum(mass_x)/np.sum(rho)\n print 'Fraction of mass moving with velocities v_conv +/- 10% in x direction:',MassFraction_x\n print 'Fraction of mass moving with velocities v_conv +/- 10% in y direction:',MassFraction_y\n\n print '-----------'\n return self.vel_conv",
"def velocity_lims(rate, N):\n dt = N / rate\n vmin = 0.5 * PLATESCALE / dt\n vmax = np.minimum(8.2 / dt, 50 * (1 - OVERLAP_RATIO) * PLATESCALE / dt)\n return vmin, vmax",
"def test_mc_radial_velocity_float_vs_array_args1():\n nfw = BiasedNFWPhaseSpace(concentration_bins=conc_bins, conc_gal_bias_bins=gal_bias_bins)\n\n conc = 10\n mass = 1e12\n scaled_radius = 0.4\n scaled_radius_array = np.atleast_1d(scaled_radius)\n concarr = np.atleast_1d(conc)\n galbias = 1.\n galbiasarr = np.atleast_1d(galbias)\n\n mc_vr_from_arr = nfw.mc_radial_velocity(scaled_radius_array, mass, concarr, galbiasarr, seed=43)\n mc_vr_from_float = nfw.mc_radial_velocity(scaled_radius, mass, conc, galbias, seed=43)\n assert np.shape(mc_vr_from_arr) == np.shape(mc_vr_from_float)\n assert np.allclose(mc_vr_from_arr, mc_vr_from_float)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Computes the Nose-Hoover energy contribution given by E = Q*xi^2/2 + 3*N*kb*T*ln(s)
|
def nose_hoover_energy(Q, xi, N, kb, T, lns):
energy = 0.5*Q*xi**2 + 3*N*kb*T*lns
return energy
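In conventional notation the returned quantity is the thermostat (extended-system) contribution to the conserved Nose-Hoover energy, with Q the thermostat mass parameter, xi the thermostat friction variable and ln s the accumulated scaling variable:

E_NH = \frac{1}{2} Q \xi^{2} + 3 N k_{B} T \ln s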
|
[
"def sodiumHydrogenExchanger(Na_i, H_i, enable_I_NaH):\n if (enable_I_NaH == True):\n n_H = params_dict[\"n_H\"]; K_H_i_mod = params_dict[\"K_H_i_mod\"]; I_NaH_scale = params_dict[\"I_NaH_scale\"]\n k1_p = params_dict[\"k1_p\"]; k1_m = params_dict[\"k1_m\"]; k2_p = params_dict[\"k2_p\"]; k2_m = params_dict[\"k2_m\"]\n Na_o = params_dict[\"Na_o\"]; H_o = params_dict[\"H_o\"]; N_NaH_channel = params_dict[\"N_NaH_channel\"]\n K_Na_o = params_dict[\"K_Na_o\"]; K_H_o = params_dict[\"K_H_o\"];K_Na_i = params_dict[\"K_Na_i\"]; K_H_i = params_dict[\"K_H_i\"]\n\n I_NaH_mod = 1/(1 + (K_H_i_mod**(n_H)/H_i**(n_H)))\n t1 = k1_p*Na_o/K_Na_o / (1 + Na_o/K_Na_o + H_o/K_H_o)\n t2 = k2_p*H_i/K_H_i / (1 + Na_i/K_Na_i + H_i/K_H_i)\n t3 = k1_m*Na_i/K_Na_i / (1 + Na_i/K_Na_i + H_i/K_H_i)\n t4 = k2_m*H_o/K_H_o / (1 + Na_o/K_Na_o + H_o/K_H_o)\n I_NaH_exch = (t1*t2 - t3*t4) / (t1 + t2 + t3 + t4)\n I_NaH = I_NaH_scale*N_NaH_channel*I_NaH_mod*I_NaH_exch\n else:\n I_NaH = 0.0\n \n return I_NaH",
"def get_ion_energy(element,niveau):\n\t\n\t#Get the energy and return an error if the element doesn't exist\n\tif element == 'N':\n\t\tU_G = [14.53,29.60,47.44,77.47,97.89,552.07,667.05]\n\telif element == 'Ar':\n\t\tU_G = [15.76,27.62,40.74,59.81,75.02,91.01,124.32,\\\n\t\t\t\t143.46,422.45,478.69,538.96,618.26,686.10,\\\n\t\t\t\t755.74,854.77,918.03,4120.885,4426.23]\n\telif element == 'Kr':\n\t\tU_G = [14.0,24.36,36.95,52.5,64.7,78.5,111.0,125.80,\\\n\t\t\t\t230.85,268.2,308.,350.,391.,447.,492.,541.,\\\n\t\t\t\t592.,641.,786.,833.,884.,937.,998.,1051.,1151.,\\\n\t\t\t\t1205.3,2928.,3070.,3227.,3381.]\n\telse:\n\t\tprint(\"Error: Unknown element given!\")\n\t\n\t#Returns the energy for total ionization if niveau > number of electrons of the atom\n\tif niveau >= len(U_G):\n\t\tU_i = U_G[len(U_G)-1]\n\telse:\n\t\tU_i = U_G[niveau-1]\n\n\treturn(U_i)",
"def compute_energy(self,X,H):\n print(\"I am computing energy E(X,h)...\")\n num_samples=X.shape[1]\n Es=0\n for n in range(num_samples):\n En=-self.rbms[0].zeta(theta=self.a, X=X[:,[n]], fixed_param=self.visible_type_fixed_param, distribution=self.visible_type)\n #En=-numpy.dot(self.a.transpose(),X[:,[n]])\n for l in range(self.NK):\n if l==0:\n if self.visible_type==\"Multinoulli\":\n for m in range(self.M):\n En=En - numpy.dot(self.b[l].transpose(),H[l][:,[n]]) - X[m][:,[n]].transpose().dot(self.W[l][m]).dot(H[l][:,[n]])\n else:\n En=En - numpy.dot(self.b[l].transpose(),H[l][:,[n]]) - X[:,[n]].transpose().dot(self.W[l]).dot(H[l][:,[n]])\n else: # not first hidden layer\n En=En - numpy.dot(self.b[l].transpose(),H[l][:,[n]]) - H[l-1][:,[n]].transpose().dot(self.W[l]).dot(H[l][:,[n]])\n Es=Es+En\n Es=Es[0,0] # take off [[]]\n ME=Es/num_samples # mean energy\n return ME,Es",
"def electron_concentration(Nc, Ef, Eg, T):\n return Nc * np.exp(EV_TO_ERG * (Ef - Eg) / (k * T))",
"def get_Hu():\n \n ue = np.zeros((nx+1,ny)) \n uw = np.zeros((nx+1,ny))\n un = np.zeros((nx+1,ny))\n us = np.zeros((nx+1,ny))\n vn = np.zeros((nx+1,ny))\n vs = np.zeros((nx+1,ny))\n τxxe = np.zeros((nx+1,ny))\n τxxw = np.zeros((nx+1,ny))\n τxyn = np.zeros((nx+1,ny))\n τxys = np.zeros((nx+1,ny))\n Hu = np.zeros((nx+1,ny))\n \n i = np.arange(1,nx) # u-cell centers in domain interior\n \n ue[i,:] = (u[i+1,:] + u[i,:])/2\n uw[i,:] = (u[i,:] + u[i-1,:])/2\n \n j = np.arange(0,ny-1)\n un[IJ(i,j)] = (u[IJ(i,j+1)] + u[IJ(i,j)])/2\n un[i,ny-1] = ubc_t\n j = np.arange(1,ny)\n us[IJ(i,j)] = (u[IJ(i,j)] + u[IJ(i,j-1)])/2\n us[i,0] = ubc_b\n \n j = np.arange(0,ny)\n vn[IJ(i,j)] = (v[IJ(i-1,j+1)]+v[IJ(i,j+1)])/2\n vs[IJ(i,j)] = (v[IJ(i-1,j)] +v[IJ(i,j)]) /2\n \n τxxe[i,:] = -2*ν*(u[i+1,:] - u[i,:]) /Δx\n τxxw[i,:] = -2*ν*(u[i,:] - u[i-1,:])/Δx\n \n j = np.arange(0,ny-1)\n τxyn[IJ(i,j)] = -ν*(u[IJ(i,j+1)]-u[IJ(i,j)])/Δy - ν*(v[IJ(i,j+1)]-v[IJ(i-1,j+1)])/Δx\n τxyn[i,ny-1] = -ν*(ubc_t-u[i,ny-1])/(Δy/2) - ν*(v[i,ny]-v[i-1,ny])/Δx \n \n j = np.arange(1,ny)\n τxys[IJ(i,j)] = -ν*(u[IJ(i,j)]-u[IJ(i,j-1)])/Δy - ν*(v[IJ(i,j)]-v[IJ(i-1,j)])/Δx\n τxys[i,0] = -ν*(u[i,0]-ubc_b)/(Δy/2) - ν*(v[i,0]-v[i-1,0])/Δx\n \n Hu[i,:] = -((ue[i,:]*ue[i,:] - uw[i,:]*uw[i,:])/Δx + (un[i,:]*vn[i,:] - us[i,:]*vs[i,:])/Δy) \\\n -((τxxe[i,:] - τxxw[i,:])/Δx + (τxyn[i,:] - τxys[i,:])/Δy)\n \n return Hu",
"def einstein_energy(self, dbe):\n phase = dbe.phases[self.phase_name]\n param_search = dbe.search\n theta_param_query = (\n (where('phase_name') == phase.name) & \\\n (where('parameter_type') == 'THETA') & \\\n (where('constituent_array').test(self._array_validity))\n )\n lntheta = self.redlich_kister_sum(phase, param_search, theta_param_query)\n theta = exp(lntheta)\n if lntheta != 0:\n result = 1.5*v.R*theta + 3*v.R*v.T*log(1-exp(-theta/v.T))\n else:\n result = 0\n return result / self._site_ratio_normalization",
"def find_eigen_energy(self, guesses):\r\n\r\n #implement secant method\r\n while abs(guesses[0]-guesses[1])>1e-6:\r\n temp = (guesses[0]*self.Psi5(guesses[1]) - guesses[1]*self.Psi5(guesses[0]))/(self.Psi5(guesses[1]) - self.Psi5(guesses[0]))\r\n guesses[0] = guesses[1]\r\n guesses[1] = temp\r\n return guesses[1]",
"def E_K(E_inv_cm):\n E_hz = E_inv_cm*c # (1/cm)*(cm/s)\n E_ergs = h*E_hz # ergs\n return E_ergs/k # K",
"def extract_qubit_E_c(qubit: QuDev_transmon) -> float:\n # TODO Implement this method to give a meaningful value! (from the\n # design DB?)\n log.warning(\"Implement the `extract_qubit_E_c()` method to give a\"\n \"meaningful value!\")\n return 165e6",
"def energy(self,mu):\r\n\t\t\r\n\t\treturn -sum(sum(self.weight[i,j]*self.x[i]*self.x[j] for j in range(self.N)) for i in range(self.N))",
"def test_nose_hoover_integrator():\n temperature = 298*unit.kelvin\n testsystem = testsystems.WaterBox()\n num_dof = 3*testsystem.system.getNumParticles() - testsystem.system.getNumConstraints()\n integrator = NoseHooverChainVelocityVerletIntegrator(testsystem.system, temperature)\n # Create Context and initialize positions.\n context = openmm.Context(testsystem.system, integrator)\n context.setPositions(testsystem.positions)\n context.setVelocitiesToTemperature(temperature)\n integrator.step(200) # Short equilibration\n energies = []\n temperatures = []\n for n in range(100):\n integrator.step(1)\n state = context.getState(getEnergy=True)\n # temperature\n kinE = state.getKineticEnergy()\n temp = (2.0 * kinE / (num_dof * unit.MOLAR_GAS_CONSTANT_R)).value_in_unit(unit.kelvin)\n temperatures.append(temp)\n # total energy\n KE = kinE.value_in_unit(unit.kilojoules_per_mole)\n PE = state.getPotentialEnergy().value_in_unit(unit.kilojoules_per_mole)\n bathKE = integrator.getGlobalVariableByName('bathKE')\n bathPE = integrator.getGlobalVariableByName('bathPE')\n conserved = KE + PE + bathKE + bathPE\n energies.append(conserved)\n\n # Compute maximum deviation from the mean for conserved energies\n meanenergies = np.mean(energies)\n maxdeviation = np.amax(np.abs(energies - meanenergies)/meanenergies)\n assert maxdeviation < 1e-3\n\n # Coarse check for target temperature\n mean_temperature = np.mean(temperatures)\n assert abs(mean_temperature - temperature.value_in_unit(unit.kelvin)) < 10.0, mean_temperature",
"def rate_photon(E, Ec, A = 1): \r\n dNdE = np.zeros(len(E))\r\n for i in range(len(E)):\r\n if E[i] != 0:\r\n dNdE[i] = A *(E[i]**(-2/3)) * np.exp(-E[i]/Ec)\r\n else:\r\n dNdE[i] = 0\r\n return dNdE",
"def specialUnitaryAmoeba(energyFcn,n):\n def suKarcherMean(points,meanGuess):\n mean = meanGuess\n N = len(points)\n delta = 0.0001\n omega = 1.0/N*sum([scipy.linalg.logm(mean.conj().T@x) for x in points])\n omega = 0.5*(omega-omega.conj().T)\n while np.linalg.norm(omega) > delta :\n mean = mean@scipy.linalg.expm(omega)\n omega = 1.0/N*sum([scipy.linalg.logm(mean.conj().T@x) for x in points])\n omega = 0.5*(omega-omega.conj().T)\n mean = mean@scipy.linalg.expm(omega)\n return mean\n def suGeodesic(startPoint,endPoint):\n def geodesicFcn(t):\n log = scipy.linalg.logm(startPoint.conj().T@endPoint)\n log = 0.5*(log - log.conj().T)\n return startPoint@scipy.linalg.expm(-t*1j*log)\n return geodesicFcn\n def suExpansionMax(simplex,mean):\n logprod = scipy.linalg.logm(mean.conj().T@simplex[-1])\n logprod = 0.5*(logprod - logprod.conj().T)\n t = np.trace(logprod@logprod)\n return (np.pi/(4.0 * np.sqrt(-0.5*1.0/n*t))).real\n \n return RiemannianAmoeba(energyFcn,suKarcherMean,suGeodesic,suExpansionMax)",
"def energy(params,circuit):\n # setup circuit (resolve parameters to numerical values)\n resolver = cirq.ParamResolver({'theta'+str(j):params[j] for j in range(n_var_params)})\n resolved_circuit = cirq.resolve_parameters(circuit, resolver) \n u = resolved_circuit.unitary(qubit_order = qp+qb)\n unitary = u.reshape([2**nphys,2**nbond,2**nphys,2**nbond])\n # Convert to MPS and compute <H> \n # change the order of indices to [p, vL, vR] = [p_out, b_in, b_out] \n # (with p_in = 0 to go from unitary to isometry)\n B = np.swapaxes(unitary[:,:,0,:],1,2)\n psi = MPS.from_Bflat([site], [B], bc='infinite', dtype=complex, form=None)\n psi.canonical_form()\n psi.convert_form(psi.form)\n holo_E = (H.expectation_value(psi)).real\n \n return holo_E",
"def find_E(self,i,j,hi,hj):\n\t\tdirname = \"Q%dQ%d_%d%d\" % (i,j,hi,hj)\n\t\tout_str = open(\"HESS/%s/output.dat\" % dirname, \"r\").read()\n\t\tmatch = re.findall(\"Total Energy\\s=\\s+-\\d+.\\d+\",out_str)\n\t\tif match == []:\n\t\t\tout = \"Cannot find energy!\"\n\t\telse:\n\t\t\tout = float(match[0].split()[-1])\n\t\treturn out",
"def energy(signal):\n return np.sum(np.square(signal)) #change with np.mean to calculate mean energy",
"def kinetic_energy(ctx: Context, momentum: Q_, mass: Q_) -> Q_:\n return 0.5 * ctx.sum(momentum * momentum / mass)",
"def _compute_kinetic_energy_cell(self):\n b_cell_sq = torch.matmul(\n self.b_velocities_cell.transpose(2, 3), self.b_velocities_cell\n )\n # Einsum computes the trace\n return (\n self.b_masses_cell * torch.einsum(\"abii->ab\", b_cell_sq)[:, :, None, None]\n )",
"def Calculate_Xe(self):\n\t\tX_e_TempArray = [1]\n\t\tfor i in range(0,self.n_eta-1):\n\t\t\tif X_e_TempArray[i] > 0.99:\n\t\t\t\tX_e_TempArray.append(self.Saha_equation(self.x_eta[i]))\n\t\t\telse:\n\t\t\t\tPeebleXe = integrate.odeint(self.Peebles_equation, X_e_TempArray[i], self.x_eta[i:])\n\t\t\t\tbreak\n\n\t\tPeebleXe2 = []\n\t\tfor i in range(0, len(PeebleXe)-1):\n\t\t\tPeebleXe2.append(PeebleXe[i][0])\n\t\tself.X_e_array = np.concatenate([np.array(X_e_TempArray),np.array(PeebleXe2)])\t# Merges arrays"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Computes the running average of a given observable at time t
|
def running_average(array, dt=1):
if not isinstance(array, np.ndarray):
array = np.asarray(array)
r_ave = np.cumsum(array*dt)
for j in range(len(r_ave)):
r_ave[j] = r_ave[j]/(dt*(j+1))
return r_ave
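For a 1-D series the loop above is equivalent to a single vectorised expression; a minimal sketch with the same dt convention is:

import numpy as np

def running_average_vec(array, dt=1):
    # cumulative sum divided by the elapsed time dt*(j+1) at each step j
    array = np.asarray(array)
    return np.cumsum(array * dt) / (dt * np.arange(1, array.size + 1))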
|
[
"def get_avg(t):\n l = []\n for pl in range(n):\n l.append(markov[pl][t])\n expr = l[0]\n for i in range(1,n):\n expr = expr+l[i]\n return(1/n*expr)",
"def at(self, t):\n return \\\n self._avg + (self._lastValue - self._avg)*(1 - (1 - self.x.alpha)**( t - self._lastTime)) \\\n if self._avg is not None else None",
"def mean_rate(self, t_start=None, t_stop=None):\n if t_start is None: \n t_start = self._t_start\n if t_stop is None: \n t_stop=self._t_stop\n idx = numpy.where((self._spike_times >= t_start) & (self._spike_times <= t_stop))[0]\n return len(idx)/(t_stop-t_start)",
"def _compute_avg(self, gen):\n tuple_map = map(lambda f: (1, f.delay), gen)\n sum_reduce = reduce(lambda x, y: (x[0]+y[0], x[1]+y[1]), tuple_map)\n avg = sum_reduce[1] / sum_reduce[0]\n return avg",
"def running_mean(x, N): \n cumsum = np.cumsum(np.insert(x, 0, 0)) \n return (cumsum[N:] - cumsum[:-N]) / float(N)",
"def average_tensor(t):\n a = np.trace(t) / 3.0\n return np.array([[a, 0, 0], [0, a, 0], [0, 0, a]])",
"def moving_average(self, wdt=100):\n beg_mov_ave = self.beg + wdt\n end_mov_ave = self.end - wdt\n i = np.arange(beg_mov_ave, end_mov_ave)\n phi_mov_ave = np.array([self.phi_t[j - wdt:j + wdt].mean() for j in i])\n return beg_mov_ave, end_mov_ave, phi_mov_ave",
"def compute_average(n):\n data = []\n start = time() # start time in secs\n for k in range(n):\n data.append(None)\n end = time() # end time\n return (end - start) / n # compute average time",
"def running_mean(x, N):\n # cumsum = np.cumsum(np.insert(x, 0, 0))\n # return (cumsum[N:] - cumsum[:-N]) / float(N)\n return uniform_filter1d(x, size=N)",
"def moving_average(x, n, type='simple'):\n x = np.asarray(x)\n if type == 'simple':\n weights = np.ones(n)\n else:\n weights = np.exp(np.linspace(-1., 0., n))\n\n weights /= weights.sum()\n\n a = np.convolve(x, weights, mode='full')[:len(x)]\n a[:n] = a[n]\n return mean(a)",
"def running_mean(data, npoints): \n y = np.convolve(data,1.0/npoints*np.ones(npoints),mode=\"same\")\n return y[npoints/2:-npoints/2]",
"def calculate_average(self, n: int) -> int:\n total = 0\n counter = 0\n i = 0\n while counter != n:\n total += self.history[i]\n i += 1\n counter += 1\n return counter / n",
"def get_avg(self):\n\t\treturn self.sum / max(len(self.window), 1)",
"def moving_average(x, N):\n\tcumsum = np.cumsum(np.insert(x, 0, 0)) \n\treturn (cumsum[N:] - cumsum[:-N]) / float(N)",
"def mean_velocity(x, fs, timewin=1):\n rx = np.asarray([abs(x[0])] + [abs(x[i]-x[i-1]) for i in range(1,len(x))])\n return rx/(len(x)/fs)",
"def average(source: Observable) -> Observable:\n\n if key_mapper:\n return source.pipe(\n operators.map(key_mapper),\n operators.average()\n )\n\n def accumulator(prev, cur):\n return AverageValue(sum=prev.sum+cur, count=prev.count+1)\n\n def mapper(s):\n if s.count == 0:\n raise Exception('The input sequence was empty')\n\n return s.sum / float(s.count)\n\n seed = AverageValue(sum=0, count=0)\n return source.pipe(\n operators.scan(accumulator, seed),\n operators.last(),\n operators.map(mapper)\n )",
"def t_diff_mean(score, decay, t_max=None, t_step=None):\n if t_max is None:\n t_max = 5.5 / decay.item()\n if t_step is None:\n t_step = 0.5 / decay.item()\n t_range = torch.arange(0, t_max, t_step, device=score.device)\n integrand = torch.cat([t * torch.exp(-intensity_nll(t, score, decay))\n for t in t_range], dim=2)\n t_diff = np.trapz(integrand.detach().cpu().numpy(), dx=t_step, axis=2)\n return torch.tensor(t_diff).unsqueeze(-1).to(score.device)",
"def moving_average(array, N):\n return np.convolve(array, np.ones(N), 'valid') / N",
"def _moving_average(a: np.ndarray, n: int) -> np.ndarray:\n b = np.copy(a)\n b = np.insert(b, 0, np.full(n, a[0]))\n s = np.cumsum(b)\n res = (s[n:] - s[:-n]) / n\n return res"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Computes the block average of a potentially correlated timeseries "array"
|
def block_average(array, block_size):
if not isinstance(array, np.ndarray):
array = np.asarray(array)
n_blocks = int(len(array)/block_size)
blocks = np.array_split(array, n_blocks)
average = []
for block in blocks:
average.append(np.mean(block))
error = np.std(np.asarray(average))/np.sqrt(len(average))
return average, error
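A short usage example, assuming `samples` stands in for a correlated scalar time series: the returned standard error should plateau once the block size exceeds the correlation time of the data.

import numpy as np

samples = np.random.normal(size=10000)  # stand-in for a correlated observable
for block_size in (10, 100, 1000):
    block_means, err = block_average(samples, block_size)
    print(block_size, np.mean(block_means), err)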
|
[
"def mean_per_block(array, axis=None, controller=None):\n if axis is None or axis == 0:\n return sum_per_block(array, axis, controller) / count_per_block(array, axis, controller)\n else:\n return sum(array, axis, controller)",
"def block_mean(ar, fact):\n\t\n\tassert isinstance(fact, int), type(fact)\n\tsx, sy = ar.shape\n\tX, Y = np.ogrid[0:sx, 0:sy]\n\tregions = sy/fact * (X/fact) + Y/fact\n\tres = ndimage.mean(ar, labels=regions, index=np.arange(regions.max() + 1))\n\tres.shape = (sx/fact, sy/fact)\n\treturn res",
"def running_average(array, dt=1):\n if not isinstance(array, np.ndarray):\n array = np.asarray(array)\n\n r_ave = np.cumsum(array*dt)\n for j in range(len(r_ave)):\n r_ave[j] = r_ave[j]/(dt*(j+1))\n return r_ave",
"def moving_average(array, N):\n return np.convolve(array, np.ones(N), 'valid') / N",
"def avgDisplacement(arr):\n sum_squares = np.sum(arr**2, axis=1)\n return np.mean(np.sqrt(sum_squares))",
"def running_mean(data, npoints): \n y = np.convolve(data,1.0/npoints*np.ones(npoints),mode=\"same\")\n return y[npoints/2:-npoints/2]",
"def func(arr):\n return arr.mean()",
"def nonzero_mean(array):\n return array.sum() / np.count_nonzero(array)",
"def EMA(arr: ndarray, period: int, offset: int) -> float:\n a = 2 / (period + 1)\n upper_sum = 0.0\n lower_sum = 0.0\n\n for i in range(period + 1):\n if offset - i < 0:\n break\n upper_sum += arr[offset - i] * (1 - a) ** i\n lower_sum += (1 - a) ** i\n\n return upper_sum / lower_sum",
"def runmean0axis_periodic(a, halfwin):\n sa = a.shape\n da = len(sa)\n if not hasattr(halfwin, \"__len__\"): # halfwin is a scalar\n halfwin = np.repeat(halfwin, sa[0])\n sh = halfwin.shape\n dh = len(sh)\n if dh > da:\n raise ValueError(\"halfwin has more dimensions than a\")\n elif tuple(list(sa)[:dh]) != sh:\n raise ValueError(\"a and halfwin do not have the same shape in their first \"+str(dh)+\" dimensions\")\n hmax = halfwin.max()\n b = extend0axis_periodic(a, hmax)\n rm = np.empty_like(a)\n for indices, h in np.ndenumerate(halfwin):\n i = indices[0]\n sliceindices = (slice(i+hmax-h, i+hmax+h+1),)+indices[1:]\n rm[indices] = b[sliceindices].mean(axis=0)\n return rm",
"def rebin_time(self, arr, trb):\n nt = arr.shape[0]\n rbshape = (nt//trb, trb, ) + arr.shape[1:]\n\n arr = arr[:nt // trb * trb].reshape(rbshape)\n\n return arr.mean(1)",
"def _avg3(image):\n\n # Cast to appropriate type for safe averaging\n if image.dtype == np.uint8:\n dtype = np.uint16\n elif image.dtype == np.uint16:\n dtype = np.uint32\n elif image.dtype == np.uint32:\n dtype = np.uint64\n elif image.dtype == np.int8:\n dtype = np.int16\n elif image.dtype == np.int16:\n dtype = np.int32\n elif image.dtype == np.int32:\n dtype = np.int64\n else:\n dtype = image.dtype\n\n # Store original data type, and cast to safe data type for averaging\n odtype = image.dtype\n image = image.astype(dtype)\n imgshape = image.shape\n\n # Account for dimensions with odd dimensions to prevent data loss\n ypos = imgshape[0]\n xpos = imgshape[1]\n zpos = imgshape[2]\n z_max = zpos - zpos % 2 # if even then subtracting 0. \n y_max = ypos - ypos % 2 # if odd then subtracting 1\n x_max = xpos - xpos % 2\n yxz_max = [y_max, x_max, z_max]\n\n # Initialize the output\n avg_imgshape = np.ceil([d/2 for d in imgshape]).astype(int)\n avg_img = np.zeros(avg_imgshape,dtype=dtype)\n\n # Do the work\n avg_img[0:int(y_max/2),0:int(x_max/2),0:int(z_max/2)]= (\n image[0:y_max-1:2,0:x_max-1:2,0:z_max-1:2] + \n image[1:y_max:2 ,0:x_max-1:2,0:z_max-1:2] + \n image[0:y_max-1:2,1:x_max:2 ,0:z_max-1:2] + \n image[1:y_max:2 ,1:x_max:2 ,0:z_max-1:2] + \n image[0:y_max-1:2,0:x_max-1:2,1:z_max:2 ] + \n image[1:y_max:2 ,0:x_max-1:2,1:z_max:2 ] + \n image[0:y_max-1:2,1:x_max:2 ,1:z_max:2 ] + \n image[1:y_max:2 ,1:x_max:2 ,1:z_max:2 ]\n )/8\n\n # Account for odd shaped dimensions to prevent data loss\n if z_max != image.shape[2]:\n avg_img[:int(y_max/2),:int(x_max/2),-1] = (image[0:y_max-1:2,0:x_max-1:2,-1] + \n image[1:y_max:2 ,0:x_max-1:2,-1] + \n image[0:y_max-1:2,1:x_max:2 ,-1] + \n image[1:y_max:2 ,1:x_max:2 ,-1])/4\n if y_max != image.shape[0]:\n avg_img[-1,:int(x_max/2),:int(z_max/2)] = (image[-1,0:x_max-1:2,0:z_max-1:2] + \\\n image[-1,0:x_max-1:2,1:z_max:2 ] + \\\n image[-1,1:x_max:2 ,0:z_max-1:2] + \\\n image[-1,1:x_max:2 ,1:z_max:2 ])/4\n if x_max != image.shape[1]:\n avg_img[:int(y_max/2),-1,:int(z_max/2)] = (image[0:y_max-1:2,-1,0:z_max-1:2] + \\\n image[0:y_max-1:2,-1,1:z_max:2 ] + \\\n image[1:y_max:2 ,-1,0:z_max-1:2] + \\\n image[1:y_max:2 ,-1,1:z_max:2 ])/4\n if (y_max != image.shape[0] and x_max != image.shape[1]) and (z_max != image.shape[2]):\n avg_img[-1,-1,-1] = image[-1,-1,-1]\n\n return avg_img.astype(odtype)",
"def average_if_3D(array):\n if array.ndim == 3:\n array = np.mean(array, axis=2)\n\n return array",
"def __non_zero_mean__(inArray):\n # type: (numpy.ndarray) -> float\n return inArray[numpy.nonzero(inArray)[0]].mean()",
"def _compute_average(kernel: np.array, reps_i: List[np.array], reps_j: List[np.array]):\n\n # Count the number of atoms in the rows and columns\n # Works by accessing where the atomic number is stored in the FCHL representation\n natoms_i = np.array([np.greater(x[:][0][1], 0).sum() for x in reps_i])\n natoms_j = np.array([np.greater(x[:][0][1], 0).sum() for x in reps_j])\n total_atoms = natoms_i[:, None] * natoms_j[None, :]\n\n # Compute the average\n kernel /= total_atoms",
"def running_mean(x, N):\n if len(np.shape(x)) == 1:\n cumsum = np.cumsum(np.insert(x, 0, 0))\n return (cumsum[N:] - cumsum[:-N]) / N\n elif len(np.shape(x)) == 2:\n # Apply same reasoning to the array row-by-row\n dmyi = 0\n for row in x:\n tmpsum = np.cumsum(np.insert(row, 0, 0))\n outrow = (tmpsum[N:] - tmpsum[:-N]) / N\n if dmyi == 0:\n outarray = np.zeros((np.shape(x)[0], len(outrow)), dtype=x.dtype)\n outarray[dmyi, :] = outrow\n dmyi += 1\n\n return outarray\n else:\n raise RuntimeError('Input array x in running_mean(x, N) must be 1d or 2d.')",
"def _moving_average(a: np.ndarray, n: int) -> np.ndarray:\n b = np.copy(a)\n b = np.insert(b, 0, np.full(n, a[0]))\n s = np.cumsum(b)\n res = (s[n:] - s[:-n]) / n\n return res",
"def _mean(self):\n mat = self._factorize(self.matrix, self.xdef)\n mat = self._rdc_x(mat, self.xdef)\n ysects = self._by_ysect(mat, self.ydef)\n return np.expand_dims([np.nansum(ymat[:, 0] /\n np.nansum(ymat[:, -1]))\n for ymat in ysects], 1).T",
"def running_mean(x, N):\n # cumsum = np.cumsum(np.insert(x, 0, 0))\n # return (cumsum[N:] - cumsum[:-N]) / float(N)\n return uniform_filter1d(x, size=N)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Computes the current pressure using the kinetic energy and the calculated internal virial
|
def current_pressure(virial):
pressure = (2*system.kinetic + system.virial)/3/system.V
system.pressure = pressure
return pressure
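The expression implements the standard virial equation of state, with E_kin the total kinetic energy, W the internal virial and V the volume; note that the instantaneous virial is read from the module-level `system` object rather than from the `virial` argument:

P = \frac{2 E_{\mathrm{kin}} + W}{3 V}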
|
[
"def pressure(current_data):\n pressure = gamma1*(current_data.q[2,:]-\n 0.5*current_data.q[1,:]**2/current_data.q[0,:])\n return pressure",
"def velocity_pressure(self) -> qty.Pressure:\n rho = self._fluid.density()\n v = self._flow_rate / self._cross_section.area()\n return qty.Pressure(rho * v ** 2.0 / 2.0)",
"def pressure(self):\n self._read_temperature()\n\n # Algorithm from the BME280 driver:\n # https://github.com/BoschSensortec/BME280_driver/blob/master/bme280.c\n adc = self._read24(_BME280_REGISTER_PRESSUREDATA) / 16 # lowest 4 bits get dropped\n var1 = float(self._t_fine) / 2.0 - 64000.0\n var2 = var1 * var1 * self._pressure_calib[5] / 32768.0\n var2 = var2 + var1 * self._pressure_calib[4] * 2.0\n var2 = var2 / 4.0 + self._pressure_calib[3] * 65536.0\n var3 = self._pressure_calib[2] * var1 * var1 / 524288.0\n var1 = (var3 + self._pressure_calib[1] * var1) / 524288.0\n var1 = (1.0 + var1 / 32768.0) * self._pressure_calib[0]\n if not var1: # avoid exception caused by division by zero\n raise ArithmeticError(\"Invalid result possibly related to error while \\\nreading the calibration registers\")\n\n pressure = 1048576.0 - adc\n pressure = ((pressure - var2 / 4096.0) * 6250.0) / var1\n var1 = self._pressure_calib[8] * pressure * pressure / 2147483648.0\n var2 = pressure * self._pressure_calib[7] / 32768.0\n pressure = pressure + (var1 + var2 + self._pressure_calib[6]) / 16.0\n pressure /= 100\n if pressure < _BME280_PRESSURE_MIN_HPA:\n return _BME280_PRESSURE_MIN_HPA\n if pressure > _BME280_PRESSURE_MAX_HPA:\n return _BME280_PRESSURE_MAX_HPA\n return pressure",
"def _compute_pressure(self, system):\n # Get the pressure (R x M)\n pressure = system.compute_pressure(kinetic_component=False, tensor=False)\n # Get the volume (R x M)\n volume = system.volume\n return pressure, volume",
"def getPressureInPsi(self):\n return self.getPressure() * 0.014503773773",
"def pressure(self, ps):\n return ps * self.sigma",
"def calculate_pressure_perturbation(self):\n\n self.Pprime = self.P.copy()\n\n for i in xrange(len(self.hpid)):\n\n nans = np.isnan(self.P[:, i])\n\n z = self.z[~nans, i]\n b = self.b[~nans, i]\n\n # z should be increasing.\n if z[0] > z[-1]:\n z = np.flipud(z)\n b = np.flipud(b)\n\n bi = cumtrapz(b, z, initial=0.)\n bii = cumtrapz(bi, z, initial=0.)\n\n Pprime = bi + (bii[0] - bii[-1])/(-z[0])\n\n self.Pprime[~nans, i] = np.flipud(Pprime)\n\n else:\n bi = cumtrapz(b, z, initial=0.)\n bii = cumtrapz(bi, z, initial=0.)\n\n self.Pprime[~nans, i] = bi + (bii[0] - bii[-1])/(-z[0])",
"def _compute_pressure(self, system):\n # Get the pressure (R x M x 3 x 3)\n pressure = system.compute_pressure(kinetic_component=False, tensor=True)\n # Get the volume (R x M x 1 x 1)\n volume = system.volume[..., None]\n return pressure, volume",
"def pressure(self) -> Quantity:\r\n return self._pressure",
"def pressure(self):\n return None",
"def lifetime_pressure_velocitydispersion_tradeoff(n, selected_pillar):\n # (1 Gauss / (1 cm^−(1/2) * g^(1/2) * s^−1))\n cgs_to_gauss = (u.Gauss / (u.cm**(-1/2) * u.g**(1/2) * u.s**-1))\n\n\n #### check what B field needed for 1-3 x 10^7 K cm-3\n def reverse_engineer_B_field(p):\n print(f\"For pressure P = {p:.1E}, \", end='')\n b = ((p*8*np.pi*const.k_B)**(1/2) * cgs_to_gauss).to(u.microGauss)\n print(f\"B = {b:.2f}\")\n reverse_engineer_B_field(3e6*u.K/u.cm**3)\n reverse_engineer_B_field(1e7*u.K/u.cm**3)\n reverse_engineer_B_field(2e7*u.K/u.cm**3)\n reverse_engineer_B_field(3e7*u.K/u.cm**3)\n print()\n\n\n def calc_B_field_Pattle(nH2, sigma_v, mmw=1.4):\n \"\"\"\n Implementing the equation for B field using Pattle's numbers but allowing\n mean molecular weight, sigma_v and nH2 to change\n I will use MMW = 1.33 but I want to check equations using theirs, 1.4\n \"\"\"\n Q = 0.5\n sigma_th = (14.4*u.deg).to(u.rad).to_value()\n rho = (2 * nH2 * mmw * Hmass).to(u.g/u.cm**3)\n return (Q * np.sqrt(4 * np.pi * rho) * (sigma_v / sigma_th) * cgs_to_gauss).to(u.microGauss)\n\n def calc_turbulent_pressure(nH2, sigma_v):\n \"\"\"\n Now default to mmw=1.33\n \"\"\"\n return ((2 * nH2 * mean_molecular_weight_neutral * Hmass) * sigma_v**2 / const.k_B).to(u.K * u.cm**-3)\n\n b_170ug = calc_B_field_Pattle(5e4 * u.cm**-3, 0.5 * kms)\n print(f\"This should be ~170uG: {b_170ug:.1f}\")\n\n nH2_lo = 1.3e5\n nH2_hi = 1.3e5\n\n b_molecular_lo = calc_B_field_Pattle(nH2_lo * u.cm**-3, 0.6 * kms, mmw=mean_molecular_weight_neutral)\n b_molecular_hi = calc_B_field_Pattle(nH2_hi * u.cm**-3, 0.6 * kms, mmw=mean_molecular_weight_neutral)\n print(f\"This is my best number for molecular gas: {b_molecular_lo:.1f} -- {b_molecular_hi:.1f}\")\n\n def calc_Bpressure_Pattle(B_field):\n return ((B_field/cgs_to_gauss)**2 / (8*np.pi * const.k_B)).to(u.K * u.cm**-3)\n\n pB_mol_lo = calc_Bpressure_Pattle(b_molecular_lo)\n pB_mol_hi = calc_Bpressure_Pattle(b_molecular_hi)\n print(f\"Molecular B pressures: {pB_mol_lo:.2E} -- {pB_mol_hi:.2E}\")\n p_therm_mol_lo = 25 * nH2_lo\n p_therm_mol_hi = 25 * nH2_hi\n p_turb_mol_lo = calc_turbulent_pressure(nH2_lo*u.cm**-3, 0.6*kms)\n p_turb_mol_hi = calc_turbulent_pressure(nH2_hi*u.cm**-3, 0.6*kms)\n print(f\"Molecular thermal pressure: {p_therm_mol_lo:.1E} -- {p_therm_mol_hi:.1E} \")\n print(f\"Molecular turbulent pressure: {p_turb_mol_lo:.1E} -- {p_turb_mol_hi:.1E}\")\n\n p_tot_mol_lo = (pB_mol_lo.to_value() + p_turb_mol_lo.to_value() + p_therm_mol_lo) / 1e6\n p_tot_mol_hi = (pB_mol_hi.to_value() + p_turb_mol_hi.to_value() + p_therm_mol_hi) / 1e6\n\n print(f\"Total molecular pressures: {p_tot_mol_lo:.1f} -- {p_tot_mol_hi:.1f}\")\n\n p_atom_lo = pB_mol_lo * (n/(2*nH2_lo))\n p_atom_hi = pB_mol_hi * (n/(2*nH2_hi))\n # print(f\"Atomic pressures: {p_atom_lo:.2E} -- {p_atom_hi:.2E}\")\n\n # n/2 because I baked in the 2xmH for molecular H2 into that function\n b_atom = calc_B_field_Pattle(n/2 * u.cm**-3, 0.6*kms, mmw=mean_molecular_weight_neutral)\n pB_atom = calc_Bpressure_Pattle(b_atom)\n print(f\"Atomic B values: {b_atom:.1f}, {pB_atom:.2E}\")\n\n\n\n \"\"\"\n There is a unit issue in the pressure expression; check on Wolfram that my combination of P_B(Bfield) has valid units\n It works it's just the Gaussian units thing\n \"\"\"\n\n\n def sigma_turb(alpha, sigma_total):\n return np.sqrt(alpha) * sigma_total\n\n def sigma_flow(alpha, sigma_total):\n return np.sqrt(1 - alpha) * sigma_total\n\n # rho is mass density\n n = n * u.cm**-3 # or 2e4\n # Neutral mass density\n rho = 
(n*mean_molecular_weight_neutral*Hmass).to(u.g/u.cm**3)\n\n def turb_pressure(alpha, sigma_total):\n # Combining magnetic and turbulent pressure, which have the same dependence on the quantity rho*sigma^2\n return (rho * sigma_turb(alpha, sigma_total)**2 / const.k_B).to(u.K / u.cm**3)\n\n\n p_turb_atomic = (rho * (1.3*kms)**2 / const.k_B).to(u.K / u.cm**3)\n print(f\"Atomic turbulent pressure: {p_turb_atomic:.2E}\")\n\n\n\n pillar_properties = { # area (pc2), mass (solMass from CO)\n 'P1a-head': (0.17886, 64.12), 'P2-head': (0.07557, 11.32), 'P3-head': (0.02191, 4.27)\n }\n def mdot_and_pillar_lifetime(alpha, sigma_total, pillar_label):\n # Return both so we can make 2 plots\n area_pc2, mass_solMass = pillar_properties[pillar_label]\n area = area_pc2 * u.pc**2\n mass = mass_solMass * u.solMass\n mass_loss_rate = (sigma_flow(alpha, sigma_total) * rho * area / 2.).to(u.solMass / u.Myr)\n lifetime = (mass / mass_loss_rate).to(u.Myr)\n return mass_loss_rate, lifetime\n\n alpha_range = np.arange(0, 1, 0.05)\n\n fig = plt.figure(figsize=(10, 9))\n ax1 = plt.subplot(221)\n ax2 = plt.subplot(222)\n ax3 = plt.subplot(223)\n ax4 = plt.subplot(224)\n\n transparency = 0.2\n p_therm_lo = n.to_value()*100/1e6\n p_therm_hi = n.to_value()*250/1e6\n print(f\"Atomic thermal pressure {p_therm_lo} -- {p_therm_hi}\")\n print(f\"Atomic total pressure {(p_turb_atomic+pB_atom).to_value()/1e6 + p_therm_lo:.1f} -- {(p_turb_atomic+pB_atom).to_value()/1e6 + p_therm_hi:.1f}\")\n pB_atom_val = pB_atom.to_value()/1e6\n\n colors = marcs_colors[:3]\n # selected_pillar = \"P2-head\"\n\n for i, sigma_total in enumerate([1.0, 1.1, 1.3][::-1]*kms):\n label = \"$\\\\sigma_{\\\\rm tot} =$ \" + f\"{sigma_total:.2f}\"\n ax1.plot(alpha_range, sigma_turb(alpha_range, sigma_total).to_value(), color=colors[i], label=label)\n ax1.plot(alpha_range, sigma_flow(alpha_range, sigma_total).to_value(), color=colors[i], linestyle='--')\n\n p_turb = turb_pressure(alpha_range, sigma_total).to_value()/1e6\n ax2.fill_between(alpha_range, p_therm_lo+pB_atom_val+p_turb, y2=p_therm_hi+pB_atom_val+p_turb, color=colors[i], alpha=transparency)\n\n mass_loss_rate, lifetime = mdot_and_pillar_lifetime(alpha_range, sigma_total, selected_pillar)\n ax3.plot(alpha_range, mass_loss_rate.to_value(), color=colors[i])\n ax4.plot(alpha_range, lifetime.to_value(), color=colors[i])\n\n ax1.legend()\n\n ax1.set_title(f\"bottom plots using {selected_pillar}\")\n ax2.set_title(f\"Density n={n:.1E}\")\n\n ax2.set_ylim([0, 40])\n ax2.axhspan(p_tot_mol_lo, p_tot_mol_hi, color=marcs_colors[5], alpha=transparency, label='$P_{{\\\\rm H}_2}$') # fill region\n ax2.axhspan(18, 36, color=marcs_colors[6], alpha=transparency, label='$P_{\\\\rm HII}$') # fill region\n ax2.axhline(pB_atom_val, color=marcs_colors[5], alpha=transparency, label='$P_{{\\\\rm HI,B}}$')\n ax2.axhspan(p_therm_lo + pB_atom_val, p_therm_hi + pB_atom_val, color=marcs_colors[7], alpha=transparency, label='$P_{{\\\\rm HI,B}} + P_{{\\\\rm HI,therm}}$')\n ax2.legend(loc='upper left')\n\n ax3.set_xlabel(\"$\\\\alpha$\")\n ax4.set_xlabel(\"$\\\\alpha$\")\n ax1.set_ylabel(\"1D Velocity dispersion $\\\\sigma$ (km s-1)\")\n ax2.set_ylabel(\"Total non-thermal pressure (cm-3)\")\n ax3.set_ylabel(f\"{selected_pillar}\" + \" $M_{\\\\odot}$ (solMass Myr-1)\")\n ax3.set_ylim([0, 100])\n ax4.set_ylabel(f\"{selected_pillar} Pillar lifetime (Myr)\")\n ax4.axhspan(1, 3, color=marcs_colors[5], alpha=transparency)\n ax4.set_ylim([0, 8])\n # 2023-02-06,21, 03-16,25\n 
fig.savefig(f\"/home/ramsey/Pictures/2023-03-25/pressure_mdot_tradeoff_{selected_pillar}_{n.to_value():.1E}.png\",\n metadata=catalog.utils.create_png_metadata(title=f\"B pressure scaled by density only; {selected_pillar}; n={n:.1E}\",\n file=__file__, func=\"lifetime_pressure_velocitydispersion_tradeoff\"))",
"def get_vapour_pressure(T):\r\n\r\n # Parameters\r\n pc = 22.064e6 # [Pa]\r\n Tc = 647.096 # [K]\r\n a1 = -7.85951783\r\n a2 = 1.84408259\r\n a3 = -11.7866497\r\n a4 = 22.6807411\r\n a5 = -15.9618719\r\n a6 = 1.80122502\r\n\r\n # Conversion degree Celsius -> Kelvin\r\n #T0_Kelvin = 273.15 # [K]\r\n T = T + T0_Kelvin\r\n\r\n theta = 1 - T / Tc\r\n\r\n # Compute vapour pressure pv\r\n # as a function of the temperature T\r\n pv = pc * np.exp(Tc / T * (a1 * theta \\\r\n + a2 * theta ** 1.5 \\\r\n + a3 * theta ** 3 \\\r\n + a4 * theta ** 3.5 \\\r\n + a5 * theta ** 4 \\\r\n + a6 * theta ** 7.5))\r\n\r\n return pv",
"def calculate_dynamic_pressure(self):\n # Calculate and return the freestream dynamic pressure\n return 0.5 * self.density * self.velocity ** 2",
"def gas_pressure(density, internal_energy, gamma=4./3.):\n\n g_minus_1 = gamma - 1\n return g_minus_1 * density * internal_energy",
"def reactor_pressure_deriv(self):\n deriv = np.zeros((2, 5 + self.num_vars, self.num_nw_vars))\n # derivatives for pressure oxygen inlet\n deriv[0, 1, 1] = -1\n deriv[0, 4, 1] = 1\n # derivatives for pressure hydrogen inlet\n deriv[1, 2, 1] = -1\n deriv[1, 4, 1] = 1\n\n return deriv",
"def _water_vapour_pressure(t):\n if t is None:\n return None\n ex_pwr = (17.2694 * t)/(t + 238.3)\n return 610.17 * np.exp(ex_pwr)",
"def __call__(self,pressure, temperature = linspace(0,20000, 201)):\r\n self.pressure = pressure\r\n self.temperature = temperature\r\n \r\n n = self.alpha.shape[1]\r\n \r\n self.p = sum(self.alpha*(log(self.pressure)**(arange(n)))[newaxis,...], axis =1)\r\n \r\n self.const = self.p[:2]\r\n self.a = self.p[slice(2+0, None, 3)]\r\n self.c = self.p[slice(2+1, None, 3)]\r\n self.delta = self.p[slice(2+2, None, 3)]\r\n \r\n self.a = exp(self.a)\r\n self.c = exp(self.c)\r\n self.delta = exp(self.delta)\r\n \r\n self.specificHeat = sum(self.const[:,newaxis]*temperature[newaxis,...]**arange(self.const.shape[0])[:,newaxis],axis = 0)\r\n for i in range(self.sigmaIdx):\r\n self.specificHeat+= self.a[i]*self.sigma(temperature, self.c[i], self.delta[i])\r\n for i in range(self.sigmaIdx, self.a.shape[0]):\r\n self.specificHeat+= self.a[i]*self.gamma(temperature, self.c[i], self.delta[i])\r\n \r\n self.specificHeat *= 4.184*1000.\r\n with open(\"airSpecificHeat_p=%1.f_nT=%d.pkl\"%(pressure, temperature.shape[0]), 'wb') as f:\r\n pkl.dump(dict(zip(['pressure', 'temperature', 'specificHeat'], \r\n [pressure, temperature, self.specificHeat])), f, -1)\r\n \r\n self.specificHeat[0] = self.specificHeat[1]\r\n return self.specificHeat",
"def sat_vapor_pressure(T):\n T_C = T-273\n return 1000 * 0.61078 * np.exp(17.27*T_C/(T_C+237.3))",
"def _calculate_turbulent_kinetic_energy(self):\n\n map_values(\n self,\n h=self.h_temp,\n h_link=self.h_link_temp,\n u=self.u_temp,\n v=self.v_temp,\n U=self.U_temp,\n )\n self.Kh_temp[self.wet_pwet_links[self.Kh_temp[self.wet_pwet_links] < 0]] = 0.0\n\n # development of turbulent kinetic energy\n alpha = 0.1\n # Ri = self.R * self.g * self.Ch_link[self.wet_pwet_links] \\\n # / self.U[self.wet_pwet_links] / self.U[self.wet_pwet_links]\n # beta = (0.5 * self.ew_link[self.wet_pwet_links] *\n # (1 - Ri - 2.0 * self.Cf / alpha) + self.Cf) / (self.Cf /\n # alpha)**1.5\n\n beta = alpha ** 1.5 / self.Cf ** 0.5\n # K = self.Kh[self.wet_pwet_links] / \\\n # self.h_link_temp[self.wet_pwet_links]\n\n # self.Kh_temp[self.wet_pwet_links] += self.dt_local * (\n # alpha * K * self.U_temp[self.wet_pwet_links]\n # + 0.5\n # * self.ew_link[self.wet_pwet_links]\n # * self.U_temp[self.wet_pwet_links] ** 3\n # - beta * K ** 1.5\n\n self.Kh_temp[self.wet_pwet_links] += self.dt_local * (\n (self.Cf + 0.5 * self.ew_link[self.wet_pwet_links])\n * self.U_temp[self.wet_pwet_links]\n * self.U_temp[self.wet_pwet_links]\n * self.U_temp[self.wet_pwet_links]\n - beta\n * (self.Kh[self.wet_pwet_links] / self.h_link[self.wet_pwet_links]) ** 1.5\n - self.R\n * self.g\n * (\n np.sum(\n self.Ch_link_i_temp[:, self.wet_pwet_links] * self.ws, axis=0)\n + 0.5\n * self.U_temp[self.wet_pwet_links]\n * self.ew_link[self.wet_pwet_links]\n * self.Ch_link_temp[self.wet_pwet_links]\n )\n )\n\n # remove negative values\n self.Kh_temp[self.wet_pwet_links[self.Kh_temp[self.wet_pwet_links] < 0.0]] = 0.0\n\n # adjust_negative_values(self.Kh,\n # self.wet_pwet_links,\n # self.link_east,\n # self.link_west,\n # self.link_north,\n # self.link_south,\n # out_f=self.Kh_temp)\n\n # update friction coefficient Cf_link and Cf_nodes\n U_exist = self.U_temp[self.wet_pwet_links] > 1.0e-10\n self.Cf_link[self.wet_pwet_links[U_exist]] = (\n alpha\n * self.Kh_temp[self.wet_pwet_links[U_exist]]\n / self.U_temp[self.wet_pwet_links[U_exist]]\n / self.U_temp[self.wet_pwet_links[U_exist]]\n )\n # self.Cf_link[self.Cf_link > 0.1] = 0.1\n self.Cf_link[self.wet_pwet_links[~U_exist]] = 0.0\n map_values(self, Cf_link=self.Cf_link, Cf_node=self.Cf_node)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Computes the internal virial given by Theta = sum_i r_i dot f_i
|
def internal_virial(r,f):
virial = 0
for i in range(r.shape[0]):
for dim in range(r.shape[1]):
virial += r[i,dim]*f[i,dim]
return virial
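
# A vectorized NumPy equivalent of the explicit double loop above (illustrative sketch,
# not part of the original sample; assumes r and f are NumPy arrays of shape
# (n_particles, n_dimensions)):
import numpy as np

def internal_virial_vectorized(r, f):
    # Same quantity: sum over particles i and dimensions dim of r[i, dim] * f[i, dim]
    return np.sum(r * f)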
|
[
"def integrate(self,f,use_dV=False):\n if use_dV:\n return ((f[0:self.N-1]+f[1:self.N])*self.dV).sum()*0.5\n else:\n return ((f[0:self.N-1]+f[1:self.N])*self.dr).sum()*0.5",
"def _refraction(self, F, i, forward=True):\n\n\n if forward:\n dx = self.calc_x_coords[i+1]-self.calc_x_coords[i]\n else:\n dx = self.calc_x_coords[i]-self.calc_x_coords[i-1]\n\n C = self.C[...,i]\n if self._oblique_correction:\n oblique_coeff = np.abs(cos(self.tilt_h)*cos(self.tilt_v))\n else:\n oblique_coeff = 1\n phase = dx* (np.real(C) + \\\n 1j*np.imag(C)/oblique_coeff) / \\\n (2*self.k_0[i])\n\n if self._debug:\n if forward:\n self._temp_dphi_eps = phase\n else:\n self.dphi_eps[..., self._counter] = \\\n self.dphi_eps[..., self._counter-1]+\\\n self._temp_dphi_eps + phase\n self._counter += 1\n\n return np.exp(1j*phase)*F",
"def taylor(x,f,i,n):\n #total = 0\n #dx = x[i] - x[i-1]\n #for j in range(n):\n # mat = np.linalg.matrix_power(np.matrix(ac.derivative(x[0],x[-1],n)),j) @ f\n # total = total + (mat[i-1]*(dx**j))\n def tay(e):\n total = 0\n for j in range(n):\n mat = np.linalg.matrix_power(np.matrix(ac.derivative(x[0],x[-1],n)),j) @ f\n total = total + (mat[i]*((x[e]-x[i])**j)/np.factorial(j))\n return total\n fx = np.vectorize(tay)\n return (x,fx(x))",
"def objective(self,data):\r\n F = -0.5*self.lbda*(np.sum(self.U*self.U)+np.sum(self.V*self.V))\r\n for i in xrange(len(self.U)):\r\n f = self.precompute_f(data,i)\r\n for j in f:\r\n F += log(g(f[j]))\r\n for k in f:\r\n F += log(1-g(f[k]-f[j]))\r\n return F",
"def scheme(u, q, f, i, j, n, i2, i3, j2, j3, x ,y, dtdx2, dtdy2, dt2, dt, b):\n\n u[i,j,n+1] = 2*u[i,j,n] - (1 - 0.5*b*dt)*u[i,j,n-1] + \\\n dtdx2*((q(x[i2],y[j]) + q(x[i],y[j]))*(u[i2,j,n] - u[i,j,n]) - (q(x[i],y[j]) + q(x[i3],y[j]))*(u[i,j,n] -u[i3,j,n])) + \\\n dtdy2*((q(x[i],y[j2]) + q(x[i],y[j]))*(u[i,j2,n] - u[i,j,n]) - (q(x[i],y[j]) + q(x[i],y[j3]))*(u[i,j,n] -u[i,j3,n])) + \\\n dt2*f(x[i],y[j],dt*n)\n \n u[i,j,n+1] /= 1 + 0.5*b*dt",
"def solver(I, w, dt, T, V, f):\n dt = float(dt)\n Nt = int(round(T/dt)) # 100000\n u = np.zeros(Nt+1)\n t = np.linspace(0, Nt*dt, Nt+1)\n\n u[0] = I\n u[1] = u[0] + dt*V + 0.5*(f(t[0]) - w**2*u[0])*dt**2#compute first step by 1'st order difference\n for n in range(1, Nt):\n u[n+1] = (f(t[n])-w**2*u[n])*dt**2 + 2*u[n]-u[n-1]\n return u, t",
"def integrate(self, f):\n # Start timing the approximation.\n start = time.time()\n\n # Evaluate the quadrature method.\n if (self.weights is None):\n self.approx = self.outer * np.sum(f(self.evaluation_pts))\n else:\n self.approx = self.outer * np.sum(self.weights *\n f(self.evaluation_pts))\n\n # Finish timing the approximation.\n end = time.time()\n\n # Store and return the approximated integral and the time it\n # took to run the quadrature method.\n self.time_taken = end - start\n return self.approx",
"def I_fxn(param, to, tf, r):\n Ks = param[\"Ks\"]\n Ao = param[\"Ao\"]\n\n return (r - Ks) * (tf - to) - Ao * (np.sqrt(tf) - np.sqrt(to))",
"def solver(I, V, f, w, dt, T):\n dt = float(dt)\n Nt = int(round(T/dt))\n u = np.zeros(Nt+1)\n t = np.linspace(0, Nt*dt, Nt+1)\n\n u[0] = I\n u[1] = u[0] - 0.5*dt**2*w**2*u[0] + dt*V + 0.5*dt**2*f(t[0])\n for n in range(1,Nt):\n u[n+1] = dt**2*f(t[n]) + 2*u[n] - u[n-1] - dt**2*w**2*u[n]\n return u,t",
"def calculate_r3(self):\n logger.debug('Calculating O(r**3) terms')\n \n # Shorthand\n sign_psi = self.spsi\n sign_G = self.sG\n G2 = self.G2\n N_helicity = self.iota - self.iotaN\n B0 = self.B0\n G0 = self.G0\n I2 = self.I2\n X1c = self.X1c\n Y1c = self.Y1c\n Y1s = self.Y1s\n X20 = self.X20\n X2s = self.X2s\n X2c = self.X2c\n Y20 = self.Y20\n Y2s = self.Y2s\n Y2c = self.Y2c\n Z20 = self.Z20\n Z2s = self.Z2s\n Z2c = self.Z2c\n B20 = self.B20\n B1c = self.etabar * B0\n torsion = self.torsion\n curvature = self.curvature\n abs_G0_over_B0 = self.abs_G0_over_B0\n d_X1c_d_varphi = self.d_X1c_d_varphi\n d_Y1c_d_varphi = self.d_Y1c_d_varphi\n d_Z20_d_varphi = self.d_Z20_d_varphi\n\n # The expression below is computed in \"20190305-01 GarrenBoozer r2 corrected radius.nb\" in the section \"Approach of adding r**3 terms, assuming quasisymmetry\"\n # 20190714: To account for QH cases, changed iota -> iota_N where it occurs 3 lines below:\n flux_constraint_coefficient = (-4*B0**2*G0*X20**2*Y1c**2 + 8*B0**2*G0*X20*X2c*Y1c**2 - 4*B0**2*G0*X2c**2*Y1c**2 - \\\n 4*B0**2*G0*X2s**2*Y1c**2 + 8*B0*G0*B1c*X1c*X2s*Y1c*Y1s + 16*B0**2*G0*X20*X2s*Y1c*Y1s + \\\n 2*B0**2*I2*self.iotaN*X1c**2*Y1s**2 - G0*B1c**2*X1c**2*Y1s**2 - 4*B0*G0*B20*X1c**2*Y1s**2 - \\\n 8*B0*G0*B1c*X1c*X20*Y1s**2 - 4*B0**2*G0*X20**2*Y1s**2 - 8*B0*G0*B1c*X1c*X2c*Y1s**2 - \\\n 8*B0**2*G0*X20*X2c*Y1s**2 - 4*B0**2*G0*X2c**2*Y1s**2 - 4*B0**2*G0*X2s**2*Y1s**2 + \\\n 8*B0**2*G0*X1c*X20*Y1c*Y20 - 8*B0**2*G0*X1c*X2c*Y1c*Y20 - 8*B0**2*G0*X1c*X2s*Y1s*Y20 - \\\n 4*B0**2*G0*X1c**2*Y20**2 - 8*B0**2*G0*X1c*X20*Y1c*Y2c + 8*B0**2*G0*X1c*X2c*Y1c*Y2c + \\\n 24*B0**2*G0*X1c*X2s*Y1s*Y2c + 8*B0**2*G0*X1c**2*Y20*Y2c - 4*B0**2*G0*X1c**2*Y2c**2 + \\\n 8*B0**2*G0*X1c*X2s*Y1c*Y2s - 8*B0*G0*B1c*X1c**2*Y1s*Y2s - 8*B0**2*G0*X1c*X20*Y1s*Y2s - \\\n 24*B0**2*G0*X1c*X2c*Y1s*Y2s - 4*B0**2*G0*X1c**2*Y2s**2 - 4*B0**2*G0*X1c**2*Z20**2 - \\\n 4*B0**2*G0*Y1c**2*Z20**2 - 4*B0**2*G0*Y1s**2*Z20**2 - 4*B0**2*abs_G0_over_B0*I2*Y1c*Y1s*Z2c + \\\n 8*B0**2*G0*X1c**2*Z20*Z2c + 8*B0**2*G0*Y1c**2*Z20*Z2c - 8*B0**2*G0*Y1s**2*Z20*Z2c - \\\n 4*B0**2*G0*X1c**2*Z2c**2 - 4*B0**2*G0*Y1c**2*Z2c**2 - 4*B0**2*G0*Y1s**2*Z2c**2 + \\\n 2*B0**2*abs_G0_over_B0*I2*X1c**2*Z2s + 2*B0**2*abs_G0_over_B0*I2*Y1c**2*Z2s - 2*B0**2*abs_G0_over_B0*I2*Y1s**2*Z2s + \\\n 16*B0**2*G0*Y1c*Y1s*Z20*Z2s - 4*B0**2*G0*X1c**2*Z2s**2 - 4*B0**2*G0*Y1c**2*Z2s**2 - \\\n 4*B0**2*G0*Y1s**2*Z2s**2 + B0**2*abs_G0_over_B0*I2*X1c**3*Y1s*torsion + B0**2*abs_G0_over_B0*I2*X1c*Y1c**2*Y1s*torsion + \\\n B0**2*abs_G0_over_B0*I2*X1c*Y1s**3*torsion - B0**2*I2*X1c*Y1c*Y1s*d_X1c_d_varphi + \\\n B0**2*I2*X1c**2*Y1s*d_Y1c_d_varphi)/(16*B0**2*G0*X1c**2*Y1s**2)\n\n self.X3c1 = self.X1c * flux_constraint_coefficient\n self.Y3c1 = self.Y1c * flux_constraint_coefficient\n self.Y3s1 = self.Y1s * flux_constraint_coefficient\n self.X3s1 = self.X1s * flux_constraint_coefficient\n self.Z3c1 = 0\n self.Z3s1 = 0\n\n self.X3c3 = 0\n self.X3s3 = 0\n self.Y3c3 = 0\n self.Y3s3 = 0\n self.Z3c3 = 0\n self.Z3s3 = 0\n\n self.d_X3c1_d_varphi = self.d_d_varphi @ self.X3c1\n self.d_Y3c1_d_varphi = self.d_d_varphi @ self.Y3c1\n self.d_Y3s1_d_varphi = self.d_d_varphi @ self.Y3s1\n\n # The expression below is derived in the O(r**2) paper, and in \"20190318-01 Wrick's streamlined Garren-Boozer method, MHD.nb\" in the section \"Not assuming quasisymmetry\".\n # Note Q = (1/2) * (XYEquation0 without X3 and Y3 terms) where XYEquation0 is the quantity in the above notebook.\n Q = -sign_psi * B0 * abs_G0_over_B0 / (2*G0*G0) * (self.iotaN * I2 + mu0 * self.p2 * G0 / (B0 * B0)) + 2 * 
(X2c * Y2s - X2s * Y2c) \\\n + sign_psi * B0 / (2*G0) * (abs_G0_over_B0 * X20 * curvature - d_Z20_d_varphi) \\\n + I2 / (4 * G0) * (-abs_G0_over_B0 * torsion * (X1c*X1c + Y1s*Y1s + Y1c*Y1c) + Y1c * d_X1c_d_varphi - X1c * d_Y1c_d_varphi)\n predicted_flux_constraint_coefficient = - Q / (2 * sign_G * sign_psi)\n\n B0_order_a_squared_to_cancel = -sign_G * B0 * B0 * (G2 + I2 * N_helicity) * abs_G0_over_B0 / (2*G0*G0) \\\n -sign_G * sign_psi * B0 * 2 * (X2c * Y2s - X2s * Y2c) \\\n -sign_G * B0 * B0 / (2*G0) * (abs_G0_over_B0 * X20 * curvature - d_Z20_d_varphi) \\\n -sign_G * sign_psi * B0 * I2 / (4*G0) * (-abs_G0_over_B0 * torsion * (X1c*X1c + Y1c*Y1c + Y1s*Y1s) + Y1c * d_X1c_d_varphi - X1c * d_Y1c_d_varphi)\n\n logger.debug('max|flux_constraint_coefficient - predicted_flux_constraint_coefficient|: '\n f'{np.max(abs(flux_constraint_coefficient - predicted_flux_constraint_coefficient))}')\n logger.debug('max|flux_constraint_coefficient - B0_order_a_squared_to_cancel/(2*B0)|: '\n f'{np.max(abs(flux_constraint_coefficient - B0_order_a_squared_to_cancel/(2*B0)))}')\n\n if np.max(abs(flux_constraint_coefficient - predicted_flux_constraint_coefficient)) > 1e-7 \\\n or np.max(abs(flux_constraint_coefficient - B0_order_a_squared_to_cancel/(2*B0))) > 1e-7:\n logger.warning(\"Methods of computing lambda disagree!! Higher nphi resolution might be needed.\")\n\n self.flux_constraint_coefficient = flux_constraint_coefficient\n self.B0_order_a_squared_to_cancel = B0_order_a_squared_to_cancel\n\n if self.helicity == 0:\n self.X3c1_untwisted = self.X3c1\n self.Y3c1_untwisted = self.Y3c1\n self.Y3s1_untwisted = self.Y3s1\n self.X3s1_untwisted = self.X3s1\n self.X3s3_untwisted = self.X3s3\n self.X3c3_untwisted = self.X3c3\n self.Y3c3_untwisted = self.Y3c3\n self.Y3s3_untwisted = self.Y3s3\n self.Z3s1_untwisted = self.Z3s1\n self.Z3s3_untwisted = self.Z3s3\n self.Z3c1_untwisted = self.Z3c1\n self.Z3c3_untwisted = self.Z3c3\n else:\n angle = -self.helicity * self.nfp * self.varphi\n sinangle = np.sin(angle)\n cosangle = np.cos(angle)\n self.X3s1_untwisted = self.X3s1 * cosangle + self.X3c1 * sinangle\n self.X3c1_untwisted = self.X3s1 * (-sinangle) + self.X3c1 * cosangle\n self.Y3s1_untwisted = self.Y3s1 * cosangle + self.Y3c1 * sinangle\n self.Y3c1_untwisted = self.Y3s1 * (-sinangle) + self.Y3c1 * cosangle\n self.Z3s1_untwisted = self.Z3s1 * cosangle + self.Z3c1 * sinangle\n self.Z3c1_untwisted = self.Z3s1 * (-sinangle) + self.Z3c1 * cosangle\n sinangle = np.sin(3*angle)\n cosangle = np.cos(3*angle)\n self.X3s3_untwisted = self.X3s3 * cosangle + self.X3c3 * sinangle\n self.X3c3_untwisted = self.X3s3 * (-sinangle) + self.X3c3 * cosangle\n self.Y3s3_untwisted = self.Y3s3 * cosangle + self.Y3c3 * sinangle\n self.Y3c3_untwisted = self.Y3s3 * (-sinangle) + self.Y3c3 * cosangle\n self.Z3s3_untwisted = self.Z3s3 * cosangle + self.Z3c3 * sinangle\n self.Z3c3_untwisted = self.Z3s3 * (-sinangle) + self.Z3c3 * cosangle",
"def vizualize_accumulation():\n\n global b,V,I,w,dt,f,t\n n = 10\n b = 2.2\n V = 2\n I = 1\n w = 2.*np.pi\n dt = 0.05\n eps_array = np.zeros(n) #store deviation\n num_periods = 5\n P = 2.*np.pi/w # one period\n T = np.linspace(1,P*num_periods,n)\n\n f = ode_source_term(f_numerical(b, V, I, t)) \n f_ = sym.lambdify(t,f)\n\n for i in range(0,n):\n u_num, t_num = solver(I=I, w=w, dt=dt, T=T[i], V=V, f=f_)\n\n u_analytic = f_numerical(b, V, I, t_num)\n eps_array[i] = np.abs(u_num - u_analytic(t_num)).max()\n\n plt.plot(T,eps_array)\n plt.xlabel('dt')\n plt.ylabel('deviation')\n plt.title('Accumulation of error with increase in T')\n umin = 1.2*eps_array.min(); umax = 1.2*eps_array.max()\n plt.axis([T[0], T[-1], umin, umax])\n plt.show()",
"def v_ratio_analytic(tau, n):\n\n\teta = n*0.05**2\n\n\tvrvk = -eta/(tau+(tau**(-1)))\n\treturn np.abs(vrvk)",
"def _solve_v(Theta, U, V, L, lambda2, S, pi):\n pass",
"def euler_method(t, f_y_t, y0, vin):\n \n y = np.zeros((len(y0), len(t)+1))\n dt = t[1]-t[0]\n print(y.shape)\n y[:,0] = y0\n \n\n \n for index, tn in enumerate(t):\n \n y[:,index+1] = dt * (f_y_t(tn, y[:,index], dt)) + y[:,index]\n \n return y[:,:len(t)]",
"def I_3(n):\n\n I_3 = integrate.quad(lambda x: -x**(n-1) /\n (iv(1, x)**2 - iv(0, x) * iv(2, x)), 0, 100)[0]\n\n return I_3",
"def tv_denoise_fista(im, weight=50, eps=5.e-5, n_iter_max=200,\n check_gap_frequency=3, val_min=None, val_max=None,\n verbose=False):\n input_img = im\n if not input_img.dtype.kind == 'f':\n input_img = input_img.astype(np.float)\n shape = [input_img.ndim, ] + list(input_img.shape)\n grad_im = np.zeros(shape)\n grad_aux = np.zeros(shape)\n t = 1.\n i = 0\n if input_img.ndim == 2:\n # Upper bound on the Lipschitz constant\n lipschitz_constant = 9\n elif input_img.ndim == 3:\n lipschitz_constant = 12\n else:\n raise ValueError('Cannot compute TV for images that are not '\n '2D or 3D')\n # negated_output is the negated primal variable in the optimization\n # loop\n negated_output = -input_img\n # Clipping values for the inner loop\n negated_val_min = np.nan\n negated_val_max = np.nan\n if val_min is not None:\n negated_val_min = -val_min\n if val_max is not None:\n negated_val_max = -val_max\n if (val_min is not None or val_max is not None):\n # With bound constraints, the stopping criterion is on the\n # evolution of the output\n negated_output_old = negated_output.copy()\n while i < n_iter_max:\n grad_tmp = gradient(negated_output)\n grad_tmp *= 1. / (lipschitz_constant * weight)\n grad_aux += grad_tmp\n grad_tmp = _projector_on_dual(grad_aux)\n t_new = 1. / 2 * (1 + np.sqrt(1 + 4 * t ** 2))\n t_factor = (t - 1) / t_new\n grad_aux = (1 + t_factor) * grad_tmp - t_factor * grad_im\n grad_im = grad_tmp\n t = t_new\n gap = weight * div(grad_im)\n # Compute the primal variable\n negated_output = gap - input_img\n if (val_min is not None or val_max is not None):\n negated_output = negated_output.clip(negated_val_max,\n negated_val_min,\n out=negated_output)\n if (i % check_gap_frequency) == 0:\n if val_min is None and val_max is None:\n # In the case of bound constraints, we don't have\n # the dual gap\n dgap = dual_gap(input_img, -negated_output, gap, weight)\n if verbose:\n print 'Iteration % 2i, dual gap: % 6.3e' % (i, dgap)\n if dgap < eps:\n break\n else:\n diff = np.max(np.abs(negated_output_old - negated_output))\n diff /= np.max(np.abs(negated_output))\n if verbose:\n print 'Iteration % 2i, relative difference: % 6.3e' % (i,\n diff)\n if diff < eps:\n break\n negated_output_old = negated_output\n i += 1\n # Compute the primal variable\n output = input_img - gap\n if (val_min is not None or val_max is not None):\n output = output.clip(-negated_val_min, -negated_val_max, out=output)\n return output",
"def fv_annuity(r, n, pmt):\n ratio = (pow(1 + r, n) - 1) / r\n return pmt * ratio",
"def psi(self, x, t):\n\t\t## initialize\n\t\tz = 0.0j * x\n\t\t## calculate\n\t\tfor i in range(len(self.n)):\n\t\t\tz += self.ck[i] * np.exp(-1.0j*self.w[i]*t) * np.sqrt(2) * np.sin(self.k[i]*x)\n\t\t## return\n\t\treturn z",
"def _inverse_stress_tensor(self, f, j, p=None, formulation=None):\n\n mu = self._parameters['mu']\n finv = dlf.inv(f)\n c = f.T*f\n i1 = dlf.tr(c)\n i2 = dlf.Constant(0.5)*(i1**2 - dlf.tr(c*c))\n T = self._basic_stress_tensor(dlf.inv(c), mu)\n dim = ufl.domain.find_geometric_dimension(f)\n I = dlf.Identity(dim)\n\n if self._incompressible:\n\n T *= j**(-5.0/dim)\n b_vol = (-1.0/dim)*mu*(-1.0/dim)*i2\n if p is None:\n kappa = self._parameters['kappa']\n b_vol += self._volumetric_strain_energy_diff(1.0/j, kappa,\n formulation)\n else:\n b_vol -= p\n T += b_vol*I\n else:\n la = self._parameters['la']\n T = self._basic_stress_tensor(dlf.inv(c), mu)\n T += self._compressible_strain_energy_diff(1.0/j, la, mu)*I\n\n return T"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Computes the radial distribution function of the system, along with the coordination number and the isothermal compressibility
|
def radial_distribution_function(nbins=50):
    # Array of distances
    dist = rdf_distances(system.pos/force.sigma, system.L/force.sigma, np.zeros(system.N*(system.N-1)))
    max_dist = 0.5*system.L[0]/force.sigma
    bins = np.linspace(0., max_dist, nbins)
    rdf = nrdf(bins, np.zeros(len(bins)-1, dtype=float), dist, system.N, system.rho*force.sigma**3)  # np.float was removed in NumPy >= 1.24; use the builtin float
    # Coordination Number
    #n_c = 4*np.pi*system.rho * np.cumsum(rdf*bins[1]*bins[1:]**2)
    # Isothermal Compressibility
    #kt = 4*np.pi*np.cumsum((rdf-1) * bins[1]*force.sigma * (bins[1:]*force.sigma)**2)/system.T/const.KB + 1/(const.KB * system.T * system.rho)
    #tot_area = 4*np.pi*np.sum(rdf*bins[1]*force.sigma)*system.L[0]**2
    #kt = (1/const.KB*system.T)*(1/system.rho + tot_area - 4/3*np.pi*system.L[0]**3)
    integral = isothermal_integral(rdf, bins[1:]*force.sigma)
    kt = 1/const.KB/system.T/system.rho + 4*np.pi*integral/const.KB/system.T
    return rdf, bins[1:], kt
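
# A minimal, self-contained sketch of the g(r) histogram normalization that the external
# helper `nrdf` presumably performs (an assumption; `rdf_distances`, `nrdf` and
# `isothermal_integral` are not defined in this sample). `dist` holds each pair distance once.
import numpy as np

def rdf_histogram(dist, n_particles, rho, nbins=50, r_max=1.0):
    counts, edges = np.histogram(dist, bins=np.linspace(0.0, r_max, nbins))
    r_mid = 0.5 * (edges[1:] + edges[:-1])
    shell_vol = 4.0 * np.pi * r_mid**2 * np.diff(edges)
    n_ideal = 0.5 * n_particles * rho * shell_vol  # expected pair counts for an ideal gas
    return counts / n_ideal, r_mid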
|
[
"def radial_gaussian( rij, i_atom , width, rshift, Rc ):\n\n #print(\" symmetry function \", i_atom )\n\n Gi=0\n for j_atom in range( rij.shape[0] ):\n\n fc = cutoff_function( rij[i_atom][j_atom] , Rc )\n Gi = Gi + fc * np.exp(-width * (rij[i_atom][j_atom]-rshift)**2 )\n #print( j_atom , Gi )\n\n return Gi",
"def radial_data(data,annulus_width=1,working_mask=None, weight = None, x=None,y=None,rmax=None):\n \n# 2012-02-25 20:40 IJMC: Empty bins now have numel=0, not nan.\n# 2012-02-04 17:41 IJMC: Added \"SUM\" flag\n# 2010-11-19 16:36 IJC: Updated documentation for Sphinx\n# 2010-03-10 19:22 IJC: Ported to python from Matlab\n# 2005/12/19 Added 'working_region' option (IJC)\n# 2005/12/15 Switched order of outputs (IJC)\n# 2005/12/12 IJC: Removed decifact, changed name, wrote comments.\n# 2005/11/04 by Ian Crossfield at the Jet Propulsion Laboratory\n \n import numpy as np\n\n class radialDat:\n \"\"\"Empty object container.\n \"\"\"\n def __init__(self): \n self.mean = None\n self.std = None\n self.median = None\n self.numel = None\n self.max = None\n self.min = None\n self.r = None\n self.rmean = None\n self.meannan = None \n self.meanweight = None\n #---------------------\n # Set up input parameters\n #---------------------\n data = np.array(data)\n \n if working_mask is None:\n working_mask = np.ones(data.shape,bool)\n\n if weight is not None:\n weight_data = data#*weight \n else:\n \tweight_data = data\n \n npix, npiy = data.shape\n if x==None or y==None:\n x1 = np.arange(-npix/2.,npix/2.) #x1=np.arange(npix-npix,npix)\n y1 = np.arange(-npiy/2.,npiy/2.) #y1=np.arange(npiy-npiy,npiy)\n x,y = np.meshgrid(y1,x1)\n\n r = abs(x+1j*y) #abs(np.hypot(1*x,1*y)) #distance from center for each point\n #print (r[0,0])\n #print (r[540,540])\n if rmax==None:\n rmax = r[working_mask].max()\n\n #---------------------\n # Prepare the data container - empty\n #---------------------\n \n dr = np.abs([x[0,0] - x[0,1]]) * annulus_width #width (rmax of the bin)\n radial = np.arange(rmax/dr)*dr + dr/2. #makes the radial coordinate - half point on the bin\n \n \n nrad = len(radial)\n radialdata = radialDat()\n radialdata.mean = np.zeros(nrad)\n radialdata.sum = np.zeros(nrad)\n radialdata.std = np.zeros(nrad)\n radialdata.median = np.zeros(nrad)\n radialdata.numel = np.zeros(nrad, dtype=int)\n radialdata.max = np.zeros(nrad)\n radialdata.min = np.zeros(nrad)\n radialdata.r = radial # gives you the middle point of the bin\n radialdata.rmean = np.zeros(nrad)\n radialdata.meannan = np.zeros(nrad)\n radialdata.meanweight = np.zeros(nrad)\n \n #---------------------\n # Loop through the bins\n #---------------------\n #bin23=np.zeros(shape=(1,9428))\n for irad in range(nrad): #= 1:numel(radial)\n \n\n minrad = irad*dr #lower edge of bin\n maxrad = minrad + dr # upper edge of bin - excluded\n\n thisindex = (r>=minrad) * (r<maxrad) * working_mask #true or false about the statement\n #import pylab as py\n #pdb.set_trace() #debbuger\n #print data[irad,irad]\n\n #if not math.isnan(data[irad, irad]):\n # continue\n if not thisindex.ravel().any(): #if not true statements\n #continue\n\n radialdata.mean[irad] = np.nan\n radialdata.sum[irad] = np.nan\n radialdata.std[irad] = np.nan\n radialdata.median[irad] = np.nan\n radialdata.numel[irad] = 0\n radialdata.max[irad] = np.nan\n radialdata.min[irad] = np.nan\n\n else:\n\n nonzero= np.count_nonzero(data[thisindex])\n #if nonzero ==0:\n \n\n if nonzero > 0: #if nonzero = 0 it means no values in the bin\n radialdata.meannan[irad] = data[thisindex].sum()/nonzero\n if weight is not None:\n if np.all(weight == 1):\n radialdata.meanweight[irad] =radialdata.meannan[irad]\n else:\n \t radialdata.meanweight[irad] = weight_data[thisindex].sum()/weight[thisindex].sum()\n #print 'nonzero',nonzero\n else: #meaning nonzero = 0 all thew values are 0 which means they were nan before\n 
radialdata.meannan[irad] = 0\n radialdata.meanweight[irad] =0\n\n radialdata.mean[irad] = data[thisindex].mean()\n radialdata.sum[irad] = (np.abs(data[thisindex])).sum() #data[r<maxrad] gives you the sum up to that radii; data[thisindex].sum - gives you the sum only on the bin \n radialdata.std[irad] = np.nanstd(data[thisindex])\n radialdata.median[irad] = np.median(data[thisindex])\n radialdata.numel[irad] = data[thisindex].size #number of points per bin\n radialdata.max[irad] = data[thisindex].max()\n radialdata.min[irad] = data[thisindex].min()\n radialdata.rmean[irad] = ((r[thisindex].sum())/data[thisindex].size)\n #print 'real_size', data[thisindex].size, 'r_sum', r[thisindex].sum()\n\n # if nonzero > 0: #if nonzero = 0 it means no values in the bin\n # numzeros = data[thisindex].size - nonzero\n # actual = data[thisindex].size - numzeros\n # #print r[thisindex].sum(), nonzero\n # radialdata.rmeannan[irad] = ((r[thisindex].sum())/actual)\n\n # else: #meaning nonzero = 0 all thew values are 0 which means they were nan \n # radialdata.rmeannan[irad] = 0\n # # radialdata.rmeannan[irad] = ((r[thisindex].sum())/data[thisindex].size)\n \n #if irad==(nrad-1) :\n #pass irad==23:\n #+= data[thisindex] \n #print data[thisindex]\n if weight is not None:\n #print ('!!meannan is the weighted mean since a weight map was given!')\n radialdata.meannan = radialdata.meanweight\n \n #---------------------\n # Return with data\n #---------------------\n if __name__ == '__main__':\n main()\n \n return radialdata",
"def radialDistortions(self):\n return self.__radial_distortions",
"def radial_data(data,annulus_width=1,working_mask=None,x=None,y=None,rmax=None):\n \n# 2012-02-25 20:40 IJMC: Empty bins now have numel=0, not nan.\n# 2012-02-04 17:41 IJMC: Added \"SUM\" flag\n# 2010-11-19 16:36 IJC: Updated documentation for Sphinx\n# 2010-03-10 19:22 IJC: Ported to python from Matlab\n# 2005/12/19 Added 'working_region' option (IJC)\n# 2005/12/15 Switched order of outputs (IJC)\n# 2005/12/12 IJC: Removed decifact, changed name, wrote comments.\n# 2005/11/04 by Ian Crossfield at the Jet Propulsion Laboratory\n \n import numpy as ny\n\n class radialDat:\n \"\"\"Empty object container.\n \"\"\"\n def __init__(self): \n self.mean = None\n self.std = None\n self.median = None\n self.numel = None\n self.max = None\n self.min = None\n self.r = None\n\n #---------------------\n # Set up input parameters\n #---------------------\n data = ny.array(data)\n \n if working_mask==None:\n working_mask = ny.ones(data.shape,bool)\n \n npix, npiy = data.shape\n if x==None or y==None:\n x1 = ny.arange(-npix/2.,npix/2.)\n y1 = ny.arange(-npiy/2.,npiy/2.)\n x,y = ny.meshgrid(y1,x1)\n\n r = abs(x+1j*y)\n\n if rmax==None:\n rmax = r[working_mask].max()\n\n #---------------------\n # Prepare the data container\n #---------------------\n dr = ny.abs([x[0,0] - x[0,1]]) * annulus_width\n radial = ny.arange(rmax/dr)*dr + dr/2.\n nrad = len(radial)\n radialdata = radialDat()\n radialdata.mean = ny.zeros(nrad)\n radialdata.sum = ny.zeros(nrad)\n radialdata.std = ny.zeros(nrad)\n radialdata.median = ny.zeros(nrad)\n radialdata.numel = ny.zeros(nrad, dtype=int)\n radialdata.max = ny.zeros(nrad)\n radialdata.min = ny.zeros(nrad)\n radialdata.r = radial\n \n #---------------------\n # Loop through the bins\n #---------------------\n for irad in range(nrad): #= 1:numel(radial)\n minrad = irad*dr\n maxrad = minrad + dr\n thisindex = (r>=minrad) * (r<maxrad) * working_mask\n #import pylab as py\n #pdb.set_trace()\n if not thisindex.ravel().any():\n radialdata.mean[irad] = ny.nan\n radialdata.sum[irad] = ny.nan\n radialdata.std[irad] = ny.nan\n radialdata.median[irad] = ny.nan\n radialdata.numel[irad] = 0\n radialdata.max[irad] = ny.nan\n radialdata.min[irad] = ny.nan\n else:\n radialdata.mean[irad] = data[thisindex].mean()\n radialdata.sum[irad] = data[r<maxrad].sum()\n radialdata.std[irad] = data[thisindex].std()\n radialdata.median[irad] = ny.median(data[thisindex])\n radialdata.numel[irad] = data[thisindex].size\n radialdata.max[irad] = data[thisindex].max()\n radialdata.min[irad] = data[thisindex].min()\n \n #---------------------\n # Return with data\n #---------------------\n \n return radialdata",
"def gravitational_gradient(self, xyz):\n xyz = check_xyz_dim(xyz)\n r_vec = xyz - self.location\n r = np.linalg.norm(r_vec, axis=-1)\n g_tens = np.zeros((*r.shape, 3, 3))\n ind0 = r > self.radius\n ind1 = r == self.radius\n g_tens[ind0] = super().gravitational_gradient(xyz[ind0])\n g_tens[~ind0] = -G * 4 / 3 * np.pi * self.rho * np.eye(3)\n g_tens[ind1] = np.NaN\n return g_tens",
"def radialforce(self):\n warnings.warn(\"Check radial force direction in titin\")\n return self.force() * np.sin(self.angle())",
"def ion_density(self, r):\n\n r = np.asarray(r)\n if np.any(r < 0):\n raise ValueError(\"Minor radius must not be negative\")\n\n if self.mode == \"L\":\n density = (\n self.ion_density_centre\n * (1 - (r / self.major_radius) ** 2) ** self.ion_density_peaking_factor\n )\n elif self.mode in [\"H\", \"A\"]:\n density = np.where(\n r < self.pedestal_radius,\n (\n (self.ion_density_centre - self.ion_density_pedestal)\n * (1 - (r / self.pedestal_radius) ** 2)\n ** self.ion_density_peaking_factor\n + self.ion_density_pedestal\n ),\n (\n (self.ion_density_pedestal - self.ion_density_separatrix)\n * (self.major_radius - r)\n / (self.major_radius - self.pedestal_radius)\n + self.ion_density_separatrix\n ),\n )\n return density",
"def build_quadrature(self) :\n\n# Compute the Gauss-Legendre quadrature\n [self.polar_nodes,self.polar_weight] = scipy.special.orthogonal.p_roots(self.sn) \n\n# Compute the Chebyshev quadrature\n [self.azith_nodes,self.azith_weight] = self.chebyshev()\n\n self.cos_theta = np.zeros((self.sn/2,1))\n for i in xrange(0,self.sn/2) :\n self.cos_theta[i] = np.real(self.polar_nodes[self.sn/2+i])\n self.sin_theta = np.sqrt(1-self.cos_theta**2)\n\n# Compute omega on one octant\n self.build_octant()\n\n# Compute omega by deploying the octant \n self.deploy_octant()\n\n# Compute the spherical harmonics\n self.compute_harmonics()\n\n# Compute D\n if self.galerkin == True :\n self.D = scipy.linalg.inv(self.M)\n else :\n self.D = np.dot(self.M.transpose(),np.diag(self.weight))",
"def radial_gaussian_nb(result, r, sigma):\n for i in range(len(result)):\n result[i] = math.exp(-r[i] ** 2. / 2. / sigma ** 2.)",
"def radial_force(self):\n return np.sum([t.radial_force_of_filament() for t in self.thick], 0)",
"def gen_radius_nonorm(self,n):\n \n n = self.gen_input_check(n)\n R = np.array([])\n for j in range(len(self.Rvals)):\n nsamp = np.random.poisson(lam=self.Rvals[j]*n) \n R = np.hstack((R, np.exp(np.log(self.Rs[j])+\\\n (np.log(self.Rs[j+1])-np.log(self.Rs[j]))*\\\n np.random.uniform(size=nsamp))))\n Rp = R*const.R_earth.to('km')\n \n return Rp",
"def gravitational_gradient(self, xyz):\n xyz = check_xyz_dim(xyz)\n\n # need to evaluate f node at each source locations\n gxx = self._eval_def_int(prism_fzz, xyz[..., 0], xyz[..., 1], xyz[..., 2], cycle=1)\n gxy = self._eval_def_int(prism_fzx, xyz[..., 0], xyz[..., 1], xyz[..., 2], cycle=1)\n gxz = self._eval_def_int(prism_fzx, xyz[..., 0], xyz[..., 1], xyz[..., 2])\n\n gyy = self._eval_def_int(prism_fzz, xyz[..., 0], xyz[..., 1], xyz[..., 2], cycle=2)\n gyz = self._eval_def_int(prism_fzy, xyz[..., 0], xyz[..., 1], xyz[..., 2])\n\n # gzz = - gxx - gyy - 4 * np.pi * G * rho[in_cell]\n # easiest to just calculate it using another integral\n gzz = self._eval_def_int(prism_fzz, xyz[..., 0], xyz[..., 1], xyz[..., 2])\n\n first = np.stack([gxx, gxy, gxz], axis=-1)\n second = np.stack([gxy, gyy, gyz], axis=-1)\n third = np.stack([gxz, gyz, gzz], axis=-1)\n\n return - G * self.rho * np.stack([first, second, third], axis=-1)",
"def gen_radius(self,n):\n \n n = self.gen_input_check(n)\n R = np.array([])\n for j in range(len(self.Rvals)):\n nsamp = int(np.ceil(n*self.Rvals[j]/np.sum(self.Rvals)))\n R = np.hstack((R, np.exp(np.log(self.Rs[j])+\\\n (np.log(self.Rs[j+1])-np.log(self.Rs[j]))*\\\n np.random.uniform(size=nsamp))))\n \n if len(R) > n:\n R = R[np.random.choice(range(len(R)),size=n,replace=False)]\n Rp = R*const.R_earth.to('km')\n \n return Rp",
"def ferromagnetic_disperion(p, x):\n return 4 * p[0] * (1 - np.cos(p[1] * x))",
"def pollard_rho_factor(n):\n\t\n\tif es_primo(n):\n\t\treturn n\n\n\tsemillero = itertools.count(2)\n\tx = y = semillero.next()\n\td = 1\n\tencontrado = False\n\n\twhile not encontrado:\n\t\tencontrado = True\n\n\t\twhile d == 1:\n\t\t\tx = generadora_aleatorios(x,n)\n\t\t\ty = generadora_aleatorios(generadora_aleatorios(y,n),n)\n\t\t\td = gdc(abs(x - y), n)\n\n\t\tif d == n:\n\t\t\tencontrado = False\n\t\t\tx = y = semillero.next()\n\t\t\td = 1\n\n\treturn d",
"def rho(self, d):\n\t\tif (self.distribution == 'spherical'):\n\t\t\t# log(rho) = (13.86 +/- 0.47) - (3.34 +/- 0.11)*log(R) [Galactocentric distance] (Wetterer 1991)\n\t\t\trho = 10**(13.86 - 3.34*log10(d*1e3))\n\t\t\terr = 10**(0.47 - 0.11*log10(d*1e3))\n\t\t\t\n\t\telif (self.distribution == 'ellipsoidal'):\n\t\t\t# log(rho) = (15.71 +/- 0.56) - (3.76 +/- 0.13)*log(a) [Galactocentric semimajor distance] (Wetterer 1991)\n\t\t\trho = 10**(15.71 - 3.76*log10(d*1e3))\n\t\t\terr = 10**(0.56 - 0.13*log10(d*1e3))\n\t\telse:\traise TypeError, 'Spatial density distribution unknown, only spherical or ellipsoidal available'\n\n\t\t\n\t\treturn [rho, err]",
"def dens_func( z, r ):\n # Allocate relative density\n n = np.ones_like(z)\n n = np.where( z<ramp_p3, 4/3-1/3*(z-ramp_p2)/(ramp_p3-ramp_p2), 1)\n n = np.where( z<ramp_p2, 4/3, n )\n n = np.where( z<ramp_p1, 4/3*(z-ramp_start)/(ramp_p1-ramp_start), n )\n n = np.where( z<ramp_start, 0., n )\n return(n)",
"def generate_galaxy(num_stars, radius):\n genlookup(1000000, r_step, NFW_potential, [rho_0,r_s], \"potentials.npy\")\n potential = np.load('potentials.npy')\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n gradient = gengrad(potential, 1)\n plt.plot(np.linspace(0, radius, radius), gradient[:radius])\n plt.show()\n\n stars = np.empty((num_stars, 7))\n # Work in cylindrical coordinates\n stars[:, 0] = np.abs(np.random.normal(0, radius, num_stars)) # Distance from center from gaussian\n stars[:, 1] = np.random.uniform(0, 2 * np.pi, num_stars) # Uniform dist for angle\n stars[:, 2] = np.random.normal(0, radius / 6 * np.exp(-(stars[:, 0]/radius)**2), num_stars) # Height of stars depends on r\n\n # Mass of stars\n stars[:, 3] = np.asarray(mass_generator(num_stars)) * 1.98e+30 # Masses in metric (conversion)\n\n\n # Velocities initialized with unit velocity in random directions\n direction = np.random.normal(0, 1e-6, num_stars)\n v = np.sqrt(stars[:, 0] * conversion * -interpolatelookup(gradient, stars[:, 0])) / conversion * sectoyear\n stars[:, 4] = 0 # Velocity in radial direction\n stars[:, 5] = np.sqrt(stars[:, 0] * conversion * -interpolatelookup(gradient, stars[:, 0])) / conversion * sectoyear / stars[:, 0] # Velocity in theta direction\n\n return stars, gradient",
"def radial_coord(self, r):\n return r / np.sqrt(1 - (2 * self.M.value / r))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Computes the density profile of the particles over a given axis
|
def density_profile(axis, nbins = 100):
bins = np.linspace(0., system.L[axis], num=nbins)
hist = np.histogram(system.pos[:,axis], bins=bins, density=True)
return hist[0], hist[1]
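
# Illustrative usage with synthetic positions (not part of the original sample; the real
# function reads `system.pos` and `system.L` from module-level state):
import numpy as np

L = np.array([10.0, 10.0, 10.0])
pos = np.random.default_rng(0).uniform(0.0, L, size=(1000, 3))
bins = np.linspace(0.0, L[2], num=100)
density, edges = np.histogram(pos[:, 2], bins=bins, density=True)
centers = 0.5 * (edges[1:] + edges[:-1])  # np.histogram returns len(bins)-1 densities and len(bins) edges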
|
[
"def sweep_density(pos, box, window, pts, axis=0):\n if axis == 0:\n L = box.Lx\n h = box.Ly\n elif axis == 1:\n L = box.Ly\n h = box.Lx\n else:\n raise ValueError('axis must be 0 or 1')\n X = np.linspace(-L/2, L/2, pts)\n Y = np.zeros(pts)\n for i, x in enumerate(X):\n Y[i] = len(np.where((pos[:,axis] > x - window/2)*((pos[:,axis] < x + window/2)))[0])\n Y /= (window*h)\n return X, Y",
"def getDensity(self, x):\n return (math.exp(-math.pow((x - self.mu), 2) / (2 * self.sigmaSquared)) / self.normConst);",
"def density_mix_uniforms(x, pbs, pmix):\n scale = pbs[:, 1] - pbs[:, 0]\n den = np.zeros(len(x))\n for i, pi in enumerate(pbs):\n den += pmix[i]*stats.uniform.pdf(x, loc=pbs[i, 0], scale=scale[i])\n return den",
"def density(self, xs, x):\n xs = np.asarray(xs)\n n = len(xs) # before in_domain?\n if self.weights is not None:\n xs, weights = self.in_domain( xs, self.weights, x )\n else:\n xs = self.in_domain( xs, xs, x )[0]\n xs = np.asarray(xs)\n #print 'len(xs)', len(xs), x\n if xs.ndim == 1:\n xs = xs[:,None]\n if len(xs)>0:\n h = self.h\n if self.weights is not None:\n w = 1 / h * np.sum(self((xs-x)/h).T * weights, axis=1)\n else:\n w = 1. / (h * n) * np.sum(self((xs-x)/h), axis=0)\n return w\n else:\n return np.nan",
"def porosity_profile(img, axis = None, sample_type = None, void_fraction = 1):\n \n \n img = img.copy();\n phi = [];\n \n \n if (sample_type == '3_Phase'):\n for i in img:\n n = i[i > 0].size;\n phi.append(100*np.sum(i[i>0])/n/255);\n \n elif (sample_type == 'Linda_et_al'):\n #This part only calculate for one slice\n i = img;\n n = i[i > 0].size\n phi_m = np.sum(i[i==255]);\n phi_g = np.sum(i[i==1]);\n tmp = i[i < 255];\n phi_micro = np.sum(tmp[tmp>1])\n \n porosity = (100*(phi_m + phi_g + void_fraction*phi_micro)/n/255)\n phi.append(porosity)\n \n \n elif (sample_type == 'Core_2_Phase'):\n print(\"This part is still need to implement\");\n \n elif (sample_type == 'Crop_1_Phase'):\n for i in img:\n n = i.size;\n phi.append(100*np.sum(i)/n/255);\n \n return phi;",
"def get_final_density(input_linear_field, output_resolution=None, time=0.025):\n assert input_linear_field.shape[0]==input_linear_field.shape[1]\n N=len(input_linear_field)\n if not output_resolution:\n output_resolution = N\n x,y = get_evolved_particle_positions(input_linear_field,time)\n f = pynbody.new(len(x))\n f['x']=x-N/2\n f['y']=y-N/2\n f['mass']=1.0\n f['mass'].units=\"kg\"\n f['x'].units=\"cm\"\n return pynbody.plot.sph.image(f,width=N,resolution=output_resolution,units=\"kg cm^-2\")",
"def dens_func( z, r ):\n # Allocate relative density\n n = np.ones_like(z)\n n = np.where( z<ramp_p3, 4/3-1/3*(z-ramp_p2)/(ramp_p3-ramp_p2), 1)\n n = np.where( z<ramp_p2, 4/3, n )\n n = np.where( z<ramp_p1, 4/3*(z-ramp_start)/(ramp_p1-ramp_start), n )\n n = np.where( z<ramp_start, 0., n )\n return(n)",
"def calculate_particle_surface(particle_diameter):\n\n return math.pi * particle_diameter ** 2",
"def obs_fn(particles, obs):\n devs = obs - particles[0]\n likeli = norm(0, OBS_STDDEV).pdf(devs)\n likeli /= np.sum(likeli)\n return likeli",
"def density_x1(x1: float):\n # Bind global variables\n global mu1, sigma1\n # Marginal density of x1 is normal with mean mu1 and standard deviation sigma1\n return norm.pdf(x1, loc=mu1, scale=sigma1)",
"def density_x2(x2: float):\n # Bind global variables\n global mu2, sigma2\n # Marginal density of x1 is normal with mean mu1 and standard deviation sigma1\n return norm.pdf(x2, loc=mu2, scale=sigma2)",
"def density_at_points(data):\n data = numpy.asarray(data)\n kd = kde.gaussian_kde(data.T)\n return kd(data.T)",
"def profile(self, domain, component):\n np = domain.nPoints()\n x = zeros(np,'d')\n for n in range(np):\n x[n] = self.value(domain, component, n)\n return x",
"def density_estimation(sample, X, h, kernel=\"epanechnikov\"):\n kde = KernelDensity(kernel=kernel, bandwidth=h).fit(sample.reshape(-1, 1))\n log_dens = kde.score_samples(X.reshape(-1, 1))\n density = np.exp(log_dens)\n return density",
"def density(self) -> float:\n pass",
"def two_d_uniform_density(vector):\n #prototype of a density function. This is how measures are specified.\n x = vector[0]\n y = vector[1]\n if (0<=x) and (x<=1) and (0<=y) and (y<=1):\n return 1.0\n else:\n return 0.0",
"def get_mean_x(particles):\n sum = 0.0;\n for particle in particles:\n sum += particle[0]\n return sum / (len(particles) * 1.0)",
"def forward_density(self, Y, X_points, marginals=None, sub_densities=0):\n\n if (not marginals) and not X_points.shape[1] == self.L:\n raise WrongContextError(\"Dimension of X samples doesn't match the choosen Lw\")\n proj, alpha, _ = self._helper_forward_conditionnal_density(Y)\n\n\n NX, D = X_points.shape\n N = Y.shape[0]\n if marginals:\n proj = proj[:, :, marginals] # len(marginals) , N , K\n covs = self.SigmakListS[:, marginals, :][:, :, marginals] # K, len(marginals), len(marginals)\n else:\n covs = self.SigmakListS\n\n densites = np.empty((N, NX))\n sub_dens = np.empty((sub_densities, N, NX))\n t = time.time()\n for n, meann, alphan in zip(range(N), proj, alpha):\n densites[n] = densite_melange(X_points, alphan, meann, covs)\n if sub_densities:\n dominants = dominant_components(alphan, meann, covs)[0:sub_densities]\n for i, (_, w, m, c) in enumerate(dominants):\n sub_dens[i, n] = np.exp(chol_loggausspdf(X_points.T, m.reshape((D, 1)), c)) * w\n if self.verbose:\n logging.debug(\"Density calcul time {:.3f}\".format(time.time() - t))\n\n return densites, sub_dens",
"def dXDistribution1D(\n self, axis, nbTimesIntervalle=1, bins=50, output=False, plot=False\n ):\n if axis == \"x\":\n x = self.x\n elif axis == \"y\":\n x = self.y\n elif axis == \"z\":\n x = self.z\n else:\n raise ValueError(\"axis should be equal to 'x' or 'y' or 'z'\")\n\n self.dX = x[nbTimesIntervalle:] - x[:-nbTimesIntervalle]\n hist, bin_edges = np.histogram(self.dX, bins=bins, density=True)\n binsPosition = (bin_edges[:-1] + bin_edges[1:]) / 2\n\n if plot:\n plt.plot(\n binsPosition,\n hist,\n \"o\",\n label=\"Times interval = \" + np.str(nbTimesIntervalle) + \" dt\",\n )\n plt.title(\"Probability density function 1D\")\n plt.xlabel(\"$\\Delta $\" + axis + \" $[m]$\")\n plt.ylabel(\"Density normalised $[m^{-1}]$\")\n plt.ticklabel_format(axis=\"x\", style=\"sci\", scilimits=(0, 0))\n plt.ticklabel_format(axis=\"y\", style=\"sci\", scilimits=(0, 0))\n plt.legend()\n plt.show()\n\n if output:\n return hist, binsPosition"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return an empty perms dict, thus hiding the model from the admin index.
|
def get_model_perms(self, request):
return {}
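
# Hypothetical usage sketch: returning {} from get_model_perms() keeps the model registered
# (its admin views remain reachable by URL) while hiding it from the admin index page.
# `MyModel` is a placeholder, not part of the original sample.
from django.contrib import admin

class HiddenModelAdmin(admin.ModelAdmin):
    def get_model_perms(self, request):
        return {}

# admin.site.register(MyModel, HiddenModelAdmin)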
|
[
"def _permissions():\n return getattr(g, '_request_permissions', {})",
"def initial_permissions() -> [[str, str]]:\n return {'admin_all': ['user__Admin', 'resource__All'],\n 'guest_all': ['user__Guest', 'resource__All']}",
"def get_permissions_for_registration(self):\n from wagtail.snippets.models import SNIPPET_MODELS\n\n if not self.is_pagemodel and self.model not in SNIPPET_MODELS:\n return self.permission_helper.get_all_model_permissions()\n return Permission.objects.none()",
"def get_queryset(self):\n return ApplcationQuerySet(self.model).exclude(status=STATUS.deleted)",
"def get_queryset(self):\n return super(UpdateUserPermissionView, self).get_queryset().exclude(\n is_superuser=True).exclude(pk=self.request.user.pk)",
"def get_permissions_queryset(self):\n editable_permissions_queryset = self.get_editable_permissions()\n existing_permissions_queryset = self.object.user_permissions.all()\n\n return Permission.objects.filter(\n Q(pk__in=editable_permissions_queryset.values('pk')) |\n Q(pk__in=existing_permissions_queryset.values('pk'))\n ).order_by('content_type__app_label').select_related('content_type')",
"def unrestricted(self):\n queryset = self._clone()\n access_control_meta = getattr(\n queryset, '_access_control_meta', {}).copy()\n access_control_meta['user'] = None\n access_control_meta['unrestricted'] = True\n queryset._access_control_meta = access_control_meta\n return queryset",
"def all_permissions(request, id, \\\n template='object_permissions/permissions/objects.html' ):\n user = request.user\n group = get_object_or_404(Group, pk=id)\n \n if not (user.is_superuser or group.user_set.filter(pk=user.pk).exists()):\n return HttpResponseForbidden('You do not have sufficient privileges')\n \n perm_dict = group.get_all_objects_any_perms()\n \n try:\n del perm_dict[Group]\n except KeyError:\n pass\n \n return render_to_response(template, \\\n {'persona':group, 'perm_dict':perm_dict}, \\\n context_instance=RequestContext(request),\n )",
"def get_field_permissions(self):\n view = self.get_view()\n return getattr(self.Meta, \"field_permissions\", {}) or getattr(\n view, \"field_permissions\", {}\n )",
"def permissions(self, role):\n # NOTE: use ordered keys\n permissions = OrderedDict()\n\n # NOTE: WMS service permissions collected by OGC service config\n permissions['wms_services'] = []\n\n return permissions",
"def permission(request):\n ctxt = dict()\n if not request.user.is_authenticated or request.user.is_anonymous:\n ctxt[\"is_admin\"] = False\n else:\n ctxt[\"is_admin\"] = (FSUser.objects.get(user=request.user).permission.name == \"admin\")\n return ctxt",
"def get_default_privileges_dict(self):\n # _DEFAULT tenant is created with two privileges\n return [{'datastore_url': auth_data_const.ALL_DS_URL,\n 'allow_create': 1,\n 'max_volume_size': 0,\n 'usage_quota': 0},\n {'datastore_url': auth_data_const.VM_DS_URL,\n 'allow_create': 1,\n 'max_volume_size': 0,\n 'usage_quota': 0}]",
"def get_user_permissions(self, id, model):\n cff_permissions = getattr(model, \"cff_permissions\", {})\n current_user_perms = {}\n if id is not \"cm:cognitoUserPool:anonymousUser\":\n current_user_perms.update(cff_permissions.get(\"cm:loggedInUser\", {}))\n current_user_perms.update(cff_permissions.get(id, {}))\n return current_user_perms",
"def __invert__(self) -> Permissions:\n return Permissions.from_int(~self.to_int())",
"def permissions(self) -> Mapping[str, str]:\n return pulumi.get(self, \"permissions\")",
"def get_user_obj_perms_model(obj):\n from guardian.models import UserObjectPermissionBase\n from guardian.models import UserObjectPermission\n return get_obj_perms_model(obj, UserObjectPermissionBase, UserObjectPermission)",
"async def _viewperms(self, ctx: commands.Context):\n settings = await self.config.guild(ctx.guild).all()\n color = discord.Color.dark_purple()\n statuschannel = ctx.guild.get_channel(settings['statuschannel'])\n if settings['statuschannel']:\n statuschannel = statuschannel.mention\n if not settings['statuschannel']:\n statuschannel = \"Not Set\"\n try:\n embed = discord.Embed(\n title=f\"Permission Settings\",\n color=color,\n description=f\"**Full Access Role:** {settings['fullaccessrole']}\\n\"\n f\"**Mod Roles:** {settings['modroles']}\\n\"\n f\"**Mod Commands:** {settings['modcommands']}\\n\"\n f\"**Blacklisted Names:** {settings['badnames']}\\n\"\n f\"**Status Channel:** {statuschannel}\"\n )\n return await ctx.send(embed=embed)\n except KeyError:\n await ctx.send(f\"Setup permissions first.\")",
"def hidden_modules(self):\n\n hidden_modules = []\n if self.policy in (3, 4, 5):\n restricted_modules = [m for m in self.modules\n if self.modules[m].restricted]\n roles = []\n if self.session.s3 is not None:\n roles = self.session.s3.roles or []\n if self.ADMIN in roles or self.EDITOR in roles:\n return []\n if not roles:\n hidden_modules = restricted_modules\n else:\n t = self.table\n query = (t.controller.belongs(restricted_modules)) & \\\n (t.tablename == None)\n if roles:\n query = query & (t.group_id.belongs(roles))\n else:\n query = query & (t.group_id == None)\n rows = self.db(query).select()\n acls = dict()\n for acl in rows:\n if acl.controller not in acls:\n acls[acl.controller] = self.NONE\n acls[acl.controller] |= acl.oacl | acl.uacl\n hidden_modules = [m for m in restricted_modules if m not in acls or not acls[m]]\n\n return hidden_modules",
"def _filter_permissions(self, qs):\n # We use qs.all() here because we want to allow a manager object (e.g. MediaItem.objects)\n # to be passed as well.\n return (\n qs.all()\n .viewable_by_user(self.request.user)\n .annotate_viewable(self.request.user)\n .annotate_editable(self.request.user)\n )",
"def get_default_fields(self, obj):\r\n# if isinstance(obj, models.Model):\r\n# opts = obj._meta\r\n# return [field.name for field in opts.fields + opts.many_to_many]\r\n# else:\r\n return obj.keys()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get paths like ``sysconfig.get_paths()`` for installation.
|
def get_paths(self) -> Dict[str, str]:
paths = sysconfig.get_paths()
scripts = "Scripts" if os.name == "nt" else "bin"
packages_path = self.packages_path
paths["platlib"] = paths["purelib"] = (packages_path / "lib").as_posix()
paths["scripts"] = (packages_path / scripts).as_posix()
paths["data"] = paths["prefix"] = packages_path.as_posix()
paths["include"] = paths["platinclude"] = paths["headers"] = (
packages_path / "include"
).as_posix()
return paths
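
# For comparison, these are the standard scheme keys returned by sysconfig; the method above
# remaps them onto the project-local packages_path (illustration only, not project code):
import sysconfig

for key in ("purelib", "platlib", "scripts", "data", "include", "platinclude"):
    print(key, "->", sysconfig.get_paths()[key])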
|
[
"def get_paths(self, name):\n info = self.get_module_info(name)\n if info:\n return info.get(constants.MODULE_PATH, [])\n return []",
"def get_paths() -> list[pathlib.Path]:\n logger.debug(\"Identifying service paths\")\n\n base_paths = [\"/\", \"/System\"] if os.getenv(\"SUDO_USER\") else [pathlib.Path.home()]\n service_paths = []\n\n for base in base_paths:\n for path in [\"Library/LaunchAgents\", \"Library/LaunchDaemons\"]:\n service_path = pathlib.Path(base, path)\n\n if service_path.is_dir():\n service_paths.append(service_path)\n\n if not service_paths:\n raise ValueError(\"No service paths found\")\n\n return service_paths",
"def gethomepaths(self):\n cwd = os.getcwd()\n home_dir = os.path.expanduser('~')\n os.chdir(home_dir)\n fs_dir = os.path.abspath('.')\n\tos.chdir(cwd) # I hope this will always get you back to the original place...\n if home_dir!= fs_dir:\n return [home_dir, fs_dir]\n else:\n return [home_dir]",
"def GetRequiredSysPaths(self):\n reqSysPaths = []\n for mod in [p3d]:\n modPath = os.path.dirname(mod.__file__)\n modLoc = os.path.dirname(modPath).replace('\\\\', '/')\n reqSysPaths.append(modLoc)\n \n return reqSysPaths",
"def InstalledPaths(self):\n with open(self.manifest_file) as f:\n files = [line.rstrip() for line in f]\n return files",
"def _getRepositoryListPaths():\r\n _repositoryListPaths = []\r\n _repositoryListPaths.append(os.path.join(home,\".subuser\",\"repositories.json\"))\r\n _repositoryListPaths.append(\"/etc/subuser/repositories.json\") # TODO how does this work on windows?\r\n _repositoryListPaths.append(os.path.join(_getSubuserDir(),\"repositories.json\"))\r\n repositoryListPaths = []\r\n for path in _repositoryListPaths:\r\n if os.path.exists(path):\r\n repositoryListPaths.append(path)\r\n return repositoryListPaths",
"def getOcelotPaths():\n\t\n\ttry:\n\t\tllvm_config_path = which('OcelotConfig')\n\texcept:\n\t\traise ValueError, 'Error: Failed to find OcelotConfig, make sure ' + \\\n\t\t\t'it is on your PATH'\n\t\n\t# determine defaults\n\tbin_path = os.popen('OcelotConfig --bindir').read().split()\n\tlib_path = os.popen('OcelotConfig --libdir').read().split()\n\tinc_path = os.popen('OcelotConfig --includedir').read().split()\n\tcflags = os.popen('OcelotConfig --cppflags').read().split()\n\tlflags = os.popen('OcelotConfig --ldflags').read().split()\n\tlibs = os.popen('OcelotConfig --libs').read().split()\n\t\n\treturn (bin_path,lib_path,inc_path,cflags,lflags,libs)",
"def setup_python_path(self):\n self.prepare_environment_variables()\n fixed_paths = self.prepare_libraries()\n fixed_paths += self.prepare_code_directories()\n return fixed_paths",
"def external_plugin_paths(self):\n return self._external_plugin_paths",
"def build_possible_paths():\n dir_path = os.path.abspath(os.path.dirname(__file__))\n app_dir = os.path.dirname(os.path.dirname(dir_path))\n paths = [os.path.join(app_dir, '.google_appengine'),\n '/usr/local/google_appengine',\n '/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine']\n # Then if on windows, look for where the Windows SDK installed it.\n for path in os.environ.get('PATH', '').replace(';', ':').split(':'):\n path = path.rstrip(os.sep)\n if path.endswith('google_appengine'):\n paths.append(path)\n try:\n from win32com.shell import shell\n from win32com.shell import shellcon\n id_list = shell.SHGetSpecialFolderLocation(0, shellcon.CSIDL_PROGRAM_FILES)\n program_files = shell.SHGetPathFromIDList(id_list)\n paths.append(os.path.join(program_files, 'Google','google_appengine'))\n except ImportError, e:\n # Not windows.\n pass\n return paths",
"def get_import_paths() -> List[str]:\n return _redun_import_paths",
"def _get_config_dirs():\n config_dirs = [\n USER_CONFIG_DIR,\n os.path.join(\"/\", \"etc\", \"rapport\"),\n os.path.abspath(os.path.join(\"rapport\", \"config\"))\n ]\n return config_dirs",
"def _get_existing_paths(self, modulepath):\n path_strings = modulepath.split(\":\")\n return [x for x in path_strings if path.exists(x)]",
"def get_paths_triggering_build(config_settings=None):",
"def get_test_paths(self):\n return self.test_paths",
"def dirpaths(self):\n parts = self.split()\n result = [DotPath(parts[0] or \"/\")]\n for name in parts[1:]:\n result.append(result[-1] / name)\n return result",
"def cmd_paths():\n if not 'PATH' in os.environ:\n return False\n PATH = os.environ['PATH']\n PATH = os.path.normpath(PATH)\n return PATH.split(os.path.pathsep)",
"def paths(self) -> List[str]:\n return list({the_runner.process_cwd for the_runner in self._runners})",
"def _ask_ld_for_paths(self):\n\n try:\n ld = Popen(['ld', '--verbose'], stdin=DEVNULL, stdout=PIPE)\n output = ld.stdout.read().decode()\n except:\n return []\n\n search_dirs = re.compile(r'SEARCH_DIR\\(([^)]*)\\)').findall(output)\n return [d.strip(' \"') for d in search_dirs]",
"def requiredAdditionalModulePaths(self):\n requiredPaths = []\n if not slicer.app.platform.startswith('macosx'):\n for relPath in @CUSTOM_REL_PATHS@:\n absPath = os.path.join(\n slicer.app.slicerHome,\n \"@CUSTOM_APP_NAME@\"+\"-Extensions\",\n relPath)\n requiredPaths.append(absPath)\n else:\n for relPath in @CUSTOM_REL_PATHS@:\n absPath = os.path.join(\n slicer.app.slicerHome,\n \"Extensions-\"+slicer.app.repositoryRevision,\n relPath)\n requiredPaths.append(absPath)\n return requiredPaths"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Activate the environment. Manipulate the ``PYTHONPATH`` and patch ``pip`` to be aware of local packages. This method acts like a context manager.
|
def activate(self):
paths = self.get_paths()
with temp_environ():
working_set = self.get_working_set()
_old_ws = pkg_resources.working_set
pkg_resources.working_set = working_set.pkg_ws
# HACK: Replace the is_local with environment version so that packages can
# be removed correctly.
_old_sitepackages = misc.site_packages
misc.site_packages = paths["purelib"]
_is_local = misc.is_local
misc.is_local = req_uninstall.is_local = self.is_local
_evaluate_marker = pkg_resources.evaluate_marker
pkg_resources.evaluate_marker = self.evaluate_marker
sys_executable = sys.executable
sys.executable = self.python_executable
yield
sys.executable = sys_executable
pkg_resources.evaluate_marker = _evaluate_marker
misc.is_local = req_uninstall.is_local = _is_local
misc.site_packages = _old_sitepackages
pkg_resources.working_set = _old_ws
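
# `temp_environ` is an external helper not shown in this sample; a plausible implementation
# (an assumption, not the project's actual code) snapshots os.environ and restores it on exit,
# which is what lets `activate` behave like a context manager for environment variables:
import contextlib
import os

@contextlib.contextmanager
def temp_environ():
    saved = os.environ.copy()
    try:
        yield
    finally:
        os.environ.clear()
        os.environ.update(saved)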
|
[
"def activate_env(prefix):\n # type: (str) -> None\n logger.info(\"activate_env %s\", locals())\n os.environ[\"PATH\"] = \":\".join([prefix + \"/bin\", os.environ.get(\"PATH\", \"\")])\n os.environ[\"CONDA_PREFIX\"] = prefix",
"def post_setup(self, context):\n os.environ[\"VIRTUAL_ENV\"] = context.env_dir\n # if not self.nodist:\n # self.install_setuptools(context)\n # Can't install pip without setuptools\n if not self.nopip and not self.nodist:\n self.install_pip(context)",
"def ensure_activated_venv():\n import click\n\n if sys.prefix != sys.base_prefix:\n click.secho('Inception prevention', fg='red', file=sys.stderr)\n click.secho('You are already in a venv. Not activating itagain.',\n file=sys.stderr)\n exit(1)\n\n current_dir = Path('.').resolve()\n venv_dir = current_dir\n last_git_dir = None\n root = Path('/')\n\n while venv_dir.parent != venv_dir:\n if (venv_dir / NAME).exists():\n break\n if (venv_dir / '.git').exists():\n last_git_dir = venv_dir\n venv_dir = venv_dir.parent\n else:\n if last_git_dir == current_dir:\n click.secho('This is a Git repository; will create venv here',\n fg='green',\n file=sys.stderr)\n venv_dir = last_git_dir\n elif last_git_dir:\n click.secho('Containing Git repository found; will create venv there',\n fg='yellow',\n file=sys.stderr)\n venv_dir = last_git_dir\n else:\n click.secho(\n 'Git repository not found; will create venv in current directory',\n fg='red',\n file=sys.stderr)\n venv_dir = current_dir\n click.confirm(f'Create venv at {venv_dir}?',\n abort=True)\n venv.create(venv_dir / NAME, with_pip=True)\n\n click.secho('Upgrading pip...', fg='blue', file=sys.stderr)\n subprocess.run([venv_dir / NAME / 'bin/pip', 'install',\n '--upgrade', 'pip'],\n check=True)\n\n reqs_path = venv_dir / 'requirements.txt'\n if reqs_path.exists():\n click.secho('A requirements.txt file exists. Contents:',\n fg='green', file=sys.stderr)\n try:\n with reqs_path.open(encoding='utf-8') as f:\n for line in f:\n click.echo(' ' + line.rstrip())\n except UnicodeDecodeError:\n click.secho(f'Cannot decode {reqs_path}',\n fg='red', file=sys.stderr)\n else:\n click.confirm(f'Install requirements from {reqs_path}?',\n abort=True)\n subprocess.run([venv_dir / NAME / 'bin/pip', 'install',\n '-r', reqs_path\n ],\n check=True)\n\n click.secho(f'Entering venv in {venv_dir}; Ctrl+D to exit',\n fg='blue', file=sys.stderr)\n os.execv(\n '/bin/bash',\n ['/bin/bash', '-c', f'. {venv_dir}/{NAME}/bin/activate; exec bash'])",
"def setup_environment():\n osinter = ostool.get_interface()\n pypath = osinter.get_maya_envpath()\n for p in sys.path:\n pypath = os.pathsep.join((pypath, p))\n os.environ['PYTHONPATH'] = pypath",
"def update_pipenv_env():\n subprocess.call([\"pipenv\", \"sync\", \"--dev\"])",
"def setup_environment():",
"def bootstrap_pex_env(entry_point):\n from .environment import PEXEnvironment\n from .finders import register_finders\n from .pex_info import PexInfo\n\n monkeypatch_build_zipmanifest()\n register_finders()\n\n PEXEnvironment(entry_point, PexInfo.from_pex(entry_point)).activate()",
"def activate_virtualenv_in_precommit_hooks(session: Session) -> None:\n\n if session.bin is None:\n return\n\n virtualenv = session.env.get(\"VIRTUAL_ENV\")\n\n if virtualenv is None:\n return\n\n hookdir = Path(\".git\") / \"hooks\"\n\n if not hookdir.is_dir():\n return\n\n for hook in hookdir.iterdir():\n if hook.name.endswith(\".sample\") or not hook.is_file():\n continue\n\n text = hook.read_text()\n bindir = repr(session.bin)[1:-1] # strip quotes\n\n if not (\n Path(\"A\") == Path(\"a\") and bindir.lower() in text.lower() or bindir in text\n ):\n continue\n\n lines = text.splitlines()\n\n if not (lines[0].startswith(\"#!\") and \"python\" in lines[0].lower()):\n continue\n\n header = dedent(\n f\"\"\"\\\n import os\n os.environ[\"VIRTUAL_ENV\"] = {virtualenv!r}\n os.environ[\"PATH\"] = os.pathsep.join((\n {session.bin!r},\n os.environ.get(\"PATH\", \"\"),\n ))\n \"\"\"\n )\n\n lines.insert(1, header)\n hook.write_text(\"\\n\".join(lines))",
"def main(env_dir):\n env_dir = os.path.abspath(env_dir)\n\n builder = venv.EnvBuilder()\n context = builder.ensure_directories(env_dir)\n context.env_dir = convert_path(context.env_dir)\n\n activate_path = os.path.join(context.bin_path, 'activate')\n if os.path.exists(activate_path):\n print('Script already exists: \"%s\"' % activate_path)\n return\n\n venv_path = os.path.abspath(os.path.dirname(venv.__file__))\n template = os.path.join(venv_path, 'scripts', 'posix', 'activate')\n\n with open(activate_path, 'w', newline='') as activate_file:\n with open(template, 'r') as activate_tpl:\n activate_file.write(builder.replace_variables(activate_tpl.read(), context))\n print('Created: \"%s\"' % activate_path)",
"def install_dependencies():\n local('pip install --upgrade setuptools pip')",
"def activate():\n\n env_path = '/'.join([deployment_root(), 'env', env.project_fullname])\n\n if not exists(env_path):\n print env.host, \"ERROR: The version\", env.project_version, \\\n \"does not exist at\"\n print env_path\n sys.exit(1)\n\n active = active_version()\n servers = webserver_list()\n\n if env.patch or active != env.project_fullname:\n for s in servers:\n stop_webserver(s)\n\n if not env.patch and active != env.project_fullname:\n\n if env.verbosity:\n print env.host, \"ACTIVATING version\", env_path\n\n if not env.nomigration:\n sync_db()\n\n #south migration\n if ('south' in env.INSTALLED_APPS and\n not env.nomigration and\n not env.manualmigration):\n migration()\n\n if env.manualmigration or env.MANUAL_MIGRATION:\n manual_migration()\n\n #activate sites\n activate_sites = [''.join([\n d.name.replace('.', '_'),\n '-',\n env.project_version,\n '.conf'])\n for d in domain_sites()]\n if 'apache2' in get_packages():\n site_paths = ['/etc/apache2', '/etc/nginx']\n else:\n site_paths = ['/etc/nginx']\n\n #disable existing sites\n for path in site_paths:\n for site in _ls_sites('/'.join([path, 'sites-enabled'])):\n if site not in activate_sites:\n sudo(\"rm %s/sites-enabled/%s\" % (path, site))\n\n #activate new sites\n for path in site_paths:\n for site in activate_sites:\n if not exists('/'.join([path, 'sites-enabled', site])):\n sudo(\"chmod 644 %s\" % '/'.join(\n [path, 'sites-available', site]))\n sudo(\"ln -s %s/sites-available/%s %s/sites-enabled/%s\" % (\n path, site, path, site))\n if env.verbosity:\n print \" * enabled\", \"%s/sites-enabled/%s\" % (\n path, site)\n\n #delete existing symlink\n ln_path = '/'.join([deployment_root(), 'env', env.project_name])\n run('rm -f ' + ln_path)\n #run post deploy hooks\n post_exec_hook('post_deploy')\n #activate\n run('ln -s %s %s' % (env_path, ln_path))\n\n if env.verbosity:\n print env.host, env.project_fullname, \"ACTIVATED\"\n else:\n if env.verbosity and not env.patch:\n print env.project_fullname, \"is the active version\"\n\n if env.patch or active != env.project_fullname:\n for s in servers:\n start_webserver(s)\n print\n return",
"def activate_egg(eggpath):\r\n try:\r\n d = pkg_resources.find_distributions(eggpath).next()\r\n except StopIteration:\r\n raise ValueError(\"Unknown or corrupt egg\")\r\n d.activate()\r\n settings_module = d.get_entry_info('scrapy', 'settings').module_name\r\n os.environ.setdefault('SCRAPY_SETTINGS_MODULE', settings_module)",
"def prefixed(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n activate_virtualenv()\n with prefix(env.active_prefixes[0]), prefix(env.active_prefixes[1]):\n func(*args, **kwargs)\n return wrapper",
"def inject_into_activate_this(venv_root, body):\n activate_this_file = os.path.join(venv_root, 'bin', 'activate_this.py')\n inject_into_file(activate_this_file, body)",
"def setup_environment(target):\n if target.name not in conda_environments():\n conda_create_env(target.name)\n for dep in target.conda_dependencies:\n conda_install(target.name, dep)\n for dep in target.pip_dependencies:\n pip_install(target.name, dep)",
"def activate_egg(eggpath):\n try:\n d = pkg_resources.find_distributions(eggpath).next()\n except StopIteration:\n raise ValueError(\"Unknown or corrupt egg\")\n d.activate()\n settings_module = d.get_entry_info('scrapy', 'settings').module_name\n os.environ['SCRAPY_SETTINGS_MODULE'] = settings_module",
"def _activate_path(self):\n return os.path.join(self.env, 'bin', 'activate')",
"def activate_virtualenv_in_precommit_hooks(session: Session) -> None:\n assert session.bin is not None # nosec\n\n # Only patch hooks containing a reference to this session's bindir. Support\n # quoting rules for Python and bash, but strip the outermost quotes so we\n # can detect paths within the bindir, like <bindir>/python.\n bindirs = [\n bindir[1:-1] if bindir[0] in \"'\\\"\" else bindir\n for bindir in (repr(session.bin), shlex.quote(session.bin))\n ]\n\n virtualenv = session.env.get(\"VIRTUAL_ENV\")\n if virtualenv is None:\n return\n\n headers = {\n # pre-commit < 2.16.0\n \"python\": f\"\"\"\\\n import os\n os.environ[\"VIRTUAL_ENV\"] = {virtualenv!r}\n os.environ[\"PATH\"] = os.pathsep.join((\n {session.bin!r},\n os.environ.get(\"PATH\", \"\"),\n ))\n \"\"\",\n # pre-commit >= 2.16.0\n \"bash\": f\"\"\"\\\n VIRTUAL_ENV={shlex.quote(virtualenv)}\n PATH={shlex.quote(session.bin)}\"{os.pathsep}$PATH\"\n \"\"\",\n # pre-commit >= 2.17.0 on Windows forces sh shebang\n \"/bin/sh\": f\"\"\"\\\n VIRTUAL_ENV={shlex.quote(virtualenv)}\n PATH={shlex.quote(session.bin)}\"{os.pathsep}$PATH\"\n \"\"\",\n }\n\n hookdir = Path(\".git\") / \"hooks\"\n if not hookdir.is_dir():\n return\n\n for hook in hookdir.iterdir():\n if hook.name.endswith(\".sample\") or not hook.is_file():\n continue\n\n if not hook.read_bytes().startswith(b\"#!\"):\n continue\n\n text = hook.read_text()\n\n if not any(\n Path(\"A\") == Path(\"a\") and bindir.lower() in text.lower() or bindir in text\n for bindir in bindirs\n ):\n continue\n\n lines = text.splitlines()\n\n for executable, header in headers.items():\n if executable in lines[0].lower():\n lines.insert(1, dedent(header))\n hook.write_text(\"\\n\".join(lines))\n break",
"def install(ctx, req, packages, update):\n if is_in_venv():\n if req:\n install_requirements(ctx.obj.db, ctx.obj.engine)\n elif update:\n update_packages(packages, ctx.obj.db, ctx.obj.engine)\n elif packages:\n install_packages(packages, ctx.obj.db, ctx.obj.engine)\n else:\n click.echo(\"Give the package name to be installed\")\n else:\n click.echo(\"Activate the environment first before installing packages\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return the package finder for the given index sources.
|
def get_finder(
self,
sources: Optional[List[Source]] = None,
ignore_requires_python: bool = False,
) -> Generator[pip_shims.PackageFinder, None, None]:
if sources is None:
sources = self.project.sources
for source in sources:
source["url"] = expand_env_vars_in_auth(source["url"])
python_version, _ = get_python_version(self.python_executable, digits=2)
finder = get_finder(
sources,
self.project.cache_dir.as_posix(),
python_version,
ignore_requires_python,
)
    # Reuse the auth across sessions to avoid prompting repeatedly.
finder.session.auth = self.auth
yield finder
finder.session.close()
|
[
"def find_installed_sources():",
"def _build_package_finder(\n self,\n options, # type: Values\n session, # type: PipSession\n platform=None, # type: Optional[str]\n python_versions=None, # type: Optional[List[str]]\n abi=None, # type: Optional[str]\n implementation=None # type: Optional[str]\n ):\n # type: (...) -> PackageFinder\n index_urls = [options.index_url] + options.extra_index_urls\n if options.no_index:\n logger.debug(\n 'Ignoring indexes: %s',\n ','.join(redact_password_from_url(url) for url in index_urls),\n )\n index_urls = []\n\n return PackageFinder(\n find_links=options.find_links,\n format_control=options.format_control,\n index_urls=index_urls,\n trusted_hosts=options.trusted_hosts,\n allow_all_prereleases=options.pre,\n session=session,\n platform=platform,\n versions=python_versions,\n abi=abi,\n implementation=implementation,\n prefer_binary=options.prefer_binary,\n )",
"def get_sources(self, package_name):\n for package in self.data:\n if package_name == package['name']:\n print(package['sources'])\n return package['sources']",
"def generate_package_index(cache_prefix):\n try:\n file_list = (\n entry\n for entry in web_util.list_url(cache_prefix)\n if entry.endswith(\".yaml\")\n or entry.endswith(\"spec.json\")\n or entry.endswith(\"spec.json.sig\")\n )\n except KeyError as inst:\n msg = \"No packages at {0}: {1}\".format(cache_prefix, inst)\n tty.warn(msg)\n return\n except Exception as err:\n # If we got some kind of S3 (access denied or other connection\n # error), the first non boto-specific class in the exception\n # hierarchy is Exception. Just print a warning and return\n msg = \"Encountered problem listing packages at {0}: {1}\".format(cache_prefix, err)\n tty.warn(msg)\n return\n\n tty.debug(\"Retrieving spec descriptor files from {0} to build index\".format(cache_prefix))\n\n tmpdir = tempfile.mkdtemp()\n db_root_dir = os.path.join(tmpdir, \"db_root\")\n db = spack_db.Database(\n None,\n db_dir=db_root_dir,\n enable_transaction_locking=False,\n record_fields=[\"spec\", \"ref_count\", \"in_buildcache\"],\n )\n\n try:\n _read_specs_and_push_index(file_list, cache_prefix, db, db_root_dir)\n except Exception as err:\n msg = \"Encountered problem pushing package index to {0}: {1}\".format(cache_prefix, err)\n tty.warn(msg)\n tty.debug(\"\\n\" + traceback.format_exc())\n finally:\n shutil.rmtree(tmpdir)",
"def source_index(self):\n return os.path.join(self.data_directory, 'sources')",
"def get_installed_sources():",
"def _package(indices, results):\n if isinstance(indices, dict):\n value = {}\n for key, ind in indices.iteritems():\n value[key] = GenUtils._package(ind, results)\n return value\n elif isinstance(indices, (list, tuple)):\n value = []\n for ind in indices:\n value.append(GenUtils._package(ind, results))\n return value\n else:\n return results[indices]",
"def sources_with_repository(self) -> Iterator[Tuple[str, str,\n PurePosixPath, bytes]]:\n query = select([repository_source.c.owner, repository_source.c.name,\n repository_source.c.path, source_file.c.source])\\\n .select_from(repository_source.join(source_file))\n for owner, name, pathstr, source in self.conn.execute(query):\n yield owner, name, PurePosixPath(pathstr), source",
"def find_packages(self, publication):\n sources = publication['Sources']\n packages = []\n if publication['SourceKind'] == 'snapshot':\n for source in sources:\n source_name = source['Name']\n packages += self.get_packages_from_snapshot(source_name)\n elif publication['SourceKind'] == 'local':\n for source in sources:\n source_name = source['Name']\n packages += self.get_packages_from_local_repo(source_name)\n else:\n pass\n\n return packages",
"def find_index(names):\n for name in names:\n if os.path.basename(name).startswith('index.'):\n return name",
"def get_sources():\n return list(SOURCES)",
"def allChannelSources(self, channelIndex = None, channel = None, N = 10):\n values = self.sourceInfo.copy()\n if channelIndex is not None:\n values.setChannelIndex(channelIndex)\n assert channel is None\n elif channel is not None:\n values.setChannel(channel)\n assert channelIndex is None\n values.setIndex((self.sourceInfo.position - 1) * N)\n \n files = []\n for i in xrange(N):\n values.increaseIndex()\n files.append(\n os.path.join(\n self.sourceDir, values.formatString(self.sourceFilePattern)\n ))\n # for i\n return files",
"def find_plugin_sources_by_package(plugin_source_root, package_name=None):\r\n result = []\r\n \r\n # Find all plug-in sources\r\n sources = find_all_plugin_sources(plugin_source_root)\r\n \r\n # Always return common plug-ins\r\n if 'common' in sources:\r\n for path, modname in sources['common']:\r\n result.append(path)\r\n \r\n # Return extra plug-ins if necessary\r\n if package_name not in (None, '', 'common'):\r\n if package_name not in sources:\r\n raise ValueError(\"Invalid plug-in package name: '%s'\" % package_name)\r\n else:\r\n for path, modname in sources[package_name]:\r\n result.append(path)\r\n \r\n return result",
"def _getSourcesList(self):\n\t\tif self._sources is not None:\n\t\t\treturn self._sources\n\n\t\tsources = []\n\t\tfiles = os.listdir(self._app_dir)\n\t\tfor file in files:\n\t\t\tname, ext = os.path.splitext(file)\n\t\t\tif ext == \".py\":\n\t\t\t\tsources.append(file)\n\t\treturn sources",
"def get_sources_by_type(self, source_type):\r\n\t\tif not source_type:\r\n\t\t\treturn self.sources\r\n\t\telse:\r\n\t\t\tmeth_name = \"get_%s_sources\" % source_type\r\n\t\t\treturn getattr(self, meth_name)()",
"def modules_to_search(source, line, col, identifier):\n\n # check if identifier is qualified, if it's\n # like \"String.join\" instead of just \"join\"\n qualified_module = _qualified_namespace(source, line, col, identifier)\n if qualified_module:\n return qualified_module\n # search for explicit import\n importers = [_imports_function(i, identifier) for i in source.split(\"\\n\")]\n modules = [i.groups()[0] for i in importers if i]\n if len(modules) > 0:\n log.debug(\"searching exposing imports\")\n log.debug(modules)\n return modules\n # if nothing obvious is left, do all wildcards\n wild = [_wildcard_import(i) for i in source.split(\"\\n\")]\n mods = [i.groups()[0] for i in wild if i]\n log.debug(\"searching wildcard imports\")\n log.debug(mods)\n return mods",
"def source_package(self) -> 'SourcePackage':",
"def sources(self) -> List[str]:",
"def getPackageInfo(package_pattern, package_index):\n\n # Parse for package info\n matchs = re.search(package_pattern, package_index)\n package_info = matchs.group(0)\n\n return package_info"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the working set based on the local packages directory.
|
def get_working_set(self) -> WorkingSet:
paths = self.get_paths()
return WorkingSet(
[paths["platlib"]], python=get_python_version(self.python_executable)[0]
)
|
[
"def get_local_packages(directory=THIRDPARTY_DIR):\n return list(PypiPackage.packages_from_dir(directory=directory))",
"def get_packages(self) -> list:\r\n return os.listdir(f\"{self.path}/uniflash-packages\")",
"def get_installed_sources():",
"def AddDistToWorkingSet(distPath):\n zpDists = []\n for d in pkg_resources.find_distributions(distPath):\n pkg_resources.working_set.add(d, replace=True)\n pkg_resources.require(d.project_name)\n if d.project_name.startswith('ZenPacks.'):\n zpDists.append(d)\n return zpDists",
"def find_installed_sources():",
"def get_installedpackages():\n\n # TODO finish me\n\n return []",
"def get_local_packages():\n sys.stdout = mystdout = StringIO()\n pip.main(['freeze', '-l'])\n sys.stdout = sys.__stdout__\n \n pkgs = mystdout.getvalue().split('\\n')\n return [p.split('==') for p in pkgs]",
"def activate(self):\n paths = self.get_paths()\n with temp_environ():\n working_set = self.get_working_set()\n _old_ws = pkg_resources.working_set\n pkg_resources.working_set = working_set.pkg_ws\n # HACK: Replace the is_local with environment version so that packages can\n # be removed correctly.\n _old_sitepackages = misc.site_packages\n misc.site_packages = paths[\"purelib\"]\n _is_local = misc.is_local\n misc.is_local = req_uninstall.is_local = self.is_local\n _evaluate_marker = pkg_resources.evaluate_marker\n pkg_resources.evaluate_marker = self.evaluate_marker\n sys_executable = sys.executable\n sys.executable = self.python_executable\n yield\n sys.executable = sys_executable\n pkg_resources.evaluate_marker = _evaluate_marker\n misc.is_local = req_uninstall.is_local = _is_local\n misc.site_packages = _old_sitepackages\n pkg_resources.working_set = _old_ws",
"def get_projects():\n sciprojects = []\n paths = glob.glob(local_dirs.proj_conf_path+\"*\")\n for path in paths:\n if os.path.isdir(path):\n sci_project = os.path.basename(path)\n sciprojects.append(sci_project)\n # is this necessary?\n try:\n sciprojects.remove('__pycache__')\n except:\n pass\n logger.debug(\"get_projects: directory items: %s\", sciprojects)\n return sciprojects",
"def resolved_packages(self):\n return self.resolved_packages_",
"def set_working_dir():\n global pymod_working_dir\n try:\n pymod_working_dir = os.getcwd()\n except OSError:\n os.chdir(pymod.paths.prefix)\n pymod_working_dir = pymod.paths.prefix",
"def _prepare_working_directory(self):\n pwd = self._make_mapping(os.getcwd())[os.getcwd()]['bind']\n\n return pwd",
"def setup_fs():\n from fsspec.implementations.local import LocalFileSystem\n from pathlib import Path\n\n # Setup path to local folder structure, as if copied from a CANedge SD.\n # Assumes the folder is placed in same directory as this file\n fs = LocalFileSystem()\n\n return fs",
"def system_packages(self):\n return self._system_packages",
"def init_packages(self):\n return self._init_packages",
"def get_relative_packages_dir():\n return os.path.join('..', PACKAGESDIR)",
"def find_qidoc_root(cwd=None):\n if not cwd:\n cwd = os.getcwd()\n dirname = None\n while dirname or cwd:\n if os.path.exists(os.path.join(cwd, \".qi\", \"worktree.xml\")):\n return cwd\n (new_cwd, dirname) = os.path.split(cwd)\n if new_cwd == cwd:\n return\n cwd = new_cwd",
"def in_cwd():\n configs = []\n\n for filename in os.listdir(os.getcwd()):\n if filename.startswith('.tmuxp') and is_config_file(filename):\n configs.append(filename)\n\n return configs",
"def gethomepaths(self):\n cwd = os.getcwd()\n home_dir = os.path.expanduser('~')\n os.chdir(home_dir)\n fs_dir = os.path.abspath('.')\n\tos.chdir(cwd) # I hope this will always get you back to the original place...\n if home_dir!= fs_dir:\n return [home_dir, fs_dir]\n else:\n return [home_dir]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the environment for marker evaluation.
|
def marker_environment(self) -> Dict[str, Any]:
return get_pep508_environment(self.python_executable)
|
[
"def get_env():\n env.output_prefix = False\n run('export | sed -e \"s/declare -x/export/g\"')",
"def environment(self) -> rl_environment.Environment:\n return self._environment",
"def runtime_environment(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"runtime_environment\")",
"def setup_environment():",
"def runtime_environment(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"runtime_environment\")",
"def env(self) -> \"OperatorEnv\":\n if self._env is None:\n self._env = OperatorEnv()\n return self._env",
"def get_env_ax():\n value = {k: v for k, v in os.environ.items() if k.startswith(KEY_PRE)}\n value = {k: HIDDEN if k in KEYS_HIDDEN else v for k, v in value.items()}\n return value",
"def environnement(self):\n return self.__environnement",
"def get_environment_from_request(self):\n environment = Environment.objects.get(api_key=self.kwargs['environment_api_key'])\n return environment",
"def get_execution_environment():\n gateway = get_gateway()\n j_execution_environment = gateway.jvm.org.apache.flink.api.java.ExecutionEnvironment\\\n .getExecutionEnvironment()\n return ExecutionEnvironment(j_execution_environment)",
"def gisenv(env=None):\n s = read_command(\"g.gisenv\", flags='n', env=env)\n return parse_key_val(s)",
"def test_environment(self):\n pass",
"def shell_source(script):\n \n pipe = subprocess.Popen(\". %s; env\" % script, stdout=subprocess.PIPE, shell=True)\n output = pipe.communicate()[0]\n env = dict((line.split(\"=\", 1) for line in output.splitlines()))\n return env",
"def get_current_environment():\n env = os.getenv('TASKING_MANAGER_ENV', 'Dev') # default to Dev if config environment var not set\n return env.capitalize()",
"def test_runtime_envs_get(self):\n pass",
"def environment(self):\n return {\n t.observable\n for t in self.transitions\n if t.abstracted is ENVIRONMENT and t.observable is not None\n }",
"def GetEnvironmentalBG2(self):\n ...",
"def _prepare_environment(self):\n env = {'HOME': self._make_mapping(HOME)}\n\n return env",
"def _get_execution_env(environment):\n if environment is None:\n typename = 'LocalAsync'\n else:\n typename = type(environment).__name__\n\n tracker = _mt._get_metric_tracker()\n tracker.track('deploy.job.create.%s' % typename.lower(), value=1)\n\n if typename == 'Local':\n exec_env = LocalExecutionEnvironment\n elif typename == 'LocalAsync':\n exec_env = LocalAsynchronousEnvironment\n elif typename in ['EC2', 'Ec2Cluster']:\n exec_env = Ec2ExecutionEnvironment\n elif typename in ['Hadoop', 'HadoopCluster']:\n exec_env = HadoopExecutionEnvironment\n else:\n raise Exception(\"Validation Failed: Unknown execution environment.\")\n\n return exec_env"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Update the shebang lines
|
def update_shebangs(self, new_path: str) -> None:
scripts = self.get_paths()["scripts"]
maker = ScriptMaker(None, None)
maker.executable = new_path
shebang = maker._get_shebang("utf-8").rstrip().replace(b"\\", b"\\\\")
for child in Path(scripts).iterdir():
if not child.is_file() or child.suffix not in (".exe", ".py", ""):
continue
child.write_bytes(
re.sub(rb"#!.+?python.*?$", shebang, child.read_bytes(), flags=re.M)
)
|
[
"def shebang(self):\n try:\n first_line = self.stripped_lines()[0]\n if first_line.startswith(\"#!\"):\n return first_line[2:].strip()\n except IndexError:\n pass\n return \"\"",
"def shebang(path):\n return get(path)",
"def test_shebang(self):\n with open(\"models/engine/file_storage.py\", mode='r') as _file:\n readShebang = _file.read()\n lines = readShebang.splitlines()\n self.assertEqual(lines[0], '#!/usr/bin/python3')",
"def test_shebang_test(self):\n with open(\"tests/test_models/test_engine/test_file_storage.py\\\n\", mode='r') as _file:\n readShebang = _file.read()\n lines = readShebang.splitlines()\n self.assertEqual(lines[0], '#!/usr/bin/python3')",
"def rewrite_shebang(data, target, prefix):\n shebang_match = re.match(SHEBANG_REGEX, data, re.MULTILINE)\n prefix_b = prefix.encode('utf-8')\n\n if shebang_match:\n if data.count(prefix_b) > 1:\n # More than one occurrence of prefix, can't fully cleanup.\n return data, False\n\n shebang, executable, options = shebang_match.groups()\n\n if executable.startswith(prefix_b):\n # shebang points inside environment, rewrite\n executable_name = executable.decode(\"utf-8\").split(\"/\")[-1]\n new_shebang = \"#!/usr/bin/env {}{}\".format(\n executable_name, options.decode(\"utf-8\")\n )\n data = data.replace(shebang, new_shebang.encode(\"utf-8\"))\n\n return data, True\n\n return data, False",
"def resolve_shebang(path, ignoreshell=False):\n try:\n f = file(path)\n try:\n # At most 80 characters in the first line\n header = f.read(80).splitlines()[0]\n finally:\n f.close()\n \n m = _RE_SHEBANG.search(header)\n if not m:\n return []\n cmd, arg = m.group(1,2)\n if os.path.isfile(cmd):\n # Keep this one, the hg script for instance contains a weird windows\n # shebang referencing the current python install.\n cmdfile = os.path.basename(cmd).lower()\n if cmdfile == 'python.exe':\n cmd = 'python'\n pass\n elif cmd not in _SHEBANG_CMDS:\n raise CommandNotFound('Unknown interpreter \"%s\" referenced in '\\\n 'shebang' % header)\n cmd = _SHEBANG_CMDS.get(cmd)\n if cmd is None or (ignoreshell and cmd == 'pysh'):\n return []\n if arg is None:\n return [cmd, win32_to_unix_path(path)]\n return [cmd, arg, win32_to_unix_path(path)]\n except IOError, e:\n if e.errno!=errno.ENOENT and \\\n (e.errno!=errno.EPERM and not os.path.isdir(path)): # Opening a directory raises EPERM\n raise\n return []",
"def reexecute(self):\n getLogger().info(\"Reloading executable: '{} {}'\".format(\n sys.executable,\n \" \".join(sys.argv),\n ))\n os.execve(sys.executable, [sys.executable] + sys.argv, os.environ)",
"def update_init_script():\n put('scripts/mwana-route-init-script.sh', '/etc/init.d/mwana-route', 0755)\n run(\"sudo sed -i 's/PROJECT_DIR=/PROJECT_DIR=%s/' /etc/init.d/mwana-route\"\n % env.path.replace('/', '\\/'))\n run(\"sudo sed -i 's/USER=/USER=%s/' /etc/init.d/mwana-route\"\n % env.user)",
"def add_to_path(self):\n if self.bin not in os.environ['PATH']:\n os.environ['PATH'] = os.environ['PATH'] + f':{self.bin}'",
"def _ExtractInterpFromShebang(data):\n firstline = data.splitlines()[:1]\n if not firstline:\n return None\n\n # The format here can be tricky.\n shebang = firstline[0].strip()\n m = re.match(r\"^#!\\s*([^\\s]+)(?:\\s+([^\\s]+))?\", shebang)\n if not m:\n return None\n\n # If the using `env`, find the target program.\n interp = m.group(1)\n if os.path.basename(interp) == \"env\":\n interp = m.group(2)\n\n return interp",
"def launcher_lines(conda_setup_path, conda_env, launcher,\n pythonpath=''):\n lines = []\n lines.append('#!/usr/bin/env bash\\n')\n lines.append('unset PYTHONPATH\\n')\n lines.append('unset LD_LIBRARY_PATH\\n')\n lines.append('source {}\\n'.format(conda_setup_path))\n lines.append('conda activate {}\\n'.format(conda_env))\n if pythonpath:\n lines.append('export PYTHONPATH=\"{}:$PYTHONPATH\"\\n'.format(pythonpath))\n lines.append(launcher + ' $@\\n')\n return lines",
"def create_bin():\n f = open ('%s/mygrate' % bindir, 'w')\n f.writelines(['#! /bin/sh\\n', 'python %s/mygrate.py \"$@\"' % tempdir])\n f.close()\n os.chmod('%s/mygrate' % bindir, 0755)",
"def bang_bang(args, stdin=None):\n return bang_n(['1'])",
"def UpdateBuildScripts(self):\n for key, obj in self.IterObjectsByIsa('PBXShellScriptBuildPhase'):\n\n shell_path = obj['shellPath']\n shell_code = obj['shellScript']\n if shell_path.endswith('/sh'):\n shell_code = shell_code.replace(\n 'ninja -C .',\n 'ninja -C \"../${CONFIGURATION}${EFFECTIVE_PLATFORM_NAME}\"')\n elif PYTHON_RE.search(shell_path):\n shell_code = shell_code.replace(\n '''ninja_params = [ '-C', '.' ]''',\n '''ninja_params = [ '-C', '../' + os.environ['CONFIGURATION']'''\n ''' + os.environ['EFFECTIVE_PLATFORM_NAME'] ]''')\n\n # Replace the build script in the object.\n obj['shellScript'] = shell_code",
"def update():\n update_code()\n update_env()\n symlink()\n set_current()\n permissions()",
"def pysh():\n pass",
"def generate(self):\n\n sbatch = \"#SBATCH\"\n self.script = \"#!/bin/bash \\n\"\n\n # TODO: build the string self.script that contains content of job script ",
"def _set_sys_executable(self) -> None:\n python_name: str = os.path.basename(sys.executable)\n if sys.platform == \"win32\":\n compiler_executable = os.path.join(self._env_path, \"Scripts\", python_name)\n else:\n compiler_executable = os.path.join(self._env_path, \"bin\", python_name)\n\n sys.executable = compiler_executable",
"def main(houdiniEnvPath):\n\t# TODO - set PYTHONPATH as well\n\tpathname = os.path.dirname(sys.argv[0])\n\ttoolsLoc = os.path.abspath(pathname)\n\tpythonLoc = os.path.join(os.path.abspath(os.path.dirname(toolsLoc)), 'python')\n\thPathRegx = re.compile(r'HOUDINI_PATH\\s*=\\s*\"*([\\w\\\\\\/;&-_\\s]+)')\n\tpyPathRegx = re.compile(r'PYTHONPATH\\s*=\\s*\"*([\\w\\\\\\/;&-_\\s]+)')\n\t_, tmp = mkstemp()\n\n\twith open(tmp, 'w') as output, open(houdiniEnvPath) as env:\n\t\treplacedHPath = False\n\t\treplacedPyPath = False\n\n\t\tprint('Reading houdini.env...')\n\n\t\tfor l in env:\n\t\t\thMatch = hPathRegx.match(l)\n\t\t\tpyMatch = pyPathRegx.match(l)\n\n\t\t\t# If the user has already defined HOUDINI_PATH, we just append ours\n\t\t\tif hMatch:\n\t\t\t\tprint('Found HOUDINI_PATH, appending')\n\t\t\t\toldPath = hMatch.group(1)\n\t\t\t\tnewPath = '{};{}'.format(oldPath, toolsLoc)\n\t\t\t\tpathParts = oldPath.split(';')\n\n\t\t\t\tpathParts.append(toolsLoc)\n\n\t\t\t\tpathParts = list(set(pathParts))\n\n\t\t\t\toutput.write('\\nHOUDINI_PATH = \"{};&\"'.format(';'.join(pathParts).replace(';&', '')))\n\t\t\t\treplacedHPath = True\n\t\t\t\tprint('Done appending to HOUDINI_PATH')\n\t\t\t# Same for PYTHONPATH..\n\t\t\telif pyMatch:\n\t\t\t\tprint('Found PYTHONPATH, appending')\n\t\t\t\toldPath = pyMatch.group(1)\n\t\t\t\tnewPath = '{};{}'.format(oldPath, pythonLoc)\n\t\t\t\tpathParts = oldPath.split(';')\n\n\t\t\t\tpathParts.append(pythonLoc)\n\n\t\t\t\tpathParts = list(set(pathParts))\n\n\t\t\t\toutput.write('\\nPYTHONPATH = \"{}\"'.format(';'.join(pathParts).replace(';&', '')))\n\t\t\t\treplacedPyPath = True\n\t\t\t\tprint('Done appending to PYTHONPATH')\n\t\t\telse:\n\t\t\t\toutput.write(l)\n\n\t\t# If we didn't find HOUDINI_PATH originally, we'll write it at the end\n\t\tif not replacedHPath:\n\t\t\tprint('HOUDINI_PATH not found, adding')\n\t\t\toutput.write('\\nHOUDINI_PATH = \"{};&\"'.format(toolsLoc))\n\t\t\tprint('Done')\n\n\t\t# Same for PYTHONPATH..\n\t\tif not replacedPyPath:\n\t\t\tprint('PYTHONPATH not found, adding')\n\t\t\toutput.write('\\nPYTHONPATH = \"{}\"'.format(pythonLoc))\n\t\t\tprint('Done')\n\n\t\tenv.close()\n\t\toutput.close()\n\n\tprint('Prepping to save houdini.env...')\n\tos.remove(houdiniEnvPath)\n\tmove(tmp, houdiniEnvPath)\n\tprint('Installation complete')",
"def update_uwsgitool():\n url = 'https://github.com/kyan001/PyMyApps/raw/master/UwsgiTool/uwsgiTool.py'\n if cct.update_file(__file__, url):\n cct.run_cmd('{py} \"{f}\"'.format(py=cct.get_py_cmd(), f=__file__))\n cit.bye(0)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create a linked instance of SharedArrays that uses the same data and shm_manager.
|
def fork(self):
return SharedArrays(self.array_headers, self.shm_manager)
|
[
"def create_array(self, key, proto: ArrayProto):\n self[key] = shared_array = SharedArray(proto, self.shm_manager.SharedMemory(size=proto.nbytes))\n return shared_array",
"def allocate_shared_mem(self):\n # Get array shape and data types\n if self.snapshot.snapshot_type == \"numpy\":\n self.input_shape, self.input_dtype = self.descriptor_calculator. \\\n read_dimensions_from_numpy_file(\n os.path.join(self.snapshot.input_npy_directory,\n self.snapshot.input_npy_file), read_dtype=True)\n\n self.output_shape, self.output_dtype = self.target_calculator. \\\n read_dimensions_from_numpy_file(\n os.path.join(self.snapshot.output_npy_directory,\n self.snapshot.output_npy_file), read_dtype=True)\n elif self.snapshot.snapshot_type == \"openpmd\":\n self.input_shape, self.input_dtype = self.descriptor_calculator. \\\n read_dimensions_from_openpmd_file(\n os.path.join(self.snapshot.input_npy_directory,\n self.snapshot.input_npy_file), read_dtype=True)\n\n self.output_shape, self.output_dtype = self.target_calculator. \\\n read_dimensions_from_openpmd_file(\n os.path.join(self.snapshot.output_npy_directory,\n self.snapshot.output_npy_file), read_dtype=True)\n else:\n raise Exception(\"Invalid snapshot type selected.\")\n\n # To avoid copies and dealing with in-place casting from FP64, restrict\n # usage to data in FP32 type (which is a good idea anyway to save\n # memory)\n if self.input_dtype != np.float32 or self.output_dtype != np.float32:\n raise Exception(\"LazyLoadDatasetSingle requires numpy data in \"\n \"FP32.\")\n\n # Allocate shared memory buffer\n input_bytes = self.input_dtype.itemsize * np.prod(self.input_shape)\n output_bytes = self.output_dtype.itemsize * np.prod(self.output_shape)\n input_shm = shared_memory.SharedMemory(create=True, size=input_bytes)\n output_shm = shared_memory.SharedMemory(create=True, size=output_bytes)\n\n self.input_shm_name = input_shm.name\n self.output_shm_name = output_shm.name\n\n input_shm.close()\n output_shm.close()\n self.allocated = True",
"def __init__(self):\n super(SharedList, self).__init__()\n self.mux = SharedDictionary.globalMux\n self.mux.acquire()\n self.listImpl = SharedDictionary.globalManager.list()\n self.mux.release()",
"def shared_np_array(shape):\n from multiprocessing import Array\n arr_len = np.product(shape)\n shared_array_base = Array(ctypes.c_double, arr_len)\n shared_array = np.ctypeslib.as_array(shared_array_base.get_obj())\n shared_array = shared_array.reshape(*shape)\n return shared_array",
"def init_shared_buffers(shared_buf_array):\n\n result = shared_buf_array.open(bufName=\"disturbance-gen-cmd-channel-buffer\", isProxy=False)\n if result == BUF_NOT_INITIALIZED or result == FAILURE:\n print \"Failed to open communication channel ! Not starting any threads !\"\n sys.exit(0)\n print \"Buffer opened: disturbance-gen-cmd-channel-buffer\"\n sys.stdout.flush()",
"def create_shared(self, name, ref):\n if self._shared is not None:\n raise RuntimeError('Can only set_shared once.')\n self._shared = GLShared(name, ref)",
"def share_memory_(self):\n if torch is None:\n raise ValueError('pytorch could not be loaded. It is required to share memory.')\n\n self._offsets = torch.as_tensor(self._offsets).share_memory_()\n self._pickle_data = torch.as_tensor(self._pickle_data).share_memory_()\n return self",
"def initShared(value):\n return theano.shared(np.array(value).astype(np.float32))",
"def _get_shared(self, array):\n\n dtype = self.NUMPY_TO_C_DTYPE[array.dtype.type]\n shape = array.shape\n shared = RawArray(dtype, array.reshape(-1))\n return np.frombuffer(shared, dtype).reshape(shape)",
"def prepare_shared_data(self):\n\t\tstructure_path = self.work_dir_local + \"/\" + self.inp_folder + \"/\" + \\\n\t\t\t\t\t\t self.namd_structure\n\t\tcoords_path = self.work_dir_local + \"/\" + self.inp_folder + \"/\" + \\\n\t\t\t\t\t self.namd_coordinates\n\t\tparams_path = self.work_dir_local + \"/\" + self.inp_folder + \"/\" + \\\n\t\t\t\t\t self.namd_parameters\n\n\t\tself.shared_files.append(self.namd_structure)\n\t\tself.shared_files.append(self.namd_coordinates)\n\t\tself.shared_files.append(self.namd_parameters)\n\n\t\tstruct_url = 'file://%s' % (structure_path)\n\t\tself.shared_urls.append(struct_url)\n \n\t\tcoords_url = 'file://%s' % (coords_path)\n\t\tself.shared_urls.append(coords_url) \n\n\t\tparams_url = 'file://%s' % (params_path)\n\t\tself.shared_urls.append(params_url)",
"def share_nodes():\n \n shared_node = SummitNode()\n for i in range(20):\n shared_node.cpu[i] = \"simulation:{}\".format(i)\n shared_node.cpu[21+i] = \"pdf_calc:{}\".format(i)\n\n return [shared_node]",
"def share_nodes_sockets():\n\n shared_sockets = SummitNode()\n for i in range(10):\n shared_sockets.cpu[i] = \"simulation:{}\".format(i)\n shared_sockets.cpu[21+i] = \"simulation:{}\".format(10+i)\n \n for i in range(10):\n shared_sockets.cpu[10+i] = \"pdf_calc:{}\".format(i)\n shared_sockets.cpu[21+10+i] = \"pdf_calc:{}\".format(10+i)\n\n return [shared_sockets]",
"def fillSharedMemDictForLocalStep(self, ShMem=None):\n if ShMem is None:\n ShMem = dict()\n if 'nu' in ShMem:\n fillSharedMemArray(ShMem['nu'], self.Post.nu)\n fillSharedMemArray(ShMem['cholB'], self._cholB('all'))\n fillSharedMemArray(ShMem['E_logdetL'], self._E_logdetL('all'))\n\n else:\n ShMem['nu'] = numpyToSharedMemArray(self.Post.nu)\n ShMem['cholB'] = numpyToSharedMemArray(self._cholB('all'))\n ShMem['E_logdetL'] = numpyToSharedMemArray(self._E_logdetL('all'))\n\n return ShMem",
"def _create_instances(self):\n #initialize the module\n _instance = self._module()\n self._instance_list = [_instance]",
"def set_shared_muse_instance(muse_instance):\n global _shared_muse_instance\n _shared_muse_instance = muse_instance",
"def getSharedData(self):\n return {}",
"def shared_object_use(self, so):\r\n if so in self.shared_objects:\r\n return\r\n so.use(self.reader, self.writer)\r\n self.shared_objects.append(so)",
"def set_shared_objects(self, shared_objects: Any = None) -> None:\n self.shared_objects = shared_objects",
"def shared_memory_size(data_buffers=None):\n\n shared_size = 0\n\n if data_buffers is None:\n data_buffers = inject.get_injectable(\"data_buffers\", {})\n\n for k, data_buffer in data_buffers.items():\n if isinstance(data_buffer, str) and data_buffer.startswith(\"sh.Dataset:\"):\n from sharrow import Dataset\n\n shared_size += Dataset.shm.preload_shared_memory_size(data_buffer[11:])\n continue\n try:\n obj = data_buffer.get_obj()\n except Exception:\n obj = data_buffer\n data = np.ctypeslib.as_array(obj)\n data_size = data.nbytes\n\n shared_size += data_size\n\n return shared_size"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create and return a shared array under the specified key. If the key already exists, overwrite it.
|
def create_array(self, key, proto: ArrayProto):
self[key] = shared_array = SharedArray(proto, self.shm_manager.SharedMemory(size=proto.nbytes))
return shared_array
|
[
"def add(self, key, value):\r\n index = self.hash(key)\r\n\r\n if self.array[index] is not None:\r\n # This index contains some values.\r\n # We need to check if the key we're adding already exists, this\r\n # way, we can update it with the new value, this way, we can update\r\n # it with the new value\r\n\r\n # kvp = key/value pair\r\n for kvp in self.array[index]:\r\n # If the key is found, then update the current value to the new\r\n # value.\r\n\r\n if kvp[0] == key:\r\n kvp[1] = value\r\n break\r\n\r\n # Remember for/else, the else executes after the loop completetes\r\n # normally. Meaning, if no breaks happen, it will execute this else\r\n # statement.\r\n else:\r\n # If no breaks happened, it means that no existing key was\r\n # found. Therefore, we can simply append it to the end of the\r\n # list at this index.\r\n self.array[index].append([key, value])\r\n\r\n else:\r\n # This index is empty. We will create an empty list and append the\r\n # key value pair.\r\n self.array[index] = []\r\n self.array[index].append([key, value])",
"def shared_np_array(shape):\n from multiprocessing import Array\n arr_len = np.product(shape)\n shared_array_base = Array(ctypes.c_double, arr_len)\n shared_array = np.ctypeslib.as_array(shared_array_base.get_obj())\n shared_array = shared_array.reshape(*shape)\n return shared_array",
"def _get_shared(self, array):\n\n dtype = self.NUMPY_TO_C_DTYPE[array.dtype.type]\n shape = array.shape\n shared = RawArray(dtype, array.reshape(-1))\n return np.frombuffer(shared, dtype).reshape(shape)",
"def fork(self):\n return SharedArrays(self.array_headers, self.shm_manager)",
"def makeKey(key, n):\n assert len(key) == n*n\n\n k = np.array(key)\n k.shape = (n, n)\n\n return np.mat(k)",
"def getSharedNumpy(*args):\n if len(args) == 1:\n return sharedNumpy(args[0])\n return [sharedNumpy(arg) for arg in args]",
"def _make_or_add(dictionary, key, item):\n if not dictionary.get(key):\n dictionary[key] = [item]\n return\n\n dictionary[key].append(item)",
"def memorize(self, key, item):\n self._memory[key].append(item)",
"def grow_dict_array(arrays_dict, arraylabel, new_elem): \n if arraylabel in arrays_dict.keys():\n arrays_dict[arraylabel] = np.append(arrays_dict[arraylabel], new_elem) \n else:\n arrays_dict[arraylabel] = np.array([new_elem])",
"def insert(self, key, value):\n # hash the key and map that hash to a bucket\n hash_key = self.hash_function(key) % len(self.buckets)\n\n bucket = self.buckets[hash_key]\n\n for i, val in enumerate(bucket):\n # check if exists, and override if so\n if val[0] == key:\n bucket[i] = (key, value)\n return\n # insert new\n bucket.append((key, value))",
"def copy(self, new_key=None):\r\n return self.dbobj.copy(new_key=new_key)",
"def insert(self, key, value):\n h = hashlib.sha256(key)\n index = int(h.hexdigest(), 16) % 10000\n self.hash_table[index].append([key, value])",
"def copy(self, key):\r\n copy = Set(key=key, db=self.db)\r\n copy.clear()\r\n copy |= self\r\n return copy",
"def AddKey(self, key, data=[], index=-1):\n\n # Default index simply appends\n if index == -1:\n index = len(self.header)\n\n self.header.insert(index, key)\n\n # Loop over data\n for i, item in enumerate(self.data):\n\n # Check value types\n if not data or type(data) != list:\n element = data\n else:\n element = data[i]\n\n # Add element at corresponding key\n self.data[i][key] = element",
"def dict_index(key, arr):\n bindex = {}\n for r in arr:\n rh = xxhash.xxh64(str(r)).hexdigest()\n bindex[r[key]] = rh\n return bindex",
"def getSharedData(self):\n return {}",
"def safeappend(self, key, item):\n ...",
"def append_keys(results, search_key, keys):\n if search_key in results:\n results[search_key][\"keys\"].extend(keys)\n else:\n results[search_key] = {\"keys\": keys}\n\n return results",
"def shared(base, key=None):\n from hypothesis.searchstrategy.shared import SharedStrategy\n return SharedStrategy(base, key)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create a player character. Initializes health, xp, score, char_class, name, and debug.
|
def __init__(self, name="Samalander",score=0, char_class='human',xp=10, debug=False):
self.char_class = char_class
self.xp = xp
self.level = 0
self.attack = 0
self.defense = 0
self.__setLevel()
self.__setMaxHP()
self.health = self.maxHealth
self.score = score
self.inventory = items.getItems()
for item in self.inventory:
self.inventory[item]['quantity'] = 0
self.carryLimit = 500
self.currentCarry = 0
self.name = name
self.icon = u"\U0001f355"
# Player Class stuff.
# Most of this is not implemented
# Sight radius This will later be a factor of player class
self.sight=3
self.attack = 5
self.defense = 5
self.__setAttack()
self.__setDefense()
|
[
"def create_char(char_values):\n consts = _CharacterCreator._get_constants(char_values)\n if consts is None:\n return 3\n\n if CharacterManager.does_char_with_name_exist(char_values[\"name\"]):\n return 2\n\n char_data = _CharacterCreator._try_create_char(char_values, consts)\n if char_data is None:\n return 1\n\n _CharacterCreator._add_default_skills(char_data, consts)\n _CharacterCreator._add_default_spells(char_data, consts)\n\n LOG.debug(\"Character \" + char_data.name + \" created.\")\n return 0",
"def func(self):\r\n\r\n # making sure caller is really a player\r\n self.character = None\r\n if utils.inherits_from(self.caller, \"src.objects.objects.Object\"):\r\n # An object of some type is calling. Convert to player.\r\n #print self.caller, self.caller.__class__\r\n self.character = self.caller\r\n if hasattr(self.caller, \"player\"):\r\n self.caller = self.caller.player\r\n\r\n if not self.args:\r\n self.caller.msg(\"Usage: create <character name>\")\r\n return\r\n charname = self.args.strip()\r\n old_char = managers.objects.get_objs_with_key_and_typeclass(charname, CHARACTER_TYPECLASS)\r\n if old_char:\r\n self.caller.msg(\"Character {c%s{n already exists.\" % charname)\r\n return\r\n # create the character\r\n\r\n new_character = create_object(CHARACTER_TYPECLASS, key=charname)\r\n if not new_character:\r\n self.caller.msg(\"{rThe Character couldn't be created. This is a bug. Please contact an admin.\")\r\n return\r\n # make sure to lock the character to only be puppeted by this player\r\n new_character.locks.add(\"puppet:id(%i) or pid(%i) or perm(Immortals) or pperm(Immortals)\" %\r\n (new_character.id, self.caller.id))\r\n\r\n # save dbref\r\n avail_chars = self.caller.db._character_dbrefs\r\n if avail_chars:\r\n avail_chars.append(new_character.id)\r\n else:\r\n avail_chars = [new_character.id]\r\n self.caller.db._character_dbrefs = avail_chars\r\n self.caller.msg(\"{gThe Character {c%s{g was successfully created!\" % charname)",
"def char_gen(stat_list: list, start_room: RoomGenerator.Room) -> Character:\n combat = 10-2*(stat_list.index('combat')+1)\n defense = 10-2*(stat_list.index('defense')+1)\n speed = 10-2*(stat_list.index('speed')+1)\n stat_prec = {'combat':combat, 'def':defense, 'speed':speed}\n stat_final = stat_gen(stat_prec, 1)\n char = Character('You', start_room, stat_final['combat'], stat_final['def'], stat_final['speed'])\n return char",
"def create():\n\n # Retrieve the data\n req_data = request.get_json()\n hat_data = req_data.pop('hat', None)\n\n # Check rules\n err = CharacterModel.verify_char_rules(req_data)\n if err:\n return custom_response(err, 400)\n\n # Create the character\n character = CharacterModel(req_data)\n\n # Create an associated hat if requested\n if hat_data:\n # Color doens't exist\n if (req_data.get('color') and\n req_data.get('color') not in ColorType._member_names_):\n return custom_response({'message': 'color doesn\\'t exist'}, 400)\n # Check hat rules\n err = HatModel.verify_hat_rules(req_data, hat_data)\n if err:\n return custom_response(err, 400)\n # Save the character and create the hat\n character.save()\n hat = HatModel({'color': hat_data.get('color'),\n 'character_id': character.id})\n hat.save()\n\n # Create a character without hat\n else:\n character.save()\n\n return custom_response({'message': 'character created'}, 201)",
"def func(self):\n\n # making sure caller is really an account\n self.character = None\n if utils.inherits_from(self.caller, \"evennia.objects.objects.Object\"):\n # An object of some type is calling. Convert to account.\n self.character = self.caller\n if hasattr(self.caller, \"account\"):\n self.caller = self.caller.account\n\n if not self.args:\n self.caller.msg(\"Usage: create <character name>\")\n return\n charname = self.args.strip()\n old_char = managers.objects.get_objs_with_key_and_typeclass(charname, CHARACTER_TYPECLASS)\n if old_char:\n self.caller.msg(\"Character |c%s|n already exists.\" % charname)\n return\n # create the character\n\n new_character = create_object(CHARACTER_TYPECLASS, key=charname)\n if not new_character:\n self.caller.msg(\n \"|rThe Character couldn't be created. This is a bug. Please contact an admin.\"\n )\n return\n # make sure to lock the character to only be puppeted by this account\n new_character.locks.add(\n \"puppet:id(%i) or pid(%i) or perm(Developer) or pperm(Developer)\"\n % (new_character.id, self.caller.id)\n )\n\n # save dbref\n avail_chars = self.caller.db._character_dbrefs\n if avail_chars:\n avail_chars.append(new_character.id)\n else:\n avail_chars = [new_character.id]\n self.caller.db._character_dbrefs = avail_chars\n self.caller.msg(\"|gThe character |c%s|g was successfully created!\" % charname)",
"def __init__(self, name, char, rank, descrip):\n\n self.name = name\n self.char = char\n self.rank = rank\n self.descrip = descrip\n self.skill = {\"Name\": self.name, \"Characteristic\": self.char, \"Rank\": self.rank,\n \"Description\": self.descrip}",
"def add_character(self, number):\n from evennia.utils import create\n setattr(self, \"account%s\" % number,\n create.create_account(\"TestAccount%s\" % number, email=\"test@test.com\", password=\"testpassword\",\n typeclass=self.account_typeclass))\n setattr(self, \"char%s\" % number,\n create.create_object(self.character_typeclass, key=\"Char%s\" % number,\n location=self.room1, home=self.room1))",
"def setup_character_and_account(self, character, account, num=\"\"):\n from world.dominion.setup_utils import setup_dom_for_player, setup_assets\n # the attributes that are for 1 don't have a number\n if num == 1:\n num = \"\"\n num = str(num)\n setattr(self, 'dompc%s' % num, setup_dom_for_player(account))\n setattr(self, \"assetowner%s\" % num, setup_assets(getattr(self, \"dompc%s\" % num), 0))\n setattr(self, \"roster_entry%s\" % num,\n self.active_roster.entries.create(player=getattr(self, \"account%s\" % num),\n character=getattr(self, \"char%s\" % num)))",
"def add_character(self, character, position, name='', symbol='', ):\n if name == '':\n name = character.name\n if symbol == '':\n symbol = character.name.strip()[0].lower()\n self.atlas[name] = position\n self.people[name] = character\n self.symbols[name] = symbol",
"def create_player(name):\n if name.lower() == \"ai\":\n return Player(name.upper(), 'computer')\n else:\n return Player(name.title(), 'human')",
"def __init__(self):\n # Main character id\n self.name = None\n self.p1 = None\n self.p1_is = None\n self.p2 = None\n self.p3 = None",
"def add_character(name, picture, list_of_songs):\n\tname = name.lower()\n\tchar = Character(name, picture, list_of_songs)\n\tchar.save()",
"def create_character(self):\n def on_face_default(button):\n \"\"\"Handle pressing SQUARE button.\"\"\"\n self.character_id = 0\n for b in self.character_buttons:\n self.objects.remove(b)\n self.mouse_handlers.remove(b.handle_mouse_event)\n self.character_buttons = []\n self.create_character()\n\n def on_face_duck(button):\n \"\"\"Handle pressing DUCK button.\"\"\"\n self.character_id = 1\n for b in self.character_buttons:\n self.objects.remove(b)\n self.mouse_handlers.remove(b.handle_mouse_event)\n self.character_buttons = []\n self.create_character()\n\n def on_face_horse(button):\n \"\"\"Handle pressing HORSE button.\"\"\"\n self.character_id = 2\n for b in self.character_buttons:\n self.objects.remove(b)\n self.mouse_handlers.remove(b.handle_mouse_event)\n self.character_buttons = []\n self.create_character()\n\n def on_back_from_character(button):\n \"\"\"Handle pressing RETURN from character switching menu.\"\"\"\n for b in self.character_buttons:\n self.objects.remove(b)\n self.mouse_handlers.remove(b.handle_mouse_event)\n for p in self.character_images:\n self.objects.remove(p)\n if self.mode == 'main':\n self.create_settings()\n elif self.mode == 'short':\n self.create_menu()\n\n # first rendering of character buttons\n if len(self.character_buttons) == 0:\n for i, (text, click_handler) in \\\n enumerate(((_(\"SQUARE\"), on_face_default),\n (_(\"DUCK\"), on_face_duck),\n (_(\"HORSE\"), on_face_horse),\n (_(\"RETURN\"), on_back_from_character))):\n if self.character_id == i:\n text_color = c.button_text_color_chosen\n else:\n text_color = c.button_text_color\n b = Button(c.character_offset_x,\n c.character_offset_y +\n (c.character_button_h + 50) * i,\n c.character_button_w,\n c.character_button_h,\n text,\n click_handler,\n padding=5,\n text_color=text_color)\n self.objects.append(b)\n self.character_buttons.append(b)\n self.mouse_handlers.append(b.handle_mouse_event)\n\n for i, (text, file_path) in \\\n enumerate(((_(\"SQUARE\"), 'images/square.png'),\n (_(\"DUCK\"), 'images/duck.png'),\n (_(\"HORSE\"), 'images/horse.png'))):\n p = Image(c.character_offset_x +\n c.character_button_w + c.image_w,\n c.character_offset_y +\n (c.character_button_h + 50) * i,\n c.image_w,\n c.image_h,\n file_path)\n\n self.objects.append(p)\n self.character_images.append(p)\n # re-rendering of character buttons\n else:\n for b in self.character_buttons:\n self.objects.append(b)\n self.mouse_handlers.append(b.handle_mouse_event)\n for p in self.character_images:\n self.objects.append(p)",
"def character_display():\n global player\n #sets the player to a new player created from character creation\n player = Player.create_player_from_form(request.form)\n\n return render_template(\"CharacterDisplay.html\", playerData=player.to_dict())",
"def createPlayerObject(cls, player_info):\n\t\tid, row, col, health, power, luck, gold, num_bosses_defeated = player_info\n\t\treturn Player(id, row, col, health, power, luck, gold, num_bosses_defeated)",
"def at_player_creation(self):\r\n # set an (empty) attribute holding the characters this player has\r\n lockstring = \"attrread:perm(Admins);attredit:perm(Admins);attrcreate:perm(Admins)\"\r\n self.attributes.add(\"_playable_characters\", [], lockstring=lockstring)",
"def __init__(self, player_name, player_color):\n self._player_name = player_name\n self._player_color = player_color\n self._reserve_pieces = 0\n self._captured_pieces = 0",
"def __init__(self, gender, profession='peasent'):\n # Ensure that the caller passes a gender\n if not gender in ('male','female'):\n raise ValueError(\"Only the values 'male' and 'female' may be passed.\")\n return 1\n # Ensure that the caller passed a proper profession.\n if not profession in ('peasent','farmer','shopkeeper','knight','warrior','aristocrat'):\n raise ValueError(\"Please pass a proper profession.\")\n return 2\n self.profession = profession\n self.gender = gender\n self.character = dict()\n # This contains the four stats (Strength, Intelligence, Constitution, and Luck) for the NPC.\n self.character['stats'] = self.__assignStats(self.profession)\n # This gives a name which is full unicode, e.g. u'John Smith'.\n self.character['name'] = names.get_full_name(gender=self.gender)\n # Assign the NPC's gender\n self.character['gender'] = self.gender\n # Assign the NPC's profession, i.e. their job.\n self.character['profession'] = self.profession\n # Set the NPC's personality.\n self.character['personality'] = self.__assignPersonality(self.character['stats'])",
"def gen_character(self, story_type):\n\n self.story_type = story_type\n\n # for generating a random gender & pov where indicated\n g = ['male', 'female']\n p = ['first', 'third']\n\n if self.story_type == \"1\" or self.story_type == \"2\":\n # generate 4 characters; takes: gender, pov, plural\n\n gender = random.choice(g)\n ch = characters.Character(gender, 'third', False)\n self.ch1 = ch.create_character()\n\n gender = random.choice(g)\n ch = characters.Character(gender, 'third', False)\n self.ch2 = ch.create_character()\n\n gender = random.choice(g)\n pov = random.choice(p)\n ch = characters.Character(gender, pov, False)\n self.ch3 = ch.create_character()\n\n gender = random.choice(g)\n ch = characters.Character(gender, 'third', False)\n self.ch4 = ch.create_character()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Used to set self.health.
|
def __setHealth(self,health):
self.health = health
|
[
"def set_health(self, new_health):\n self.health = new_health",
"def set_health(self, health):\r\n if(health > self._max_health):\r\n self._health = self._max_health\r\n elif(health < 0):\r\n self._health = 0\r\n else:\r\n self._health = health",
"def reset_health(self):\n self.health = 100",
"def modify_health(self, health):\n if self.health < 0:\n self.health = 0\n elif self.hunger >= 100 and health < 0:\n self.health += health*2\n elif self.health > 100:\n self.health = 100\n else:\n self.health += health",
"def regulatehealth(self):\n if self.health <= 0:\n self.kill()\n elif self.health > self.max_health:\n self.health = self.max_health",
"def healHealth(self):\n\t\tself.health += 1",
"def resetHealth(self):\n\t\tself.health = 10",
"def adjust_health(self,ammount):\n self.health += ammount",
"def _heal(self, hp):\n\n self._health += hp",
"def heal(self, health):\n self.health = min(self.health + health, 100)",
"def check_health(self):\n pass",
"def update_health_visual(self):\n pass",
"def getHealth(self):\n\t\treturn self.health",
"def raiseHealthStatus( self, valIn = 1 ):\n\t\tself.healthStatus += valIn\n\t\tif self.healthStatus > self.__statusMax:\n\t\t\tself.healthStatus = self.__statusMax",
"def regenHealth(self):\n\n now = pygame.time.get_ticks()\n if self.health < self.maxHealth:\n if now - self.last_regen > self.regen:\n self.last_regen = now\n self.health += self.regenSpeed",
"def heal(self):\n if self.rage >= 10:\n self.rage = max(self.rage - 10, 0)\n self.health = min(self.health + 5, 100)",
"def heal(self):\n self.current_health += int((self.base_health - self.current_health) * random.random())",
"def healthReduce(self):\n if self.trueHealth > 0:\n self.externHealth -= 0.1\n self.trueHealth -= 0.1\n else:\n self.dead = True",
"def lower_health(self, time_elapsed):\n if self.hunger == 100:\n self.health -= int(int(time_elapsed)\n * self.health_decline_rate * 2)\n self.health = self.valid_stats(self.health)\n else:\n self.health -= int(int(time_elapsed)\n * self.health_decline_rate)\n self.health = self.valid_stats(self.health)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
get the current player inventory.
|
def getInventory(self):
return self.inventory
|
[
"def inventory(self):\n return self.meta['inventory']",
"def inventory(self):\n if self.player.inventory != []:\n items = ''\n for things in self.player.inventory:\n items += things.name + ', '\n items1 = items[0:-2]\n print ('Inventory: ' + items1)\n else:\n print(self.msg_no_inventory)\n\n # print( '[NOT IMPLEMENTED] inventory' ) ",
"def get_inventory(self):\n return self.inventory_level",
"def inventory(self): \n runner = self._get_runner('rinv') \n self.result_dict = DefaultInventoryManager().get_inventory_info(runner, ['all'])\n return self._get_result()",
"def inventory(self, name):\n return self._inventory_map[name]",
"def GetInventory(self):\n if not self._inventory : \n self._inventory = Session.ExecCommand(\"show chassis hardware\")\n return self._inventory",
"def inventory(self):\n # Check empty inventory.\n try:\n check = self.spawn_tiny().stdin(\"INVENTORY\")\n check.stdout(re.escape(\"Your inventory is empty\"), str_output=\"Your inventory is empty\")\n except Error as error:\n raise Error(rationale=f\"Let the player know they have no items.\\n\"\n f\" {error}\")\n\n # Check having keys.\n check = self.spawn_tiny()\n moves = [\"IN\", \"TAKE keys\", \"INVENTORY\"]\n\n for move in moves:\n check.stdout(\"> \")\n check.stdin(move, prompt=False)\n\n check.stdout(re.escape(\"KEYS\"), str_output=\"KEYS\")\n check.stdout(re.escape(\"a set of keys\"), str_output=\"KEYS\")",
"def get_inventory(self,named=True):\n # iterate through all items, append to out variable\n out = []\n for item in self.inventory:\n if named:\n value = item.name\n else:\n value = item\n\n out.append(value)\n\n return out",
"def show_inventory(self):\n print('Inventory: \\n')\n for itm in self.inventory:\n print(itm.name)\n print('Quantity: ' + str(itm.quantity))\n print('Description: ' + itm.description)\n print()",
"def inventory(self):\n \n print(\"What do we got at this party?\")\n print(\"Attendees: {0}\".format(self.attendees))\n print(\"Attendees w/hats: {0}\".format(self.party_hatted_attendees))\n print(\"Beers: {0}\".format(self.beers))\n print(\"Sangrias: {0}\".format(self.sangrias))\n print(\"Wines: {0}\".format(self.wines))\n print(\"Lemonades: {0}\".format(self.lemonades))\n print(\"=\"*40)",
"def player_data(self):\n data = PlayerData(self.inventory) \n data.load_meter_data(self)\n return data",
"def print_inventory(self):\n\t\tprint(\"You have beaten {} tiles!\".format(World.how_many_tile()))\n\t\tprint(\"You have {} guesses remaining\".format(self.guesses_remaining))\n\n\t\tprint(\"And you are located at {}, {}\".format(self.location_x, self.location_y))\n\t\tfor item in self.inventory:\n\t\t\tprint(item, '\\n')",
"def item_select(self):\r\n layout = [[sg.Text(\"Inventory:\", border_width=0)]]\r\n for i, item in enumerate(self.player.inventory):\r\n layout.append([sg.Button(item, key=i, size=(10, 1), border_width=0)])\r\n layout.append([sg.Button(\"Exit\", key=\"EXIT\", size=(10, 1), button_color=(\"#edf2ce\", \"#444444\"))])\r\n window = sg.Window(\"Inventory Viewer\", layout, size=(200, 500), element_justification='c')\r\n choice = window.read()[0]\r\n window.close()\r\n if choice is None:\r\n return None\r\n if choice == \"EXIT\":\r\n return None\r\n else:\r\n print(choice)\r\n return self.player.inventory[choice]",
"def inventory_menu(header):\n if len(var.inventory) == 0:\n options = ['Inventory is empty.']\n else:\n options = []\n for item in var.inventory:\n text = item.name\n # show additional information, in case it's equipped\n if item.equipment and item.equipment.is_equipped:\n text = text + ' (on ' + item.equipment.slot + ')'\n options.append(text)\n\n index = menu(header, options, var.INVENTORY_WIDTH)\n\n #if an item was chosen, return it\n if index is None or len(var.inventory) == 0:\n return None\n return var.inventory[index].item",
"def getEquipped(self) -> List[InventoryItem]:\r\n\t\treturn sorted((i for i in self.items.values() if i.is_equipped), key=lambda i: i.slot)",
"def get(self):\n options = {\n \"table\": T[\"inventory\"],\n \"limit\": request.args.get(\"limit\", None),\n \"offset\": request.args.get(\"offset\", None),\n }\n return db.query(options)",
"def inventory_status(self):\n\n if self.inventory > 0:\n status = _('Available')\n else:\n status = _('Unavailable')\n return status",
"def get_bios_inventory(ctx):\n\n bios_recipe = BIOSRecipe(ctx.obj['client'])\n settings = bios_recipe.inventory()\n\n print(json.dumps(settings, indent=4, sort_keys=True))",
"def retrieveInventoryData(client):\n resource_classes = ['dpm-resources']\n api_features = client.consoles.console.list_api_features()\n if 'secure-boot-with-certificates' in api_features:\n resource_classes.append('certificate-resources')\n\n inventory_list = client.get_inventory(resource_classes)\n error_msgs = []\n for item in inventory_list:\n if item.get('class') == 'inventory-error':\n msg = (\"Inventory error {} for resource with URI {}: {}; \"\n \"Details: {}\".format(\n item.get('inventory-error-code'),\n item.get('uri'),\n item.get('inventory-error-text'),\n dict(item.get('inventory-error-details'))))\n error_msgs.append(msg)\n if error_msgs:\n raise ConsistencyError(\n \"Some resources could not be fully inventoried:\\n {}\".\n format('\\n '.join(error_msgs)))\n return inventory_list"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
used to set self.score
|
def __setScore(self, score):
self.score = score
return self.score
|
[
"def set_input_score(self, score):\n pass",
"def set_score(self,new_score):\n self.__fitness = new_score",
"def change_score(self, new_score):\n raise NotImplementedError",
"def reset_score(self):\n\n self.score = 0",
"def qm_score(self, qm_score):\n self._qm_score = qm_score",
"def setNodeScore(self, score):\n self.score = score",
"def increase_score(self):\n self.score += 1\n self.update_score()",
"def update_score(self, to_add):\n self.score += to_add",
"def reset_score(self):\n self.score = 0\n self._set_score()",
"def update_score(self, score):\n change_text(self.score_text, \"SCORE: % 4d\" % score)",
"def reset_score(self):\n if self.score > self.high_score:\n self.memorize()\n self.score = 0\n self.update_score()",
"def set_score(self, a, b, score):\n\t\tself.match_matrix[(a,b)] = score",
"def update_score(self, score: float):\n if self.score == score:\n return\n self.score = score\n for edge in self._in_edges:\n edge.top.taint()",
"def _update_score(self, hit_asteroid):\n size = hit_asteroid.get_size()\n self.__score += GameRunner.ASTR_HIT_VALS[size]\n self.__screen.set_score(self.__score)",
"def update_scores(self):\n\t\tself.score_black.text = str(self.data['score'][1])\n\t\tself.score_white.text = str(self.data['score'][0])",
"def add_score(self, score):\r\n self.add_scores([score])",
"def setMinScore(self, value) -> None:\n ...",
"def add_score(self, player_score):\n self.score += player_score",
"def modify_score(self, score):\n self.opponents[0][1] += float(score)\n self.opponents[1][1] += 1 - float(score)\n if float(score) == 1:\n self.winner = f\"{self.opponents[0][0].last_name} {self.opponents[0][0].first_name}\"\n elif float(score) == 0.5:\n self.winner = \"match nul\"\n else:\n self.winner = f\"{self.opponents[1][0].last_name} {self.opponents[1][0].first_name}\""
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
used to set self.level
|
def setLevel(self):
self.level = int(floor(sqrt(self.xp)))
|
[
"def SetLevel(self, level):\n self.level = level",
"def set_new_level(self, level):\r\n\r\n self.property_set(\"level\",\r\n Sample(0, int(level), unit=\"%\"))",
"def set_level(self, grade_id):\n pass",
"def setLevel(self, level):\n self.Logger.setLevel(self.LevelDict[level])",
"def setLevel(self, logLevel):\n\n pass",
"def write_level(self):\n self.clear()\n self.write(f\"Level: {self.level}\",\n align=TEXT_ALIGNMENT_LEVEL, font=TEXT_FONT)",
"def set_level(self, elem):\n tag_level = int(elem.tag[-1])\n if not self.is_base_level_adjusted:\n self.base_level = self.base_level + 1 - tag_level\n self.is_base_level_adjusted = True\n level = tag_level + self.base_level\n if level > 6:\n level = 6\n elem.tag = \"h%d\" % level",
"def prep_level(self):\r\n\t\tself.level_image = self.font.render(str(self.stats.level),True\r\n\t\t,self.text_color,self.ai_settings.bg_color)\r\n\t\t\r\n\t\t#display level below score\r\n\t\tself.level_rect = self.level_image.get_rect()\r\n\t\tself.level_rect.right = self.score_rect.right\r\n\t\tself.level_rect.top = self.score_rect.bottom +10",
"def __levelShow(self):\n self.__showLevel = Text(text = \"Level\", score = self._level + 1,\\\n width = LEVEL_X, height = LEVEL_Y)\n self._textList.add(self.__showLevel)",
"def test_update_level(self):\n pass",
"def get_level(self):\r\n return self.__level",
"def _set_stats_at_level_(self, level):\n self.current_level = level\n self.hp = self._use_growth_formula(self.hp_min, self.hp_max, self.hp_scale)\n self.hp += self.hp_bonus\n self.hp_base = self.hp\n self._set_stats_with_pluses_('hp', self.hp_plus)\n self.base_atk = self._use_growth_formula(self.base_atk_min, self.base_atk_max, self.base_atk_scale)\n self.base_atk += self.base_atk_bonus\n self.base_base_atk = self.base_atk\n self._set_stats_with_pluses_('atk', self.base_atk_plus)\n self.rcv = self._use_growth_formula(self.rcv_min, self.rcv_max, self.rcv_scale)\n self.rcv += self.rcv_bonus\n self.rcv_base = self.rcv\n self._set_stats_with_pluses_('rcv', self.rcv_plus)",
"def set_levels(self):\n\n for m in self.assets.keys():\n\n m_dict = self.assets[m]\n\n if \"levels\" in m_dict.keys():\n pass\n elif \"unique\" in m_dict.keys() and m_dict[\"unique\"]:\n self.assets[m][\"levels\"] = 0\n else:\n self.assets[m][\"levels\"] = 3",
"def prep_level(self):\r\n self.level_image=self.font.render(str(self.stats.level),True,self.text_color,self.ai_settings.bg_color)\r\n #Position the level below the score.\r\n self.level_rect=self.level_image.get_rect()\r\n self.level_rect.right=self.score_rect.right\r\n self.level_rect.top=self.score_rect.bottom+10",
"def test_set_level_valid(self):\n\n self.player.set_level(2)\n self.assertEqual(2, self.player.get_level())\n\n self.player.set_level(5)\n self.assertEqual(5, self.player.get_level())\n\n self.player.set_level(10)\n self.assertEqual(10, self.player.get_level())",
"def setLevelAttribute(self, level: 'char const *') -> \"void\":\n return _coin.ScXMLLogElt_setLevelAttribute(self, level)",
"def update_level(self):\n\n # Since we obviously do not update the level of a leaf, the if self.leaf condition\n # can be omitted.\n if self.r_child is None:\n # Every node that is not a leaf has at least a left child, in case it does not\n # have a right child, the node's level is the increment by 1 of the level of\n # its left child.\n self.level = self.l_child.level + 1\n\n else:\n # In case the node has both children, it takes the increment by 1 of the\n # minimum level. The reason is that when the tree evolves by adding new\n # leaves, this node will eventually have its children change until reaching\n # the mentioned minimum level.\n self.level = min(self.l_child.level, self.r_child.level) + 1",
"def set_data_level(self, data_level):\n if data_level < 4 and data_level > 0:\n self.data_level = data_level\n else:\n print \"ERROR: data level must be 1, 2 or 3\"",
"def advance_level(self, level):\n self.curr_level += 1\n self.init_map(level)\n self.init_character()\n self.turn = 0"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|